text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def utc_offset_by_timezone(timezone_name):
    """Return the UTC offset of the given timezone, in whole hours.

    Arguments
    ---------
    timezone_name: str
        A string with a name of a timezone.

    Returns
    -------
    int
        The UTC offset of the given timezone, in hours.
    """
    tz = pytz.timezone(timezone_name)
    offset_seconds = tz.utcoffset(utc_time()).total_seconds()
    return int(offset_seconds / SECONDS_IN_HOUR)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def localize_datetime(datetime_obj, timezone_name):
    """Convert a UTC-aligned datetime into the given timezone.

    Arguments
    ---------
    datetime_obj : datetime.datetime
        A datetime object depicting a specific point in time, aligned to UTC.
    timezone_name: str
        A string with a name of a timezone.

    Returns
    -------
    datetime.datetime
        The same point in time, aligned by the given timezone.
    """
    target_tz = pytz.timezone(timezone_name)
    return datetime_obj.replace(tzinfo=pytz.utc).astimezone(target_tz)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _load_build(self):
"""See `pickle.py` in Python's source code.""" |
# Hooks pickle's LOAD_BUILD opcode to post-process deserialized `Ref`
# objects. NOTE(review): `Ref`, `self.node` and the unpickler stack layout
# are project-defined; assumes the penultimate stack item is the object
# being built and the top item is its state - confirm against the
# project's pickling protocol.
# if the ctor. function (penultimate on the stack) is the `Ref` class...
if isinstance(self.stack[-2], Ref):
# Ref.__setstate__ will know it's a remote ref if the state is a tuple
self.stack[-1] = (self.stack[-1], self.node)
self.load_build() # continue with the default implementation
# detect our own refs sent back to us
ref = self.stack[-1]
# NOTE(review): assumes ref.uri.node identifies the originating node.
if ref.uri.node == self.node.nid:
ref.is_local = True
ref._cell = self.node.guardian.lookup_cell(ref.uri)
# dbg(("dead " if not ref._cell else "") + "local ref detected")
del ref.node # local refs never need access to the node
else: # pragma: no cover
self.load_build() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def path_to_zip(path):
    """Compress the directory at `path` into a ZIP archive.

    Args:
        path (str): Path to the directory.

    Returns:
        str: Path to the zipped file (in /tmp).

    Raises:
        IOError: If `path` does not exist.
    """
    if not os.path.exists(path):
        raise IOError("%s doesn't exists!" % path)
    # Reserve a stable temporary filename for the archive.
    with tempfile.NamedTemporaryFile(delete=False) as ntf:
        zip_fn = ntf.name
    with zipfile.ZipFile(zip_fn, mode="w") as zip_file:
        for root, dirs, files in os.walk(path):
            for fn in files:
                full_path = os.path.join(root, fn)
                # Bugfix: store entries relative to `path` instead of
                # embedding the absolute on-disk path into the archive.
                arcname = os.path.relpath(full_path, path)
                zip_file.write(full_path, arcname=arcname)
    return zip_fn
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_as_base64(fn):
    """Convert given `fn` to base64 and return it.

    This method does the process in a not-so-much memory consuming way by
    streaming through a temporary file.

    Args:
        fn (str): Path to the file which should be converted.

    Returns:
        bytes: File content encoded as base64 (line-wrapped, as produced
        by :func:`base64.encode`).
    """
    # Bugfix: open in binary mode - base64.encode() requires binary file
    # objects and raises on a text-mode handle under Python 3.
    with open(fn, "rb") as unpacked_file:
        with tempfile.TemporaryFile() as b64_file:
            base64.encode(unpacked_file, b64_file)
            b64_file.flush()
            b64_file.seek(0)
            return b64_file.read()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _pad_input(incoming):
    """Avoid IndexError and KeyError by ignoring un-related fields.

    Example: '{0}{autored}' becomes '{{0}}{autored}'.

    Positional arguments:
    incoming -- the input unicode value.

    Returns:
    Padded unicode value.
    """
    # Escape every brace, then un-escape the braces of known color tags.
    escaped = incoming.replace('{', '{{').replace('}', '}}')
    for tag in _BASE_CODES:
        escaped = escaped.replace('{{%s}}' % tag, '{%s}' % tag)
    return escaped
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_input(incoming):
"""Performs the actual conversion of tags to ANSI escaped codes. Provides a version of the input without any colors for len() and other methods. Positional arguments: incoming -- the input unicode value. Returns: 2-item tuple. First item is the parsed output. Second item is a version of the input without any colors. """ |
# Keep only the tags that actually occur in the input string.
codes = dict((k, v) for k, v in _AutoCodes().items() if '{%s}' % k in incoming)
# Map each used tag to its ANSI escape sequence ('' when colors disabled).
color_codes = dict((k, '' if _AutoCodes.DISABLE_COLORS else '\033[{0}m'.format(v)) for k, v in codes.items())
# Escape literal braces so str.format() only substitutes known tags.
incoming_padded = _pad_input(incoming)
output_colors = incoming_padded.format(**color_codes)
# Simplify: '{b}{red}' -> '\033[1m\033[31m' -> '\033[1;31m'
# Longest groups first so overlapping adjacent runs are replaced safely.
groups = sorted(set(_RE_GROUP_SEARCH.findall(output_colors)), key=len, reverse=True) # Get codes, grouped adjacent.
groups_simplified = [[x for n in _RE_NUMBER_SEARCH.findall(i) for x in n.split(';')] for i in groups]
groups_compiled = ['\033[{0}m'.format(';'.join(g)) for g in groups_simplified] # Final codes.
assert len(groups_compiled) == len(groups) # For testing.
output_colors_simplified = output_colors
for i in range(len(groups)):
output_colors_simplified = output_colors_simplified.replace(groups[i], groups_compiled[i])
# Color-free variant, used by callers for len() and similar.
output_no_colors = _RE_SPLIT.sub('', output_colors_simplified)
# Strip any remaining color codes.
if _AutoCodes.DISABLE_COLORS:
output_colors_simplified = _RE_NUMBER_SEARCH.sub('', output_colors_simplified)
return output_colors_simplified, output_no_colors |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_tags():
"""Lists the available tags. Returns: Tuple of tuples. Child tuples are four items: ('opening tag', 'closing tag', main ansi value, closing ansi value). """ |
codes = _AutoCodes()
# Pair each opening tag with its '/'-prefixed closing tag and both codes.
grouped = set([(k, '/{0}'.format(k), codes[k], codes['/{0}'.format(k)]) for k in codes if not k.startswith('/')])
# Add half-tags like /all.
found = [c for r in grouped for c in r[:2]]
missing = set([('', r[0], None, r[1]) if r[0].startswith('/') else (r[0], '', r[1], None)
for r in _AutoCodes().items() if r[0] not in found])
grouped |= missing
# Sort.
# Build the payload in display order, removing each sorted slice from the
# working set so later filters only see the remaining tags.
payload = sorted([i for i in grouped if i[2] is None], key=lambda x: x[3]) # /all /fg /bg
grouped -= set(payload)
payload.extend(sorted([i for i in grouped if i[2] < 10], key=lambda x: x[2])) # b i u flash
grouped -= set(payload)
payload.extend(sorted([i for i in grouped if i[0].startswith('auto')], key=lambda x: x[2])) # auto colors
grouped -= set(payload)
payload.extend(sorted([i for i in grouped if not i[0].startswith('hi')], key=lambda x: x[2])) # dark colors
grouped -= set(payload)
payload.extend(sorted(grouped, key=lambda x: x[2])) # light colors
return tuple(payload) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _set_color(self, color_code):
"""Changes the foreground and background colors for subsequently printed characters. Since setting a color requires including both foreground and background codes (merged), setting just the foreground color resets the background color to black, and vice versa. This function first gets the current background and foreground colors, merges in the requested color code, and sets the result. However if we need to remove just the foreground color but leave the background color the same (or vice versa) such as when {/red} is used, we must merge the default foreground color with the current background color. This is the reason for those negative values. Positional arguments: color_code -- integer color code from _WINDOWS_CODES. """ |
# Get current color code.
current_fg, current_bg = self._get_colors()
# Handle special negative codes. Also determine the final color code.
if color_code == -39:
final_color_code = self.default_fg | current_bg # Reset the foreground only.
elif color_code == -49:
final_color_code = current_fg | self.default_bg # Reset the background only.
elif color_code == -33:
final_color_code = self.default_fg | self.default_bg # Reset both.
elif color_code == -8:
final_color_code = current_fg # Black background.
else:
# Merge the new code with whichever half (fg/bg) it does not replace.
new_is_bg = color_code in self.ALL_BG_CODES
final_color_code = color_code | (current_fg if new_is_bg else current_bg)
# Set new code.
_WindowsCSBI.WINDLL.kernel32.SetConsoleTextAttribute(self.win32_stream_handle, final_color_code) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_mimetype(self):
    """
    Use the ending of the template name to infer the response's
    Content-Type header, falling back to 'text/html'.
    """
    template_name = self.get_template_names()[0]
    matches = (
        mimetype
        for extension, mimetype in turrentine_settings.TURRENTINE_MIMETYPE_EXTENSIONS
        if template_name.endswith(extension)
    )
    return next(matches, 'text/html')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, request, *args, **kwargs):
"""Check user authentication if the page requires a login. We could do this by overriding dispatch() instead, but we assume that only GET requests will be required by the CMS pages. """ |
try:
page = self.object = self.get_object()
except Http404:
# If APPEND_SLASH is set and our url has no trailing slash,
# look for a CMS page at the alternate url:
if settings.APPEND_SLASH and not self.kwargs.get('path', '/').endswith('/'):
return self._try_url_with_appended_slash()
else:
raise Http404
# Check request.user's credentials in accessing this page:
if page.staff_only and not request.user.is_staff:
# Block out non-staff users on restricted pages.
# Django 1.4 will introduce better HTTP 403 support, but until then
# we'll just render a plain "permission denied" template (which can be overridden):
return render(request, 'turrentine/403.html', status=403)
# NOTE(review): callable-style `is_anonymous()` is pre-Django-1.10 API;
# on modern Django it is a property - confirm target Django version.
if page.login_required and request.user.is_anonymous():
redirect_url = '%s?next=%s' % (settings.LOGIN_URL, self.kwargs.get('path', ''))
return HttpResponseRedirect(redirect_url)
else:
# Mark stored HTML fields as safe so templates render the markup.
self.object = self._mark_html_fields_as_safe(self.object)
context = self.get_context_data(object=self.object)
return self.render_to_response(context, content_type=self.get_mimetype()) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _try_url_with_appended_slash(self):
    """
    Try our URL with an appended slash. If a published CMS page is found
    at that URL, permanently redirect to it; otherwise raise Http404.
    """
    candidate = '%s/' % self.kwargs.get('path', '')
    if not candidate.startswith('/'):
        candidate = '/%s' % candidate
    page_exists = CMSPage.objects.published().filter(url=candidate).exists()
    if not page_exists:
        raise Http404
    return HttpResponsePermanentRedirect(candidate)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_rule_name(self, name):
    """Validate rule name.

    Arguments:
        name (string): Rule name.

    Returns:
        bool: ``True`` if rule name is valid.

    Raises:
        SerializerError: If the name is empty, starts with a forbidden
        character or contains a forbidden character.
    """
    if not name:
        # Bugfix: dropped a pointless str.format(name) call on a message
        # that contains no placeholder (the argument was silently ignored).
        raise SerializerError("Rule name is empty")
    if name[0] not in RULE_ALLOWED_START:
        msg = "Rule name '{}' must starts with a letter"
        raise SerializerError(msg.format(name))
    for item in name:
        if item not in RULE_ALLOWED_CHARS:
            msg = ("Invalid rule name '{}': it must only contains "
                   "letters, numbers and '_' character")
            raise SerializerError(msg.format(name))
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_variable_name(self, name):
    """Validate variable name.

    Arguments:
        name (string): Property name.

    Returns:
        bool: ``True`` if variable name is valid.

    Raises:
        SerializerError: If the name is empty, starts with a forbidden
        character or contains a forbidden character.
    """
    if not name:
        # Bugfix: dropped a pointless str.format(name) call on a message
        # that contains no placeholder (the argument was silently ignored).
        raise SerializerError("Variable name is empty")
    if name[0] not in PROPERTY_ALLOWED_START:
        msg = "Variable name '{}' must starts with a letter"
        raise SerializerError(msg.format(name))
    for item in name:
        if item not in PROPERTY_ALLOWED_CHARS:
            msg = ("Invalid variable name '{}': it must only contains "
                   "letters, numbers and '_' character")
            raise SerializerError(msg.format(name))
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def value_splitter(self, reference, prop, value, mode):
    """Split a string into a list of items.

    Default behavior is to split on white spaces.

    Arguments:
        reference (string): Reference name used when raising possible error.
        prop (string): Property name used when raising possible error.
        value (string): Property value to split.
        mode (string): Splitter mode. Default should come from
            ``ManifestSerializer._DEFAULT_SPLITTER``. Available splitters:

            * ``white-space``: Simply split a string on white spaces;
            * ``json-list``: Assume the string is a JSON list to parse.

    Returns:
        list: Splitted values.
    """
    items = []
    if mode == 'json-list':
        try:
            items = json.loads(value)
        except json.JSONDecodeError as e:
            # Bugfix: removed a leftover debugging print(value) and a
            # stray trailing quote in the error message.
            msg = ("Reference '{ref}' raised JSON decoder error when "
                   "splitting values from '{prop}': {err}")
            raise SerializerError(msg.format(ref=reference, prop=prop,
                                             err=e))
    else:
        if len(value) > 0:
            items = value.split(" ")
    return items
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serialize_to_json(self, name, datas):
    """Serialize given datas to any object from assumed JSON string.

    Arguments:
        name (string): Name only used inside possible exception message.
        datas (dict): Datas to serialize.

    Returns:
        object: Object depending from JSON content.
    """
    payload = datas.get('object', None)
    if payload is None:
        raise SerializerError(
            "JSON reference '{}' lacks of required 'object' variable".format(name)
        )
    try:
        return json.loads(payload, object_pairs_hook=OrderedDict)
    except json.JSONDecodeError as e:
        msg = "JSON reference '{}' raised error from JSON decoder: {}"
        raise SerializerError(msg.format(name, e))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serialize_to_list(self, name, datas):
    """Serialize given datas to a list structure.

    List structure is very simple and only requires an ``items`` variable
    which is a string of values separated with an empty space. Every other
    property is ignored.

    Arguments:
        name (string): Name only used inside possible exception message.
        datas (dict): Datas to serialize.

    Returns:
        list: List of serialized reference datas.
    """
    splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
    items = datas.get('items', None)
    if items is None:
        msg = ("List reference '{}' lacks of required 'items' variable "
               "or is empty")
        raise SerializerError(msg.format(name))
    return self.value_splitter(name, 'items', items, mode=splitter)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serialize_to_string(self, name, datas):
    """Serialize given datas to a string.

    Simply return the value from the required variable ``value``.

    Arguments:
        name (string): Name only used inside possible exception message.
        datas (dict): Datas to serialize.

    Returns:
        string: Value.
    """
    if datas.get('value', None) is None:
        msg = ("String reference '{}' lacks of required 'value' variable "
               "or is empty")
        raise SerializerError(msg.format(name))
    return datas['value']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_meta_references(self, datas):
    """Get manifest enabled references declaration.

    This required declaration is read from the
    ``styleguide-metas-references`` rule which requires either a
    ``--names`` or an ``--auto`` variable:

    Manually
        ``--names`` defines a list of names to enable; every other rule is
        ignored. Names must be valid rule names (letters, numbers and the
        ``_`` character only; use ``_`` as word separator).

    Automatic
        When ``--auto`` is non empty, every reference rule is enabled.

    If both variables are defined, the manual mode wins.

    Arguments:
        datas (dict): Data where to search for meta references
            declaration. This is commonly the fully parsed manifest.

    Returns:
        list: A list of reference names.
    """
    rule = datas.get(RULE_META_REFERENCES, {})
    if not rule:
        msg = "Manifest lacks of '.{}' or is empty"
        raise SerializerError(msg.format(RULE_META_REFERENCES))
    if rule.get('names', None):
        names = rule.get('names').split(" ")
    elif rule.get('auto', None):
        names = self.get_available_references(datas)
    else:
        msg = ("'.{}' either require '--names' or '--auto' variable "
               "to be defined")
        raise SerializerError(msg.format(RULE_META_REFERENCES))
    for candidate in names:
        self.validate_rule_name(candidate)
    return names
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_reference(self, datas, name):
    """Get serialized reference datas.

    A reference rule name starts with ``styleguide-reference-`` followed
    by the reference name. A reference can contain a ``--structure``
    variable set to ``"flat"``, ``"list"``, ``"string"``, ``"json"`` or
    ``"nested"`` (the default) to select the serialization structure.

    Arguments:
        datas (dict): Data where to search for reference declaration.
            This is commonly the fully parsed manifest.
        name (string): Reference name to get and serialize.

    Returns:
        collections.OrderedDict: Serialized reference datas.

    Raises:
        SerializerError: If the reference is missing, has an invalid
        structure mode or an invalid variable name.
    """
    rule_name = '-'.join((RULE_REFERENCE, name))
    if rule_name not in datas:
        msg = "Unable to find enabled reference '{}'"
        raise SerializerError(msg.format(name))
    properties = datas.get(rule_name)
    # Search for "structure" variable, defaulting to nested mode.
    structure_mode = 'nested'
    if 'structure' in properties:
        requested = properties['structure']
        if requested not in ('flat', 'list', 'string', 'json', 'nested'):
            # Bugfix: report the invalid requested mode instead of the
            # (still default) `structure_mode` local variable.
            msg = "Invalid structure mode name '{}' for reference '{}'"
            raise SerializerError(msg.format(requested, name))
        structure_mode = requested
        del properties['structure']
    # Validate variable names
    for item in properties.keys():
        self.validate_variable_name(item)
    # Perform serialize according to structure mode
    serializers = {
        'flat': self.serialize_to_flat,
        'list': self.serialize_to_list,
        'string': self.serialize_to_string,
        'nested': self.serialize_to_nested,
        'json': self.serialize_to_json,
    }
    return serializers[structure_mode](name, properties)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_available_references(self, datas):
    """Get available manifest reference names.

    Every rule starting with prefix from ``nomenclature.RULE_REFERENCE``
    is an available reference. Only name validation is performed on these
    references.

    Arguments:
        datas (dict): Data where to search for reference declarations.

    Returns:
        list: List of every available reference names. This is the real
        name unprefixed.
    """
    prefix_length = len(RULE_REFERENCE) + 1
    return [key[prefix_length:] for key in datas
            if key.startswith(RULE_REFERENCE)]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_enabled_references(self, datas, meta_references):
    """Get enabled manifest references declarations.

    Enabled references are defined through meta references declaration;
    every other reference is ignored.

    Arguments:
        datas (dict): Data where to search for reference declarations.
            This is commonly the fully parsed manifest.
        meta_references (list): List of enabled reference names.

    Returns:
        collections.OrderedDict: Serialized enabled references datas.
    """
    return OrderedDict(
        (section, self.get_reference(datas, section))
        for section in meta_references
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serialize(self, datas):
    """Serialize datas to manifest structure with metas and references.

    Only references are returned; metas are assigned to attribute
    ``ManifestSerializer._metas``.

    Arguments:
        datas (dict): Data where to search for reference declarations.
            This is commonly the fully parsed manifest.

    Returns:
        collections.OrderedDict: Serialized enabled references datas.
    """
    enabled_names = self.get_meta_references(datas)
    self._metas = OrderedDict()
    self._metas['references'] = enabled_names
    return self.get_enabled_references(datas, enabled_names)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contribute_to_class(self, cls, name):
    """
    Add each of the names and fields in the ``fields`` attribute to the
    model the relationship field is applied to, and set up the related
    item save and delete signals for calling ``related_items_changed``.
    """
    for field in cls._meta.many_to_many:
        if isinstance(field, self.__class__):
            # Bugfix: format arguments were misordered, producing
            # "(Model.Model, name.field)" instead of the intended two
            # "Model.field" pairs in the error message.
            e = "Multiple %s fields are not supported (%s.%s, %s.%s)" % (
                self.__class__.__name__, cls.__name__, name,
                cls.__name__, field.name)
            raise ImproperlyConfigured(e)
    self.related_field_name = name
    super(BaseGenericRelation, self).contribute_to_class(cls, name)
    # Not applicable to abstract classes, and in fact will break.
    if not cls._meta.abstract:
        for (name_string, field) in self.fields.items():
            if "%s" in name_string:
                name_string = name_string % name
            # Skip fields that already exist on the model.
            extant_fields = cls._meta._forward_fields_map
            if name_string in extant_fields:
                continue
            if field.verbose_name is None:
                field.verbose_name = self.verbose_name
            cls.add_to_class(name_string, copy(field))
        # Add a getter function to the model we can use to retrieve
        # the field/manager by name.
        getter_name = "get_%s_name" % self.__class__.__name__.lower()
        cls.add_to_class(getter_name, lambda self: name)
        # Re-run related_items_changed whenever a related item is saved
        # or deleted.
        sender = self.rel.to
        post_save.connect(self._related_items_changed, sender=sender)
        post_delete.connect(self._related_items_changed, sender=sender)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _related_items_changed(self, **kwargs):
"""Ensure that the given related item is actually for the model this field applies to, and pass the instance to the real ``related_items_changed`` handler. """ |
# Resolve the concrete model the generic relation instance points at.
for_model = kwargs["instance"].content_type.model_class()
if for_model and issubclass(for_model, self.model):
instance_id = kwargs["instance"].object_pk
try:
instance = for_model.objects.get(id=instance_id)
except self.model.DoesNotExist:
# Instance itself was deleted - signals are irrelevant.
return
if hasattr(instance, "get_content_model"):
# Downcast to the most specific content model when available.
instance = instance.get_content_model()
related_manager = getattr(instance, self.related_field_name)
self.related_items_changed(instance, related_manager) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def related_items_changed(self, instance, related_manager):
    """
    Stores the number of comments on the instance.

    A custom ``count_queryset`` method gets checked for on the manager,
    allowing managers to implement custom count logic.
    """
    try:
        comment_total = related_manager.count_queryset()
    except AttributeError:
        comment_total = related_manager.count()
    field_template = list(self.fields.keys())[0]
    count_field_name = field_template % self.related_field_name
    setattr(instance, count_field_name, comment_total)
    instance.save()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def formfield(self, **kwargs):
    """
    Provide the custom form widget for the admin, since there isn't a
    form field mapped to ``GenericRelation`` model fields.
    """
    # Local import to avoid circular imports at module load time.
    from yacms.generic.forms import KeywordsWidget
    kwargs.update(widget=KeywordsWidget)
    return super(KeywordsField, self).formfield(**kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_form_data(self, instance, data):
"""The ``KeywordsWidget`` field will return data as a string of comma separated IDs for the ``Keyword`` model - convert these into actual ``AssignedKeyword`` instances. Also delete ``Keyword`` instances if their last related ``AssignedKeyword`` instance is being removed. """ |
# Local import to avoid circular imports at module load time.
from yacms.generic.models import Keyword
related_manager = getattr(instance, self.name)
# Get a list of Keyword IDs being removed.
old_ids = [str(a.keyword_id) for a in related_manager.all()]
new_ids = data.split(",")
removed_ids = set(old_ids) - set(new_ids)
# Remove current AssignedKeyword instances.
related_manager.all().delete()
# Convert the data into AssignedKeyword instances.
if data:
data = [related_manager.create(keyword_id=i) for i in new_ids]
# Remove keywords that are no longer assigned to anything.
Keyword.objects.delete_unused(removed_ids)
super(KeywordsField, self).save_form_data(instance, data) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contribute_to_class(self, cls, name):
"""Swap out any reference to ``KeywordsField`` with the ``KEYWORDS_FIELD_string`` field in ``search_fields``. """ |
super(KeywordsField, self).contribute_to_class(cls, name)
# Resolve the generated "<name>_string" companion field name from the
# first template in self.fields.
string_field_name = list(self.fields.keys())[0] % \
self.related_field_name
if hasattr(cls, "search_fields") and name in cls.search_fields:
try:
# Mapping-style search_fields: grab the weight for re-keying.
weight = cls.search_fields[name]
except TypeError:
# search_fields is a sequence.
index = cls.search_fields.index(name)
search_fields_type = type(cls.search_fields)
cls.search_fields = list(cls.search_fields)
cls.search_fields[index] = string_field_name
cls.search_fields = search_fields_type(cls.search_fields)
else:
# Mapping: move the weight onto the generated string field.
del cls.search_fields[name]
cls.search_fields[string_field_name] = weight |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def related_items_changed(self, instance, related_manager):
    """
    Stores the keywords as a single space-joined string for searching.
    """
    assigned_keywords = related_manager.select_related("keyword")
    joined = " ".join(str(assigned.keyword) for assigned in assigned_keywords)
    field_template = list(self.fields.keys())[0]
    string_field_name = field_template % self.related_field_name
    # Only touch the database when the stored string actually changed.
    if getattr(instance, string_field_name) != joined:
        setattr(instance, string_field_name, joined)
        instance.save()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def related_items_changed(self, instance, related_manager):
    """
    Calculates and saves the rating count, sum and average.
    """
    values = [rating.value for rating in related_manager.all()]
    num_ratings = len(values)
    total = sum(values)
    average = total / num_ratings if num_ratings else 0
    prefix = self.related_field_name
    setattr(instance, "%s_count" % prefix, num_ratings)
    setattr(instance, "%s_sum" % prefix, total)
    setattr(instance, "%s_average" % prefix, average)
    instance.save()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_new_packages(apt_output, include_automatic=False):
    """
    Given the output from an apt or aptitude command, determine which
    packages are newly-installed.
    """
    pattern = r'^The following NEW packages will be installed:[\r\n]+(.*?)[\r\n]\w'
    match = re.search(pattern, apt_output, re.DOTALL | re.MULTILINE)
    if match is None:
        return []
    raw_names = re.findall(r'[\w{}\.+-]+', match.group(1))
    packages = [PackageName.from_apt(raw) for raw in raw_names]
    if include_automatic:
        return packages
    return [package for package in packages if not package.automatic]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def file_iterator(filehandle, verbose=False):
    """Iterate over a file and yield stripped lines. Optionally show progress.

    :param filehandle: an open file-like object, or a path to a file (str).
    :param verbose: if True, show progress via ProgressIndicator; falls back
                    silently (with a warning) for streams without a name.
    """
    # Bugfix: use isinstance() rather than comparing the type name string.
    opened_here = isinstance(filehandle, str)
    if opened_here:
        filehandle = open(filehandle)
    if verbose:
        try:
            pind = ProgressIndicator(totalToDo=os.path.getsize(filehandle.name),
                                     messagePrefix="completed",
                                     messageSuffix="of processing " +
                                     filehandle.name)
        except AttributeError:
            sys.stderr.write("BEDIterator -- warning: " +
                             "unable to show progress for stream")
            verbose = False
    try:
        for line in filehandle:
            # chomp just the newline char, leave everything else alone, so we
            # can handle empty columns in the first and last positions
            line = line.rstrip('\n')
            if verbose:
                pind.done = filehandle.tell()
                pind.showProgress()
            if line == "":
                continue
            yield line
    finally:
        # Bugfix: close the handle when this function opened it.
        if opened_here:
            filehandle.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _build_entry(parts, existing_list_d, key_value, key_field_num, key_is_field_number, header=None, output_type=OutputType.error_on_dups, ignore_missing_keys=False, keep_key_col=False):
""" Build and add an entry to existing_list_d. If the key is a field number, the entry added will be a list of lists. The inner list contains one item per column. and the outer list allows more than one entry to be stored per key (but if allow_duplicates is false, an exception will be raised if more than one needs to be stored). :param parts: the (already tokenized) list of column entries. :param existing_list_d: a dictionary indexed by key value containing the the already processed entries. The new entry will be added to this using <key_value> as a key :param key_value: the key value to use to add the new entry to <existing_list_d> :param key_field_num: which column is the key_value from (number, indexed from 0) :param key_is_field_number: True if the <key_value> is actually the column index, rather than a column name :param header: list giving the names of the columns. Can be None if columns have no names (no header) """ |
    # Reject (or silently skip, when requested) rows whose key column is empty.
    if key_value.strip() == "":
        if ignore_missing_keys:
            return
        raise MissingKeyError("missing key value")
    if key_value in existing_list_d:
        # Duplicate key: what happens depends on the requested output type.
        # NOTE(review): docstring mentions 'allow_duplicates' but the actual
        # control is the output_type parameter.
        if output_type is OutputType.error_on_dups:
            raise DuplicateKeyError(key_value + " appears multiple times as key")
        elif (output_type is OutputType.all_pairwise_combinations or
              output_type is OutputType.column_wise_join):
            pass  # dups okay for these output methods
        else:
            raise ValueError("Unknown duplicate handling method")
    else:
        existing_list_d[key_value] = []
    if key_is_field_number:
        # the entry in the dictionary is a list, minus the key field, in the
        # order they occur.
        ne = [parts[i] for i in range(0, len(parts))
              if i != key_field_num or keep_key_col]
        existing_list_d[key_value].append(ne)
    else:
        # the entry in the dictionary is another dictionary indexed by
        # the header value
        ne = {}
        for i in range(0, len(parts)):
            if i == key_field_num and not keep_key_col:
                continue
            else:
                ne[header[i]] = parts[i]
        existing_list_d[key_value].append(ne)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __output_unpaired_vals(d_vals, used_ff_keys, f_f_header, sf_d, s_f_header,
                           missing_val, out_handler, outfh, delim="\t"):
    """Output first-file lines whose keys were never paired.

    For every key in d_vals not present in used_ff_keys, emit the first-file
    values with the second file's columns filled by missing_val.

    :raises MissingValueError: if missing_val is None, since placeholders are
                               required for the unmatched columns.
    """
    if missing_val is None:
        raise MissingValueError("Need missing value to output " +
                                " unpaired lines")
    for k in d_vals:
        if k not in used_ff_keys:
            f_f_flds = d_vals[k]
            if s_f_header is not None:
                # header known: build one dict row of all-missing values
                s_f_flds = [dict(zip(s_f_header, [missing_val] * len(s_f_header)))]
            else:
                # no header: infer the second file's column count from any of
                # its entries.  fix: was sf_d[d_vals.keys()[0]][0], which is
                # not subscriptable on Python 3 and indexed sf_d with a
                # first-file key that may not exist there.
                s_f_num_cols = len(sf_d[next(iter(sf_d))][0])
                s_f_flds = [[missing_val] * s_f_num_cols]
            out_handler.write_output(outfh, delim, s_f_flds, f_f_flds,
                                     s_f_header, f_f_header)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_key_field(ui, ui_option_name, default_val=0, default_is_number=True):
    """Read a key-field option from a UI object.

    :return: tuple (value, is_field_number); the second element tells whether
             the value is a zero-based column number rather than a column name.
             Defaults are returned when the option is not set.
    """
    if not ui.optionIsSet(ui_option_name):
        return default_val, default_is_number
    raw = ui.getValue(ui_option_name)
    try:
        # users supply 1-based column numbers; convert to 0-based
        return int(raw) - 1, True
    except ValueError:
        # not numeric, so it must be a column name
        return raw, False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def populate_unpaired_line(d_vals, f_f_header, missing_val=None):
    """Build a placeholder row for a key with no match in the other file.

    :param d_vals: dict of already-parsed entries; used only to infer the
                   column count when no header is available.
    :param f_f_header: list of column names, or None if the file has no header.
    :param missing_val: value used to fill every column.
    :return: one-element list holding the placeholder row (dict keyed by
             header name, or a plain list when there is no header).
    :raises MissingValueError: if missing_val is None.
    """
    if missing_val is None:
        raise MissingValueError("Need missing value to output " +
                                " unpaired lines")
    if f_f_header is not None:
        f_f_flds = [dict(zip(f_f_header, [missing_val] * len(f_f_header)))]
    else:
        assert (len(d_vals) > 0)
        # fix: d_vals.keys()[0] is not subscriptable on Python 3
        f_f_num_cols = len(d_vals[next(iter(d_vals))][0])
        f_f_flds = [[missing_val] * f_f_num_cols]
    return f_f_flds
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_mock_open_side_effect(string_d, stream_d):
    """Build a side-effect callable suitable for mocking open().

    :param string_d: maps file names to string contents (wrapped in StringIO).
    :param stream_d: maps file names to ready-made stream objects.
    :return: callable mimicking open(); raises IOError for unknown names.
    """
    overlap = set(string_d.keys()).intersection(set(stream_d.keys()))
    assert (len(overlap) == 0)

    def mock_open_side_effect(*args, **kwargs):
        fname = args[0]
        if fname in string_d:
            return StringIO.StringIO(string_d[fname])
        if fname in stream_d:
            return stream_d[fname]
        raise IOError("No such file: " + fname)
    return mock_open_side_effect
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_header(self, out_strm, delim, f1_num_fields, f2_num_fields,
                 f1_header=None, f2_header=None, missing_val=None):
    """Write the header line for a joined file.

    A header is emitted only when at least one input file has one; a file
    without a header contributes missing_val placeholder columns.

    :param out_strm: stream to write to.
    :param delim: column delimiter.
    :param f1_num_fields: number of columns in the first file.
    :param f2_num_fields: number of columns in the second file.
    :raises InvalidHeaderError: headers differ, one is missing, and no
                                missing_val was supplied.
    """
    headers_differ = f1_header != f2_header
    either_missing = f1_header is None or f2_header is None
    if headers_differ and either_missing and missing_val is None:
        raise InvalidHeaderError("Cannot generate output header when one " +
                                 "input file is missing a header and no " +
                                 "missing value was provided to replace " +
                                 "unknown entries.")
    if f1_header is not None and f2_header is not None:
        parts = list(f1_header) + list(f2_header)
    elif f1_header is None and f2_header is not None:
        parts = [missing_val] * f1_num_fields + list(f2_header)
    elif f1_header is not None and f2_header is None:
        parts = list(f1_header) + [missing_val] * f2_num_fields
    else:
        return  # neither file has a header; emit nothing
    out_strm.write(delim.join(parts) + "\n")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clean_restructuredtext(form_instance, content):
    """Validate that *content* is well-formed reStructuredText.

    :raises ValidationError: listing the parsing problems found.
    :return: the (unmodified) content.
    """
    if not content:
        return content
    problems = SourceReporter(content)
    if problems:
        raise ValidationError(map(map_parsing_errors, problems))
    return content
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def returns(*checkers_args):
""" Create a decorator for validating function return values. Parameters checkers_args: positional arguments A single functions to apply to the output of the decorated function. If a tuple is returned by the decorated function, multiple function can be listed and are assumed to match by possition to the elements in the returned tuple. Examples -------- @returns(df_checker) def do_something_with_df(df, args*, kw**):
print(df.head()) return df @returns(df_checker1, df_checker2) def do_something_with_dfs(df1, df2, args*, kw**):
# Do somethign with both dfs return (df1, df2) """ |
    @decorator
    def run_checkers(func, *args, **kwargs):
        # Run the wrapped function, then apply each checker positionally to
        # the corresponding element of the returned tuple.
        ret = func(*args, **kwargs)
        if type(ret) != tuple:
            # single return value: treat it as a 1-tuple for uniform handling
            ret = (ret, )
        assert len(ret) == len(checkers_args)
        if checkers_args:
            for idx, checker_function in enumerate(checkers_args):
                if callable(checker_function):
                    # NOTE(review): the checker's result is discarded;
                    # checkers are presumably expected to raise on failure.
                    result = checker_function(ret[idx])
        return ret
    return run_checkers
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def regular_generic_msg(hostname, result, oneline, caption):
    '''Format the result of a non-command module run for display.'''
    template = "%s | %s >> %s\n"
    if oneline:
        return template % (hostname, caption, utils.jsonify(result))
    # multi-line mode pretty-prints the JSON result
    return template % (hostname, caption, utils.jsonify(result, format=True))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def command_generic_msg(hostname, result, oneline, caption):
    '''Format the result of a command run for display.

    :param result: dict possibly containing rc/stdout/stderr/msg entries.
    '''
    # default rc is the string '0' to match the displayed format
    rc = result.get('rc', '0')
    stdout = result.get('stdout', '')
    stderr = result.get('stderr', '')
    msg = result.get('msg', '')

    hostname = hostname.encode('utf-8')
    caption = caption.encode('utf-8')

    if not oneline:
        # fix: previously re-read rc with a different (integer) default
        # instead of using the rc fetched above
        buf = "%s | %s | rc=%s >>\n" % (hostname, caption, rc)
        if stdout:
            buf += stdout
        if stderr:
            buf += stderr
        if msg:
            buf += msg
        return buf + "\n"
    if stderr:
        return "%s | %s | rc=%s | (stdout) %s (stderr) %s" % (hostname, caption, rc, stdout, stderr)
    return "%s | %s | rc=%s | (stdout) %s" % (hostname, caption, rc, stdout)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def host_report_msg(hostname, module_name, result, oneline):
    '''Summarize the JSON results for a particular host.'''
    failed = utils.is_failed(result)
    caption = 'FAILED' if failed else 'success'
    # command-style modules get raw stdout/stderr formatting, unless the run
    # is async (has a job id) or the output could not be parsed
    is_command = (module_name in ['command', 'shell', 'raw'] and
                  'ansible_job_id' not in result and
                  result.get('parsed', True) != False)
    if is_command:
        return command_generic_msg(hostname, result, oneline, caption)
    return regular_generic_msg(hostname, result, oneline, caption)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _increment(self, what, host):
''' helper function to bump a statistic '''
    # Mark the host as processed, then bump the named counter dict
    # (e.g. self.ok, self.failures) for this host.
    self.processed[host] = 1
    prev = (getattr(self, what)).get(host, 0)
    getattr(self, what)[host] = prev+1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def compute(self, runner_results, setup=False, poll=False, ignore_errors=False):
''' walk through all results and increment stats '''
    # Walk every per-host result and bump the appropriate counter.
    for (host, value) in runner_results.get('contacted', {}).iteritems():
        # a host failed if it says so explicitly or returned a non-zero rc
        if not ignore_errors and (('failed' in value and bool(value['failed'])) or
                                  ('rc' in value and value['rc'] != 0)):
            self._increment('failures', host)
        elif 'skipped' in value and bool(value['skipped']):
            self._increment('skipped', host)
        elif 'changed' in value and bool(value['changed']):
            # setup/poll runs never count toward 'changed'
            if not setup and not poll:
                self._increment('changed', host)
            self._increment('ok', host)
        else:
            # for async polls, only a finished job counts as ok
            if not poll or ('finished' in value and bool(value['finished'])):
                self._increment('ok', host)
    # hosts we could not reach at all
    for (host, value) in runner_results.get('dark', {}).iteritems():
        self._increment('dark', host)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def summarize(self, host):
    '''Return the per-category counters recorded for *host*.'''
    return {
        'ok': self.ok.get(host, 0),
        'failures': self.failures.get(host, 0),
        'unreachable': self.dark.get(host, 0),
        'changed': self.changed.get(host, 0),
        'skipped': self.skipped.get(host, 0),
    }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_get_dbs(connection):
    """Return the set of databases visible through *connection*.

    Asserts that at least one database exists, which proves read access.

    :param connection: rethinkdb.net.DefaultConnection
    :raises: ReqlDriverError, AssertionError
    """
    databases = rethinkdb.db_list().run(connection)
    remote_dbs = set(databases)
    assert remote_dbs
    return remote_dbs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_brain_requirements(connection, remote_dbs, requirements):
    """Check that rethinkdb holds every required database and table.

    :param connection: rethinkdb.net.DefaultConnection
    :param remote_dbs: set of database names present remotely
    :param requirements: dict mapping database name -> required tables
    :return: True when all requirements are met
    :raises: AssertionError or Reql*Error
    """
    for db_name in requirements:
        assert (db_name in remote_dbs), "database {} must exist".format(db_name)
        tables = frozenset(rethinkdb.db(db_name).table_list().run(connection))
        for required_table in requirements[db_name]:
            assert (required_table in tables), "{} must exist in {}".format(required_table, db_name)
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def brain_post(connection, requirements=None):
    """Power-on self test for the brain.

    Verifies the brain is appropriately seeded and ready for use.

    :param connection: rethinkdb.net.DefaultConnection
    :param requirements: dict of required databases -> required tables
    :return: the verified connection
    :raises: AssertionError when the brain is not ready
    """
    assert isinstance(connection, DefaultConnection)
    dbs = validate_get_dbs(connection)
    assert validate_brain_requirements(connection, dbs, requirements)
    assert validate_write_access(connection)
    return connection
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connect(host=None, port=rethinkdb.DEFAULT_PORT, timeout=20, verify=True, **kwargs):
""" RethinkDB semantic connection wrapper raises <brain.connection.BrainNotReady> if connection verification fails :param verify: <bool> (default True) whether to run POST :param timeout: <int> max time (s) to wait for connection :param kwargs: <dict> passthrough rethinkdb arguments :return: """ |
    # fall back to the stage-appropriate default host
    if not host:
        host = DEFAULT_HOSTS.get(check_stage_env())
    connection = None
    tries = 0
    time_quit = time() + timeout
    # retry (with a short sleep) until a verified connection is obtained
    # or the overall deadline passes
    while not connection and time() <= time_quit:
        tries += 1
        connection = _attempt_connect(host, port, timeout/3, verify, **kwargs)
        if not connection:
            sleep(0.5)
    if not connection:
        raise BrainNotReady(
            "Tried ({}:{}) {} times at {} second max timeout".format(host,
                                                                     port,
                                                                     tries,
                                                                     timeout))
    return connection
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def link_to_dashboard(self, dashboard_id=None, panel_id=None, **kwargs):
    r"""Link this sensor to a dashboard ('*' links to all dashboards).

    For a one-dimensional sensor the underlying value is linked directly;
    multi-dimensional sensors link each of their sub-sensors instead.  All
    keyword arguments (label, icon, type, flat, inline, ...) are forwarded
    unchanged.
    """
    if self._dimensions != 1:
        for sub_sensor in self._sub_sensors[:self._dimensions]:
            sub_sensor.link_to_dashboard(dashboard_id, panel_id, **kwargs)
    else:
        self._sensor_value.link_to_dashboard(dashboard_id, panel_id, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _new_sensor_reading(self, sensor_value):
""" Call this method to signal a new sensor reading. This method handles DB storage and triggers different events. :param value: New value to be stored in the system. """ |
    # NOTE(review): readings are dropped only when BOTH _active and _enabled
    # are false -- confirm that 'and' (rather than 'or') is intended here.
    if not self._active and not self._enabled:
        return
    if self._dimensions > 1:
        # fan each component of the reading out to its sub-sensor
        for dimension in range(0, self._dimensions):
            value = sensor_value[dimension]
            self._sub_sensors[dimension]._new_sensor_reading(value)
    else:
        # scalar sensor: store directly (DB storage and events are handled
        # by the value object's setter downstream)
        self._sensor_value.value = sensor_value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_path(label, pth):
    """Validate *pth*, expand it to an absolute path, and return it.

    Exits the process with an error message when the path is None or does
    not exist.

    :param label: human-readable name for the path, used in error messages.
    :param pth: absolute, relative, '.'- or '~'-prefixed path.
    """
    if pth is None:
        sys.exit("no %s path given" % label)
    if pth.startswith("/"):
        pass  # already absolute
    elif pth[0] in (".", "~"):
        # fix: realpath() does not expand '~'; expanduser() must run first
        pth = os.path.realpath(os.path.expanduser(pth))
    else:
        pth = os.getcwd() + os.sep + pth
    if not os.path.exists(pth):
        sys.exit("%s path %s does not exist" % (label, pth))
    return pth
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_svg(svgstr, size, filepath, target):
    """Render an SVG string to a square PDF or PNG file.

    :param svgstr: the SVG document as a string.
    :param size: output width/height.
    :param filepath: destination file path.
    :param target: "PDF" or "PNG"; anything else exits with an error.
    """
    # PREPARE CONVERSION PER TYPE
    if target == "PDF":
        img = cairo.PDFSurface(filepath, size, size)
    elif target == "PNG":
        img = cairo.ImageSurface(cairo.FORMAT_ARGB32, size, size)
    else:
        # fix: was system.exit (NameError); sys.exit is the intended call
        sys.exit("unknown file type conversion")
    # PROCESS: render the SVG scaled to the requested size
    ctx = cairo.Context(img)
    handler = rsvg.Handle(None, svgstr)
    iw, ih, fw, fh = handler.get_dimension_data()
    ctx.translate(0, 0)
    ctx.scale(size / fw, size / fh)  # assumes bigger source SVG template
    handler.render_cairo(ctx)
    # FINALIZE PER TYPE: PDF surfaces write on finish; PNG needs an explicit write
    if target == "PNG":
        img.write_to_png(filepath)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def var_dump(*obs):
    """Print structured information about each given object (list, tuple, etc.)."""
    for obj in obs:
        # fix: the dump string was previously bound to the name 'str',
        # shadowing the builtin; dead counter and commented-out code removed
        dumped = var_dump_output(obj, 0, ' ', '\n', True)
        print(dumped.strip())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def difficulties_by_voivodeship(voivodeship, dt=None):
    """Get traffic difficulties in a voivodeship.

    :param voivodeship: voivodeship numeric value.
    :param dt: datetime for the data; defaults to datetime.now() per call
               (fix: the old default was evaluated once at import time).
    :return: list of difficulties parsed from the service's JSON reply.
    """
    if dt is None:
        dt = datetime.now()
    session = requests.Session()
    session.headers.update({'User-Agent': USER_AGENT})
    session.headers.update({'X-Requested-With': 'XMLHttpRequest'})
    # prime the session (cookies) before hitting the JSON endpoint
    session.get('{}/Mapa/'.format(HOST))
    url = '{}/Mapa/PodajUtrudnieniaWWojewodztwie?KodWojewodztwa={}&_={}'.format(HOST, str(voivodeship), _datetime_to_asp_date(dt))
    response = session.get(url)
    json_data = response.json() if len(response.text) > 0 else []
    return json_data
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def table_to_string(headers, table, align="", *, lines=("-", "-+-", " | ")):
    """Yield the lines of a nicely formatted text table.

    :param headers: list of column header strings (may be empty).
    :param table: list of rows, each a list of cell strings.
    :param align: string of '<', '^', '>' alignment specs, one per column;
                  unspecified columns default to '<'.
    :param lines: (row_sep, row_intersection, col_sep) characters.
    """
    header_separator, header_junction, row_separator = lines
    # fix: derive the column count from the data when no headers are given
    # (previously an empty header list produced zero columns for any table)
    n_cols = len(headers) if headers else (len(table[0]) if table else 0)
    # pad the alignment string with '<' so every column has a spec
    align = ("{0:<<" + str(n_cols) + "}").format(align or "")
    all_lens = [tuple(len(c) for c in r) for r in table]
    if headers:
        all_lens.append(tuple(len(h) for h in headers))
    max_lens = [max(r[i] for r in all_lens) for i in range(n_cols)]
    col_outs = ["{{{0}: {1}{2}}}".format(i, align[i], w) for i, w in enumerate(max_lens)]
    fmt_str = row_separator.join(col_outs)
    if headers:
        yield fmt_str.format(*headers)
        yield header_junction.join((header_separator * ml for ml in max_lens))
    for row in table:
        yield fmt_str.format(*row)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ratio_and_percentage(current, total, time_remaining):
    """Return a "current / total (NN% completed)" progress string."""
    percent = int(current / total * 100)
    return "{} / {} ({}% completed)".format(current, total, percent)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ratio_and_percentage_with_time_remaining(current, total, time_remaining):
    """Return the progress ratio, percentage, and estimated time remaining."""
    percent = int(current / total * 100)
    return "{} / {} ({}% completed) (~{} remaining)".format(
        current, total, percent, time_remaining)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_generator(self):
    """Return a generator for this frange object.

    Yields successive samples from start (inclusive) to stop (exclusive)
    in step-sized steps, as defined by this instance's slice.
    """
    sl = self.slice
    return drange(sl.start, sl.stop, sl.step)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_field_lookups(field_type, nullable):
    """Return the lookup list for *field_type*, with 'isnull' appended when
    the field is nullable."""
    lookups = LOOKUP_TABLE.get(field_type)
    if nullable:
        return lookups + ['isnull']
    return lookups
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def match_field(field_class):
    """Walk the MRO of *field_class* and return the first class found in
    LOOKUP_TABLE.

    :raises Exception: when no class in the MRO is known to the table.
    """
    for cls in field_class.mro():
        # membership test directly on the dict; no need to build a key list
        if cls in LOOKUP_TABLE:
            return cls
    # could not match the field class
    raise Exception('{0} None Found '.format(field_class))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_template(template, target):
"""Given a dictionary template containing at least most of the relevant information and a dictionary target containing sections, options and values, consolidate target into a new confmanager object and return it.""" |
    c = ConfManager('')
    for section in template:
        c.add_section(section)
        for option, o in template[section].items():
            try:
                # coerce the target's raw value to the type of the template's
                # default value
                value = type(template[section][option]['value'])(target[section][option])
            except KeyError:
                # option (or section) absent from target: use the template default
                value = o['value']
            finally:
                # NOTE(review): this mutates the caller's template dict by
                # deleting the 'value' key -- confirm that is intended.
                if 'value' in o:
                    del o['value']
            # remaining keys of o become keyword metadata for the option
            c.set(section, option, value, **o)
    return c
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_json(target, json, create_sections = False, create_options = False):
    """Import values from *json* into the confmanager *target*.

    *json* may be a dict (section -> {option: value}) or an iterable of
    (section, items) pairs.  Sections and options missing from *target*
    are created only when the corresponding flag is set; otherwise they
    are skipped.
    """
    is_dict = isinstance(json, dict)
    for entry in json:
        section = entry if is_dict else entry[0]
        if not target.has_section(section):
            if not create_sections:
                continue
            target.add_section(section)
        items = json[entry].items() if is_dict else entry[1]
        for k, v in items:
            # Don't add options that shouldn't be there.
            if create_options or target.has_option(section, k):
                target.set(section, k, v)
    return target
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_sitemap(sitemap: typing.Mapping, prefix: list=None):
    """Yield (url_segments, view) pairs from a nested sitemap definition.

    Keys are single URI segments; Mapping values recurse with the key
    appended to the prefix, callable values terminate a route.  An
    empty-string key attaches its callable to the current prefix.

    :param sitemap: mapping of segment -> sub-mapping or callable view.
    :param prefix: base url segments prepended to every generated route.
    :raises ValueError: for values that are neither mappings nor callables.
    """
    base = [] if prefix is None else prefix
    for segment, node in sitemap.items():
        if isinstance(node, collections.abc.Mapping):
            yield from generate_sitemap(node, base + [segment])
        elif callable(node):
            # fix: the original rebound `prefix` here, so the extended prefix
            # leaked into subsequent sibling iterations of this loop
            route = base + [segment] if segment else base
            yield (route, node)
        else:
            raise ValueError('Invalid datatype for sitemap')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _mock_input(self, target, content):
""" mock human input :param target: the element to input to :param content: the content :return: """ |
    # normalize to a string, then type one character at a time with a short
    # randomized pause between keystrokes to mimic a human typist
    content = helper.to_str(content)
    for w in content:
        target.send_keys(w)
        rand_block(0.01, 0.01)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __has_next_page(self, current_page_num=0):
""" |
    try:
        # collect all candidate "next page" elements on the current page
        next_page = self.robot.get_elements(
            self.base.get('next_page'),
            multiple=True
        )
        log.debug('<Site> has {} next page elems'.format(len(next_page)))
        if not next_page:
            return False
        # pick the element whose visible text is exactly 'Next'
        for i, ele in enumerate(next_page):
            if ele.get_attribute('innerText') == 'Next':
                log.debug('<Site> {} is the right link'.format(i))
                self.next_page = ele
                break
        # NOTE(review): returns True even when no element matched 'Next',
        # leaving self.next_page possibly stale -- confirm intended.
        return True
    except Exception as _:
        # any lookup failure is treated as "no next page"
        self.next_page = None
        return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def response_result(self, **kwargs):
""" default will fetch MAX_AP pages yield `self.driver.page_source, self.driver.current_url, 1` after mock submit, the first page is crawled. so start@ index of 1, and yield first page first when running over, use else to yield the last page. 程序运行到此, 已经load 了第一页, 故在进行操作 `点击下一页` 之前, 需要 yield range(1, page_togo), 则在 page_togo - 1时跳出循环, 此时程序 已经完成点击了下一页, 故 page_togo 这一页已经 load 完成, 故在 else 跳出时 yield """ |
    # Crawl up to `page_togo` result pages, yielding
    # (page_source, current_url, page_number) for each page visited.
    page_togo = kwargs.get('page_togo', self.max_page_togo)
    if page_togo <= 1:
        # NOTE(review): this *returns* a tuple from a generator (ending it
        # with the tuple as StopIteration value) rather than yielding the
        # first page -- confirm callers expect that.
        return self.robot.driver.page_source, self.robot.driver.current_url, 1
    # Start at 1 because the mock submit already loaded the first page; stop
    # before page_togo because clicking "next" at page_togo - 1 loads the
    # final page (which the for/else clause then yields).
    yield_last = kwargs.get('yield_last', False)
    start_yval = 0
    for page_done in range(1, page_togo):
        if not yield_last:
            yield self.robot.driver.page_source, self.robot.driver.current_url, page_done
        # click away any popups before trying to paginate
        self.mock_popovers()
        if self.has_next_page(page_done):
            start_yval = self.goto_next(start_yval)
        else:
            # no next page: bail out early
            log.debug('page {} is the last result page!'.format(page_done))
            break
    else:
        # loop ran to completion: the final page (page_togo) is now loaded
        if not yield_last:
            yield self.robot.driver.page_source, self.robot.driver.current_url, page_togo
    if yield_last:
        # caller only wants the final page
        yield self.robot.driver.page_source, self.robot.driver.current_url, page_togo
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _bulk_flag(self, request, queryset, action, done_message):
""" Flag, approve, or remove some comments from an admin action. Actually calls the `action` argument to perform the heavy lifting. """ |
    n_comments = 0
    for comment in queryset:
        # `action` performs the actual flag/approve/remove work per comment
        action(request, comment)
        n_comments += 1
    # pluralized, user-facing confirmation message
    msg = ungettext('1 comment was successfully %(action)s.',
                    '%(count)s comments were successfully %(action)s.',
                    n_comments)
    self.message_user(request, msg % {'count': n_comments, 'action': done_message(n_comments)})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set(self, key, value):
""" Updates the value of the given key in the file. Args: key (str):
Key of the property to update. value (str):
New value of the property. Return: bool: Indicates whether or not a change was made. """ |
    # delegate the in-memory update to the parent class; it reports whether
    # anything actually changed
    changed = super().set(key=key, value=value)
    if not changed:
        return False
    # persist the updated configuration to disk
    self._log.info('Saving configuration to "%s"...', self._filename)
    with open(self._filename, 'w') as stream:
        stream.write(self.content)
    self._log.info('Saved configuration to "%s".', self._filename)
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def next_color(self):
    """Return the next color to paint with.

    Currently a random color from the Colorbrewer 11-class diverging BrBG
    palette.

    :return: RGB tuple from ImageColor.
    """
    chosen_hex = random.choice(BrBG_11.hex_colors)
    return ImageColor.getrgb(chosen_hex)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def paint_cube(self, x, y):
    """Paint one colored cube whose upper-left corner is at (x, y).

    :param x: horizontal position of the cube's upper-left corner.
    :param y: vertical position of the cube's upper-left corner.
    """
    fill_color = self.next_color()
    bounds = [x, y, x + self.cube_size, y + self.cube_size]
    ImageDraw.Draw(im=self.image).rectangle(xy=bounds, fill=fill_color)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def paint_pattern(self):
    """Fill the whole image by painting one cube per cube_size grid cell."""
    col = 0
    while col < self.width:
        row = 0
        while row < self.height:
            self.paint_cube(col, row)
            row += self.cube_size
        col += self.cube_size
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_key_files(kfiles, dirname, names):
    """Collect SSH private-key files from a directory listing.

    Intended as an ``os.path.walk``-style visitor: every name in
    ``names`` that is a regular file under ``dirname`` and ends in
    ``_rsa`` or ``_dsa`` is put on the ``kfiles`` queue.

    Parameters
    ----------
    kfiles : Queue
        Queue collecting matching key-file paths.
    dirname : str
        Directory containing the entries in ``names``.
    names : list of str
        Entry names to inspect.
    """
    for name in names:
        fullname = os.path.join(dirname, name)
        # BUG FIX: the original condition parsed as
        # (isfile and endswith('_rsa')) or endswith('_dsa'), so a
        # directory named '*_dsa' was queued too. Require a regular
        # file for both suffixes.
        if os.path.isfile(fullname) and fullname.endswith(('_rsa', '_dsa')):
            kfiles.put(fullname)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_ssh_keys(sshdir):
    """Recursively collect SSH private-key files under a directory.

    Parameters
    ----------
    sshdir : str
        Directory to search (e.g. a user's ``~/.ssh``).

    Returns
    -------
    Queue
        Queue of full paths to regular files ending in ``_rsa`` or
        ``_dsa``.
    """
    keys = Queue()
    for root, _, files in os.walk(os.path.abspath(sshdir)):
        for filename in files:
            fullname = os.path.join(root, filename)
            # BUG FIX: the original condition parsed as
            # (isfile and endswith('_rsa')) or endswith('_dsa'), which
            # skipped the isfile check for '_dsa' entries (e.g. broken
            # symlinks). Apply it to both suffixes.
            if os.path.isfile(fullname) and fullname.endswith(('_rsa', '_dsa')):
                keys.put(fullname)
    return keys
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_ssh_dir(config, username):
    """Locate the SSH configuration directory for a user.

    Tries, in order: the ``ssh_config_dir`` configuration key, the
    current user's ``~/.ssh``, then ``<home of username>/.ssh`` from
    the password database.

    Parameters
    ----------
    config : dict
        Configuration mapping; may contain ``ssh_config_dir``.
    username : str
        Account whose password-database home is the final fallback.

    Returns
    -------
    str or None
        Path of an existing ssh directory, or None if none was found.
    """
    ssh_path = config.get('ssh_config_dir') or os.path.expanduser('~/.ssh')
    if os.path.isdir(ssh_path):
        return ssh_path
    # Fall back to the home directory recorded for the given account.
    ssh_path = os.path.join(getpwnam(username).pw_dir, '.ssh')
    return ssh_path if os.path.isdir(ssh_path) else None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_local_user(username):
    """Map a requested username to one that exists locally.

    Returns ``username`` unchanged if it exists in the local password
    database, otherwise falls back to the current process owner.
    """
    try:
        getpwnam(username)
    except KeyError:
        # Unknown account -- use whoever is running this process.
        return getuser()
    return username
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_host_keys(hostname, sshdir):
    """Look up the recorded host key for a host.

    Parameters
    ----------
    hostname : str
        Host whose key should be looked up.
    sshdir : str
        Directory containing a ``known_hosts`` file.

    Returns
    -------
    key object or None
        The first recorded key for ``hostname``, or None when the
        known_hosts file is missing or has no entry for the host.
    """
    hostkey = None
    try:
        host_keys = load_host_keys(os.path.join(sshdir, 'known_hosts'))
    except IOError:
        # No readable known_hosts file -- treat as "no keys known".
        host_keys = {}
    if hostname in host_keys:
        # BUG FIX: dict.keys() is not subscriptable on Python 3; take
        # the first key type with next(iter(...)) instead of .keys()[0].
        hostkeytype = next(iter(host_keys[hostname]))
        hostkey = host_keys[hostname][hostkeytype]
    return hostkey
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_sftp_conn(config):
"""Make a SFTP connection, returns sftp client and connection objects""" |
# Split "host[:port]" out of the configured remote URL; default to
# SSH port 22 when none is given.
remote = config.get('remote_location')
parts = urlparse(remote)
if ':' in parts.netloc:
hostname, port = parts.netloc.split(':')
else:
hostname = parts.netloc
port = 22
port = int(port)
# Resolve the remote username, a matching local account, that
# account's ssh directory, and any pinned key for the target host.
username = config.get('remote_username') or getuser()
luser = get_local_user(username)
sshdir = get_ssh_dir(config, luser)
hostkey = get_host_keys(hostname, sshdir)
try:
sftp = None
keys = get_ssh_keys(sshdir)
transport = Transport((hostname, port))
# Try each discovered private key until one authenticates.
# NOTE(review): PKey is an abstract base class in paramiko;
# from_private_key_file is normally invoked on a concrete class
# such as RSAKey -- confirm this works with the paramiko in use.
while not keys.empty():
try:
key = PKey.from_private_key_file(keys.get())
transport.connect(
hostkey=hostkey,
username=username,
password=None,
pkey=key)
sftp = SFTPClient.from_transport(transport)
break
except (PasswordRequiredException, SSHException):
# Encrypted or otherwise unusable key -- try the next one.
pass
if sftp is None:
raise SaChannelUpdateTransportError("SFTP connection failed")
return sftp, transport
except BaseException as msg:
# NOTE(review): BaseException also swallows KeyboardInterrupt and
# SystemExit; `except Exception` is likely the intent -- confirm.
raise SaChannelUpdateTransportError(msg) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_and_consume(self):
    """Consume one token from the bucket if one is available.

    The bucket is refilled first when fewer than one token remains.
    On success the throttle counter is reset; on failure it is
    incremented.

    Returns
    -------
    bool
        True when a token was consumed, False otherwise.
    """
    if self._count < 1.0:
        self._fill()
    if self._count >= 1.0:
        self._count -= 1.0
        self.throttle_count = 0
        return True
    self.throttle_count += 1
    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _fill(self):
"""Fills bucket with accrued tokens since last fill.""" |
right_now = time.time()
time_diff = right_now - self._last_fill
if time_diff < 0:
return
self._count = min(
self._count + self._fill_rate * time_diff,
self._capacity,
)
self._last_fill = right_now |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mysql_batch_and_fetch(mysql_config, *sql_queries):
""" Excute a series of SQL statements before the final Select query Parameters mysql_config : dict The user credentials as defined in MySQLdb.connect, e.g. mysql_conig = {'user': 'myname', 'passwd': 'supersecret', 'host': '<ip adress or domain>', 'db': '<myschema>'} sql_queries : list or tuple A list or tuple of SQL queries wheras the last SQL command have to be final Select query. (If a string is provided the semicolon ";" is used to split the string into a list of strings) Returns ------- result_table : tuple The result table as tuple of tuples. Sources ------- * http://mysqlclient.readthedocs.io/user_guide.html """ |
# load modules
import MySQLdb as mydb
import sys
import gc
# ensure that `sqlqueries` is a list/tuple
# split a string into a list
# A single positional argument may be one ";"-separated string or one
# list/tuple of statements; normalize both into a flat sequence.
if len(sql_queries) == 1:
if isinstance(sql_queries[0], str):
sql_queries = sql_queries[0].split(";")
if isinstance(sql_queries[0], (list, tuple)):
sql_queries = sql_queries[0]
# connect and execute queries
# Every non-empty statement is executed in order; fetchall() then
# retrieves the rows produced by the last executed statement.
try:
conn = mydb.connect(**mysql_config)
curs = conn.cursor()
for sql_query in sql_queries:
if len(sql_query) > 0:
curs.execute(sql_query)
# NOTE(review): if sql_queries is empty, result_table is never bound
# and the final return raises NameError -- confirm callers always
# pass at least one statement.
result_table = curs.fetchall()
except mydb.Error as err:
# NOTE(review): sys.exit(1) inside a library function aborts the
# whole caller process; re-raising would be friendlier.
print(err)
gc.collect()
sys.exit(1)
else:
# Close the connection only on the success path.
if conn:
conn.close()
gc.collect()
return result_table |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_camel_case(snake_case_name):
    """Convert a snake_cased_name to a CamelCaseName.

    :param snake_case_name: The name to convert.
    :type snake_case_name: string
    :returns: The converted string.
    :rtype: string
    """
    return ''.join(part.capitalize() for part in snake_case_name.split('_'))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def html_to_rst(html):
    """Convert service HTML docs to reStructuredText for docstrings.

    :param html: The raw HTML to convert.
    :type html: string
    :returns: A reStructuredText rendering of the input.
    :rtype: string
    """
    rest_doc = ReSTDocument()
    rest_doc.include_doc_string(html)
    # getvalue() yields encoded bytes; hand back decoded text.
    return rest_doc.getvalue().decode('utf-8')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resize_image_folder(bucket, key_prefix, pil_size):
""" This function resizes all the images in a folder """ |
# Walk every S3 key under the prefix and thumbnail each image in
# place, recording the applied size in the key's metadata.
# pil_size is presumably a (width, height) tuple accepted by
# Image.thumbnail -- TODO confirm against callers.
con = boto.connect_s3()
b = con.get_bucket(bucket)
for key in b.list(key_prefix):
# Re-fetch the key to get its content type and metadata.
key = b.get_key(key.name)
if 'image' not in key.content_type:
continue
# Skip keys already resized to this size (tracked via metadata).
size = key.get_metadata('size')
if size == str(pil_size):
continue
with tempfile.TemporaryFile() as big, tempfile.TemporaryFile() as small:
# download file and resize
key.get_contents_to_file(big)
big.flush()
big.seek(0)
img = Image.open(big)
# NOTE(review): Image.ANTIALIAS was removed in Pillow 10
# (Image.LANCZOS is the replacement) -- confirm the pinned
# Pillow version still provides it.
img.thumbnail(pil_size, Image.ANTIALIAS)
img.save(small, img.format)
small.flush()
small.seek(0)
# Record the new size, then upload the thumbnail over the
# original object with its original content type.
key.set_metadata('size', str(pil_size))
key.set_contents_from_file(small, headers={'Content-Type': key.content_type}) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_section(self, section, friendly_name=None):
    """Add a configuration section, optionally with a friendly name.

    Parameters
    ----------
    section : str
        Key of the new section; must be a string.
    friendly_name : str, optional
        Display name; defaults to the title-cased section key. An
        accelerator '&' is prepended when not already present.

    Raises
    ------
    ValueError
        If ``section`` is not a string.
    DuplicateSectionError
        If the section already exists.
    """
    # Make sure the caller isn't using something unusable as a key.
    if not isinstance(section, BASESTRING):
        raise ValueError(section)
    if section in self.config:
        raise DuplicateSectionError(section)
    self.config[section] = OrderedDict()
    # Idiom fix: identity comparison with None instead of '== None'.
    if friendly_name is None:
        friendly_name = section.title()
    if '&' not in friendly_name:
        friendly_name = '&' + friendly_name
    self.section_names[section] = friendly_name
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def toggle(self, section, option):
    """Invert the boolean value of ``option`` within ``section``."""
    current = self.get(section, option)
    self.set(section, option, not current)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, section, option, default=None):
    """Return the stored value of ``option`` in ``section``.

    Parameters
    ----------
    section : str
        Section to look in.
    option : str
        Option whose value is wanted.
    default : optional
        Value returned when the option is missing. Note that ``None``
        doubles as the "no default" sentinel, so an explicit
        ``default=None`` still raises NoOptionError.

    Raises
    ------
    NoSectionError
        If ``section`` does not exist.
    NoOptionError
        If ``option`` is missing and no default was supplied.
    """
    # Guard clause instead of wrapping the whole body in an else.
    if not self.has_section(section):
        raise NoSectionError(section)
    try:
        return self.config[section][option].get('value', None)
    except KeyError:
        # Idiom fix: identity comparison with None instead of '== None'.
        if default is None:
            raise NoOptionError(option)
        return default
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_dump(self):
    """Return every option and value, grouped by section.

    Returns
    -------
    list
        Nested lists of the form
        ``[[section, [[option, value], ...]], ...]``.
    """
    return [
        [section, [[option, self.get(section, option)]
                   for option in self.options(section)]]
        for section in self.sections()
    ]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _onError(self, error):
""" Stop observer, raise exception, then restart. This prevents an infinite ping pong game of exceptions. """ |
self.stop()
self._logModule.err(
error,
"Unhandled error logging exception to %s" % (self.airbrakeURL,))
self.start() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_arguments():
    """Define and parse this script's command-line arguments.

    Returns
    -------
    argparse.Namespace
        Parsed arguments: ``file`` (one or more paths), ``python``
        (version string or None) and the ``recursive`` flag.
    """
    arg_parser = argparse.ArgumentParser()
    # Positional: one or more files to make executable.
    arg_parser.add_argument('file', metavar="FILE", nargs='+',
                            help='file to be made executable')
    arg_parser.add_argument("-p", "--python", metavar="VERSION",
                            help="python version (2 or 3)")
    arg_parser.add_argument('-v', '--version', action='version',
                            version='%(prog)s ' + __version__,
                            help='show version')
    arg_parser.add_argument('-r', '--recursive', action='store_true',
                            help='recursively iterate the directories')
    return arg_parser.parse_args()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contains_shebang(f):
    """Return True if the file's first line is a known shebang line.

    Reads one line from ``f``, advancing its position; the line is
    compared against the known ``shebangs`` values.
    """
    return f.readline() in shebangs.values()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_exec(fname, version):
    """Write a shebang into ``fname`` and mark it executable.

    Parameters
    ----------
    fname : str
        Path of the file to modify in place.
    version : str or None
        Python version for the shebang; None selects the default.
    """
    shebang_version = 'default' if version is None else version
    with open(fname, 'rb+') as fh:
        put_shebang(fh, shebang_version)
    # Add the execute bit for owner, group and others.
    os.chmod(fname, os.stat(fname).st_mode | 0o0111)
    print("{} is now executable".format(fname))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def data_file(self):
    """Path of the originally uploaded file this subject was built from.

    Returns
    -------
    str
        Location of the upload inside the subject's upload directory.
    """
    uploaded_name = self.properties[datastore.PROPERTY_FILENAME]
    return os.path.join(self.upload_directory, uploaded_name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def upload_file(self, filename, file_type=FILE_TYPE_FREESURFER_DIRECTORY):
    """Create a subject from an uploaded file.

    Only Freesurfer anatomy directories (delivered as tar archives)
    are currently supported.

    Parameters
    ----------
    filename : string
        Name of the (uploaded) file.
    file_type : string
        Must be FILE_TYPE_FREESURFER_DIRECTORY.

    Returns
    -------
    SubjectHandle
        Handle for the created subject.

    Raises
    ------
    ValueError
        If ``file_type`` is anything other than
        FILE_TYPE_FREESURFER_DIRECTORY.
    """
    if file_type != FILE_TYPE_FREESURFER_DIRECTORY:
        raise ValueError('Unsupported file type: ' + file_type)
    return self.upload_freesurfer_archive(filename)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def upload_freesurfer_archive(self, filename, object_identifier=None, read_only=False):
    """Create an anatomy subject from a Freesurfer tar archive.

    The archive is unpacked, the Freesurfer directory inside it is
    located, its sub-folders are moved into the new subject's data
    directory, and the original upload is kept for download.

    Parameters
    ----------
    filename : string
        Name of the (uploaded) tar file.
    object_identifier : string, optional
        Unique object identifier; generated when omitted.
    read_only : boolean, optional
        Marks the created subject as read-only.

    Returns
    -------
    SubjectHandle
        Handle for the created subject in the database.

    Raises
    ------
    ValueError
        If the file is not a readable tar archive or contains no valid
        Freesurfer directory.
    """
    # Extract the (possibly compressed) tar archive into a fresh
    # temporary directory.
    temp_dir = tempfile.mkdtemp()
    try:
        # BUG FIX: the archive handle was never closed; a context
        # manager releases it even when extraction fails.
        # NOTE(review): extractall on untrusted uploads is exposed to
        # '../' path traversal in member names -- consider validating
        # members before extraction.
        with tarfile.open(name=filename, mode='r') as tf:
            tf.extractall(path=temp_dir)
    except (tarfile.ReadError, IOError) as err:
        # Clean up in case there is an error during extraction.
        shutil.rmtree(temp_dir)
        raise ValueError(str(err))
    # From here on always remove the extracted files, also on
    # unexpected errors (the original code leaked temp_dir on failure).
    try:
        # Find a folder that contains sub-folders 'surf' and 'mri';
        # these are the only folders kept for the new subject.
        freesurf_dir = get_freesurfer_dir(temp_dir)
        if not freesurf_dir:
            raise ValueError('not a valid subject directory')
        # The identifier doubles as the subject's directory name.
        if object_identifier is None:
            identifier = str(uuid.uuid4()).replace('-', '')
        else:
            identifier = object_identifier
        subject_dir = os.path.join(self.directory, identifier)
        # Derive the display name from the upload's file name, minus
        # any known archive suffix.
        prop_filename = os.path.basename(os.path.normpath(filename))
        prop_name = prop_filename
        # Based on the valid list of suffixes the file is either a
        # tar-file or a zipped tar-file.
        prop_mime = 'application/x-tar' if filename.endswith('.tar') else 'application/x-gzip'
        for suffix in ['.tar', '.tgz', '.tar.gz']:
            if prop_name.endswith(suffix):
                prop_name = prop_name[:-len(suffix)]
                break
        properties = {
            datastore.PROPERTY_FILENAME : prop_filename,
            datastore.PROPERTY_FILESIZE : os.path.getsize(filename),
            datastore.PROPERTY_FILETYPE : FILE_TYPE_FREESURFER_DIRECTORY,
            datastore.PROPERTY_MIMETYPE : prop_mime,
            datastore.PROPERTY_NAME : prop_name
        }
        if read_only:
            properties[datastore.PROPERTY_READONLY] = True
        # Create the directories for the subject, its unpacked data
        # files, and the original uploaded file (kept for download).
        os.mkdir(subject_dir)
        data_dir = os.path.join(subject_dir, DATA_DIRECTORY)
        os.mkdir(data_dir)
        upload_dir = os.path.join(subject_dir, UPLOAD_DIRECTORY)
        os.mkdir(upload_dir)
        # Move all sub-folders from the Freesurfer directory into the
        # subject's data directory.
        for f in os.listdir(freesurf_dir):
            sub_folder = os.path.join(freesurf_dir, f)
            if os.path.isdir(sub_folder):
                shutil.move(sub_folder, data_dir)
        # Keep a copy of the original upload for later download.
        shutil.copyfile(filename, os.path.join(upload_dir, prop_filename))
        obj = SubjectHandle(
            identifier,
            properties,
            subject_dir
        )
        self.insert_object(obj)
        return obj
    finally:
        # Remove the temporary extraction directory in all cases.
        shutil.rmtree(temp_dir)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getUI(args):
""" build and return a UI object for this script. :param args: raw arguments to parse """ |
programName = os.path.basename(sys.argv[0])
# NOTE(review): these descriptions talk about p-value -> q-value FDR
# conversion while the options below (and main()) deal with genomic
# region intersection -- looks copy-pasted from another script;
# confirm which description is intended.
longDescription = "takes a file with a list of p-values and applies " +\
"Benjamini and Hochberg FDR to convert to q-values "
shortDescription = "takes a file with a list of p-values and applies " +\
"Benjamini and Hochberg FDR to convert to q-values "
# The CLI is built with exactly two required positional arguments.
ui = CLI(programName, shortDescription, longDescription)
ui.minArgs = 2
ui.maxArgs = 2
ui.addOption(Option(short="o", long="output", argName="filename",
description="output to given file, else stdout",
required=False, type=str))
ui.addOption(Option(short="s", long="stranded",
description="treat regions on separate strands as " +
"disjoint, even if they overlap",
required=False))
ui.addOption(Option(short="v", long="verbose",
description="output additional messages to stderr " +
"about run", required=False))
# 'special' options short-circuit normal argument validation.
ui.addOption(Option(short="h", long="help",
description="show this help message ", special=True))
ui.addOption(Option(short="u", long="test",
description="run unit tests ", special=True))
ui.parseCommandLine(args)
return ui |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main(args):
""" main entry point for the GenomicIntIntersection script. :param args: the arguments for this script, as a list of string. Should already have had things like the script name stripped. That is, if there are no args provided, this should be an empty list. """ |
# get options and arguments
ui = getUI(args)
if ui.optionIsSet("test"):
# just run unit tests
unittest.main(argv=[sys.argv[0]])
elif ui.optionIsSet("help"):
# just show help
ui.usage()
else:
verbose = ui.optionIsSet("verbose")
# stranded?
stranded = ui.optionIsSet("stranded")
if stranded:
# NOTE(review): sys.exit() with no argument exits with status 0
# even though this is an unsupported-feature bail-out -- confirm
# whether a non-zero exit code was intended.
sys.stderr.write("Sorry, stranded mode hasn't been implemented yet.")
sys.exit()
# get output handle
# NOTE(review): when --output is given, out_fh is opened here but
# never closed or flushed explicitly -- relies on interpreter exit.
out_fh = sys.stdout
if ui.optionIsSet("output"):
out_fh = open(ui.getValue("output"), "w")
# get input file-handles -- we know we'll get exactly two, since we
# specified it in the UI definition
regions_1 = [x for x in BEDIterator(ui.getArgument(0), verbose=verbose)]
regions_2 = [x for x in BEDIterator(ui.getArgument(1), verbose=verbose)]
# Write every region in the intersection, one per line.
for r in regionsIntersection(regions_1, regions_2):
out_fh.write(str(r) + "\n") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getAsTuple(self, section):
    """Return a section's options as a namedtuple instance.

    :param section: section name; also used as the namedtuple's type
        name.
    :return: tuple object with one field per option key.
    """
    field_names = self.getKeys(section)
    values = self.getValues(section)
    return namedtuple(section, field_names)(**values)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.