text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def split_by_connected_component(self, idents):
    '''Partition ``idents`` into equivalence classes.

    Two idents land in the same class when one appears in the
    connected component (``self.connected_component``) of the other.
    Each class is returned sorted.
    '''
    unseen = set(idents)
    classes = []
    for ident in idents:
        if ident not in unseen:
            continue
        unseen.discard(ident)
        members = [ident]
        for label in self.connected_component(ident):
            for cid in (label.content_id1, label.content_id2):
                if cid in unseen:
                    members.append(cid)
                    unseen.discard(cid)
        classes.append(sorted(members))
    return classes
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connected_component(self, ident):
    '''Return a connected component generator for ``ident``.

    ``ident`` may be a ``content_id`` or a ``(content_id,
    subtopic_id)``.  Starting from ``ident``, follow every positive
    transitivity relationship: if ``(a, b, 1)`` and ``(b, c, 1)`` are
    labels, ``connected_component('a')`` yields both even though ``a``
    and ``c`` are not directly connected.

    (Although this is a generator, it holds memory proportional to the
    number of labels in the component.)

    :param ident: content id or (content id and subtopic id)
    :type ident: ``str`` or ``(str, str)``
    :rtype: generator of :class:`Label`
    '''
    frontier = set([normalize_ident(ident)])  # idents still to query
    visited = set()                           # idents already queried
    seen_labels = set()                       # labels already yielded
    while frontier:
        current = frontier.pop()
        visited.add(current)
        for label in self.directly_connected(current):
            # Only positive labels define the component.
            if label.value != CorefValue.Positive:
                continue
            left, right = idents_from_label(
                label, subtopic=ident_has_subtopic(current))
            for neighbor in (left, right):
                if neighbor not in visited:
                    frontier.add(neighbor)
            if label not in seen_labels:
                seen_labels.add(label)
                yield label
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def expand(self, ident):
    '''Return the expanded set of labels for ``ident``'s component.

    ``ident`` may be a ``content_id`` or a ``(content_id,
    subtopic_id)``; when it names a subtopic, expansion happens on the
    subtopic connected component and expanded labels keep subtopic
    information.

    :meth:`LabelStore.connected_component` returns only the labels
    actually stored; this method additionally returns the labels that
    can be inferred from the component.  Inferred labels carry empty
    subtopic assignments and an arbitrary ``annotator_id`` from the
    component.

    :rtype: ``list`` of :class:`Label`
    '''
    with_subtopics = ident_has_subtopic(normalize_ident(ident))
    found = list(self.connected_component(ident))
    found.extend(expand_labels(found, subtopic=with_subtopics))
    return found
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def negative_inference(self, content_id):
    '''Yield inferred negative labels centered on ``content_id``.

    Every content id connected to ``content_id`` through a negative
    label is fed to :meth:`LabelStore.negative_label_inference`; see
    that method for details of the inference itself.
    '''
    # Note: avoid reusing one loop variable for both the outer negative
    # label and the inferred labels.
    for neg_label in self.directly_connected(content_id):
        if neg_label.value != CorefValue.Negative:
            continue
        for inferred in self.negative_label_inference(neg_label):
            yield inferred
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def negative_label_inference(self, label):
    '''Yield inferred negative label relationships for ``label``.

    Ad-hoc negative labels are constructed between
    ``label.content_id1`` and the positive connected component of
    ``label.content_id2``, and vice versa.

    Note this allocates memory proportional to the size of the two
    connected components.
    '''
    assert label.value == CorefValue.Negative
    yield label

    def pair_against_component(anchor_cid, other_cid):
        # Pair ``anchor_cid`` with every cid in the positive connected
        # component of ``other_cid`` (other than ``other_cid`` itself).
        for comp_label in self.connected_component(other_cid):
            for cid in (comp_label.content_id1, comp_label.content_id2):
                if cid != other_cid:
                    yield Label(anchor_cid, cid,
                                'auto', CorefValue.Negative)

    for inferred in pair_against_component(label.content_id1,
                                           label.content_id2):
        yield inferred
    for inferred in pair_against_component(label.content_id2,
                                           label.content_id1):
        yield inferred
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _filter_keys(self, content_id=None, prefix=None, subtopic_id=None):
'''Filter out-of-order labels by key tuple.
:class:`Label` always sorts by `(cid1,cid2,sid1,sid2)`, but
for efficient lookups on `cid2` this class also stores in
order `(cid2,cid1,sid2,sid1)`. Filter out things that are
in the wrong order. But, if an original query specified
`content_id` or `subtopic_id`, account for the possibility
that something might be apparently out-of-order but its
dual will not be in the query at all.
'''
def accept(kvp):
(content_id1, content_id2, subtopic_id1, subtopic_id2,
annotator_id, inverted_epoch_ticks) = kvp[0]
# In general we'll accept the label if
# it's the natural order; l.content_id1 == cid1
is_sorted = (content_id1 < content_id2 or
(content_id1 == content_id2 and
subtopic_id1 <= subtopic_id2))
if content_id is not None:
# We will only see tuples where content_id1 is the
# requested content ID
assert content_id1 == content_id
elif prefix is not None:
assert content_id1.startswith(prefix)
# If we'll see both orderings of this record, only
# accept the sorted one
if content_id2.startswith(prefix) and not is_sorted:
return False
elif not is_sorted:
# We'll see both orderings of everything, reject the
# unsorted ones
return False
# If we're not looking for subtopic IDs, then accept records
# matching the content ID that are in natural order
if subtopic_id is None:
if content_id2 != content_id:
return True # will never see its dual
return subtopic_id1 <= subtopic_id2
# The scan range doesn't include subtopic IDs (the key schema
# is oriented towards querying content-to-content labels)
# so we need to accept records where either part matches
# (if both parts match then the record is its own dual)
if subtopic_id == subtopic_id1:
return True
if content_id == content_id2 and subtopic_id == subtopic_id2:
return True
return False
return accept |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply_diff(self, diff):
    '''Apply a diff to the label table.

    ``diff`` is a dict with three keys, each mapping to a list of
    labels:

    * ``add``: labels present in ``new`` but not in ``old``
    * ``delete``: labels present in ``old`` but not in ``new``
    * ``change``: labels in both but with different coref/rating values
    '''
    # Additions and changes are plain puts.  Deleted labels cannot be
    # physically removed, so their effect is scrubbed by re-putting
    # them with an unknown coref value.
    batch = list(diff['add']) + list(diff['change'])
    batch += [lab.update(value=CorefValue.Unknown)
              for lab in diff['delete']]
    self.put(*batch)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def file_exists(original_file):
    """Return True when ``original_file`` exists and is a regular file.

    Paths beginning with ``s3://`` are delegated to the s3 filesystem
    helper; everything else is checked on the local filesystem.
    """
    if original_file.startswith("s3://"):
        from filesystem import s3
        return s3.file_exists(original_file)
    # isfile() is False for both missing paths and non-regular files,
    # which matches the original two-step check.
    return os.path.isfile(original_file)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def complete_task(self, task):
    """Finish logging of a task and report how long it took.

    Parameters
    ----------
    task : str
        Name of a task previously registered with ``start_task``.

    Returns
    -------
    float or None
        Seconds elapsed since the task started, or ``None`` when the
        task was never started.
    """
    try:
        # pop() both reads and forgets the start time; raises KeyError
        # when the task was never started.
        elapsed = self.timer() - self.tasks.pop(task)
    except KeyError:
        self.info("Calculated {}.".format(task))
        return None
    # Only log a runtime when it is long enough to be interesting.
    if elapsed >= self.min_runtime:
        self.info("Calculated {} in {:.2f} seconds.".format(
            task, elapsed))
    return elapsed
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def edit_miz(  # noqa: C901
        infile: str,
        outfile: str = None,
        metar: typing.Union[str, Metar] = None,
        time: str = None,
        min_wind: int = 0,
        max_wind: int = 40
) -> str:
    """Edit a MIZ file, applying weather and/or mission time.

    Args:
        infile: source file
        outfile: output file (defaults to the source file)
        metar: METAR string, ICAO code or Metar object to apply
        time: time string to apply (YYYYMMDDHHMMSS)
        min_wind: minimum wind
        max_wind: maximum wind

    Returns:
        An empty string on success, otherwise an error message.
    """
    if outfile is None:
        LOGGER.debug('editing in place: %s', infile)
        outfile = infile
    else:
        LOGGER.debug('editing miz file: %s -> %s', infile, outfile)
    weather = None
    new_time = None
    if metar:
        error, metar = emiz.weather.custom_metar.CustomMetar.get_metar(metar)
        if error:
            return error
        weather = emiz.weather.mission_weather.MissionWeather(
            metar, min_wind=min_wind, max_wind=max_wind)
    if time:
        try:
            new_time = MissionTime.from_string(time)
        except ValueError:
            return f'badly formatted time string: {time}'
    if not weather and not new_time:
        return 'nothing to do!'
    with Miz(infile) as miz:
        if weather:
            LOGGER.debug('applying MissionWeather')
            if not weather.apply_to_miz(miz):
                return 'error while applying METAR to mission'
        if new_time:
            LOGGER.debug('applying MissionTime')
            if not new_time.apply_to_miz(miz):
                return 'error while setting time on mission'
        try:
            miz.zip(outfile)
            return ''
        except OSError:
            return f'permission error: cannot edit "{outfile}"; maybe it is in use ?'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_file(self, loader, filename, encoding='utf-8', silent=False):
    """Recursively update the config from a file.

    :param loader: (function) receives a file object and returns a
        dictionary to merge into the settings
    :param filename: (str) file to open
    :param encoding: (str) encoding used to read the file
    :param silent: (bool) swallow any loading error instead of raising

    .. versionadded:: 0.4.0
    """
    loaded = {}
    try:
        with open(filename, encoding=encoding) as handle:
            loaded = loader(handle)
    except Exception:
        # An empty update still runs below when silenced.
        if not silent:
            raise
    self.update(loaded)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_json(self, filename, encoding='utf-8', silent=False):
    """Recursively update the config from a JSON file.

    :param filename: (str) path of the JSON file
    :param encoding: (str) encoding of the file
    :param silent: (bool) fail silently if something is wrong with the
        JSON file

    .. versionadded:: 0.3.0
    """
    self.from_file(json.load, filename,
                   encoding=encoding, silent=silent)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_yaml(self, filename, encoding='utf-8', silent=False):
    """Recursively update the config from a YAML file.

    Requires PyYAML to be installed.

    :param filename: (str) path of the YAML file
    :param encoding: (str) encoding of the file
    :param silent: (bool) fail silently if something is wrong with the
        YAML file

    .. versionadded:: 0.3.0
    """
    if not yaml:
        raise AttributeError(
            'You need to install PyYAML before using this method!')
    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary Python objects from an untrusted file; prefer
    # yaml.safe_load unless full YAML tags are required -- confirm
    # with callers before changing.
    self.from_file(yaml.load, filename, encoding, silent)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self, iterable={}, **kwargs):
    """Recursively merge ``iterable`` and ``kwargs`` into this config."""
    def _deep_merge(target, *pair_seqs):
        # Fold every (key, value) pair into ``target``; dict-like
        # values are merged recursively rather than overwritten.
        for key, value in itertools.chain(*pair_seqs):
            if key in target and isinstance(value, (dict, Conf)):
                value = _deep_merge(target[key], value.items())
            target[key] = value
        return target
    # Normalize the input to a sequence of (key, value) pairs.
    if isinstance(iterable, (dict, Conf)):
        iterable = iterable.items()
    _deep_merge(self._data, iterable, kwargs.items())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def baseline(dataset, column=1, fn=None, fail_silently=True):
    """Subtract a baseline from each data array in ``dataset``.

    Parameters
    ----------
    dataset : list of numpy array
        Data to correct (mutated in place).
    column : int
        Index of the column to process.
    fn : callable, optional
        ``fn(data, column=...)`` returning the baseline value.  When
        ``None``, the first value of the target column is used.
    fail_silently : bool
        When True, return the dataset unchanged instead of raising if
        the column index is out of range.

    Returns
    -------
    list
        The (mutated) dataset.
    """
    if fn is None:
        def fn(columns, column):
            # Default baseline: first sample of the target column.
            return columns[column][0]
    try:
        for i, data in enumerate(dataset):
            dataset[i][column] -= fn(data, column=column)
        return dataset
    except IndexError:
        # Fix: the original used Python-2-only `except IndexError, e:`
        # syntax, a SyntaxError on Python 3.
        if fail_silently:
            return dataset
        raise
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _dataset_info(dataset):
    """Return information about ``dataset`` as a dict."""
    # Total size, reported both machine- and human-readable.
    total = sum(dataset.item_properties(i)["size_in_bytes"]
                for i in dataset.identifiers)
    admin = dataset._admin_metadata
    return {
        "uri": dataset.uri,
        "uuid": dataset.uuid,
        "size_int": total,
        "size_str": sizeof_fmt(total),
        "creator": admin["creator_username"],
        "name": admin["name"],
        "date": date_fmt(admin["frozen_at"]),
        "num_items": len(dataset.identifiers),
        "readme_content": dataset.get_readme_content(),
    }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def inventory(uri, format):
    """Generate an inventory of datasets in a base URI."""
    base_uri = dtoolcore.utils.sanitise_uri(uri)
    info = _base_uri_info(base_uri)
    if format is None:
        _cmd_line_report(info)
        return
    # Dispatch on the requested output format; unknown formats are
    # silently ignored, as before.
    reporters = {
        "csv": lambda data: _csv_tsv_report(data, ","),
        "tsv": lambda data: _csv_tsv_report(data, "\t"),
        "html": _html_report,
    }
    reporter = reporters.get(format)
    if reporter is not None:
        reporter(info)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def custom_prompt(msg, delims="", completer=lambda: None):
    """Prompt the user with custom readline delims and completer.

    The previous readline configuration is always restored, and EOF
    (Ctrl-D) is translated into ``UserQuit``.
    """
    try:
        saved_delims = readline.get_completer_delims()
        saved_completer = readline.get_completer()
        readline.set_completer_delims(delims)
        readline.set_completer(completer)
        try:
            answer = input(msg)
        finally:
            # Restore whatever was configured before we meddled.
            readline.set_completer_delims(saved_delims)
            readline.set_completer(saved_completer)
        return answer
    except EOFError:
        raise UserQuit()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def primitive_form(obj, **kwargs):
    '''Return ``obj``, if possible, reduced to primitive/builtin objects.'''
    # Classes pass through untouched; everything else goes through the
    # Type dispatcher.
    if isinstance(obj, type):
        return obj
    dispatcher = Type.dispatch(obj)
    return dispatcher.primitive_form(**kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def evalPDF(self, u_values):
    '''Return the PDF of the uncertain parameter evaluated at ``u_values``.

    :param u_values: scalar, iterable, or numpy array of values of the
        uncertain parameter at which to evaluate the PDF
    :returns: an array for ndarray input, a list for other iterables,
        or a scalar for scalar input

    *Example Usage* ::
        >>> u = UniformParameter()
        >>> X = numpy.linspace(-1, 1, 100)
        >>> Y = [u.evalPDF(x) for x in X]
    '''
    if isinstance(u_values, np.ndarray):
        return self._evalPDF(u_values)
    # Fix: the original wrapped the whole list comprehension in a bare
    # `except:`, which silently fell back to the scalar path whenever
    # _evalPDF itself raised.  Only the iterability probe belongs in
    # the try block.
    try:
        iter(u_values)
    except TypeError:
        # Not iterable: treat as a single scalar value.
        return self._evalPDF(u_values)
    return [self._evalPDF(u) for u in u_values]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def term_regex(term):
    """Build a case-insensitive whole-string regex matching ``term``."""
    pattern = '^{0}$'.format(re.escape(term))
    return re.compile(pattern, re.IGNORECASE)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def show_fact(term):
    """Show the fact stored for ``term``, using a case-insensitive search.

    If the fact has an author it is shown; if it also has a timestamp,
    that is shown too.  Returns ``None`` when no fact is stored.
    """
    logger.info('Showing fact %s', term)
    record = db.facts.find_one({'term': term_regex(term)})
    if record is None:
        return None
    # Fix double spacing in older facts.  The original called
    # replace(' ', ' ') -- a no-op; collapsing requires replacing TWO
    # spaces with one.
    if record['fact']:
        record['fact'] = record['fact'].replace('  ', ' ')
    # Unauthored facts are shown bare.
    if not record.get('set_by', ''):
        return record['fact']
    if 'set_date' not in record:
        return '{fact} ({set_by})'.format(**record)
    # Otherwise include a timezone-aware timestamp.
    tz = getattr(settings, 'TIMEZONE', 'US/Eastern')
    try:
        # set_date may be a UNIX timestamp (number) ...
        timestamp = datetime.fromtimestamp(record['set_date'], tz=pytz.timezone(tz))
    except TypeError:
        # ... or already a datetime that only needs a tzinfo attached.
        timestamp = record['set_date'].replace(tzinfo=pytz.timezone(tz))
    record['fmt_dt'] = datetime.strftime(timestamp, '%m/%d/%Y %I:%M%p')
    return '{fact} ({set_by} on {fmt_dt})'.format(**record)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_fact(term, fact, author=''):
    """Record a new fact under ``term`` unless one already exists.

    Optionally stores ``author`` alongside the current timestamp.
    """
    logger.info('Adding new fact %s: %s', term, fact)
    already_stored = db.facts.find({'term': term_regex(term)}).count()
    if not already_stored:
        record = {
            'term': term,
            'fact': fact,
            'set_by': author,
            'set_date': time.time(),
        }
        db.facts.insert(record)
        db.facts.ensure_index('term')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def forget_fact(term):
    """Forget a fact by removing it from the database."""
    logger.info('Removing fact %s', term)
    query = {'term': term_regex(term)}
    db.facts.remove(query)
    return random.choice(ACKS)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def replace_fact(term, fact, author=''):
    """Replace an existing fact: forget the old one, store the new."""
    forget_fact(term)
    add_fact(term, fact, author)
    return random.choice(ACKS)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_module_docstring(app, what, name, obj, options, lines):
    """Sphinx hook: drop the docstring of the ``clusterpolate`` module."""
    if what != "module" or name != "clusterpolate":
        return
    # Mutate in place: Sphinx holds a reference to this exact list.
    lines[:] = []
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def wind(direction: Number, speed: Number, gust: Number,
         vardir: typing.List[Number] = None,  # type: ignore
         unit: str = 'kt', cardinals: bool = True,
         spoken: bool = False) -> str:
    """Format wind elements into a readable sentence.

    Returns the translation string.
    Ex: NNE-020 (variable 010 to 040) at 14kt gusting to 20kt
    """
    attr = 'spoken' if spoken else 'repr'
    out = ''
    # Wind direction
    if direction:
        if direction.repr in WIND_DIR_REPR:
            out += WIND_DIR_REPR[direction.repr]
        elif direction.value is None:
            out += direction.repr
        else:
            if cardinals:
                out += get_cardinal_direction(direction.value) + '-'  # type: ignore
            out += getattr(direction, attr)
    # Variable direction
    if vardir and isinstance(vardir, list):
        rendered = [getattr(var, attr) for var in vardir]
        out += ' (variable {} to {})'.format(*rendered)
    # Speed
    if speed and speed.value:
        out += f' at {speed.value}{unit}'
    # Gust
    if gust and gust.value:
        out += f' gusting to {gust.value}{unit}'
    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def visibility(vis: Number, unit: str = 'm') -> str:
    """Format a visibility element with both km and sm values.

    Ex: 8km ( 5sm )
    """
    if not (vis and unit in ('m', 'sm')):
        return ''
    if vis.repr in VIS_REPR:
        return VIS_REPR[vis.repr]
    if unit == 'm':
        miles = vis.value * 0.000621371
        converted = str(round(miles, 1)).replace('.0', '') + 'sm'  # type: ignore
        value = str(round(vis.value / 1000, 1)).replace('.0', '')
        unit = 'km'
    else:  # unit == 'sm'
        kilometers = vis.value / 0.621371
        converted = str(round(kilometers, 1)).replace('.0', '') + 'km'  # type: ignore
        value = str(vis.value).replace('.0', '')
    return f'{value}{unit} ({converted})'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def temperature(temp: Number, unit: str = 'C') -> str:
    """Format a temperature element with both C and F values.

    Used for both Temp and Dew.
    Ex: 34°C (93°F)
    """
    unit = unit.upper()
    if not (temp and unit in ('C', 'F')):
        return ''
    if unit == 'C':
        degrees = temp.value * 1.8 + 32
        converted = str(int(round(degrees))) + '°F'  # type: ignore
    else:  # unit == 'F'
        degrees = (temp.value - 32) / 1.8
        converted = str(int(round(degrees))) + '°C'  # type: ignore
    return f'{temp.value}°{unit} ({converted})'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def altimeter(alt: Number, unit: str = 'hPa') -> str:
    """Format the altimeter element with both hPa and inHg values.

    Ex: 30.11 inHg (10.20 hPa)
    """
    if not (alt and unit in ('hPa', 'inHg')):
        return ''
    if unit == 'hPa':
        value = alt.repr
        inches = alt.value / 33.8638866667
        converted = str(round(inches, 2)) + ' inHg'  # type: ignore
    else:  # unit == 'inHg'
        # Insert the implied decimal point, e.g. '3011' -> '30.11'.
        value = alt.repr[:2] + '.' + alt.repr[2:]
        pascals = float(value) * 33.8638866667
        converted = str(int(round(pascals))) + ' hPa'  # type: ignore
    return f'{value} {unit} ({converted})'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clouds(clds: [Cloud], unit: str = 'ft') -> str:  # type: ignore
    """Format a cloud list into a readable sentence.

    Returns the translation string.
    Ex: Broken layer at 2200ft (Cumulonimbus), Overcast layer at 3600ft - Reported AGL
    """
    if clds is None:
        return ''
    layers = []
    for layer in clds:
        # Layers without an altitude cannot be reported.
        if layer.altitude is None:
            continue
        text = CLOUD_TRANSLATIONS[layer.type]
        if layer.modifier:
            text += f' ({CLOUD_TRANSLATIONS[layer.modifier]})'
        layers.append(text.format(layer.altitude * 100, unit))
    if not layers:
        return 'Sky clear'
    return ', '.join(layers) + ' - Reported AGL'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def wxcode(code: str) -> str:
    """Translate a weather code into a readable string.

    Returns a translated string of variable length.
    """
    if not code:
        return ''
    intensity = ''
    if code.startswith('+'):
        intensity = 'Heavy '
        code = code[1:]
    elif code.startswith('-'):
        intensity = 'Light '
        code = code[1:]
    # Not actually a wx code (ex: R03/03002V03): return it untouched.
    if len(code) not in (2, 4, 6):
        return code
    out = intensity
    while code:
        chunk, code = code[:2], code[2:]
        if chunk in WX_TRANSLATIONS:
            out += WX_TRANSLATIONS[chunk] + ' '
        else:
            out += chunk
    return out.strip()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def wind_shear(shear: str, unit_alt: str = 'ft', unit_wind: str = 'kt',
               spoken: bool = False) -> str:
    """Translate wind shear into a readable string.

    Ex: Wind shear 2000ft from 140 at 30kt
    """
    if not shear or 'WS' not in shear or '/' not in shear:
        return ''
    # 'WS020/14030KT' -> ['020', '14030']
    parts = shear[2:].rstrip(unit_wind.upper()).split('/')  # type: ignore
    direction = parts[1][:3]
    if spoken:
        direction = core.spoken_number(direction)
    return f'Wind shear {int(parts[0])*100}{unit_alt} from {direction} at {parts[1][3:]}{unit_wind}'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def turb_ice(turbice: [str], unit: str = 'ft') -> str:  # type: ignore
    """Translate the list of turbulence or icing codes into a sentence.

    Ex: Occasional moderate turbulence in clouds from 3000ft to 14000ft
    """
    if not turbice:
        return ''
    # The leading digit picks the condition table: 5 = turbulence, 6 = icing.
    leading = turbice[0][0]
    if leading == '5':
        conditions = TURBULANCE_CONDITIONS
    elif leading == '6':
        conditions = ICING_CONDITIONS
    else:
        return ''
    # Split each 6-char item into (type, floor in 100s ft, depth in 1000s ft).
    layers = [[item[1:2], item[2:5], item[5]]
              for item in turbice if len(item) == 6]
    # Merge same-type layers that stack directly on top of one another
    # into a single band thicker than 9000ft (encoded depth maxes at 9).
    for i in reversed(range(len(layers) - 1)):
        if layers[i][2] == '9' and layers[i][0] == layers[i + 1][0] \
                and int(layers[i + 1][1]) == (int(layers[i][1]) + int(layers[i][2]) * 10):
            layers[i][2] = str(int(layers[i][2]) + int(layers[i + 1][2]))
            layers.pop(i + 1)
    return ', '.join(
        '{conditions} from {low_alt}{unit} to {high_alt}{unit}'.format(
            conditions=conditions[item[0]],
            low_alt=int(item[1]) * 100,
            high_alt=int(item[1]) * 100 + int(item[2]) * 1000,
            unit=unit,
        ) for item in layers)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def min_max_temp(temp: str, unit: str = 'C') -> str:
    """Format a Min or Max temperature element into a readable string.

    Ex: Maximum temperature of 23°C (73°F) at 18-15:00Z
    """
    if not temp or len(temp) < 7:
        return ''
    prefixes = {'TX': 'Maximum', 'TN': 'Minimum'}
    temp_type = prefixes.get(temp[:2])
    if temp_type is None:
        return ''
    # 'M' encodes a minus sign; drop the trailing 'Z' before splitting.
    parts = temp[2:].replace('M', '-').replace('Z', '').split('/')
    stamp = parts[1]
    if len(stamp) > 2:
        stamp = stamp[:2] + '-' + stamp[2:]
    formatted = temperature(core.make_number(parts[0]), unit)
    return f'{temp_type} temperature of {formatted} at {stamp}:00Z'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def shared(wxdata: ReportData, units: Units) -> typing.Dict[str, str]:
    """Translate Visibility, Altimeter, Clouds, and Other."""
    return {
        'visibility': visibility(wxdata.visibility, units.visibility),  # type: ignore
        'altimeter': altimeter(wxdata.altimeter, units.altimeter),  # type: ignore
        'clouds': clouds(wxdata.clouds, units.altitude),  # type: ignore
        'other': other_list(wxdata.other),  # type: ignore
    }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def metar(wxdata: MetarData, units: Units) -> MetarTrans:
    """Translate the results of metar.parse.

    Keys: Wind, Visibility, Clouds, Temperature, Dewpoint, Altimeter, Other
    """
    trans = shared(wxdata, units)
    trans['wind'] = wind(wxdata.wind_direction, wxdata.wind_speed,
                         wxdata.wind_gust, wxdata.wind_variable_direction,
                         units.wind_speed)
    trans['temperature'] = temperature(wxdata.temperature, units.temperature)
    trans['dewpoint'] = temperature(wxdata.dewpoint, units.temperature)
    trans['remarks'] = remarks.translate(wxdata.remarks)  # type: ignore
    return MetarTrans(**trans)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def taf(wxdata: TafData, units: Units) -> TafTrans:
    """Translate the results of taf.parse.

    Keys: Forecast, Min-Temp, Max-Temp.
    Forecast keys: Wind, Visibility, Clouds, Altimeter, Wind-Shear,
    Turbulance, Icing, Other.

    :param wxdata: parsed TAF data.
    :param units: unit settings used when rendering values.
    :return: a TafTrans populated with the translated forecast lines.
    """
    translations = {'forecast': []}  # type: ignore
    for line in wxdata.forecast:
        trans = shared(line, units)  # type: ignore
        trans['wind'] = wind(line.wind_direction, line.wind_speed,
                             line.wind_gust, unit=units.wind_speed)
        trans['wind_shear'] = wind_shear(line.wind_shear, units.altitude, units.wind_speed)
        trans['turbulance'] = turb_ice(line.turbulance, units.altitude)
        trans['icing'] = turb_ice(line.icing, units.altitude)
        # Remove false 'Sky Clear' if line type is 'BECMG'
        if line.type == 'BECMG' and trans['clouds'] == 'Sky clear':
            trans['clouds'] = None  # type: ignore
        translations['forecast'].append(TafLineTrans(**trans))  # type: ignore
    translations['min_temp'] = min_max_temp(wxdata.min_temp, units.temperature)  # type: ignore
    translations['max_temp'] = min_max_temp(wxdata.max_temp, units.temperature)  # type: ignore
    translations['remarks'] = remarks.translate(wxdata.remarks)
    return TafTrans(**translations)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_substitute_element(head, elt, ps):
    '''Return the GED typecode when ``elt`` matches a member of the
    substitutionGroup of ``head``.

    head -- ElementDeclaration typecode,
    elt -- the DOM element being parsed
    ps -- ParsedSoap Instance
    '''
    if isinstance(head, ElementDeclaration):
        return ElementDeclaration.getSubstitutionElement(head, elt, ps)
    # Non-GED heads cannot have substitution members.
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _is_substitute_element(head, sub):
    '''Return True if ``head`` and ``sub`` are both GEDs and ``sub``
    declares ``head`` as its substitutionGroup.

    head -- Typecode instance
    sub -- Typecode instance
    '''
    if not isinstance(head, ElementDeclaration) or not isinstance(sub, ElementDeclaration):
        return False
    try:
        group = sub.substitutionGroup
    except (AttributeError, TypeError):
        # No substitutionGroup declared (or it is malformed).
        return False
    ged = GED(*group)
    # TODO: better way of representing element references. Wrap them with
    # facets, and dereference when needed and delegate to..
    # NOTE: a leftover debug print of the name comparison was removed here.
    if head is ged or not (head.nspname == ged.nspname and head.pname == ged.pname):
        return False
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getElementDeclaration(cls, namespaceURI, name, isref=False, lazy=False):
    '''Grab an element declaration, returning either a typecode instance
    or a typecode class definition.  An element reference has its own
    facets and is local, so it will not be cached.

    Parameters:
        namespaceURI --
        name --
        isref -- if element reference, return class definition.
    '''
    key = (namespaceURI, name)
    if isref:
        # References are returned as class definitions, optionally
        # wrapped for lazy resolution.
        declaration_class = cls.elements.get(key, None)
        if lazy is True and declaration_class is not None:
            return _Mirage(declaration_class)
        return declaration_class
    # Non-references are instantiated once and memoized.
    typecode = cls.element_typecode_cache.get(key, None)
    if typecode is None:
        declaration_class = cls.elements.get(key, None)
        if declaration_class is not None:
            typecode = cls.element_typecode_cache[key] = declaration_class()
            typecode.typed = False
    return typecode
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def checkSubstitute(self, typecode):
    '''Return True if ``typecode`` may be substituted for "self"'s
    typecode, i.e. it is a GED whose substitutionGroup names this
    element (with some slop allowed for the empty namespace).
    '''
    if not isinstance(typecode, ElementDeclaration):
        return False
    try:
        # (namespace URI, local name) of the declared substitutionGroup.
        nsuri,ncname = typecode.substitutionGroup
    except (AttributeError, TypeError):
        return False
    if (nsuri,ncname) != (self.schema,self.literal):
        # allow slop with the empty namespace
        if not nsuri and not self.schema and ncname == self.literal:
            return True
        return False
    # Exact match: additionally require that the registered GED for this
    # element is the very same typecode object being substituted.
    sub = GED(self.schema, self.literal)
    if sub is None or sub is not typecode:
        return False
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getSubstitutionElement(self, elt, ps):
    '''If ``elt`` matches a member of this element's substitutionGroup,
    return the GED typecode representation of the member.

    elt -- the DOM element being parsed
    ps -- ParsedSoap instance
    '''
    typecode = GED(*_get_element_nsuri_name(elt))
    if typecode is None:
        return None
    try:
        group_nsuri, group_ncname = typecode.substitutionGroup
    except (AttributeError, TypeError):
        # The candidate declares no substitutionGroup.
        return None
    same_namespace = (group_nsuri == self.nspname
                      or (not group_nsuri and not self.nspname))
    if same_namespace and group_ncname == self.pname:
        return typecode
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def RegisterBuiltin(cls, arg):
    '''Register a builtin type by creating a new wrapper subclass.

    Raises RuntimeError when ``arg`` is already registered.  The wrapper
    is stored in ``cls.types_dict`` keyed by the builtin type.
    '''
    if arg in cls.types_dict:
        # Python 3-compatible raise (was the py2-only `raise E, msg` form).
        raise RuntimeError('%s already registered' % (arg,))
    class _Wrapper(arg):
        pass
    # The original computed string in the class body was discarded by the
    # interpreter; assign __doc__ explicitly so the wrapper is documented.
    _Wrapper.__doc__ = 'Wrapper for builtin %s\n%s' % (arg, cls.__doc__)
    _Wrapper.__name__ = '_%sWrapper' % arg.__name__
    cls.types_dict[arg] = _Wrapper
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def RegisterAnyElement(cls):
    '''For each registered wrapper, add it to the matching TypeCode
    class seriallist and re-register the type.  Provides Any
    serialization of all instances of the wrappers.
    '''
    for k,v in cls.types_dict.items():
        # Serializer registered for the wrapped builtin, if any.
        what = Any.serialmap.get(k)
        if what is None: continue
        # Avoid duplicate entries in the shared seriallist.
        if v in what.__class__.seriallist: continue
        what.__class__.seriallist.append(v)
        # clobber=1 forces re-registration with the updated seriallist.
        RegisterType(what.__class__, clobber=1, **what.__dict__)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render_doc(self):
    '''Render the swagger-ui documentation page.

    Delegates to a custom doc view when one is registered; aborts with
    NOT_FOUND when documentation is disabled.  Otherwise renders the
    swagger-ui template and rewrites any absolute internal swagger.json
    URL so the UI fetches the spec from the public apidocs endpoint.
    '''
    if self._doc_view:
        return self._doc_view()
    elif not self._doc:
        self.abort(self.bc_HTTPStatus_NOT_FOUND)
    res = render_template('swagger-ui.html', title=self.title, specs_url=self.specs_url)
    res = res.replace(self.complexReplaceString, self.APIDOCSPath)
    # Match "http(s)://host(:port)<internal_apidoc_prefix>/swagger.json"
    # (a previous dead assignment using internal_api_prefix was removed:
    # it was immediately overwritten and never used).
    regexp="\"https?:\/\/[a-zA-Z0\-9._]*(:[0-9]*)?" + self.internal_apidoc_prefix.replace("/","\/") + "\/swagger.json\""
    p = re.compile(regexp)
    res = p.sub("\"" + self.apidocsurl + "/swagger.json\"", res)
    return res
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __createLayout(self):
    """Create the dialog layout."""
    self.resize(450, 150)
    self.setSizeGripEnabled(True)
    verticalLayout = QVBoxLayout(self)
    # Radio-button group: where garbage collector messages are reported.
    whereGroupbox = QGroupBox(self)
    whereGroupbox.setTitle("Garbage collector message destination")
    sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(
        whereGroupbox.sizePolicy().hasHeightForWidth())
    whereGroupbox.setSizePolicy(sizePolicy)
    layoutWhere = QVBoxLayout(whereGroupbox)
    self.__silentRButton = QRadioButton(whereGroupbox)
    self.__silentRButton.setText("Silent")
    layoutWhere.addWidget(self.__silentRButton)
    self.__statusbarRButton = QRadioButton(whereGroupbox)
    self.__statusbarRButton.setText("Status bar")
    layoutWhere.addWidget(self.__statusbarRButton)
    self.__logtabRButton = QRadioButton(whereGroupbox)
    self.__logtabRButton.setText("Log tab")
    layoutWhere.addWidget(self.__logtabRButton)
    verticalLayout.addWidget(whereGroupbox)
    # Standard OK/Cancel buttons; OK is the default action.
    buttonBox = QDialogButtonBox(self)
    buttonBox.setOrientation(Qt.Horizontal)
    buttonBox.setStandardButtons(QDialogButtonBox.Ok |
                                 QDialogButtonBox.Cancel)
    self.__OKButton = buttonBox.button(QDialogButtonBox.Ok)
    self.__OKButton.setDefault(True)
    buttonBox.accepted.connect(self.accept)
    buttonBox.rejected.connect(self.close)
    verticalLayout.addWidget(buttonBox)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getCheckedOption(self):
    """Return which message destination is currently selected."""
    if self.__silentRButton.isChecked():
        return GCPluginConfigDialog.SILENT
    if self.__statusbarRButton.isChecked():
        return GCPluginConfigDialog.STATUS_BAR
    # Neither of the other buttons is checked, so it must be the log tab.
    return GCPluginConfigDialog.LOG
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_seq(seq_record, codon_positions, aminoacids=False, degenerate=None):
    """Extract the required sequence as a string.

    Parameters:
        seq_record (SeqRecordExpanded object)
        codon_positions (str): one of ``1st``, ``2nd``, ``3rd``,
            ``1st-2nd``, ``ALL`` or ``None``.
        aminoacids (bool): translate to aminoacids when True.
        degenerate (str): degeneration method passed through to the
            record, if any.

    Returns:
        Namedtuple containing ``seq (str)`` and ``warning (str)``.
    """
    Sequence = namedtuple('Sequence', ['seq', 'warning'])
    if codon_positions not in [None, '1st', '2nd', '3rd', '1st-2nd', 'ALL']:
        raise WrongParameterFormat("`codon_positions` argument should be any of the following"
                                   ": 1st, 2nd, 3rd, 1st-2nd or ALL")
    if aminoacids:
        aa = seq_record.translate()
        if '*' in aa:
            # Stop codons inside the translation usually indicate a
            # frame problem; surface it as a warning, not an error.
            warning = "Gene {0}, sequence {1} contains stop codons '*'".format(seq_record.gene_code,
                                                                               seq_record.voucher_code)
        else:
            warning = None
        return Sequence(seq=aa, warning=warning)
    if degenerate:
        return Sequence(seq=seq_record.degenerate(degenerate), warning=None)
    if codon_positions == '1st':
        return Sequence(seq=seq_record.first_codon_position(), warning=None)
    elif codon_positions == '2nd':
        return Sequence(seq=seq_record.second_codon_position(), warning=None)
    elif codon_positions == '3rd':
        return Sequence(seq=seq_record.third_codon_position(), warning=None)
    elif codon_positions == '1st-2nd':
        return Sequence(seq=seq_record.first_and_second_codon_positions(), warning=None)
    else:  # None and ALL
        return Sequence(seq=str(seq_record.seq), warning=None)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_nexus_to_format(dataset_as_nexus, dataset_format):
    """Convert a NEXUS alignment to another format using Biopython tools.

    :param dataset_as_nexus: alignment content in NEXUS format.
    :param dataset_format: target format name understood by AlignIO.
    :return: the converted dataset as a string.
    """
    fake_handle = StringIO(dataset_as_nexus)
    nexus_al = AlignIO.parse(fake_handle, 'nexus')
    # AlignIO writes to disk; write to a throwaway file, read it back,
    # and delete it.
    tmp_file = make_random_filename()
    AlignIO.write(nexus_al, tmp_file, dataset_format)
    dataset_converted = read_and_delete_tmp_file(tmp_file)
    return dataset_converted
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def truncate_sentence(text, max_chars, break_words=False, padding=0):
    """Truncate a sentence.

    :param max_chars: maximum number of characters of the truncated
        sentence.
    :param break_words: set to ``True`` to truncate strictly even if it
        breaks a word; defaults to ``False``, which truncates shorter
        but never breaks words.
    :param padding: number of characters reserved out of ``max_chars``
        (e.g. for an appended ellipsis).
    :return: the truncated sentence.
    """
    if break_words:
        # Fix: the previous negative-index slice relative to len(text)
        # wrongly truncated strings already shorter than the limit.
        return text[:max_chars - padding]
    words = []
    for word in text.split():
        predicted_len = (
            sum(map(len, words)) +  # length of accepted words
            len(word) +             # length of next word
            len(words) - 1 +        # length of separating spaces
            padding)
        if predicted_len >= max_chars:
            break
        words.append(word)
    return ' '.join(words)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def always_win(cls, request) -> [(200, 'Ok', String)]:
    '''Perform an always succeeding task.

    Generator-style handler: each ``yield`` hands control back to the
    framework scheduler, simulating a long-running cooperative task.
    NOTE(review): ``Respond`` presumably delivers the response to the
    framework — confirm its semantics against the framework docs.
    '''
    task_id = uuid4().hex.upper()[:5]
    log.info('Starting always OK task {}'.format(task_id))
    # Fake workload: yield a random number of times.
    for i in range(randint(0, MAX_LOOP_DURATION)):
        yield
    log.info('Finished always OK task {}'.format(task_id))
    msg = 'I am finally done with task {}!'.format(task_id)
    Respond(200, msg)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def always_fail(cls, request) -> [
        (200, 'Ok', String),
        (406, 'Not Acceptable', Void)]:
    '''Perform an always failing task.

    NOTE(review): the trailing 200 response is only reachable if
    ``Respond(406)`` does not terminate the handler — looks intentional
    as a dead fallback; confirm against the framework's Respond
    semantics.
    '''
    task_id = uuid4().hex.upper()[:5]
    log.info('Starting always FAILING task {}'.format(task_id))
    # Fake workload: yield a random number of times.
    for i in range(randint(0, MAX_LOOP_DURATION)):
        yield
    Respond(406)
    Respond(200, 'Foobar')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fibonacci(cls, request,
              limit: (Ptypes.path,
                      Integer('Upper limit of the series'))) -> [
        (200, 'Ok', FibonacciFragment)]:
    '''Return Fibonacci sequence whose last number is <= limit.'''
    def fibonacci_generator():
        # Lazily yields Fibonacci numbers up to and including `limit`.
        # `limit` is read when the generator is iterated, i.e. after the
        # int() conversion below has already happened.
        last_two = (0, 1)
        while last_two[1] <= limit:
            log.debug('Fibonacci number generated: {}'.format(last_two[1]))
            yield last_two[1]
            last_two = last_two[1], sum(last_two)
    log.info('Starting Fibonacci generation, max: {}'.format(limit))
    # Path parameters arrive as strings; convert for numeric comparison.
    limit = int(limit)
    Respond(200, fibonacci_generator())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def query_echo(cls, request,
               foo: (Ptypes.query, String('A query parameter'))) -> [
        (200, 'Ok', String)]:
    '''Echo the query parameter.'''
    log.info('Echoing query param, value is: {}'.format(foo))
    # Cooperative fake workload: yield control a random number of times.
    for _ in range(randint(0, MAX_LOOP_DURATION)):
        yield
    Respond(200, 'The value sent was: {}'.format(foo))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def body_echo(cls, request,
              foo: (Ptypes.body, String('A body parameter'))) -> [
        (200, 'Ok', String)]:
    '''Echo the body parameter.'''
    log.info('Echoing body param, value is: {}'.format(foo))
    # Cooperative fake workload: yield control a random number of times.
    for _ in range(randint(0, MAX_LOOP_DURATION)):
        yield
    Respond(200, 'The value sent was: {}'.format(foo))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def header_echo(cls, request,
                api_key: (Ptypes.header, String('API key'))) -> [
        (200, 'Ok', String)]:
    '''Echo the header parameter.'''
    log.info('Echoing header param, value is: {}'.format(api_key))
    # Cooperative fake workload: yield control a random number of times.
    for _ in range(randint(0, MAX_LOOP_DURATION)):
        yield
    Respond(200, 'The value sent was: {}'.format(api_key))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def form_echo(cls, request,
              foo: (Ptypes.form, String('A form parameter'))) -> [
        (200, 'Ok', String)]:
    '''Echo the form parameter.'''
    log.info('Echoing form param, value is: {}'.format(foo))
    # Cooperative fake workload: yield control a random number of times.
    for _ in range(randint(0, MAX_LOOP_DURATION)):
        yield
    Respond(200, 'The value sent was: {}'.format(foo))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resolve(data_type, name=None, listify_default=False):
    """Retrieve the properties for a given data type.

    This is the main routine: it converts Nani's data types into the
    dtype, default value, and view class used to define a new NumPy
    array and to wrap it into a view object.  Use :func:`validate` to
    check that the input data type is well-formed.

    Parameters
    ----------
    data_type : nani data type
        Type of the array elements.
    name : str
        Name for the view to be generated for the array.
    listify_default : bool
        ``True`` to output the default values with lists in place of
        tuples.  This might make the output incompatible with array
        creation routines such as ``numpy.array`` but it should still
        work for element assignment.

    Returns
    -------
    nani.Nani
        The properties to use to initialize a NumPy array around the
        data type.
    """
    data_type = _consolidate(data_type)
    return Nani(
        dtype=numpy.dtype(_resolve_dtype(data_type)),
        default=_resolve_default(data_type, listify=listify_default),
        view=_resolve_view(Array(element_type=data_type, shape=-1, name=name)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _consolidate(data_type):
    """Enforce the structure of the data type.

    Specifically, ensure that any field defined as a generic tuple is
    converted into an instance of `Field`, recursively through arrays
    and structures.
    """
    if isinstance(data_type, _ATOMIC):
        out = data_type
    elif isinstance(data_type, Array):
        element_type = _consolidate(data_type.element_type)
        out = data_type._replace(element_type=element_type)
    elif isinstance(data_type, Structure):
        # Rebuild each field as a Field tuple, consolidating the nested
        # type stored at _FIELD_TYPE_IDX and keeping other items as-is.
        fields = tuple(
            Field(*(_consolidate(field[i]) if i == _FIELD_TYPE_IDX
                    else field[i]
                    for i in _range(len(field))))
            for field in data_type.fields)
        out = data_type._replace(fields=fields)
    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _resolve_dtype(data_type):
    """Retrieve the corresponding NumPy ``dtype`` for a given data type."""
    if isinstance(data_type, _FIXED_ATOMIC):
        out = _get_atomic_dtype(data_type)
    elif isinstance(data_type, _FLEXIBLE_ATOMIC):
        # Flexible atomics (e.g. strings) carry an explicit length.
        out = (_get_atomic_dtype(data_type), data_type.length)
    elif isinstance(data_type, Array):
        shape = data_type.shape
        if isinstance(shape, _SEQUENCE_TYPES) and len(shape) == 1:
            # Workaround the exception `ValueError: invalid itemsize in
            # generic type tuple` when an `Array` of shape 0 or (0,) is nested
            # within another `Array`.
            shape = shape[0]
        out = (_resolve_dtype(data_type.element_type), shape)
    elif isinstance(data_type, Structure):
        out = [(field.name, _resolve_dtype(field.type))
               for field in data_type.fields]
    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _resolve_default(data_type, listify=False):
    """Retrieve the default value for a given data type.

    When ``listify`` is True, sequences are emitted as lists rather than
    tuples/namedtuples so they support element assignment.
    """
    if isinstance(data_type, _ATOMIC):
        # A Python's object type needs to be left as is instead of being
        # wrapped into a NumPy type.
        out = (data_type.default if isinstance(data_type, Object)
               else _get_atomic_dtype(data_type)(data_type.default))
    elif isinstance(data_type, Array):
        element_default = _resolve_default(data_type.element_type,
                                           listify=listify)
        Sequence = list if listify else tuple
        shape = ((data_type.shape,) if isinstance(data_type.shape, int)
                 else data_type.shape)
        # Nest copies of the element default, one level per dimension.
        out = element_default
        for dimension in shape:
            out = Sequence(copy.deepcopy(out) for _ in _range(dimension))
    elif isinstance(data_type, Structure):
        if listify:
            out = [_resolve_default(field.type, listify=listify)
                   for field in data_type.fields]
        else:
            # Preserve field order and expose defaults by name through a
            # dynamically created namedtuple.
            field_defaults = collections.OrderedDict(
                (field.name, _resolve_default(field.type, listify=listify))
                for field in data_type.fields)
            name = ('StructureDefault_%s' % (data_type.name,)
                    if data_type.name else 'StructureDefault')
            struct = collections.namedtuple(name, field_defaults.keys())
            out = struct(**field_defaults)
    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _resolve_view(data_type):
    """Retrieve the view for a given data type.

    Only one view class is returned — the one for the root data type —
    but more classes may be created dynamically when the data type has
    nested elements (`Array` and `Structure` types).  Setting the
    `view` attribute on a data type overrides this default behaviour.
    """
    view = getattr(data_type, 'view', None)
    if view is not None:
        return view
    if isinstance(data_type, _ATOMIC):
        # Atomic values are exposed directly, no wrapper needed.
        out = None
    elif isinstance(data_type, Array):
        out = _define_array_view(data_type)
    elif isinstance(data_type, Structure):
        out = _define_structure_view(data_type)
    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _define_array_view(data_type):
    """Define a new view class for an `Array` type.

    The mixin chosen depends on whether the elements themselves need a
    view and, if so, whether they are atomic or composite.
    """
    element_type = data_type.element_type
    element_view = _resolve_view(element_type)
    if element_view is None:
        # Elements are exposed directly.
        mixins = (_DirectArrayViewMixin,)
        attributes = _get_mixin_attributes(mixins)
    elif isinstance(element_type, _ATOMIC):
        mixins = (_IndirectAtomicArrayViewMixin,)
        attributes = _get_mixin_attributes(mixins)
        attributes.update({
            '_element_view': element_view,
        })
    else:
        mixins = (_IndirectCompositeArrayViewMixin,)
        attributes = _get_mixin_attributes(mixins)
        attributes.update({
            '_element_view': element_view,
        })
    name = data_type.name if data_type.name else 'ArrayView'
    return type(name, (), attributes)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _define_structure_view(data_type):
    """Define a new view class for a `Structure` type.

    Each field becomes a property whose getter/setter indexes into the
    underlying structured element held in ``self._data``.
    """
    def define_getter(field_index, field_type, field_view):
        # Pick the access strategy matching how the field is viewed.
        if field_view is None:
            def getter(self):
                return self._data[field_index]
        elif isinstance(field_type, _ATOMIC):
            def getter(self):
                return field_view(self._data, field_index)
        else:
            def getter(self):
                return field_view(self._data[field_index])
        return getter
    def define_setter(field_index, read_only):
        def setter(self, value):
            self._data[field_index] = value
        # Read-only fields get no setter at all.
        return None if read_only else setter
    field_views = [_resolve_view(field.type) for field in data_type.fields]
    mixins = (_StructuredViewMixin,)
    attributes = _get_mixin_attributes(mixins)
    attributes.update({
        '_fields': tuple(field.name for field in data_type.fields),
    })
    attributes.update({
        field.name: property(
            fget=define_getter(i, field.type, field_view),
            fset=define_setter(i, field.read_only),
            fdel=None)
        for i, (field, field_view)
        in enumerate(zip(data_type.fields, field_views))})
    name = data_type.name if data_type.name else 'StructureView'
    return type(name, (), attributes)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_mixin_attributes(mixins):
    """Retrieve the attributes for a given set of mixin classes.

    The declared attributes of each mixin class are merged into a single
    dictionary.
    """
    return {attribute: mixin.__dict__[attribute]
            for mixin in mixins
            for attribute in _MIXIN_ATTRIBUTES[mixin]}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_atomic_dtype(data_type):
    """Retrieve the NumPy ``dtype`` for a given atomic data type.

    An explicit ``type`` attribute wins; otherwise the dtype is looked
    up from the predefined mapping for the Nani base type.
    """
    atomic_type = getattr(data_type, 'type', None)
    if atomic_type is not None:
        return atomic_type
    return _PREDEFINED_ATOMIC_NUMPY_TYPES[_find_base_type(data_type)]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _find_base_type(data_type):
    """Find Nani's base type for a given data type.

    Useful when Nani's data types were subclassed and the original type
    is required.  Returns ``None`` when no Nani base type is found.
    """
    bases = type(data_type).__mro__
    for base in bases:
        if base in _ALL:
            return base
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _find_duplicates(seq):
"""Find the duplicate elements from a sequence.""" |
seen = set()
return [element for element in seq
if seq.count(element) > 1
and element not in seen and seen.add(element) is None] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _format_type(cls):
    """Format a type name for printing.

    Builtin types are shown bare; other types are qualified with their
    module name.
    """
    if cls.__module__ == _BUILTIN_MODULE:
        return cls.__name__
    return '%s.%s' % (cls.__module__, cls.__name__)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _format_element(element, count, index, last_separator):
"""Format an element from a sequence. This only prepends a separator for the last element and wraps each element with single quotes. """ |
return ("%s'%s'" % (last_separator, element)
if count > 1 and index == count - 1
else "'%s'" % (element,)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _join_sequence(seq, last_separator=''):
    """Join a sequence into a comma-separated string of quoted elements.

    ``last_separator`` (e.g. ``'and '``) is prepended to the final
    element when the sequence has more than one item.
    """
    count = len(seq)
    return ', '.join(_format_element(element, count, i, last_separator)
                     for i, element in enumerate(seq))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _join_types(seq, last_separator=''):
    """Join class object names into a single printable string."""
    class_names = [_format_type(cls) for cls in seq]
    return _join_sequence(class_names, last_separator)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def slice_columns(x, using=None):
    """Slice a numpy array into a list of columns.

    Parameters
    ----------
    x : ndarray
        A numpy array instance.
    using : list of int or slice or None, optional
        Indices or slice instances selecting the columns.  Defaults to
        every column.

    Returns
    -------
    list of ndarray
        The selected columns.
    """
    if using is None:
        using = range(0, len(x[0]))
    return [x[:, s] for s in using]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unite_dataset(dataset, basecolumn=0):
    """Unite a dataset into a single data list.

    Parameters
    ----------
    dataset : list of list of ndarray
        A data list of column lists of numpy arrays.
    basecolumn : int, optional
        Index of the base column.  When the number of samples differs
        among the dataset, all data is trimmed based on the order of
        this column.

    Returns
    -------
    list
        A single-element list holding the united column list.
    """
    ndata = [None] * len(dataset[0])
    for pdata in dataset:
        # select basecolumn
        bnx = ndata[basecolumn]
        bpx = pdata[basecolumn]
        if bnx is not None and bnx.ndim >= 2:
            bnx = bnx[:,-1]
        if bpx is not None and bpx.ndim >= 2:
            bpx = bpx[:,-1]
        # calculate min and max of this and final data
        if bnx is not None and len(bnx) != len(bpx):
            # the number of samples is different, so regulation is required
            xmin = max(np.min(bnx), np.min(bpx))
            xmax = min(np.max(bnx), np.max(bpx))
            # slice the data
            nindex = np.where((bnx>xmin) & (bnx<xmax))
            pindex = np.where((bpx>xmin) & (bpx<xmax))
        else:
            nindex = None
            pindex = None
        # Fix: itertools.izip does not exist on Python 3; the builtin
        # zip provides the same lazy pairing.
        for i, (nx, px) in enumerate(zip(ndata, pdata)):
            if nindex:
                nx = nx[nindex]
            if pindex:
                px = px[pindex]
            ndata[i] = px if nx is None else np.c_[nx, px]
    return [ndata]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self, filename, using=None, parser=None, **kwargs):
    """Load data from a file using the specified parser.

    The parsed data is separated/sliced into a column list.

    Parameters
    ----------
    filename : str
        A data file path.
    using : list of int, slice instance, or None, optional
        Indices or slice instances used to slice the data into columns.
        Falls back to the ``using`` given to the constructor.
    parser : instance or None, optional
        A parser instance.  Falls back to the ``parser`` given to the
        constructor.

    Returns
    -------
    list of ndarray
    """
    using = using or self.using
    parser = parser or self.parser
    if parser is None:
        raise AttributeError("A parser instance must be specified")
    # parse the file with the chosen parser, then cut into columns
    data = parser.load(filename, **kwargs)
    return slice_columns(data, using)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prepare_filenames(self, normalized_url, request):
    """Prepare a template filename list based on authentication state.

    Authenticated users get a ``.authenticated``-suffixed candidate,
    anonymous users a ``.anonymous`` one; the original filename is kept
    as the fallback at the end of the list.

    Args:
        normalized_url (str): a normalized url.
        request: an instance of HttpRequest.

    Returns:
        list: candidate template filenames, most specific first.
    """
    filenames = [normalized_url]
    if request.user.is_authenticated():
        filenames.insert(0, normalized_url + ".authenticated")
    else:
        filenames.insert(0, normalized_url + ".anonymous")
    return filenames
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def loadFromStream(self, stream, name=None):
    """Return a WSDL instance loaded from a stream object.

    ``name`` (or the stream's own ``name`` attribute, when present) is
    recorded as the WSDL location.
    """
    document = DOM.loadDocument(stream)
    wsdl = WSDL()
    if name:
        wsdl.location = name
    elif hasattr(stream, 'name'):
        wsdl.location = stream.name
    wsdl.load(document)
    return wsdl
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def loadFromURL(self, url):
"""Return a WSDL instance loaded from the given url.""" |
document = DOM.loadFromURL(url)
wsdl = WSDL()
wsdl.location = url
wsdl.load(document)
return wsdl |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def loadFromFile(self, filename):
    """Return a WSDL instance loaded from the given file."""
    # The context manager guarantees the file is closed even if
    # parsing raises (replaces the manual try/finally).
    with open(filename, 'rb') as file:
        return self.loadFromStream(file)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def toDom(self):
    """Generate a DOM representation of the WSDL instance.

    XML Schema generation is not handled here, so the targetNamespace of
    all XML Schema elements or types used by WSDL message parts must be
    specified via import information items.
    """
    namespaceURI = DOM.GetWSDLUri(self.version)
    self.document = DOM.createDocument(namespaceURI, 'wsdl:definitions')
    # Set up a couple prefixes for easy reading.
    child = DOM.getElement(self.document, None)
    child.setAttributeNS(None, 'targetNamespace', self.targetNamespace)
    child.setAttributeNS(XMLNS.BASE, 'xmlns:wsdl', namespaceURI)
    child.setAttributeNS(XMLNS.BASE, 'xmlns:xsd', 'http://www.w3.org/1999/XMLSchema')
    child.setAttributeNS(XMLNS.BASE, 'xmlns:soap', 'http://schemas.xmlsoap.org/wsdl/soap/')
    child.setAttributeNS(XMLNS.BASE, 'xmlns:tns', self.targetNamespace)
    if self.name:
        child.setAttributeNS(None, 'name', self.name)
    # Serialize each child collection in document order.
    # wsdl:import
    for item in self.imports:
        item.toDom()
    # wsdl:message
    for item in self.messages:
        item.toDom()
    # wsdl:portType
    for item in self.portTypes:
        item.toDom()
    # wsdl:binding
    for item in self.bindings:
        item.toDom()
    # wsdl:service
    for item in self.services:
        item.toDom()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getWSDL(self):
    """Return the WSDL object that contains this information item.

    Walks up the ``parent`` chain, skipping any intermediate collection
    objects, until a WSDL instance is found.  Returns None when the
    chain ends without reaching one.
    """
    parent = self
    while True:
        # Skip any collections on the way up.
        if isinstance(parent, WSDL):
            return parent
        try:
            parent = parent.parent()
        except (AttributeError, TypeError, ReferenceError):
            # No parent() accessor (or a dead weak reference): the chain
            # is broken, so this item is not contained in a WSDL.
            # Narrowed from the original bare `except:`, which also
            # swallowed KeyboardInterrupt/SystemExit.
            break
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getBinding(self):
    """Return the Binding object that is referenced by this port."""
    # Resolve the binding name through the owning WSDL's registry.
    return self.getService().getWSDL().bindings[self.binding]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getPortType(self):
    """Return the PortType object that is referenced by this port."""
    wsdl = self.getService().getWSDL()
    # The port names a binding; the binding's `type` names the port type.
    return wsdl.portTypes[wsdl.bindings[self.binding].type]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getAddressBinding(self):
    """Return the extension element used as this port's address binding.

    A convenience accessor; raises WSDLError when the port carries
    neither a SOAP nor an HTTP address extension.
    """
    for extension in self.extensions:
        if isinstance(extension, (SoapAddressBinding, HttpAddressBinding)):
            return extension
    raise WSDLError('No address binding found in port.')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def addInParameter(self, name, type, namespace=None, element_type=0):
    """Add an input parameter description to the call info."""
    info = ParameterInfo(name, type, namespace, element_type)
    self.inparams.append(info)
    return info
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def addOutParameter(self, name, type, namespace=None, element_type=0):
    """Add an output parameter description to the call info."""
    info = ParameterInfo(name, type, namespace, element_type)
    self.outparams.append(info)
    return info
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setReturnParameter(self, name, type, namespace=None, element_type=0):
    """Set the return parameter description for the call info."""
    info = ParameterInfo(name, type, namespace, element_type)
    self.retval = info
    return info
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def addInHeaderInfo(self, name, type, namespace, element_type=0, mustUnderstand=0):
    """Add an input SOAP header description to the call info."""
    info = HeaderInfo(name, type, namespace, element_type)
    if mustUnderstand:
        info.mustUnderstand = 1
    self.inheaders.append(info)
    return info
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def addOutHeaderInfo(self, name, type, namespace, element_type=0, mustUnderstand=0):
    """Add an output SOAP header description to the call info."""
    info = HeaderInfo(name, type, namespace, element_type)
    if mustUnderstand:
        info.mustUnderstand = 1
    self.outheaders.append(info)
    return info
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_requirements(dist, attr, value):
    """Verify that install_requires is a valid requirements list."""
    try:
        # parse_requirements is lazy; force full iteration so that any
        # malformed specifier surfaces here rather than later.
        list(pkg_resources.parse_requirements(value))
    except (TypeError, ValueError):
        message = ("%r must be a string or list of strings "
                   "containing valid project/version requirement specifiers")
        raise DistutilsSetupError(message % (attr,))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def store(self):
    """Create a context manager to store records in the cleaned table.

    Yields a ``write`` callable that serialises one object per line as
    JSON into a temporary file; when the caller is done, the file is
    uploaded over the remote object and the temp file is removed.
    """
    # Open in text mode: json.dumps produces str, which the default
    # 'w+b' mode of NamedTemporaryFile would reject on Python 3.
    output = tempfile.NamedTemporaryFile(mode='w+', suffix='.json')
    try:
        def write(o):
            # One JSON document per line; json_default handles
            # project-specific types during serialisation.
            line = json.dumps(o, default=json_default)
            return output.write(line + '\n')
        yield write
        output.seek(0)
        log.info("Uploading generated table (%s)...", self._obj)
        self.save_file(output.name, destructive=True)
    finally:
        try:
            output.close()
        except Exception:
            # Best-effort cleanup; never mask an in-flight exception.
            # (Narrowed from a bare `except:`.)
            pass
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def records(self):
    """Yield each record that has been stored in the table.

    The remote object is first copied into a local temporary file so
    the source handle is consumed only once; records are then decoded
    one JSON document per line.
    """
    output = tempfile.NamedTemporaryFile(suffix='.json')
    try:
        log.info("Loading table from (%s)...", self._obj)
        shutil.copyfileobj(self.fh(), output)
        output.seek(0)
        for line in output.file:
            # json_hook restores project-specific types on load.
            yield json.loads(line, object_hook=json_hook)
    finally:
        try:
            output.close()
        except Exception:
            # Best-effort cleanup; never mask an in-flight exception.
            # (Narrowed from a bare `except:`.)
            pass
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _cleanRecursive(self, subSelf):
    """Delete all NestedOrderedDict entries that haven't any entries."""
    # Snapshot items() so keys can be popped while walking.
    for key, value in list(subSelf.items()):
        if not self.isNestedDict(value):
            continue
        if value:
            self._cleanRecursive(value)
        else:
            subSelf.pop(key)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def belongsToModule(obj, module):
    """Return True if *obj* is defined in *module* or one of its submodules.

    Compares ``obj.__module__`` against ``module.__name__``: an exact
    match means the object lives in the module itself; a dotted-prefix
    match means it lives in a submodule.  The prefix includes the
    trailing dot so that an unrelated sibling module (e.g. ``jsonx``)
    is not mistaken for a submodule of ``json`` — the original bare
    ``startswith(module.__name__)`` produced that false positive.
    """
    return (obj.__module__ == module.__name__ or
            obj.__module__.startswith(module.__name__ + '.'))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_table(self):
    """Create the DynamoDB table used by this ObjectStore, only if it
    does not already exist."""
    existing = self.aws_conn.list_tables()['TableNames']
    if self.table_name in existing:
        log.info("Table %s already exists" % self.table_name)
        return
    log.info("Table %s does not exist: creating it" % self.table_name)
    self.table = Table.create(
        self.table_name,
        schema=[HashKey('key')],
        throughput={'read': 10, 'write': 10},
        connection=self.aws_conn,
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def put(self, key, value, overwrite=True):
    """Marshall *value* into a string, using the to_string marshalling
    method passed in the constructor, and store it in the DynamoDB
    table under key *key*."""
    self._get_table()
    marshalled = self.to_string(value)
    log.debug("Storing in key '%s' the object: '%s'" % (key, marshalled))
    item = {
        'key': key,
        'value': marshalled,
    }
    self.table.put_item(data=item, overwrite=overwrite)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, key):
    """Fetch the string stored in DynamoDB under *key*, convert it back
    to an object via the from_string unmarshalling method passed in the
    constructor, and return it.

    NOTE(review): the original contract says "Return None if no object
    found", but get_item is called unguarded here and (per the boto
    API) raises for a missing item — confirm intended behavior with
    callers.
    """
    self._get_table()
    item = self.table.get_item(key=key)
    log.debug("Retrieved from key '%s' the object: '%s'" % (key, item['value']))
    return self.from_string(item['value'])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete(self, key):
    """If this key exists, delete it."""
    self._get_table()
    self.table.delete_item(key=key)
    # Log only after the delete call has returned without raising.
    log.debug("Deleted item at key '%s'" % (key))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_env(key, default=None, coerce=lambda x: x, required=False):
""" Return env var coerced into a type other than string. This function extends the standard os.getenv function to enable the coercion of values into data types other than string (all env vars are strings by default). Args: key: string, the name of the env var to look up Kwargs: default: the default value to return if the env var does not exist. NB the default value is **not** coerced, and is assumed to be of the correct type. coerce: a function that is used to coerce the value returned into another type required: bool, if True, then a RequiredSettingMissing error is raised if the env var does not exist. Returns the env var, passed through the coerce function """ |
try:
value = os.environ[key]
except KeyError:
if required is True:
raise RequiredSettingMissing(key)
else:
return default
try:
return coerce(value)
except Exception:
raise CoercianError(key, value, coerce) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.