code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def get_dtext(value):
""" dtext = <printable ascii except \ [ ]> / obs-dtext
obs-dtext = obs-NO-WS-CTL / quoted-pair
We allow anything except the excluded characters, but if we find any
ASCII other than the RFC defined printable ASCII a NonPrintableDefect is
added to the token's defects list. Quoted pairs are converted to their
unquoted values, so what is returned is a ptext token, in this case a
ValueTerminal. If there were quoted-printables, an ObsoleteHeaderDefect is
added to the returned token's defect list.
"""
ptext, value, had_qp = _get_ptext_to_endchars(value, '[]')
ptext = ValueTerminal(ptext, 'ptext')
if had_qp:
ptext.defects.append(errors.ObsoleteHeaderDefect(
"quoted printable found in domain-literal"))
_validate_xtext(ptext)
return ptext, value | dtext = <printable ascii except \ [ ]> / obs-dtext
obs-dtext = obs-NO-WS-CTL / quoted-pair
We allow anything except the excluded characters, but if we find any
ASCII other than the RFC defined printable ASCII a NonPrintableDefect is
added to the token's defects list. Quoted pairs are converted to their
unquoted values, so what is returned is a ptext token, in this case a
ValueTerminal. If there were quoted-printables, an ObsoleteHeaderDefect is
added to the returned token's defect list. | Below is the instruction that describes the task:
### Input:
dtext = <printable ascii except \ [ ]> / obs-dtext
obs-dtext = obs-NO-WS-CTL / quoted-pair
We allow anything except the excluded characters, but if we find any
ASCII other than the RFC defined printable ASCII a NonPrintableDefect is
added to the token's defects list. Quoted pairs are converted to their
unquoted values, so what is returned is a ptext token, in this case a
ValueTerminal. If there were quoted-printables, an ObsoleteHeaderDefect is
added to the returned token's defect list.
### Response:
def get_dtext(value):
""" dtext = <printable ascii except \ [ ]> / obs-dtext
obs-dtext = obs-NO-WS-CTL / quoted-pair
We allow anything except the excluded characters, but if we find any
ASCII other than the RFC defined printable ASCII a NonPrintableDefect is
added to the token's defects list. Quoted pairs are converted to their
unquoted values, so what is returned is a ptext token, in this case a
ValueTerminal. If there were quoted-printables, an ObsoleteHeaderDefect is
added to the returned token's defect list.
"""
ptext, value, had_qp = _get_ptext_to_endchars(value, '[]')
ptext = ValueTerminal(ptext, 'ptext')
if had_qp:
ptext.defects.append(errors.ObsoleteHeaderDefect(
"quoted printable found in domain-literal"))
_validate_xtext(ptext)
return ptext, value |
def _get_rsi(cls, df, n_days):
""" Calculate the RSI (Relative Strength Index) within N days
calculated based on the formula at:
https://en.wikipedia.org/wiki/Relative_strength_index
:param df: data
:param n_days: N days
:return: None
"""
n_days = int(n_days)
d = df['close_-1_d']
df['closepm'] = (d + d.abs()) / 2
df['closenm'] = (-d + d.abs()) / 2
closepm_smma_column = 'closepm_{}_smma'.format(n_days)
closenm_smma_column = 'closenm_{}_smma'.format(n_days)
p_ema = df[closepm_smma_column]
n_ema = df[closenm_smma_column]
rs_column_name = 'rs_{}'.format(n_days)
rsi_column_name = 'rsi_{}'.format(n_days)
df[rs_column_name] = rs = p_ema / n_ema
df[rsi_column_name] = 100 - 100 / (1.0 + rs)
del df['closepm']
del df['closenm']
del df[closepm_smma_column]
del df[closenm_smma_column] | Calculate the RSI (Relative Strength Index) within N days
calculated based on the formula at:
https://en.wikipedia.org/wiki/Relative_strength_index
:param df: data
:param n_days: N days
:return: None | Below is the instruction that describes the task:
### Input:
Calculate the RSI (Relative Strength Index) within N days
calculated based on the formula at:
https://en.wikipedia.org/wiki/Relative_strength_index
:param df: data
:param n_days: N days
:return: None
### Response:
def _get_rsi(cls, df, n_days):
""" Calculate the RSI (Relative Strength Index) within N days
calculated based on the formula at:
https://en.wikipedia.org/wiki/Relative_strength_index
:param df: data
:param n_days: N days
:return: None
"""
n_days = int(n_days)
d = df['close_-1_d']
df['closepm'] = (d + d.abs()) / 2
df['closenm'] = (-d + d.abs()) / 2
closepm_smma_column = 'closepm_{}_smma'.format(n_days)
closenm_smma_column = 'closenm_{}_smma'.format(n_days)
p_ema = df[closepm_smma_column]
n_ema = df[closenm_smma_column]
rs_column_name = 'rs_{}'.format(n_days)
rsi_column_name = 'rsi_{}'.format(n_days)
df[rs_column_name] = rs = p_ema / n_ema
df[rsi_column_name] = 100 - 100 / (1.0 + rs)
del df['closepm']
del df['closenm']
del df[closepm_smma_column]
del df[closenm_smma_column] |
def street_address(self, address):
"""
Geocode one and only address, get a single Address object back
>>> client.street_address("100 Main St, Anywhere, USA")
>>> client.street_address({"street": "100 Main St, anywhere USA"})
:param address: string or dictionary with street address information
:return: an Address object or None for no match
"""
address = self.street_addresses([address])
if not len(address):
return None
return Address(address[0]) | Geocode one and only address, get a single Address object back
>>> client.street_address("100 Main St, Anywhere, USA")
>>> client.street_address({"street": "100 Main St, anywhere USA"})
:param address: string or dictionary with street address information
:return: an Address object or None for no match | Below is the instruction that describes the task:
### Input:
Geocode one and only address, get a single Address object back
>>> client.street_address("100 Main St, Anywhere, USA")
>>> client.street_address({"street": "100 Main St, anywhere USA"})
:param address: string or dictionary with street address information
:return: an Address object or None for no match
### Response:
def street_address(self, address):
"""
Geocode one and only address, get a single Address object back
>>> client.street_address("100 Main St, Anywhere, USA")
>>> client.street_address({"street": "100 Main St, anywhere USA"})
:param address: string or dictionary with street address information
:return: an Address object or None for no match
"""
address = self.street_addresses([address])
if not len(address):
return None
return Address(address[0]) |
def write_bus_data(self, file):
""" Writes bus data to an Excel spreadsheet.
"""
bus_sheet = self.book.add_sheet("Buses")
for i, bus in enumerate(self.case.buses):
for j, attr in enumerate(BUS_ATTRS):
bus_sheet.write(i, j, getattr(bus, attr)) | Writes bus data to an Excel spreadsheet. | Below is the instruction that describes the task:
### Input:
Writes bus data to an Excel spreadsheet.
### Response:
def write_bus_data(self, file):
""" Writes bus data to an Excel spreadsheet.
"""
bus_sheet = self.book.add_sheet("Buses")
for i, bus in enumerate(self.case.buses):
for j, attr in enumerate(BUS_ATTRS):
bus_sheet.write(i, j, getattr(bus, attr)) |
def _get_rule_definition(self, rule):
"""Generates the source code for a rule."""
fmt = """def {rule_fxn_name}(self, text):
{indent}\"\"\"{rule_source}\"\"\"
{indent}self._attempting(text)
{indent}return {rule_definition}(text){transform}
"""
fmt = self._clean_fmt(fmt)
source = self._indent(self._ast_to_code(rule.expression), skip_first_line=True)
# All the primitives will accept a string x in place of terminal(x). This is terminal shorthand.
# However, if a rule is only a wrapper around a single terminal, we have to actually make a
# terminal call. This handles that situation.
if self.use_terminal_shorthand and len(source) == 1 and source[0].startswith(("'", '"')):
source = ["terminal({})".format(source[0])]
rule_source = fmt.format(rule_fxn_name=self._get_rule_fxn_name(rule.name),
indent=self.indent,
rule_source=self._get_rule_source(rule),
rule_definition="\n".join(source),
transform=self._get_rule_transform(rule))
return self._indent(rule_source, 1) | Generates the source code for a rule. | Below is the instruction that describes the task:
### Input:
Generates the source code for a rule.
### Response:
def _get_rule_definition(self, rule):
"""Generates the source code for a rule."""
fmt = """def {rule_fxn_name}(self, text):
{indent}\"\"\"{rule_source}\"\"\"
{indent}self._attempting(text)
{indent}return {rule_definition}(text){transform}
"""
fmt = self._clean_fmt(fmt)
source = self._indent(self._ast_to_code(rule.expression), skip_first_line=True)
# All the primitives will accept a string x in place of terminal(x). This is terminal shorthand.
# However, if a rule is only a wrapper around a single terminal, we have to actually make a
# terminal call. This handles that situation.
if self.use_terminal_shorthand and len(source) == 1 and source[0].startswith(("'", '"')):
source = ["terminal({})".format(source[0])]
rule_source = fmt.format(rule_fxn_name=self._get_rule_fxn_name(rule.name),
indent=self.indent,
rule_source=self._get_rule_source(rule),
rule_definition="\n".join(source),
transform=self._get_rule_transform(rule))
return self._indent(rule_source, 1) |
def _load_vertex_buffers(self):
"""Load each vertex buffer into each material"""
fd = gzip.open(cache_name(self.file_name), 'rb')
for buff in self.meta.vertex_buffers:
mat = self.wavefront.materials.get(buff['material'])
if not mat:
mat = Material(name=buff['material'], is_default=True)
self.wavefront.materials[mat.name] = mat
mat.vertex_format = buff['vertex_format']
self.load_vertex_buffer(fd, mat, buff['byte_length'])
fd.close() | Load each vertex buffer into each material | Below is the instruction that describes the task:
### Input:
Load each vertex buffer into each material
### Response:
def _load_vertex_buffers(self):
"""Load each vertex buffer into each material"""
fd = gzip.open(cache_name(self.file_name), 'rb')
for buff in self.meta.vertex_buffers:
mat = self.wavefront.materials.get(buff['material'])
if not mat:
mat = Material(name=buff['material'], is_default=True)
self.wavefront.materials[mat.name] = mat
mat.vertex_format = buff['vertex_format']
self.load_vertex_buffer(fd, mat, buff['byte_length'])
fd.close() |
def execute(self, duration):
"""
Executes the measurement, recording the event status.
:param duration: the time to run for.
:return: nothing.
"""
self.statuses.append({'name': ScheduledMeasurementStatus.RUNNING.name, 'time': datetime.utcnow()})
try:
self.recording = True
self.device.start(self.name, durationInSeconds=duration)
finally:
self.recording = False
if self.device.status == RecordingDeviceStatus.FAILED:
self.statuses.append({'name': ScheduledMeasurementStatus.FAILED.name,
'time': datetime.utcnow(),
'reason': self.device.failureCode})
else:
self.statuses.append({'name': ScheduledMeasurementStatus.COMPLETE.name, 'time': datetime.utcnow()})
# this is a bit of a hack, need to remove this at some point by refactoring the way measurements are stored
if self.callback is not None:
self.callback() | Executes the measurement, recording the event status.
:param duration: the time to run for.
:return: nothing. | Below is the instruction that describes the task:
### Input:
Executes the measurement, recording the event status.
:param duration: the time to run for.
:return: nothing.
### Response:
def execute(self, duration):
"""
Executes the measurement, recording the event status.
:param duration: the time to run for.
:return: nothing.
"""
self.statuses.append({'name': ScheduledMeasurementStatus.RUNNING.name, 'time': datetime.utcnow()})
try:
self.recording = True
self.device.start(self.name, durationInSeconds=duration)
finally:
self.recording = False
if self.device.status == RecordingDeviceStatus.FAILED:
self.statuses.append({'name': ScheduledMeasurementStatus.FAILED.name,
'time': datetime.utcnow(),
'reason': self.device.failureCode})
else:
self.statuses.append({'name': ScheduledMeasurementStatus.COMPLETE.name, 'time': datetime.utcnow()})
# this is a bit of a hack, need to remove this at some point by refactoring the way measurements are stored
if self.callback is not None:
self.callback() |
def get_exported(self):
"""Get a new dict with the exported variables."""
return dict((k, self.vars[k]) for k in self.exported_vars) | Get a new dict with the exported variables. | Below is the instruction that describes the task:
### Input:
Get a new dict with the exported variables.
### Response:
def get_exported(self):
"""Get a new dict with the exported variables."""
return dict((k, self.vars[k]) for k in self.exported_vars) |
def get_context(self):
"""
Return the context used to render the templates for the email
subject and body.
By default, this context includes:
* All of the validated values in the form, as variables of the
same names as their fields.
* The current ``Site`` object, as the variable ``site``.
* Any additional variables added by context processors (this
will be a ``RequestContext``).
"""
if not self.is_valid():
raise ValueError(
"Cannot generate Context from invalid contact form"
)
return dict(self.cleaned_data, site=get_current_site(self.request)) | Return the context used to render the templates for the email
subject and body.
By default, this context includes:
* All of the validated values in the form, as variables of the
same names as their fields.
* The current ``Site`` object, as the variable ``site``.
* Any additional variables added by context processors (this
will be a ``RequestContext``). | Below is the instruction that describes the task:
### Input:
Return the context used to render the templates for the email
subject and body.
By default, this context includes:
* All of the validated values in the form, as variables of the
same names as their fields.
* The current ``Site`` object, as the variable ``site``.
* Any additional variables added by context processors (this
will be a ``RequestContext``).
### Response:
def get_context(self):
"""
Return the context used to render the templates for the email
subject and body.
By default, this context includes:
* All of the validated values in the form, as variables of the
same names as their fields.
* The current ``Site`` object, as the variable ``site``.
* Any additional variables added by context processors (this
will be a ``RequestContext``).
"""
if not self.is_valid():
raise ValueError(
"Cannot generate Context from invalid contact form"
)
return dict(self.cleaned_data, site=get_current_site(self.request)) |
def _set_adj_type(self, v, load=False):
"""
Setter method for adj_type, mapped from YANG variable /adj_neighbor_entries_state/adj_neighbor/adj_type (isis-adj-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_adj_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_adj_type() directly.
YANG Description: Type of ISIS Adjacency
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-adj-ptpt': {'value': 8}, u'is-adj-l1': {'value': 2}, u'is-adj-l2': {'value': 4}, u'is-adj-es': {'value': 1}, u'is-adj-unknown': {'value': 0}, u'is-adj-stct': {'value': 16}},), is_leaf=True, yang_name="adj-type", rest_name="adj-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-adj-type', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """adj_type must be of a type compatible with isis-adj-type""",
'defined-type': "brocade-isis-operational:isis-adj-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-adj-ptpt': {'value': 8}, u'is-adj-l1': {'value': 2}, u'is-adj-l2': {'value': 4}, u'is-adj-es': {'value': 1}, u'is-adj-unknown': {'value': 0}, u'is-adj-stct': {'value': 16}},), is_leaf=True, yang_name="adj-type", rest_name="adj-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-adj-type', is_config=False)""",
})
self.__adj_type = t
if hasattr(self, '_set'):
self._set() | Setter method for adj_type, mapped from YANG variable /adj_neighbor_entries_state/adj_neighbor/adj_type (isis-adj-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_adj_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_adj_type() directly.
YANG Description: Type of ISIS Adjacency | Below is the instruction that describes the task:
### Input:
Setter method for adj_type, mapped from YANG variable /adj_neighbor_entries_state/adj_neighbor/adj_type (isis-adj-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_adj_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_adj_type() directly.
YANG Description: Type of ISIS Adjacency
### Response:
def _set_adj_type(self, v, load=False):
"""
Setter method for adj_type, mapped from YANG variable /adj_neighbor_entries_state/adj_neighbor/adj_type (isis-adj-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_adj_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_adj_type() directly.
YANG Description: Type of ISIS Adjacency
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-adj-ptpt': {'value': 8}, u'is-adj-l1': {'value': 2}, u'is-adj-l2': {'value': 4}, u'is-adj-es': {'value': 1}, u'is-adj-unknown': {'value': 0}, u'is-adj-stct': {'value': 16}},), is_leaf=True, yang_name="adj-type", rest_name="adj-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-adj-type', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """adj_type must be of a type compatible with isis-adj-type""",
'defined-type': "brocade-isis-operational:isis-adj-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-adj-ptpt': {'value': 8}, u'is-adj-l1': {'value': 2}, u'is-adj-l2': {'value': 4}, u'is-adj-es': {'value': 1}, u'is-adj-unknown': {'value': 0}, u'is-adj-stct': {'value': 16}},), is_leaf=True, yang_name="adj-type", rest_name="adj-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-adj-type', is_config=False)""",
})
self.__adj_type = t
if hasattr(self, '_set'):
self._set() |
def run_with_gunicorn(self, **options):
"""Run with gunicorn."""
import gunicorn.app.base
from gunicorn.six import iteritems
import multiprocessing
class GourdeApplication(gunicorn.app.base.BaseApplication):
def __init__(self, app, options=None):
self.options = options or {}
self.application = app
super(GourdeApplication, self).__init__()
def load_config(self):
config = dict([(key, value) for key, value in iteritems(self.options)
if key in self.cfg.settings and value is not None])
for key, value in iteritems(config):
self.cfg.set(key.lower(), value)
def load(self):
return self.application
options = {
'bind': '%s:%s' % (self.host, self.port),
'workers': self.threads or ((multiprocessing.cpu_count() * 2) + 1),
'debug': self.debug,
**options,
}
GourdeApplication(self.app, options).run() | Run with gunicorn. | Below is the instruction that describes the task:
### Input:
Run with gunicorn.
### Response:
def run_with_gunicorn(self, **options):
"""Run with gunicorn."""
import gunicorn.app.base
from gunicorn.six import iteritems
import multiprocessing
class GourdeApplication(gunicorn.app.base.BaseApplication):
def __init__(self, app, options=None):
self.options = options or {}
self.application = app
super(GourdeApplication, self).__init__()
def load_config(self):
config = dict([(key, value) for key, value in iteritems(self.options)
if key in self.cfg.settings and value is not None])
for key, value in iteritems(config):
self.cfg.set(key.lower(), value)
def load(self):
return self.application
options = {
'bind': '%s:%s' % (self.host, self.port),
'workers': self.threads or ((multiprocessing.cpu_count() * 2) + 1),
'debug': self.debug,
**options,
}
GourdeApplication(self.app, options).run() |
def create_entry_line_from_text(self, text):
"""
Try to parse the given text line and extract an entry. Return an :class:`~taxi.timesheet.lines.Entry`
object if parsing is successful, otherwise raise :exc:`~taxi.exceptions.ParseError`.
"""
split_line = re.match(self.entry_line_regexp, text)
if not split_line:
raise ParseError("Line must have an alias, a duration and a description")
alias = split_line.group('alias')
start_time = end_time = None
if split_line.group('start_time') is not None:
if split_line.group('start_time'):
try:
start_time = create_time_from_text(split_line.group('start_time'))
except ValueError:
raise ParseError("Start time is not a valid time, it must be in format hh:mm or hhmm")
else:
start_time = None
if split_line.group('end_time') is not None:
if split_line.group('end_time') == '?':
end_time = None
else:
try:
end_time = create_time_from_text(split_line.group('end_time'))
except ValueError:
raise ParseError("End time is not a valid time, it must be in format hh:mm or hhmm")
if split_line.group('duration') is not None:
duration = float(split_line.group('duration'))
elif start_time or end_time:
duration = (start_time, end_time)
else:
duration = (None, None)
description = split_line.group('description')
# Parse and set line flags
if split_line.group('flags'):
try:
flags = self.extract_flags_from_text(split_line.group('flags'))
# extract_flags_from_text will raise `KeyError` if one of the flags is not recognized. This should never
# happen though as the list of accepted flags is bundled in self.entry_line_regexp
except KeyError as e:
raise ParseError(*e.args)
else:
flags = set()
# Backwards compatibility with previous notation that allowed to end the alias with a `?` to ignore it
if alias.endswith('?'):
flags.add(Entry.FLAG_IGNORED)
alias = alias[:-1]
if description == '?':
flags.add(Entry.FLAG_IGNORED)
line = (
split_line.group('flags') or '',
split_line.group('spacing1') or '',
split_line.group('alias'),
split_line.group('spacing2'),
split_line.group('time'),
split_line.group('spacing3'),
split_line.group('description'),
)
entry_line = Entry(alias, duration, description, flags=flags, text=line)
return entry_line | Try to parse the given text line and extract an entry. Return an :class:`~taxi.timesheet.lines.Entry`
object if parsing is successful, otherwise raise :exc:`~taxi.exceptions.ParseError`. | Below is the instruction that describes the task:
### Input:
Try to parse the given text line and extract an entry. Return an :class:`~taxi.timesheet.lines.Entry`
object if parsing is successful, otherwise raise :exc:`~taxi.exceptions.ParseError`.
### Response:
def create_entry_line_from_text(self, text):
"""
Try to parse the given text line and extract an entry. Return an :class:`~taxi.timesheet.lines.Entry`
object if parsing is successful, otherwise raise :exc:`~taxi.exceptions.ParseError`.
"""
split_line = re.match(self.entry_line_regexp, text)
if not split_line:
raise ParseError("Line must have an alias, a duration and a description")
alias = split_line.group('alias')
start_time = end_time = None
if split_line.group('start_time') is not None:
if split_line.group('start_time'):
try:
start_time = create_time_from_text(split_line.group('start_time'))
except ValueError:
raise ParseError("Start time is not a valid time, it must be in format hh:mm or hhmm")
else:
start_time = None
if split_line.group('end_time') is not None:
if split_line.group('end_time') == '?':
end_time = None
else:
try:
end_time = create_time_from_text(split_line.group('end_time'))
except ValueError:
raise ParseError("End time is not a valid time, it must be in format hh:mm or hhmm")
if split_line.group('duration') is not None:
duration = float(split_line.group('duration'))
elif start_time or end_time:
duration = (start_time, end_time)
else:
duration = (None, None)
description = split_line.group('description')
# Parse and set line flags
if split_line.group('flags'):
try:
flags = self.extract_flags_from_text(split_line.group('flags'))
# extract_flags_from_text will raise `KeyError` if one of the flags is not recognized. This should never
# happen though as the list of accepted flags is bundled in self.entry_line_regexp
except KeyError as e:
raise ParseError(*e.args)
else:
flags = set()
# Backwards compatibility with previous notation that allowed to end the alias with a `?` to ignore it
if alias.endswith('?'):
flags.add(Entry.FLAG_IGNORED)
alias = alias[:-1]
if description == '?':
flags.add(Entry.FLAG_IGNORED)
line = (
split_line.group('flags') or '',
split_line.group('spacing1') or '',
split_line.group('alias'),
split_line.group('spacing2'),
split_line.group('time'),
split_line.group('spacing3'),
split_line.group('description'),
)
entry_line = Entry(alias, duration, description, flags=flags, text=line)
return entry_line |
def list_objects(Bucket, Delimiter=None, EncodingType=None, Prefix=None,
FetchOwner=False, StartAfter=None, region=None, key=None,
keyid=None, profile=None):
'''
List objects in a given S3 bucket.
Returns a list of objects.
CLI Example:
.. code-block:: bash
salt myminion boto_s3_bucket.list_objects mybucket
'''
try:
Contents = []
args = {'Bucket': Bucket, 'FetchOwner': FetchOwner}
args.update({'Delimiter': Delimiter}) if Delimiter else None
args.update({'EncodingType': EncodingType}) if Delimiter else None
args.update({'Prefix': Prefix}) if Prefix else None
args.update({'StartAfter': StartAfter}) if StartAfter else None
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
IsTruncated = True
while IsTruncated:
ret = conn.list_objects_v2(**args)
IsTruncated = ret.get('IsTruncated', False)
if IsTruncated in ('True', 'true', True):
args['ContinuationToken'] = ret['NextContinuationToken']
Contents += ret.get('Contents', [])
return {'Contents': Contents}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)} | List objects in a given S3 bucket.
Returns a list of objects.
CLI Example:
.. code-block:: bash
salt myminion boto_s3_bucket.list_objects mybucket | Below is the instruction that describes the task:
### Input:
List objects in a given S3 bucket.
Returns a list of objects.
CLI Example:
.. code-block:: bash
salt myminion boto_s3_bucket.list_objects mybucket
### Response:
def list_objects(Bucket, Delimiter=None, EncodingType=None, Prefix=None,
FetchOwner=False, StartAfter=None, region=None, key=None,
keyid=None, profile=None):
'''
List objects in a given S3 bucket.
Returns a list of objects.
CLI Example:
.. code-block:: bash
salt myminion boto_s3_bucket.list_objects mybucket
'''
try:
Contents = []
args = {'Bucket': Bucket, 'FetchOwner': FetchOwner}
args.update({'Delimiter': Delimiter}) if Delimiter else None
args.update({'EncodingType': EncodingType}) if Delimiter else None
args.update({'Prefix': Prefix}) if Prefix else None
args.update({'StartAfter': StartAfter}) if StartAfter else None
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
IsTruncated = True
while IsTruncated:
ret = conn.list_objects_v2(**args)
IsTruncated = ret.get('IsTruncated', False)
if IsTruncated in ('True', 'true', True):
args['ContinuationToken'] = ret['NextContinuationToken']
Contents += ret.get('Contents', [])
return {'Contents': Contents}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)} |
def create_single_weather(df, rename_dc):
"""Create an oemof weather object for the given geometry"""
my_weather = weather.FeedinWeather()
data_height = {}
name = None
# Create a pandas.DataFrame with the time series of the weather data set
weather_df = pd.DataFrame(index=df.time_series.iloc[0].index)
for row in df.iterrows():
key = rename_dc[row[1].type]
weather_df[key] = row[1].time_series
data_height[key] = row[1].height if not np.isnan(row[1].height) else 0
name = row[1].gid
my_weather.data = weather_df
my_weather.timezone = weather_df.index.tz
my_weather.longitude = df.geom_point.iloc[0].x
my_weather.latitude = df.geom_point.iloc[0].y
my_weather.geometry = df.geom_point.iloc[0]
my_weather.data_height = data_height
my_weather.name = name
return my_weather | Create an oemof weather object for the given geometry | Below is the instruction that describes the task:
### Input:
Create an oemof weather object for the given geometry
### Response:
def create_single_weather(df, rename_dc):
"""Create an oemof weather object for the given geometry"""
my_weather = weather.FeedinWeather()
data_height = {}
name = None
# Create a pandas.DataFrame with the time series of the weather data set
weather_df = pd.DataFrame(index=df.time_series.iloc[0].index)
for row in df.iterrows():
key = rename_dc[row[1].type]
weather_df[key] = row[1].time_series
data_height[key] = row[1].height if not np.isnan(row[1].height) else 0
name = row[1].gid
my_weather.data = weather_df
my_weather.timezone = weather_df.index.tz
my_weather.longitude = df.geom_point.iloc[0].x
my_weather.latitude = df.geom_point.iloc[0].y
my_weather.geometry = df.geom_point.iloc[0]
my_weather.data_height = data_height
my_weather.name = name
return my_weather |
def allreduce(self, f, value, flat=True):
"""parallel reduce followed by broadcast of the result"""
return self.reduce(f, value, flat=flat, all=True) | parallel reduce followed by broadcast of the result | Below is the instruction that describes the task:
### Input:
parallel reduce followed by broadcast of the result
### Response:
def allreduce(self, f, value, flat=True):
"""parallel reduce followed by broadcast of the result"""
return self.reduce(f, value, flat=flat, all=True) |
def _read_stimtime_AFNI(stimtime_files, n_C, n_S, scan_onoff):
""" Utility called by gen_design. It reads in one or more stimulus timing
file comforming to AFNI style, and return a list
(size of ``[number of runs \\* number of conditions]``)
of dictionary including onsets, durations and weights of each event.
Parameters
----------
stimtime_files: a string or a list of string.
Each string is the name of the file storing the stimulus
timing information of one task condition.
The contents in the files should follow the style of AFNI
stimulus timing files, refer to gen_design.
n_C: integer, number of task conditions
n_S: integer, number of scans
scan_onoff: list of numbers.
The onset of each scan after concatenating all scans,
together with the offset of the last scan.
For example, if 3 scans of duration 100s, 150s, 120s are run,
scan_onoff is [0, 100, 250, 370]
Returns
-------
design_info: list of stimulus information
The first level of the list correspond to different scans.
The second level of the list correspond to different conditions.
Each item in the list is a dictiornary with keys "onset",
"duration" and "weight". If one condition includes no event
in a scan, the values of these keys in that scan of the condition
are empty lists.
See also
--------
gen_design
"""
design_info = [[{'onset': [], 'duration': [], 'weight': []}
for i_c in range(n_C)] for i_s in range(n_S)]
# Read stimulus timing files
for i_c in range(n_C):
with open(stimtime_files[i_c]) as f:
text = f.readlines()
assert len(text) == n_S, \
'Number of lines does not match number of runs!'
for i_s, line in enumerate(text):
events = line.strip().split()
if events[0] == '*':
continue
for event in events:
assert event != '*'
tmp = str.split(event, ':')
if len(tmp) == 2:
duration = float(tmp[1])
else:
duration = 1.0
tmp = str.split(tmp[0], '*')
if len(tmp) == 2:
weight = float(tmp[1])
else:
weight = 1.0
if (float(tmp[0]) >= 0
and float(tmp[0])
< scan_onoff[i_s + 1] - scan_onoff[i_s]):
design_info[i_s][i_c]['onset'].append(float(tmp[0]))
design_info[i_s][i_c]['duration'].append(duration)
design_info[i_s][i_c]['weight'].append(weight)
return design_info | Utility called by gen_design. It reads in one or more stimulus timing
file comforming to AFNI style, and return a list
(size of ``[number of runs \\* number of conditions]``)
of dictionary including onsets, durations and weights of each event.
Parameters
----------
stimtime_files: a string or a list of string.
Each string is the name of the file storing the stimulus
timing information of one task condition.
The contents in the files should follow the style of AFNI
stimulus timing files, refer to gen_design.
n_C: integer, number of task conditions
n_S: integer, number of scans
scan_onoff: list of numbers.
The onset of each scan after concatenating all scans,
together with the offset of the last scan.
For example, if 3 scans of duration 100s, 150s, 120s are run,
scan_onoff is [0, 100, 250, 370]
Returns
-------
design_info: list of stimulus information
The first level of the list correspond to different scans.
The second level of the list correspond to different conditions.
Each item in the list is a dictiornary with keys "onset",
"duration" and "weight". If one condition includes no event
in a scan, the values of these keys in that scan of the condition
are empty lists.
See also
--------
gen_design | Below is the the instruction that describes the task:
### Input:
Utility called by gen_design. It reads in one or more stimulus timing
file comforming to AFNI style, and return a list
(size of ``[number of runs \\* number of conditions]``)
of dictionary including onsets, durations and weights of each event.
Parameters
----------
stimtime_files: a string or a list of string.
Each string is the name of the file storing the stimulus
timing information of one task condition.
The contents in the files should follow the style of AFNI
stimulus timing files, refer to gen_design.
n_C: integer, number of task conditions
n_S: integer, number of scans
scan_onoff: list of numbers.
The onset of each scan after concatenating all scans,
together with the offset of the last scan.
For example, if 3 scans of duration 100s, 150s, 120s are run,
scan_onoff is [0, 100, 250, 370]
Returns
-------
design_info: list of stimulus information
The first level of the list correspond to different scans.
The second level of the list correspond to different conditions.
Each item in the list is a dictionary with keys "onset",
"duration" and "weight". If one condition includes no event
in a scan, the values of these keys in that scan of the condition
are empty lists.
See also
--------
gen_design
### Response:
def _read_stimtime_AFNI(stimtime_files, n_C, n_S, scan_onoff):
    """ Utility called by gen_design. It reads in one or more stimulus timing
        files conforming to AFNI style, and returns a list
        (size of ``[number of runs \\* number of conditions]``)
        of dictionaries including onsets, durations and weights of each event.

        Parameters
        ----------
        stimtime_files: a string or a list of string.
            Each string is the name of the file storing the stimulus
            timing information of one task condition.
            The contents in the files should follow the style of AFNI
            stimulus timing files, refer to gen_design.
        n_C: integer, number of task conditions
        n_S: integer, number of scans
        scan_onoff: list of numbers.
            The onset of each scan after concatenating all scans,
            together with the offset of the last scan.
            For example, if 3 scans of duration 100s, 150s, 120s are run,
            scan_onoff is [0, 100, 250, 370]

        Returns
        -------
        design_info: list of stimulus information
            The first level of the list correspond to different scans.
            The second level of the list correspond to different conditions.
            Each item in the list is a dictionary with keys "onset",
            "duration" and "weight". If one condition includes no event
            in a scan, the values of these keys in that scan of the condition
            are empty lists.

        See also
        --------
        gen_design
    """
    # One dict per (scan, condition) pair, pre-filled with empty event lists.
    design_info = [[{'onset': [], 'duration': [], 'weight': []}
                    for i_c in range(n_C)] for i_s in range(n_S)]
    # Read stimulus timing files
    for i_c in range(n_C):
        with open(stimtime_files[i_c]) as f:
            text = f.readlines()
        assert len(text) == n_S, \
            'Number of lines does not match number of runs!'
        for i_s, line in enumerate(text):
            events = line.strip().split()
            # A lone '*' marks a scan with no event for this condition.
            if events[0] == '*':
                continue
            for event in events:
                assert event != '*'
                # AFNI "married" timing: onset*weight:duration, where the
                # weight and duration parts are each optional.
                tmp = str.split(event, ':')
                if len(tmp) == 2:
                    duration = float(tmp[1])
                else:
                    duration = 1.0
                tmp = str.split(tmp[0], '*')
                if len(tmp) == 2:
                    weight = float(tmp[1])
                else:
                    weight = 1.0
                # Keep only events that fall inside this scan's duration;
                # onsets are relative to the start of the scan.
                if (float(tmp[0]) >= 0
                        and float(tmp[0])
                        < scan_onoff[i_s + 1] - scan_onoff[i_s]):
                    design_info[i_s][i_c]['onset'].append(float(tmp[0]))
                    design_info[i_s][i_c]['duration'].append(duration)
                    design_info[i_s][i_c]['weight'].append(weight)
    return design_info
def count_protein_group_hits(lineproteins, groups):
"""Takes a list of protein accessions and a list of protein groups
content from DB. Counts for each group in list how many proteins
are found in lineproteins. Returns list of str amounts.
"""
hits = []
for group in groups:
hits.append(0)
for protein in lineproteins:
if protein in group:
hits[-1] += 1
return [str(x) for x in hits] | Takes a list of protein accessions and a list of protein groups
content from DB. Counts for each group in list how many proteins
are found in lineproteins. Returns list of str amounts. | Below is the instruction that describes the task:
### Input:
Takes a list of protein accessions and a list of protein groups
content from DB. Counts for each group in list how many proteins
are found in lineproteins. Returns list of str amounts.
### Response:
def count_protein_group_hits(lineproteins, groups):
    """Count, per protein group, how many of the given accessions it contains.

    For every group in ``groups``, tallies how many members of
    ``lineproteins`` occur in that group and returns the counts as a
    list of strings (one entry per group, in the same order).
    """
    counts = [sum(1 for accession in lineproteins if accession in group)
              for group in groups]
    return [str(count) for count in counts]
def _update_limits_from_api(self):
"""
Query RDS's DescribeAccountAttributes API action, and update limits
with the quotas returned. Updates ``self.limits``.
We ignore the usage information from the API,
"""
self.connect()
logger.info("Querying RDS DescribeAccountAttributes for limits")
lims = self.conn.describe_account_attributes()['AccountQuotas']
for lim in lims:
if lim['AccountQuotaName'] not in self.API_NAME_TO_LIMIT:
logger.info('RDS DescribeAccountAttributes returned unknown'
'limit: %s (max: %s; used: %s)',
lim['AccountQuotaName'], lim['Max'], lim['Used'])
continue
lname = self.API_NAME_TO_LIMIT[lim['AccountQuotaName']]
self.limits[lname]._set_api_limit(lim['Max'])
if len(self.limits[lname].get_current_usage()) < 1:
self.limits[lname]._add_current_usage(lim['Used'])
logger.debug('Done setting limits from API.') | Query RDS's DescribeAccountAttributes API action, and update limits
with the quotas returned. Updates ``self.limits``.
We ignore the usage information from the API, | Below is the the instruction that describes the task:
### Input:
Query RDS's DescribeAccountAttributes API action, and update limits
with the quotas returned. Updates ``self.limits``.
We ignore the usage information from the API,
### Response:
def _update_limits_from_api(self):
    """
    Query RDS's DescribeAccountAttributes API action, and update limits
    with the quotas returned. Updates ``self.limits``.
    We ignore the usage information from the API, except as a fallback
    when a limit has no locally-recorded usage yet.
    """
    self.connect()
    logger.info("Querying RDS DescribeAccountAttributes for limits")
    lims = self.conn.describe_account_attributes()['AccountQuotas']
    for lim in lims:
        if lim['AccountQuotaName'] not in self.API_NAME_TO_LIMIT:
            # BUG FIX: the two adjacent string literals previously
            # concatenated to "...returned unknownlimit: ..." -- a
            # trailing space was missing from the first literal.
            logger.info('RDS DescribeAccountAttributes returned unknown '
                        'limit: %s (max: %s; used: %s)',
                        lim['AccountQuotaName'], lim['Max'], lim['Used'])
            continue
        lname = self.API_NAME_TO_LIMIT[lim['AccountQuotaName']]
        # The API-reported maximum always takes effect...
        self.limits[lname]._set_api_limit(lim['Max'])
        # ...but API usage is only recorded when nothing was seen locally.
        if len(self.limits[lname].get_current_usage()) < 1:
            self.limits[lname]._add_current_usage(lim['Used'])
    logger.debug('Done setting limits from API.')
def create_module_rst_file(module_name):
"""Function for creating content in each .rst file for a module.
:param module_name: name of the module.
:type module_name: str
:returns: A content for auto module.
:rtype: str
"""
return_text = 'Module: ' + module_name
dash = '=' * len(return_text)
return_text += '\n' + dash + '\n\n'
return_text += '.. automodule:: ' + module_name + '\n'
return_text += ' :members:\n\n'
return return_text | Function for creating content in each .rst file for a module.
:param module_name: name of the module.
:type module_name: str
:returns: A content for auto module.
:rtype: str | Below is the the instruction that describes the task:
### Input:
Function for creating content in each .rst file for a module.
:param module_name: name of the module.
:type module_name: str
:returns: A content for auto module.
:rtype: str
### Response:
def create_module_rst_file(module_name):
    """Build the .rst stanza documenting one module via autodoc.

    :param module_name: name of the module.
    :type module_name: str
    :returns: A content for auto module.
    :rtype: str
    """
    title = 'Module: ' + module_name
    parts = [
        title,
        '=' * len(title),       # reST title underline must span the title
        '',
        '.. automodule:: ' + module_name,
        '    :members:\n',
    ]
    return '\n'.join(parts) + '\n'
def map_v2_event_into_v1(event):
'''
Helper method to convert Sensu 2.x event into Sensu 1.x event.
'''
# return the event if it has already been mapped
if "v2_event_mapped_into_v1" in event:
return event
# Trigger mapping code if enity exists and client does not
if not bool(event.get('client')) and "entity" in event:
event['client'] = event['entity']
# Fill in missing client attributes
if "name" not in event['client']:
event['client']['name'] = event['entity']['id']
if "subscribers" not in event['client']:
event['client']['subscribers'] = event['entity']['subscriptions']
# Fill in renamed check attributes expected in 1.4 event
if "subscribers" not in event['check']:
event['check']['subscribers'] = event['check']['subscriptions']
if "source" not in event['check']:
event['check']['source'] = event['check']['proxy_entity_id']
# Mimic 1.4 event action based on 2.0 event state
# action used in logs and fluentd plugins handlers
action_state_mapping = {'flapping': 'flapping', 'passing': 'resolve',
'failing': 'create'}
if "state" in event['check']:
state = event['check']['state']
else:
state = "unknown::2.0_event"
if "action" not in event and state.lower() in action_state_mapping:
event['action'] = action_state_mapping[state.lower()]
else:
event['action'] = state
# Mimic 1.4 event history based on 2.0 event history
if "history" in event['check']:
# save the original history
event['check']['history_v2'] = deepcopy(event['check']['history'])
legacy_history = []
for history in event['check']['history']:
if isinstance(history['status'], int):
legacy_history.append(str(history['status']))
else:
legacy_history.append("3")
event['check']['history'] = legacy_history
# Setting flag indicating this function has already been called
event['v2_event_mapped_into_v1'] = True
# return the updated event
return event | Helper method to convert Sensu 2.x event into Sensu 1.x event. | Below is the the instruction that describes the task:
### Input:
Helper method to convert Sensu 2.x event into Sensu 1.x event.
### Response:
def map_v2_event_into_v1(event):
    '''
    Helper method to convert Sensu 2.x event into Sensu 1.x event.

    The event dict is modified in place (client, check attributes, action
    and history are rewritten to the 1.x layout) and also returned.
    Calling this twice is safe: a marker key short-circuits the mapping.
    '''
    # return the event if it has already been mapped
    if "v2_event_mapped_into_v1" in event:
        return event
    # Trigger mapping code if entity exists and client does not
    if not bool(event.get('client')) and "entity" in event:
        event['client'] = event['entity']
    # Fill in missing client attributes
    if "name" not in event['client']:
        event['client']['name'] = event['entity']['id']
    if "subscribers" not in event['client']:
        event['client']['subscribers'] = event['entity']['subscriptions']
    # Fill in renamed check attributes expected in 1.4 event
    if "subscribers" not in event['check']:
        event['check']['subscribers'] = event['check']['subscriptions']
    if "source" not in event['check']:
        event['check']['source'] = event['check']['proxy_entity_id']
    # Mimic 1.4 event action based on 2.0 event state
    # action used in logs and fluentd plugins handlers
    action_state_mapping = {'flapping': 'flapping', 'passing': 'resolve',
                            'failing': 'create'}
    if "state" in event['check']:
        state = event['check']['state']
    else:
        state = "unknown::2.0_event"
    # Unmapped states (and pre-existing actions) fall through to the raw state.
    if "action" not in event and state.lower() in action_state_mapping:
        event['action'] = action_state_mapping[state.lower()]
    else:
        event['action'] = state
    # Mimic 1.4 event history based on 2.0 event history
    if "history" in event['check']:
        # save the original history
        event['check']['history_v2'] = deepcopy(event['check']['history'])
        legacy_history = []
        # 1.x history entries are status strings; anything that is not an
        # int status is recorded as "3" (unknown).
        for history in event['check']['history']:
            if isinstance(history['status'], int):
                legacy_history.append(str(history['status']))
            else:
                legacy_history.append("3")
        event['check']['history'] = legacy_history
    # Setting flag indicating this function has already been called
    event['v2_event_mapped_into_v1'] = True
    # return the updated event
    return event
def __handle_rate_limit_exception(self, rate_limit_exception):
"""
Sleep for the time specified in the exception. If not specified, wait
for 60 seconds.
"""
retry_after = int(
rate_limit_exception.response.headers.get('Retry-After', 60))
self.log.info(
"Hit Zendesk API rate limit. Pausing for %s seconds",
retry_after
)
time.sleep(retry_after) | Sleep for the time specified in the exception. If not specified, wait
for 60 seconds. | Below is the the instruction that describes the task:
### Input:
Sleep for the time specified in the exception. If not specified, wait
for 60 seconds.
### Response:
def __handle_rate_limit_exception(self, rate_limit_exception):
    """
    Pause for the duration requested by Zendesk's rate limiter.

    The wait time is taken from the failed response's Retry-After
    header; if the header is absent, 60 seconds is used.
    """
    delay = int(
        rate_limit_exception.response.headers.get('Retry-After', 60))
    self.log.info(
        "Hit Zendesk API rate limit. Pausing for %s seconds",
        delay
    )
    time.sleep(delay)
def nworker(data, smpchunk, tests):
""" The workhorse function. Not numba. """
## tell engines to limit threads
#numba.config.NUMBA_DEFAULT_NUM_THREADS = 1
## open the seqarray view, the modified array is in bootsarr
with h5py.File(data.database.input, 'r') as io5:
seqview = io5["bootsarr"][:]
maparr = io5["bootsmap"][:]
## create an N-mask array of all seq cols (this isn't really too slow)
nall_mask = seqview[:] == 78
## tried numba compiling everythign below here, but was not faster
## than making nmask w/ axis arg in numpy
## get the input arrays ready
rquartets = np.zeros((smpchunk.shape[0], 4), dtype=np.uint16)
rweights = None
#rweights = np.ones(smpchunk.shape[0], dtype=np.float64)
rdstats = np.zeros((smpchunk.shape[0], 4), dtype=np.uint32)
#times = []
## fill arrays with results using numba funcs
for idx in xrange(smpchunk.shape[0]):
## get seqchunk for 4 samples (4, ncols)
sidx = smpchunk[idx]
seqchunk = seqview[sidx]
## get N-containing columns in 4-array, and invariant sites.
nmask = np.any(nall_mask[sidx], axis=0)
nmask += np.all(seqchunk == seqchunk[0], axis=0) ## <- do we need this?
## get matrices if there are any shared SNPs
## returns best-tree index, qscores, and qstats
#bidx, qscores, qstats = calculate(seqchunk, maparr[:, 0], nmask, tests)
bidx, qstats = calculate(seqchunk, maparr[:, 0], nmask, tests)
## get weights from the three scores sorted.
## Only save to file if the quartet has information
rdstats[idx] = qstats
rquartets[idx] = smpchunk[idx][bidx]
return rquartets, rweights, rdstats | The workhorse function. Not numba. | Below is the the instruction that describes the task:
### Input:
The workhorse function. Not numba.
### Response:
def nworker(data, smpchunk, tests):
    """ The workhorse function. Not numba.

    Scores every quartet sample in *smpchunk* against the bootstrapped
    sequence array stored in ``data.database.input`` and returns, per
    sample, the best-supported quartet ordering plus its statistics.

    NOTE(review): Python 2 code (``xrange``); ``calculate`` is defined
    elsewhere in this module.
    """
    ## tell engines to limit threads
    #numba.config.NUMBA_DEFAULT_NUM_THREADS = 1
    ## open the seqarray view, the modified array is in bootsarr
    with h5py.File(data.database.input, 'r') as io5:
        seqview = io5["bootsarr"][:]
        maparr = io5["bootsmap"][:]
    ## create an N-mask array of all seq cols (78 is ASCII 'N');
    ## this isn't really too slow
    nall_mask = seqview[:] == 78
    ## tried numba compiling everything below here, but was not faster
    ## than making nmask w/ axis arg in numpy
    ## get the input arrays ready
    rquartets = np.zeros((smpchunk.shape[0], 4), dtype=np.uint16)
    rweights = None
    #rweights = np.ones(smpchunk.shape[0], dtype=np.float64)
    rdstats = np.zeros((smpchunk.shape[0], 4), dtype=np.uint32)
    #times = []
    ## fill arrays with results using numba funcs
    for idx in xrange(smpchunk.shape[0]):
        ## get seqchunk for 4 samples (4, ncols)
        sidx = smpchunk[idx]
        seqchunk = seqview[sidx]
        ## get N-containing columns in 4-array, and invariant sites.
        nmask = np.any(nall_mask[sidx], axis=0)
        nmask += np.all(seqchunk == seqchunk[0], axis=0) ## <- do we need this?
        ## get matrices if there are any shared SNPs
        ## returns best-tree index, qscores, and qstats
        #bidx, qscores, qstats = calculate(seqchunk, maparr[:, 0], nmask, tests)
        bidx, qstats = calculate(seqchunk, maparr[:, 0], nmask, tests)
        ## get weights from the three scores sorted.
        ## Only save to file if the quartet has information
        rdstats[idx] = qstats
        rquartets[idx] = smpchunk[idx][bidx]
    return rquartets, rweights, rdstats
def connect(self):
# type: () -> None
"""
Connect to server
Returns:
None
"""
if self.connection_type.lower() == 'ssl':
self.server = smtplib.SMTP_SSL(host=self.host, port=self.port, local_hostname=self.local_hostname,
timeout=self.timeout, source_address=self.source_address)
elif self.connection_type.lower() == 'lmtp':
self.server = smtplib.LMTP(host=self.host, port=self.port, local_hostname=self.local_hostname,
source_address=self.source_address)
else:
self.server = smtplib.SMTP(host=self.host, port=self.port, local_hostname=self.local_hostname,
timeout=self.timeout, source_address=self.source_address)
self.server.login(self.username, self.password) | Connect to server
Returns:
None | Below is the the instruction that describes the task:
### Input:
Connect to server
Returns:
None
### Response:
def connect(self):
    # type: () -> None
    """
    Open the mail server connection and authenticate.

    The transport is chosen from ``self.connection_type``: ``'ssl'``
    uses SMTP over SSL, ``'lmtp'`` uses LMTP (which takes no timeout),
    and anything else falls back to plain SMTP.

    Returns:
        None
    """
    kind = self.connection_type.lower()
    if kind == 'ssl':
        self.server = smtplib.SMTP_SSL(host=self.host, port=self.port,
                                       local_hostname=self.local_hostname,
                                       timeout=self.timeout,
                                       source_address=self.source_address)
    elif kind == 'lmtp':
        # smtplib.LMTP does not accept a timeout argument.
        self.server = smtplib.LMTP(host=self.host, port=self.port,
                                   local_hostname=self.local_hostname,
                                   source_address=self.source_address)
    else:
        self.server = smtplib.SMTP(host=self.host, port=self.port,
                                   local_hostname=self.local_hostname,
                                   timeout=self.timeout,
                                   source_address=self.source_address)
    self.server.login(self.username, self.password)
def gprmc_to_degdec(lat, latDirn, lng, lngDirn):
"""Converts GPRMC formats (Decimal Minutes) to Degrees Decimal."""
x = float(lat[0:2]) + float(lat[2:]) / 60
y = float(lng[0:3]) + float(lng[3:]) / 60
if latDirn == 'S':
x = -x
if lngDirn == 'W':
y = -y
return x, y | Converts GPRMC formats (Decimal Minutes) to Degrees Decimal. | Below is the the instruction that describes the task:
### Input:
Converts GPRMC formats (Decimal Minutes) to Degrees Decimal.
### Response:
def gprmc_to_degdec(lat, latDirn, lng, lngDirn):
    """Convert GPRMC coordinates (degrees + decimal minutes) to decimal degrees.

    ``lat`` is DDMM.MMMM and ``lng`` is DDDMM.MMMM; southern latitudes
    and western longitudes come back negative.
    """
    lat_sign = -1.0 if latDirn == 'S' else 1.0
    lng_sign = -1.0 if lngDirn == 'W' else 1.0
    lat_deg = float(lat[:2]) + float(lat[2:]) / 60
    lng_deg = float(lng[:3]) + float(lng[3:]) / 60
    return lat_sign * lat_deg, lng_sign * lng_deg
def creep_kill(self, target, timestamp):
"""
A creep was tragically killed. Need to split this into radiant/dire
and neutrals
"""
self.creep_kill_types[target] += 1
matched = False
for k, v in self.creep_types.iteritems():
if target.startswith(k):
matched = True
setattr(self, v, getattr(self, v) + 1)
break
if not matched:
print('> unhandled creep type'.format(target)) | A creep was tragically killed. Need to split this into radiant/dire
and neutrals | Below is the the instruction that describes the task:
### Input:
A creep was tragically killed. Need to split this into radiant/dire
and neutrals
### Response:
def creep_kill(self, target, timestamp):
    """
    A creep was tragically killed. Need to split this into radiant/dire
    and neutrals.

    Increments the per-target tally, then bumps the aggregate counter
    whose attribute name is looked up by prefix in ``self.creep_types``;
    unknown creep names are reported on stdout.
    """
    self.creep_kill_types[target] += 1
    matched = False
    # items() instead of iteritems(): equivalent on Python 2, also runs on 3.
    for prefix, attr in self.creep_types.items():
        if target.startswith(prefix):
            matched = True
            setattr(self, attr, getattr(self, attr) + 1)
            break
    if not matched:
        # BUG FIX: the format string had no placeholder, so the creep
        # name was silently dropped from the message.
        print('> unhandled creep type {0}'.format(target))
def templatesCollector(text, open, close):
"""leaves related articles and wikitables in place"""
others = []
spans = [i for i in findBalanced(text, open, close)]
spanscopy = copy(spans)
for i in range(len(spans)):
start, end = spans[i]
o = text[start:end]
ol = o.lower()
if 'vaata|' in ol or 'wikitable' in ol:
spanscopy.remove(spans[i])
continue
others.append(o)
text = dropSpans(spanscopy, text)
return text, others | leaves related articles and wikitables in place | Below is the the instruction that describes the task:
### Input:
leaves related articles and wikitables in place
### Response:
def templatesCollector(text, open, close):
    """Strip templates from *text*, leaving related-article links and
    wikitables in place.

    Returns the stripped text together with the list of removed
    template strings.
    """
    removed = []
    spans_to_drop = []
    for span in findBalanced(text, open, close):
        start, end = span
        snippet = text[start:end]
        lowered = snippet.lower()
        # 'vaata|' (related articles) and wikitables stay in the text.
        if 'vaata|' in lowered or 'wikitable' in lowered:
            continue
        removed.append(snippet)
        spans_to_drop.append(span)
    text = dropSpans(spans_to_drop, text)
    return text, removed
def copy(self):
'''Create a copy of the current instance.
:returns: A safely editable copy of the current sequence.
'''
# Significant performance improvements by skipping alphabet check
return type(self)(self.seq, self.material, run_checks=False) | Create a copy of the current instance.
:returns: A safely editable copy of the current sequence. | Below is the the instruction that describes the task:
### Input:
Create a copy of the current instance.
:returns: A safely editable copy of the current sequence.
### Response:
def copy(self):
    '''Create a copy of the current instance.

    :returns: A safely editable copy of the current sequence.
    '''
    # run_checks=False skips the (already validated) alphabet check,
    # which is a significant performance win.
    cls = self.__class__
    return cls(self.seq, self.material, run_checks=False)
def dimension_name(dimension):
"""Return the Dimension.name for a dimension-like object.
Args:
dimension: Dimension or dimension string, tuple or dict
Returns:
The name of the Dimension or what would be the name if the
input as converted to a Dimension.
"""
if isinstance(dimension, Dimension):
return dimension.name
elif isinstance(dimension, basestring):
return dimension
elif isinstance(dimension, tuple):
return dimension[0]
elif isinstance(dimension, dict):
return dimension['name']
elif dimension is None:
return None
else:
raise ValueError('%s type could not be interpreted as Dimension. '
'Dimensions must be declared as a string, tuple, '
'dictionary or Dimension type.'
% type(dimension).__name__) | Return the Dimension.name for a dimension-like object.
Args:
dimension: Dimension or dimension string, tuple or dict
Returns:
The name of the Dimension or what would be the name if the
input as converted to a Dimension. | Below is the the instruction that describes the task:
### Input:
Return the Dimension.name for a dimension-like object.
Args:
dimension: Dimension or dimension string, tuple or dict
Returns:
The name of the Dimension or what would be the name if the
input as converted to a Dimension.
### Response:
def dimension_name(dimension):
    """Return the Dimension.name for a dimension-like object.

    Args:
        dimension: Dimension or dimension string, tuple or dict
    Returns:
        The name of the Dimension or what would be the name if the
        input as converted to a Dimension.
    """
    # Guard-clause dispatch on the accepted dimension-like forms.
    if dimension is None:
        return None
    if isinstance(dimension, Dimension):
        return dimension.name
    if isinstance(dimension, basestring):
        return dimension
    if isinstance(dimension, tuple):
        return dimension[0]
    if isinstance(dimension, dict):
        return dimension['name']
    raise ValueError('%s type could not be interpreted as Dimension. '
                     'Dimensions must be declared as a string, tuple, '
                     'dictionary or Dimension type.'
                     % type(dimension).__name__)
def _get_parameter(self, name, tp, timeout=1.0, max_retries=2):
""" Gets the specified drive parameter.
Gets a parameter from the drive. Only supports ``bool``,
``int``, and ``float`` parameters.
Parameters
----------
name : str
Name of the parameter to check. It is always the command to
set it but without the value.
tp : type {bool, int, float}
The type of the parameter.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
value : bool, int, or float
The value of the specified parameter.
Raises
------
TypeError
If 'tp' is not an allowed type (``bool``, ``int``,
``float``).
CommandError
If the command to retrieve the parameter returned an error.
ValueError
If the value returned to the drive cannot be converted to
the proper type.
See Also
--------
_set_parameter : Set a parameter.
"""
# Raise a TypeError if tp isn't one of the valid types.
if tp not in (bool, int, float):
raise TypeError('Only supports bool, int, and float; not '
+ str(tp))
# Sending a command of name queries the state for that
# parameter. The response will have name preceeded by an '*' and
# then followed by a number which will have to be converted.
response = self.driver.send_command(name, timeout=timeout,
immediate=True,
max_retries=max_retries)
# If the response has an error, there are no response lines, or
# the first response line isn't '*'+name; then there was an
# error and an exception needs to be thrown.
if self.driver.command_error(response) \
or len(response[4]) == 0 \
or not response[4][0].startswith('*' + name):
raise CommandError('Couldn''t retrieve parameter '
+ name)
# Extract the string representation of the value, which is after
# the '*'+name.
value_str = response[4][0][(len(name)+1):]
# Convert the value string to the appropriate type and return
# it. Throw an error if it is not supported.
if tp == bool:
return (value_str == '1')
elif tp == int:
return int(value_str)
elif tp == float:
return float(value_str) | Gets the specified drive parameter.
Gets a parameter from the drive. Only supports ``bool``,
``int``, and ``float`` parameters.
Parameters
----------
name : str
Name of the parameter to check. It is always the command to
set it but without the value.
tp : type {bool, int, float}
The type of the parameter.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
value : bool, int, or float
The value of the specified parameter.
Raises
------
TypeError
If 'tp' is not an allowed type (``bool``, ``int``,
``float``).
CommandError
If the command to retrieve the parameter returned an error.
ValueError
If the value returned to the drive cannot be converted to
the proper type.
See Also
--------
_set_parameter : Set a parameter. | Below is the the instruction that describes the task:
### Input:
Gets the specified drive parameter.
Gets a parameter from the drive. Only supports ``bool``,
``int``, and ``float`` parameters.
Parameters
----------
name : str
Name of the parameter to check. It is always the command to
set it but without the value.
tp : type {bool, int, float}
The type of the parameter.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
value : bool, int, or float
The value of the specified parameter.
Raises
------
TypeError
If 'tp' is not an allowed type (``bool``, ``int``,
``float``).
CommandError
If the command to retrieve the parameter returned an error.
ValueError
If the value returned to the drive cannot be converted to
the proper type.
See Also
--------
_set_parameter : Set a parameter.
### Response:
def _get_parameter(self, name, tp, timeout=1.0, max_retries=2):
    """ Gets the specified drive parameter.
    Gets a parameter from the drive. Only supports ``bool``,
    ``int``, and ``float`` parameters.
    Parameters
    ----------
    name : str
        Name of the parameter to check. It is always the command to
        set it but without the value.
    tp : type {bool, int, float}
        The type of the parameter.
    timeout : number, optional
        Optional timeout in seconds to use when reading the
        response. A negative value or ``None`` indicates that the
        an infinite timeout should be used.
    max_retries : int, optional
        Maximum number of retries to do per command in the case of
        errors.
    Returns
    -------
    value : bool, int, or float
        The value of the specified parameter.
    Raises
    ------
    TypeError
        If 'tp' is not an allowed type (``bool``, ``int``,
        ``float``).
    CommandError
        If the command to retrieve the parameter returned an error.
    ValueError
        If the value returned to the drive cannot be converted to
        the proper type.
    See Also
    --------
    _set_parameter : Set a parameter.
    """
    # Raise a TypeError if tp isn't one of the valid types.
    if tp not in (bool, int, float):
        raise TypeError('Only supports bool, int, and float; not '
                        + str(tp))
    # Sending a command of name queries the state for that
    # parameter. The response will have name preceded by an '*' and
    # then followed by a number which will have to be converted.
    response = self.driver.send_command(name, timeout=timeout,
                                        immediate=True,
                                        max_retries=max_retries)
    # If the response has an error, there are no response lines, or
    # the first response line isn't '*'+name; then there was an
    # error and an exception needs to be thrown.
    if self.driver.command_error(response) \
            or len(response[4]) == 0 \
            or not response[4][0].startswith('*' + name):
        # BUG FIX: the original used '' inside a '-quoted literal, which
        # is just two adjacent strings -- the message rendered as
        # "Couldnt retrieve parameter ...".
        raise CommandError("Couldn't retrieve parameter " + name)
    # Extract the string representation of the value, which is after
    # the '*'+name.
    value_str = response[4][0][(len(name)+1):]
    # Convert the value string to the appropriate type and return
    # it. Booleans are encoded by the drive as '1'/'0'.
    if tp == bool:
        return (value_str == '1')
    elif tp == int:
        return int(value_str)
    elif tp == float:
        return float(value_str)
def get_int(self,
            key,
            is_list=False,
            is_optional=False,
            is_secret=False,
            is_local=False,
            default=None,
            options=None):
    """Get the value corresponding to the key and convert it to `int`/`list(int)`.

    Args:
        key: the dict key.
        is_list: If this is one element or a list of elements.
        is_optional: To raise an error if key was not found.
        is_secret: If the key is a secret.
        is_local: If the key is a local to this service.
        default: default value if is_optional is True.
        options: list/tuple if provided, the value must be one of these values.

    Returns:
        `int`: value corresponding to the key.
    """
    # Both helpers accept the same keyword interface; select the right one
    # depending on whether a scalar or a list of ints is expected.
    getter = self._get_typed_list_value if is_list else self._get_typed_value
    return getter(key=key,
                  target_type=int,
                  type_convert=int,
                  is_optional=is_optional,
                  is_secret=is_secret,
                  is_local=is_local,
                  default=default,
                  options=options)
Args:
key: the dict key.
is_list: If this is one element or a list of elements.
is_optional: To raise an error if key was not found.
is_secret: If the key is a secret.
is_local: If the key is a local to this service.
default: default value if is_optional is True.
options: list/tuple if provided, the value must be one of these values.
Returns:
`int`: value corresponding to the key. | Below is the instruction that describes the task:
### Input:
Get a the value corresponding to the key and converts it to `int`/`list(int)`.
Args:
key: the dict key.
is_list: If this is one element or a list of elements.
is_optional: To raise an error if key was not found.
is_secret: If the key is a secret.
is_local: If the key is a local to this service.
default: default value if is_optional is True.
options: list/tuple if provided, the value must be one of these values.
Returns:
`int`: value corresponding to the key.
### Response:
def get_int(self,
key,
is_list=False,
is_optional=False,
is_secret=False,
is_local=False,
default=None,
options=None):
"""
Get a the value corresponding to the key and converts it to `int`/`list(int)`.
Args:
key: the dict key.
is_list: If this is one element or a list of elements.
is_optional: To raise an error if key was not found.
is_secret: If the key is a secret.
is_local: If the key is a local to this service.
default: default value if is_optional is True.
options: list/tuple if provided, the value must be one of these values.
Returns:
`int`: value corresponding to the key.
"""
if is_list:
return self._get_typed_list_value(key=key,
target_type=int,
type_convert=int,
is_optional=is_optional,
is_secret=is_secret,
is_local=is_local,
default=default,
options=options)
return self._get_typed_value(key=key,
target_type=int,
type_convert=int,
is_optional=is_optional,
is_secret=is_secret,
is_local=is_local,
default=default,
options=options) |
def ptz_status_send(self, zoom, pan, tilt, force_mavlink1=False):
    '''
    Transmits the actual Pan, Tilt and Zoom values of the camera unit

    zoom : The actual Zoom Value (uint8_t)
    pan  : The Pan value in 10ths of degree (int16_t)
    tilt : The Tilt value in 10ths of degree (int16_t)
    '''
    # Encode the PTZ status first, then hand the packed message to the
    # transport layer for sending.
    message = self.ptz_status_encode(zoom, pan, tilt)
    return self.send(message, force_mavlink1=force_mavlink1)
zoom : The actual Zoom Value (uint8_t)
pan : The Pan value in 10ths of degree (int16_t)
tilt : The Tilt value in 10ths of degree (int16_t) | Below is the instruction that describes the task:
### Input:
Transmits the actual Pan, Tilt and Zoom values of the camera unit
zoom : The actual Zoom Value (uint8_t)
pan : The Pan value in 10ths of degree (int16_t)
tilt : The Tilt value in 10ths of degree (int16_t)
### Response:
def ptz_status_send(self, zoom, pan, tilt, force_mavlink1=False):
'''
Transmits the actual Pan, Tilt and Zoom values of the camera unit
zoom : The actual Zoom Value (uint8_t)
pan : The Pan value in 10ths of degree (int16_t)
tilt : The Tilt value in 10ths of degree (int16_t)
'''
return self.send(self.ptz_status_encode(zoom, pan, tilt), force_mavlink1=force_mavlink1) |
def reset(self):
    """
    Resets the initial radii used for updating the particles. Call
    if any of the particle radii or positions have been changed
    external to the augmented state.
    """
    # Parameter names for every particle currently in the state.
    inds = list(range(self.state.obj_get_positions().shape[0]))
    self._rad_nms = self.state.param_particle_rad(inds)
    self._pos_nms = self.state.param_particle_pos(inds)
    # Copy (not view) the current radii and positions so subsequent updates
    # are measured against this baseline snapshot.
    self._initial_rad = np.copy(self.state.state[self._rad_nms])
    self._initial_pos = np.copy(self.state.state[self._pos_nms]).reshape((-1,3))
    # Zero the radii-scale entries of the parameter vector so scaling
    # restarts from the freshly captured radii.
    self.param_vals[self.rscale_mask] = 0
if any of the particle radii or positions have been changed
external to the augmented state. | Below is the instruction that describes the task:
### Input:
Resets the initial radii used for updating the particles. Call
if any of the particle radii or positions have been changed
external to the augmented state.
### Response:
def reset(self):
"""
Resets the initial radii used for updating the particles. Call
if any of the particle radii or positions have been changed
external to the augmented state.
"""
inds = list(range(self.state.obj_get_positions().shape[0]))
self._rad_nms = self.state.param_particle_rad(inds)
self._pos_nms = self.state.param_particle_pos(inds)
self._initial_rad = np.copy(self.state.state[self._rad_nms])
self._initial_pos = np.copy(self.state.state[self._pos_nms]).reshape((-1,3))
self.param_vals[self.rscale_mask] = 0 |
def getRecord(self, n=None):
    """Return the values of every field at record index ``n``.

    When ``n`` is omitted, the last record (derived from the first field's
    ``numRecords``) is returned.
    """
    if n is None:
        # Default to the final record; there must be at least one field.
        assert len(self.fields) > 0
        n = self.fields[0].numRecords - 1
    # Every field must actually contain record n.
    assert all(field.numRecords > n for field in self.fields)
    return [field.values[n] for field in self.fields]
### Input:
Returns the nth record
### Response:
def getRecord(self, n=None):
"""Returns the nth record"""
if n is None:
assert len(self.fields)>0
n = self.fields[0].numRecords-1
assert (all(field.numRecords>n for field in self.fields))
record = [field.values[n] for field in self.fields]
return record |
def upload_directory(self, directory, bucket, key, transfer_config=None, subscribers=None):
    ''' upload a directory using Aspera '''
    # Fail fast if the local directory is not readable.
    check_io_access(directory, os.R_OK)
    pairs = [FilePair(key, directory)]
    return self._queue_task(bucket, pairs, transfer_config,
                            subscribers, enumAsperaDirection.SEND)
### Input:
upload a directory using Aspera
### Response:
def upload_directory(self, directory, bucket, key, transfer_config=None, subscribers=None):
''' upload a directory using Aspera '''
check_io_access(directory, os.R_OK)
return self._queue_task(bucket, [FilePair(key, directory)], transfer_config,
subscribers, enumAsperaDirection.SEND) |
def size(self, value):
    '''
    The number of hits to return. Defaults to 10
    '''
    if self.params:
        self.params['size'] = value
    else:
        # No parameter dict yet (or it is empty): start a fresh one.
        self.params = dict(size=value)
    return self
### Input:
The number of hits to return. Defaults to 10
### Response:
def size(self,value):
'''
The number of hits to return. Defaults to 10
'''
if not self.params:
self.params = dict(size=value)
return self
self.params['size'] = value
return self |
def _process_batch_write_response(request, response, table_crypto_config):
    # type: (Dict, Dict, Dict[Text, CryptoConfig]) -> Dict
    """Handle unprocessed items in the response from a transparently encrypted write.

    :param dict request: The DynamoDB plaintext request dictionary
    :param dict response: The DynamoDB response from the batch operation
    :param Dict[Text, CryptoConfig] table_crypto_config: table level CryptoConfig used in encrypting the request items
    :return: DynamoDB response, with any unprocessed items reverted back to the original plaintext values
    :rtype: dict
    """
    try:
        unprocessed_items = response["UnprocessedItems"]
    except KeyError:
        # Nothing was left unprocessed; return the response unchanged.
        return response
    # Unprocessed items need to be returned in their original state
    for table_name, unprocessed in unprocessed_items.items():
        original_items = request[table_name]
        crypto_config = table_crypto_config[table_name]
        # Match items by primary key when one is configured; otherwise fall
        # back to comparing the full attribute set.
        if crypto_config.encryption_context.partition_key_name:
            items_match = partial(_item_keys_match, crypto_config)
        else:
            items_match = partial(_item_attributes_match, crypto_config)
        for pos, operation in enumerate(unprocessed):
            for request_type, item in operation.items():
                # Only PutRequest entries carry written items to revert.
                if request_type != "PutRequest":
                    continue
                for plaintext_item in original_items:
                    if plaintext_item.get(request_type) and items_match(
                        plaintext_item[request_type]["Item"], item["Item"]
                    ):
                        # Replace the unprocessed entry in place with a copy
                        # of the matching original plaintext request entry.
                        unprocessed[pos] = plaintext_item.copy()
                        break
    return response
:param dict request: The DynamoDB plaintext request dictionary
:param dict response: The DynamoDB response from the batch operation
:param Dict[Text, CryptoConfig] table_crypto_config: table level CryptoConfig used in encrypting the request items
:return: DynamoDB response, with any unprocessed items reverted back to the original plaintext values
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Handle unprocessed items in the response from a transparently encrypted write.
:param dict request: The DynamoDB plaintext request dictionary
:param dict response: The DynamoDB response from the batch operation
:param Dict[Text, CryptoConfig] table_crypto_config: table level CryptoConfig used in encrypting the request items
:return: DynamoDB response, with any unprocessed items reverted back to the original plaintext values
:rtype: dict
### Response:
def _process_batch_write_response(request, response, table_crypto_config):
# type: (Dict, Dict, Dict[Text, CryptoConfig]) -> Dict
"""Handle unprocessed items in the response from a transparently encrypted write.
:param dict request: The DynamoDB plaintext request dictionary
:param dict response: The DynamoDB response from the batch operation
:param Dict[Text, CryptoConfig] table_crypto_config: table level CryptoConfig used in encrypting the request items
:return: DynamoDB response, with any unprocessed items reverted back to the original plaintext values
:rtype: dict
"""
try:
unprocessed_items = response["UnprocessedItems"]
except KeyError:
return response
# Unprocessed items need to be returned in their original state
for table_name, unprocessed in unprocessed_items.items():
original_items = request[table_name]
crypto_config = table_crypto_config[table_name]
if crypto_config.encryption_context.partition_key_name:
items_match = partial(_item_keys_match, crypto_config)
else:
items_match = partial(_item_attributes_match, crypto_config)
for pos, operation in enumerate(unprocessed):
for request_type, item in operation.items():
if request_type != "PutRequest":
continue
for plaintext_item in original_items:
if plaintext_item.get(request_type) and items_match(
plaintext_item[request_type]["Item"], item["Item"]
):
unprocessed[pos] = plaintext_item.copy()
break
return response |
def _relative_paths(xs, base_path):
    """Adjust paths to be relative to the provided base path.

    Strings that start with ``base_path`` have the leading
    ``"<base_path>/"`` removed; lists, tuples and dicts are traversed
    recursively (tuples come back as lists); anything else is returned
    unchanged.
    """
    if isinstance(xs, six.string_types):
        if xs.startswith(base_path):
            # Strip only the first occurrence of the base prefix.
            return xs.replace(base_path + "/", "", 1)
        return xs
    if isinstance(xs, (list, tuple)):
        return [_relative_paths(item, base_path) for item in xs]
    if isinstance(xs, dict):
        return {key: _relative_paths(val, base_path) for key, val in xs.items()}
    return xs
### Input:
Adjust paths to be relative to the provided base path.
### Response:
def _relative_paths(xs, base_path):
"""Adjust paths to be relative to the provided base path.
"""
if isinstance(xs, six.string_types):
if xs.startswith(base_path):
return xs.replace(base_path + "/", "", 1)
else:
return xs
elif isinstance(xs, (list, tuple)):
return [_relative_paths(x, base_path) for x in xs]
elif isinstance(xs, dict):
out = {}
for k, v in xs.items():
out[k] = _relative_paths(v, base_path)
return out
else:
return xs |
def r_annotation_body(self, sha):
    """ Route to retrieve contents of an annotation resource

    :param uri: The uri of the annotation resource
    :type uri: str
    :return: annotation contents
    :rtype: {str: Any}
    """
    resource = self.__queryinterface__.getResource(sha)
    if not resource:
        return "invalid resource uri", 404
    # TODO this should inspect the annotation content
    #   set appropriate Content-Type headers
    #   and return the actual content
    body = resource.read()
    if isinstance(body, Response):
        # The reader already produced a full HTTP response; pass it through.
        return body
    return Response(body, headers={"Content-Type": resource.mimetype})
:param uri: The uri of the annotation resource
:type uri: str
:return: annotation contents
:rtype: {str: Any} | Below is the the instruction that describes the task:
### Input:
Route to retrieve contents of an annotation resource
:param uri: The uri of the annotation resource
:type uri: str
:return: annotation contents
:rtype: {str: Any}
### Response:
def r_annotation_body(self, sha):
""" Route to retrieve contents of an annotation resource
:param uri: The uri of the annotation resource
:type uri: str
:return: annotation contents
:rtype: {str: Any}
"""
annotation = self.__queryinterface__.getResource(sha)
if not annotation:
return "invalid resource uri", 404
# TODO this should inspect the annotation content
# set appropriate Content-Type headers
# and return the actual content
content = annotation.read()
if isinstance(content, Response):
return content
headers = {"Content-Type": annotation.mimetype}
return Response(content, headers=headers) |
def reversed(self):
    """returns a copy of the Arc object with its orientation reversed."""
    # Swap start/end and flip the sweep flag; radius, rotation and the
    # large-arc flag describe the same geometry in either direction.
    return Arc(self.end, self.radius, self.rotation, self.large_arc,
               not self.sweep, self.start)
### Input:
returns a copy of the Arc object with its orientation reversed.
### Response:
def reversed(self):
"""returns a copy of the Arc object with its orientation reversed."""
return Arc(self.end, self.radius, self.rotation, self.large_arc,
not self.sweep, self.start) |
def create_instances_from_document(
        all_documents, document_index, max_seq_length, short_seq_prob,
        masked_lm_prob, max_predictions_per_seq, vocab_words, rng):
    """Creates `TrainingInstance`s for a single document.

    :param all_documents: list of documents; indexing/extending below implies
        each document is a list of segments, each segment a list of tokens.
    :param document_index: index of the document to build instances from.
    :param max_seq_length: hard upper bound on the output token sequence.
    :param short_seq_prob: probability of targeting a shorter sequence.
    :param masked_lm_prob: passed through to ``create_masked_lm_predictions``.
    :param max_predictions_per_seq: passed through to
        ``create_masked_lm_predictions``.
    :param vocab_words: passed through to ``create_masked_lm_predictions``.
    :param rng: random source providing ``random()`` and ``randint()``
        (presumably a seeded ``random.Random`` — confirm at call site).
    """
    document = all_documents[document_index]
    # Account for [CLS], [SEP], [SEP]
    max_num_tokens = max_seq_length - 3
    # We *usually* want to fill up the entire sequence since we are padding
    # to `max_seq_length` anyways, so short sequences are generally wasted
    # computation. However, we *sometimes*
    # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
    # sequences to minimize the mismatch between pre-training and fine-tuning.
    # The `target_seq_length` is just a rough target however, whereas
    # `max_seq_length` is a hard limit.
    target_seq_length = max_num_tokens
    if rng.random() < short_seq_prob:
        target_seq_length = rng.randint(2, max_num_tokens)
    # We DON'T just concatenate all of the tokens from a document into a long
    # sequence and choose an arbitrary split point because this would make the
    # next sentence prediction task too easy. Instead, we split the input into
    # segments "A" and "B" based on the actual "sentences" provided by the user
    # input.
    instances = []
    current_chunk = []
    current_length = 0
    i = 0
    while i < len(document):  # pylint: disable=R1702
        segment = document[i]
        current_chunk.append(segment)
        current_length += len(segment)
        # Emit an instance once enough tokens were gathered, or at the
        # final segment of the document.
        if i == len(document) - 1 or current_length >= target_seq_length:
            if current_chunk:
                # `a_end` is how many segments from `current_chunk` go into the `A`
                # (first) sentence.
                a_end = 1
                if len(current_chunk) >= 2:
                    a_end = rng.randint(1, len(current_chunk) - 1)
                tokens_a = []
                for j in range(a_end):
                    tokens_a.extend(current_chunk[j])
                tokens_b = []
                # Random next
                is_random_next = False
                if len(current_chunk) == 1 or rng.random() < 0.5:
                    is_random_next = True
                    target_b_length = target_seq_length - len(tokens_a)
                    # This should rarely go for more than one iteration for large
                    # corpora. However, just to be careful, we try to make sure that
                    # the random document is not the same as the document
                    # we're processing.
                    for _ in range(10):
                        random_document_index = rng.randint(
                            0,
                            len(all_documents) - 1)
                        if random_document_index != document_index:
                            break
                    random_document = all_documents[random_document_index]
                    random_start = rng.randint(0, len(random_document) - 1)
                    for j in range(random_start, len(random_document)):
                        tokens_b.extend(random_document[j])
                        if len(tokens_b) >= target_b_length:
                            break
                    # We didn't actually use these segments so we 'put them back' so
                    # they don't go to waste.
                    num_unused_segments = len(current_chunk) - a_end
                    i -= num_unused_segments
                # Actual next
                else:
                    is_random_next = False
                    for j in range(a_end, len(current_chunk)):
                        tokens_b.extend(current_chunk[j])
                truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)
                assert len(tokens_a) >= 1
                assert len(tokens_b) >= 1
                tokens = []
                segment_ids = []
                tokens.append('[CLS]')
                segment_ids.append(0)
                for token in tokens_a:
                    tokens.append(token)
                    segment_ids.append(0)
                tokens.append('[SEP]')
                segment_ids.append(0)
                # Length of "[CLS] A [SEP]"; segment B starts at this offset.
                segment_a_lengths = len(segment_ids)
                for token in tokens_b:
                    tokens.append(token)
                    segment_ids.append(1)
                tokens.append('[SEP]')
                segment_ids.append(1)
                segment_b_lengths = len(segment_ids) - segment_a_lengths
                (tokens, masked_lm_positions,
                 masked_lm_labels) = create_masked_lm_predictions(
                     tokens, masked_lm_prob, max_predictions_per_seq,
                     vocab_words, rng)
                instance = TrainingInstance(
                    tokens=tokens,
                    segment_ids=segment_ids,
                    is_random_next=is_random_next,
                    masked_lm_positions=masked_lm_positions,
                    masked_lm_labels=masked_lm_labels,
                    segment_a_lengths=segment_a_lengths,
                    segment_b_lengths=segment_b_lengths)
                instances.append(instance)
            current_chunk = []
            current_length = 0
        i += 1
    return instances
### Input:
Creates `TrainingInstance`s for a single document.
### Response:
def create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, vocab_words, rng):
"""Creates `TrainingInstance`s for a single document."""
document = all_documents[document_index]
# Account for [CLS], [SEP], [SEP]
max_num_tokens = max_seq_length - 3
# We *usually* want to fill up the entire sequence since we are padding
# to `max_seq_length` anyways, so short sequences are generally wasted
# computation. However, we *sometimes*
# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
# sequences to minimize the mismatch between pre-training and fine-tuning.
# The `target_seq_length` is just a rough target however, whereas
# `max_seq_length` is a hard limit.
target_seq_length = max_num_tokens
if rng.random() < short_seq_prob:
target_seq_length = rng.randint(2, max_num_tokens)
# We DON'T just concatenate all of the tokens from a document into a long
# sequence and choose an arbitrary split point because this would make the
# next sentence prediction task too easy. Instead, we split the input into
# segments "A" and "B" based on the actual "sentences" provided by the user
# input.
instances = []
current_chunk = []
current_length = 0
i = 0
while i < len(document): # pylint: disable=R1702
segment = document[i]
current_chunk.append(segment)
current_length += len(segment)
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
# `a_end` is how many segments from `current_chunk` go into the `A`
# (first) sentence.
a_end = 1
if len(current_chunk) >= 2:
a_end = rng.randint(1, len(current_chunk) - 1)
tokens_a = []
for j in range(a_end):
tokens_a.extend(current_chunk[j])
tokens_b = []
# Random next
is_random_next = False
if len(current_chunk) == 1 or rng.random() < 0.5:
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
# This should rarely go for more than one iteration for large
# corpora. However, just to be careful, we try to make sure that
# the random document is not the same as the document
# we're processing.
for _ in range(10):
random_document_index = rng.randint(
0,
len(all_documents) - 1)
if random_document_index != document_index:
break
random_document = all_documents[random_document_index]
random_start = rng.randint(0, len(random_document) - 1)
for j in range(random_start, len(random_document)):
tokens_b.extend(random_document[j])
if len(tokens_b) >= target_b_length:
break
# We didn't actually use these segments so we 'put them back' so
# they don't go to waste.
num_unused_segments = len(current_chunk) - a_end
i -= num_unused_segments
# Actual next
else:
is_random_next = False
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)
assert len(tokens_a) >= 1
assert len(tokens_b) >= 1
tokens = []
segment_ids = []
tokens.append('[CLS]')
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append('[SEP]')
segment_ids.append(0)
segment_a_lengths = len(segment_ids)
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append('[SEP]')
segment_ids.append(1)
segment_b_lengths = len(segment_ids) - segment_a_lengths
(tokens, masked_lm_positions,
masked_lm_labels) = create_masked_lm_predictions(
tokens, masked_lm_prob, max_predictions_per_seq,
vocab_words, rng)
instance = TrainingInstance(
tokens=tokens,
segment_ids=segment_ids,
is_random_next=is_random_next,
masked_lm_positions=masked_lm_positions,
masked_lm_labels=masked_lm_labels,
segment_a_lengths=segment_a_lengths,
segment_b_lengths=segment_b_lengths)
instances.append(instance)
current_chunk = []
current_length = 0
i += 1
return instances |
def pkg_tracking(self):
    """Tracking package dependencies

    Parses ``self.args`` for the ``-t`` / ``--tracking`` option plus any
    recognised additional options, then either runs the dependency tracker
    or prints a usage message.  May raise ``SystemExit`` on bad options.
    """
    flag = []
    options = [
        "-t",
        "--tracking"
    ]
    additional_options = [
        "--check-deps",
        "--graph=",
        "--case-ins"
    ]
    # Collect extra options; "--graph=" carries a value so it is matched by
    # prefix (and removed from args immediately), the rest by equality.
    for arg in self.args[2:]:
        if arg.startswith(additional_options[1]):
            flag.append(arg)
            self.args.remove(arg)
        if arg in additional_options:
            flag.append(arg)
    # clean additional options from args
    for f in flag:
        if f in self.args:
            self.args.remove(f)
    # print usage message if wrong additional option
    # NOTE(review): a literal "--tracking" left in args also starts with "--"
    # and is not in additional_options, so it would hit this branch — confirm
    # callers always pass "-t" or that this is intended.
    for arg in self.args:
        if arg.startswith("--"):
            if arg not in additional_options:
                usage("")
                raise SystemExit()
    # `>= 3 and <= 3` is effectively len(args) == 3: option, repo, package.
    if (len(self.args) >= 3 and len(self.args) <= 3 and
            self.args[0] in options and
            self.args[1] in self.meta.repositories):
        TrackingDeps(self.args[2], self.args[1], flag).run()
    elif (len(self.args) >= 2 and
            self.args[1] not in self.meta.repositories):
        usage(self.args[1])
    else:
        usage("")
### Input:
Tracking package dependencies
### Response:
def pkg_tracking(self):
"""Tracking package dependencies
"""
flag = []
options = [
"-t",
"--tracking"
]
additional_options = [
"--check-deps",
"--graph=",
"--case-ins"
]
for arg in self.args[2:]:
if arg.startswith(additional_options[1]):
flag.append(arg)
self.args.remove(arg)
if arg in additional_options:
flag.append(arg)
# clean additional options from args
for f in flag:
if f in self.args:
self.args.remove(f)
# print usage message if wrong additional option
for arg in self.args:
if arg.startswith("--"):
if arg not in additional_options:
usage("")
raise SystemExit()
if (len(self.args) >= 3 and len(self.args) <= 3 and
self.args[0] in options and
self.args[1] in self.meta.repositories):
TrackingDeps(self.args[2], self.args[1], flag).run()
elif (len(self.args) >= 2 and
self.args[1] not in self.meta.repositories):
usage(self.args[1])
else:
usage("") |
def print_generated_sequence(gen, num, *, sep=", ", fmt='', seed=None):
    """
    Helper function which prints a sequence of `num` items
    produced by the random generator `gen`.

    :param gen: generator-like object supporting ``reset(seed)`` and ``next()``.
    :param num: number of items to draw and print.
    :param sep: separator between formatted items.
    :param fmt: format spec applied to each item via ``format()``.
    :param seed: if given, the generator is reset with this seed first.
    """
    # Bug fix: compare against None so a falsy seed such as 0 still triggers
    # a reset (the previous truthiness test silently skipped it).
    if seed is not None:
        gen.reset(seed)
    elems = [format(next(gen), fmt) for _ in range(num)]
    # Multi-line separators get a blank line after the heading instead of a
    # single space, so the items start on their own lines.
    sep_initial = "\n\n" if '\n' in sep else " "
    print("Generated sequence:{}{}".format(sep_initial, sep.join(elems)))
produced by the random generator `gen`. | Below is the the instruction that describes the task:
### Input:
Helper function which prints a sequence of `num` items
produced by the random generator `gen`.
### Response:
def print_generated_sequence(gen, num, *, sep=", ", fmt='', seed=None):
"""
Helper function which prints a sequence of `num` items
produced by the random generator `gen`.
"""
if seed:
gen.reset(seed)
elems = [format(next(gen), fmt) for _ in range(num)]
sep_initial = "\n\n" if '\n' in sep else " "
print("Generated sequence:{}{}".format(sep_initial, sep.join(elems))) |
def json_hash(obj, digest=None, encoder=None):
    """Hashes `obj` by dumping to JSON.

    :param obj: An object that can be rendered to json using the given `encoder`.
    :param digest: An optional `hashlib` compatible message digest. Defaults to `hashlib.sha1`.
    :param encoder: An optional custom json encoder.
    :type encoder: :class:`json.JSONEncoder`
    :returns: A hash of the given `obj` according to the given `encoder`.
    :rtype: str

    :API: public
    """
    # A canonical dump (sorted keys, ASCII-only, no NaN) keeps the hash
    # stable across runs and dict orderings.
    serialized = json.dumps(obj,
                            ensure_ascii=True,
                            allow_nan=False,
                            sort_keys=True,
                            cls=encoder)
    return hash_all(serialized, digest=digest)
:param obj: An object that can be rendered to json using the given `encoder`.
:param digest: An optional `hashlib` compatible message digest. Defaults to `hashlib.sha1`.
:param encoder: An optional custom json encoder.
:type encoder: :class:`json.JSONEncoder`
:returns: A hash of the given `obj` according to the given `encoder`.
:rtype: str
:API: public | Below is the the instruction that describes the task:
### Input:
Hashes `obj` by dumping to JSON.
:param obj: An object that can be rendered to json using the given `encoder`.
:param digest: An optional `hashlib` compatible message digest. Defaults to `hashlib.sha1`.
:param encoder: An optional custom json encoder.
:type encoder: :class:`json.JSONEncoder`
:returns: A hash of the given `obj` according to the given `encoder`.
:rtype: str
:API: public
### Response:
def json_hash(obj, digest=None, encoder=None):
"""Hashes `obj` by dumping to JSON.
:param obj: An object that can be rendered to json using the given `encoder`.
:param digest: An optional `hashlib` compatible message digest. Defaults to `hashlib.sha1`.
:param encoder: An optional custom json encoder.
:type encoder: :class:`json.JSONEncoder`
:returns: A hash of the given `obj` according to the given `encoder`.
:rtype: str
:API: public
"""
json_str = json.dumps(obj, ensure_ascii=True, allow_nan=False, sort_keys=True, cls=encoder)
return hash_all(json_str, digest=digest) |
def init_huang(X, n_clusters, dissim, random_state):
    """Initialize centroids according to method by Huang [1997].

    :param X: 2-d array of categorical attribute values (points x attrs).
    :param n_clusters: number of centroids to produce.
    :param dissim: dissimilarity function ``dissim(X, centroid)`` returning
        per-point distances.
    :param random_state: random source providing ``choice()`` (presumably a
        numpy ``RandomState`` — confirm at call site).
    :return: object-dtype array of shape (n_clusters, n_attrs).
    """
    n_attrs = X.shape[1]
    centroids = np.empty((n_clusters, n_attrs), dtype='object')
    # determine frequencies of attributes
    for iattr in range(n_attrs):
        freq = defaultdict(int)
        for curattr in X[:, iattr]:
            freq[curattr] += 1
        # Sample centroids using the probabilities of attributes.
        # (I assume that's what's meant in the Huang [1998] paper; it works,
        # at least)
        # Note: sampling using population in static list with as many choices
        # as frequency counts. Since the counts are small integers,
        # memory consumption is low.
        choices = [chc for chc, wght in freq.items() for _ in range(wght)]
        # So that we are consistent between Python versions,
        # each with different dict ordering.
        choices = sorted(choices)
        centroids[:, iattr] = random_state.choice(choices, n_clusters)
    # The previously chosen centroids could result in empty clusters,
    # so set centroid to closest point in X.
    for ik in range(n_clusters):
        ndx = np.argsort(dissim(X, centroids[ik]))
        # We want the centroid to be unique, if possible.
        while np.all(X[ndx[0]] == centroids, axis=1).any() and ndx.shape[0] > 1:
            ndx = np.delete(ndx, 0)
        centroids[ik] = X[ndx[0]]
    return centroids
### Input:
Initialize centroids according to method by Huang [1997].
### Response:
def init_huang(X, n_clusters, dissim, random_state):
"""Initialize centroids according to method by Huang [1997]."""
n_attrs = X.shape[1]
centroids = np.empty((n_clusters, n_attrs), dtype='object')
# determine frequencies of attributes
for iattr in range(n_attrs):
freq = defaultdict(int)
for curattr in X[:, iattr]:
freq[curattr] += 1
# Sample centroids using the probabilities of attributes.
# (I assume that's what's meant in the Huang [1998] paper; it works,
# at least)
# Note: sampling using population in static list with as many choices
# as frequency counts. Since the counts are small integers,
# memory consumption is low.
choices = [chc for chc, wght in freq.items() for _ in range(wght)]
# So that we are consistent between Python versions,
# each with different dict ordering.
choices = sorted(choices)
centroids[:, iattr] = random_state.choice(choices, n_clusters)
# The previously chosen centroids could result in empty clusters,
# so set centroid to closest point in X.
for ik in range(n_clusters):
ndx = np.argsort(dissim(X, centroids[ik]))
# We want the centroid to be unique, if possible.
while np.all(X[ndx[0]] == centroids, axis=1).any() and ndx.shape[0] > 1:
ndx = np.delete(ndx, 0)
centroids[ik] = X[ndx[0]]
return centroids |
def rot(self, x, fun, rot=1, args=()):
    """returns ``fun(rotation(x), *args)``, ie. `fun` applied to a rotated argument"""
    if len(np.shape(array(x))) > 1:
        # 2-d input: treat each row as an independent argument (parallelized).
        return [self.rot(row, fun, rot, args) for row in x]
    if rot:
        return fun(rotate(x, *args))
    return fun(x)
### Input:
returns ``fun(rotation(x), *args)``, ie. `fun` applied to a rotated argument
### Response:
def rot(self, x, fun, rot=1, args=()):
"""returns ``fun(rotation(x), *args)``, ie. `fun` applied to a rotated argument"""
if len(np.shape(array(x))) > 1: # parallelized
res = []
for x in x:
res.append(self.rot(x, fun, rot, args))
return res
if rot:
return fun(rotate(x, *args))
else:
return fun(x) |
def assertHeader(self, name, value=None, *args, **kwargs):
    """
    Returns `True` if ``name`` was in the headers and, if ``value`` is
    True, whether or not the values match, or `False` otherwise.
    """
    if name not in self.raw_headers:
        return False
    if value is None:
        # Presence alone is enough when no expected value was given.
        return True
    return self.raw_headers[name] == value
True if value is None else self.raw_headers[name] == value) | Returns `True` if ``name`` was in the headers and, if ``value`` is
True, whether or not the values match, or `False` otherwise. | Below is the the instruction that describes the task:
### Input:
Returns `True` if ``name`` was in the headers and, if ``value`` is
True, whether or not the values match, or `False` otherwise.
### Response:
def assertHeader(self, name, value=None, *args, **kwargs):
"""
Returns `True` if ``name`` was in the headers and, if ``value`` is
True, whether or not the values match, or `False` otherwise.
"""
return name in self.raw_headers and (
True if value is None else self.raw_headers[name] == value) |
def _parse_summary(tag, parser, parent):
    """Parses a <summary> tag and adds it the Executable parent instance.

    :arg tag: the parsed <summary> element to wrap.
    :arg parser: an instance of DocParser to create the DocElement with.
    :arg parent: object whose ``docstring`` list receives the new element.
    """
    summary = DocElement(tag, parser, parent)
    # Append to the parent's docstring collection so it is emitted alongside
    # the parent's other documentation elements.
    parent.docstring.append(summary)
:arg parser: an instance of DocParser to create the DocElement with. | Below is the the instruction that describes the task:
### Input:
Parses a <summary> tag and adds it the Executable parent instance.
:arg parser: an instance of DocParser to create the DocElement with.
### Response:
def _parse_summary(tag, parser, parent):
"""Parses a <summary> tag and adds it the Executable parent instance.
:arg parser: an instance of DocParser to create the DocElement with.
"""
summary = DocElement(tag, parser, parent)
parent.docstring.append(summary) |
def check_req(req):
"""Checks if a given req is the latest version available."""
if not isinstance(req, Requirement):
return None
info = get_package_info(req.name)
newest_version = _get_newest_version(info)
if _is_pinned(req) and _is_version_range(req):
return None
current_spec = next(iter(req.specifier)) if req.specifier else None
current_version = current_spec.version if current_spec else None
if current_version != newest_version:
return req.name, current_version, newest_version | Checks if a given req is the latest version available. | Below is the instruction that describes the task:
### Input:
Checks if a given req is the latest version available.
### Response:
def check_req(req):
"""Checks if a given req is the latest version available."""
if not isinstance(req, Requirement):
return None
info = get_package_info(req.name)
newest_version = _get_newest_version(info)
if _is_pinned(req) and _is_version_range(req):
return None
current_spec = next(iter(req.specifier)) if req.specifier else None
current_version = current_spec.version if current_spec else None
if current_version != newest_version:
return req.name, current_version, newest_version |
def get_utt_regions(self):
"""
Return the regions of all utterances, assuming all utterances are concatenated.
It is assumed that the utterances are sorted in ascending order for concatenation.
A region is defined by offset (in chunks), length (num-chunks) and
a list of references to the utterance datasets in the containers.
Returns:
list: List of with a tuple for every utterances containing the region info.
"""
regions = []
current_offset = 0
for utt_idx in sorted(self.utt_ids):
offset = current_offset
num_frames = []
refs = []
for cnt in self.containers:
num_frames.append(cnt.get(utt_idx).shape[0])
refs.append(cnt.get(utt_idx, mem_map=True))
if len(set(num_frames)) != 1:
raise ValueError('Utterance {} has not the same number of frames in all containers!'.format(utt_idx))
num_chunks = math.ceil(num_frames[0] / float(self.frames_per_chunk))
region = (offset, num_chunks, refs)
regions.append(region)
# Sets the offset for the next utterances
current_offset += num_chunks
return regions | Return the regions of all utterances, assuming all utterances are concatenated.
It is assumed that the utterances are sorted in ascending order for concatenation.
A region is defined by offset (in chunks), length (num-chunks) and
a list of references to the utterance datasets in the containers.
Returns:
list: List of with a tuple for every utterances containing the region info. | Below is the instruction that describes the task:
### Input:
Return the regions of all utterances, assuming all utterances are concatenated.
It is assumed that the utterances are sorted in ascending order for concatenation.
A region is defined by offset (in chunks), length (num-chunks) and
a list of references to the utterance datasets in the containers.
Returns:
list: List of with a tuple for every utterances containing the region info.
### Response:
def get_utt_regions(self):
"""
Return the regions of all utterances, assuming all utterances are concatenated.
It is assumed that the utterances are sorted in ascending order for concatenation.
A region is defined by offset (in chunks), length (num-chunks) and
a list of references to the utterance datasets in the containers.
Returns:
list: List of with a tuple for every utterances containing the region info.
"""
regions = []
current_offset = 0
for utt_idx in sorted(self.utt_ids):
offset = current_offset
num_frames = []
refs = []
for cnt in self.containers:
num_frames.append(cnt.get(utt_idx).shape[0])
refs.append(cnt.get(utt_idx, mem_map=True))
if len(set(num_frames)) != 1:
raise ValueError('Utterance {} has not the same number of frames in all containers!'.format(utt_idx))
num_chunks = math.ceil(num_frames[0] / float(self.frames_per_chunk))
region = (offset, num_chunks, refs)
regions.append(region)
# Sets the offset for the next utterances
current_offset += num_chunks
return regions |
def _raw_read(self):
"""
Reads data from the socket and writes it to the memory bio
used by libssl to decrypt the data. Returns the unencrypted
data for the purpose of debugging handshakes.
:return:
A byte string of ciphertext from the socket. Used for
debugging the handshake only.
"""
data = self._raw_bytes
try:
data += self._socket.recv(8192)
except (socket_.error):
pass
output = data
written = libssl.BIO_write(self._rbio, data, len(data))
self._raw_bytes = data[written:]
return output | Reads data from the socket and writes it to the memory bio
used by libssl to decrypt the data. Returns the unencrypted
data for the purpose of debugging handshakes.
:return:
A byte string of ciphertext from the socket. Used for
debugging the handshake only. | Below is the instruction that describes the task:
### Input:
Reads data from the socket and writes it to the memory bio
used by libssl to decrypt the data. Returns the unencrypted
data for the purpose of debugging handshakes.
:return:
A byte string of ciphertext from the socket. Used for
debugging the handshake only.
### Response:
def _raw_read(self):
"""
Reads data from the socket and writes it to the memory bio
used by libssl to decrypt the data. Returns the unencrypted
data for the purpose of debugging handshakes.
:return:
A byte string of ciphertext from the socket. Used for
debugging the handshake only.
"""
data = self._raw_bytes
try:
data += self._socket.recv(8192)
except (socket_.error):
pass
output = data
written = libssl.BIO_write(self._rbio, data, len(data))
self._raw_bytes = data[written:]
return output |
def stop(self):
"""To stop the server we de-init from the CCU / Homegear, then shut down our XML-RPC server."""
stopped = []
for interface_id, proxy in self.proxies.items():
if interface_id in self.failed_inits:
LOG.warning("ServerThread.stop: Not performing de-init for %s" % interface_id)
continue
if proxy._callbackip and proxy._callbackport:
callbackip = proxy._callbackip
callbackport = proxy._callbackport
else:
callbackip = proxy._localip
callbackport = self._localport
remote = "http://%s:%i" % (callbackip, callbackport)
LOG.debug("ServerThread.stop: init('%s')" % remote)
if not callbackip in stopped:
try:
proxy.init(remote)
stopped.append(callbackip)
LOG.info("Proxy de-initialized: %s" % remote)
except Exception as err:
LOG.debug("proxyInit: Exception: %s" % str(err))
LOG.warning("Failed to de-initialize proxy")
self.proxies.clear()
LOG.info("Shutting down server")
self.server.shutdown()
LOG.debug("ServerThread.stop: Stopping ServerThread")
self.server.server_close()
LOG.info("Server stopped") | To stop the server we de-init from the CCU / Homegear, then shut down our XML-RPC server. | Below is the instruction that describes the task:
### Input:
To stop the server we de-init from the CCU / Homegear, then shut down our XML-RPC server.
### Response:
def stop(self):
"""To stop the server we de-init from the CCU / Homegear, then shut down our XML-RPC server."""
stopped = []
for interface_id, proxy in self.proxies.items():
if interface_id in self.failed_inits:
LOG.warning("ServerThread.stop: Not performing de-init for %s" % interface_id)
continue
if proxy._callbackip and proxy._callbackport:
callbackip = proxy._callbackip
callbackport = proxy._callbackport
else:
callbackip = proxy._localip
callbackport = self._localport
remote = "http://%s:%i" % (callbackip, callbackport)
LOG.debug("ServerThread.stop: init('%s')" % remote)
if not callbackip in stopped:
try:
proxy.init(remote)
stopped.append(callbackip)
LOG.info("Proxy de-initialized: %s" % remote)
except Exception as err:
LOG.debug("proxyInit: Exception: %s" % str(err))
LOG.warning("Failed to de-initialize proxy")
self.proxies.clear()
LOG.info("Shutting down server")
self.server.shutdown()
LOG.debug("ServerThread.stop: Stopping ServerThread")
self.server.server_close()
LOG.info("Server stopped") |
def get_zonefile_data( self, zonefile_hash, zonefile_dir ):
"""
Get a zonefile by hash
Return the serialized zonefile on success
Return None on error
"""
# check cache
atlas_zonefile_data = get_atlas_zonefile_data( zonefile_hash, zonefile_dir, check=False )
if atlas_zonefile_data is not None:
# check hash
zfh = get_zonefile_data_hash( atlas_zonefile_data )
if zfh != zonefile_hash:
log.debug("Invalid local zonefile %s" % zonefile_hash )
remove_atlas_zonefile_data( zonefile_hash, zonefile_dir )
else:
log.debug("Zonefile %s is local" % zonefile_hash)
return atlas_zonefile_data
return None | Get a zonefile by hash
Return the serialized zonefile on success
Return None on error | Below is the instruction that describes the task:
### Input:
Get a zonefile by hash
Return the serialized zonefile on success
Return None on error
### Response:
def get_zonefile_data( self, zonefile_hash, zonefile_dir ):
"""
Get a zonefile by hash
Return the serialized zonefile on success
Return None on error
"""
# check cache
atlas_zonefile_data = get_atlas_zonefile_data( zonefile_hash, zonefile_dir, check=False )
if atlas_zonefile_data is not None:
# check hash
zfh = get_zonefile_data_hash( atlas_zonefile_data )
if zfh != zonefile_hash:
log.debug("Invalid local zonefile %s" % zonefile_hash )
remove_atlas_zonefile_data( zonefile_hash, zonefile_dir )
else:
log.debug("Zonefile %s is local" % zonefile_hash)
return atlas_zonefile_data
return None |
def vectorize(fn):
"""
Allows a method to accept a list argument, but internally deal only
with a single item of that list.
"""
@functools.wraps(fn)
def vectorized_function(values, *vargs, **kwargs):
return [fn(value, *vargs, **kwargs) for value in values]
return vectorized_function | Allows a method to accept a list argument, but internally deal only
with a single item of that list. | Below is the instruction that describes the task:
### Input:
Allows a method to accept a list argument, but internally deal only
with a single item of that list.
### Response:
def vectorize(fn):
"""
Allows a method to accept a list argument, but internally deal only
with a single item of that list.
"""
@functools.wraps(fn)
def vectorized_function(values, *vargs, **kwargs):
return [fn(value, *vargs, **kwargs) for value in values]
return vectorized_function |
def index(self, shape):
"""Return the index of *shape* in this sequence.
Raises |ValueError| if *shape* is not in the collection.
"""
shape_elms = list(self._element.iter_shape_elms())
return shape_elms.index(shape.element) | Return the index of *shape* in this sequence.
Raises |ValueError| if *shape* is not in the collection. | Below is the instruction that describes the task:
### Input:
Return the index of *shape* in this sequence.
Raises |ValueError| if *shape* is not in the collection.
### Response:
def index(self, shape):
"""Return the index of *shape* in this sequence.
Raises |ValueError| if *shape* is not in the collection.
"""
shape_elms = list(self._element.iter_shape_elms())
return shape_elms.index(shape.element) |
def get_atom(value):
"""atom = [CFWS] 1*atext [CFWS]
"""
atom = Atom()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
atom.append(token)
if value and value[0] in ATOM_ENDS:
raise errors.HeaderParseError(
"expected atom but found '{}'".format(value))
token, value = get_atext(value)
atom.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
atom.append(token)
return atom, value | atom = [CFWS] 1*atext [CFWS] | Below is the instruction that describes the task:
### Input:
atom = [CFWS] 1*atext [CFWS]
### Response:
def get_atom(value):
"""atom = [CFWS] 1*atext [CFWS]
"""
atom = Atom()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
atom.append(token)
if value and value[0] in ATOM_ENDS:
raise errors.HeaderParseError(
"expected atom but found '{}'".format(value))
token, value = get_atext(value)
atom.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
atom.append(token)
return atom, value |
def arrange_all(self):
""" Arrange the components of the node using Graphviz.
"""
# FIXME: Circular reference avoidance.
import godot.dot_data_parser
import godot.graph
graph = godot.graph.Graph(ID="g")
graph.add_node(self)
print "GRAPH DOT:\n", str(graph)
xdot_data = graph.create( format = "xdot" )
print "XDOT DATA:\n", xdot_data
parser = godot.dot_data_parser.GodotDataParser()
# parser.parse_dot_data(xdot_data)
flat_data = xdot_data.replace('\\\n','')
tokens = parser.dotparser.parseString(flat_data)[0]
for element in tokens[3]:
print "TOK:", element
cmd = element[0]
if cmd == 'add_node':
cmd, nodename, opts = element
assert nodename == self.ID
print "OPTIONS:", opts
self.set( **opts ) | Arrange the components of the node using Graphviz. | Below is the instruction that describes the task:
### Input:
Arrange the components of the node using Graphviz.
### Response:
def arrange_all(self):
""" Arrange the components of the node using Graphviz.
"""
# FIXME: Circular reference avoidance.
import godot.dot_data_parser
import godot.graph
graph = godot.graph.Graph(ID="g")
graph.add_node(self)
print "GRAPH DOT:\n", str(graph)
xdot_data = graph.create( format = "xdot" )
print "XDOT DATA:\n", xdot_data
parser = godot.dot_data_parser.GodotDataParser()
# parser.parse_dot_data(xdot_data)
flat_data = xdot_data.replace('\\\n','')
tokens = parser.dotparser.parseString(flat_data)[0]
for element in tokens[3]:
print "TOK:", element
cmd = element[0]
if cmd == 'add_node':
cmd, nodename, opts = element
assert nodename == self.ID
print "OPTIONS:", opts
self.set( **opts ) |
def _extract_recipients(
message: Message, resent_dates: List[Union[str, Header]] = None
) -> List[str]:
"""
Extract the recipients from the message object given.
"""
recipients = [] # type: List[str]
if resent_dates:
recipient_headers = ("Resent-To", "Resent-Cc", "Resent-Bcc")
else:
recipient_headers = ("To", "Cc", "Bcc")
for header in recipient_headers:
recipients.extend(message.get_all(header, [])) # type: ignore
parsed_recipients = [
str(email.utils.formataddr(address))
for address in email.utils.getaddresses(recipients)
]
return parsed_recipients | Extract the recipients from the message object given. | Below is the instruction that describes the task:
### Input:
Extract the recipients from the message object given.
### Response:
def _extract_recipients(
message: Message, resent_dates: List[Union[str, Header]] = None
) -> List[str]:
"""
Extract the recipients from the message object given.
"""
recipients = [] # type: List[str]
if resent_dates:
recipient_headers = ("Resent-To", "Resent-Cc", "Resent-Bcc")
else:
recipient_headers = ("To", "Cc", "Bcc")
for header in recipient_headers:
recipients.extend(message.get_all(header, [])) # type: ignore
parsed_recipients = [
str(email.utils.formataddr(address))
for address in email.utils.getaddresses(recipients)
]
return parsed_recipients |
def finish_scan(self, scan_id):
""" Sets a scan as finished. """
self.set_scan_progress(scan_id, 100)
self.set_scan_status(scan_id, ScanStatus.FINISHED)
logger.info("%s: Scan finished.", scan_id) | Sets a scan as finished. | Below is the instruction that describes the task:
### Input:
Sets a scan as finished.
### Response:
def finish_scan(self, scan_id):
""" Sets a scan as finished. """
self.set_scan_progress(scan_id, 100)
self.set_scan_status(scan_id, ScanStatus.FINISHED)
logger.info("%s: Scan finished.", scan_id) |
def get_gid(path, follow_symlinks=True):
'''
Return the id of the group that owns a given file
path
file or directory of which to get the gid
follow_symlinks
indicated if symlinks should be followed
CLI Example:
.. code-block:: bash
salt '*' file.get_gid /etc/passwd
.. versionchanged:: 0.16.4
``follow_symlinks`` option added
'''
return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('gid', -1) | Return the id of the group that owns a given file
path
file or directory of which to get the gid
follow_symlinks
indicated if symlinks should be followed
CLI Example:
.. code-block:: bash
salt '*' file.get_gid /etc/passwd
.. versionchanged:: 0.16.4
``follow_symlinks`` option added | Below is the instruction that describes the task:
### Input:
Return the id of the group that owns a given file
path
file or directory of which to get the gid
follow_symlinks
indicated if symlinks should be followed
CLI Example:
.. code-block:: bash
salt '*' file.get_gid /etc/passwd
.. versionchanged:: 0.16.4
``follow_symlinks`` option added
### Response:
def get_gid(path, follow_symlinks=True):
'''
Return the id of the group that owns a given file
path
file or directory of which to get the gid
follow_symlinks
indicated if symlinks should be followed
CLI Example:
.. code-block:: bash
salt '*' file.get_gid /etc/passwd
.. versionchanged:: 0.16.4
``follow_symlinks`` option added
'''
return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('gid', -1) |
def get(self, identity):
"""
Constructs a SyncListPermissionContext
:param identity: Identity of the user to whom the Sync List Permission applies.
:returns: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionContext
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionContext
"""
return SyncListPermissionContext(
self._version,
service_sid=self._solution['service_sid'],
list_sid=self._solution['list_sid'],
identity=identity,
) | Constructs a SyncListPermissionContext
:param identity: Identity of the user to whom the Sync List Permission applies.
:returns: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionContext
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionContext | Below is the instruction that describes the task:
### Input:
Constructs a SyncListPermissionContext
:param identity: Identity of the user to whom the Sync List Permission applies.
:returns: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionContext
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionContext
### Response:
def get(self, identity):
"""
Constructs a SyncListPermissionContext
:param identity: Identity of the user to whom the Sync List Permission applies.
:returns: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionContext
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionContext
"""
return SyncListPermissionContext(
self._version,
service_sid=self._solution['service_sid'],
list_sid=self._solution['list_sid'],
identity=identity,
) |
def remove_account_user_from_groups(self, account_id, user_id, body, **kwargs): # noqa: E501
"""Remove user from groups. # noqa: E501
An endpoint for removing user from groups. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/users/{user-id}/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.remove_account_user_from_groups(account_id, user_id, body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str user_id: The ID of the user to be removed from the group. (required)
:param list[str] body: A list of IDs of the groups to be updated. (required)
:return: UpdatedResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.remove_account_user_from_groups_with_http_info(account_id, user_id, body, **kwargs) # noqa: E501
else:
(data) = self.remove_account_user_from_groups_with_http_info(account_id, user_id, body, **kwargs) # noqa: E501
return data | Remove user from groups. # noqa: E501
An endpoint for removing user from groups. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/users/{user-id}/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.remove_account_user_from_groups(account_id, user_id, body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str user_id: The ID of the user to be removed from the group. (required)
:param list[str] body: A list of IDs of the groups to be updated. (required)
:return: UpdatedResponse
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Remove user from groups. # noqa: E501
An endpoint for removing user from groups. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/users/{user-id}/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.remove_account_user_from_groups(account_id, user_id, body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str user_id: The ID of the user to be removed from the group. (required)
:param list[str] body: A list of IDs of the groups to be updated. (required)
:return: UpdatedResponse
If the method is called asynchronously,
returns the request thread.
### Response:
def remove_account_user_from_groups(self, account_id, user_id, body, **kwargs): # noqa: E501
"""Remove user from groups. # noqa: E501
An endpoint for removing user from groups. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/users/{user-id}/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.remove_account_user_from_groups(account_id, user_id, body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str user_id: The ID of the user to be removed from the group. (required)
:param list[str] body: A list of IDs of the groups to be updated. (required)
:return: UpdatedResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.remove_account_user_from_groups_with_http_info(account_id, user_id, body, **kwargs) # noqa: E501
else:
(data) = self.remove_account_user_from_groups_with_http_info(account_id, user_id, body, **kwargs) # noqa: E501
return data |
def is_equal(self, string1, string2):
''' Simple string comparator '''
return string1.lower().strip() == string2.lower().strip() | Simple string comparator | Below is the instruction that describes the task:
### Input:
Simple string comparator
### Response:
def is_equal(self, string1, string2):
''' Simple string comparator '''
return string1.lower().strip() == string2.lower().strip() |
def _dequeue(self, local_prof):
"""
**Purpose**: This is the function that is run in the dequeue thread. This function extracts Tasks from the
completed queus and updates the copy of workflow that exists in the WFprocessor object.
Since this thread works on the copy of the workflow, every state update to the Task, Stage and Pipeline is
communicated back to the AppManager (master process) via the 'sync_with_master' function that has dedicated
queues to communicate with the master.
Details: Termination condition of this thread is set by the wfp process.
"""
try:
local_prof.prof('dequeue-thread started', uid=self._uid)
self._logger.info('Dequeue thread started')
mq_connection = pika.BlockingConnection(
pika.ConnectionParameters(host=self._mq_hostname, port=self._port))
mq_channel = mq_connection.channel()
last = time.time()
while not self._dequeue_thread_terminate.is_set():
try:
method_frame, header_frame, body = mq_channel.basic_get(
queue=self._completed_queue[0])
if body:
# Get task from the message
completed_task = Task()
completed_task.from_dict(json.loads(body))
self._logger.info(
'Got finished task %s from queue' % (completed_task.uid))
transition(obj=completed_task,
obj_type='Task',
new_state=states.DEQUEUEING,
channel=mq_channel,
queue='%s-deq-to-sync' % self._sid,
profiler=local_prof,
logger=self._logger)
# Traverse the entire workflow to find out the correct Task
for pipe in self._workflow:
with pipe.lock:
if ((not pipe.completed) and (not pipe.state == states.SUSPENDED)):
if completed_task.parent_pipeline['uid'] == pipe.uid:
self._logger.debug(
'Found parent pipeline: %s' % pipe.uid)
for stage in pipe.stages:
if completed_task.parent_stage['uid'] == stage.uid:
self._logger.debug(
'Found parent stage: %s' % (stage.uid))
transition(obj=completed_task,
obj_type='Task',
new_state=states.DEQUEUED,
channel=mq_channel,
queue='%s-deq-to-sync' % self._sid,
profiler=local_prof,
logger=self._logger)
if not completed_task.exit_code:
completed_task.state = states.DONE
else:
completed_task.state = states.FAILED
for task in stage.tasks:
if task.uid == completed_task.uid:
task.state = str(
completed_task.state)
if (task.state == states.FAILED) and (self._resubmit_failed):
task.state = states.INITIAL
transition(obj=task,
obj_type='Task',
new_state=task.state,
channel=mq_channel,
queue='%s-deq-to-sync' % self._sid,
profiler=local_prof,
logger=self._logger)
if stage._check_stage_complete():
transition(obj=stage,
obj_type='Stage',
new_state=states.DONE,
channel=mq_channel,
queue='%s-deq-to-sync' % self._sid,
profiler=local_prof,
logger=self._logger)
# Check if Stage has a post-exec that needs to be
# executed
if stage.post_exec:
try:
self._logger.info('Executing post-exec for stage %s'
% stage.uid)
self._prof.prof('Adap: executing post-exec',
uid=self._uid)
stage.post_exec()
self._logger.info(
'Post-exec executed for stage %s' % stage.uid)
self._prof.prof(
'Adap: post-exec executed', uid=self._uid)
except Exception, ex:
self._logger.exception('Execution failed in post_exec of stage %s' % stage.uid)
raise
pipe._increment_stage()
if pipe.completed:
transition(obj=pipe,
obj_type='Pipeline',
new_state=states.DONE,
channel=mq_channel,
queue='%s-deq-to-sync' % self._sid,
profiler=local_prof,
logger=self._logger)
# Found the task and processed it -- no more iterations needed
break
# Found the stage and processed it -- no more iterations neeeded
break
# Found the pipeline and processed it -- no more iterations neeeded
break
mq_channel.basic_ack(
delivery_tag=method_frame.delivery_tag)
# Appease pika cos it thinks the connection is dead
now = time.time()
if now - last >= self._rmq_ping_interval:
mq_connection.process_data_events()
last = now
except Exception, ex:
self._logger.exception(
'Unable to receive message from completed queue: %s' % ex)
raise
self._logger.info('Terminated dequeue thread')
mq_connection.close()
local_prof.prof('terminating dequeue-thread', uid=self._uid)
except KeyboardInterrupt:
self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' +
'trying to exit gracefully...')
mq_connection.close()
raise KeyboardInterrupt
except Exception, ex:
self._logger.exception('Error in dequeue-thread: %s' % ex)
try:
mq_connection.close()
except:
self._logger.warning('mq_connection not created')
raise EnTKError(ex) | **Purpose**: This is the function that is run in the dequeue thread. This function extracts Tasks from the
completed queus and updates the copy of workflow that exists in the WFprocessor object.
Since this thread works on the copy of the workflow, every state update to the Task, Stage and Pipeline is
communicated back to the AppManager (master process) via the 'sync_with_master' function that has dedicated
queues to communicate with the master.
Details: Termination condition of this thread is set by the wfp process. | Below is the instruction that describes the task:
### Input:
**Purpose**: This is the function that is run in the dequeue thread. This function extracts Tasks from the
completed queus and updates the copy of workflow that exists in the WFprocessor object.
Since this thread works on the copy of the workflow, every state update to the Task, Stage and Pipeline is
communicated back to the AppManager (master process) via the 'sync_with_master' function that has dedicated
queues to communicate with the master.
Details: Termination condition of this thread is set by the wfp process.
### Response:
def _dequeue(self, local_prof):
"""
**Purpose**: This is the function that is run in the dequeue thread. This function extracts Tasks from the
completed queus and updates the copy of workflow that exists in the WFprocessor object.
Since this thread works on the copy of the workflow, every state update to the Task, Stage and Pipeline is
communicated back to the AppManager (master process) via the 'sync_with_master' function that has dedicated
queues to communicate with the master.
Details: Termination condition of this thread is set by the wfp process.
"""
try:
local_prof.prof('dequeue-thread started', uid=self._uid)
self._logger.info('Dequeue thread started')
mq_connection = pika.BlockingConnection(
pika.ConnectionParameters(host=self._mq_hostname, port=self._port))
mq_channel = mq_connection.channel()
last = time.time()
while not self._dequeue_thread_terminate.is_set():
try:
method_frame, header_frame, body = mq_channel.basic_get(
queue=self._completed_queue[0])
if body:
# Get task from the message
completed_task = Task()
completed_task.from_dict(json.loads(body))
self._logger.info(
'Got finished task %s from queue' % (completed_task.uid))
transition(obj=completed_task,
obj_type='Task',
new_state=states.DEQUEUEING,
channel=mq_channel,
queue='%s-deq-to-sync' % self._sid,
profiler=local_prof,
logger=self._logger)
# Traverse the entire workflow to find out the correct Task
for pipe in self._workflow:
with pipe.lock:
if ((not pipe.completed) and (not pipe.state == states.SUSPENDED)):
if completed_task.parent_pipeline['uid'] == pipe.uid:
self._logger.debug(
'Found parent pipeline: %s' % pipe.uid)
for stage in pipe.stages:
if completed_task.parent_stage['uid'] == stage.uid:
self._logger.debug(
'Found parent stage: %s' % (stage.uid))
transition(obj=completed_task,
obj_type='Task',
new_state=states.DEQUEUED,
channel=mq_channel,
queue='%s-deq-to-sync' % self._sid,
profiler=local_prof,
logger=self._logger)
if not completed_task.exit_code:
completed_task.state = states.DONE
else:
completed_task.state = states.FAILED
for task in stage.tasks:
if task.uid == completed_task.uid:
task.state = str(
completed_task.state)
if (task.state == states.FAILED) and (self._resubmit_failed):
task.state = states.INITIAL
transition(obj=task,
obj_type='Task',
new_state=task.state,
channel=mq_channel,
queue='%s-deq-to-sync' % self._sid,
profiler=local_prof,
logger=self._logger)
if stage._check_stage_complete():
transition(obj=stage,
obj_type='Stage',
new_state=states.DONE,
channel=mq_channel,
queue='%s-deq-to-sync' % self._sid,
profiler=local_prof,
logger=self._logger)
# Check if Stage has a post-exec that needs to be
# executed
if stage.post_exec:
try:
self._logger.info('Executing post-exec for stage %s'
% stage.uid)
self._prof.prof('Adap: executing post-exec',
uid=self._uid)
stage.post_exec()
self._logger.info(
'Post-exec executed for stage %s' % stage.uid)
self._prof.prof(
'Adap: post-exec executed', uid=self._uid)
except Exception, ex:
self._logger.exception('Execution failed in post_exec of stage %s' % stage.uid)
raise
pipe._increment_stage()
if pipe.completed:
transition(obj=pipe,
obj_type='Pipeline',
new_state=states.DONE,
channel=mq_channel,
queue='%s-deq-to-sync' % self._sid,
profiler=local_prof,
logger=self._logger)
# Found the task and processed it -- no more iterations needed
break
# Found the stage and processed it -- no more iterations neeeded
break
# Found the pipeline and processed it -- no more iterations neeeded
break
mq_channel.basic_ack(
delivery_tag=method_frame.delivery_tag)
# Appease pika cos it thinks the connection is dead
now = time.time()
if now - last >= self._rmq_ping_interval:
mq_connection.process_data_events()
last = now
except Exception, ex:
self._logger.exception(
'Unable to receive message from completed queue: %s' % ex)
raise
self._logger.info('Terminated dequeue thread')
mq_connection.close()
local_prof.prof('terminating dequeue-thread', uid=self._uid)
except KeyboardInterrupt:
self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' +
'trying to exit gracefully...')
mq_connection.close()
raise KeyboardInterrupt
except Exception, ex:
self._logger.exception('Error in dequeue-thread: %s' % ex)
try:
mq_connection.close()
except:
self._logger.warning('mq_connection not created')
raise EnTKError(ex) |
def get_ZXY_data_IFFT(Data, zf, xf, yf,
zwidth=10000, xwidth=5000, ywidth=5000,
timeStart=None, timeEnd=None,
show_fig=True):
"""
Given a Data object and the frequencies of the z, x and y peaks (and some
optional parameters for the created filters) this function extracts the
individual z, x and y signals (in volts) by creating IIR filters and filtering
the Data.
Parameters
----------
Data : DataObject
DataObject containing the data for which you want to extract the
z, x and y signals.
zf : float
The frequency of the z peak in the PSD
xf : float
The frequency of the x peak in the PSD
yf : float
The frequency of the y peak in the PSD
zwidth : float, optional
The width of the pass-band of the IIR filter to be generated to
filter Z.
xwidth : float, optional
The width of the pass-band of the IIR filter to be generated to
filter X.
ywidth : float, optional
The width of the pass-band of the IIR filter to be generated to
filter Y.
timeStart : float, optional
Starting time for filtering
timeEnd : float, optional
Ending time for filtering
show_fig : bool, optional
If True - plot unfiltered and filtered PSD for z, x and y.
If False - don't plot anything
Returns
-------
zdata : ndarray
Array containing the z signal in volts with time.
xdata : ndarray
Array containing the x signal in volts with time.
ydata : ndarray
Array containing the y signal in volts with time.
timedata : ndarray
Array containing the time data to go with the z, x, and y signal.
"""
if timeStart == None:
timeStart = Data.timeStart
if timeEnd == None:
timeEnd = Data.timeEnd
time = Data.time.get_array()
StartIndex = _np.where(time == take_closest(time, timeStart))[0][0]
EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0]
SAMPLEFREQ = Data.SampleFreq
input_signal = Data.voltage[StartIndex: EndIndex]
zdata = IFFT_filter(input_signal, SAMPLEFREQ, zf -
zwidth / 2, zf + zwidth / 2)
xdata = IFFT_filter(input_signal, SAMPLEFREQ, xf -
xwidth / 2, xf + xwidth / 2)
ydata = IFFT_filter(input_signal, SAMPLEFREQ, yf -
ywidth / 2, yf + ywidth / 2)
if show_fig == True:
NPerSegment = len(Data.time)
if NPerSegment > 1e7:
NPerSegment = int(1e7)
f, PSD = scipy.signal.welch(
input_signal, SAMPLEFREQ, nperseg=NPerSegment)
f_z, PSD_z = scipy.signal.welch(zdata, SAMPLEFREQ, nperseg=NPerSegment)
f_y, PSD_y = scipy.signal.welch(ydata, SAMPLEFREQ, nperseg=NPerSegment)
f_x, PSD_x = scipy.signal.welch(xdata, SAMPLEFREQ, nperseg=NPerSegment)
_plt.plot(f, PSD)
_plt.plot(f_z, PSD_z, label="z")
_plt.plot(f_x, PSD_x, label="x")
_plt.plot(f_y, PSD_y, label="y")
_plt.legend(loc="best")
_plt.xlim([zf - zwidth, yf + ywidth])
_plt.xlabel('Frequency (Hz)')
_plt.ylabel(r'$S_{xx}$ ($V^2/Hz$)')
_plt.semilogy()
_plt.title("filepath = %s" % (Data.filepath))
_plt.show()
timedata = time[StartIndex: EndIndex]
return zdata, xdata, ydata, timedata | Given a Data object and the frequencies of the z, x and y peaks (and some
optional parameters for the created filters) this function extracts the
individual z, x and y signals (in volts) by creating IIR filters and filtering
the Data.
Parameters
----------
Data : DataObject
DataObject containing the data for which you want to extract the
z, x and y signals.
zf : float
The frequency of the z peak in the PSD
xf : float
The frequency of the x peak in the PSD
yf : float
The frequency of the y peak in the PSD
zwidth : float, optional
The width of the pass-band of the IIR filter to be generated to
filter Z.
xwidth : float, optional
The width of the pass-band of the IIR filter to be generated to
filter X.
ywidth : float, optional
The width of the pass-band of the IIR filter to be generated to
filter Y.
timeStart : float, optional
Starting time for filtering
timeEnd : float, optional
Ending time for filtering
show_fig : bool, optional
If True - plot unfiltered and filtered PSD for z, x and y.
If False - don't plot anything
Returns
-------
zdata : ndarray
Array containing the z signal in volts with time.
xdata : ndarray
Array containing the x signal in volts with time.
ydata : ndarray
Array containing the y signal in volts with time.
timedata : ndarray
Array containing the time data to go with the z, x, and y signal. | Below is the the instruction that describes the task:
### Input:
Given a Data object and the frequencies of the z, x and y peaks (and some
optional parameters for the created filters) this function extracts the
individual z, x and y signals (in volts) by creating IIR filters and filtering
the Data.
Parameters
----------
Data : DataObject
DataObject containing the data for which you want to extract the
z, x and y signals.
zf : float
The frequency of the z peak in the PSD
xf : float
The frequency of the x peak in the PSD
yf : float
The frequency of the y peak in the PSD
zwidth : float, optional
The width of the pass-band of the IIR filter to be generated to
filter Z.
xwidth : float, optional
The width of the pass-band of the IIR filter to be generated to
filter X.
ywidth : float, optional
The width of the pass-band of the IIR filter to be generated to
filter Y.
timeStart : float, optional
Starting time for filtering
timeEnd : float, optional
Ending time for filtering
show_fig : bool, optional
If True - plot unfiltered and filtered PSD for z, x and y.
If False - don't plot anything
Returns
-------
zdata : ndarray
Array containing the z signal in volts with time.
xdata : ndarray
Array containing the x signal in volts with time.
ydata : ndarray
Array containing the y signal in volts with time.
timedata : ndarray
Array containing the time data to go with the z, x, and y signal.
### Response:
def get_ZXY_data_IFFT(Data, zf, xf, yf,
zwidth=10000, xwidth=5000, ywidth=5000,
timeStart=None, timeEnd=None,
show_fig=True):
"""
Given a Data object and the frequencies of the z, x and y peaks (and some
optional parameters for the created filters) this function extracts the
individual z, x and y signals (in volts) by creating IIR filters and filtering
the Data.
Parameters
----------
Data : DataObject
DataObject containing the data for which you want to extract the
z, x and y signals.
zf : float
The frequency of the z peak in the PSD
xf : float
The frequency of the x peak in the PSD
yf : float
The frequency of the y peak in the PSD
zwidth : float, optional
The width of the pass-band of the IIR filter to be generated to
filter Z.
xwidth : float, optional
The width of the pass-band of the IIR filter to be generated to
filter X.
ywidth : float, optional
The width of the pass-band of the IIR filter to be generated to
filter Y.
timeStart : float, optional
Starting time for filtering
timeEnd : float, optional
Ending time for filtering
show_fig : bool, optional
If True - plot unfiltered and filtered PSD for z, x and y.
If False - don't plot anything
Returns
-------
zdata : ndarray
Array containing the z signal in volts with time.
xdata : ndarray
Array containing the x signal in volts with time.
ydata : ndarray
Array containing the y signal in volts with time.
timedata : ndarray
Array containing the time data to go with the z, x, and y signal.
"""
if timeStart == None:
timeStart = Data.timeStart
if timeEnd == None:
timeEnd = Data.timeEnd
time = Data.time.get_array()
StartIndex = _np.where(time == take_closest(time, timeStart))[0][0]
EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0]
SAMPLEFREQ = Data.SampleFreq
input_signal = Data.voltage[StartIndex: EndIndex]
zdata = IFFT_filter(input_signal, SAMPLEFREQ, zf -
zwidth / 2, zf + zwidth / 2)
xdata = IFFT_filter(input_signal, SAMPLEFREQ, xf -
xwidth / 2, xf + xwidth / 2)
ydata = IFFT_filter(input_signal, SAMPLEFREQ, yf -
ywidth / 2, yf + ywidth / 2)
if show_fig == True:
NPerSegment = len(Data.time)
if NPerSegment > 1e7:
NPerSegment = int(1e7)
f, PSD = scipy.signal.welch(
input_signal, SAMPLEFREQ, nperseg=NPerSegment)
f_z, PSD_z = scipy.signal.welch(zdata, SAMPLEFREQ, nperseg=NPerSegment)
f_y, PSD_y = scipy.signal.welch(ydata, SAMPLEFREQ, nperseg=NPerSegment)
f_x, PSD_x = scipy.signal.welch(xdata, SAMPLEFREQ, nperseg=NPerSegment)
_plt.plot(f, PSD)
_plt.plot(f_z, PSD_z, label="z")
_plt.plot(f_x, PSD_x, label="x")
_plt.plot(f_y, PSD_y, label="y")
_plt.legend(loc="best")
_plt.xlim([zf - zwidth, yf + ywidth])
_plt.xlabel('Frequency (Hz)')
_plt.ylabel(r'$S_{xx}$ ($V^2/Hz$)')
_plt.semilogy()
_plt.title("filepath = %s" % (Data.filepath))
_plt.show()
timedata = time[StartIndex: EndIndex]
return zdata, xdata, ydata, timedata |
def generator(self, random, args):
"""Return a candidate solution for an evolutionary computation."""
locations = [i for i in range(len(self.weights))]
random.shuffle(locations)
return locations | Return a candidate solution for an evolutionary computation. | Below is the the instruction that describes the task:
### Input:
Return a candidate solution for an evolutionary computation.
### Response:
def generator(self, random, args):
"""Return a candidate solution for an evolutionary computation."""
locations = [i for i in range(len(self.weights))]
random.shuffle(locations)
return locations |
def forwards(self, orm):
"Write your forwards methods here."
Project = orm['samples.Project']
Cohort = orm['samples.Cohort']
now = datetime.datetime.now()
# Create default project
try:
project = Project.objects.get(name=DEFAULT_PROJECT_NAME)
except Project.DoesNotExist:
project = Project(name=DEFAULT_PROJECT_NAME,
label=DEFAULT_PROJECT_NAME, created=now, modified=now)
project.save()
# Create default cohort
try:
cohort = Cohort.objects.get(name=DEFAULT_COHORT_NAME)
except Cohort.DoesNotExist:
cohort = Cohort(name=DEFAULT_COHORT_NAME, published=True,
autocreated=True, created=now, modified=now)
cohort.save() | Write your forwards methods here. | Below is the the instruction that describes the task:
### Input:
Write your forwards methods here.
### Response:
def forwards(self, orm):
"Write your forwards methods here."
Project = orm['samples.Project']
Cohort = orm['samples.Cohort']
now = datetime.datetime.now()
# Create default project
try:
project = Project.objects.get(name=DEFAULT_PROJECT_NAME)
except Project.DoesNotExist:
project = Project(name=DEFAULT_PROJECT_NAME,
label=DEFAULT_PROJECT_NAME, created=now, modified=now)
project.save()
# Create default cohort
try:
cohort = Cohort.objects.get(name=DEFAULT_COHORT_NAME)
except Cohort.DoesNotExist:
cohort = Cohort(name=DEFAULT_COHORT_NAME, published=True,
autocreated=True, created=now, modified=now)
cohort.save() |
def get_sequence(self, chrom, start, end, strand='+', indexing=(-1, 0)):
"""
chromosome is entered relative to the file it was built with, so it can be 'chr11' or '11',
start/end are coordinates, which default to python style [0,1) internally. So positions should be
entered with (1,1) indexing. This can be changed with the indexing keyword.
The default is for everything to be relative to the positive strand
"""
try:
divisor = int(self.sequence_index[chrom][2])
except KeyError:
self.open_fasta_index()
try:
divisor = int(self.sequence_index[chrom][2])
except KeyError:
sys.stderr.write("%s cannot be found within the fasta index file.\n" % chrom)
return ""
start+=indexing[0]
end+=indexing[1]
#is it a valid position?
if ( start < 0 or end > int(self.sequence_index[chrom][0] )):
raise ValueError("The range %d-%d is invalid. Valid range for this feature is 1-%d." % (start-indexing[0], end-indexing[1],
int(self.sequence_index[chrom][0])))
#go to start of chromosome
seekpos = int(self.sequence_index[chrom][1])
#find how many newlines we have
seekpos += start+start/divisor
slen = end-start
endpos = int(slen + (slen/divisor) + 1) #a hack of sorts but it works and is easy
self.fasta_file.seek(seekpos, 0)
output = self.fasta_file.read(endpos)
output = output.replace('\n', '')
out = output[:slen]
if strand == '+' or strand == 1:
return out
if strand == '-' or strand == -1:
return _reverse_complement(out) | chromosome is entered relative to the file it was built with, so it can be 'chr11' or '11',
start/end are coordinates, which default to python style [0,1) internally. So positions should be
entered with (1,1) indexing. This can be changed with the indexing keyword.
The default is for everything to be relative to the positive strand | Below is the the instruction that describes the task:
### Input:
chromosome is entered relative to the file it was built with, so it can be 'chr11' or '11',
start/end are coordinates, which default to python style [0,1) internally. So positions should be
entered with (1,1) indexing. This can be changed with the indexing keyword.
The default is for everything to be relative to the positive strand
### Response:
def get_sequence(self, chrom, start, end, strand='+', indexing=(-1, 0)):
"""
chromosome is entered relative to the file it was built with, so it can be 'chr11' or '11',
start/end are coordinates, which default to python style [0,1) internally. So positions should be
entered with (1,1) indexing. This can be changed with the indexing keyword.
The default is for everything to be relative to the positive strand
"""
try:
divisor = int(self.sequence_index[chrom][2])
except KeyError:
self.open_fasta_index()
try:
divisor = int(self.sequence_index[chrom][2])
except KeyError:
sys.stderr.write("%s cannot be found within the fasta index file.\n" % chrom)
return ""
start+=indexing[0]
end+=indexing[1]
#is it a valid position?
if ( start < 0 or end > int(self.sequence_index[chrom][0] )):
raise ValueError("The range %d-%d is invalid. Valid range for this feature is 1-%d." % (start-indexing[0], end-indexing[1],
int(self.sequence_index[chrom][0])))
#go to start of chromosome
seekpos = int(self.sequence_index[chrom][1])
#find how many newlines we have
seekpos += start+start/divisor
slen = end-start
endpos = int(slen + (slen/divisor) + 1) #a hack of sorts but it works and is easy
self.fasta_file.seek(seekpos, 0)
output = self.fasta_file.read(endpos)
output = output.replace('\n', '')
out = output[:slen]
if strand == '+' or strand == 1:
return out
if strand == '-' or strand == -1:
return _reverse_complement(out) |
def collect_snmp(self, device, host, port, community):
"""
Collect SNMP interface data from device
"""
# Log
self.log.info("Collecting SNMP interface statistics from: %s", device)
# Define a list of interface indexes
ifIndexes = []
# Get Interface Indexes
ifIndexOid = '.'.join([self.IF_MIB_INDEX_OID])
ifIndexData = self.walk(ifIndexOid, host, port, community)
ifIndexes = [v for v in ifIndexData.values()]
for ifIndex in ifIndexes:
# Get Interface Type
ifTypeOid = '.'.join([self.IF_MIB_TYPE_OID, ifIndex])
ifTypeData = self.get(ifTypeOid, host, port, community)
if ifTypeData[ifTypeOid] not in self.IF_TYPES:
# Skip Interface
continue
# Get Interface Name
ifNameOid = '.'.join([self.IF_MIB_NAME_OID, ifIndex])
ifNameData = self.get(ifNameOid, host, port, community)
ifName = ifNameData[ifNameOid]
# Remove quotes from string
ifName = re.sub(r'(\"|\')', '', ifName)
# Get Gauges
for gaugeName, gaugeOid in self.IF_MIB_GAUGE_OID_TABLE.items():
ifGaugeOid = '.'.join([self.IF_MIB_GAUGE_OID_TABLE[gaugeName],
ifIndex])
ifGaugeData = self.get(ifGaugeOid, host, port, community)
ifGaugeValue = ifGaugeData[ifGaugeOid]
if not ifGaugeValue:
continue
# Get Metric Name and Value
metricIfDescr = re.sub(r'\W', '_', ifName)
metricName = '.'.join([metricIfDescr, gaugeName])
metricValue = int(ifGaugeValue)
# Get Metric Path
metricPath = '.'.join(['devices',
device,
self.config['path'],
metricName])
# Publish Metric
self.publish_gauge(metricPath, metricValue)
# Get counters (64bit)
counterItems = self.IF_MIB_COUNTER_OID_TABLE.items()
for counterName, counterOid in counterItems:
ifCounterOid = '.'.join(
[self.IF_MIB_COUNTER_OID_TABLE[counterName], ifIndex])
ifCounterData = self.get(ifCounterOid, host, port, community)
ifCounterValue = ifCounterData[ifCounterOid]
if not ifCounterValue:
continue
# Get Metric Name and Value
metricIfDescr = re.sub(r'\W', '_', ifName)
if counterName in ['ifHCInOctets', 'ifHCOutOctets']:
for unit in self.config['byte_unit']:
# Convert Metric
metricName = '.'.join([metricIfDescr,
counterName.replace('Octets',
unit)])
metricValue = diamond.convertor.binary.convert(
value=ifCounterValue,
oldUnit='byte',
newUnit=unit)
# Get Metric Path
metricPath = '.'.join(['devices',
device,
self.config['path'],
metricName])
# Publish Metric
self.publish_counter(metricPath,
metricValue,
max_value=18446744073709600000,
)
else:
metricName = '.'.join([metricIfDescr, counterName])
metricValue = int(ifCounterValue)
# Get Metric Path
metricPath = '.'.join(['devices',
device,
self.config['path'],
metricName])
# Publish Metric
self.publish_counter(metricPath,
metricValue,
max_value=18446744073709600000,
) | Collect SNMP interface data from device | Below is the the instruction that describes the task:
### Input:
Collect SNMP interface data from device
### Response:
def collect_snmp(self, device, host, port, community):
"""
Collect SNMP interface data from device
"""
# Log
self.log.info("Collecting SNMP interface statistics from: %s", device)
# Define a list of interface indexes
ifIndexes = []
# Get Interface Indexes
ifIndexOid = '.'.join([self.IF_MIB_INDEX_OID])
ifIndexData = self.walk(ifIndexOid, host, port, community)
ifIndexes = [v for v in ifIndexData.values()]
for ifIndex in ifIndexes:
# Get Interface Type
ifTypeOid = '.'.join([self.IF_MIB_TYPE_OID, ifIndex])
ifTypeData = self.get(ifTypeOid, host, port, community)
if ifTypeData[ifTypeOid] not in self.IF_TYPES:
# Skip Interface
continue
# Get Interface Name
ifNameOid = '.'.join([self.IF_MIB_NAME_OID, ifIndex])
ifNameData = self.get(ifNameOid, host, port, community)
ifName = ifNameData[ifNameOid]
# Remove quotes from string
ifName = re.sub(r'(\"|\')', '', ifName)
# Get Gauges
for gaugeName, gaugeOid in self.IF_MIB_GAUGE_OID_TABLE.items():
ifGaugeOid = '.'.join([self.IF_MIB_GAUGE_OID_TABLE[gaugeName],
ifIndex])
ifGaugeData = self.get(ifGaugeOid, host, port, community)
ifGaugeValue = ifGaugeData[ifGaugeOid]
if not ifGaugeValue:
continue
# Get Metric Name and Value
metricIfDescr = re.sub(r'\W', '_', ifName)
metricName = '.'.join([metricIfDescr, gaugeName])
metricValue = int(ifGaugeValue)
# Get Metric Path
metricPath = '.'.join(['devices',
device,
self.config['path'],
metricName])
# Publish Metric
self.publish_gauge(metricPath, metricValue)
# Get counters (64bit)
counterItems = self.IF_MIB_COUNTER_OID_TABLE.items()
for counterName, counterOid in counterItems:
ifCounterOid = '.'.join(
[self.IF_MIB_COUNTER_OID_TABLE[counterName], ifIndex])
ifCounterData = self.get(ifCounterOid, host, port, community)
ifCounterValue = ifCounterData[ifCounterOid]
if not ifCounterValue:
continue
# Get Metric Name and Value
metricIfDescr = re.sub(r'\W', '_', ifName)
if counterName in ['ifHCInOctets', 'ifHCOutOctets']:
for unit in self.config['byte_unit']:
# Convert Metric
metricName = '.'.join([metricIfDescr,
counterName.replace('Octets',
unit)])
metricValue = diamond.convertor.binary.convert(
value=ifCounterValue,
oldUnit='byte',
newUnit=unit)
# Get Metric Path
metricPath = '.'.join(['devices',
device,
self.config['path'],
metricName])
# Publish Metric
self.publish_counter(metricPath,
metricValue,
max_value=18446744073709600000,
)
else:
metricName = '.'.join([metricIfDescr, counterName])
metricValue = int(ifCounterValue)
# Get Metric Path
metricPath = '.'.join(['devices',
device,
self.config['path'],
metricName])
# Publish Metric
self.publish_counter(metricPath,
metricValue,
max_value=18446744073709600000,
) |
def _find_address_range(addresses):
"""Find a sequence of sorted deduplicated IPv#Address.
Args:
addresses: a list of IPv#Address objects.
Yields:
A tuple containing the first and last IP addresses in the sequence.
"""
it = iter(addresses)
first = last = next(it)
for ip in it:
if ip._ip != last._ip + 1:
yield first, last
first = ip
last = ip
yield first, last | Find a sequence of sorted deduplicated IPv#Address.
Args:
addresses: a list of IPv#Address objects.
Yields:
A tuple containing the first and last IP addresses in the sequence. | Below is the the instruction that describes the task:
### Input:
Find a sequence of sorted deduplicated IPv#Address.
Args:
addresses: a list of IPv#Address objects.
Yields:
A tuple containing the first and last IP addresses in the sequence.
### Response:
def _find_address_range(addresses):
"""Find a sequence of sorted deduplicated IPv#Address.
Args:
addresses: a list of IPv#Address objects.
Yields:
A tuple containing the first and last IP addresses in the sequence.
"""
it = iter(addresses)
first = last = next(it)
for ip in it:
if ip._ip != last._ip + 1:
yield first, last
first = ip
last = ip
yield first, last |
def compose(self, other, qargs=None, front=False):
"""Return the composition channel self∘other.
Args:
other (QuantumChannel): a quantum channel.
qargs (list): a list of subsystem positions to compose other on.
front (bool): If False compose in standard order other(self(input))
otherwise compose in reverse order self(other(input))
[default: False]
Returns:
Choi: The composition channel as a Choi object.
Raises:
QiskitError: if other cannot be converted to a channel or
has incompatible dimensions.
"""
if qargs is not None:
return Choi(
SuperOp(self).compose(other, qargs=qargs, front=front))
# Convert to Choi matrix
if not isinstance(other, Choi):
other = Choi(other)
# Check dimensions match up
if front and self._input_dim != other._output_dim:
raise QiskitError(
'input_dim of self must match output_dim of other')
if not front and self._output_dim != other._input_dim:
raise QiskitError(
'input_dim of other must match output_dim of self')
if front:
first = np.reshape(other._data, other._bipartite_shape)
second = np.reshape(self._data, self._bipartite_shape)
input_dim = other._input_dim
input_dims = other.input_dims()
output_dim = self._output_dim
output_dims = self.output_dims()
else:
first = np.reshape(self._data, self._bipartite_shape)
second = np.reshape(other._data, other._bipartite_shape)
input_dim = self._input_dim
input_dims = self.input_dims()
output_dim = other._output_dim
output_dims = other.output_dims()
# Contract Choi matrices for composition
data = np.reshape(
np.einsum('iAjB,AkBl->ikjl', first, second),
(input_dim * output_dim, input_dim * output_dim))
return Choi(data, input_dims, output_dims) | Return the composition channel self∘other.
Args:
other (QuantumChannel): a quantum channel.
qargs (list): a list of subsystem positions to compose other on.
front (bool): If False compose in standard order other(self(input))
otherwise compose in reverse order self(other(input))
[default: False]
Returns:
Choi: The composition channel as a Choi object.
Raises:
QiskitError: if other cannot be converted to a channel or
has incompatible dimensions. | Below is the the instruction that describes the task:
### Input:
Return the composition channel self∘other.
Args:
other (QuantumChannel): a quantum channel.
qargs (list): a list of subsystem positions to compose other on.
front (bool): If False compose in standard order other(self(input))
otherwise compose in reverse order self(other(input))
[default: False]
Returns:
Choi: The composition channel as a Choi object.
Raises:
QiskitError: if other cannot be converted to a channel or
has incompatible dimensions.
### Response:
def compose(self, other, qargs=None, front=False):
"""Return the composition channel self∘other.
Args:
other (QuantumChannel): a quantum channel.
qargs (list): a list of subsystem positions to compose other on.
front (bool): If False compose in standard order other(self(input))
otherwise compose in reverse order self(other(input))
[default: False]
Returns:
Choi: The composition channel as a Choi object.
Raises:
QiskitError: if other cannot be converted to a channel or
has incompatible dimensions.
"""
if qargs is not None:
return Choi(
SuperOp(self).compose(other, qargs=qargs, front=front))
# Convert to Choi matrix
if not isinstance(other, Choi):
other = Choi(other)
# Check dimensions match up
if front and self._input_dim != other._output_dim:
raise QiskitError(
'input_dim of self must match output_dim of other')
if not front and self._output_dim != other._input_dim:
raise QiskitError(
'input_dim of other must match output_dim of self')
if front:
first = np.reshape(other._data, other._bipartite_shape)
second = np.reshape(self._data, self._bipartite_shape)
input_dim = other._input_dim
input_dims = other.input_dims()
output_dim = self._output_dim
output_dims = self.output_dims()
else:
first = np.reshape(self._data, self._bipartite_shape)
second = np.reshape(other._data, other._bipartite_shape)
input_dim = self._input_dim
input_dims = self.input_dims()
output_dim = other._output_dim
output_dims = other.output_dims()
# Contract Choi matrices for composition
data = np.reshape(
np.einsum('iAjB,AkBl->ikjl', first, second),
(input_dim * output_dim, input_dim * output_dim))
return Choi(data, input_dims, output_dims) |
def get_nearest(self, path, return_type='file', strict=True, all_=False,
ignore_strict_entities=None, full_search=False, **kwargs):
''' Walk up the file tree from the specified path and return the
nearest matching file(s).
Args:
path (str): The file to search from.
return_type (str): What to return; must be one of 'file' (default)
or 'tuple'.
strict (bool): When True, all entities present in both the input
path and the target file(s) must match perfectly. When False,
files will be ordered by the number of matching entities, and
partial matches will be allowed.
all_ (bool): When True, returns all matching files. When False
(default), only returns the first match.
ignore_strict_entities (list): Optional list of entities to
exclude from strict matching when strict is True. This allows
one to search, e.g., for files of a different type while
matching all other entities perfectly by passing
ignore_strict_entities=['type'].
full_search (bool): If True, searches all indexed files, even if
they don't share a common root with the provided path. If
False, only files that share a common root will be scanned.
kwargs: Optional keywords to pass on to .get().
'''
entities = {}
for ent in self.entities.values():
m = ent.regex.search(path)
if m:
entities[ent.name] = ent._astype(m.group(1))
# Remove any entities we want to ignore when strict matching is on
if strict and ignore_strict_entities is not None:
for k in ignore_strict_entities:
entities.pop(k, None)
results = self.get(return_type='file', **kwargs)
folders = defaultdict(list)
for filename in results:
f = self.get_file(filename)
folders[f.dirname].append(f)
def count_matches(f):
f_ents = f.entities
keys = set(entities.keys()) & set(f_ents.keys())
shared = len(keys)
return [shared, sum([entities[k] == f_ents[k] for k in keys])]
matches = []
search_paths = []
while True:
if path in folders and folders[path]:
search_paths.append(path)
parent = dirname(path)
if parent == path:
break
path = parent
if full_search:
unchecked = set(folders.keys()) - set(search_paths)
search_paths.extend(path for path in unchecked if folders[path])
for path in search_paths:
# Sort by number of matching entities. Also store number of
# common entities, for filtering when strict=True.
num_ents = [[f] + count_matches(f) for f in folders[path]]
# Filter out imperfect matches (i.e., where number of common
# entities does not equal number of matching entities).
if strict:
num_ents = [f for f in num_ents if f[1] == f[2]]
num_ents.sort(key=lambda x: x[2], reverse=True)
if num_ents:
matches.append(num_ents[0][0])
if not all_:
break
matches = [m.path if return_type == 'file' else m.as_named_tuple()
for m in matches]
return matches if all_ else matches[0] if matches else None | Walk up the file tree from the specified path and return the
nearest matching file(s).
Args:
path (str): The file to search from.
return_type (str): What to return; must be one of 'file' (default)
or 'tuple'.
strict (bool): When True, all entities present in both the input
path and the target file(s) must match perfectly. When False,
files will be ordered by the number of matching entities, and
partial matches will be allowed.
all_ (bool): When True, returns all matching files. When False
(default), only returns the first match.
ignore_strict_entities (list): Optional list of entities to
exclude from strict matching when strict is True. This allows
one to search, e.g., for files of a different type while
matching all other entities perfectly by passing
ignore_strict_entities=['type'].
full_search (bool): If True, searches all indexed files, even if
they don't share a common root with the provided path. If
False, only files that share a common root will be scanned.
kwargs: Optional keywords to pass on to .get(). | Below is the the instruction that describes the task:
### Input:
Walk up the file tree from the specified path and return the
nearest matching file(s).
Args:
path (str): The file to search from.
return_type (str): What to return; must be one of 'file' (default)
or 'tuple'.
strict (bool): When True, all entities present in both the input
path and the target file(s) must match perfectly. When False,
files will be ordered by the number of matching entities, and
partial matches will be allowed.
all_ (bool): When True, returns all matching files. When False
(default), only returns the first match.
ignore_strict_entities (list): Optional list of entities to
exclude from strict matching when strict is True. This allows
one to search, e.g., for files of a different type while
matching all other entities perfectly by passing
ignore_strict_entities=['type'].
full_search (bool): If True, searches all indexed files, even if
they don't share a common root with the provided path. If
False, only files that share a common root will be scanned.
kwargs: Optional keywords to pass on to .get().
### Response:
def get_nearest(self, path, return_type='file', strict=True, all_=False,
ignore_strict_entities=None, full_search=False, **kwargs):
''' Walk up the file tree from the specified path and return the
nearest matching file(s).
Args:
path (str): The file to search from.
return_type (str): What to return; must be one of 'file' (default)
or 'tuple'.
strict (bool): When True, all entities present in both the input
path and the target file(s) must match perfectly. When False,
files will be ordered by the number of matching entities, and
partial matches will be allowed.
all_ (bool): When True, returns all matching files. When False
(default), only returns the first match.
ignore_strict_entities (list): Optional list of entities to
exclude from strict matching when strict is True. This allows
one to search, e.g., for files of a different type while
matching all other entities perfectly by passing
ignore_strict_entities=['type'].
full_search (bool): If True, searches all indexed files, even if
they don't share a common root with the provided path. If
False, only files that share a common root will be scanned.
kwargs: Optional keywords to pass on to .get().
'''
entities = {}
for ent in self.entities.values():
m = ent.regex.search(path)
if m:
entities[ent.name] = ent._astype(m.group(1))
# Remove any entities we want to ignore when strict matching is on
if strict and ignore_strict_entities is not None:
for k in ignore_strict_entities:
entities.pop(k, None)
results = self.get(return_type='file', **kwargs)
folders = defaultdict(list)
for filename in results:
f = self.get_file(filename)
folders[f.dirname].append(f)
def count_matches(f):
f_ents = f.entities
keys = set(entities.keys()) & set(f_ents.keys())
shared = len(keys)
return [shared, sum([entities[k] == f_ents[k] for k in keys])]
matches = []
search_paths = []
while True:
if path in folders and folders[path]:
search_paths.append(path)
parent = dirname(path)
if parent == path:
break
path = parent
if full_search:
unchecked = set(folders.keys()) - set(search_paths)
search_paths.extend(path for path in unchecked if folders[path])
for path in search_paths:
# Sort by number of matching entities. Also store number of
# common entities, for filtering when strict=True.
num_ents = [[f] + count_matches(f) for f in folders[path]]
# Filter out imperfect matches (i.e., where number of common
# entities does not equal number of matching entities).
if strict:
num_ents = [f for f in num_ents if f[1] == f[2]]
num_ents.sort(key=lambda x: x[2], reverse=True)
if num_ents:
matches.append(num_ents[0][0])
if not all_:
break
matches = [m.path if return_type == 'file' else m.as_named_tuple()
for m in matches]
return matches if all_ else matches[0] if matches else None |
def response(self, status, content_type, content, headers=None):
"""
Send an HTTP response
"""
assert not isinstance(content, (str, bytes)), 'response content cannot be of type str or bytes'
response_headers = [('Content-Type', content_type)]
if headers:
response_headers.extend(headers)
self.start_response(status, response_headers)
return content | Send an HTTP response | Below is the instruction that describes the task:
### Input:
Send an HTTP response
### Response:
def response(self, status, content_type, content, headers=None):
    """
    Send an HTTP response.

    :param status: HTTP status line, e.g. ``'200 OK'``
    :param content_type: value for the ``Content-Type`` header
    :param content: body iterable; must NOT be a bare str/bytes
    :param headers: optional extra ``(name, value)`` header pairs
    :returns: *content*, after ``start_response`` has been called
    """
    assert not isinstance(content, (str, bytes)), 'response content cannot be of type str or bytes'
    combined_headers = [('Content-Type', content_type)]
    if headers:
        combined_headers.extend(headers)
    self.start_response(status, combined_headers)
    return content
def set_defaults(self, config_file):
"""Set defaults.
"""
self.defaults = Defaults(config_file)
self.locations = Locations(self.defaults)
self.python = Python()
self.setuptools = Setuptools()
self.scp = SCP()
self.scms = SCMFactory()
self.urlparser = URLParser()
self.skipcommit = not self.defaults.commit
self.skiptag = not self.defaults.tag
self.skipregister = False # per server
self.skipupload = False # special
self.push = self.defaults.push
self.develop = False # special
self.quiet = self.defaults.quiet
self.sign = False # per server
self.list = False
self.manifest = self.defaults.manifest
self.identity = '' # per server
self.branch = ''
self.scmtype = ''
self.infoflags = []
self.formats = []
self.distributions = []
self.directory = os.curdir
self.scm = None | Set defaults. | Below is the the instruction that describes the task:
### Input:
Set defaults.
### Response:
def set_defaults(self, config_file):
    """Set defaults.

    Builds the collaborator objects from *config_file* and resets all
    option-controlled flags/fields to their initial values.

    :param config_file: path to the configuration file handed to ``Defaults``.
    """
    # Collaborator/helper objects.
    self.defaults = Defaults(config_file)
    self.locations = Locations(self.defaults)
    self.python = Python()
    self.setuptools = Setuptools()
    self.scp = SCP()
    self.scms = SCMFactory()
    self.urlparser = URLParser()
    # Flags seeded from the configured defaults.
    self.skipcommit = not self.defaults.commit
    self.skiptag = not self.defaults.tag
    self.skipregister = False # per server
    self.skipupload = False # special
    self.push = self.defaults.push
    self.develop = False # special
    self.quiet = self.defaults.quiet
    self.sign = False # per server
    self.list = False
    self.manifest = self.defaults.manifest
    self.identity = '' # per server
    # Fields filled in later from command-line options / detection.
    self.branch = ''
    self.scmtype = ''
    self.infoflags = []
    self.formats = []
    self.distributions = []
    self.directory = os.curdir
    self.scm = None
def mirror(self, handler, path_from, path_to, log_files=False):
"""Recursively mirror the contents of "path_from" into "path_to".
"handler" should be self.mirror_to_local_no_recursion or
self.mirror_to_remote_no_recursion to represent which way the files are
moving.
"""
q = deque([''])
while q:
path = q.popleft()
full_from = ('%s/%s' % (path_from, path)) if path else path_from
full_to = ('%s/%s' % (path_to, path)) if path else path_to
subdirs = handler(full_from, full_to, log_files)
for subdir in subdirs:
q.append(('%s/%s' % (path, subdir)) if path else subdir) | Recursively mirror the contents of "path_from" into "path_to".
"handler" should be self.mirror_to_local_no_recursion or
self.mirror_to_remote_no_recursion to represent which way the files are
moving. | Below is the instruction that describes the task:
### Input:
Recursively mirror the contents of "path_from" into "path_to".
"handler" should be self.mirror_to_local_no_recursion or
self.mirror_to_remote_no_recursion to represent which way the files are
moving.
### Response:
def mirror(self, handler, path_from, path_to, log_files=False):
    """Recursively mirror the contents of "path_from" into "path_to".
    "handler" should be self.mirror_to_local_no_recursion or
    self.mirror_to_remote_no_recursion to represent which way the files are
    moving.
    """
    # Breadth-first walk over relative sub-paths; '' denotes the root.
    pending = deque([''])
    while pending:
        rel = pending.popleft()
        src = '%s/%s' % (path_from, rel) if rel else path_from
        dst = '%s/%s' % (path_to, rel) if rel else path_to
        for child in handler(src, dst, log_files):
            pending.append('%s/%s' % (rel, child) if rel else child)
def buffer_focus(self, buf, redraw=True):
"""focus given :class:`~alot.buffers.Buffer`."""
# call pre_buffer_focus hook
prehook = settings.get_hook('pre_buffer_focus')
if prehook is not None:
prehook(ui=self, dbm=self.dbman, buf=buf)
success = False
if buf not in self.buffers:
logging.error('tried to focus unknown buffer')
else:
if self.current_buffer != buf:
self.current_buffer = buf
self.mode = buf.modename
if isinstance(self.current_buffer, BufferlistBuffer):
self.current_buffer.rebuild()
self.update()
success = True
# call post_buffer_focus hook
posthook = settings.get_hook('post_buffer_focus')
if posthook is not None:
posthook(ui=self, dbm=self.dbman, buf=buf, success=success) | focus given :class:`~alot.buffers.Buffer`. | Below is the the instruction that describes the task:
### Input:
focus given :class:`~alot.buffers.Buffer`.
### Response:
def buffer_focus(self, buf, redraw=True):
    """focus given :class:`~alot.buffers.Buffer`.

    Fires the ``pre_buffer_focus`` hook, switches the current buffer
    (and UI mode) if *buf* is managed by this UI, then fires the
    ``post_buffer_focus`` hook with a ``success`` flag.

    :param buf: buffer to focus
    :param redraw: not referenced in this body; kept for interface
        compatibility
    """
    # call pre_buffer_focus hook
    prehook = settings.get_hook('pre_buffer_focus')
    if prehook is not None:
        prehook(ui=self, dbm=self.dbman, buf=buf)
    success = False
    if buf not in self.buffers:
        logging.error('tried to focus unknown buffer')
    else:
        if self.current_buffer != buf:
            self.current_buffer = buf
            self.mode = buf.modename
        # a buffer list displays the buffers themselves, so it must be
        # rebuilt whenever the focused buffer changes
        if isinstance(self.current_buffer, BufferlistBuffer):
            self.current_buffer.rebuild()
        self.update()
        success = True
    # call post_buffer_focus hook (always, even on failure)
    posthook = settings.get_hook('post_buffer_focus')
    if posthook is not None:
        posthook(ui=self, dbm=self.dbman, buf=buf, success=success)
def prj_view_atype(self, *args, **kwargs):
"""View the, in the atype table view selected, assettype.
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_prj:
return
i = self.prj_atype_tablev.currentIndex()
item = i.internalPointer()
if item:
atype = item.internal_data()
self.view_atype(atype) | View the, in the atype table view selected, assettype.
:returns: None
:rtype: None
:raises: None | Below is the the instruction that describes the task:
### Input:
View the, in the atype table view selected, assettype.
:returns: None
:rtype: None
:raises: None
### Response:
def prj_view_atype(self, *args, **kwargs):
    """View the assettype currently selected in the atype table view.

    :returns: None
    :rtype: None
    :raises: None
    """
    if not self.cur_prj:
        return
    index = self.prj_atype_tablev.currentIndex()
    node = index.internalPointer()
    if not node:
        return
    self.view_atype(node.internal_data())
def _validate_raw_nc(self):
"""Checks that raw netCDF file has the right dimensions and variables.
Returns
-------
int:
Length of rivid dimension.
int:
Length of time dimension.
Remarks: Raises exception if file doesn't validate.
"""
self.raw_nc_list = []
# add one for the first flow value RAPID
# does not include
total_time_len = 1
id_len_list = []
for rapid_output_file in self.rapid_output_file_list:
qout_nc = RAPIDDataset(rapid_output_file)
id_len_list.append(qout_nc.size_river_id)
total_time_len += qout_nc.size_time
self.raw_nc_list.append(qout_nc)
# make sure river id lists are the same
for id_len_undex in range(1, len(id_len_list)):
if id_len_list[id_len_undex] != id_len_list[0]:
raise Exception("River ID size is different in "
"one of the files ...")
for raw_nc_index in range(1, len(self.raw_nc_list)):
if not (self.raw_nc_list[raw_nc_index].get_river_id_array() ==
self.raw_nc_list[0].get_river_id_array()).all():
raise Exception("River IDs are different in "
"files ...")
return id_len_list[0], total_time_len | Checks that raw netCDF file has the right dimensions and variables.
Returns
-------
int:
Length of rivid dimension.
int:
Length of time dimension.
Remarks: Raises exception if file doesn't validate. | Below is the the instruction that describes the task:
### Input:
Checks that raw netCDF file has the right dimensions and variables.
Returns
-------
int:
Length of rivid dimension.
int:
Length of time dimension.
Remarks: Raises exception if file doesn't validate.
### Response:
def _validate_raw_nc(self):
    """Checks that raw netCDF file has the right dimensions and variables.
    Returns
    -------
    int:
        Length of rivid dimension.
    int:
        Length of time dimension.
    Remarks: Raises exception if file doesn't validate.
    """
    self.raw_nc_list = []
    # Start from one because RAPID does not include the first flow value.
    total_time_len = 1
    river_id_sizes = []
    for rapid_output_file in self.rapid_output_file_list:
        qout_nc = RAPIDDataset(rapid_output_file)
        river_id_sizes.append(qout_nc.size_river_id)
        total_time_len += qout_nc.size_time
        self.raw_nc_list.append(qout_nc)
    # every file must carry the same number of river IDs ...
    for size in river_id_sizes[1:]:
        if size != river_id_sizes[0]:
            raise Exception("River ID size is different in "
                            "one of the files ...")
    # ... and the same river IDs in the same order
    if len(self.raw_nc_list) > 1:
        reference_ids = self.raw_nc_list[0].get_river_id_array()
        for qout_nc in self.raw_nc_list[1:]:
            if not (qout_nc.get_river_id_array() == reference_ids).all():
                raise Exception("River IDs are different in "
                                "files ...")
    return river_id_sizes[0], total_time_len
def get_tunings(instrument=None, nr_of_strings=None, nr_of_courses=None):
"""Search tunings on instrument, strings, courses or a combination.
The instrument is actually treated like a case-insensitive prefix. So
asking for 'bass' yields the same tunings as 'Bass Guitar'; the string
'ba' yields all the instruments starting with 'ba'.
Example:
>>> tunings.get_tunings(nr_of_string = 4)
>>> tunings.get_tunings('bass')
"""
search = ''
if instrument is not None:
search = str.upper(instrument)
result = []
keys = _known.keys()
inkeys = search in keys
for x in keys:
if (instrument is None or not inkeys and x.find(search) == 0 or
inkeys and search == x):
if nr_of_strings is None and nr_of_courses is None:
result += _known[x][1].values()
elif nr_of_strings is not None and nr_of_courses is None:
result += [y for y in _known[x][1].itervalues()
if y.count_strings() == nr_of_strings]
elif nr_of_strings is None and nr_of_courses is not None:
result += [y for y in _known[x][1].itervalues()
if y.count_courses() == nr_of_courses]
else:
result += [y for y in _known[x][1].itervalues()
if y.count_strings() == nr_of_strings
and y.count_courses() == nr_of_courses]
return result | Search tunings on instrument, strings, courses or a combination.
The instrument is actually treated like a case-insensitive prefix. So
asking for 'bass' yields the same tunings as 'Bass Guitar'; the string
'ba' yields all the instruments starting with 'ba'.
Example:
>>> tunings.get_tunings(nr_of_string = 4)
>>> tunings.get_tunings('bass') | Below is the the instruction that describes the task:
### Input:
Search tunings on instrument, strings, courses or a combination.
The instrument is actually treated like a case-insensitive prefix. So
asking for 'bass' yields the same tunings as 'Bass Guitar'; the string
'ba' yields all the instruments starting with 'ba'.
Example:
>>> tunings.get_tunings(nr_of_string = 4)
>>> tunings.get_tunings('bass')
### Response:
def get_tunings(instrument=None, nr_of_strings=None, nr_of_courses=None):
    """Search tunings on instrument, strings, courses or a combination.

    The instrument is actually treated like a case-insensitive prefix. So
    asking for 'bass' yields the same tunings as 'Bass Guitar'; the string
    'ba' yields all the instruments starting with 'ba'.

    Example:
    >>> tunings.get_tunings(nr_of_strings = 4)
    >>> tunings.get_tunings('bass')
    """
    search = ''
    if instrument is not None:
        search = str.upper(instrument)
    result = []
    keys = _known.keys()
    inkeys = search in keys
    for x in keys:
        # match when: no instrument filter, OR prefix match (search is
        # not itself a key), OR exact key match
        if (instrument is None
                or (not inkeys and x.startswith(search))
                or (inkeys and search == x)):
            # BUG FIX: the original used dict.itervalues() here, which
            # only exists on Python 2 and was inconsistent with the
            # .values() call used in the unfiltered branch; .values()
            # works on both Python 2 and 3.
            candidates = _known[x][1].values()
            if nr_of_strings is None and nr_of_courses is None:
                result += candidates
            elif nr_of_courses is None:
                result += [y for y in candidates
                           if y.count_strings() == nr_of_strings]
            elif nr_of_strings is None:
                result += [y for y in candidates
                           if y.count_courses() == nr_of_courses]
            else:
                result += [y for y in candidates
                           if y.count_strings() == nr_of_strings
                           and y.count_courses() == nr_of_courses]
    return result
def lchisqprob(chisq,df):
"""
Returns the (1-tailed) probability value associated with the provided
chi-square value and df. Adapted from chisq.c in Gary Perlman's |Stat.
Usage: lchisqprob(chisq,df)
"""
BIG = 20.0
def ex(x):
BIG = 20.0
if x < -BIG:
return 0.0
else:
return math.exp(x)
if chisq <=0 or df < 1:
return 1.0
a = 0.5 * chisq
if df%2 == 0:
even = 1
else:
even = 0
if df > 1:
y = ex(-a)
if even:
s = y
else:
s = 2.0 * zprob(-math.sqrt(chisq))
if (df > 2):
chisq = 0.5 * (df - 1.0)
if even:
z = 1.0
else:
z = 0.5
if a > BIG:
if even:
e = 0.0
else:
e = math.log(math.sqrt(math.pi))
c = math.log(a)
while (z <= chisq):
e = math.log(z) + e
s = s + ex(c*z-a-e)
z = z + 1.0
return s
else:
if even:
e = 1.0
else:
e = 1.0 / math.sqrt(math.pi) / math.sqrt(a)
c = 0.0
while (z <= chisq):
e = e * (a/float(z))
c = c + e
z = z + 1.0
return (c*y+s)
else:
return s | Returns the (1-tailed) probability value associated with the provided
chi-square value and df. Adapted from chisq.c in Gary Perlman's |Stat.
Usage: lchisqprob(chisq,df) | Below is the instruction that describes the task:
### Input:
Returns the (1-tailed) probability value associated with the provided
chi-square value and df. Adapted from chisq.c in Gary Perlman's |Stat.
Usage: lchisqprob(chisq,df)
### Response:
def lchisqprob(chisq,df):
"""
Returns the (1-tailed) probability value associated with the provided
chi-square value and df. Adapted from chisq.c in Gary Perlman's |Stat.
Usage: lchisqprob(chisq,df)
"""
BIG = 20.0
def ex(x):
BIG = 20.0
if x < -BIG:
return 0.0
else:
return math.exp(x)
if chisq <=0 or df < 1:
return 1.0
a = 0.5 * chisq
if df%2 == 0:
even = 1
else:
even = 0
if df > 1:
y = ex(-a)
if even:
s = y
else:
s = 2.0 * zprob(-math.sqrt(chisq))
if (df > 2):
chisq = 0.5 * (df - 1.0)
if even:
z = 1.0
else:
z = 0.5
if a > BIG:
if even:
e = 0.0
else:
e = math.log(math.sqrt(math.pi))
c = math.log(a)
while (z <= chisq):
e = math.log(z) + e
s = s + ex(c*z-a-e)
z = z + 1.0
return s
else:
if even:
e = 1.0
else:
e = 1.0 / math.sqrt(math.pi) / math.sqrt(a)
c = 0.0
while (z <= chisq):
e = e * (a/float(z))
c = c + e
z = z + 1.0
return (c*y+s)
else:
return s |
def where_is(strings, pattern, n=1, lookup_func=re.match):
"""Return index of the nth match found of pattern in strings
Parameters
----------
strings: list of str
List of strings
pattern: str
Pattern to be matched
nth: int
Number of times the match must happen to return the item index.
lookup_func: callable
Function to match each item in strings to the pattern, e.g., re.match or re.search.
Returns
-------
index: int
Index of the nth item that matches the pattern.
If there are no n matches will return -1
"""
count = 0
for idx, item in enumerate(strings):
if lookup_func(pattern, item):
count += 1
if count == n:
return idx
return -1 | Return index of the nth match found of pattern in strings
Parameters
----------
strings: list of str
List of strings
pattern: str
Pattern to be matched
nth: int
Number of times the match must happen to return the item index.
lookup_func: callable
Function to match each item in strings to the pattern, e.g., re.match or re.search.
Returns
-------
index: int
Index of the nth item that matches the pattern.
If there are no n matches will return -1 | Below is the the instruction that describes the task:
### Input:
Return index of the nth match found of pattern in strings
Parameters
----------
strings: list of str
List of strings
pattern: str
Pattern to be matched
nth: int
Number of times the match must happen to return the item index.
lookup_func: callable
Function to match each item in strings to the pattern, e.g., re.match or re.search.
Returns
-------
index: int
Index of the nth item that matches the pattern.
If there are no n matches will return -1
### Response:
def where_is(strings, pattern, n=1, lookup_func=re.match):
    """Return index of the nth match found of pattern in strings
    Parameters
    ----------
    strings: list of str
        List of strings
    pattern: str
        Pattern to be matched
    n: int
        Number of times the match must happen to return the item index.
    lookup_func: callable
        Function to match each item in strings to the pattern, e.g., re.match or re.search.
    Returns
    -------
    index: int
        Index of the nth item that matches the pattern.
        If there are no n matches will return -1
    """
    matches_seen = 0
    for position, candidate in enumerate(strings):
        if not lookup_func(pattern, candidate):
            continue
        matches_seen += 1
        if matches_seen == n:
            return position
    return -1
def _parse_sv8_int(fileobj, limit=9):
"""Reads (max limit) bytes from fileobj until the MSB is zero.
All 7 LSB will be merged to a big endian uint.
Raises ValueError in case not MSB is zero, or EOFError in
case the file ended before limit is reached.
Returns (parsed number, number of bytes read)
"""
num = 0
for i in xrange(limit):
c = fileobj.read(1)
if len(c) != 1:
raise EOFError
c = bytearray(c)
num = (num << 7) | (c[0] & 0x7F)
if not c[0] & 0x80:
return num, i + 1
if limit > 0:
raise ValueError
return 0, 0 | Reads (max limit) bytes from fileobj until the MSB is zero.
All 7 LSB will be merged to a big endian uint.
Raises ValueError in case not MSB is zero, or EOFError in
case the file ended before limit is reached.
Returns (parsed number, number of bytes read) | Below is the the instruction that describes the task:
### Input:
Reads (max limit) bytes from fileobj until the MSB is zero.
All 7 LSB will be merged to a big endian uint.
Raises ValueError in case not MSB is zero, or EOFError in
case the file ended before limit is reached.
Returns (parsed number, number of bytes read)
### Response:
def _parse_sv8_int(fileobj, limit=9):
"""Reads (max limit) bytes from fileobj until the MSB is zero.
All 7 LSB will be merged to a big endian uint.
Raises ValueError in case not MSB is zero, or EOFError in
case the file ended before limit is reached.
Returns (parsed number, number of bytes read)
"""
num = 0
for i in xrange(limit):
c = fileobj.read(1)
if len(c) != 1:
raise EOFError
c = bytearray(c)
num = (num << 7) | (c[0] & 0x7F)
if not c[0] & 0x80:
return num, i + 1
if limit > 0:
raise ValueError
return 0, 0 |
def strategyKLogN(kls, n, k=4):
"""Return the directory names to preserve under the KLogN purge strategy."""
assert(k>1)
s = set([n])
i = 0
while k**i <= n:
s.update(range(n, n-k*k**i, -k**i))
i += 1
n -= n % k**i
return set(map(str, filter(lambda x:x>=0, s))) | Return the directory names to preserve under the KLogN purge strategy. | Below is the the instruction that describes the task:
### Input:
Return the directory names to preserve under the KLogN purge strategy.
### Response:
def strategyKLogN(kls, n, k=4):
    """Return the directory names to preserve under the KLogN purge strategy."""
    assert k > 1
    keep = {n}
    exponent = 0
    # At each power of k, keep k entries spaced k**exponent apart,
    # then snap n down to the next multiple of the following power.
    while k ** exponent <= n:
        step = k ** exponent
        keep.update(range(n, n - k * step, -step))
        exponent += 1
        n -= n % (k ** exponent)
    return {str(value) for value in keep if value >= 0}
def process_shells_ordered(self, shells):
"""Processing a list of shells one after the other."""
output = []
for shell in shells:
entry = shell['entry']
config = ShellConfig(script=entry['script'], title=entry['title'] if 'title' in entry else '',
model=shell['model'], env=shell['env'], item=shell['item'],
dry_run=shell['dry_run'], debug=shell['debug'], strict=shell['strict'],
variables=shell['variables'],
temporary_scripts_path=shell['temporary_scripts_path'])
result = Adapter(self.process_shell(get_creator_by_name(shell['creator']), entry, config))
output += result.output
self.__handle_variable(entry, result.output)
if not result.success:
return {'success': False, 'output': output}
return {'success': True, 'output': output} | Processing a list of shells one after the other. | Below is the the instruction that describes the task:
### Input:
Processing a list of shells one after the other.
### Response:
def process_shells_ordered(self, shells):
    """Processing a list of shells one after the other."""
    collected_output = []
    for shell in shells:
        entry = shell['entry']
        config = ShellConfig(script=entry['script'],
                             title=entry.get('title', ''),
                             model=shell['model'], env=shell['env'],
                             item=shell['item'], dry_run=shell['dry_run'],
                             debug=shell['debug'], strict=shell['strict'],
                             variables=shell['variables'],
                             temporary_scripts_path=shell['temporary_scripts_path'])
        result = Adapter(self.process_shell(get_creator_by_name(shell['creator']), entry, config))
        collected_output += result.output
        self.__handle_variable(entry, result.output)
        # stop at the first failing shell, returning what ran so far
        if not result.success:
            return {'success': False, 'output': collected_output}
    return {'success': True, 'output': collected_output}
def adjust_for_scratch(self):
"""
Remove certain plugins in order to handle the "scratch build"
scenario. Scratch builds must not affect subsequent builds,
and should not be imported into Koji.
"""
if self.user_params.scratch.value:
remove_plugins = [
("prebuild_plugins", "koji_parent"),
("postbuild_plugins", "compress"), # required only to make an archive for Koji
("postbuild_plugins", "pulp_pull"), # required only to make an archive for Koji
("postbuild_plugins", "compare_components"),
("postbuild_plugins", "import_image"),
("exit_plugins", "koji_promote"),
("exit_plugins", "koji_tag_build"),
("exit_plugins", "import_image"),
("prebuild_plugins", "check_and_set_rebuild"),
("prebuild_plugins", "stop_autorebuild_if_disabled")
]
if not self.has_tag_suffixes_placeholder():
remove_plugins.append(("postbuild_plugins", "tag_from_config"))
for when, which in remove_plugins:
self.pt.remove_plugin(when, which, 'removed from scratch build request') | Remove certain plugins in order to handle the "scratch build"
scenario. Scratch builds must not affect subsequent builds,
and should not be imported into Koji. | Below is the the instruction that describes the task:
### Input:
Remove certain plugins in order to handle the "scratch build"
scenario. Scratch builds must not affect subsequent builds,
and should not be imported into Koji.
### Response:
def adjust_for_scratch(self):
"""
Remove certain plugins in order to handle the "scratch build"
scenario. Scratch builds must not affect subsequent builds,
and should not be imported into Koji.
"""
if self.user_params.scratch.value:
remove_plugins = [
("prebuild_plugins", "koji_parent"),
("postbuild_plugins", "compress"), # required only to make an archive for Koji
("postbuild_plugins", "pulp_pull"), # required only to make an archive for Koji
("postbuild_plugins", "compare_components"),
("postbuild_plugins", "import_image"),
("exit_plugins", "koji_promote"),
("exit_plugins", "koji_tag_build"),
("exit_plugins", "import_image"),
("prebuild_plugins", "check_and_set_rebuild"),
("prebuild_plugins", "stop_autorebuild_if_disabled")
]
if not self.has_tag_suffixes_placeholder():
remove_plugins.append(("postbuild_plugins", "tag_from_config"))
for when, which in remove_plugins:
self.pt.remove_plugin(when, which, 'removed from scratch build request') |
def dispatch_map(self, rep, as_map_key, cache):
"""Used to determine and dipatch the writing of a map - a simple
map with strings as keys, or a complex map, whose keys are also
compound types.
"""
if self.are_stringable_keys(rep):
return self.emit_map(rep, as_map_key, cache)
return self.emit_cmap(rep, as_map_key, cache) | Used to determine and dipatch the writing of a map - a simple
map with strings as keys, or a complex map, whose keys are also
compound types. | Below is the the instruction that describes the task:
### Input:
Used to determine and dipatch the writing of a map - a simple
map with strings as keys, or a complex map, whose keys are also
compound types.
### Response:
def dispatch_map(self, rep, as_map_key, cache):
    """Used to determine and dispatch the writing of a map - a simple
    map with strings as keys, or a complex map, whose keys are also
    compound types.
    """
    if not self.are_stringable_keys(rep):
        return self.emit_cmap(rep, as_map_key, cache)
    return self.emit_map(rep, as_map_key, cache)
def filename(self, type_, id_):
"""
cache filename to read for this type/id.
:param type_: str, "user" or "tag"
:param id_: int, eg. 123456
:returns: str
"""
profile = self.connection.profile
return os.path.join(self.directory, profile, type_, str(id_)) | cache filename to read for this type/id.
:param type_: str, "user" or "tag"
:param id_: int, eg. 123456
:returns: str | Below is the the instruction that describes the task:
### Input:
cache filename to read for this type/id.
:param type_: str, "user" or "tag"
:param id_: int, eg. 123456
:returns: str
### Response:
def filename(self, type_, id_):
    """
    cache filename to read for this type/id.

    :param type_: str, "user" or "tag"
    :param id_: int, eg. 123456
    :returns: str path <directory>/<profile>/<type_>/<id_>
    """
    return os.path.join(self.directory, self.connection.profile,
                        type_, str(id_))
def send_message(self, msg):
"""
Send a message to the MUC.
:param msg: The message to send.
:type msg: :class:`aioxmpp.Message`
:return: The stanza token of the message.
:rtype: :class:`~aioxmpp.stream.StanzaToken`
There is no need to set the address attributes or the type of the
message correctly; those will be overridden by this method to conform
to the requirements of a message to the MUC. Other attributes are left
untouched (except that :meth:`~.StanzaBase.autoset_id` is called) and
can be used as desired for the message.
.. seealso::
:meth:`.AbstractConversation.send_message` for the full interface
specification.
"""
msg.type_ = aioxmpp.MessageType.GROUPCHAT
msg.to = self._mucjid
# see https://mail.jabber.org/pipermail/standards/2017-January/032048.html # NOQA
# for a full discussion on the rationale for this.
# TL;DR: we want to help entities to discover that a message is related
# to a MUC.
msg.xep0045_muc_user = muc_xso.UserExt()
result = self.service.client.enqueue(msg)
return result | Send a message to the MUC.
:param msg: The message to send.
:type msg: :class:`aioxmpp.Message`
:return: The stanza token of the message.
:rtype: :class:`~aioxmpp.stream.StanzaToken`
There is no need to set the address attributes or the type of the
message correctly; those will be overridden by this method to conform
to the requirements of a message to the MUC. Other attributes are left
untouched (except that :meth:`~.StanzaBase.autoset_id` is called) and
can be used as desired for the message.
.. seealso::
:meth:`.AbstractConversation.send_message` for the full interface
specification. | Below is the the instruction that describes the task:
### Input:
Send a message to the MUC.
:param msg: The message to send.
:type msg: :class:`aioxmpp.Message`
:return: The stanza token of the message.
:rtype: :class:`~aioxmpp.stream.StanzaToken`
There is no need to set the address attributes or the type of the
message correctly; those will be overridden by this method to conform
to the requirements of a message to the MUC. Other attributes are left
untouched (except that :meth:`~.StanzaBase.autoset_id` is called) and
can be used as desired for the message.
.. seealso::
:meth:`.AbstractConversation.send_message` for the full interface
specification.
### Response:
def send_message(self, msg):
    """
    Send a message to the MUC.

    :param msg: The message to send.
    :type msg: :class:`aioxmpp.Message`
    :return: The stanza token of the message.
    :rtype: :class:`~aioxmpp.stream.StanzaToken`

    The addressing attributes and the type of *msg* are overwritten to
    conform to a groupchat message for this MUC; other attributes are
    left untouched and can be used as desired.

    .. seealso::

        :meth:`.AbstractConversation.send_message` for the full
        interface specification.
    """
    msg.type_ = aioxmpp.MessageType.GROUPCHAT
    msg.to = self._mucjid
    # Attach an (empty) MUC user extension so receiving entities can
    # discover that the message is MUC-related; see
    # https://mail.jabber.org/pipermail/standards/2017-January/032048.html
    msg.xep0045_muc_user = muc_xso.UserExt()
    return self.service.client.enqueue(msg)
def create_derivative(self, word):
''' Creates derivative of (base) word by adding any affixes that apply '''
result = None
if self.char_to_strip != '':
if self.opt == "PFX":
result = word[len(self.char_to_strip):len(word)]
result = self.affix + result
else: # SFX
result = word[0:len(word) - len(self.char_to_strip)]
result = result + self.affix
else: # No characters to strip
if self.opt == "PFX":
result = self.affix + word
else: # SFX
result = word + self.affix
# None means word does not meet the set condition
return result | Creates derivative of (base) word by adding any affixes that apply | Below is the the instruction that describes the task:
### Input:
Creates derivative of (base) word by adding any affixes that apply
### Response:
def create_derivative(self, word):
    ''' Creates derivative of (base) word by adding any affixes that apply '''
    strip_len = len(self.char_to_strip)
    if self.opt == "PFX":
        # drop the stripped prefix, then prepend the affix
        derived = self.affix + word[strip_len:]
    else:  # SFX
        # drop the stripped suffix, then append the affix
        derived = word[:len(word) - strip_len] + self.affix
    # None would mean the word does not meet the set condition
    return derived
def container_elem_type(container_type, params):
"""
Returns container element type
:param container_type:
:param params:
:return:
"""
elem_type = params[0] if params else None
if elem_type is None:
elem_type = container_type.ELEM_TYPE
return elem_type | Returns container element type
:param container_type:
:param params:
:return: | Below is the the instruction that describes the task:
### Input:
Returns container element type
:param container_type:
:param params:
:return:
### Response:
def container_elem_type(container_type, params):
    """
    Returns container element type

    :param container_type: container class carrying an ``ELEM_TYPE`` default
    :param params: optional sequence whose first entry overrides the default
    :return: the explicit element type, or ``container_type.ELEM_TYPE``
    """
    if params and params[0] is not None:
        return params[0]
    return container_type.ELEM_TYPE
def add_child(self, n, parent, **attrs):
'''
API: add_child(self, n, parent, **attrs)
Description:
Adds child n to node parent and return Node n.
Pre:
Node with name parent should exist.
Input:
n: Child node name.
parent: Parent node name.
attrs: Attributes of node being added.
Post:
Updates Graph related graph data attributes.
Return:
Returns n Node instance.
'''
attrs['level'] = self.get_node(parent).get_attr('level') + 1
attrs['parent'] = parent
self.add_node(n, **attrs)
self.add_edge(parent, n)
return self.get_node(n) | API: add_child(self, n, parent, **attrs)
Description:
Adds child n to node parent and return Node n.
Pre:
Node with name parent should exist.
Input:
n: Child node name.
parent: Parent node name.
attrs: Attributes of node being added.
Post:
Updates Graph related graph data attributes.
Return:
Returns n Node instance. | Below is the the instruction that describes the task:
### Input:
API: add_child(self, n, parent, **attrs)
Description:
Adds child n to node parent and return Node n.
Pre:
Node with name parent should exist.
Input:
n: Child node name.
parent: Parent node name.
attrs: Attributes of node being added.
Post:
Updates Graph related graph data attributes.
Return:
Returns n Node instance.
### Response:
def add_child(self, n, parent, **attrs):
    '''
    API: add_child(self, n, parent, **attrs)
    Description:
        Adds child n to node parent and return Node n.
    Pre:
        Node with name parent should exist.
    Input:
        n: Child node name.
        parent: Parent node name.
        attrs: Attributes of node being added.
    Post:
        Updates Graph related graph data attributes.
    Return:
        Returns n Node instance.
    '''
    # the child lives one level below its parent
    parent_level = self.get_node(parent).get_attr('level')
    attrs['level'] = parent_level + 1
    attrs['parent'] = parent
    self.add_node(n, **attrs)
    self.add_edge(parent, n)
    return self.get_node(n)
def get_axis_num(self, dim: Union[Hashable, Iterable[Hashable]]
) -> Union[int, Tuple[int, ...]]:
"""Return axis number(s) corresponding to dimension(s) in this array.
Parameters
----------
dim : str or iterable of str
Dimension name(s) for which to lookup axes.
Returns
-------
int or tuple of int
Axis number or numbers corresponding to the given dimensions.
"""
if isinstance(dim, Iterable) and not isinstance(dim, str):
return tuple(self._get_axis_num(d) for d in dim)
else:
return self._get_axis_num(dim) | Return axis number(s) corresponding to dimension(s) in this array.
Parameters
----------
dim : str or iterable of str
Dimension name(s) for which to lookup axes.
Returns
-------
int or tuple of int
Axis number or numbers corresponding to the given dimensions. | Below is the the instruction that describes the task:
### Input:
Return axis number(s) corresponding to dimension(s) in this array.
Parameters
----------
dim : str or iterable of str
Dimension name(s) for which to lookup axes.
Returns
-------
int or tuple of int
Axis number or numbers corresponding to the given dimensions.
### Response:
def get_axis_num(self, dim: Union[Hashable, Iterable[Hashable]]
) -> Union[int, Tuple[int, ...]]:
"""Return axis number(s) corresponding to dimension(s) in this array.
Parameters
----------
dim : str or iterable of str
Dimension name(s) for which to lookup axes.
Returns
-------
int or tuple of int
Axis number or numbers corresponding to the given dimensions.
"""
if isinstance(dim, Iterable) and not isinstance(dim, str):
return tuple(self._get_axis_num(d) for d in dim)
else:
return self._get_axis_num(dim) |
def check_geom(c1, a1, c2, a2, tol=_DEF.XYZ_COORD_MATCH_TOL):
""" Check for consistency of two geometries and atom symbol lists
Cartesian coordinates are considered consistent with the input
coords if each component matches to within `tol`. If coords or
atoms vectors are passed that are of mismatched lengths, a
|False| value is returned.
Both coords vectors must be three times the length of the atoms vectors
or a :exc:`~exceptions.ValueError` is raised.
Parameters
----------
c1
length-3N |npfloat_| --
Vector of first set of stacked 'lab-frame' Cartesian coordinates
a1
length-N |str| or |int| --
Vector of first set of atom symbols or atomic numbers
c2
length-3N |npfloat_| --
Vector of second set of stacked 'lab-frame' Cartesian coordinates
a2
length-N |str| or |int| --
Vector of second set of atom symbols or atomic numbers
tol
|float|, optional --
Tolerance for acceptable deviation of each geometry coordinate
from that in the reference instance to still be considered
matching. Default value is specified by
:attr:`opan.const.DEF.XYZ_COORD_MATCH_TOL`)
Returns
-------
match
|bool| --
Whether input coords and atoms match (|True|) or
not (|False|)
fail_type
:class:`~opan.const.EnumCheckGeomMismatch` or |None|
-- Type of check failure
If `match` == |True|:
Returns as |None|
If `match` == |False|:
An :class:`~opan.const.EnumCheckGeomMismatch` value
indicating the reason for the failed match:
:attr:`~opan.const.EnumCheckGeomMismatch.DIMENSION`
-- Mismatch in geometry size (number of atoms)
:attr:`~opan.const.EnumCheckGeomMismatch.COORDS`
-- Mismatch in one or more coordinates
:attr:`~opan.const.EnumCheckGeomMismatch.ATOMS`
-- Mismatch in one or more atoms
fail_loc
length-3N |bool| or length-N |bool| or |None| --
Mismatched elements
If `match` == |True|:
Returns as |None|
If `match` == |False|:
For "array-level" problems such as a dimension mismatch, a
|None| value is returned.
For "element-level" problems, a vector is returned
indicating positions of mismatch in either `coords` or `atoms`,
depending on the value of `fail_type`.
|True| elements indicate **MATCHING** values
|False| elements mark **MISMATCHES**
Raises
------
~exceptions.ValueError
If a pair of coords & atoms array lengths is inconsistent:
.. code-block:: python
if len(c1) != 3 * len(a1) or len(c2) != 3 * len(a2):
raise ValueError(...)
"""
# Import(s)
from ..const import atom_num
import numpy as np
from ..const import EnumCheckGeomMismatch as ECGM
# Initialize return value to success condition
match = True
#** Check coords for suitable shape. Assume 1-D np.arrays.
if not len(c1.shape) == 1:
# Cannot coerce to vector; complain.
raise ValueError(("'c1' is not a vector."))
## end if
if not len(c2.shape) == 1:
# Cannot coerce to vector; complain.
raise ValueError(("'c2' is not a vector."))
## end if
#** Check atoms for suitable shape. Assume lists of strings, so
# convert to np.array to check.
if not len(a1.shape) == 1:
# Not a vector; complain
raise ValueError(("'a1' is not a simple list."))
## end if
if not len(a2.shape) == 1:
# Not a vector; complain.
raise ValueError(("'a2' is not a simple list."))
## end if
#** Confirm proper lengths of coords vs atoms
if not c1.shape[0] == 3 * a1.shape[0]:
raise ValueError("len(c1) != 3*len(a1)")
## end if
if not c2.shape[0] == 3 * a2.shape[0]:
raise ValueError("len(c2) != 3*len(a2)")
## end if
#** Confirm matching lengths of coords and atoms w/corresponding
# objects among the two geometries
if not c1.shape[0] == c2.shape[0]:
match = False
fail_type = ECGM.DIMENSION
return match, fail_type, None
## end if
#** Element-wise check for geometry match to within 'tol'
fail_loc = np.less_equal(np.abs(np.subtract(c1,c2)), tol)
if sum(fail_loc) != c2.shape[0]:
# Count of matching coordinates should equal the number of
# coordinates. If not, complain with 'coord_mismatch' fail type.
match = False
fail_type = ECGM.COORDS
return match, fail_type, fail_loc
## end if
#** Element-wise check for atoms match. Quietly convert both input and
# instance atom arrays to atom_nums to allow np.equals comparison.
if np.issubdtype(a1.dtype, np.dtype('str')):
# Presume atomic symbol data and attempt conversion
a1 = np.array([atom_num[e] for e in a1])
## end if
if np.issubdtype(a2.dtype, np.dtype('str')):
# Presume atomic symbol data and attempt conversion
a2 = np.array([atom_num[e] for e in a2])
## end if
fail_loc = np.equal(a1, a2)
#** Perform the test to ensure all atoms match.
if sum(fail_loc) != a2.shape[0]:
# Count of matching atoms should equal number of atoms. If not,
# complain with the 'atom_mismatch' fail type.
match = False
fail_type = ECGM.ATOMS
return match, fail_type, fail_loc
#** If reached here, all tests passed; return success.
return match, None, None | Check for consistency of two geometries and atom symbol lists
Cartesian coordinates are considered consistent with the input
coords if each component matches to within `tol`. If coords or
atoms vectors are passed that are of mismatched lengths, a
|False| value is returned.
Both coords vectors must be three times the length of the atoms vectors
or a :exc:`~exceptions.ValueError` is raised.
Parameters
----------
c1
length-3N |npfloat_| --
Vector of first set of stacked 'lab-frame' Cartesian coordinates
a1
length-N |str| or |int| --
Vector of first set of atom symbols or atomic numbers
c2
length-3N |npfloat_| --
Vector of second set of stacked 'lab-frame' Cartesian coordinates
a2
length-N |str| or |int| --
Vector of second set of atom symbols or atomic numbers
tol
|float|, optional --
Tolerance for acceptable deviation of each geometry coordinate
from that in the reference instance to still be considered
matching. Default value is specified by
:attr:`opan.const.DEF.XYZ_COORD_MATCH_TOL`)
Returns
-------
match
|bool| --
Whether input coords and atoms match (|True|) or
not (|False|)
fail_type
:class:`~opan.const.EnumCheckGeomMismatch` or |None|
-- Type of check failure
If `match` == |True|:
Returns as |None|
If `match` == |False|:
An :class:`~opan.const.EnumCheckGeomMismatch` value
indicating the reason for the failed match:
:attr:`~opan.const.EnumCheckGeomMismatch.DIMENSION`
-- Mismatch in geometry size (number of atoms)
:attr:`~opan.const.EnumCheckGeomMismatch.COORDS`
-- Mismatch in one or more coordinates
:attr:`~opan.const.EnumCheckGeomMismatch.ATOMS`
-- Mismatch in one or more atoms
fail_loc
length-3N |bool| or length-N |bool| or |None| --
Mismatched elements
If `match` == |True|:
Returns as |None|
If `match` == |False|:
For "array-level" problems such as a dimension mismatch, a
|None| value is returned.
For "element-level" problems, a vector is returned
indicating positions of mismatch in either `coords` or `atoms`,
depending on the value of `fail_type`.
|True| elements indicate **MATCHING** values
|False| elements mark **MISMATCHES**
Raises
------
~exceptions.ValueError
If a pair of coords & atoms array lengths is inconsistent:
.. code-block:: python
if len(c1) != 3 * len(a1) or len(c2) != 3 * len(a2):
raise ValueError(...) | Below is the the instruction that describes the task:
### Input:
Check for consistency of two geometries and atom symbol lists
Cartesian coordinates are considered consistent with the input
coords if each component matches to within `tol`. If coords or
atoms vectors are passed that are of mismatched lengths, a
|False| value is returned.
Both coords vectors must be three times the length of the atoms vectors
or a :exc:`~exceptions.ValueError` is raised.
Parameters
----------
c1
length-3N |npfloat_| --
Vector of first set of stacked 'lab-frame' Cartesian coordinates
a1
length-N |str| or |int| --
Vector of first set of atom symbols or atomic numbers
c2
length-3N |npfloat_| --
Vector of second set of stacked 'lab-frame' Cartesian coordinates
a2
length-N |str| or |int| --
Vector of second set of atom symbols or atomic numbers
tol
|float|, optional --
Tolerance for acceptable deviation of each geometry coordinate
from that in the reference instance to still be considered
matching. Default value is specified by
:attr:`opan.const.DEF.XYZ_COORD_MATCH_TOL`)
Returns
-------
match
|bool| --
Whether input coords and atoms match (|True|) or
not (|False|)
fail_type
:class:`~opan.const.EnumCheckGeomMismatch` or |None|
-- Type of check failure
If `match` == |True|:
Returns as |None|
If `match` == |False|:
An :class:`~opan.const.EnumCheckGeomMismatch` value
indicating the reason for the failed match:
:attr:`~opan.const.EnumCheckGeomMismatch.DIMENSION`
-- Mismatch in geometry size (number of atoms)
:attr:`~opan.const.EnumCheckGeomMismatch.COORDS`
-- Mismatch in one or more coordinates
:attr:`~opan.const.EnumCheckGeomMismatch.ATOMS`
-- Mismatch in one or more atoms
fail_loc
length-3N |bool| or length-N |bool| or |None| --
Mismatched elements
If `match` == |True|:
Returns as |None|
If `match` == |False|:
For "array-level" problems such as a dimension mismatch, a
|None| value is returned.
For "element-level" problems, a vector is returned
indicating positions of mismatch in either `coords` or `atoms`,
depending on the value of `fail_type`.
|True| elements indicate **MATCHING** values
|False| elements mark **MISMATCHES**
Raises
------
~exceptions.ValueError
If a pair of coords & atoms array lengths is inconsistent:
.. code-block:: python
if len(c1) != 3 * len(a1) or len(c2) != 3 * len(a2):
raise ValueError(...)
### Response:
def check_geom(c1, a1, c2, a2, tol=_DEF.XYZ_COORD_MATCH_TOL):
""" Check for consistency of two geometries and atom symbol lists
Cartesian coordinates are considered consistent with the input
coords if each component matches to within `tol`. If coords or
atoms vectors are passed that are of mismatched lengths, a
|False| value is returned.
Both coords vectors must be three times the length of the atoms vectors
or a :exc:`~exceptions.ValueError` is raised.
Parameters
----------
c1
length-3N |npfloat_| --
Vector of first set of stacked 'lab-frame' Cartesian coordinates
a1
length-N |str| or |int| --
Vector of first set of atom symbols or atomic numbers
c2
length-3N |npfloat_| --
Vector of second set of stacked 'lab-frame' Cartesian coordinates
a2
length-N |str| or |int| --
Vector of second set of atom symbols or atomic numbers
tol
|float|, optional --
Tolerance for acceptable deviation of each geometry coordinate
from that in the reference instance to still be considered
matching. Default value is specified by
:attr:`opan.const.DEF.XYZ_COORD_MATCH_TOL`)
Returns
-------
match
|bool| --
Whether input coords and atoms match (|True|) or
not (|False|)
fail_type
:class:`~opan.const.EnumCheckGeomMismatch` or |None|
-- Type of check failure
If `match` == |True|:
Returns as |None|
If `match` == |False|:
An :class:`~opan.const.EnumCheckGeomMismatch` value
indicating the reason for the failed match:
:attr:`~opan.const.EnumCheckGeomMismatch.DIMENSION`
-- Mismatch in geometry size (number of atoms)
:attr:`~opan.const.EnumCheckGeomMismatch.COORDS`
-- Mismatch in one or more coordinates
:attr:`~opan.const.EnumCheckGeomMismatch.ATOMS`
-- Mismatch in one or more atoms
fail_loc
length-3N |bool| or length-N |bool| or |None| --
Mismatched elements
If `match` == |True|:
Returns as |None|
If `match` == |False|:
For "array-level" problems such as a dimension mismatch, a
|None| value is returned.
For "element-level" problems, a vector is returned
indicating positions of mismatch in either `coords` or `atoms`,
depending on the value of `fail_type`.
|True| elements indicate **MATCHING** values
|False| elements mark **MISMATCHES**
Raises
------
~exceptions.ValueError
If a pair of coords & atoms array lengths is inconsistent:
.. code-block:: python
if len(c1) != 3 * len(a1) or len(c2) != 3 * len(a2):
raise ValueError(...)
"""
# Import(s)
from ..const import atom_num
import numpy as np
from ..const import EnumCheckGeomMismatch as ECGM
# Initialize return value to success condition
match = True
#** Check coords for suitable shape. Assume 1-D np.arrays.
if not len(c1.shape) == 1:
# Cannot coerce to vector; complain.
raise ValueError(("'c1' is not a vector."))
## end if
if not len(c2.shape) == 1:
# Cannot coerce to vector; complain.
raise ValueError(("'c2' is not a vector."))
## end if
#** Check atoms for suitable shape. Assume lists of strings, so
# convert to np.array to check.
if not len(a1.shape) == 1:
# Not a vector; complain
raise ValueError(("'a1' is not a simple list."))
## end if
if not len(a2.shape) == 1:
# Not a vector; complain.
raise ValueError(("'a2' is not a simple list."))
## end if
#** Confirm proper lengths of coords vs atoms
if not c1.shape[0] == 3 * a1.shape[0]:
raise ValueError("len(c1) != 3*len(a1)")
## end if
if not c2.shape[0] == 3 * a2.shape[0]:
raise ValueError("len(c2) != 3*len(a2)")
## end if
#** Confirm matching lengths of coords and atoms w/corresponding
# objects among the two geometries
if not c1.shape[0] == c2.shape[0]:
match = False
fail_type = ECGM.DIMENSION
return match, fail_type, None
## end if
#** Element-wise check for geometry match to within 'tol'
fail_loc = np.less_equal(np.abs(np.subtract(c1,c2)), tol)
if sum(fail_loc) != c2.shape[0]:
# Count of matching coordinates should equal the number of
# coordinates. If not, complain with 'coord_mismatch' fail type.
match = False
fail_type = ECGM.COORDS
return match, fail_type, fail_loc
## end if
#** Element-wise check for atoms match. Quietly convert both input and
# instance atom arrays to atom_nums to allow np.equals comparison.
if np.issubdtype(a1.dtype, np.dtype('str')):
# Presume atomic symbol data and attempt conversion
a1 = np.array([atom_num[e] for e in a1])
## end if
if np.issubdtype(a2.dtype, np.dtype('str')):
# Presume atomic symbol data and attempt conversion
a2 = np.array([atom_num[e] for e in a2])
## end if
fail_loc = np.equal(a1, a2)
#** Perform the test to ensure all atoms match.
if sum(fail_loc) != a2.shape[0]:
# Count of matching atoms should equal number of atoms. If not,
# complain with the 'atom_mismatch' fail type.
match = False
fail_type = ECGM.ATOMS
return match, fail_type, fail_loc
#** If reached here, all tests passed; return success.
return match, None, None |
def find(self, item_id=None):
"Recursively find a menu item by its id (useful for event handlers)"
for it in self:
found = it.find(item_id)
if found:
return found | Recursively find a menu item by its id (useful for event handlers) | Below is the the instruction that describes the task:
### Input:
Recursively find a menu item by its id (useful for event handlers)
### Response:
def find(self, item_id=None):
"Recursively find a menu item by its id (useful for event handlers)"
for it in self:
found = it.find(item_id)
if found:
return found |
def new(ruletype, **kwargs):
"""Instantiate a new build rule based on kwargs.
Appropriate args list varies with rule type.
Minimum args required:
[... fill this in ...]
"""
try:
ruleclass = TYPE_MAP[ruletype]
except KeyError:
raise error.InvalidRule('Unrecognized rule type: %s' % ruletype)
try:
return ruleclass(**kwargs)
except TypeError:
log.error('BADNESS. ruletype: %s, data: %s', ruletype, kwargs)
raise | Instantiate a new build rule based on kwargs.
Appropriate args list varies with rule type.
Minimum args required:
[... fill this in ...] | Below is the the instruction that describes the task:
### Input:
Instantiate a new build rule based on kwargs.
Appropriate args list varies with rule type.
Minimum args required:
[... fill this in ...]
### Response:
def new(ruletype, **kwargs):
"""Instantiate a new build rule based on kwargs.
Appropriate args list varies with rule type.
Minimum args required:
[... fill this in ...]
"""
try:
ruleclass = TYPE_MAP[ruletype]
except KeyError:
raise error.InvalidRule('Unrecognized rule type: %s' % ruletype)
try:
return ruleclass(**kwargs)
except TypeError:
log.error('BADNESS. ruletype: %s, data: %s', ruletype, kwargs)
raise |
def reserve_ports(self, locations, force=False, reset=True):
""" Reserve ports and reset factory defaults.
XenaManager-2G -> Reserve/Relinquish Port.
XenaManager-2G -> Reserve Port.
:param locations: list of ports locations in the form <ip/slot/port> to reserve
:param force: True - take forcefully. False - fail if port is reserved by other user
:param reset: True - reset port, False - leave port configuration
:return: ports dictionary (index: object)
"""
for location in locations:
ip, module, port = location.split('/')
self.chassis_list[ip].reserve_ports(['{}/{}'.format(module, port)], force, reset)
return self.ports | Reserve ports and reset factory defaults.
XenaManager-2G -> Reserve/Relinquish Port.
XenaManager-2G -> Reserve Port.
:param locations: list of ports locations in the form <ip/slot/port> to reserve
:param force: True - take forcefully. False - fail if port is reserved by other user
:param reset: True - reset port, False - leave port configuration
:return: ports dictionary (index: object) | Below is the the instruction that describes the task:
### Input:
Reserve ports and reset factory defaults.
XenaManager-2G -> Reserve/Relinquish Port.
XenaManager-2G -> Reserve Port.
:param locations: list of ports locations in the form <ip/slot/port> to reserve
:param force: True - take forcefully. False - fail if port is reserved by other user
:param reset: True - reset port, False - leave port configuration
:return: ports dictionary (index: object)
### Response:
def reserve_ports(self, locations, force=False, reset=True):
""" Reserve ports and reset factory defaults.
XenaManager-2G -> Reserve/Relinquish Port.
XenaManager-2G -> Reserve Port.
:param locations: list of ports locations in the form <ip/slot/port> to reserve
:param force: True - take forcefully. False - fail if port is reserved by other user
:param reset: True - reset port, False - leave port configuration
:return: ports dictionary (index: object)
"""
for location in locations:
ip, module, port = location.split('/')
self.chassis_list[ip].reserve_ports(['{}/{}'.format(module, port)], force, reset)
return self.ports |
def calculate_extra_keys(self, buffer):
"""
Determine extra keys pressed since the given buffer was built
"""
extraBs = len(self.inputStack) - len(buffer)
if extraBs > 0:
extraKeys = ''.join(self.inputStack[len(buffer)])
else:
extraBs = 0
extraKeys = ''
return extraBs, extraKeys | Determine extra keys pressed since the given buffer was built | Below is the the instruction that describes the task:
### Input:
Determine extra keys pressed since the given buffer was built
### Response:
def calculate_extra_keys(self, buffer):
"""
Determine extra keys pressed since the given buffer was built
"""
extraBs = len(self.inputStack) - len(buffer)
if extraBs > 0:
extraKeys = ''.join(self.inputStack[len(buffer)])
else:
extraBs = 0
extraKeys = ''
return extraBs, extraKeys |
def create_from_file_extension(cls, file_extension):
"""
Create a table writer class instance from a file extension.
Supported file extensions are as follows:
================== ===================================
Extension Writer Class
================== ===================================
``".csv"`` :py:class:`~.CsvTableWriter`
``".htm"`` :py:class:`~.HtmlTableWriter`
``".html"`` :py:class:`~.HtmlTableWriter`
``".js"`` :py:class:`~.JavaScriptTableWriter`
``".json"`` :py:class:`~.JsonTableWriter`
``".jsonl"`` :py:class:`~.JsonLinesTableWriter`
``".ltsv"`` :py:class:`~.LtsvTableWriter`
``".ldjson"`` :py:class:`~.JsonLinesTableWriter`
``".md"`` :py:class:`~.MarkdownTableWriter`
``".ndjson"`` :py:class:`~.JsonLinesTableWriter`
``".py"`` :py:class:`~.PythonCodeTableWriter`
``".rst"`` :py:class:`~.RstGridTableWriter`
``".tsv"`` :py:class:`~.TsvTableWriter`
``".xls"`` :py:class:`~.ExcelXlsTableWriter`
``".xlsx"`` :py:class:`~.ExcelXlsxTableWriter`
``".sqlite"`` :py:class:`~.SqliteTableWriter`
``".sqlite3"`` :py:class:`~.SqliteTableWriter`
``".tsv"`` :py:class:`~.TsvTableWriter`
``".toml"`` :py:class:`~.TomlTableWriter`
================== ===================================
:param str file_extension:
File extension string (case insensitive).
:return:
Writer instance that coincides with the ``file_extension``.
:rtype:
:py:class:`~pytablewriter.writer._table_writer.TableWriterInterface`
:raises pytablewriter.WriterNotFoundError:
|WriterNotFoundError_desc| the file extension.
"""
ext = os.path.splitext(file_extension)[1]
if typepy.is_null_string(ext):
file_extension = file_extension
else:
file_extension = ext
file_extension = file_extension.lstrip(".").lower()
for table_format in TableFormat:
if file_extension not in table_format.file_extensions:
continue
if table_format.format_attribute & FormatAttr.SECONDARY_EXT:
continue
return table_format.writer_class()
raise WriterNotFoundError(
"\n".join(
[
"{:s} (unknown file extension).".format(file_extension),
"",
"acceptable file extensions are: {}.".format(", ".join(cls.get_extensions())),
]
)
) | Create a table writer class instance from a file extension.
Supported file extensions are as follows:
================== ===================================
Extension Writer Class
================== ===================================
``".csv"`` :py:class:`~.CsvTableWriter`
``".htm"`` :py:class:`~.HtmlTableWriter`
``".html"`` :py:class:`~.HtmlTableWriter`
``".js"`` :py:class:`~.JavaScriptTableWriter`
``".json"`` :py:class:`~.JsonTableWriter`
``".jsonl"`` :py:class:`~.JsonLinesTableWriter`
``".ltsv"`` :py:class:`~.LtsvTableWriter`
``".ldjson"`` :py:class:`~.JsonLinesTableWriter`
``".md"`` :py:class:`~.MarkdownTableWriter`
``".ndjson"`` :py:class:`~.JsonLinesTableWriter`
``".py"`` :py:class:`~.PythonCodeTableWriter`
``".rst"`` :py:class:`~.RstGridTableWriter`
``".tsv"`` :py:class:`~.TsvTableWriter`
``".xls"`` :py:class:`~.ExcelXlsTableWriter`
``".xlsx"`` :py:class:`~.ExcelXlsxTableWriter`
``".sqlite"`` :py:class:`~.SqliteTableWriter`
``".sqlite3"`` :py:class:`~.SqliteTableWriter`
``".tsv"`` :py:class:`~.TsvTableWriter`
``".toml"`` :py:class:`~.TomlTableWriter`
================== ===================================
:param str file_extension:
File extension string (case insensitive).
:return:
Writer instance that coincides with the ``file_extension``.
:rtype:
:py:class:`~pytablewriter.writer._table_writer.TableWriterInterface`
:raises pytablewriter.WriterNotFoundError:
|WriterNotFoundError_desc| the file extension. | Below is the the instruction that describes the task:
### Input:
Create a table writer class instance from a file extension.
Supported file extensions are as follows:
================== ===================================
Extension Writer Class
================== ===================================
``".csv"`` :py:class:`~.CsvTableWriter`
``".htm"`` :py:class:`~.HtmlTableWriter`
``".html"`` :py:class:`~.HtmlTableWriter`
``".js"`` :py:class:`~.JavaScriptTableWriter`
``".json"`` :py:class:`~.JsonTableWriter`
``".jsonl"`` :py:class:`~.JsonLinesTableWriter`
``".ltsv"`` :py:class:`~.LtsvTableWriter`
``".ldjson"`` :py:class:`~.JsonLinesTableWriter`
``".md"`` :py:class:`~.MarkdownTableWriter`
``".ndjson"`` :py:class:`~.JsonLinesTableWriter`
``".py"`` :py:class:`~.PythonCodeTableWriter`
``".rst"`` :py:class:`~.RstGridTableWriter`
``".tsv"`` :py:class:`~.TsvTableWriter`
``".xls"`` :py:class:`~.ExcelXlsTableWriter`
``".xlsx"`` :py:class:`~.ExcelXlsxTableWriter`
``".sqlite"`` :py:class:`~.SqliteTableWriter`
``".sqlite3"`` :py:class:`~.SqliteTableWriter`
``".tsv"`` :py:class:`~.TsvTableWriter`
``".toml"`` :py:class:`~.TomlTableWriter`
================== ===================================
:param str file_extension:
File extension string (case insensitive).
:return:
Writer instance that coincides with the ``file_extension``.
:rtype:
:py:class:`~pytablewriter.writer._table_writer.TableWriterInterface`
:raises pytablewriter.WriterNotFoundError:
|WriterNotFoundError_desc| the file extension.
### Response:
def create_from_file_extension(cls, file_extension):
"""
Create a table writer class instance from a file extension.
Supported file extensions are as follows:
================== ===================================
Extension Writer Class
================== ===================================
``".csv"`` :py:class:`~.CsvTableWriter`
``".htm"`` :py:class:`~.HtmlTableWriter`
``".html"`` :py:class:`~.HtmlTableWriter`
``".js"`` :py:class:`~.JavaScriptTableWriter`
``".json"`` :py:class:`~.JsonTableWriter`
``".jsonl"`` :py:class:`~.JsonLinesTableWriter`
``".ltsv"`` :py:class:`~.LtsvTableWriter`
``".ldjson"`` :py:class:`~.JsonLinesTableWriter`
``".md"`` :py:class:`~.MarkdownTableWriter`
``".ndjson"`` :py:class:`~.JsonLinesTableWriter`
``".py"`` :py:class:`~.PythonCodeTableWriter`
``".rst"`` :py:class:`~.RstGridTableWriter`
``".tsv"`` :py:class:`~.TsvTableWriter`
``".xls"`` :py:class:`~.ExcelXlsTableWriter`
``".xlsx"`` :py:class:`~.ExcelXlsxTableWriter`
``".sqlite"`` :py:class:`~.SqliteTableWriter`
``".sqlite3"`` :py:class:`~.SqliteTableWriter`
``".tsv"`` :py:class:`~.TsvTableWriter`
``".toml"`` :py:class:`~.TomlTableWriter`
================== ===================================
:param str file_extension:
File extension string (case insensitive).
:return:
Writer instance that coincides with the ``file_extension``.
:rtype:
:py:class:`~pytablewriter.writer._table_writer.TableWriterInterface`
:raises pytablewriter.WriterNotFoundError:
|WriterNotFoundError_desc| the file extension.
"""
ext = os.path.splitext(file_extension)[1]
if typepy.is_null_string(ext):
file_extension = file_extension
else:
file_extension = ext
file_extension = file_extension.lstrip(".").lower()
for table_format in TableFormat:
if file_extension not in table_format.file_extensions:
continue
if table_format.format_attribute & FormatAttr.SECONDARY_EXT:
continue
return table_format.writer_class()
raise WriterNotFoundError(
"\n".join(
[
"{:s} (unknown file extension).".format(file_extension),
"",
"acceptable file extensions are: {}.".format(", ".join(cls.get_extensions())),
]
)
) |
def parse(cls, prefix):
"""
Extracts informations from `prefix`.
:param prefix: prefix with format ``<servername>|<nick>['!'<user>]['@'<host>]``.
:type prefix: unicode
:return: extracted informations (nickname or host, mode, username, host).
:rtype: tuple(str, str, str, str)
"""
try:
nick, rest = prefix.split(u'!')
except ValueError:
return prefix, None, None, None
try:
mode, rest = rest.split(u'=')
except ValueError:
mode, rest = None, rest
try:
user, host = rest.split(u'@')
except ValueError:
return nick, mode, rest, None
return nick, mode, user, host | Extracts informations from `prefix`.
:param prefix: prefix with format ``<servername>|<nick>['!'<user>]['@'<host>]``.
:type prefix: unicode
:return: extracted informations (nickname or host, mode, username, host).
:rtype: tuple(str, str, str, str) | Below is the the instruction that describes the task:
### Input:
Extracts informations from `prefix`.
:param prefix: prefix with format ``<servername>|<nick>['!'<user>]['@'<host>]``.
:type prefix: unicode
:return: extracted informations (nickname or host, mode, username, host).
:rtype: tuple(str, str, str, str)
### Response:
def parse(cls, prefix):
"""
Extracts informations from `prefix`.
:param prefix: prefix with format ``<servername>|<nick>['!'<user>]['@'<host>]``.
:type prefix: unicode
:return: extracted informations (nickname or host, mode, username, host).
:rtype: tuple(str, str, str, str)
"""
try:
nick, rest = prefix.split(u'!')
except ValueError:
return prefix, None, None, None
try:
mode, rest = rest.split(u'=')
except ValueError:
mode, rest = None, rest
try:
user, host = rest.split(u'@')
except ValueError:
return nick, mode, rest, None
return nick, mode, user, host |
def _clean_prior(self):
""" Cleans up from a previous task that didn't exit cleanly.
Returns ``True`` if previous task was cleaned.
"""
if self._loaded:
try:
pid_file = daemon.get_daemon_pidfile(self)
# check if it exists so we don't raise
if os.path.isfile(pid_file):
# read pid from file
pid = int(common.readfile(pid_file))
# check if pid file is stale
if pid and not daemon.pid_exists(pid):
common.safe_remove_file(pid_file)
raise ValueError
except (ValueError, TypeError):
self._clean()
return True
return False | Cleans up from a previous task that didn't exit cleanly.
Returns ``True`` if previous task was cleaned. | Below is the the instruction that describes the task:
### Input:
Cleans up from a previous task that didn't exit cleanly.
Returns ``True`` if previous task was cleaned.
### Response:
def _clean_prior(self):
""" Cleans up from a previous task that didn't exit cleanly.
Returns ``True`` if previous task was cleaned.
"""
if self._loaded:
try:
pid_file = daemon.get_daemon_pidfile(self)
# check if it exists so we don't raise
if os.path.isfile(pid_file):
# read pid from file
pid = int(common.readfile(pid_file))
# check if pid file is stale
if pid and not daemon.pid_exists(pid):
common.safe_remove_file(pid_file)
raise ValueError
except (ValueError, TypeError):
self._clean()
return True
return False |
def create_or_update_user(self, username, policies=None, groups=None, mount_point=DEFAULT_MOUNT_POINT):
"""
Create or update LDAP users policies and group associations.
Supported methods:
POST: /auth/{mount_point}/users/{username}. Produces: 204 (empty body)
:param username: The username of the LDAP user
:type username: str | unicode
:param policies: List of policies associated with the user. This parameter is transformed to a comma-delimited
string before being passed to Vault.
:type policies: str | unicode
:param groups: List of groups associated with the user. This parameter is transformed to a comma-delimited
string before being passed to Vault.
:type groups: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the create_or_update_user request.
:rtype: requests.Response
"""
if policies is None:
policies = []
if groups is None:
groups = []
list_required_params = {
'policies': policies,
'groups': groups,
}
for param_name, param_arg in list_required_params.items():
if not isinstance(param_arg, list):
error_msg = '"{param_name}" argument must be an instance of list or None, "{param_type}" provided.'.format(
param_name=param_name,
param_type=type(param_arg),
)
raise exceptions.ParamValidationError(error_msg)
params = {
'policies': ','.join(policies),
'groups': ','.join(groups),
}
api_path = '/v1/auth/{mount_point}/users/{username}'.format(
mount_point=mount_point,
username=username,
)
return self._adapter.post(
url=api_path,
json=params,
) | Create or update LDAP users policies and group associations.
Supported methods:
POST: /auth/{mount_point}/users/{username}. Produces: 204 (empty body)
:param username: The username of the LDAP user
:type username: str | unicode
:param policies: List of policies associated with the user. This parameter is transformed to a comma-delimited
string before being passed to Vault.
:type policies: str | unicode
:param groups: List of groups associated with the user. This parameter is transformed to a comma-delimited
string before being passed to Vault.
:type groups: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the create_or_update_user request.
:rtype: requests.Response | Below is the the instruction that describes the task:
### Input:
Create or update LDAP users policies and group associations.
Supported methods:
POST: /auth/{mount_point}/users/{username}. Produces: 204 (empty body)
:param username: The username of the LDAP user
:type username: str | unicode
:param policies: List of policies associated with the user. This parameter is transformed to a comma-delimited
string before being passed to Vault.
:type policies: str | unicode
:param groups: List of groups associated with the user. This parameter is transformed to a comma-delimited
string before being passed to Vault.
:type groups: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the create_or_update_user request.
:rtype: requests.Response
### Response:
def create_or_update_user(self, username, policies=None, groups=None, mount_point=DEFAULT_MOUNT_POINT):
"""
Create or update LDAP users policies and group associations.
Supported methods:
POST: /auth/{mount_point}/users/{username}. Produces: 204 (empty body)
:param username: The username of the LDAP user
:type username: str | unicode
:param policies: List of policies associated with the user. This parameter is transformed to a comma-delimited
string before being passed to Vault.
:type policies: str | unicode
:param groups: List of groups associated with the user. This parameter is transformed to a comma-delimited
string before being passed to Vault.
:type groups: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the create_or_update_user request.
:rtype: requests.Response
"""
if policies is None:
policies = []
if groups is None:
groups = []
list_required_params = {
'policies': policies,
'groups': groups,
}
for param_name, param_arg in list_required_params.items():
if not isinstance(param_arg, list):
error_msg = '"{param_name}" argument must be an instance of list or None, "{param_type}" provided.'.format(
param_name=param_name,
param_type=type(param_arg),
)
raise exceptions.ParamValidationError(error_msg)
params = {
'policies': ','.join(policies),
'groups': ','.join(groups),
}
api_path = '/v1/auth/{mount_point}/users/{username}'.format(
mount_point=mount_point,
username=username,
)
return self._adapter.post(
url=api_path,
json=params,
) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.