code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def _list_of_dictionaries_to_csv(
self,
csvType="human"):
"""Convert a python list of dictionaries to pretty csv output
**Key Arguments:**
- ``csvType`` -- human, machine or reST
**Return:**
- ``output`` -- the contents of a CSV file
"""
self.log.debug(
'starting the ``_list_of_dictionaries_to_csv`` function')
if not len(self.listOfDictionaries):
return "NO MATCH"
dataCopy = copy.deepcopy(self.listOfDictionaries)
tableColumnNames = dataCopy[0].keys()
columnWidths = []
columnWidths[:] = [len(tableColumnNames[i])
for i in range(len(tableColumnNames))]
output = io.BytesIO()
# setup csv styles
if csvType == "machine":
delimiter = ","
elif csvType in ["human", "markdown"]:
delimiter = "|"
elif csvType in ["reST"]:
delimiter = "|"
if csvType in ["markdown"]:
writer = csv.writer(output, delimiter=delimiter,
quoting=csv.QUOTE_NONE, doublequote=False, quotechar='"', escapechar="\\", lineterminator="\n")
else:
writer = csv.writer(output, dialect='excel', delimiter=delimiter,
quotechar='"', quoting=csv.QUOTE_MINIMAL, lineterminator="\n")
if csvType in ["markdown"]:
dividerWriter = csv.writer(
output, delimiter="|", quoting=csv.QUOTE_NONE, doublequote=False, quotechar='"', escapechar="\\", lineterminator="\n")
else:
dividerWriter = csv.writer(output, dialect='excel', delimiter="+",
quotechar='"', quoting=csv.QUOTE_MINIMAL, lineterminator="\n")
# add column names to csv
header = []
divider = []
rstDivider = []
allRows = []
# clean up data
for row in dataCopy:
for c in tableColumnNames:
if isinstance(row[c], float) or isinstance(row[c], Decimal):
row[c] = "%0.9g" % row[c]
elif isinstance(row[c], datetime):
thisDate = str(row[c])[:10]
row[c] = "%(thisDate)s" % locals()
# set the column widths
for row in dataCopy:
for i, c in enumerate(tableColumnNames):
if len(unicode(row[c])) > columnWidths[i]:
columnWidths[i] = len(unicode(row[c]))
# table borders for human readable
if csvType in ["human", "markdown", "reST"]:
header.append("")
divider.append("")
rstDivider.append("")
for i, c in enumerate(tableColumnNames):
if csvType == "machine":
header.append(c)
elif csvType in ["human", "markdown", "reST"]:
header.append(
c.ljust(columnWidths[i] + 2).rjust(columnWidths[i] + 3))
divider.append('-' * (columnWidths[i] + 3))
rstDivider.append('=' * (columnWidths[i] + 3))
# table border for human readable
if csvType in ["human", "markdown", "reST"]:
header.append("")
divider.append("")
rstDivider.append("")
# fill in the data
for row in dataCopy:
thisRow = []
# table border for human readable
if csvType in ["human", "markdown", "reST"]:
thisRow.append("")
for i, c in enumerate(tableColumnNames):
if csvType in ["human", "markdown", "reST"]:
if row[c] == None:
row[c] = ""
row[c] = unicode(unicode(row[c]).ljust(columnWidths[i] + 2)
.rjust(columnWidths[i] + 3))
thisRow.append(row[c])
# table border for human readable
if csvType in ["human", "markdown", "reST"]:
thisRow.append("")
allRows.append(thisRow)
if csvType in ["reST"]:
allRows.append(divider)
if csvType == "machine":
writer.writerow(header)
if csvType in ["reST"]:
dividerWriter.writerow(divider)
writer.writerow(header)
dividerWriter.writerow(rstDivider)
if csvType in ["human"]:
dividerWriter.writerow(divider)
writer.writerow(header)
dividerWriter.writerow(divider)
elif csvType in ["markdown"]:
writer.writerow(header)
dividerWriter.writerow(divider)
# write out the data
writer.writerows(allRows)
# table border for human readable
if csvType in ["human"]:
dividerWriter.writerow(divider)
output = output.getvalue()
output = output.strip()
if csvType in ["markdown"]:
output = output.replace("|--", "|:-")
if csvType in ["reST"]:
output = output.replace("|--", "+--").replace("--|", "--+")
self.log.debug(
'completed the ``_list_of_dictionaries_to_csv`` function')
return output | Convert a python list of dictionaries to pretty csv output
**Key Arguments:**
- ``csvType`` -- human, machine or reST
**Return:**
- ``output`` -- the contents of a CSV file | Below is the instruction that describes the task:
### Input:
Convert a python list of dictionaries to pretty csv output
**Key Arguments:**
- ``csvType`` -- human, machine or reST
**Return:**
- ``output`` -- the contents of a CSV file
### Response:
def _list_of_dictionaries_to_csv(
self,
csvType="human"):
"""Convert a python list of dictionaries to pretty csv output
**Key Arguments:**
- ``csvType`` -- human, machine or reST
**Return:**
- ``output`` -- the contents of a CSV file
"""
self.log.debug(
'starting the ``_list_of_dictionaries_to_csv`` function')
if not len(self.listOfDictionaries):
return "NO MATCH"
dataCopy = copy.deepcopy(self.listOfDictionaries)
tableColumnNames = dataCopy[0].keys()
columnWidths = []
columnWidths[:] = [len(tableColumnNames[i])
for i in range(len(tableColumnNames))]
output = io.BytesIO()
# setup csv styles
if csvType == "machine":
delimiter = ","
elif csvType in ["human", "markdown"]:
delimiter = "|"
elif csvType in ["reST"]:
delimiter = "|"
if csvType in ["markdown"]:
writer = csv.writer(output, delimiter=delimiter,
quoting=csv.QUOTE_NONE, doublequote=False, quotechar='"', escapechar="\\", lineterminator="\n")
else:
writer = csv.writer(output, dialect='excel', delimiter=delimiter,
quotechar='"', quoting=csv.QUOTE_MINIMAL, lineterminator="\n")
if csvType in ["markdown"]:
dividerWriter = csv.writer(
output, delimiter="|", quoting=csv.QUOTE_NONE, doublequote=False, quotechar='"', escapechar="\\", lineterminator="\n")
else:
dividerWriter = csv.writer(output, dialect='excel', delimiter="+",
quotechar='"', quoting=csv.QUOTE_MINIMAL, lineterminator="\n")
# add column names to csv
header = []
divider = []
rstDivider = []
allRows = []
# clean up data
for row in dataCopy:
for c in tableColumnNames:
if isinstance(row[c], float) or isinstance(row[c], Decimal):
row[c] = "%0.9g" % row[c]
elif isinstance(row[c], datetime):
thisDate = str(row[c])[:10]
row[c] = "%(thisDate)s" % locals()
# set the column widths
for row in dataCopy:
for i, c in enumerate(tableColumnNames):
if len(unicode(row[c])) > columnWidths[i]:
columnWidths[i] = len(unicode(row[c]))
# table borders for human readable
if csvType in ["human", "markdown", "reST"]:
header.append("")
divider.append("")
rstDivider.append("")
for i, c in enumerate(tableColumnNames):
if csvType == "machine":
header.append(c)
elif csvType in ["human", "markdown", "reST"]:
header.append(
c.ljust(columnWidths[i] + 2).rjust(columnWidths[i] + 3))
divider.append('-' * (columnWidths[i] + 3))
rstDivider.append('=' * (columnWidths[i] + 3))
# table border for human readable
if csvType in ["human", "markdown", "reST"]:
header.append("")
divider.append("")
rstDivider.append("")
# fill in the data
for row in dataCopy:
thisRow = []
# table border for human readable
if csvType in ["human", "markdown", "reST"]:
thisRow.append("")
for i, c in enumerate(tableColumnNames):
if csvType in ["human", "markdown", "reST"]:
if row[c] == None:
row[c] = ""
row[c] = unicode(unicode(row[c]).ljust(columnWidths[i] + 2)
.rjust(columnWidths[i] + 3))
thisRow.append(row[c])
# table border for human readable
if csvType in ["human", "markdown", "reST"]:
thisRow.append("")
allRows.append(thisRow)
if csvType in ["reST"]:
allRows.append(divider)
if csvType == "machine":
writer.writerow(header)
if csvType in ["reST"]:
dividerWriter.writerow(divider)
writer.writerow(header)
dividerWriter.writerow(rstDivider)
if csvType in ["human"]:
dividerWriter.writerow(divider)
writer.writerow(header)
dividerWriter.writerow(divider)
elif csvType in ["markdown"]:
writer.writerow(header)
dividerWriter.writerow(divider)
# write out the data
writer.writerows(allRows)
# table border for human readable
if csvType in ["human"]:
dividerWriter.writerow(divider)
output = output.getvalue()
output = output.strip()
if csvType in ["markdown"]:
output = output.replace("|--", "|:-")
if csvType in ["reST"]:
output = output.replace("|--", "+--").replace("--|", "--+")
self.log.debug(
'completed the ``_list_of_dictionaries_to_csv`` function')
return output |
def topic_update(channel, topic_channel):
"""
Creates an embed UI for the topic update
Args:
channel (discord.Channel): The Discord channel to bind the embed to
topic_channel: The new topic channel
Returns:
embed: The created embed
"""
if topic_channel is not None:
try:
channel_message = "Topic channel is now `{}`.".format(topic_channel.name)
except Exception as e:
logger.exception(e)
channel_message = "Topic channel has been updated."
else:
channel_message = "Topic channel has been cleared."
# Create embed UI object
gui = ui_embed.UI(
channel,
"Topic channel updated",
channel_message,
modulename=modulename,
colour=modulecolor_info
)
return gui | Creates an embed UI for the topic update
Args:
channel (discord.Channel): The Discord channel to bind the embed to
topic_channel: The new topic channel
Returns:
embed: The created embed | Below is the instruction that describes the task:
### Input:
Creates an embed UI for the topic update
Args:
channel (discord.Channel): The Discord channel to bind the embed to
topic_channel: The new topic channel
Returns:
embed: The created embed
### Response:
def topic_update(channel, topic_channel):
"""
Creates an embed UI for the topic update
Args:
channel (discord.Channel): The Discord channel to bind the embed to
topic_channel: The new topic channel
Returns:
embed: The created embed
"""
if topic_channel is not None:
try:
channel_message = "Topic channel is now `{}`.".format(topic_channel.name)
except Exception as e:
logger.exception(e)
channel_message = "Topic channel has been updated."
else:
channel_message = "Topic channel has been cleared."
# Create embed UI object
gui = ui_embed.UI(
channel,
"Topic channel updated",
channel_message,
modulename=modulename,
colour=modulecolor_info
)
return gui |
def dropcols(df, start=None, end=None):
"""Drop columns that contain NaN within [start, end] inclusive.
A wrapper around DataFrame.dropna() that builds an easier *subset*
syntax for tseries-indexed DataFrames.
Parameters
----------
df : DataFrame
start : str or datetime, default None
start cutoff date, inclusive
end : str or datetime, default None
end cutoff date, inclusive
Example
-------
df = DataFrame(np.random.randn(10,3),
index=pd.date_range('2017', periods=10))
# Drop in some NaN
df.set_value('2017-01-04', 0, np.nan)
df.set_value('2017-01-02', 2, np.nan)
df.loc['2017-01-05':, 1] = np.nan
# only col2 will be kept--its NaN value falls before `start`
print(dropcols(df, start='2017-01-03'))
2
2017-01-01 0.12939
2017-01-02 NaN
2017-01-03 0.16596
2017-01-04 1.06442
2017-01-05 -1.87040
2017-01-06 -0.17160
2017-01-07 0.94588
2017-01-08 1.49246
2017-01-09 0.02042
2017-01-10 0.75094
"""
if isinstance(df, Series):
raise ValueError("func only applies to `pd.DataFrame`")
if start is None:
start = df.index[0]
if end is None:
end = df.index[-1]
subset = df.index[(df.index >= start) & (df.index <= end)]
return df.dropna(axis=1, subset=subset) | Drop columns that contain NaN within [start, end] inclusive.
A wrapper around DataFrame.dropna() that builds an easier *subset*
syntax for tseries-indexed DataFrames.
Parameters
----------
df : DataFrame
start : str or datetime, default None
start cutoff date, inclusive
end : str or datetime, default None
end cutoff date, inclusive
Example
-------
df = DataFrame(np.random.randn(10,3),
index=pd.date_range('2017', periods=10))
# Drop in some NaN
df.set_value('2017-01-04', 0, np.nan)
df.set_value('2017-01-02', 2, np.nan)
df.loc['2017-01-05':, 1] = np.nan
# only col2 will be kept--its NaN value falls before `start`
print(dropcols(df, start='2017-01-03'))
2
2017-01-01 0.12939
2017-01-02 NaN
2017-01-03 0.16596
2017-01-04 1.06442
2017-01-05 -1.87040
2017-01-06 -0.17160
2017-01-07 0.94588
2017-01-08 1.49246
2017-01-09 0.02042
2017-01-10 0.75094 | Below is the instruction that describes the task:
### Input:
Drop columns that contain NaN within [start, end] inclusive.
A wrapper around DataFrame.dropna() that builds an easier *subset*
syntax for tseries-indexed DataFrames.
Parameters
----------
df : DataFrame
start : str or datetime, default None
start cutoff date, inclusive
end : str or datetime, default None
end cutoff date, inclusive
Example
-------
df = DataFrame(np.random.randn(10,3),
index=pd.date_range('2017', periods=10))
# Drop in some NaN
df.set_value('2017-01-04', 0, np.nan)
df.set_value('2017-01-02', 2, np.nan)
df.loc['2017-01-05':, 1] = np.nan
# only col2 will be kept--its NaN value falls before `start`
print(dropcols(df, start='2017-01-03'))
2
2017-01-01 0.12939
2017-01-02 NaN
2017-01-03 0.16596
2017-01-04 1.06442
2017-01-05 -1.87040
2017-01-06 -0.17160
2017-01-07 0.94588
2017-01-08 1.49246
2017-01-09 0.02042
2017-01-10 0.75094
### Response:
def dropcols(df, start=None, end=None):
"""Drop columns that contain NaN within [start, end] inclusive.
A wrapper around DataFrame.dropna() that builds an easier *subset*
syntax for tseries-indexed DataFrames.
Parameters
----------
df : DataFrame
start : str or datetime, default None
start cutoff date, inclusive
end : str or datetime, default None
end cutoff date, inclusive
Example
-------
df = DataFrame(np.random.randn(10,3),
index=pd.date_range('2017', periods=10))
# Drop in some NaN
df.set_value('2017-01-04', 0, np.nan)
df.set_value('2017-01-02', 2, np.nan)
df.loc['2017-01-05':, 1] = np.nan
# only col2 will be kept--its NaN value falls before `start`
print(dropcols(df, start='2017-01-03'))
2
2017-01-01 0.12939
2017-01-02 NaN
2017-01-03 0.16596
2017-01-04 1.06442
2017-01-05 -1.87040
2017-01-06 -0.17160
2017-01-07 0.94588
2017-01-08 1.49246
2017-01-09 0.02042
2017-01-10 0.75094
"""
if isinstance(df, Series):
raise ValueError("func only applies to `pd.DataFrame`")
if start is None:
start = df.index[0]
if end is None:
end = df.index[-1]
subset = df.index[(df.index >= start) & (df.index <= end)]
return df.dropna(axis=1, subset=subset) |
def show_stack(self, message_regex="^.*$", min_level=logging.DEBUG,
limit=4096, once=True):
"""
Enable showing the origin of log messages by dumping a stack trace into
the ``stack`` logger at the :const:``logging.INFO`` severity.
:param message_regex: is a full-line regex which the message must
satisfy in order to trigger stack dump
:param min_level: the minimum severity the message must have in order to
trigger the stack dump
:param limit: Maximum stack depth to show
:param once: Only show the stack once per unique ``(logger, origin line
of code)``
"""
value = re.compile(message_regex), limit, once, min_level
self.show_stack_regexes.append(value) | Enable showing the origin of log messages by dumping a stack trace into
the ``stack`` logger at the :const:``logging.INFO`` severity.
:param message_regex: is a full-line regex which the message must
satisfy in order to trigger stack dump
:param min_level: the minimum severity the message must have in order to
trigger the stack dump
:param limit: Maximum stack depth to show
:param once: Only show the stack once per unique ``(logger, origin line
of code)`` | Below is the instruction that describes the task:
### Input:
Enable showing the origin of log messages by dumping a stack trace into
the ``stack`` logger at the :const:``logging.INFO`` severity.
:param message_regex: is a full-line regex which the message must
satisfy in order to trigger stack dump
:param min_level: the minimum severity the message must have in order to
trigger the stack dump
:param limit: Maximum stack depth to show
:param once: Only show the stack once per unique ``(logger, origin line
of code)``
### Response:
def show_stack(self, message_regex="^.*$", min_level=logging.DEBUG,
limit=4096, once=True):
"""
Enable showing the origin of log messages by dumping a stack trace into
the ``stack`` logger at the :const:``logging.INFO`` severity.
:param message_regex: is a full-line regex which the message must
satisfy in order to trigger stack dump
:param min_level: the minimum severity the message must have in order to
trigger the stack dump
:param limit: Maximum stack depth to show
:param once: Only show the stack once per unique ``(logger, origin line
of code)``
"""
value = re.compile(message_regex), limit, once, min_level
self.show_stack_regexes.append(value) |
def save(self, force_insert=False, force_update=False, commit=True):
""" Se sobreescribe el método save para crear o modificar al User en caso
que el parámetro commit sea True.
"""
usuario = super().save(commit=False)
if commit:
user = None
if self.instance.pk is not None:
user = usuario.user
else:
user = User()
user.username = self.cleaned_data['username']
user.set_password(self.cleaned_data['password'])
user.email = self.cleaned_data['email']
user.save()
usuario.user = user
usuario.save()
return usuario | Se sobreescribe el método save para crear o modificar al User en caso
que el parámetro commit sea True. | Below is the instruction that describes the task:
### Input:
Se sobreescribe el método save para crear o modificar al User en caso
que el parámetro commit sea True.
### Response:
def save(self, force_insert=False, force_update=False, commit=True):
""" Se sobreescribe el método save para crear o modificar al User en caso
que el parámetro commit sea True.
"""
usuario = super().save(commit=False)
if commit:
user = None
if self.instance.pk is not None:
user = usuario.user
else:
user = User()
user.username = self.cleaned_data['username']
user.set_password(self.cleaned_data['password'])
user.email = self.cleaned_data['email']
user.save()
usuario.user = user
usuario.save()
return usuario |
def _get_interface_name_from_hosting_port(self, port):
"""
Extract the underlying subinterface name for a port
e.g. Port-channel10.200 or GigabitEthernet0/0/0.500
"""
try:
vlan = port['hosting_info']['segmentation_id']
int_prefix = port['hosting_info']['physical_interface']
return '%s.%s' % (int_prefix, vlan)
except KeyError as e:
params = {'key': e}
raise cfg_exc.DriverExpectedKeyNotSetException(**params) | Extract the underlying subinterface name for a port
e.g. Port-channel10.200 or GigabitEthernet0/0/0.500 | Below is the instruction that describes the task:
### Input:
Extract the underlying subinterface name for a port
e.g. Port-channel10.200 or GigabitEthernet0/0/0.500
### Response:
def _get_interface_name_from_hosting_port(self, port):
"""
Extract the underlying subinterface name for a port
e.g. Port-channel10.200 or GigabitEthernet0/0/0.500
"""
try:
vlan = port['hosting_info']['segmentation_id']
int_prefix = port['hosting_info']['physical_interface']
return '%s.%s' % (int_prefix, vlan)
except KeyError as e:
params = {'key': e}
raise cfg_exc.DriverExpectedKeyNotSetException(**params) |
def from_signed_raw(cls: Type[MembershipType], signed_raw: str) -> MembershipType:
"""
Return Membership instance from signed raw format
:param signed_raw: Signed raw format string
:return:
"""
lines = signed_raw.splitlines(True)
n = 0
version = int(Membership.parse_field("Version", lines[n]))
n += 1
Membership.parse_field("Type", lines[n])
n += 1
currency = Membership.parse_field("Currency", lines[n])
n += 1
issuer = Membership.parse_field("Issuer", lines[n])
n += 1
membership_ts = BlockUID.from_str(Membership.parse_field("Block", lines[n]))
n += 1
membership_type = Membership.parse_field("Membership", lines[n])
n += 1
uid = Membership.parse_field("UserID", lines[n])
n += 1
identity_ts = BlockUID.from_str(Membership.parse_field("CertTS", lines[n]))
n += 1
signature = Membership.parse_field("Signature", lines[n])
n += 1
return cls(version, currency, issuer, membership_ts,
membership_type, uid, identity_ts, signature) | Return Membership instance from signed raw format
:param signed_raw: Signed raw format string
:return: | Below is the instruction that describes the task:
### Input:
Return Membership instance from signed raw format
:param signed_raw: Signed raw format string
:return:
### Response:
def from_signed_raw(cls: Type[MembershipType], signed_raw: str) -> MembershipType:
"""
Return Membership instance from signed raw format
:param signed_raw: Signed raw format string
:return:
"""
lines = signed_raw.splitlines(True)
n = 0
version = int(Membership.parse_field("Version", lines[n]))
n += 1
Membership.parse_field("Type", lines[n])
n += 1
currency = Membership.parse_field("Currency", lines[n])
n += 1
issuer = Membership.parse_field("Issuer", lines[n])
n += 1
membership_ts = BlockUID.from_str(Membership.parse_field("Block", lines[n]))
n += 1
membership_type = Membership.parse_field("Membership", lines[n])
n += 1
uid = Membership.parse_field("UserID", lines[n])
n += 1
identity_ts = BlockUID.from_str(Membership.parse_field("CertTS", lines[n]))
n += 1
signature = Membership.parse_field("Signature", lines[n])
n += 1
return cls(version, currency, issuer, membership_ts,
membership_type, uid, identity_ts, signature) |
def _quantize_wp(wp, nbits, qm, axis=0, **kwargs):
"""
Quantize the weight blob
:param wp: numpy.array
Weight parameters
:param nbits: int
Number of bits
:param qm:
Quantization mode
:param lut_function: (``callable function``)
Python callable representing a look-up table
Returns
-------
scale: numpy.array
Per-channel scale
bias: numpy.array
Per-channel bias
lut: numpy.array
Lookup table
quantized_wp: numpy.array
Quantized weight of same shape as wp, with dtype numpy.uint8
"""
scale = bias = lut = None
# Linear Quantization
if qm == _QUANTIZATION_MODE_LINEAR_QUANTIZATION:
qw, scale, bias = _quantize_channelwise_linear(wp, nbits, axis)
# Lookup tables
elif qm == _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS:
lut, qw = _get_kmeans_lookup_table_and_weight(nbits, wp)
elif qm == _QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE:
if 'lut_function' not in kwargs.keys():
raise Exception('Custom lookup table quantization mode '
'selected but no lookup table function passed')
lut_function = kwargs['lut_function']
if not callable(lut_function):
raise Exception('Argument for Lookup Table passed in but is '
'not callable')
try:
lut, qw = lut_function(nbits, wp)
except Exception as e:
raise Exception('{}\nCall to Lookup Table function failed'
.format(e.message))
elif qm == _QUANTIZATION_MODE_LOOKUP_TABLE_LINEAR:
lut, qw = _get_linear_lookup_table_and_weight(nbits, wp)
else:
raise NotImplementedError('Quantization method "{}" not supported'.format(qm))
quantized_wp = _np.uint8(qw)
return scale, bias, lut, quantized_wp | Quantize the weight blob
:param wp: numpy.array
Weight parameters
:param nbits: int
Number of bits
:param qm:
Quantization mode
:param lut_function: (``callable function``)
Python callable representing a look-up table
Returns
-------
scale: numpy.array
Per-channel scale
bias: numpy.array
Per-channel bias
lut: numpy.array
Lookup table
quantized_wp: numpy.array
Quantized weight of same shape as wp, with dtype numpy.uint8 | Below is the instruction that describes the task:
### Input:
Quantize the weight blob
:param wp: numpy.array
Weight parameters
:param nbits: int
Number of bits
:param qm:
Quantization mode
:param lut_function: (``callable function``)
Python callable representing a look-up table
Returns
-------
scale: numpy.array
Per-channel scale
bias: numpy.array
Per-channel bias
lut: numpy.array
Lookup table
quantized_wp: numpy.array
Quantized weight of same shape as wp, with dtype numpy.uint8
### Response:
def _quantize_wp(wp, nbits, qm, axis=0, **kwargs):
"""
Quantize the weight blob
:param wp: numpy.array
Weight parameters
:param nbits: int
Number of bits
:param qm:
Quantization mode
:param lut_function: (``callable function``)
Python callable representing a look-up table
Returns
-------
scale: numpy.array
Per-channel scale
bias: numpy.array
Per-channel bias
lut: numpy.array
Lookup table
quantized_wp: numpy.array
Quantized weight of same shape as wp, with dtype numpy.uint8
"""
scale = bias = lut = None
# Linear Quantization
if qm == _QUANTIZATION_MODE_LINEAR_QUANTIZATION:
qw, scale, bias = _quantize_channelwise_linear(wp, nbits, axis)
# Lookup tables
elif qm == _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS:
lut, qw = _get_kmeans_lookup_table_and_weight(nbits, wp)
elif qm == _QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE:
if 'lut_function' not in kwargs.keys():
raise Exception('Custom lookup table quantization mode '
'selected but no lookup table function passed')
lut_function = kwargs['lut_function']
if not callable(lut_function):
raise Exception('Argument for Lookup Table passed in but is '
'not callable')
try:
lut, qw = lut_function(nbits, wp)
except Exception as e:
raise Exception('{}\nCall to Lookup Table function failed'
.format(e.message))
elif qm == _QUANTIZATION_MODE_LOOKUP_TABLE_LINEAR:
lut, qw = _get_linear_lookup_table_and_weight(nbits, wp)
else:
raise NotImplementedError('Quantization method "{}" not supported'.format(qm))
quantized_wp = _np.uint8(qw)
return scale, bias, lut, quantized_wp |
def command(self, cmd, expected_retcode=0): # pylint: disable=invalid-name
# expected_retcode kwd argument is used in many test cases, we cannot rename it.
"""
Shortcut for sending a command to this node specifically.
:param cmd: Command to send
:param expected_retcode: Expected return code as int, default is 0
:return: CliResponse
"""
return self.bench.execute_command(self.endpoint_id, cmd, expected_retcode=expected_retcode) | Shortcut for sending a command to this node specifically.
:param cmd: Command to send
:param expected_retcode: Expected return code as int, default is 0
:return: CliResponse | Below is the instruction that describes the task:
### Input:
Shortcut for sending a command to this node specifically.
:param cmd: Command to send
:param expected_retcode: Expected return code as int, default is 0
:return: CliResponse
### Response:
def command(self, cmd, expected_retcode=0): # pylint: disable=invalid-name
# expected_retcode kwd argument is used in many test cases, we cannot rename it.
"""
Shortcut for sending a command to this node specifically.
:param cmd: Command to send
:param expected_retcode: Expected return code as int, default is 0
:return: CliResponse
"""
return self.bench.execute_command(self.endpoint_id, cmd, expected_retcode=expected_retcode) |
def get_args(args, kwargs, arg_names):
'''Get arguments as a dict.
'''
n_args = len(arg_names)
if len(args) + len(kwargs) > n_args:
raise MoultScannerError('Too many arguments supplied. Expected: {}'.format(n_args))
out_args = {}
for i, a in enumerate(args):
out_args[arg_names[i]] = a
for a in arg_names:
if a not in out_args:
out_args[a] = None
out_args.update(kwargs)
return out_args | Get arguments as a dict. | Below is the instruction that describes the task:
### Input:
Get arguments as a dict.
### Response:
def get_args(args, kwargs, arg_names):
'''Get arguments as a dict.
'''
n_args = len(arg_names)
if len(args) + len(kwargs) > n_args:
raise MoultScannerError('Too many arguments supplied. Expected: {}'.format(n_args))
out_args = {}
for i, a in enumerate(args):
out_args[arg_names[i]] = a
for a in arg_names:
if a not in out_args:
out_args[a] = None
out_args.update(kwargs)
return out_args |
def dens_floc_init(ConcAluminum, ConcClay, coag, material):
"""Return the density of the initial floc.
Initial floc is made primarily of the primary colloid and nanoglobs.
"""
return (conc_floc(ConcAluminum, ConcClay, coag).magnitude
/ frac_vol_floc_initial(ConcAluminum, ConcClay, coag, material)
) | Return the density of the initial floc.
Initial floc is made primarily of the primary colloid and nanoglobs. | Below is the instruction that describes the task:
### Input:
Return the density of the initial floc.
Initial floc is made primarily of the primary colloid and nanoglobs.
### Response:
def dens_floc_init(ConcAluminum, ConcClay, coag, material):
"""Return the density of the initial floc.
Initial floc is made primarily of the primary colloid and nanoglobs.
"""
return (conc_floc(ConcAluminum, ConcClay, coag).magnitude
/ frac_vol_floc_initial(ConcAluminum, ConcClay, coag, material)
) |
def addCallSetFromName(self, sampleName):
"""
Adds a CallSet for the specified sample name.
"""
callSet = CallSet(self, sampleName)
self.addCallSet(callSet) | Adds a CallSet for the specified sample name. | Below is the instruction that describes the task:
### Input:
Adds a CallSet for the specified sample name.
### Response:
def addCallSetFromName(self, sampleName):
"""
Adds a CallSet for the specified sample name.
"""
callSet = CallSet(self, sampleName)
self.addCallSet(callSet) |
def repositories(doc):
"""View for getting repositories"""
for repository_id, repo in doc.get('repositories', {}).items():
repo['id'] = repository_id
repo['organisation_id'] = doc['_id']
yield repository_id, repo | View for getting repositories | Below is the instruction that describes the task:
### Input:
View for getting repositories
### Response:
def repositories(doc):
"""View for getting repositories"""
for repository_id, repo in doc.get('repositories', {}).items():
repo['id'] = repository_id
repo['organisation_id'] = doc['_id']
yield repository_id, repo |
def from_series(series):
"""
Deseralize a PercentRankTransform the given pandas.Series, as returned
by `to_series()`.
Parameters
----------
series : pandas.Series
Returns
-------
PercentRankTransform
"""
result = PercentRankTransform()
result.cdf = series.values
result.bin_edges = series.index.values[1:-1]
return result | Deseralize a PercentRankTransform the given pandas.Series, as returned
by `to_series()`.
Parameters
----------
series : pandas.Series
Returns
-------
PercentRankTransform | Below is the instruction that describes the task:
### Input:
Deseralize a PercentRankTransform the given pandas.Series, as returned
by `to_series()`.
Parameters
----------
series : pandas.Series
Returns
-------
PercentRankTransform
### Response:
def from_series(series):
"""
Deseralize a PercentRankTransform the given pandas.Series, as returned
by `to_series()`.
Parameters
----------
series : pandas.Series
Returns
-------
PercentRankTransform
"""
result = PercentRankTransform()
result.cdf = series.values
result.bin_edges = series.index.values[1:-1]
return result |
def update_metadata(self, href=None, metadata=None, version=None):
"""Update the metadata in a bundle.
'href' the relative href to the metadata. May not be None.
'metadata' may be None, or an object that can be converted to a
JSON string. See API documentation for restrictions. The
conversion will take place before the API call.
'version' the object version. May be None; if not None, must be
an integer, and the version must match the version of the
bundle. If not, a 409 conflict error will cause an APIException
to be thrown.
Returns a data structure equivalent to the JSON returned by the API.
If the response status is not 2xx, throws an APIException.
If the JSON to python data struct conversion fails, throws an
APIDataException."""
# Argument error checking.
assert href is not None
assert metadata is not None
assert version is None or isinstance(version, int)
# Prepare the data we're going to include in our bundle update.
data = None
fields = {}
if version is not None:
fields['version'] = version
fields['data'] = json.dumps(metadata)
data = fields
raw_result = self.put(href, data)
if raw_result.status < 200 or raw_result.status > 202:
raise APIException(raw_result.status, raw_result.json)
# Convert the JSON to a python data struct.
return self._parse_json(raw_result.json) | Update the metadata in a bundle.
'href' the relative href to the metadata. May not be None.
'metadata' may be None, or an object that can be converted to a
JSON string. See API documentation for restrictions. The
conversion will take place before the API call.
'version' the object version. May be None; if not None, must be
an integer, and the version must match the version of the
bundle. If not, a 409 conflict error will cause an APIException
to be thrown.
Returns a data structure equivalent to the JSON returned by the API.
If the response status is not 2xx, throws an APIException.
If the JSON to python data struct conversion fails, throws an
APIDataException. | Below is the instruction that describes the task:
### Input:
Update the metadata in a bundle.
'href' the relative href to the metadata. May not be None.
'metadata' may be None, or an object that can be converted to a
JSON string. See API documentation for restrictions. The
conversion will take place before the API call.
'version' the object version. May be None; if not None, must be
an integer, and the version must match the version of the
bundle. If not, a 409 conflict error will cause an APIException
to be thrown.
Returns a data structure equivalent to the JSON returned by the API.
If the response status is not 2xx, throws an APIException.
If the JSON to python data struct conversion fails, throws an
APIDataException.
### Response:
def update_metadata(self, href=None, metadata=None, version=None):
    """Update the metadata in a bundle.

    'href' the relative href to the metadata. May not be None.
    'metadata' an object that can be converted to a JSON string. May
        not be None (the assertion below rejects it). See API
        documentation for restrictions. The conversion to JSON takes
        place before the API call.
    'version' the object version. May be None; if not None, must be
        an integer, and the version must match the version of the
        bundle. If not, a 409 conflict error will cause an APIException
        to be thrown.
    Returns a data structure equivalent to the JSON returned by the API.
    If the response status is not 2xx, throws an APIException.
    If the JSON to python data struct conversion fails, throws an
    APIDataException."""
    # Argument error checking.  NOTE: asserts are stripped under
    # ``python -O``; they guard developer mistakes, not untrusted input.
    assert href is not None
    assert metadata is not None
    assert version is None or isinstance(version, int)
    # Prepare the data we're going to include in our bundle update.
    data = None
    fields = {}
    if version is not None:
        fields['version'] = version
    # Serialize the metadata up front so the API receives a JSON string.
    fields['data'] = json.dumps(metadata)
    data = fields
    raw_result = self.put(href, data)
    # Only 200/201/202 count as success for this endpoint.
    if raw_result.status < 200 or raw_result.status > 202:
        raise APIException(raw_result.status, raw_result.json)
    # Convert the JSON to a python data struct.
    return self._parse_json(raw_result.json)
def thread_details(io_handler, thread_id, max_depth=0):
"""
Prints details about the thread with the given ID (not its name)
"""
# Normalize maximum depth
try:
max_depth = int(max_depth)
if max_depth < 1:
max_depth = None
except (ValueError, TypeError):
max_depth = None
# pylint: disable=W0212
try:
# Get the stack
thread_id = int(thread_id)
stack = sys._current_frames()[thread_id]
except KeyError:
io_handler.write_line("Unknown thread ID: {0}", thread_id)
except ValueError:
io_handler.write_line("Invalid thread ID: {0}", thread_id)
except AttributeError:
io_handler.write_line("sys._current_frames() is not available.")
else:
# Get the name
try:
name = threading._active[thread_id].name
except KeyError:
name = "<unknown>"
lines = [
"Thread ID: {0} - Name: {1}".format(thread_id, name),
"Stack trace:",
]
trace_lines = []
depth = 0
frame = stack
while frame is not None and (
max_depth is None or depth < max_depth
):
# Store the line information
trace_lines.append(format_frame_info(frame))
# Previous frame...
frame = frame.f_back
depth += 1
# Reverse the lines
trace_lines.reverse()
# Add them to the printed lines
lines.extend(trace_lines)
lines.append("")
io_handler.write("\n".join(lines)) | Prints details about the thread with the given ID (not its name) | Below is the the instruction that describes the task:
### Input:
Prints details about the thread with the given ID (not its name)
### Response:
def thread_details(io_handler, thread_id, max_depth=0):
    """
    Prints details about the thread with the given ID (not its name)

    :param io_handler: Shell I/O handler providing ``write``/``write_line``
    :param thread_id: Numeric identifier of the thread to inspect
    :param max_depth: Maximum number of stack frames to print
                      (values < 1, or non-numeric values, mean no limit)
    """
    # Normalize maximum depth: non-positive or unparsable values
    # disable the limit entirely.
    try:
        max_depth = int(max_depth)
        if max_depth < 1:
            max_depth = None
    except (ValueError, TypeError):
        max_depth = None
    # pylint: disable=W0212
    try:
        # Get the stack.  sys._current_frames() is CPython-specific,
        # hence the AttributeError fallback below.
        thread_id = int(thread_id)
        stack = sys._current_frames()[thread_id]
    except KeyError:
        io_handler.write_line("Unknown thread ID: {0}", thread_id)
    except ValueError:
        io_handler.write_line("Invalid thread ID: {0}", thread_id)
    except AttributeError:
        io_handler.write_line("sys._current_frames() is not available.")
    else:
        # Get the name from threading's private registry of live
        # threads; the ID may have disappeared since the frame lookup.
        try:
            name = threading._active[thread_id].name
        except KeyError:
            name = "<unknown>"
        lines = [
            "Thread ID: {0} - Name: {1}".format(thread_id, name),
            "Stack trace:",
        ]
        trace_lines = []
        depth = 0
        frame = stack
        # Walk from the innermost frame outward, honoring max_depth.
        while frame is not None and (
            max_depth is None or depth < max_depth
        ):
            # Store the line information
            trace_lines.append(format_frame_info(frame))
            # Previous frame...
            frame = frame.f_back
            depth += 1
        # Reverse the lines so the output reads oldest frame first.
        trace_lines.reverse()
        # Add them to the printed lines
        lines.extend(trace_lines)
        lines.append("")
        io_handler.write("\n".join(lines))
def advance_dialog(self, *args):
"""Try to display the next dialog described in my ``todo``."""
self.clear_widgets()
try:
self._update_dialog(self.todo[self.idx])
except IndexError:
pass | Try to display the next dialog described in my ``todo``. | Below is the the instruction that describes the task:
### Input:
Try to display the next dialog described in my ``todo``.
### Response:
def advance_dialog(self, *args):
    """Show the dialog described at the current ``idx`` of ``todo``.

    Clears existing widgets first; silently does nothing when the
    index is out of range.
    """
    self.clear_widgets()
    try:
        current = self.todo[self.idx]
        self._update_dialog(current)
    except IndexError:
        pass
def _indent(code, by=1):
"""Indents every nonempty line of the given code."""
return "".join(
(" " * by if line else "") + line for line in code.splitlines(True)
) | Indents every nonempty line of the given code. | Below is the the instruction that describes the task:
### Input:
Indents every nonempty line of the given code.
### Response:
def _indent(code, by=1):
"""Indents every nonempty line of the given code."""
return "".join(
(" " * by if line else "") + line for line in code.splitlines(True)
) |
def create_global_step(session: tf.Session) -> tf.Variable:
"""
Creates the Tensorflow 'global_step' variable (see `MonitorContext.global_step_tensor`).
:param session: Tensorflow session the optimiser is running in
:return: The variable tensor.
"""
global_step_tensor = tf.Variable(0, trainable=False, name="global_step")
session.run(global_step_tensor.initializer)
return global_step_tensor | Creates the Tensorflow 'global_step' variable (see `MonitorContext.global_step_tensor`).
:param session: Tensorflow session the optimiser is running in
:return: The variable tensor. | Below is the the instruction that describes the task:
### Input:
Creates the Tensorflow 'global_step' variable (see `MonitorContext.global_step_tensor`).
:param session: Tensorflow session the optimiser is running in
:return: The variable tensor.
### Response:
def create_global_step(session: tf.Session) -> tf.Variable:
    """
    Creates the Tensorflow 'global_step' variable (see `MonitorContext.global_step_tensor`).
    :param session: Tensorflow session the optimiser is running in
    :return: The variable tensor.
    """
    step = tf.Variable(0, trainable=False, name="global_step")
    # Initialize the variable inside the caller's session before handing it back.
    session.run(step.initializer)
    return step
def contains_extra(marker):
"""Check whehter a marker contains an "extra == ..." operand.
"""
if not marker:
return False
marker = Marker(str(marker))
return _markers_contains_extra(marker._markers) | Check whehter a marker contains an "extra == ..." operand. | Below is the the instruction that describes the task:
### Input:
Check whether a marker contains an "extra == ..." operand.
### Response:
def contains_extra(marker):
    """Check whether a marker contains an "extra == ..." operand."""
    if not marker:
        return False
    parsed = Marker(str(marker))
    return _markers_contains_extra(parsed._markers)
def restore_scrollbar_positions(self):
"""Restore scrollbar positions once tree is loaded"""
hor, ver = self._scrollbar_positions
self.horizontalScrollBar().setValue(hor)
self.verticalScrollBar().setValue(ver) | Restore scrollbar positions once tree is loaded | Below is the the instruction that describes the task:
### Input:
Restore scrollbar positions once tree is loaded
### Response:
def restore_scrollbar_positions(self):
    """Restore scrollbar positions once tree is loaded."""
    horizontal, vertical = self._scrollbar_positions
    self.horizontalScrollBar().setValue(horizontal)
    self.verticalScrollBar().setValue(vertical)
def append(self, value):
"""Append an element to the fact."""
if self.asserted:
raise RuntimeError("Fact already asserted")
self._multifield.append(value) | Append an element to the fact. | Below is the the instruction that describes the task:
### Input:
Append an element to the fact.
### Response:
def append(self, value):
    """Add *value* to the end of the fact's multifield.

    :raises RuntimeError: if the fact has already been asserted.
    """
    if self.asserted:
        raise RuntimeError("Fact already asserted")
    self._multifield.append(value)
def get_session_key(self, username, password_hash):
"""
Retrieve a session key with a username and a md5 hash of the user's
password.
"""
params = {"username": username, "authToken": md5(username + password_hash)}
request = _Request(self.network, "auth.getMobileSession", params)
# default action is that a request is signed only when
# a session key is provided.
request.sign_it()
doc = request.execute()
return _extract(doc, "key") | Retrieve a session key with a username and a md5 hash of the user's
password. | Below is the the instruction that describes the task:
### Input:
Retrieve a session key with a username and a md5 hash of the user's
password.
### Response:
def get_session_key(self, username, password_hash):
    """
    Retrieve a session key with a username and a md5 hash of the user's
    password.

    :param username: the account's user name
    :param password_hash: md5 hex digest of the account's password
    :return: the session key string extracted from the API response
    """
    # authToken is md5(username + md5(password)) — presumably required
    # by auth.getMobileSession; confirm against the API docs.
    params = {"username": username, "authToken": md5(username + password_hash)}
    request = _Request(self.network, "auth.getMobileSession", params)
    # default action is that a request is signed only when
    # a session key is provided.
    request.sign_it()
    doc = request.execute()
    return _extract(doc, "key")
def get_data_excel_xml(file_name, file_contents=None, on_demand=False):
'''
Loads xml excel format files.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
file_contents: The file-like object holding contents of file_name.
If left as None, then file_name is directly loaded.
on_demand: Requests that a yielder be used in place of a full data
copy (will be ignored).
'''
# NOTE this method is inefficient and uses code that's not of the highest quality
if file_contents:
xml_file = BytesIO(file_contents)
else:
xml_file = file_name
book = xmlparse.ParseExcelXMLFile(xml_file)
row_builder = lambda s, r: list(s.row_values(r))
return [XMLSheetYielder(book, index, row_builder) for index in range(len(book))] | Loads xml excel format files.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
file_contents: The file-like object holding contents of file_name.
If left as None, then file_name is directly loaded.
on_demand: Requests that a yielder be used in place of a full data
copy (will be ignored). | Below is the the instruction that describes the task:
### Input:
Loads xml excel format files.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
file_contents: The file-like object holding contents of file_name.
If left as None, then file_name is directly loaded.
on_demand: Requests that a yielder be used in place of a full data
copy (will be ignored).
### Response:
def get_data_excel_xml(file_name, file_contents=None, on_demand=False):
    '''
    Loads xml excel format files.
    Args:
        file_name: The name of the local file, or the holder for the
            extension type when the file_contents are supplied.
        file_contents: The file-like object holding contents of file_name.
            If left as None, then file_name is directly loaded.
        on_demand: Requests that a yielder be used in place of a full data
            copy (will be ignored).
    '''
    # NOTE this method is inefficient and uses code that's not of the highest quality
    if file_contents:
        source = BytesIO(file_contents)
    else:
        source = file_name
    book = xmlparse.ParseExcelXMLFile(source)

    def row_builder(sheet, row_index):
        return list(sheet.row_values(row_index))

    return [XMLSheetYielder(book, idx, row_builder) for idx in range(len(book))]
def exists(self, filetype, remote=None, **kwargs):
'''Checks if the given type of file exists locally
Parameters
----------
filetype : str
File type parameter.
remote : bool
If True, checks for remote existence of the file
Returns
-------
exists : bool
Boolean indicating if the file exists.
'''
full = kwargs.get('full', None)
if not full:
full = self.full(filetype, **kwargs)
if remote:
# check for remote existence using a HEAD request
url = self.url('', full=full)
try:
resp = requests.head(url)
except Exception as e:
raise AccessError('Cannot check for remote file existence for {0}: {1}'.format(url, e))
else:
return resp.ok
else:
return os.path.isfile(full) | Checks if the given type of file exists locally
Parameters
----------
filetype : str
File type parameter.
remote : bool
If True, checks for remote existence of the file
Returns
-------
exists : bool
Boolean indicating if the file exists. | Below is the the instruction that describes the task:
### Input:
Checks if the given type of file exists locally
Parameters
----------
filetype : str
File type parameter.
remote : bool
If True, checks for remote existence of the file
Returns
-------
exists : bool
Boolean indicating if the file exists.
### Response:
def exists(self, filetype, remote=None, **kwargs):
    '''Checks if the given type of file exists locally
    Parameters
    ----------
    filetype : str
        File type parameter.
    remote : bool
        If True, checks for remote existence of the file
    Returns
    -------
    exists : bool
        Boolean indicating if the file exists.
    '''
    # 'full' kwarg lets callers pass a pre-resolved path and skip the
    # template expansion done by self.full().
    full = kwargs.get('full', None)
    if not full:
        full = self.full(filetype, **kwargs)
    if remote:
        # check for remote existence using a HEAD request (no body download)
        url = self.url('', full=full)
        try:
            resp = requests.head(url)
        except Exception as e:
            # Network/DNS failures are surfaced as AccessError so callers
            # can distinguish "cannot check" from "does not exist".
            raise AccessError('Cannot check for remote file existence for {0}: {1}'.format(url, e))
        else:
            # resp.ok is True for any status < 400
            return resp.ok
    else:
        return os.path.isfile(full)
def load(ctx, input, output):
"""Read clusters from file and save to model file."""
log.debug('chemdataextractor.cluster.load')
import pickle
click.echo('Reading %s' % input.name)
clusters = {}
for line in input.readlines():
cluster, word, freq = line.split()
clusters[word] = cluster
pickle.dump(clusters, output, protocol=pickle.HIGHEST_PROTOCOL) | Read clusters from file and save to model file. | Below is the the instruction that describes the task:
### Input:
Read clusters from file and save to model file.
### Response:
def load(ctx, input, output):
    """Read clusters from file and save to model file."""
    log.debug('chemdataextractor.cluster.load')
    import pickle
    click.echo('Reading %s' % input.name)
    # Each input line is "<cluster> <word> <frequency>"; keep the
    # word -> cluster mapping and drop the frequency.
    records = (line.split() for line in input.readlines())
    clusters = {word: cluster for cluster, word, freq in records}
    pickle.dump(clusters, output, protocol=pickle.HIGHEST_PROTOCOL)
def to_pb(self):
"""Converts the union into a single GC rule as a protobuf.
:rtype: :class:`.table_v2_pb2.GcRule`
:returns: The converted current object.
"""
union = table_v2_pb2.GcRule.Union(rules=[rule.to_pb() for rule in self.rules])
return table_v2_pb2.GcRule(union=union) | Converts the union into a single GC rule as a protobuf.
:rtype: :class:`.table_v2_pb2.GcRule`
:returns: The converted current object. | Below is the the instruction that describes the task:
### Input:
Converts the union into a single GC rule as a protobuf.
:rtype: :class:`.table_v2_pb2.GcRule`
:returns: The converted current object.
### Response:
def to_pb(self):
    """Converts the union into a single GC rule as a protobuf.
    :rtype: :class:`.table_v2_pb2.GcRule`
    :returns: The converted current object.
    """
    converted = [rule.to_pb() for rule in self.rules]
    return table_v2_pb2.GcRule(
        union=table_v2_pb2.GcRule.Union(rules=converted)
    )
def name(self):
"""Dict with locale codes as keys and localized name as value"""
# pylint:disable=E1101
return next((self.names.get(x) for x in self._locales if x in
self.names), None) | Dict with locale codes as keys and localized name as value | Below is the the instruction that describes the task:
### Input:
Dict with locale codes as keys and localized name as value
### Response:
def name(self):
    """Localized name for the first preferred locale found in ``names``."""
    # pylint:disable=E1101
    for locale in self._locales:
        if locale in self.names:
            return self.names[locale]
    return None
def retrieve_tx(self, txid):
"""Returns rawtx for <txid>."""
txid = deserialize.txid(txid)
tx = self.service.get_tx(txid)
return serialize.tx(tx) | Returns rawtx for <txid>. | Below is the the instruction that describes the task:
### Input:
Returns rawtx for <txid>.
### Response:
def retrieve_tx(self, txid):
    """Returns rawtx for <txid>."""
    normalized_txid = deserialize.txid(txid)
    transaction = self.service.get_tx(normalized_txid)
    return serialize.tx(transaction)
def colRowIsOnFgsPixel(self, col, row, padding=-50):
"""Is col row on a science pixel?
#See Kepler Flight Segment User's Manual (SP0039-702) \S 5.4 (p88)
Inputs:
col, row (floats or ints)
padding If padding <0, pixel must be on silicon and this many
pixels away from the edge of the CCD to return True
Returns:
boolean
"""
if col < 12. - padding or col > 547 + padding:
return False
if row < 0 - padding or row > 527 + padding :
return False
return True | Is col row on a science pixel?
#See Kepler Flight Segment User's Manual (SP0039-702) \S 5.4 (p88)
Inputs:
col, row (floats or ints)
padding If padding <0, pixel must be on silicon and this many
pixels away from the edge of the CCD to return True
Returns:
boolean | Below is the the instruction that describes the task:
### Input:
Is col row on a science pixel?
#See Kepler Flight Segment User's Manual (SP0039-702) \S 5.4 (p88)
Inputs:
col, row (floats or ints)
padding If padding <0, pixel must be on silicon and this many
pixels away from the edge of the CCD to return True
Returns:
boolean
### Response:
def colRowIsOnFgsPixel(self, col, row, padding=-50):
    """Is col row on a science pixel?
    #See Kepler Flight Segment User's Manual (SP0039-702) \S 5.4 (p88)
    Inputs:
        col, row (floats or ints)
        padding If padding <0, pixel must be on silicon and this many
            pixels away from the edge of the CCD to return True
    Returns:
        boolean
    """
    # Valid column range is [12, 547] and row range is [0, 527],
    # widened (or narrowed, for negative padding) by `padding`.
    col_ok = (12. - padding) <= col <= (547 + padding)
    row_ok = (0 - padding) <= row <= (527 + padding)
    return col_ok and row_ok
def user_choice(prompt, choices=("yes", "no"), default=None):
"""
Prompts the user for confirmation. The default value, if any, is capitalized.
:param prompt: Information to display to the user.
:param choices: an iterable of possible choices.
:param default: default choice
:return: the user's choice
"""
assert default is None or default in choices
choice_list = ', '.join((choice.title() if choice == default else choice for choice in choices))
response = None
while response not in choices:
response = input(prompt + ' [' + choice_list + ']: ')
response = response.lower() if response else default
return response | Prompts the user for confirmation. The default value, if any, is capitalized.
:param prompt: Information to display to the user.
:param choices: an iterable of possible choices.
:param default: default choice
:return: the user's choice | Below is the the instruction that describes the task:
### Input:
Prompts the user for confirmation. The default value, if any, is capitalized.
:param prompt: Information to display to the user.
:param choices: an iterable of possible choices.
:param default: default choice
:return: the user's choice
### Response:
def user_choice(prompt, choices=("yes", "no"), default=None):
    """
    Prompts the user for confirmation. The default value, if any, is capitalized.
    :param prompt: Information to display to the user.
    :param choices: an iterable of possible choices.
    :param default: default choice
    :return: the user's choice
    """
    assert default is None or default in choices
    # Capitalize the default option so the user can spot it at a glance.
    display = []
    for choice in choices:
        display.append(choice.title() if choice == default else choice)
    choice_list = ', '.join(display)
    response = None
    # Keep prompting until a valid choice (or the default via empty input).
    while response not in choices:
        raw = input(prompt + ' [' + choice_list + ']: ')
        response = raw.lower() if raw else default
    return response
def diagnostics(self):
"""Dictionary access to all diagnostic variables
:type: dict
"""
diag_dict = {}
for key in self._diag_vars:
try:
#diag_dict[key] = getattr(self,key)
# using self.__dict__ doesn't count diagnostics defined as properties
diag_dict[key] = self.__dict__[key]
except:
pass
return diag_dict | Dictionary access to all diagnostic variables
:type: dict | Below is the the instruction that describes the task:
### Input:
Dictionary access to all diagnostic variables
:type: dict
### Response:
def diagnostics(self):
    """Dictionary access to all diagnostic variables

    Names listed in ``_diag_vars`` but not (yet) stored on the
    instance are silently skipped, matching the old behaviour without
    the bare ``except`` that used to swallow every exception
    (including KeyboardInterrupt).

    :type: dict
    """
    # Reading self.__dict__ (rather than getattr) deliberately excludes
    # diagnostics defined as class-level properties.
    return {key: self.__dict__[key]
            for key in self._diag_vars
            if key in self.__dict__}
def _send_command_to_servers(self, head, body):
"""Sends a command to all server nodes.
Sending command to a server node will cause that server node to invoke
``KVStoreServer.controller`` to execute the command.
This function returns after the command has been executed on all server
nodes.
Parameters
----------
head : int
the head of the command.
body : str
the body of the command.
"""
check_call(_LIB.MXKVStoreSendCommmandToServers(
self.handle, mx_uint(head), c_str(body))) | Sends a command to all server nodes.
Sending command to a server node will cause that server node to invoke
``KVStoreServer.controller`` to execute the command.
This function returns after the command has been executed on all server
nodes.
Parameters
----------
head : int
the head of the command.
body : str
the body of the command. | Below is the the instruction that describes the task:
### Input:
Sends a command to all server nodes.
Sending command to a server node will cause that server node to invoke
``KVStoreServer.controller`` to execute the command.
This function returns after the command has been executed on all server
nodes.
Parameters
----------
head : int
the head of the command.
body : str
the body of the command.
### Response:
def _send_command_to_servers(self, head, body):
    """Sends a command to all server nodes.
    Sending command to a server node will cause that server node to invoke
    ``KVStoreServer.controller`` to execute the command.
    This function returns after the command has been executed on all server
    nodes.
    Parameters
    ----------
    head : int
        the head of the command.
    body : str
        the body of the command.
    """
    # Blocks until every server has run the command (per the contract
    # above).  The misspelled symbol name ("Commmand") matches the
    # exported C API function and must not be "fixed" here.
    check_call(_LIB.MXKVStoreSendCommmandToServers(
        self.handle, mx_uint(head), c_str(body)))
def reset(self):
"""Reset all fields, almost like creating a new object.
Note: Forgets changes you have made not saved to database!
(Remember: Others might reference the object already, expecting
something else!) Override this method if you add properties not
defined in _sqlFields.
"""
self._resetID()
self._new = None
self._updated = None
self._changed = None
self._values = {}
# initially create fields
for field in self._sqlFields.keys():
self._values[field] = None | Reset all fields, almost like creating a new object.
Note: Forgets changes you have made not saved to database!
(Remember: Others might reference the object already, expecting
something else!) Override this method if you add properties not
defined in _sqlFields. | Below is the the instruction that describes the task:
### Input:
Reset all fields, almost like creating a new object.
Note: Forgets changes you have made not saved to database!
(Remember: Others might reference the object already, expecting
something else!) Override this method if you add properties not
defined in _sqlFields.
### Response:
def reset(self):
    """Reset all fields, almost like creating a new object.
    Note: Forgets changes you have made not saved to database!
    (Remember: Others might reference the object already, expecting
    something else!) Override this method if you add properties not
    defined in _sqlFields.
    """
    self._resetID()
    self._new = None
    self._updated = None
    self._changed = None
    # Recreate every known SQL field with an empty value.
    self._values = {field: None for field in self._sqlFields}
def _get_synset_offsets(synset_idxes):
"""Returs pointer offset in the WordNet file for every synset index.
Notes
-----
Internal function. Do not call directly.
Preserves order -- for [x,y,z] returns [offset(x),offset(y),offset(z)].
Parameters
----------
synset_idxes : list of ints
Lists synset IDs, which need offset.
Returns
-------
list of ints
Lists pointer offsets in Wordnet file.
"""
offsets = {}
current_seeked_offset_idx = 0
ordered_synset_idxes = sorted(synset_idxes)
with codecs.open(_SOI,'rb', 'utf-8') as fin:
for line in fin:
split_line = line.split(':')
while current_seeked_offset_idx < len(ordered_synset_idxes) and split_line[0] == str(ordered_synset_idxes[current_seeked_offset_idx]):
# Looping on single line entries in case synset_indexes contains duplicates.
offsets[synset_idxes[current_seeked_offset_idx]] = int(split_line[1])
current_seeked_offset_idx += 1
if current_seeked_offset_idx >= len(synset_idxes):
break
return [offsets[synset_idx] for synset_idx in synset_idxes] | Returs pointer offset in the WordNet file for every synset index.
Notes
-----
Internal function. Do not call directly.
Preserves order -- for [x,y,z] returns [offset(x),offset(y),offset(z)].
Parameters
----------
synset_idxes : list of ints
Lists synset IDs, which need offset.
Returns
-------
list of ints
Lists pointer offsets in Wordnet file. | Below is the the instruction that describes the task:
### Input:
Returns pointer offset in the WordNet file for every synset index.
Notes
-----
Internal function. Do not call directly.
Preserves order -- for [x,y,z] returns [offset(x),offset(y),offset(z)].
Parameters
----------
synset_idxes : list of ints
Lists synset IDs, which need offset.
Returns
-------
list of ints
Lists pointer offsets in Wordnet file.
### Response:
def _get_synset_offsets(synset_idxes):
    """Returns pointer offset in the WordNet file for every synset index.
    Notes
    -----
    Internal function. Do not call directly.
    Preserves order -- for [x,y,z] returns [offset(x),offset(y),offset(z)].
    Parameters
    ----------
    synset_idxes : list of ints
        Lists synset IDs, which need offset.
    Returns
    -------
    list of ints
        Lists pointer offsets in Wordnet file.
    """
    offsets = {}
    current_seeked_offset_idx = 0
    # Scan the index file once; sorting lets us match IDs in file order
    # with a single cursor instead of re-reading the file per ID.
    # Assumes the index file is sorted by synset ID — TODO confirm.
    ordered_synset_idxes = sorted(synset_idxes)
    with codecs.open(_SOI,'rb', 'utf-8') as fin:
        for line in fin:
            # Each line is "<synset_id>:<offset>[:...]".
            split_line = line.split(':')
            while current_seeked_offset_idx < len(ordered_synset_idxes) and split_line[0] == str(ordered_synset_idxes[current_seeked_offset_idx]):
                # Looping on single line entries in case synset_indexes contains duplicates.
                offsets[synset_idxes[current_seeked_offset_idx]] = int(split_line[1])
                current_seeked_offset_idx += 1
            if current_seeked_offset_idx >= len(synset_idxes):
                break
    # Re-emit in the caller's original order, not the sorted scan order.
    return [offsets[synset_idx] for synset_idx in synset_idxes]
def p_expr_lbound(p):
""" bexpr : LBOUND LP ARRAY_ID RP
| UBOUND LP ARRAY_ID RP
"""
entry = SYMBOL_TABLE.access_array(p[3], p.lineno(3))
if entry is None:
p[0] = None
return
entry.accessed = True
if p[1] == 'LBOUND':
p[0] = make_number(entry.bounds[OPTIONS.array_base.value].lower,
p.lineno(3), TYPE.uinteger)
else:
p[0] = make_number(entry.bounds[OPTIONS.array_base.value].upper,
p.lineno(3), TYPE.uinteger) | bexpr : LBOUND LP ARRAY_ID RP
| UBOUND LP ARRAY_ID RP | Below is the the instruction that describes the task:
### Input:
bexpr : LBOUND LP ARRAY_ID RP
| UBOUND LP ARRAY_ID RP
### Response:
def p_expr_lbound(p):
    """ bexpr : LBOUND LP ARRAY_ID RP
        | UBOUND LP ARRAY_ID RP
    """
    # NOTE: the docstring above is the PLY grammar production for this
    # rule — it is functional, not documentation; do not edit it.
    entry = SYMBOL_TABLE.access_array(p[3], p.lineno(3))
    if entry is None:
        # Undeclared array: error already reported by the symbol table.
        p[0] = None
        return
    entry.accessed = True
    # LBOUND/UBOUND are resolved at compile time from the declared
    # array bounds of the dimension selected by OPTIONS.array_base.
    if p[1] == 'LBOUND':
        p[0] = make_number(entry.bounds[OPTIONS.array_base.value].lower,
                           p.lineno(3), TYPE.uinteger)
    else:
        p[0] = make_number(entry.bounds[OPTIONS.array_base.value].upper,
                           p.lineno(3), TYPE.uinteger)
def undecorate(func):
"""Returns the decorator and the undecorated function of given object."""
orig_call_wrapper = lambda x: x
for call_wrapper, unwrap in SUPPORTED_DECORATOR.items():
if isinstance(func, call_wrapper):
func = unwrap(func)
orig_call_wrapper = call_wrapper
break
return orig_call_wrapper, func | Returns the decorator and the undecorated function of given object. | Below is the the instruction that describes the task:
### Input:
Returns the decorator and the undecorated function of given object.
### Response:
def undecorate(func):
    """Returns the decorator and the undecorated function of given object."""
    for call_wrapper, unwrap in SUPPORTED_DECORATOR.items():
        if isinstance(func, call_wrapper):
            return call_wrapper, unwrap(func)
    # Not wrapped by any known decorator: identity wrapper, unchanged func.
    return (lambda x: x), func
def Verify(self):
"""
Verify block using the verification script.
Returns:
bool: True if valid. False otherwise.
"""
if not self.Hash.ToBytes() == GetGenesis().Hash.ToBytes():
return False
bc = GetBlockchain()
if not bc.ContainsBlock(self.Index):
return False
if self.Index > 0:
prev_header = GetBlockchain().GetHeader(self.PrevHash.ToBytes())
if prev_header is None:
return False
if prev_header.Index + 1 != self.Index:
return False
if prev_header.Timestamp >= self.Timestamp:
return False
# this should be done to actually verify the block
if not Helper.VerifyScripts(self):
return False
return True | Verify block using the verification script.
Returns:
bool: True if valid. False otherwise. | Below is the the instruction that describes the task:
### Input:
Verify block using the verification script.
Returns:
bool: True if valid. False otherwise.
### Response:
def Verify(self):
    """
    Verify block using the verification script.
    Returns:
        bool: True if valid. False otherwise.
    """
    # NOTE(review): this rejects every block whose hash differs from the
    # genesis block's hash, i.e. only the genesis block can pass.  The
    # condition looks inverted — confirm against the upstream source.
    if not self.Hash.ToBytes() == GetGenesis().Hash.ToBytes():
        return False
    bc = GetBlockchain()
    if not bc.ContainsBlock(self.Index):
        return False
    if self.Index > 0:
        # Non-genesis blocks must chain onto a known previous header
        # with consecutive index and strictly increasing timestamp.
        prev_header = GetBlockchain().GetHeader(self.PrevHash.ToBytes())
        if prev_header is None:
            return False
        if prev_header.Index + 1 != self.Index:
            return False
        if prev_header.Timestamp >= self.Timestamp:
            return False
    # this should be done to actually verify the block
    if not Helper.VerifyScripts(self):
        return False
    return True
def how_vulnerable(
chain,
blackbox_mapping,
sanitiser_nodes,
potential_sanitiser,
blackbox_assignments,
interactive,
vuln_deets
):
"""Iterates through the chain of nodes and checks the blackbox nodes against the blackbox mapping and sanitiser dictionary.
Note: potential_sanitiser is the only hack here, it is because we do not take p-use's into account yet.
e.g. we can only say potentially instead of definitely sanitised in the path_traversal_sanitised_2.py test.
Args:
chain(list(Node)): A path of nodes between source and sink.
blackbox_mapping(dict): A map of blackbox functions containing whether or not they propagate taint.
sanitiser_nodes(set): A set of nodes that are sanitisers for the sink.
potential_sanitiser(Node): An if or elif node that can potentially cause sanitisation.
blackbox_assignments(set[AssignmentNode]): set of blackbox assignments, includes the ReturnNode's of BBorBInode's.
interactive(bool): determines if we ask the user about blackbox functions not in the mapping file.
vuln_deets(dict): vulnerability details.
Returns:
A VulnerabilityType depending on how vulnerable the chain is.
"""
for i, current_node in enumerate(chain):
if current_node in sanitiser_nodes:
vuln_deets['sanitiser'] = current_node
vuln_deets['confident'] = True
return VulnerabilityType.SANITISED, interactive
if isinstance(current_node, BBorBInode):
if current_node.func_name in blackbox_mapping['propagates']:
continue
elif current_node.func_name in blackbox_mapping['does_not_propagate']:
return VulnerabilityType.FALSE, interactive
elif interactive:
user_says = input(
'Is the return value of {} with tainted argument "{}" vulnerable? ([Y]es/[N]o/[S]top asking)'.format(
current_node.label,
chain[i - 1].left_hand_side
)
).lower()
if user_says.startswith('s'):
interactive = False
vuln_deets['unknown_assignment'] = current_node
return VulnerabilityType.UNKNOWN, interactive
if user_says.startswith('n'):
blackbox_mapping['does_not_propagate'].append(current_node.func_name)
return VulnerabilityType.FALSE, interactive
blackbox_mapping['propagates'].append(current_node.func_name)
else:
vuln_deets['unknown_assignment'] = current_node
return VulnerabilityType.UNKNOWN, interactive
if potential_sanitiser:
vuln_deets['sanitiser'] = potential_sanitiser
vuln_deets['confident'] = False
return VulnerabilityType.SANITISED, interactive
return VulnerabilityType.TRUE, interactive | Iterates through the chain of nodes and checks the blackbox nodes against the blackbox mapping and sanitiser dictionary.
Note: potential_sanitiser is the only hack here, it is because we do not take p-use's into account yet.
e.g. we can only say potentially instead of definitely sanitised in the path_traversal_sanitised_2.py test.
Args:
chain(list(Node)): A path of nodes between source and sink.
blackbox_mapping(dict): A map of blackbox functions containing whether or not they propagate taint.
sanitiser_nodes(set): A set of nodes that are sanitisers for the sink.
potential_sanitiser(Node): An if or elif node that can potentially cause sanitisation.
blackbox_assignments(set[AssignmentNode]): set of blackbox assignments, includes the ReturnNode's of BBorBInode's.
interactive(bool): determines if we ask the user about blackbox functions not in the mapping file.
vuln_deets(dict): vulnerability details.
Returns:
A VulnerabilityType depending on how vulnerable the chain is. | Below is the the instruction that describes the task:
### Input:
Iterates through the chain of nodes and checks the blackbox nodes against the blackbox mapping and sanitiser dictionary.
Note: potential_sanitiser is the only hack here, it is because we do not take p-use's into account yet.
e.g. we can only say potentially instead of definitely sanitised in the path_traversal_sanitised_2.py test.
Args:
chain(list(Node)): A path of nodes between source and sink.
blackbox_mapping(dict): A map of blackbox functions containing whether or not they propagate taint.
sanitiser_nodes(set): A set of nodes that are sanitisers for the sink.
potential_sanitiser(Node): An if or elif node that can potentially cause sanitisation.
blackbox_assignments(set[AssignmentNode]): set of blackbox assignments, includes the ReturnNode's of BBorBInode's.
interactive(bool): determines if we ask the user about blackbox functions not in the mapping file.
vuln_deets(dict): vulnerability details.
Returns:
A VulnerabilityType depending on how vulnerable the chain is.
### Response:
def how_vulnerable(
chain,
blackbox_mapping,
sanitiser_nodes,
potential_sanitiser,
blackbox_assignments,
interactive,
vuln_deets
):
"""Iterates through the chain of nodes and checks the blackbox nodes against the blackbox mapping and sanitiser dictionary.
Note: potential_sanitiser is the only hack here, it is because we do not take p-use's into account yet.
e.g. we can only say potentially instead of definitely sanitised in the path_traversal_sanitised_2.py test.
Args:
chain(list(Node)): A path of nodes between source and sink.
blackbox_mapping(dict): A map of blackbox functions containing whether or not they propagate taint.
sanitiser_nodes(set): A set of nodes that are sanitisers for the sink.
potential_sanitiser(Node): An if or elif node that can potentially cause sanitisation.
blackbox_assignments(set[AssignmentNode]): set of blackbox assignments, includes the ReturnNode's of BBorBInode's.
interactive(bool): determines if we ask the user about blackbox functions not in the mapping file.
vuln_deets(dict): vulnerability details.
Returns:
A VulnerabilityType depending on how vulnerable the chain is.
"""
for i, current_node in enumerate(chain):
if current_node in sanitiser_nodes:
vuln_deets['sanitiser'] = current_node
vuln_deets['confident'] = True
return VulnerabilityType.SANITISED, interactive
if isinstance(current_node, BBorBInode):
if current_node.func_name in blackbox_mapping['propagates']:
continue
elif current_node.func_name in blackbox_mapping['does_not_propagate']:
return VulnerabilityType.FALSE, interactive
elif interactive:
user_says = input(
'Is the return value of {} with tainted argument "{}" vulnerable? ([Y]es/[N]o/[S]top asking)'.format(
current_node.label,
chain[i - 1].left_hand_side
)
).lower()
if user_says.startswith('s'):
interactive = False
vuln_deets['unknown_assignment'] = current_node
return VulnerabilityType.UNKNOWN, interactive
if user_says.startswith('n'):
blackbox_mapping['does_not_propagate'].append(current_node.func_name)
return VulnerabilityType.FALSE, interactive
blackbox_mapping['propagates'].append(current_node.func_name)
else:
vuln_deets['unknown_assignment'] = current_node
return VulnerabilityType.UNKNOWN, interactive
if potential_sanitiser:
vuln_deets['sanitiser'] = potential_sanitiser
vuln_deets['confident'] = False
return VulnerabilityType.SANITISED, interactive
return VulnerabilityType.TRUE, interactive |
def to_dict(self):
"""
Return a dict representation of an osmnet osmnet_config instance.
"""
return {'logs_folder': self.logs_folder,
'log_file': self.log_file,
'log_console': self.log_console,
'log_name': self.log_name,
'log_filename': self.log_filename,
'keep_osm_tags': self.keep_osm_tags
} | Return a dict representation of an osmnet osmnet_config instance. | Below is the the instruction that describes the task:
### Input:
Return a dict representation of an osmnet osmnet_config instance.
### Response:
def to_dict(self):
"""
Return a dict representation of an osmnet osmnet_config instance.
"""
return {'logs_folder': self.logs_folder,
'log_file': self.log_file,
'log_console': self.log_console,
'log_name': self.log_name,
'log_filename': self.log_filename,
'keep_osm_tags': self.keep_osm_tags
} |
def get_task_scfcycles(self, nids=None, wslice=None, task_class=None, exclude_ok_tasks=False):
"""
Return list of (taks, scfcycle) tuples for all the tasks in the flow with a SCF algorithm
e.g. electronic GS-SCF iteration, DFPT-SCF iterations etc.
Args:
nids: List of node identifiers.
wslice: Slice object used to select works.
task_class: String or class used to select tasks. Ignored if None.
exclude_ok_tasks: True if only running tasks should be considered.
Returns:
List of `ScfCycle` subclass instances.
"""
select_status = [self.S_RUN] if exclude_ok_tasks else [self.S_RUN, self.S_OK]
tasks_cycles = []
for task in self.select_tasks(nids=nids, wslice=wslice):
# Fileter
if task.status not in select_status or task.cycle_class is None:
continue
if task_class is not None and not task.isinstance(task_class):
continue
try:
cycle = task.cycle_class.from_file(task.output_file.path)
if cycle is not None:
tasks_cycles.append((task, cycle))
except Exception:
# This is intentionally ignored because from_file can fail for several reasons.
pass
return tasks_cycles | Return list of (taks, scfcycle) tuples for all the tasks in the flow with a SCF algorithm
e.g. electronic GS-SCF iteration, DFPT-SCF iterations etc.
Args:
nids: List of node identifiers.
wslice: Slice object used to select works.
task_class: String or class used to select tasks. Ignored if None.
exclude_ok_tasks: True if only running tasks should be considered.
Returns:
List of `ScfCycle` subclass instances. | Below is the the instruction that describes the task:
### Input:
Return list of (taks, scfcycle) tuples for all the tasks in the flow with a SCF algorithm
e.g. electronic GS-SCF iteration, DFPT-SCF iterations etc.
Args:
nids: List of node identifiers.
wslice: Slice object used to select works.
task_class: String or class used to select tasks. Ignored if None.
exclude_ok_tasks: True if only running tasks should be considered.
Returns:
List of `ScfCycle` subclass instances.
### Response:
def get_task_scfcycles(self, nids=None, wslice=None, task_class=None, exclude_ok_tasks=False):
"""
Return list of (taks, scfcycle) tuples for all the tasks in the flow with a SCF algorithm
e.g. electronic GS-SCF iteration, DFPT-SCF iterations etc.
Args:
nids: List of node identifiers.
wslice: Slice object used to select works.
task_class: String or class used to select tasks. Ignored if None.
exclude_ok_tasks: True if only running tasks should be considered.
Returns:
List of `ScfCycle` subclass instances.
"""
select_status = [self.S_RUN] if exclude_ok_tasks else [self.S_RUN, self.S_OK]
tasks_cycles = []
for task in self.select_tasks(nids=nids, wslice=wslice):
# Fileter
if task.status not in select_status or task.cycle_class is None:
continue
if task_class is not None and not task.isinstance(task_class):
continue
try:
cycle = task.cycle_class.from_file(task.output_file.path)
if cycle is not None:
tasks_cycles.append((task, cycle))
except Exception:
# This is intentionally ignored because from_file can fail for several reasons.
pass
return tasks_cycles |
def write(filename, mesh, file_format=None, **kwargs):
"""Writes mesh together with data to a file.
:params filename: File to write to.
:type filename: str
:params point_data: Named additional point data to write to the file.
:type point_data: dict
"""
if not file_format:
# deduce file format from extension
file_format = _filetype_from_filename(filename)
# check cells for sanity
for key, value in mesh.cells.items():
if key[:7] == "polygon":
assert value.shape[1] == int(key[7:])
else:
assert value.shape[1] == num_nodes_per_cell[key]
try:
interface, args, default_kwargs = _writer_map[file_format]
except KeyError:
raise KeyError(
"Unknown format '{}'. Pick one of {}".format(
file_format, sorted(list(_writer_map.keys()))
)
)
# Build kwargs
_kwargs = default_kwargs.copy()
_kwargs.update(kwargs)
# Write
return interface.write(filename, mesh, *args, **_kwargs) | Writes mesh together with data to a file.
:params filename: File to write to.
:type filename: str
:params point_data: Named additional point data to write to the file.
:type point_data: dict | Below is the the instruction that describes the task:
### Input:
Writes mesh together with data to a file.
:params filename: File to write to.
:type filename: str
:params point_data: Named additional point data to write to the file.
:type point_data: dict
### Response:
def write(filename, mesh, file_format=None, **kwargs):
"""Writes mesh together with data to a file.
:params filename: File to write to.
:type filename: str
:params point_data: Named additional point data to write to the file.
:type point_data: dict
"""
if not file_format:
# deduce file format from extension
file_format = _filetype_from_filename(filename)
# check cells for sanity
for key, value in mesh.cells.items():
if key[:7] == "polygon":
assert value.shape[1] == int(key[7:])
else:
assert value.shape[1] == num_nodes_per_cell[key]
try:
interface, args, default_kwargs = _writer_map[file_format]
except KeyError:
raise KeyError(
"Unknown format '{}'. Pick one of {}".format(
file_format, sorted(list(_writer_map.keys()))
)
)
# Build kwargs
_kwargs = default_kwargs.copy()
_kwargs.update(kwargs)
# Write
return interface.write(filename, mesh, *args, **_kwargs) |
def create_style_from_font(font):
"""
Convert from font string/tyuple into a Qt style sheet string
:param font: "Arial 10 Bold" or ('Arial', 10, 'Bold)
:return: style string that can be combined with other style strings
"""
if font is None:
return ''
if type(font) is str:
_font = font.split(' ')
else:
_font = font
style = ''
style += 'font-family: %s;\n' % _font[0]
style += 'font-size: %spt;\n' % _font[1]
font_items = ''
for item in _font[2:]:
if item == 'underline':
style += 'text-decoration: underline;\n'
else:
font_items += item + ' '
if font_items != '':
style += 'font: %s;\n' % (font_items)
return style | Convert from font string/tyuple into a Qt style sheet string
:param font: "Arial 10 Bold" or ('Arial', 10, 'Bold)
:return: style string that can be combined with other style strings | Below is the the instruction that describes the task:
### Input:
Convert from font string/tyuple into a Qt style sheet string
:param font: "Arial 10 Bold" or ('Arial', 10, 'Bold)
:return: style string that can be combined with other style strings
### Response:
def create_style_from_font(font):
"""
Convert from font string/tyuple into a Qt style sheet string
:param font: "Arial 10 Bold" or ('Arial', 10, 'Bold)
:return: style string that can be combined with other style strings
"""
if font is None:
return ''
if type(font) is str:
_font = font.split(' ')
else:
_font = font
style = ''
style += 'font-family: %s;\n' % _font[0]
style += 'font-size: %spt;\n' % _font[1]
font_items = ''
for item in _font[2:]:
if item == 'underline':
style += 'text-decoration: underline;\n'
else:
font_items += item + ' '
if font_items != '':
style += 'font: %s;\n' % (font_items)
return style |
def update_instance_sensors(self, opt=None):
""" Method runs through all sensor modules and updates
to the latest sensor values.
After running through each sensor module,
The sensor head (the I2C multiplexer), is disabled
in order to avoid address conflicts.
Usage:
plant_sensor_object.updateAllSensors(bus_object)
"""
self.update_count += 1
self.update_lux()
self.update_humidity_temp()
if opt == "all":
try:
self.update_soil_moisture()
except SensorError:
# This could be handled with a repeat request later.
pass
self.timestamp = time()
# disable sensor module
tca_status = TCA_select(SensorCluster.bus, self.mux_addr, "off")
if tca_status != 0:
raise I2CBusError(
"Bus multiplexer was unable to switch off to prevent conflicts") | Method runs through all sensor modules and updates
to the latest sensor values.
After running through each sensor module,
The sensor head (the I2C multiplexer), is disabled
in order to avoid address conflicts.
Usage:
plant_sensor_object.updateAllSensors(bus_object) | Below is the the instruction that describes the task:
### Input:
Method runs through all sensor modules and updates
to the latest sensor values.
After running through each sensor module,
The sensor head (the I2C multiplexer), is disabled
in order to avoid address conflicts.
Usage:
plant_sensor_object.updateAllSensors(bus_object)
### Response:
def update_instance_sensors(self, opt=None):
""" Method runs through all sensor modules and updates
to the latest sensor values.
After running through each sensor module,
The sensor head (the I2C multiplexer), is disabled
in order to avoid address conflicts.
Usage:
plant_sensor_object.updateAllSensors(bus_object)
"""
self.update_count += 1
self.update_lux()
self.update_humidity_temp()
if opt == "all":
try:
self.update_soil_moisture()
except SensorError:
# This could be handled with a repeat request later.
pass
self.timestamp = time()
# disable sensor module
tca_status = TCA_select(SensorCluster.bus, self.mux_addr, "off")
if tca_status != 0:
raise I2CBusError(
"Bus multiplexer was unable to switch off to prevent conflicts") |
def get_cache_time(
self, path: str, modified: Optional[datetime.datetime], mime_type: str
) -> int:
"""Override to customize cache control behavior.
Return a positive number of seconds to make the result
cacheable for that amount of time or 0 to mark resource as
cacheable for an unspecified amount of time (subject to
browser heuristics).
By default returns cache expiry of 10 years for resources requested
with ``v`` argument.
"""
return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0 | Override to customize cache control behavior.
Return a positive number of seconds to make the result
cacheable for that amount of time or 0 to mark resource as
cacheable for an unspecified amount of time (subject to
browser heuristics).
By default returns cache expiry of 10 years for resources requested
with ``v`` argument. | Below is the the instruction that describes the task:
### Input:
Override to customize cache control behavior.
Return a positive number of seconds to make the result
cacheable for that amount of time or 0 to mark resource as
cacheable for an unspecified amount of time (subject to
browser heuristics).
By default returns cache expiry of 10 years for resources requested
with ``v`` argument.
### Response:
def get_cache_time(
self, path: str, modified: Optional[datetime.datetime], mime_type: str
) -> int:
"""Override to customize cache control behavior.
Return a positive number of seconds to make the result
cacheable for that amount of time or 0 to mark resource as
cacheable for an unspecified amount of time (subject to
browser heuristics).
By default returns cache expiry of 10 years for resources requested
with ``v`` argument.
"""
return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0 |
def _compute_mean(self, C, g, mag, hypo_depth, dists, imt):
"""
Compute mean according to equation on Table 2, page 2275.
"""
delta = 0.00750 * 10 ** (0.507 * mag)
# computing R for different values of mag
if mag < 6.5:
R = np.sqrt(dists.rhypo ** 2 + delta ** 2)
else:
R = np.sqrt(dists.rrup ** 2 + delta ** 2)
mean = (
# 1st term
C['c1'] + C['c2'] * mag +
# 2nd term
C['c3'] * R -
# 3rd term
C['c4'] * np.log10(R) +
# 4th term
C['c5'] * hypo_depth
)
# convert from base 10 to base e
if imt == PGV():
mean = np.log(10 ** mean)
else:
# convert from cm/s**2 to g
mean = np.log((10 ** mean) * 1e-2 / g)
return mean | Compute mean according to equation on Table 2, page 2275. | Below is the the instruction that describes the task:
### Input:
Compute mean according to equation on Table 2, page 2275.
### Response:
def _compute_mean(self, C, g, mag, hypo_depth, dists, imt):
"""
Compute mean according to equation on Table 2, page 2275.
"""
delta = 0.00750 * 10 ** (0.507 * mag)
# computing R for different values of mag
if mag < 6.5:
R = np.sqrt(dists.rhypo ** 2 + delta ** 2)
else:
R = np.sqrt(dists.rrup ** 2 + delta ** 2)
mean = (
# 1st term
C['c1'] + C['c2'] * mag +
# 2nd term
C['c3'] * R -
# 3rd term
C['c4'] * np.log10(R) +
# 4th term
C['c5'] * hypo_depth
)
# convert from base 10 to base e
if imt == PGV():
mean = np.log(10 ** mean)
else:
# convert from cm/s**2 to g
mean = np.log((10 ** mean) * 1e-2 / g)
return mean |
def view_hmap(token, dstore):
"""
Display the highest 20 points of the mean hazard map. Called as
$ oq show hmap:0.1 # 10% PoE
"""
try:
poe = valid.probability(token.split(':')[1])
except IndexError:
poe = 0.1
mean = dict(extract(dstore, 'hcurves?kind=mean'))['mean']
oq = dstore['oqparam']
hmap = calc.make_hmap_array(mean, oq.imtls, [poe], len(mean))
dt = numpy.dtype([('sid', U32)] + [(imt, F32) for imt in oq.imtls])
array = numpy.zeros(len(hmap), dt)
for i, vals in enumerate(hmap):
array[i] = (i, ) + tuple(vals)
array.sort(order=list(oq.imtls)[0])
return rst_table(array[:20]) | Display the highest 20 points of the mean hazard map. Called as
$ oq show hmap:0.1 # 10% PoE | Below is the the instruction that describes the task:
### Input:
Display the highest 20 points of the mean hazard map. Called as
$ oq show hmap:0.1 # 10% PoE
### Response:
def view_hmap(token, dstore):
"""
Display the highest 20 points of the mean hazard map. Called as
$ oq show hmap:0.1 # 10% PoE
"""
try:
poe = valid.probability(token.split(':')[1])
except IndexError:
poe = 0.1
mean = dict(extract(dstore, 'hcurves?kind=mean'))['mean']
oq = dstore['oqparam']
hmap = calc.make_hmap_array(mean, oq.imtls, [poe], len(mean))
dt = numpy.dtype([('sid', U32)] + [(imt, F32) for imt in oq.imtls])
array = numpy.zeros(len(hmap), dt)
for i, vals in enumerate(hmap):
array[i] = (i, ) + tuple(vals)
array.sort(order=list(oq.imtls)[0])
return rst_table(array[:20]) |
def _addDPFiles(self, *files):
"""callback to add DPs corresponding to files."""
# quiet flag is always true
self.new_entry_dialog.addDataProducts(self.purrer.makeDataProducts(
[(file, True) for file in files], unbanish=True, unignore=True)) | callback to add DPs corresponding to files. | Below is the the instruction that describes the task:
### Input:
callback to add DPs corresponding to files.
### Response:
def _addDPFiles(self, *files):
"""callback to add DPs corresponding to files."""
# quiet flag is always true
self.new_entry_dialog.addDataProducts(self.purrer.makeDataProducts(
[(file, True) for file in files], unbanish=True, unignore=True)) |
def whois_domains_history(self, domains):
"""Calls WHOIS domain history end point
Args:
domains: An enumerable of domains
Returns:
A dict of {domain: domain_history_result}
"""
api_name = 'opendns-whois-domain-history'
fmt_url_path = u'whois/{0}/history'
return self._multi_get(api_name, fmt_url_path, domains) | Calls WHOIS domain history end point
Args:
domains: An enumerable of domains
Returns:
A dict of {domain: domain_history_result} | Below is the the instruction that describes the task:
### Input:
Calls WHOIS domain history end point
Args:
domains: An enumerable of domains
Returns:
A dict of {domain: domain_history_result}
### Response:
def whois_domains_history(self, domains):
"""Calls WHOIS domain history end point
Args:
domains: An enumerable of domains
Returns:
A dict of {domain: domain_history_result}
"""
api_name = 'opendns-whois-domain-history'
fmt_url_path = u'whois/{0}/history'
return self._multi_get(api_name, fmt_url_path, domains) |
def get_summary(summary_file):
"""Parses a FastQC summary report file and returns it as a dictionary.
This function parses a typical FastQC summary report file, retrieving
only the information on the first two columns. For instance, a line could
be::
'PASS Basic Statistics SH10762A_1.fastq.gz'
This parser will build a dictionary with the string in the second column
as a key and the QC result as the value. In this case, the returned
``dict`` would be something like::
{"Basic Statistics": "PASS"}
Parameters
----------
summary_file: str
Path to FastQC summary report.
Returns
-------
summary_info: :py:data:`OrderedDict`
Returns the information of the FastQC summary report as an ordered
dictionary, with the categories as strings and the QC result as values.
"""
summary_info = OrderedDict()
logger.debug("Retrieving summary information from file: {}".format(
summary_file))
with open(summary_file) as fh:
for line in fh:
# Skip empty lines
if not line.strip():
continue
# Populate summary info
fields = [x.strip() for x in line.split("\t")]
summary_info[fields[1]] = fields[0]
logger.debug("Retrieved summary information from file: {}".format(
summary_info))
return summary_info | Parses a FastQC summary report file and returns it as a dictionary.
This function parses a typical FastQC summary report file, retrieving
only the information on the first two columns. For instance, a line could
be::
'PASS Basic Statistics SH10762A_1.fastq.gz'
This parser will build a dictionary with the string in the second column
as a key and the QC result as the value. In this case, the returned
``dict`` would be something like::
{"Basic Statistics": "PASS"}
Parameters
----------
summary_file: str
Path to FastQC summary report.
Returns
-------
summary_info: :py:data:`OrderedDict`
Returns the information of the FastQC summary report as an ordered
dictionary, with the categories as strings and the QC result as values. | Below is the the instruction that describes the task:
### Input:
Parses a FastQC summary report file and returns it as a dictionary.
This function parses a typical FastQC summary report file, retrieving
only the information on the first two columns. For instance, a line could
be::
'PASS Basic Statistics SH10762A_1.fastq.gz'
This parser will build a dictionary with the string in the second column
as a key and the QC result as the value. In this case, the returned
``dict`` would be something like::
{"Basic Statistics": "PASS"}
Parameters
----------
summary_file: str
Path to FastQC summary report.
Returns
-------
summary_info: :py:data:`OrderedDict`
Returns the information of the FastQC summary report as an ordered
dictionary, with the categories as strings and the QC result as values.
### Response:
def get_summary(summary_file):
"""Parses a FastQC summary report file and returns it as a dictionary.
This function parses a typical FastQC summary report file, retrieving
only the information on the first two columns. For instance, a line could
be::
'PASS Basic Statistics SH10762A_1.fastq.gz'
This parser will build a dictionary with the string in the second column
as a key and the QC result as the value. In this case, the returned
``dict`` would be something like::
{"Basic Statistics": "PASS"}
Parameters
----------
summary_file: str
Path to FastQC summary report.
Returns
-------
summary_info: :py:data:`OrderedDict`
Returns the information of the FastQC summary report as an ordered
dictionary, with the categories as strings and the QC result as values.
"""
summary_info = OrderedDict()
logger.debug("Retrieving summary information from file: {}".format(
summary_file))
with open(summary_file) as fh:
for line in fh:
# Skip empty lines
if not line.strip():
continue
# Populate summary info
fields = [x.strip() for x in line.split("\t")]
summary_info[fields[1]] = fields[0]
logger.debug("Retrieved summary information from file: {}".format(
summary_info))
return summary_info |
def _remove_nonascii(self, df):
"""Make copy and remove non-ascii characters from it."""
df_copy = df.copy(deep=True)
for col in df_copy.columns:
if (df_copy[col].dtype == np.dtype('O')):
df_copy[col] = df[col].apply(
lambda x: re.sub(r'[^\x00-\x7f]', r'', x) if isinstance(x, six.string_types) else x)
return df_copy | Make copy and remove non-ascii characters from it. | Below is the the instruction that describes the task:
### Input:
Make copy and remove non-ascii characters from it.
### Response:
def _remove_nonascii(self, df):
"""Make copy and remove non-ascii characters from it."""
df_copy = df.copy(deep=True)
for col in df_copy.columns:
if (df_copy[col].dtype == np.dtype('O')):
df_copy[col] = df[col].apply(
lambda x: re.sub(r'[^\x00-\x7f]', r'', x) if isinstance(x, six.string_types) else x)
return df_copy |
def render(self, context):
self.prepare(context)
" Cached wrapper around self._render(). "
if getattr(settings, 'DOUBLE_RENDER', False) and self.can_double_render:
if 'SECOND_RENDER' not in context:
return self.double_render()
key = self.get_cache_key()
if key:
rend = cache.get(key)
if rend is None:
rend = self._render(context)
cache.set(key, rend, core_settings.CACHE_TIMEOUT)
else:
rend = self._render(context)
return rend | Cached wrapper around self._render(). | Below is the the instruction that describes the task:
### Input:
Cached wrapper around self._render().
### Response:
def render(self, context):
self.prepare(context)
" Cached wrapper around self._render(). "
if getattr(settings, 'DOUBLE_RENDER', False) and self.can_double_render:
if 'SECOND_RENDER' not in context:
return self.double_render()
key = self.get_cache_key()
if key:
rend = cache.get(key)
if rend is None:
rend = self._render(context)
cache.set(key, rend, core_settings.CACHE_TIMEOUT)
else:
rend = self._render(context)
return rend |
def begin_script(self):
"""Indicate we are going to start loading a script."""
if self.remote_bridge.status in (BRIDGE_STATUS.RECEIVED, BRIDGE_STATUS.VALIDATED, BRIDGE_STATUS.EXECUTING):
return [1] #FIXME: Return correct error here
self.remote_bridge.status = BRIDGE_STATUS.WAITING
self.remote_bridge.error = 0
self.remote_bridge.script_error = None
self.remote_bridge.parsed_script = None
self._device.script = bytearray()
return [0] | Indicate we are going to start loading a script. | Below is the the instruction that describes the task:
### Input:
Indicate we are going to start loading a script.
### Response:
def begin_script(self):
"""Indicate we are going to start loading a script."""
if self.remote_bridge.status in (BRIDGE_STATUS.RECEIVED, BRIDGE_STATUS.VALIDATED, BRIDGE_STATUS.EXECUTING):
return [1] #FIXME: Return correct error here
self.remote_bridge.status = BRIDGE_STATUS.WAITING
self.remote_bridge.error = 0
self.remote_bridge.script_error = None
self.remote_bridge.parsed_script = None
self._device.script = bytearray()
return [0] |
def dpub(self, topic, delay_ms, msg, callback=None):
"""
publish multiple messages in one command (efficiently)
:param topic: nsq topic
:param delay_ms: tell nsqd to delay delivery for this long (integer milliseconds)
:param msg: message body (bytes)
:param callback: function which takes (conn, data) (data may be nsq.Error)
"""
self._pub('dpub', topic, msg, delay_ms, callback=callback) | publish multiple messages in one command (efficiently)
:param topic: nsq topic
:param delay_ms: tell nsqd to delay delivery for this long (integer milliseconds)
:param msg: message body (bytes)
:param callback: function which takes (conn, data) (data may be nsq.Error) | Below is the the instruction that describes the task:
### Input:
publish multiple messages in one command (efficiently)
:param topic: nsq topic
:param delay_ms: tell nsqd to delay delivery for this long (integer milliseconds)
:param msg: message body (bytes)
:param callback: function which takes (conn, data) (data may be nsq.Error)
### Response:
def dpub(self, topic, delay_ms, msg, callback=None):
"""
publish multiple messages in one command (efficiently)
:param topic: nsq topic
:param delay_ms: tell nsqd to delay delivery for this long (integer milliseconds)
:param msg: message body (bytes)
:param callback: function which takes (conn, data) (data may be nsq.Error)
"""
self._pub('dpub', topic, msg, delay_ms, callback=callback) |
def to_map_with_default(value, default_value):
"""
Converts JSON string into map object or returns default value when conversion is not possible.
:param value: the JSON string to convert.
:param default_value: the default value.
:return: Map object value or default when conversion is not supported.
"""
result = JsonConverter.to_nullable_map(value)
return result if result != None else default_value | Converts JSON string into map object or returns default value when conversion is not possible.
:param value: the JSON string to convert.
:param default_value: the default value.
:return: Map object value or default when conversion is not supported. | Below is the the instruction that describes the task:
### Input:
Converts JSON string into map object or returns default value when conversion is not possible.
:param value: the JSON string to convert.
:param default_value: the default value.
:return: Map object value or default when conversion is not supported.
### Response:
def to_map_with_default(value, default_value):
"""
Converts JSON string into map object or returns default value when conversion is not possible.
:param value: the JSON string to convert.
:param default_value: the default value.
:return: Map object value or default when conversion is not supported.
"""
result = JsonConverter.to_nullable_map(value)
return result if result != None else default_value |
def iter_files(root, exts=None, recursive=False):
"""
Iterate over file paths within root filtered by specified extensions.
:param compat.string_types root: Root folder to start collecting files
:param iterable exts: Restrict results to given file extensions
:param bool recursive: Wether to walk the complete directory tree
:rtype collections.Iterable[str]: absolute file paths with given extensions
"""
if exts is not None:
exts = set((x.lower() for x in exts))
def matches(e):
return (exts is None) or (e in exts)
if recursive is False:
for entry in compat.scandir(root):
if compat.has_scandir:
ext = splitext(entry.name)[-1].lstrip('.').lower()
if entry.is_file() and matches(ext):
yield entry.path
else:
ext = splitext(entry)[-1].lstrip('.').lower()
if not compat.isdir(entry) and matches(ext):
yield join(root, entry)
else:
for root, folders, files in compat.walk(root):
for f in files:
ext = splitext(f)[-1].lstrip('.').lower()
if matches(ext):
yield join(root, f) | Iterate over file paths within root filtered by specified extensions.
:param compat.string_types root: Root folder to start collecting files
:param iterable exts: Restrict results to given file extensions
:param bool recursive: Wether to walk the complete directory tree
:rtype collections.Iterable[str]: absolute file paths with given extensions | Below is the the instruction that describes the task:
### Input:
Iterate over file paths within root filtered by specified extensions.
:param compat.string_types root: Root folder to start collecting files
:param iterable exts: Restrict results to given file extensions
:param bool recursive: Wether to walk the complete directory tree
:rtype collections.Iterable[str]: absolute file paths with given extensions
### Response:
def iter_files(root, exts=None, recursive=False):
"""
Iterate over file paths within root filtered by specified extensions.
:param compat.string_types root: Root folder to start collecting files
:param iterable exts: Restrict results to given file extensions
:param bool recursive: Wether to walk the complete directory tree
:rtype collections.Iterable[str]: absolute file paths with given extensions
"""
if exts is not None:
exts = set((x.lower() for x in exts))
def matches(e):
return (exts is None) or (e in exts)
if recursive is False:
for entry in compat.scandir(root):
if compat.has_scandir:
ext = splitext(entry.name)[-1].lstrip('.').lower()
if entry.is_file() and matches(ext):
yield entry.path
else:
ext = splitext(entry)[-1].lstrip('.').lower()
if not compat.isdir(entry) and matches(ext):
yield join(root, entry)
else:
for root, folders, files in compat.walk(root):
for f in files:
ext = splitext(f)[-1].lstrip('.').lower()
if matches(ext):
yield join(root, f) |
def search_keyword_top(self, category=None, count=20, period='today'):
"""doc: http://open.youku.com/docs/doc?id=84
"""
url = 'https://openapi.youku.com/v2/searches/keyword/top.json'
params = {
'client_id': self.client_id,
'count': count,
'period': period
}
if category:
params['category'] = category
r = requests.get(url, params=params)
check_error(r)
return r.json() | doc: http://open.youku.com/docs/doc?id=84 | Below is the the instruction that describes the task:
### Input:
doc: http://open.youku.com/docs/doc?id=84
### Response:
def search_keyword_top(self, category=None, count=20, period='today'):
"""doc: http://open.youku.com/docs/doc?id=84
"""
url = 'https://openapi.youku.com/v2/searches/keyword/top.json'
params = {
'client_id': self.client_id,
'count': count,
'period': period
}
if category:
params['category'] = category
r = requests.get(url, params=params)
check_error(r)
return r.json() |
def resume(self, container_id=None, sudo=None):
    ''' Resume a stopped OciImage container, if it exists.

    Equivalent command line example:
        singularity oci resume <container_ID>

    Parameters
    ==========
    container_id: the id of the container to resume.
    sudo: Add sudo to the command. If the container was created by root,
          you need sudo to interact and get its state.

    Returns
    =======
    return_code: the return code to indicate if the container was resumed.
    '''
return self._state_command(container_id, command='resume', sudo=sudo) | resume a stopped OciImage container, if it exists
Equivalent command line example:
singularity oci resume <container_ID>
Parameters
==========
container_id: the id to stop.
sudo: Add sudo to the command. If the container was created by root,
you need sudo to interact and get its state.
Returns
=======
    return_code: the return code to indicate if the container was resumed. | Below is the instruction that describes the task:
### Input:
resume a stopped OciImage container, if it exists
Equivalent command line example:
singularity oci resume <container_ID>
Parameters
==========
container_id: the id to stop.
sudo: Add sudo to the command. If the container was created by root,
you need sudo to interact and get its state.
Returns
=======
return_code: the return code to indicate if the container was resumed.
### Response:
def resume(self, container_id=None, sudo=None):
''' resume a stopped OciImage container, if it exists
Equivalent command line example:
singularity oci resume <container_ID>
Parameters
==========
container_id: the id to stop.
sudo: Add sudo to the command. If the container was created by root,
you need sudo to interact and get its state.
Returns
=======
return_code: the return code to indicate if the container was resumed.
'''
return self._state_command(container_id, command='resume', sudo=sudo) |
def tick(self):
    """Clock tick called every self.inter

    Drains up to ``self.queueDepth`` events from the queue and ships
    them via ``sendEvents``. When the backend reports errors, or the
    send raises, the batch is pushed back onto ``self.events`` for a
    retry on a later tick. Generator-style ``yield`` -- presumably
    driven by Twisted ``inlineCallbacks``; confirm at the decorator.
    """
    if self.events:
        if self.queueDepth and (len(self.events) > self.queueDepth):
            # Remove maximum of self.queueDepth items from queue
            events = self.events[:self.queueDepth]
            self.events = self.events[self.queueDepth:]
        else:
            # Queue fits within one batch: take everything.
            events = self.events
            self.events = []
        try:
            result = yield self.sendEvents(events)
            if result.get('errors', False):
                log.msg(repr(result))
                self.events.extend(events)
        except Exception as e:
            log.msg('Could not connect to elasticsearch ' + str(e))
self.events.extend(events) | Clock tick called every self.inter | Below is the the instruction that describes the task:
### Input:
Clock tick called every self.inter
### Response:
def tick(self):
"""Clock tick called every self.inter
"""
if self.events:
if self.queueDepth and (len(self.events) > self.queueDepth):
# Remove maximum of self.queueDepth items from queue
events = self.events[:self.queueDepth]
self.events = self.events[self.queueDepth:]
else:
events = self.events
self.events = []
try:
result = yield self.sendEvents(events)
if result.get('errors', False):
log.msg(repr(result))
self.events.extend(events)
except Exception as e:
log.msg('Could not connect to elasticsearch ' + str(e))
self.events.extend(events) |
def calculateImpliedVolatility(
        self, contract: Contract,
        optionPrice: float, underPrice: float,
        implVolOptions: List[TagValue] = None) -> OptionComputation:
    """
    Calculate the volatility given the option price.

    This method is blocking.

    https://interactivebrokers.github.io/tws-api/option_computations.html

    Args:
        contract: Option contract.
        optionPrice: Option price to use in calculation.
        underPrice: Price of the underlier to use in calculation
        implVolOptions: Extra TagValue options forwarded with the
            request (accepted tags defined by the TWS API; see link).
    """
    # Blocking facade: drive the async variant to completion.
    return self._run(
        self.calculateImpliedVolatilityAsync(
contract, optionPrice, underPrice, implVolOptions)) | Calculate the volatility given the option price.
This method is blocking.
https://interactivebrokers.github.io/tws-api/option_computations.html
Args:
contract: Option contract.
optionPrice: Option price to use in calculation.
underPrice: Price of the underlier to use in calculation
    implVolOptions: Unknown | Below is the instruction that describes the task:
### Input:
Calculate the volatility given the option price.
This method is blocking.
https://interactivebrokers.github.io/tws-api/option_computations.html
Args:
contract: Option contract.
optionPrice: Option price to use in calculation.
underPrice: Price of the underlier to use in calculation
implVolOptions: Unknown
### Response:
def calculateImpliedVolatility(
self, contract: Contract,
optionPrice: float, underPrice: float,
implVolOptions: List[TagValue] = None) -> OptionComputation:
"""
Calculate the volatility given the option price.
This method is blocking.
https://interactivebrokers.github.io/tws-api/option_computations.html
Args:
contract: Option contract.
optionPrice: Option price to use in calculation.
underPrice: Price of the underlier to use in calculation
implVolOptions: Unknown
"""
return self._run(
self.calculateImpliedVolatilityAsync(
contract, optionPrice, underPrice, implVolOptions)) |
def xyz2stereonet(x, y, z):
    """
    Convert world cartesian coordinates (x, y, z) into lower-hemisphere
    stereonet coordinates.

    Parameters
    ----------
    x, y, z : array-likes
        Sequences of world coordinates

    Returns
    -------
    lon, lat : arrays
        Sequences of longitudes and latitudes (in radians)
    """
    # Promote scalars/sequences to 1-d arrays before the axis shuffle.
    x = np.atleast_1d(x)
    y = np.atleast_1d(y)
    z = np.atleast_1d(z)
return cart2sph(-z, x, y) | Converts x, y, z in _world_ cartesian coordinates into lower-hemisphere
stereonet coordinates.
Parameters
----------
x, y, z : array-likes
Sequences of world coordinates
Returns
-------
lon, lat : arrays
Sequences of longitudes and latitudes (in radians) | Below is the the instruction that describes the task:
### Input:
Converts x, y, z in _world_ cartesian coordinates into lower-hemisphere
stereonet coordinates.
Parameters
----------
x, y, z : array-likes
Sequences of world coordinates
Returns
-------
lon, lat : arrays
Sequences of longitudes and latitudes (in radians)
### Response:
def xyz2stereonet(x, y, z):
"""
Converts x, y, z in _world_ cartesian coordinates into lower-hemisphere
stereonet coordinates.
Parameters
----------
x, y, z : array-likes
Sequences of world coordinates
Returns
-------
lon, lat : arrays
Sequences of longitudes and latitudes (in radians)
"""
x, y, z = np.atleast_1d(x, y, z)
return cart2sph(-z, x, y) |
def name(self):
""" Returns the type name of the `Stream` field (read-only)."""
size = len(self)
if size > 0:
return self.item_type.name.capitalize() + str(size)
else:
return self.item_type.name.capitalize() | Returns the type name of the `Stream` field (read-only). | Below is the the instruction that describes the task:
### Input:
Returns the type name of the `Stream` field (read-only).
### Response:
def name(self):
""" Returns the type name of the `Stream` field (read-only)."""
size = len(self)
if size > 0:
return self.item_type.name.capitalize() + str(size)
else:
return self.item_type.name.capitalize() |
def reroot(self, rppr=None, pretend=False):
"""Reroot the phylogenetic tree.
This operation calls ``rppr reroot`` to generate the rerooted
tree, so you must have ``pplacer`` and its auxiliary tools
``rppr`` and ``guppy`` installed for it to work. You can
specify the path to ``rppr`` by giving it as the *rppr*
argument.
If *pretend* is ``True``, the convexification is run, but the
refpkg is not actually updated.
"""
with scratch_file(prefix='tree', suffix='.tre') as name:
# Use a specific path to rppr, otherwise rely on $PATH
subprocess.check_call([rppr or 'rppr', 'reroot',
'-c', self.path, '-o', name])
if not(pretend):
self.update_file('tree', name)
self._log('Rerooting refpkg') | Reroot the phylogenetic tree.
This operation calls ``rppr reroot`` to generate the rerooted
tree, so you must have ``pplacer`` and its auxiliary tools
``rppr`` and ``guppy`` installed for it to work. You can
specify the path to ``rppr`` by giving it as the *rppr*
argument.
If *pretend* is ``True``, the rerooting is run, but the
refpkg is not actually updated. | Below is the instruction that describes the task:
### Input:
Reroot the phylogenetic tree.
This operation calls ``rppr reroot`` to generate the rerooted
tree, so you must have ``pplacer`` and its auxiliary tools
``rppr`` and ``guppy`` installed for it to work. You can
specify the path to ``rppr`` by giving it as the *rppr*
argument.
If *pretend* is ``True``, the convexification is run, but the
refpkg is not actually updated.
### Response:
def reroot(self, rppr=None, pretend=False):
"""Reroot the phylogenetic tree.
This operation calls ``rppr reroot`` to generate the rerooted
tree, so you must have ``pplacer`` and its auxiliary tools
``rppr`` and ``guppy`` installed for it to work. You can
specify the path to ``rppr`` by giving it as the *rppr*
argument.
If *pretend* is ``True``, the convexification is run, but the
refpkg is not actually updated.
"""
with scratch_file(prefix='tree', suffix='.tre') as name:
# Use a specific path to rppr, otherwise rely on $PATH
subprocess.check_call([rppr or 'rppr', 'reroot',
'-c', self.path, '-o', name])
if not(pretend):
self.update_file('tree', name)
self._log('Rerooting refpkg') |
def newFromBehavior(name, id=None):
    """
    Given a name, return a behaviored ContentLine or Component.
    """
    name = name.upper()
    behavior = getBehavior(name, id)
    if behavior is None:
        raise VObjectError("No behavior found named %s" % name)
    # Components and plain content lines are constructed differently.
    obj = Component(name) if behavior.isComponent else ContentLine(name, [], '')
    obj.behavior = behavior
    obj.isNative = False
return obj | Given a name, return a behaviored ContentLine or Component. | Below is the the instruction that describes the task:
### Input:
Given a name, return a behaviored ContentLine or Component.
### Response:
def newFromBehavior(name, id=None):
"""
Given a name, return a behaviored ContentLine or Component.
"""
name = name.upper()
behavior = getBehavior(name, id)
if behavior is None:
raise VObjectError("No behavior found named %s" % name)
if behavior.isComponent:
obj = Component(name)
else:
obj = ContentLine(name, [], '')
obj.behavior = behavior
obj.isNative = False
return obj |
def _GetTempOutputFileHandles(self, value_type):
    """Returns the tracker for a given value type.

    Returns:
      A (tracker, created) pair; created is True when no tracker existed
      yet and a fresh one had to be built.
    """
    # EAFP: a missing key means no tracker exists for this type yet.
    try:
      return self.temp_output_trackers[value_type], False
    except KeyError:
return self._CreateOutputFileHandles(value_type), True | Returns the tracker for a given value type. | Below is the the instruction that describes the task:
### Input:
Returns the tracker for a given value type.
### Response:
def _GetTempOutputFileHandles(self, value_type):
"""Returns the tracker for a given value type."""
try:
return self.temp_output_trackers[value_type], False
except KeyError:
return self._CreateOutputFileHandles(value_type), True |
def serve_forever(args=None):
    """
    Create the JSON server and serve requests forever.

    :param args: Optional pre-parsed arguments for callers using their
        own argument parser. Default is None, letting JsonServer build
        its own parser and read the command line.
    """
    class _AutoFlush(object):
        """Stream proxy that flushes after every write."""
        def __init__(self, stream):
            self.stream = stream
        def __getattr__(self, attr):
            # Delegate everything else to the wrapped stream.
            return getattr(self.stream, attr)
        def write(self, data):
            self.stream.write(data)
            self.stream.flush()
    # Unbuffer stdout/stderr so server output shows up immediately.
    sys.stdout = _AutoFlush(sys.stdout)
    sys.stderr = _AutoFlush(sys.stderr)
    server = JsonServer(args=args)
server.serve_forever() | Creates the server and serves forever
:param args: Optional args if you decided to use your own
argument parser. Default is None to let the JsonServer setup its own
parser and parse command line arguments. | Below is the instruction that describes the task:
### Input:
Creates the server and serves forever
:param args: Optional args if you decided to use your own
argument parser. Default is None to let the JsonServer setup its own
parser and parse command line arguments.
### Response:
def serve_forever(args=None):
"""
Creates the server and serves forever
:param args: Optional args if you decided to use your own
argument parser. Default is None to let the JsonServer setup its own
parser and parse command line arguments.
"""
class Unbuffered(object):
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
server = JsonServer(args=args)
server.serve_forever() |
def weekday_series(self, start, end, weekday, return_date=False):
    """Generate a datetime series sharing the given ISO weekday number(s).

    ISO weekday number: Mon to Sun = 1 to 7

    Usage::

        >>> start, end = "2014-01-01 06:30:25", "2014-02-01 06:30:25"
        >>> rolex.weekday_series(start, end, weekday=2) # All Tuesday
        [
            datetime(2014, 1, 7, 6, 30, 25),
            datetime(2014, 1, 14, 6, 30, 25),
            datetime(2014, 1, 21, 6, 30, 25),
            datetime(2014, 1, 28, 6, 30, 25),
        ]

    :param weekday: int or list of int
    """
    start = self.parse_datetime(start)
    end = self.parse_datetime(end)
    if isinstance(weekday, integer_types):
        weekday = [weekday]
    # Filter the daily series down to the requested ISO weekday(s).
    series = [
        moment
        for moment in self.time_series(
            start, end, freq="1day", return_date=return_date)
        if moment.isoweekday() in weekday
    ]
return series | Generate a datetime series with same weekday number.
ISO weekday number: Mon to Sun = 1 to 7
Usage::
>>> start, end = "2014-01-01 06:30:25", "2014-02-01 06:30:25"
>>> rolex.weekday_series(start, end, weekday=2) # All Tuesday
[
datetime(2014, 1, 7, 6, 30, 25),
datetime(2014, 1, 14, 6, 30, 25),
datetime(2014, 1, 21, 6, 30, 25),
datetime(2014, 1, 28, 6, 30, 25),
]
:param weekday: int or list of int
**中文文档**
生成星期数一致的时间序列。 | Below is the the instruction that describes the task:
### Input:
Generate a datetime series with same weekday number.
ISO weekday number: Mon to Sun = 1 to 7
Usage::
>>> start, end = "2014-01-01 06:30:25", "2014-02-01 06:30:25"
>>> rolex.weekday_series(start, end, weekday=2) # All Tuesday
[
datetime(2014, 1, 7, 6, 30, 25),
datetime(2014, 1, 14, 6, 30, 25),
datetime(2014, 1, 21, 6, 30, 25),
datetime(2014, 1, 28, 6, 30, 25),
]
:param weekday: int or list of int
**中文文档**
生成星期数一致的时间序列。
### Response:
def weekday_series(self, start, end, weekday, return_date=False):
"""Generate a datetime series with same weekday number.
ISO weekday number: Mon to Sun = 1 to 7
Usage::
>>> start, end = "2014-01-01 06:30:25", "2014-02-01 06:30:25"
>>> rolex.weekday_series(start, end, weekday=2) # All Tuesday
[
datetime(2014, 1, 7, 6, 30, 25),
datetime(2014, 1, 14, 6, 30, 25),
datetime(2014, 1, 21, 6, 30, 25),
datetime(2014, 1, 28, 6, 30, 25),
]
:param weekday: int or list of int
**中文文档**
生成星期数一致的时间序列。
"""
start = self.parse_datetime(start)
end = self.parse_datetime(end)
if isinstance(weekday, integer_types):
weekday = [weekday, ]
series = list()
for i in self.time_series(
start, end, freq="1day", return_date=return_date):
if i.isoweekday() in weekday:
series.append(i)
return series |
def set(self, key: Any, value: Any) -> None:
        """Set ``self[key] = value``; a None key is silently ignored."""
        # Guard: None is not a permitted key.
        if key is not None:
self[key] = value | Sets the value of a key to a supplied value | Below is the the instruction that describes the task:
### Input:
Sets the value of a key to a supplied value
### Response:
def set(self, key: Any, value: Any) -> None:
""" Sets the value of a key to a supplied value """
if key is not None:
self[key] = value |
def config():
    '''
    Display fault manager configuration

    CLI Example:

    .. code-block:: bash

        salt '*' fmadm.config
    '''
    # NOTE: the previous unused local ``ret = {}`` has been removed.
    fmadm = _check_fmadm()
    cmd = '{cmd} config'.format(
        cmd=fmadm
    )
    res = __salt__['cmd.run_all'](cmd)
    retcode = res['retcode']
    result = {}
    if retcode != 0:
        # Non-zero exit code: report a generic error entry.
        result['Error'] = 'error executing fmadm config'
    else:
        result = _parse_fmadm_config(res['stdout'])
return result | Display fault manager configuration
CLI Example:
.. code-block:: bash
salt '*' fmadm.config | Below is the the instruction that describes the task:
### Input:
Display fault manager configuration
CLI Example:
.. code-block:: bash
salt '*' fmadm.config
### Response:
def config():
'''
Display fault manager configuration
CLI Example:
.. code-block:: bash
salt '*' fmadm.config
'''
ret = {}
fmadm = _check_fmadm()
cmd = '{cmd} config'.format(
cmd=fmadm
)
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
result = {}
if retcode != 0:
result['Error'] = 'error executing fmadm config'
else:
result = _parse_fmadm_config(res['stdout'])
return result |
def list_models(self, **kwargs):
        """
        List models.

        Lists Watson Knowledge Studio [custom
        models](https://cloud.ibm.com/docs/services/natural-language-understanding/customizing.html)
        that are deployed to your Natural Language Understanding service.

        :param dict headers: A `dict` containing the request headers
        :return: A `DetailedResponse` containing the result, headers and HTTP status code.
        :rtype: DetailedResponse
        """
        # Start with caller-supplied headers, then layer SDK headers on top.
        headers = {}
        if 'headers' in kwargs:
            headers.update(kwargs.get('headers'))
        headers.update(
            get_sdk_headers('natural-language-understanding', 'V1',
                            'list_models'))
        response = self.request(
            method='GET',
            url='/v1/models',
            headers=headers,
            params={'version': self.version},
            accept_json=True)
return response | List models.
Lists Watson Knowledge Studio [custom
models](https://cloud.ibm.com/docs/services/natural-language-understanding/customizing.html)
that are deployed to your Natural Language Understanding service.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse | Below is the the instruction that describes the task:
### Input:
List models.
Lists Watson Knowledge Studio [custom
models](https://cloud.ibm.com/docs/services/natural-language-understanding/customizing.html)
that are deployed to your Natural Language Understanding service.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
### Response:
def list_models(self, **kwargs):
"""
List models.
Lists Watson Knowledge Studio [custom
models](https://cloud.ibm.com/docs/services/natural-language-understanding/customizing.html)
that are deployed to your Natural Language Understanding service.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('natural-language-understanding', 'V1',
'list_models')
headers.update(sdk_headers)
params = {'version': self.version}
url = '/v1/models'
response = self.request(
method='GET',
url=url,
headers=headers,
params=params,
accept_json=True)
return response |
def metadata(self, map_id, secure=False):
        """Returns TileJSON metadata for a tileset.

        Parameters
        ----------
        map_id : str
            The map's unique identifier in the format username.id.
        secure : bool, optional
            The representation of the requested resources,
            where True indicates representation as HTTPS endpoints.
            The default value is False.

        Returns
        -------
        request.Response
            The response object with TileJSON metadata for the
            specified tileset.
        """
        # Expand the templated resource path for this tileset.
        uri = URITemplate(self.base_uri + "/{map_id}.json").expand(
            map_id=map_id)
        # An empty "secure" query flag requests HTTPS endpoints.
        query_parameters = {"secure": ""} if secure else {}
        response = self.session.get(uri, params=query_parameters)
        self.handle_http_error(response)
return response | Returns TileJSON metadata for a tileset.
Parameters
----------
map_id : str
The map's unique identifier in the format username.id.
secure : bool, optional
The representation of the requested resources,
where True indicates representation as HTTPS endpoints.
The default value is False.
Returns
-------
request.Response
The response object with TileJSON metadata for the
specified tileset. | Below is the the instruction that describes the task:
### Input:
Returns TileJSON metadata for a tileset.
Parameters
----------
map_id : str
The map's unique identifier in the format username.id.
secure : bool, optional
The representation of the requested resources,
where True indicates representation as HTTPS endpoints.
The default value is False.
Returns
-------
request.Response
The response object with TileJSON metadata for the
specified tileset.
### Response:
def metadata(self, map_id, secure=False):
"""Returns TileJSON metadata for a tileset.
Parameters
----------
map_id : str
The map's unique identifier in the format username.id.
secure : bool, optional
The representation of the requested resources,
where True indicates representation as HTTPS endpoints.
The default value is False.
Returns
-------
request.Response
The response object with TileJSON metadata for the
specified tileset.
"""
# Create dict to assist in building URI resource path.
path_values = dict(
map_id=map_id
)
# Build URI resource path.
path_part = "/{map_id}.json"
uri = URITemplate(self.base_uri + path_part).expand(**path_values)
# Build URI query parameters.
query_parameters = dict()
if secure:
query_parameters["secure"] = ""
# Send HTTP GET request.
response = self.session.get(uri, params=query_parameters)
self.handle_http_error(response)
return response |
def get_send_result(self):
        """Method to get the progress of work notice sending.

        Reads ``send_result`` from the JSON response of the last request
        (None when absent) and logs the request that produced it.
        """
        send_result = self.json_response.get("send_result", None)
        # Log which request/URL this progress value came from.
        self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return send_result | Method to get the progress of work notice sending. | Below is the the instruction that describes the task:
### Input:
Method to get the progress of work notice sending.
### Response:
def get_send_result(self):
"""Method to get the progress of work notice sending."""
send_result = self.json_response.get("send_result", None)
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return send_result |
def create(cls, name, dead_interval=40, hello_interval=10,
hello_interval_type='normal', dead_multiplier=1,
mtu_mismatch_detection=True, retransmit_interval=5,
router_priority=1, transmit_delay=1,
authentication_type=None, password=None,
key_chain_ref=None):
"""
Create custom OSPF interface settings profile
:param str name: name of interface settings
:param int dead_interval: in seconds
:param str hello_interval: in seconds
:param str hello_interval_type: \|normal\|fast_hello
:param int dead_multipler: fast hello packet multipler
:param bool mtu_mismatch_detection: True|False
:param int retransmit_interval: in seconds
:param int router_priority: set priority
:param int transmit_delay: in seconds
:param str authentication_type: \|password\|message_digest
:param str password: max 8 chars (required when
authentication_type='password')
:param str,Element key_chain_ref: OSPFKeyChain (required when
authentication_type='message_digest')
:raises CreateElementFailed: create failed with reason
:return: instance with meta
:rtype: OSPFInterfaceSetting
"""
json = {'name': name,
'authentication_type': authentication_type,
'password': password,
'key_chain_ref': element_resolver(key_chain_ref),
'dead_interval': dead_interval,
'dead_multiplier': dead_multiplier,
'hello_interval': hello_interval,
'hello_interval_type': hello_interval_type,
'mtu_mismatch_detection': mtu_mismatch_detection,
'retransmit_interval': retransmit_interval,
'router_priority': router_priority,
'transmit_delay': transmit_delay}
return ElementCreator(cls, json) | Create custom OSPF interface settings profile
:param str name: name of interface settings
:param int dead_interval: in seconds
:param str hello_interval: in seconds
:param str hello_interval_type: \|normal\|fast_hello
:param int dead_multipler: fast hello packet multipler
:param bool mtu_mismatch_detection: True|False
:param int retransmit_interval: in seconds
:param int router_priority: set priority
:param int transmit_delay: in seconds
:param str authentication_type: \|password\|message_digest
:param str password: max 8 chars (required when
authentication_type='password')
:param str,Element key_chain_ref: OSPFKeyChain (required when
authentication_type='message_digest')
:raises CreateElementFailed: create failed with reason
:return: instance with meta
:rtype: OSPFInterfaceSetting | Below is the the instruction that describes the task:
### Input:
Create custom OSPF interface settings profile
:param str name: name of interface settings
:param int dead_interval: in seconds
:param str hello_interval: in seconds
:param str hello_interval_type: \|normal\|fast_hello
:param int dead_multipler: fast hello packet multipler
:param bool mtu_mismatch_detection: True|False
:param int retransmit_interval: in seconds
:param int router_priority: set priority
:param int transmit_delay: in seconds
:param str authentication_type: \|password\|message_digest
:param str password: max 8 chars (required when
authentication_type='password')
:param str,Element key_chain_ref: OSPFKeyChain (required when
authentication_type='message_digest')
:raises CreateElementFailed: create failed with reason
:return: instance with meta
:rtype: OSPFInterfaceSetting
### Response:
def create(cls, name, dead_interval=40, hello_interval=10,
hello_interval_type='normal', dead_multiplier=1,
mtu_mismatch_detection=True, retransmit_interval=5,
router_priority=1, transmit_delay=1,
authentication_type=None, password=None,
key_chain_ref=None):
"""
Create custom OSPF interface settings profile
:param str name: name of interface settings
:param int dead_interval: in seconds
:param str hello_interval: in seconds
:param str hello_interval_type: \|normal\|fast_hello
:param int dead_multipler: fast hello packet multipler
:param bool mtu_mismatch_detection: True|False
:param int retransmit_interval: in seconds
:param int router_priority: set priority
:param int transmit_delay: in seconds
:param str authentication_type: \|password\|message_digest
:param str password: max 8 chars (required when
authentication_type='password')
:param str,Element key_chain_ref: OSPFKeyChain (required when
authentication_type='message_digest')
:raises CreateElementFailed: create failed with reason
:return: instance with meta
:rtype: OSPFInterfaceSetting
"""
json = {'name': name,
'authentication_type': authentication_type,
'password': password,
'key_chain_ref': element_resolver(key_chain_ref),
'dead_interval': dead_interval,
'dead_multiplier': dead_multiplier,
'hello_interval': hello_interval,
'hello_interval_type': hello_interval_type,
'mtu_mismatch_detection': mtu_mismatch_detection,
'retransmit_interval': retransmit_interval,
'router_priority': router_priority,
'transmit_delay': transmit_delay}
return ElementCreator(cls, json) |
def getPopUpURL(self, CorpNum, NTSConfirmNum, UserID=None):
        """Return the Hometax e-tax-invoice "view" popup URL.

        args
            CorpNum : Popbill member business registration number
            NTSConfirmNum : National Tax Service (NTS) confirmation number
            UserID : Popbill member user ID
        return
            popup URL for viewing the electronic tax invoice
        raise
            PopbillException
        """
        # ``is None`` instead of ``== None``; NTS confirmation numbers
        # are always exactly 24 characters long.
        if NTSConfirmNum is None or len(NTSConfirmNum) != 24:
            raise PopbillException(-99999999, "국세청승인번호(NTSConfirmNum)가 올바르지 않습니다.")
return self._httpget('/HomeTax/Taxinvoice/' + NTSConfirmNum + '/PopUp', CorpNum, UserID).url | 홈택스 전자세금계산서 보기 팝업 URL
args
CorpNum : 팝빌회원 사업자번호
NTSConfirmNum : 국세청 승인 번호
UserID : 팝빌회원 아이디
return
전자세금계산서 보기 팝업 URL 반환
raise
PopbillException | Below is the the instruction that describes the task:
### Input:
홈택스 전자세금계산서 보기 팝업 URL
args
CorpNum : 팝빌회원 사업자번호
NTSConfirmNum : 국세청 승인 번호
UserID : 팝빌회원 아이디
return
전자세금계산서 보기 팝업 URL 반환
raise
PopbillException
### Response:
def getPopUpURL(self, CorpNum, NTSConfirmNum, UserID=None):
""" 홈택스 전자세금계산서 보기 팝업 URL
args
CorpNum : 팝빌회원 사업자번호
NTSConfirmNum : 국세청 승인 번호
UserID : 팝빌회원 아이디
return
전자세금계산서 보기 팝업 URL 반환
raise
PopbillException
"""
if NTSConfirmNum == None or len(NTSConfirmNum) != 24:
raise PopbillException(-99999999, "국세청승인번호(NTSConfirmNum)가 올바르지 않습니다.")
return self._httpget('/HomeTax/Taxinvoice/' + NTSConfirmNum + '/PopUp', CorpNum, UserID).url |
async def get_access_token(self, oauth_verifier, request_token=None, loop=None, **params):
    """Get access_token from OAuth1 provider.

    :param oauth_verifier: the verifier string itself, or a mapping
        (e.g. a request's query dict) holding it under ``self.shared_key``.
    :param request_token: token from the authorize step; when given it
        must match ``self.oauth_token``.
    :returns: (access_token, access_token_secret, provider_data)
    """
    # Possibility to provide REQUEST DATA to the method
    if not isinstance(oauth_verifier, str) and self.shared_key in oauth_verifier:
        oauth_verifier = oauth_verifier[self.shared_key]
    # Guard against a mismatched or stale request token.
    if request_token and self.oauth_token != request_token:
        raise web.HTTPBadRequest(
            reason='Failed to obtain OAuth 1.0 access token. '
            'Request token is invalid')
    data = await self.request('POST', self.access_token_url, params={
        'oauth_verifier': oauth_verifier, 'oauth_token': request_token}, loop=loop)
    # Cache credentials on the client for subsequent signed calls.
    self.oauth_token = data.get('oauth_token')
    self.oauth_token_secret = data.get('oauth_token_secret')
return self.oauth_token, self.oauth_token_secret, data | Get access_token from OAuth1 provider.
:returns: (access_token, access_token_secret, provider_data) | Below is the the instruction that describes the task:
### Input:
Get access_token from OAuth1 provider.
:returns: (access_token, access_token_secret, provider_data)
### Response:
async def get_access_token(self, oauth_verifier, request_token=None, loop=None, **params):
"""Get access_token from OAuth1 provider.
:returns: (access_token, access_token_secret, provider_data)
"""
# Possibility to provide REQUEST DATA to the method
if not isinstance(oauth_verifier, str) and self.shared_key in oauth_verifier:
oauth_verifier = oauth_verifier[self.shared_key]
if request_token and self.oauth_token != request_token:
raise web.HTTPBadRequest(
reason='Failed to obtain OAuth 1.0 access token. '
'Request token is invalid')
data = await self.request('POST', self.access_token_url, params={
'oauth_verifier': oauth_verifier, 'oauth_token': request_token}, loop=loop)
self.oauth_token = data.get('oauth_token')
self.oauth_token_secret = data.get('oauth_token_secret')
return self.oauth_token, self.oauth_token_secret, data |
def get_harddisk_sleep():
    '''
    Display the amount of idle time until the hard disk sleeps.

    :return: A string representing the sleep settings for the hard disk
    :rtype: str

    CLI Example:

    .. code-block:: bash

        salt '*' power.get_harddisk_sleep
    '''
    # Shell out to systemsetup; the result is normalised below.
    ret = salt.utils.mac_utils.execute_return_result(
        'systemsetup -getharddisksleep')
return salt.utils.mac_utils.parse_return(ret) | Display the amount of idle time until the hard disk sleeps.
:return: A string representing the sleep settings for the hard disk
:rtype: str
CLI Example:
..code-block:: bash
salt '*' power.get_harddisk_sleep | Below is the the instruction that describes the task:
### Input:
Display the amount of idle time until the hard disk sleeps.
:return: A string representing the sleep settings for the hard disk
:rtype: str
CLI Example:
..code-block:: bash
salt '*' power.get_harddisk_sleep
### Response:
def get_harddisk_sleep():
'''
Display the amount of idle time until the hard disk sleeps.
:return: A string representing the sleep settings for the hard disk
:rtype: str
CLI Example:
..code-block:: bash
salt '*' power.get_harddisk_sleep
'''
ret = salt.utils.mac_utils.execute_return_result(
'systemsetup -getharddisksleep')
return salt.utils.mac_utils.parse_return(ret) |
def _get_head_types(pat):
    """ Accepts a pytree Pattern Node and returns a set
    of the pattern types which will match first. """
    if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
        # NodePatterns must either have no type and no content
        # or a type and content -- so they don't get any farther
        # Always return leafs
        if pat.type is None:
            raise _EveryNode
        return {pat.type}
    if isinstance(pat, pytree.NegatedPattern):
        if pat.content:
            return _get_head_types(pat.content)
        raise _EveryNode  # Negated Patterns don't have a type
    if isinstance(pat, pytree.WildcardPattern):
        # Recurse on each alternative subpattern of the wildcard.
        r = set()
        for p in pat.content:
            for x in p:
                r.update(_get_head_types(x))
        return r
raise Exception("Oh no! I don't understand pattern %s" %(pat)) | Accepts a pytree Pattern Node and returns a set
of the pattern types which will match first. | Below is the the instruction that describes the task:
### Input:
Accepts a pytree Pattern Node and returns a set
of the pattern types which will match first.
### Response:
def _get_head_types(pat):
""" Accepts a pytree Pattern Node and returns a set
of the pattern types which will match first. """
if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
# NodePatters must either have no type and no content
# or a type and content -- so they don't get any farther
# Always return leafs
if pat.type is None:
raise _EveryNode
return set([pat.type])
if isinstance(pat, pytree.NegatedPattern):
if pat.content:
return _get_head_types(pat.content)
raise _EveryNode # Negated Patterns don't have a type
if isinstance(pat, pytree.WildcardPattern):
# Recurse on each node in content
r = set()
for p in pat.content:
for x in p:
r.update(_get_head_types(x))
return r
raise Exception("Oh no! I don't understand pattern %s" %(pat)) |
def fcontext_delete_policy(name, filetype=None, sel_type=None, sel_user=None, sel_level=None):
'''
.. versionadded:: 2019.2.0
Deletes the SELinux policy for a given filespec and other optional parameters.
Returns the result of the call to semanage.
Note that you don't have to remove an entry before setting a new
one for a given filespec and filetype, as adding one with semanage
automatically overwrites a previously configured SELinux context.
name
filespec of the file or directory. Regex syntax is allowed.
file_type
The SELinux filetype specification. Use one of [a, f, d, c, b,
s, l, p]. See also ``man semanage-fcontext``. Defaults to 'a'
(all files).
sel_type
SELinux context type. There are many.
sel_user
SELinux user. Use ``semanage login -l`` to determine which ones
are available to you.
sel_level
The MLS range of the SELinux context.
CLI Example:
.. code-block:: bash
salt '*' selinux.fcontext_delete_policy my-policy
'''
return _fcontext_add_or_delete_policy('delete', name, filetype, sel_type, sel_user, sel_level) | .. versionadded:: 2019.2.0
Deletes the SELinux policy for a given filespec and other optional parameters.
Returns the result of the call to semanage.
Note that you don't have to remove an entry before setting a new
one for a given filespec and filetype, as adding one with semanage
automatically overwrites a previously configured SELinux context.
name
filespec of the file or directory. Regex syntax is allowed.
file_type
The SELinux filetype specification. Use one of [a, f, d, c, b,
s, l, p]. See also ``man semanage-fcontext``. Defaults to 'a'
(all files).
sel_type
SELinux context type. There are many.
sel_user
SELinux user. Use ``semanage login -l`` to determine which ones
are available to you.
sel_level
The MLS range of the SELinux context.
CLI Example:
.. code-block:: bash
salt '*' selinux.fcontext_delete_policy my-policy | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2019.2.0
Deletes the SELinux policy for a given filespec and other optional parameters.
Returns the result of the call to semanage.
Note that you don't have to remove an entry before setting a new
one for a given filespec and filetype, as adding one with semanage
automatically overwrites a previously configured SELinux context.
name
filespec of the file or directory. Regex syntax is allowed.
file_type
The SELinux filetype specification. Use one of [a, f, d, c, b,
s, l, p]. See also ``man semanage-fcontext``. Defaults to 'a'
(all files).
sel_type
SELinux context type. There are many.
sel_user
SELinux user. Use ``semanage login -l`` to determine which ones
are available to you.
sel_level
The MLS range of the SELinux context.
CLI Example:
.. code-block:: bash
salt '*' selinux.fcontext_delete_policy my-policy
### Response:
def fcontext_delete_policy(name, filetype=None, sel_type=None, sel_user=None, sel_level=None):
'''
.. versionadded:: 2019.2.0
Deletes the SELinux policy for a given filespec and other optional parameters.
Returns the result of the call to semanage.
Note that you don't have to remove an entry before setting a new
one for a given filespec and filetype, as adding one with semanage
automatically overwrites a previously configured SELinux context.
name
filespec of the file or directory. Regex syntax is allowed.
file_type
The SELinux filetype specification. Use one of [a, f, d, c, b,
s, l, p]. See also ``man semanage-fcontext``. Defaults to 'a'
(all files).
sel_type
SELinux context type. There are many.
sel_user
SELinux user. Use ``semanage login -l`` to determine which ones
are available to you.
sel_level
The MLS range of the SELinux context.
CLI Example:
.. code-block:: bash
salt '*' selinux.fcontext_delete_policy my-policy
'''
return _fcontext_add_or_delete_policy('delete', name, filetype, sel_type, sel_user, sel_level) |
def map_(input_layer, fn):
"""Maps the given function across this sequence.
To map an entire template across the sequence, use the `as_fn` method on the
template.
Args:
input_layer: The input tensor.
fn: A function of 1 argument that is applied to each item in the sequence.
Returns:
A new sequence Pretty Tensor.
Raises:
ValueError: If the input_layer does not hold a sequence.
"""
if not input_layer.is_sequence():
raise ValueError('Can only map a sequence.')
return [fn(x) for x in input_layer] | Maps the given function across this sequence.
To map an entire template across the sequence, use the `as_fn` method on the
template.
Args:
input_layer: The input tensor.
fn: A function of 1 argument that is applied to each item in the sequence.
Returns:
A new sequence Pretty Tensor.
Raises:
ValueError: If the input_layer does not hold a sequence. | Below is the instruction that describes the task:
### Input:
Maps the given function across this sequence.
To map an entire template across the sequence, use the `as_fn` method on the
template.
Args:
input_layer: The input tensor.
fn: A function of 1 argument that is applied to each item in the sequence.
Returns:
A new sequence Pretty Tensor.
Raises:
ValueError: If the input_layer does not hold a sequence.
### Response:
def map_(input_layer, fn):
"""Maps the given function across this sequence.
To map an entire template across the sequence, use the `as_fn` method on the
template.
Args:
input_layer: The input tensor.
fn: A function of 1 argument that is applied to each item in the sequence.
Returns:
A new sequence Pretty Tensor.
Raises:
ValueError: If the input_layer does not hold a sequence.
"""
if not input_layer.is_sequence():
raise ValueError('Can only map a sequence.')
return [fn(x) for x in input_layer] |
def dropna_columns(data: pd.DataFrame, max_na_values: int=0.15):
"""
Remove columns with more NA values than threshold level
:param data:
:param max_na_values: proportion threshold of max na values
:return:
"""
size = data.shape[0]
df_na = (data.isnull().sum()/size) >= max_na_values
data.drop(df_na[df_na].index, axis=1, inplace=True) | Remove columns with more NA values than threshold level
:param data:
:param max_na_values: proportion threshold of max na values
:return: | Below is the instruction that describes the task:
### Input:
Remove columns with more NA values than threshold level
:param data:
:param max_na_values: proportion threshold of max na values
:return:
### Response:
def dropna_columns(data: pd.DataFrame, max_na_values: int=0.15):
"""
Remove columns with more NA values than threshold level
:param data:
:param max_na_values: proportion threshold of max na values
:return:
"""
size = data.shape[0]
df_na = (data.isnull().sum()/size) >= max_na_values
data.drop(df_na[df_na].index, axis=1, inplace=True) |
def arg(*args, **kwargs):
"""
Dcorates a function or a class method to add to the argument parser
"""
def decorate(func):
"""
Decorate
"""
# we'll set the command name with the passed cmd_name argument, if
# exist, else the command name will be the function name
func.__cmd_name__ = kwargs.pop(
'cmd_name', getattr(func, '__cmd_name__', func.__name__))
# retrieve the class (SillyClass)
func.__cls__ = utils.check_class()
if not hasattr(func, '__arguments__'):
# if the funcion hasn't the __arguments__ yet, we'll setup them
# using get_functarguments.
func.__arguments__ = utils.get_functarguments(func)
if len(args) or len(kwargs):
# if we have some argument or keyword argument
# we'll try to get the destination name from the kwargs ('dest')
# else we'll use the last arg name as destination
arg_name = kwargs.get(
'dest', args[-1].lstrip('-').replace('-', '_'))
try:
# we try to get the command index.
idx = func.__named__.index(arg_name)
# and delete it from the named list
del func.__named__[idx]
# and delete it from the arguments list
del func.__arguments__[idx]
except ValueError:
pass
# append the args and kwargs to the function arguments list
func.__arguments__.append((args, kwargs,))
if func.__cls__ is None and isinstance(func, types.FunctionType):
# if the function don't have a class and is a FunctionType
# we'll add it directly to he commands list.
ap_ = ArgParseInator(skip_init=True)
if func.__cmd_name__ not in ap_.commands:
# we'll add it if not exists
ap_.commands[func.__cmd_name__] = func
return func
return decorate | Dcorates a function or a class method to add to the argument parser | Below is the instruction that describes the task:
### Input:
Dcorates a function or a class method to add to the argument parser
### Response:
def arg(*args, **kwargs):
"""
Dcorates a function or a class method to add to the argument parser
"""
def decorate(func):
"""
Decorate
"""
# we'll set the command name with the passed cmd_name argument, if
# exist, else the command name will be the function name
func.__cmd_name__ = kwargs.pop(
'cmd_name', getattr(func, '__cmd_name__', func.__name__))
# retrieve the class (SillyClass)
func.__cls__ = utils.check_class()
if not hasattr(func, '__arguments__'):
# if the funcion hasn't the __arguments__ yet, we'll setup them
# using get_functarguments.
func.__arguments__ = utils.get_functarguments(func)
if len(args) or len(kwargs):
# if we have some argument or keyword argument
# we'll try to get the destination name from the kwargs ('dest')
# else we'll use the last arg name as destination
arg_name = kwargs.get(
'dest', args[-1].lstrip('-').replace('-', '_'))
try:
# we try to get the command index.
idx = func.__named__.index(arg_name)
# and delete it from the named list
del func.__named__[idx]
# and delete it from the arguments list
del func.__arguments__[idx]
except ValueError:
pass
# append the args and kwargs to the function arguments list
func.__arguments__.append((args, kwargs,))
if func.__cls__ is None and isinstance(func, types.FunctionType):
# if the function don't have a class and is a FunctionType
# we'll add it directly to he commands list.
ap_ = ArgParseInator(skip_init=True)
if func.__cmd_name__ not in ap_.commands:
# we'll add it if not exists
ap_.commands[func.__cmd_name__] = func
return func
return decorate |
def _process_defpriv_part(defperms):
'''
Process part
'''
_tmp = {}
previous = None
for defperm in defperms:
if previous is None:
_tmp[_DEFAULT_PRIVILEGES_MAP[defperm]] = False
previous = _DEFAULT_PRIVILEGES_MAP[defperm]
else:
if defperm == '*':
_tmp[previous] = True
else:
_tmp[_DEFAULT_PRIVILEGES_MAP[defperm]] = False
previous = _DEFAULT_PRIVILEGES_MAP[defperm]
return _tmp | Process part | Below is the instruction that describes the task:
### Input:
Process part
### Response:
def _process_defpriv_part(defperms):
'''
Process part
'''
_tmp = {}
previous = None
for defperm in defperms:
if previous is None:
_tmp[_DEFAULT_PRIVILEGES_MAP[defperm]] = False
previous = _DEFAULT_PRIVILEGES_MAP[defperm]
else:
if defperm == '*':
_tmp[previous] = True
else:
_tmp[_DEFAULT_PRIVILEGES_MAP[defperm]] = False
previous = _DEFAULT_PRIVILEGES_MAP[defperm]
return _tmp |
def shall_skip(app, module, private):
"""Check if we want to skip this module.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module name
:type module: :class:`str`
:param private: True, if privates are allowed
:type private: :class:`bool`
"""
logger.debug('Testing if %s should be skipped.', module)
# skip if it has a "private" name and this is selected
if module != '__init__.py' and module.startswith('_') and \
not private:
logger.debug('Skip %s because its either private or __init__.', module)
return True
logger.debug('Do not skip %s', module)
return False | Check if we want to skip this module.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module name
:type module: :class:`str`
:param private: True, if privates are allowed
:type private: :class:`bool` | Below is the instruction that describes the task:
### Input:
Check if we want to skip this module.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module name
:type module: :class:`str`
:param private: True, if privates are allowed
:type private: :class:`bool`
### Response:
def shall_skip(app, module, private):
"""Check if we want to skip this module.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module name
:type module: :class:`str`
:param private: True, if privates are allowed
:type private: :class:`bool`
"""
logger.debug('Testing if %s should be skipped.', module)
# skip if it has a "private" name and this is selected
if module != '__init__.py' and module.startswith('_') and \
not private:
logger.debug('Skip %s because its either private or __init__.', module)
return True
logger.debug('Do not skip %s', module)
return False |
def get_response(self, **kwargs):
"""
Returns a Response object containing this object's status code and a
JSON object containing the key "error" with the value of this object's
error message in the body. Keyword args are passed through to
the Response.
"""
return Response(
json.dumps({"error": self.message}), # pylint: disable=exception-message-attribute
status_code=self.status_code,
content_type="application/json",
charset="utf-8",
**kwargs
) | Returns a Response object containing this object's status code and a
JSON object containing the key "error" with the value of this object's
error message in the body. Keyword args are passed through to
the Response. | Below is the instruction that describes the task:
### Input:
Returns a Response object containing this object's status code and a
JSON object containing the key "error" with the value of this object's
error message in the body. Keyword args are passed through to
the Response.
### Response:
def get_response(self, **kwargs):
"""
Returns a Response object containing this object's status code and a
JSON object containing the key "error" with the value of this object's
error message in the body. Keyword args are passed through to
the Response.
"""
return Response(
json.dumps({"error": self.message}), # pylint: disable=exception-message-attribute
status_code=self.status_code,
content_type="application/json",
charset="utf-8",
**kwargs
) |
def v_reference_choice(ctx, stmt):
"""Make sure that the default case exists"""
d = stmt.search_one('default')
if d is not None:
m = stmt.search_one('mandatory')
if m is not None and m.arg == 'true':
err_add(ctx.errors, stmt.pos, 'DEFAULT_AND_MANDATORY', ())
ptr = attrsearch(d.arg, 'arg', stmt.i_children)
if ptr is None:
err_add(ctx.errors, d.pos, 'DEFAULT_CASE_NOT_FOUND', d.arg)
else:
# make sure there are no mandatory nodes in the default case
def chk_no_defaults(s):
for c in s.i_children:
if c.keyword in ('leaf', 'choice'):
m = c.search_one('mandatory')
if m is not None and m.arg == 'true':
err_add(ctx.errors, c.pos,
'MANDATORY_NODE_IN_DEFAULT_CASE', ())
elif c.keyword in ('list', 'leaf-list'):
m = c.search_one('min-elements')
if m is not None and int(m.arg) > 0:
err_add(ctx.errors, c.pos,
'MANDATORY_NODE_IN_DEFAULT_CASE', ())
elif c.keyword == 'container':
p = c.search_one('presence')
if p == None or p.arg == 'false':
chk_no_defaults(c)
chk_no_defaults(ptr) | Make sure that the default case exists | Below is the instruction that describes the task:
### Input:
Make sure that the default case exists
### Response:
def v_reference_choice(ctx, stmt):
"""Make sure that the default case exists"""
d = stmt.search_one('default')
if d is not None:
m = stmt.search_one('mandatory')
if m is not None and m.arg == 'true':
err_add(ctx.errors, stmt.pos, 'DEFAULT_AND_MANDATORY', ())
ptr = attrsearch(d.arg, 'arg', stmt.i_children)
if ptr is None:
err_add(ctx.errors, d.pos, 'DEFAULT_CASE_NOT_FOUND', d.arg)
else:
# make sure there are no mandatory nodes in the default case
def chk_no_defaults(s):
for c in s.i_children:
if c.keyword in ('leaf', 'choice'):
m = c.search_one('mandatory')
if m is not None and m.arg == 'true':
err_add(ctx.errors, c.pos,
'MANDATORY_NODE_IN_DEFAULT_CASE', ())
elif c.keyword in ('list', 'leaf-list'):
m = c.search_one('min-elements')
if m is not None and int(m.arg) > 0:
err_add(ctx.errors, c.pos,
'MANDATORY_NODE_IN_DEFAULT_CASE', ())
elif c.keyword == 'container':
p = c.search_one('presence')
if p == None or p.arg == 'false':
chk_no_defaults(c)
chk_no_defaults(ptr) |
def _approxaAInv(self,Or,Op,Oz,ar,ap,az,interp=True):
"""
NAME:
_approxaAInv
PURPOSE:
return R,vR,... coordinates for a point based on the linear
approximation around the stream track
INPUT:
Or,Op,Oz,ar,ap,az - phase space coordinates in frequency-angle
space
interp= (True), if True, use the interpolated track
OUTPUT:
(R,vR,vT,z,vz,phi)
HISTORY:
2013-12-22 - Written - Bovy (IAS)
"""
if isinstance(Or,(int,float,numpy.float32,numpy.float64)): #Scalar input
Or= numpy.array([Or])
Op= numpy.array([Op])
Oz= numpy.array([Oz])
ar= numpy.array([ar])
ap= numpy.array([ap])
az= numpy.array([az])
#Calculate apar, angle offset along the stream
closestIndx= [self._find_closest_trackpointaA(Or[ii],Op[ii],Oz[ii],
ar[ii],ap[ii],az[ii],
interp=interp)\
for ii in range(len(Or))]
out= numpy.empty((6,len(Or)))
for ii in range(len(Or)):
dOa= numpy.empty(6)
if interp:
dOa[0]= Or[ii]-self._interpolatedObsTrackAA[closestIndx[ii],0]
dOa[1]= Op[ii]-self._interpolatedObsTrackAA[closestIndx[ii],1]
dOa[2]= Oz[ii]-self._interpolatedObsTrackAA[closestIndx[ii],2]
dOa[3]= ar[ii]-self._interpolatedObsTrackAA[closestIndx[ii],3]
dOa[4]= ap[ii]-self._interpolatedObsTrackAA[closestIndx[ii],4]
dOa[5]= az[ii]-self._interpolatedObsTrackAA[closestIndx[ii],5]
jacIndx= self._find_closest_trackpointaA(Or[ii],Op[ii],Oz[ii],
ar[ii],ap[ii],az[ii],
interp=False)
else:
dOa[0]= Or[ii]-self._ObsTrackAA[closestIndx[ii],0]
dOa[1]= Op[ii]-self._ObsTrackAA[closestIndx[ii],1]
dOa[2]= Oz[ii]-self._ObsTrackAA[closestIndx[ii],2]
dOa[3]= ar[ii]-self._ObsTrackAA[closestIndx[ii],3]
dOa[4]= ap[ii]-self._ObsTrackAA[closestIndx[ii],4]
dOa[5]= az[ii]-self._ObsTrackAA[closestIndx[ii],5]
jacIndx= closestIndx[ii]
# Find 2nd closest Jacobian point for smoothing
da= numpy.stack(\
numpy.meshgrid(_TWOPIWRAPS+ar[ii]-self._progenitor_angle[0],
_TWOPIWRAPS+ap[ii]-self._progenitor_angle[1],
_TWOPIWRAPS+az[ii]-self._progenitor_angle[2],
indexing='xy')).T\
.reshape((len(_TWOPIWRAPS)**3,3))
dapar= self._sigMeanSign\
*numpy.dot(da[numpy.argmin(numpy.linalg.norm(\
numpy.cross(da,self._dsigomeanProgDirection),
axis=1))],
self._dsigomeanProgDirection)
dmJacIndx= numpy.fabs(dapar-self._thetasTrack[jacIndx])
if jacIndx == 0:
jacIndx2= jacIndx+1
dmJacIndx2= numpy.fabs(dapar-self._thetasTrack[jacIndx+1])
elif jacIndx == self._nTrackChunks-1:
jacIndx2= jacIndx-1
dmJacIndx2= numpy.fabs(dapar-self._thetasTrack[jacIndx-1])
else:
dm1= numpy.fabs(dapar-self._thetasTrack[jacIndx-1])
dm2= numpy.fabs(dapar-self._thetasTrack[jacIndx+1])
if dm1 < dm2:
jacIndx2= jacIndx-1
dmJacIndx2= dm1
else:
jacIndx2= jacIndx+1
dmJacIndx2= dm2
ampJacIndx= dmJacIndx/(dmJacIndx+dmJacIndx2)
#Make sure the angles haven't wrapped around
if dOa[3] > numpy.pi:
dOa[3]-= 2.*numpy.pi
elif dOa[3] < -numpy.pi:
dOa[3]+= 2.*numpy.pi
if dOa[4] > numpy.pi:
dOa[4]-= 2.*numpy.pi
elif dOa[4] < -numpy.pi:
dOa[4]+= 2.*numpy.pi
if dOa[5] > numpy.pi:
dOa[5]-= 2.*numpy.pi
elif dOa[5] < -numpy.pi:
dOa[5]+= 2.*numpy.pi
#Apply closest jacobian
out[:,ii]= numpy.dot((1.-ampJacIndx)*self._allinvjacsTrack[jacIndx,:,:]
+ampJacIndx*self._allinvjacsTrack[jacIndx2,:,:],
dOa)
if interp:
out[:,ii]+= self._interpolatedObsTrack[closestIndx[ii]]
else:
out[:,ii]+= self._ObsTrack[closestIndx[ii]]
return out | NAME:
_approxaAInv
PURPOSE:
return R,vR,... coordinates for a point based on the linear
approximation around the stream track
INPUT:
Or,Op,Oz,ar,ap,az - phase space coordinates in frequency-angle
space
interp= (True), if True, use the interpolated track
OUTPUT:
(R,vR,vT,z,vz,phi)
HISTORY:
2013-12-22 - Written - Bovy (IAS) | Below is the instruction that describes the task:
### Input:
NAME:
_approxaAInv
PURPOSE:
return R,vR,... coordinates for a point based on the linear
approximation around the stream track
INPUT:
Or,Op,Oz,ar,ap,az - phase space coordinates in frequency-angle
space
interp= (True), if True, use the interpolated track
OUTPUT:
(R,vR,vT,z,vz,phi)
HISTORY:
2013-12-22 - Written - Bovy (IAS)
### Response:
def _approxaAInv(self,Or,Op,Oz,ar,ap,az,interp=True):
"""
NAME:
_approxaAInv
PURPOSE:
return R,vR,... coordinates for a point based on the linear
approximation around the stream track
INPUT:
Or,Op,Oz,ar,ap,az - phase space coordinates in frequency-angle
space
interp= (True), if True, use the interpolated track
OUTPUT:
(R,vR,vT,z,vz,phi)
HISTORY:
2013-12-22 - Written - Bovy (IAS)
"""
if isinstance(Or,(int,float,numpy.float32,numpy.float64)): #Scalar input
Or= numpy.array([Or])
Op= numpy.array([Op])
Oz= numpy.array([Oz])
ar= numpy.array([ar])
ap= numpy.array([ap])
az= numpy.array([az])
#Calculate apar, angle offset along the stream
closestIndx= [self._find_closest_trackpointaA(Or[ii],Op[ii],Oz[ii],
ar[ii],ap[ii],az[ii],
interp=interp)\
for ii in range(len(Or))]
out= numpy.empty((6,len(Or)))
for ii in range(len(Or)):
dOa= numpy.empty(6)
if interp:
dOa[0]= Or[ii]-self._interpolatedObsTrackAA[closestIndx[ii],0]
dOa[1]= Op[ii]-self._interpolatedObsTrackAA[closestIndx[ii],1]
dOa[2]= Oz[ii]-self._interpolatedObsTrackAA[closestIndx[ii],2]
dOa[3]= ar[ii]-self._interpolatedObsTrackAA[closestIndx[ii],3]
dOa[4]= ap[ii]-self._interpolatedObsTrackAA[closestIndx[ii],4]
dOa[5]= az[ii]-self._interpolatedObsTrackAA[closestIndx[ii],5]
jacIndx= self._find_closest_trackpointaA(Or[ii],Op[ii],Oz[ii],
ar[ii],ap[ii],az[ii],
interp=False)
else:
dOa[0]= Or[ii]-self._ObsTrackAA[closestIndx[ii],0]
dOa[1]= Op[ii]-self._ObsTrackAA[closestIndx[ii],1]
dOa[2]= Oz[ii]-self._ObsTrackAA[closestIndx[ii],2]
dOa[3]= ar[ii]-self._ObsTrackAA[closestIndx[ii],3]
dOa[4]= ap[ii]-self._ObsTrackAA[closestIndx[ii],4]
dOa[5]= az[ii]-self._ObsTrackAA[closestIndx[ii],5]
jacIndx= closestIndx[ii]
# Find 2nd closest Jacobian point for smoothing
da= numpy.stack(\
numpy.meshgrid(_TWOPIWRAPS+ar[ii]-self._progenitor_angle[0],
_TWOPIWRAPS+ap[ii]-self._progenitor_angle[1],
_TWOPIWRAPS+az[ii]-self._progenitor_angle[2],
indexing='xy')).T\
.reshape((len(_TWOPIWRAPS)**3,3))
dapar= self._sigMeanSign\
*numpy.dot(da[numpy.argmin(numpy.linalg.norm(\
numpy.cross(da,self._dsigomeanProgDirection),
axis=1))],
self._dsigomeanProgDirection)
dmJacIndx= numpy.fabs(dapar-self._thetasTrack[jacIndx])
if jacIndx == 0:
jacIndx2= jacIndx+1
dmJacIndx2= numpy.fabs(dapar-self._thetasTrack[jacIndx+1])
elif jacIndx == self._nTrackChunks-1:
jacIndx2= jacIndx-1
dmJacIndx2= numpy.fabs(dapar-self._thetasTrack[jacIndx-1])
else:
dm1= numpy.fabs(dapar-self._thetasTrack[jacIndx-1])
dm2= numpy.fabs(dapar-self._thetasTrack[jacIndx+1])
if dm1 < dm2:
jacIndx2= jacIndx-1
dmJacIndx2= dm1
else:
jacIndx2= jacIndx+1
dmJacIndx2= dm2
ampJacIndx= dmJacIndx/(dmJacIndx+dmJacIndx2)
#Make sure the angles haven't wrapped around
if dOa[3] > numpy.pi:
dOa[3]-= 2.*numpy.pi
elif dOa[3] < -numpy.pi:
dOa[3]+= 2.*numpy.pi
if dOa[4] > numpy.pi:
dOa[4]-= 2.*numpy.pi
elif dOa[4] < -numpy.pi:
dOa[4]+= 2.*numpy.pi
if dOa[5] > numpy.pi:
dOa[5]-= 2.*numpy.pi
elif dOa[5] < -numpy.pi:
dOa[5]+= 2.*numpy.pi
#Apply closest jacobian
out[:,ii]= numpy.dot((1.-ampJacIndx)*self._allinvjacsTrack[jacIndx,:,:]
+ampJacIndx*self._allinvjacsTrack[jacIndx2,:,:],
dOa)
if interp:
out[:,ii]+= self._interpolatedObsTrack[closestIndx[ii]]
else:
out[:,ii]+= self._ObsTrack[closestIndx[ii]]
return out |
def shell():
version_too_old = False
if sys.version_info[0] == 2:
if sys.version_info < (2, 7):
version_too_old = True
elif sys.version_info.major == 3 and sys.version_info < (3, 5):
version_too_old = True
if version_too_old:
print('Pyradio requires python 2.7 or 3.5+...')
sys.exit(1)
# set window title
try:
sys.stdout.write("\x1b]2;PyRadio: The Internet Radio player\x07")
except:
pass
requested_player = ''
parser = ArgumentParser(description="Curses based Internet radio player")
parser.add_argument("-s", "--stations", default='',
help="Use specified station CSV file.")
parser.add_argument("-p", "--play", nargs='?', default='False',
help="Start and play."
"The value is num station or empty for random.")
parser.add_argument("-u", "--use-player", default='',
help="Use specified player. "
"A comma-separated list can be used to specify detection order. "
"Supported players: mpv, mplayer, vlc.")
parser.add_argument("-a", "--add", action='store_true',
help="Add station to list.")
parser.add_argument("-ls", "--list-playlists", action='store_true',
help="List of available playlists in config dir.")
parser.add_argument("-l", "--list", action='store_true',
help="List of available stations in a playlist.")
parser.add_argument("-t", "--theme", default='', help="Use specified theme. ")
parser.add_argument("-scd", "--show-config-dir", action='store_true',
help="Print config directory location and exit.")
parser.add_argument("-ocd", "--open-config-dir", action='store_true',
help="Open config directory with default file manager.")
parser.add_argument("-d", "--debug", action='store_true',
help="Start pyradio in debug mode.")
args = parser.parse_args()
sys.stdout.flush()
pyradio_config = PyRadioConfig()
if args.show_config_dir:
print('PyRadio config dir: "{}"'.format(pyradio_config.stations_dir))
sys.exit()
if args.open_config_dir:
open_conf_dir(pyradio_config)
sys.exit()
if args.list_playlists:
pyradio_config.list_playlists()
sys.exit()
if args.list is False and args.add is False:
print('Reading config...')
ret = pyradio_config.read_config()
if ret == -1:
print('Error opening config: "{}"'.format(pyradio_config.config_file))
sys.exit(1)
elif ret == -2:
print('Config file is malformed: "{}"'.format(pyradio_config.config_file))
sys.exit(1)
if args.use_player != '':
requested_player = args.use_player
if args.list is False and args.add is False:
print('Reading playlist...')
sys.stdout.flush()
ret = pyradio_config.read_playlist_file(args.stations)
if ret < 0:
print_playlist_selection_error(args.stations, pyradio_config, ret)
# No need to parse the file if we add station
# Actually we do need to do so now, so that we
# handle 2-column vs. 3-column playlists
if args.add:
if sys.version_info < (3, 0):
params = raw_input("Enter the name: "), raw_input("Enter the url: "), raw_input("Enter the encoding (leave empty for 'utf-8'): ")
else:
params = input("Enter the name: "), input("Enter the url: "), input("Enter the encoding (leave empty for 'utf-8'): ")
msg = ('name', 'url')
for i, a_param in enumerate(params):
if i < 2:
if a_param.strip() == '':
print('** Error: No {} entered. Aborting...'.format(msg[i]))
sys.exit(1)
ret = pyradio_config.append_station(params, args.stations)
if ret < 0:
print_playlist_selection_error(args.stations, pyradio_config, ret)
sys.exit()
if args.list:
header_format_string, format_string = get_format_string(pyradio_config.stations)
header_string = header_format_string.format('[Name]','[URL]','[Encoding]')
print(header_string)
print(len(header_string) * '-')
for num, a_station in enumerate(pyradio_config.stations):
if a_station[2]:
encoding = a_station[2]
else:
encoding = pyradio_config.default_encoding
print(format_string.format(str(num+1), a_station[0], a_station[1], encoding))
sys.exit()
if args.debug:
__configureLogger()
print('Debug mode activated; printing messages to file: "~/pyradio.log"')
else:
''' Refer to https://docs.python.org/3.7/howto/logging.html
section "What happens if no configuration is provided"
'''
logging.raiseExceptions = False
logging.lastResort = None
if requested_player is '':
requested_player = pyradio_config.player
#else:
# pyradio_config.requested_player = requested_player
if args.play == 'False':
if args.stations == '':
args.play = pyradio_config.default_station
if args.play == '-1':
args.play = 'False'
theme_to_use = args.theme
if not theme_to_use:
theme_to_use = pyradio_config.theme
# Starts the radio gui.
pyradio = PyRadio(pyradio_config,
play=args.play,
req_player=requested_player,
theme=theme_to_use)
""" Setting ESCAPE key delay to 25ms
Refer to: https://stackoverflow.com/questions/27372068/why-does-the-escape-key-have-a-delay-in-python-curses"""
environ.setdefault('ESCDELAY', '25')
curses.wrapper(pyradio.setup)
if not pyradio.setup_return_status:
print('\nThis terminal can not display colors.\nPyRadio cannot function in such a terminal.\n') | Setting ESCAPE key delay to 25ms
Refer to: https://stackoverflow.com/questions/27372068/why-does-the-escape-key-have-a-delay-in-python-curses | Below is the instruction that describes the task:
### Input:
Setting ESCAPE key delay to 25ms
Refer to: https://stackoverflow.com/questions/27372068/why-does-the-escape-key-have-a-delay-in-python-curses
### Response:
def shell():
version_too_old = False
if sys.version_info[0] == 2:
if sys.version_info < (2, 7):
version_too_old = True
elif sys.version_info.major == 3 and sys.version_info < (3, 5):
version_too_old = True
if version_too_old:
print('Pyradio requires python 2.7 or 3.5+...')
sys.exit(1)
# set window title
try:
sys.stdout.write("\x1b]2;PyRadio: The Internet Radio player\x07")
except:
pass
requested_player = ''
parser = ArgumentParser(description="Curses based Internet radio player")
parser.add_argument("-s", "--stations", default='',
help="Use specified station CSV file.")
parser.add_argument("-p", "--play", nargs='?', default='False',
help="Start and play."
"The value is num station or empty for random.")
parser.add_argument("-u", "--use-player", default='',
help="Use specified player. "
"A comma-separated list can be used to specify detection order. "
"Supported players: mpv, mplayer, vlc.")
parser.add_argument("-a", "--add", action='store_true',
help="Add station to list.")
parser.add_argument("-ls", "--list-playlists", action='store_true',
help="List of available playlists in config dir.")
parser.add_argument("-l", "--list", action='store_true',
help="List of available stations in a playlist.")
parser.add_argument("-t", "--theme", default='', help="Use specified theme. ")
parser.add_argument("-scd", "--show-config-dir", action='store_true',
help="Print config directory location and exit.")
parser.add_argument("-ocd", "--open-config-dir", action='store_true',
help="Open config directory with default file manager.")
parser.add_argument("-d", "--debug", action='store_true',
help="Start pyradio in debug mode.")
args = parser.parse_args()
sys.stdout.flush()
pyradio_config = PyRadioConfig()
if args.show_config_dir:
print('PyRadio config dir: "{}"'.format(pyradio_config.stations_dir))
sys.exit()
if args.open_config_dir:
open_conf_dir(pyradio_config)
sys.exit()
if args.list_playlists:
pyradio_config.list_playlists()
sys.exit()
if args.list is False and args.add is False:
print('Reading config...')
ret = pyradio_config.read_config()
if ret == -1:
print('Error opening config: "{}"'.format(pyradio_config.config_file))
sys.exit(1)
elif ret == -2:
print('Config file is malformed: "{}"'.format(pyradio_config.config_file))
sys.exit(1)
if args.use_player != '':
requested_player = args.use_player
if args.list is False and args.add is False:
print('Reading playlist...')
sys.stdout.flush()
ret = pyradio_config.read_playlist_file(args.stations)
if ret < 0:
print_playlist_selection_error(args.stations, pyradio_config, ret)
# No need to parse the file if we add station
# Actually we do need to do so now, so that we
# handle 2-column vs. 3-column playlists
if args.add:
if sys.version_info < (3, 0):
params = raw_input("Enter the name: "), raw_input("Enter the url: "), raw_input("Enter the encoding (leave empty for 'utf-8'): ")
else:
params = input("Enter the name: "), input("Enter the url: "), input("Enter the encoding (leave empty for 'utf-8'): ")
msg = ('name', 'url')
for i, a_param in enumerate(params):
if i < 2:
if a_param.strip() == '':
print('** Error: No {} entered. Aborting...'.format(msg[i]))
sys.exit(1)
ret = pyradio_config.append_station(params, args.stations)
if ret < 0:
print_playlist_selection_error(args.stations, pyradio_config, ret)
sys.exit()
if args.list:
header_format_string, format_string = get_format_string(pyradio_config.stations)
header_string = header_format_string.format('[Name]','[URL]','[Encoding]')
print(header_string)
print(len(header_string) * '-')
for num, a_station in enumerate(pyradio_config.stations):
if a_station[2]:
encoding = a_station[2]
else:
encoding = pyradio_config.default_encoding
print(format_string.format(str(num+1), a_station[0], a_station[1], encoding))
sys.exit()
if args.debug:
__configureLogger()
print('Debug mode activated; printing messages to file: "~/pyradio.log"')
else:
''' Refer to https://docs.python.org/3.7/howto/logging.html
section "What happens if no configuration is provided"
'''
logging.raiseExceptions = False
logging.lastResort = None
if requested_player is '':
requested_player = pyradio_config.player
#else:
# pyradio_config.requested_player = requested_player
if args.play == 'False':
if args.stations == '':
args.play = pyradio_config.default_station
if args.play == '-1':
args.play = 'False'
theme_to_use = args.theme
if not theme_to_use:
theme_to_use = pyradio_config.theme
# Starts the radio gui.
pyradio = PyRadio(pyradio_config,
play=args.play,
req_player=requested_player,
theme=theme_to_use)
""" Setting ESCAPE key delay to 25ms
Refer to: https://stackoverflow.com/questions/27372068/why-does-the-escape-key-have-a-delay-in-python-curses"""
environ.setdefault('ESCDELAY', '25')
curses.wrapper(pyradio.setup)
if not pyradio.setup_return_status:
print('\nThis terminal can not display colors.\nPyRadio cannot function in such a terminal.\n') |
def get_health_monitor(self, loadbalancer):
"""
Returns a dict representing the health monitor for the load
balancer. If no monitor has been configured, returns an
empty dict.
"""
uri = "/loadbalancers/%s/healthmonitor" % utils.get_id(loadbalancer)
resp, body = self.api.method_get(uri)
return body.get("healthMonitor", {}) | Returns a dict representing the health monitor for the load
balancer. If no monitor has been configured, returns an
empty dict. | Below is the instruction that describes the task:
### Input:
Returns a dict representing the health monitor for the load
balancer. If no monitor has been configured, returns an
empty dict.
### Response:
def get_health_monitor(self, loadbalancer):
"""
Returns a dict representing the health monitor for the load
balancer. If no monitor has been configured, returns an
empty dict.
"""
uri = "/loadbalancers/%s/healthmonitor" % utils.get_id(loadbalancer)
resp, body = self.api.method_get(uri)
return body.get("healthMonitor", {}) |
def cleanup_codra_edus(self):
"""Remove leading/trailing '_!' from CODRA EDUs and unescape its double quotes."""
for leafpos in self.tree.treepositions('leaves'):
edu_str = self.tree[leafpos]
edu_str = EDU_START_RE.sub("", edu_str)
edu_str = TRIPLE_ESCAPE_RE.sub('"', edu_str)
edu_str = EDU_END_RE.sub("", edu_str)
        self.tree[leafpos] = edu_str | Remove leading/trailing '_!' from CODRA EDUs and unescape its double quotes. | Below is the instruction that describes the task:
### Input:
Remove leading/trailing '_!' from CODRA EDUs and unescape its double quotes.
### Response:
def cleanup_codra_edus(self):
"""Remove leading/trailing '_!' from CODRA EDUs and unescape its double quotes."""
for leafpos in self.tree.treepositions('leaves'):
edu_str = self.tree[leafpos]
edu_str = EDU_START_RE.sub("", edu_str)
edu_str = TRIPLE_ESCAPE_RE.sub('"', edu_str)
edu_str = EDU_END_RE.sub("", edu_str)
self.tree[leafpos] = edu_str |
def _get_rules_from_aws(self):
"""
Load the EC2 security rules off AWS into a list of dict.
Returns:
list
"""
list_of_rules = list()
if self.profile:
boto3.setup_default_session(profile_name=self.profile)
if self.region:
ec2 = boto3.client('ec2', region_name=self.region)
else:
ec2 = boto3.client('ec2')
security_groups = ec2.describe_security_groups(Filters=self.filters)
for group in security_groups['SecurityGroups']:
group_dict = dict()
group_dict['id'] = group['GroupId']
group_dict['name'] = group['GroupName']
group_dict['description'] = group.get('Description', None)
if (group.get('IpPermissions', None) or
group.get('IpPermissionsEgress', None)):
group_dict['rules'] = list()
for rule in group.get('IpPermissions', None):
rule_dict = self._build_rule(rule)
rule_dict['direction'] = "INGRESS"
group_dict['rules'].append(rule_dict)
for rule in group.get('IpPermissionsEgress', None):
rule_dict = self._build_rule(rule)
rule_dict['direction'] = "EGRESS"
group_dict['rules'].append(rule_dict)
list_of_rules.append(group_dict)
return list_of_rules | Load the EC2 security rules off AWS into a list of dict.
Returns:
list | Below is the instruction that describes the task:
### Input:
Load the EC2 security rules off AWS into a list of dict.
Returns:
list
### Response:
def _get_rules_from_aws(self):
"""
Load the EC2 security rules off AWS into a list of dict.
Returns:
list
"""
list_of_rules = list()
if self.profile:
boto3.setup_default_session(profile_name=self.profile)
if self.region:
ec2 = boto3.client('ec2', region_name=self.region)
else:
ec2 = boto3.client('ec2')
security_groups = ec2.describe_security_groups(Filters=self.filters)
for group in security_groups['SecurityGroups']:
group_dict = dict()
group_dict['id'] = group['GroupId']
group_dict['name'] = group['GroupName']
group_dict['description'] = group.get('Description', None)
if (group.get('IpPermissions', None) or
group.get('IpPermissionsEgress', None)):
group_dict['rules'] = list()
for rule in group.get('IpPermissions', None):
rule_dict = self._build_rule(rule)
rule_dict['direction'] = "INGRESS"
group_dict['rules'].append(rule_dict)
for rule in group.get('IpPermissionsEgress', None):
rule_dict = self._build_rule(rule)
rule_dict['direction'] = "EGRESS"
group_dict['rules'].append(rule_dict)
list_of_rules.append(group_dict)
return list_of_rules |
def library_supports_api(library_version, api_version, different_major_breaks_support=True):
"""
Returns whether api_version is supported by given library version.
E. g. library_version (1,3,21) returns True for api_version (1,3,21), (1,3,19), (1,3,'x'), (1,2,'x'), (1, 'x')
False for (1,3,24), (1,4,'x'), (2,'x')
different_major_breaks_support - if enabled and library and api major versions are different always return False
ex) with library_version (2,0,0) and for api_version(1,3,24) returns False if enabled, True if disabled
"""
assert isinstance(library_version, (tuple, list)) # won't work with e.g. generators
assert len(library_version) == 3
sequence_type = type(library_version) # assure we will compare same types
api_version = sequence_type(0 if num == 'x' else num for num in api_version)
if different_major_breaks_support and library_version[0] != api_version[0]:
return False
assert len(api_version) <= 3 # otherwise following comparision won't work as intended, e.g. (2, 0, 0) > (2, 0, 0, 0)
return library_version >= api_version | Returns whether api_version is supported by given library version.
E. g. library_version (1,3,21) returns True for api_version (1,3,21), (1,3,19), (1,3,'x'), (1,2,'x'), (1, 'x')
False for (1,3,24), (1,4,'x'), (2,'x')
different_major_breaks_support - if enabled and library and api major versions are different always return False
ex) with library_version (2,0,0) and for api_version(1,3,24) returns False if enabled, True if disabled | Below is the instruction that describes the task:
### Input:
Returns whether api_version is supported by given library version.
E. g. library_version (1,3,21) returns True for api_version (1,3,21), (1,3,19), (1,3,'x'), (1,2,'x'), (1, 'x')
False for (1,3,24), (1,4,'x'), (2,'x')
different_major_breaks_support - if enabled and library and api major versions are different always return False
ex) with library_version (2,0,0) and for api_version(1,3,24) returns False if enabled, True if disabled
### Response:
def library_supports_api(library_version, api_version, different_major_breaks_support=True):
"""
Returns whether api_version is supported by given library version.
E. g. library_version (1,3,21) returns True for api_version (1,3,21), (1,3,19), (1,3,'x'), (1,2,'x'), (1, 'x')
False for (1,3,24), (1,4,'x'), (2,'x')
different_major_breaks_support - if enabled and library and api major versions are different always return False
ex) with library_version (2,0,0) and for api_version(1,3,24) returns False if enabled, True if disabled
"""
assert isinstance(library_version, (tuple, list)) # won't work with e.g. generators
assert len(library_version) == 3
sequence_type = type(library_version) # assure we will compare same types
api_version = sequence_type(0 if num == 'x' else num for num in api_version)
if different_major_breaks_support and library_version[0] != api_version[0]:
return False
assert len(api_version) <= 3 # otherwise following comparision won't work as intended, e.g. (2, 0, 0) > (2, 0, 0, 0)
return library_version >= api_version |
def _set_alarm_sample(self, v, load=False):
"""
Setter method for alarm_sample, mapped from YANG variable /rmon/alarm_entry/alarm_sample (alarm-sample-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_alarm_sample is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_alarm_sample() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'delta': {'value': 2}, u'absolute': {'value': 1}},), is_leaf=True, yang_name="alarm-sample", rest_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'alt-name': u'type', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='alarm-sample-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """alarm_sample must be of a type compatible with alarm-sample-type""",
'defined-type': "brocade-rmon:alarm-sample-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'delta': {'value': 2}, u'absolute': {'value': 1}},), is_leaf=True, yang_name="alarm-sample", rest_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'alt-name': u'type', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='alarm-sample-type', is_config=True)""",
})
self.__alarm_sample = t
if hasattr(self, '_set'):
self._set() | Setter method for alarm_sample, mapped from YANG variable /rmon/alarm_entry/alarm_sample (alarm-sample-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_alarm_sample is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_alarm_sample() directly. | Below is the instruction that describes the task:
### Input:
Setter method for alarm_sample, mapped from YANG variable /rmon/alarm_entry/alarm_sample (alarm-sample-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_alarm_sample is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_alarm_sample() directly.
### Response:
def _set_alarm_sample(self, v, load=False):
"""
Setter method for alarm_sample, mapped from YANG variable /rmon/alarm_entry/alarm_sample (alarm-sample-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_alarm_sample is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_alarm_sample() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'delta': {'value': 2}, u'absolute': {'value': 1}},), is_leaf=True, yang_name="alarm-sample", rest_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'alt-name': u'type', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='alarm-sample-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """alarm_sample must be of a type compatible with alarm-sample-type""",
'defined-type': "brocade-rmon:alarm-sample-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'delta': {'value': 2}, u'absolute': {'value': 1}},), is_leaf=True, yang_name="alarm-sample", rest_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'alt-name': u'type', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='alarm-sample-type', is_config=True)""",
})
self.__alarm_sample = t
if hasattr(self, '_set'):
self._set() |
def GetPixelColorsOfRects(self, rects: list) -> list:
"""
rects: a list of rects, such as [(0,0,10,10), (10,10,20,20),(x,y,width,height)].
Return list, a list whose elements are ctypes.Array which is an iterable array of int values in argb.
"""
rects2 = [(x, y, x + width, y + height) for x, y, width, height in rects]
left, top, right, bottom = zip(*rects2)
left, top, right, bottom = min(left), min(top), max(right), max(bottom)
width, height = right - left, bottom - top
allColors = self.GetPixelColorsOfRect(left, top, width, height)
colorsOfRects = []
for x, y, w, h in rects:
x -= left
y -= top
colors = []
for row in range(h):
colors.extend(allColors[(y + row) * width + x:(y + row) * width + x + w])
colorsOfRects.append(colors)
return colorsOfRects | rects: a list of rects, such as [(0,0,10,10), (10,10,20,20),(x,y,width,height)].
Return list, a list whose elements are ctypes.Array which is an iterable array of int values in argb. | Below is the instruction that describes the task:
### Input:
rects: a list of rects, such as [(0,0,10,10), (10,10,20,20),(x,y,width,height)].
Return list, a list whose elements are ctypes.Array which is an iterable array of int values in argb.
### Response:
def GetPixelColorsOfRects(self, rects: list) -> list:
"""
rects: a list of rects, such as [(0,0,10,10), (10,10,20,20),(x,y,width,height)].
Return list, a list whose elements are ctypes.Array which is an iterable array of int values in argb.
"""
rects2 = [(x, y, x + width, y + height) for x, y, width, height in rects]
left, top, right, bottom = zip(*rects2)
left, top, right, bottom = min(left), min(top), max(right), max(bottom)
width, height = right - left, bottom - top
allColors = self.GetPixelColorsOfRect(left, top, width, height)
colorsOfRects = []
for x, y, w, h in rects:
x -= left
y -= top
colors = []
for row in range(h):
colors.extend(allColors[(y + row) * width + x:(y + row) * width + x + w])
colorsOfRects.append(colors)
return colorsOfRects |
def create_instances(configuration):
"""Create necessary class instances from a configuration with no argument to the constructor
:param dict configuration: configuration dict like in :attr:`~pyextdirect.configuration.Base.configuration`
:return: a class-instance mapping
:rtype: dict
"""
instances = {}
for methods in configuration.itervalues():
for element in methods.itervalues():
if not isinstance(element, tuple):
continue
cls, _ = element
if cls not in instances:
instances[cls] = cls()
return instances | Create necessary class instances from a configuration with no argument to the constructor
:param dict configuration: configuration dict like in :attr:`~pyextdirect.configuration.Base.configuration`
:return: a class-instance mapping
:rtype: dict | Below is the instruction that describes the task:
### Input:
Create necessary class instances from a configuration with no argument to the constructor
:param dict configuration: configuration dict like in :attr:`~pyextdirect.configuration.Base.configuration`
:return: a class-instance mapping
:rtype: dict
### Response:
def create_instances(configuration):
"""Create necessary class instances from a configuration with no argument to the constructor
:param dict configuration: configuration dict like in :attr:`~pyextdirect.configuration.Base.configuration`
:return: a class-instance mapping
:rtype: dict
"""
instances = {}
for methods in configuration.itervalues():
for element in methods.itervalues():
if not isinstance(element, tuple):
continue
cls, _ = element
if cls not in instances:
instances[cls] = cls()
return instances |
def get_containers_by_name(self, name):
"""
get all task which relative with task name
:param name: :class:`str`, task name
:return: :class:`list`, container list
"""
code, containers = self.get_containers()
if code != httplib.OK:
return []
return [container for container in containers if
any(map(lambda x: x.startswith(name), container.Names))] | get all task which relative with task name
:param name: :class:`str`, task name
:return: :class:`list`, container list | Below is the instruction that describes the task:
### Input:
get all task which relative with task name
:param name: :class:`str`, task name
:return: :class:`list`, container list
### Response:
def get_containers_by_name(self, name):
"""
get all task which relative with task name
:param name: :class:`str`, task name
:return: :class:`list`, container list
"""
code, containers = self.get_containers()
if code != httplib.OK:
return []
return [container for container in containers if
any(map(lambda x: x.startswith(name), container.Names))] |
def pverb(self, *args, **kwargs):
""" Console verbose message to STDOUT """
if not self.verbose:
return
        self.pstd(*args, **kwargs) | Console verbose message to STDOUT | Below is the instruction that describes the task:
### Input:
Console verbose message to STDOUT
### Response:
def pverb(self, *args, **kwargs):
""" Console verbose message to STDOUT """
if not self.verbose:
return
self.pstd(*args, **kwargs) |
def retry_failure_fab_dev_create(self, tenant_id, fw_data, fw_dict):
"""This module calls routine in fabric to retry the failure cases.
If device is not successfully cfg/uncfg, it calls the device manager
routine to cfg/uncfg the device.
"""
result = fw_data.get('result').split('(')[0]
is_fw_virt = self.is_device_virtual()
# Fabric portion
if result == fw_constants.RESULT_FW_CREATE_INIT:
name = dfa_dbm.DfaDBMixin.get_project_name(self, tenant_id)
ret = self.fabric.retry_failure(tenant_id, name, fw_dict,
is_fw_virt, result)
if not ret:
LOG.error("Retry failure returned fail for tenant %s",
tenant_id)
return
else:
result = fw_constants.RESULT_FW_CREATE_DONE
self.update_fw_db_final_result(fw_dict.get('fw_id'), result)
# Device portion
if result == fw_constants.RESULT_FW_CREATE_DONE:
if fw_data.get('device_status') != 'SUCCESS':
ret = self.create_fw_device(tenant_id, fw_dict.get('fw_id'),
fw_dict)
if ret:
self.fwid_attr[tenant_id].fw_drvr_created(True)
self.update_fw_db_dev_status(fw_dict.get('fw_id'),
'SUCCESS')
LOG.info("Retry failue return success for create"
" tenant %s", tenant_id) | This module calls routine in fabric to retry the failure cases.
If device is not successfully cfg/uncfg, it calls the device manager
routine to cfg/uncfg the device. | Below is the instruction that describes the task:
### Input:
This module calls routine in fabric to retry the failure cases.
If device is not successfully cfg/uncfg, it calls the device manager
routine to cfg/uncfg the device.
### Response:
def retry_failure_fab_dev_create(self, tenant_id, fw_data, fw_dict):
"""This module calls routine in fabric to retry the failure cases.
If device is not successfully cfg/uncfg, it calls the device manager
routine to cfg/uncfg the device.
"""
result = fw_data.get('result').split('(')[0]
is_fw_virt = self.is_device_virtual()
# Fabric portion
if result == fw_constants.RESULT_FW_CREATE_INIT:
name = dfa_dbm.DfaDBMixin.get_project_name(self, tenant_id)
ret = self.fabric.retry_failure(tenant_id, name, fw_dict,
is_fw_virt, result)
if not ret:
LOG.error("Retry failure returned fail for tenant %s",
tenant_id)
return
else:
result = fw_constants.RESULT_FW_CREATE_DONE
self.update_fw_db_final_result(fw_dict.get('fw_id'), result)
# Device portion
if result == fw_constants.RESULT_FW_CREATE_DONE:
if fw_data.get('device_status') != 'SUCCESS':
ret = self.create_fw_device(tenant_id, fw_dict.get('fw_id'),
fw_dict)
if ret:
self.fwid_attr[tenant_id].fw_drvr_created(True)
self.update_fw_db_dev_status(fw_dict.get('fw_id'),
'SUCCESS')
LOG.info("Retry failue return success for create"
" tenant %s", tenant_id) |
def change_format(self):
"""
Ask user for display format for floats and use it.
This function also checks whether the format is valid and emits
`sig_option_changed`.
"""
format, valid = QInputDialog.getText(self, _('Format'),
_("Float formatting"),
QLineEdit.Normal,
self.dataModel.get_format())
if valid:
format = str(format)
try:
format % 1.1
except:
msg = _("Format ({}) is incorrect").format(format)
QMessageBox.critical(self, _("Error"), msg)
return
if not format.startswith('%'):
msg = _("Format ({}) should start with '%'").format(format)
QMessageBox.critical(self, _("Error"), msg)
return
self.dataModel.set_format(format)
self.sig_option_changed.emit('dataframe_format', format) | Ask user for display format for floats and use it.
This function also checks whether the format is valid and emits
`sig_option_changed`. | Below is the instruction that describes the task:
### Input:
Ask user for display format for floats and use it.
This function also checks whether the format is valid and emits
`sig_option_changed`.
### Response:
def change_format(self):
"""
Ask user for display format for floats and use it.
This function also checks whether the format is valid and emits
`sig_option_changed`.
"""
format, valid = QInputDialog.getText(self, _('Format'),
_("Float formatting"),
QLineEdit.Normal,
self.dataModel.get_format())
if valid:
format = str(format)
try:
format % 1.1
except:
msg = _("Format ({}) is incorrect").format(format)
QMessageBox.critical(self, _("Error"), msg)
return
if not format.startswith('%'):
msg = _("Format ({}) should start with '%'").format(format)
QMessageBox.critical(self, _("Error"), msg)
return
self.dataModel.set_format(format)
self.sig_option_changed.emit('dataframe_format', format) |
def istr_type(istr):
"""
Given an "ion" specification, determine its "type", e.g. 1D, Events, etc.
"""
data = set(i.rstrip('0123456789') for i in tokens(istr))
has_events = not data.isdisjoint(istr_type_evts)
has_2d = not data.isdisjoint(istr_type_2d)
has_1d = data.difference(istr_type_evts).difference(istr_type_2d) != set()
if has_events and not (has_1d or has_2d):
return 'events'
elif has_1d and not has_events:
return '1d'
elif has_2d and not (has_events or has_1d):
return '2d'
else:
return None | Given an "ion" specification, determine its "type", e.g. 1D, Events, etc. | Below is the the instruction that describes the task:
### Input:
Given an "ion" specification, determine its "type", e.g. 1D, Events, etc.
### Response:
def istr_type(istr):
"""
Given an "ion" specification, determine its "type", e.g. 1D, Events, etc.
"""
data = set(i.rstrip('0123456789') for i in tokens(istr))
has_events = not data.isdisjoint(istr_type_evts)
has_2d = not data.isdisjoint(istr_type_2d)
has_1d = data.difference(istr_type_evts).difference(istr_type_2d) != set()
if has_events and not (has_1d or has_2d):
return 'events'
elif has_1d and not has_events:
return '1d'
elif has_2d and not (has_events or has_1d):
return '2d'
else:
return None |
def start(self, daemon = False):
"""Start the threads."""
self.daemon = daemon
self.io_threads = []
self.event_thread = EventDispatcherThread(self.event_dispatcher,
daemon = daemon, exc_queue = self.exc_queue)
self.event_thread.start()
for handler in self.io_handlers:
self._run_io_threads(handler)
for handler in self.timeout_handlers:
self._run_timeout_threads(handler) | Start the threads. | Below is the the instruction that describes the task:
### Input:
Start the threads.
### Response:
def start(self, daemon = False):
"""Start the threads."""
self.daemon = daemon
self.io_threads = []
self.event_thread = EventDispatcherThread(self.event_dispatcher,
daemon = daemon, exc_queue = self.exc_queue)
self.event_thread.start()
for handler in self.io_handlers:
self._run_io_threads(handler)
for handler in self.timeout_handlers:
self._run_timeout_threads(handler) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.