code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def fake2db_mysql_initiator(self, host, port, password, username, number_of_rows, name=None, custom=None):
    """Main entry point for the MySQL fake-data operation.

    Opens a connection (optionally to a named database), then either
    fills a single custom table and exits, or creates the standard
    tables and fills each of them with ``number_of_rows`` fake rows.
    """
    if name:
        cursor, conn = self.database_caller_creator(host, port, password,
                                                    username, name)
    else:
        cursor, conn = self.database_caller_creator(host, port, password,
                                                    username)

    if custom:
        # Custom mode: fill only the requested table, then terminate.
        self.custom_db_creator(number_of_rows, cursor, conn, custom)
        cursor.close()
        conn.close()
        sys.exit(0)

    for statement in self.mysql_table_creator().values():
        try:
            cursor.execute(statement)
            conn.commit()
        except mysql.connector.Error as err:
            logger.error(err.msg, extra=extra_information)
        else:
            logger.info("OK", extra=extra_information)
    logger.warning('Table creation ops finished', extra=extra_information)

    for filler in (self.data_filler_simple_registration,
                   self.data_filler_detailed_registration,
                   self.data_filler_company,
                   self.data_filler_user_agent,
                   self.data_filler_customer):
        filler(number_of_rows, cursor, conn)

    cursor.close()
    conn.close()
|
Main handler for the operation
|
def save_config(
    self, cmd="save configuration primary", confirm=False, confirm_response=""
):
    """Save the device configuration by delegating to the base class."""
    base = super(ExtremeExosBase, self)
    return base.save_config(
        cmd=cmd, confirm=confirm, confirm_response=confirm_response)
|
Saves configuration.
|
def view_plugins(category=None):
    """ return a view of the loaded plugin names and descriptions

    Parameters
    ----------
    category : None or str
        if str, apply for single plugin category

    Examples
    --------
    >>> from pprint import pprint
    >>> pprint(view_plugins())
    {'decoders': {}, 'encoders': {}, 'parsers': {}}
    >>> class DecoderPlugin(object):
    ...     plugin_name = 'example'
    ...     plugin_descript = 'a decoder for dicts containing _example_ key'
    ...     dict_signature = ('_example_',)
    ...
    >>> errors = load_plugin_classes([DecoderPlugin])
    >>> pprint(view_plugins())
    {'decoders': {'example': 'a decoder for dicts containing _example_ key'},
     'encoders': {},
     'parsers': {}}
    >>> view_plugins('decoders')
    {'example': 'a decoder for dicts containing _example_ key'}
    >>> unload_all_plugins()
    """
    # No category: map every category to {name: description}.
    if category is None:
        return {cat: {name: klass.plugin_descript
                      for name, klass in plugins.items()}
                for cat, plugins in _all_plugins.items()}

    plugins = _all_plugins[category]
    # Parser plugins additionally expose the file regex they match on.
    if category == 'parsers':
        return {name: {"descript": klass.plugin_descript,
                       "regex": klass.file_regex}
                for name, klass in plugins.items()}
    return {name: klass.plugin_descript for name, klass in plugins.items()}
|
return a view of the loaded plugin names and descriptions
Parameters
----------
category : None or str
if str, apply for single plugin category
Examples
--------
>>> from pprint import pprint
>>> pprint(view_plugins())
{'decoders': {}, 'encoders': {}, 'parsers': {}}
>>> class DecoderPlugin(object):
... plugin_name = 'example'
... plugin_descript = 'a decoder for dicts containing _example_ key'
... dict_signature = ('_example_',)
...
>>> errors = load_plugin_classes([DecoderPlugin])
>>> pprint(view_plugins())
{'decoders': {'example': 'a decoder for dicts containing _example_ key'},
'encoders': {},
'parsers': {}}
>>> view_plugins('decoders')
{'example': 'a decoder for dicts containing _example_ key'}
>>> unload_all_plugins()
|
def publish_message(self,
                    exchange,
                    routing_key,
                    properties,
                    body,
                    no_serialization=False,
                    no_encoding=False,
                    channel=None,
                    connection=None):
    """Publish a message to RabbitMQ on the same channel the original
    message was received on.
    By default, if you pass a non-string object to the body and the
    properties have a supported ``content_type`` set, the body will be
    auto-serialized in the specified ``content_type``.
    If the properties do not have a timestamp set, it will be set to the
    current time.
    If you specify a ``content_encoding`` in the properties and the
    encoding is supported, the body will be auto-encoded.
    Both of these behaviors can be disabled by setting
    ``no_serialization`` or ``no_encoding`` to ``True``.
    If you pass an unsupported content-type or content-encoding when using
    the auto-serialization and auto-encoding features, a :exc:`ValueError`
    will be raised.
    .. versionchanged:: 4.0.0
        The method returns a :py:class:`~tornado.concurrent.Future` if
        `publisher confirmations <https://www.rabbitmq.com/confirms.html>`_
        are enabled on for the connection. In addition, The ``channel``
        parameter is deprecated and will be removed in a future release.
    :param str exchange: The exchange to publish to
    :param str routing_key: The routing key to publish with
    :param dict properties: The message properties
    :param mixed body: The message body to publish
    :param bool no_serialization: Turn off auto-serialization of the body
    :param bool no_encoding: Turn off auto-encoding of the body
    :param str channel: **Deprecated in 4.0.0** Specify the connection
        parameter instead.
    :param str connection: The connection to use. If it is not
        specified, the channel that the message was delivered on is used.
    :rtype: tornado.concurrent.Future or None
    :raises: ValueError
    """
    # Auto-serialize the content if needed
    # NOTE(review): `unicode` is a Python 2 builtin; on Python 3 this
    # expression raises NameError unless `unicode` is aliased elsewhere
    # in the module -- confirm supported interpreter versions.
    is_string = (isinstance(body, str) or isinstance(body, bytes)
                 or isinstance(body, unicode))
    if not no_serialization and not is_string and \
            properties.get('content_type'):
        body = self._serialize(
            body, headers.parse_content_type(properties['content_type']))
    # Auto-encode the message body if needed
    if not no_encoding and \
            properties.get('content_encoding') in self._CODEC_MAP.keys():
        body = self._compress(
            body, self._CODEC_MAP[properties['content_encoding']])
    # The deprecated `channel` argument takes precedence over `connection`
    # when both are supplied.
    return super(SmartConsumer, self).publish_message(
        exchange, routing_key, properties, body, channel or connection)
|
Publish a message to RabbitMQ on the same channel the original
message was received on.
By default, if you pass a non-string object to the body and the
properties have a supported ``content_type`` set, the body will be
auto-serialized in the specified ``content_type``.
If the properties do not have a timestamp set, it will be set to the
current time.
If you specify a ``content_encoding`` in the properties and the
encoding is supported, the body will be auto-encoded.
Both of these behaviors can be disabled by setting
``no_serialization`` or ``no_encoding`` to ``True``.
If you pass an unsupported content-type or content-encoding when using
the auto-serialization and auto-encoding features, a :exc:`ValueError`
will be raised.
.. versionchanged:: 4.0.0
The method returns a :py:class:`~tornado.concurrent.Future` if
`publisher confirmations <https://www.rabbitmq.com/confirms.html>`_
are enabled on for the connection. In addition, The ``channel``
parameter is deprecated and will be removed in a future release.
:param str exchange: The exchange to publish to
:param str routing_key: The routing key to publish with
:param dict properties: The message properties
:param mixed body: The message body to publish
:param bool no_serialization: Turn off auto-serialization of the body
:param bool no_encoding: Turn off auto-encoding of the body
:param str channel: **Deprecated in 4.0.0** Specify the connection
parameter instead.
:param str connection: The connection to use. If it is not
specified, the channel that the message was delivered on is used.
:rtype: tornado.concurrent.Future or None
:raises: ValueError
|
def set_editor_doc(self, doc, force_refresh=False):
    """
    Show a docstring dictionary (computed by the introspection plugin
    for the Editor plugin) in the Help pane.
    """
    if self.locked and not force_refresh:
        return

    self.switch_to_editor_source()
    self._last_editor_doc = doc
    self.object_edit.setText(doc['obj_text'])

    # Rich (Sphinx-rendered) vs. plain-text display of the docstring.
    if self.rich_help:
        self.render_sphinx_doc(doc)
    else:
        self.set_plain_text(doc, is_code=False)

    # Suppress dock signals while (possibly) raising the Help pane.
    if self.dockwidget is not None:
        self.dockwidget.blockSignals(True)
    self.__eventually_raise_help(doc['docstring'], force=force_refresh)
    if self.dockwidget is not None:
        self.dockwidget.blockSignals(False)
|
Use the help plugin to show docstring dictionary computed
with introspection plugin from the Editor plugin
|
def is_valid_ip_pattern(ip):
    """Return True when *ip* matches the outline of an IPv4 address,
    where ``*`` is accepted as a wildcard in place of any octet."""
    # Substitute wildcards with a valid digit, then let the socket
    # library decide whether the result parses as IPv4.
    candidate = ip.replace('*', '1')
    try:
        socket.inet_aton(candidate)
    except socket.error:
        # Substituted string is not parseable as an IPv4 address.
        return False
    return True
|
Check whether a string matches the outline of an IPv4 address,
allowing "*" as a wildcard
|
def draw(self):
    """
    Renders ROC-AUC plot.
    Called internally by score, possibly more than once

    Returns
    -------
    ax : the axis with the plotted figure
    """
    colors = self.colors[0:len(self.classes_)]
    n_classes = len(colors)
    # Color shared by the micro/macro average curves (last class color);
    # hoisted out of the two branches below.
    avg_color = self.colors[len(self.classes_) - 1]

    # If it's a binary decision, plot the single ROC curve
    # (idiom fix: truthiness test instead of `== True`).
    if self._binary_decision:
        self.ax.plot(
            self.fpr[0], self.tpr[0],
            label='ROC for binary decision, AUC = {:0.2f}'.format(
                self.roc_auc[0]
            )
        )

    # If per-class plotting is requested, plot ROC curves for each class
    if self.per_class:
        for i, color in zip(range(n_classes), colors):
            self.ax.plot(
                self.fpr[i], self.tpr[i], color=color,
                label='ROC of class {}, AUC = {:0.2f}'.format(
                    self.classes_[i], self.roc_auc[i],
                )
            )

    # If requested, plot the ROC curve for the micro average
    if self.micro:
        self.ax.plot(
            self.fpr[MICRO], self.tpr[MICRO], linestyle="--",
            color=avg_color,
            label='micro-average ROC curve, AUC = {:0.2f}'.format(
                self.roc_auc["micro"],
            )
        )

    # If requested, plot the ROC curve for the macro average
    if self.macro:
        self.ax.plot(
            self.fpr[MACRO], self.tpr[MACRO], linestyle="--",
            color=avg_color,
            label='macro-average ROC curve, AUC = {:0.2f}'.format(
                self.roc_auc["macro"],
            )
        )

    # Plot the line of no discrimination to compare the curve to.
    self.ax.plot([0, 1], [0, 1], linestyle=':', c=LINE_COLOR)
    return self.ax
|
Renders ROC-AUC plot.
Called internally by score, possibly more than once
Returns
-------
ax : the axis with the plotted figure
|
def fmt_number(p):
    """Format a number for display.

    The value is rendered as a fraction when fraction printing is
    enabled and a very close fraction with a small "nice" denominator
    exists; otherwise it is rendered as a locale-aware decimal.
    """
    decimal = '{:n}'.format(p)
    if not config.PRINT_FRACTIONS:
        return decimal
    exact = Fraction(p)
    approx = exact.limit_denominator(128)
    is_nice = (abs(exact - approx) < constants.EPSILON
               and approx.denominator in NICE_DENOMINATORS)
    return str(approx) if is_nice else decimal
|
Format a number.
It will be printed as a fraction if the denominator isn't too big and as a
decimal otherwise.
|
def clear_callbacks(obj):
    """Detach every callback registered on *obj*."""
    current = obj._callbacks
    if isinstance(current, dllist):
        # Breaking the internal links of the doubly-linked list helps
        # the garbage collector reclaim the nodes.
        current.clear()
    obj._callbacks = None
|
Remove all callbacks from an object.
|
def get_option(self, key, subkey, in_path_none=False):
    """Get the current value of the option.

    :param str key: First identifier of the option.
    :param str subkey: Second identifier of the option.
    :param bool in_path_none: Allows for ``in_path`` values of
        :data:`None` to be retrieved.
    :return: Current value of the option (type varies).
    :raise:
        :NotRegisteredError: If ``key`` or ``subkey`` do not define
            any option.
        :ValueError: If a ``in_path`` type with :data:`None` value is
            requested.
    """
    key, subkey = _lower_keys(key, subkey)
    _entry_must_exist(self.gc, key, subkey)

    df = self.gc[(self.gc["k1"] == key) & (self.gc["k2"] == subkey)]
    # Hoist the repeated Series lookups: the matching row is unique, so
    # extract its type/value once instead of re-indexing per branch.
    opt_type = df["type"].values[0]
    value = df["value"].values[0]

    if opt_type == "bool":
        return bool(value)
    if opt_type == "int":
        return int(value)
    if opt_type == "path_in":
        if value is None and not in_path_none:
            raise ValueError('Unspecified path for {0}.{1}'.format(key,
                                                                   subkey))
        return value
    # Any other type is returned verbatim.
    return value
|
Get the current value of the option.
:param str key: First identifier of the option.
:param str subkey: Second identifier of the option.
:param bool in_path_none: Allows for ``in_path`` values of
:data:`None` to be retrieved.
:return: Current value of the option (type varies).
:raise:
:NotRegisteredError: If ``key`` or ``subkey`` do not define
any option.
:ValueError: If a ``in_path`` type with :data:`None` value is
requested.
|
def save_hdf(self, filename, path=''):
    """Save to .h5 file.

    Writes the long-period population under ``<path>/long`` and the
    short-period population under ``<path>/short``.
    """
    for label, population in (('long', self.orbpop_long),
                              ('short', self.orbpop_short)):
        population.save_hdf(filename, '{}/{}'.format(path, label))
|
Save to .h5 file.
|
def entity_delete(args):
    """Delete an entity in a workspace, after confirmation.

    :param args: parsed CLI namespace carrying ``entity_type``,
        ``entity``, ``project``, ``workspace`` and ``yes`` attributes.
    """
    msg = "WARNING: this will delete {0} {1} in {2}/{3}".format(
        args.entity_type, args.entity, args.project, args.workspace)
    # Skip the interactive prompt when --yes was given.
    if not (args.yes or _confirm_prompt(msg)):
        return
    json_body = [{"entityType": args.entity_type,
                  "entityName": args.entity}]
    r = fapi.delete_entities(args.project, args.workspace, json_body)
    fapi._check_response_code(r, 204)
    if fcconfig.verbosity:
        # BUG FIX: `args` has no `type` attribute (it is `entity_type`),
        # so the original line raised AttributeError; also fixed the
        # "Succesfully" typo in the output.
        print("Successfully deleted " + args.entity_type + " " + args.entity)
|
Delete entity in a workspace.
|
def t_heredocvar_ENCAPSED_AND_WHITESPACE(t):
    # PLY lexer rule: the docstring below IS the token's regular
    # expression and must not be edited.  It matches a run of literal
    # heredoc text -- any characters that do not begin a variable
    # interpolation ($name) or a curly interpolation ({$...}) --
    # optionally ending with a newline, or a lone (possibly escaped)
    # newline on its own.
    r'( [^\n\\${] | \\. | \$(?![A-Za-z_{]) | \{(?!\$) )+\n? | \\?\n'
    # Keep the lexer's line counter in sync with consumed newlines.
    t.lexer.lineno += t.value.count("\n")
    # Leave the heredoc-variable state, returning to the enclosing state.
    t.lexer.pop_state()
    return t
|
r'( [^\n\\${] | \\. | \$(?![A-Za-z_{]) | \{(?!\$) )+\n? | \\?\n
|
def append_to_arg_count(self, data):
    """
    Append one typed character to the numeric input argument.

    :param data: the typed digit (or leading minus sign) as string
    """
    assert data in '-0123456789'
    previous = self._arg

    if data == '-':
        # A minus sign may only appear at the start of the argument.
        assert previous is None or previous == '-'
        updated = data
    elif previous is None:
        updated = data
    else:
        updated = "%s%s" % (previous, data)

    self.input_processor.arg = updated
|
Add digit to the input argument.
:param data: the typed digit as string
|
def validate(self):
    """
    Validate the state of this XBlock.

    Field-level checks belong in validate_field_data(); override this
    method only for validation unrelated to the block's field values.
    """
    result = super(StudioEditableXBlockMixin, self).validate()
    self.validate_field_data(result, self)
    return result
|
Validates the state of this XBlock.
Subclasses should override validate_field_data() to validate fields and override this
only for validation not related to this block's field values.
|
def flash(self, duration=0.0):
    """ Flash a group by toggling it off and on again.

    :param duration: Pause between toggles (in seconds).
    """
    # Two toggles: state ends where it started.
    for _ in (0, 1):
        self.on = not self.on
        time.sleep(duration)
|
Flash a group.
:param duration: How quickly to flash (in seconds).
|
def fetch_post_data(self):
    '''
    Fetch POSTed form data.

    Returns a ``(post_data, ext_dic)`` tuple: request keys beginning
    with ``def_`` go into ``ext_dic``, every other key into
    ``post_data`` (plus the current user name), and ``ext_dic`` is
    merged with the handler's extra post data.
    '''
    args = self.request.arguments
    ext_dic = {key: self.get_argument(key)
               for key in args if key.startswith('def_')}
    post_data = {key: self.get_arguments(key)[0]
                 for key in args if not key.startswith('def_')}
    post_data['user_name'] = self.userinfo.user_name
    merged_ext = dict(ext_dic, **self.ext_post_data(postdata=post_data))
    return (post_data, merged_ext)
|
fetch post accessed data. post_data, and ext_dic.
|
def process_entry(self, defect_entry):
    """
    Process a given Defect entry with qualifiers given from initialization of class.
    Order of processing is:
        1) perform all possible defect corrections with information given
        2) consider delocalization analyses based on qualifier metrics
           given initialization of class. If delocalized, flag entry as delocalized
        3) update corrections to defect entry and flag as del

    Corrections are applied based on:
        i) if free charges are more than free_chg_cutoff then will not apply charge correction,
           because it no longer is applicable
        ii) use charge correction set by preferred_cc
        iii) only use BandFilling correction if use_bandfilling is set to True
        iv) only use BandEdgeShift correction if use_bandedgeshift is set to True
    """
    self.perform_all_corrections(defect_entry)
    self.delocalization_analysis(defect_entry)
    # apply corrections based on delocalization analysis
    corrections = {}
    skip_charge_corrections = False
    # Too many delocalized free carriers (holes at the VBM or electrons
    # at the CBM) make the charge correction inapplicable; skip it.
    if "num_hole_vbm" in defect_entry.parameters.keys():
        if (self.free_chg_cutoff < defect_entry.parameters["num_hole_vbm"]) or (
                self.free_chg_cutoff < defect_entry.parameters["num_elec_cbm"]):
            print('Will not use charge correction because too many free charges')
            # TODO: should the potential alignment correction still be used in this scenario?
            # TODO: with too much charge delocalized should probably not use?
            skip_charge_corrections = True
    if skip_charge_corrections:
        corrections.update({'charge_correction': 0.})
    else:
        # Prefer the Freysoldt correction when requested and its metadata
        # is available; otherwise fall back to Kumagai metadata.
        if ('freysoldt' in self.preferred_cc.lower()) and ('freysoldt_meta' in defect_entry.parameters.keys()):
            frey_meta = defect_entry.parameters['freysoldt_meta']
            frey_corr = frey_meta["freysoldt_electrostatic"] + frey_meta["freysoldt_potential_alignment_correction"]
            corrections.update({'charge_correction': frey_corr})
        elif ('kumagai_meta' in defect_entry.parameters.keys()):
            kumagai_meta = defect_entry.parameters['kumagai_meta']
            kumagai_corr = kumagai_meta["kumagai_electrostatic"] + \
                kumagai_meta["kumagai_potential_alignment_correction"]
            corrections.update({'charge_correction': kumagai_corr})
        else:
            print('Could not use any charge correction because insufficient metadata was supplied.')
    # Band-filling correction: only applied when enabled AND metadata exists.
    if self.use_bandfilling:
        if "bandfilling_meta" in defect_entry.parameters.keys():
            bfc_corr = defect_entry.parameters["bandfilling_meta"]["bandfilling_correction"]
            corrections.update({'bandfilling_correction': bfc_corr})
        else:
            print('Could not use band filling correction because insufficient metadata was supplied.')
    else:
        corrections.update({'bandfilling_correction': 0.})
    # Band-edge-shift correction; in every branch the phase-diagram
    # metadata (vbm/gap) is also refreshed -- hybrid values when the
    # shift is applied, plain vbm/cbm otherwise.
    if self.use_bandedgeshift:
        if "bandshift_meta" in defect_entry.parameters.keys():
            bandfill_meta = defect_entry.parameters["bandshift_meta"]
            bes_corr = bandfill_meta["vbm_shift_correction"] + bandfill_meta["hole_vbm_shift_correction"] + \
                bandfill_meta["elec_cbm_shift_correction"]
            corrections.update({'bandedgeshifting_correction': bes_corr})
            # also want to update relevant data for phase diagram
            defect_entry.parameters.update({
                'phasediagram_meta': {
                    'vbm': defect_entry.parameters['hybrid_vbm'],
                    'gap': defect_entry.parameters['hybrid_cbm'] - defect_entry.parameters['hybrid_vbm']
                }
            })
        else:
            print("Could not use band edge shifting correction because insufficient metadata was supplied.")
            defect_entry.parameters.update({
                'phasediagram_meta': {
                    'vbm': defect_entry.parameters['vbm'],
                    'gap': defect_entry.parameters['cbm'] - defect_entry.parameters['vbm']
                }
            })
    else:  # if not using bandedge shift -> still want to have vbm and gap ready for phase diagram
        corrections.update({'bandedgeshifting_correction': 0.})
        defect_entry.parameters.update({
            'phasediagram_meta': {
                'vbm': defect_entry.parameters['vbm'],
                'gap': defect_entry.parameters['cbm'] - defect_entry.parameters['vbm']
            }
        })
    defect_entry.corrections.update(corrections)
    return defect_entry
|
Process a given Defect entry with qualifiers given from initialization of class.
Order of processing is:
1) perform all possible defect corrections with information given
2) consider delocalization analyses based on qualifier metrics
given initialization of class. If delocalized, flag entry as delocalized
3) update corrections to defect entry and flag as del
Corrections are applied based on:
i) if free charges are more than free_chg_cutoff then will not apply charge correction,
because it no longer is applicable
ii) use charge correction set by preferred_cc
iii) only use BandFilling correction if use_bandfilling is set to True
iv) only use BandEdgeShift correction if use_bandedgeshift is set to True
|
def hashdata(self, subject):
    """Build and return the byte string that is hashed for this signature.

    The layout of the returned bytes depends on the signature type
    (binary/text document, certification, key binding, revocation, ...),
    following the quoted passages of RFC 4880 below, and always ends
    with the V4 trailer over the hashed signature fields.
    """
    _data = bytearray()
    # Text subjects are coerced to bytes first; 'charmap' maps code
    # points 0-255 straight to the corresponding byte values.
    if isinstance(subject, six.string_types):
        subject = subject.encode('charmap')

    """
    All signatures are formed by producing a hash over the signature
    data, and then using the resulting hash in the signature algorithm.
    """
    if self.type == SignatureType.BinaryDocument:
        """
        For binary document signatures (type 0x00), the document data is
        hashed directly.
        """
        if isinstance(subject, (SKEData, IntegrityProtectedSKEData)):
            _data += subject.__bytearray__()
        else:
            _data += bytearray(subject)

    if self.type == SignatureType.CanonicalDocument:
        """
        For text document signatures (type 0x01), the
        document is canonicalized by converting line endings to <CR><LF>,
        and the resulting data is hashed.
        """
        _data += re.subn(br'\r?\n', b'\r\n', subject)[0]

    if self.type in {SignatureType.Generic_Cert, SignatureType.Persona_Cert, SignatureType.Casual_Cert,
                     SignatureType.Positive_Cert, SignatureType.CertRevocation, SignatureType.Subkey_Binding,
                     SignatureType.PrimaryKey_Binding}:
        """
        When a signature is made over a key, the hash data starts with the
        octet 0x99, followed by a two-octet length of the key, and then body
        of the key packet. (Note that this is an old-style packet header for
        a key packet with two-octet length.) ...
        Key revocation signatures (types 0x20 and 0x28)
        hash only the key being revoked.
        """
        # Resolve the key whose packet body gets hashed: for a UID or a
        # subkey the parent (primary) key is used.
        _s = b''
        if isinstance(subject, PGPUID):
            _s = subject._parent.hashdata
        elif isinstance(subject, PGPKey) and not subject.is_primary:
            _s = subject._parent.hashdata
        elif isinstance(subject, PGPKey) and subject.is_primary:
            _s = subject.hashdata
        if len(_s) > 0:
            _data += b'\x99' + self.int_to_bytes(len(_s), 2) + _s

    if self.type in {SignatureType.Subkey_Binding, SignatureType.PrimaryKey_Binding}:
        """
        A subkey binding signature
        (type 0x18) or primary key binding signature (type 0x19) then hashes
        the subkey using the same format as the main key (also using 0x99 as
        the first octet).
        """
        if subject.is_primary:
            _s = subject.subkeys[self.signer].hashdata
        else:
            _s = subject.hashdata
        _data += b'\x99' + self.int_to_bytes(len(_s), 2) + _s

    if self.type in {SignatureType.KeyRevocation, SignatureType.SubkeyRevocation, SignatureType.DirectlyOnKey}:
        """
        The signature is calculated directly on the key being revoked.  A
        revoked key is not to be used.  Only revocation signatures by the
        key being revoked, or by an authorized revocation key, should be
        considered valid revocation signatures.
        Subkey revocation signature
        The signature is calculated directly on the subkey being revoked.
        A revoked subkey is not to be used.  Only revocation signatures
        by the top-level signature key that is bound to this subkey, or
        by an authorized revocation key, should be considered valid
        revocation signatures.
        - clarification from draft-ietf-openpgp-rfc4880bis-02:
        Primary key revocation signatures (type 0x20) hash
        only the key being revoked.  Subkey revocation signature (type 0x28)
        hash first the primary key and then the subkey being revoked
        Signature directly on a key
        This signature is calculated directly on a key.  It binds the
        information in the Signature subpackets to the key, and is
        appropriate to be used for subpackets that provide information
        about the key, such as the Revocation Key subpacket.  It is also
        appropriate for statements that non-self certifiers want to make
        about the key itself, rather than the binding between a key and a
        name.
        """
        if self.type == SignatureType.SubkeyRevocation:
            # hash the primary key first if this is a Subkey Revocation signature
            _s = subject.parent.hashdata
            _data += b'\x99' + self.int_to_bytes(len(_s), 2) + _s

        _s = subject.hashdata
        _data += b'\x99' + self.int_to_bytes(len(_s), 2) + _s

    if self.type in {SignatureType.Generic_Cert, SignatureType.Persona_Cert, SignatureType.Casual_Cert,
                     SignatureType.Positive_Cert, SignatureType.CertRevocation}:
        """
        A certification signature (type 0x10 through 0x13) hashes the User
        ID being bound to the key into the hash context after the above
        data. ...  A V4 certification
        hashes the constant 0xB4 for User ID certifications or the constant
        0xD1 for User Attribute certifications, followed by a four-octet
        number giving the length of the User ID or User Attribute data, and
        then the User ID or User Attribute data.
        ...
        The [certificate revocation] signature
        is computed over the same data as the certificate that it
        revokes, and should have a later creation date than that
        certificate.
        """
        _s = subject.hashdata
        if subject.is_uid:
            _data += b'\xb4'
        else:
            _data += b'\xd1'
        _data += self.int_to_bytes(len(_s), 4) + _s

    # if this is a new signature, do update_hlen
    if 0 in list(self._signature.signature):
        self._signature.update_hlen()

    """
    Once the data body is hashed, then a trailer is hashed. (...)
    A V4 signature hashes the packet body
    starting from its first field, the version number, through the end
    of the hashed subpacket data.  Thus, the fields hashed are the
    signature version, the signature type, the public-key algorithm, the
    hash algorithm, the hashed subpacket length, and the hashed
    subpacket body.
    V4 signatures also hash in a final trailer of six octets: the
    version of the Signature packet, i.e., 0x04; 0xFF; and a four-octet,
    big-endian number that is the length of the hashed data from the
    Signature packet (note that this number does not include these final
    six octets).
    """
    hcontext = bytearray()
    # For an embedded signature the version lives on the inner packet.
    hcontext.append(self._signature.header.version if not self.embedded else self._signature._sig.header.version)
    hcontext.append(self.type)
    hcontext.append(self.key_algorithm)
    hcontext.append(self.hash_algorithm)
    hcontext += self._signature.subpackets.__hashbytearray__()
    hlen = len(hcontext)
    _data += hcontext
    # Six-octet V4 trailer: version already included above via hcontext;
    # append 0x04 0xFF and the 4-byte big-endian hashed-data length.
    _data += b'\x04\xff'
    _data += self.int_to_bytes(hlen, 4)
    return bytes(_data)
|
All signatures are formed by producing a hash over the signature
data, and then using the resulting hash in the signature algorithm.
|
def local_filename(
        self,
        url=None,
        filename=None,
        decompress=False):
    """
    Return the filename that will be used inside the cache directory
    for the given URL/filename/decompress combination.
    """
    return common.build_local_filename(url, filename, decompress)
|
What local filename will we use within the cache directory
for the given URL/filename/decompress options.
|
def correspondent_id(self):
    """
    :returns: The id assigned to the correspondent of this message, or
        None when it cannot be determined from either the thread element
        or the correspondent profile.
    """
    try:
        return int(self._thread_element.attrib['data-personid'])
    except (ValueError, KeyError):
        pass
    try:
        return int(self.correspondent_profile.id)
    except Exception:
        # Best effort: the profile may be missing or malformed.  The
        # original bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; ``Exception`` keeps the best-effort intent
        # without masking interpreter-level signals.
        return None
|
:returns: The id assigned to the correspondent of this message.
|
def hourly_solar_radiation(self):
    """Three data collections containing hourly direct normal, diffuse
    horizontal, and global horizontal radiation.
    """
    direct, diffuse, global_h = self._sky_condition.radiation_values(
        self._location)

    make = self._get_daily_data_collections
    return (
        make(energyintensity.DirectNormalRadiation(), 'Wh/m2', direct),
        make(energyintensity.DiffuseHorizontalRadiation(), 'Wh/m2', diffuse),
        make(energyintensity.GlobalHorizontalRadiation(), 'Wh/m2', global_h),
    )
|
Three data collections containing hourly direct normal, diffuse horizontal,
and global horizontal radiation.
|
def write_gtiff_file(f_name, n_rows, n_cols, data, geotransform, srs, nodata_value,
                     gdal_type=GDT_Float32):
    """Output Raster to GeoTiff format file.

    Args:
        f_name: output gtiff file name.
        n_rows: Row count.
        n_cols: Col count.
        data: 2D array data.
        geotransform: geographic transformation.
        srs: coordinate system; an osr SpatialReference, or a WKT string
             (anything without an ``ExportToWkt`` method is passed as-is).
        nodata_value: nodata value.
        gdal_type (:obj:`pygeoc.raster.GDALDataType`): output raster data type,
            GDT_Float32 as default.
    """
    # Make sure the destination directory exists before creating the file.
    UtilClass.mkdir(os.path.dirname(FileClass.get_file_fullpath(f_name)))
    driver = gdal_GetDriverByName(str('GTiff'))
    try:
        ds = driver.Create(f_name, n_cols, n_rows, 1, gdal_type)
    except Exception:
        print('Cannot create output file %s' % f_name)
        return
    ds.SetGeoTransform(geotransform)
    try:
        ds.SetProjection(srs.ExportToWkt())
    except AttributeError:
        # BUG FIX: the original `except AttributeError or Exception:`
        # evaluated to `except AttributeError:` anyway -- spelled out
        # explicitly: an srs without ExportToWkt() is treated as WKT text.
        ds.SetProjection(srs)
    ds.GetRasterBand(1).SetNoDataValue(nodata_value)
    # if data contains numpy.nan, then replaced by nodata_value
    if isinstance(data, numpy.ndarray) and data.dtype in [numpy.dtype('int'),
                                                          numpy.dtype('float')]:
        data = numpy.where(numpy.isnan(data), nodata_value, data)
    ds.GetRasterBand(1).WriteArray(data)
    # Dropping the reference flushes and closes the GDAL dataset.
    ds = None
|
Output Raster to GeoTiff format file.
Args:
f_name: output gtiff file name.
n_rows: Row count.
n_cols: Col count.
data: 2D array data.
geotransform: geographic transformation.
srs: coordinate system.
nodata_value: nodata value.
gdal_type (:obj:`pygeoc.raster.GDALDataType`): output raster data type,
GDT_Float32 as default.
|
def is_valid_catalog(catalog, validator=None):
    """Check that a `data.json` file complies with the defined schema.

    Verifies that the catalog carries every mandatory field and that
    both mandatory and optional fields follow the structure defined in
    the schema.

    Args:
        catalog (str or dict): Catalog (dict, JSON or XLSX) to validate.

    Returns:
        bool: True when the catalog complies with the schema, else False.
    """
    catalog = readers.read_catalog(catalog)
    if not validator:
        # Catalogs may carry their own validator; otherwise build one.
        if hasattr(catalog, "validator"):
            validator = catalog.validator
        else:
            validator = create_validator()
    passes_schema = validator.is_valid(catalog)
    custom_errors = iter_custom_errors(catalog)
    return passes_schema and not any(True for _ in custom_errors)
|
Valida que un archivo `data.json` cumpla con el schema definido.
Chequea que el data.json tiene todos los campos obligatorios y que
tanto los campos obligatorios como los opcionales siguen la estructura
definida en el schema.
Args:
catalog (str o dict): Catálogo (dict, JSON o XLSX) a ser validado.
Returns:
bool: True si el data.json cumple con el schema, sino False.
|
def _fix_next_url(next_url):
"""Remove max=null parameter from URL.
Patch for Webex Teams Defect: 'next' URL returned in the Link headers of
the responses contain an errant 'max=null' parameter, which causes the
next request (to this URL) to fail if the URL is requested as-is.
This patch parses the next_url to remove the max=null parameter.
Args:
next_url(basestring): The 'next' URL to be parsed and cleaned.
Returns:
basestring: The clean URL to be used for the 'next' request.
Raises:
AssertionError: If the parameter types are incorrect.
ValueError: If 'next_url' does not contain a valid API endpoint URL
(scheme, netloc and path).
"""
next_url = str(next_url)
parsed_url = urllib.parse.urlparse(next_url)
if not parsed_url.scheme or not parsed_url.netloc or not parsed_url.path:
raise ValueError(
"'next_url' must be a valid API endpoint URL, minimally "
"containing a scheme, netloc and path."
)
if parsed_url.query:
query_list = parsed_url.query.split('&')
if 'max=null' in query_list:
query_list.remove('max=null')
warnings.warn("`max=null` still present in next-URL returned "
"from Webex Teams", RuntimeWarning)
new_query = '&'.join(query_list)
parsed_url = list(parsed_url)
parsed_url[4] = new_query
return urllib.parse.urlunparse(parsed_url)
|
Remove max=null parameter from URL.
Patch for Webex Teams Defect: 'next' URL returned in the Link headers of
the responses contain an errant 'max=null' parameter, which causes the
next request (to this URL) to fail if the URL is requested as-is.
This patch parses the next_url to remove the max=null parameter.
Args:
next_url(basestring): The 'next' URL to be parsed and cleaned.
Returns:
basestring: The clean URL to be used for the 'next' request.
Raises:
AssertionError: If the parameter types are incorrect.
ValueError: If 'next_url' does not contain a valid API endpoint URL
(scheme, netloc and path).
|
def get_region(self, ip):
    '''Return the region for *ip*, or the falsy lookup result when the
    address has no record.'''
    record = self.get_all(ip)
    if not record:
        return record
    return record.region
|
Get region
|
def choose_type(cls, content_type):
    """Map a content type onto the matching object type constant."""
    if content_type in cls.subdir_types:
        return cls.type_cls.SUBDIR
    return cls.type_cls.FILE
|
Choose object type from content type.
|
def plot_eeg_erp_topo(all_epochs, colors=None):
    """
    Plot butterfly plot.
    DOCS INCOMPLETE :(

    :param all_epochs: epochs container convertible by eeg_to_all_evokeds.
    :param colors: optional mapping of condition name -> plot color.
    :return: the figure produced by mne.viz.plot_evoked_topo.
    """
    all_evokeds = eeg_to_all_evokeds(all_epochs)

    # Group evokeds by condition.  The original walked the participants
    # twice (one pass to initialise the lists, one to fill them); a
    # single pass with setdefault is equivalent, including the resulting
    # key insertion order.
    data = {}
    for participant, epochs in all_evokeds.items():
        for cond, epoch in epochs.items():
            data.setdefault(cond, []).append(epoch)

    color_list = [] if colors is not None else None
    evokeds = []
    for condition, evoked in data.items():
        grand_average = mne.grand_average(evoked)
        grand_average.comment = condition
        evokeds.append(grand_average)
        if colors is not None:
            color_list.append(colors[condition])

    plot = mne.viz.plot_evoked_topo(evokeds, background_color="w",
                                    color=color_list)
    return(plot)
|
Plot butterfly plot.
DOCS INCOMPLETE :(
|
def gcp_conn(service, service_type='client', future_expiration_minutes=15):
    """Decorator factory that injects an authenticated GCP client.

    Args:
        service: name of the GCP service to build a client for.
        service_type: not currently used.
        future_expiration_minutes: requested credential lifetime in minutes.

    The wrapped function receives the client via the ``client`` keyword
    argument; credential-related kwargs are stripped before the call.
    """
    def decorator(f):
        @wraps(f)
        def decorated_function(*args, **kwargs):
            # Import here to avoid circular import issue
            from cloudaux.gcp.auth import get_client
            (conn_args, kwargs) = get_creds_from_kwargs(kwargs)
            # Bug fix: pass the caller-supplied expiration through instead
            # of the previously hard-coded 15 minutes.
            client_details, client = get_client(
                service, service_type=service_type,
                future_expiration_minutes=future_expiration_minutes,
                **conn_args)
            if client_details:
                kwargs = rewrite_kwargs(client_details['client_type'], kwargs,
                                        client_details['module_name'])
            kwargs['client'] = client
            return f(*args, **kwargs)
        return decorated_function
    return decorator
|
service_type: not currently used.
|
def get_target_dimensions(self):
    """
    Returns the target dimensions and calculates them if necessary.
    The target dimensions are display independent.

    :return: Target dimensions as a tuple (width, height)
    :rtype: (int, int)
    """
    # Lazily compute both dimensions on first access.
    if self.target_height is None:
        self._calculate_target_dimensions()
    width, height = self.target_width, self.target_height
    return int(width), int(height)
|
Returns the target dimensions and calculates them if necessary.
The target dimensions are display independent.
:return: Target dimensions as a tuple (width, height)
:rtype: (int, int)
|
def __parse_config(self):
    """ Invoke the config file parser. """
    # Prefer an explicitly passed --config path over the default file.
    config_source = self.args.config or self.config_file
    if self.should_parse_config and config_source:
        self.config = ConfigParser.SafeConfigParser()
        self.config.read(config_source)
|
Invoke the config file parser.
|
def lkendalltau(x, y):
    """
    Calculates Kendall's tau -- correlation of ordinal data.  Adapted
    from function kendl1 in Numerical Recipes.

    Fixes vs. the earlier adaptation:
      * pairs are enumerated with k starting at j+1, so self-pairs
        (j, j) are no longer miscounted as ties in y;
      * a pair tied in BOTH variables contributes to neither n1 nor n2
        (kendl1 semantics); previously it incremented n2;
      * the exact math.erfc is used instead of the erfcc approximation.

    Usage:   lkendalltau(x,y)
    Returns: Kendall's tau, two-tailed p-value
    """
    n1 = 0   # pairs not tied in x
    n2 = 0   # pairs not tied in y
    iss = 0  # concordant minus discordant pairs
    for j in range(len(x) - 1):
        for k in range(j + 1, len(y)):
            a1 = x[j] - x[k]
            a2 = y[j] - y[k]
            aa = a1 * a2
            if aa:  # neither variable has a tie for this pair
                n1 = n1 + 1
                n2 = n2 + 1
                if aa > 0:
                    iss = iss + 1
                else:
                    iss = iss - 1
            elif a1:  # tied in y only
                n1 = n1 + 1
            elif a2:  # tied in x only
                n2 = n2 + 1
    tau = iss / math.sqrt(n1 * n2)
    # Normal approximation for the null distribution of tau.
    svar = (4.0 * len(x) + 10.0) / (9.0 * len(x) * (len(x) - 1))
    z = tau / math.sqrt(svar)
    prob = math.erfc(abs(z) / 1.4142136)
    return tau, prob
|
Calculates Kendall's tau ... correlation of ordinal data. Adapted
from function kendl1 in Numerical Recipes. Needs good test-routine.@@@
Usage: lkendalltau(x,y)
Returns: Kendall's tau, two-tailed p-value
|
def get_bel_versions() -> List[str]:
    """Get BEL Language versions supported

    Get the list of all BEL Language versions supported. The file this depends
    on is generated by belspec_yaml2json and is kept up to date using
    `make update_ebnf` or `make update_parsers`. You can also run `belspec_yaml2json`
    directly as it's added as a command by pip install.

    Returns:
        List[str]: list of versions
    """
    spec_dir = config["bel"]["lang"]["specifications"]
    versions_fn = "{}/versions.json".format(spec_dir)
    with open(versions_fn, "r") as version_file:
        return json.load(version_file)
|
Get BEL Language versions supported
Get the list of all BEL Language versions supported. The file this depends
on is generated by belspec_yaml2json and is kept up to date using
`make update_ebnf` or `make update_parsers`. You can also run `belspec_yaml2json`
directly as it's added as a command by pip install.
Returns:
List[str]: list of versions
|
def export(self, top=True):
    """Exports object to its string representation.

    Args:
        top (bool): if True appends `internal_name` before values.
            All non list objects should be exported with value top=True,
            all list objects, that are embedded in as fields inlist objects
            should be exported with `top`=False

    Returns:
        str: The objects string representation
    """
    fields = [
        self._to_str(self.typical_or_extreme_period_name),
        self._to_str(self.typical_or_extreme_period_type),
        self._to_str(self.period_start_day),
        self._to_str(self.period_end_day),
    ]
    if top:
        fields.insert(0, self._internal_name)
    return ",".join(fields)
|
Exports object to its string representation.
Args:
top (bool): if True appends `internal_name` before values.
All non list objects should be exported with value top=True,
all list objects, that are embedded in as fields inlist objects
should be exported with `top`=False
Returns:
str: The objects string representation
|
def split_data(self):
    """ Split data according to baseline and projection time period values.

    Slices ``self.original_data`` (presumably a pandas DataFrame with a
    sliceable, e.g. datetime, index -- TODO confirm) into baseline input
    and output series, drops any excluded sub-periods, and pre-validates
    that the projection periods can be sliced without error.
    """
    try:
        # Extract data ranging in time_period1
        time_period1 = (slice(self.baseline_period[0], self.baseline_period[1]))
        self.baseline_in = self.original_data.loc[time_period1, self.input_col]
        self.baseline_out = self.original_data.loc[time_period1, self.output_col]
        if self.exclude_time_period:
            # exclude_time_period is a flat list of (start, end) pairs.
            for i in range(0, len(self.exclude_time_period), 2):
                # Drop data ranging in exclude_time_period1
                exclude_time_period1 = (slice(self.exclude_time_period[i], self.exclude_time_period[i+1]))
                self.baseline_in.drop(self.baseline_in.loc[exclude_time_period1].index, axis=0, inplace=True)
                self.baseline_out.drop(self.baseline_out.loc[exclude_time_period1].index, axis=0, inplace=True)
    except Exception as e:
        raise e
    # CHECK: Can optimize this part
    # Error checking to ensure time_period values are valid
    if self.projection_period:
        # Slice each (start, end) projection pair purely to surface
        # indexing errors early; the results are intentionally discarded.
        for i in range(0, len(self.projection_period), 2):
            period = (slice(self.projection_period[i], self.projection_period[i+1]))
            try:
                self.original_data.loc[period, self.input_col]
                self.original_data.loc[period, self.output_col]
            except Exception as e:
                raise e
|
Split data according to baseline and projection time period values.
|
def check_engine(handle):
    """Check availability of requested template engine."""
    if handle == 'help':
        dump_engines()
        sys.exit(0)
    if handle in engines.engines:
        return
    print('Engine "%s" is not available.' % (handle,), file=sys.stderr)
    sys.exit(1)
|
Check availability of requested template engine.
|
def _checkpoint_and_erase(self, trial):
    """Checkpoints the model and erases old checkpoints
    if needed.

    Saves the trial's state via its remote runner, then trims the
    checkpoint history so at most ``trial.keep_checkpoints_num`` entries
    remain.  ``trial.history`` is kept newest-first (new values are
    inserted at index 0), so ``history[-1]`` is the oldest checkpoint.

    Parameters
    ----------
    trial : trial to save
    """
    with warn_if_slow("save_to_disk"):
        # Block until the remote save completes and record its value.
        trial._checkpoint.value = ray.get(trial.runner.save.remote())
    # Over the retention limit: delete the oldest on-disk checkpoint
    # remotely before dropping it from the history list.
    if len(trial.history) >= trial.keep_checkpoints_num:
        ray.get(trial.runner.delete_checkpoint.remote(trial.history[-1]))
        trial.history.pop()
    trial.history.insert(0, trial._checkpoint.value)
|
Checkpoints the model and erases old checkpoints
if needed.
Parameters
----------
trial : trial to save
|
def sample_given_context(self, c, c_dims):
    '''
    Sample the region with max progress among regions that have the same context

    c: context value on c_dims dimensions
    c_dims: w.r.t sensory space dimensions
    '''
    region_index = self.discrete_progress.sample_given_context(c, c_dims, self.space)
    full_sample = self.space.rand_value(region_index).flatten()
    # Keep only the dimensions NOT fixed by the context.
    free_dims = list(set(range(len(self.space.cardinalities))) - set(c_dims))
    return full_sample[free_dims]
|
Sample the region with max progress among regions that have the same context
c: context value on c_dims dimensions
c_dims: w.r.t sensory space dimensions
|
def registration(uri):
    """Responses handler registration.

    Registers a handler for a given URI with Responses
    so that it can be intercepted and handed to
    Stack-In-A-Box.

    :param uri: URI used for the base of the HTTP requests
    :returns: n/a
    """
    # log the URI that is used to access the Stack-In-A-Box services
    logger.debug('Registering Stack-In-A-Box at {0} under Python Responses'
                 .format(uri))
    # tell Stack-In-A-Box what URI to match with
    StackInABox.update_uri(uri)
    # Build the regex for the URI and register all HTTP verbs
    # with Responses.  Bug fix: use a raw string so ``\d`` is a regex
    # digit class rather than an invalid string escape (a
    # DeprecationWarning on Python 3.6+, an error in future versions).
    regex = re.compile(r'(http)?s?(://)?{0}:?(\d+)?/'.format(uri),
                       re.I)
    METHODS = [
        responses.DELETE,
        responses.GET,
        responses.HEAD,
        responses.OPTIONS,
        responses.PATCH,
        responses.POST,
        responses.PUT
    ]
    for method in METHODS:
        responses.add_callback(method,
                               regex,
                               callback=responses_callback)
|
Responses handler registration.
Registers a handler for a given URI with Responses
so that it can be intercepted and handed to
Stack-In-A-Box.
:param uri: URI used for the base of the HTTP requests
:returns: n/a
|
def normalize(X, mean=None, std=None):
    """Standardize X column-wise.

    When either `mean` or `std` is omitted, both are recomputed from X so
    the result has zero mean and unit standard deviation per column.
    """
    stats_missing = mean is None or std is None
    if stats_missing:
        mean, std = X.mean(0), X.std(0)
    return (X - mean) / std
|
Normalize X. If mean OR std is None, normalizes
X to have mean 0 and std 1.
|
def column_max_width(self, column_number):
    """Return the maximum width of a column based on the current terminal width.

    :param int column_number: The column number to query.
    :return: The max width of the column.
    :rtype: int
    """
    # Widths of each column's widest cell content.
    inner_widths = max_dimensions(self.table_data)[0]
    outer_border = 2 if self.outer_border else 0
    inner_border = 1 if self.inner_column_border else 0
    padding = self.padding_left + self.padding_right
    # NOTE(review): this call is not recursive -- inside a method a bare
    # name resolves to module globals, so this invokes a module-level
    # column_max_width() helper with the same name (it takes different
    # arguments); confirm the helper exists at module scope.
    return column_max_width(inner_widths, column_number, outer_border, inner_border, padding)
|
Return the maximum width of a column based on the current terminal width.
:param int column_number: The column number to query.
:return: The max width of the column.
:rtype: int
|
def inflate_analysis_group(self, identifier, definition):
    """
    Inflate a whole analysis group.

    An analysis group is a section defined in the YAML file.

    Args:
        identifier (str): the group identifier.
        definition (list/dict): the group definition.

    Returns:
        AnalysisGroup: an instance of AnalysisGroup.

    Raises:
        ValueError: when identifier targets a plugin of a certain type,
        and the definition does not contain the entry for the
        other-type plugins (providers <-> checkers).
    """
    # Pull the explicit provider/checker sub-definitions out first; what
    # remains in `definition` describes either a plugin or a named group.
    providers_definition = definition.pop('providers', None)
    checkers_definition = definition.pop('checkers', None)
    analysis_group = AnalysisGroup()
    try:
        # Case 1: the identifier itself names a plugin.  Its type decides
        # which side (checker/provider) it fills and which side must be
        # declared explicitly.
        first_plugin = self.inflate_plugin(identifier, definition)
        if isinstance(first_plugin, Checker):
            analysis_group.checkers.append(first_plugin)
            if providers_definition is None:
                raise ValueError(
                    'when declaring an analysis group with a checker '
                    'identifier, you must also declare providers with '
                    'the "providers" key.')
            analysis_group.providers.extend(
                self.inflate_providers(providers_definition))
        elif isinstance(first_plugin, Provider):
            analysis_group.providers.append(first_plugin)
            if checkers_definition is None:
                raise ValueError(
                    'when declaring an analysis group with a provider '
                    'identifier, you must also declare checkers with '
                    'the "checkers" key.')
            analysis_group.checkers.extend(
                self.inflate_checkers(checkers_definition))
    except PluginNotFoundError as e:
        # Case 2: the identifier is not a plugin -- treat it as a plain
        # group name and inflate both sides from the definition.
        logger.warning(
            'Could not find any plugin identified by %s, '
            'considering entry as group name. Exception: %s.',
            identifier, e)
        analysis_group.name = definition.pop('name', identifier)
        analysis_group.description = definition.pop('description', None)
        # Both sides or neither: a one-sided named group is invalid.
        if bool(providers_definition) != bool(checkers_definition):
            raise ValueError(
                'when declaring an analysis group with a name, you must '
                'either declare both "providers" and "checkers" or none.')
        if providers_definition and checkers_definition:
            analysis_group.providers.extend(
                self.inflate_providers(providers_definition))
            analysis_group.checkers.extend(
                self.inflate_checkers(checkers_definition))
        self.cleanup_definition(definition)
        # Any leftover entries are treated as no-dependency checkers.
        for nd_identifier, nd_definition in definition.items():
            analysis_group.checkers.append(
                self.inflate_nd_checker(nd_identifier, nd_definition))
    return analysis_group
|
Inflate a whole analysis group.
An analysis group is a section defined in the YAML file.
Args:
identifier (str): the group identifier.
definition (list/dict): the group definition.
Returns:
AnalysisGroup: an instance of AnalysisGroup.
Raises:
ValueError: when identifier targets a plugin of a certain type,
and the definition does not contain the entry for the
other-type plugins (providers <-> checkers).
|
def enqueue_task(self, task):
    """Enqueues a task directly. This is used when a task is retried or if
    a task was manually created.

    Note that this does not store the task.

    Returns a TaskResult handle for the (queued or already-executed) task.
    """
    data = dumps(task)
    if self._async:
        # Asynchronous mode: publish the pickled payload to the
        # configured topic and return immediately.
        self.publisher_client.publish(self.topic_path, data=data)
        logger.info('Task {} queued.'.format(task.id))
    else:
        # Synchronous mode: round-trip through the serializer (so both
        # paths exercise the same encoding) and execute inline.
        unpickled_task = unpickle(data)
        logger.info(
            'Executing task {} synchronously.'.format(unpickled_task.id)
        )
        with measure_time() as summary, self.queue_context():
            unpickled_task.execute(queue=self)
            summary(unpickled_task.summary())
    return TaskResult(task.id, self)
|
Enqueues a task directly. This is used when a task is retried or if
a task was manually created.
Note that this does not store the task.
|
def get_wegobject_by_id(self, id):
    '''
    Retrieve a `Wegobject` by the Id.

    :param integer id: the Id of the `Wegobject`
    :rtype: :class:`Wegobject`
    :raises GatewayResourceNotFoundException: when no object exists for `id`
    '''
    def creator():
        # Fetch the raw record from the CRAB service.
        res = crab_gateway_request(
            self.client, 'GetWegobjectByIdentificatorWegobject', id
        )
        # Idiom fix: compare against None with `is`, not `==`.
        if res is None:
            raise GatewayResourceNotFoundException()
        return Wegobject(
            res.IdentificatorWegobject,
            res.AardWegobject,
            (res.CenterX, res.CenterY),
            (res.MinimumX, res.MinimumY, res.MaximumX, res.MaximumY),
            Metadata(
                res.BeginDatum,
                res.BeginTijd,
                self.get_bewerking(res.BeginBewerking),
                self.get_organisatie(res.BeginOrganisatie)
            )
        )
    if self.caches['short'].is_configured:
        # Cache per id; `creator` only runs on a cache miss.
        key = 'GetWegobjectByIdentificatorWegobject#%s' % (id)
        wegobject = self.caches['short'].get_or_create(key, creator)
    else:
        wegobject = creator()
    wegobject.set_gateway(self)
    return wegobject
|
Retrieve a `Wegobject` by the Id.
:param integer id: the Id of the `Wegobject`
:rtype: :class:`Wegobject`
|
def data_request(self, payload, timeout=TIMEOUT):
    """Perform a data_request and return the result."""
    url = self.base_url + "/data_request"
    return requests.get(url, params=payload, timeout=timeout)
|
Perform a data_request and return the result.
|
def libvlc_audio_set_mute(p_mi, status):
    '''Set mute status.
    @param p_mi: media player.
    @param status: If status is true then mute, otherwise unmute @warning This function does not always work. If there are no active audio playback stream, the mute status might not be available. If digital pass-through (S/PDIF, HDMI...) is in use, muting may be unapplicable. Also some audio output plugins do not support muting at all. @note To force silent playback, disable all audio tracks. This is more efficient and reliable than mute.
    '''
    # Reuse the cached ctypes binding if present, otherwise build one:
    # void libvlc_audio_set_mute(MediaPlayer, c_int) -- the ((1,), (1,))
    # tuples are input paramflags per this module's _Cfunction convention
    # (TODO confirm against the module's _Cfunction definition).
    f = _Cfunctions.get('libvlc_audio_set_mute', None) or \
        _Cfunction('libvlc_audio_set_mute', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_int)
    return f(p_mi, status)
|
Set mute status.
@param p_mi: media player.
@param status: If status is true then mute, otherwise unmute @warning This function does not always work. If there are no active audio playback stream, the mute status might not be available. If digital pass-through (S/PDIF, HDMI...) is in use, muting may be unapplicable. Also some audio output plugins do not support muting at all. @note To force silent playback, disable all audio tracks. This is more efficient and reliable than mute.
|
def markdown_filter(value, typogrify=True, extensions=('extra', 'codehilite')):
    """
    A smart wrapper around the ``markdown`` and ``typogrify`` functions that automatically removes leading
    whitespace before every line. This is necessary because Markdown is whitespace-sensitive. Consider some Markdown
    content in a template that looks like this:

    .. codeblock:: html+jinja

        <article>
            {% filter markdown %}
                ## A Heading

                Some content here.

                    Code goes here.
                    More lines of code
                    And more.

                Closing thoughts
            {% endfilter %}
        </article>

    In this example, a typical Markdown filter would see the leading whitespace before the first heading and assume
    it was a code block, which would then cause the entire Markdown document to be rendered incorrectly. You may have
    a document with spacing like this because your text editor automatically 'pretty-prints' the markup,
    including the content within the filter tag.

    This filter automatically removes the leading whitespace - leaving code block and other expected offsets in place
    of course - so that rendering occurs correctly regardless of the nested spacing of the source document.
    """
    # Determine how many leading spaces there are, then remove that number from the beginning of each line.
    # Group 1 absorbs leading newlines; group 2 captures the indent of the
    # first non-empty line, whose length (e - s) is the amount to strip.
    match = re.match(r'(\n*)(\s*)', value)
    s, e = match.span(2)
    pattern = re.compile(r'^ {%s}' % (e - s), # use ^ in the pattern so mid-string matches won't be removed
                         flags=re.MULTILINE) # use multi-line mode so ^ will match the start of each line
    output = pattern.sub(u'', value)
    if typogrify:
        return jinja_filters.typogrify(markdown(output, extensions=extensions))
    else:
        return markdown(output, extensions=extensions)
|
A smart wrapper around the ``markdown`` and ``typogrify`` functions that automatically removes leading
whitespace before every line. This is necessary because Markdown is whitespace-sensitive. Consider some Markdown
content in a template that looks like this:
.. codeblock:: html+jinja
<article>
{% filter markdown %}
## A Heading
Some content here.
Code goes here.
More lines of code
And more.
Closing thoughts
{% endfilter %}
</article>
In this example, a typical Markdown filter would see the leading whitespace before the first heading and assume
it was a code block, which would then cause the entire Markdown document to be rendered incorrectly. You may have
a document with spacing like this because your text editor automatically 'pretty-prints' the markup,
including the content within the filter tag.
This filter automatically removes the leading whitespace - leaving code block and other expected offsets in place
of course - so that rendering occurs correctly regardless of the nested spacing of the source document.
|
def list_build_configurations_for_product(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
    """
    List all BuildConfigurations associated with the given Product.
    """
    raw = list_build_configurations_for_product_raw(id, name, page_size, page_index, sort, q)
    # An empty/None raw response yields None, matching the raw endpoint.
    return utils.format_json_list(raw) if raw else None
|
List all BuildConfigurations associated with the given Product.
|
def get_timestamp(self, **kwargs):
    """Retrieves the timestamp for a given set of data.

    Returns the caller-supplied ``timestamp`` kwarg when present,
    otherwise the current UTC time as an ISO-8601 string with
    millisecond precision and a trailing "Z".
    """
    supplied = kwargs.get('timestamp')
    if supplied:
        return supplied
    now = datetime.datetime.utcnow()
    millis = ".%03d" % (now.microsecond / 1000)
    return now.strftime("%Y-%m-%dT%H:%M:%S") + millis + "Z"
|
Retrieves the timestamp for a given set of data
|
def multiple_optima(gene_number=937, resolution=80, model_restarts=10, seed=10000, max_iters=300, optimize=True, plot=True):
    """
    Show an example of a multimodal error surface for Gaussian process
    regression. Gene 939 has bimodal behaviour where the noisy mode is
    higher.

    Contours the log-likelihood over (length scale, log SNR), then runs
    `model_restarts` randomly-initialised GP fits, optionally optimising
    each and drawing an arrow from its start to its end point.  Returns
    the model from the LAST restart only.
    """
    # Contour over a range of length scales and signal/noise ratios.
    length_scales = np.linspace(0.1, 60., resolution)
    log_SNRs = np.linspace(-3., 4., resolution)
    try:import pods
    except ImportError:
        print('pods unavailable, see https://github.com/sods/ods for example datasets')
        return
    data = pods.datasets.della_gatta_TRP63_gene_expression(data_set='della_gatta',gene_number=gene_number)
    # data['Y'] = data['Y'][0::2, :]
    # data['X'] = data['X'][0::2, :]
    # Centre the targets before fitting.
    data['Y'] = data['Y'] - np.mean(data['Y'])
    lls = GPy.examples.regression._contour_data(data, length_scales, log_SNRs, GPy.kern.RBF)
    if plot:
        pb.contour(length_scales, log_SNRs, np.exp(lls), 20, cmap=pb.cm.jet)
        ax = pb.gca()
        pb.xlabel('length scale')
        pb.ylabel('log_10 SNR')
        xlim = ax.get_xlim()
        ylim = ax.get_ylim()
    # Now run a few optimizations
    models = []
    # (start, end) coordinates of each optimisation trajectory.
    optim_point_x = np.empty(2)
    optim_point_y = np.empty(2)
    np.random.seed(seed=seed)
    for i in range(0, model_restarts):
        # kern = GPy.kern.RBF(1, variance=np.random.exponential(1.), lengthscale=np.random.exponential(50.))
        kern = GPy.kern.RBF(1, variance=np.random.uniform(1e-3, 1), lengthscale=np.random.uniform(5, 50))
        m = GPy.models.GPRegression(data['X'], data['Y'], kernel=kern)
        m.likelihood.variance = np.random.uniform(1e-3, 1)
        optim_point_x[0] = m.rbf.lengthscale
        optim_point_y[0] = np.log10(m.rbf.variance) - np.log10(m.likelihood.variance);
        # optimize
        if optimize:
            m.optimize('scg', xtol=1e-6, ftol=1e-6, max_iters=max_iters)
        optim_point_x[1] = m.rbf.lengthscale
        optim_point_y[1] = np.log10(m.rbf.variance) - np.log10(m.likelihood.variance);
        if plot:
            pb.arrow(optim_point_x[0], optim_point_y[0], optim_point_x[1] - optim_point_x[0], optim_point_y[1] - optim_point_y[0], label=str(i), head_length=1, head_width=0.5, fc='k', ec='k')
        models.append(m)
    if plot:
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
    return m
|
Show an example of a multimodal error surface for Gaussian process
regression. Gene 939 has bimodal behaviour where the noisy mode is
higher.
|
def _populate_and_save_user_profile(self):
    """
    Populates a User profile object with fields from the LDAP directory.

    Looks up the profile model named by ``settings.AUTH_PROFILE_MODULE``
    ("app_label.ModelName"), creates the profile if missing, fills it from
    LDAP attributes / DN regex flags / group memberships, and saves it
    only when a populate step or a signal receiver reports a change.
    """
    try:
        app_label, class_name = django.conf.settings.AUTH_PROFILE_MODULE.split('.')
        profile_model = apps.get_model(app_label, class_name)
        profile, created = profile_model.objects.get_or_create(user=self._user)
        save_profile = False
        logger.debug("Populating Django user profile for %s", get_user_username(self._user))
        # Each populate step reports whether it changed the profile;
        # OR the results together so one save covers all changes.
        save_profile = self._populate_profile_from_attributes(profile) or save_profile
        save_profile = self._populate_profile_flags_from_dn_regex(profile) or save_profile
        save_profile = self._populate_profile_from_group_memberships(profile) or save_profile
        # Any connected receiver forces a save, since receivers may have
        # mutated the profile themselves.
        signal_responses = populate_user_profile.send(self.backend.__class__, profile=profile, ldap_user=self)
        if len(signal_responses) > 0:
            save_profile = True
        if save_profile:
            profile.save()
    except ObjectDoesNotExist:
        logger.debug("Django user %s does not have a profile to populate", get_user_username(self._user))
    except LookupError:
        logger.debug('User Profile model defined in settings.AUTH_PROFILE_MODULE is invalid')
|
Populates a User profile object with fields from the LDAP directory.
|
def listen_on_udp_port():
    """listen_on_udp_port

    Run a simple server for processing messages over ``UDP``.

    ``UDP_LISTEN_ON_HOST`` - listen on this host ip address
    ``UDP_LISTEN_ON_PORT`` - listen on this ``UDP`` port
    ``UDP_LISTEN_SIZE`` - listen on to packets of this size
    ``UDP_LISTEN_SLEEP`` - sleep this number of seconds per loop
    ``UDP_LISTEN_SHUTDOWN_HOOK`` - shutdown if file is found on disk
    """
    # (.lstrip() after .strip() was a no-op; a single .strip() suffices.)
    host = os.getenv(
        "UDP_LISTEN_ON_HOST",
        "127.0.0.1").strip()
    port = int(os.getenv(
        "UDP_LISTEN_ON_PORT",
        "17000").strip())
    # UDP sockets have no listen() backlog; value is kept for the startup log.
    backlog = int(os.getenv(
        "UDP_LISTEN_BACKLOG",
        "5").strip())
    size = int(os.getenv(
        "UDP_LISTEN_SIZE",
        "1024").strip())
    sleep_in_seconds = float(os.getenv(
        "UDP_LISTEN_SLEEP",
        "0.5").strip())
    shutdown_hook = os.getenv(
        "UDP_LISTEN_SHUTDOWN_HOOK",
        "/tmp/udp-shutdown-listen-server-{}-{}".format(
            host,
            port)).strip()
    if os.path.exists(shutdown_hook):
        print(("Please remove the UDP shutdown hook file: "
               "\nrm -f {}")
              .format(shutdown_hook))
        sys.exit(1)
    now = datetime.datetime.now().isoformat()
    print(("{} - Starting UDP Server address={}:{} "
           "backlog={} size={} sleep={} shutdown={}")
          .format(
              now,
              host,
              port,
              backlog,
              size,
              sleep_in_seconds,
              shutdown_hook))
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.bind((host, port))
    # Bug fix: use a receive timeout so the shutdown hook is polled even
    # when no datagrams arrive.  Previously recvfrom() blocked forever,
    # making the shutdown check effectively unreachable.
    s.settimeout(sleep_in_seconds)
    msg = 0
    while True:
        try:
            # Bug fix: honor UDP_LISTEN_SIZE instead of a hard-coded 4096.
            data, address = s.recvfrom(size)
        except socket.timeout:
            data = None
        if data:
            now = datetime.datetime.now().isoformat()
            print(("{} received UDP data={} ")
                  .format(
                      now,
                      data))
            msg += 1
            if msg > 1000000:
                msg = 0
            # if address:
            #     client.sendto("PROCESSED", address)
        if os.path.exists(shutdown_hook):
            now = datetime.datetime.now().isoformat()
            print(("{} detected shutdown "
                   "file={}")
                  .format(
                      now,
                      shutdown_hook))
            # Bug fix: actually leave the loop so "Shutting down" runs.
            break
    # end of loop
    print("Shutting down")
|
listen_on_udp_port
Run a simple server for processing messages over ``UDP``.
``UDP_LISTEN_ON_HOST`` - listen on this host ip address
``UDP_LISTEN_ON_PORT`` - listen on this ``UDP`` port
``UDP_LISTEN_SIZE`` - listen on to packets of this size
``UDP_LISTEN_SLEEP`` - sleep this number of seconds per loop
``UDP_LISTEN_SHUTDOWN_HOOK`` - shutdown if file is found on disk
|
def set_resource_type(resource, type_id, types={}, **kwargs):
    """
    Set this resource to be a certain type.

    Type objects (a dictionary keyed on type_id) may be
    passed in to save on loading.
    This function does not call save. It must be done afterwards.
    New resource attributes are added to the resource if the template
    requires them. Resource attributes on the resource but not used by
    the template are not removed.

    @returns list of new resource attributes
             ,new resource type object (None when the resource already
              has this type)
             ,dict of new resource scenarios, keyed on attr_id
    """
    ref_key = resource.ref_key
    # Attribute ids already present on the resource.
    existing_attr_ids = []
    for res_attr in resource.attributes:
        existing_attr_ids.append(res_attr.attr_id)
    # Use the pre-loaded type if supplied, else fetch it (with typeattrs).
    if type_id in types:
        type_i = types[type_id]
    else:
        type_i = db.DBSession.query(TemplateType).filter(TemplateType.id==type_id).options(joinedload_all('typeattrs')).one()
    type_attrs = dict()
    for typeattr in type_i.typeattrs:
        type_attrs[typeattr.attr_id]={
            'is_var':typeattr.attr_is_var,
            'default_dataset_id': typeattr.default_dataset.id if typeattr.default_dataset else None}
    # check if attributes exist
    missing_attr_ids = set(type_attrs.keys()) - set(existing_attr_ids)
    # add attributes if necessary
    new_res_attrs = []
    #This is a dict as the length of the list may not match the new_res_attrs
    #Keyed on attr_id, as resource_attr_id doesn't exist yet, and there should only
    #be one attr_id per template.
    new_res_scenarios = {}
    for attr_id in missing_attr_ids:
        ra_dict = dict(
            ref_key = ref_key,
            attr_id = attr_id,
            attr_is_var = type_attrs[attr_id]['is_var'],
            node_id = resource.id if ref_key == 'NODE' else None,
            link_id = resource.id if ref_key == 'LINK' else None,
            group_id = resource.id if ref_key == 'GROUP' else None,
            network_id = resource.id if ref_key == 'NETWORK' else None,
        )
        new_res_attrs.append(ra_dict)
        # Seed a default dataset into every scenario of the network, if one
        # is defined on the typeattr.
        if type_attrs[attr_id]['default_dataset_id'] is not None:
            if hasattr(resource, 'network'):
                for s in resource.network.scenarios:
                    if new_res_scenarios.get(attr_id) is None:
                        new_res_scenarios[attr_id] = {}
                    new_res_scenarios[attr_id][s.id] = dict(
                        dataset_id = type_attrs[attr_id]['default_dataset_id'],
                        scenario_id = s.id,
                        #Not stored in the DB, but needed to connect the RA ID later.
                        attr_id = attr_id,
                        ref_key = ref_key,
                        node_id = ra_dict['node_id'],
                        link_id = ra_dict['link_id'],
                        group_id = ra_dict['group_id'],
                        network_id = ra_dict['network_id'],
                    )
    resource_type = None
    if not any(rt.type_id == type_i.id for rt in resource.types):
        # Bug fixes vs. the previous for/else version:
        #  * compatibility is now checked against EVERY existing type, not
        #    just whichever instance the loop variable held last;
        #  * no NameError when the resource has no types yet;
        #  * the error message had four arguments for three placeholders,
        #    which raised TypeError instead of the intended HydraError.
        for rt in resource.types:
            errors = check_type_compatibility(rt.type_id, type_i.id)
            if len(errors) > 0:
                raise HydraError("Cannot apply type %s to resource %s as it "
                                 "conflicts with type %s. Errors are: %s"
                                 %(type_i.name, resource.get_name(),
                                   rt.templatetype.name, ','.join(errors)))
        # add type to tResourceType as it doesn't exist already
        resource_type = dict(
            node_id = resource.id if ref_key == 'NODE' else None,
            link_id = resource.id if ref_key == 'LINK' else None,
            group_id = resource.id if ref_key == 'GROUP' else None,
            network_id = resource.id if ref_key == 'NETWORK' else None,
            ref_key = ref_key,
            type_id = type_id,
        )
    return new_res_attrs, resource_type, new_res_scenarios
|
Set this resource to be a certain type.
Type objects (a dictionary keyed on type_id) may be
passed in to save on loading.
This function does not call save. It must be done afterwards.
New resource attributes are added to the resource if the template
requires them. Resource attributes on the resource but not used by
the template are not removed.
@returns list of new resource attributes
,new resource type object
|
def download(self, bucket_name, object_name, filename=None):
    """
    Get a file from Google Cloud Storage.

    :param bucket_name: The bucket to fetch from.
    :type bucket_name: str
    :param object_name: The object to fetch.
    :type object_name: str
    :param filename: If set, a local file path where the file should be written to.
    :type filename: str
    :return: The object's content as bytes (also written to ``filename``
        when provided).
    """
    client = self.get_conn()
    bucket = client.get_bucket(bucket_name)
    blob = bucket.blob(blob_name=object_name)
    if filename:
        # Bug fix: previously the object was fetched from GCS twice
        # (download_to_filename, then download_as_string).  Download once
        # to the local file and return its bytes instead.
        blob.download_to_filename(filename)
        self.log.info('File downloaded to %s', filename)
        with open(filename, 'rb') as file_obj:
            return file_obj.read()
    return blob.download_as_string()
|
Get a file from Google Cloud Storage.
:param bucket_name: The bucket to fetch from.
:type bucket_name: str
:param object_name: The object to fetch.
:type object_name: str
:param filename: If set, a local file path where the file should be written to.
:type filename: str
|
def _new_stream(self, idx):
    '''Replace the stream at slot `idx` with a freshly activated one.

    Parameters
    ----------
    idx : int, [0:n_streams - 1]
        The stream index to replace
    '''
    weight = self.stream_weights_[idx]
    # A 0 or None weight leaves the slot inactive.
    self.streams_[idx] = self.streamers[idx].iterate() if weight else None
    # A fresh stream starts with a zero sample count.
    self.stream_counts_[idx] = 0
|
Randomly select and create a new stream.
Parameters
----------
idx : int, [0:n_streams - 1]
The stream index to replace
|
def move_into(self, destination_folder):
    # type: (Folder) -> None
    """Move the Folder into a different folder.

    This makes the Folder provided a child folder of the destination_folder.

    Raises:
        AuthError: Raised if Outlook returns a 401, generally caused by an invalid or expired access token.

    Args:
        destination_folder: A :class:`Folder <pyOutlook.core.folder.Folder>` that should become the parent

    Returns:
        A new :class:`Folder <pyOutlook.core.folder.Folder>` that is now
        inside of the destination_folder.
    """
    target = 'https://outlook.office.com/api/v2.0/me/MailFolders/' + self.id + '/move'
    body = '{ "DestinationId": "' + destination_folder.id + '"}'
    response = requests.post(target, headers=self.headers, data=body)
    if check_response(response):
        return self._json_to_folder(self.account, response.json())
|
Move the Folder into a different folder.
This makes the Folder provided a child folder of the destination_folder.
Raises:
AuthError: Raised if Outlook returns a 401, generally caused by an invalid or expired access token.
Args:
destination_folder: A :class:`Folder <pyOutlook.core.folder.Folder>` that should become the parent
Returns:
A new :class:`Folder <pyOutlook.core.folder.Folder>` that is now
inside of the destination_folder.
|
def aggcv(rlist, show_stdv=True, show_progress=None, as_pandas=True):
    # pylint: disable=invalid-name
    """
    Aggregate cross-validation results.

    `rlist` holds one evaluation string per fold, shaped like
    ``"<idx> <name>:<value> <name>:<value> ..."``; every entry must share
    the same leading ``<idx>`` token.  Per metric name, the mean and std
    across folds are computed.  Returns a ``pd.Series`` indexed by
    ``<name>-mean`` / ``<name>-std`` when pandas is available and
    `as_pandas` is True, otherwise a flat list of alternating mean/std
    values (in which case the progress message defaults to on, since the
    list cannot carry the metric names).
    """
    cvmap = {}
    idx = rlist[0].split()[0]
    for line in rlist:
        arr = line.split()
        assert idx == arr[0]
        for it in arr[1:]:
            # Tokens may arrive as bytes (Python 2/3 boundary); normalize.
            if not isinstance(it, STRING_TYPES):
                it = it.decode()
            k, v = it.split(':')
            if k not in cvmap:
                cvmap[k] = []
            cvmap[k].append(float(v))
    msg = idx
    if show_stdv:
        fmt = '\tcv-{0}:{1}+{2}'
    else:
        fmt = '\tcv-{0}:{1}'
    index = []
    results = []
    # Aggregate metrics in sorted name order so output is deterministic.
    for k, v in sorted(cvmap.items(), key=lambda x: x[0]):
        v = np.array(v)
        if not isinstance(msg, STRING_TYPES):
            msg = msg.decode()
        mean, std = np.mean(v), np.std(v)
        msg += fmt.format(k, mean, std)
        index.extend([k + '-mean', k + '-std'])
        results.extend([mean, std])
    if as_pandas:
        try:
            import pandas as pd
            results = pd.Series(results, index=index)
        except ImportError:
            if show_progress is None:
                show_progress = True
    else:
        # if show_progress is default (None),
        # result will be np.ndarray as it can't hold column name
        if show_progress is None:
            show_progress = True
    if show_progress:
        sys.stderr.write(msg + '\n')
    return results
|
Aggregate cross-validation results.
|
def plot_isotherm(self, T, zs, ws, Pmin=None, Pmax=None, methods=None, pts=50,
                  only_valid=True):  # pragma: no cover
    r'''Method to create a plot of the property vs pressure at a specified
    temperature and composition according to either a specified list of
    methods, or the user methods (if set), or all methods. User-selectable
    number of points, and pressure range. If only_valid is set,
    `test_method_validity` will be used to check if each condition in
    the specified range is valid, and `test_property_validity` will be used
    to test the answer, and the method is allowed to fail; only the valid
    points will be plotted. Otherwise, the result will be calculated and
    displayed as-is. This will not succeed if the method fails.

    Parameters
    ----------
    T : float
        Temperature at which to create the plot, [K]
    zs : list[float]
        Mole fractions of all species in the mixture, [-]
    ws : list[float]
        Weight fractions of all species in the mixture, [-]
    Pmin : float
        Minimum pressure, to begin calculating the property, [Pa]
    Pmax : float
        Maximum pressure, to stop calculating the property, [Pa]
    methods : list, optional
        List of methods to consider (default None uses user methods if set,
        otherwise all methods)
    pts : int, optional
        A list of points to calculate the property at; if Pmin to Pmax
        covers a wide range of method validities, only a few points may end
        up calculated for a given method so this may need to be large
    only_valid : bool
        If True, only plot successful methods and calculated properties,
        and handle errors; if False, attempt calculation without any
        checking and use methods outside their bounds
    '''
    # This function cannot be tested
    if not has_matplotlib:
        raise Exception('Optional dependency matplotlib is required for plotting')
    if Pmin is None:
        if self.Pmin is not None:
            Pmin = self.Pmin
        else:
            raise Exception('Minimum pressure could not be auto-detected; please provide it')
    if Pmax is None:
        if self.Pmax is not None:
            Pmax = self.Pmax
        else:
            raise Exception('Maximum pressure could not be auto-detected; please provide it')
    # `methods=None` default avoids the mutable-default-argument pitfall;
    # an explicit empty list still falls through to the defaults below.
    if not methods:
        if self.user_methods:
            methods = self.user_methods
        else:
            methods = self.all_methods
    Ps = np.linspace(Pmin, Pmax, pts)
    for method in methods:
        if only_valid:
            properties, Ps2 = [], []
            for P in Ps:
                if self.test_method_validity(T, P, zs, ws, method):
                    try:
                        p = self.calculate(T, P, zs, ws, method)
                        if self.test_property_validity(p):
                            properties.append(p)
                            Ps2.append(P)
                    # Narrowed from a bare `except:`, which would also have
                    # swallowed KeyboardInterrupt/SystemExit.
                    except Exception:
                        pass
            plt.plot(Ps2, properties, label=method)
        else:
            properties = [self.calculate(T, P, zs, ws, method) for P in Ps]
            plt.plot(Ps, properties, label=method)
    plt.legend(loc='best')
    plt.ylabel(self.name + ', ' + self.units)
    plt.xlabel('Pressure, Pa')
    plt.title(self.name + ' of a mixture of ' + ', '.join(self.CASs)
              + ' at mole fractions of ' + ', '.join(str(round(i, 4)) for i in zs) + '.')
    plt.show()
|
r'''Method to create a plot of the property vs pressure at a specified
temperature and composition according to either a specified list of
methods, or the user methods (if set), or all methods. User-selectable
number of points, and pressure range. If only_valid is set,
`test_method_validity` will be used to check if each condition in
the specified range is valid, and `test_property_validity` will be used
to test the answer, and the method is allowed to fail; only the valid
points will be plotted. Otherwise, the result will be calculated and
displayed as-is. This will not succeed if the method fails.
Parameters
----------
T : float
Temperature at which to create the plot, [K]
zs : list[float]
Mole fractions of all species in the mixture, [-]
ws : list[float]
Weight fractions of all species in the mixture, [-]
Pmin : float
Minimum pressure, to begin calculating the property, [Pa]
Pmax : float
Maximum pressure, to stop calculating the property, [Pa]
methods : list, optional
List of methods to consider
pts : int, optional
A list of points to calculate the property at; if Pmin to Pmax
covers a wide range of method validities, only a few points may end
up calculated for a given method so this may need to be large
only_valid : bool
If True, only plot successful methods and calculated properties,
and handle errors; if False, attempt calculation without any
checking and use methods outside their bounds
|
def block_verify( block_data ):
    """
    Check a block's transactions against its merkle root.

    ``block_data`` is a dict with a 'merkleroot' hex string and a 'tx' list
    of hex strings (i.e. compatible with bitcoind's getblock JSON RPC
    output). Returns True when the recomputed merkle root matches the
    block's declared one, False otherwise.
    """
    tree = merkle.MerkleTree(block_data['tx'])
    computed_root = str(tree.root())
    expected_root = str(block_data['merkleroot'])
    return computed_root == expected_root
|
Given block data (a dict with 'merkleroot' hex string and 'tx' list of hex strings--i.e.
a block compatible with bitcoind's getblock JSON RPC method), verify that the
transactions are consistent.
Return True on success
Return False if not.
|
def python(self, cmd):
    """Run ``cmd`` with the virtual environment's python interpreter."""
    interpreter = self.cmd_path('python')
    full_command = ' '.join((interpreter, cmd))
    return self._execute(full_command)
|
Execute a python script using the virtual environment python.
|
def serializer_for(self, obj):
    """Locate a serializer able to handle ``obj``.

    Lookup order:
      1. the null serializer (when ``obj`` is None),
      2. default serializers (primitives, arrays, strings, helper types),
      3. user-registered custom serializers,
      4. the user-registered global serializer,
      5. pickle-based python serialization as the final fallback.

    :param obj: input object
    :return: Serializer
    :raises HazelcastSerializationError: when nothing can serialize ``obj``
    """
    if obj is None:
        return self._null_serializer
    obj_type = type(obj)
    serializer = self.lookup_default_serializer(obj_type, obj)
    # Fall through the remaining lookup stages until one matches.
    for lookup in (self.lookup_custom_serializer,
                   self.lookup_global_serializer,
                   self.lookup_python_serializer):
        if serializer is not None:
            break
        serializer = lookup(obj_type)
    if serializer is None:
        raise HazelcastSerializationError("There is no suitable serializer for:" + str(obj_type))
    return serializer
|
Searches for a serializer for the provided object
Serializers will be searched in this order;
1-NULL serializer
2-Default serializers, like primitives, arrays, string and some default types
3-Custom registered types by user
4-Global serializer if registered by user
4-pickle serialization as a fallback
:param obj: input object
:return: Serializer
|
def _check_values(self, values):
"""Check values whenever they come through the values setter."""
assert isinstance(values, Iterable) and not \
isinstance(values, (str, dict, bytes, bytearray)), \
'values should be a list or tuple. Got {}'.format(type(values))
assert len(values) == len(self.datetimes), \
'Length of values list must match length of datetimes list. {} != {}'.format(
len(values), len(self.datetimes))
assert len(values) > 0, 'Data Collection must include at least one value'
|
Check values whenever they come through the values setter.
|
def removeChild(self, child_id):
    """Detach a single child workitem from this workitem.

    :param child_id: the child workitem id/number
        (integer or equivalent string)
    """
    self.log.debug("Try to remove a child <Workitem %s> from current "
                   "<Workitem %s>", child_id, self)
    self._removeChildren([child_id])
    self.log.info("Successfully remove a child <Workitem %s> from "
                  "current <Workitem %s>", child_id, self)
|
Remove a child from current workitem
:param child_id: the child workitem id/number
(integer or equivalent string)
|
def construct_error_message(driver_id, error_type, message, timestamp):
    """Build and serialize an ErrorTableData flatbuffer.

    Args:
        driver_id: The ID of the driver that the error should go to. If this is
            nil, then the error will go to all drivers.
        error_type: The type of the error.
        message: The error message.
        timestamp: The time of the error.
    Returns:
        The serialized object.
    """
    fb = flatbuffers.Builder(0)
    driver_str = fb.CreateString(driver_id.binary())
    type_str = fb.CreateString(error_type)
    message_str = fb.CreateString(message)
    error_table = ray.core.generated.ErrorTableData
    error_table.ErrorTableDataStart(fb)
    error_table.ErrorTableDataAddDriverId(fb, driver_str)
    error_table.ErrorTableDataAddType(fb, type_str)
    error_table.ErrorTableDataAddErrorMessage(fb, message_str)
    error_table.ErrorTableDataAddTimestamp(fb, timestamp)
    fb.Finish(error_table.ErrorTableDataEnd(fb))
    return bytes(fb.Output())
|
Construct a serialized ErrorTableData object.
Args:
driver_id: The ID of the driver that the error should go to. If this is
nil, then the error will go to all drivers.
error_type: The type of the error.
message: The error message.
timestamp: The time of the error.
Returns:
The serialized object.
|
def find_suitable_encoding(self, char):
    """Pick a code page able to represent ``char``.

    Candidates are tried in a deliberate order: encodings already used in
    this session come first (they are likely to keep working, and re-using
    them reduces the number of codepage-change instructions sent), followed
    by code pages in lower ESCPOS slots, which are more likely to be
    supported when a printer profile is missing or incomplete. Returns
    None implicitly when no known code page can encode the character.
    """
    candidates = sorted(self.codepages.items(), key=self.__encoding_sort_func)
    for encoding, _ in candidates:
        if not self.can_encode(encoding, char):
            continue
        # Remember the winner so it is preferred next time.
        self.used_encodings.add(encoding)
        return encoding
|
The order of our search is a specific one:
1. code pages that we already tried before; there is a good
chance they might work again, reducing the search space,
and by re-using already used encodings we might also
reduce the number of codepage change instructions we have
to send. Still, any performance gains will presumably be
fairly minor.
2. code pages in lower ESCPOS slots first. Presumably, they
are more likely to be supported, so if a printer profile
is missing or incomplete, we might increase our chance
that the code page we pick for this character is actually
supported.
|
def write_biom(self, sample_names, read_taxonomies, biom_file_io):
    '''Serialise the OTU information as a BIOM (HDF5) table.

    Parameters
    ----------
    sample_names: String
        names of each sample (sample_ids for biom)
    read_taxonomies: Array of hashes as per _iterate_otu_table_rows()
    biom_file_io: io
        open writeable stream to write biom contents to

    Returns True if successful (or there was nothing to write), else False.'''
    rows = []
    expected_width = len(sample_names)
    for otu_id, taxonomy, row_counts in self._iterate_otu_table_rows(read_taxonomies):
        if len(row_counts) != expected_width:
            raise Exception("Programming error: mismatched sample names and counts")
        rows.append((str(otu_id), {'taxonomy': taxonomy}, row_counts))
    if not rows:
        logging.info("Not writing BIOM file since no sequences were assigned taxonomy")
        return True
    otu_ids = [row[0] for row in rows]
    observ_metadata = [row[1] for row in rows]
    counts = [row[2] for row in rows]
    table = Table(np.array(counts),
                  otu_ids, sample_names, observ_metadata,
                  [{}] * len(sample_names), table_id='GraftM Taxonomy Count Table')
    try:
        table.to_hdf5(biom_file_io, 'GraftM graft')
        return True
    except RuntimeError as e:
        logging.warn("Error writing BIOM output, file not written. The specific error was: %s" % e)
        return False
|
Write the OTU info to a biom IO output stream
Parameters
----------
sample_names: String
names of each sample (sample_ids for biom)
read_taxonomies: Array of hashes as per _iterate_otu_table_rows()
biom_file_io: io
open writeable stream to write biom contents to
Returns True if successful, else False
|
def check_schedule():
    """Return True when every schedule-item and slot validator passes."""
    items = prefetch_schedule_items()
    if any(validate(items) for validate, _type, _msg in SCHEDULE_ITEM_VALIDATORS):
        return False
    # Slots are only fetched (and checked) once the items are known-good.
    slots = prefetch_slots()
    if any(validate(slots) for validate, _type, _msg in SLOT_VALIDATORS):
        return False
    return True
|
Helper routine to easily test if the schedule is valid
|
def _query_select_options(self, query, select_columns=None):
    """
    Restrict the SQL SELECT to the requested columns only.

    Dotted names (``relation.column``) trigger a join on the related model
    (if not already joined) and load only that related column; plain names
    are loaded directly from the model, unless they denote a relation or a
    callable, in which case the whole model is loaded.

    :param query: SQLAlchemy Query obj
    :param select_columns: (list) of columns
    :return: SQLAlchemy Query obj
    """
    if not select_columns:
        return query
    load_options = []
    for column in select_columns:
        if "." in column:
            parts = column.split(".")
            model_relation = self.get_related_model(parts[0])
            if not self.is_model_already_joinded(query, model_relation):
                query = query.join(model_relation)
            load_options.append(Load(model_relation).load_only(parts[1]))
        elif not self.is_relation(column) and not hasattr(
            getattr(self.obj, column), "__call__"
        ):
            load_options.append(Load(self.obj).load_only(column))
        else:
            # Relations and callables cannot be narrowed; load the model.
            load_options.append(Load(self.obj))
    return query.options(*tuple(load_options))
|
Add select load options to query. The goal
is to only SQL select what is requested
:param query: SQLAlchemy Query obj
:param select_columns: (list) of columns
:return: SQLAlchemy Query obj
|
def retry_on_exception(tries=6, delay=1, backoff=2, max_delay=32):
    '''
    Decorator for implementing exponential backoff for retrying on failures.

    tries: Max number of tries to execute the wrapped function before failing.
    delay: Delay time in seconds before the FIRST retry.
    backoff: Multiplier to extend the initial delay by for each retry.
    max_delay: Max time in seconds to wait between retries.

    Raises ValueError when the parameters are inconsistent. Any exception
    raised by the final attempt propagates to the caller unchanged.
    '''
    tries = math.floor(tries)
    if tries < 1:
        raise ValueError('"tries" must be greater than or equal to 1.')
    if delay < 0:
        raise ValueError('"delay" must be greater than or equal to 0.')
    if backoff < 1:
        raise ValueError('"backoff" must be greater than or equal to 1.')
    if max_delay < delay:
        raise ValueError('"max_delay" must be greater than or equal to delay.')

    def decorated_function_with_retry(func):
        @wraps(func)
        def function_to_retry(*args, **kwargs):
            local_tries, local_delay = tries, delay
            while local_tries > 1:
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    # Never wait longer than the configured ceiling.
                    local_delay = min(local_delay, max_delay)
                    # Lazy %-args: formatting is skipped when the record is
                    # filtered out, unlike eager `%` interpolation.
                    logging.exception('%s: Retrying in %d seconds...',
                                      str(e), local_delay)
                    time.sleep(local_delay)
                    local_tries -= 1
                    local_delay *= backoff
            # Last attempt: let any exception propagate to the caller.
            return func(*args, **kwargs)
        return function_to_retry
    return decorated_function_with_retry
|
Decorator for implementing exponential backoff for retrying on failures.
tries: Max number of tries to execute the wrapped function before failing.
delay: Delay time in seconds before the FIRST retry.
backoff: Multiplier to extend the initial delay by for each retry.
max_delay: Max time in seconds to wait between retries.
|
def from_name(cls, name, all_fallback=True):
    """Gets a vocation filter from a vocation's name.

    Parameters
    ----------
    name: :class:`str`
        The name of the vocation.
    all_fallback: :class:`bool`
        Whether to return :py:attr:`ALL` if no match is found. Otherwise, ``None`` will be returned.

    Returns
    -------
    VocationFilter, optional:
        The matching vocation filter.
    """
    name = name.upper()
    for vocation in cls:  # type: VocationFilter
        # Parenthesised explicitly: the previous form parsed as
        # `a or (b and c)`, so ALL could match by exact substring even
        # when all_fallback was False and before later members were tried.
        if ((vocation.name in name or vocation.name[:-1] in name)
                and vocation != cls.ALL):
            return vocation
    if all_fallback or name.upper() == "ALL":
        return cls.ALL
    return None
|
Gets a vocation filter from a vocation's name.
Parameters
----------
name: :class:`str`
The name of the vocation.
all_fallback: :class:`bool`
Whether to return :py:attr:`ALL` if no match is found. Otherwise, ``None`` will be returned.
Returns
-------
VocationFilter, optional:
The matching vocation filter.
|
def path(self, *args: str) -> typing.Union[None, str]:
    """
    Creates an absolute path in the project source directory from the
    relative path components.

    :param args:
        Relative components for creating a path within the project source
        directory; each positional argument is a single str component
        (the previous ``typing.List[str]`` annotation mis-typed them).
    :return:
        An absolute path to the specified file or directory within the
        project source directory, or None when no project is loaded.
    """
    if not self._project:
        return None
    return environ.paths.clean(os.path.join(
        self._project.source_directory,
        *args
    ))
|
Creates an absolute path in the project source directory from the
relative path components.
:param args:
Relative components for creating a path within the project source
directory
:return:
An absolute path to the specified file or directory within the
project source directory.
|
def _updateCallSetIds(self, variantFile):
"""
Updates the call set IDs based on the specified variant file.
"""
if len(self._callSetIdMap) == 0:
for sample in variantFile.header.samples:
self.addCallSetFromName(sample)
|
Updates the call set IDs based on the specified variant file.
|
def items(self):
    "Return all entries across the four typed maps as (key, value) 2-tuples."
    pairs = []
    # Concatenation order matches the protobuf map declaration order.
    for mapping in (self._pb.IntMap, self._pb.StringMap,
                    self._pb.FloatMap, self._pb.BoolMap):
        pairs.extend(mapping.items())
    return pairs
|
Returns a list of (key, value) pairs as 2-tuples.
|
def get_transitions(self, indexes):
    """Assemble a dict of transition tensors for the given buffer indexes.

    ``indexes`` has shape (batch, num_envs) and must cover the same number
    of environments as the state buffer.
    """
    assert indexes.shape[1] == self.state_buffer.shape[1], \
        "Must have the same number of indexes as there are environments"
    # Stacked-frame observation shape: frame_history frames along the
    # last axis of the stored state shape.
    frame_batch_shape = (
        [indexes.shape[0], indexes.shape[1]]
        + list(self.state_buffer.shape[2:-1])
        + [self.state_buffer.shape[-1] * self.frame_history]
    )
    observations = np.zeros(frame_batch_shape, dtype=self.state_buffer.dtype)
    next_observations = np.zeros(frame_batch_shape, dtype=self.state_buffer.dtype)
    for row_idx, frame_row in enumerate(indexes):
        for env_idx, frame_idx in enumerate(frame_row):
            current, following = self.get_frame_with_future(frame_idx, env_idx)
            observations[row_idx, env_idx] = current
            next_observations[row_idx, env_idx] = following
    transition_tensors = {
        'observations': observations,
        'actions': take_along_axis(self.action_buffer, indexes),
        'rewards': take_along_axis(self.reward_buffer, indexes),
        'observations_next': next_observations,
        'dones': take_along_axis(self.dones_buffer, indexes).astype(np.float32),
    }
    for name, data in self.extra_data.items():
        transition_tensors[name] = take_along_axis(data, indexes)
    return transition_tensors
|
Get dictionary of transition data
|
def get_data_csv(file_name, encoding='utf-8', file_contents=None, on_demand=False):
    '''
    Gets good old csv data from a file.

    Args:
        file_name: The name of the local file, or the holder for the
            extension type when the file_contents are supplied.
        encoding: Loads the file with the specified cell encoding.
        file_contents: The file-like object holding contents of file_name.
            If left as None, then file_name is directly loaded.
        on_demand: Requests that a yielder be used in place of a full data
            copy.
    '''
    def yield_csv(csv_contents, csv_file):
        # Stream rows, making sure the file is closed when iteration ends
        # (or the generator is garbage-collected).
        try:
            for line in csv_contents:
                yield line
        finally:
            try:
                csv_file.close()
            except Exception:
                # Best-effort close; narrowed from a bare `except:`, which
                # would also have swallowed SystemExit/KeyboardInterrupt.
                pass

    def process_csv(csv_contents, csv_file):
        return [line for line in yield_csv(csv_contents, csv_file)]

    if file_contents:
        csv_file = BytesIO(file_contents)
    else:
        # Don't use 'open as' format, as on_demand loads shouldn't close the file early
        csv_file = open(file_name, 'rb')
    # NOTE(review): `csv.reader(..., encoding=...)` implies this is
    # unicodecsv, not the stdlib csv module — confirm the file's imports.
    reader = csv.reader(csv_file, dialect=csv.excel, encoding=encoding)
    if on_demand:
        table = yield_csv(reader, csv_file)
    else:
        table = process_csv(reader, csv_file)
    return [table]
|
Gets good old csv data from a file.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
encoding: Loads the file with the specified cell encoding.
file_contents: The file-like object holding contents of file_name.
If left as None, then file_name is directly loaded.
on_demand: Requests that a yielder be used in place of a full data
copy.
|
def init_app(self, app):
    """Hook the extension into ``app``.

    The ``index_path_root`` directory is created during initialization, but
    the index itself is **not**; call :meth:`reindex` for that.

    :param app: The application instance for which the extension should
        be initialized.
    """
    if not hasattr(app, 'extensions'):
        app.extensions = {}
    config = app.extensions.setdefault('whooshee', {})
    config.update({
        # mapping that caches whoosheers to their indexes; used by `get_or_create_index`
        'whoosheers_indexes': {},
        # share the live whoosheer list so whoosheers registered after
        # init_app are still found
        'whoosheers': self.whoosheers,
        'index_path_root': app.config.get('WHOOSHEE_DIR', '') or 'whooshee',
        'writer_timeout': app.config.get('WHOOSHEE_WRITER_TIMEOUT', 2),
        'search_string_min_len': app.config.get('WHOOSHEE_MIN_STRING_LEN', 3),
        'memory_storage': app.config.get("WHOOSHEE_MEMORY_STORAGE", False),
        'enable_indexing': app.config.get('WHOOSHEE_ENABLE_INDEXING', True),
    })
    if app.config.get('WHOOSHE_MIN_STRING_LEN', None) is not None:
        warnings.warn(WhoosheeDeprecationWarning("The config key WHOOSHE_MIN_STRING_LEN has been renamed to WHOOSHEE_MIN_STRING_LEN. The mispelled config key is deprecated and will be removed in upcoming releases. Change it to WHOOSHEE_MIN_STRING_LEN to suppress this warning"))
        config['search_string_min_len'] = app.config.get('WHOOSHE_MIN_STRING_LEN')
    if not os.path.exists(config['index_path_root']):
        os.makedirs(config['index_path_root'])
|
Initialize the extension. It will create the `index_path_root`
directory upon initialization but it will **not** create the index.
Please use :meth:`reindex` for this.
:param app: The application instance for which the extension should
be initialized.
|
def recover_chain_id(storage: SQLiteStorage) -> ChainID:
    """Read the chain id from the first state change in ``storage``.

    At this point in time any database can reasonably be assumed to hold a
    single ``chain_id`` value, recorded by the initial ActionInitChain.
    """
    first_state_change = storage.get_state_changes(limit=1, offset=0)[0]
    init_chain = json.loads(first_state_change)
    assert init_chain['_type'] == 'raiden.transfer.state_change.ActionInitChain'
    return init_chain['chain_id']
|
We can reasonably assume, that any database has only one value for `chain_id` at this point
in time.
|
def get_storage_id_for_state(state):
    """Compose the storage id of a state, used to build its file path.

    :param rafcon.core.states.state.State state: state the storage_id is composed for
    """
    if not global_config.get_config_value('STORAGE_PATH_WITH_STATE_NAME'):
        return state.state_id
    max_length = global_config.get_config_value('MAX_LENGTH_FOR_STATE_NAME_IN_STORAGE_PATH')
    # File-system folder names are capped at 255 characters; reserve room
    # for the delimiter plus the state-id suffix.
    name_budget = 255 - len(ID_NAME_DELIMITER + state.state_id)
    # TODO: should we allow "None" in config file?
    if max_length is None or max_length == "None" or max_length > name_budget:
        if name_budget < len(state.name):
            logger.info("The storage folder name is forced to be maximal 255 characters in length.")
        max_length = name_budget
    return limit_text_to_be_path_element(state.name, max_length) + ID_NAME_DELIMITER + state.state_id
|
Calculates the storage id of a state. This ID can be used for generating the file path for a state.
:param rafcon.core.states.state.State state: state the storage_id should is composed for
|
def loop(self):
    """
    Main game loop: draw the board, read moves and update the score until
    the game is won, lost, paused (saved) or interrupted.

    Returns the final score, or None when the game was saved or aborted.
    """
    pause_key = self.board.PAUSE
    margins = {'left': 4, 'top': 4, 'bottom': 4}
    # Restore the terminal cursor even if the process exits abnormally.
    atexit.register(self.showCursor)
    try:
        self.hideCursor()
        while True:
            self.clearScreen()
            print(self.__str__(margins=margins))
            # Stop on victory or when no move is possible (game over).
            if self.board.won() or not self.board.canMove():
                break
            m = self.readMove()
            if m == pause_key:
                # Pause: persist the best score and the game state itself.
                self.saveBestScore()
                if self.store():
                    print("Game successfully saved. "
                          "Resume it with `term2048 --resume`.")
                    return self.score
                print("An error ocurred while saving your game.")
                return None
            self.incScore(self.board.move(m))
    except KeyboardInterrupt:
        # Ctrl-C: keep the best score but report no final score.
        self.saveBestScore()
        return None
    self.saveBestScore()
    print('You won!' if self.board.won() else 'Game Over')
    return self.score
|
main game loop. returns the final score.
|
def parse_query(self, query):
    """Parse ``query`` with the pypeg2 grammar and run it through the converter."""
    parse_tree = pypeg2.parse(query, Main, whitespace="")
    return parse_tree.accept(self.converter)
|
Parse query string using given grammar
|
def trimLeft(self, amount):
    """
    Remove ``amount`` nucleotides from the 5' (left) end of this sequence,
    in place; the quality string is trimmed to match.

    :param amount: the number of nucleotides to trim from the left-side of
        this sequence.
    """
    if amount != 0:
        self.sequenceData = self.sequenceData[amount:]
        self.sequenceQual = self.sequenceQual[amount:]
|
Trim this fastqSequence in-place by removing <amount> nucleotides from
the 5' end (left end).
:param amount: the number of nucleotides to trim from the left-side of
this sequence.
|
def connect(host, default_protocol='telnet', **kwargs):
    """
    Prepare a connection as :class:`prepare()` does, then open it by calling
    :class:`Protocol.connect()`. If the URL or host carries login info, this
    function also logs into the host using :class:`Protocol.login()`.

    :type host: str or Host
    :param host: A URL-formatted hostname or a :class:`Exscript.Host` object.
    :type default_protocol: str
    :param default_protocol: Protocol that is used if the URL specifies none.
    :type kwargs: dict
    :param kwargs: Passed to the protocol constructor.
    :rtype: Protocol
    :return: An instance of the protocol.
    """
    resolved = to_host(host)
    connection = prepare(resolved, default_protocol, **kwargs)
    account = resolved.get_account()
    connection.connect(resolved.get_address(), resolved.get_tcp_port())
    if account is not None:
        connection.login(account)
    return connection
|
Like :class:`prepare()`, but also connects to the host by calling
:class:`Protocol.connect()`. If the URL or host contain any login info, this
function also logs into the host using :class:`Protocol.login()`.
:type host: str or Host
:param host: A URL-formatted hostname or a :class:`Exscript.Host` object.
:type default_protocol: str
:param default_protocol: Protocol that is used if the URL specifies none.
:type kwargs: dict
:param kwargs: Passed to the protocol constructor.
:rtype: Protocol
:return: An instance of the protocol.
|
def main(self, spin, data):
    """
    Split an incoming IRC line into prefix, command and arguments using the
    RFC regex, then dispatch the event; malformed lines are ignored.
    """
    decoded = data.decode(self.encoding)
    match = re.match(RFC_REG, decoded)
    if not match:
        return
    prefix = self.extract_prefix(match.group('prefix'))
    command = match.group('command').upper()
    args = self.extract_args(match.group('arguments'))
    spawn(spin, command, *(prefix + args))
|
The function which uses irc rfc regex to extract
the basic arguments from the msg.
|
def get_pathway(self, pathway_name=None, pathway_id=None, limit=None, as_df=False):
    """Query pathways, filtered by name and/or identifier.

    .. note::
        Format of pathway_id is KEGG:X* or REACTOME:X* . X* stands for a sequence of digits

    :param bool as_df: if set to True result returns as `pandas.DataFrame`
    :param str pathway_name: pathway name
    :param str pathway_id: KEGG or REACTOME identifier
    :param int limit: maximum number of results
    :return: list of :class:`pyctd.manager.models.Pathway` objects

    .. seealso::
        :class:`pyctd.manager.models.Pathway`
    """
    query = self.session.query(models.Pathway)
    filters = (
        (models.Pathway.pathway_name, pathway_name),
        (models.Pathway.pathway_id, pathway_id),
    )
    for column, value in filters:
        if value:
            query = query.filter(column.like(value))
    return self._limit_and_df(query, limit, as_df)
|
Get pathway
.. note::
Format of pathway_id is KEGG:X* or REACTOME:X* . X* stands for a sequence of digits
:param bool as_df: if set to True result returns as `pandas.DataFrame`
:param str pathway_name: pathway name
:param str pathway_id: KEGG or REACTOME identifier
:param int limit: maximum number of results
:return: list of :class:`pyctd.manager.models.Pathway` objects
.. seealso::
:class:`pyctd.manager.models.Pathway`
|
def debug(self, debug_commands):
    """Send one or more debug commands to the client."""
    if isinstance(debug_commands, sc_debug.DebugCommand):
        # Accept a single command as a convenience; wrap it in a list.
        debug_commands = [debug_commands]
    request = sc_pb.RequestDebug(debug=debug_commands)
    return self._client.send(debug=request)
|
Run a debug command.
|
def get(self, key):
    """
    simple `select`: pull ``key`` out of every element of this list.
    """
    if not Log:
        _late_import()
    values = [unwrap(coalesce(_datawrap(v), Null)[key]) for v in _get_list(self)]
    return FlatList(vals=values)
|
simple `select`
|
def _traverse_command(self, name, *args, **kwargs):
    """
    Prepend this object's key to the arguments, invoke the named Redis
    command, and pass the result through the ``post_command`` hook.
    """
    if name not in self.available_commands:
        raise AttributeError("%s is not an available command for %s" %
                             (name, self.__class__.__name__))
    command = getattr(self.connection, "%s" % name)
    key = self.key
    log.debug(u"Requesting %s with key %s and args %s" % (name, key, args))
    raw_result = command(key, *args, **kwargs)
    return self.post_command(
        sender=self,
        name=name,
        result=raw_result,
        args=args,
        kwargs=kwargs
    )
|
Add the key to the args and call the Redis command.
|
def _translate(teleport_value):
    """Translate a teleport value in to a val subschema."""
    if isinstance(teleport_value, dict):
        return _translate_composite(teleport_value)
    # Sentinel lookup; an unhashable value still raises TypeError here,
    # exactly as the original membership test did.
    _missing = object()
    subschema = PRIMITIVES.get(teleport_value, _missing)
    if subschema is not _missing:
        return subschema
    raise DeserializationError(
        "Could not interpret %r as a teleport schema." % teleport_value)
|
Translate a teleport value in to a val subschema.
|
def registration_id_chunks(self, registration_ids):
    """
    Yield successive slices of ``registration_ids``, each at most
    ``FCM_MAX_RECIPIENTS`` (the per-request FCM recipient cap) long.

    Args:
        registration_ids (list): FCM device registration ID
    Yields:
        generator: list including lists with registration ids
    """
    # Alias whichever range builtin exists (py2 xrange / py3 range)
    # without shadowing the name locally.
    try:
        range_fn = xrange
    except NameError:
        range_fn = range
    step = self.FCM_MAX_RECIPIENTS
    for start in range_fn(0, len(registration_ids), step):
        yield registration_ids[start:start + step]
|
Splits registration ids in several lists of max 1000 registration ids per list
Args:
registration_ids (list): FCM device registration ID
Yields:
generator: list including lists with registration ids
|
def metalarchives(song):
    """
    Look the song up on MetalArchives and return its lyrics, or an empty
    string when nothing usable is found.
    """
    artist = normalize(song.artist)
    title = normalize(song.title)
    search_url = ('https://www.metal-archives.com/search/ajax-advanced/searching/songs'
                  f'/?songTitle={title}&bandName={artist}&ExactBandMatch=1')
    response = get_url(search_url, parser='json')
    if not response:
        return ''
    id_pattern = re.compile(r'lyricsLink_([0-9]*)')
    matches = {re.search(id_pattern, cell) for row in response['aaData'] for cell in row}
    if not matches:
        return ''
    matches.discard(None)
    for song_id in (m.group(1) for m in matches):
        lyrics_url = 'https://www.metal-archives.com/release/ajax-view-lyrics/id/{}'
        lyrics = get_url(lyrics_url.format(song_id), parser='html')
        lyrics = lyrics.get_text().strip()
        if not re.search('lyrics not available', lyrics):
            return lyrics
    return ''
|
Returns the lyrics found in MetalArchives for the specified mp3 file or an
empty string if not found.
|
def warn_message(self, message, fh=None, prefix="[warn]:", suffix="..."):
    """
    Emit a warning-style message; when the handle is `sys.stdout` the
    message is printed in yellow, otherwise it is written to the handle.

    :param str message: message to print
    :param file fh: file handle, default is `sys.stdout`
    :param str prefix: message prefix, default is `[warn]:`
    :param str suffix: message suffix, default is `...`
    :return: None
    """
    text = ''.join((prefix, message, suffix))
    target = fh or sys.stdout
    if target is sys.stdout:
        termcolor.cprint(text, color="yellow")
    else:
        target.write(text)
|
print warn type message,
if file handle is `sys.stdout`, print color message
:param str message: message to print
:param file fh: file handle,default is `sys.stdout`
:param str prefix: message prefix,default is `[warn]`
:param str suffix: message suffix ,default is `...`
:return: None
|
def filter_featured_apps(admin_apps, request):
    """
    Build the list of pseudo-apps that are considered "featured".

    An app is featured when it appears in the ``DASHBOARD_FEATURED_APPS``
    setting. Deprecated spec keys (``verbose_name``; dict-shaped ``models``)
    are normalised in place on a copy, with a deprecation warning.

    :param admin_apps: A list of apps.
    :param request: Django request.
    :return: Subset of app-like objects that are listed in
        the settings `DASHBOARD_FEATURED_APPS` setting.
    """
    result = []
    for spec_source in appsettings.DASHBOARD_FEATURED_APPS:
        # Work on a copy so the deprecation fix-ups never mutate settings.
        spec = spec_source.copy()
        if "verbose_name" in spec:
            warnings.warn(
                "DASHBOARD_FEATURED_APPS[]['verbose_name'] = '%s' is deprecated. "
                "Use 'name' instead)" % spec['verbose_name'],
                DeprecationWarning, stacklevel=2
            )
            spec['name'] = spec['verbose_name']
        if hasattr(spec['models'], 'items'):
            warnings.warn(
                "DASHBOARD_FEATURED_APPS[]['models'] for '%s' should now be a "
                "list of tuples, not a dict." % spec['name'],
                DeprecationWarning, stacklevel=2
            )
            spec['models'] = spec['models'].items()
        # Resolve the configured model names into actual model entries.
        spec['models'] = _build_app_models(request, admin_apps, spec['models'])
        # Panels with no visible models would render empty; skip them.
        if spec['models']:
            result.append(spec)
    return result
|
Given a list of apps return a set of pseudo-apps considered featured.
Apps are considered featured if the are defined in the settings
property called `DASHBOARD_FEATURED_APPS` which contains a list of the apps
that are considered to be featured.
:param admin_apps: A list of apps.
:param request: Django request.
:return: Subset of app like objects that are listed in
the settings `DASHBOARD_FEATURED_APPS` setting.
|
def get_interfaces_ip(self):
    """
    Get interface IP details. Returns a dictionary of dictionaries.
    Sample output:
    {
        "Ethernet2/3": {
            "ipv4": {
                "4.4.4.4": {
                    "prefix_length": 16
                }
            },
            "ipv6": {
                "2001:db8::1": {
                    "prefix_length": 10
                },
                "fe80::2ec2:60ff:fe4f:feb2": {
                    "prefix_length": "128"
                }
            }
        },
        "Ethernet2/2": {
            "ipv4": {
                "2.2.2.2": {
                    "prefix_length": 27
                }
            }
        }
    }
    """
    interfaces_ip = {}
    # Gather raw CLI output for both address families across all VRFs.
    ipv4_command = "show ip interface vrf all"
    ipv6_command = "show ipv6 interface vrf all"
    output_v4 = self._send_command(ipv4_command)
    output_v6 = self._send_command(ipv6_command)
    # Stateful line parse: `interface` set on a header line applies to all
    # subsequent "IP address" lines until the next header.
    v4_interfaces = {}
    for line in output_v4.splitlines():
        # Ethernet2/2, Interface status: protocol-up/link-up/admin-up, iod: 38,
        # IP address: 2.2.2.2, IP subnet: 2.2.2.0/27 route-preference: 0, tag: 0
        # IP address: 3.3.3.3, IP subnet: 3.3.3.0/25 secondary route-preference: 0, tag: 0
        if "Interface status" in line:
            interface = line.split(",")[0]
            continue
        if "IP address" in line:
            ip_address = line.split(",")[0].split()[2]
            try:
                # Prefix length comes from the "IP subnet: a.b.c.0/NN" token.
                prefix_len = int(line.split()[5].split("/")[1])
            except ValueError:
                # Token was not "addr/len"; record a placeholder.
                prefix_len = "N/A"
            val = {"prefix_length": prefix_len}
            v4_interfaces.setdefault(interface, {})[ip_address] = val
    # Same stateful parse for IPv6; entries are flagged with "[VALID]".
    v6_interfaces = {}
    for line in output_v6.splitlines():
        # Ethernet2/4, Interface status: protocol-up/link-up/admin-up, iod: 40
        # IPv6 address:
        #   2001:11:2233::a1/24 [VALID]
        #   2001:cc11:22bb:0:2ec2:60ff:fe4f:feb2/64 [VALID]
        # IPv6 subnet:  2001::/24
        # IPv6 link-local address: fe80::2ec2:60ff:fe4f:feb2 (default) [VALID]
        # IPv6 address: fe80::a293:51ff:fe5f:5ce9 [VALID]
        if "Interface status" in line:
            interface = line.split(",")[0]
            continue
        if "VALID" in line:
            line = line.strip()
            if "link-local address" in line:
                # match the following format:
                # IPv6 link-local address: fe80::2ec2:60ff:fe4f:feb2 (default) [VALID]
                ip_address = line.split()[3]
                # Link-local entries carry no explicit prefix; "64" (str) is
                # assumed here — note the string/int inconsistency with the
                # int prefixes parsed below (visible in the sample output).
                prefix_len = "64"
            elif "IPv6 address" in line:
                # match the following format:
                # IPv6 address: fe80::a293:51ff:fe5f:5ce9 [VALID]
                ip_address = line.split()[2]
                prefix_len = "64"
            else:
                # Indented "addr/len [VALID]" continuation line.
                ip_address, prefix_len = line.split()[0].split("/")
                prefix_len = int(prefix_len)
            val = {"prefix_length": prefix_len}
            v6_interfaces.setdefault(interface, {})[ip_address] = val
    # Join data from intermediate dictionaries.
    for interface, data in v4_interfaces.items():
        interfaces_ip.setdefault(interface, {"ipv4": {}})["ipv4"] = data
    for interface, data in v6_interfaces.items():
        interfaces_ip.setdefault(interface, {"ipv6": {}})["ipv6"] = data
    return interfaces_ip
|
Get interface IP details. Returns a dictionary of dictionaries.
Sample output:
{
"Ethernet2/3": {
"ipv4": {
"4.4.4.4": {
"prefix_length": 16
}
},
"ipv6": {
"2001:db8::1": {
"prefix_length": 10
},
"fe80::2ec2:60ff:fe4f:feb2": {
"prefix_length": "128"
}
}
},
"Ethernet2/2": {
"ipv4": {
"2.2.2.2": {
"prefix_length": 27
}
}
}
}
|
def build(self, path=None, tag=None, quiet=False, fileobj=None,
          nocache=False, rm=False, timeout=None,
          custom_context=False, encoding=None, pull=False,
          forcerm=False, dockerfile=None, container_limits=None,
          decode=False, buildargs=None, gzip=False, shmsize=None,
          labels=None, cache_from=None, target=None, network_mode=None,
          squash=None, extra_hosts=None, platform=None, isolation=None,
          use_config_proxy=False):
    """
    Similar to the ``docker build`` command. Either ``path`` or ``fileobj``
    needs to be set. ``path`` can be a local path (to a directory
    containing a Dockerfile) or a remote URL. ``fileobj`` must be a
    readable file-like object to a Dockerfile.
    If you have a tar file for the Docker build context (including a
    Dockerfile) already, pass a readable file-like object to ``fileobj``
    and also pass ``custom_context=True``. If the stream is compressed
    also, set ``encoding`` to the correct value (e.g ``gzip``).
    Example:
        >>> from io import BytesIO
        >>> from docker import APIClient
        >>> dockerfile = '''
        ... # Shared Volume
        ... FROM busybox:buildroot-2014.02
        ... VOLUME /data
        ... CMD ["/bin/sh"]
        ... '''
        >>> f = BytesIO(dockerfile.encode('utf-8'))
        >>> cli = APIClient(base_url='tcp://127.0.0.1:2375')
        >>> response = [line for line in cli.build(
        ...     fileobj=f, rm=True, tag='yourname/volume'
        ... )]
        >>> response
        ['{"stream":" ---\\u003e a9eb17255234\\n"}',
         '{"stream":"Step 1 : VOLUME /data\\n"}',
         '{"stream":" ---\\u003e Running in abdc1e6896c6\\n"}',
         '{"stream":" ---\\u003e 713bca62012e\\n"}',
         '{"stream":"Removing intermediate container abdc1e6896c6\\n"}',
         '{"stream":"Step 2 : CMD [\\"/bin/sh\\"]\\n"}',
         '{"stream":" ---\\u003e Running in dba30f2a1a7e\\n"}',
         '{"stream":" ---\\u003e 032b8b2855fc\\n"}',
         '{"stream":"Removing intermediate container dba30f2a1a7e\\n"}',
         '{"stream":"Successfully built 032b8b2855fc\\n"}']
    Args:
        path (str): Path to the directory containing the Dockerfile
        fileobj: A file object to use as the Dockerfile. (Or a file-like
            object)
        tag (str): A tag to add to the final image
        quiet (bool): Whether to return the status
        nocache (bool): Don't use the cache when set to ``True``
        rm (bool): Remove intermediate containers. The ``docker build``
            command now defaults to ``--rm=true``, but we have kept the old
            default of `False` to preserve backward compatibility
        timeout (int): HTTP timeout
        custom_context (bool): Optional if using ``fileobj``
        encoding (str): The encoding for a stream. Set to ``gzip`` for
            compressing
        pull (bool): Downloads any updates to the FROM image in Dockerfiles
        forcerm (bool): Always remove intermediate containers, even after
            unsuccessful builds
        dockerfile (str): path within the build context to the Dockerfile
        buildargs (dict): A dictionary of build arguments
        container_limits (dict): A dictionary of limits applied to each
            container created by the build process. Valid keys:
            - memory (int): set memory limit for build
            - memswap (int): Total memory (memory + swap), -1 to disable
                swap
            - cpushares (int): CPU shares (relative weight)
            - cpusetcpus (str): CPUs in which to allow execution, e.g.,
                ``"0-3"``, ``"0,1"``
        decode (bool): If set to ``True``, the returned stream will be
            decoded into dicts on the fly. Default ``False``
        shmsize (int): Size of `/dev/shm` in bytes. The size must be
            greater than 0. If omitted the system uses 64MB
        labels (dict): A dictionary of labels to set on the image
        cache_from (:py:class:`list`): A list of images used for build
            cache resolution
        target (str): Name of the build-stage to build in a multi-stage
            Dockerfile
        network_mode (str): networking mode for the run commands during
            build
        squash (bool): Squash the resulting images layers into a
            single layer.
        extra_hosts (dict): Extra hosts to add to /etc/hosts in building
            containers, as a mapping of hostname to IP address.
        platform (str): Platform in the format ``os[/arch[/variant]]``
        isolation (str): Isolation technology used during build.
            Default: `None`.
        use_config_proxy (bool): If ``True``, and if the docker client
            configuration file (``~/.docker/config.json`` by default)
            contains a proxy configuration, the corresponding environment
            variables will be set in the container being built.
    Returns:
        A generator for the build output.
    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
        ``TypeError``
            If neither ``path`` nor ``fileobj`` is specified.
    """
    remote = context = None
    headers = {}
    container_limits = container_limits or {}
    buildargs = buildargs or {}
    # --- Argument validation ------------------------------------------
    if path is None and fileobj is None:
        raise TypeError("Either path or fileobj needs to be provided.")
    if gzip and encoding is not None:
        raise errors.DockerException(
            'Can not use custom encoding if gzip is enabled'
        )
    for key in container_limits.keys():
        if key not in constants.CONTAINER_LIMITS_KEYS:
            raise errors.DockerException(
                'Invalid container_limits key {0}'.format(key)
            )
    # --- Build-context selection --------------------------------------
    # Exactly one of: caller-supplied tar stream, a Dockerfile fileobj
    # wrapped into a context, a remote URL, or a local directory tarred
    # here (honouring .dockerignore).
    if custom_context:
        if not fileobj:
            raise TypeError("You must specify fileobj with custom_context")
        context = fileobj
    elif fileobj is not None:
        context = utils.mkbuildcontext(fileobj)
    elif path.startswith(('http://', 'https://',
                          'git://', 'github.com/', 'git@')):
        remote = path
    elif not os.path.isdir(path):
        raise TypeError("You must specify a directory to build in path")
    else:
        dockerignore = os.path.join(path, '.dockerignore')
        exclude = None
        if os.path.exists(dockerignore):
            with open(dockerignore, 'r') as f:
                # Drop blank lines and '#' comments from .dockerignore.
                exclude = list(filter(
                    lambda x: x != '' and x[0] != '#',
                    [l.strip() for l in f.read().splitlines()]
                ))
        dockerfile = process_dockerfile(dockerfile, path)
        context = utils.tar(
            path, exclude=exclude, dockerfile=dockerfile, gzip=gzip
        )
        encoding = 'gzip' if gzip else encoding
    # --- Query-parameter assembly -------------------------------------
    u = self._url('/build')
    params = {
        't': tag,
        'remote': remote,
        'q': quiet,
        'nocache': nocache,
        'rm': rm,
        'forcerm': forcerm,
        'pull': pull,
        'dockerfile': dockerfile,
    }
    params.update(container_limits)
    if use_config_proxy:
        # Proxy env vars from the client config become build args, but
        # never override explicitly supplied ones (setdefault).
        proxy_args = self._proxy_configs.get_environment()
        for k, v in proxy_args.items():
            buildargs.setdefault(k, v)
    if buildargs:
        params.update({'buildargs': json.dumps(buildargs)})
    # --- API-version-gated options ------------------------------------
    # Each option below was introduced in a specific Engine API version;
    # requesting it against an older daemon raises InvalidVersion.
    if shmsize:
        if utils.version_gte(self._version, '1.22'):
            params.update({'shmsize': shmsize})
        else:
            raise errors.InvalidVersion(
                'shmsize was only introduced in API version 1.22'
            )
    if labels:
        if utils.version_gte(self._version, '1.23'):
            params.update({'labels': json.dumps(labels)})
        else:
            raise errors.InvalidVersion(
                'labels was only introduced in API version 1.23'
            )
    if cache_from:
        if utils.version_gte(self._version, '1.25'):
            params.update({'cachefrom': json.dumps(cache_from)})
        else:
            raise errors.InvalidVersion(
                'cache_from was only introduced in API version 1.25'
            )
    if target:
        if utils.version_gte(self._version, '1.29'):
            params.update({'target': target})
        else:
            raise errors.InvalidVersion(
                'target was only introduced in API version 1.29'
            )
    if network_mode:
        if utils.version_gte(self._version, '1.25'):
            params.update({'networkmode': network_mode})
        else:
            raise errors.InvalidVersion(
                'network_mode was only introduced in API version 1.25'
            )
    if squash:
        if utils.version_gte(self._version, '1.25'):
            params.update({'squash': squash})
        else:
            raise errors.InvalidVersion(
                'squash was only introduced in API version 1.25'
            )
    if extra_hosts is not None:
        if utils.version_lt(self._version, '1.27'):
            raise errors.InvalidVersion(
                'extra_hosts was only introduced in API version 1.27'
            )
        if isinstance(extra_hosts, dict):
            extra_hosts = utils.format_extra_hosts(extra_hosts)
        params.update({'extrahosts': extra_hosts})
    if platform is not None:
        if utils.version_lt(self._version, '1.32'):
            raise errors.InvalidVersion(
                'platform was only introduced in API version 1.32'
            )
        params['platform'] = platform
    if isolation is not None:
        if utils.version_lt(self._version, '1.24'):
            raise errors.InvalidVersion(
                'isolation was only introduced in API version 1.24'
            )
        params['isolation'] = isolation
    # --- Request dispatch ---------------------------------------------
    if context is not None:
        headers = {'Content-Type': 'application/tar'}
        if encoding:
            headers['Content-Encoding'] = encoding
    self._set_auth_headers(headers)
    response = self._post(
        u,
        data=context,
        params=params,
        headers=headers,
        stream=True,
        timeout=timeout,
    )
    # Only close contexts we created; a caller-supplied custom context
    # remains the caller's responsibility.
    if context is not None and not custom_context:
        context.close()
    return self._stream_helper(response, decode=decode)
|
Similar to the ``docker build`` command. Either ``path`` or ``fileobj``
needs to be set. ``path`` can be a local path (to a directory
containing a Dockerfile) or a remote URL. ``fileobj`` must be a
readable file-like object to a Dockerfile.
If you have a tar file for the Docker build context (including a
Dockerfile) already, pass a readable file-like object to ``fileobj``
and also pass ``custom_context=True``. If the stream is compressed
also, set ``encoding`` to the correct value (e.g ``gzip``).
Example:
>>> from io import BytesIO
>>> from docker import APIClient
>>> dockerfile = '''
... # Shared Volume
... FROM busybox:buildroot-2014.02
... VOLUME /data
... CMD ["/bin/sh"]
... '''
>>> f = BytesIO(dockerfile.encode('utf-8'))
>>> cli = APIClient(base_url='tcp://127.0.0.1:2375')
>>> response = [line for line in cli.build(
... fileobj=f, rm=True, tag='yourname/volume'
... )]
>>> response
['{"stream":" ---\\u003e a9eb17255234\\n"}',
'{"stream":"Step 1 : VOLUME /data\\n"}',
'{"stream":" ---\\u003e Running in abdc1e6896c6\\n"}',
'{"stream":" ---\\u003e 713bca62012e\\n"}',
'{"stream":"Removing intermediate container abdc1e6896c6\\n"}',
'{"stream":"Step 2 : CMD [\\"/bin/sh\\"]\\n"}',
'{"stream":" ---\\u003e Running in dba30f2a1a7e\\n"}',
'{"stream":" ---\\u003e 032b8b2855fc\\n"}',
'{"stream":"Removing intermediate container dba30f2a1a7e\\n"}',
'{"stream":"Successfully built 032b8b2855fc\\n"}']
Args:
path (str): Path to the directory containing the Dockerfile
fileobj: A file object to use as the Dockerfile. (Or a file-like
object)
tag (str): A tag to add to the final image
quiet (bool): Whether to return the status
nocache (bool): Don't use the cache when set to ``True``
rm (bool): Remove intermediate containers. The ``docker build``
command now defaults to ``--rm=true``, but we have kept the old
default of `False` to preserve backward compatibility
timeout (int): HTTP timeout
custom_context (bool): Optional if using ``fileobj``
encoding (str): The encoding for a stream. Set to ``gzip`` for
compressing
pull (bool): Downloads any updates to the FROM image in Dockerfiles
forcerm (bool): Always remove intermediate containers, even after
unsuccessful builds
dockerfile (str): path within the build context to the Dockerfile
buildargs (dict): A dictionary of build arguments
container_limits (dict): A dictionary of limits applied to each
container created by the build process. Valid keys:
- memory (int): set memory limit for build
- memswap (int): Total memory (memory + swap), -1 to disable
swap
- cpushares (int): CPU shares (relative weight)
- cpusetcpus (str): CPUs in which to allow execution, e.g.,
``"0-3"``, ``"0,1"``
decode (bool): If set to ``True``, the returned stream will be
decoded into dicts on the fly. Default ``False``
shmsize (int): Size of `/dev/shm` in bytes. The size must be
greater than 0. If omitted the system uses 64MB
labels (dict): A dictionary of labels to set on the image
cache_from (:py:class:`list`): A list of images used for build
cache resolution
target (str): Name of the build-stage to build in a multi-stage
Dockerfile
network_mode (str): networking mode for the run commands during
build
squash (bool): Squash the resulting images layers into a
single layer.
extra_hosts (dict): Extra hosts to add to /etc/hosts in building
containers, as a mapping of hostname to IP address.
platform (str): Platform in the format ``os[/arch[/variant]]``
isolation (str): Isolation technology used during build.
Default: `None`.
use_config_proxy (bool): If ``True``, and if the docker client
configuration file (``~/.docker/config.json`` by default)
contains a proxy configuration, the corresponding environment
variables will be set in the container being built.
Returns:
A generator for the build output.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
``TypeError``
If neither ``path`` nor ``fileobj`` is specified.
|
def ensure_dim(core, dim, dim_):
    """
    Coerce the keys of ``core`` to length ``dim``.

    Args:
        core (dict): Mapping from exponent tuples (of length ``dim_``)
            to coefficients.
        dim (int or None): Requested dimension; ``None`` means "use the
            inferred dimension ``dim_``".
        dim_ (int): Dimension inferred from the keys of ``core``.

    Returns:
        (dict, int): The converted core and the resulting dimension.
    """
    if dim is None:
        dim = dim_
    if not dim:
        # Zero/falsy dimension: fall back to a one-dimensional core.
        return core, 1
    if dim_ == dim:
        return core, int(dim)
    # BUGFIX: the two branches below were swapped (grow truncated to the
    # old length and shrink padded with a negative repeat), making both
    # conversions no-ops.
    if dim > dim_:
        # Grow: pad each key with trailing zeros up to the new dimension.
        key_convert = lambda vari: vari + (0,)*(dim-dim_)
    else:
        # Shrink: truncate keys; values of colliding keys are summed.
        key_convert = lambda vari: vari[:dim]
    new_core = {}
    for key, val in core.items():
        key_ = key_convert(key)
        if key_ in new_core:
            new_core[key_] += val
        else:
            new_core[key_] = val
    return new_core, int(dim)
|
Ensure that dim is correct.
|
def load(self, callback=None, errback=None, reload=False):
    """
    Load network data from the API.
    """
    if not reload and self.data:
        raise NetworkException('Network already loaded')

    def success(result, *args):
        # Cache the payload and derived attributes on the instance.
        self.data = result
        self.id = result['id']
        self.name = result['name']
        self.report = self._rest.report(self.id)
        return callback(self) if callback else self

    if self.id is None:
        if self.name is None:
            raise NetworkException('Must at least specify an id or name')
        # Resolve the id by name from the full network listing.
        matches = [net for net in self._rest.list()
                   if net['name'] == self.name]
        self.id = matches[0]['id']
    return self._rest.retrieve(self.id, callback=success, errback=errback)
|
Load network data from the API.
|
def send_venue(chat_id, latitude, longitude, title, address,
               foursquare_id=None, reply_to_message_id=None, reply_markup=None, disable_notification=False,
               **kwargs):
    """
    Send information about a venue via the Telegram Bot API.

    :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
    :param latitude: Latitude of location.
    :param longitude: Longitude of location.
    :param title: Name of the venue.
    :param address: Address of the venue.
    :param foursquare_id: Foursquare identifier of the venue.
    :param reply_to_message_id: If the message is a reply, ID of the original message
    :param reply_markup: Additional interface options. A JSON-serialized object for a
                         custom reply keyboard, instructions to hide keyboard or to
                         force a reply from the user.
    :param disable_notification: Sends the message silently. iOS users will not receive a notification, Android users
                                 will receive a notification with no sound. Other apps coming soon.
    :param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
    :type chat_id: int or str
    :type latitude: float
    :type longitude: float
    :type title: str
    :type address: str
    :type foursquare_id: str
    :type reply_to_message_id: int
    :type reply_markup: ReplyKeyboardMarkup or ReplyKeyboardHide or ForceReply
    :type disable_notification: bool
    :returns: On success, the sent Message is returned.
    :rtype: TelegramBotRPCRequest
    """
    # Mandatory parameters go straight into the payload.
    params = {
        'chat_id': chat_id,
        'latitude': latitude,
        'longitude': longitude,
        'title': title,
        'address': address,
    }
    # Optional parameters are filtered through _clean_params first.
    optional = _clean_params(
        foursquare_id=foursquare_id,
        reply_to_message_id=reply_to_message_id,
        reply_markup=reply_markup,
        disable_notification=disable_notification,
    )
    params.update(optional)
    return TelegramBotRPCRequest('sendVenue', params=params, on_result=Message.from_result, **kwargs)
|
Use this method to send information about a venue.
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:param latitude: Latitude of location.
:param longitude: Longitude of location.
:param title: Name of the venue.
:param address: Address of the venue.
:param foursquare_id: Foursquare identifier of the venue.
:param reply_to_message_id: If the message is a reply, ID of the original message
:param reply_markup: Additional interface options. A JSON-serialized object for a
custom reply keyboard, instructions to hide keyboard or to
force a reply from the user.
:param disable_notification: Sends the message silently. iOS users will not receive a notification, Android users
will receive a notification with no sound. Other apps coming soon.
:param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:type chat_id: int or str
:type latitude: float
:type longitude: float
:type title: str
:type address: str
:type foursquare_id: str
:type reply_to_message_id: int
:type reply_markup: ReplyKeyboardMarkup or ReplyKeyboardHide or ForceReply
:type disable_notification: bool
:returns: On success, the sent Message is returned.
:rtype: TelegramBotRPCRequest
|
def user_open(url_or_command):
    """Open the specified parameter in the web browser if a URL is detected,
    otherwise pass the parameter to the shell as a subprocess. This function
    is intended to be used in on_leftclick/on_rightclick callbacks.
    :param url_or_command: String containing URL or command
    """
    from urllib.parse import urlparse
    scheme = urlparse(url_or_command).scheme
    if scheme == 'http' or scheme == 'https':
        import webbrowser
        import os
        # webbrowser.open() sometimes prints a message for some reason and confuses i3
        # Redirect stdout briefly to prevent this from happening.
        # Save the real stdout fd, point fd 1 at /dev/null, then restore.
        savout = os.dup(1)
        os.close(1)
        os.open(os.devnull, os.O_RDWR)
        try:
            webbrowser.open(url_or_command)
        finally:
            os.dup2(savout, 1)
    else:
        import subprocess
        # NOTE(review): shell=True on an arbitrary string is intentional
        # here (the callback string IS a shell command), but callers must
        # not pass untrusted input.
        subprocess.Popen(url_or_command, shell=True)
|
Open the specified parameter in the web browser if a URL is detected,
otherwise pass the parameter to the shell as a subprocess. This function
is intended to be used in on_leftclick/on_rightclick callbacks.
:param url_or_command: String containing URL or command
|
def mtanh(alpha, z):
    """Modified hyperbolic tangent function mtanh(z; alpha).

    Computes ``((1 + alpha*z)*e**z - e**-z) / (e**z + e**-z)``, which
    reduces to ``tanh(z)`` when ``alpha == 0``.

    Parameters
    ----------
    alpha : float
        The core slope of the mtanh.
    z : float or array
        The coordinate of the mtanh.

    Returns
    -------
    float or numpy.ndarray
        Value of mtanh evaluated at each point of `z`.
    """
    # scipy.asarray/scipy.exp were deprecated NumPy aliases and have been
    # removed from modern SciPy; use numpy directly.
    import numpy
    z = numpy.asarray(z)
    ez = numpy.exp(z)
    enz = 1.0 / ez
    return ((1 + alpha * z) * ez - enz) / (ez + enz)
|
Modified hyperbolic tangent function mtanh(z; alpha).
Parameters
----------
alpha : float
The core slope of the mtanh.
z : float or array
The coordinate of the mtanh.
|
def get_jids():
    '''
    Return a list of all job ids
    '''
    connection = _get_connection()
    _verify_views()
    # Map every jid onto its formatted load document via the 'jids' view.
    return {
        row.key: _format_jid_instance(row.key, row.doc.value['load'])
        for row in connection.query(DESIGN_NAME, 'jids', include_docs=True)
    }
|
Return a list of all job ids
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.