code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def visited(self):
    """Hook invoked immediately after this node has been visited,
    whether or not an actual build took place."""
    # Only nodes that carry build info need their node info refreshed
    # and stored; nodes without a ``binfo`` attribute are skipped.
    if hasattr(self, 'binfo'):
        self.ninfo.update(self)
        SCons.Node.store_info_map[self.store_info](self)
|
Called just after this node has been visited (with or
without a build).
|
def save_csv(p, sheet):
    """Save as single CSV file, handling column names as first line."""
    with p.open_text(mode='w') as fp:
        writer = csv.writer(fp, **csvoptions())
        header = [col.name for col in sheet.visibleCols]
        # Emit a header row only when at least one column actually has a name.
        if ''.join(header):
            writer.writerow(header)
        for row in Progress(sheet.rows, 'saving'):
            writer.writerow([col.getDisplayValue(row)
                             for col in sheet.visibleCols])
|
Save as single CSV file, handling column names as first line.
|
def main():
    """Entry point: parse CLI options, run the benchmark, and plot the
    resulting diagrams into the output directory."""
    desc = 'Benchmark the files generated by generate.py'
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument(
        '--src',
        dest='src_dir',
        default='generated',
        help='The directory containing the sources to benchmark'
    )
    parser.add_argument(
        '--out',
        dest='out_dir',
        default='../../doc',
        help='The output directory'
    )
    parser.add_argument(
        '--include',
        dest='include',
        default='include',
        # typo fix: "headeres" -> "headers"
        help='The directory containing the headers for the benchmark'
    )
    parser.add_argument(
        '--boost_headers',
        dest='boost_headers',
        default='../../../..',
        help='The directory containing the Boost headers (the boost directory)'
    )
    parser.add_argument(
        '--compiler',
        dest='compiler',
        default='g++',
        help='The compiler to do the benchmark with'
    )
    parser.add_argument(
        '--repeat_count',
        dest='repeat_count',
        type=int,
        default=5,
        help='How many times a measurement should be repeated.'
    )
    args = parser.parse_args()
    compiler = compiler_info(args.compiler)
    results = benchmark(
        args.src_dir,
        args.compiler,
        [args.include, args.boost_headers],
        args.repeat_count
    )
    plot_diagrams(results, configs_in(args.src_dir), compiler, args.out_dir)
|
The main function of the script
|
def cat(src_filename, dst_file):
    """Copy the contents of the named file into an already opened file."""
    dev, dev_filename = get_dev_and_path(src_filename)
    if dev is None:
        # Local file: stream it into the destination line by line.
        with open(dev_filename, 'rb') as src:
            for chunk in src:
                dst_file.write(chunk)
    else:
        # Remote file: have the device push its contents back to the host.
        filesize = dev.remote_eval(get_filesize, dev_filename)
        return dev.remote(send_file_to_host, dev_filename, dst_file,
                          filesize, xfer_func=recv_file_from_remote)
|
Copies the contents of the indicated file to an already opened file.
|
def _create_messages(self, names, data, isDms=False):
    """
    Creates object of arrays of messages from each json file specified by the names or ids
    :param [str] names: names of each group of messages
    :param [object] data: array of objects detailing where to get the messages from in
    the directory structure
    :param bool isDms: boolean value used to tell if the data is dm data so the function can
    collect the empty dm directories and store them in memory only
    :return: object of arrays of messages
    :rtype: object
    """
    chats = {}
    empty_dms = []
    formatter = SlackFormatter(self.__USER_DATA, data)
    for name in names:
        # gets path to dm directory that holds the json archive
        dir_path = os.path.join(self._PATH, name)
        messages = []
        # array of all days archived
        day_files = glob.glob(os.path.join(dir_path, "*.json"))
        # skip (and, for DMs, record) directories with no archived days
        if not day_files:
            if isDms:
                empty_dms.append(name)
            continue
        for day in sorted(day_files):
            # BUG FIX: glob already returns paths rooted at dir_path (which
            # contains self._PATH); re-joining with self._PATH duplicated the
            # prefix whenever self._PATH was a relative path.
            with io.open(day, encoding="utf8") as f:
                # loads all messages of one archived day
                day_messages = json.load(f)
                messages.extend([Message(formatter, d) for d in day_messages])
        chats[name] = messages
    if isDms:
        self._EMPTY_DMS = empty_dms
    return chats
|
Creates object of arrays of messages from each json file specified by the names or ids
:param [str] names: names of each group of messages
:param [object] data: array of objects detailing where to get the messages from in
the directory structure
:param bool isDms: boolean value used to tell if the data is dm data so the function can
collect the empty dm directories and store them in memory only
:return: object of arrays of messages
:rtype: object
|
def _settle_message(self, message_number, response):
    """Send a settle disposition for a received message.
    :param message_number: The delivery number of the message
     to settle.
    :type message_number: int
    :param response: The type of disposition to respond with, e.g. whether
     the message was accepted, rejected or abandoned.
    :type response: ~uamqp.errors.MessageResponse
    """
    # Nothing to do for no response or an already-settled message.
    if not response or isinstance(response, errors.MessageAlreadySettled):
        return
    receiver = self._receiver
    if isinstance(response, errors.MessageAccepted):
        receiver.settle_accepted_message(message_number)
        return
    if isinstance(response, errors.MessageReleased):
        receiver.settle_released_message(message_number)
        return
    if isinstance(response, errors.MessageRejected):
        receiver.settle_rejected_message(
            message_number,
            response.error_condition,
            response.error_description)
        return
    if isinstance(response, errors.MessageModified):
        receiver.settle_modified_message(
            message_number,
            response.failed,
            response.undeliverable,
            response.annotations)
        return
    raise ValueError("Invalid message response type: {}".format(response))
|
Send a settle disposition for a received message.
:param message_number: The delivery number of the message
to settle.
:type message_number: int
:param response: The type of disposition to respond with, e.g. whether
the message was accepted, rejected or abandoned.
:type response: ~uamqp.errors.MessageResponse
|
def positions(self, word):
    """
    Return the list of positions where ``word`` can be hyphenated.
    Wraps Hyph_dict.positions, discarding break points that fall too
    close to the left or right end of the word.
    """
    upper = len(word) - self.right
    valid = []
    for point in self.hd.positions(word):
        if self.left <= point <= upper:
            valid.append(point)
    return valid
|
Returns a list of positions where the word can be hyphenated.
See also Hyph_dict.positions. The points that are too far to
the left or right are removed.
|
def setup(self, config_file=None, aws_config=None, gpg_config=None,
decrypt_gpg=True, decrypt_kms=True):
"""Make setup easier by providing a constructor method.
Move to config_file
File can be located with a filename only, relative path, or absolute path.
If only name or relative path is provided, look in this order:
1. current directory
2. `~/.config/<file_name>`
3. `/etc/<file_name>`
It is a good idea to include you __package__ in the file name.
For example, `cfg = Config(os.path.join(__package__, 'config.yaml'))`.
This way it will look for your_package/config.yaml,
~/.config/your_package/config.yaml, and /etc/your_package/config.yaml.
"""
if aws_config is not None:
self.aws_config = aws_config
if gpg_config is not None:
self.gpg_config = gpg_config
if decrypt_kms is not None:
self.decrypt_kms = decrypt_kms
if decrypt_gpg is not None:
self.decrypt_gpg = decrypt_gpg
# Again, load the file last so that it can rely on other properties.
if config_file is not None:
self.config_file = config_file
return self
|
Make setup easier by providing a constructor method.
Move to config_file
File can be located with a filename only, relative path, or absolute path.
If only name or relative path is provided, look in this order:
1. current directory
2. `~/.config/<file_name>`
3. `/etc/<file_name>`
It is a good idea to include your __package__ in the file name.
For example, `cfg = Config(os.path.join(__package__, 'config.yaml'))`.
This way it will look for your_package/config.yaml,
~/.config/your_package/config.yaml, and /etc/your_package/config.yaml.
|
def delete(gandi, resource, background, force):
    """Delete one or more IPs (after detaching them from VMs if necessary).
    resource can be an ip id or ip.
    """
    # De-duplicate and give the user a deterministic ordering.
    resource = sorted(set(resource))
    known = gandi.ip.resource_list()
    # Refuse to proceed if any requested IP is unknown.
    for ip in resource:
        if ip not in known:
            gandi.echo('Sorry interface %s does not exist' % ip)
            gandi.echo('Please use one of the following: %s' % known)
            return
    if not force:
        prompt = ('Are you sure you want to delete ip(s) %s'
                  % ', '.join(resource))
        if not click.confirm(prompt):
            return
    return gandi.ip.delete(resource, background, force)
|
Delete one or more IPs (after detaching them from VMs if necessary).
resource can be an ip id or ip.
|
def dataframe(self, force_refresh=False):
    """Return a pandas dataframe summarizing this object.
    Built by calling SageMaker List and Describe APIs and converting them
    into a convenient tabular summary; the result is cached after the
    first fetch.
    Args:
        force_refresh (bool): Set to True to fetch the latest data from SageMaker API.
    """
    if force_refresh:
        self.clear_cache()
    cached = self._dataframe
    if cached is None:
        cached = self._fetch_dataframe()
        self._dataframe = cached
    return cached
|
A pandas dataframe with lots of interesting results about this object.
Created by calling SageMaker List and Describe APIs and converting them into
a convenient tabular summary.
Args:
force_refresh (bool): Set to True to fetch the latest data from SageMaker API.
|
def require(method):
    """
    Decorator managing chained dependencies between class properties.
    ``@require('a')`` declares that method ``a`` must have been invoked on
    the instance before the decorated callable runs, so data and processing
    for a class can be evaluated lazily (i.e. not all upon instantiation).
    The named method is called at most once; a ``<method>_called`` flag on
    the instance records that it already ran.
    Examples:
        >>> class Foo(Bar):
        ...     def a(self):
        ...         print('a!')
        ...         return 1
        ...
        ...     @require('a')
        ...     @property
        ...     def b(self):
        ...         print('b!')
        ...         return self.a + 1
        >>> foo = Foo()
        >>> print(foo.b)
        a!
        b!
        2
    """
    flag = method + '_called'

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            obj = args[0]
            # The target instance must actually provide the requirement.
            if not hasattr(obj, method):
                raise AssertionError('{} class has no method {}()'.format(
                    obj.__class__.__name__, method))
            # Track on the instance whether the requirement already ran.
            if not hasattr(obj, flag):
                setattr(obj, flag, False)
            if not getattr(obj, flag):
                getattr(obj, method)()
                setattr(obj, flag, True)
            return func(*args, **kwargs)
        return wrapper
    return decorator
|
Decorator for managing chained dependencies of different class
properties. The @require decorator allows developers to specify
that a function call must be operated on before another property
or function call is accessed, so that data and processing for an
entire class can be evaluated in a lazy way (i.e. not all upon
instantiation).
Examples:
>>> class Foo(Bar):
>>>
>>> def a(self):
>>> print 'a!'
>>> return 1
>>>
>>> @require('a')
>>> @property
>>> def b(self):
>>> print 'b!'
>>> return self.a + 1
>>>
>>> foo = Foo()
>>> print foo.b
>>>
'a!'
'b!'
2
|
def get_python_logger():
    """Returns logger to receive Python messages (as opposed to Fortran).
    At first call, _python_logger is created. At subsequent calls, _python_logger is returned.
    Therefore, if you want to change `a99.flag_log_file` or `a99.flag_log_console`, do so
    before calling get_python_logger(), otherwise these changes will be ineffective.
    """
    global _python_logger
    if _python_logger is None:
        fn = "a99.log"
        logger = logging.Logger("a99", level=a99.logging_level)
        if a99.flag_log_file:
            add_file_handler(logger, fn)
        if a99.flag_log_console:
            ch = logging.StreamHandler()
            ch.setFormatter(_fmtr)
            logger.addHandler(ch)
        _python_logger = logger
        for line in a99.format_box("a99 logging session started @ {}".format(a99.now_str())):
            logger.info(line)
        # BUG FIX: this message was previously gated on flag_log_file, so
        # "Logging to console" was reported based on the wrong flag.
        if a99.flag_log_console:
            logger.info("$ Logging to console $")
        if a99.flag_log_file:
            logger.info("$ Logging to file '{}' $".format(fn))
    return _python_logger
|
Returns logger to receive Python messages (as opposed to Fortran).
At first call, _python_logger is created. At subsequent calls, _python_logger is returned.
Therefore, if you want to change `a99.flag_log_file` or `a99.flag_log_console`, do so
before calling get_python_logger(), otherwise these changes will be ineffective.
|
def generate_main_h(directory, xml):
    '''generate main header per XML file'''
    # Context manager guarantees the handle is closed (and buffers flushed)
    # even if template expansion raises.
    with open(os.path.join(directory, xml.basename + ".h"), mode='w') as f:
        t.write(f, '''
/** @file
 * @brief MAVLink comm protocol generated from ${basename}.xml
 * @see http://mavlink.org
 */
#pragma once
#ifndef MAVLINK_${basename_upper}_H
#define MAVLINK_${basename_upper}_H
#ifndef MAVLINK_H
#error Wrong include order: MAVLINK_${basename_upper}.H MUST NOT BE DIRECTLY USED. Include mavlink.h from the same directory instead or set ALL AND EVERY defines from MAVLINK.H manually accordingly, including the #define MAVLINK_H call.
#endif
#undef MAVLINK_THIS_XML_IDX
#define MAVLINK_THIS_XML_IDX ${xml_idx}
#ifdef __cplusplus
extern "C" {
#endif
// MESSAGE LENGTHS AND CRCS
#ifndef MAVLINK_MESSAGE_LENGTHS
#define MAVLINK_MESSAGE_LENGTHS {${message_lengths_array}}
#endif
#ifndef MAVLINK_MESSAGE_CRCS
#define MAVLINK_MESSAGE_CRCS {${message_crcs_array}}
#endif
#include "../protocol.h"
#define MAVLINK_ENABLED_${basename_upper}
// ENUM DEFINITIONS
${{enum:
/** @brief ${description} */
#ifndef HAVE_ENUM_${name}
#define HAVE_ENUM_${name}
typedef enum ${name}
{
${{entry: ${name}=${value}, /* ${description} |${{param:${description}| }} */
}}
} ${name};
#endif
}}
// MAVLINK VERSION
#ifndef MAVLINK_VERSION
#define MAVLINK_VERSION ${version}
#endif
#if (MAVLINK_VERSION == 0)
#undef MAVLINK_VERSION
#define MAVLINK_VERSION ${version}
#endif
// MESSAGE DEFINITIONS
${{message:#include "./mavlink_msg_${name_lower}.h"
}}
// base include
${{include_list:#include "../${base}/${base}.h"
}}
#undef MAVLINK_THIS_XML_IDX
#define MAVLINK_THIS_XML_IDX ${xml_idx}
#if MAVLINK_THIS_XML_IDX == MAVLINK_PRIMARY_XML_IDX
# define MAVLINK_MESSAGE_INFO {${message_info_array}}
# if MAVLINK_COMMAND_24BIT
#  include "../mavlink_get_info.h"
# endif
#endif
#ifdef __cplusplus
}
#endif // __cplusplus
#endif // MAVLINK_${basename_upper}_H
''', xml)
|
generate main header per XML file
|
def module_path(name, path):
    # type: (AModuleName, AModulePath) -> ADefine
    """Load an external malcolm module (e.g. ADCore/etc/malcolm)"""
    define = Define(name, path)
    # Explicit raise instead of a bare `assert`, so the validation still
    # runs under `python -O`; AssertionError kept for backward compatibility.
    if not os.path.isdir(path):
        raise AssertionError("%r doesn't exist" % path)
    name = "malcolm.modules.%s" % name
    import_package_from_path(name, path)
    return define
|
Load an external malcolm module (e.g. ADCore/etc/malcolm)
|
def _add_vertex_attributes(self, genes: List[Gene],
                           disease_associations: Optional[dict] = None) -> None:
    """Add attributes to vertices.
    :param genes: A list of genes containing attribute information.
    :param disease_associations: Optional disease-association data; passed
     through unchanged to _add_disease_associations (may be None).
    """
    # Initialize defaults first, then overwrite with per-gene values.
    self._set_default_vertex_attributes()
    self._add_vertex_attributes_by_genes(genes)
    # compute up-regulated and down-regulated genes
    up_regulated = self.get_upregulated_genes()
    down_regulated = self.get_downregulated_genes()
    # set the attributes for up-regulated and down-regulated genes
    # (both groups are marked "diff_expressed" plus their own flag)
    self.graph.vs(up_regulated.indices)["diff_expressed"] = True
    self.graph.vs(up_regulated.indices)["up_regulated"] = True
    self.graph.vs(down_regulated.indices)["diff_expressed"] = True
    self.graph.vs(down_regulated.indices)["down_regulated"] = True
    # add disease associations
    self._add_disease_associations(disease_associations)
    logger.info("Number of all differentially expressed genes is: {}".
                format(len(up_regulated) + len(down_regulated)))
|
Add attributes to vertices.
:param genes: A list of genes containing attribute information.
|
def _blocks_to_samples(sig_data, n_samp, fmt):
    """
    Convert uint8 blocks into signal samples for unaligned dat formats.
    Parameters
    ----------
    sig_data : numpy array
        The uint8 data blocks.
    n_samp : int
        The number of samples contained in the bytes
    fmt : str
        The dat format of the packed data: '212', '310', or '311'.
    Returns
    -------
    signal : numpy array
        The numpy array of digital samples
    """
    if fmt == '212':
        # 212: two 12-bit samples packed into each 3-byte block.
        # Easier to process when dealing with whole blocks
        if n_samp % 2:
            n_samp += 1
            added_samps = 1
            sig_data = np.append(sig_data, np.zeros(1, dtype='uint8'))
        else:
            added_samps = 0
        sig_data = sig_data.astype('int16')
        sig = np.zeros(n_samp, dtype='int16')
        # One sample pair is stored in one byte triplet.
        # Even numbered samples
        sig[0::2] = sig_data[0::3] + 256 * np.bitwise_and(sig_data[1::3], 0x0f)
        # Odd numbered samples (len(sig) always > 1 due to processing of
        # whole blocks)
        sig[1::2] = sig_data[2::3] + 256*np.bitwise_and(sig_data[1::3] >> 4, 0x0f)
        # Remove trailing sample read within the byte block if
        # originally odd sampled
        if added_samps:
            sig = sig[:-added_samps]
        # Loaded values as un_signed. Convert to 2's complement form:
        # values > 2^11-1 are negative.
        sig[sig > 2047] -= 4096
    elif fmt == '310':
        # 310: three 10-bit samples packed into each 4-byte block.
        # Easier to process when dealing with whole blocks
        if n_samp % 3:
            n_samp = upround(n_samp,3)
            # NOTE(review): n_samp was just rounded up to a multiple of 3,
            # so `n_samp % 3` is always 0 here -- added_samps stays 0, no
            # padding is appended and no trailing samples are trimmed below.
            # This looks unintended (compare the '212' branch, which records
            # the number of padded samples); confirm against the reference
            # WFDB implementation before changing.
            added_samps = n_samp % 3
            sig_data = np.append(sig_data, np.zeros(added_samps, dtype='uint8'))
        else:
            added_samps = 0
        sig_data = sig_data.astype('int16')
        sig = np.zeros(n_samp, dtype='int16')
        # One sample triplet is stored in one byte quartet
        # First sample is 7 msb of first byte and 3 lsb of second byte.
        sig[0::3] = (sig_data[0::4] >> 1)[0:len(sig[0::3])] + 128 * np.bitwise_and(sig_data[1::4], 0x07)[0:len(sig[0::3])]
        # Second signal is 7 msb of third byte and 3 lsb of forth byte
        sig[1::3] = (sig_data[2::4] >> 1)[0:len(sig[1::3])] + 128 * np.bitwise_and(sig_data[3::4], 0x07)[0:len(sig[1::3])]
        # Third signal is 5 msb of second byte and 5 msb of forth byte
        sig[2::3] = np.bitwise_and((sig_data[1::4] >> 3), 0x1f)[0:len(sig[2::3])] + 32 * np.bitwise_and(sig_data[3::4] >> 3, 0x1f)[0:len(sig[2::3])]
        # Remove trailing samples read within the byte block if
        # originally not 3n sampled
        if added_samps:
            sig = sig[:-added_samps]
        # Loaded values as un_signed. Convert to 2's complement form:
        # values > 2^9-1 are negative.
        sig[sig > 511] -= 1024
    elif fmt == '311':
        # 311: three 10-bit samples packed into each 4-byte block
        # (different bit layout than '310').
        # Easier to process when dealing with whole blocks
        if n_samp % 3:
            n_samp = upround(n_samp,3)
            # NOTE(review): same issue as the '310' branch above --
            # `n_samp % 3` is always 0 after uprounding, so added_samps is 0.
            added_samps = n_samp % 3
            sig_data = np.append(sig_data, np.zeros(added_samps, dtype='uint8'))
        else:
            added_samps = 0
        sig_data = sig_data.astype('int16')
        sig = np.zeros(n_samp, dtype='int16')
        # One sample triplet is stored in one byte quartet
        # First sample is first byte and 2 lsb of second byte.
        sig[0::3] = sig_data[0::4][0:len(sig[0::3])] + 256 * np.bitwise_and(sig_data[1::4], 0x03)[0:len(sig[0::3])]
        # Second sample is 6 msb of second byte and 4 lsb of third byte
        sig[1::3] = (sig_data[1::4] >> 2)[0:len(sig[1::3])] + 64 * np.bitwise_and(sig_data[2::4], 0x0f)[0:len(sig[1::3])]
        # Third sample is 4 msb of third byte and 6 msb of forth byte
        sig[2::3] = (sig_data[2::4] >> 4)[0:len(sig[2::3])] + 16 * np.bitwise_and(sig_data[3::4], 0x7f)[0:len(sig[2::3])]
        # Remove trailing samples read within the byte block if
        # originally not 3n sampled
        if added_samps:
            sig = sig[:-added_samps]
        # Loaded values as un_signed. Convert to 2's complement form.
        # Values > 2^9-1 are negative.
        sig[sig > 511] -= 1024
    return sig
|
Convert uint8 blocks into signal samples for unaligned dat formats.
Parameters
----------
sig_data : numpy array
The uint8 data blocks.
n_samp : int
The number of samples contained in the bytes
Returns
-------
signal : numpy array
The numpy array of digital samples
|
def get_xml_parser(encoding=None):
    """Build and return an ``etree.ETCompatXMLParser`` instance."""
    # Keyword arguments sorted alphabetically for easy scanning; entity
    # resolution is disabled and CDATA preserved.
    return etree.ETCompatXMLParser(
        encoding=encoding,
        huge_tree=True,
        remove_blank_text=True,
        remove_comments=True,
        resolve_entities=False,
        strip_cdata=False,
    )
|
Returns an ``etree.ETCompatXMLParser`` instance.
|
def _set_sample_rate_cpu(self, v, load=False):
    """
    Setter method for sample_rate_cpu, mapped from YANG variable /resource_monitor/cpu/sample_rate_cpu (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_sample_rate_cpu is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_sample_rate_cpu() directly.
    """
    # Auto-generated (pyangbind-style) setter: the YANGDynClass construction
    # below mirrors the YANG model definition and should not be hand-edited.
    if hasattr(v, "_utype"):
        # Unwrap a previously-wrapped YANG value back to its native type.
        v = v._utype(v)
    try:
        # Re-wrap the value, enforcing the uint32 range restriction (1..20).
        t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 20']}), is_leaf=True, yang_name="sample-rate-cpu", rest_name="sample-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Sampling rate for CPU usage monitoring', u'hidden': u'debug', u'alt-name': u'sample-rate', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-resource-monitor', defining_module='brocade-resource-monitor', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
        # Surface a structured error describing the expected YANG type.
        raise ValueError({
            'error-string': """sample_rate_cpu must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 20']}), is_leaf=True, yang_name="sample-rate-cpu", rest_name="sample-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Sampling rate for CPU usage monitoring', u'hidden': u'debug', u'alt-name': u'sample-rate', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-resource-monitor', defining_module='brocade-resource-monitor', yang_type='uint32', is_config=True)""",
        })
    self.__sample_rate_cpu = t
    if hasattr(self, '_set'):
        # Notify the parent container that a child value changed.
        self._set()
|
Setter method for sample_rate_cpu, mapped from YANG variable /resource_monitor/cpu/sample_rate_cpu (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_sample_rate_cpu is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sample_rate_cpu() directly.
|
def delete_feature_base(dbpath, set_object, name):
    """
    Generic function which deletes a feature from a database
    Parameters
    ----------
    dbpath : string, path to SQLite database file
    set_object : object (either TestSet or TrainSet) which is stored in the database
    name : string, name of the feature to be deleted
    Returns
    -------
    None
    """
    # NOTE(review): 'sqlite:////' + dbpath assumes dbpath carries no leading
    # slash (four slashes = absolute path) -- confirm against callers.
    engine = create_engine('sqlite:////' + dbpath)
    session_cl = sessionmaker(bind=engine)
    session = session_cl()
    try:
        tmp_object = session.query(set_object).get(1)
        # Guard against an empty table: .get(1) returns None when row 1 is
        # missing (previously this raised AttributeError).
        if (tmp_object is not None and tmp_object.features is not None
                and name in tmp_object.features):
            for i in session.query(set_object).order_by(set_object.id):
                del i.features[name]
            session.commit()
    finally:
        # Always release the session, even if the query or commit fails.
        session.close()
    return None
|
Generic function which deletes a feature from a database
Parameters
----------
dbpath : string, path to SQLite database file
set_object : object (either TestSet or TrainSet) which is stored in the database
name : string, name of the feature to be deleted
Returns
-------
None
|
def execute_process_synchronously_or_raise(self, execute_process_request, name, labels=None):
    """Execute process synchronously, raising if the return code is not 0.
    See execute_process_synchronously for the api docs.
    """
    result = self.execute_process_synchronously_without_raising(
        execute_process_request, name, labels)
    # Convert the fallible result, raising on a non-zero exit code.
    return fallible_to_exec_result_or_raise(result, execute_process_request)
|
Execute process synchronously, and throw if the return code is not 0.
See execute_process_synchronously for the api docs.
|
def bz2_pack(source):
    """
    Returns 'source' as a bzip2-compressed, self-extracting python script.
    .. note::
        This method uses up more space than the zip_pack method but it has the
        advantage in that the resulting .py file can still be imported into a
        python program.
    """
    import bz2, base64
    header = ""
    # Preserve shebangs (don't care about encodings for this)
    first_line = source.split('\n')[0]
    if analyze.shebang.match(first_line):
        if py3 and first_line.rstrip().endswith('python'):
            # Make the interpreter python3: #!/usr/bin/env python3
            first_line = first_line.rstrip() + '3'
        header = first_line + '\n'
    payload = base64.b64encode(
        bz2.compress(source.encode('utf-8'))).decode('utf-8')
    return (header
            + 'import bz2, base64\n'
            + "exec(bz2.decompress(base64.b64decode('"
            + payload
            + "')))\n")
|
Returns 'source' as a bzip2-compressed, self-extracting python script.
.. note::
This method uses up more space than the zip_pack method but it has the
advantage in that the resulting .py file can still be imported into a
python program.
|
def _set_overlay_gateway(self, v, load=False):
    """
    Setter method for overlay_gateway, mapped from YANG variable /overlay_gateway (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_overlay_gateway is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_overlay_gateway() directly.
    """
    # Auto-generated (pyangbind-style) setter: the YANGDynClass construction
    # below mirrors the YANG model definition and should not be hand-edited.
    if hasattr(v, "_utype"):
        # Unwrap a previously-wrapped YANG value back to its native type.
        v = v._utype(v)
    try:
        # Re-wrap the value as a keyed YANG list of overlay-gateway entries.
        t = YANGDynClass(v,base=YANGListType("name",overlay_gateway.overlay_gateway, yang_name="overlay-gateway", rest_name="overlay-gateway", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure Overaly gateway instance', u'sort-priority': u'RUNNCFG_LEVEL_OVERLAY_GATEWAY_CONFIG', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'TunnelsGwCallpoint', u'cli-mode-name': u'config-overlay-gw-$(name)'}}), is_container='list', yang_name="overlay-gateway", rest_name="overlay-gateway", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Overaly gateway instance', u'sort-priority': u'RUNNCFG_LEVEL_OVERLAY_GATEWAY_CONFIG', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'TunnelsGwCallpoint', u'cli-mode-name': u'config-overlay-gw-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        # Surface a structured error describing the expected YANG type.
        raise ValueError({
            'error-string': """overlay_gateway must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("name",overlay_gateway.overlay_gateway, yang_name="overlay-gateway", rest_name="overlay-gateway", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure Overaly gateway instance', u'sort-priority': u'RUNNCFG_LEVEL_OVERLAY_GATEWAY_CONFIG', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'TunnelsGwCallpoint', u'cli-mode-name': u'config-overlay-gw-$(name)'}}), is_container='list', yang_name="overlay-gateway", rest_name="overlay-gateway", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Overaly gateway instance', u'sort-priority': u'RUNNCFG_LEVEL_OVERLAY_GATEWAY_CONFIG', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'TunnelsGwCallpoint', u'cli-mode-name': u'config-overlay-gw-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='list', is_config=True)""",
        })
    self.__overlay_gateway = t
    if hasattr(self, '_set'):
        # Notify the parent container that a child value changed.
        self._set()
|
Setter method for overlay_gateway, mapped from YANG variable /overlay_gateway (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_overlay_gateway is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_overlay_gateway() directly.
|
def merge_odd_even_csu_configurations(conf_odd, conf_even):
    """Merge CSU configuration using odd- and even-numbered values.
    The returned CSU configuration takes the odd-numbered slitlet values
    from 'conf_odd' and the even-numbered slitlet values from 'conf_even'.
    Parameters
    ----------
    conf_odd : CsuConfiguration instance
        CSU configuration corresponding to odd-numbered slitlets.
    conf_even : CsuConfiguration instance
        CSU configuration corresponding to even-numbered slitlets.
    Returns
    -------
    merged_conf : CsuConfiguration instance
        CSU configuration resulting from the merging process.
    """
    # Start from a copy of the odd-numbered configuration.
    merged_conf = deepcopy(conf_odd)
    # Slitlet number ibar = i + 1, so even-numbered slitlets live at the
    # odd array indices 1, 3, 5, ...
    for i in range(1, EMIR_NBARS, 2):
        merged_conf._csu_bar_left[i] = conf_even._csu_bar_left[i]
        merged_conf._csu_bar_right[i] = conf_even._csu_bar_right[i]
        merged_conf._csu_bar_slit_center[i] = conf_even._csu_bar_slit_center[i]
        merged_conf._csu_bar_slit_width[i] = conf_even._csu_bar_slit_width[i]
    return merged_conf
|
Merge CSU configuration using odd- and even-numbered values.
The returned CSU configuration includes the odd-numbered values
from 'conf_odd' and the even-numbered values from 'conf_even'.
Parameters
----------
conf_odd : CsuConfiguration instance
CSU configuration corresponding to odd-numbered slitlets.
conf_even : CsuConfiguration instance
CSU configuration corresponding to even-numbered slitlets.
Returns
-------
merged_conf : CsuConfiguration instance
CSU configuration resulting from the merging process.
|
def get_station_year_text(WMO, WBAN, year):
    '''Basic method to download data from the GSOD database, given a
    station identifier and year.
    Parameters
    ----------
    WMO : int or None
        World Meteorological Organization (WMO) identifiers, [-]
    WBAN : int or None
        Weather Bureau Army Navy (WBAN) weather station identifier, [-]
    year : int
        Year data should be retrieved from, [year]
    Returns
    -------
    data : str
        Downloaded data file
    '''
    # Placeholder identifiers used by GSOD when a code is unknown.
    if WMO is None:
        WMO = 999999
    if WBAN is None:
        WBAN = 99999
    station = str(int(WMO)) + '-' + str(int(WBAN))
    gsod_year_dir = os.path.join(data_dir, 'gsod', str(year))
    path = os.path.join(gsod_year_dir, station + '.op')
    if os.path.exists(path):
        # Serve from the local cache; a cached literal 'Exception' marks a
        # previously failed download for this station/year.
        with open(path) as f:
            data = f.read()
        if data and data != 'Exception':
            return data
        else:
            raise Exception(data)
    toget = ('ftp://ftp.ncdc.noaa.gov/pub/data/gsod/' + str(year) + '/'
             + station + '-' + str(year) +'.op.gz')
    try:
        data = urlopen(toget, timeout=5)
    except Exception as e:
        # Cache the failure so repeated lookups do not retry the network.
        if not os.path.exists(gsod_year_dir):
            os.makedirs(gsod_year_dir)
        with open(path, 'w') as f:
            f.write('Exception')
        raise Exception('Could not obtain desired data; check '
                        'if the year has data published for the '
                        'specified station and the station was specified '
                        'in the correct form. The full error is %s' %(e))
    data = data.read()
    data_thing = StringIO(data)
    with gzip.GzipFile(fileobj=data_thing, mode="r") as f:
        year_station_data = f.read()
    try:
        year_station_data = year_station_data.decode('utf-8')
    except (AttributeError, UnicodeDecodeError):
        # Best-effort decode: keep the raw payload when it is already str
        # or is not valid UTF-8 (previously a bare `except:`).
        pass
    # Cache the data for future use
    if not os.path.exists(gsod_year_dir):
        os.makedirs(gsod_year_dir)
    with open(path, 'w') as f:
        f.write(year_station_data)
    return year_station_data
|
Basic method to download data from the GSOD database, given a
station identifier and year.
Parameters
----------
WMO : int or None
World Meteorological Organization (WMO) identifiers, [-]
WBAN : int or None
Weather Bureau Army Navy (WBAN) weather station identifier, [-]
year : int
Year data should be retrieved from, [year]
Returns
-------
data : str
Downloaded data file
|
def default(self, o):
    """Default encoder.
    :param o: Atom or Bond instance.
    :type o: :class:`~ctfile.ctfile.Atom` or :class:`~ctfile.ctfile.Bond`.
    :return: Dictionary that contains information required for atom and bond block of ``Ctab``.
    :rtype: :py:class:`collections.OrderedDict`
    """
    # Atom/Bond objects expose their serializable form via _ctab_data;
    # anything else falls back to its attribute dictionary.
    if isinstance(o, (Atom, Bond)):
        return o._ctab_data
    return o.__dict__
|
Default encoder.
:param o: Atom or Bond instance.
:type o: :class:`~ctfile.ctfile.Atom` or :class:`~ctfile.ctfile.Bond`.
:return: Dictionary that contains information required for atom and bond block of ``Ctab``.
:rtype: :py:class:`collections.OrderedDict`
|
def timeout(delay, handler=None):
    """
    Context manager to run code and deliver a SIGALRM signal after `delay` seconds.

    Note that `delay` must be a whole number; otherwise it is converted to an
    integer by Python's `int()` built-in function. For floating-point numbers,
    that means rounding off to the nearest integer from below.

    If the optional argument `handler` is supplied, it must be a callable that
    is invoked if the alarm triggers while the code is still running. If no
    `handler` is provided (default), then a `RuntimeError` with message
    ``Timeout`` is raised.

    :param delay: number of seconds before SIGALRM is delivered.
    :param handler: optional SIGALRM handler ``f(signum, frame)``.
    """
    delay = int(delay)
    if handler is None:
        def default_handler(signum, frame):
            raise RuntimeError("{:d} seconds timeout expired".format(delay))
        handler = default_handler
    prev_sigalrm_handler = signal.getsignal(signal.SIGALRM)
    signal.signal(signal.SIGALRM, handler)
    signal.alarm(delay)
    try:
        yield
    finally:
        # Always cancel the pending alarm and restore the previous handler,
        # even when the managed block raises -- otherwise a stray SIGALRM
        # could fire later with our handler still installed.
        signal.alarm(0)
        signal.signal(signal.SIGALRM, prev_sigalrm_handler)
|
Context manager to run code and deliver a SIGALRM signal after `delay` seconds.
Note that `delay` must be a whole number; otherwise it is converted to an
integer by Python's `int()` built-in function. For floating-point numbers,
that means rounding off to the nearest integer from below.
If the optional argument `handler` is supplied, it must be a callable that
is invoked if the alarm triggers while the code is still running. If no
`handler` is provided (default), then a `RuntimeError` with message
``Timeout`` is raised.
|
def bounds(self, pixelbuffer=0):
    """
    Return Tile boundaries.

    - pixelbuffer: tile buffer in pixels
    """
    west, south = self._left, self._bottom
    east, north = self._right, self._top
    if pixelbuffer:
        buffer_distance = self.pixel_x_size * float(pixelbuffer)
        west -= buffer_distance
        south -= buffer_distance
        east += buffer_distance
        north += buffer_distance
        # on global grids clip at northern and southern TilePyramid bound
        if self.tp.grid.is_global:
            north = min([north, self.tile_pyramid.top])
            south = max([south, self.tile_pyramid.bottom])
    return Bounds(west, south, east, north)
|
Return Tile boundaries.
- pixelbuffer: tile buffer in pixels
|
def fit(self, X):
    """Fit the model to the data, iterating until convergence or max_iter.

    Parameters
    ----------
    X: shape = [n_samples, n_features]
    """
    dictionary = self._initialize(X)
    for _ in range(self.max_iter):
        code = self._transform(dictionary, X)
        residual = np.linalg.norm(X - code.dot(dictionary))
        # stop once the reconstruction error falls below tolerance
        if residual < self.tol:
            break
        dictionary, code = self._update_dict(X, dictionary, code)
    self.components_ = dictionary
    return self
|
Parameters
----------
X: shape = [n_samples, n_features]
|
def completed_work_items(self):
    """Iterable of `(work-item, result)` pairs for all completed items."""
    rows = self._conn.execute(
        "SELECT * FROM work_items, results WHERE work_items.job_id == results.job_id"
    )
    # Query runs eagerly; pairs are materialized lazily from the cursor.
    return ((_row_to_work_item(row), _row_to_work_result(row))
            for row in rows)
|
Iterable of `(work-item, result)`s for all completed items.
|
def get_requirements(*args):
    """Get requirements from pip requirement files."""
    requirements = set()
    for raw_line in get_contents(*args).splitlines():
        # Drop full-line and trailing comments.
        stripped = re.sub(r'^#.*|\s#.*', '', raw_line)
        # Skip blank lines; collapse any remaining whitespace out of the spec.
        if stripped and not stripped.isspace():
            requirements.add(re.sub(r'\s+', '', stripped))
    return sorted(requirements)
|
Get requirements from pip requirement files.
|
def grad_local_log_likelihood(self, x):
    r"""
    d/d \psi y \psi - log (1 + exp(\psi))
        = y - exp(\psi) / (1 + exp(\psi))
        = y - sigma(psi)
        = y - p

    d \psi / dx = C

    d / dx = (y - sigma(psi)) * C
    """
    # Linear predictor from latent state and external inputs.
    psi = x.dot(self.C.T) + self.inputs.dot(self.D.T)
    # Logistic sigmoid of the predictor.
    sigma = 1. / (1 + np.exp(-psi))
    return (self.data - sigma).dot(self.C)
|
d/d \psi y \psi - log (1 + exp(\psi))
= y - exp(\psi) / (1 + exp(\psi))
= y - sigma(psi)
= y - p
d \psi / dx = C
d / dx = (y - sigma(psi)) * C
|
def add_conversion_steps(self, converters: List[Converter], inplace: bool = False):
    """
    Utility method to add converters to this chain. If inplace is True, this object is modified and
    None is returned. Otherwise, a copy is returned

    :param converters: the list of converters to add
    :param inplace: boolean indicating whether to modify this object (True) or return a copy (False)
    :return: None or a copy with the converters added
    """
    check_var(converters, var_types=list, min_len=1)
    if not inplace:
        # Work on a shallow copy and mutate that instead.
        clone = copy(self)
        clone.add_conversion_steps(converters, inplace=True)
        return clone
    for converter in converters:
        self.add_conversion_step(converter, inplace=True)
|
Utility method to add converters to this chain. If inplace is True, this object is modified and
None is returned. Otherwise, a copy is returned
:param converters: the list of converters to add
:param inplace: boolean indicating whether to modify this object (True) or return a copy (False)
:return: None or a copy with the converters added
|
def linear(X, n, *args, **kwargs):
    r"""Linear mean function of arbitrary dimension, suitable for use with :py:class:`MeanFunction`.

    The form is :math:`m_0 * X[:, 0] + m_1 * X[:, 1] + \dots + b`.

    Parameters
    ----------
    X : array, (`M`, `D`)
        The points to evaluate the model at.
    n : array of non-negative int, (`D`)
        The derivative order to take, specified as an integer order for each
        dimension in `X`.
    *args : num_dim+1 floats
        The slopes for each dimension, plus the constant term. Must be of the
        form `m0, m1, ..., b`.
    **kwargs : optional
        hyper_deriv : int or None
            Index of the hyperparameter to differentiate with respect to:
            slope ``m_i`` for ``i < D``, constant ``b`` for ``i == D``.
            Default is None (no hyperparameter derivative).

    Raises
    ------
    ValueError
        If `hyper_deriv` is not a valid hyperparameter index.
    """
    import numpy as np

    hyper_deriv = kwargs.pop('hyper_deriv', None)
    m = np.asarray(args[:-1])
    b = args[-1]
    n = np.asarray(n)
    num_deriv = int(n.sum())
    if num_deriv > 1:
        # A linear function: all second and higher derivatives vanish.
        return np.zeros(X.shape[0])
    elif num_deriv == 0:
        if hyper_deriv is not None:
            if hyper_deriv < len(m):
                # d/dm_i of m.X + b is X[:, i].
                return X[:, hyper_deriv]
            elif hyper_deriv == len(m):
                # d/db of m.X + b is 1.
                return np.ones(X.shape[0])
            else:
                raise ValueError("Invalid value for hyper_deriv, " + str(hyper_deriv))
        else:
            return (m * X).sum(axis=1) + b
    else:
        # num_deriv == 1: a first derivative along exactly one dimension.
        if hyper_deriv is not None:
            if hyper_deriv == len(m):
                # d/db of a first spatial derivative is identically zero.
                # (The original indexed n[len(m)] here, raising IndexError.)
                return np.zeros(X.shape[0])
            elif hyper_deriv > len(m):
                raise ValueError("Invalid value for hyper_deriv, " + str(hyper_deriv))
            if n[hyper_deriv] == 1:
                return np.ones(X.shape[0])
            else:
                return np.zeros(X.shape[0])
        # The first derivative along dimension i is the constant slope m_i.
        return m[n == 1] * np.ones(X.shape[0])
|
Linear mean function of arbitrary dimension, suitable for use with :py:class:`MeanFunction`.
The form is :math:`m_0 * X[:, 0] + m_1 * X[:, 1] + \dots + b`.
Parameters
----------
X : array, (`M`, `D`)
The points to evaluate the model at.
n : array of non-negative int, (`D`)
The derivative order to take, specified as an integer order for each
dimension in `X`.
*args : num_dim+1 floats
The slopes for each dimension, plus the constant term. Must be of the
form `m0, m1, ..., b`.
|
def main():
    """Execute all checks."""
    check_python_version()
    check_python_modules()
    check_executables()
    print("\033[1mCheck files\033[0m")
    rcfile = os.path.join(os.path.expanduser("~"), ".hwrtrc")
    if os.path.isfile(rcfile):
        status = "%sFOUND%s" % (Bcolors.OKGREEN, Bcolors.ENDC)
    else:
        status = "%sNOT FOUND%s" % (Bcolors.FAIL, Bcolors.ENDC)
    print("~/.hwrtrc... %s" % status)
    misc_path = pkg_resources.resource_filename('hwrt', 'misc/')
    print("misc-path: %s" % misc_path)
|
Execute all checks.
|
def available_actions(self, obs):
    """Return the list of available action ids.

    Combines two sources of availability:

    1. functions whose ``avail_fn`` predicate passes on the observation, and
    2. functions mapped from the ability ids the observation reports,
       optionally substituting generic actions for specific ones.

    :param obs: raw observation carrying an ``abilities`` collection whose
        entries have ``ability_id`` and ``requires_point``.
    :return: list of available function ids (deduplicated, unordered).
    """
    available_actions = set()
    hide_specific_actions = self._agent_interface_format.hide_specific_actions
    # Pass 1: predicate-based availability.
    for i, func in six.iteritems(actions.FUNCTIONS_AVAILABLE):
        if func.avail_fn(obs):
            available_actions.add(i)
    # Pass 2: ability-id based availability.
    for a in obs.abilities:
        if a.ability_id not in actions.ABILITY_IDS:
            logging.warning("Unknown ability %s seen as available.", a.ability_id)
            continue
        for func in actions.ABILITY_IDS[a.ability_id]:
            if func.function_type in actions.POINT_REQUIRED_FUNCS[a.requires_point]:
                # Expose the specific action unless it has a generic version
                # and specific actions are hidden.
                if func.general_id == 0 or not hide_specific_actions:
                    available_actions.add(func.id)
                if func.general_id != 0:  # Always offer generic actions.
                    for general_func in actions.ABILITY_IDS[func.general_id]:
                        if general_func.function_type is func.function_type:
                            # Only the right type. Don't want to expose the general action
                            # to minimap if only the screen version is available.
                            available_actions.add(general_func.id)
                            break
    return list(available_actions)
|
Return the list of available action ids.
|
def extract_edges(self, feature_angle=30, boundary_edges=True,
                  non_manifold_edges=True, feature_edges=True,
                  manifold_edges=True, inplace=False):
    """
    Extracts edges from the surface of the grid. From vtk documentation:
    These edges are either

    1) boundary (used by one polygon) or a line cell;
    2) non-manifold (used by three or more polygons)
    3) feature edges (edges used by two triangles and whose
       dihedral angle > feature_angle)
    4) manifold edges (edges used by exactly two polygons).

    Parameters
    ----------
    feature_angle : float, optional
        Defaults to 30 degrees.

    boundary_edges : bool, optional
        Defaults to True

    non_manifold_edges : bool, optional
        Defaults to True

    feature_edges : bool, optional
        Defaults to True

    manifold_edges : bool, optional
        Defaults to True

    inplace : bool, optional
        Return new mesh or overwrite input.

    Returns
    -------
    edges : vtki.vtkPolyData
        Extracted edges

    """
    # Delegate to the surface mesh's implementation of the same operation.
    return self.extract_surface().extract_edges(
        feature_angle,
        boundary_edges,
        non_manifold_edges,
        feature_edges,
        manifold_edges,
        inplace=inplace,
    )
|
Extracts edges from the surface of the grid. From vtk documentation:
These edges are either
1) boundary (used by one polygon) or a line cell;
2) non-manifold (used by three or more polygons)
3) feature edges (edges used by two triangles and whose
dihedral angle > feature_angle)
4) manifold edges (edges used by exactly two polygons).
Parameters
----------
feature_angle : float, optional
Defaults to 30 degrees.
boundary_edges : bool, optional
Defaults to True
non_manifold_edges : bool, optional
Defaults to True
feature_edges : bool, optional
Defaults to True
manifold_edges : bool, optional
Defaults to True
inplace : bool, optional
Return new mesh or overwrite input.
Returns
-------
edges : vtki.vtkPolyData
Extracted edges
|
def mk_token(self, load):
    '''
    Run time_auth and create a token. Return False or the token
    '''
    if not self.authenticate_eauth(load):
        return {}
    # Honor a caller-supplied expiry only when the eauth config allows it;
    # in both branches 'token_expire' is removed from the load.
    if self._allow_custom_expire(load):
        token_expire = load.pop('token_expire', self.opts['token_expire'])
    else:
        load.pop('token_expire', None)
        token_expire = self.opts['token_expire']
    tdata = {
        'start': time.time(),
        'expire': time.time() + token_expire,
        'name': self.load_name(load),
        'eauth': load['eauth'],
    }
    if self.opts['keep_acl_in_token']:
        tdata['auth_list'] = self.__get_acl(load)
    groups = self.get_groups(load)
    if groups:
        tdata['groups'] = groups
    token_writer = self.tokens["{0}.mk_token".format(self.opts['eauth_tokens'])]
    return token_writer(self.opts, tdata)
|
Run time_auth and create a token. Return False or the token
|
def flatten(self):
    """
    Flattens any np.array of column vectors into 1D arrays. Basically,
    this makes data readable for humans if you are just inspecting via
    the REPL. For example, if you have saved a KalmanFilter object with 89
    epochs, self.x will be shape (89, 9, 1) (for example). After flatten
    is run, self.x.shape == (89, 9), which displays nicely from the REPL.

    There is no way to unflatten, so it's a one way trip.
    """
    for key in self.keys:
        try:
            arr = self.__dict__[key]
            shape = arr.shape
            if shape[2] == 1:
                self.__dict__[key] = arr.reshape(shape[0], shape[1])
        except (KeyError, AttributeError, IndexError):
            # KeyError: attribute was never stored;
            # AttributeError: value has no .shape (not an ndarray);
            # IndexError: array is not 3-D (not a stack of column vectors).
            # In every case, leave the attribute untouched.  (Replaces the
            # original bare `except:` which also hid unrelated bugs.)
            pass
|
Flattens any np.array of column vectors into 1D arrays. Basically,
this makes data readable for humans if you are just inspecting via
the REPL. For example, if you have saved a KalmanFilter object with 89
epochs, self.x will be shape (89, 9, 1) (for example). After flatten
is run, self.x.shape == (89, 9), which displays nicely from the REPL.
There is no way to unflatten, so it's a one way trip.
|
def shifted(self, rows, cols):
    """Returns a new selection that is shifted by rows and cols.

    Negative values for rows and cols may result in a selection
    that addresses negative cells.

    Parameters
    ----------
    rows: Integer
    \tNumber of rows that the new selection is shifted down
    cols: Integer
    \tNumber of columns that the new selection is shifted right

    """
    def move(coords):
        # Shift each (row, col) pair by the requested offsets.
        return [(r + rows, c + cols) for r, c in coords]

    return Selection(
        move(self.block_tl),
        move(self.block_br),
        [r + rows for r in self.rows],
        [c + cols for c in self.cols],
        move(self.cells),
    )
|
Returns a new selection that is shifted by rows and cols.
Negative values for rows and cols may result in a selection
that addresses negative cells.
Parameters
----------
rows: Integer
\tNumber of rows that the new selection is shifted down
cols: Integer
\tNumber of columns that the new selection is shifted right
|
def update_role(self, service_name, deployment_name, role_name,
                os_virtual_hard_disk=None, network_config=None,
                availability_set_name=None, data_virtual_hard_disks=None,
                role_size=None, role_type='PersistentVMRole',
                resource_extension_references=None,
                provision_guest_agent=None):
    '''
    Updates the specified virtual machine.

    service_name:
        The name of the service.
    deployment_name:
        The name of the deployment.
    role_name:
        The name of the role.
    os_virtual_hard_disk:
        Contains the parameters Windows Azure uses to create the operating
        system disk for the virtual machine.
    network_config:
        Encapsulates the metadata required to create the virtual network
        configuration for a virtual machine. If you do not include a
        network configuration set you will not be able to access the VM
        through VIPs over the internet. If your virtual machine belongs to
        a virtual network you can not specify which subnet address space
        it resides under.
    availability_set_name:
        Specifies the name of an availability set to which to add the
        virtual machine. This value controls the virtual machine allocation
        in the Windows Azure environment. Virtual machines specified in the
        same availability set are allocated to different nodes to maximize
        availability.
    data_virtual_hard_disks:
        Contains the parameters Windows Azure uses to create a data disk
        for a virtual machine.
    role_size:
        The size of the virtual machine to allocate. The default value is
        Small. Possible values are: ExtraSmall, Small, Medium, Large,
        ExtraLarge. The specified value must be compatible with the disk
        selected in the OSVirtualHardDisk values.
    role_type:
        The type of the role for the virtual machine. The only supported
        value is PersistentVMRole.
    resource_extension_references:
        Optional. Contains a collection of resource extensions that are to
        be installed on the Virtual Machine. This element is used if
        provision_guest_agent is set to True.
    provision_guest_agent:
        Optional. Indicates whether the VM Agent is installed on the
        Virtual Machine. To run a resource extension in a Virtual Machine,
        this service must be installed.
    '''
    # The three path components are mandatory.
    for param_name, param_value in (('service_name', service_name),
                                    ('deployment_name', deployment_name),
                                    ('role_name', role_name)):
        _validate_not_none(param_name, param_value)
    request_body = _XmlSerializer.update_role_to_xml(
        role_name,
        os_virtual_hard_disk,
        role_type,
        network_config,
        availability_set_name,
        data_virtual_hard_disks,
        role_size,
        resource_extension_references,
        provision_guest_agent)
    return self._perform_put(
        self._get_role_path(service_name, deployment_name, role_name),
        request_body,
        as_async=True)
|
Updates the specified virtual machine.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
role_name:
The name of the role.
os_virtual_hard_disk:
Contains the parameters Windows Azure uses to create the operating
system disk for the virtual machine.
network_config:
Encapsulates the metadata required to create the virtual network
configuration for a virtual machine. If you do not include a
network configuration set you will not be able to access the VM
through VIPs over the internet. If your virtual machine belongs to
a virtual network you can not specify which subnet address space
it resides under.
availability_set_name:
Specifies the name of an availability set to which to add the
virtual machine. This value controls the virtual machine allocation
in the Windows Azure environment. Virtual machines specified in the
same availability set are allocated to different nodes to maximize
availability.
data_virtual_hard_disks:
Contains the parameters Windows Azure uses to create a data disk
for a virtual machine.
role_size:
The size of the virtual machine to allocate. The default value is
Small. Possible values are: ExtraSmall, Small, Medium, Large,
ExtraLarge. The specified value must be compatible with the disk
selected in the OSVirtualHardDisk values.
role_type:
The type of the role for the virtual machine. The only supported
value is PersistentVMRole.
resource_extension_references:
Optional. Contains a collection of resource extensions that are to
be installed on the Virtual Machine. This element is used if
provision_guest_agent is set to True.
provision_guest_agent:
Optional. Indicates whether the VM Agent is installed on the
Virtual Machine. To run a resource extension in a Virtual Machine,
this service must be installed.
|
def processData(config, stats):
    """
    Collate the stats and report.

    :param config: configuration object providing ``batchSystem``,
        ``defaultMemory``, ``defaultCores`` and ``maxCores``.
    :param stats: aggregated stats object with optional ``total_time``,
        ``total_clock``, ``workers`` and ``jobs`` attributes.
    :return: an Expando tree of collated statistics named
        "collatedStatsTag" with per-job-type summaries attached.
    """
    if 'total_time' not in stats or 'total_clock' not in stats:
        # toil job not finished yet
        stats.total_time = [0.0]
        stats.total_clock = [0.0]
    # Collapse the per-sample lists into scalar totals.
    stats.total_time = sum([float(number) for number in stats.total_time])
    stats.total_clock = sum([float(number) for number in stats.total_clock])
    collatedStatsTag = Expando(total_run_time=stats.total_time,
                               total_clock=stats.total_clock,
                               batch_system=config.batchSystem,
                               default_memory=str(config.defaultMemory),
                               default_cores=str(config.defaultCores),
                               max_cores=str(config.maxCores)
                               )
    # Add worker info; drop falsy entries, then flatten jobs one level.
    worker = [_f for _f in getattr(stats, 'workers', []) if _f]
    jobs = [_f for _f in getattr(stats, 'jobs', []) if _f]
    jobs = [item for sublist in jobs for item in sublist]
    def fn4(job):
        # NOTE(review): this ignores its `job` argument and returns the full
        # flattened job list (or [] when `jobs` is not iterable) -- confirm
        # whether per-worker job lists were intended here.
        try:
            return list(jobs)
        except TypeError:
            return []
    buildElement(collatedStatsTag, worker, "worker")
    createSummary(buildElement(collatedStatsTag, jobs, "jobs"),
                  getattr(stats, 'workers', []), "worker", fn4)
    # Get info for each job
    jobNames = set()
    for job in jobs:
        jobNames.add(job.class_name)
    jobTypesTag = Expando()
    collatedStatsTag.job_types = jobTypesTag
    # Group jobs by class name and attach one element per job type.
    for jobName in jobNames:
        jobTypes = [ job for job in jobs if job.class_name == jobName ]
        buildElement(jobTypesTag, jobTypes, jobName)
    collatedStatsTag.name = "collatedStatsTag"
    return collatedStatsTag
|
Collate the stats and report
|
def rhymes(word):
    """Get words rhyming with a given word.

    This function may return an empty list if no rhyming words are found in
    the dictionary, or if the word you pass to the function is itself not
    found in the dictionary.

    .. doctest::

        >>> import pronouncing
        >>> pronouncing.rhymes("conditioner")
        ['commissioner', 'parishioner', 'petitioner', 'practitioner']

    :param word: a word
    :returns: a list of rhyming words
    """
    pronunciations = phones_for_word(word)
    if not pronunciations:
        return []
    all_rhymes = set()
    # Every pronunciation of the word contributes its own rhyme candidates;
    # the word itself is excluded from its rhymes.
    for pronunciation in pronunciations:
        candidates = rhyme_lookup.get(rhyming_part(pronunciation), [])
        all_rhymes.update(w for w in candidates if w != word)
    return sorted(all_rhymes)
|
Get words rhyming with a given word.
This function may return an empty list if no rhyming words are found in
the dictionary, or if the word you pass to the function is itself not
found in the dictionary.
.. doctest::
>>> import pronouncing
>>> pronouncing.rhymes("conditioner")
['commissioner', 'parishioner', 'petitioner', 'practitioner']
:param word: a word
:returns: a list of rhyming words
|
def fromtimestamp(cls, ts, tzi=None):
    # pylint: disable=invalid-name
    """
    Factory method that returns a new :class:`~pywbem.CIMDateTime` object
    from a POSIX timestamp value and optional timezone information.

    A POSIX timestamp value is the number of seconds since "the epoch",
    i.e. 1970-01-01 00:00:00 UTC. Thus, a POSIX timestamp value is
    unambiguous w.r.t. the timezone, but it is not timezone-aware.

    The optional timezone information is used to convert the CIM datetime
    value into the desired timezone. That does not change the point in time
    that is represented by the value, but it changes the value of the
    ``hhmmss`` components of the CIM datetime value to compensate for
    changes in the timezone offset component.

    Parameters:

      ts (:term:`integer`):
        POSIX timestamp value.

      tzi (:class:`~pywbem.MinutesFromUTC`):
        Timezone information. `None` means that the current local timezone
        is used.

    Returns:

        A new :class:`~pywbem.CIMDateTime` object representing the
        specified point in time.
    """
    # Fall back to the current local UTC offset when no timezone is given.
    zone = tzi if tzi is not None else MinutesFromUTC(cls.get_local_utcoffset())
    return cls(datetime.fromtimestamp(ts, zone))
|
Factory method that returns a new :class:`~pywbem.CIMDateTime` object
from a POSIX timestamp value and optional timezone information.
A POSIX timestamp value is the number of seconds since "the epoch",
i.e. 1970-01-01 00:00:00 UTC. Thus, a POSIX timestamp value is
unambiguous w.r.t. the timezone, but it is not timezone-aware.
The optional timezone information is used to convert the CIM datetime
value into the desired timezone. That does not change the point in time
that is represented by the value, but it changes the value of the
``hhmmss`` components of the CIM datetime value to compensate for
changes in the timezone offset component.
Parameters:
ts (:term:`integer`):
POSIX timestamp value.
tzi (:class:`~pywbem.MinutesFromUTC`):
Timezone information. `None` means that the current local timezone
is used.
Returns:
A new :class:`~pywbem.CIMDateTime` object representing the
specified point in time.
|
def update(self, track=values.unset, publisher=values.unset, kind=values.unset,
           status=values.unset):
    """
    Update the SubscribedTrackInstance

    :param unicode track: The track
    :param unicode publisher: The publisher
    :param SubscribedTrackInstance.Kind kind: The kind
    :param SubscribedTrackInstance.Status status: The status

    :returns: Updated SubscribedTrackInstance
    :rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackInstance
    """
    # Unset values are filtered out by values.of().
    form_data = values.of({
        'Track': track,
        'Publisher': publisher,
        'Kind': kind,
        'Status': status,
    })
    payload = self._version.update('POST', self._uri, data=form_data)
    return SubscribedTrackInstance(
        self._version,
        payload,
        room_sid=self._solution['room_sid'],
        subscriber_sid=self._solution['subscriber_sid'],
    )
|
Update the SubscribedTrackInstance
:param unicode track: The track
:param unicode publisher: The publisher
:param SubscribedTrackInstance.Kind kind: The kind
:param SubscribedTrackInstance.Status status: The status
:returns: Updated SubscribedTrackInstance
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackInstance
|
def to_array(self):
    """
    Serializes this StickerMessage to a dictionary.

    :return: dictionary representation of this object.
    :rtype: dict
    :raises TypeError: if a field holds a value of an unsupported type.
    """
    array = super(StickerMessage, self).to_array()
    if isinstance(self.sticker, InputFile):
        array['sticker'] = self.sticker.to_array()  # type InputFile
    elif isinstance(self.sticker, str):
        array['sticker'] = u(self.sticker)  # py2: type unicode, py3: type str
    else:
        raise TypeError('Unknown type, must be one of InputFile, str.')
    # end if
    if self.receiver is not None:
        # The None case is handled by the enclosing guard, so only the
        # str/int branches remain.  (The original line-mangled version
        # called isinstance(x, None), which raises TypeError at runtime.)
        if isinstance(self.receiver, str):
            array['chat_id'] = u(self.receiver)  # py2: type unicode, py3: type str
        elif isinstance(self.receiver, int):
            array['chat_id'] = int(self.receiver)  # type int
        else:
            raise TypeError('Unknown type, must be one of None, str, int.')
    # end if
    if self.reply_id is not None:
        if isinstance(self.reply_id, DEFAULT_MESSAGE_ID):
            array['reply_to_message_id'] = DEFAULT_MESSAGE_ID(self.reply_id)  # type DEFAULT_MESSAGE_ID
        elif isinstance(self.reply_id, int):
            array['reply_to_message_id'] = int(self.reply_id)  # type int
        else:
            raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int.')
    # end if
    if self.disable_notification is not None:
        array['disable_notification'] = bool(self.disable_notification)  # type bool
    if self.reply_markup is not None:
        if isinstance(self.reply_markup, InlineKeyboardMarkup):
            array['reply_markup'] = self.reply_markup.to_array()  # type InlineKeyboardMarkup
        elif isinstance(self.reply_markup, ReplyKeyboardMarkup):
            array['reply_markup'] = self.reply_markup.to_array()  # type ReplyKeyboardMarkup
        elif isinstance(self.reply_markup, ReplyKeyboardRemove):
            array['reply_markup'] = self.reply_markup.to_array()  # type ReplyKeyboardRemove
        elif isinstance(self.reply_markup, ForceReply):
            array['reply_markup'] = self.reply_markup.to_array()  # type ForceReply
        else:
            raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply.')
    # end if
    return array
|
Serializes this StickerMessage to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
|
def installedRequirements(self, target):
    """
    Return an iterable of things installed on the target that this
    item requires.
    """
    required_classes = dependentsOf(self.__class__)
    connectors = self.store.query(
        _DependencyConnector, _DependencyConnector.target == target)
    for connector in connectors:
        # Yield only installees whose class is one of our dependencies.
        if connector.installee.__class__ in required_classes:
            yield connector.installee
|
Return an iterable of things installed on the target that this
item requires.
|
def minimum_needs_extractor(impact_report, component_metadata):
    """Extracting minimum needs of the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args
    analysis_layer = impact_report.analysis
    analysis_keywords = analysis_layer.keywords['inasafe_fields']
    use_rounding = impact_report.impact_function.use_rounding
    header = resolve_from_dictionary(extra_args, 'header')
    context['header'] = header
    # check if displaced is not zero; if so, short-circuit with a message.
    try:
        displaced_field_name = analysis_keywords[displaced_field['key']]
        total_displaced = value_from_field_name(
            displaced_field_name, analysis_layer)
        if total_displaced == 0:
            zero_displaced_message = resolve_from_dictionary(
                extra_args, 'zero_displaced_message')
            context['zero_displaced'] = {
                'status': True,
                'message': zero_displaced_message
            }
            return context
    except KeyError:
        # in case no displaced field
        pass
    # minimum needs calculation only affect population type exposure
    # check if analysis keyword have minimum_needs keywords
    have_minimum_needs_field = False
    for field_key in analysis_keywords:
        if field_key.startswith(minimum_needs_namespace):
            have_minimum_needs_field = True
            break
    if not have_minimum_needs_field:
        # Nothing to report; return the header-only context.
        return context
    frequencies = {}
    # map each needs to its frequency groups
    for field in (minimum_needs_fields + additional_minimum_needs):
        need_parameter = field.get('need_parameter')
        if isinstance(need_parameter, ResourceParameter):
            frequency = need_parameter.frequency
        else:
            frequency = field.get('frequency')
        if frequency:
            if frequency not in frequencies:
                frequencies[frequency] = [field]
            else:
                frequencies[frequency].append(field)
    needs = []
    # Use the first feature of the analysis layer as the value source.
    analysis_feature = next(analysis_layer.getFeatures())
    header_frequency_format = resolve_from_dictionary(
        extra_args, 'header_frequency_format')
    total_header = resolve_from_dictionary(extra_args, 'total_header')
    need_header_format = resolve_from_dictionary(
        extra_args, 'need_header_format')
    # group the needs by frequency
    for key, frequency in list(frequencies.items()):
        group = {
            'header': header_frequency_format.format(frequency=tr(key)),
            'total_header': total_header,
            'needs': []
        }
        for field in frequency:
            # check value exists in the field
            field_idx = analysis_layer.fields(
            ).lookupField(field['field_name'])
            if field_idx == -1:
                # skip if field doesn't exists
                continue
            value = format_number(
                analysis_feature[field_idx],
                use_rounding=use_rounding,
                is_population=True)
            unit_abbreviation = ''
            if field.get('need_parameter'):
                need_parameter = field['need_parameter']
                """:type: ResourceParameter"""
                name = tr(need_parameter.name)
                unit_abbreviation = need_parameter.unit.abbreviation
            else:
                if field.get('header_name'):
                    name = field.get('header_name')
                else:
                    name = field.get('name')
                need_unit = field.get('unit')
                if need_unit:
                    unit_abbreviation = need_unit.get('abbreviation')
            # Only append the unit to the header when one is known.
            if unit_abbreviation:
                header = need_header_format.format(
                    name=name,
                    unit_abbreviation=unit_abbreviation)
            else:
                header = name
            item = {
                'header': header,
                'value': value
            }
            group['needs'].append(item)
        needs.append(group)
    context['component_key'] = component_metadata.key
    context['needs'] = needs
    return context
|
Extracting minimum needs of the impact layer.
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
|
def _readLoop(self):
    """ Read thread main loop

    Reads lines from the connected device
    """
    try:
        # Precompute the end-of-line terminator sequence once.
        readTermSeq = list(self.RX_EOL_SEQ)
        readTermLen = len(readTermSeq)
        rxBuffer = []
        while self.alive:
            data = self.serial.read(1)
            if data != '': # check for timeout
                # NOTE(review): on Python 3 serial.read() returns bytes, so a
                # timeout yields b'' which compares unequal to '' and is NOT
                # skipped by this test -- confirm the intended Python version.
                #print >> sys.stderr, ' RX:', data,'({0})'.format(ord(data))
                rxBuffer.append(data)
                if rxBuffer[-readTermLen:] == readTermSeq:
                    # A line (or other logical segment) has been read
                    line = ''.join(rxBuffer[:-readTermLen])
                    rxBuffer = []
                    if len(line) > 0:
                        #print 'calling handler'
                        self._handleLineRead(line)
                elif self._expectResponseTermSeq:
                    # A response-specific terminator may also end a segment,
                    # even without the usual EOL sequence.
                    if rxBuffer[-len(self._expectResponseTermSeq):] == self._expectResponseTermSeq:
                        line = ''.join(rxBuffer)
                        rxBuffer = []
                        self._handleLineRead(line, checkForResponseTerm=False)
            #else:
                #' <RX timeout>'
    except serial.SerialException as e:
        # Fatal serial failure: stop the loop, best-effort close the port,
        # then report through the fatal error callback.
        self.alive = False
        try:
            self.serial.close()
        except Exception: #pragma: no cover
            pass
        # Notify the fatal error handler
        self.fatalErrorCallback(e)
|
Read thread main loop
Reads lines from the connected device
|
def _standardize_data(
        model: pd.DataFrame,
        data: pd.DataFrame,
        batch_key: str,
) -> Tuple[pd.DataFrame, pd.DataFrame, np.ndarray, np.ndarray]:
    """
    Standardizes the data per gene.

    The aim here is to make mean and variance be comparable across batches.

    Parameters
    --------
    model
        Contains the batch annotation
    data
        Contains the Data
    batch_key
        Name of the batch column in the model matrix

    Returns
    --------
    s_data : pandas.DataFrame
        Standardized Data
    design : pandas.DataFrame
        Batch assignment as one-hot encodings
    var_pooled : numpy.ndarray
        Pooled variance per gene
    stand_mean : numpy.ndarray
        Gene-wise mean
    """
    # compute the design matrix
    batch_items = model.groupby(batch_key).groups.items()
    batch_levels, batch_info = zip(*batch_items)
    n_batch = len(batch_info)
    n_batches = np.array([len(v) for v in batch_info])
    n_array = float(sum(n_batches))
    design = _design_matrix(model, batch_key, batch_levels)
    # compute pooled variance estimator
    # B_hat: least-squares coefficients via the normal equations,
    # (X'X)^-1 X' y, fit per gene against the design matrix.
    B_hat = np.dot(np.dot(la.inv(np.dot(design.T, design)), design.T), data.T)
    # grand_mean: batch-size-weighted average of the per-batch means.
    grand_mean = np.dot((n_batches / n_array).T, B_hat[:n_batch, :])
    # var_pooled: mean squared residual per gene across all samples.
    var_pooled = (data - np.dot(design, B_hat).T)**2
    var_pooled = np.dot(var_pooled, np.ones((int(n_array), 1)) / int(n_array))
    # Compute the means
    if np.sum(var_pooled == 0) > 0:
        print(
            'Found {} genes with zero variance.'
            .format(np.sum(var_pooled == 0))
        )
    # stand_mean: grand mean broadcast across samples, plus contributions
    # from the non-batch covariate columns of the design matrix.
    stand_mean = np.dot(grand_mean.T.reshape((len(grand_mean), 1)), np.ones((1, int(n_array))))
    tmp = np.array(design.copy())
    tmp[:, :n_batch] = 0
    stand_mean += np.dot(tmp, B_hat).T
    # need to be a bit careful with the zero variance genes
    # just set the zero variance genes to zero in the standardized data
    s_data = np.where(var_pooled == 0, 0, (
        (data - stand_mean) /
        np.dot(np.sqrt(var_pooled), np.ones((1, int(n_array))))
    ))
    s_data = pd.DataFrame(s_data, index=data.index, columns=data.columns)
    return s_data, design, var_pooled, stand_mean
|
Standardizes the data per gene.
The aim here is to make mean and variance be comparable across batches.
Parameters
--------
model
Contains the batch annotation
data
Contains the Data
batch_key
Name of the batch column in the model matrix
Returns
--------
s_data : pandas.DataFrame
Standardized Data
design : pandas.DataFrame
Batch assignment as one-hot encodings
var_pooled : numpy.ndarray
Pooled variance per gene
stand_mean : numpy.ndarray
Gene-wise mean
|
def plot_fit(self, **kwargs):
    """ Plots the fit of the model

    Plots the observed data against the model's filtered values, plus one
    subplot per exogenous regressor showing its time-varying coefficient.

    Parameters
    ----------
    **kwargs :
        figsize : tuple, optional
            Figure size passed to matplotlib (default (10, 7)).

    Notes
    ----------
    Intervals are bootstrapped as follows: take the filtered values from the
    algorithm (thetas). Use these thetas to generate a pseudo data stream from
    the measurement density. Use the GAS algorithm and estimated latent variables to
    filter the pseudo data. Repeat this N times.

    Raises
    ----------
    Exception
        If the latent variables have not been estimated yet.

    Returns
    ----------
    None (plots data and the fit)
    """
    import matplotlib.pyplot as plt
    import seaborn as sns

    figsize = kwargs.get('figsize',(10,7))

    if self.latent_variables.estimated is False:
        raise Exception("No latent variables estimated!")
    else:
        date_index = self.index.copy()
        mu, Y, scores, coefficients = self._model(self.latent_variables.get_z_values())

        # The quantity to plot depends on the measurement density: for the
        # Exponential family the mean is the reciprocal of the linked value,
        # and for Skewt an extra location shift is added for the skewness.
        if self.model_name2 == "Exponential":
            values_to_plot = 1.0/self.link(mu)
        elif self.model_name2 == "Skewt":
            t_params = self.transform_z()
            model_scale, model_shape, model_skewness = self._get_scale_and_shape(t_params)
            m1 = (np.sqrt(model_shape)*sp.gamma((model_shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(model_shape/2.0))
            additional_loc = (model_skewness - (1.0/model_skewness))*model_scale*m1
            values_to_plot = mu + additional_loc
        else:
            values_to_plot = self.link(mu)

        # First subplot: data vs. filtered values; one further subplot per
        # regressor coefficient (coefficients has one extra trailing value,
        # hence the 0:-1 slice to align with the date index).
        plt.figure(figsize=figsize)

        plt.subplot(len(self.X_names)+1, 1, 1)
        plt.title(self.y_name + " Filtered")
        plt.plot(date_index,Y,label='Data')
        plt.plot(date_index,values_to_plot,label='GAS Filter',c='black')
        plt.legend(loc=2)

        for coef in range(0,len(self.X_names)):
            plt.subplot(len(self.X_names)+1, 1, 2+coef)
            plt.title("Beta " + self.X_names[coef])
            plt.plot(date_index,coefficients[coef,0:-1],label='Coefficient')
            plt.legend(loc=2)

        plt.show()
|
Plots the fit of the model
Notes
----------
Intervals are bootstrapped as follows: take the filtered values from the
algorithm (thetas). Use these thetas to generate a pseudo data stream from
the measurement density. Use the GAS algorithm and estimated latent variables to
filter the pseudo data. Repeat this N times.
Returns
----------
None (plots data and the fit)
|
def partial_to_complete_sha_hex(self, partial_hexsha):
    """:return: Full binary 20 byte sha from the given partial hexsha

    :raise AmbiguousObjectName:
    :raise BadObject:
    :note: currently we only raise BadObject as git does not communicate
        AmbiguousObjects separately"""
    try:
        # git resolves the abbreviated sha for us; only the full hexsha
        # from the returned header tuple is needed here.
        header = self._git.get_object_header(partial_hexsha)
        return hex_to_bin(header[0])
    except (GitCommandError, ValueError):
        # Any resolution failure is surfaced uniformly as BadObject.
        raise BadObject(partial_hexsha)
|
:return: Full binary 20 byte sha from the given partial hexsha
:raise AmbiguousObjectName:
:raise BadObject:
:note: currently we only raise BadObject as git does not communicate
AmbiguousObjects separately
|
def clean(self, point_merging=True, merge_tol=None, lines_to_points=True,
          polys_to_lines=True, strips_to_polys=True, inplace=False):
    """
    Cleans mesh by merging duplicate points, remove unused
    points, and/or remove degenerate cells.

    Parameters
    ----------
    point_merging : bool, optional
        Enables point merging. On by default.
    merge_tol : float, optional
        Set merging tolerance. When enabled merging is set to
        absolute distance
    lines_to_points : bool, optional
        Turn on/off conversion of degenerate lines to points. Enabled by
        default.
    polys_to_lines : bool, optional
        Turn on/off conversion of degenerate polys to lines. Enabled by
        default.
    strips_to_polys : bool, optional
        Turn on/off conversion of degenerate strips to polys.
    inplace : bool, optional
        Updates mesh in-place while returning nothing. Default False.

    Returns
    -------
    mesh : vtki.PolyData
        Cleaned mesh. None when inplace=True
    """
    # NOTE(review): point_merging is accepted but never forwarded to the
    # VTK filter below — confirm whether SetPointMerging should be called.
    clean = vtk.vtkCleanPolyData()
    clean.SetConvertLinesToPoints(lines_to_points)
    clean.SetConvertPolysToLines(polys_to_lines)
    clean.SetConvertStripsToPolys(strips_to_polys)
    if merge_tol:
        # Switch the filter from relative to absolute tolerance mode.
        clean.ToleranceIsAbsoluteOn()
        clean.SetAbsoluteTolerance(merge_tol)
    clean.SetInputData(self)
    clean.Update()

    output = _get_output(clean)

    # Check output so no segfaults occur
    if output.n_points < 1:
        raise AssertionError('Clean tolerance is too high. Empty mesh returned.')

    if inplace:
        self.overwrite(output)
    else:
        return output
|
Cleans mesh by merging duplicate points, remove unused
points, and/or remove degenerate cells.
Parameters
----------
point_merging : bool, optional
Enables point merging. On by default.
merge_tol : float, optional
    Set merging tolerance. When enabled merging is set to
absolute distance
lines_to_points : bool, optional
Turn on/off conversion of degenerate lines to points. Enabled by
default.
polys_to_lines : bool, optional
Turn on/off conversion of degenerate polys to lines. Enabled by
default.
strips_to_polys : bool, optional
Turn on/off conversion of degenerate strips to polys.
inplace : bool, optional
Updates mesh in-place while returning nothing. Default True.
Returns
-------
mesh : vtki.PolyData
Cleaned mesh. None when inplace=True
|
def QA_util_sql_async_mongo_setting(uri='mongodb://localhost:27017/quantaxis'):
    """Build an asynchronous Motor MongoDB client bound to an event loop.

    Keyword Arguments:
        uri {str} -- [description] (default: {'mongodb://localhost:27017/quantaxis'})

    Returns:
        [type] -- [description]
    """
    try:
        io_loop = asyncio.get_event_loop()
    except RuntimeError:
        # No event loop exists in this thread yet: create one and install it
        # so subsequent calls in this thread reuse it.
        io_loop = asyncio.new_event_loop()
        asyncio.set_event_loop(io_loop)
    return AsyncIOMotorClient(uri, io_loop=io_loop)
|
异步mongo示例
Keyword Arguments:
uri {str} -- [description] (default: {'mongodb://localhost:27017/quantaxis'})
Returns:
[type] -- [description]
|
def sg_init(sess):
    r""" Initializes session variables.

    Runs both the global and local variable initializers in a single step.

    Args:
      sess: Session to initialize.
    """
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    sess.run(init_op)
|
r""" Initializes session variables.
Args:
sess: Session to initialize.
|
def import_ohm(filename, verbose=False, reciprocals=False):
    """Construct pandas data frame from BERT`s unified data format (.ohm).

    Parameters
    ----------
    filename : string
        File path to .ohm file
    verbose : bool, optional
        Enables extended debug output
    reciprocals : int, optional
        if provided, then assume that this is a reciprocal measurement where
        only the electrode cables were switched. The provided number N is
        treated as the maximum electrode number, and denotations are renamed
        according to the equation :math:`X_n = N - (X_a - 1)`

    Returns
    -------
    data : :class:`pandas.DataFrame`
        The measurement data
    elecs : :class:`pandas.DataFrame`
        Electrode positions (columns: X, Y, Z)
    topography : None
        No topography information is provided at the moment
    """
    if verbose:
        print(("Reading in %s... \n" % filename))

    # Use a context manager so the file handle is always closed, even when a
    # malformed file raises mid-parse (the original leaked the handle then).
    with open(filename) as fid:
        # Header: electrode count, then a '#'-commented line naming the
        # electrode coordinate columns (e.g. "# x y z").
        eleccount = int(fid.readline().split("#")[0])
        elecs_str = fid.readline().split("#")[1]
        elecs_dim = len(elecs_str.split())
        elecs_ix = elecs_str.split()

        elecs = np.zeros((eleccount, elecs_dim), 'float')
        for i in range(eleccount):
            line = fid.readline().split("#")[0]  # Account for comments
            elecs[i] = line.rsplit()

        # Data section: measurement count, then a '#'-commented column header.
        datacount = int(fid.readline().split("#")[0])
        data_str = fid.readline().split("#")[1]
        data_dim = len(data_str.split())
        data_ix = data_str.split()

        _string_ = """
    Number of electrodes: %s
    Dimension: %s
    Coordinates: %s
    Number of data points: %s
    Data header: %s
    """ % (eleccount, elecs_dim, elecs_str, datacount, data_str)

        data = np.zeros((datacount, data_dim), 'float')
        for i in range(datacount):
            line = fid.readline()
            data[i] = line.rsplit()

    data = pd.DataFrame(data, columns=data_ix)
    # rename columns to the reda standard
    data_reda = data.rename(
        index=str,
        columns={
            'rhoa': 'rho_a',
        }
    )
    # Derive resistance when only apparent resistivity + geometric factor
    # are present in the file.
    if ('r' not in data_reda.keys()) and \
            ('rho_a' in data_reda.keys() and 'k' in data_reda.keys()):
        data_reda['r'] = data_reda['rho_a'] / data_reda['k']
        print(
            "Calculating resistance from apparent resistivity and "
            "geometric factors. (r = rhoa_ / k)")

    elecs = pd.DataFrame(elecs, columns=elecs_ix)
    # Ensure uppercase labels (X, Y, Z) in electrode positions
    elecs.columns = elecs.columns.str.upper()

    # rename electrode denotations
    # NOTE: deliberate exact type check — the default reciprocals=False is a
    # bool (a subclass of int) and must NOT trigger the renumbering.
    if type(reciprocals) is int:
        print('renumbering electrode numbers')
        data_reda[['a', 'b', 'm', 'n']] = reciprocals + 1 - data_reda[
            ['a', 'b', 'm', 'n']]

    if verbose:
        print((_string_))

    return data_reda, elecs, None
|
Construct pandas data frame from BERT`s unified data format (.ohm).
Parameters
----------
filename : string
File path to .ohm file
verbose : bool, optional
Enables extended debug output
reciprocals : int, optional
if provided, then assume that this is a reciprocal measurement where
only the electrode cables were switched. The provided number N is
treated as the maximum electrode number, and denotations are renamed
according to the equation :math:`X_n = N - (X_a - 1)`
Returns
-------
data : :class:`pandas.DataFrame`
The measurement data
elecs : :class:`pandas.DataFrame`
Electrode positions (columns: X, Y, Z)
topography : None
No topography information is provided at the moment
|
def get_single_review_comments(self, id):
    """
    :calls: `GET /repos/:owner/:repo/pulls/:number/review/:id/comments <https://developer.github.com/v3/pulls/reviews/>`_
    :param id: integer id of the review whose comments are fetched
    :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestComment.PullRequestComment`
    """
    # Python 2 code: 'long' is accepted alongside int for the id.
    assert isinstance(id, (int, long)), id
    # Lazily-paginated request; no data is fetched until iteration.
    return github.PaginatedList.PaginatedList(
        github.PullRequestComment.PullRequestComment,
        self._requester,
        self.url + "/reviews/" + str(id) + "/comments",
        None
    )
|
:calls: `GET /repos/:owner/:repo/pulls/:number/review/:id/comments <https://developer.github.com/v3/pulls/reviews/>`_
:param id: integer
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestComment.PullRequestComment`
|
def kill(self):
    """
    If run_step needs to be killed, this method will be called.

    Sends SIGTERM first, waits up to CONSTANTS.SECONDS_TO_KILL_AFTER_SIGTERM
    seconds for the subprocess to exit, then escalates to SIGKILL.

    :return: None
    """
    try:
        logger.info('Trying to terminating run_step...')
        # Graceful stop first (SIGTERM).
        self.process.terminate()
        time_waited_seconds = 0
        # Poll in 0.5 s steps until the process exits or the grace
        # period elapses.
        while self.process.poll() is None and time_waited_seconds < CONSTANTS.SECONDS_TO_KILL_AFTER_SIGTERM:
            time.sleep(0.5)
            time_waited_seconds += 0.5
        if self.process.poll() is None:
            # Still alive after the grace period: force kill (SIGKILL).
            self.process.kill()
            logger.warning('Waited %d seconds for run_step to terminate. Killing now....', CONSTANTS.SECONDS_TO_KILL_AFTER_SIGTERM)
    # Python 2 except syntax; OSError covers e.g. an already-dead process.
    except OSError, e:
        logger.error('Error while trying to kill the subprocess: %s', e)
|
If run_step needs to be killed, this method will be called
:return: None
|
def lookup_by_number(errno):
    """Print the name(s) of module-level constants equal to *errno*.

    Used for development only.
    """
    # Reverse-lookup over this module's globals; several names may share
    # the same numeric value, so all matches are printed.
    matches = [name for name, value in globals().items() if errno == value]
    for name in matches:
        print(name)
Used for development only
|
def sum_from(zero: T1 = None) -> Callable[[ActualIterable[T1]], T1]:
    """
    >>> from Redy.Collections import Traversal, Flow
    >>> lst: Iterable[int] = [1, 2, 3]
    >>> x = Flow(lst)[Traversal.sum_from(0)].unbox
    >>> assert x is 6
    >>> x = Flow(lst)[Traversal.sum_from()].unbox
    >>> assert x is 6
    """

    def reducer(items: Iterable[T1]) -> T1:
        # With an explicit zero, delegate directly to builtins.sum.
        if zero is not None:
            return builtins.sum(items, zero)
        # Otherwise use the first element as the seed value.
        iterator = iter(items)
        seed = next(iterator)
        return builtins.sum(iterator, seed)

    return reducer
|
>>> from Redy.Collections import Traversal, Flow
>>> lst: Iterable[int] = [1, 2, 3]
>>> x = Flow(lst)[Traversal.sum_from(0)].unbox
>>> assert x is 6
>>> x = Flow(lst)[Traversal.sum_from()].unbox
>>> assert x is 6
|
def parse_identifier(source, start, throw=True):
    """passes white space from start and returns first identifier,
    if identifier invalid and throw raises SyntaxError otherwise returns None"""
    pos = pass_white(source, start)
    # Guard: nothing left after the whitespace.
    if pos >= len(source):
        if throw:
            raise SyntaxError('Missing identifier!')
        return None
    # Guard: first character must be a legal identifier start.
    if source[pos] not in IDENTIFIER_START:
        if throw:
            raise SyntaxError('Invalid identifier start: "%s"' % source[pos])
        return None
    # Consume the remaining identifier characters.
    end = pos + 1
    while end < len(source) and source[end] in IDENTIFIER_PART:
        end += 1
    name = source[pos:end]
    # Reject reserved words / invalid assignment targets.
    if not is_valid_lval(name):
        if throw:
            raise SyntaxError(
                'Invalid identifier name: "%s"' % name)
        return None
    return name, end
|
passes white space from start and returns first identifier,
if identifier invalid and throw raises SyntaxError otherwise returns None
|
def copy_file_to_remote(self, local_path, remote_path):
    """scp the local file to remote folder.

    :param local_path: local path
    :param remote_path: remote path
    :raises SFtpExecutionError: if the SFTP transfer fails for any reason
    """
    # Open an SFTP channel on the already-established SSH transport.
    sftp_client = self.transport.open_sftp_client()
    LOG.debug('Copy the local file to remote. '
              'Source=%(src)s. Target=%(target)s.' %
              {'src': local_path, 'target': remote_path})
    try:
        sftp_client.put(local_path, remote_path)
    except Exception as ex:
        # Wrap any underlying failure in the project-specific error type.
        LOG.error('Failed to copy the local file to remote. '
                  'Reason: %s.' % six.text_type(ex))
        raise SFtpExecutionError(err=ex)
|
scp the local file to remote folder.
:param local_path: local path
:param remote_path: remote path
|
def add_handler(self, name='console-color', level='info', formatter='standard', **kwargs):
    """
    Add another handler to the logging system if not present already.

    Available handlers are currently: ['console-bw', 'console-color', 'rotating-log']

    :param name: key into the module-level ``types`` handler factory map
    :param level: log level name for the new handler
    :param formatter: formatter name for the new handler
    :param kwargs: extra arguments forwarded to the handler factory
    """
    # NOTE(review): despite the docstring, no duplicate check happens here —
    # presumably add_handler_raw performs it; confirm.
    # make sure the the log file has a name
    if name == 'rotating-log' and 'filename' not in kwargs:
        kwargs.update({'filename': self.logfilename})

    # make sure the the log file has a name
    if name == 'stringio' and 'stringio' not in kwargs:
        # Python 2 style StringIO module usage.
        kwargs.update({'stringio': StringIO.StringIO()})
    handler = types[name](**kwargs)
    self.add_handler_raw(handler, name, level=level, formatter=formatter)
|
Add another handler to the logging system if not present already.
Available handlers are currently: ['console-bw', 'console-color', 'rotating-log']
|
def splitext(self):
    """ p.splitext() -> Return ``(p.stripext(), p.ext)``.

    Split off the filename extension of this path and return both
    halves; either part may be empty.  The extension is everything
    from the final ``'.'`` of the last path segment onward, so if
    ``(a, b) == p.splitext()`` then ``a + b == p``.

    .. seealso:: :func:`os.path.splitext`
    """
    stem, extension = self.module.splitext(self)
    # Only the stem is re-wrapped as a path; the extension stays a str.
    return self._next_class(stem), extension
|
p.splitext() -> Return ``(p.stripext(), p.ext)``.
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from ``'.'`` to the end of the
last path segment. This has the property that if
``(a, b) == p.splitext()``, then ``a + b == p``.
.. seealso:: :func:`os.path.splitext`
|
def _compileRegExp(string, insensitive, minimal):
"""Compile regular expression.
Python function, used by C code
NOTE minimal flag is not supported here, but supported on PCRE
"""
flags = 0
if insensitive:
flags = re.IGNORECASE
string = string.replace('[_[:alnum:]]', '[\\w\\d]') # ad-hoc fix for C++ parser
string = string.replace('[:digit:]', '\\d')
string = string.replace('[:blank:]', '\\s')
try:
return re.compile(string, flags)
except (re.error, AssertionError) as ex:
_logger.warning("Invalid pattern '%s': %s", string, str(ex))
return None
|
Compile regular expression.
Python function, used by C code
NOTE minimal flag is not supported here, but supported on PCRE
|
def convert2geojson(jsonfile, src_srs, dst_srs, src_file):
    """convert shapefile to geojson file

    :param jsonfile: output GeoJSON path (removed first if it exists)
    :param src_srs: source spatial reference passed to ogr2ogr -s_srs
    :param dst_srs: target spatial reference passed to ogr2ogr -t_srs
    :param src_file: input shapefile path
    """
    if os.path.exists(jsonfile):
        os.remove(jsonfile)
    # ogr2ogr lives inside the conda/python prefix on Windows; rely on
    # PATH lookup elsewhere.
    if sysstr == 'Windows':
        exepath = '"%s/Lib/site-packages/osgeo/ogr2ogr"' % sys.exec_prefix
    else:
        exepath = FileClass.get_executable_fullpath('ogr2ogr')
    # os.system(s)
    # NOTE(review): command is built by string interpolation — unsafe if any
    # argument contains shell metacharacters; consider an argument list.
    s = '%s -f GeoJSON -s_srs "%s" -t_srs %s %s %s' % (
        exepath, src_srs, dst_srs, jsonfile, src_file)
    UtilClass.run_command(s)
|
convert shapefile to geojson file
|
def get_token(self):
    """
    Retrieves the token from the File System

    :return dict or None: The token if exists, None otherwise
    """
    if not self.token_path.exists():
        # No token file: remember and report the absence.
        self.token = None
        return None
    with self.token_path.open('r') as token_file:
        loaded = self.token_constructor(self.serializer.load(token_file))
    # Cache the freshly loaded token on the instance.
    self.token = loaded
    return loaded
|
Retrieves the token from the File System
:return dict or None: The token if exists, None otherwise
|
def search():
    """Show all keywords that match a pattern

    Query parameter ``pattern`` supports two prefixes:
    ``name:`` restricts matching to keyword names only, and
    ``in:<collection>`` filters results to collections whose name starts
    with the given text.
    """
    pattern = flask.request.args.get('pattern', "*").strip().lower()

    # if the pattern contains "in:<collection>" (eg: in:builtin),
    # filter results to only that (or those) collections
    # This was kind-of hacked together, but seems to work well enough
    collections = [c["name"].lower() for c in current_app.kwdb.get_collections()]
    words = []
    filters = []
    if pattern.startswith("name:"):
        pattern = pattern[5:].strip()
        mode = "name"
    else:
        mode="both"

    # Split the "in:" filter words out of the search pattern proper.
    for word in pattern.split(" "):
        if word.lower().startswith("in:"):
            filters.extend([name for name in collections if name.startswith(word[3:])])
        else:
            words.append(word)
    pattern = " ".join(words)

    keywords = []
    for keyword in current_app.kwdb.search(pattern, mode):
        kw = list(keyword)
        collection_id = kw[0]
        collection_name = kw[1].lower()
        # With no "in:" filter, all collections pass.
        if len(filters) == 0 or collection_name in filters:
            url = flask.url_for(".doc_for_library", collection_id=kw[0], keyword=kw[2])
            # Stable DOM id built from collection + keyword name.
            row_id = "row-%s.%s" % (keyword[1].lower(), keyword[2].lower().replace(" ","-"))
            keywords.append({"collection_id": keyword[0],
                             "collection_name": keyword[1],
                             "name": keyword[2],
                             "synopsis": keyword[3],
                             "version": __version__,
                             "url": url,
                             "row_id": row_id
                         })

    keywords.sort(key=lambda kw: kw["name"])
    return flask.render_template("search.html",
                                 data={"keywords": keywords,
                                       "version": __version__,
                                       "pattern": pattern
                                       })
|
Show all keywords that match a pattern
|
def compute_stats2(arrayNR, stats, weights):
    """
    :param arrayNR:
        an array of (N, R) elements
    :param stats:
        a sequence of S statistic functions
    :param weights:
        a list of R weights
    :returns:
        an array of (N, S) elements
    :raises ValueError:
        if the number of weights does not match the second dimension
    """
    out_shape = list(arrayNR.shape)
    if out_shape[1] != len(weights):
        raise ValueError('Got %d weights but %d values!' %
                         (len(weights), out_shape[1]))
    # The output keeps N rows but has one column per statistic.
    out_shape[1] = len(stats)
    result = numpy.zeros(out_shape, arrayNR.dtype)
    # Split the input into its R realization columns once.
    columns = [arrayNR[:, r] for r in range(len(weights))]
    for s, stat_func in enumerate(stats):
        result[:, s] = apply_stat(stat_func, columns, weights)
    return result
|
:param arrayNR:
an array of (N, R) elements
:param stats:
a sequence of S statistic functions
:param weights:
a list of R weights
:returns:
an array of (N, S) elements
|
def generate_login(self, min_length=6, max_length=10, digits=True):
    """
    Generate string for email address login with defined length and
    alphabet.

    :param min_length: (optional) min login length.
        Default value is ``6``.
    :param max_length: (optional) max login length.
        Default value is ``10``.
    :param digits: (optional) use digits in login generation.
        Default value is ``True``.
    """
    # Alphabet: lowercase letters, optionally extended with digits.
    alphabet = string.ascii_lowercase + (string.digits if digits else '')
    size = random.randint(min_length, max_length)
    return ''.join(random.choice(alphabet) for _ in range(size))
|
Generate string for email address login with defined length and
alphabet.
:param min_length: (optional) min login length.
Default value is ``6``.
:param max_length: (optional) max login length.
Default value is ``10``.
:param digits: (optional) use digits in login generation.
Default value is ``True``.
|
def delete_local_docker_cache(docker_tag):
    """
    Delete the local docker cache for the entire docker image chain

    :param docker_tag: Docker tag
    :return: None
    """
    # 'docker history -q' lists one layer image id per line for the tag.
    history_cmd = ['docker', 'history', '-q', docker_tag]

    try:
        image_ids_b = subprocess.check_output(history_cmd)
        image_ids_str = image_ids_b.decode('utf-8').strip()

        # '<missing>' rows are layers without a local id and cannot be
        # removed individually.
        layer_ids = [id.strip() for id in image_ids_str.split('\n') if id != '<missing>']
        delete_cmd = ['docker', 'image', 'rm', '--force']
        delete_cmd.extend(layer_ids)
        subprocess.check_call(delete_cmd)
    except subprocess.CalledProcessError as error:
        # Could be caused by the image not being present
        logging.debug('Error during local cache deletion %s', error)
|
Delete the local docker cache for the entire docker image chain
:param docker_tag: Docker tag
:return: None
|
def move(self, target):
    """ Moves this DriveItem to another Folder.
    Can't move between different Drives.

    :param target: a Folder, Drive item or Item Id string.
     If it's a drive the item will be moved to the root folder.
    :type target: drive.Folder or DriveItem or str
    :return: Success / Failure
    :rtype: bool
    :raises ValueError: if target type is unsupported, ids are missing,
     or the target id is the literal string 'root'
    """
    # Resolve the destination folder id from the accepted target types.
    if isinstance(target, Folder):
        target_id = target.object_id
    elif isinstance(target, Drive):
        # we need the root folder id
        root_folder = target.get_root_folder()
        if not root_folder:
            return False
        target_id = root_folder.object_id
    elif isinstance(target, str):
        target_id = target
    else:
        raise ValueError('Target must be a Folder or Drive')

    if not self.object_id or not target_id:
        raise ValueError(
            'Both self, and target must have a valid object_id.')

    if target_id == 'root':
        raise ValueError("When moving, target id can't be 'root'")

    url = self.build_url(
        self._endpoints.get('item').format(id=self.object_id))

    # PATCHing parentReference is how the Graph API expresses a move.
    data = {'parentReference': {'id': target_id}}

    response = self.con.patch(url, data=data)
    if not response:
        return False

    # Keep the local object in sync with the server-side move.
    self.parent_id = target_id

    return True
|
Moves this DriveItem to another Folder.
Can't move between different Drives.
:param target: a Folder, Drive item or Item Id string.
If it's a drive the item will be moved to the root folder.
:type target: drive.Folder or DriveItem or str
:return: Success / Failure
:rtype: bool
|
def _ring_2d(m, n):
"""Ring-order of a mxn mesh.
Args:
m: an integer
n: an integer
Returns:
a list of mxn pairs
"""
if m == 1:
return [(0, i) for i in range(n)]
if n == 1:
return [(i, 0) for i in range(m)]
if m % 2 != 0:
tf.logging.warning("Odd dimension")
return [(i % m, i // m) for i in range(n * m)]
ret = [(0, 0)]
for i in range(m // 2):
for j in range(1, n):
ret.append((2 * i, j))
for j in range(n-1, 0, -1):
ret.append((2 * i + 1, j))
for i in range(m-1, 0, -1):
ret.append((i, 0))
return ret
|
Ring-order of a mxn mesh.
Args:
m: an integer
n: an integer
Returns:
a list of mxn pairs
|
def vol_tetra(vt1, vt2, vt3, vt4):
    """
    Calculate the volume of a tetrahedron, given the four vertices of vt1,
    vt2, vt3 and vt4.

    Args:
        vt1 (array-like): coordinates of vertex 1.
        vt2 (array-like): coordinates of vertex 2.
        vt3 (array-like): coordinates of vertex 3.
        vt4 (array-like): coordinates of vertex 4.

    Returns:
        (float): volume of the tetrahedron.
    """
    # Edge vectors from vt4; |a . (b x c)| is the parallelepiped volume,
    # one sixth of which is the tetrahedron volume.
    edge_a = vt1 - vt4
    edge_b = vt2 - vt4
    edge_c = vt3 - vt4
    return np.abs(np.dot(edge_a, np.cross(edge_b, edge_c))) / 6
|
Calculate the volume of a tetrahedron, given the four vertices of vt1,
vt2, vt3 and vt4.
Args:
vt1 (array-like): coordinates of vertex 1.
vt2 (array-like): coordinates of vertex 2.
vt3 (array-like): coordinates of vertex 3.
vt4 (array-like): coordinates of vertex 4.
Returns:
(float): volume of the tetrahedron.
|
def get_bgp_config(self, group="", neighbor=""):
    """
    Parse BGP config params into a dict

    Builds a napalm-style BGP configuration dictionary from the running
    config: one entry per peer-group (plus "_" for ungrouped neighbors),
    each with its group-level attributes and a "neighbors" sub-dict.

    :param group='': optional peer-group name to restrict the output to
    :param neighbor='': optional neighbor IP to restrict the output to
    :return: dict keyed by group name
    """
    bgp_config = {}

    def build_prefix_limit(af_table, limit, prefix_percent, prefix_timeout):
        # Shape the maximum-prefix settings into the napalm prefix_limit
        # structure, keyed by address family (inet/inet6) and SAFI.
        prefix_limit = {}
        inet = False
        inet6 = False
        preifx_type = "inet"
        if isinstance(af_table, list):
            af_table = str(af_table)
        if "ipv4" in af_table.lower():
            inet = True
        if "ipv6" in af_table.lower():
            inet6 = True
            preifx_type = "inet6"
        if len(af_table.split()) == 2:
            safi = "unicast"
        else:
            safi = af_table.split()[-1]
        if inet or inet6:
            prefix_limit = {
                preifx_type: {
                    safi: {
                        "limit": limit,
                        "teardown": {
                            "threshold": prefix_percent,
                            "timeout": prefix_timeout,
                        },
                    }
                }
            }
        return prefix_limit

    # Get BGP config using ciscoconfparse because some old devices dont support "| sec bgp"
    cfg = self.get_config(retrieve="running")
    cfg = cfg["running"].splitlines()
    bgp_config_text = napalm.base.helpers.cisco_conf_parse_objects(
        "router bgp", cfg
    )
    bgp_asn = napalm.base.helpers.regex_find_txt(
        r"router bgp (\d+)", bgp_config_text, default=0
    )

    # Get a list of all neighbors and groups in the config
    all_neighbors = set()
    all_groups = set()
    bgp_group_neighbors = {}
    all_groups.add("_")
    for line in bgp_config_text:
        if " neighbor " in line:
            # A "neighbor" statement names either an IP (a real neighbor)
            # or a word (a peer-group).
            if re.search(IP_ADDR_REGEX, line) is not None:
                all_neighbors.add(re.search(IP_ADDR_REGEX, line).group())
            elif re.search(IPV6_ADDR_REGEX_2, line) is not None:
                all_neighbors.add(re.search(IPV6_ADDR_REGEX_2, line).group())
            else:
                bgp_group = re.search(r" neighbor [^\s]+", line).group()
                bgp_group = bgp_group.split()[1]
                all_groups.add(bgp_group)

    # Get the neighrbor level config for each neighbor
    for bgp_neighbor in all_neighbors:
        # If neighbor_filter is passed in, only continue for that neighbor
        if neighbor:
            if bgp_neighbor != neighbor:
                continue
        afi_list = napalm.base.helpers.cisco_conf_parse_parents(
            r"\s+address-family.*", bgp_neighbor, bgp_config_text
        )
        afi = afi_list[0]
        # Skipping neighbors in VRFs for now
        if "vrf" in str(afi_list):
            continue
        else:
            neighbor_config = napalm.base.helpers.cisco_conf_parse_objects(
                bgp_neighbor, bgp_config_text
            )
        # For group_name- use peer-group name, else VRF name, else "_" for no group
        group_name = napalm.base.helpers.regex_find_txt(
            " peer-group ([^']+)", neighbor_config, default="_"
        )
        # Start finding attributes for the neighbor config
        description = napalm.base.helpers.regex_find_txt(
            r" description ([^\']+)\'", neighbor_config
        )
        peer_as = napalm.base.helpers.regex_find_txt(
            r" remote-as (\d+)", neighbor_config, default=0
        )
        prefix_limit = napalm.base.helpers.regex_find_txt(
            r"maximum-prefix (\d+) \d+ \w+ \d+", neighbor_config, default=0
        )
        prefix_percent = napalm.base.helpers.regex_find_txt(
            r"maximum-prefix \d+ (\d+) \w+ \d+", neighbor_config, default=0
        )
        prefix_timeout = napalm.base.helpers.regex_find_txt(
            r"maximum-prefix \d+ \d+ \w+ (\d+)", neighbor_config, default=0
        )
        export_policy = napalm.base.helpers.regex_find_txt(
            r"route-map ([^\s]+) out", neighbor_config
        )
        import_policy = napalm.base.helpers.regex_find_txt(
            r"route-map ([^\s]+) in", neighbor_config
        )
        local_address = napalm.base.helpers.regex_find_txt(
            r" update-source (\w+)", neighbor_config
        )
        local_as = napalm.base.helpers.regex_find_txt(
            r"local-as (\d+)", neighbor_config, default=0
        )
        password = napalm.base.helpers.regex_find_txt(
            r"password (?:[0-9] )?([^\']+\')", neighbor_config
        )
        nhs = bool(
            napalm.base.helpers.regex_find_txt(r" next-hop-self", neighbor_config)
        )
        route_reflector_client = bool(
            napalm.base.helpers.regex_find_txt(
                r"route-reflector-client", neighbor_config
            )
        )
        # Add the group name to bgp_group_neighbors if its not there already
        if group_name not in bgp_group_neighbors.keys():
            bgp_group_neighbors[group_name] = {}
        # Build the neighbor dict of attributes
        bgp_group_neighbors[group_name][bgp_neighbor] = {
            "description": description,
            "remote_as": peer_as,
            "prefix_limit": build_prefix_limit(
                afi, prefix_limit, prefix_percent, prefix_timeout
            ),
            "export_policy": export_policy,
            "import_policy": import_policy,
            "local_address": local_address,
            "local_as": local_as,
            "authentication_key": password,
            "nhs": nhs,
            "route_reflector_client": route_reflector_client,
        }
    # Get the peer-group level config for each group
    for group_name in bgp_group_neighbors.keys():
        # If a group is passed in params, only continue on that group
        if group:
            if group_name != group:
                continue
        # Default no group
        if group_name == "_":
            bgp_config["_"] = {
                "apply_groups": [],
                "description": "",
                "local_as": 0,
                "type": "",
                "import_policy": "",
                "export_policy": "",
                "local_address": "",
                "multipath": False,
                "multihop_ttl": 0,
                "remote_as": 0,
                "remove_private_as": False,
                "prefix_limit": {},
                "neighbors": bgp_group_neighbors.get("_", {}),
            }
            continue
        neighbor_config = napalm.base.helpers.cisco_conf_parse_objects(
            group_name, bgp_config_text
        )
        multipath = False
        # multipath is set per address-family; one match is enough.
        afi_list = napalm.base.helpers.cisco_conf_parse_parents(
            r"\s+address-family.*", group_name, neighbor_config
        )
        for afi in afi_list:
            afi_config = napalm.base.helpers.cisco_conf_parse_objects(
                afi, bgp_config_text
            )
            multipath = bool(
                napalm.base.helpers.regex_find_txt(r" multipath", str(afi_config))
            )
            if multipath:
                break
        description = napalm.base.helpers.regex_find_txt(
            r" description ([^\']+)\'", neighbor_config
        )
        local_as = napalm.base.helpers.regex_find_txt(
            r"local-as (\d+)", neighbor_config, default=0
        )
        import_policy = napalm.base.helpers.regex_find_txt(
            r"route-map ([^\s]+) in", neighbor_config
        )
        export_policy = napalm.base.helpers.regex_find_txt(
            r"route-map ([^\s]+) out", neighbor_config
        )
        local_address = napalm.base.helpers.regex_find_txt(
            r" update-source (\w+)", neighbor_config
        )
        # NOTE(review): the literal braces in this pattern look wrong —
        # presumably r"ebgp-multihop (\d+)" was intended; confirm.
        multihop_ttl = napalm.base.helpers.regex_find_txt(
            r"ebgp-multihop {\d+}", neighbor_config, default=0
        )
        peer_as = napalm.base.helpers.regex_find_txt(
            r" remote-as (\d+)", neighbor_config, default=0
        )
        remove_private_as = bool(
            napalm.base.helpers.regex_find_txt(
                r"remove-private-as", neighbor_config
            )
        )
        prefix_limit = napalm.base.helpers.regex_find_txt(
            r"maximum-prefix (\d+) \d+ \w+ \d+", neighbor_config, default=0
        )
        prefix_percent = napalm.base.helpers.regex_find_txt(
            r"maximum-prefix \d+ (\d+) \w+ \d+", neighbor_config, default=0
        )
        prefix_timeout = napalm.base.helpers.regex_find_txt(
            r"maximum-prefix \d+ \d+ \w+ (\d+)", neighbor_config, default=0
        )
        # A session is internal when the peer AS equals the configured
        # local-as or the router's own ASN.
        bgp_type = "external"
        if local_as:
            if local_as == peer_as:
                bgp_type = "internal"
        elif bgp_asn == peer_as:
            bgp_type = "internal"
        bgp_config[group_name] = {
            "apply_groups": [],  # on IOS will always be empty list!
            "description": description,
            "local_as": local_as,
            "type": bgp_type,
            "import_policy": import_policy,
            "export_policy": export_policy,
            "local_address": local_address,
            "multipath": multipath,
            "multihop_ttl": multihop_ttl,
            "remote_as": peer_as,
            "remove_private_as": remove_private_as,
            "prefix_limit": build_prefix_limit(
                afi, prefix_limit, prefix_percent, prefix_timeout
            ),
            "neighbors": bgp_group_neighbors.get(group_name, {}),
        }
    return bgp_config
|
Parse BGP config params into a dict
:param group='':
:param neighbor='':
|
def get_core(self):
    """
    Get an unsatisfiable core if the formula was previously
    unsatisfied; implicitly returns None otherwise.
    """
    if not self.maplesat:
        return None
    # Only a recorded UNSAT result (status == False) has a core.
    if self.status == False:
        return pysolvers.maplesat_core(self.maplesat)
    return None
|
Get an unsatisfiable core if the formula was previously
unsatisfied.
|
def _add_element(self, element, parent_node):
    """
    add an element (i.e. a unit/connective/discourse or modifier)
    to the docgraph.

    Creates a node for the XML element, attaches it to parent_node with a
    dominance edge, adds the element's text (tokenized or as a truncated
    label), then recurses into child elements.  Tail text after the
    element's closing tag is attributed to the parent node.
    """
    # Node id and layer set depend on the element kind.
    if element.tag == 'unit':
        element_node_id = element.attrib['id']+':'+element.attrib['type']
        node_layers = {self.ns, self.ns+':unit', self.ns+':'+element.attrib['type']}
    elif element.tag == 'connective':
        element_node_id = element.attrib['id']+':connective'
        node_layers = {self.ns, self.ns+':connective'}
    elif element.tag == 'discourse':
        element_node_id = 'discourse'
        node_layers = {self.ns}
    else:  # <modifier>
        # Modifiers have no id of their own; derive one from the parent.
        element_node_id = element.getparent().attrib['id']+':'+element.tag
        node_layers = {self.ns, self.ns+':modifier'}

    self.add_node(element_node_id, layers=node_layers)
    self.add_edge(parent_node, element_node_id, layers={self.ns},
                  edge_type=EdgeTypes.dominance_relation)

    if element.text:
        if self.tokenize:
            for token in element.text.split():
                self._add_token(token, element_node_id)
        else:
            # Non-tokenized mode: show a truncated text preview as label.
            element_text = sanitize_string(element.text)
            self.node[element_node_id].update(
                {'label': u"{0}: {1}...".format(element_node_id,
                                                element_text[:20])})

    for child_element in element.iterchildren():
        self._add_element(child_element, element_node_id)

    if element.tail:  # tokens _after_ the </element> closes
        if self.tokenize:
            for token in element.tail.split():
                self._add_token(token, parent_node)
        else:
            tail_text = sanitize_string(element.tail)
            self.node[parent_node].update(
                {'label': u"{0}: {1}...".format(parent_node,
                                                tail_text[:20])})
|
add an element (i.e. a unit/connective/discourse or modifier)
to the docgraph.
|
def loadRecords(self, records):
    """
    Loads the inputed records as children to this item.

    Sub-groups are created when grouping levels remain and the records are
    a RecordSet (or a plain dict); otherwise one record item is created per
    record.  Item creation is delegated to the tree widget's
    ``createGroupItem`` / ``createRecordItem`` factories when available,
    falling back to a direct class instantiation.

    :param      records | [<orb.Table>, ..] || {<str> sub: <variant>, .. }
    """
    self.setChildIndicatorPolicy(self.DontShowIndicatorWhenChildless)
    self._loaded = True

    if records is None:
        return

    # load sub-groups if desired
    if self._nextLevels and RecordSet.typecheck(records):
        level = self._nextLevels[0]
        sublevels = self._nextLevels[1:]
        records = records.grouped(level)
    elif RecordSet.typecheck(records):
        sublevels = None
        records = records.all()
    else:
        sublevels = None

    # load a child set of groups
    if type(records) == dict:
        try:
            generator = self.treeWidget().createGroupItem
            cls = None
        except AttributeError:
            generator = None
            cls = type(self)

        for subgroup, subrecords in records.items():
            if generator:
                generator(subgroup, subrecords, sublevels, self)
            elif cls:
                cls(self, subgroup, subrecords, sublevels)

    # load records
    else:
        try:
            generator = self.treeWidget().createRecordItem
            cls = None
        except AttributeError:
            generator = None
            cls = XOrbRecordItem
        # BUGFIX: the original code unconditionally re-assigned
        # ``cls = self.treeWidget().createRecordItem`` here, outside the
        # try/except.  That raised an uncaught AttributeError in exactly the
        # fallback case the except clause guards, and otherwise clobbered
        # the XOrbRecordItem fallback; the stray line has been removed.

        for record in records:
            if generator:
                generator(record, self)
            elif cls:
                cls(self, record)
|
Loads the inputed records as children to this item.
:param records | [<orb.Table>, ..] || {<str> sub: <variant>, .. }
|
def get_listing(path):
    """
    Return the sorted list of files and directories found in *path*.

    A ".." entry (parent-directory link) is prepended unless *path* is the
    current directory.
    """
    entries = os.listdir(path)
    if path == ".":
        return sorted(entries)
    return sorted(entries + ['..'])
|
Returns the list of files and directories in a path.
Prepends a ".." (parent directory link) if path is not the current dir.
|
def mins(self):
    """Return the minimum x, y and z values as a numpy array."""
    return np.array((self.x_min, self.y_min, self.z_min))
|
Returns the minimum values of x, y, z as a numpy array
|
def find_vulnerabilities(
    cfg_list,
    blackbox_mapping_file,
    sources_and_sinks_file,
    interactive=False,
    nosec_lines=None
):
    """Find vulnerabilities in a list of CFGs from a trigger_word_file.

    Args:
        cfg_list(list[CFG]): the list of CFGs to scan.
        blackbox_mapping_file(str): path to the JSON blackbox mapping file.
        sources_and_sinks_file(str): path to the sources-and-sinks definitions.
        interactive(bool): determines if we ask the user about blackbox functions not in the mapping file.
        nosec_lines(defaultdict(set)): line numbers marked ``# nosec``;
            defaults to an empty mapping built per call.

    Returns:
        A list of vulnerabilities.
    """
    # Build a fresh mapping per call instead of sharing one mutable default
    # across every invocation.
    if nosec_lines is None:
        nosec_lines = defaultdict(set)
    vulnerabilities = list()
    definitions = parse(sources_and_sinks_file)

    with open(blackbox_mapping_file) as infile:
        blackbox_mapping = json.load(infile)
    for cfg in cfg_list:
        find_vulnerabilities_in_cfg(
            cfg,
            definitions,
            Lattice(cfg.nodes),
            blackbox_mapping,
            vulnerabilities,
            interactive,
            nosec_lines
        )
    if interactive:
        # Persist any new blackbox answers gathered from the user.
        with open(blackbox_mapping_file, 'w') as outfile:
            json.dump(blackbox_mapping, outfile, indent=4)
    return vulnerabilities
|
Find vulnerabilities in a list of CFGs from a trigger_word_file.
Args:
cfg_list(list[CFG]): the list of CFGs to scan.
blackbox_mapping_file(str)
sources_and_sinks_file(str)
interactive(bool): determines if we ask the user about blackbox functions not in the mapping file.
Returns:
A list of vulnerabilities.
|
def raw(self, from_, to, body):
    """
    Send a raw MIME message.

    :param from_: sender address.
    :param to: enumerable of recipient addresses (a bare string is rejected).
    :param body: the raw MIME payload.
    :return: the decoded JSON response.
    """
    if isinstance(to, string_types):
        raise TypeError('"to" parameter must be enumerable')
    payload = {
        'from': from_,
        'to': to,
        'body': body,
    }
    response = self._session.post('{}/raw'.format(self._url), json=payload)
    return response.json()
|
Send a raw MIME message.
|
def absent(name, user=None, signal=None):
    '''
    Ensures that the named command is not running.

    name
        The pattern to match.

    user
        The user to which the process belongs

    signal
        Signal to send to the process(es).
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}

    if __opts__['test']:
        # Dry-run mode: report what would be killed, change nothing.
        running = __salt__['ps.pgrep'](name, user=user)
        ret['result'] = None
        if running:
            ret['comment'] = ('{0} processes will '
                              'be killed').format(len(running))
        else:
            ret['comment'] = 'No matching processes running'
        return ret

    # Forward an explicit signal only when one was requested.
    kill_kwargs = {'user': user, 'full': True}
    if signal:
        kill_kwargs['signal'] = signal
    status = __salt__['ps.pkill'](name, **kill_kwargs)

    ret['result'] = True
    if status:
        ret['comment'] = 'Killed {0} processes'.format(len(status['killed']))
        ret['changes'] = status
    else:
        ret['comment'] = 'No matching processes running'
    return ret
|
Ensures that the named command is not running.
name
The pattern to match.
user
The user to which the process belongs
signal
Signal to send to the process(es).
|
def multi_plot_time(DataArray, SubSampleN=1, units='s', xlim=None, ylim=None, LabelArray=None, show_fig=True):
    """
    plot the time trace for multiple data sets on the same axes.

    Parameters
    ----------
    DataArray : array-like
        array of DataObject instances for which to plot the time traces
    SubSampleN : int, optional
        Number of intervals between points to remove (to sub-sample data so
        that you effectively have lower sample rate to make plotting easier
        and quicker.
    units : str, optional
        unit of time to use on the x axis (the default is 's')
    xlim : array-like, optional
        2 element array specifying the lower and upper x limit for which to
        plot the time signal
    ylim : array-like, optional
        2 element array specifying the lower and upper y limit for which to
        plot the time signal
    LabelArray : array-like, optional
        array of labels for each data-set to be plotted; defaults to
        "DataSet 0", "DataSet 1", ...
    show_fig : bool, optional
        If True runs plt.show() before returning figure
        if False it just returns the figure object.
        (the default is True, it shows the figure)

    Returns
    -------
    fig : matplotlib.figure.Figure object
        The figure object created
    ax : matplotlib.axes.Axes object
        The axes object created
    """
    unit_prefix = units[:-1]  # removed the last char
    # None replaces the old mutable-default []; both mean "auto-label".
    if LabelArray is None or LabelArray == []:
        LabelArray = ["DataSet {}".format(i)
                      for i in _np.arange(0, len(DataArray), 1)]
    fig = _plt.figure(figsize=properties['default_fig_size'])
    ax = fig.add_subplot(111)

    for i, data in enumerate(DataArray):
        ax.plot(unit_conversion(data.time.get_array()[::SubSampleN], unit_prefix),
                data.voltage[::SubSampleN],
                alpha=0.8, label=LabelArray[i])
    ax.set_xlabel("time (s)")
    if xlim is not None:
        ax.set_xlim(xlim)
    if ylim is not None:
        ax.set_ylim(ylim)
    ax.grid(which="major")
    legend = ax.legend(loc="best", frameon=1)
    frame = legend.get_frame()
    frame.set_facecolor('white')
    frame.set_edgecolor('white')
    ax.set_ylabel("voltage (V)")
    if show_fig:
        _plt.show()
    return fig, ax
|
plot the time trace for multiple data sets on the same axes.
Parameters
----------
DataArray : array-like
array of DataObject instances for which to plot the PSDs
SubSampleN : int, optional
Number of intervals between points to remove (to sub-sample data so
that you effectively have lower sample rate to make plotting easier
and quicker.
xlim : array-like, optional
2 element array specifying the lower and upper x limit for which to
plot the time signal
LabelArray : array-like, optional
array of labels for each data-set to be plotted
show_fig : bool, optional
If True runs plt.show() before returning figure
if False it just returns the figure object.
(the default is True, it shows the figure)
Returns
-------
fig : matplotlib.figure.Figure object
The figure object created
ax : matplotlib.axes.Axes object
The axes object created
|
def set_membership(self, membership):
    """ Set membership.

    :param membership: iterable of community ids, one per vertex; it is
        materialised as a list before being handed to the C extension.
    """
    _c_leiden._MutableVertexPartition_set_membership(self._partition, list(membership))
    # Keep the Python-side membership view in sync with the C layer.
    self._update_internal_membership()
|
Set membership.
|
def infer_shape(self, *args, **kwargs):
    """Infers the shapes of all arguments and all outputs given the known shapes of
    some arguments.
    This function takes the known shapes of some arguments in either positional way
    or keyword argument way as input. It returns a tuple of `None` values
    if there is not enough information to deduce the missing shapes.
    Example
    -------
    >>> a = mx.sym.var('a')
    >>> b = mx.sym.var('b')
    >>> c = a + b
    >>> arg_shapes, out_shapes, aux_shapes = c.infer_shape(a=(3,3))
    >>> arg_shapes
    [(3L, 3L), (3L, 3L)]
    >>> out_shapes
    [(3L, 3L)]
    >>> aux_shapes
    []
    >>> c.infer_shape(a=(0,3)) # 0s in shape means unknown dimensions. So, returns None.
    (None, None, None)
    Inconsistencies in the known shapes will cause an error to be raised.
    See the following example:
    >>> data = mx.sym.Variable('data')
    >>> out = mx.sym.FullyConnected(data=data, name='fc1', num_hidden=1000)
    >>> out = mx.sym.Activation(data=out, act_type='relu')
    >>> out = mx.sym.FullyConnected(data=out, name='fc2', num_hidden=10)
    >>> weight_shape= (1, 100)
    >>> data_shape = (100, 100)
    >>> out.infer_shape(data=data_shape, fc1_weight=weight_shape)
    Error in operator fc1: Shape inconsistent, Provided=(1,100), inferred shape=(1000,100)
    Parameters
    ----------
    *args :
        Shape of arguments in a positional way.
        Unknown shape can be marked as None.
    **kwargs :
        Keyword arguments of the known shapes.
    Returns
    -------
    arg_shapes : list of tuple or None
        List of argument shapes.
        The order is same as the order of list_arguments().
    out_shapes : list of tuple or None
        List of output shapes.
        The order is same as the order of list_outputs().
    aux_shapes : list of tuple or None
        List of auxiliary state shapes.
        The order is same as the order of list_auxiliary_states().
    """
    try:
        # partial=False: ask for a fully-determined inference first.
        # res is (arg_shapes, out_shapes, aux_shapes).
        res = self._infer_shape_impl(False, *args, **kwargs)
        if res[1] is None:
            # Output shapes could not be deduced; rerun in partial mode to
            # discover which arguments still have unknown shapes and warn.
            arg_shapes, _, _ = self._infer_shape_impl(True, *args, **kwargs)
            arg_names = self.list_arguments()
            unknowns = []
            for name, shape in zip(arg_names, arg_shapes):
                # In numpy-compatible mode -1 marks an unknown dimension,
                # otherwise 0 does.
                if is_np_compat():
                    shape_is_none = not shape or -1 in shape
                else:
                    shape_is_none = not shape or 0 in shape
                if shape_is_none:
                    # Cap the report at 10 entries to keep the warning short.
                    if len(unknowns) >= 10:
                        unknowns.append('...')
                        break
                    unknowns.append('%s: %s' % (name, str(shape)))
            warnings.warn(
                "Cannot decide shape for the following arguments " +
                "(0s in shape means unknown dimensions). " +
                "Consider providing them as input:\n\t" +
                "\n\t".join(unknowns), stacklevel=2)
        return res
    except MXNetError:
        # Echo the offending arguments before re-raising, to ease debugging.
        print("infer_shape error. Arguments:")
        for i, arg in enumerate(args):
            print("  #%d: %s" % (i, arg))
        for k, v in kwargs.items():
            print("  %s: %s" % (k, v))
        raise
|
Infers the shapes of all arguments and all outputs given the known shapes of
some arguments.
This function takes the known shapes of some arguments in either positional way
or keyword argument way as input. It returns a tuple of `None` values
if there is not enough information to deduce the missing shapes.
Example
-------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> arg_shapes, out_shapes, aux_shapes = c.infer_shape(a=(3,3))
>>> arg_shapes
[(3L, 3L), (3L, 3L)]
>>> out_shapes
[(3L, 3L)]
>>> aux_shapes
[]
>>> c.infer_shape(a=(0,3)) # 0s in shape means unknown dimensions. So, returns None.
(None, None, None)
Inconsistencies in the known shapes will cause an error to be raised.
See the following example:
>>> data = mx.sym.Variable('data')
>>> out = mx.sym.FullyConnected(data=data, name='fc1', num_hidden=1000)
>>> out = mx.sym.Activation(data=out, act_type='relu')
>>> out = mx.sym.FullyConnected(data=out, name='fc2', num_hidden=10)
>>> weight_shape= (1, 100)
>>> data_shape = (100, 100)
>>> out.infer_shape(data=data_shape, fc1_weight=weight_shape)
Error in operator fc1: Shape inconsistent, Provided=(1,100), inferred shape=(1000,100)
Parameters
----------
*args :
Shape of arguments in a positional way.
Unknown shape can be marked as None.
**kwargs :
Keyword arguments of the known shapes.
Returns
-------
arg_shapes : list of tuple or None
List of argument shapes.
The order is same as the order of list_arguments().
out_shapes : list of tuple or None
List of output shapes.
The order is same as the order of list_outputs().
aux_shapes : list of tuple or None
List of auxiliary state shapes.
The order is same as the order of list_auxiliary_states().
|
def get_below_threshold(umi_quals, quality_encoding, quality_filter_threshold):
    '''test whether the umi_quals are below the threshold'''
    # Decode each quality character to a phred score using the encoding's
    # base offset, then flag every position below the threshold.
    offset = RANGES[quality_encoding][0]
    scores = [ord(ch) - offset for ch in umi_quals]
    return [score < quality_filter_threshold for score in scores]
|
test whether the umi_quals are below the threshold
|
def apply_patch(self, patch):
    """
    Applies given patch.

    :param patch: Patch.
    :type patch: Patch
    :return: Method success.
    :rtype: bool
    """
    # The history file stores one applied-patch uid per line.
    history_file = File(self.__history_file)
    patches_history = history_file.cache() and [line.strip() for line in history_file.content] or []

    if patch.uid not in patches_history:
        LOGGER.debug("> Applying '{0}' patch!".format(patch.name))
        if patch.apply():
            # Record the uid so this patch is not re-applied later.
            history_file.content = ["{0}\n".format(patch.uid)]
            history_file.append()
        else:
            raise umbra.exceptions.PatchApplyError("{0} | '{1}' patch failed to apply!".format(
                self.__class__.__name__, patch.path))
    else:
        LOGGER.debug("> '{0}' patch is already applied!".format(patch.name))
    return True
|
Applies given patch.
:param patch: Patch.
:type patch: Patch
:return: Method success.
:rtype: bool
|
def log_state(self, state):
    """Extract the value at each configured field path from *state* and
    pass all of them to ``self.log`` as positional arguments."""
    values = []
    for path in self.fields:
        node = state
        # Walk the nested mapping down to the requested leaf value.
        for key in path:
            node = node[key]
        values.append(node)
    self.log(*values)
|
Gathers the stats from self.trainer.stats and passes them into
self.log, as a list
|
def sort_trigger_set(triggers, exclude_previous=True, say=None):
    """Sort a group of triggers in optimal sorting order.

    The optimal sorting order is, briefly:

    * Atomic triggers (containing nothing but plain words and alternation
      groups) are on top, with triggers containing the most words coming
      first. Triggers with equal word counts are sorted by length, and then
      alphabetically if they have the same length.
    * Triggers containing optionals are sorted next, by word count like
      atomic triggers.
    * Triggers containing wildcards are next, with ``_`` (alphabetic)
      wildcards on top, then ``#`` (numeric) and finally ``*``.
    * At the bottom of the sorted list are triggers consisting of only a
      single wildcard, in the order: ``_``, ``#``, ``*``.

    Triggers that have ``{weight}`` tags are grouped together by weight
    value and sorted amongst themselves. Higher weighted groups are then
    ordered before lower weighted groups regardless of the normal sorting
    algorithm.

    Triggers that come from topics which inherit other topics are also
    sorted with higher priority than triggers from the inherited topics.

    Arguments:
        triggers ([]str): Array of triggers to sort.
        exclude_previous (bool): Create a sort buffer for 'previous' triggers.
        say (function): A reference to ``RiveScript._say()`` or provide your
            own function.
    """
    if say is None:
        say = lambda x: x

    # KEEP IN MIND: the `triggers` array is composed of array elements of the form
    # ["trigger text", pointer to trigger data]
    # So this code will use e.g. `trig[0]` when referring to the trigger text.

    # Create a list of trigger objects map.
    trigger_object_list = []
    for index, trig in enumerate(triggers):
        # Optionally skip triggers that respond to a 'previous' reply.
        if exclude_previous and trig[1]["previous"]:
            continue

        pattern = trig[0]  # Extract only the text of the trigger, with possible tag of inherit

        # See if it has a weight tag
        match, weight = re.search(RE.weight, trig[0]), 0
        if match:  # Value of match is not None if there is a match.
            weight = int(match.group(1))  # Get the weight from the tag ``{weight}``

        # See if it has an inherits tag.
        match = re.search(RE.inherit, pattern)
        if match:
            inherit = int(match.group(1))  # Get inherit value from the tag ``{inherit}``
            say("\t\t\tTrigger belongs to a topic which inherits other topics: level=" + str(inherit))
            triggers[index][0] = pattern = re.sub(RE.inherit, "", pattern)  # Remove the inherit tag if any
        else:
            inherit = sys.maxsize  # If not found any inherit, set it to the maximum value, to place it last in the sort

        trigger_object_list.append(TriggerObj(pattern, index, weight, inherit))

    # Priority order of sorting criteria:
    # weight, inherit, is_empty, star, pound, under, option, wordcount, len, alphabet
    sorted_list = sorted(trigger_object_list,
                         key=attrgetter('weight', 'inherit', 'is_empty', 'star', 'pound',
                                        'under', 'option', 'wordcount', 'len', 'alphabet'))

    # Map the sorted objects back onto the original trigger entries.
    return [triggers[item.index] for item in sorted_list]
|
Sort a group of triggers in optimal sorting order.
The optimal sorting order is, briefly:
* Atomic triggers (containing nothing but plain words and alternation
groups) are on top, with triggers containing the most words coming
first. Triggers with equal word counts are sorted by length, and then
alphabetically if they have the same length.
* Triggers containing optionals are sorted next, by word count like
atomic triggers.
* Triggers containing wildcards are next, with ``_`` (alphabetic)
wildcards on top, then ``#`` (numeric) and finally ``*``.
* At the bottom of the sorted list are triggers consisting of only a
single wildcard, in the order: ``_``, ``#``, ``*``.
Triggers that have ``{weight}`` tags are grouped together by weight
value and sorted amongst themselves. Higher weighted groups are then
ordered before lower weighted groups regardless of the normal sorting
algorithm.
Triggers that come from topics which inherit other topics are also
sorted with higher priority than triggers from the inherited topics.
Arguments:
triggers ([]str): Array of triggers to sort.
exclude_previous (bool): Create a sort buffer for 'previous' triggers.
say (function): A reference to ``RiveScript._say()`` or provide your
own function.
|
def cycle_app(parser, cmd, args):  # pragma: no cover
    """
    Generate a de Bruijn sequence of a given length.
    """
    parser.add_argument('-w', '--width', type=int, default=4,
                        help='the length of the cycled value')
    parser.add_argument('length', type=int,
                        help='the cycle length to generate')
    opts = parser.parse_args(args)
    return cycle(opts.length, opts.width)
|
Generate a de Bruijn sequence of a given length.
|
def get_env_dirs(self):
    """Return list of directories in env_root."""
    # os.walk yields (dirpath, dirnames, filenames); take the top-level dirs.
    _, dirnames, _ = next(os.walk(self.env_root))
    if '.git' in dirnames:
        dirnames.remove('.git')  # not relevant for any repo operations
    return dirnames
|
Return list of directories in env_root.
|
def build_pmid_exclusion_filter(pmids: Strings) -> EdgePredicate:
    """Fail for edges with citations whose references are one of the given PubMed identifiers.

    :param pmids: A PubMed identifier or list of PubMed identifiers to filter against
    :return: an edge predicate excluding the given identifier(s)
    :raises TypeError: if *pmids* is neither a string nor an iterable
    """
    # Single identifier: compare the citation reference directly.
    if isinstance(pmids, str):
        @edge_predicate
        def pmid_exclusion_filter(data: EdgeData) -> bool:
            """Fail for edges with PubMed citations matching the contained PubMed identifier.

            :return: If the edge has a PubMed citation with the contained PubMed identifier
            """
            return has_pubmed(data) and data[CITATION][CITATION_REFERENCE] != pmids

    # Multiple identifiers: materialise as a set for O(1) membership tests.
    elif isinstance(pmids, Iterable):
        pmids = set(pmids)

        @edge_predicate
        def pmid_exclusion_filter(data: EdgeData) -> bool:
            """Pass for edges with PubMed citations matching one of the contained PubMed identifiers.

            :return: If the edge has a PubMed citation with one of the contained PubMed identifiers
            """
            return has_pubmed(data) and data[CITATION][CITATION_REFERENCE] not in pmids

    else:
        raise TypeError

    return pmid_exclusion_filter
|
Fail for edges with citations whose references are one of the given PubMed identifiers.
:param pmids: A PubMed identifier or list of PubMed identifiers to filter against
|
def get_header(headers, name, default=None):
    """Return the value of header *name*.

    The *headers* argument must be a list of ``(name, value)`` tuples. If the
    header is found its associated value is returned, otherwise *default* is
    returned. Header names are matched case insensitively.
    """
    wanted = name.lower()
    # First match wins, mirroring a linear scan over the header list.
    return next(
        (value for key, value in headers if key.lower() == wanted),
        default,
    )
|
Return the value of header *name*.
The *headers* argument must be a list of ``(name, value)`` tuples. If the
header is found its associated value is returned, otherwise *default* is
returned. Header names are matched case insensitively.
|
def get_room_history(
    self,
    room_id,
    oldest=None,
    latest=None,
    inclusive=False,
    count=20,
    unreads=False,
    **kwargs
):
    """
    Get various history of specific channel/room

    :param room_id: id of the room to fetch history for.
    :param oldest: start of the requested time window (optional).
    :param latest: end of the requested time window; defaults to the
        current time, evaluated at call time.
    :param inclusive: whether the window boundaries are included.
    :param count: maximum number of messages to return.
    :param unreads: whether to include unread-counter information.
    :param kwargs: extra arguments forwarded to the API call.
    :return: the API call result.
    """
    # The previous default of ``datetime.now()`` was evaluated once at
    # import time, freezing "now"; resolve it on every call instead.
    if latest is None:
        latest = datetime.now()
    return GetRoomHistory(settings=self.settings, **kwargs).call(
        room_id=room_id,
        oldest=oldest,
        latest=latest,
        inclusive=inclusive,
        count=count,
        unreads=unreads,
        **kwargs
    )
|
Get various history of specific channel/room
:param room_id:
:param kwargs:
:return:
|
def _pquery(scheduler, data, ndata, ndim, leafsize,
            x, nx, d, i, k, eps, p, dub, ierr):
    """
    Function that parallelly queries the K-D tree based on chunks of data
    returned by the scheduler.

    Runs in a worker process: failures are reported by incrementing the
    shared ``ierr`` counter rather than raising across the process boundary.
    """
    try:
        # Re-wrap the shared-memory buffers as numpy views (no copies).
        _data = shmem_as_nparray(data).reshape((ndata, ndim))
        _x = shmem_as_nparray(x).reshape((nx, ndim))
        _d = shmem_as_nparray(d).reshape((nx, k))
        _i = shmem_as_nparray(i).reshape((nx, k))
        kdtree = cKDTree(_data, leafsize=leafsize)
        for s in scheduler:
            d_out, i_out = kdtree.query(_x[s, :], k=k, eps=eps, p=p,
                                        distance_upper_bound=dub)
            m_d = d_out.shape[0]
            m_i = i_out.shape[0]
            _d[s, :], _i[s, :] = d_out.reshape(m_d, 1), i_out.reshape(m_i, 1)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate; any other worker failure is signalled via ierr.
        ierr.value += 1
|
Function that parallelly queries the K-D tree based on chunks of data returned by the scheduler
|
def validate(self, value):
    """Validate the length of a list.

    :param value: List of values.

    :raises: :class:`halogen.exception.ValidationError` exception when length
        of the list is less than minimum or greater than maximum.
    """
    try:
        length = len(value)
    except TypeError:
        # Non-sized values are treated as having zero length.
        length = 0

    def resolve(bound):
        # Bounds may be supplied as callables; resolve them lazily.
        return bound() if callable(bound) else bound

    if self.min_length is not None:
        minimum = resolve(self.min_length)
        if length < minimum:
            raise exceptions.ValidationError(self.min_err.format(minimum))
    if self.max_length is not None:
        maximum = resolve(self.max_length)
        if length > maximum:
            raise exceptions.ValidationError(self.max_err.format(maximum))
|
Validate the length of a list.
:param value: List of values.
:raises: :class:`halogen.exception.ValidationError` exception when length of the list is less than
minimum or greater than maximum.
|
def is_base64(string):
    """Determines whether or not a string is likely to
    be base64 encoded binary nonsense.

    Digits-only strings and strings whose length is not a multiple of four
    yield ``False``; otherwise the result of matching the base64 alphabet
    (with up to two trailing ``=``) is returned.
    """
    if re.match('^[0-9]+$', string) or len(string) % 4 != 0:
        return False
    return re.match('^[A-Za-z0-9+/]+[=]{0,2}$', string)
|
Determines whether or not a string is likely to
be base64 encoded binary nonsense
|
def efficiency(self):
    """Calculate :ref:`pysynphot-formula-qtlam`.

    Returns
    -------
    ans : float
        Bandpass dimensionless efficiency.
    """
    # Remember the current wave units so they can be restored afterwards;
    # the integration itself is done in Angstroms.
    mywaveunits = self.waveunits.name
    self.convert('angstroms')
    wave = self.wave
    thru = self.throughput
    self.convert(mywaveunits)
    # Integrate throughput/wavelength over the band (trapezoid rule).
    ans = self.trapezoidIntegration(wave, thru/wave)
    return ans
|
Calculate :ref:`pysynphot-formula-qtlam`.
Returns
-------
ans : float
Bandpass dimensionless efficiency.
|
def _save_trace(self):
"""
Save current stack trace as formatted string.
"""
stack_trace = stack()
try:
self.trace = []
for frm in stack_trace[5:]: # eliminate our own overhead
self.trace.insert(0, frm[1:])
finally:
del stack_trace
|
Save current stack trace as formatted string.
|
async def fire(self, name, payload=None, *,
               dc=None, node=None, service=None, tag=None):
    """Fires a new event

    Parameters:
        name (str): Event name
        payload (Payload): Opaque data
        node (Filter): Regular expression to filter by node name
        service (Filter): Regular expression to filter by service
        tag (Filter): Regular expression to filter by service tags
        dc (str): Specify datacenter that will be used.
            Defaults to the agent's local datacenter.
    Returns:
        Object: where value is event ID

    The return body is like::

        {
            "ID": "b54fe110-7af5-cafc-d1fb-afc8ba432b1c",
            "Name": "deploy",
            "Payload": None,
            "NodeFilter": re.compile("node-\d+"),
            "ServiceFilter": "",
            "TagFilter": "",
            "Version": 1,
            "LTime": 0
        }

    The **ID** field uniquely identifies the newly fired event.
    """
    # Compiled-pattern filters are flattened to their string form for the
    # query string; None entries are simply omitted by the HTTP layer.
    params = {
        "dc": dc,
        "node": extract_pattern(node),
        "service": extract_pattern(service),
        "tag": extract_pattern(tag)
    }
    # Payload travels as an opaque binary body; a falsy payload sends none.
    payload = encode_value(payload) if payload else None
    response = await self._api.put(
        "/v1/event/fire", name,
        data=payload,
        params=params,
        headers={"Content-Type": "application/octet-stream"})
    result = format_event(response.body)
    return result
|
Fires a new event
Parameters:
name (str): Event name
payload (Payload): Opaque data
node (Filter): Regular expression to filter by node name
service (Filter): Regular expression to filter by service
tag (Filter): Regular expression to filter by service tags
dc (str): Specify datacenter that will be used.
Defaults to the agent's local datacenter.
Returns:
Object: where value is event ID
The return body is like::
{
"ID": "b54fe110-7af5-cafc-d1fb-afc8ba432b1c",
"Name": "deploy",
"Payload": None,
"NodeFilter": re.compile("node-\d+"),
"ServiceFilter": "",
"TagFilter": "",
"Version": 1,
"LTime": 0
}
The **ID** field uniquely identifies the newly fired event.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.