def filter_zone(self, data):
"""Check if a zone is private"""
if self.private_zone is not None:
if data['Config']['PrivateZone'] != self.str2bool(self.private_zone):
return False
if data['Name'] != '{0}.'.format(self.domain):
return False
return True
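# A quick sketch of how filter_zone evaluates a hosted-zone record
# (hypothetical values; assumes self.domain == 'example.com' and
# self.private_zone is None, so the privacy check is skipped):
#
#     self.filter_zone({'Config': {'PrivateZone': False}, 'Name': 'example.com.'})
#     # -> True (zone name matches the domain)
#     self.filter_zone({'Config': {'PrivateZone': False}, 'Name': 'other.com.'})
#     # -> False (zone name does not match)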
def get_userstable_data(self):
"""Get users with roles on the project.
Roles can be applied directly on the project or through a group.
"""
project_users = {}
project = self.tab_group.kwargs['project']
try:
# Get all global roles once to avoid multiple requests.
roles = api.keystone.role_list(self.request)
            # Update project_users with users that have a role directly on
            # the project (not through a group).
self._get_users_from_project(project_id=project.id,
roles=roles,
project_users=project_users)
            # Update project_users with users that have a role indirectly on
            # the project (through a group).
self._get_users_from_groups(project_id=project.id,
roles=roles,
project_users=project_users)
except Exception:
exceptions.handle(self.request,
_("Unable to display the users of this project.")
)
return project_users.values()
def _warn_silly_options(cls, args):
'''Print warnings about any options that may be silly.'''
if 'page-requisites' in args.span_hosts_allow \
and not args.page_requisites:
_logger.warning(
_('Spanning hosts is allowed for page requisites, '
'but the page requisites option is not on.')
)
if 'linked-pages' in args.span_hosts_allow \
and not args.recursive:
_logger.warning(
_('Spanning hosts is allowed for linked pages, '
'but the recursive option is not on.')
)
if args.warc_file and \
(args.http_proxy or args.https_proxy):
_logger.warning(_('WARC specifications do not handle proxies.'))
if (args.password or args.ftp_password or
args.http_password or args.proxy_password) and \
args.warc_file:
_logger.warning(
_('Your password is recorded in the WARC file.'))
def compare_variants_label_plot(data):
""" Return HTML for the Compare variants plot"""
keys = OrderedDict()
keys['total_called_variants_known'] = {'name': 'Known Variants'}
keys['total_called_variants_novel'] = {'name': 'Novel Variants'}
pconfig = {
'id': 'picard_variantCallingMetrics_variant_label',
'title': 'Picard: Variants Called',
'ylab': 'Counts of Variants',
}
return bargraph.plot(data, cats=keys, pconfig=pconfig)
def _compile_pfgen(self):
"""Post power flow computation for PV and SW"""
string = '"""\n'
string += 'system.dae.init_g()\n'
for gcall, pflow, shunt, series, stagen, call in zip(
self.gcall, self.pflow, self.shunt, self.series, self.stagen,
self.gcalls):
if gcall and pflow and (shunt or series) and not stagen:
string += call
string += '\n'
string += 'system.dae.reset_small_g()\n'
string += '"""'
self.pfgen = compile(eval(string), '', 'exec')
def DBObject(table_name, versioning=VersioningTypes.NONE):
"""Classes annotated with DBObject gain persistence methods."""
def wrapped(cls):
field_names = set()
all_fields = []
for name in dir(cls):
fld = getattr(cls, name)
if fld and isinstance(fld, Field):
fld.name = name
all_fields.append(fld)
field_names.add(name)
def add_missing_field(name, default='', insert_pos=None):
if name not in field_names:
fld = Field(default=default)
fld.name = name
all_fields.insert(
len(all_fields) if insert_pos is None else insert_pos,
fld
)
add_missing_field('id', insert_pos=0)
add_missing_field('_create_date')
add_missing_field('_last_update')
if versioning == VersioningTypes.DELTA_HISTORY:
add_missing_field('_version_hist', default=list)
# Things we count on as part of our processing
cls.__table_name__ = table_name
cls.__versioning__ = versioning
cls.__fields__ = all_fields
# Give them a ctor for free - but make sure we aren't clobbering one
if not ctor_overridable(cls):
raise TypeError(
'Classes with user-supplied __init__ should not be decorated '
'with DBObject. Use the setup method'
)
cls.__init__ = _auto_init
# Duck-type the class for our data methods
cls.get_table_name = classmethod(_get_table_name)
cls.get_id = _get_id
cls.set_id = _set_id
cls.to_data = _to_data
cls.from_data = classmethod(_from_data)
cls.index_names = classmethod(_index_names)
cls.indexes = _indexes
# Bonus methods they get for using gludb.simple
cls.get_version_hist = _get_version_hist
# Register with our abc since we actually implement all necessary
# functionality
Storable.register(cls)
# And now that we're registered, we can also get the database
# read/write functionality for free
cls = DatabaseEnabled(cls)
if versioning == VersioningTypes.DELTA_HISTORY:
cls.save = _delta_save(cls.save)
return cls
return wrapped
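# A minimal usage sketch for the DBObject decorator (assumes Field and
# VersioningTypes are importable from the same module; the decorated class
# must not define its own __init__, per the ctor_overridable check above):
#
#     @DBObject(table_name='Books')
#     class Book(object):
#         title = Field(default='')
#         pages = Field(default=0)
#
#     book = Book()
#     print(Book.get_table_name())  # -> 'Books'
#     data = book.to_data()         # serializable form incl. the auto 'id' field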
def get_distbins(start=100, bins=2500, ratio=1.01):
""" Get exponentially sized
"""
b = np.ones(bins, dtype="float64")
    b[0] = start
for i in range(1, bins):
b[i] = b[i - 1] * ratio
bins = np.around(b).astype(dtype="int")
binsizes = np.diff(bins)
return bins, binsizes
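# A quick check of the growth pattern (values follow directly from the code:
# each edge is the previous one times `ratio`, then rounded):
#
#     bins, binsizes = get_distbins(start=100, bins=5)
#     # bins     -> [100, 101, 102, 103, 104]  (rounded 100 * 1.01**i)
#     # binsizes -> [1, 1, 1, 1]
#
# With the default bins=2500 the largest edge is on the order of
# 100 * 1.01**2499, so bin widths grow exponentially with distance.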
def _rename_hstore_unique(self, old_table_name, new_table_name,
old_field, new_field, keys):
"""Renames an existing UNIQUE constraint for the specified
hstore keys."""
old_name = self._unique_constraint_name(
old_table_name, old_field, keys)
new_name = self._unique_constraint_name(
new_table_name, new_field, keys)
sql = self.sql_hstore_unique_rename.format(
old_name=self.quote_name(old_name),
new_name=self.quote_name(new_name)
)
self.execute(sql)
def recurse_tree(app, env, src, dest, excludes, followlinks, force, dryrun, private, suffix):
"""Look for every file in the directory tree and create the corresponding
ReST files.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param env: the jinja environment
:type env: :class:`jinja2.Environment`
:param src: the path to the python source files
:type src: :class:`str`
:param dest: the output directory
:type dest: :class:`str`
:param excludes: the paths to exclude
:type excludes: :class:`list`
:param followlinks: follow symbolic links
:type followlinks: :class:`bool`
:param force: overwrite existing files
:type force: :class:`bool`
:param dryrun: do not generate files
:type dryrun: :class:`bool`
:param private: include "_private" modules
:type private: :class:`bool`
:param suffix: the file extension
:type suffix: :class:`str`
"""
# check if the base directory is a package and get its name
if INITPY in os.listdir(src):
root_package = src.split(os.path.sep)[-1]
else:
# otherwise, the base is a directory with packages
root_package = None
toplevels = []
for root, subs, files in walk(src, followlinks=followlinks):
# document only Python module files (that aren't excluded)
py_files = sorted(f for f in files
if os.path.splitext(f)[1] in PY_SUFFIXES and # noqa: W504
not is_excluded(os.path.join(root, f), excludes))
is_pkg = INITPY in py_files
if is_pkg:
py_files.remove(INITPY)
py_files.insert(0, INITPY)
elif root != src:
# only accept non-package at toplevel
del subs[:]
continue
# remove hidden ('.') and private ('_') directories, as well as
# excluded dirs
if private:
exclude_prefixes = ('.',)
else:
exclude_prefixes = ('.', '_')
subs[:] = sorted(sub for sub in subs if not sub.startswith(exclude_prefixes) and not
is_excluded(os.path.join(root, sub), excludes))
if is_pkg:
# we are in a package with something to document
if subs or len(py_files) > 1 or not \
shall_skip(app, os.path.join(root, INITPY), private):
subpackage = root[len(src):].lstrip(os.path.sep).\
replace(os.path.sep, '.')
create_package_file(app, env, root_package, subpackage,
private, dest, suffix, dryrun, force)
toplevels.append(makename(root_package, subpackage))
else:
# if we are at the root level, we don't require it to be a package
assert root == src and root_package is None
for py_file in py_files:
if not shall_skip(app, os.path.join(src, py_file), private):
module = os.path.splitext(py_file)[0]
create_module_file(app, env, root_package, module, dest, suffix, dryrun, force)
toplevels.append(module)
return toplevels
def generate_nodes(self, topology):
"""
Generate a list of nodes for the new topology
:param dict topology: processed topology from
:py:meth:`process_topology`
        :return: a list of node dicts
:rtype: list
"""
nodes = []
devices = topology['devices']
hypervisors = topology['conf']
for device in sorted(devices):
hv_id = devices[device]['hv_id']
try:
tmp_node = Node(hypervisors[hv_id], self.port_id)
except IndexError:
tmp_node = Node({}, self.port_id)
# Start building the structure
tmp_node.node['properties']['name'] = device
tmp_node.node['id'] = devices[device]['node_id']
tmp_node.node['x'] = devices[device]['x']
tmp_node.node['y'] = devices[device]['y']
tmp_node.device_info['from'] = devices[device]['from']
tmp_node.device_info['type'] = devices[device]['type']
tmp_node.device_info['desc'] = devices[device]['desc']
if 'ext_conf' in devices[device]:
tmp_node.device_info['ext_conf'] = devices[device]['ext_conf']
# Node Label
tmp_node.node['label']['text'] = device
if 'hx' in devices[device] and 'hy' in devices[device]:
tmp_node.node['label']['x'] = devices[device]['hx']
tmp_node.node['label']['y'] = devices[device]['hy']
if 'model' in devices[device]:
tmp_node.device_info['model'] = devices[device]['model']
else:
tmp_node.device_info['model'] = ''
tmp_node.set_description()
tmp_node.set_type()
            # Now let's process the rest
for item in sorted(devices[device]):
tmp_node.add_device_items(item, devices[device])
if tmp_node.device_info['type'] == 'Router':
tmp_node.add_info_from_hv()
tmp_node.node['router_id'] = devices[device]['node_id']
tmp_node.calc_mb_ports()
for item in sorted(tmp_node.node['properties']):
if item.startswith('slot'):
tmp_node.add_slot_ports(item)
elif item.startswith('wic'):
tmp_node.add_wic_ports(item)
# Add default ports to 7200 and 3660
if tmp_node.device_info['model'] == 'c7200':
# tmp_node.add_slot_ports('slot0')
                    # C7200 doesn't have any ports by default
pass
elif tmp_node.device_info['model'] == 'c3600' \
and tmp_node.device_info['chassis'] == '3660':
tmp_node.node['properties']['slot0'] = 'Leopard-2FE'
# Calculate the router links
tmp_node.calc_device_links()
elif tmp_node.device_info['type'] == 'Cloud':
try:
tmp_node.calc_cloud_connection()
except RuntimeError as err:
print(err)
elif tmp_node.device_info['type'] == 'FrameRelaySwitch':
tmp_node.process_mappings()
elif tmp_node.device_info['type'] == 'VirtualBoxVM':
tmp_node.add_to_virtualbox()
tmp_node.add_vm_ethernet_ports()
tmp_node.calc_device_links()
elif tmp_node.device_info['type'] == 'QemuVM':
tmp_node.add_to_qemu()
tmp_node.set_qemu_symbol()
tmp_node.add_vm_ethernet_ports()
tmp_node.calc_device_links()
# Get the data we need back from the node instance
self.links.extend(tmp_node.links)
self.configs.extend(tmp_node.config)
self.port_id += tmp_node.get_nb_added_ports(self.port_id)
nodes.append(tmp_node.node)
return nodes
def list_symbols(self, partial_match=None):
"""
Returns all symbols in the library
Parameters
----------
        partial_match: None or str
            if not None, use this string to do a partial match on symbol names
Returns
-------
list of str
"""
symbols = self._symbols.distinct(SYMBOL)
if partial_match is None:
return symbols
return [x for x in symbols if partial_match in x]
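# For example, with symbols ['AAPL', 'AAPL.MX', 'GOOG'] stored in the
# library (hypothetical data):
#
#     lib.list_symbols()                       # -> ['AAPL', 'AAPL.MX', 'GOOG']
#     lib.list_symbols(partial_match='AAPL')   # -> ['AAPL', 'AAPL.MX']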
def apply_layout(self, child, layout):
""" Apply the flexbox specific layout.
"""
params = self.create_layout_params(child, layout)
w = child.widget
if w:
# padding
if layout.get('padding'):
dp = self.dp
l, t, r, b = layout['padding']
w.setPadding(int(l*dp), int(t*dp),
int(r*dp), int(b*dp))
child.layout_params = params
def update( # noqa: C901
self, alert_condition_nrql_id, policy_id, name=None, threshold_type=None, query=None,
since_value=None, terms=None, expected_groups=None, value_function=None,
runbook_url=None, ignore_overlap=None, enabled=True):
"""
Updates any of the optional parameters of the alert condition nrql
:type alert_condition_nrql_id: int
:param alert_condition_nrql_id: Alerts condition NRQL id to update
:type policy_id: int
:param policy_id: Alert policy id where target alert condition belongs to
:type name: str
:param name: The name of the alert
:type threshold_type: str
        :param threshold_type: The type of the condition; can be static or outlier
:type query: str
:param query: nrql query for the alerts
:type since_value: str
:param since_value: since value for the alert
:type terms: list[hash]
:param terms: list of hashes containing threshold config for the alert
:type expected_groups: int
:param expected_groups: expected groups setting for outlier alerts
:type value_function: str
        :param value_function: value function for static alerts
:type runbook_url: str
:param runbook_url: The url of the runbook
:type ignore_overlap: bool
:param ignore_overlap: Whether to ignore overlaps for outlier alerts
:type enabled: bool
:param enabled: Whether to enable that alert condition
:rtype: dict
:return: The JSON response of the API
:raises: This will raise a
            :class:`NoEntityException<newrelic_api.exceptions.NoEntityException>`
if target alert condition is not included in target policy
:raises: This will raise a
:class:`ConfigurationException<newrelic_api.exceptions.ConfigurationException>`
            if the condition type is static but no value_function is passed, or the
            type is outlier but expected_groups or ignore_overlap is not passed
::
{
"nrql_condition": {
"name": "string",
"runbook_url": "string",
"enabled": "boolean",
"expected_groups": "integer",
"ignore_overlap": "boolean",
"value_function": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"nrql": {
"query": "string",
"since_value": "string"
}
}
}
"""
conditions_nrql_dict = self.list(policy_id)
target_condition_nrql = None
for condition in conditions_nrql_dict['nrql_conditions']:
if int(condition['id']) == alert_condition_nrql_id:
target_condition_nrql = condition
break
if target_condition_nrql is None:
raise NoEntityException(
'Target alert condition nrql is not included in that policy.'
'policy_id: {}, alert_condition_nrql_id {}'.format(
policy_id,
alert_condition_nrql_id
)
)
data = {
'nrql_condition': {
'type': threshold_type or target_condition_nrql['type'],
'enabled': target_condition_nrql['enabled'],
'name': name or target_condition_nrql['name'],
'terms': terms or target_condition_nrql['terms'],
'nrql': {
'query': query or target_condition_nrql['nrql']['query'],
'since_value': since_value or target_condition_nrql['nrql']['since_value'],
}
}
}
if enabled is not None:
data['nrql_condition']['enabled'] = str(enabled).lower()
if runbook_url is not None:
data['nrql_condition']['runbook_url'] = runbook_url
elif 'runbook_url' in target_condition_nrql:
data['nrql_condition']['runbook_url'] = target_condition_nrql['runbook_url']
if expected_groups is not None:
data['nrql_condition']['expected_groups'] = expected_groups
elif 'expected_groups' in target_condition_nrql:
data['nrql_condition']['expected_groups'] = target_condition_nrql['expected_groups']
if ignore_overlap is not None:
data['nrql_condition']['ignore_overlap'] = ignore_overlap
elif 'ignore_overlap' in target_condition_nrql:
data['nrql_condition']['ignore_overlap'] = target_condition_nrql['ignore_overlap']
if value_function is not None:
data['nrql_condition']['value_function'] = value_function
elif 'value_function' in target_condition_nrql:
data['nrql_condition']['value_function'] = target_condition_nrql['value_function']
if data['nrql_condition']['type'] == 'static':
if 'value_function' not in data['nrql_condition']:
raise ConfigurationException(
'Alert is set as static but no value_function config specified'
)
data['nrql_condition'].pop('expected_groups', None)
data['nrql_condition'].pop('ignore_overlap', None)
elif data['nrql_condition']['type'] == 'outlier':
if 'expected_groups' not in data['nrql_condition']:
raise ConfigurationException(
'Alert is set as outlier but expected_groups config is not specified'
)
if 'ignore_overlap' not in data['nrql_condition']:
raise ConfigurationException(
'Alert is set as outlier but ignore_overlap config is not specified'
)
data['nrql_condition'].pop('value_function', None)
return self._put(
url='{0}alerts_nrql_conditions/{1}.json'.format(self.URL, alert_condition_nrql_id),
headers=self.headers,
data=data
)
def tradingStatusDF(symbol=None, token='', version=''):
'''The Trading status message is used to indicate the current trading status of a security.
For IEX-listed securities, IEX acts as the primary market and has the authority to institute a trading halt or trading pause in a security due to news dissemination or regulatory reasons.
For non-IEX-listed securities, IEX abides by any regulatory trading halts and trading pauses instituted by the primary or listing market, as applicable.
IEX disseminates a full pre-market spin of Trading status messages indicating the trading status of all securities.
In the spin, IEX will send out a Trading status message with “T” (Trading) for all securities that are eligible for trading at the start of the Pre-Market Session.
If a security is absent from the dissemination, firms should assume that the security is being treated as operationally halted in the IEX Trading System.
After the pre-market spin, IEX will use the Trading status message to relay changes in trading status for an individual security. Messages will be sent when a security is:
Halted
Paused*
Released into an Order Acceptance Period*
Released for trading
*The paused and released into an Order Acceptance Period status will be disseminated for IEX-listed securities only. Trading pauses on non-IEX-listed securities will be treated simply as a halt.
https://iexcloud.io/docs/api/#deep-trading-status
Args:
        symbol (string): Ticker to request
        token (string): Access token
        version (string): API version
Returns:
DataFrame: result
'''
x = tradingStatus(symbol, token, version)
data = []
for key in x:
d = x[key]
d['symbol'] = key
data.append(d)
df = pd.DataFrame(data)
_toDatetime(df)
return df
def _s3_intermediate_upload(file_obj, file_name, fields, session, callback_url):
"""Uploads a single file-like object to an intermediate S3 bucket which One Codex can pull from
after receiving a callback.
Parameters
----------
file_obj : `FASTXInterleave`, `FilePassthru`, or a file-like object
A wrapper around a pair of fastx files (`FASTXInterleave`) or a single fastx file. In the
case of paired files, they will be interleaved and uploaded uncompressed. In the case of a
single file, it will simply be passed through (`FilePassthru`) to One Codex, compressed
or otherwise. If a file-like object is given, its mime-type will be sent as 'text/plain'.
file_name : `string`
The file_name you wish to associate this fastx file with at One Codex.
    fields : `dict`
        Additional data fields to include as JSON in the POST.
    session : `requests.Session`-like object
        Session used to POST the confirmation callback to One Codex.
    callback_url : `string`
        API callback at One Codex which will trigger a pull from this S3 bucket.
Raises
------
UploadException
In the case of a fatal exception during an upload. Note we rely on boto3 to handle its own retry logic.
Returns
-------
`dict` : JSON results from internal confirm import callback URL
"""
import boto3
from boto3.s3.transfer import TransferConfig
from boto3.exceptions import S3UploadFailedError
# actually do the upload
client = boto3.client(
"s3",
aws_access_key_id=fields["upload_aws_access_key_id"],
aws_secret_access_key=fields["upload_aws_secret_access_key"],
)
# if boto uses threads, ctrl+c won't work
config = TransferConfig(use_threads=False)
# let boto3 update our progressbar rather than our FASTX wrappers, if applicable
boto_kwargs = {}
if hasattr(file_obj, "progressbar"):
boto_kwargs["Callback"] = file_obj.progressbar.update
file_obj.progressbar = None
try:
client.upload_fileobj(
file_obj,
fields["s3_bucket"],
fields["file_id"],
ExtraArgs={"ServerSideEncryption": "AES256"},
Config=config,
**boto_kwargs
)
except S3UploadFailedError:
raise_connectivity_error(file_name)
# issue a callback
try:
resp = session.post(
callback_url,
json={
"s3_path": "s3://{}/{}".format(fields["s3_bucket"], fields["file_id"]),
"filename": file_name,
"import_as_document": fields.get("import_as_document", False),
},
)
except requests.exceptions.ConnectionError:
raise_connectivity_error(file_name)
if resp.status_code != 200:
raise_connectivity_error(file_name)
try:
return resp.json()
except ValueError:
return {}
def predict(self, x_test):
"""Returns the prediction of the model on the given test data.
Args:
x_test : array-like, shape = (n_samples, sent_length)
Test samples.
Returns:
            y_pred : array-like, shape = (n_samples, sent_length)
                Prediction labels for x_test.
"""
if self.model:
lengths = map(len, x_test)
x_test = self.p.transform(x_test)
y_pred = self.model.predict(x_test)
y_pred = self.p.inverse_transform(y_pred, lengths)
return y_pred
else:
raise OSError('Could not find a model. Call load(dir_path).')
def isSameStatementList(stmListA: List[HdlStatement],
stmListB: List[HdlStatement]) -> bool:
"""
:return: True if two lists of HdlStatement instances are same
"""
if stmListA is stmListB:
return True
    if stmListA is None or stmListB is None:
        return False
    if len(stmListA) != len(stmListB):
        # zip() would silently truncate to the shorter list
        return False
    for a, b in zip(stmListA, stmListB):
if not a.isSame(b):
return False
return True
def _copy_across(self, rel_path, cb=None):
"""If the upstream doesn't have the file, get it from the alternate and store it in the upstream"""
from . import copy_file_or_flo
if not self.upstream.has(rel_path):
if not self.alternate.has(rel_path):
return None
source = self.alternate.get_stream(rel_path)
sink = self.upstream.put_stream(rel_path, metadata=source.meta)
try:
copy_file_or_flo(source, sink, cb=cb)
except:
self.upstream.remove(rel_path, propagate=True)
raise
source.close()
sink.close()
def distance_to(self, other_catchment):
"""
Returns the distance between the centroids of two catchments in kilometers.
:param other_catchment: Catchment to calculate distance to
:type other_catchment: :class:`.Catchment`
:return: Distance between the catchments in km.
:rtype: float
"""
try:
if self.country == other_catchment.country:
try:
return 0.001 * hypot(self.descriptors.centroid_ngr.x - other_catchment.descriptors.centroid_ngr.x,
self.descriptors.centroid_ngr.y - other_catchment.descriptors.centroid_ngr.y)
except TypeError:
# In case no centroid available, just return infinity which is helpful in most cases
return float('+inf')
else:
# If the catchments are in a different country (e.g. `ni` versus `gb`) then set distance to infinity.
return float('+inf')
except (TypeError, KeyError):
raise InsufficientDataError("Catchment `descriptors` attribute must be set first.")
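# Worked example of the centroid arithmetic above: two catchments in the
# same country whose centroid national-grid references differ by 3000 m in
# x and 4000 m in y are 0.001 * hypot(3000, 4000) = 5.0 km apart.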
def to_dict(self):
"""
Convert the object into a json serializable dictionary.
Note: It uses the private method _save_to_input_dict of the parent.
:return dict: json serializable dictionary containing the needed information to instantiate the object
"""
input_dict = super(Add, self)._save_to_input_dict()
input_dict["class"] = str("GPy.kern.Add")
return input_dict
def _metahash(self):
"""Checksum hash of all the inputs to this rule.
Output is invalid until collect_srcs and collect_deps have been run.
In theory, if this hash doesn't change, the outputs won't change
either, which makes it useful for caching.
"""
# BE CAREFUL when overriding/extending this method. You want to copy
# the if(cached)/return(cached) part, then call this method, then at
# the end update the cached metahash. Just like this code, basically,
# only you call the method from the base class in the middle of it. If
# you get this wrong it could result in butcher not noticing changed
# inputs between runs, which could cause really nasty problems.
# TODO(ben): the above warning seems avoidable with better memoization
if self._cached_metahash:
return self._cached_metahash
# If you are extending this function in a subclass,
# here is where you do:
# BaseBuilder._metahash(self)
log.debug('[%s]: Metahash input: %s', self.address,
unicode(self.address))
mhash = util.hash_str(unicode(self.address))
log.debug('[%s]: Metahash input: %s', self.address, self.rule.params)
mhash = util.hash_str(str(self.rule.params), hasher=mhash)
for src in self.rule.source_files or []:
log.debug('[%s]: Metahash input: %s', self.address, src)
mhash = util.hash_str(src, hasher=mhash)
mhash = util.hash_file(self.srcs_map[src], hasher=mhash)
for dep in self.rule.composed_deps() or []:
dep_rule = self.rule.subgraph.node[dep]['target_obj']
for item in dep_rule.output_files:
log.debug('[%s]: Metahash input: %s', self.address, item)
item_path = os.path.join(self.buildroot, item)
mhash = util.hash_str(item, hasher=mhash)
mhash = util.hash_file(item_path, hasher=mhash)
self._cached_metahash = mhash
return mhash
def unescape(msg, extra_format_dict={}):
"""Takes a girc-escaped message and returns a raw IRC message"""
new_msg = ''
extra_format_dict.update(format_dict)
while len(msg):
char = msg[0]
msg = msg[1:]
if char == escape_character:
escape_key = msg[0]
msg = msg[1:]
# we handle this character separately, otherwise we mess up and
# double escape characters while escaping and unescaping
if escape_key == escape_character:
new_msg += escape_character
elif escape_key == '{':
buf = ''
new_char = ''
while True:
new_char = msg[0]
msg = msg[1:]
if new_char == '}':
break
else:
buf += new_char
new_msg += _get_from_format_dict(extra_format_dict, buf)
else:
new_msg += _get_from_format_dict(extra_format_dict, escape_key)
if escape_key == 'c':
fill_last = len(msg) and msg[0] in digits
colours, msg = extract_girc_colours(msg, fill_last)
new_msg += colours
else:
new_msg += char
return new_msg
async def on_step(self, iteration):
        """
        - depots when low on remaining supply
        - townhalls contains commandcenter and orbitalcommand
        - self.units(TYPE).not_ready.amount selects all units of that type, filters incomplete units, and then counts the amount
        - self.already_pending(TYPE) counts how many units are queued - but in this bot below you will find a slightly different already_pending function which only counts units queued (but not in construction)
        """
        self.combinedActions = []
if self.supply_left < 5 and self.townhalls.exists and self.supply_used >= 14 and self.can_afford(UnitTypeId.SUPPLYDEPOT) and self.units(UnitTypeId.SUPPLYDEPOT).not_ready.amount + self.already_pending(UnitTypeId.SUPPLYDEPOT) < 1:
ws = self.workers.gathering
if ws: # if workers found
w = ws.furthest_to(ws.center)
loc = await self.find_placement(UnitTypeId.SUPPLYDEPOT, w.position, placement_step=3)
if loc: # if a placement location was found
# build exactly on that location
self.combinedActions.append(w.build(UnitTypeId.SUPPLYDEPOT, loc))
# lower all depots when finished
for depot in self.units(UnitTypeId.SUPPLYDEPOT).ready:
self.combinedActions.append(depot(AbilityId.MORPH_SUPPLYDEPOT_LOWER))
# morph commandcenter to orbitalcommand
if self.units(UnitTypeId.BARRACKS).ready.exists and self.can_afford(UnitTypeId.ORBITALCOMMAND): # check if orbital is affordable
for cc in self.units(UnitTypeId.COMMANDCENTER).idle: # .idle filters idle command centers
self.combinedActions.append(cc(AbilityId.UPGRADETOORBITAL_ORBITALCOMMAND))
# expand if we can afford and have less than 2 bases
if 1 <= self.townhalls.amount < 2 and self.already_pending(UnitTypeId.COMMANDCENTER) == 0 and self.can_afford(UnitTypeId.COMMANDCENTER):
# get_next_expansion returns the center of the mineral fields of the next nearby expansion
next_expo = await self.get_next_expansion()
# from the center of mineral fields, we need to find a valid place to place the command center
location = await self.find_placement(UnitTypeId.COMMANDCENTER, next_expo, placement_step=1)
if location:
# now we "select" (or choose) the nearest worker to that found location
w = self.select_build_worker(location)
if w and self.can_afford(UnitTypeId.COMMANDCENTER):
# the worker will be commanded to build the command center
error = await self.do(w.build(UnitTypeId.COMMANDCENTER, location))
if error:
print(error)
# make up to 4 barracks if we can afford them
# check if we have a supply depot (tech requirement) before trying to make barracks
if self.units.of_type([UnitTypeId.SUPPLYDEPOT, UnitTypeId.SUPPLYDEPOTLOWERED, UnitTypeId.SUPPLYDEPOTDROP]).ready.exists and self.units(UnitTypeId.BARRACKS).amount + self.already_pending(UnitTypeId.BARRACKS) < 4 and self.can_afford(UnitTypeId.BARRACKS):
ws = self.workers.gathering
if ws and self.townhalls.exists: # need to check if townhalls.amount > 0 because placement is based on townhall location
w = ws.furthest_to(ws.center)
# I chose placement_step 4 here so there will be gaps between barracks hopefully
loc = await self.find_placement(UnitTypeId.BARRACKS, self.townhalls.random.position, placement_step=4)
if loc:
self.combinedActions.append(w.build(UnitTypeId.BARRACKS, loc))
# build refineries (on nearby vespene) when at least one barracks is in construction
if self.units(UnitTypeId.BARRACKS).amount > 0 and self.already_pending(UnitTypeId.REFINERY) < 1:
for th in self.townhalls:
vgs = self.state.vespene_geyser.closer_than(10, th)
for vg in vgs:
if await self.can_place(UnitTypeId.REFINERY, vg.position) and self.can_afford(UnitTypeId.REFINERY):
ws = self.workers.gathering
if ws.exists: # same condition as above
w = ws.closest_to(vg)
# caution: the target for the refinery has to be the vespene geyser, not its position!
self.combinedActions.append(w.build(UnitTypeId.REFINERY, vg))
# make scvs until 18, usually you only need 1:1 mineral:gas ratio for reapers, but if you don't lose any then you will need additional depots (mule income should take care of that)
        # stop scv production when barracks is complete but we still have a command center (prioritize morphing to orbital command)
if self.can_afford(UnitTypeId.SCV) and self.supply_left > 0 and self.units(UnitTypeId.SCV).amount < 18 and (self.units(UnitTypeId.BARRACKS).ready.amount < 1 and self.units(UnitTypeId.COMMANDCENTER).idle.exists or self.units(UnitTypeId.ORBITALCOMMAND).idle.exists):
for th in self.townhalls.idle:
self.combinedActions.append(th.train(UnitTypeId.SCV))
# make reapers if we can afford them and we have supply remaining
if self.can_afford(UnitTypeId.REAPER) and self.supply_left > 0:
# loop through all idle barracks
for rax in self.units(UnitTypeId.BARRACKS).idle:
self.combinedActions.append(rax.train(UnitTypeId.REAPER))
# send workers to mine from gas
if iteration % 25 == 0:
await self.distribute_workers()
# reaper micro
for r in self.units(UnitTypeId.REAPER):
# move to range 15 of closest unit if reaper is below 20 hp and not regenerating
enemyThreatsClose = self.known_enemy_units.filter(lambda x: x.can_attack_ground).closer_than(15, r) # threats that can attack the reaper
if r.health_percentage < 2/5 and enemyThreatsClose.exists:
retreatPoints = self.neighbors8(r.position, distance=2) | self.neighbors8(r.position, distance=4)
# filter points that are pathable
retreatPoints = {x for x in retreatPoints if self.inPathingGrid(x)}
if retreatPoints:
closestEnemy = enemyThreatsClose.closest_to(r)
retreatPoint = closestEnemy.position.furthest(retreatPoints)
self.combinedActions.append(r.move(retreatPoint))
continue # continue for loop, dont execute any of the following
# reaper is ready to attack, shoot nearest ground unit
enemyGroundUnits = self.known_enemy_units.not_flying.closer_than(5, r) # hardcoded attackrange of 5
if r.weapon_cooldown == 0 and enemyGroundUnits.exists:
enemyGroundUnits = enemyGroundUnits.sorted(lambda x: x.distance_to(r))
closestEnemy = enemyGroundUnits[0]
self.combinedActions.append(r.attack(closestEnemy))
continue # continue for loop, dont execute any of the following
# attack is on cooldown, check if grenade is on cooldown, if not then throw it to furthest enemy in range 5
reaperGrenadeRange = self._game_data.abilities[AbilityId.KD8CHARGE_KD8CHARGE.value]._proto.cast_range
enemyGroundUnitsInGrenadeRange = self.known_enemy_units.not_structure.not_flying.exclude_type([UnitTypeId.LARVA, UnitTypeId.EGG]).closer_than(reaperGrenadeRange, r)
if enemyGroundUnitsInGrenadeRange.exists and (r.is_attacking or r.is_moving):
# if AbilityId.KD8CHARGE_KD8CHARGE in abilities, we check that to see if the reaper grenade is off cooldown
abilities = (await self.get_available_abilities(r))
enemyGroundUnitsInGrenadeRange = enemyGroundUnitsInGrenadeRange.sorted(lambda x: x.distance_to(r), reverse=True)
furthestEnemy = None
for enemy in enemyGroundUnitsInGrenadeRange:
if await self.can_cast(r, AbilityId.KD8CHARGE_KD8CHARGE, enemy, cached_abilities_of_unit=abilities):
furthestEnemy = enemy
break
if furthestEnemy:
self.combinedActions.append(r(AbilityId.KD8CHARGE_KD8CHARGE, furthestEnemy))
continue # continue for loop, don't execute any of the following
# move towards to max unit range if enemy is closer than 4
enemyThreatsVeryClose = self.known_enemy_units.filter(lambda x: x.can_attack_ground).closer_than(4.5, r) # hardcoded attackrange minus 0.5
# threats that can attack the reaper
if r.weapon_cooldown != 0 and enemyThreatsVeryClose.exists:
retreatPoints = self.neighbors8(r.position, distance=2) | self.neighbors8(r.position, distance=4)
# filter points that are pathable by a reaper
retreatPoints = {x for x in retreatPoints if self.inPathingGrid(x)}
if retreatPoints:
closestEnemy = enemyThreatsVeryClose.closest_to(r)
retreatPoint = max(retreatPoints, key=lambda x: x.distance_to(closestEnemy) - x.distance_to(r))
# retreatPoint = closestEnemy.position.furthest(retreatPoints)
self.combinedActions.append(r.move(retreatPoint))
continue # continue for loop, don't execute any of the following
# move to nearest enemy ground unit/building because no enemy unit is closer than 5
allEnemyGroundUnits = self.known_enemy_units.not_flying
if allEnemyGroundUnits.exists:
closestEnemy = allEnemyGroundUnits.closest_to(r)
self.combinedActions.append(r.move(closestEnemy))
continue # continue for loop, don't execute any of the following
# move to random enemy start location if no enemy buildings have been seen
self.combinedActions.append(r.move(random.choice(self.enemy_start_locations)))
        # manage idle scvs; this would be taken care of by distribute_workers as well
if self.townhalls.exists:
for w in self.workers.idle:
th = self.townhalls.closest_to(w)
mfs = self.state.mineral_field.closer_than(10, th)
if mfs:
mf = mfs.closest_to(w)
self.combinedActions.append(w.gather(mf))
# manage orbital energy and drop mules
for oc in self.units(UnitTypeId.ORBITALCOMMAND).filter(lambda x: x.energy >= 50):
mfs = self.state.mineral_field.closer_than(10, oc)
if mfs:
mf = max(mfs, key=lambda x:x.mineral_contents)
self.combinedActions.append(oc(AbilityId.CALLDOWNMULE_CALLDOWNMULE, mf))
# when running out of mineral fields near command center, fly to next base with minerals
        # execute actions
await self.do_actions(self.combinedActions)
def realpred(cls, lemma, pos, sense=None):
"""Instantiate a Pred from its components."""
string_tokens = [lemma]
if pos is not None:
string_tokens.append(pos)
if sense is not None:
sense = str(sense)
string_tokens.append(sense)
predstr = '_'.join([''] + string_tokens + ['rel'])
return cls(Pred.REALPRED, lemma, pos, sense, predstr)
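# For example, Pred.realpred('dog', 'n', 1) builds string_tokens
# ['dog', 'n', '1'] and therefore predstr = '_dog_n_1_rel' before
# constructing the Pred.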
def step(self):
# type: () -> bool
"""
Decreases the internal counter. Raises an error if the counter goes
below 0
:return: True if this step was the final one, else False
:raise ValueError: The counter has gone below 0
"""
with self.__lock:
self.__value -= 1
if self.__value == 0:
# All done
self.__event.set()
return True
elif self.__value < 0:
# Gone too far
raise ValueError("The counter has gone below 0")
return False
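# A usage sketch, assuming the enclosing class is a countdown latch
# (hypothetical name and constructor) built around a threading.Event:
#
#     latch = CountdownLatch(2)
#     latch.step()   # -> False (counter is now 1)
#     latch.step()   # -> True  (counter hit 0, internal event is set)
#     latch.step()   # raises ValueError (counter went below 0)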
def frames(self):
"""Retrieve the next frame from the image directory and convert it to a ColorImage,
a DepthImage, and an IrImage.
        Returns
        -------
        :obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, None
            The ColorImage and DepthImage of the current frame; the IrImage
            slot is always None for this device.
Raises
------
RuntimeError
If the stream is not running or if all images in the
directory have been used.
"""
if not self._running:
            raise RuntimeError('Device pointing to %s not running. Cannot read frames' % (self._path_to_images))
if self._im_index >= self._num_images:
raise RuntimeError('Device is out of images')
# read images
color_filename = os.path.join(self._path_to_images, 'color_%d%s' %(self._im_index, self._color_ext))
color_im = ColorImage.open(color_filename, frame=self._frame)
depth_filename = os.path.join(self._path_to_images, 'depth_%d.npy' %(self._im_index))
depth_im = DepthImage.open(depth_filename, frame=self._frame)
self._im_index = (self._im_index + 1) % self._num_images
return color_im, depth_im, None
def html(data, options, center=False, save=False,
save_name=None, save_path='saved', dated=True, notebook=True):
"""
    save=True will create a standalone HTML doc under localdir/saved (creating the folder 'saved' if necessary).
    center=True will center the plot in the output cell, otherwise left-aligned by default.
"""
def json_dumps(obj):
return pd.io.json.dumps(obj)
_options = dict(options)
_data = data
def clean_function_str(key, n=15):
"""
Remove new line characters in the first say 15 characters
of the value corresponding to key in dictionary _options.
This value is a string that potentially starts with
        'function' or ' function' or '\nfunction' or '\r\nfunction', etc.
This cleaning makes js string parsing easier to recognize functions.
"""
if key in _options.keys():
new_str = _options[key][:n].replace('\n', '')
new_str = new_str.replace('\r', '')
new_str = new_str + _options[key][n:]
_options[key] = new_str
clean_function_str('tooltip')
clean_function_str('xValueLabel')
clean_function_str('yValueLabel')
clean_function_str('zValueLabel')
chart_id = str(uuid.uuid4()).replace('-', '_')
js_init = """
var options = %s;
%s
window.opt = jQuery.extend(true, {}, options);
console.log('vis3d options accessible as opt');
var data = %s;
%s
window.dta = jQuery.extend(true, [], data);
console.log('vis3d data accessible as dta');
""" % (json_dumps(_options), JS_JSON_PARSE_OPTION,
json_dumps(_data), JS_JSON_PARSE_DATA)
js_call = """
var container = document.getElementById('%s');
console.log(options);
console.log(data);
graph3d = new vis.Graph3d(container, data, options);
""" % (chart_id)
if center:
if not 'width' in _options.keys():
# Explicitly sets width to default value
_options['width'] = '750px'
html = """
<div id="%s" style="margin: auto; width: %s;"></div>
""" % (chart_id, _options['width'])
else:
html = """
<div id="%s"></div>
""" % (chart_id)
css = """
<link href="%s" rel="stylesheet" type="text/css" />
""" % (CSS_LIBS_ONE)
js = """<script>
// the Jupyter notebook loads jquery.min.js and require.js and at the top of the page
// then to make jquery available inside a require module
// the trick is http://www.manuel-strehl.de/dev/load_jquery_before_requirejs.en.html
require(%s, function(lib) {
window.vis = jQuery.extend(true, {}, lib);
%s
%s
});
</script>""" % (JS_LIBS_ONE, js_init, js_call)
# save
js_load = ''.join(['<script src="%s"></script>' % e for e in JS_SAVE])
    if save:
if not os.path.exists(save_path):
os.makedirs(save_path)
tag = save_name if save_name else 'plot'
dated = dt.datetime.now().strftime('_%Y%m%d_%H%M%S') if dated else ''
with open(os.path.join(save_path, tag + dated + '.html'), 'w') as f:
f.write(js_load + html + js)
if notebook:
return html + js
else:
return js_load + html + js
return html + css + js
def close(self):
"""Closes the tunnel."""
try:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
except socket.error:
pass
def code_data_whitening(self, decoding, inpt):
"""
XOR Data Whitening
        :param decoding: direction flag forwarded to apply_data_whitening (True when decoding)
        :param inpt: input data as a sequence of bytes
        :return: the input with XOR data whitening applied
"""
inpt_copy = array.array("B", inpt)
return self.apply_data_whitening(decoding, inpt_copy)
def grant_admin_role(self):
"""
Grant admin access to a user. If the user already has admin access, this
does nothing. If the user currently has a non-admin role, it will be replaced
with the admin role.
@return: An ApiUser object
"""
apiuser = ApiUser(self._get_resource_root(), self.name, roles=['ROLE_ADMIN'])
return self._put('', ApiUser, data=apiuser)
def listdict_to_listlist_and_matrix(sparse):
"""Transforms the adjacency list representation of a graph
of type listdict into the listlist + weight matrix representation
:param sparse: graph in listdict representation
:returns: couple with listlist representation, and weight matrix
:complexity: linear
"""
V = range(len(sparse))
graph = [[] for _ in V]
weight = [[None for v in V] for u in V]
for u in V:
for v in sparse[u]:
graph[u].append(v)
weight[u][v] = sparse[u][v]
return graph, weight
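# For instance, a weighted 3-vertex path graph in listdict form:
#
#     sparse = [{1: 5}, {0: 5, 2: 2}, {1: 2}]
#     graph, weight = listdict_to_listlist_and_matrix(sparse)
#     # graph  -> [[1], [0, 2], [1]]
#     # weight -> [[None, 5, None],
#     #            [5, None, 2],
#     #            [None, 2, None]]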
def build_markdown_body(self, text):
"""Generate the body for the Markdown file.
- processes each json block one by one
- for each block, process:
- the creator of the notebook (user)
- the date the notebook was created
- the date the notebook was last updated
- the input by detecting the editor language
- the output by detecting the output format
"""
key_options = {
'dateCreated': self.process_date_created,
'dateUpdated': self.process_date_updated,
'title': self.process_title,
'text': self.process_input
}
for paragraph in text['paragraphs']:
if 'user' in paragraph:
self.user = paragraph['user']
for key, handler in key_options.items():
if key in paragraph:
handler(paragraph[key])
if self._RESULT_KEY in paragraph:
self.process_results(paragraph)
def adafactor_optimizer_from_hparams(hparams, lr):
"""Create an Adafactor optimizer based on model hparams.
Args:
hparams: model hyperparameters
lr: learning rate scalar.
Returns:
an AdafactorOptimizer
Raises:
ValueError: on illegal values
"""
if hparams.optimizer_adafactor_decay_type == "adam":
decay_rate = adafactor_decay_rate_adam(
hparams.optimizer_adafactor_beta2)
elif hparams.optimizer_adafactor_decay_type == "pow":
decay_rate = adafactor_decay_rate_pow(
hparams.optimizer_adafactor_memory_exponent)
else:
raise ValueError("unknown optimizer_adafactor_decay_type")
if hparams.weight_dtype == "bfloat16":
parameter_encoding = quantization.EighthPowerEncoding()
else:
parameter_encoding = None
return AdafactorOptimizer(
multiply_by_parameter_scale=(
hparams.optimizer_adafactor_multiply_by_parameter_scale),
learning_rate=lr,
decay_rate=decay_rate,
beta1=hparams.optimizer_adafactor_beta1,
clipping_threshold=hparams.optimizer_adafactor_clipping_threshold,
factored=hparams.optimizer_adafactor_factored,
simulated_quantize_bits=getattr(
hparams, "simulated_parameter_quantize_bits", 0),
parameter_encoding=parameter_encoding,
use_locking=False,
name="Adafactor")
def validate(input_schema=None, output_schema=None,
input_example=None, output_example=None,
validator_cls=None,
format_checker=None, on_empty_404=False,
use_defaults=False):
"""Parameterized decorator for schema validation
:type validator_cls: IValidator class
:type format_checker: jsonschema.FormatChecker or None
:type on_empty_404: bool
:param on_empty_404: If this is set, and the result from the
decorated method is a falsy value, a 404 will be raised.
:type use_defaults: bool
:param use_defaults: If this is set, will put 'default' keys
from schema to self.body (If schema type is object). Example:
{
        'published': {'type': 'boolean', 'default': False}
}
    self.body will contain the 'published' key with value False if none
    comes from the request; this also works with nested schemas.
"""
@container
def _validate(rh_method):
"""Decorator for RequestHandler schema validation
This decorator:
- Validates request body against input schema of the method
- Calls the ``rh_method`` and gets output from it
- Validates output against output schema of the method
- Calls ``JSendMixin.success`` to write the validated output
:type rh_method: function
:param rh_method: The RequestHandler method to be decorated
:returns: The decorated method
:raises ValidationError: If input is invalid as per the schema
or malformed
:raises TypeError: If the output is invalid as per the schema
or malformed
:raises APIError: If the output is a falsy value and
on_empty_404 is True, an HTTP 404 error is returned
"""
@wraps(rh_method)
@tornado.gen.coroutine
def _wrapper(self, *args, **kwargs):
# In case the specified input_schema is ``None``, we
# don't json.loads the input, but just set it to ``None``
# instead.
if input_schema is not None:
# Attempt to json.loads the input
try:
# TODO: Assuming UTF-8 encoding for all requests,
# find a nice way of determining this from charset
# in headers if provided
encoding = "UTF-8"
input_ = json.loads(self.request.body.decode(encoding))
except ValueError as e:
raise jsonschema.ValidationError(
"Input is malformed; could not decode JSON object."
)
if use_defaults:
input_ = input_schema_clean(input_, input_schema)
# Validate the received input
jsonschema.validate(
input_,
input_schema,
cls=validator_cls,
format_checker=format_checker
)
else:
input_ = None
# A json.loads'd version of self.request["body"] is now available
# as self.body
setattr(self, "body", input_)
# Call the requesthandler method
output = rh_method(self, *args, **kwargs)
# If the rh_method returned a Future a la `raise Return(value)`
# we grab the output.
if is_future(output):
output = yield output
# if output is empty, auto return the error 404.
if not output and on_empty_404:
raise APIError(404, "Resource not found.")
if output_schema is not None:
# We wrap output in an object before validating in case
# output is a string (and ergo not a validatable JSON object)
try:
jsonschema.validate(
{"result": output},
{
"type": "object",
"properties": {
"result": output_schema
},
"required": ["result"]
}
)
except jsonschema.ValidationError as e:
# We essentially re-raise this as a TypeError because
# we don't want this error data passed back to the client
# because it's a fault on our end. The client should
# only see a 500 - Internal Server Error.
raise TypeError(str(e))
# If no ValidationError has been raised up until here, we write
# back output
self.success(output)
setattr(_wrapper, "input_schema", input_schema)
setattr(_wrapper, "output_schema", output_schema)
setattr(_wrapper, "input_example", input_example)
setattr(_wrapper, "output_example", output_example)
return _wrapper
return _validate
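# A minimal usage sketch (the handler base class name is hypothetical; it
# must provide JSendMixin.success, which the decorator calls with the
# validated output):
#
#     class GreetingHandler(APIHandler):
#         @validate(
#             input_schema={
#                 "type": "object",
#                 "properties": {"name": {"type": "string"}},
#                 "required": ["name"],
#             },
#             output_schema={"type": "string"},
#         )
#         def post(self):
#             # self.body is the already-validated, json.loads'd request body
#             return "Hello, {}!".format(self.body["name"])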
def list_formats():
"""Print information about available archive formats to stdout."""
print("Archive programs of", App)
print("Archive programs are searched in the following directories:")
print(util.system_search_path())
print()
for format in ArchiveFormats:
print(format, "files:")
for command in ArchiveCommands:
programs = ArchivePrograms[format]
if command not in programs and None not in programs:
print(" %8s: - (not supported)" % command)
continue
try:
program = find_archive_program(format, command)
print(" %8s: %s" % (command, program), end=' ')
if format == 'tar':
encs = [x for x in ArchiveCompressions if util.find_program(x)]
if encs:
print("(supported compressions: %s)" % ", ".join(encs), end=' ')
elif format == '7z':
if util.p7zip_supports_rar():
print("(rar archives supported)", end=' ')
else:
print("(rar archives not supported)", end=' ')
print()
except util.PatoolError:
# display information what programs can handle this archive format
handlers = programs.get(None, programs.get(command))
print(" %8s: - (no program found; install %s)" %
(command, util.strlist_with_or(handlers)))
|
Print information about available archive formats to stdout.
|
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(MySQLCollector, self).get_default_config()
config.update({
'path': 'mysql',
# Connection settings
'hosts': [],
# Which rows of 'SHOW GLOBAL STATUS' you would like to publish.
# http://dev.mysql.com/doc/refman/5.1/en/show-status.html
# Leave unset to publish all
# 'publish': '',
'slave': False,
'master': False,
'innodb': False,
})
return config
|
Returns the default collector settings
|
def pid(self):
"""The server's PID (None if not running).
"""
# We can't possibly be running if our base_pathname isn't defined.
if not self.base_pathname:
return None
try:
pidfile = os.path.join(self.base_pathname, 'postmaster.pid')
return int(open(pidfile).readline())
except (IOError, OSError):
return None
|
The server's PID (None if not running).
|
def push(self, repository, stream=False, raise_on_error=True, **kwargs):
"""
Pushes an image repository to the registry.
:param repository: Name of the repository (can include a tag).
:type repository: unicode | str
:param stream: Use the stream output format with additional status information.
:type stream: bool
:param raise_on_error: Raises errors in the status output as a DockerStatusException. Otherwise only logs
errors.
:type raise_on_error: bool
:param kwargs: Additional kwargs for :meth:`docker.client.Client.push`.
:return: ``True`` if the image has been pushed successfully.
:rtype: bool
"""
response = super(DockerClientWrapper, self).push(repository, stream=stream, **kwargs)
if stream:
result = self._docker_status_stream(response, raise_on_error)
else:
result = self._docker_status_stream(response.split('\r\n') if response else (), raise_on_error)
return result and not result.get('error')
|
Pushes an image repository to the registry.
:param repository: Name of the repository (can include a tag).
:type repository: unicode | str
:param stream: Use the stream output format with additional status information.
:type stream: bool
:param raise_on_error: Raises errors in the status output as a DockerStatusException. Otherwise only logs
errors.
:type raise_on_error: bool
:param kwargs: Additional kwargs for :meth:`docker.client.Client.push`.
:return: ``True`` if the image has been pushed successfully.
:rtype: bool
|
def Subgroups(self):
"""Returns a Groups object containing all child groups.
>>> clc.v2.Group("wa1-4416").Subgroups()
<clc.APIv2.group.Groups object at 0x105fa27d0>
"""
return(Groups(alias=self.alias,groups_lst=self.data['groups'],session=self.session))
|
Returns a Groups object containing all child groups.
>>> clc.v2.Group("wa1-4416").Subgroups()
<clc.APIv2.group.Groups object at 0x105fa27d0>
|
def VerifySignature(self, message, signature, public_key, unhex=True):
"""
Verify the integrity of the message.
Args:
message (str): the message to verify.
signature (bytearray): the signature belonging to the message.
public_key (ECPoint): the public key to use for verifying the signature.
unhex (bool): whether the message should be unhexlified before verifying
Returns:
bool: True if verification passes. False otherwise.
"""
return Crypto.VerifySignature(message, signature, public_key, unhex=unhex)
|
Verify the integrity of the message.
Args:
message (str): the message to verify.
signature (bytearray): the signature belonging to the message.
public_key (ECPoint): the public key to use for verifying the signature.
unhex (bool): whether the message should be unhexlified before verifying
Returns:
bool: True if verification passes. False otherwise.
|
def evolution_strength_of_connection(A, B=None, epsilon=4.0, k=2,
proj_type="l2", block_flag=False,
symmetrize_measure=True):
"""Evolution Strength Measure.
Construct strength of connection matrix using an Evolution-based measure
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix
    B : None or array
If B=None, then the near nullspace vector used is all ones. If B is
an (NxK) array, then B is taken to be the near nullspace vectors.
epsilon : scalar
Drop tolerance
k : integer
ODE num time steps, step size is assumed to be 1/rho(DinvA)
proj_type : {'l2','D_A'}
Define norm for constrained min prob, i.e. define projection
    block_flag : boolean
        If True, use a block D inverse as preconditioner for A during
        weighted-Jacobi
    symmetrize_measure : boolean
        If True, symmetrize the measure as 0.5*(Atilde + Atilde.T)
Returns
-------
Atilde : csr_matrix
Sparse matrix of strength values
See [2008OlScTu]_ for more details.
References
----------
.. [2008OlScTu] Olson, L. N., Schroder, J., Tuminaro, R. S.,
"A New Perspective on Strength Measures in Algebraic Multigrid",
submitted, June, 2008.
Examples
--------
>>> import numpy as np
>>> from pyamg.gallery import stencil_grid
>>> from pyamg.strength import evolution_strength_of_connection
>>> n=3
>>> stencil = np.array([[-1.0,-1.0,-1.0],
... [-1.0, 8.0,-1.0],
... [-1.0,-1.0,-1.0]])
>>> A = stencil_grid(stencil, (n,n), format='csr')
>>> S = evolution_strength_of_connection(A, np.ones((A.shape[0],1)))
"""
# local imports for evolution_strength_of_connection
from pyamg.util.utils import scale_rows, get_block_diag, scale_columns
from pyamg.util.linalg import approximate_spectral_radius
# ====================================================================
# Check inputs
if epsilon < 1.0:
raise ValueError("expected epsilon > 1.0")
if k <= 0:
raise ValueError("number of time steps must be > 0")
if proj_type not in ['l2', 'D_A']:
raise ValueError("proj_type must be 'l2' or 'D_A'")
if (not sparse.isspmatrix_csr(A)) and (not sparse.isspmatrix_bsr(A)):
raise TypeError("expected csr_matrix or bsr_matrix")
# ====================================================================
# Format A and B correctly.
# B must be in mat format, this isn't a deep copy
if B is None:
Bmat = np.mat(np.ones((A.shape[0], 1), dtype=A.dtype))
else:
Bmat = np.mat(B)
# Pre-process A. We need A in CSR, to be devoid of explicit 0's and have
# sorted indices
if (not sparse.isspmatrix_csr(A)):
csrflag = False
numPDEs = A.blocksize[0]
D = A.diagonal()
# Calculate Dinv*A
if block_flag:
Dinv = get_block_diag(A, blocksize=numPDEs, inv_flag=True)
Dinv = sparse.bsr_matrix((Dinv, np.arange(Dinv.shape[0]),
np.arange(Dinv.shape[0] + 1)),
shape=A.shape)
Dinv_A = (Dinv * A).tocsr()
else:
Dinv = np.zeros_like(D)
mask = (D != 0.0)
Dinv[mask] = 1.0 / D[mask]
Dinv[D == 0] = 1.0
Dinv_A = scale_rows(A, Dinv, copy=True)
A = A.tocsr()
else:
csrflag = True
numPDEs = 1
D = A.diagonal()
Dinv = np.zeros_like(D)
mask = (D != 0.0)
Dinv[mask] = 1.0 / D[mask]
Dinv[D == 0] = 1.0
Dinv_A = scale_rows(A, Dinv, copy=True)
A.eliminate_zeros()
A.sort_indices()
# Handle preliminaries for the algorithm
dimen = A.shape[1]
NullDim = Bmat.shape[1]
# Get spectral radius of Dinv*A, this will be used to scale the time step
# size for the ODE
rho_DinvA = approximate_spectral_radius(Dinv_A)
# Calculate D_A for later use in the minimization problem
if proj_type == "D_A":
D_A = sparse.spdiags([D], [0], dimen, dimen, format='csr')
else:
D_A = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)
# Calculate (I - delta_t Dinv A)^k
# In order to later access columns, we calculate the transpose in
# CSR format so that columns will be accessed efficiently
# Calculate the number of time steps that can be done by squaring, and
# the number of time steps that must be done incrementally
nsquare = int(np.log2(k))
ninc = k - 2**nsquare
# Calculate one time step
Id = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)
Atilde = (Id - (1.0 / rho_DinvA) * Dinv_A)
Atilde = Atilde.T.tocsr()
# Construct a sparsity mask for Atilde that will restrict Atilde^T to the
# nonzero pattern of A, with the added constraint that row i of Atilde^T
# retains only the nonzeros that are also in the same PDE as i.
mask = A.copy()
# Restrict to same PDE
if numPDEs > 1:
row_length = np.diff(mask.indptr)
my_pde = np.mod(np.arange(dimen), numPDEs)
my_pde = np.repeat(my_pde, row_length)
mask.data[np.mod(mask.indices, numPDEs) != my_pde] = 0.0
del row_length, my_pde
mask.eliminate_zeros()
# If the total number of time steps is a power of two, then there is
# a very efficient computational short-cut. Otherwise, we support
# other numbers of time steps, through an inefficient algorithm.
if ninc > 0:
warn("The most efficient time stepping for the Evolution Strength\
Method is done in powers of two.\nYou have chosen " + str(k) +
" time steps.")
# Calculate (Atilde^nsquare)^T = (Atilde^T)^nsquare
for i in range(nsquare):
Atilde = Atilde * Atilde
JacobiStep = (Id - (1.0 / rho_DinvA) * Dinv_A).T.tocsr()
for i in range(ninc):
Atilde = Atilde * JacobiStep
del JacobiStep
# Apply mask to Atilde, zeros in mask have already been eliminated at
# start of routine.
mask.data[:] = 1.0
Atilde = Atilde.multiply(mask)
Atilde.eliminate_zeros()
Atilde.sort_indices()
elif nsquare == 0:
if numPDEs > 1:
# Apply mask to Atilde, zeros in mask have already been eliminated
# at start of routine.
mask.data[:] = 1.0
Atilde = Atilde.multiply(mask)
Atilde.eliminate_zeros()
Atilde.sort_indices()
else:
# Use computational short-cut for case (ninc == 0) and (nsquare > 0)
# Calculate Atilde^k only at the sparsity pattern of mask.
for i in range(nsquare - 1):
Atilde = Atilde * Atilde
# Call incomplete mat-mat mult
AtildeCSC = Atilde.tocsc()
AtildeCSC.sort_indices()
mask.sort_indices()
Atilde.sort_indices()
amg_core.incomplete_mat_mult_csr(Atilde.indptr, Atilde.indices,
Atilde.data, AtildeCSC.indptr,
AtildeCSC.indices, AtildeCSC.data,
mask.indptr, mask.indices, mask.data,
dimen)
del AtildeCSC, Atilde
Atilde = mask
Atilde.eliminate_zeros()
Atilde.sort_indices()
del Dinv, Dinv_A, mask
# Calculate strength based on constrained min problem of
# min( z - B*x ), such that
# (B*x)|_i = z|_i, i.e. they are equal at point i
# z = (I - (t/k) Dinv A)^k delta_i
#
# Strength is defined as the relative point-wise approx. error between
# B*x and z. We don't use the full z in this problem, only that part of
# z that is in the sparsity pattern of A.
#
# Can use either the D-norm, and inner product, or l2-norm and inner-prod
# to solve the constrained min problem. Using D gives scale invariance.
#
# This is a quadratic minimization problem with a linear constraint, so
# we can build a linear system and solve it to find the critical point,
# i.e. minimum.
#
# We exploit a known shortcut for the case of NullDim = 1. The shortcut is
# mathematically equivalent to the longer constrained min. problem
if NullDim == 1:
# Use shortcut to solve constrained min problem if B is only a vector
# Strength(i,j) = | 1 - (z(i)/b(j))/(z(j)/b(i)) |
# These ratios can be calculated by diagonal row and column scalings
# Create necessary vectors for scaling Atilde
        # It's not clear what to do where B == 0. This is an
        # easy programming solution that may make sense.
Bmat_forscaling = np.ravel(Bmat)
Bmat_forscaling[Bmat_forscaling == 0] = 1.0
DAtilde = Atilde.diagonal()
DAtildeDivB = np.ravel(DAtilde) / Bmat_forscaling
# Calculate best approximation, z_tilde, in span(B)
# Importantly, scale_rows and scale_columns leave zero entries
# in the matrix. For previous implementations this was useful
# because we assume data and Atilde.data are the same length below
data = Atilde.data.copy()
Atilde.data[:] = 1.0
Atilde = scale_rows(Atilde, DAtildeDivB)
Atilde = scale_columns(Atilde, np.ravel(Bmat_forscaling))
# If angle in the complex plane between z and z_tilde is
# greater than 90 degrees, then weak. We can just look at the
# dot product to determine if angle is greater than 90 degrees.
angle = np.multiply(np.real(Atilde.data), np.real(data)) +\
np.multiply(np.imag(Atilde.data), np.imag(data))
angle = angle < 0.0
angle = np.array(angle, dtype=bool)
# Calculate Approximation ratio
Atilde.data = Atilde.data / data
# If approximation ratio is less than tol, then weak connection
weak_ratio = (np.abs(Atilde.data) < 1e-4)
# Calculate Approximation error
Atilde.data = abs(1.0 - Atilde.data)
# Set small ratios and large angles to weak
Atilde.data[weak_ratio] = 0.0
Atilde.data[angle] = 0.0
# Set near perfect connections to 1e-4
Atilde.eliminate_zeros()
Atilde.data[Atilde.data < np.sqrt(np.finfo(float).eps)] = 1e-4
del data, weak_ratio, angle
else:
# For use in computing local B_i^H*B, precompute the element-wise
# multiply of each column of B with each other column. We also scale
# by 2.0 to account for BDB's eventual use in a constrained
# minimization problem
BDBCols = int(np.sum(np.arange(NullDim + 1)))
BDB = np.zeros((dimen, BDBCols), dtype=A.dtype)
counter = 0
for i in range(NullDim):
for j in range(i, NullDim):
BDB[:, counter] = 2.0 *\
(np.conjugate(np.ravel(np.asarray(B[:, i]))) *
np.ravel(np.asarray(D_A * B[:, j])))
counter = counter + 1
# Choose tolerance for dropping "numerically zero" values later
t = Atilde.dtype.char
eps = np.finfo(np.float).eps
feps = np.finfo(np.single).eps
geps = np.finfo(np.longfloat).eps
_array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
tol = {0: feps * 1e3, 1: eps * 1e6, 2: geps * 1e6}[_array_precision[t]]
# Use constrained min problem to define strength
amg_core.evolution_strength_helper(Atilde.data,
Atilde.indptr,
Atilde.indices,
Atilde.shape[0],
np.ravel(np.asarray(B)),
np.ravel(np.asarray(
(D_A * np.conjugate(B)).T)),
np.ravel(np.asarray(BDB)),
BDBCols, NullDim, tol)
Atilde.eliminate_zeros()
# All of the strength values are real by this point, so ditch the complex
# part
Atilde.data = np.array(np.real(Atilde.data), dtype=float)
# Apply drop tolerance
if epsilon != np.inf:
amg_core.apply_distance_filter(dimen, epsilon, Atilde.indptr,
Atilde.indices, Atilde.data)
Atilde.eliminate_zeros()
# Symmetrize
if symmetrize_measure:
Atilde = 0.5 * (Atilde + Atilde.T)
# Set diagonal to 1.0, as each point is strongly connected to itself.
Id = sparse.eye(dimen, dimen, format="csr")
Id.data -= Atilde.diagonal()
Atilde = Atilde + Id
# If converted BSR to CSR, convert back and return amalgamated matrix,
# i.e. the sparsity structure of the blocks of Atilde
if not csrflag:
Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))
n_blocks = Atilde.indices.shape[0]
blocksize = Atilde.blocksize[0] * Atilde.blocksize[1]
CSRdata = np.zeros((n_blocks,))
amg_core.min_blocks(n_blocks, blocksize,
np.ravel(np.asarray(Atilde.data)), CSRdata)
# Atilde = sparse.csr_matrix((data, row, col), shape=(*,*))
Atilde = sparse.csr_matrix((CSRdata, Atilde.indices, Atilde.indptr),
shape=(int(Atilde.shape[0] / numPDEs),
int(Atilde.shape[1] / numPDEs)))
# Standardized strength values require small values be weak and large
# values be strong. So, we invert the algebraic distances computed here
Atilde.data = 1.0 / Atilde.data
# Scale C by the largest magnitude entry in each row
Atilde = scale_rows_by_largest_entry(Atilde)
return Atilde
|
Evolution Strength Measure.
Construct strength of connection matrix using an Evolution-based measure
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix
B : None or array
If B=None, then the near nullspace vector used is all ones. If B is
an (NxK) array, then B is taken to be the near nullspace vectors.
epsilon : scalar
Drop tolerance
k : integer
ODE num time steps, step size is assumed to be 1/rho(DinvA)
proj_type : {'l2','D_A'}
Define norm for constrained min prob, i.e. define projection
block_flag : boolean
    If True, use a block D inverse as preconditioner for A during
    weighted-Jacobi
symmetrize_measure : boolean
    If True, symmetrize the measure as 0.5*(Atilde + Atilde.T)
Returns
-------
Atilde : csr_matrix
Sparse matrix of strength values
See [2008OlScTu]_ for more details.
References
----------
.. [2008OlScTu] Olson, L. N., Schroder, J., Tuminaro, R. S.,
"A New Perspective on Strength Measures in Algebraic Multigrid",
submitted, June, 2008.
Examples
--------
>>> import numpy as np
>>> from pyamg.gallery import stencil_grid
>>> from pyamg.strength import evolution_strength_of_connection
>>> n=3
>>> stencil = np.array([[-1.0,-1.0,-1.0],
... [-1.0, 8.0,-1.0],
... [-1.0,-1.0,-1.0]])
>>> A = stencil_grid(stencil, (n,n), format='csr')
>>> S = evolution_strength_of_connection(A, np.ones((A.shape[0],1)))
|
def panes(self):
" List with all panes from this Window. "
result = []
for s in self.splits:
for item in s:
if isinstance(item, Pane):
result.append(item)
return result
|
List with all panes from this Window.
|
def update_vip(self, vip, body=None):
"""Updates a load balancer vip."""
return self.put(self.vip_path % (vip), body=body)
|
Updates a load balancer vip.
|
def parse(self):
"""
Parses a requirements.txt-like file
"""
index_server = None
for num, line in enumerate(self.iter_lines()):
line = line.rstrip()
if not line:
continue
if line.startswith('#'):
                # only lines that start with '#' are treated as comments
continue
if line.startswith('-i') or \
line.startswith('--index-url') or \
line.startswith('--extra-index-url'):
# this file is using a private index server, try to parse it
index_server = self.parse_index_server(line)
continue
elif self.obj.path and (line.startswith('-r') or line.startswith('--requirement')):
self.obj.resolved_files.append(self.resolve_file(self.obj.path, line))
elif line.startswith('-f') or line.startswith('--find-links') or \
line.startswith('--no-index') or line.startswith('--allow-external') or \
line.startswith('--allow-unverified') or line.startswith('-Z') or \
line.startswith('--always-unzip'):
continue
elif self.is_marked_line(line):
continue
else:
try:
parseable_line = line
# multiline requirements are not parseable
if "\\" in line:
parseable_line = line.replace("\\", "")
for next_line in self.iter_lines(num + 1):
parseable_line += next_line.strip().replace("\\", "")
line += "\n" + next_line
if "\\" in next_line:
continue
break
# ignore multiline requirements if they are marked
if self.is_marked_line(parseable_line):
continue
hashes = []
if "--hash" in parseable_line:
parseable_line, hashes = Parser.parse_hashes(parseable_line)
req = RequirementsTXTLineParser.parse(parseable_line)
if req:
req.hashes = hashes
req.index_server = index_server
# replace the requirements line with the 'real' line
req.line = line
self.obj.dependencies.append(req)
except ValueError:
continue
|
Parses a requirements.txt-like file
|
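The backslash-continuation handling above is the subtle part of this parser. A standalone sketch of the same joining logic, with an illustrative requirement line and made-up hash values:

def join_continuations(lines):
    # Physical lines ending in "\" are glued onto the next line until a
    # line without a continuation marker completes the requirement.
    buffered = ""
    for line in lines:
        line = line.rstrip()
        if line.endswith("\\"):
            buffered += line[:-1]
        else:
            yield buffered + line
            buffered = ""

lines = ["requests==2.20.0 \\", "--hash=sha256:aaaa \\", "--hash=sha256:bbbb"]
print(list(join_continuations(lines)))
# ['requests==2.20.0 --hash=sha256:aaaa --hash=sha256:bbbb']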
def _textio_iterlines(stream):
"""
Iterates over lines in a TextIO stream until an EOF is encountered.
This is the iterator version of stream.readlines()
"""
line = stream.readline()
while line != '':
yield line
line = stream.readline()
|
Iterates over lines in a TextIO stream until an EOF is encountered.
This is the iterator version of stream.readlines()
|
def nearby_faces(mesh, points):
"""
For each point find nearby faces relatively quickly.
The closest point on the mesh to the queried point is guaranteed to be
on one of the faces listed.
Does this by finding the nearest vertex on the mesh to each point, and
then returns all the faces that intersect the axis aligned bounding box
centered at the queried point and extending to the nearest vertex.
Parameters
----------
mesh : Trimesh object
points : (n,3) float , points in space
Returns
-----------
candidates : (points,) int, sequence of indexes for mesh.faces
"""
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)!')
# an r-tree containing the axis aligned bounding box for every triangle
rtree = mesh.triangles_tree
# a kd-tree containing every vertex of the mesh
kdtree = mesh.kdtree
# query the distance to the nearest vertex to get AABB of a sphere
distance_vertex = kdtree.query(points)[0].reshape((-1, 1))
distance_vertex += tol.merge
# axis aligned bounds
bounds = np.column_stack((points - distance_vertex,
points + distance_vertex))
# faces that intersect axis aligned bounding box
candidates = [list(rtree.intersection(b)) for b in bounds]
return candidates
|
For each point find nearby faces relatively quickly.
The closest point on the mesh to the queried point is guaranteed to be
on one of the faces listed.
Does this by finding the nearest vertex on the mesh to each point, and
then returns all the faces that intersect the axis aligned bounding box
centered at the queried point and extending to the nearest vertex.
Parameters
----------
mesh : Trimesh object
points : (n,3) float , points in space
Returns
-----------
candidates : (points,) int, sequence of indexes for mesh.faces
|
def _create_RSA_private_key(self,
bytes):
"""
Instantiates an RSA key from bytes.
Args:
bytes (byte string): Bytes of RSA private key.
Returns:
private_key
(cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
RSA private key created from key bytes.
"""
try:
private_key = serialization.load_pem_private_key(
bytes,
password=None,
backend=default_backend()
)
return private_key
except Exception:
private_key = serialization.load_der_private_key(
bytes,
password=None,
backend=default_backend()
)
return private_key
|
Instantiates an RSA key from bytes.
Args:
bytes (byte string): Bytes of RSA private key.
Returns:
private_key
(cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
RSA private key created from key bytes.
|
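A round-trip sketch of the PEM branch using the public cryptography API (the DER fallback is analogous with DER-encoded bytes); the key size and parameters are illustrative:

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

# Generate a throwaway key and serialize it to PEM (unencrypted PKCS#8).
key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
pem = key.private_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PrivateFormat.PKCS8,
    encryption_algorithm=serialization.NoEncryption(),
)
# The same call the method above makes for PEM input.
loaded = serialization.load_pem_private_key(pem, password=None,
                                            backend=default_backend())
assert loaded.key_size == 2048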
def __get_tax(self, account_id, **kwargs):
"""Call documentation: `/account/get_tax
<https://www.wepay.com/developer/reference/account-2011-01-15#get_tax>`_,
plus extra keyword parameters:
    :keyword str access_token: will be used instead of the instance's
        ``access_token``; with ``batch_mode=True`` the `authorization`
        param will be set to its value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
.. warning ::
        This call is deprecated as of API version '2014-01-08'.
"""
params = {
'account_id': account_id
}
return self.make_call(self.__get_tax, params, kwargs)
|
Call documentation: `/account/get_tax
<https://www.wepay.com/developer/reference/account-2011-01-15#get_tax>`_,
plus extra keyword parameters:
:keyword str access_token: will be used instead of the instance's
    ``access_token``; with ``batch_mode=True`` the `authorization`
    param will be set to its value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
.. warning ::
This call is deprecated as of API version '2014-01-08'.
|
def element_data_from_Z(Z):
'''Obtain elemental data given a Z number
An exception is thrown if the Z number is not found
'''
# Z may be a str
if isinstance(Z, str) and Z.isdecimal():
Z = int(Z)
if Z not in _element_Z_map:
raise KeyError('No element data for Z = {}'.format(Z))
return _element_Z_map[Z]
|
Obtain elemental data given a Z number
An exception is thrown if the Z number is not found
|
def _rsa_recover_prime_factors(n, e, d):
"""
Compute factors p and q from the private exponent d. We assume that n has
no more than two factors. This function is adapted from code in PyCrypto.
"""
# See 8.2.2(i) in Handbook of Applied Cryptography.
ktot = d * e - 1
# The quantity d*e-1 is a multiple of phi(n), even,
# and can be represented as t*2^s.
t = ktot
while t % 2 == 0:
t = t // 2
# Cycle through all multiplicative inverses in Zn.
# The algorithm is non-deterministic, but there is a 50% chance
# any candidate a leads to successful factoring.
# See "Digitalized Signatures and Public Key Functions as Intractable
# as Factorization", M. Rabin, 1979
spotted = False
a = 2
while not spotted and a < _MAX_RECOVERY_ATTEMPTS:
k = t
# Cycle through all values a^{t*2^i}=a^k
while k < ktot:
cand = pow(a, k, n)
# Check if a^k is a non-trivial root of unity (mod n)
if cand != 1 and cand != (n - 1) and pow(cand, 2, n) == 1:
# We have found a number such that (cand-1)(cand+1)=0 (mod n).
# Either of the terms divides n.
p = _gcd(cand + 1, n)
spotted = True
break
k *= 2
# This value was not any good... let's try another!
a += 2
if not spotted:
raise ValueError("Unable to compute factors p and q from exponent d.")
# Found !
q, r = divmod(n, p)
assert r == 0
p, q = sorted((p, q), reverse=True)
return (p, q)
|
Compute factors p and q from the private exponent d. We assume that n has
no more than two factors. This function is adapted from code in PyCrypto.
|
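A worked example with textbook-sized values (p=61, q=53, n=3233, e=17, d=2753), where math.gcd stands in for the module's private _gcd helper and the attempt bound is illustrative:

import math

def recover_prime_factors(n, e, d, max_attempts=1000):
    ktot = d * e - 1          # a multiple of phi(n), hence even
    t = ktot
    while t % 2 == 0:
        t //= 2
    a = 2
    while a < max_attempts:
        k = t
        while k < ktot:
            cand = pow(a, k, n)
            # A non-trivial square root of 1 (mod n) exposes a factor.
            if cand != 1 and cand != n - 1 and pow(cand, 2, n) == 1:
                p = math.gcd(cand + 1, n)
                return tuple(sorted((p, n // p), reverse=True))
            k *= 2
        a += 2
    raise ValueError("unable to recover factors")

print(recover_prime_factors(61 * 53, 17, 2753))   # (61, 53)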
def find_runner(program):
"""Return a command that will run program.
Args:
program: The string name of the program to try to run.
Returns:
commandline list of strings to run the program (eg. with subprocess.call()) or None
"""
if os.path.isfile(program) and not os.access(program, os.X_OK):
# program is a path to a non-executable file
try:
opened = open(program)
except PermissionError:
return None
first_line = opened.readline().strip()
if first_line.startswith('#!'):
return shlex.split(first_line[2:])
if program.endswith('.py'):
return [sys.executable]
return None
|
Return a command that will run program.
Args:
program: The string name of the program to try to run.
Returns:
commandline list of strings to run the program (eg. with subprocess.call()) or None
|
def setup_versioned_routes(routes, version=None):
"""Set up routes with a version prefix."""
prefix = '/' + version if version else ""
for r in routes:
path, method = r
route(prefix + path, method, routes[r])
|
Set up routes with a version prefix.
|
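A usage sketch, assuming `route` here is bottle.route and that the routes mapping uses (path, method) tuples as keys, as the tuple unpacking above implies; the handler and paths are illustrative:

from bottle import route

def get_status():
    return {"ok": True}

routes = {("/status", "GET"): get_status}
setup_versioned_routes(routes, version="v1")   # registers GET /v1/status
setup_versioned_routes(routes)                 # registers GET /status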
def delete_operation(self, name):
"""
Deletes the long-running operation.
.. seealso::
https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/delete
:param name: the name of the operation resource.
:type name: str
:return: none if successful.
:rtype: dict
"""
conn = self.get_conn()
resp = (conn
.projects()
.operations()
.delete(name=name)
.execute(num_retries=self.num_retries))
return resp
|
Deletes the long-running operation.
.. seealso::
https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/delete
:param name: the name of the operation resource.
:type name: str
:return: none if successful.
:rtype: dict
|
def debug_ratelimit(g):
"""Log debug of github ratelimit information from last API call
Parameters
----------
    g: github.MainClass.Github
github object
"""
assert isinstance(g, github.MainClass.Github), type(g)
debug("github ratelimit: {rl}".format(rl=g.rate_limiting))
|
Log debug of github ratelimit information from last API call
Parameters
----------
g: github.MainClass.Github
github object
|
def sort_by_number_values(x00, y00): # pragma: no cover, looks like not used!
"""Compare x00, y00 base on number of values
:param x00: first elem to compare
:type x00: list
:param y00: second elem to compare
:type y00: list
    :return: -1 if len(x00) > len(y00), 0 if the lengths are equal, 1 otherwise
:rtype: int
"""
if len(x00) < len(y00):
return 1
if len(x00) > len(y00):
return -1
# So is equal
return 0
|
Compare x00, y00 base on number of values
:param x00: first elem to compare
:type x00: list
:param y00: second elem to compare
:type y00: list
:return: -1 if len(x00) > len(y00), 0 if the lengths are equal, 1 otherwise
:rtype: int
|
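Adapted with functools.cmp_to_key, the comparator sorts longer lists first; a quick usage sketch:

import functools

items = [[1], [1, 2, 3], [1, 2]]
# Longest list first, since the comparator returns -1 when x00 is longer.
print(sorted(items, key=functools.cmp_to_key(sort_by_number_values)))
# [[1, 2, 3], [1, 2], [1]]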
def config_get(self, param, default=None):
'''Return the value of a git configuration option. This will
return the value of the default parameter (which defaults to
None) if the given option does not exist.'''
try:
return self("config", "--get", param,
log_fail=False, log_cmd=False)
except exception.CommandFailed:
return default
|
Return the value of a git configuration option. This will
return the value of the default parameter (which defaults to
None) if the given option does not exist.
|
def save(self, filename, binary=True):
"""
Writes a rectilinear grid to disk.
Parameters
----------
filename : str
Filename of grid to be written. The file extension will select the
type of writer to use. ".vtk" will use the legacy writer, while
".vtr" will select the VTK XML writer.
binary : bool, optional
Writes as a binary file by default. Set to False to write ASCII.
Notes
-----
Binary files write much faster than ASCII, but binary files written on
one system may not be readable on other systems. Binary can be used
only with the legacy writer.
"""
filename = os.path.abspath(os.path.expanduser(filename))
# Use legacy writer if vtk is in filename
if '.vtk' in filename:
writer = vtk.vtkRectilinearGridWriter()
legacy = True
elif '.vtr' in filename:
writer = vtk.vtkXMLRectilinearGridWriter()
legacy = False
else:
        raise Exception('Extension should be either ".vtr" (xml) or '
                        '".vtk" (legacy)')
# Write
writer.SetFileName(filename)
writer.SetInputData(self)
if binary and legacy:
writer.SetFileTypeToBinary()
writer.Write()
|
Writes a rectilinear grid to disk.
Parameters
----------
filename : str
Filename of grid to be written. The file extension will select the
type of writer to use. ".vtk" will use the legacy writer, while
".vtr" will select the VTK XML writer.
binary : bool, optional
Writes as a binary file by default. Set to False to write ASCII.
Notes
-----
Binary files write much faster than ASCII, but binary files written on
one system may not be readable on other systems. Binary can be used
only with the legacy writer.
|
def remove():
"""Function executed when running the script with the -remove switch"""
current = True # only affects current user
root = winreg.HKEY_CURRENT_USER if current else winreg.HKEY_LOCAL_MACHINE
for key in (KEY_C1 % ("", EWS), KEY_C1 % ("NoCon", EWS),
KEY_C0 % ("", EWS), KEY_C0 % ("NoCon", EWS)):
try:
winreg.DeleteKey(root, key)
except WindowsError:
pass
else:
if not is_bdist_wininst:
print("Successfully removed Spyder shortcuts from Windows "\
"Explorer context menu.", file=sys.stdout)
if not is_bdist_wininst:
# clean up desktop
desktop_folder = get_special_folder_path("CSIDL_DESKTOPDIRECTORY")
fname = osp.join(desktop_folder, 'Spyder.lnk')
if osp.isfile(fname):
try:
os.remove(fname)
except OSError:
print("Failed to remove %s; you may be able to remove it "\
"manually." % fname, file=sys.stderr)
else:
print("Successfully removed Spyder shortcuts from your desktop.",
file=sys.stdout)
# clean up startmenu
start_menu = osp.join(get_special_folder_path('CSIDL_PROGRAMS'),
'Spyder (Py%i.%i %i bit)' % (sys.version_info[0],
sys.version_info[1],
struct.calcsize('P')*8))
if osp.isdir(start_menu):
for fname in os.listdir(start_menu):
try:
os.remove(osp.join(start_menu,fname))
except OSError:
print("Failed to remove %s; you may be able to remove it "\
"manually." % fname, file=sys.stderr)
else:
print("Successfully removed Spyder shortcuts from your "\
" start menu.", file=sys.stdout)
try:
os.rmdir(start_menu)
except OSError:
print("Failed to remove %s; you may be able to remove it "\
"manually." % fname, file=sys.stderr)
else:
print("Successfully removed Spyder shortcut folder from your "\
" start menu.", file=sys.stdout)
|
Function executed when running the script with the -remove switch
|
def writelines(lines, filename, encoding='utf-8', mode='wb'):
"""
Write 'lines' to file ('filename') assuming 'encoding'
Return (eventually new) encoding
"""
return write(os.linesep.join(lines), filename, encoding, mode)
|
Write 'lines' to file ('filename') assuming 'encoding'
Return (eventually new) encoding
|
def weed(self):
"""
Get rid of key value pairs that are not standard
"""
_ext = [k for k in self._dict.keys() if k not in self.c_param]
for k in _ext:
del self._dict[k]
|
Get rid of key value pairs that are not standard
|
def get_data(self, path, **params):
""" Giving a service path and optional specific arguments, returns
the XML data from the API parsed as a dict structure.
"""
xml = self.get_response(path, **params)
try:
return parse(xml)
except Exception as err:
print(path)
print(params)
print(err)
raise
|
Given a service path and optional specific arguments, returns
the XML data from the API parsed as a dict structure.
|
def mock_bable(monkeypatch):
""" Mock the BaBLEInterface class with some controllers inside. """
mocked_bable = MockBaBLE()
mocked_bable.set_controllers([
Controller(0, '11:22:33:44:55:66', '#0'),
Controller(1, '22:33:44:55:66:11', '#1', settings={'powered': True, 'low_energy': True}),
Controller(2, '33:44:55:66:11:22', '#2', settings={'powered': True})
])
monkeypatch.setattr(bable_interface, 'BaBLEInterface', lambda: mocked_bable)
return mocked_bable
|
Mock the BaBLEInterface class with some controllers inside.
|
def arraydifference(X,Y):
"""
Elements of a numpy array that do not appear in another.
Fast routine for determining which elements in numpy array `X`
do not appear in numpy array `Y`.
**Parameters**
**X** : numpy array
    Numpy array to compare to numpy array `Y`.
Return subset of `X` corresponding to elements not in `Y`.
**Y** : numpy array
Numpy array to which numpy array `X` is compared.
Return subset of `X` corresponding to elements not in `Y`.
**Returns**
**Z** : numpy array
Subset of `X` corresponding to elements not in `Y`.
**See Also:**
:func:`tabular.fast.recarraydifference`, :func:`tabular.fast.isin`
"""
if len(Y) > 0:
Z = isin(X,Y)
return X[np.invert(Z)]
else:
return X
|
Elements of a numpy array that do not appear in another.
Fast routine for determining which elements in numpy array `X`
do not appear in numpy array `Y`.
**Parameters**
**X** : numpy array
    Numpy array to compare to numpy array `Y`.
Return subset of `X` corresponding to elements not in `Y`.
**Y** : numpy array
Numpy array to which numpy array `X` is compared.
Return subset of `X` corresponding to elements not in `Y`.
**Returns**
**Z** : numpy array
Subset of `X` corresponding to elements not in `Y`.
**See Also:**
:func:`tabular.fast.recarraydifference`, :func:`tabular.fast.isin`
|
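For 1-D inputs, numpy's np.isin produces the same membership mask as the tabular.fast.isin helper used above (not shown here); an equivalent standalone sketch:

import numpy as np

X = np.array([1, 2, 3, 4, 5])
Y = np.array([2, 4])
# Keep only the elements of X that do not appear in Y.
print(X[~np.isin(X, Y)])   # [1 3 5]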
def _resolve_group_location(self, group: str) -> str:
"""
Resolves the location of a setting file based on the given identifier.
:param group: the identifier for the group's settings file (~its location)
:return: the absolute path of the settings location
"""
if os.path.isabs(group):
possible_paths = [group]
else:
possible_paths = []
for repository in self.setting_repositories:
possible_paths.append(os.path.join(repository, group))
for default_setting_extension in self.default_setting_extensions:
number_of_paths = len(possible_paths)
for i in range(number_of_paths):
path_with_extension = "%s.%s" % (possible_paths[i], default_setting_extension)
possible_paths.append(path_with_extension)
for path in possible_paths:
if os.path.exists(path):
return path
raise ValueError("Could not resolve location of settings identified by: \"%s\"" % group)
|
Resolves the location of a setting file based on the given identifier.
:param group: the identifier for the group's settings file (~its location)
:return: the absolute path of the settings location
|
def put(self, local_path, remote_path=None):
"""
Copy a file (or directory recursively) to a location on the remote server
        :param local_path: Local path to copy from; can be a file or directory
        :param remote_path: Remote path to copy to (default: None - copies the file or directory
        into the home directory on the remote server)
"""
# Determine if local_path should be put into remote user directory
if remote_path is None:
remote_path = os.path.basename(local_path)
ftp = self.ssh.open_sftp()
if os.path.isdir(local_path):
self.__put_dir(ftp, local_path, remote_path)
else:
ftp.put(local_path, remote_path)
ftp.close()
|
Copy a file (or directory recursively) to a location on the remote server
:param local_path: Local path to copy from; can be a file or directory
:param remote_path: Remote path to copy to (default: None - copies the file or directory
into the home directory on the remote server)
|
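A usage sketch, assuming the wrapper stores a connected paramiko.SSHClient on self.ssh; the host, credentials, and paths are illustrative:

import paramiko

ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect("example.com", username="deploy")
# The same SFTP calls the method above wraps.
sftp = ssh.open_sftp()
sftp.put("build/app.tar.gz", "app.tar.gz")
sftp.close()
ssh.close()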
def validateSamOptions(options, group=False):
''' Check the validity of the option combinations for sam/bam input '''
if options.per_gene:
if options.gene_tag and options.per_contig:
raise ValueError("need to use either --per-contig "
"OR --gene-tag, please do not provide both")
if not options.per_contig and not options.gene_tag:
raise ValueError("for per-gene applications, must supply "
"--per-contig or --gene-tag")
if options.per_contig and not options.per_gene:
raise ValueError("need to use --per-gene with --per-contig")
if options.gene_tag and not options.per_gene:
raise ValueError("need to use --per-gene with --gene_tag")
if options.gene_transcript_map and not options.per_contig:
raise ValueError("need to use --per-contig and --per-gene"
"with --gene-transcript-map")
if options.get_umi_method == "tag":
if options.umi_tag is None:
raise ValueError("Need to supply the --umi-tag option")
if options.per_cell and options.cell_tag is None:
raise ValueError("Need to supply the --cell-tag option")
if options.assigned_tag is None:
options.assigned_tag = options.gene_tag
if options.skip_regex:
try:
re.compile(options.skip_regex)
except re.error:
raise ValueError("skip-regex '%s' is not a "
"valid regex" % options.skip_regex)
if not group:
if options.unmapped_reads == "output":
raise ValueError("Cannot use --unmapped-reads=output. If you want "
"to retain unmapped without deduplicating them, "
"use the group command")
if options.chimeric_pairs == "output":
raise ValueError("Cannot use --chimeric-pairs=output. If you want "
"to retain chimeric read pairs without "
"deduplicating them, use the group command")
if options.unpaired_reads == "output":
raise ValueError("Cannot use --unpaired-reads=output. If you want "
"to retain unmapped without deduplicating them, "
"use the group command")
if options.paired:
if options.chimeric_pairs == "use":
warn("Chimeric read pairs are being used. "
"Some read pair UMIs may be grouped/deduplicated using "
"just the mapping coordinates from read1."
"This may also increase the run time and memory usage. "
"Consider --chimeric-pairs==discard to discard these reads "
"or --chimeric-pairs==output (group command only) to "
"output them without grouping")
if options.unpaired_reads == "use":
warn("Unpaired read pairs are being used. "
"Some read pair UMIs may be grouped/deduplicated using "
"just the mapping coordinates from read1."
"This may also increase the run time and memory usage. "
"Consider --unpared-reads==discard to discard these reads "
"or --unpared-reads==output (group command only) to "
"output them without grouping")
if options.unmapped_reads == "use":
warn("Unmapped read pairs are being used. "
"Some read pair UMIs may be grouped/deduplicated using "
"just the mapping coordinates from read1. "
"This may also increase the run time and memory usage. "
"Consider --unmapped_reads==discard to discard these reads "
"or --unmapped_reads==output (group command only) to "
"output them without grouping")
command = " ".join(sys.argv)
info("command: %s" % command)
if "--umi-tag" in command or "--cell-tag" in command:
if options.get_umi_method != "tag":
raise ValueError("--umi-tag and/or --cell-tag options provided. "
"Need to set --extract-umi-method=tag")
if options.unmapped_reads == "use":
if not options.paired:
raise ValueError("--unmapped-reads=use is only compatible with "
"paired end reads (--paired)")
if "--chimeric-pairs" in command:
info("command: %s" % command)
if not options.paired:
raise ValueError("--chimeric-pairs is only compatible "
"with paired end reads (--paired)")
if "--unpaired-reads" in command:
if not options.paired:
raise ValueError("--unpaired-reads is only compatible "
"with paired end reads (--paired)")
# legacy support for --output-unmapped behaviour
if options.output_unmapped:
warn("--output-unmapped will be removed in the near future. "
"Use --unmapped-reads=output instead")
# We will update the value of options.unmapped_reads so we want to
# check the user has not also supplied this option
if "--unmapped_reads" in command:
raise ValueError("Do not use --output-unmapped in combination with"
"--unmapped-reads. Just use --unmapped-reads")
options.unmapped_reads = "output"
|
Check the validity of the option combinations for sam/bam input
|
def launch(exec_, args):
"""
Launches application.
"""
if not exec_:
raise RuntimeError(
            'Mayalauncher could not find a maya executable, please specify '
            'a path in the config file (-e) or add the {} directory location '
            'to your PATH system environment.'.format(DEVELOPER_NAME)
)
# Launch Maya
if args.debug:
return
watched = WatchFile()
cmd = [exec_] if args.file is None else [exec_, args.file]
cmd.extend(['-hideConsole', '-log', watched.path])
if args.debug:
cmd.append('-noAutoloadPlugins')
maya = subprocess.Popen(cmd)
# Maya 2016 stupid clic ipm
# os.environ['MAYA_DISABLE_CLIC_IPM'] = '1'
# os.environ['MAYA_DISABLE_CIP'] = '1'
# os.environ['MAYA_OPENCL_IGNORE_DRIVER_VERSION'] = '1'
while True:
time.sleep(1)
maya.poll()
watched.check()
if maya.returncode is not None:
if not maya.returncode == 0:
maya = subprocess.Popen(cmd)
else:
watched.stop()
break
|
Launches application.
|
def search(self):
""" This is the most important method """
try:
filters = json.loads(self.query)
except ValueError:
return False
result = self.model_query
        if 'filter' in filters:
result = self.parse_filter(filters['filter'])
        if 'sort' in filters:
result = result.order_by(*self.sort(filters['sort']))
return result
|
Parse the JSON query and apply its filter and sort clauses to the model query
|
def get_nan_locs(self, **kwargs):
"""Gets the locations of nans in feature data and returns
the coordinates in the matrix
"""
if np.issubdtype(self.X.dtype, np.string_) or np.issubdtype(self.X.dtype, np.unicode_):
mask = np.where( self.X == '' )
nan_matrix = np.zeros(self.X.shape)
nan_matrix[mask] = np.nan
else:
nan_matrix = self.X.astype(float)
if self.y is None:
return np.argwhere(np.isnan(nan_matrix))
else:
nan_locs = []
for target_value in np.unique(self.y):
indices = np.argwhere(self.y == target_value)
target_matrix = nan_matrix[indices.flatten()]
nan_target_locs = np.argwhere(np.isnan(target_matrix))
nan_locs.append((target_value, nan_target_locs))
return nan_locs
|
Gets the locations of nans in feature data and returns
the coordinates in the matrix
|
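A standalone sketch of the unlabelled (self.y is None) branch: np.argwhere over an np.isnan mask yields the (row, column) coordinates of the missing values:

import numpy as np

X = np.array([[1.0, np.nan],
              [np.nan, 4.0]])
print(np.argwhere(np.isnan(X)))
# [[0 1]
#  [1 0]]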
def add_point_region(self, y: float, x: float) -> Graphic:
"""Add a point graphic to the data item.
:param x: The x coordinate, in relative units [0.0, 1.0]
:param y: The y coordinate, in relative units [0.0, 1.0]
:return: The :py:class:`nion.swift.Facade.Graphic` object that was added.
.. versionadded:: 1.0
Scriptable: Yes
"""
graphic = Graphics.PointGraphic()
graphic.position = Geometry.FloatPoint(y, x)
self.__display_item.add_graphic(graphic)
return Graphic(graphic)
|
Add a point graphic to the data item.
:param x: The x coordinate, in relative units [0.0, 1.0]
:param y: The y coordinate, in relative units [0.0, 1.0]
:return: The :py:class:`nion.swift.Facade.Graphic` object that was added.
.. versionadded:: 1.0
Scriptable: Yes
|
def _unpack(struct, bc, offset=0):
"""
returns the unpacked data tuple, and the next offset past the
unpacked data
"""
return struct.unpack_from(bc, offset), offset + struct.size
|
returns the unpacked data tuple, and the next offset past the
unpacked data
|
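A usage sketch with two big-endian uint16 fields; the returned offset points just past the unpacked data, ready for the next read:

import struct

header = struct.Struct(">HH")
fields, offset = _unpack(header, b"\x00\x01\x00\x02\xff")
print(fields, offset)   # (1, 2) 4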
def query(self, model_cls):
"""
SQLAlchemy query like method
"""
self._filters_cmd = list()
self.query_filters = list()
self._order_by_cmd = None
self._offset = 0
self._limit = 0
self.query_class = model_cls._name
return self
|
SQLAlchemy query like method
|
def _mark_void(self):
        ''' Marks the invoice as void, and updates the attached cart if
        necessary. '''
self.invoice.status = commerce.Invoice.STATUS_VOID
self.invoice.save()
|
Marks the invoice as void, and updates the attached cart if
necessary.
|
def open(self, file_path):
"""
Open a SQLite database file.
:param str file_path: SQLite database file path to open.
"""
from simplesqlite import SimpleSQLite
if self.is_opened():
if self.stream.database_path == abspath(file_path):
self._logger.logger.debug(
"database already opened: {}".format(self.stream.database_path)
)
return
self.close()
self._stream = SimpleSQLite(file_path, "w")
|
Open a SQLite database file.
:param str file_path: SQLite database file path to open.
|
def is_matching_mime_type(self, mime_type):
'''This implements the MIME-type matching logic for deciding whether
to run `make_clean_html`
'''
if len(self.include_mime_types) == 0:
return True
if mime_type is None:
return False
mime_type = mime_type.lower()
# NB: startswith is necessary here, because encodings are
# often appended to HTTP header Content-Type
return any(mime_type.startswith(mt) for mt in self.include_mime_types)
|
This implements the MIME-type matching logic for deciding whether
to run `make_clean_html`
|
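A standalone sketch of why startswith is needed here: HTTP Content-Type headers commonly append a charset to the bare MIME type, so exact matching would fail:

include = ("text/html", "application/xhtml+xml")
mime = "text/html; charset=UTF-8".lower()
print(any(mime.startswith(mt) for mt in include))   # True
print(mime in include)                              # False: exact match misses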
def bits(self, count):
"""Reads `count` bits and returns an uint, MSB read first.
May raise BitReaderError if not enough data could be read or
IOError by the underlying file object.
"""
if count < 0:
raise ValueError
if count > self._bits:
n_bytes = (count - self._bits + 7) // 8
data = self._fileobj.read(n_bytes)
if len(data) != n_bytes:
raise BitReaderError("not enough data")
for b in bytearray(data):
self._buffer = (self._buffer << 8) | b
self._bits += n_bytes * 8
self._bits -= count
value = self._buffer >> self._bits
self._buffer &= (1 << self._bits) - 1
assert self._bits < 8
return value
|
Reads `count` bits and returns an uint, MSB read first.
May raise BitReaderError if not enough data could be read or
IOError by the underlying file object.
|
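A minimal standalone sketch of the same MSB-first shift/mask arithmetic, without the file object or error handling; the input bytes are illustrative:

def take_bits(data, counts):
    # Accumulate all bytes MSB-first, then peel off `count` bits at a time.
    buffer, bits = 0, 0
    for b in data:
        buffer = (buffer << 8) | b
        bits += 8
    for count in counts:
        bits -= count
        yield buffer >> bits
        buffer &= (1 << bits) - 1

print(list(take_bits(b"\xab", [4, 4])))   # [10, 11], i.e. 0xa then 0xb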
def get_gid_list(user, include_default=True):
'''
Returns a list of all of the system group IDs of which the user
is a member.
'''
if HAS_GRP is False or HAS_PWD is False:
return []
gid_list = list(
six.itervalues(
get_group_dict(user, include_default=include_default)
)
)
return sorted(set(gid_list))
|
Returns a list of all of the system group IDs of which the user
is a member.
|
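A standalone sketch of the same lookup against the stdlib directly (Unix-only): the primary gid from pwd plus any supplementary groups that list the user as a member:

import grp
import pwd

def gid_list(user):
    gids = {pwd.getpwnam(user).pw_gid}
    gids.update(g.gr_gid for g in grp.getgrall() if user in g.gr_mem)
    return sorted(gids)

print(gid_list("root"))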
def __get_eval_info(self):
"""Get inner evaluation count and names."""
if self.__need_reload_eval_info:
self.__need_reload_eval_info = False
out_num_eval = ctypes.c_int(0)
# Get num of inner evals
_safe_call(_LIB.LGBM_BoosterGetEvalCounts(
self.handle,
ctypes.byref(out_num_eval)))
self.__num_inner_eval = out_num_eval.value
if self.__num_inner_eval > 0:
# Get name of evals
tmp_out_len = ctypes.c_int(0)
string_buffers = [ctypes.create_string_buffer(255) for i in range_(self.__num_inner_eval)]
ptr_string_buffers = (ctypes.c_char_p * self.__num_inner_eval)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetEvalNames(
self.handle,
ctypes.byref(tmp_out_len),
ptr_string_buffers))
if self.__num_inner_eval != tmp_out_len.value:
raise ValueError("Length of eval names doesn't equal with num_evals")
self.__name_inner_eval = \
[string_buffers[i].value.decode() for i in range_(self.__num_inner_eval)]
self.__higher_better_inner_eval = \
[name.startswith(('auc', 'ndcg@', 'map@')) for name in self.__name_inner_eval]
|
Get inner evaluation count and names.
|
def MaxPooling(
inputs,
pool_size,
strides=None,
padding='valid',
data_format='channels_last'):
"""
Same as `tf.layers.MaxPooling2D`. Default strides is equal to pool_size.
"""
if strides is None:
strides = pool_size
layer = tf.layers.MaxPooling2D(pool_size, strides, padding=padding, data_format=data_format)
ret = layer.apply(inputs, scope=tf.get_variable_scope())
return tf.identity(ret, name='output')
|
Same as `tf.layers.MaxPooling2D`. Default strides is equal to pool_size.
|
def __get_neighbors(self, node_index):
"""!
@brief Returns indexes of neighbors of the specified node.
@param[in] node_index (uint):
@return (list) Neighbors of the specified node.
"""
return [ index for index in range(len(self.__data_pointer[node_index])) if self.__data_pointer[node_index][index] != 0 ];
|
!
@brief Returns indexes of neighbors of the specified node.
@param[in] node_index (uint):
@return (list) Neighbors of the specified node.
|
def gen_hyper_keys(minion_id,
country='US',
state='Utah',
locality='Salt Lake City',
organization='Salted',
expiration_days='365'):
'''
Generate the keys to be used by libvirt hypervisors, this routine gens
the keys and applies them to the pillar for the hypervisor minions
'''
key_dir = os.path.join(
__opts__['pki_dir'],
'libvirt')
if not os.path.isdir(key_dir):
os.makedirs(key_dir)
cakey = os.path.join(key_dir, 'cakey.pem')
cacert = os.path.join(key_dir, 'cacert.pem')
cainfo = os.path.join(key_dir, 'ca.info')
if not os.path.isfile(cainfo):
with salt.utils.files.fopen(cainfo, 'w+') as fp_:
fp_.write('cn = salted\nca\ncert_signing_key')
if not os.path.isfile(cakey):
subprocess.call(
'certtool --generate-privkey > {0}'.format(cakey),
shell=True)
if not os.path.isfile(cacert):
cmd = ('certtool --generate-self-signed --load-privkey {0} '
'--template {1} --outfile {2}').format(cakey, cainfo, cacert)
subprocess.call(cmd, shell=True)
sub_dir = os.path.join(key_dir, minion_id)
if not os.path.isdir(sub_dir):
os.makedirs(sub_dir)
priv = os.path.join(sub_dir, 'serverkey.pem')
cert = os.path.join(sub_dir, 'servercert.pem')
srvinfo = os.path.join(sub_dir, 'server.info')
cpriv = os.path.join(sub_dir, 'clientkey.pem')
ccert = os.path.join(sub_dir, 'clientcert.pem')
clientinfo = os.path.join(sub_dir, 'client.info')
if not os.path.isfile(srvinfo):
with salt.utils.files.fopen(srvinfo, 'w+') as fp_:
infodat = salt.utils.stringutils.to_str(
'organization = salted\ncn = {0}\ntls_www_server'
'\nencryption_key\nsigning_key'
'\ndigitalSignature\nexpiration_days = {1}'.format(
__grains__['fqdn'], expiration_days
)
)
fp_.write(infodat)
if not os.path.isfile(priv):
subprocess.call(
'certtool --generate-privkey > {0}'.format(priv),
shell=True)
if not os.path.isfile(cert):
cmd = ('certtool --generate-certificate --load-privkey {0} '
'--load-ca-certificate {1} --load-ca-privkey {2} '
'--template {3} --outfile {4}'
).format(priv, cacert, cakey, srvinfo, cert)
subprocess.call(cmd, shell=True)
if not os.path.isfile(clientinfo):
with salt.utils.files.fopen(clientinfo, 'w+') as fp_:
infodat = salt.utils.stringutils.to_str(
'country = {0}\nstate = {1}\nlocality = {2}\n'
'organization = {3}\ncn = {4}\n'
'tls_www_client\nencryption_key\nsigning_key\n'
'digitalSignature'.format(
country,
state,
locality,
organization,
__grains__['fqdn']
)
)
fp_.write(infodat)
if not os.path.isfile(cpriv):
subprocess.call(
'certtool --generate-privkey > {0}'.format(cpriv),
shell=True)
if not os.path.isfile(ccert):
cmd = ('certtool --generate-certificate --load-privkey {0} '
'--load-ca-certificate {1} --load-ca-privkey {2} '
'--template {3} --outfile {4}'
).format(cpriv, cacert, cakey, clientinfo, ccert)
subprocess.call(cmd, shell=True)
|
Generate the keys to be used by libvirt hypervisors, this routine gens
the keys and applies them to the pillar for the hypervisor minions
|
def list_(properties='size,alloc,free,cap,frag,health', zpool=None, parsable=True):
'''
.. versionadded:: 2015.5.0
Return information about (all) storage pools
zpool : string
optional name of storage pool
properties : string
comma-separated list of properties to list
parsable : boolean
display numbers in parsable (exact) values
.. versionadded:: 2018.3.0
.. note::
The ``name`` property will always be included, while the ``frag``
property will get removed if not available
zpool : string
optional zpool
.. note::
        Multiple storage pools can be provided as a space-separated list
CLI Example:
.. code-block:: bash
salt '*' zpool.list
salt '*' zpool.list zpool=tank
salt '*' zpool.list 'size,free'
salt '*' zpool.list 'size,free' tank
'''
ret = OrderedDict()
## update properties
# NOTE: properties should be a list
if not isinstance(properties, list):
properties = properties.split(',')
# NOTE: name should be first property
while 'name' in properties:
properties.remove('name')
properties.insert(0, 'name')
    # NOTE: remove 'frag' if we don't have feature flags
if not __utils__['zfs.has_feature_flags']():
while 'frag' in properties:
properties.remove('frag')
## collect list output
res = __salt__['cmd.run_all'](
__utils__['zfs.zpool_command'](
command='list',
flags=['-H'],
opts={'-o': ','.join(properties)},
target=zpool
),
python_shell=False,
)
if res['retcode'] != 0:
return __utils__['zfs.parse_command_result'](res)
# NOTE: command output for reference
# ========================================================================
# data 1992864825344 695955501056 1296909324288 34 11% ONLINE
# =========================================================================
## parse list output
for line in res['stdout'].splitlines():
# NOTE: transform data into dict
zpool_data = OrderedDict(list(zip(
properties,
line.strip().split('\t'),
)))
# NOTE: normalize values
if parsable:
# NOTE: raw numbers and pythonic types
zpool_data = __utils__['zfs.from_auto_dict'](zpool_data)
else:
# NOTE: human readable zfs types
zpool_data = __utils__['zfs.to_auto_dict'](zpool_data)
ret[zpool_data['name']] = zpool_data
del ret[zpool_data['name']]['name']
return ret
|
.. versionadded:: 2015.5.0
Return information about (all) storage pools
zpool : string
optional name of storage pool
properties : string
comma-separated list of properties to list
parsable : boolean
display numbers in parsable (exact) values
.. versionadded:: 2018.3.0
.. note::
The ``name`` property will always be included, while the ``frag``
property will get removed if not available
zpool : string
optional zpool
.. note::
    Multiple storage pools can be provided as a space-separated list
CLI Example:
.. code-block:: bash
salt '*' zpool.list
salt '*' zpool.list zpool=tank
salt '*' zpool.list 'size,free'
salt '*' zpool.list 'size,free' tank
|
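A standalone sketch of the parsing step, fed the sample `zpool list -H` line from the comment in the code above; the values map positionally onto the requested properties:

from collections import OrderedDict

properties = ['name', 'size', 'alloc', 'free', 'cap', 'frag', 'health']
line = 'data\t1992864825344\t695955501056\t1296909324288\t34\t11%\tONLINE'
zpool_data = OrderedDict(zip(properties, line.strip().split('\t')))
print(zpool_data['name'], zpool_data['size'], zpool_data['health'])
# data 1992864825344 ONLINE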
def remove(self, dic):
        '''remove the pair by passing an identical dict
Args:
dic (dict): key and value
'''
for kw in dic:
removePair = Pair(kw, dic[kw])
self._remove([removePair])
|
remove the pair by passing an identical dict
Args:
dic (dict): key and value
|
def get_datastores(service_instance, reference, datastore_names=None,
backing_disk_ids=None, get_all_datastores=False):
'''
Returns a list of vim.Datastore objects representing the datastores visible
from a VMware object, filtered by their names, or the backing disk
    canonical name or scsi_addresses
service_instance
The Service Instance Object from which to obtain datastores.
reference
The VMware object from which the datastores are visible.
datastore_names
The list of datastore names to be retrieved. Default value is None.
backing_disk_ids
The list of canonical names of the disks backing the datastores
to be retrieved. Only supported if reference is a vim.HostSystem.
Default value is None
get_all_datastores
Specifies whether to retrieve all disks in the host.
Default value is False.
'''
obj_name = get_managed_object_name(reference)
if get_all_datastores:
log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
else:
log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
'backing disk ids = (%s)',
obj_name, datastore_names, backing_disk_ids)
if backing_disk_ids and not isinstance(reference, vim.HostSystem):
raise salt.exceptions.ArgumentValueError(
'Unsupported reference type \'{0}\' when backing disk filter '
'is set'.format(reference.__class__.__name__))
if (not get_all_datastores) and backing_disk_ids:
# At this point we know the reference is a vim.HostSystem
log.trace('Filtering datastores with backing disk ids: %s',
backing_disk_ids)
storage_system = get_storage_system(service_instance, reference,
obj_name)
props = salt.utils.vmware.get_properties_of_managed_object(
storage_system, ['fileSystemVolumeInfo.mountInfo'])
mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
disk_datastores = []
# Non vmfs volumes aren't backed by a disk
for vol in [i.volume for i in mount_infos if
isinstance(i.volume, vim.HostVmfsVolume)]:
if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
# Skip volume if it doesn't contain an extent with a
# canonical name of interest
continue
log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
vol.name, [e.diskName for e in vol.extent])
disk_datastores.append(vol.name)
log.trace('Datastore found for disk filter: %s', disk_datastores)
if datastore_names:
datastore_names.extend(disk_datastores)
else:
datastore_names = disk_datastores
if (not get_all_datastores) and (not datastore_names):
log.trace('No datastore to be filtered after retrieving the datastores '
'backed by the disk id(s) \'%s\'', backing_disk_ids)
return []
log.trace('datastore_names = %s', datastore_names)
# Use the default traversal spec
if isinstance(reference, vim.HostSystem):
# Create a different traversal spec for hosts because it looks like the
# default doesn't retrieve the datastores
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
name='host_datastore_traversal',
path='datastore',
skip=False,
type=vim.HostSystem)
elif isinstance(reference, vim.ClusterComputeResource):
# Traversal spec for clusters
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
name='cluster_datastore_traversal',
path='datastore',
skip=False,
type=vim.ClusterComputeResource)
elif isinstance(reference, vim.Datacenter):
# Traversal spec for datacenter
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
name='datacenter_datastore_traversal',
path='datastore',
skip=False,
type=vim.Datacenter)
elif isinstance(reference, vim.StoragePod):
# Traversal spec for datastore clusters
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
name='datastore_cluster_traversal',
path='childEntity',
skip=False,
type=vim.StoragePod)
elif isinstance(reference, vim.Folder) and \
get_managed_object_name(reference) == 'Datacenters':
# Traversal of root folder (doesn't support multiple levels of Folders)
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
path='childEntity',
selectSet=[
vmodl.query.PropertyCollector.TraversalSpec(
path='datastore',
skip=False,
type=vim.Datacenter)],
skip=False,
type=vim.Folder)
else:
raise salt.exceptions.ArgumentValueError(
'Unsupported reference type \'{0}\''
''.format(reference.__class__.__name__))
items = get_mors_with_properties(service_instance,
object_type=vim.Datastore,
property_list=['name'],
container_ref=reference,
traversal_spec=traversal_spec)
log.trace('Retrieved %s datastores', len(items))
items = [i for i in items if get_all_datastores or i['name'] in
datastore_names]
log.trace('Filtered datastores: %s', [i['name'] for i in items])
return [i['object'] for i in items]
|
Returns a list of vim.Datastore objects representing the datastores visible
from a VMware object, filtered by their names or by the canonical names of
the disks backing them.
service_instance
    The Service Instance Object from which to obtain datastores.
reference
    The VMware object from which the datastores are visible.
datastore_names
    The list of datastore names to be retrieved. Default value is None.
backing_disk_ids
    The list of canonical names of the disks backing the datastores
    to be retrieved. Only supported if reference is a vim.HostSystem.
    Default value is None.
get_all_datastores
    Specifies whether to retrieve all datastores visible to the reference.
    Default value is False.
|
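A hedged call sketch; obtaining the service instance and host reference is outside this entry, and the canonical disk name below is fabricated:

# si and host_ref are placeholders that would come from the surrounding
# salt.utils.vmware helpers (service instance login and a host lookup).
datastores = get_datastores(si, host_ref,
                            backing_disk_ids=['naa.600508b1001c5e1a'])
for ds in datastores:
    print(ds.name)   # each item is a vim.Datastore managed object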
def index_all(self):
"""
Index all records under :attr:`record_path`.
"""
self.logger.debug('Start indexing all records under: %s',
self.record_path)
with self.db.connection():
for json_path in sorted(self.find_record_files()):
self.index_record(json_path)
|
Index all records under :attr:`record_path`.
|
def _identify_heterogeneity_blocks_seg(in_file, seg_file, params, work_dir, somatic_info):
"""Identify heterogeneity blocks corresponding to segmentation from CNV input file.
"""
def _segment_by_cns(target_chrom, freqs, coords):
with open(seg_file) as in_handle:
reader = csv.reader(in_handle, dialect="excel-tab")
next(reader) # header
for cur_chrom, start, end in (xs[:3] for xs in reader):
if cur_chrom == target_chrom:
block_freqs = []
for i, (freq, coord) in enumerate(zip(freqs, coords)):
if coord >= int(start) and coord < int(end):
block_freqs.append(freq)
elif coord >= int(end):
break
coords = coords[max(0, i - 1):]
freqs = freqs[max(0, i - 1):]
if len(block_freqs) > params["hetblock"]["min_alleles"]:
yield start, end
return _identify_heterogeneity_blocks_shared(in_file, _segment_by_cns, params, work_dir, somatic_info)
|
Identify heterogeneity blocks corresponding to segmentation from CNV input file.
|
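A minimal stand-alone sketch of the tab-separated segment iteration used above; the file content is fabricated:

import csv
import io

# Fabricated tab-separated segment records: chrom, start, end.
seg_text = "chrom\tstart\tend\n1\t100\t500\n1\t500\t900\n2\t0\t300\n"
reader = csv.reader(io.StringIO(seg_text), dialect="excel-tab")
next(reader)  # skip the header row, as the entry above does
for chrom, start, end in (row[:3] for row in reader):
    print(chrom, int(start), int(end))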
def _library_check(self):
"""
Checks for missing shared library dependencies in the IOU image.
"""
try:
output = yield from gns3server.utils.asyncio.subprocess_check_output("ldd", self._path)
except (FileNotFoundError, subprocess.SubprocessError) as e:
log.warn("Could not determine the shared library dependencies for {}: {}".format(self._path, e))
return
    p = re.compile(r"([\.\w]+)\s=>\s+not found")
missing_libs = p.findall(output)
if missing_libs:
raise IOUError("The following shared library dependencies cannot be found for IOU image {}: {}".format(self._path,
", ".join(missing_libs)))
|
Checks for missing shared library dependencies in the IOU image.
|
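A small self-contained sketch of the same "not found" parsing, using a canned ldd output string rather than a live subprocess:

import re

# Canned ldd output, fabricated for illustration.
output = """\
        linux-vdso.so.1 (0x00007ffd2a5f2000)
        libcrypto.so.1.0.0 => not found
        libc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 (0x00007f2a3b400000)
"""
p = re.compile(r"([\.\w]+)\s=>\s+not found")
print(p.findall(output))   # ['libcrypto.so.1.0.0']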
def get_content_version(cls, abspath: str) -> str:
"""Returns a version string for the resource at the given path.
This class method may be overridden by subclasses. The
default implementation is a hash of the file's contents.
.. versionadded:: 3.1
"""
data = cls.get_content(abspath)
hasher = hashlib.md5()
if isinstance(data, bytes):
hasher.update(data)
else:
for chunk in data:
hasher.update(chunk)
return hasher.hexdigest()
|
Returns a version string for the resource at the given path.
This class method may be overridden by subclasses. The
default implementation is a hash of the file's contents.
.. versionadded:: 3.1
|
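For illustration, a subclass could derive the version from file metadata instead of hashing contents; this override is a sketch, not part of Tornado itself:

import os
import tornado.web

class MTimeStaticHandler(tornado.web.StaticFileHandler):
    @classmethod
    def get_content_version(cls, abspath: str) -> str:
        # Hypothetical override: version from size + mtime rather than
        # hashing the whole file; cheaper, but coarser than the default.
        st = os.stat(abspath)
        return "%d-%d" % (st.st_size, int(st.st_mtime))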
def manifest(txt, dname):
"""Extracts file manifest for a body of text with the given directory."""
_, files = _expand_source(txt, dname, HTML)
return files
|
Extracts file manifest for a body of text with the given directory.
|
import matplotlib.pyplot as plt
from matplotlib.dates import DayLocator, DateFormatter

def plot_one_month(x, y, xlabel=None, ylabel=None, title=None, ylim=None):
    """Time span: one month.
    major tick = every day
    """
plt.close("all")
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111)
ax.plot(x, y)
days = DayLocator(range(365))
daysFmt = DateFormatter("%Y-%m-%d")
ax.xaxis.set_major_locator(days)
ax.xaxis.set_major_formatter(daysFmt)
ax.autoscale_view()
ax.grid()
    plt.setp(ax.xaxis.get_majorticklabels(), rotation=90)
if xlabel:
plt.xlabel(xlabel)
else:
plt.xlabel("Time")
if ylabel:
plt.ylabel(ylabel)
else:
plt.ylabel("Value")
if title:
plt.title(title)
else:
plt.title("%s to %s" % (str(x[0]), str(x[-1]) ) )
if ylim:
plt.ylim(ylim)
else:
        plt.ylim([min(y) - (max(y) - min(y)) * 0.05,
                  max(y) + (max(y) - min(y)) * 0.05])
return plt, ax
|
Time span: one month.
major tick = every day
|
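A hedged usage sketch with synthetic daily data; the dates and values stand in for real timestamps:

import numpy as np
from datetime import datetime, timedelta

# One month of fabricated daily values.
x = [datetime(2023, 1, 1) + timedelta(days=i) for i in range(31)]
y = np.random.rand(31).cumsum()
plt, ax = plot_one_month(x, y, ylabel="Cumulative value", title="January demo")
plt.savefig("one_month.png")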
def user_save(self):
"""Save the current user
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_user:
return
username = self.user_username_le.text()
first = self.user_first_le.text()
last = self.user_last_le.text()
email = self.user_email_le.text()
self.cur_user.username = username
self.cur_user.first_name = first
self.cur_user.last_name = last
self.cur_user.email = email
self.cur_user.save()
|
Save the current user
:returns: None
:rtype: None
:raises: None
|
def comments(context, obj):
""" Render comments for obj. """
content_type = ContentType.objects.get_for_model(obj.__class__)
comment_list = LogEntry.objects.filter(
content_type=content_type,
object_id=obj.pk,
action_flag=COMMENT
)
return {
'obj': obj,
'comment_list': comment_list,
'is_admin': context['is_admin'],
}
|
Render comments for obj.
|
def rev_after(self, rev: int) -> int:
"""Return the earliest future rev on which the value will change."""
self.seek(rev)
if self._future:
return self._future[-1][0]
|
Return the earliest future rev on which the value will change, or None if it never changes again.
|
def _distarray_no_missing(self, xc, xd):
"""Distance array calculation for data with no missing values. The 'pdist() function outputs a condense distance array, and squareform() converts this vector-form
distance vector to a square-form, redundant distance matrix.
*This could be a target for saving memory in the future, by not needing to expand to the redundant square-form matrix. """
from scipy.spatial.distance import pdist, squareform
#------------------------------------------#
def pre_normalize(x):
"""Normalizes continuous features so they are in the same range (0 to 1)"""
idx = 0
        # goes through all named features (doesn't really need to); this method is only applied to continuous features
for i in sorted(self.attr.keys()):
if self.attr[i][0] == 'discrete':
continue
cmin = self.attr[i][2]
diff = self.attr[i][3]
x[:, idx] -= cmin
x[:, idx] /= diff
idx += 1
return x
#------------------------------------------#
if self.data_type == 'discrete': # discrete features only
return squareform(pdist(self._X, metric='hamming'))
elif self.data_type == 'mixed': # mix of discrete and continuous features
d_dist = squareform(pdist(xd, metric='hamming'))
# Cityblock is also known as Manhattan distance
c_dist = squareform(pdist(pre_normalize(xc), metric='cityblock'))
return np.add(d_dist, c_dist) / self._num_attributes
    else:  # continuous features only
        return squareform(pdist(pre_normalize(xc), metric='cityblock'))
|
Distance array calculation for data with no missing values. The pdist() function outputs a condensed distance array, and squareform() converts this vector-form
distance vector to a square-form, redundant distance matrix.
*This could be a target for saving memory in the future, by not needing to expand to the redundant square-form matrix.
|
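A compact demonstration of the pdist()/squareform() round trip on toy data; the array values are made up:

import numpy as np
from scipy.spatial.distance import pdist, squareform

X = np.array([[0.0, 0.0],
              [1.0, 0.0],
              [0.0, 1.0]])
condensed = pdist(X, metric='cityblock')   # shape (3,): pairwise distances
full = squareform(condensed)               # shape (3, 3): redundant matrix
print(condensed)   # [1. 1. 2.]
print(full[0, 1])  # 1.0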
def render_pyquery(self, **kwargs):
"""Render the graph, and return a pyquery wrapped tree"""
from pyquery import PyQuery as pq
return pq(self.render(**kwargs), parser='html')
|
Render the graph, and return a pyquery wrapped tree
|
def _trim_tree(state):
"""Trim empty leaf nodes from the tree.
- To simplify the tree conversion, empty nodes are added before it is known if they
will contain items that connect back to the authenticated subject. If there are
no connections, the nodes remain empty, which causes them to be removed here.
- Removing a leaf node may cause the parent to become a new empty leaf node, so the
function is repeated until there are no more empty leaf nodes.
"""
for n in list(state.tree.leaf_node_gen):
if n.type_str == TYPE_NODE_TAG:
n.parent.child_list.remove(n)
return _trim_tree(state)
|
Trim empty leaf nodes from the tree.
- To simplify the tree conversion, empty nodes are added before it is known if they
will contain items that connect back to the authenticated subject. If there are
no connections, the nodes remain empty, which causes them to be removed here.
- Removing a leaf node may cause the parent to become a new empty leaf node, so the
function is repeated until there are no more empty leaf nodes.
|
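A self-contained sketch of the same trim-until-stable recursion on a minimal node class; Node and the tree shape here are invented for illustration:

class Node:
    def __init__(self, parent=None):
        self.parent = parent
        self.child_list = []
        if parent is not None:
            parent.child_list.append(self)

def iter_nodes(n):
    yield n
    for c in list(n.child_list):
        yield from iter_nodes(c)

def trim(root):
    # Drop empty leaves; removing one may empty its parent, so recurse
    # until a pass removes nothing. The root itself is always kept.
    empty = [n for n in iter_nodes(root)
             if n.parent is not None and not n.child_list]
    if empty:
        for n in empty:
            n.parent.child_list.remove(n)
        trim(root)

root = Node(); a = Node(root); Node(a)   # chain: root -> a -> leaf
trim(root)
print(root.child_list)   # []: the leaf went first, then the emptied 'a'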
def init(opts):
'''
Open the connection to the network device
managed through netmiko.
'''
proxy_dict = opts.get('proxy', {})
opts['multiprocessing'] = proxy_dict.get('multiprocessing', False)
netmiko_connection_args = proxy_dict.copy()
netmiko_connection_args.pop('proxytype', None)
netmiko_device['always_alive'] = netmiko_connection_args.pop('always_alive',
opts.get('proxy_always_alive', True))
try:
connection = ConnectHandler(**netmiko_connection_args)
netmiko_device['connection'] = connection
netmiko_device['initialized'] = True
netmiko_device['args'] = netmiko_connection_args
netmiko_device['up'] = True
if not netmiko_device['always_alive']:
netmiko_device['connection'].disconnect()
    except (NetMikoTimeoutException, NetMikoAuthenticationException):
        log.error('Unable to set up the netmiko connection', exc_info=True)
return True
|
Open the connection to the network device
managed through netmiko.
|
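A hedged sketch of the opts structure this init() would consume; the device address and credentials are placeholders, and calling init() here would attempt a live SSH connection:

# Fabricated opts as a netmiko proxy minion might receive them; keys
# mirror the netmiko ConnectHandler arguments plus the proxy-specific
# extras popped above.
opts = {
    'proxy': {
        'proxytype': 'netmiko',     # popped before ConnectHandler is called
        'device_type': 'cisco_ios',
        'host': '192.0.2.10',       # documentation-range address
        'username': 'salt',
        'password': 'hunter2',
        'always_alive': False,      # disconnect between commands
    }
}
init(opts)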
def _make_reversed_wildcards(self, old_length=-1):
"""Creates a full mapping from all wildcard translations to the corresponding wildcards"""
if len(self._reversed_wildcards) > 0:
# We already created reversed wildcards, so we don't need to do all of them
# again
start = old_length
else:
start = -1
for wildcards, func in self._wildcard_functions.items():
for irun in range(start, len(self)):
translated_name = func(irun)
            if translated_name not in self._reversed_wildcards:
self._reversed_wildcards[translated_name] = ([], wildcards)
self._reversed_wildcards[translated_name][0].append(irun)
|
Creates a full mapping from all wildcard translations to the corresponding wildcards
|
def _handle_exception(self, row, exception):
"""
    Logs an exception that occurred during transformation of a row.
    :param list|dict|tuple row: The source row.
:param Exception exception: The exception.
"""
self._log('Error during processing of line {0:d}.'.format(self._source_reader.row_number))
self._log(row)
self._log(str(exception))
self._log(traceback.format_exc())
|
Logs an exception that occurred during transformation of a row.
:param list|dict|tuple row: The source row.
:param Exception exception: The exception.
|
def make_cutter(self):
"""
Create solid to subtract from material to make way for the fastener's
head (just the head)
"""
return cadquery.Workplane('XY') \
.circle(self.access_diameter / 2) \
.extrude(self.access_height)
|
Create solid to subtract from material to make way for the fastener's
head (just the head)
|
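A hedged cadquery sketch cutting the access cavity from stock; the fastener object and its dimensions are assumptions, not part of the entry above:

import cadquery

class AccessCutter:
    """Fabricated stand-in for the fastener object described above."""
    access_diameter = 6.0
    access_height = 10.0

    def make_cutter(self):
        return cadquery.Workplane('XY') \
            .circle(self.access_diameter / 2) \
            .extrude(self.access_height)

stock = cadquery.Workplane('XY').box(30, 30, 10)
result = stock.cut(AccessCutter().make_cutter())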