def remove_project(self, node):
"""
Removes the project associated with given node.
:param node: Node.
:type node: ProjectNode or DirectoryNode or FileNode
:return: Method success.
:rtype: bool
"""
if node.family == "Project":
self.__script_editor.remove_project(node.path)
return True
    for ancestor in foundations.walkers.nodes_walker(node, ascendants=True):
        if ancestor.family == "Project" and ancestor is not self.__script_editor.model.default_project_node:
            self.__script_editor.remove_project(ancestor.path)
            return True
    return False
def get_setting(key, *default):
"""Return specific search setting from Django conf."""
if default:
return get_settings().get(key, default[0])
else:
return get_settings()[key]
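A usage sketch (the setting names are illustrative, not from the source):

# With a default supplied, a missing key falls back instead of raising KeyError.
page_size = get_setting('SEARCH_PAGE_SIZE', 20)
# Without a default, a missing key raises KeyError from the settings dict.
backend = get_setting('SEARCH_BACKEND')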
def get_picture_elements(context, instance):
"""
    Create a context used to render a ``<picture>`` element together with all of its
    ``<source>`` elements. It returns a list of dicts, each containing the information
    needed to render one ``<source>`` element.
    The purpose of this HTML entity is to display images with art direction. For normal
    images use the ``<img>`` element.
"""
if hasattr(instance, 'image') and hasattr(instance.image, 'exif'):
aspect_ratio = compute_aspect_ratio(instance.image)
elif 'image' in instance.glossary and 'width' in instance.glossary['image']:
aspect_ratio = compute_aspect_ratio_with_glossary(instance.glossary)
        instance.glossary['ramdom_svg_color'] = 'hsl({}, 30%, 80%, 0.8)'.format(random.randint(0, 360))
else:
# if accessing the image file fails or fake image fails, abort here
logger.warning("Unable to compute aspect ratio of image '{}'".format(instance.image))
return
complete_glossary = instance.get_complete_glossary()
container_max_heights = complete_glossary.get('container_max_heights', {})
resize_options = instance.glossary.get('resize_options', {})
crop = 'crop' in resize_options
upscale = 'upscale' in resize_options
if hasattr(instance.image, 'subject_location'):
subject_location = instance.image.subject_location and 'subject_location' in resize_options
else:
subject_location = None
max_width = 0
max_zoom = 0
elements = []
for bp in complete_glossary['breakpoints']:
try:
width = float(complete_glossary['container_max_widths'][bp])
except KeyError:
width = 0
max_width = max(max_width, round(width))
size = None
try:
image_height = parse_responsive_length(instance.glossary['responsive_heights'][bp])
except KeyError:
image_height = (None, None)
if image_height[0]: # height was given in px
size = (int(width), image_height[0])
elif image_height[1]: # height was given in %
size = (int(width), int(round(width * aspect_ratio * image_height[1])))
elif bp in container_max_heights:
container_height = parse_responsive_length(container_max_heights[bp])
if container_height[0]:
size = (int(width), container_height[0])
elif container_height[1]:
size = (int(width), int(round(width * aspect_ratio * container_height[1])))
try:
zoom = int(
instance.glossary['responsive_zoom'][bp].strip().rstrip('%')
)
except (AttributeError, KeyError, ValueError):
zoom = 0
max_zoom = max(max_zoom, zoom)
if size is None:
            # as a fallback, adapt the height to the current width
size = (int(width), int(round(width * aspect_ratio)))
try:
media_queries = complete_glossary['media_queries'][bp][:]
except KeyError:
media_queries = []
media = ' and '.join(media_queries)
elem = {'tag': 'source', 'size': size, 'zoom': zoom, 'crop': crop,
'upscale': upscale, 'subject_location': subject_location, 'media': media}
if 'high_resolution' in resize_options:
elem['size2'] = (size[0] * 2, size[1] * 2)
elements.append(elem)
# add a fallback image for old browsers which can't handle the <picture> element
if image_height[1]:
size = (int(max_width), int(round(max_width * aspect_ratio * image_height[1])))
else:
size = (int(max_width), int(round(max_width * aspect_ratio)))
elements.append({'tag': 'img', 'size': size, 'zoom': max_zoom, 'crop': crop,
'upscale': upscale, 'subject_location': subject_location})
return elements
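For orientation, a hypothetical shape of the returned list for a single breakpoint plus the fallback image (all values invented):

elements = [
    {'tag': 'source', 'size': (576, 384), 'zoom': 0, 'crop': False,
     'upscale': False, 'subject_location': None, 'media': '(max-width: 768px)'},
    {'tag': 'img', 'size': (1140, 760), 'zoom': 0, 'crop': False,
     'upscale': False, 'subject_location': None},
]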
def populate_username(self, request, user):
"""
Fills in a valid username, if required and missing. If the
username is already present it is assumed to be valid
(unique).
"""
from .utils import user_username, user_email, user_field
first_name = user_field(user, 'first_name')
last_name = user_field(user, 'last_name')
email = user_email(user)
username = user_username(user)
if app_settings.USER_MODEL_USERNAME_FIELD:
user_username(
user,
username or self.generate_unique_username([
first_name,
last_name,
email,
username,
'user']))
def reload(self, **params):
"""
Reloads the datatype from Riak.
    .. warning:: This clears any local modifications you might have
       made.
:param r: the read quorum
:type r: integer, string, None
:param pr: the primary read quorum
:type pr: integer, string, None
:param basic_quorum: whether to use the "basic quorum" policy
for not-founds
:type basic_quorum: bool
:param notfound_ok: whether to treat not-found responses as successful
:type notfound_ok: bool
:param timeout: a timeout value in milliseconds
:type timeout: int
:param include_context: whether to return the opaque context
as well as the value, which is useful for removal operations
on sets and maps
:type include_context: bool
:rtype: :class:`Datatype`
"""
if not self.bucket:
raise ValueError('bucket property not assigned')
if not self.key:
raise ValueError('key property not assigned')
dtype, value, context = self.bucket._client._fetch_datatype(
self.bucket, self.key, **params)
    if dtype != self.type_name:
raise TypeError("Expected datatype {} but "
"got datatype {}".format(self.__class__,
TYPES[dtype]))
self.clear()
self._context = context
self._set_value(value)
return self
def serialize(self, data):
""" Determine & invoke the proper serializer method
If data is a list then the serialize_datas method will
be run otherwise serialize_data.
"""
super(Serializer, self).serialize(data)
self.resp.content_type += '; header=present'
    # Debug aid: pretty-print the payload being serialized.
    import pprint
    pprint.PrettyPrinter().pprint(data)
def get_node_annotation_layers(docgraph):
"""
    WARNING: this is highly inefficient!
    Fix this via Issue #36.
    Returns
    -------
    all_layers : set
        the set of all annotation layers used for annotating nodes in the
        given graph
"""
all_layers = set()
for node_id, node_attribs in docgraph.nodes_iter(data=True):
for layer in node_attribs['layers']:
all_layers.add(layer)
return all_layers
def is_group_or_super_group(cls, obj) -> bool:
"""
    Check whether the chat is a group or a super-group.
    :param obj: the chat (or object containing a chat) to check
    :return: True if the chat type is GROUP or SUPER_GROUP
"""
return cls._check(obj, [cls.GROUP, cls.SUPER_GROUP])
def BGPSessionState_BGPPeerState(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
BGPSessionState = ET.SubElement(config, "BGPSessionState", xmlns="http://brocade.com/ns/brocade-notification-stream")
BGPPeerState = ET.SubElement(BGPSessionState, "BGPPeerState")
BGPPeerState.text = kwargs.pop('BGPPeerState')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def subscribeToDeviceEvents(self, typeId="+", deviceId="+", eventId="+", msgFormat="+", qos=0):
"""
Subscribe to device event messages
# Parameters
typeId (string): typeId for the subscription, optional. Defaults to all device types (MQTT `+` wildcard)
deviceId (string): deviceId for the subscription, optional. Defaults to all devices (MQTT `+` wildcard)
eventId (string): eventId for the subscription, optional. Defaults to all events (MQTT `+` wildcard)
msgFormat (string): msgFormat for the subscription, optional. Defaults to all formats (MQTT `+` wildcard)
qos (int): MQTT quality of service level to use (`0`, `1`, or `2`)
# Returns
int: If the subscription was successful then the return Message ID (mid) for the subscribe request
will be returned. The mid value can be used to track the subscribe request by checking against
the mid argument if you register a subscriptionCallback method.
If the subscription fails then the return value will be `0`
"""
if self._config.isQuickstart() and deviceId == "+":
self.logger.warning(
"QuickStart applications do not support wildcard subscription to events from all devices"
)
return 0
topic = "iot-2/type/%s/id/%s/evt/%s/fmt/%s" % (typeId, deviceId, eventId, msgFormat)
return self._subscribe(topic, qos)
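A usage sketch (the appClient instance is assumed, not defined in the source):

# Subscribe to JSON-formatted "status" events from every device of one type.
mid = appClient.subscribeToDeviceEvents(typeId="sensor", eventId="status", msgFormat="json")
if mid == 0:
    print("Subscription failed (e.g. wildcard subscription on Quickstart)")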
def init(self):
"""Init the connection to the CouchDB server."""
if not self.export_enable:
return None
server_uri = 'tcp://{}:{}'.format(self.host, self.port)
try:
self.context = zmq.Context()
publisher = self.context.socket(zmq.PUB)
publisher.bind(server_uri)
except Exception as e:
logger.critical("Cannot connect to ZeroMQ server %s (%s)" % (server_uri, e))
sys.exit(2)
else:
logger.info("Connected to the ZeroMQ server %s" % server_uri)
return publisher
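For context, a minimal subscriber-side sketch matching the publisher created above (host and port are illustrative):

import zmq

context = zmq.Context()
subscriber = context.socket(zmq.SUB)
subscriber.connect('tcp://localhost:5678')       # must match the publisher's bind address
subscriber.setsockopt_string(zmq.SUBSCRIBE, '')  # subscribe to every topic
message = subscriber.recv_string()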
def compare_field_caches(self, replica, original):
"""Verify original is subset of replica"""
if original is None:
original = []
if replica is None:
replica = []
self.pr_dbg("Comparing orig with %s fields to replica with %s fields" %
(len(original), len(replica)))
# convert list into dict, with each item's ['name'] as key
orig = self.list_to_compare_dict(original)
if orig is None:
self.pr_dbg("Original has duplicate fields")
return 1
repl = self.list_to_compare_dict(replica)
if repl is None:
self.pr_dbg("Replica has duplicate fields")
return 1
# search orig for each item in repl
# if any items in repl not within orig or vice versa, then complain
# make sure contents of each item match
orig_found = {}
for (key, field) in iteritems(repl):
field_name = field['name']
if field_name not in orig:
self.pr_dbg("Replica has field not found in orig %s: %s" %
(field_name, field))
return 1
orig_found[field_name] = True
if orig[field_name] != field:
self.pr_dbg("Field in replica doesn't match orig:")
self.pr_dbg("orig:%s\nrepl:%s" % (orig[field_name], field))
return 1
    unfound = set(orig.keys()) - set(orig_found.keys())
if len(unfound) > 0:
self.pr_dbg("Orig contains fields that were not in replica")
self.pr_dbg('%s' % unfound)
return 1
# We don't care about case when replica has more fields than orig
# unfound = set(repl.keys()) - set(orig_found.keys())
# if len(unfound) > 0:
# self.pr_dbg("Replica contains fields that were not in orig")
# self.pr_dbg('%s' % unfound)
# return 1
self.pr_dbg("Original matches replica")
return 0
def percent(args=None):
'''
Return partition information for volumes mounted on this minion
CLI Example:
.. code-block:: bash
salt '*' disk.percent /var
'''
if __grains__['kernel'] == 'Linux':
cmd = 'df -P'
elif __grains__['kernel'] == 'OpenBSD' or __grains__['kernel'] == 'AIX':
cmd = 'df -kP'
else:
cmd = 'df'
ret = {}
out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
for line in out:
if not line:
continue
if line.startswith('Filesystem'):
continue
comps = line.split()
while len(comps) >= 2 and not comps[1].isdigit():
comps[0] = '{0} {1}'.format(comps[0], comps[1])
comps.pop(1)
if len(comps) < 2:
continue
try:
if __grains__['kernel'] == 'Darwin':
ret[comps[8]] = comps[4]
else:
ret[comps[5]] = comps[4]
except IndexError:
log.error('Problem parsing disk usage information')
ret = {}
if args and args not in ret:
log.error(
'Problem parsing disk usage information: Partition \'%s\' '
'does not exist!', args
)
ret = {}
elif args:
return ret[args]
return ret
def is_cython(obj):
"""Check if an object is a Cython function or method"""
# TODO(suo): We could split these into two functions, one for Cython
# functions and another for Cython methods.
# TODO(suo): There doesn't appear to be a Cython function 'type' we can
# check against via isinstance. Please correct me if I'm wrong.
def check_cython(x):
return type(x).__name__ == "cython_function_or_method"
# Check if function or method, respectively
return check_cython(obj) or \
(hasattr(obj, "__func__") and check_cython(obj.__func__))
def trade_history(
self, from_=None, count=None, from_id=None, end_id=None,
order=None, since=None, end=None, pair=None
):
"""
Returns trade history.
To use this method you need a privilege of the info key.
:param int or None from_: trade ID, from which the display starts (default 0)
:param int or None count: the number of trades for display (default 1000)
:param int or None from_id: trade ID, from which the display starts (default 0)
:param int or None end_id: trade ID on which the display ends (default inf.)
:param str or None order: sorting (default 'DESC')
:param int or None since: the time to start the display (default 0)
:param int or None end: the time to end the display (default inf.)
:param str or None pair: pair to be displayed (ex. 'btc_usd')
"""
return self._trade_api_call(
'TradeHistory', from_=from_, count=count, from_id=from_id, end_id=end_id,
order=order, since=since, end=end, pair=pair
)
def extract_rows(data, *rows):
"""Extract rows specified in the argument list.
    >>> chart_data.extract_rows([[10, 20], [30, 40], [50, 60]], 1, 2)
    [[30, 40], [50, 60]]
"""
    try:
        return [data[r] for r in rows]
    except IndexError:
        raise IndexError("data=%s rows=%s" % (data, rows))
def allocate(self, nodes, append=True):
    """Allocate all nodes from the `nodes` list to this route.
    Parameters
    ----------
    nodes : list of Node
        Nodes to allocate; each node is first deallocated from its
        current route, if any.
    append : bool, defaults to True
        If True, append the nodes to the end of the route; otherwise
        insert them at the beginning.
    """
nodes_demand = 0
    for node in list(nodes):  # iterate over a copy; deallocation may mutate the source list
if node._allocation:
node._allocation.deallocate([node])
node._allocation = self
nodes_demand = nodes_demand + node.demand()
if append:
self._nodes.append(node)
else:
self._nodes.insert(0, node)
self._demand = self._demand + nodes_demand
def create_position(self, params={}):
"""
Creates a position
http://dev.wheniwork.com/#create-update-position
"""
url = "/2/positions/"
body = params
data = self._post_resource(url, body)
return self.position_from_json(data["position"])
def set_up_dirs(proc_name, output_dir=None, work_dir=None, log_dir=None):
""" Creates output_dir, work_dir, and sets up log
"""
output_dir = safe_mkdir(adjust_path(output_dir or join(os.getcwd(), proc_name)), 'output_dir')
debug('Saving results into ' + output_dir)
work_dir = safe_mkdir(work_dir or join(output_dir, 'work'), 'working directory')
info('Using work directory ' + work_dir)
log_fpath = set_up_log(log_dir or safe_mkdir(join(work_dir, 'log')), proc_name + '.log')
return output_dir, work_dir, log_fpath
def _phi0(self, tau, delta):
"""Ideal gas Helmholtz free energy and derivatives
Parameters
----------
tau : float
Inverse reduced temperature Tc/T, [-]
delta : float
Reduced density rho/rhoc, [-]
Returns
-------
prop : dictionary with ideal adimensional helmholtz energy and deriv
fio, [-]
fiot: ∂fio/∂τ|δ
fiod: ∂fio/∂δ|τ
fiott: ∂²fio/∂τ²|δ
fiodt: ∂²fio/∂τ∂δ
fiodd: ∂²fio/∂δ²|τ
References
----------
IAPWS, Revised Release on the IAPWS Formulation 1995 for the
Thermodynamic Properties of Ordinary Water Substance for General and
Scientific Use, September 2016, Table 4
http://www.iapws.org/relguide/IAPWS-95.html
"""
Fi0 = self.Fi0
fio = Fi0["ao_log"][0]*log(delta)+Fi0["ao_log"][1]*log(tau)
fiot = +Fi0["ao_log"][1]/tau
fiott = -Fi0["ao_log"][1]/tau**2
fiod = 1/delta
fiodd = -1/delta**2
fiodt = 0
for n, t in zip(Fi0["ao_pow"], Fi0["pow"]):
fio += n*tau**t
if t != 0:
fiot += t*n*tau**(t-1)
if t not in [0, 1]:
fiott += n*t*(t-1)*tau**(t-2)
for n, t in zip(Fi0["ao_exp"], Fi0["titao"]):
fio += n*log(1-exp(-tau*t))
fiot += n*t*((1-exp(-t*tau))**-1-1)
fiott -= n*t**2*exp(-t*tau)*(1-exp(-t*tau))**-2
# Extension to especial terms of air
if "ao_exp2" in Fi0:
for n, g, C in zip(Fi0["ao_exp2"], Fi0["titao2"], Fi0["sum2"]):
fio += n*log(C+exp(g*tau))
fiot += n*g/(C*exp(-g*tau)+1)
fiott += C*n*g**2*exp(-g*tau)/(C*exp(-g*tau)+1)**2
prop = {}
prop["fio"] = fio
prop["fiot"] = fiot
prop["fiott"] = fiott
prop["fiod"] = fiod
prop["fiodd"] = fiodd
prop["fiodt"] = fiodt
return prop
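The terms assembled above follow the IAPWS-95 ideal-gas form; mapping the code's coefficient arrays to symbols (ao_log -> a_0, a_1; ao_pow/pow -> n_i, t_i; ao_exp/titao -> m_j, gamma_j) gives, as a sketch:

\phi^0 = a_0 \ln\delta + a_1 \ln\tau + \sum_i n_i \tau^{t_i} + \sum_j m_j \ln\left(1 - e^{-\gamma_j \tau}\right)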
def update_eol(self, os_name):
"""Update end of line status."""
os_name = to_text_string(os_name)
value = {"nt": "CRLF", "posix": "LF"}.get(os_name, "CR")
self.set_value(value)
def main():
"""Main function"""
# **** For Pytest ****
# We need to create MainWindow **here** to avoid passing pytest
# options to Spyder
if running_under_pytest():
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # Python 2
options = Mock()
options.working_directory = None
options.profile = False
options.multithreaded = False
options.new_instance = False
options.project = None
options.window_title = None
options.opengl_implementation = None
options.debug_info = None
options.debug_output = None
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
app = initialize()
window = run_spyder(app, options, None)
return window
# **** Collect command line options ****
# Note regarding Options:
# It's important to collect options before monkey patching sys.exit,
# otherwise, argparse won't be able to exit if --help option is passed
options, args = get_options()
# **** Set OpenGL implementation to use ****
if options.opengl_implementation:
option = options.opengl_implementation
set_opengl_implementation(option)
else:
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
# **** Handle hide_console option ****
if options.show_console:
print("(Deprecated) --show console does nothing, now the default "
" behavior is to show the console, use --hide-console if you "
"want to hide it")
if set_attached_console_visible is not None:
set_attached_console_visible(not options.hide_console
or options.reset_config_files
or options.reset_to_defaults
or options.optimize
or bool(get_debug_level()))
# **** Set debugging info ****
setup_logging(options)
# **** Create the application ****
app = initialize()
# **** Handle other options ****
if options.reset_config_files:
# <!> Remove all configuration files!
reset_config_files()
return
elif options.reset_to_defaults:
# Reset Spyder settings to defaults
CONF.reset_to_defaults(save=True)
return
elif options.optimize:
# Optimize the whole Spyder's source code directory
import spyder
programs.run_python_script(module="compileall",
args=[spyder.__path__[0]], p_args=['-O'])
return
# **** Show crash dialog ****
if CONF.get('main', 'crash', False) and not DEV:
CONF.set('main', 'crash', False)
if SPLASH is not None:
SPLASH.hide()
QMessageBox.information(
None, "Spyder",
"Spyder crashed during last session.<br><br>"
"If Spyder does not start at all and <u>before submitting a "
"bug report</u>, please try to reset settings to defaults by "
"running Spyder with the command line option '--reset':<br>"
"<span style=\'color: #555555\'><b>spyder --reset</b></span>"
"<br><br>"
"<span style=\'color: #ff5555\'><b>Warning:</b></span> "
"this command will remove all your Spyder configuration files "
"located in '%s').<br><br>"
"If Spyder still fails to launch, you should consult our "
"comprehensive <b><a href=\"%s\">Troubleshooting Guide</a></b>, "
"which when followed carefully solves the vast majority of "
"crashes; also, take "
"the time to search for <a href=\"%s\">known bugs</a> or "
"<a href=\"%s\">discussions</a> matching your situation before "
"submitting a report to our <a href=\"%s\">issue tracker</a>. "
"Your feedback will always be greatly appreciated."
"" % (get_conf_path(), __trouble_url__, __project_url__,
__forum_url__, __project_url__))
# **** Create main window ****
mainwindow = None
try:
mainwindow = run_spyder(app, options, args)
except FontError as fontError:
QMessageBox.information(None, "Spyder",
"Spyder was unable to load the <i>Spyder 3</i> "
"icon theme. That's why it's going to fallback to the "
"theme used in Spyder 2.<br><br>"
"For that, please close this window and start Spyder again.")
CONF.set('appearance', 'icon_theme', 'spyder 2')
except BaseException:
CONF.set('main', 'crash', True)
import traceback
traceback.print_exc(file=STDERR)
traceback.print_exc(file=open('spyder_crash.log', 'w'))
if mainwindow is None:
        # An exception occurred
if SPLASH is not None:
SPLASH.hide()
return
ORIGINAL_SYS_EXIT()
def start_all_linking(self, mode, group):
"""Put the IM into All-Linking mode.
Puts the IM into All-Linking mode for 4 minutes.
Parameters:
mode: 0 | 1 | 3 | 255
0 - PLM is responder
1 - PLM is controller
3 - Device that initiated All-Linking is Controller
        255 - Delete All-Link
group: All-Link group number (0 - 255)
"""
msg = StartAllLinking(mode, group)
self.send_msg(msg)
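A usage sketch (the plm object is assumed to be a connected IM instance):

# Put the IM into controller mode (mode 1) for All-Link group 1.
plm.start_all_linking(mode=1, group=0x01)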
def _set_wmi_setting(wmi_class_name, setting, value, server):
'''
Set the value of the setting for the provided class.
'''
with salt.utils.winapi.Com():
try:
connection = wmi.WMI(namespace=_WMI_NAMESPACE)
wmi_class = getattr(connection, wmi_class_name)
objs = wmi_class(Name=server)[0]
        except wmi.x_wmi as error:
            _LOG.error('Encountered WMI error: %s', error.com_error)
            return False
        except (AttributeError, IndexError) as error:
            _LOG.error('Error getting %s: %s', wmi_class_name, error)
            return False
try:
setattr(objs, setting, value)
return True
except wmi.x_wmi as error:
_LOG.error('Encountered WMI error: %s', error.com_error)
except AttributeError as error:
_LOG.error('Error setting %s: %s', setting, error)
return False
def all():
"""Returns all active registered goals, sorted alphabetically by name.
:API: public
"""
return [goal for _, goal in sorted(Goal._goal_by_name.items()) if goal.active]
async def filter_by(cls, db, offset=None, limit=None, **kwargs):
"""Query by attributes iteratively. Ordering is not supported
Example:
User.get_by(db, age=[32, 54])
User.get_by(db, age=23, name="guido")
"""
if limit and type(limit) is not int:
raise InvalidQuery('If limit is supplied it must be an int')
if offset and type(offset) is not int:
raise InvalidQuery('If offset is supplied it must be an int')
ids_to_iterate = await cls._get_ids_filter_by(db, **kwargs)
if offset:
# Using offset without order_by is pretty strange, but allowed
if limit:
ids_to_iterate = ids_to_iterate[offset:offset+limit]
else:
ids_to_iterate = ids_to_iterate[offset:]
elif limit:
ids_to_iterate = ids_to_iterate[:limit]
for key in ids_to_iterate:
yield await cls.load(db, key)
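Since this is an async generator, it is consumed with async for (db and User are assumed from the surrounding codebase):

# Iterate over the first ten users aged 23, inside a coroutine.
async for user in User.filter_by(db, age=23, limit=10):
    print(user)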
async def write(self, writer: Any,
close_boundary: bool=True) -> None:
"""Write body."""
if not self._parts:
return
for part, encoding, te_encoding in self._parts:
await writer.write(b'--' + self._boundary + b'\r\n')
await writer.write(part._binary_headers)
if encoding or te_encoding:
w = MultipartPayloadWriter(writer)
if encoding:
w.enable_compression(encoding)
if te_encoding:
w.enable_encoding(te_encoding)
await part.write(w) # type: ignore
await w.write_eof()
else:
await part.write(writer)
await writer.write(b'\r\n')
if close_boundary:
await writer.write(b'--' + self._boundary + b'--\r\n')
def main(args=None):
""" Output information about `streamsx` and the environment.
Useful for support to get key information for use of `streamsx`
and Python in IBM Streams.
"""
_parse_args(args)
streamsx._streams._version._mismatch_check('streamsx.topology.context')
srp = pkg_resources.working_set.find(pkg_resources.Requirement.parse('streamsx'))
if srp is not None:
srv = srp.parsed_version
location = srp.location
spkg = 'package'
else:
srv = streamsx._streams._version.__version__
location = os.path.dirname(streamsx._streams._version.__file__)
location = os.path.dirname(location)
location = os.path.dirname(location)
tk_path = (os.path.join('com.ibm.streamsx.topology', 'opt', 'python', 'packages'))
spkg = 'toolkit' if location.endswith(tk_path) else 'unknown'
print('streamsx==' + str(srv) + ' (' + spkg + ')')
print(' location: ' + str(location))
print('Python version:' + str(sys.version))
print('PYTHONHOME=' + str(os.environ.get('PYTHONHOME', 'unset')))
print('PYTHONPATH=' + str(os.environ.get('PYTHONPATH', 'unset')))
print('PYTHONWARNINGS=' + str(os.environ.get('PYTHONWARNINGS', 'unset')))
print('STREAMS_INSTALL=' + str(os.environ.get('STREAMS_INSTALL', 'unset')))
print('JAVA_HOME=' + str(os.environ.get('JAVA_HOME', 'unset')))
return 0
def clean_ufo(path):
"""Make sure old UFO data is removed, as it may contain deleted glyphs."""
if path.endswith(".ufo") and os.path.exists(path):
shutil.rmtree(path)
def partial_derivative_scalar(self, U, V, y=0):
"""Compute partial derivative :math:`C(u|v)` of cumulative density of single values."""
self.check_fit()
X = np.column_stack((U, V))
return self.partial_derivative(X, y)
def _bilinear_interp(xyref, zref, xi, yi):
"""
Perform bilinear interpolation of four 2D arrays located at
points on a regular grid.
Parameters
----------
xyref : list of 4 (x, y) pairs
A list of 4 ``(x, y)`` pairs that form a rectangle.
    zref : 3D `~numpy.ndarray`
        A 3D `~numpy.ndarray` of shape ``(4, nx, ny)``. The first
        axis corresponds to ``xyref``, i.e. ``zref[0, :, :]`` is
        the 2D array located at ``xyref[0]``.
xi, yi : float
The ``(xi, yi)`` point at which to perform the
interpolation. The ``(xi, yi)`` point must lie within the
rectangle defined by ``xyref``.
Returns
-------
result : 2D `~numpy.ndarray`
The 2D interpolated array.
"""
if len(xyref) != 4:
raise ValueError('xyref must contain only 4 (x, y) pairs')
if zref.shape[0] != 4:
raise ValueError('zref must have a length of 4 on the first '
'axis.')
xyref = [tuple(i) for i in xyref]
idx = sorted(range(len(xyref)), key=xyref.__getitem__)
xyref = sorted(xyref) # sort by x, then y
(x0, y0), (_x0, y1), (x1, _y0), (_x1, _y1) = xyref
if x0 != _x0 or x1 != _x1 or y0 != _y0 or y1 != _y1:
raise ValueError('The refxy points do not form a rectangle.')
if not np.isscalar(xi):
xi = xi[0]
if not np.isscalar(yi):
yi = yi[0]
if not x0 <= xi <= x1 or not y0 <= yi <= y1:
raise ValueError('The (x, y) input is not within the rectangle '
'defined by xyref.')
data = np.asarray(zref)[idx]
weights = np.array([(x1 - xi) * (y1 - yi), (x1 - xi) * (yi - y0),
(xi - x0) * (y1 - yi), (xi - x0) * (yi - y0)])
norm = (x1 - x0) * (y1 - y0)
return np.sum(data * weights[:, None, None], axis=0) / norm
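A small numeric check (arrays invented): at the rectangle's centre all four weights equal 0.25, so the result is the mean of the corner arrays.

import numpy as np

corners = [(0, 0), (0, 1), (1, 0), (1, 1)]
zref = np.array([np.full((2, 2), v) for v in (0.0, 1.0, 2.0, 3.0)])
centre = _bilinear_interp(corners, zref, 0.5, 0.5)   # -> 1.5 everywhere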
def get_region_for_chip(x, y, level=3):
"""Get the region word for the given chip co-ordinates.
Parameters
----------
x : int
x co-ordinate
y : int
y co-ordinate
level : int
Level of region to build. 0 is the most coarse and 3 is the finest.
When 3 is used the specified region will ONLY select the given chip,
for other regions surrounding chips will also be selected.
Returns
-------
int
A 32-bit value representing the co-ordinates of the chunk of SpiNNaker
chips that should be selected and the blocks within this chunk that are
selected. As long as bits (31:16) are the same these values may be
OR-ed together to increase the number of sub-blocks selected.
"""
shift = 6 - 2*level
bit = ((x >> shift) & 3) + 4*((y >> shift) & 3) # bit in bits 15:0 to set
mask = 0xffff ^ ((4 << shift) - 1) # in {0xfffc, 0xfff0, 0xffc0, 0xff00}
nx = x & mask # The mask guarantees that bits 1:0 will be cleared
ny = y & mask # The mask guarantees that bits 1:0 will be cleared
# sig bits x | sig bits y | 2-bit level | region select bits
region = (nx << 24) | (ny << 16) | (level << 16) | (1 << bit)
return region
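A worked example at the finest level, following the bit layout above:

# For chip (0, 0) at level 3: shift = 0, bit = 0, mask = 0xfffc,
# so region = (0 << 24) | (0 << 16) | (3 << 16) | (1 << 0) = 0x00030001.
assert get_region_for_chip(0, 0, level=3) == 0x00030001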
def divide(elements, by, translate=False, sep=' '):
"""Divide lists `elements` and `by`.
All elements are grouped into N bins, where N denotes the elements in `by` list.
Parameters
----------
elements: list of dict
Elements to be grouped into bins.
by: list of dict
Elements defining the bins.
translate: bool (default: False)
When dividing, also translate start and end positions of elements.
sep: str (default ' ')
In case of multispans, what is the default text separator.
This is required in order to tag correct start, end positions of elements.
"""
outer_spans = [spans(elem) for elem in by]
return divide_by_spans(elements, outer_spans, translate=translate, sep=sep)
def addContentLen(self, content, len):
"""Append the extra substring to the node content. NOTE: In
contrast to xmlNodeSetContentLen(), @content is supposed to
be raw text, so unescaped XML special chars are allowed,
entity references are not supported. """
libxml2mod.xmlNodeAddContentLen(self._o, content, len)
def get_rules(self, optimized):
"""
Args:
optimized (bool): Enable or Disable optimization - Do not produce O(n^3)
Return:
list: The CFG rules
"""
self.insert_start_to_accepting()
    # If an optimized CFG is not requested, avoid the following O(n^3) rule.
    # It can be solved and a string can be generated faster with BFS or DFS.
if optimized == 0:
self.insert_self_to_empty_and_insert_all_intemediate(optimized)
self.insert_symbol_pushpop()
return self.rules
def run(self):
"""
    Attempt to deliver the first outgoing L{QueuedMessage}; return a time
to reschedule if there are still more retries or outgoing messages to
send.
"""
delay = None
router = self.siteRouter
for qmsg in self.store.query(_QueuedMessage,
sort=_QueuedMessage.storeID.ascending):
try:
self._verifySender(qmsg.sender)
        except Exception:
self.routeAnswer(qmsg.sender, qmsg.target,
Value(DELIVERY_ERROR, ERROR_BAD_SENDER),
qmsg.messageID)
log.err(Failure(),
"Could not verify sender for sending message.")
else:
router.routeMessage(qmsg.sender, qmsg.target,
qmsg.value, qmsg.messageID)
for answer in self.store.query(_AlreadyAnswered,
sort=_AlreadyAnswered.storeID.ascending):
self._deliverAnswer(answer)
nextmsg = self.store.findFirst(_QueuedMessage, default=None)
if nextmsg is not None:
delay = _RETRANSMIT_DELAY
else:
nextanswer = self.store.findFirst(_AlreadyAnswered, default=None)
if nextanswer is not None:
delay = _RETRANSMIT_DELAY
if delay is not None:
return IScheduler(self.store).now() + timedelta(seconds=delay)
def _readline(self):
"""Read exactly one line from the device, nonblocking.
Returns:
None on no data
"""
if len(self.lines) > 1:
return self.lines.pop(0)
tail = ''
if len(self.lines):
tail = self.lines.pop()
try:
tail += self._read()
except socket.error:
logging.exception('No new data')
time.sleep(0.1)
self.lines += linesepx.split(tail)
if len(self.lines) > 1:
return self.lines.pop(0)
def _on_properties(self, properties):
"""
Callback if properties are changed.
:param properties: (bool bold, bool italic, bool underline, bool overstrike)
"""
self._bold, self._italic, self._underline, self._overstrike = properties
self._on_change()
def debug_log_repo(repo):
"""Log to DEBUG level a Repo (or subclass) pretty-printed"""
ds_str = juicer.utils.create_json_str(repo,
indent=4,
cls=juicer.common.Repo.RepoEncoder)
juicer.utils.Log.log_debug(ds_str)
def check_pin_trust(self, environ):
"""Checks if the request passed the pin test. This returns `True` if the
request is trusted on a pin/cookie basis and returns `False` if not.
Additionally if the cookie's stored pin hash is wrong it will return
`None` so that appropriate action can be taken.
"""
if self.pin is None:
return True
val = parse_cookie(environ).get(self.pin_cookie_name)
if not val or "|" not in val:
return False
ts, pin_hash = val.split("|", 1)
if not ts.isdigit():
return False
if pin_hash != hash_pin(self.pin):
return None
return (time.time() - PIN_TIME) < int(ts)
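For orientation, the cookie value this parses is "<timestamp>|<pin hash>" (values invented):

val = "1318790434|a1b2c3d4e5"
ts, pin_hash = val.split("|", 1)   # ts -> "1318790434", pin_hash -> "a1b2c3d4e5"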
def liftover(pass_pos, matures):
"""Make position at precursor scale"""
fixed_pos = []
_print_header(pass_pos)
for pos in pass_pos:
mir = pos["mature"]
db_pos = matures[pos["chrom"]]
mut = _parse_mut(pos["sv"])
print([db_pos[mir], mut, pos["sv"]])
pos['pre_pos'] = db_pos[mir][0] + mut[1] - 1
pos['nt'] = list(mut[0])
fixed_pos.append(pos)
print_vcf(pos)
return fixed_pos
def fetch(self, range_start, range_end):
"""
Fetches speeches from the ListarDiscursosPlenario endpoint of the
SessoesReunioes (SessionsReunions) API.
The date range provided should be specified as a string using the
format supported by the API (%d/%m/%Y)
"""
range_dates = {'dataIni': range_start, 'dataFim': range_end}
url = self.URL.format(**range_dates)
xml = urllib.request.urlopen(url)
tree = ET.ElementTree(file=xml)
records = self._parse_speeches(tree.getroot())
return pd.DataFrame(records, columns=[
'session_code',
'session_date',
'session_num',
'phase_code',
'phase_desc',
'speech_speaker_num',
'speech_speaker_name',
'speech_speaker_party',
'speech_speaker_state',
'speech_started_at',
'speech_room_num',
'speech_insertion_num'
])
def delete(self, image):
"""Delete the file of the given ``image``.
:param image: the image to delete
:type image: :class:`sqlalchemy_imageattach.entity.Image`
"""
from .entity import Image
if not isinstance(image, Image):
raise TypeError('image must be a sqlalchemy_imageattach.entity.'
'Image instance, not ' + repr(image))
self.delete_file(image.object_type, image.object_id,
image.width, image.height, image.mimetype)
def find_value_type(global_ns, value_type_str):
"""implementation details"""
if not value_type_str.startswith('::'):
value_type_str = '::' + value_type_str
found = global_ns.decls(
name=value_type_str,
function=lambda decl: not isinstance(decl, calldef.calldef_t),
allow_empty=True)
if not found:
no_global_ns_value_type_str = value_type_str[2:]
if no_global_ns_value_type_str in cpptypes.FUNDAMENTAL_TYPES:
return cpptypes.FUNDAMENTAL_TYPES[no_global_ns_value_type_str]
elif type_traits.is_std_string(value_type_str):
string_ = global_ns.typedef('::std::string')
return type_traits.remove_declarated(string_)
elif type_traits.is_std_wstring(value_type_str):
string_ = global_ns.typedef('::std::wstring')
return type_traits.remove_declarated(string_)
else:
value_type_str = no_global_ns_value_type_str
has_const = value_type_str.startswith('const ')
if has_const:
value_type_str = value_type_str[len('const '):]
has_pointer = value_type_str.endswith('*')
if has_pointer:
value_type_str = value_type_str[:-1]
found = None
if has_const or has_pointer:
found = impl_details.find_value_type(
global_ns,
value_type_str)
if not found:
return None
else:
if isinstance(found, class_declaration.class_types):
return cpptypes.declarated_t(found)
if has_const:
return cpptypes.const_t(found)
if has_pointer:
return cpptypes.pointer_t(found)
if len(found) == 1:
return found[0]
return None
def timestamp_feature(catalog, soup):
"""The datetime the xml file was last modified.
"""
# there's really no "time created", we're using the date the courses are listed for...
epoch = 1318790434
catalog.timestamp = int(float(soup.title.text)) + epoch
catalog.datetime = datetime.datetime.fromtimestamp(catalog.timestamp)
logger.info('Catalog last updated on %s' % catalog.datetime)
def plot_theta(self, colorbar=True, cb_orientation='vertical',
cb_label='$g_\\theta$, m s$^{-2}$', ax=None, show=True,
fname=None, **kwargs):
"""
Plot the theta component of the gravity field.
Usage
-----
x.plot_theta([tick_interval, xlabel, ylabel, ax, colorbar,
cb_orientation, cb_label, show, fname, **kwargs])
Parameters
----------
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
colorbar : bool, optional, default = True
If True, plot a colorbar.
cb_orientation : str, optional, default = 'vertical'
Orientation of the colorbar: either 'vertical' or 'horizontal'.
cb_label : str, optional, default = '$g_\\theta$, m s$^{-2}$'
Text label for the colorbar.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
kwargs : optional
Keyword arguements that will be sent to the SHGrid.plot()
and plt.imshow() methods.
"""
if ax is None:
fig, axes = self.theta.plot(colorbar=colorbar,
cb_orientation=cb_orientation,
cb_label=cb_label, show=False,
**kwargs)
if show:
fig.show()
if fname is not None:
fig.savefig(fname)
return fig, axes
else:
self.theta.plot(colorbar=colorbar, cb_orientation=cb_orientation,
cb_label=cb_label, ax=ax, **kwargs)
def seek(self, position):
"""Seek to the specified position (byte offset) in the S3 key.
:param int position: The byte offset from the beginning of the key.
"""
self._position = position
range_string = make_range_string(self._position)
logger.debug('content_length: %r range_string: %r', self._content_length, range_string)
    #
    # Close the old body explicitly.
    # On the first seek(), self._body does not exist yet; catch the
    # AttributeError and do nothing.
    #
try:
self._body.close()
except AttributeError:
pass
    if position == self._content_length:
#
# When reading, we can't seek to the first byte of an empty file.
# Similarly, we can't seek past the last byte. Do nothing here.
#
self._body = io.BytesIO()
else:
self._body = self._object.get(Range=range_string)['Body']
def load_module(ldr, fqname):
'''Load `fqname` from under `ldr.fspath`.
The `fqname` argument is the fully qualified module name,
eg. "spam.eggs.ham". As explained above, when ::
finder.find_module("spam.eggs.ham")
is called, "spam.eggs" has already been imported and added
to `sys.modules`. However, the `find_module()` method isn't
necessarily always called during an actual import:
meta tools that analyze import dependencies (such as freeze,
Installer or py2exe) don't actually load modules, so
a finder shouldn't depend on the parent package being
available in `sys.modules`.
The `load_module()` method has a few responsibilities that
it must fulfill before it runs any code:
* If there is an existing module object named 'fullname' in
`sys.modules`, the loader must use that existing module.
(Otherwise, the `reload()` builtin will not work correctly.)
If a module named 'fullname' does not exist in
`sys.modules`, the loader must create a new module object
and add it to `sys.modules`.
Note that the module object must be in `sys.modules`
before the loader executes the module code. This is
crucial because the module code may (directly or
indirectly) import itself; adding it to `sys.modules`
beforehand prevents unbounded recursion in the worst case
and multiple loading in the best.
If the load fails, the loader needs to remove any module it
may have inserted into `sys.modules`. If the module was
already in `sys.modules` then the loader should leave it
alone.
* The `__file__` attribute must be set. This must be a string,
but it may be a dummy value, for example "<frozen>".
The privilege of not having a `__file__` attribute at all
is reserved for built-in modules.
* The `__name__` attribute must be set. If one uses
`imp.new_module()` then the attribute is set automatically.
* If it's a package, the __path__ variable must be set.
This must be a list, but may be empty if `__path__` has no
further significance to the importer (more on this later).
* The `__loader__` attribute must be set to the loader object.
This is mostly for introspection and reloading, but can be
used for importer-specific extras, for example getting data
associated with an importer.
The `__package__` attribute [8] must be set.
If the module is a Python module (as opposed to a built-in
module or a dynamically loaded extension), it should execute
the module's code in the module's global name space
(`module.__dict__`).
[8] PEP 366: Main module explicit relative imports
http://www.python.org/dev/peps/pep-0366/
'''
scope = ldr.scope.split('.')
modpath = fqname.split('.')
if scope != modpath[0:len(scope)]:
raise AssertionError(
"%s responsible for %s got request for %s" % (
ldr.__class__.__name__,
ldr.scope,
fqname,
)
)
if fqname in sys.modules:
mod = sys.modules[fqname]
else:
mod = sys.modules.setdefault(fqname, types.ModuleType(fqname))
mod.__loader__ = ldr
fspath = ldr.path_to(fqname)
mod.__file__ = str(fspath)
if fs.is_package(fspath):
mod.__path__ = [ldr.fspath]
mod.__package__ = str(fqname)
else:
mod.__package__ = str(fqname.rpartition('.')[0])
exec(fs.get_code(fspath), mod.__dict__)
return mod
def enable_secrets_engine(self, backend_type, path=None, description=None, config=None, plugin_name=None,
options=None, local=False, seal_wrap=False):
"""Enable a new secrets engine at the given path.
Supported methods:
POST: /sys/mounts/{path}. Produces: 204 (empty body)
:param backend_type: The name of the backend type, such as "github" or "token".
:type backend_type: str | unicode
:param path: The path to mount the method on. If not provided, defaults to the value of the "method_type"
argument.
:type path: str | unicode
:param description: A human-friendly description of the mount.
:type description: str | unicode
:param config: Configuration options for this mount. These are the possible values:
* **default_lease_ttl**: The default lease duration, specified as a string duration like "5s" or "30m".
* **max_lease_ttl**: The maximum lease duration, specified as a string duration like "5s" or "30m".
* **force_no_cache**: Disable caching.
* **plugin_name**: The name of the plugin in the plugin catalog to use.
* **audit_non_hmac_request_keys**: Comma-separated list of keys that will not be HMAC'd by audit devices in
the request data object.
* **audit_non_hmac_response_keys**: Comma-separated list of keys that will not be HMAC'd by audit devices in
the response data object.
* **listing_visibility**: Specifies whether to show this mount in the UI-specific listing endpoint. ("unauth" or "hidden")
* **passthrough_request_headers**: Comma-separated list of headers to whitelist and pass from the request to
the backend.
:type config: dict
:param options: Specifies mount type specific options that are passed to the backend.
* **version**: <KV> The version of the KV to mount. Set to "2" for mount KV v2.
:type options: dict
:param plugin_name: Specifies the name of the plugin to use based from the name in the plugin catalog. Applies only to plugin backends.
:type plugin_name: str | unicode
:param local: <Vault enterprise only> Specifies if the auth method is a local only. Local auth methods are not
replicated nor (if a secondary) removed by replication.
:type local: bool
:param seal_wrap: <Vault enterprise only> Enable seal wrapping for the mount.
:type seal_wrap: bool
:return: The response of the request.
:rtype: requests.Response
"""
if path is None:
path = backend_type
params = {
'type': backend_type,
'description': description,
'config': config,
'options': options,
'plugin_name': plugin_name,
'local': local,
'seal_wrap': seal_wrap,
}
api_path = '/v1/sys/mounts/{path}'.format(path=path)
return self._adapter.post(
url=api_path,
json=params,
)
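A hedged usage sketch (client construction and mount details are illustrative; in recent hvac releases this method is reached via client.sys):

import hvac

client = hvac.Client(url='http://localhost:8200', token='hvs.example')  # placeholder token
# Mount KV version 2 at the path "secret".
client.sys.enable_secrets_engine(
    backend_type='kv',
    path='secret',
    options={'version': '2'},
)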
def get_00t_magmom_with_xyz_saxis(self):
"""
For internal implementation reasons, in non-collinear calculations
VASP prefers:
MAGMOM = 0 0 total_magnetic_moment
SAXIS = x y z
to an equivalent:
MAGMOM = x y z
SAXIS = 0 0 1
This method returns a Magmom object with magnetic moment [0, 0, t],
where t is the total magnetic moment, and saxis rotated as required.
A consistent direction of saxis is applied such that t might be positive
or negative depending on the direction of the initial moment. This is useful
in the case of collinear structures, rather than constraining t to always
be positive.
:return: Magmom
"""
# reference direction gives sign of moment
# entirely arbitrary, there will always be a pathological case
# where a consistent sign is not possible if the magnetic moments
# are aligned along the reference direction, but in practice this
# is unlikely to happen
ref_direction = np.array([1.01, 1.02, 1.03])
t = abs(self)
if t != 0:
new_saxis = self.moment/np.linalg.norm(self.moment)
if np.dot(ref_direction, new_saxis) < 0:
t = -t
new_saxis = -new_saxis
return Magmom([0, 0, t], saxis=new_saxis)
else:
return Magmom(self)
|
For internal implementation reasons, in non-collinear calculations
VASP prefers:
MAGMOM = 0 0 total_magnetic_moment
SAXIS = x y z
to an equivalent:
MAGMOM = x y z
SAXIS = 0 0 1
This method returns a Magmom object with magnetic moment [0, 0, t],
where t is the total magnetic moment, and saxis rotated as required.
A consistent direction of saxis is applied such that t might be positive
or negative depending on the direction of the initial moment. This is useful
in the case of collinear structures, rather than constraining t to always
be positive.
:return: Magmom
|
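A short worked example of the conversion described above; the pymatgen import path is assumed:

from pymatgen.electronic_structure.core import Magmom

m = Magmom([1, 1, 1])                   # moment along [1, 1, 1], default saxis (0, 0, 1)
m2 = m.get_00t_magmom_with_xyz_saxis()  # moment becomes [0, 0, sqrt(3)],
                                        # saxis becomes [1, 1, 1] / sqrt(3)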
def has_preview(self):
"""
Returns if the document has real merged data. When True, `topil()`
returns pre-composed data.
"""
version_info = self.image_resources.get_data('version_info')
if version_info:
return version_info.has_composite
return True
|
Returns if the document has real merged data. When True, `topil()`
returns pre-composed data.
|
def iscm_md_append_array(self, arraypath, member):
"""
Append a member to a metadata array entry
"""
array_path = arraypath.split(".")
array_key = array_path.pop()
current = self.metadata
for k in array_path:
    if k not in current:
        current[k] = {}
    current = current[k]
if array_key not in current:
    current[array_key] = []
if not isinstance(current[array_key], list):
    raise KeyError("%s doesn't point to an array" % arraypath)
current[array_key].append(member)
|
Append a member to a metadata array entry
|
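A hedged illustration of the dotted-path behaviour, assuming self.metadata starts empty:

# iscm_md_append_array("build.steps", "compile")
#   -> self.metadata == {"build": {"steps": ["compile"]}}
# iscm_md_append_array("build.steps", "link")
#   -> self.metadata == {"build": {"steps": ["compile", "link"]}}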
def add_comment(self, line: str) -> None:
'''Keeping track of "last comment" for section and parameter '''
# the rule is like
#
# # comment line --> add to last comment
# blank line --> clears last comment
# [ ] --> use last comment
# parameter: --> use last comment
# All others: clear last comment
self._last_comment += (' ' if self._last_comment else '') + \
line.lstrip('#').strip()
|
Keeping track of "last comment" for section and parameter
|
def relative_abundance(coverage):
"""
cov = number of bases / length of genome
relative abundance = [(cov) / sum(cov for all genomes)] * 100
"""
relative = {}
# one accumulator per coverage value (per sample), sized from the first genome
sums = []
for genome in coverage:
    sums = [0 for _ in coverage[genome]]
    break
for genome in coverage:
index = 0
for cov in coverage[genome]:
sums[index] += cov
index += 1
for genome in coverage:
index = 0
relative[genome] = []
for cov in coverage[genome]:
if sums[index] == 0:
relative[genome].append(0)
else:
relative[genome].append((cov / sums[index]) * float(100))
index += 1
return relative
|
cov = number of bases / length of genome
relative abundance = [(cov) / sum(cov for all genomes)] * 100
|
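A small worked example, assuming each genome lists per-sample coverage values in the same order:

coverage = {'genomeA': [2.0, 1.0], 'genomeB': [2.0, 3.0]}
relative_abundance(coverage)
# -> {'genomeA': [50.0, 25.0], 'genomeB': [50.0, 75.0]}
#    column sums are [4.0, 4.0], so each value is cov / column_sum * 100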
def create_scree_plot(data, o_filename, options):
"""Creates the scree plot.
:param data: the eigenvalues.
:param o_filename: the name of the output files.
:param options: the options.
:type data: numpy.ndarray
:type o_filename: str
:type options: argparse.Namespace
"""
# Importing plt
mpl.use("Agg")
import matplotlib.pyplot as plt
plt.ioff()
# Computing the cumulative sum
cumul_data = np.cumsum(data)
# Creating the figure and axes
fig, axes = plt.subplots(2, 1, figsize=(8, 16))
# The title of the figure
fig.suptitle(options.scree_plot_title, fontsize=16, weight="bold")
fig.subplots_adjust(hspace=0.27, top=0.93, bottom=0.06)
# Modifying the spines
for axe in axes:
axe.xaxis.set_ticks_position("bottom")
axe.yaxis.set_ticks_position("left")
axe.spines["top"].set_visible(False)
axe.spines["right"].set_visible(False)
axe.spines["left"].set_position(("outward", 9))
axe.spines["bottom"].set_position(("outward", 9))
# First, plotting the eigenvalues
axes[0].set_title("Scree Plot", weight="bold")
axes[0].set_xlabel("Component")
axes[0].set_ylabel("Eigenvalue")
axes[0].plot(np.arange(len(data)) + 1, data, marker="o", c="#0099CC",
mec="#0099CC", ls="-", lw=2, clip_on=False)
# Then, plotting the annotation
for i in range(len(data)):
axes[0].annotate(np.round(data[i], 3),
xy=(np.arange(len(data))[i] + 1, data[i]),
xytext=(1, 10), textcoords="offset points",
ha="left", va="bottom",
bbox=dict(boxstyle="round,pad=0.5", fc="#FFFFFF"))
# Next plot the cumulative values
axes[1].set_title(("Cumulative explained variance "
"(max={:.3f})".format(np.sum(data))), weight="bold")
axes[1].set_xlabel("Component")
axes[1].set_ylabel("Cumulative explained variance")
axes[1].axhline(np.sum(data) * 0.8, ls="--", lw=2, c="#999999")
axes[1].plot(np.arange(len(data)) + 1, cumul_data, marker="o", c="#CC0000",
mec="#CC0000", mfc="#CC0000", ls="-", lw=2, clip_on=False)
# Then, plotting the annotation
for i in range(len(data)):
axes[1].annotate(np.round(cumul_data[i], 3),
xy=(np.arange(len(data))[i] + 1, cumul_data[i]),
xytext=(1, -10), textcoords="offset points",
ha="left", va="top",
bbox=dict(boxstyle="round,pad=0.5", fc="#FFFFFF"))
# Saving the file
plt.savefig(o_filename, dpi=300)
plt.close(fig)
|
Creates the scree plot.
:param data: the eigenvalues.
:param o_filename: the name of the output files.
:param options: the options.
:type data: numpy.ndarray
:type o_filename: str
:type options: argparse.Namespace
|
def convert_to_group_ids(groups, vpc_id=None, vpc_name=None, region=None, key=None,
keyid=None, profile=None):
'''
Given a list of security groups and a vpc_id, convert_to_group_ids will
convert all list items in the given list to security group ids.
CLI example::
salt myminion boto_secgroup.convert_to_group_ids mysecgroup vpc-89yhh7h
'''
log.debug('security group contents %s pre-conversion', groups)
group_ids = []
for group in groups:
group_id = get_group_id(name=group, vpc_id=vpc_id,
vpc_name=vpc_name, region=region,
key=key, keyid=keyid, profile=profile)
if not group_id:
# Security groups are a big deal - need to fail if any can't be resolved...
raise CommandExecutionError('Could not resolve Security Group name '
'{0} to a Group ID'.format(group))
else:
group_ids.append(six.text_type(group_id))
log.debug('security group contents %s post-conversion', group_ids)
return group_ids
|
Given a list of security groups and a vpc_id, convert_to_group_ids will
convert all list items in the given list to security group ids.
CLI example::
salt myminion boto_secgroup.convert_to_group_ids mysecgroup vpc-89yhh7h
|
def set_context(self):
"""
Reads through the namespaces in the RML and generates a context for
JSON-LD output by comparing against the RdfNsManager namespaces
"""
results = self.rml.query("""
SELECT ?o {
{
?s rr:class ?o
} UNION {
?s rr:predicate ?o
}
}""")
namespaces = [Uri(row[0]).value[0]
for row in results
if isinstance(row[0], rdflib.URIRef)]
self.context = {ns[0]: ns[1] for ns in namespaces if ns[0]}
|
Reads through the namespaces in the RML and generates a context for
JSON-LD output by comparing against the RdfNsManager namespaces
|
def _insert(self, key, records, count):
""" Insert records according to key """
if key not in self.records:
assert key not in self.counts
self.records[key] = records
self.counts[key] = count
else:
self.records[key] = np.vstack((self.records[key], records))
assert key in self.counts
self.counts[key] += count
|
Insert records according to key
|
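A minimal sketch of the accumulate-by-key behaviour; `store` is a hypothetical instance with empty `records` and `counts` dicts:

import numpy as np

store._insert('a', np.zeros((2, 3)), 2)  # creates records['a'] (2 rows), counts['a'] == 2
store._insert('a', np.ones((1, 3)), 1)   # vstacks to 3 rows, counts['a'] == 3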
def browse_httpauth_write_apis(
request,
database_name=None,
collection_name=None):
"""Deprecated"""
name = "Write APIs Using HTTPAuth Authentication"
if database_name and collection_name:
wapis = WriteAPIHTTPAuth.objects.filter(
database_name=database_name,
collection_name=collection_name)
else:
wapis = WriteAPIHTTPAuth.objects.all()
context = {'name': name, 'wapis': wapis,
'database_name': database_name,
'collection_name': collection_name}
return render(
request,
'djmongo/console/browse-httpauth-write-apis.html',
context)
|
Deprecated
|
def parse_time_derivative(self, node):
"""
Parses <TimeDerivative>
@param node: Node containing the <TimeDerivative> element
@type node: xml.etree.Element
@raise ParseError: Raised when the time derivative does not have a variable
name or a value.
"""
if 'variable' in node.lattrib:
variable = node.lattrib['variable']
else:
self.raise_error('<TimeDerivative> must specify a variable.')
if 'value' in node.lattrib:
value = node.lattrib['value']
else:
self.raise_error("Time derivative for '{0}' must specify an expression.",
variable)
self.current_regime.add_time_derivative(TimeDerivative(variable, value))
|
Parses <TimeDerivative>
@param node: Node containing the <TimeDerivative> element
@type node: xml.etree.Element
@raise ParseError: Raised when the time derivative does not have a variable
name or a value.
|
def get_segment_effort(self, effort_id):
"""
Return a specific segment effort by ID.
http://strava.github.io/api/v3/efforts/#retrieve
:param effort_id: The id of associated effort to fetch.
:type effort_id: int
:return: The specified effort on a segment.
:rtype: :class:`stravalib.model.SegmentEffort`
"""
return model.SegmentEffort.deserialize(self.protocol.get('/segment_efforts/{id}',
id=effort_id))
|
Return a specific segment effort by ID.
http://strava.github.io/api/v3/efforts/#retrieve
:param effort_id: The id of associated effort to fetch.
:type effort_id: int
:return: The specified effort on a segment.
:rtype: :class:`stravalib.model.SegmentEffort`
|
def skip_class_parameters():
"""
Can be used with :meth:`add_parametric_object_params`, this removes
duplicate variables cluttering the sphinx docs.
This is only intended to be used with *sphinx autodoc*
In your *sphinx* ``conf.py`` file::
from cqparts.utils.sphinx import skip_class_parameters
def setup(app):
app.connect("autodoc-skip-member", skip_class_parameters())
"""
from ..params import Parameter
def callback(app, what, name, obj, skip, options):
if (what == 'class') and isinstance(obj, Parameter):
return True # yes, skip this object
return None
return callback
|
Can be used with :meth:`add_parametric_object_params`, this removes
duplicate variables cluttering the sphinx docs.
This is only intended to be used with *sphinx autodoc*
In your *sphinx* ``conf.py`` file::
from cqparts.utils.sphinx import skip_class_parameters
def setup(app):
app.connect("autodoc-skip-member", skip_class_parameters())
|
def search(
self,
id_list: List,
negated_classes: List,
limit: Optional[int] = 100,
method: Optional[SimAlgorithm] = SimAlgorithm.PHENODIGM) -> SimResult:
"""
Owlsim2 search, calls search_by_attribute_set, and converts to SimResult object
:raises JSONDecodeError: If the owlsim response is not valid json.
"""
return self.filtered_search(
id_list=id_list,
negated_classes=negated_classes,
limit=limit,
taxon_filter=None,
category_filter=None,
method=method
)
|
Owlsim2 search, calls search_by_attribute_set, and converts to SimResult object
:raises JSONDecodeError: If the owlsim response is not valid json.
|
def _extract_functions(resources):
"""
Extracts and returns function information from the given dictionary of SAM/CloudFormation resources. This
method supports functions defined with AWS::Serverless::Function and AWS::Lambda::Function
:param dict resources: Dictionary of SAM/CloudFormation resources
:return dict(string : samcli.commands.local.lib.provider.Function): Dictionary of function LogicalId to the
Function configuration object
"""
result = {}
for name, resource in resources.items():
resource_type = resource.get("Type")
resource_properties = resource.get("Properties", {})
if resource_type == SamFunctionProvider._SERVERLESS_FUNCTION:
layers = SamFunctionProvider._parse_layer_info(resource_properties.get("Layers", []), resources)
result[name] = SamFunctionProvider._convert_sam_function_resource(name, resource_properties, layers)
elif resource_type == SamFunctionProvider._LAMBDA_FUNCTION:
layers = SamFunctionProvider._parse_layer_info(resource_properties.get("Layers", []), resources)
result[name] = SamFunctionProvider._convert_lambda_function_resource(name, resource_properties, layers)
# We don't care about other resource types. Just ignore them
return result
|
Extracts and returns function information from the given dictionary of SAM/CloudFormation resources. This
method supports functions defined with AWS::Serverless::Function and AWS::Lambda::Function
:param dict resources: Dictionary of SAM/CloudFormation resources
:return dict(string : samcli.commands.local.lib.provider.Function): Dictionary of function LogicalId to the
Function configuration object
|
def write_header(self):
"""Write the DNS message header.
Writing the DNS message header is done after all sections
have been rendered, but before the optional TSIG signature
is added.
"""
self.output.seek(0)
self.output.write(struct.pack('!HHHHHH', self.id, self.flags,
self.counts[0], self.counts[1],
self.counts[2], self.counts[3]))
self.output.seek(0, 2)
|
Write the DNS message header.
Writing the DNS message header is done after all sections
have been rendered, but before the optional TSIG signature
is added.
|
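For reference, the 12-byte DNS header layout packed above is six big-endian unsigned shorts; a standalone sketch:

import struct

header = struct.pack('!HHHHHH', 0x1234, 0x0100, 1, 0, 0, 0)
len(header)  # -> 12 bytes: id, flags, qdcount, ancount, nscount, arcount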
def is_unit_upgrading_set():
"""Return the state of the kv().get('unit-upgrading').
To help with units that don't have HookData() (testing)
if it excepts, return False
"""
try:
with unitdata.HookData()() as t:
kv = t[0]
# transform something truth-y into a Boolean.
return bool(kv.get('unit-upgrading'))
except Exception:
return False
|
Return the state of the kv().get('unit-upgrading').
To help with units that don't have HookData() (testing)
if it excepts, return False
|
def get_subscription_attributes(SubscriptionArn, region=None, key=None, keyid=None, profile=None):
'''
Returns all of the properties of a subscription.
CLI example::
salt myminion boto3_sns.get_subscription_attributes somesubscription region=us-west-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
ret = conn.get_subscription_attributes(SubscriptionArn=SubscriptionArn)
return ret['Attributes']
except botocore.exceptions.ClientError as e:
log.error('Failed to list attributes for SNS subscription %s: %s',
SubscriptionArn, e)
return None
except KeyError:
log.error('Failed to list attributes for SNS subscription %s',
SubscriptionArn)
return None
|
Returns all of the properties of a subscription.
CLI example::
salt myminion boto3_sns.get_subscription_attributes somesubscription region=us-west-1
|
def create_task(self, task_name=None, script=None, hyper_parameters=None, saved_result_keys=None, **kwargs):
"""Uploads a task to the database, timestamp will be added automatically.
Parameters
-----------
task_name : str
The task name.
script : str
File name of the python script.
hyper_parameters : dictionary
The hyper parameters pass into the script.
saved_result_keys : list of str
The keys of the task results to keep in the database when the task finishes.
kwargs : other parameters
Users customized parameters such as description, version number.
Examples
-----------
Uploads a task
>>> db.create_task(task_name='mnist', script='example/tutorial_mnist_simple.py', description='simple tutorial')
Finds and runs the latest task
>>> db.run_top_task(sess=sess, sort=[("time", pymongo.DESCENDING)])
>>> db.run_top_task(sess=sess, sort=[("time", -1)])
Finds and runs the oldest task
>>> db.run_top_task(sess=sess, sort=[("time", pymongo.ASCENDING)])
>>> db.run_top_task(sess=sess, sort=[("time", 1)])
"""
if not isinstance(task_name, str): # is None:
raise Exception("task_name should be string")
if not isinstance(script, str): # is None:
raise Exception("script should be string")
if hyper_parameters is None:
hyper_parameters = {}
if saved_result_keys is None:
saved_result_keys = []
self._fill_project_info(kwargs)
kwargs.update({'time': datetime.utcnow()})
kwargs.update({'hyper_parameters': hyper_parameters})
kwargs.update({'saved_result_keys': saved_result_keys})
with open(script, 'rb') as f:
    _script = f.read()
kwargs.update({'status': 'pending', 'script': _script, 'result': {}})
self.db.Task.insert_one(kwargs)
logging.info("[Database] Saved Task - task_name: {} script: {}".format(task_name, script))
|
Uploads a task to the database, timestamp will be added automatically.
Parameters
-----------
task_name : str
The task name.
script : str
File name of the python script.
hyper_parameters : dictionary
The hyper parameters pass into the script.
saved_result_keys : list of str
The keys of the task results to keep in the database when the task finishes.
kwargs : other parameters
Users customized parameters such as description, version number.
Examples
-----------
Uploads a task
>>> db.create_task(task_name='mnist', script='example/tutorial_mnist_simple.py', description='simple tutorial')
Finds and runs the latest task
>>> db.run_top_task(sess=sess, sort=[("time", pymongo.DESCENDING)])
>>> db.run_top_task(sess=sess, sort=[("time", -1)])
Finds and runs the oldest task
>>> db.run_top_task(sess=sess, sort=[("time", pymongo.ASCENDING)])
>>> db.run_top_task(sess=sess, sort=[("time", 1)])
|
def normalize_rrs(rrsets):
"""Lexically sort the order of every ResourceRecord in a ResourceRecords
element so we don't generate spurious changes: ordering of e.g. NS records
is irrelevant to the DNS line protocol, but XML sees it differently.
Also rewrite any wildcard records to use the ascii hex code: somewhere deep
inside route53 is something that used to look like tinydns, and amazon's
API will always display wildcard records as "\052.example.com".
Args: rrsets: lxml.etree.Element (<ResourceRecordSets>) """
for rrset in rrsets:
if rrset.tag == '{%s}ResourceRecordSet' % R53_XMLNS:
for rrs in rrset:
# preformat wildcard records
if rrs.tag == '{%s}Name' % R53_XMLNS:
if rrs.text.startswith('*.'):
old_text = rrs.text
new_text = '\\052.%s' % old_text[2:]
print('Found wildcard record, rewriting to %s' % new_text)
rrs.text = rrs.text.replace(old_text, new_text)
# sort ResourceRecord elements by Value
if rrs.tag == '{%s}ResourceRecords' % R53_XMLNS:
# 0th value of ResourceRecord is always the Value element
sorted_rrs = sorted(rrs, key=lambda x: x[0].text)
rrs[:] = sorted_rrs
return rrsets
|
Lexically sort the order of every ResourceRecord in a ResourceRecords
element so we don't generate spurious changes: ordering of e.g. NS records
is irrelevant to the DNS line protocol, but XML sees it differently.
Also rewrite any wildcard records to use the ascii hex code: somewhere deep
inside route53 is something that used to look like tinydns, and amazon's
API will always display wildcard records as "\052.example.com".
Args: rrsets: lxml.etree.Element (<ResourceRecordSets>)
|
def writable(self):
"""True if the Slot is writable."""
return bool(lib.EnvSlotWritableP(self._env, self._cls, self._name))
|
True if the Slot is writable.
|
def load_drp(self, name, entry_point='numina.pipeline.1'):
"""Load all available DRPs in 'entry_point'."""
for drpins in self.iload(entry_point):
if drpins.name == name:
return drpins
else:
raise KeyError('{}'.format(name))
|
Load all available DRPs in 'entry_point'.
|
def send(self, request, stem=None):
"""Prepare and send a request
Arguments:
request: a Request object that is not yet prepared
stem: a path to append to the root URL
Returns:
The response to the request
"""
if stem is not None:
request.url = request.url + "/" + stem.lstrip("/")
prepped = self.session.prepare_request(request)
settings = self.session.merge_environment_settings(url=prepped.url,
proxies={},
stream=None,
verify=None,
cert=None)
return self.session.send(prepped, **settings)
|
Prepare and send a request
Arguments:
request: a Request object that is not yet prepared
stem: a path to append to the root URL
Returns:
The response to the request
|
def squash_xml_to_text(elm, remove_namespaces=False):
"""Squash the given XML element (as `elm`) to a text containing XML.
The outer most element/tag will be removed, but inner elements will
remain. If `remove_namespaces` is specified, XML namespace declarations
will be removed from the text.
:param elm: XML element
:type elm: :class:`xml.etree.ElementTree`
:param remove_namespaces: flag to indicate the removal of XML namespaces
:type remove_namespaces: bool
:return: the inner text and elements of the given XML element
:rtype: str
"""
leading_text = elm.text and elm.text or ''
result = [leading_text]
for child in elm.getchildren():
# Encoding is set to utf-8 because otherwise `ó` would
# become `&#243;`
child_value = etree.tostring(child, encoding='utf-8')
# Decode to a string for later regexp and whitespace stripping
child_value = child_value.decode('utf-8')
result.append(child_value)
if remove_namespaces:
# Best way to remove the namespaces without having the parser complain
# about producing invalid XML.
result = [re.sub(' xmlns:?[^=]*="[^"]*"', '', v) for v in result]
# Join the results and strip any surrounding whitespace
result = u''.join(result).strip()
return result
|
Squash the given XML element (as `elm`) to a text containing XML.
The outer most element/tag will be removed, but inner elements will
remain. If `remove_namespaces` is specified, XML namespace declarations
will be removed from the text.
:param elm: XML element
:type elm: :class:`xml.etree.ElementTree`
:param remove_namespaces: flag to indicate the removal of XML namespaces
:type remove_namespaces: bool
:return: the inner text and elements of the given XML element
:rtype: str
|
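A hedged usage sketch; lxml is assumed, matching the etree.tostring call above (note that serialising a child element also includes its tail text):

from lxml import etree

elm = etree.fromstring('<root>hi <b>there</b> end</root>')
squash_xml_to_text(elm)
# -> 'hi <b>there</b> end'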
def get_all_namespace_ids( self ):
"""
Get the set of all existing, READY namespace IDs.
"""
cur = self.db.cursor()
namespace_ids = namedb_get_all_namespace_ids( cur )
return namespace_ids
|
Get the set of all existing, READY namespace IDs.
|
def _get_group_infos(self):
"""
Returns a (cached) list of group_info structures for the groups that our
user is a member of.
"""
if self._group_infos is None:
self._group_infos = self._group_type.user_groups(
self._ldap_user, self._group_search
)
return self._group_infos
|
Returns a (cached) list of group_info structures for the groups that our
user is a member of.
|
def update_source(ident, data):
'''Update a harvest source'''
source = get_source(ident)
source.modify(**data)
signals.harvest_source_updated.send(source)
return source
|
Update a harvest source
|
def update_ports(self, ports, id_or_uri):
"""
Updates the switch ports. Only the ports under the management of OneView and those that are unlinked are
supported for update.
Note:
This method is available for API version 300 or later.
Args:
ports: List of Switch Ports.
id_or_uri: Can be either the switch id or the switch uri.
Returns:
dict: Switch
"""
ports = merge_default_values(ports, {'type': 'port'})
uri = self._client.build_uri(id_or_uri) + "/update-ports"
return self._client.update(uri=uri, resource=ports)
|
Updates the switch ports. Only the ports under the management of OneView and those that are unlinked are
supported for update.
Note:
This method is available for API version 300 or later.
Args:
ports: List of Switch Ports.
id_or_uri: Can be either the switch id or the switch uri.
Returns:
dict: Switch
|
def wait_and_start_browser(host, port=None, cancel_event=None):
"""
Waits for the server to run and then opens the specified address in
the browser. Set cancel_event to cancel the wait.
"""
if host == '0.0.0.0':
host = 'localhost'
if port is None:
port = 80
if wait_for_server(host, port, cancel_event):
start_browser('http://{0}:{1}/'.format(host, port))
|
Waits for the server to run and then opens the specified address in
the browser. Set cancel_event to cancel the wait.
|
def transformToNative(obj):
"""
Turn obj.value into a list of dates, datetimes, or
(datetime, timedelta) tuples.
"""
if obj.isNative:
return obj
obj.isNative = True
if obj.value == '':
obj.value = []
return obj
tzinfo = getTzid(getattr(obj, 'tzid_param', None))
valueParam = getattr(obj, 'value_param', "DATE-TIME").upper()
valTexts = obj.value.split(",")
if valueParam == "DATE":
obj.value = [stringToDate(x) for x in valTexts]
elif valueParam == "DATE-TIME":
obj.value = [stringToDateTime(x, tzinfo) for x in valTexts]
elif valueParam == "PERIOD":
obj.value = [stringToPeriod(x, tzinfo) for x in valTexts]
return obj
|
Turn obj.value into a list of dates, datetimes, or
(datetime, timedelta) tuples.
|
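A hedged illustration of the value parsing, using vobject-style content lines:

# EXDATE;VALUE=DATE:20210101,20210102
#   -> obj.value == [date(2021, 1, 1), date(2021, 1, 2)]
# RDATE:20210101T120000Z   (VALUE defaults to DATE-TIME)
#   -> obj.value == [datetime(2021, 1, 1, 12, 0, tzinfo=utc)]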
def set_statements_pmid(self, pmid):
"""Set the evidence PMID of Statements that have been extracted.
Parameters
----------
pmid : str or None
The PMID to be used in the Evidence objects of the Statements
that were extracted by the processor.
"""
# Replace PMID value in JSON dict first
for stmt in self.json_stmts:
evs = stmt.get('evidence', [])
for ev in evs:
ev['pmid'] = pmid
# Replace PMID value in extracted Statements next
for stmt in self.statements:
for ev in stmt.evidence:
ev.pmid = pmid
|
Set the evidence PMID of Statements that have been extracted.
Parameters
----------
pmid : str or None
The PMID to be used in the Evidence objects of the Statements
that were extracted by the processor.
|
def json_output(cls, cs, score_dict, output_filename, ds_loc, limit,
output_type='json'):
'''
Generates JSON output for the compliance score(s)
@param cs Compliance Checker Suite
@param score_dict Dict mapping each dataset to its checker results
@param output_filename The file path to output to
@param ds_loc List of source datasets
@param limit The degree of strictness, 1 being the strictest,
and going up from there.
@param output_type Either 'json' or 'json_new'. json_new is the new
json output format that supports multiple datasets
'''
results = {}
# 'json' output is keyed at the top level by checker; 'json_new' is keyed by dataset
if len(score_dict) > 1 and output_type != 'json_new':
raise ValueError("output_type must be set to 'json_new' if outputting multiple datasets to a single json file or stdout")
if output_type == 'json':
for ds, score_groups in six.iteritems(score_dict):
for checker, rpair in six.iteritems(score_groups):
groups, errors = rpair
results[checker] = cs.dict_output(
checker, groups, ds, limit,
)
elif output_type == 'json_new':
for ds, score_groups in six.iteritems(score_dict):
for checker, rpair in six.iteritems(score_groups):
groups, errors = rpair
results[ds] = {}
results[ds][checker] = cs.dict_output(
checker, groups, ds, limit
)
json_results = json.dumps(results, indent=2, ensure_ascii=False)
if output_filename == '-':
print(json_results)
else:
with io.open(output_filename, 'w', encoding='utf8') as f:
f.write(json_results)
return groups
|
Generates JSON output for the compliance score(s)
@param cs Compliance Checker Suite
@param score_dict Dict mapping each dataset to its checker results
@param output_filename The file path to output to
@param ds_loc List of source datasets
@param limit The degree of strictness, 1 being the strictest,
and going up from there.
@param output_type Either 'json' or 'json_new'. json_new is the new
json output format that supports multiple datasets
|
def get_lock(self, lockname, locktime=60, auto_renewal=False):
''' Gets a lock and returns it if it can be established. Returns False otherwise '''
pid = os.getpid()
caller = inspect.stack()[0][3]
try:
# rl = redlock.Redlock([{"host": settings.REDIS_SERVERS['std_redis']['host'], "port": settings.REDIS_SERVERS['std_redis']['port'], "db": settings.REDIS_SERVERS['std_redis']['db']}, ])
rl = redis_lock.Lock(self, lockname, expire=locktime, auto_renewal=auto_renewal)
except Exception:
if self.logger:
self.logger.error('Process {0} ({1}) could not get lock {2}. Going ahead without locking!!! {3}'.format(pid, caller, lockname, traceback.format_exc()))
return False
try:
lock = rl.acquire(blocking=False)
except RedisError:
return False
if not lock:
return False
else:
return rl
|
Gets a lock and returns it if it can be established. Returns False otherwise
|
def from_int(cls, retries, redirect=True, default=None):
""" Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r" % (retries, new_retries))
return new_retries
|
Backwards-compatibility for the old retries format.
|
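A hedged sketch of the conversion, assuming a urllib3-style Retry class:

Retry.from_int(3)           # -> Retry(total=3, redirect=None)
Retry.from_int(None)        # -> Retry.DEFAULT (or the supplied default)
Retry.from_int(Retry(5))    # existing Retry instances pass through unchanged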
def imrotate(img,
angle,
center=None,
scale=1.0,
border_value=0,
auto_bound=False):
"""Rotate an image.
Args:
img (ndarray): Image to be rotated.
angle (float): Rotation angle in degrees, positive values mean
clockwise rotation.
center (tuple): Center of the rotation in the source image, by default
it is the center of the image.
scale (float): Isotropic scale factor.
border_value (int): Border value.
auto_bound (bool): Whether to adjust the image size to cover the whole
rotated image.
Returns:
ndarray: The rotated image.
"""
if center is not None and auto_bound:
raise ValueError('`auto_bound` conflicts with `center`')
h, w = img.shape[:2]
if center is None:
center = ((w - 1) * 0.5, (h - 1) * 0.5)
assert isinstance(center, tuple)
matrix = cv2.getRotationMatrix2D(center, -angle, scale)
if auto_bound:
cos = np.abs(matrix[0, 0])
sin = np.abs(matrix[0, 1])
new_w = h * sin + w * cos
new_h = h * cos + w * sin
matrix[0, 2] += (new_w - w) * 0.5
matrix[1, 2] += (new_h - h) * 0.5
w = int(np.round(new_w))
h = int(np.round(new_h))
rotated = cv2.warpAffine(img, matrix, (w, h), borderValue=border_value)
return rotated
|
Rotate an image.
Args:
img (ndarray): Image to be rotated.
angle (float): Rotation angle in degrees, positive values mean
clockwise rotation.
center (tuple): Center of the rotation in the source image, by default
it is the center of the image.
scale (float): Isotropic scale factor.
border_value (int): Border value.
auto_bound (bool): Whether to adjust the image size to cover the whole
rotated image.
Returns:
ndarray: The rotated image.
|
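A short usage sketch, under the assumption that this is an mmcv-style helper:

import numpy as np

img = np.zeros((100, 200, 3), dtype=np.uint8)
out = imrotate(img, 90)                        # output stays 100x200; corners are clipped
out_full = imrotate(img, 90, auto_bound=True)  # canvas grows to 200x100 (h x w) to fit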
def construct(self, **bindings):
"""Constructs the graph and returns either a tensor or a sequence.
Note: This method requires that this SequentialLayerBuilder holds a
template.
Args:
**bindings: Arguments for every deferred parameter.
Returns:
The value that is placed into this.
Raises:
ValueError: if this doesn't hold a template.
"""
if hasattr(self._head, 'construct'):
return self._head.construct(**bindings)
else:
raise ValueError(
'Cannot call construct on a non-template: %s' % type(self._head))
|
Constructs the graph and returns either a tensor or a sequence.
Note: This method requires that this SequentialLayerBuilder holds a
template.
Args:
**bindings: Arguments for every deferred parameter.
Returns:
The value that is placed into this.
Raises:
ValueError: if this doesn't hold a template.
|
def update(self, scaling_group, name=None, cooldown=None,
min_entities=None, max_entities=None, metadata=None):
"""
Updates an existing ScalingGroup. One or more of the attributes can
be specified.
NOTE: if you specify metadata, it will *replace* any existing metadata.
If you want to add to it, you either need to pass the complete dict of
metadata, or call the update_metadata() method.
"""
if not isinstance(scaling_group, ScalingGroup):
scaling_group = self.get(scaling_group)
uri = "/%s/%s/config" % (self.uri_base, scaling_group.id)
if cooldown is None:
cooldown = scaling_group.cooldown
if min_entities is None:
min_entities = scaling_group.min_entities
if max_entities is None:
max_entities = scaling_group.max_entities
body = {"name": name or scaling_group.name,
"cooldown": cooldown,
"minEntities": min_entities,
"maxEntities": max_entities,
"metadata": metadata or scaling_group.metadata,
}
resp, resp_body = self.api.method_put(uri, body=body)
return None
|
Updates an existing ScalingGroup. One or more of the attributes can
be specified.
NOTE: if you specify metadata, it will *replace* any existing metadata.
If you want to add to it, you either need to pass the complete dict of
metadata, or call the update_metadata() method.
|
def _get_struct_cxformwithalpha(self):
"""Get the values for the CXFORMWITHALPHA record."""
obj = _make_object("CXformWithAlpha")
bc = BitConsumer(self._src)
obj.HasAddTerms = bc.u_get(1)
obj.HasMultTerms = bc.u_get(1)
obj.NBits = nbits = bc.u_get(4)
if obj.HasMultTerms:
obj.RedMultTerm = bc.s_get(nbits)
obj.GreenMultTerm = bc.s_get(nbits)
obj.BlueMultTerm = bc.s_get(nbits)
obj.AlphaMultTerm = bc.s_get(nbits)
if obj.HasAddTerms:
obj.RedAddTerm = bc.s_get(nbits)
obj.GreenAddTerm = bc.s_get(nbits)
obj.BlueAddTerm = bc.s_get(nbits)
obj.AlphaAddTerm = bc.s_get(nbits)
return obj
|
Get the values for the CXFORMWITHALPHA record.
|
def get_msg_count_info(self, channel=Channel.CHANNEL_CH0):
"""
Reads the message counters of the specified CAN channel.
:param int channel:
CAN channel, which is to be used (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`).
:return: Tuple with number of CAN messages sent and received.
:rtype: tuple(int, int)
"""
msg_count_info = MsgCountInfo()
UcanGetMsgCountInfoEx(self._handle, channel, byref(msg_count_info))
return msg_count_info.sent_msg_count, msg_count_info.recv_msg_count
|
Reads the message counters of the specified CAN channel.
:param int channel:
CAN channel, which is to be used (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`).
:return: Tuple with number of CAN messages sent and received.
:rtype: tuple(int, int)
|
def get_touch_dict(self, ind=None, out=bool):
""" Get a dictionnary of Cls_Name struct with indices of Rays touching
Only includes Struct object with compute = True
(as returned by self.lStruct__computeInOut_computeInOut)
Also return the associated colors
If in is not None, the indices for each Struct are split between:
- indok : rays touching Struct and in ind
- indout: rays touching Struct but not in ind
"""
if self.config is None:
msg = "Config must be set in order to get touch dict !"
raise Exception(msg)
dElt = {}
ind = self._check_indch(ind, out=bool)
for ss in self.lStruct_computeInOut:
kn = "%s_%s"%(ss.__class__.__name__, ss.Id.Name)
indtouch = self.select(touch=kn, out=bool)
if np.any(indtouch):
indok = indtouch & ind
indout = indtouch & ~ind
if np.any(indok) or np.any(indout):
if out == int:
indok = indok.nonzero()[0]
indout = indout.nonzero()[0]
dElt[kn] = {'indok':indok, 'indout':indout,
'col':ss.get_color()}
return dElt
|
Get a dictionary of Cls_Name structs with indices of Rays touching
Only includes Struct objects with compute = True
(as returned by self.lStruct__computeInOut_computeInOut)
Also return the associated colors
If ind is not None, the indices for each Struct are split between:
- indok : rays touching Struct and in ind
- indout: rays touching Struct but not in ind
|
def parse_object_type_extension(lexer: Lexer) -> ObjectTypeExtensionNode:
"""ObjectTypeExtension"""
start = lexer.token
expect_keyword(lexer, "extend")
expect_keyword(lexer, "type")
name = parse_name(lexer)
interfaces = parse_implements_interfaces(lexer)
directives = parse_directives(lexer, True)
fields = parse_fields_definition(lexer)
if not (interfaces or directives or fields):
raise unexpected(lexer)
return ObjectTypeExtensionNode(
name=name,
interfaces=interfaces,
directives=directives,
fields=fields,
loc=loc(lexer, start),
)
|
ObjectTypeExtension
|
def _resample_nu(self, tau, N_steps=100, prop_std=0.1, alpha=1, beta=1):
"""
Update the degree of freedom parameter with
Metropolis-Hastings. Assume a prior nu ~ Ga(alpha, beta)
and use a proposal nu' ~ N(nu, prop_std^2). If proposals
are negative, reject automatically due to likelihood.
"""
# Convert tau to a list of arrays
taus = [tau] if isinstance(tau, np.ndarray) else tau
N = 0
E_tau = 0
E_logtau = 0
for tau in taus:
bad = ~np.isfinite(tau)
N += np.sum(~bad)
E_tau += np.sum(tau[~bad])
E_logtau += np.sum(np.log(tau[~bad]))
if N > 0:
E_tau /= N
E_logtau /= N
# Compute the log prior, likelihood, and posterior
lprior = lambda nu: (alpha - 1) * np.log(nu) - beta * nu
ll = lambda nu: N * (nu/2 * np.log(nu/2) - gammaln(nu/2) + (nu/2 - 1) * E_logtau - nu/2 * E_tau)
lp = lambda nu: ll(nu) + lprior(nu)
lp_curr = lp(self.nu)
for step in range(N_steps):
# Symmetric proposal
nu_new = self.nu + prop_std * np.random.randn()
if nu_new < 1e-3:
# Reject if too small
continue
# Accept / reject based on likelihoods
lp_new = lp(nu_new)
if np.log(np.random.rand()) < lp_new - lp_curr:
self.nu = nu_new
lp_curr = lp_new
|
Update the degree of freedom parameter with
Metropolis-Hastings. Assume a prior nu ~ Ga(alpha, beta)
and use a proposal nu' ~ N(nu, prop_std^2). If proposals
are negative, reject automatically due to likelihood.
|
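For reference, the unnormalised log-densities being compared in the sampler above, with the Gamma(alpha, beta) prior:

# log p(nu)       = (alpha - 1) * log(nu) - beta * nu
# log p(tau | nu) = N * [ nu/2 * log(nu/2) - logGamma(nu/2)
#                         + (nu/2 - 1) * E[log tau] - nu/2 * E[tau] ]
# accept nu' with probability min(1, exp(lp(nu') - lp(nu)))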
def _format_attrs(self):
""" Formats the self.attrs #OrderedDict """
_bold = bold
_colorize = colorize
if not self.pretty:
_bold = lambda x: x
_colorize = lambda x, c: x
attrs = []
add_attr = attrs.append
if self.doc and hasattr(self.obj, "__doc__"):
# Optionally attaches documentation
if self.obj.__doc__:
add_attr("`{}`".format(self.obj.__doc__.strip()))
if self.attrs:
# Attach request attributes
for key, value in self.attrs.items():
value, color = value
try:
value = value or \
self._getattrs(getattr, self.obj, key.split("."))
except AttributeError:
pass
value = _colorize(value, color) if color else value
v = None
if value is not None:
value = "`{}`".format(value) \
if isinstance(value, Look.str_) else value
k, v = _bold(key), value
else:
k, v = _bold(key), str(value)
if v:
k = '{}='.format(k) if not self._no_keys else ''
add_attr("{}{}".format(k, v))
if len(attrs):
breaker = "\n " if self.line_break and len(attrs) > 1 else ""
return breaker + ((", "+breaker).join(attrs)) + breaker.strip(" ")
else:
return ""
|
Formats the self.attrs #OrderedDict
|
def relativize(self, absolute_address, target_region_id=None):
"""
Convert an absolute address to the memory offset in a memory region.
Note that if an address belongs to heap region is passed in to a stack region map, it will be converted to an
offset included in the closest stack frame, and vice versa for passing a stack address to a heap region.
Therefore you should only pass in address that belongs to the same category (stack or non-stack) of this region
map.
:param absolute_address: An absolute memory address
:return: A tuple of the closest region ID, the relative offset, and the related function
address.
"""
if target_region_id is None:
if self.is_stack:
# Get the base address of the stack frame it belongs to
base_address = next(self._address_to_region_id.irange(minimum=absolute_address, reverse=False))
else:
try:
base_address = next(self._address_to_region_id.irange(maximum=absolute_address, reverse=True))
except StopIteration:
# Not found. It belongs to the global region then.
return 'global', absolute_address, None
descriptor = self._address_to_region_id[base_address]
else:
if target_region_id == 'global':
# Just return the absolute address
return 'global', absolute_address, None
if target_region_id not in self._region_id_to_address:
raise SimRegionMapError('Trying to relativize to a non-existent region "%s"' % target_region_id)
descriptor = self._region_id_to_address[target_region_id]
base_address = descriptor.base_address
return descriptor.region_id, absolute_address - base_address, descriptor.related_function_address
|
Convert an absolute address to the memory offset in a memory region.
Note that if an address belongs to heap region is passed in to a stack region map, it will be converted to an
offset included in the closest stack frame, and vice versa for passing a stack address to a heap region.
Therefore you should only pass in address that belongs to the same category (stack or non-stack) of this region
map.
:param absolute_address: An absolute memory address
:return: A tuple of the closest region ID, the relative offset, and the related function
address.
|
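A hedged illustration for a non-stack region map with two hypothetical regions:

# base addresses: 'lib_a' at 0x400000, 'lib_b' at 0x500000
# relativize(0x400010) -> ('lib_a', 0x10, <related function address>)
# relativize(0x300000) -> ('global', 0x300000, None)   # below every base address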
def lock(self, block=True):
"""
Lock connection from being used else where
"""
self._locked = True
return self._lock.acquire(block)
|
Lock connection from being used else where
|
def download_files(file_list):
"""Download the latest data. """
for _, source_data_file in file_list:
sql_gz_name = source_data_file['name'].split('/')[-1]
msg = 'Downloading: %s' % (sql_gz_name)
log.debug(msg)
new_data = objectstore.get_object(
handelsregister_conn, source_data_file, 'handelsregister')
# save output to file!
with open('data/{}'.format(sql_gz_name), 'wb') as outputzip:
outputzip.write(new_data)
|
Download the latest data.
|
def update_fit_boxes(self, new_fit=False):
"""
alters fit_box and mean_fit_box lists to match with changes in
specimen or new/removed interpretations
Parameters
----------
new_fit : boolean representing if there is a new fit
Alters
------
fit_box selection, tmin_box selection, tmax_box selection,
mean_fit_box selection, current_fit
"""
# update the fit box
self.update_fit_box(new_fit)
# select new fit
self.on_select_fit(None)
# update the high level fits box
self.update_mean_fit_box()
|
alters fit_box and mean_fit_box lists to match with changes in
specimen or new/removed interpretations
Parameters
----------
new_fit : boolean representing if there is a new fit
Alters
------
fit_box selection, tmin_box selection, tmax_box selection,
mean_fit_box selection, current_fit
|
def _deftype_to_py_ast( # pylint: disable=too-many-branches
ctx: GeneratorContext, node: DefType
) -> GeneratedPyAST:
"""Return a Python AST Node for a `deftype*` expression."""
assert node.op == NodeOp.DEFTYPE
type_name = munge(node.name)
ctx.symbol_table.new_symbol(sym.symbol(node.name), type_name, LocalType.DEFTYPE)
bases = []
for base in node.interfaces:
base_node = gen_py_ast(ctx, base)
assert (
count(base_node.dependencies) == 0
), "Class and host form nodes do not have dependencies"
bases.append(base_node.node)
decorator = ast.Call(
func=_ATTR_CLASS_DECORATOR_NAME,
args=[],
keywords=[
ast.keyword(arg="cmp", value=ast.NameConstant(False)),
ast.keyword(arg="frozen", value=ast.NameConstant(node.is_frozen)),
ast.keyword(arg="slots", value=ast.NameConstant(True)),
],
)
with ctx.new_symbol_table(node.name):
type_nodes = []
for field in node.fields:
safe_field = munge(field.name)
type_nodes.append(
ast.Assign(
targets=[ast.Name(id=safe_field, ctx=ast.Store())],
value=ast.Call(func=_ATTRIB_FIELD_FN_NAME, args=[], keywords=[]),
)
)
ctx.symbol_table.new_symbol(sym.symbol(field.name), safe_field, field.local)
type_deps: List[ast.AST] = []
for method in node.methods:
type_ast = __deftype_method_to_py_ast(ctx, method)
type_nodes.append(type_ast.node)
type_deps.extend(type_ast.dependencies)
return GeneratedPyAST(
node=ast.Name(id=type_name, ctx=ast.Load()),
dependencies=list(
chain(
type_deps,
[
ast.ClassDef(
name=type_name,
bases=bases,
keywords=[],
body=type_nodes,
decorator_list=[decorator],
)
],
)
),
)
|
Return a Python AST Node for a `deftype*` expression.
|
def make_slow_waves(events, data, time, s_freq):
"""Create dict for each slow wave, based on events of time points.
Parameters
----------
events : ndarray (dtype='int')
N x 5 matrix with start, trough, zero, peak, end samples
data : ndarray (dtype='float')
vector with the data
time : ndarray (dtype='float')
vector with time points
s_freq : float
sampling frequency
Returns
-------
list of dict
list of all the SWs, with information about start,
trough_time, zero_time, peak_time, end, duration (s), trough_val,
peak_val, peak-to-peak amplitude (signal units), area_under_curve
(signal units * s)
"""
slow_waves = []
for ev in events:
one_sw = {'start': time[ev[0]],
'trough_time': time[ev[1]],
'zero_time': time[ev[2]],
'peak_time': time[ev[3]],
'end': time[ev[4] - 1],
'trough_val': data[ev[1]],
'peak_val': data[ev[3]],
'dur': (ev[4] - ev[0]) / s_freq,
'ptp': abs(data[ev[3]] - data[ev[1]])
}
slow_waves.append(one_sw)
return slow_waves
|
Create dict for each slow wave, based on events of time points.
Parameters
----------
events : ndarray (dtype='int')
N x 5 matrix with start, trough, zero, peak, end samples
data : ndarray (dtype='float')
vector with the data
time : ndarray (dtype='float')
vector with time points
s_freq : float
sampling frequency
Returns
-------
list of dict
list of all the SWs, with information about start,
trough_time, zero_time, peak_time, end, duration (s), trough_val,
peak_val, peak-to-peak amplitude (signal units), area_under_curve
(signal units * s)
|
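A small worked example on a 10 Hz signal with a single detected event:

import numpy as np

s_freq = 10.0
time = np.arange(20) / s_freq
data = np.sin(2 * np.pi * 0.5 * time)
events = np.array([[0, 5, 10, 15, 20]])  # start, trough, zero, peak, end samples
sw = make_slow_waves(events, data, time, s_freq)
sw[0]['dur']  # -> 2.0 seconds ((20 - 0) / 10)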
def disable(states):
'''
Disable state runs.
CLI Example:
.. code-block:: bash
salt '*' state.disable highstate
salt '*' state.disable highstate,test.succeed_without_changes
.. note::
To disable a state file from running provide the same name that would
be passed in a state.sls call.
salt '*' state.disable bind.config
'''
ret = {
'res': True,
'msg': ''
}
states = salt.utils.args.split_input(states)
msg = []
_disabled = __salt__['grains.get']('state_runs_disabled')
if not isinstance(_disabled, list):
_disabled = []
_changed = False
for _state in states:
if _state in _disabled:
msg.append('Info: {0} state already disabled.'.format(_state))
else:
msg.append('Info: {0} state disabled.'.format(_state))
_disabled.append(_state)
_changed = True
if _changed:
__salt__['grains.setval']('state_runs_disabled', _disabled)
ret['msg'] = '\n'.join(msg)
# refresh the grains
__salt__['saltutil.refresh_modules']()
return ret
|
Disable state runs.
CLI Example:
.. code-block:: bash
salt '*' state.disable highstate
salt '*' state.disable highstate,test.succeed_without_changes
.. note::
To disable a state file from running provide the same name that would
be passed in a state.sls call.
salt '*' state.disable bind.config
|
def run_command(self, data):
"""
check the given command and send to the correct dispatcher
"""
command = data.get("command")
if self.debug:
self.py3_wrapper.log("Running remote command %s" % command)
if command == "refresh":
self.refresh(data)
elif command == "refresh_all":
self.py3_wrapper.refresh_modules()
elif command == "click":
self.click(data)
|
check the given command and send to the correct dispatcher
|