| code (string, 75 to 104k chars) | docstring (string, 1 to 46.9k chars) |
|---|---|
def _get_batch_name(items, skip_jointcheck=False):
"""Retrieve the shared batch name for a group of items.
"""
batch_names = collections.defaultdict(int)
has_joint = any([is_joint(d) for d in items])
for data in items:
if has_joint and not skip_jointcheck:
batches = dd.get_sample_name(data)
else:
batches = dd.get_batches(data) or dd.get_sample_name(data)
if not isinstance(batches, (list, tuple)):
batches = [batches]
for b in batches:
batch_names[b] += 1
return sorted(batch_names.items(), key=lambda x: x[-1], reverse=True)[0][0]
|
Retrieve the shared batch name for a group of items.
|
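A minimal sketch of the batch-counting logic above, using hypothetical plain-list inputs in place of bcbio data items:

import collections

# hypothetical inputs: the batch name(s) attached to each item
item_batches = [["batch1"], ["batch1", "batch2"], ["batch2"], ["batch1"]]

batch_names = collections.defaultdict(int)
for batches in item_batches:
    for b in batches:
        batch_names[b] += 1

# the most frequently shared batch name wins, as in _get_batch_name
print(sorted(batch_names.items(), key=lambda x: x[-1], reverse=True)[0][0])  # batch1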
def _trychar(char, fallback, asciimode=None): # nocover
"""
Logic from IPython timeit to handle terminals that can't show mu
Args:
char (str): character, typically unicode, to try to use
fallback (str): ascii character to use if stdout cannot encode char
asciimode (bool): if True, always use fallback
Example:
>>> char = _trychar('µs', 'us')
>>> print('char = {}'.format(char))
>>> assert _trychar('µs', 'us', asciimode=True) == 'us'
"""
if asciimode is True:
# If we request ascii mode simply return it
return fallback
if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding: # pragma: nobranch
try:
char.encode(sys.stdout.encoding)
except Exception: # nocover
pass
else:
return char
return fallback
|
Logic from IPython timeit to handle terminals that can't show mu
Args:
char (str): character, typically unicode, to try to use
fallback (str): ascii character to use if stdout cannot encode char
asciimode (bool): if True, always use fallback
Example:
>>> char = _trychar('µs', 'us')
>>> print('char = {}'.format(char))
>>> assert _trychar('µs', 'us', asciimode=True) == 'us'
|
def build_date(self):
"""
get build date.
:return: build date. None if not found
"""
# pylint: disable=len-as-condition
if len(self.dutinformation) > 0 and (self.dutinformation.get(0).build is not None):
return self.dutinformation.get(0).build.date
return None
|
get build date.
:return: build date. None if not found
|
def move_window(self, destination="", session=None):
"""
Move the current :class:`Window` object ``$ tmux move-window``.
Parameters
----------
destination : str, optional
the ``target window`` or index to move the window to, default:
empty string
session : str, optional
the ``target session`` or index to move the window to, default:
current session.
"""
session = session or self.get('session_id')
proc = self.cmd(
'move-window',
'-s%s:%s' % (self.get('session_id'), self.index),
'-t%s:%s' % (session, destination),
)
if proc.stderr:
raise exc.LibTmuxException(proc.stderr)
self.server._update_windows()
|
Move the current :class:`Window` object ``$ tmux move-window``.
Parameters
----------
destination : str, optional
the ``target window`` or index to move the window to, default:
empty string
session : str, optional
the ``target session`` or index to move the window to, default:
current session.
|
def get_nearest_edges(G, X, Y, method=None, dist=0.0001):
"""
Return the graph edges nearest to a list of points. Pass in points
as separate vectors of X and Y coordinates. The 'kdtree' method
is by far the fastest with large data sets, but only finds approximate
nearest edges if working in unprojected coordinates like lat-lng (it
precisely finds the nearest edge if working in projected coordinates).
The 'balltree' method is second fastest with large data sets, but it
is precise if working in unprojected coordinates like lat-lng.
Parameters
----------
G : networkx multidigraph
X : list-like
The vector of longitudes or x's for which we will find the nearest
edge in the graph. For projected graphs, use the projected coordinates,
usually in meters.
Y : list-like
The vector of latitudes or y's for which we will find the nearest
edge in the graph. For projected graphs, use the projected coordinates,
usually in meters.
method : str {None, 'kdtree', 'balltree'}
Which method to use for finding nearest edge to each point.
If None, we manually find each edge one at a time using
osmnx.utils.get_nearest_edge. If 'kdtree' we use
scipy.spatial.cKDTree for very fast euclidean search. Recommended for
projected graphs. If 'balltree', we use sklearn.neighbors.BallTree for
fast haversine search. Recommended for unprojected graphs.
dist : float
spacing length along edges. Units are the same as the geom; Degrees for
unprojected geometries and meters for projected geometries. The smaller
the value, the more points are created.
Returns
-------
ne : ndarray
array of nearest edges represented by their startpoint and endpoint ids,
u and v, the OSM ids of the nodes.
Info
----
The method creates equally distanced points along the edges of the network.
Then, these points are used in a kdTree or BallTree search to identify which
is nearest. Note that this method will not give the exact perpendicular point
along the edge, but the smaller the *dist* parameter, the closer the solution
will be.
Code is adapted from an answer by JHuw from this original question:
https://gis.stackexchange.com/questions/222315/geopandas-find-nearest-point-in-other-dataframe
"""
start_time = time.time()
if method is None:
# calculate nearest edge one at a time for each point
ne = [get_nearest_edge(G, (x, y)) for x, y in zip(X, Y)]
ne = [(u, v) for _, u, v in ne]
elif method == 'kdtree':
# check if we were able to import scipy.spatial.cKDTree successfully
if not cKDTree:
raise ImportError('The scipy package must be installed to use this optional feature.')
# transform graph into DataFrame
edges = graph_to_gdfs(G, nodes=False, fill_edge_geometry=True)
# transform edges into evenly spaced points
edges['points'] = edges.apply(lambda x: redistribute_vertices(x.geometry, dist), axis=1)
# develop edges data for each created point
extended = edges['points'].apply([pd.Series]).stack().reset_index(level=1, drop=True).join(edges).reset_index()
# Prepare btree arrays
nbdata = np.array(list(zip(extended['Series'].apply(lambda x: x.x),
extended['Series'].apply(lambda x: x.y))))
# build a k-d tree for euclidean nearest node search
btree = cKDTree(data=nbdata, compact_nodes=True, balanced_tree=True)
# query the tree for nearest node to each point
points = np.array([X, Y]).T
dist, idx = btree.query(points, k=1) # Returns ids of closest point
eidx = extended.loc[idx, 'index']
ne = edges.loc[eidx, ['u', 'v']]
elif method == 'balltree':
# check if we were able to import sklearn.neighbors.BallTree successfully
if not BallTree:
raise ImportError('The scikit-learn package must be installed to use this optional feature.')
# transform graph into DataFrame
edges = graph_to_gdfs(G, nodes=False, fill_edge_geometry=True)
# transform edges into evenly spaced points
edges['points'] = edges.apply(lambda x: redistribute_vertices(x.geometry, dist), axis=1)
# develop edges data for each created point
extended = edges['points'].apply([pd.Series]).stack().reset_index(level=1, drop=True).join(edges).reset_index()
# haversine requires data in form of [lat, lng] and inputs/outputs in units of radians
nodes = pd.DataFrame({'x': extended['Series'].apply(lambda x: x.x),
'y': extended['Series'].apply(lambda x: x.y)})
nodes_rad = np.deg2rad(nodes[['y', 'x']].values.astype(float))
points = np.array([Y, X]).T
points_rad = np.deg2rad(points)
# build a ball tree for haversine nearest node search
tree = BallTree(nodes_rad, metric='haversine')
# query the tree for nearest node to each point
idx = tree.query(points_rad, k=1, return_distance=False)
eidx = extended.loc[idx[:, 0], 'index']
ne = edges.loc[eidx, ['u', 'v']]
else:
raise ValueError('You must pass a valid method name, or None.')
log('Found nearest edges to {:,} points in {:,.2f} seconds'.format(len(X), time.time() - start_time))
return np.array(ne)
|
Return the graph edges nearest to a list of points. Pass in points
as separate vectors of X and Y coordinates. The 'kdtree' method
is by far the fastest with large data sets, but only finds approximate
nearest edges if working in unprojected coordinates like lat-lng (it
precisely finds the nearest edge if working in projected coordinates).
The 'balltree' method is second fastest with large data sets, but it
is precise if working in unprojected coordinates like lat-lng.
Parameters
----------
G : networkx multidigraph
X : list-like
The vector of longitudes or x's for which we will find the nearest
edge in the graph. For projected graphs, use the projected coordinates,
usually in meters.
Y : list-like
The vector of latitudes or y's for which we will find the nearest
edge in the graph. For projected graphs, use the projected coordinates,
usually in meters.
method : str {None, 'kdtree', 'balltree'}
Which method to use for finding nearest edge to each point.
If None, we manually find each edge one at a time using
osmnx.utils.get_nearest_edge. If 'kdtree' we use
scipy.spatial.cKDTree for very fast euclidean search. Recommended for
projected graphs. If 'balltree', we use sklearn.neighbors.BallTree for
fast haversine search. Recommended for unprojected graphs.
dist : float
spacing length along edges. Units are the same as the geom; Degrees for
unprojected geometries and meters for projected geometries. The smaller
the value, the more points are created.
Returns
-------
ne : ndarray
array of nearest edges represented by their startpoint and endpoint ids,
u and v, the OSM ids of the nodes.
Info
----
The method creates equally distanced points along the edges of the network.
Then, these points are used in a kdTree or BallTree search to identify which
is nearest. Note that this method will not give the exact perpendicular point
along the edge, but the smaller the *dist* parameter, the closer the solution
will be.
Code is adapted from an answer by JHuw from this original question:
https://gis.stackexchange.com/questions/222315/geopandas-find-nearest-point-in-other-dataframe
|
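A hedged usage sketch; it assumes an older osmnx release where `graph_from_place` and `get_nearest_edges` are exposed at the top level, plus network access to download the graph. The place name and coordinates are illustrative only:

import osmnx as ox

G = ox.graph_from_place('Piedmont, California, USA', network_type='drive')
X = [-122.231, -122.234]   # longitudes of the query points
Y = [37.824, 37.827]       # latitudes of the query points

# 'balltree' is the recommended method for unprojected (lat-lng) graphs
ne = ox.get_nearest_edges(G, X, Y, method='balltree', dist=0.001)
print(ne)  # array of (u, v) node id pairs, one per query point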
def _extract(self, identifier):
''' Extracts data from conjugation table. '''
conjugation = []
if self.tree.xpath('//p/b[normalize-space(text()) = "' + identifier.decode('utf-8') + '"]'):
p = self.tree.xpath('//p/b[normalize-space(text()) = "' + identifier.decode('utf-8') + '"]')[0].getparent()
for font in p.iterfind('font'):
text = self._normalize(font.text_content())
next = font.getnext()
text += ' ' + self._normalize(next.text_content())
while True:
next = next.getnext()
if next is None or next.tag != 'span':
break
text += '/' + self._normalize(next.text_content())
conjugation.append(text)
return conjugation
|
Extracts data from conjugation table.
|
def content_val(self, ymldata=None, messages=None):
"""Validates the Command Dictionary to ensure the contents for each of the fields
meets specific criteria regarding the expected types, byte ranges, etc."""
self._ymlproc = YAMLProcessor(self._ymlfile, False)
# Turn off the YAML Processor
log.debug("BEGIN: Content-based validation of Command dictionary")
if ymldata is not None:
cmddict = ymldata
elif ymldata is None and self._ymlproc.loaded:
cmddict = self._ymlproc.data
elif not self._ymlproc.loaded:
raise util.YAMLError("YAML failed to load.")
try:
# instantiate the document number. this will increment in order to
# track the line numbers and section where validation fails
docnum = 0
# boolean to hold argument validity
argsvalid = True
# list of rules to validate against
rules = []
### set the command rules
#
# set uniqueness rule for command names
rules.append(UniquenessRule('name', "Duplicate command name: %s", messages))
# set uniqueness rule for opcodes
rules.append(UniquenessRule('opcode', "Duplicate opcode: %s", messages))
#
###
for cmdcnt, cmddefn in enumerate(cmddict[0]):
# check the command rules
for rule in rules:
rule.check(cmddefn)
# list of argument rules to validate against
argrules = []
### set rules for command arguments
#
# set uniqueness rule for opcodes
argrules.append(UniquenessRule('name', "Duplicate argument name: " + cmddefn.name + ".%s", messages))
# set type rule for arg.type
argrules.append(TypeRule('type', "Invalid argument type for argument: " + cmddefn.name + ".%s", messages))
# set argument size rule for arg.type.nbytes
argrules.append(TypeSizeRule('nbytes', "Invalid argument size for argument: " + cmddefn.name + ".%s", messages))
# set argument enumerations rule to check no enumerations contain un-quoted YAML special variables
argrules.append(EnumRule('enum', "Invalid enum value for argument: " + cmddefn.name + ".%s", messages))
# set byte order rule to ensure proper ordering of arguments
argrules.append(ByteOrderRule('bytes', "Invalid byte order for argument: " + cmddefn.name + ".%s", messages))
#
###
argdefns = cmddefn.argdefns
for arg in argdefns:
# check argument rules
for rule in argrules:
rule.check(arg)
# check if argument rule failed, if so set the validity to False
if not all(r.valid is True for r in argrules):
argsvalid = False
log.debug("END: Content-based validation complete for '%s'", self._ymlfile)
# check validity of all command rules and argument validity
return all(rule.valid is True for rule in rules) and argsvalid
except util.YAMLValidationError as e:
# Display the error message
if messages is not None:
if len(e.message) < 128:
msg = "Validation Failed for YAML file '" + self._ymlfile + "': '" + str(e.message) + "'"
else:
msg = "Validation Failed for YAML file '" + self._ymlfile + "'"
log.error(msg)
self.ehandler.process(docnum, self.ehandler.doclines, e, messages)
return False
|
Validates the Command Dictionary to ensure the contents for each of the fields
meets specific criteria regarding the expected types, byte ranges, etc.
|
def _parse_module_with_import(self, uri):
"""Look for functions and classes in an importable module.
Parameters
----------
uri : str
The name of the module to be parsed. This module needs to be
importable.
Returns
-------
functions : list of str
A list of (public) function names in the module.
classes : list of str
A list of (public) class names in the module.
"""
mod = __import__(uri, fromlist=[uri])
# find all public objects in the module.
obj_strs = [obj for obj in dir(mod) if not obj.startswith('_')]
functions = []
classes = []
for obj_str in obj_strs:
# find the actual object from its string representation
if obj_str not in mod.__dict__:
continue
obj = mod.__dict__[obj_str]
# figure out if obj is a function or class
if hasattr(obj, 'func_name') or \
isinstance(obj, BuiltinFunctionType):
functions.append(obj_str)
else:
try:
issubclass(obj, object)
classes.append(obj_str)
except TypeError:
# not a function or class
pass
return functions, classes
|
Look for functions and classes in an importable module.
Parameters
----------
uri : str
The name of the module to be parsed. This module needs to be
importable.
Returns
-------
functions : list of str
A list of (public) function names in the module.
classes : list of str
A list of (public) class names in the module.
|
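A standalone sketch of the same public-name introspection, applied to a stdlib module; it uses `types.FunctionType` where the method above relies on the Python 2 `func_name` attribute:

from types import BuiltinFunctionType, FunctionType

mod = __import__('json', fromlist=['json'])
obj_strs = [name for name in dir(mod) if not name.startswith('_')]

functions, classes = [], []
for name in obj_strs:
    obj = getattr(mod, name)
    if isinstance(obj, (FunctionType, BuiltinFunctionType)):
        functions.append(name)
    elif isinstance(obj, type):
        classes.append(name)

print(functions)  # e.g. ['detect_encoding', 'dump', 'dumps', 'load', 'loads']
print(classes)    # e.g. ['JSONDecodeError', 'JSONDecoder', 'JSONEncoder']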
def _format_exitcodes(exitcodes):
"""Format a list of exit code with names of the signals if possible"""
str_exitcodes = ["{}({})".format(_get_exitcode_name(e), e)
for e in exitcodes if e is not None]
return "{" + ", ".join(str_exitcodes) + "}"
|
Format a list of exit codes with the names of the signals if possible
|
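The `_get_exitcode_name` helper is not shown above; a hedged sketch of the overall idea, with a hypothetical stand-in for that helper, might look like this:

import signal

def _get_exitcode_name(exitcode):  # hypothetical stand-in, not the real helper
    if exitcode < 0:               # multiprocessing reports signal deaths as negative codes
        try:
            return signal.Signals(-exitcode).name
        except ValueError:
            return "UNKNOWN"
    return "EXIT"

def _format_exitcodes(exitcodes):
    str_exitcodes = ["{}({})".format(_get_exitcode_name(e), e)
                     for e in exitcodes if e is not None]
    return "{" + ", ".join(str_exitcodes) + "}"

print(_format_exitcodes([0, -9, None]))  # {EXIT(0), SIGKILL(-9)} on POSIX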
def regex(self, *pattern, **kwargs):
"""
Add re pattern
:param pattern: regular expression pattern(s) to add
:type pattern: str
:return: self
:rtype: Rebulk
"""
self.pattern(self.build_re(*pattern, **kwargs))
return self
|
Add re pattern
:param pattern: regular expression pattern(s) to add
:type pattern: str
:return: self
:rtype: Rebulk
|
def _get_closest_ansi_color(r, g, b, exclude=()):
"""
Find closest ANSI color. Return it by name.
:param r: Red (Between 0 and 255.)
:param g: Green (Between 0 and 255.)
:param b: Blue (Between 0 and 255.)
:param exclude: A tuple of color names to exclude. (E.g. ``('ansired', )``.)
"""
assert isinstance(exclude, tuple)
# When we have a bit of saturation, avoid the gray-like colors, otherwise,
# too often the distance to the gray color is less.
saturation = abs(r - g) + abs(g - b) + abs(b - r) # Between 0..510
if saturation > 30:
exclude += ('ansilightgray', 'ansidarkgray', 'ansiwhite', 'ansiblack')
# Take the closest color.
# (Thanks to Pygments for this part.)
distance = 257*257*3 # "infinity" (>distance from #000000 to #ffffff)
match = 'ansidefault'
for name, (r2, g2, b2) in ANSI_COLORS_TO_RGB.items():
if name != 'ansidefault' and name not in exclude:
d = (r - r2) ** 2 + (g - g2) ** 2 + (b - b2) ** 2
if d < distance:
match = name
distance = d
return match
|
Find closest ANSI color. Return it by name.
:param r: Red (Between 0 and 255.)
:param g: Green (Between 0 and 255.)
:param b: Blue (Between 0 and 255.)
:param exclude: A tuple of color names to exclude. (E.g. ``('ansired', )``.)
|
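A self-contained sketch of the same squared-distance search over a small hypothetical palette (the real ANSI_COLORS_TO_RGB table in prompt_toolkit is larger):

PALETTE = {  # hypothetical subset of an ANSI name -> RGB table
    'ansired':   (205, 0, 0),
    'ansigreen': (0, 205, 0),
    'ansiblue':  (0, 0, 238),
}

def closest_color(r, g, b, exclude=()):
    match, distance = 'ansidefault', 257 * 257 * 3
    for name, (r2, g2, b2) in PALETTE.items():
        if name in exclude:
            continue
        d = (r - r2) ** 2 + (g - g2) ** 2 + (b - b2) ** 2
        if d < distance:
            match, distance = name, d
    return match

print(closest_color(200, 30, 30))                        # ansired
print(closest_color(200, 30, 30, exclude=('ansired',)))  # ansigreen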
def logger(self) -> Logger:
"""A :class:`logging.Logger` logger for the app.
This can be used to log messages in a format as defined in the
app configuration, for example,
.. code-block:: python
app.logger.debug("Request method %s", request.method)
app.logger.error("Error, of some kind")
"""
if self._logger is None:
self._logger = create_logger(self)
return self._logger
|
A :class:`logging.Logger` logger for the app.
This can be used to log messages in a format as defined in the
app configuration, for example,
.. code-block:: python
app.logger.debug("Request method %s", request.method)
app.logger.error("Error, of some kind")
|
def _run_qmc(self, boot):
""" runs quartet max-cut on a quartets file """
## convert to txt file for wQMC
self._tmp = os.path.join(self.dirs, ".tmpwtre")
cmd = [ip.bins.qmc, "qrtt="+self.files.qdump, "otre="+self._tmp]
## run them
proc = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
res = proc.communicate()
if proc.returncode:
#LOGGER.error("Error in QMC: \n({}).".format(res))
LOGGER.error(res)
raise IPyradWarningExit(res[1])
## read in the tmp files since qmc does not pipe
with open(self._tmp) as intree:
## convert int names back to str names renamer returns a newick str
#tmp = toytree.tree(intree.read().strip())
tmp = ete3.Tree(intree.read().strip())
tmpwtre = self._renamer(tmp)#.tree)
## save the tree
if boot:
self.trees.boots = os.path.join(self.dirs, self.name+".boots")
with open(self.trees.boots, 'a') as outboot:
outboot.write(tmpwtre+"\n")
else:
self.trees.tree = os.path.join(self.dirs, self.name+".tree")
with open(self.trees.tree, 'w') as outtree:
outtree.write(tmpwtre)
## save JSON file checkpoint
self._save()
|
runs quartet max-cut on a quartets file
|
def get_push_commits(self, repository_id, push_id, project=None, top=None, skip=None, include_links=None):
"""GetPushCommits.
Retrieve a list of commits associated with a particular push.
:param str repository_id: The id or friendly name of the repository. To use the friendly name, projectId must also be specified.
:param int push_id: The id of the push.
:param str project: Project ID or project name
:param int top: The maximum number of commits to return ("get the top x commits").
:param int skip: The number of commits to skip.
:param bool include_links: Set to false to avoid including REST Url links for resources. Defaults to true.
:rtype: [GitCommitRef]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if repository_id is not None:
route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
query_parameters = {}
if push_id is not None:
query_parameters['pushId'] = self._serialize.query('push_id', push_id, 'int')
if top is not None:
query_parameters['top'] = self._serialize.query('top', top, 'int')
if skip is not None:
query_parameters['skip'] = self._serialize.query('skip', skip, 'int')
if include_links is not None:
query_parameters['includeLinks'] = self._serialize.query('include_links', include_links, 'bool')
response = self._send(http_method='GET',
location_id='c2570c3b-5b3f-41b8-98bf-5407bfde8d58',
version='5.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[GitCommitRef]', self._unwrap_collection(response))
|
GetPushCommits.
Retrieve a list of commits associated with a particular push.
:param str repository_id: The id or friendly name of the repository. To use the friendly name, projectId must also be specified.
:param int push_id: The id of the push.
:param str project: Project ID or project name
:param int top: The maximum number of commits to return ("get the top x commits").
:param int skip: The number of commits to skip.
:param bool include_links: Set to false to avoid including REST Url links for resources. Defaults to true.
:rtype: [GitCommitRef]
|
def execute(self, query):
"""
Execute a query directly on the database.
"""
c = self.conn.cursor()
result = c.execute(query)
for i in result:
yield i
|
Execute a query directly on the database.
|
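A hedged usage sketch with sqlite3 standing in for `self.conn`; the point is that rows are yielded lazily rather than fetched up front:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE t (x INTEGER)')
conn.executemany('INSERT INTO t VALUES (?)', [(1,), (2,), (3,)])

def execute(conn, query):
    c = conn.cursor()
    result = c.execute(query)
    for i in result:
        yield i

for row in execute(conn, 'SELECT x FROM t ORDER BY x'):
    print(row)  # (1,) then (2,) then (3,)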
def get_element_tree(parent_to_parse):
"""
:return: an ElementTree initialized with the parsed element.
:see: get_element(parent_to_parse, element_path)
"""
if isinstance(parent_to_parse, ElementTree):
return parent_to_parse
element = get_element(parent_to_parse)
return ElementTree() if element is None else ElementTree(element)
|
:return: an ElementTree initialized with the parsed element.
:see: get_element(parent_to_parse, element_path)
|
def next_address_avoid_collision(self, start_addr):
"""Finds the next address recursively which does not collide with any other address"""
i = 1
while self.is_address_in_use(next_addr(start_addr, i)):
i += 1
return next_addr(start_addr, i)
|
Finds the next address, iterating until one does not collide with any other address
|
def handle_POST(self, environ, start_response):
"""Handles the HTTP POST request.
Attempts to interpret all HTTP POST requests as XML-RPC calls,
which are forwarded to the server's _dispatch method for handling.
Most code taken from SimpleXMLRPCServer with modifications for wsgi and my custom dispatcher.
"""
try:
# Get arguments by reading body of request.
# We read this in chunks to avoid straining
# socket.read(); around the 10 or 15Mb mark, some platforms
# begin to have problems (bug #792570).
length = int(environ['CONTENT_LENGTH'])
data = environ['wsgi.input'].read(length)
# In previous versions of SimpleXMLRPCServer, _dispatch
# could be overridden in this class, instead of in
# SimpleXMLRPCDispatcher. To maintain backwards compatibility,
# check to see if a subclass implements _dispatch and
# using that method if present.
response = self.dispatcher._marshaled_dispatch(
data, getattr(self.dispatcher, '_dispatch', None)
)
response += b'\n'
except Exception as e: # This should only happen if the module is buggy
# internal error, report as HTTP server error
logger.exception(e)
start_response("500 Server error", [('Content-Type', 'text/plain')])
return []
else:
# got a valid XML RPC response
start_response("200 OK", [('Content-Type', 'text/xml'), ('Content-Length', str(len(response)),)])
return [response]
|
Handles the HTTP POST request.
Attempts to interpret all HTTP POST requests as XML-RPC calls,
which are forwarded to the server's _dispatch method for handling.
Most code taken from SimpleXMLRPCServer with modifications for wsgi and my custom dispatcher.
|
def __on_message(self, ws, message):
'''Handler for parsing WS messages.'''
message = json.loads(message)
self.logger.debug(json.dumps(message))
table = message['table'] if 'table' in message else None
action = message['action'] if 'action' in message else None
try:
if 'subscribe' in message:
self.logger.debug("Subscribed to %s." % message['subscribe'])
elif action:
if table not in self.data:
self.data[table] = []
# There are four possible actions from the WS:
# 'partial' - full table image
# 'insert' - new row
# 'update' - update row
# 'delete' - delete row
if action == 'partial':
self.logger.debug("%s: partial" % table)
self.data[table] += message['data']
# Keys are communicated on partials to let you know how to uniquely identify
# an item. We use it for updates.
self.keys[table] = message['keys']
elif action == 'insert':
self.logger.debug('%s: inserting %s' % (table, message['data']))
self.data[table] += message['data']
elif action == 'update':
self.logger.debug('%s: updating %s' % (table, message['data']))
# Locate the item in the collection and update it.
for updateData in message['data']:
item = findItemByKeys(self.keys[table], self.data[table], updateData)
if not item:
return # No item found to update. Could happen before push
item.update(updateData)
# Remove cancelled / filled orders
if table == 'order' and item['leavesQty'] <= 0:
self.data[table].remove(item)
elif action == 'delete':
self.logger.debug('%s: deleting %s' % (table, message['data']))
# Locate the item in the collection and remove it.
for deleteData in message['data']:
item = findItemByKeys(self.keys[table], self.data[table], deleteData)
self.data[table].remove(item)
else:
raise Exception("Unknown action: %s" % action)
except:
self.logger.error(traceback.format_exc())
|
Handler for parsing WS messages.
|
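The `findItemByKeys` helper used for 'update' and 'delete' is not shown above; a hedged sketch of what it plausibly does (match a row on the key columns announced in the 'partial' message):

def findItemByKeys(keys, table, matchData):  # hypothetical sketch, not the library's code
    for item in table:
        if all(item[k] == matchData[k] for k in keys):
            return item
    return None

orders = [{'orderID': 'a1', 'leavesQty': 10}, {'orderID': 'b2', 'leavesQty': 5}]
print(findItemByKeys(['orderID'], orders, {'orderID': 'b2', 'leavesQty': 0}))
# {'orderID': 'b2', 'leavesQty': 5}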
def is_number(obj):
"""
Helper function to determine numbers
across Python 2.x and 3.x
"""
try:
from numbers import Number
except ImportError:
from operator import isNumberType
return isNumberType(obj)
else:
return isinstance(obj, Number)
|
Helper function to determine numbers
across Python 2.x and 3.x
|
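Quick usage check (on Python 3 this goes through the `numbers.Number` branch):

print(is_number(3), is_number(2.5), is_number(3 + 4j), is_number('3'))
# True True True False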
def update(self):
"""
Get the latest status information from device.
Method executes the update method for the current receiver type.
"""
if self._receiver_type == AVR_X_2016.type:
return self._update_avr_2016()
else:
return self._update_avr()
|
Get the latest status information from device.
Method executes the update method for the current receiver type.
|
def dragMoveEvent( self, event ):
"""
Processes the drag drop event using the filter set by the \
setDragDropFilter
:param event | <QDragEvent>
"""
filt = self.dragDropFilter()
if not filt:
super(XTreeWidget, self).dragMoveEvent(event)
return
filt(self, event)
|
Processes the drag drop event using the filter set by the \
setDragDropFilter
:param event | <QDragEvent>
|
def is_training():
"""Get status on training/predicting.
Returns
-------
Current state of training/predicting.
"""
curr = ctypes.c_bool()
check_call(_LIB.MXAutogradIsTraining(ctypes.byref(curr)))
return curr.value
|
Get status on training/predicting.
Returns
-------
Current state of training/predicting.
|
def __parseParameters(self):
"""Parses the parameters of data."""
self.__parameters = []
for parameter in self.__data['parameters']:
self.__parameters.append(Parameter(parameter))
|
Parses the parameters of data.
|
def _onLexerError(self, message):
"""Memorizes a lexer error message"""
self.isOK = False
if message.strip() != "":
self.lexerErrors.append(message)
|
Memorizes a lexer error message
|
def toupper(self):
"""
Translate characters from lower to upper case for a particular column.
:returns: new H2OFrame with all strings in the current frame converted to uppercase.
"""
return H2OFrame._expr(expr=ExprNode("toupper", self), cache=self._ex._cache)
|
Translate characters from lower to upper case for a particular column.
:returns: new H2OFrame with all strings in the current frame converted to uppercase.
|
def modify_identity(self, identity, **kwargs):
""" Modify some attributes of an identity or its name.
:param identity: a zobjects.Identity with `id` set (mandatory). Also
set items you want to modify/set and/or the `name` attribute to
rename the identity.
Can also take the name in string and then attributes to modify
:returns: zobjects.Identity object
"""
if isinstance(identity, zobjects.Identity):
self.request('ModifyIdentity', {'identity': identity._full_data})
return self.get_identities(identity=identity.name)[0]
else:
attrs = []
for attr, value in kwargs.items():
attrs.append({
'name': attr,
'_content': value
})
self.request('ModifyIdentity', {
'identity': {
'name': identity,
'a': attrs
}
})
return self.get_identities(identity=identity)[0]
|
Modify some attributes of an identity or its name.
:param identity: a zobjects.Identity with `id` set (mandatory). Also
set items you want to modify/set and/or the `name` attribute to
rename the identity.
Can also take the name in string and then attributes to modify
:returns: zobjects.Identity object
|
def sub_menu(request, page_id):
"""Render the children of the requested page with the sub_menu
template."""
page = Page.objects.get(id=page_id)
pages = page.children.all()
page_languages = settings.PAGE_LANGUAGES
return render_to_response("admin/basic_cms/page/sub_menu.html", {
'page': page,
'pages': pages,
'page_languages': page_languages,
}, context_instance=RequestContext(request))
|
Render the children of the requested page with the sub_menu
template.
|
def validateDocumentFinal(self, doc):
"""Does the final step for the document validation once all
the incremental validation steps have been completed.
Basically it does the following checks described by the XML
Rec: check all the IDREF/IDREFS attribute definitions for
validity. """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlValidateDocumentFinal(self._o, doc__o)
return ret
|
Does the final step for the document validation once all
the incremental validation steps have been completed.
Basically it does the following checks described by the XML
Rec: check all the IDREF/IDREFS attribute definitions for
validity.
|
def get_objective_hierarchy_design_session_for_objective_bank(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the objective hierarchy design service for the given objective bank.
arg: objective_bank_id (osid.id.Id): the ``Id`` of the
objective bank
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.learning.ObjectiveHierarchyDesignSession) - an
``ObjectiveHierarchyDesignSession``
raise: NotFound - ``objective_bank_id`` not found
raise: NullArgument - ``objective_bank_id`` or ``proxy`` is
``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented -
``supports_objective_hierarchy_design()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_hierarchy_design()`` and
``supports_visible_federation()`` are ``true``.*
"""
if not self.supports_objective_hierarchy_design():
raise errors.Unimplemented()
##
# Also include check to see if the catalog Id is found otherwise raise errors.NotFound
##
# pylint: disable=no-member
return sessions.ObjectiveHierarchyDesignSession(objective_bank_id, proxy, self._runtime)
|
Gets the ``OsidSession`` associated with the objective hierarchy design service for the given objective bank.
arg: objective_bank_id (osid.id.Id): the ``Id`` of the
objective bank
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.learning.ObjectiveHierarchyDesignSession) - an
``ObjectiveHierarchyDesignSession``
raise: NotFound - ``objective_bank_id`` not found
raise: NullArgument - ``objective_bank_id`` or ``proxy`` is
``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented -
``supports_objective_hierarchy_design()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_hierarchy_design()`` and
``supports_visible_federation()`` are ``true``.*
|
def only_horizon(meshes, xs, ys, zs, expose_horizon=False):
"""
Check all visible or partial triangles to see if they're behind the horizon,
by checking the direction of the z-component of the normals (ie hidden if mu<0)
"""
# if visibility == 0, it should remain 0
# if visibility == 0.5, it should stay 0.5 if mu > 0 else it should become 0
# if visibility == 1, it should stay 1 if mu > 0 else it should become 0
# this can all be easily done by multiplying by int(mu>0) (1 if visible, 0 if hidden)
return {comp_no: mesh.visibilities * (mesh.mus > 0).astype(int) for comp_no, mesh in meshes.items()}, None, None
|
Check all visible or partial triangles to see if they're behind the horizon,
by checking the direction of the z-component of the normals (ie hidden if mu<0)
|
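A minimal numpy illustration of the masking rule spelled out in the comments above (visibility is kept only where mu > 0):

import numpy as np

visibilities = np.array([0.0, 0.5, 1.0, 1.0])
mus = np.array([0.2, -0.1, 0.3, -0.4])

print(visibilities * (mus > 0).astype(int))  # [0.  0.  1.  0.]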
def DownloadData(name='all', data_dir=DataPath()):
""" Downloads fit data to DataPath() diretory.
If name='all', gets all fit data.
"""
if name == 'all':
for tmp_name in fits_collection.keys():
DownloadData(name=tmp_name, data_dir=data_dir)
return
if name not in fits_collection.keys():
raise Exception('Invalid fit name : %s'%name)
print('Downloading %s data'%name)
data_url = fits_collection[name].data_url
filename = data_url.split('/')[-1]
try:
os.makedirs(data_dir)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(data_dir):
pass
else:
raise
urlretrieve(data_url, data_dir + '/' + filename)
|
Downloads fit data to DataPath() directory.
If name='all', gets all fit data.
|
def p_file_cr_text_1(self, p):
"""file_cr_text : FILE_CR_TEXT file_cr_value"""
try:
self.builder.set_file_copyright(self.document, p[2])
except OrderError:
self.order_error('FileCopyrightText', 'FileName', p.lineno(1))
except CardinalityError:
self.more_than_one_error('FileCopyrightText', p.lineno(1))
|
file_cr_text : FILE_CR_TEXT file_cr_value
|
def _append_national_number(self, national_number):
"""Combines the national number with any prefix (IDD/+ and country
code or national prefix) that was collected. A space will be inserted
between them if the current formatting template indicates this to be
suitable.
"""
prefix_before_nn_len = len(self._prefix_before_national_number)
if (self._should_add_space_after_national_prefix and prefix_before_nn_len > 0 and
self._prefix_before_national_number[-1] != _SEPARATOR_BEFORE_NATIONAL_NUMBER):
# We want to add a space after the national prefix if the national
# prefix formatting rule indicates that this would normally be
# done, with the exception of the case where we already appended a
# space because the NDD was surprisingly long.
return self._prefix_before_national_number + _SEPARATOR_BEFORE_NATIONAL_NUMBER + national_number
else:
return self._prefix_before_national_number + national_number
|
Combines the national number with any prefix (IDD/+ and country
code or national prefix) that was collected. A space will be inserted
between them if the current formatting template indicates this to be
suitable.
|
def rebuild(self, scene=None):
"""
This method is where you will rebuild the geometry and \
data for a node.
:param scene <QGraphicsScene> || None
:return <bool> success
"""
rect = QRectF(0, 0, self.minimumWidth(), self.minimumHeight())
self.setRect(rect)
self._dirty = False
self.adjustTitleFont()
return True
|
This method is where you will rebuild the geometry and \
data for a node.
:param scene <QGraphicsScene> || None
:return <bool> success
|
def definition_rst(self, definition, spec_path=None):
'''
Prepare and write information about definition
:param definition: --name of definition that would be prepared for render
:type definition: str, unicode
:param spec_path: --path to definitions
:type spec_path: str, unicode
:return:
'''
spec_path = spec_path or self.models_path
definitions = self.spec[spec_path]
definition_property = definitions[definition]['properties'].copy()
if not definition_property:
self.write('{}', self.indent_depth)
return
self.indent_depth += 1
definition_property = self.find_nested_models(definition_property, definitions)
json_str = json.dumps(definition_property, indent=4)
for line in json_str.split('\n'):
self.write(line, self.indent_depth)
self.indent_depth -= 1
|
Prepare and write information about definition
:param definition: --name of definition that would be prepared for render
:type definition: str, unicode
:param spec_path: --path to definitions
:type spec_path: str, unicode
:return:
|
def Write(self, schedule, output_file):
"""Writes out a feed as KML.
Args:
schedule: A transitfeed.Schedule object containing the feed to write.
output_file: The name of the output KML file, or file object to use.
"""
# Generate the DOM to write
root = ET.Element('kml')
root.attrib['xmlns'] = 'http://earth.google.com/kml/2.1'
doc = ET.SubElement(root, 'Document')
open_tag = ET.SubElement(doc, 'open')
open_tag.text = '1'
self._CreateStopsFolder(schedule, doc)
if self.split_routes:
route_types = set()
for route in schedule.GetRouteList():
route_types.add(route.route_type)
route_types = list(route_types)
route_types.sort()
for route_type in route_types:
self._CreateRoutesFolder(schedule, doc, route_type)
else:
self._CreateRoutesFolder(schedule, doc)
self._CreateShapesFolder(schedule, doc)
# Make sure we pretty-print
self._SetIndentation(root)
# Now write the output
if hasattr(output_file, 'write'):
output = output_file
else:
output = open(output_file, 'w')
output.write("""<?xml version="1.0" encoding="UTF-8"?>\n""")
ET.ElementTree(root).write(output, 'utf-8')
|
Writes out a feed as KML.
Args:
schedule: A transitfeed.Schedule object containing the feed to write.
output_file: The name of the output KML file, or file object to use.
|
def save_to_file(filename, content):
"""
Save content to file. Used by node initial contact but
can be used anywhere.
:param str filename: name of file to save to
:param str content: content to save
:return: None
:raises IOError: permissions issue saving, invalid directory, etc
"""
import os.path
path = os.path.abspath(filename)
with open(path, "w") as text_file:
text_file.write("{}".format(content))
|
Save content to file. Used by node initial contact but
can be used anywhere.
:param str filename: name of file to save to
:param str content: content to save
:return: None
:raises IOError: permissions issue saving, invalid directory, etc
|
def build_pydot(self, G=None): # pragma: no cover
"""
Build a pydot representation of this model. This needs pydot installed.
Example Usage::
np.random.seed(1000)
X = np.random.normal(0,1,(20,2))
beta = np.random.uniform(0,1,(2,1))
Y = X.dot(beta)
m = RidgeRegression(X, Y)
G = m.build_pydot()
G.write_png('example_hierarchy_layout.png')
The output looks like:
.. image:: ./example_hierarchy_layout.png
Rectangles are parameterized objects (nodes or leafs of hierarchy).
Trapezoids are param objects, which represent the arrays for parameters.
Black arrows show parameter hierarchical dependence. The arrow points
from parents towards children.
Orange arrows show the observer pattern. Self references (here) are
the references to the call to parameters changed and references upwards
are the references to tell the parents they need to update.
"""
import pydot # @UnresolvedImport
iamroot = False
if G is None:
G = pydot.Dot(graph_type='digraph', bgcolor=None)
iamroot=True
node = pydot.Node(id(self), shape='box', label=self.name)#, color='white')
G.add_node(node)
for child in self.parameters:
child_node = child.build_pydot(G)
G.add_edge(pydot.Edge(node, child_node))#, color='white'))
for _, o, _ in self.observers:
label = o.name if hasattr(o, 'name') else str(o)
observed_node = pydot.Node(id(o), label=label)
if str(id(o)) not in G.obj_dict['nodes']:
G.add_node(observed_node)
edge = pydot.Edge(str(id(self)), str(id(o)), color='darkorange2', arrowhead='vee')
G.add_edge(edge)
if iamroot:
return G
return node
|
Build a pydot representation of this model. This needs pydot installed.
Example Usage::
np.random.seed(1000)
X = np.random.normal(0,1,(20,2))
beta = np.random.uniform(0,1,(2,1))
Y = X.dot(beta)
m = RidgeRegression(X, Y)
G = m.build_pydot()
G.write_png('example_hierarchy_layout.png')
The output looks like:
.. image:: ./example_hierarchy_layout.png
Rectangles are parameterized objects (nodes or leafs of hierarchy).
Trapezoids are param objects, which represent the arrays for parameters.
Black arrows show parameter hierarchical dependence. The arrow points
from parents towards children.
Orange arrows show the observer pattern. Self references (here) are
the references to the call to parameters changed and references upwards
are the references to tell the parents they need to update.
|
def peaking(GdB, fc, Q=3.5, fs=44100.):
"""
A second-order peaking filter having GdB gain at fc and approximately
0 dB otherwise.
The filter coefficients returned correspond to a biquadratic system function
containing five parameters.
Parameters
----------
GdB : Peaking filter gain in dB at fc
fc : Center frequency in Hz
Q : Filter Q which is inversely proportional to bandwidth
fs : Sampling frequency in Hz
Returns
-------
b : ndarray containing the numerator filter coefficients
a : ndarray containing the denominator filter coefficients
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from sk_dsp_comm.sigsys import peaking
>>> from scipy import signal
>>> b,a = peaking(2.0,500)
>>> f = np.logspace(1,5,400)
>>> w,H = signal.freqz(b,a,2*np.pi*f/44100)
>>> plt.semilogx(f,20*np.log10(abs(H)))
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.xlabel("Frequency (Hz)")
>>> plt.show()
>>> b,a = peaking(-5.0,500,4)
>>> w,H = signal.freqz(b,a,2*np.pi*f/44100)
>>> plt.semilogx(f,20*np.log10(abs(H)))
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.xlabel("Frequency (Hz)")
"""
mu = 10**(GdB/20.)
kq = 4/(1 + mu)*np.tan(2*np.pi*fc/fs/(2*Q))
Cpk = (1 + kq *mu)/(1 + kq)
b1 = -2*np.cos(2*np.pi*fc/fs)/(1 + kq*mu)
b2 = (1 - kq*mu)/(1 + kq*mu)
a1 = -2*np.cos(2*np.pi*fc/fs)/(1 + kq)
a2 = (1 - kq)/(1 + kq)
b = Cpk*np.array([1, b1, b2])
a = np.array([1, a1, a2])
return b,a
|
A second-order peaking filter having GdB gain at fc and approximately
0 dB otherwise.
The filter coefficients returned correspond to a biquadratic system function
containing five parameters.
Parameters
----------
GdB : Peaking filter gain in dB at fc
fc : Center frequency in Hz
Q : Filter Q which is inversely proportional to bandwidth
fs : Sampling frequency in Hz
Returns
-------
b : ndarray containing the numerator filter coefficients
a : ndarray containing the denominator filter coefficients
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from sk_dsp_comm.sigsys import peaking
>>> from scipy import signal
>>> b,a = peaking(2.0,500)
>>> f = np.logspace(1,5,400)
>>> w,H = signal.freqz(b,a,2*np.pi*f/44100)
>>> plt.semilogx(f,20*np.log10(abs(H)))
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.xlabel("Frequency (Hz)")
>>> plt.show()
>>> b,a = peaking(-5.0,500,4)
>>> w,H = signal.freqz(b,a,2*np.pi*f/44100)
>>> plt.semilogx(f,20*np.log10(abs(H)))
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.xlabel("Frequency (Hz)")
|
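A short numerical check of the gain claim (assuming numpy and scipy are installed, and using the peaking function above): the magnitude response evaluated at fc should come out at GdB.

import numpy as np
from scipy import signal

b, a = peaking(6.0, 1000, Q=3.5, fs=44100.)
# evaluate the response at the single frequency fc (in radians/sample)
w, H = signal.freqz(b, a, worN=[2 * np.pi * 1000 / 44100.])
print(20 * np.log10(abs(H[0])))  # approximately 6.0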
def create_keypair(self, xml_bytes):
"""Parse the XML returned by the C{CreateKeyPair} function.
@param xml_bytes: XML bytes with a C{CreateKeyPairResponse} root
element.
@return: The L{Keypair} instance created.
"""
keypair_data = XML(xml_bytes)
key_name = keypair_data.findtext("keyName")
key_fingerprint = keypair_data.findtext("keyFingerprint")
key_material = keypair_data.findtext("keyMaterial")
return model.Keypair(key_name, key_fingerprint, key_material)
|
Parse the XML returned by the C{CreateKeyPair} function.
@param xml_bytes: XML bytes with a C{CreateKeyPairResponse} root
element.
@return: The L{Keypair} instance created.
|
def signal_optimiser(d, analytes, min_points=5,
threshold_mode='kde_first_max',
threshold_mult=1., x_bias=0,
weights=None, ind=None, mode='minimise'):
"""
Optimise data selection based on specified analytes.
Identifies the longest possible contiguous data region in
the signal where the relative standard deviation (std) and
concentration of all analytes is minimised.
Optimisation is performed via a grid search of all possible
contiguous data regions. For each region, the mean std and
mean scaled analyte concentration ('amplitude') are calculated.
The size and position of the optimal data region are identified
using threshold std and amplitude values. Thresholds are derived
from all calculated stds and amplitudes using the method specified
by `threshold_mode`. For example, using the 'kde_max' method, a
probability density function (PDF) is calculated for std and
amplitude values, and the threshold is set as the maximum of the
PDF. These thresholds are then used to identify the size and position
of the longest contiguous region where the std is below the threshold,
and the amplitude is below (or above, depending on `mode`) the threshold.
All possible regions of the data that have at least
`min_points` are considered.
For a graphical demonstration of the action of signal_optimiser,
use `optimisation_plot`.
Parameters
----------
d : latools.D object
An latools data object.
analytes : str or array-like
Which analytes to consider.
min_points : int
The minimum number of contiguous points to
consider.
threshold_mode : str
The method used to calculate the optimisation
thresholds. Can be 'mean', 'median', 'kde_max',
'kde_first_max' or 'bayes_mvs', or a custom function. If a
function, must take a 1D array, and return a
single, real number.
threshold_mult : float or tuple
A multiplier applied to the calculated threshold
before use. If a tuple, the first value is applied
to the mean threshold, and the second is applied to
the standard deviation threshold. Reduce this to make
data selection more stringent.
x_bias : float
If non-zero, a bias is applied to the calculated statistics
to prefer the beginning (if > 0) or end (if < 0) of the
signal. Should be between zero and 1.
weights : array-like of length len(analytes)
An array of numbers specifying the importance of
each analyte considered. Larger number makes the
analyte have a greater effect on the optimisation.
Default is None.
ind : boolean array
A boolean array the same length as the data. Where
false, data will not be included.
mode : str
Whether to 'minimise' or 'maximise' the concentration
of the elements.
Returns
-------
dict, str : optimisation result, error message
"""
errmsg = ''
if isinstance(analytes, str):
analytes = [analytes]
if ind is None:
ind = np.full(len(d.Time), True)
# initial catch
if not any(ind) or (np.diff(bool_2_indices(ind)).max() < min_points):
errmsg = 'Optimisation failed. No contiguous data regions longer than {:.0f} points.'.format(min_points)
return Bunch({'means': np.nan,
'stds': np.nan,
'mean_threshold': np.nan,
'std_threshold': np.nan,
'lims': np.nan,
'filt': ind,
'threshold_mode': threshold_mode,
'min_points': min_points,
'analytes': analytes,
'opt_centre': np.nan,
'opt_n_points': np.nan,
'weights': weights,
'optimisation_success': False,
'errmsg': errmsg}), errmsg
msmeans, msstds = calculate_optimisation_stats(d, analytes, min_points, weights, ind, x_bias)
# second catch
if all(np.isnan(msmeans).flat) or all(np.isnan(msstds).flat):
errmsg = 'Optimisation failed. No contiguous data regions longer than {:.0f} points.'.format(min_points)
return Bunch({'means': np.nan,
'stds': np.nan,
'mean_threshold': np.nan,
'std_threshold': np.nan,
'lims': np.nan,
'filt': ind,
'threshold_mode': threshold_mode,
'min_points': min_points,
'analytes': analytes,
'opt_centre': np.nan,
'opt_n_points': np.nan,
'weights': weights,
'optimisation_success': False,
'errmsg': errmsg}), errmsg
# define thresholds
valid = ['kde_first_max', 'kde_max', 'median', 'bayes_mvs', 'mean']
n_under = 0
i = np.argwhere(np.array(valid) == threshold_mode)[0, 0]
o_threshold_mode = threshold_mode
while (n_under <= 0) & (i < len(valid)):
if threshold_mode == 'median':
# median - OK, but best?
std_threshold = np.nanmedian(msstds)
mean_threshold = np.nanmedian(msmeans)
elif threshold_mode == 'mean':
# mean
std_threshold = np.nanmean(msstds)
mean_threshold = np.nanmean(msmeans)
elif threshold_mode == 'kde_max':
# maximum of gaussian kernel density estimator
mkd = gaussian_kde(msmeans[~np.isnan(msmeans)].flat)
xm = np.linspace(*np.percentile(msmeans.flatten()[~np.isnan(msmeans.flatten())], (1, 99)), 100)
mdf = mkd.pdf(xm)
mean_threshold = xm[np.argmax(mdf)]
rkd = gaussian_kde(msstds[~np.isnan(msstds)])
xr = np.linspace(*np.percentile(msstds.flatten()[~np.isnan(msstds.flatten())], (1, 99)), 100)
rdf = rkd.pdf(xr)
std_threshold = xr[np.argmax(rdf)]
elif threshold_mode == 'kde_first_max':
# first local maximum of gaussian kernel density estimator
mkd = gaussian_kde(msmeans[~np.isnan(msmeans)].flat)
xm = np.linspace(*np.percentile(msmeans.flatten()[~np.isnan(msmeans.flatten())], (1, 99)), 100)
mdf = mkd.pdf(xm)
inds = np.argwhere(np.r_[False, mdf[1:] > mdf[:-1]] &
np.r_[mdf[:-1] > mdf[1:], False] &
(mdf > 0.25 * mdf.max()))
mean_threshold = xm[np.min(inds)]
rkd = gaussian_kde(msstds[~np.isnan(msstds)])
xr = np.linspace(*np.percentile(msstds.flatten()[~np.isnan(msstds.flatten())], (1, 99)), 100)
rdf = rkd.pdf(xr)
inds = np.argwhere(np.r_[False, rdf[1:] > rdf[:-1]] &
np.r_[rdf[:-1] > rdf[1:], False] &
(rdf > 0.25 * rdf.max()))
std_threshold = xr[np.min(inds)]
elif threshold_mode == 'bayes_mvs':
# bayesian mvs.
bm, _, bs = bayes_mvs(msstds[~np.isnan(msstds)])
std_threshold = bm.statistic
bm, _, bs = bayes_mvs(msmeans[~np.isnan(msmeans)])
mean_threshold = bm.statistic
elif callable(threshold_mode):
std_threshold = threshold_mode(msstds[~np.isnan(msstds)].flatten())
mean_threshold = threshold_mode(msmeans[~np.isnan(msmeans)].flatten())
else:
try:
mean_threshold, std_threshold = threshold_mode
except:
raise ValueError('\nthreshold_mode must be one of:\n ' + ', '.join(valid) + ',\na custom function, or a \n(mean_threshold, std_threshold) tuple.')
# apply threshold_mult
if isinstance(threshold_mult, (int, float)):
std_threshold *= threshold_mult
mean_threshold *= threshold_mult
elif len(threshold_mult) == 2:
mean_threshold *= threshold_mult[0]
std_threshold *= threshold_mult[1]
else:
raise ValueError('\nthreshold_mult must be a float, int or tuple of length 2.')
rind = (msstds < std_threshold)
if mode == 'minimise':
mind = (msmeans < mean_threshold)
else:
mind = (msmeans > mean_threshold)
ind = rind & mind
n_under = ind.sum()
if n_under == 0:
i += 1
if i <= len(valid) - 1:
threshold_mode = valid[i]
else:
errmsg = 'Optimisation failed. None of the threshold_mode options worked. Try reducing min_points.'
return Bunch({'means': np.nan,
'stds': np.nan,
'mean_threshold': np.nan,
'std_threshold': np.nan,
'lims': np.nan,
'filt': ind,
'threshold_mode': threshold_mode,
'min_points': min_points,
'analytes': analytes,
'opt_centre': np.nan,
'opt_n_points': np.nan,
'weights': weights,
'optimisation_success': False,
'errmsg': errmsg}), errmsg
if i > 0:
errmsg = "optimisation failed using threshold_mode='{:}', falling back to '{:}'".format(o_threshold_mode, threshold_mode)
# identify max number of points within thresholds
passing = np.argwhere(ind)
opt_n_points = passing[:, 0].max()
opt_centre = passing[passing[:, 0] == opt_n_points, 1].min()
opt_n_points += min_points
# centres, npoints = np.meshgrid(np.arange(msmeans.shape[1]),
# np.arange(min_points, min_points + msmeans.shape[0]))
# opt_n_points = npoints[ind].max()
# plus/minus one point to allow some freedom to shift selection window.
# cind = ind & (npoints == opt_n_points)
# opt_centre = centres[cind].min()
if opt_n_points % 2 == 0:
lims = (opt_centre - opt_n_points // 2,
opt_centre + opt_n_points // 2)
else:
lims = (opt_centre - opt_n_points // 2,
opt_centre + opt_n_points // 2 + 1)
filt = np.zeros(d.Time.shape, dtype=bool)
filt[lims[0]:lims[1]] = True
return Bunch({'means': msmeans,
'stds': msstds,
'mean_threshold': mean_threshold,
'std_threshold': std_threshold,
'lims': lims,
'filt': filt,
'threshold_mode': threshold_mode,
'min_points': min_points,
'analytes': analytes,
'opt_centre': opt_centre,
'opt_n_points': opt_n_points,
'weights': weights,
'optimisation_success': True,
'errmsg': errmsg}), errmsg
|
Optimise data selection based on specified analytes.
Identifies the longest possible contiguous data region in
the signal where the relative standard deviation (std) and
concentration of all analytes is minimised.
Optimisation is performed via a grid search of all possible
contiguous data regions. For each region, the mean std and
mean scaled analyte concentration ('amplitude') are calculated.
The size and position of the optimal data region are identified
using threshold std and amplitude values. Thresholds are derived
from all calculated stds and amplitudes using the method specified
by `threshold_mode`. For example, using the 'kde_max' method, a
probability density function (PDF) is calculated for std and
amplitude values, and the threshold is set as the maximum of the
PDF. These thresholds are then used to identify the size and position
of the longest contiguous region where the std is below the threshold,
and the amplitude is below (or above, depending on `mode`) the threshold.
All possible regions of the data that have at least
`min_points` are considered.
For a graphical demonstration of the action of signal_optimiser,
use `optimisation_plot`.
Parameters
----------
d : latools.D object
An latools data object.
analytes : str or array-like
Which analytes to consider.
min_points : int
The minimum number of contiguous points to
consider.
threshold_mode : str
The method used to calculate the optimisation
thresholds. Can be 'mean', 'median', 'kde_max',
'kde_first_max' or 'bayes_mvs', or a custom function. If a
function, must take a 1D array, and return a
single, real number.
threshold_mult : float or tuple
A multiplier applied to the calculated threshold
before use. If a tuple, the first value is applied
to the mean threshold, and the second is applied to
the standard deviation threshold. Reduce this to make
data selection more stringent.
x_bias : float
If non-zero, a bias is applied to the calculated statistics
to prefer the beginning (if > 0) or end (if < 0) of the
signal. Should be between zero and 1.
weights : array-like of length len(analytes)
An array of numbers specifying the importance of
each analyte considered. Larger number makes the
analyte have a greater effect on the optimisation.
Default is None.
ind : boolean array
A boolean array the same length as the data. Where
false, data will not be included.
mode : str
Whether to 'minimise' or 'maximise' the concentration
of the elements.
Returns
-------
dict, str : optimisation result, error message
|
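A self-contained mini-illustration of the core thresholding idea described above (derive a threshold from the statistics, then keep the longest contiguous run that passes); the real function also grid-searches over window sizes and positions:

import numpy as np

stds = np.array([2.1, 1.9, 0.3, 0.2, 0.25, 0.4, 1.8, 2.2])
std_threshold = np.nanmedian(stds)   # 'median' threshold mode
ok = stds < std_threshold            # which points pass the threshold

# find the longest contiguous run of passing points
best_len, best_start, run_start = 0, 0, None
for i, flag in enumerate(np.r_[ok, False]):
    if flag and run_start is None:
        run_start = i
    elif not flag and run_start is not None:
        if i - run_start > best_len:
            best_len, best_start = i - run_start, run_start
        run_start = None

print(best_start, best_len)  # 2 4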
def subnet2block(subnet):
"""Convert a dotted-quad ip address including a netmask into a tuple
containing the network block start and end addresses.
>>> subnet2block('127.0.0.1/255.255.255.255')
('127.0.0.1', '127.0.0.1')
>>> subnet2block('127/255')
('127.0.0.0', '127.255.255.255')
>>> subnet2block('127.0.1/255.255')
('127.0.0.0', '127.0.255.255')
>>> subnet2block('127.1/255.255.255.0')
('127.1.0.0', '127.1.0.255')
>>> subnet2block('127.0.0.3/255.255.255.248')
('127.0.0.0', '127.0.0.7')
>>> subnet2block('127/0')
('0.0.0.0', '255.255.255.255')
:param subnet: dotted-quad ip address with netmask
(eg. '127.0.0.1/255.0.0.0').
:type subnet: str
:returns: Tuple of block (start, end) or ``None`` if invalid.
:raises: TypeError
"""
if not validate_subnet(subnet):
return None
ip, netmask = subnet.split('/')
prefix = netmask2prefix(netmask)
# convert dotted-quad ip to base network number
network = ip2network(ip)
return _block_from_ip_and_prefix(network, prefix)
|
Convert a dotted-quad ip address including a netmask into a tuple
containing the network block start and end addresses.
>>> subnet2block('127.0.0.1/255.255.255.255')
('127.0.0.1', '127.0.0.1')
>>> subnet2block('127/255')
('127.0.0.0', '127.255.255.255')
>>> subnet2block('127.0.1/255.255')
('127.0.0.0', '127.0.255.255')
>>> subnet2block('127.1/255.255.255.0')
('127.1.0.0', '127.1.0.255')
>>> subnet2block('127.0.0.3/255.255.255.248')
('127.0.0.0', '127.0.0.7')
>>> subnet2block('127/0')
('0.0.0.0', '255.255.255.255')
:param subnet: dotted-quad ip address with netmask
(eg. '127.0.0.1/255.0.0.0').
:type subnet: str
:returns: Tuple of block (start, end) or ``None`` if invalid.
:raises: TypeError
|
def process_action(self, raw_user, channel, raw_message):
"""Called when a message is received from a channel or user."""
log.info("%s %s %s", channel, raw_user, raw_message)
if not raw_user:
# ignore server messages
return
# This monster of a regex extracts msg and target from a message, where
# the target may not be there, and the target is a valid irc name.
# Valid ways to target someone are "<nick>: ..." and "<nick>, ..."
target, message = re.match(
r'^(?:([a-z_\-\[\]\\^{}|`]' # First letter can't be a number
r'[a-z0-9_\-\[\]\\^{}|`]*)' # The rest can be many things
r'[:,] )? *(.*)$', # The actual message
raw_message, re.I).groups()
pm = channel == self.nickname
if pm:
directed = True
if target:
if target.lower() == self.nickname.lower():
directed = True
else:
directed = False
message = '{0}: {1}'.format(target, message)
else:
directed = False
if message.startswith('!'):
message = message[1:]
directed = True
if directed:
message = message.rstrip()
try:
user, mask = raw_user.split('!', 1)
except ValueError:
user = raw_user
mask = ''
comm = {
'raw_message': raw_message,
'message': message,
'raw_user': raw_user,
'user': user,
'mask': mask,
'target': target,
'channel': channel,
'directed': directed,
'pm': pm,
}
self.dispatch('chat', 'message', comm)
self.factory.history.setdefault(
channel, deque(maxlen=100)).append(comm)
|
Called when a message is received from a channel or user.
|
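A standalone illustration of the target-extraction regex described in the comments above, run against two sample messages:

import re

pattern = (r'^(?:([a-z_\-\[\]\\^{}|`]'    # first letter can't be a number
           r'[a-z0-9_\-\[\]\\^{}|`]*)'    # the rest can be many things
           r'[:,] )? *(.*)$')             # the actual message

print(re.match(pattern, 'mybot: deploy the thing', re.I).groups())
# ('mybot', 'deploy the thing')
print(re.match(pattern, 'just chatting', re.I).groups())
# (None, 'just chatting')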
def get_pages(parser, token):
"""Add to context the list of page links.
Usage:
.. code-block:: html+django
{% get_pages %}
This is mostly used for Digg-style pagination.
This call inserts in the template context a *pages* variable, as a sequence
of page links. You can use *pages* in different ways:
- just print *pages.get_rendered* and you will get Digg-style pagination displayed:
.. code-block:: html+django
{{ pages.get_rendered }}
- display pages count:
.. code-block:: html+django
{{ pages|length }}
- check if the page list contains more than one page:
.. code-block:: html+django
{{ pages.paginated }}
{# the following is equivalent #}
{{ pages|length > 1 }}
- get a specific page:
.. code-block:: html+django
{# the current selected page #}
{{ pages.current }}
{# the first page #}
{{ pages.first }}
{# the last page #}
{{ pages.last }}
{# the previous page (or nothing if you are on first page) #}
{{ pages.previous }}
{# the next page (or nothing if you are in last page) #}
{{ pages.next }}
{# the third page #}
{{ pages.3 }}
{# this means page.1 is the same as page.first #}
{# the 1-based index of the first item on the current page #}
{{ pages.current_start_index }}
{# the 1-based index of the last item on the current page #}
{{ pages.current_end_index }}
{# the total number of objects, across all pages #}
{{ pages.total_count }}
{# the first page represented as an arrow #}
{{ pages.first_as_arrow }}
{# the last page represented as an arrow #}
{{ pages.last_as_arrow }}
- iterate over *pages* to get all pages:
.. code-block:: html+django
{% for page in pages %}
{# display page link #}
{{ page.render_link}}
{# the page url (beginning with "?") #}
{{ page.url }}
{# the page path #}
{{ page.path }}
{# the page number #}
{{ page.number }}
{# a string representing the page (commonly the page number) #}
{{ page.label }}
{# check if the page is the current one #}
{{ page.is_current }}
{# check if the page is the first one #}
{{ page.is_first }}
{# check if the page is the last one #}
{{ page.is_last }}
{% endfor %}
You can change the variable name, e.g.:
.. code-block:: html+django
{% get_pages as page_links %}
Must be called after ``{% paginate objects %}``.
"""
# Validate args.
try:
tag_name, args = token.contents.split(None, 1)
except ValueError:
var_name = 'pages'
else:
args = args.split()
if len(args) == 2 and args[0] == 'as':
var_name = args[1]
else:
msg = 'Invalid arguments for %r tag' % tag_name
raise template.TemplateSyntaxError(msg)
# Call the node.
return GetPagesNode(var_name)
|
Add to context the list of page links.
Usage:
.. code-block:: html+django
{% get_pages %}
This is mostly used for Digg-style pagination.
This call inserts in the template context a *pages* variable, as a sequence
of page links. You can use *pages* in different ways:
- just print *pages.get_rendered* and you will get Digg-style pagination displayed:
.. code-block:: html+django
{{ pages.get_rendered }}
- display pages count:
.. code-block:: html+django
{{ pages|length }}
- check if the page list contains more than one page:
.. code-block:: html+django
{{ pages.paginated }}
{# the following is equivalent #}
{{ pages|length > 1 }}
- get a specific page:
.. code-block:: html+django
{# the current selected page #}
{{ pages.current }}
{# the first page #}
{{ pages.first }}
{# the last page #}
{{ pages.last }}
{# the previous page (or nothing if you are on first page) #}
{{ pages.previous }}
{# the next page (or nothing if you are in last page) #}
{{ pages.next }}
{# the third page #}
{{ pages.3 }}
{# this means page.1 is the same as page.first #}
{# the 1-based index of the first item on the current page #}
{{ pages.current_start_index }}
{# the 1-based index of the last item on the current page #}
{{ pages.current_end_index }}
{# the total number of objects, across all pages #}
{{ pages.total_count }}
{# the first page represented as an arrow #}
{{ pages.first_as_arrow }}
{# the last page represented as an arrow #}
{{ pages.last_as_arrow }}
- iterate over *pages* to get all pages:
.. code-block:: html+django
{% for page in pages %}
{# display page link #}
{{ page.render_link}}
{# the page url (beginning with "?") #}
{{ page.url }}
{# the page path #}
{{ page.path }}
{# the page number #}
{{ page.number }}
{# a string representing the page (commonly the page number) #}
{{ page.label }}
{# check if the page is the current one #}
{{ page.is_current }}
{# check if the page is the first one #}
{{ page.is_first }}
{# check if the page is the last one #}
{{ page.is_last }}
{% endfor %}
You can change the variable name, e.g.:
.. code-block:: html+django
{% get_pages as page_links %}
Must be called after ``{% paginate objects %}``.
|
def do_cleaning(self, arg):
"""Does the actual cleaning by using the delete methods above.
:param arg: A string, the string which shall be cleaned. Or a list, in which case each of the strings within the
list is cleaned.
:return: A string, the cleaned string. Or a list with cleaned string entries.
"""
if arg is not None:
if isinstance(arg, list):
newlist = []
for entry in arg:
newlist.append(self.do_cleaning(entry))
return newlist
else:
if sys.version_info[0] < 3:
arg = unicode(arg)
else:
arg = str(arg)
arg = self.delete_tags(arg)
arg = self.delete_whitespaces(arg)
return arg
else:
return None
|
Does the actual cleaning by using the delete methods above.
:param arg: A string, the string which shall be cleaned. Or a list, in which case each of the strings within the
list is cleaned.
:return: A string, the cleaned string. Or a list with cleaned string entries.
|
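A minimal standalone sketch of the recursive cleaning pattern above; delete_tags and delete_whitespaces here are hypothetical stand-ins for the instance's delete methods.

import re

def delete_tags(s):
    # stand-in: strip anything that looks like an HTML/XML tag
    return re.sub(r"<[^>]+>", "", s)

def delete_whitespaces(s):
    # stand-in: collapse runs of whitespace and trim the ends
    return " ".join(s.split())

def do_cleaning(arg):
    if arg is None:
        return None
    if isinstance(arg, list):
        return [do_cleaning(entry) for entry in arg]
    return delete_whitespaces(delete_tags(str(arg)))

print(do_cleaning(["<p> hello   world </p>", 42]))  # ['hello world', '42']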
def plot_root_to_tip(self, add_internal=False, label=True, ax=None):
"""
Plot root-to-tip regression
Parameters
----------
add_internal : bool
If true, plot internal node positions
label : bool
If true, label the plots
ax : matplotlib axes
If not None, use the provided matplotlib axes to plot the results
"""
Treg = self.setup_TreeRegression()
if self.clock_model and 'cov' in self.clock_model:
cf = self.clock_model['valid_confidence']
else:
cf = False
Treg.clock_plot(ax=ax, add_internal=add_internal, confidence=cf, n_sigma=2,
regression=self.clock_model)
|
Plot root-to-tip regression
Parameters
----------
add_internal : bool
If true, plot internal node positions
label : bool
If true, label the plots
ax : matplotlib axes
If not None, use the provided matplotlib axes to plot the results
|
def restricted_cover(l, succsOf):
""" Returns a restricted <succsOf> which only takes and yields
values from <l> """
fzl = frozenset(l)
lut = dict()
for i in l:
lut[i] = fzl.intersection(succsOf(i))
return lambda x: lut[x]
|
Returns a restricted <succsOf> which only takes and yields
values from <l>
|
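A quick check of the behaviour, assuming restricted_cover from the snippet above is in scope and using a toy successor function.

def succs(i):
    return {i - 1, i + 1}

restricted = restricted_cover([1, 2, 3], succs)
print(restricted(1))  # frozenset({2}) -- 0 is dropped because it is not in the list
print(restricted(3))  # frozenset({2}) -- 4 is dropped for the same reason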
def execute(self, context):
"""
Uploads the file to Google cloud storage
"""
hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to)
hook.upload(
bucket_name=self.bucket,
object_name=self.dst,
mime_type=self.mime_type,
filename=self.src,
gzip=self.gzip,
)
|
Uploads the file to Google cloud storage
|
def lipd_read(path):
"""
Loads a LiPD file from local path. Unzip, read, and process data
Steps: create tmp, unzip lipd, read files into memory, manipulate data, move to original dir, delete tmp.
:param str path: Source path
:return dict _j: LiPD metadata
"""
_j = {}
dir_original = os.getcwd()
# Import metadata into object
try:
print("reading: {}".format(print_filename(path)))
# bigger than a 1 MB file? This could take a while
if os.stat(path).st_size > 1000000:
_size = os.stat(path).st_size
print("{} :That's a big file! This may take a while to load...".format("{} MB".format(round(_size/1000000,2))))
dir_tmp = create_tmp_dir()
unzipper(path, dir_tmp)
os.chdir(dir_tmp)
_dir_data = find_files()
os.chdir(_dir_data)
_j = read_jsonld()
_j = rm_empty_fields(_j)
_j = check_dsn(path, _j)
_j = update_lipd_version(_j)
_j = idx_num_to_name(_j)
_j = rm_empty_doi(_j)
_j = rm_empty_fields(_j)
_j = put_tsids(_j)
_csvs = read_csvs()
_j = merge_csv_metadata(_j, _csvs)
# Why ? Because we need to align the csv filenames with the table filenames. We don't need the csv output here.
_j, _csv = get_csv_from_metadata(_j["dataSetName"], _j)
os.chdir(dir_original)
shutil.rmtree(dir_tmp)
except FileNotFoundError:
print("Error: lipd_read: LiPD file not found. Please make sure the filename includes the .lpd extension")
except Exception as e:
logger_lipd.error("lipd_read: {}".format(e))
print("Error: lipd_read: unable to read LiPD: {}".format(e))
os.chdir(dir_original)
logger_lipd.info("lipd_read: record loaded: {}".format(path))
return _j
|
Loads a LiPD file from local path. Unzip, read, and process data
Steps: create tmp, unzip lipd, read files into memory, manipulate data, move to original dir, delete tmp.
:param str path: Source path
:return dict _j: LiPD metadata
|
def get_seh_chain(self):
"""
@rtype: list of tuple( int, int )
@return: List of structured exception handlers.
Each SEH is represented as a tuple of two addresses:
- Address of this SEH block
- Address of the SEH callback function
Do not confuse this with the contents of the SEH block itself,
where the first member is a pointer to the B{next} block instead.
@raise NotImplementedError:
This method is only supported in 32 bits versions of Windows.
"""
seh_chain = list()
try:
process = self.get_process()
seh = self.get_seh_chain_pointer()
while seh != 0xFFFFFFFF:
seh_func = process.read_pointer( seh + 4 )
seh_chain.append( (seh, seh_func) )
seh = process.read_pointer( seh )
except WindowsError:
seh_chain.append( (seh, None) )
return seh_chain
|
@rtype: list of tuple( int, int )
@return: List of structured exception handlers.
Each SEH is represented as a tuple of two addresses:
- Address of this SEH block
- Address of the SEH callback function
Do not confuse this with the contents of the SEH block itself,
where the first member is a pointer to the B{next} block instead.
@raise NotImplementedError:
This method is only supported in 32 bits versions of Windows.
|
def memory(self):
"""
The maximum number of bytes of memory the job will require to run.
"""
if self._memory is not None:
return self._memory
elif self._config is not None:
return self._config.defaultMemory
else:
raise AttributeError("Default value for 'memory' cannot be determined")
|
The maximum number of bytes of memory the job will require to run.
|
def _remove_attributes(attrs, remove_list):
"""
Removes attributes in the remove list from the input attribute dict
:param attrs : Dict of operator attributes
:param remove_list : list of attributes to be removed
:return new_attr : Dict of operator attributes without the listed attributes.
"""
new_attrs = {}
for attr in attrs.keys():
if attr not in remove_list:
new_attrs[attr] = attrs[attr]
return new_attrs
|
Removes attributes in the remove list from the input attribute dict
:param attrs : Dict of operator attributes
:param remove_list : list of attributes to be removed
:return new_attr : Dict of operator attributes without the listed attributes.
|
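A small illustration, assuming _remove_attributes above is in scope; the attribute names are made up for the example.

attrs = {"kernel_shape": (3, 3), "pads": (1, 1), "legacy_flag": 1}
print(_remove_attributes(attrs, ["legacy_flag", "not_present"]))
# {'kernel_shape': (3, 3), 'pads': (1, 1)}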
def children(self, unroll=False, skip_not_present=True):
"""
Returns an iterator that provides nodes for all immediate children of
this component.
Parameters
----------
unroll : bool
If True, any children that are arrays are unrolled.
skip_not_present : bool
If True, skips children whose 'ispresent' property is set to False
Yields
------
:class:`~Node`
All immediate children
"""
for child_inst in self.inst.children:
if skip_not_present:
# Check if property ispresent == False
if not child_inst.properties.get('ispresent', True):
# ispresent was explicitly set to False. Skip it
continue
if unroll and isinstance(child_inst, comp.AddressableComponent) and child_inst.is_array:
# Unroll the array
range_list = [range(n) for n in child_inst.array_dimensions]
for idxs in itertools.product(*range_list):
N = Node._factory(child_inst, self.env, self)
N.current_idx = idxs # pylint: disable=attribute-defined-outside-init
yield N
else:
yield Node._factory(child_inst, self.env, self)
|
Returns an iterator that provides nodes for all immediate children of
this component.
Parameters
----------
unroll : bool
If True, any children that are arrays are unrolled.
skip_not_present : bool
If True, skips children whose 'ispresent' property is set to False
Yields
------
:class:`~Node`
All immediate children
|
def _findSwipl():
"""
This function makes a big effort to find the path to the SWI-Prolog shared
library. Since this is both OS dependent and installation dependent, we may
not always succeed. If we do, we return a name/path that can be used by
CDLL(). Otherwise we raise an exception.
:return: Tuple. First element is the name or path to the library that can be
used by CDLL. Second element is the path where the SWI-Prolog resource
file may be found (this is needed in some Linuxes)
:rtype: Tuple of strings
:raises ImportError: If we cannot guess the name of the library
"""
# Now begins the guesswork
platform = sys.platform[:3]
if platform == "win": # In Windows, we have the default installer
# path and the registry to look
(path, swiHome) = _findSwiplWin()
elif platform in ("lin", "cyg"):
(path, swiHome) = _findSwiplLin()
elif platform == "dar": # Help with MacOS is welcome!!
(path, swiHome) = _findSwiplDar()
if path is None:
(path, swiHome) = _findSwiplMacOSHome()
else:
# This should work for other UNIX
(path, swiHome) = _findSwiplLin()
# This is a catch all raise
if path is None:
raise ImportError('Could not find the SWI-Prolog library in this '
'platform. If you are sure it is installed, please '
'open an issue.')
else:
return (path, swiHome)
|
This function makes a big effort to find the path to the SWI-Prolog shared
library. Since this is both OS dependent and installation dependent, we may
not always succeed. If we do, we return a name/path that can be used by
CDLL(). Otherwise we raise an exception.
:return: Tuple. First element is the name or path to the library that can be
used by CDLL. Second element is the path where the SWI-Prolog resource
file may be found (this is needed in some Linuxes)
:rtype: Tuple of strings
:raises ImportError: If we cannot guess the name of the library
|
def trt_pmf(matrices):
"""
Fold full disaggregation matrix to tectonic region type PMF.
:param matrices:
a matrix with T submatrices
:returns:
an array of T probabilities one per each tectonic region type
"""
ntrts, nmags, ndists, nlons, nlats, neps = matrices.shape
pmf = numpy.zeros(ntrts)
for t in range(ntrts):
pmf[t] = 1. - numpy.prod(
[1. - matrices[t, i, j, k, l, m]
for i in range(nmags)
for j in range(ndists)
for k in range(nlons)
for l in range(nlats)
for m in range(neps)])
return pmf
|
Fold full disaggregation matrix to tectonic region type PMF.
:param matrices:
a matrix with T submatrices
:returns:
an array of T probabilities one per each tectonic region type
|
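A tiny worked example of the complement-product fold, assuming trt_pmf and numpy are available; with a single cell per tectonic region type, 1 - (1 - p) reduces back to p.

import numpy

# 2 tectonic region types, one bin along each of the other five axes
matrices = numpy.zeros((2, 1, 1, 1, 1, 1))
matrices[0] = 0.1
matrices[1] = 0.3
print(trt_pmf(matrices))  # [0.1 0.3] (up to floating-point rounding)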
def _get_consumption(self, url, start, end, aggregation):
"""
Request for both the get_consumption and
get_sensor_consumption methods.
Parameters
----------
url : str
start : dt.datetime
end : dt.datetime
aggregation : int
Returns
-------
dict
"""
start = self._to_milliseconds(start)
end = self._to_milliseconds(end)
headers = {"Authorization": "Bearer {}".format(self.access_token)}
params = {
"aggregation": aggregation,
"from": start,
"to": end
}
r = requests.get(url, headers=headers, params=params)
r.raise_for_status()
return r.json()
|
Request for both the get_consumption and
get_sensor_consumption methods.
Parameters
----------
url : str
start : dt.datetime
end : dt.datetime
aggregation : int
Returns
-------
dict
|
def rankdata(inlist):
"""
Ranks the data in inlist, dealing with ties appropriately. Assumes
a 1D inlist. Adapted from Gary Perlman's |Stat ranksort.
Usage: rankdata(inlist)
Returns: a list of length equal to inlist, containing rank scores
"""
n = len(inlist)
svec, ivec = shellsort(inlist)
sumranks = 0
dupcount = 0
newlist = [0] * n
for i in range(n):
sumranks = sumranks + i
dupcount = dupcount + 1
if i == n - 1 or svec[i] != svec[i + 1]:
averank = sumranks / float(dupcount) + 1
for j in range(i - dupcount + 1, i + 1):
newlist[ivec[j]] = averank
sumranks = 0
dupcount = 0
return newlist
|
Ranks the data in inlist, dealing with ties appropriately. Assumes
a 1D inlist. Adapted from Gary Perlman's |Stat ranksort.
Usage: rankdata(inlist)
Returns: a list of length equal to inlist, containing rank scores
|
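A quick check of the tie handling, assuming rankdata (and the shellsort helper it relies on) is in scope.

print(rankdata([40, 10, 30, 10]))
# [4.0, 1.5, 3.0, 1.5] -- the tied 10s share the average of ranks 1 and 2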
def load_system_host_keys(self, filename=None):
"""
Load host keys from a system (read-only) file. Host keys read with
this method will not be saved back by `save_host_keys`.
This method can be called multiple times. Each new set of host keys
will be merged with the existing set (new replacing old if there are
conflicts).
If ``filename`` is left as ``None``, an attempt will be made to read
keys from the user's local "known hosts" file, as used by OpenSSH,
and no exception will be raised if the file can't be read. This is
probably only useful on posix.
:param str filename: the filename to read, or ``None``
:raises: ``IOError`` --
if a filename was provided and the file could not be read
"""
if filename is None:
# try the user's .ssh key file, and mask exceptions
filename = os.path.expanduser("~/.ssh/known_hosts")
try:
self._system_host_keys.load(filename)
except IOError:
pass
return
self._system_host_keys.load(filename)
|
Load host keys from a system (read-only) file. Host keys read with
this method will not be saved back by `save_host_keys`.
This method can be called multiple times. Each new set of host keys
will be merged with the existing set (new replacing old if there are
conflicts).
If ``filename`` is left as ``None``, an attempt will be made to read
keys from the user's local "known hosts" file, as used by OpenSSH,
and no exception will be raised if the file can't be read. This is
probably only useful on posix.
:param str filename: the filename to read, or ``None``
:raises: ``IOError`` --
if a filename was provided and the file could not be read
|
def pool_create_event(self, pool_info):
"""Process pool create event.
Extract pool info, get the listener info, and call
listener_create_event for each listener
"""
pool_data = pool_info.get('pool')
listeners = pool_data.get('listeners')
for listener in listeners:
l_id = listener.get('id')
l_info = self.neutronclient.show_listener(l_id)
self.listener_create_event(l_info)
|
Process pool create event.
Extract pool info, get the listener info, and call
listener_create_event for each listener
|
def set_config_defaults(args):
""" Set configuration defaults
:param args: a dict of arguments
:type args: dict
:return: a dict of arguments
:rtype: dict
"""
defaults = {
'fail': 'fast',
'stdout': 'fail'
}
for key in defaults:
if not args[key]:
args[key] = defaults[key]
return args
|
Set configuration defaults
:param args: a dict of arguments
:type args: dict
:return: a dict of arguments
:rtype: dict
|
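A short example, assuming set_config_defaults above is in scope; keys with a falsy value fall back to the default, keys already set are kept.

args = {'fail': None, 'stdout': 'verbose'}
print(set_config_defaults(args))
# {'fail': 'fast', 'stdout': 'verbose'}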
def synchronize(self, func=None, wait=None, errors=()):
"""
This method is Capybara's primary defense against asynchronicity problems. It works by
attempting to run a given decorated function until it succeeds. The exact behavior of this
method depends on a number of factors. Basically there are certain exceptions which, when
raised from the decorated function, instead of bubbling up, are caught, and the function is
re-run.
Certain drivers have no support for asynchronous processes. These drivers run the function,
and any error raised bubbles up immediately. This allows faster turn around in the case
where an expectation fails.
Only exceptions that are :exc:`ElementNotFound` or any subclass thereof cause the block to
be rerun. Drivers may specify additional exceptions which also cause reruns. This usually
occurs when a node is manipulated which no longer exists on the page. For example, the
Selenium driver specifies ``selenium.common.exceptions.StaleElementReferenceException``.
As long as any of these exceptions are thrown, the function is re-run, until a certain
amount of time passes. The amount of time defaults to :data:`capybara.default_max_wait_time`
and can be overridden through the ``wait`` argument. This time is compared with the system
time to see how much time has passed. If the return value of ``time.time()`` is stubbed
out, Capybara will raise :exc:`FrozenInTime`.
Args:
func (Callable, optional): The function to decorate.
wait (int, optional): Number of seconds to retry this function.
errors (Tuple[Type[Exception]], optional): Exception types that cause the function to be
rerun. Defaults to ``driver.invalid_element_errors`` + :exc:`ElementNotFound`.
Returns:
Callable: The decorated function, or a decorator function.
Raises:
FrozenInTime: If the return value of ``time.time()`` appears stuck.
"""
def decorator(func):
@wraps(func)
def outer(*args, **kwargs):
seconds = wait if wait is not None else capybara.default_max_wait_time
def inner():
return func(*args, **kwargs)
if self.session.synchronized:
return inner()
else:
timer = Timer(seconds)
self.session.synchronized = True
try:
while True:
try:
return inner()
except Exception as e:
self.session.raise_server_error()
if not self._should_catch_error(e, errors):
raise
if timer.expired:
raise
sleep(0.05)
if timer.stalled:
raise FrozenInTime(
"time appears to be frozen, Capybara does not work with "
"libraries which freeze time, consider using time "
"traveling instead")
if capybara.automatic_reload:
self.reload()
finally:
self.session.synchronized = False
return outer
if func:
return decorator(func)
else:
return decorator
|
This method is Capybara's primary defense against asynchronicity problems. It works by
attempting to run a given decorated function until it succeeds. The exact behavior of this
method depends on a number of factors. Basically there are certain exceptions which, when
raised from the decorated function, instead of bubbling up, are caught, and the function is
re-run.
Certain drivers have no support for asynchronous processes. These drivers run the function,
and any error raised bubbles up immediately. This allows faster turn around in the case
where an expectation fails.
Only exceptions that are :exc:`ElementNotFound` or any subclass thereof cause the block to
be rerun. Drivers may specify additional exceptions which also cause reruns. This usually
occurs when a node is manipulated which no longer exists on the page. For example, the
Selenium driver specifies ``selenium.common.exceptions.StaleElementReferenceException``.
As long as any of these exceptions are thrown, the function is re-run, until a certain
amount of time passes. The amount of time defaults to :data:`capybara.default_max_wait_time`
and can be overridden through the ``wait`` argument. This time is compared with the system
time to see how much time has passed. If the return value of ``time.time()`` is stubbed
out, Capybara will raise :exc:`FrozenInTime`.
Args:
func (Callable, optional): The function to decorate.
wait (int, optional): Number of seconds to retry this function.
errors (Tuple[Type[Exception]], optional): Exception types that cause the function to be
rerun. Defaults to ``driver.invalid_element_errors`` + :exc:`ElementNotFound`.
Returns:
Callable: The decorated function, or a decorator function.
Raises:
FrozenInTime: If the return value of ``time.time()`` appears stuck.
|
def p_select_related_where_statement(self, p):
'''
statement : SELECT ONE variable_name RELATED BY navigation_hook navigation_chain WHERE expression
| SELECT ANY variable_name RELATED BY navigation_hook navigation_chain WHERE expression
| SELECT MANY variable_name RELATED BY navigation_hook navigation_chain WHERE expression
'''
p[0] = SelectRelatedWhereNode(cardinality=p[2],
variable_name=p[3],
handle=p[6],
navigation_chain=p[7],
where_clause=p[9])
|
statement : SELECT ONE variable_name RELATED BY navigation_hook navigation_chain WHERE expression
| SELECT ANY variable_name RELATED BY navigation_hook navigation_chain WHERE expression
| SELECT MANY variable_name RELATED BY navigation_hook navigation_chain WHERE expression
|
def save_data(self, trigger_id, **data):
"""
let's save the data
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
:rtype: boolean
"""
title, content = super(ServiceTodoist, self).save_data(trigger_id, **data)
if self.token:
if title or content or data.get('link'):
content = title + ' ' + content + ' ' + data.get('link')
self.todoist.add_item(content)
sentence = str('todoist {} created').format(data.get('link'))
logger.debug(sentence)
status = True
else:
status = False
else:
logger.critical("no token or link provided for trigger ID {} ".format(trigger_id))
status = False
return status
|
let's save the data
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
:rtype: boolean
|
def from_xml(cls, data, api=None, parser=None):
"""
Create a new instance and load data from xml data or object.
.. note::
If parser is set to None, the function tries to find the best parser.
By default the SAX parser is chosen if a string is provided as data.
The parser is set to DOM if an xml.etree.ElementTree.Element is provided as data value.
:param data: Root element
:type data: str | xml.etree.ElementTree.Element
:param api: The instance to query additional information if required.
:type api: Overpass
:param parser: Specify the parser to use (DOM or SAX) (Default: None = autodetect, defaults to SAX)
:type parser: Integer | None
:return: New instance of Result object
:rtype: Result
"""
if parser is None:
if isinstance(data, str):
parser = XML_PARSER_SAX
else:
parser = XML_PARSER_DOM
result = cls(api=api)
if parser == XML_PARSER_DOM:
import xml.etree.ElementTree as ET
if isinstance(data, str):
root = ET.fromstring(data)
elif isinstance(data, ET.Element):
root = data
else:
raise exception.OverPyException("Unable to detect data type.")
for elem_cls in [Node, Way, Relation, Area]:
for child in root:
if child.tag.lower() == elem_cls._type_value:
result.append(elem_cls.from_xml(child, result=result))
elif parser == XML_PARSER_SAX:
if PY2:
from StringIO import StringIO
else:
from io import StringIO
source = StringIO(data)
sax_handler = OSMSAXHandler(result)
parser = make_parser()
parser.setContentHandler(sax_handler)
parser.parse(source)
else:
# ToDo: better exception
raise Exception("Unknown XML parser")
return result
|
Create a new instance and load data from xml data or object.
.. note::
If parser is set to None, the function tries to find the best parser.
By default the SAX parser is chosen if a string is provided as data.
The parser is set to DOM if an xml.etree.ElementTree.Element is provided as data value.
:param data: Root element
:type data: str | xml.etree.ElementTree.Element
:param api: The instance to query additional information if required.
:type api: Overpass
:param parser: Specify the parser to use (DOM or SAX) (Default: None = autodetect, defaults to SAX)
:type parser: Integer | None
:return: New instance of Result object
:rtype: Result
|
def get(self,request,*args,**kwargs):
'''
Allow passing of basis and time limitations
'''
try:
year = int(self.kwargs.get('year'))
except (ValueError, TypeError):
year = getIntFromGet(request,'year')
kwargs.update({
'year': year,
'basis': request.GET.get('basis'),
})
if kwargs.get('basis') not in EXPENSE_BASES.keys():
kwargs['basis'] = 'accrualDate'
return super().get(request, *args, **kwargs)
|
Allow passing of basis and time limitations
|
def _sample_chain(args):
"""Sample a single chain for OptGPSampler.
center and n_samples are updated locally and forgotten afterwards.
"""
n, idx = args # has to be this way to work in Python 2.7
center = sampler.center
np.random.seed((sampler._seed + idx) % np.iinfo(np.int32).max)
pi = np.random.randint(sampler.n_warmup)
prev = sampler.warmup[pi, ]
prev = step(sampler, center, prev - center, 0.95)
n_samples = max(sampler.n_samples, 1)
samples = np.zeros((n, center.shape[0]))
for i in range(1, sampler.thinning * n + 1):
pi = np.random.randint(sampler.n_warmup)
delta = sampler.warmup[pi, ] - center
prev = step(sampler, prev, delta)
if sampler.problem.homogeneous and (
n_samples * sampler.thinning % sampler.nproj == 0):
prev = sampler._reproject(prev)
center = sampler._reproject(center)
if i % sampler.thinning == 0:
samples[i//sampler.thinning - 1, ] = prev
center = ((n_samples * center) / (n_samples + 1) +
prev / (n_samples + 1))
n_samples += 1
return (sampler.retries, samples)
|
Sample a single chain for OptGPSampler.
center and n_samples are updated locally and forgotten afterwards.
|
def watch_log_for(self, exprs, from_mark=None, timeout=600, process=None, verbose=False, filename='system.log'):
"""
Watch the log until one or more (regular) expressions are found.
This method returns when all the expressions have been found or when it
times out (a TimeoutError is then raised). On successful completion,
a list of pairs (line matched, match object) is returned.
"""
start = time.time()
tofind = [exprs] if isinstance(exprs, string_types) else exprs
tofind = [re.compile(e) for e in tofind]
matchings = []
reads = ""
if len(tofind) == 0:
return None
log_file = os.path.join(self.get_path(), 'logs', filename)
output_read = False
while not os.path.exists(log_file):
time.sleep(.5)
if start + timeout < time.time():
raise TimeoutError(time.strftime("%d %b %Y %H:%M:%S", time.gmtime()) + " [" + self.name + "] Timed out waiting for {} to be created.".format(log_file))
if process and not output_read:
process.poll()
if process.returncode is not None:
self.print_process_output(self.name, process, verbose)
output_read = True
if process.returncode != 0:
raise RuntimeError() # Shouldn't reuse RuntimeError but I'm lazy
with open(log_file) as f:
if from_mark:
f.seek(from_mark)
while True:
# First, if we have a process to check, then check it.
# Skip on Windows - stdout/stderr is cassandra.bat
if not common.is_win() and not output_read:
if process:
process.poll()
if process.returncode is not None:
self.print_process_output(self.name, process, verbose)
output_read = True
if process.returncode != 0:
raise RuntimeError() # Shouldn't reuse RuntimeError but I'm lazy
line = f.readline()
if line:
reads = reads + line
for e in tofind:
m = e.search(line)
if m:
matchings.append((line, m))
tofind.remove(e)
if len(tofind) == 0:
return matchings[0] if isinstance(exprs, string_types) else matchings
else:
# yep, it's ugly
time.sleep(1)
if start + timeout < time.time():
raise TimeoutError(time.strftime("%d %b %Y %H:%M:%S", time.gmtime()) + " [" + self.name + "] Missing: " + str([e.pattern for e in tofind]) + ":\n" + reads[:50] + ".....\nSee {} for remainder".format(filename))
if process:
if common.is_win():
if not self.is_running():
return None
else:
process.poll()
if process.returncode == 0:
return None
|
Watch the log until one or more (regular) expressions are found.
This method returns when all the expressions have been found or when it
times out (a TimeoutError is then raised). On successful completion,
a list of pairs (line matched, match object) is returned.
|
def get_chain_details_by_related_pdb_chains(self, pdb_id, chain_id, pfam_accs):
''' Returns a dict of SCOPe details using information from related PDB chains.
This returns Pfam-level information for a PDB chain i.e. no details on the protein, species, or domain will be returned.
If there are SCOPe entries for the associated Pfam accession numbers which agree then this function returns
pretty complete information.
'''
if not pfam_accs:
return None
associated_pdb_chains = set()
pfam_api = self.get_pfam_api()
for pfam_acc in pfam_accs:
associated_pdb_chains = associated_pdb_chains.union(pfam_api.get_pdb_chains_from_pfam_accession_number(pfam_acc))
hits = []
#class_count = {}
pfam_scop_mapping = {}
for pdb_chain_pair in associated_pdb_chains:
ass_pdb_id, ass_chain_id = pdb_chain_pair[0], pdb_chain_pair[1]
hit = self.get_chain_details(ass_pdb_id, chain = ass_chain_id, internal_function_call = True, pfam_scop_mapping = pfam_scop_mapping)
if hit and hit.get('chains'):
assert(len(hit['chains']) == 1)
hits.append(hit['chains'][ass_chain_id])
#for k, v in hit.iteritems():
#class_count[v['sccs']] = class_count.get(v['sccs'], 0)
#class_count[v['sccs']] += 1
#print(' %s, %s: %s' % (v['pdb_id'], k, v['sccs']))
#pprint.pprint(class_count)
allowed_scop_domains = map(int, map(set.intersection, pfam_scop_mapping.values())[0])
allowed_scop_domains = list(set((allowed_scop_domains or []) + (self.get_sunid_for_pfam_accs(pfam_accs) or [])))
filtered_hits = []
print(pfam_accs)
print(allowed_scop_domains)
print('%d hits' % len(hits))
for hit in hits:
domains_to_ignore = []
for k, v in hit['domains'].iteritems():
if v['sunid'] in allowed_scop_domains:
filtered_hits.append(v)
print('%d filtered_hits' % len(filtered_hits))
if not filtered_hits:
return None
d = self.get_basic_pdb_chain_information(pdb_id, chain_id)
d.update(self.get_common_fields(filtered_hits))
d.update(dict(
SCOPe_sources = 'Pfam + SCOPe',
SCOPe_search_fields = 'Pfam + link_pdb.pdb_chain_id',
SCOPe_trust_level = 3
))
# Add the lowest common classification over all related Pfam families
for k, v in sorted(self.levels.iteritems()):
d[v] = None
d.update(dict(self.get_common_hierarchy(filtered_hits)))
return d
|
Returns a dict of SCOPe details using information from related PDB chains.
This returns Pfam-level information for a PDB chain i.e. no details on the protein, species, or domain will be returned.
If there are SCOPe entries for the associated Pfam accession numbers which agree then this function returns
pretty complete information.
|
def conv_cy(self, cy_cl):
"""Convert cycles (cy/CL) to other units, such as FLOP/s or It/s."""
if not isinstance(cy_cl, PrefixedUnit):
cy_cl = PrefixedUnit(cy_cl, '', 'cy/CL')
clock = self.machine['clock']
element_size = self.kernel.datatypes_size[self.kernel.datatype]
elements_per_cacheline = int(self.machine['cacheline size']) // element_size
it_s = clock/cy_cl*elements_per_cacheline
it_s.unit = 'It/s'
flops_per_it = sum(self.kernel._flops.values())
performance = it_s*flops_per_it
performance.unit = 'FLOP/s'
cy_it = cy_cl*elements_per_cacheline
cy_it.unit = 'cy/It'
return {'It/s': it_s,
'cy/CL': cy_cl,
'cy/It': cy_it,
'FLOP/s': performance}
|
Convert cycles (cy/CL) to other units, such as FLOP/s or It/s.
|
def by_interval_lookup(self, style_key, style_value):
"""Return a processor for an "interval" style value.
Parameters
----------
style_key : str
A style key.
style_value : dict
A dictionary with an "interval" key whose value consists of a
sequence of tuples where each tuple should have the form `(start,
end, x)`, where start is the start of the interval (inclusive), end
is the end of the interval, and x is either a style attribute (str)
and a boolean flag indicating to use the style attribute named by
`style_key`.
Returns
-------
A function.
"""
style_attr = style_key if self.style_types[style_key] is bool else None
intervals = style_value["interval"]
def proc(value, result):
try:
value = float(value)
except TypeError:
return result
for start, end, lookup_value in intervals:
if start is None:
start = float("-inf")
if end is None:
end = float("inf")
if start <= value < end:
if not lookup_value:
return result
return self.render(style_attr or lookup_value, result)
return result
return proc
|
Return a processor for an "interval" style value.
Parameters
----------
style_key : str
A style key.
style_value : dict
A dictionary with an "interval" key whose value consists of a
sequence of tuples where each tuple should have the form `(start,
end, x)`, where start is the start of the interval (inclusive), end
is the end of the interval, and x is either a style attribute (str)
and a boolean flag indicating to use the style attribute named by
`style_key`.
Returns
-------
A function.
|
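A standalone sketch of the interval-lookup idea (not the class method itself, which also needs self.render and self.style_types): map a numeric value to the entry whose half-open interval contains it.

def interval_lookup(intervals, value):
    # intervals is a sequence of (start, end, result) with half-open [start, end)
    try:
        value = float(value)
    except TypeError:
        return None
    for start, end, result in intervals:
        start = float("-inf") if start is None else start
        end = float("inf") if end is None else end
        if start <= value < end:
            return result
    return None

intervals = [(None, 50, "red"), (50, 80, "yellow"), (80, None, "green")]
print(interval_lookup(intervals, 75))    # yellow
print(interval_lookup(intervals, None))  # None -- non-numeric values fall through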
def check_call(state, callstr, argstr=None, expand_msg=None):
When checking a function definition or lambda function,
prepare has_equal_x for checking the call of a user-defined function.
Args:
callstr (str): call string that specifies how the function should be called, e.g. `f(1, a = 2)`.
``check_call()`` will replace ``f`` with the function/lambda you're targeting.
argstr (str): If specified, this overrides the way the function call is referred to in the expand message.
expand_msg (str): If specified, this overrides any messages that are prepended by previous SCT chains.
state (State): state object that is chained from.
:Example:
Student and solution code::
def my_power(x):
print("calculating sqrt...")
return(x * x)
SCT::
Ex().check_function_def('my_power').multi(
check_call("f(3)").has_equal_value(),
check_call("f(3)").has_equal_output()
)
"""
state.assert_is(
["function_defs", "lambda_functions"],
"check_call",
["check_function_def", "check_lambda_function"],
)
if expand_msg is None:
expand_msg = "To verify it, we reran {{argstr}}. "
stu_part, _argstr = build_call(callstr, state.student_parts["node"])
sol_part, _ = build_call(callstr, state.solution_parts["node"])
append_message = {"msg": expand_msg, "kwargs": {"argstr": argstr or _argstr}}
child = part_to_child(stu_part, sol_part, append_message, state)
return child
|
When checking a function definition or lambda function,
prepare has_equal_x for checking the call of a user-defined function.
Args:
callstr (str): call string that specifies how the function should be called, e.g. `f(1, a = 2)`.
``check_call()`` will replace ``f`` with the function/lambda you're targeting.
argstr (str): If specified, this overrides the way the function call is referred to in the expand message.
expand_msg (str): If specified, this overrides any messages that are prepended by previous SCT chains.
state (State): state object that is chained from.
:Example:
Student and solution code::
def my_power(x):
print("calculating sqrt...")
return(x * x)
SCT::
Ex().check_function_def('my_power').multi(
check_call("f(3)").has_equal_value(),
check_call("f(3)").has_equal_output()
)
|
def response(status, description, resource=DefaultResource):
# type: (HTTPStatus, str, Optional[Resource]) -> Callable
"""
Define an expected response.
The values are based off `Swagger <https://swagger.io/specification>`_.
"""
def inner(o):
value = Response(status, description, resource)
try:
getattr(o, 'responses').add(value)
except AttributeError:
setattr(o, 'responses', {value})
return o
return inner
|
Define an expected response.
The values are based off `Swagger <https://swagger.io/specification>`_.
|
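A hypothetical usage sketch, assuming the response decorator and its Response/DefaultResource types from the snippet above are importable; HTTPStatus comes from the standard library.

from http import HTTPStatus

@response(HTTPStatus.OK, "Listing returned")       # hypothetical endpoint
@response(HTTPStatus.NOT_FOUND, "No such record")
def get_record(record_id):
    ...

# Both Response values end up collected on get_record.responses.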
def parse_subscription(self, params, region, subscription):
"""
Parse a single subscription and reference it in its corresponding topic
:param params: Global parameters (defaults to {})
:param subscription: SNS Subscription
"""
topic_arn = subscription.pop('TopicArn')
topic_name = topic_arn.split(':')[-1]
if topic_name in self.topics:
topic = self.topics[topic_name]
manage_dictionary(topic['subscriptions'], 'protocol', {})
protocol = subscription.pop('Protocol')
manage_dictionary(topic['subscriptions']['protocol'], protocol, [])
topic['subscriptions']['protocol'][protocol].append(subscription)
topic['subscriptions_count'] += 1
|
Parse a single subscription and reference it in its corresponding topic
:param params: Global parameters (defaults to {})
:param subscription: SNS Subscription
|
def escape(s):
"""Convert the characters &, <, >, ' and " in string s to HTML-safe
sequences. Use this if you need to display text that might contain
such characters in HTML. Marks return value as markup string.
"""
if hasattr(s, '__html__'):
return s.__html__()
return Markup(text_type(s)
.replace('&', '&')
.replace('>', '>')
.replace('<', '<')
.replace("'", ''')
.replace('"', '"')
)
|
Convert the characters &, <, >, ' and " in string s to HTML-safe
sequences. Use this if you need to display text that might contain
such characters in HTML. Marks return value as markup string.
|
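A quick check, assuming the escape function above (and the Markup/text_type helpers it uses) is importable.

print(escape('<a href="x">Tom & Jerry\'s</a>'))
# &lt;a href=&quot;x&quot;&gt;Tom &amp; Jerry&#39;s&lt;/a&gt;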
def openParametersDialog(params, title=None):
'''
Opens a dialog to enter parameters.
Parameters are passed as a list of Parameter objects
Returns a dict with param names as keys and param values as values
Returns None if the dialog was cancelled
'''
QApplication.setOverrideCursor(QCursor(Qt.ArrowCursor))
dlg = ParametersDialog(params, title)
dlg.exec_()
QApplication.restoreOverrideCursor()
return dlg.values
|
Opens a dialog to enter parameters.
Parameters are passed as a list of Parameter objects
Returns a dict with param names as keys and param values as values
Returns None if the dialog was cancelled
|
def _init_steps(self,):
"""
Given the flow config and everything else, create a list of steps to run, sorted by step number.
:return: List[StepSpec]
"""
self._check_old_yaml_format()
config_steps = self.flow_config.steps
self._check_infinite_flows(config_steps)
steps = []
for number, step_config in config_steps.items():
specs = self._visit_step(number, step_config)
steps.extend(specs)
return sorted(steps, key=attrgetter("step_num"))
|
Given the flow config and everything else, create a list of steps to run, sorted by step number.
:return: List[StepSpec]
|
def cmd_export_all(*args):
"""
Arguments:
<output folder> [-- [--quality <0-100>] [--page_format <page_format>]]
Export all documents as PDF files.
Default quality is 50.
Default page format is A4.
Possible JSON replies:
--
{
"status": "error", "exception": "yyy",
"reason": "xxxx", "args": "(xxxx, )"
}
--
{
"status": "ok",
"docids": [
["xxx", "file:///tmp/xxx.pdf"],
["yyy", "file:///tmp/yyy.pdf"],
["zzz", "file:///tmp/zzz.pdf"]
],
"output_dir": "file:///tmp",
}
"""
(output_dir, quality, page_format) = _get_export_params(args)
dsearch = get_docsearch()
try:
os.mkdir(output_dir)
except FileExistsError: # NOQA (Python 3.x only)
pass
out = []
docs = [d for d in dsearch.docs]
docs.sort(key=lambda doc: doc.docid)
output_dir = FS.safe(output_dir)
for (doc_idx, doc) in enumerate(docs):
output_pdf = FS.join(output_dir, doc.docid + ".pdf")
exporter = doc.build_exporter(file_format="pdf")
if exporter.can_change_quality:
exporter.set_quality(quality)
if exporter.can_select_format:
exporter.set_page_format(page_format)
verbose(
"[{}/{}] Exporting {} --> {} ...".format(
doc_idx + 1, len(docs), doc.docid, output_pdf
)
)
exporter.save(output_pdf)
out.append((doc.docid, output_pdf))
doc = None
gc.collect()
verbose("Done")
reply({
"docids": out,
"output_dir": output_dir,
})
|
Arguments:
<output folder> [-- [--quality <0-100>] [--page_format <page_format>]]
Export all documents as PDF files.
Default quality is 50.
Default page format is A4.
Possible JSON replies:
--
{
"status": "error", "exception": "yyy",
"reason": "xxxx", "args": "(xxxx, )"
}
--
{
"status": "ok",
"docids": [
["xxx", "file:///tmp/xxx.pdf"],
["yyy", "file:///tmp/yyy.pdf"],
["zzz", "file:///tmp/zzz.pdf"]
],
"output_dir": "file:///tmp",
}
|
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Write the data encoding the Locate request payload to a buffer.
Args:
output_buffer (stream): A data buffer in which to encode object
data, supporting a write method.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
"""
local_buffer = utils.BytearrayStream()
if self._maximum_items:
self._maximum_items.write(local_buffer, kmip_version=kmip_version)
if self._offset_items:
self._offset_items.write(local_buffer, kmip_version=kmip_version)
if self._storage_status_mask:
self._storage_status_mask.write(
local_buffer,
kmip_version=kmip_version
)
if self._object_group_member:
self._object_group_member.write(
local_buffer,
kmip_version=kmip_version
)
if kmip_version < enums.KMIPVersion.KMIP_2_0:
if self._attributes:
for attribute in self.attributes:
attribute.write(
local_buffer,
kmip_version=kmip_version
)
else:
if self._attributes:
# TODO (ph) Add a new utility to avoid using TemplateAttributes
template_attribute = objects.TemplateAttribute(
attributes=self.attributes
)
attributes = objects.convert_template_attribute_to_attributes(
template_attribute
)
attributes.write(local_buffer, kmip_version=kmip_version)
else:
raise exceptions.InvalidField(
"The Locate request payload is missing the attributes "
"list."
)
self.length = local_buffer.length()
super(LocateRequestPayload, self).write(
output_buffer,
kmip_version=kmip_version
)
output_buffer.write(local_buffer.buffer)
|
Write the data encoding the Locate request payload to a buffer.
Args:
output_buffer (stream): A data buffer in which to encode object
data, supporting a write method.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
|
def clean_key(self):
"""
Validate the key contains an email address.
"""
key = self.cleaned_data["key"]
gpg = get_gpg()
result = gpg.import_keys(key)
if result.count == 0:
raise forms.ValidationError(_("Invalid Key"))
return key
|
Validate the key contains an email address.
|
def subscribe(self, user_token, topic):
"""
Subscribe a user to the given topic.
:param str user_token: The token of the user.
:param str topic: The topic.
:raises `requests.exceptions.HTTPError`: If an HTTP error occurred.
"""
response = _request('POST',
url=self.url_v1('/user/subscriptions/' + topic),
user_agent=self.user_agent,
user_token=user_token,
)
_raise_for_status(response)
|
Subscribe a user to the given topic.
:param str user_token: The token of the user.
:param str topic: The topic.
:raises `requests.exceptions.HTTPError`: If an HTTP error occurred.
|
def _compile(self, compile_request):
"""Perform the process of compilation, writing object files to the request's 'output_dir'.
NB: This method must arrange the output files so that `collect_cached_objects()` can collect all
of the results (or vice versa)!
"""
try:
argv = self._make_compile_argv(compile_request)
except self._HeaderOnlyLibrary:
self.context.log.debug('{} is a header-only library'.format(compile_request))
return
compiler = compile_request.compiler
output_dir = compile_request.output_dir
env = compiler.invocation_environment_dict
with self.context.new_workunit(
name=self.workunit_label, labels=[WorkUnitLabel.COMPILER]) as workunit:
try:
process = subprocess.Popen(
argv,
cwd=output_dir,
stdout=workunit.output('stdout'),
stderr=workunit.output('stderr'),
env=env)
except OSError as e:
workunit.set_outcome(WorkUnit.FAILURE)
raise self.NativeCompileError(
"Error invoking '{exe}' with command {cmd} and environment {env} for request {req}: {err}"
.format(exe=compiler.exe_filename, cmd=argv, env=env, req=compile_request, err=e))
rc = process.wait()
if rc != 0:
workunit.set_outcome(WorkUnit.FAILURE)
raise self.NativeCompileError(
"Error in '{section_name}' with command {cmd} and environment {env} for request {req}. "
"Exit code was: {rc}."
.format(section_name=self.workunit_label, cmd=argv, env=env, req=compile_request, rc=rc))
|
Perform the process of compilation, writing object files to the request's 'output_dir'.
NB: This method must arrange the output files so that `collect_cached_objects()` can collect all
of the results (or vice versa)!
|
def serialize_transaction(inputs, outputs, lock_time=0, version=1):
""" Serializes a transaction.
"""
# add in the inputs
serialized_inputs = ''.join([serialize_input(input) for input in inputs])
# add in the outputs
serialized_outputs = ''.join([serialize_output(output) for output in outputs])
return ''.join([
# add in the version number
hexlify(struct.pack('<I', version)),
# add in the number of inputs
hexlify(variable_length_int(len(inputs))),
# add in the inputs
serialized_inputs,
# add in the number of outputs
hexlify(variable_length_int(len(outputs))),
# add in the outputs
serialized_outputs,
# add in the lock time
hexlify(struct.pack('<I', lock_time)),
])
|
Serializes a transaction.
|
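The fixed-width fields are packed little-endian, as the struct format strings above show; a minimal check of the version field encoding:

import struct
from binascii import hexlify

print(hexlify(struct.pack('<I', 1)))  # b'01000000' -- version 1 as 4 bytes, little-endian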
def has_colors(stream):
"""
Determine if the terminal supports ansi colors.
"""
if not hasattr(stream, "isatty"):
return False
if not stream.isatty():
return False # auto color only on TTYs
try:
import curses
curses.setupterm()
return curses.tigetnum("colors") > 2
except:
return False
|
Determine if the terminal supports ansi colors.
|
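Typical use, assuming has_colors above is in scope: gate ANSI escape sequences on the capability check.

import sys

if has_colors(sys.stdout):
    print("\033[32mok\033[0m")
else:
    print("ok")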
def rnoncentral_t(mu, lam, nu, size=None):
"""
Non-central Student's t random variates.
"""
tau = rgamma(nu / 2., nu / (2. * lam), size)
return rnormal(mu, tau)
|
Non-central Student's t random variates.
|
def show_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
"""
show_stories = self._get_stories('showstories', limit)
if raw:
show_stories = [story.raw for story in show_stories]
return show_stories
|
Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
|
def clean_video_data(_data):
"""
Clean video data:
-> cleans title
-> ...
Args:
_data (dict): Information about the video.
Returns:
dict: Refined video data.
"""
data = _data.copy()
# TODO: fix this ugliness
title = data.get('title')
if title:
data['title'] = clean_title(title)
return data
|
Clean video data:
-> cleans title
-> ...
Args:
_data (dict): Information about the video.
Returns:
dict: Refined video data.
|
def RemoveNIC(self,network_id):
"""Remove the NIC associated with the provided network from the server.
https://www.ctl.io/api-docs/v2/#servers-remove-secondary-network
network_id - ID associated with the network to remove
>>> network = clc.v2.Networks(location="VA1").Get("10.128.166.0/24")
>>> clc.v2.Server(alias='BTDI',id='WA1BTDIKRT06'). \
RemoveNIC(network_id=network.id). \
WaitUntilComplete()
0
"""
return(
clc.v2.Requests(
clc.v2.API.Call('DELETE','servers/%s/%s/networks/%s'
% (self.alias,self.id,network_id),
session=self.session),
alias=self.alias,
session=self.session
))
|
Remove the NIC associated with the provided network from the server.
https://www.ctl.io/api-docs/v2/#servers-remove-secondary-network
network_id - ID associated with the network to remove
>>> network = clc.v2.Networks(location="VA1").Get("10.128.166.0/24")
>>> clc.v2.Server(alias='BTDI',id='WA1BTDIKRT06'). \
RemoveNIC(network_id=network.id). \
WaitUntilComplete()
0
|
def autosave(self, index):
"""
Autosave a file.
Do nothing if the `changed_since_autosave` flag is not set or the file
is newly created (and thus not named by the user). Otherwise, save a
copy of the file with the name given by `self.get_autosave_filename()`
and clear the `changed_since_autosave` flag. Errors raised when saving
are silently ignored.
Args:
index (int): index into self.stack.data
"""
finfo = self.stack.data[index]
document = finfo.editor.document()
if not document.changed_since_autosave or finfo.newly_created:
return
autosave_filename = self.get_autosave_filename(finfo.filename)
logger.debug('Autosaving %s to %s', finfo.filename, autosave_filename)
try:
self.stack._write_to_file(finfo, autosave_filename)
document.changed_since_autosave = False
except EnvironmentError as error:
action = (_('Error while autosaving {} to {}')
.format(finfo.filename, autosave_filename))
msgbox = AutosaveErrorDialog(action, error)
msgbox.exec_if_enabled()
|
Autosave a file.
Do nothing if the `changed_since_autosave` flag is not set or the file
is newly created (and thus not named by the user). Otherwise, save a
copy of the file with the name given by `self.get_autosave_filename()`
and clear the `changed_since_autosave` flag. Errors raised when saving
are silently ignored.
Args:
index (int): index into self.stack.data
|
def connect(self, nice_quit_ev):
"""Connect to the server. We expect this to implement backoff and all
connection logistics for servers that were discovered via a lookup
node.
"""
_logger.debug("Connecting to discovered node: [%s]", self.server_host)
stop_epoch = time.time() + \
nsq.config.client.MAXIMUM_CONNECT_ATTEMPT_PERIOD_S
timeout_s = nsq.config.client.INITIAL_CONNECT_FAIL_WAIT_S
backoff_rate = nsq.config.client.CONNECT_FAIL_WAIT_BACKOFF_RATE
while stop_epoch >= time.time() and nice_quit_ev.is_set() is False:
try:
c = self.primitive_connect()
except gevent.socket.error:
_logger.exception("Could not connect to discovered server: "
"[%s]", self.server_host)
else:
_logger.info("Discovered server-node connected: [%s]",
self.server_host)
return c
timeout_s = min(timeout_s * backoff_rate,
nsq.config.client.MAXIMUM_CONNECT_FAIL_WAIT_S)
_logger.info("Waiting for (%d) seconds before reconnecting.",
timeout_s)
gevent.sleep(timeout_s)
raise nsq.exceptions.NsqConnectGiveUpError(
"Could not connect to the nsqlookupd server: [%s]" %
(self.server_host,))
|
Connect to the server. We expect this to implement backoff and all
connection logistics for servers that were discovered via a lookup
node.
|
def docgraph2freqt(docgraph, root=None, include_pos=False,
escape_func=FREQT_ESCAPE_FUNC):
"""convert a docgraph into a FREQT string."""
if root is None:
return u"\n".join(
sentence2freqt(docgraph, sentence, include_pos=include_pos,
escape_func=escape_func)
for sentence in docgraph.sentences)
else:
return sentence2freqt(docgraph, root, include_pos=include_pos,
escape_func=escape_func)
|
convert a docgraph into a FREQT string.
|
def runStickyEregressions(infile_name,interval_size,meas_err,sticky,all_specs):
'''
Runs regressions for the main tables of the StickyC paper and produces a LaTeX
table with results for one "panel".
Parameters
----------
infile_name : str
Name of tab-delimited text file with simulation data. Assumed to be in
the results directory, and was almost surely generated by makeStickyEdataFile
unless we resort to fabricating simulated data. THAT'S A JOKE, FUTURE REFEREES.
interval_size : int
Number of periods in each sub-interval.
meas_err : bool
Indicator for whether to add measurement error to DeltaLogC.
sticky : bool
Indicator for whether these results used sticky expectations.
all_specs : bool
Indicator for whether this panel should include all specifications or
just the OLS on lagged consumption growth.
Returns
-------
panel_text : str
String with one panel's worth of LaTeX input.
'''
# Read in the data from the infile
with open(results_dir + infile_name + '.txt') as f:
my_reader = csv.reader(f, delimiter='\t')
all_data = list(my_reader)
# Unpack the data into numpy arrays
obs = len(all_data) - 1
DeltaLogC_me = np.zeros(obs)
DeltaLogC = np.zeros(obs)
DeltaLogY = np.zeros(obs)
A = np.zeros(obs)
BigTheta = np.zeros(obs)
Delta8LogC = np.zeros(obs)
Delta8LogY = np.zeros(obs)
Delta8LogC_me = np.zeros(obs)
Measurement_Error = np.zeros(obs)
Mrkv_hist = np.zeros(obs,dtype=int)
R = np.zeros(obs)
has_mrkv = 'MrkvState' in all_data[0]
has_R = 'R' in all_data[0]
for i in range(obs):
j = i+1
DeltaLogC_me[i] = float(all_data[j][1])
DeltaLogC[i] = float(all_data[j][2])
DeltaLogY[i] = float(all_data[j][3])
A[i] = float(all_data[j][4])
BigTheta[i] = float(all_data[j][5])
Delta8LogC[i] = float(all_data[j][6])
Delta8LogY[i] = float(all_data[j][7])
Delta8LogC_me[i] = float(all_data[j][8])
Measurement_Error[i] = float(all_data[j][9])
if has_mrkv:
Mrkv_hist[i] = int(float(all_data[j][10]))
if has_R:
R[i] = float(all_data[j][11])
# Determine how many subsample intervals to run (and initialize array of coefficients)
N = DeltaLogC.size // interval_size
CoeffsArray = np.zeros((N,7)) # Order: DeltaLogC_OLS, DeltaLogC_IV, DeltaLogY_IV, A_OLS, DeltaLogC_HR, DeltaLogY_HR, A_HR
StdErrArray = np.zeros((N,7)) # Same order as above
RsqArray = np.zeros((N,5))
PvalArray = np.zeros((N,5))
OIDarray = np.zeros((N,5)) + np.nan
InstrRsqVec = np.zeros(N)
# Loop through subsample intervals, running various regressions
for n in range(N):
# Select the data subsample
start = n*interval_size
end = (n+1)*interval_size
if meas_err:
DeltaLogC_n = DeltaLogC_me[start:end]
Delta8LogC_n = Delta8LogC_me[start:end]
else:
DeltaLogC_n = DeltaLogC[start:end]
Delta8LogC_n = Delta8LogC[start:end]
DeltaLogY_n = DeltaLogY[start:end]
A_n = A[start:end]
Delta8LogY_n = Delta8LogY[start:end]
# Run OLS on log consumption
mod = sm.OLS(DeltaLogC_n[1:],sm.add_constant(DeltaLogC_n[0:-1]))
res = mod.fit()
CoeffsArray[n,0] = res._results.params[1]
StdErrArray[n,0] = res._results.HC0_se[1]
RsqArray[n,0] = res._results.rsquared_adj
PvalArray[n,0] = res._results.f_pvalue
# Define instruments for IV regressions
temp = np.transpose(np.vstack([DeltaLogC_n[1:-3],DeltaLogC_n[:-4],DeltaLogY_n[1:-3],DeltaLogY_n[:-4],A_n[1:-3],A_n[:-4],Delta8LogC_n[1:-3],Delta8LogY_n[1:-3]]))
instruments = sm.add_constant(temp) # With measurement error
# Run IV on log consumption
mod = sm.OLS(DeltaLogC_n[3:-1],instruments)
res = mod.fit()
DeltaLogC_predict = res.predict()
mod_2ndStage = sm.OLS(DeltaLogC_n[4:],sm.add_constant(DeltaLogC_predict))
res_2ndStage = mod_2ndStage.fit()
mod_IV = smsrg.IV2SLS(DeltaLogC_n[4:], sm.add_constant(DeltaLogC_n[3:-1]),instruments)
res_IV = mod_IV.fit()
CoeffsArray[n,1] = res_IV._results.params[1]
StdErrArray[n,1] = res_IV.bse[1]
RsqArray[n,1] = res_2ndStage._results.rsquared_adj
PvalArray[n,1] = res._results.f_pvalue
# Run IV on log income
mod = sm.OLS(DeltaLogY_n[4:],instruments)
res = mod.fit()
DeltaLogY_predict = res.predict()
mod_2ndStage = sm.OLS(DeltaLogC_n[4:],sm.add_constant(DeltaLogY_predict))
res_2ndStage = mod_2ndStage.fit()
mod_IV = smsrg.IV2SLS(DeltaLogC_n[4:], sm.add_constant(DeltaLogY_n[4:]),instruments)
res_IV = mod_IV.fit()
CoeffsArray[n,2] = res_IV._results.params[1]
StdErrArray[n,2] = res_IV.bse[1]
RsqArray[n,2] = res_2ndStage._results.rsquared_adj
PvalArray[n,2] = res._results.f_pvalue
# Run IV on assets
mod = sm.OLS(A_n[3:-1],instruments)
res = mod.fit()
A_predict = res.predict()
mod_2ndStage = sm.OLS(DeltaLogC_n[4:],sm.add_constant(A_predict))
res_2ndStage = mod_2ndStage.fit()
mod_IV = smsrg.IV2SLS(DeltaLogC_n[4:], sm.add_constant(A_n[3:-1]),instruments)
res_IV = mod_IV.fit()
CoeffsArray[n,3] = res_IV._results.params[1]
StdErrArray[n,3] = res_IV.bse[1]
RsqArray[n,3] = res_2ndStage._results.rsquared_adj
PvalArray[n,3] = res._results.f_pvalue
# Run horserace IV
regressors = sm.add_constant(np.transpose(np.array([DeltaLogC_predict,DeltaLogY_predict,A_predict])))
mod_2ndStage = sm.OLS(DeltaLogC_n[4:],regressors)
res_2ndStage = mod_2ndStage.fit()
mod_IV = smsrg.IV2SLS(DeltaLogC_n[4:], sm.add_constant(np.transpose(np.array([DeltaLogC_n[3:-1],DeltaLogY_n[4:],A_n[3:-1]]))),instruments)
res_IV = mod_IV.fit()
CoeffsArray[n,4] = res_IV._results.params[1]
CoeffsArray[n,5] = res_IV._results.params[2]
CoeffsArray[n,6] = res_IV._results.params[3]
StdErrArray[n,4] = res_IV._results.bse[1]
StdErrArray[n,5] = res_IV._results.bse[2]
StdErrArray[n,6] = res_IV._results.bse[3]
RsqArray[n,4] = res_2ndStage._results.rsquared_adj
PvalArray[n,4] = np.nan #Need to put in KP stat here, may have to do this in Stata
# Regress Delta C_{t+1} on instruments
mod = sm.OLS(DeltaLogC_n[4:],instruments)
res = mod.fit()
InstrRsqVec[n] = res._results.rsquared_adj
# Count the number of times we reach significance in each variable
t_stat_array = CoeffsArray/StdErrArray
C_successes_95 = np.sum(t_stat_array[:,4] > 1.96)
Y_successes_95 = np.sum(t_stat_array[:,5] > 1.96)
sigma_meas_err = np.std(Measurement_Error)
N_out = [C_successes_95,Y_successes_95,N,np.mean(InstrRsqVec),sigma_meas_err**2]
# Make results table and return it
panel_text = makeResultsPanel(Coeffs=np.mean(CoeffsArray,axis=0),
StdErrs=np.mean(StdErrArray,axis=0),
Rsq=np.mean(RsqArray,axis=0),
Pvals=np.mean(PvalArray,axis=0),
OID=np.mean(OIDarray,axis=0),
Counts=N_out,
meas_err=meas_err,
sticky=sticky,
all_specs=all_specs)
return panel_text
|
Runs regressions for the main tables of the StickyC paper and produces a LaTeX
table with results for one "panel".
Parameters
----------
infile_name : str
Name of tab-delimited text file with simulation data. Assumed to be in
the results directory, and was almost surely generated by makeStickyEdataFile
unless we resort to fabricating simulated data. THAT'S A JOKE, FUTURE REFEREES.
interval_size : int
Number of periods in each sub-interval.
meas_err : bool
Indicator for whether to add measurement error to DeltaLogC.
sticky : bool
Indicator for whether these results used sticky expectations.
all_specs : bool
Indicator for whether this panel should include all specifications or
just the OLS on lagged consumption growth.
Returns
-------
panel_text : str
String with one panel's worth of LaTeX input.
|
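The IV regressions above all follow the same two-stage pattern: project the regressor of interest on the instrument set, regress consumption growth on the fitted values (to get the adjusted R-squared), and confirm the slope with IV2SLS. A minimal self-contained sketch of that mechanics on made-up data (not the paper's simulation output), assuming `smsrg` refers to statsmodels.sandbox.regression.gmm:
import numpy as np
import statsmodels.api as sm
from statsmodels.sandbox.regression.gmm import IV2SLS

rng = np.random.default_rng(0)
z = rng.normal(size=(500, 2))                    # instruments
u = rng.normal(size=500)                         # confounder making x endogenous
x = z @ np.array([0.6, -0.4]) + u                # endogenous regressor
y = 0.5 * x + u + rng.normal(size=500)           # outcome

instruments = sm.add_constant(z)
x_hat = sm.OLS(x, instruments).fit().predict()   # first stage: fitted values
second = sm.OLS(y, sm.add_constant(x_hat)).fit() # second stage: R-squared diagnostics
iv = IV2SLS(y, sm.add_constant(x), instruments).fit()
print(second.params[1], iv.params[1])            # slopes agree; standard errors differ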
def name(self):
"""Return the complete file and path name of the file."""
return str(conf.lib.clang_getCString(conf.lib.clang_getFileName(self)))
|
Return the complete file and path name of the file.
|
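For context, a hypothetical usage sketch assuming this is the File.name accessor from the clang.cindex Python bindings (libclang must be installed and discoverable):
import os, tempfile
import clang.cindex

src = os.path.join(tempfile.mkdtemp(), 'example.c')
with open(src, 'w') as f:
    f.write('int main(void) { return 0; }\n')

index = clang.cindex.Index.create()
tu = index.parse(src)                            # parse a translation unit
main_file = clang.cindex.File.from_name(tu, src)
print(main_file.name)                            # the path the file was opened under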
def _deprecated_kwargs(kwargs, arg_newarg):
""" arg_newarg is a list of tuples, where each tuple has a pair of strings.
('old_arg', 'new_arg')
A DeprecationWarning is raised for the arguments that need to be
replaced.
"""
warn_for = []
for (arg, new_kw) in arg_newarg:
if arg in kwargs.keys():
val = kwargs.pop(arg)
kwargs[new_kw] = val
warn_for.append((arg, new_kw))
if len(warn_for) > 0:
if len(warn_for) == 1:
warnings.warn("Argument '{}' is deprecated. Use {} instead".
format(warn_for[0][0], warn_for[0][1]),
DeprecationWarning, stacklevel=4)
else:
args = ", ".join([x[0] for x in warn_for])
repl = ", ".join([x[1] for x in warn_for])
warnings.warn(
"Arguments '{}' are deprecated. Use '{}' instead respectively".
format(args, repl),
DeprecationWarning, stacklevel=4)
return kwargs
|
arg_newarg is a list of tuples, where each tuple has a pair of strings.
('old_arg', 'new_arg')
A DeprecationWarning is raised for the arguments that need to be
replaced.
|
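A minimal usage sketch with a hypothetical wrapper function that renames deprecated keyword arguments before handing them to the real implementation ('color_map' and 'lw' are invented deprecated names here):
import warnings

def plot(**kwargs):
    # rename deprecated keywords to their current equivalents
    kwargs = _deprecated_kwargs(kwargs, [('color_map', 'cmap'), ('lw', 'linewidth')])
    return kwargs

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    print(plot(color_map='viridis', lw=2))   # {'cmap': 'viridis', 'linewidth': 2}
    print(caught[0].category)                # <class 'DeprecationWarning'>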
def load(self, languages=[]):
"""Loads the Duckling corpus.
Languages can be specified, defaults to all.
Args:
languages: Optional parameter to specify languages,
e.g. [Duckling.ENGLISH, Duckling.FRENCH] or supported ISO 639-1 Codes (e.g. ["en", "fr"])
"""
duckling_load = self.clojure.var("duckling.core", "load!")
clojure_hashmap = self.clojure.var("clojure.core", "hash-map")
clojure_list = self.clojure.var("clojure.core", "list")
if languages:
# Duckling's load function expects ISO 639-1 Language Codes (e.g. "en")
iso_languages = [Language.convert_to_iso(lang) for lang in languages]
duckling_load.invoke(
clojure_hashmap.invoke(
self.clojure.read(':languages'),
clojure_list.invoke(*iso_languages)
)
)
else:
duckling_load.invoke()
self._is_loaded = True
|
Loads the Duckling corpus.
Languages can be specified, defaults to all.
Args:
languages: Optional parameter to specify languages,
e.g. [Duckling.ENGLISH, Duckling.FRENCH] or supported ISO 639-1 Codes (e.g. ["en", "fr"])
|
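A hypothetical usage sketch, assuming this method belongs to the duckling Python package (a JPype bridge to the Clojure Duckling library), which requires a working JVM:
from duckling import Duckling

d = Duckling()                       # starts the JVM on first use
d.load(languages=['en', 'fr'])       # ISO 639-1 codes, as the docstring allows
print(d.parse('tomorrow at 8pm'))    # parse with the loaded corpora (English default)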
def name(function):
"""
Retrieve a pretty name for the function
:param function: function to get name from
:return: pretty name
"""
if isinstance(function, types.FunctionType):
return function.__name__
else:
return str(function)
|
Retrieve a pretty name for the function
:param function: function to get name from
:return: pretty name
|
def list_nodes_select(call=None):
'''
Return a list of the VMs that are on the provider, with select fields
'''
if not call:
call = 'select'
if not get_configured_provider():
return
info = ['id', 'name', 'image', 'size', 'state', 'public_ips', 'private_ips']
return salt.utils.cloud.list_nodes_select(
list_nodes_full(call='action'),
__opts__.get('query.selection', info), call)
|
Return a list of the VMs that are on the provider, with select fields
|
def rating(self, value):
"""Set the rating parameter and regenerate the thumbnail link."""
self._rating = value
self._thumb = self._link_to_img()
|
Set the rating parameter and regenerate the thumbnail link.
|
def get_default_params(type='BSPLINE'):
""" get_default_params(type='BSPLINE')
Get `Parameters` struct with parameters that users may want to tweak.
The given `type` specifies the type of allowed transform, and can
be 'RIGID', 'AFFINE', 'BSPLINE'.
For detail on what parameters are available and how they should be used,
we refer to the Elastix documentation. Here is a description of the
most common parameters:
* Transform (str):
Can be 'BSplineTransform', 'EulerTransform', or
'AffineTransform'. The transformation to apply. Chosen based on `type`.
* FinalGridSpacingInPhysicalUnits (int):
When using the BSplineTransform, the final spacing of the grid.
This controls the smoothness of the final deformation.
* AutomaticScalesEstimation (bool):
When using a rigid or affine transform. Scales the affine matrix
elements compared to the translations, to make sure they are in
the same range. In general, it's best to use automatic scales
estimation.
* AutomaticTransformInitialization (bool):
When using a rigid or affine transform. Automatically guess an
initial translation by aligning the geometric centers of the
fixed and moving.
* NumberOfResolutions (int):
Most registration algorithms adopt a multiresolution approach
to direct the solution towards a global optimum and to speed
up the process. This parameter specifies the number of scales
to apply the registration at. (default 4)
* MaximumNumberOfIterations (int):
Maximum number of iterations in each resolution level.
200-2000 works usually fine for nonrigid registration.
The more, the better, but the longer computation time.
This is an important parameter! (default 500).
"""
# Init
p = Parameters()
type = type.upper()
# ===== Metric to use =====
p.Metric = 'AdvancedMattesMutualInformation'
# Number of grey level bins in each resolution level,
# for the mutual information. 16 or 32 usually works fine.
# sets default value for NumberOf[Fixed/Moving]HistogramBins
p.NumberOfHistogramBins = 32
# Taking samples for mutual information
p.ImageSampler = 'RandomCoordinate'
p.NumberOfSpatialSamples = 2048
p.NewSamplesEveryIteration = True
# ====== Transform to use ======
# The number of levels in the image pyramid
p.NumberOfResolutions = 4
if type in ['B', 'BSPLINE', 'B-SPLINE']:
# Bspline transform
p.Transform = 'BSplineTransform'
# The final grid spacing (at the smallest level)
p.FinalGridSpacingInPhysicalUnits = 16
if type in ['RIGID', 'EULER', 'AFFINE']:
# Affine or Euler transform
if type in ['RIGID', 'EULER']:
p.Transform = 'EulerTransform'
else:
p.Transform = 'AffineTransform'
# Scales the affine matrix elements compared to the translations,
# to make sure they are in the same range. In general, it's best to
# use automatic scales estimation.
p.AutomaticScalesEstimation = True
# Automatically guess an initial translation by aligning the
# geometric centers of the fixed and moving.
p.AutomaticTransformInitialization = True
# ===== Optimizer to use =====
p.Optimizer = 'AdaptiveStochasticGradientDescent'
# Maximum number of iterations in each resolution level:
# 200-2000 works usually fine for nonrigid registration.
# The more, the better, but the longer computation time.
# This is an important parameter!
p.MaximumNumberOfIterations = 500
    # The step size of the optimizer, in mm. By default the voxel size is used,
    # which usually works well. In case of unusually high-resolution images
    # (e.g. histology) it is necessary to increase this value a bit, to the size
    # of the "smallest visible structure" in the image:
    #p.MaximumStepLength = 1.0  # by default the voxel spacing is used
# Another optional parameter for the AdaptiveStochasticGradientDescent
#p.SigmoidInitialTime = 4.0
# ===== Also interesting parameters =====
#p.FinalGridSpacingInVoxels = 16
#p.GridSpacingSchedule = [4.0, 4.0, 2.0, 1.0]
#p.ImagePyramidSchedule = [8 8 4 4 2 2 1 1]
#p.ErodeMask = "false"
# Done
return p
|
get_default_params(type='BSPLINE')
Get `Parameters` struct with parameters that users may want to tweak.
The given `type` specifies the type of allowed transform, and can
be 'RIGID', 'AFFINE', 'BSPLINE'.
For detail on what parameters are available and how they should be used,
we refer to the Elastix documentation. Here is a description of the
most common parameters:
* Transform (str):
Can be 'BSplineTransform', 'EulerTransform', or
'AffineTransform'. The transformation to apply. Chosen based on `type`.
* FinalGridSpacingInPhysicalUnits (int):
When using the BSplineTransform, the final spacing of the grid.
This controls the smoothness of the final deformation.
* AutomaticScalesEstimation (bool):
When using a rigid or affine transform. Scales the affine matrix
elements compared to the translations, to make sure they are in
the same range. In general, it's best to use automatic scales
estimation.
* AutomaticTransformInitialization (bool):
When using a rigid or affine transform. Automatically guess an
initial translation by aligning the geometric centers of the
fixed and moving.
* NumberOfResolutions (int):
Most registration algorithms adopt a multiresolution approach
to direct the solution towards a global optimum and to speed
up the process. This parameter specifies the number of scales
to apply the registration at. (default 4)
* MaximumNumberOfIterations (int):
Maximum number of iterations in each resolution level.
200-2000 works usually fine for nonrigid registration.
The more, the better, but the longer computation time.
This is an important parameter! (default 500).
|
def parse_delay_import_directory(self, rva, size):
"""Walk and parse the delay import directory."""
import_descs = []
while True:
try:
# If the RVA is invalid all would blow up. Some PEs seem to be
# specially nasty and have an invalid RVA.
data = self.get_data( rva, Structure(self.__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__).sizeof() )
            except PEFormatError as e:
self.__warnings.append(
'Error parsing the Delay import directory at RVA: 0x%x' % ( rva ) )
break
import_desc = self.__unpack_data__(
self.__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__,
data, file_offset = self.get_offset_from_rva(rva) )
# If the structure is all zeroes, we reached the end of the list
if not import_desc or import_desc.all_zeroes():
break
rva += import_desc.sizeof()
try:
import_data = self.parse_imports(
import_desc.pINT,
import_desc.pIAT,
None)
            except PEFormatError as e:
self.__warnings.append(
'Error parsing the Delay import directory. ' +
'Invalid import data at RVA: 0x%x' % ( rva ) )
break
if not import_data:
continue
dll = self.get_string_at_rva(import_desc.szName)
if not is_valid_dos_filename(dll):
dll = '*invalid*'
if dll:
import_descs.append(
ImportDescData(
struct = import_desc,
imports = import_data,
dll = dll))
return import_descs
|
Walk and parse the delay import directory.
|
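In practice this method is invoked internally while a PE file is parsed; a hedged usage sketch with the pefile package, where the parsed results surface as DIRECTORY_ENTRY_DELAY_IMPORT:
import pefile

pe = pefile.PE('C:/Windows/System32/user32.dll')    # path to any PE with delay imports
for entry in getattr(pe, 'DIRECTORY_ENTRY_DELAY_IMPORT', []):
    print(entry.dll)                                # name of the delay-loaded DLL
    for imp in entry.imports:
        print('    ', imp.name)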