| code | docstring |
|---|---|
def _unknown_args(self, args):
"""Log argparser unknown arguments.
Args:
args (list): List of unknown arguments
"""
for u in args:
self.tcex.log.warning(u'Unsupported arg found ({}).'.format(u))
|
Log argparser unknown arguments.
Args:
args (list): List of unknown arguments
|
def lookup_camera_by_id(self, device_id):
"""Return camera object by device_id."""
matches = list(filter(
lambda cam: cam.device_id == device_id, self.cameras))
if matches:
return matches[0]
return None
|
Return camera object by device_id.
|
def _check_pillar_minions(self, expr, delimiter, greedy):
'''
Return the minions found by looking via pillar
'''
return self._check_cache_minions(expr, delimiter, greedy, 'pillar')
|
Return the minions found by looking via pillar
|
def wait_for_binary_interface(self, **kwargs):
"""
Waits for the Binary CQL interface to be listening. If the version is >= 1.2, will check the
log for 'Starting listening for CQL clients' before checking for the
interface to be listening.
Emits a warning if not listening after 30 seconds.
"""
if self.cluster.version() >= '1.2':
self.watch_log_for("Starting listening for CQL clients", **kwargs)
binary_itf = self.network_interfaces['binary']
if not common.check_socket_listening(binary_itf, timeout=30):
warnings.warn("Binary interface %s:%s is not listening after 30 seconds, node may have failed to start."
% (binary_itf[0], binary_itf[1]))
|
Waits for the Binary CQL interface to be listening. If the version is >= 1.2, will check the
log for 'Starting listening for CQL clients' before checking for the
interface to be listening.
Emits a warning if not listening after 30 seconds.
|
def _fullqualname_method_py3(obj):
"""Fully qualified name for 'method' objects in Python 3.
"""
if inspect.isclass(obj.__self__):
cls = obj.__self__.__qualname__
else:
cls = obj.__self__.__class__.__qualname__
return obj.__self__.__module__ + '.' + cls + '.' + obj.__name__
|
Fully qualified name for 'method' objects in Python 3.
|
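A self-contained illustration of the name resolution in `_fullqualname_method_py3` above (the `Greeter` class is purely illustrative): classmethods are bound to the class itself, while instance methods are bound to an instance of the class.
import inspect

class Greeter:
    def hello(self):
        pass

    @classmethod
    def create(cls):
        pass

def fullqualname(obj):
    # same branch logic as _fullqualname_method_py3
    if inspect.isclass(obj.__self__):
        cls = obj.__self__.__qualname__
    else:
        cls = obj.__self__.__class__.__qualname__
    return obj.__self__.__module__ + '.' + cls + '.' + obj.__name__

print(fullqualname(Greeter().hello))  # '__main__.Greeter.hello'
print(fullqualname(Greeter.create))   # '__main__.Greeter.create'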
def for_user(self, user):
"""
All folders the given user can do something with.
"""
qs = SharedMemberQuerySet(model=self.model, using=self._db, user=user)
qs = qs.filter(Q(author=user) | Q(foldershareduser__user=user))
return qs.distinct() & self.distinct()
|
All folders the given user can do something with.
|
def get_work_item_by_id(self, wi_id):
'''
Retrieves a single work item based off of the supplied ID
:param wi_id: The work item ID number
:return: Workitem or None
'''
work_items = self.get_work_items(id=wi_id)
if work_items:
return work_items[0]
return None
|
Retrieves a single work item based off of the supplied ID
:param wi_id: The work item ID number
:return: Workitem or None
|
def remove(self, label):
"""Remove a label.
Args:
label (gkeepapi.node.Label): The Label object.
"""
if label.id in self._labels:
self._labels[label.id] = None
self._dirty = True
|
Remove a label.
Args:
label (gkeepapi.node.Label): The Label object.
|
def get_range(self, ignore_blank_lines=True):
"""
Gets the fold region range (start and end line).
.. note:: The start line does not encompass the trigger line.
:param ignore_blank_lines: True to ignore blank lines at the end of the
scope (the method will rewind to find that last meaningful block
that is part of the fold scope).
:returns: tuple(int, int)
"""
ref_lvl = self.trigger_level
first_line = self._trigger.blockNumber()
block = self._trigger.next()
last_line = block.blockNumber()
lvl = self.scope_level
if ref_lvl == lvl: # for zone set programmatically such as imports
# in pyqode.python
ref_lvl -= 1
while (block.isValid() and
TextBlockHelper.get_fold_lvl(block) > ref_lvl):
last_line = block.blockNumber()
block = block.next()
if ignore_blank_lines and last_line:
block = block.document().findBlockByNumber(last_line)
while block.blockNumber() and block.text().strip() == '':
block = block.previous()
last_line = block.blockNumber()
return first_line, last_line
|
Gets the fold region range (start and end line).
.. note:: The start line does not encompass the trigger line.
:param ignore_blank_lines: True to ignore blank lines at the end of the
scope (the method will rewind to find that last meaningful block
that is part of the fold scope).
:returns: tuple(int, int)
|
def add_sender(self, partition=None, operation=None, send_timeout=60, keep_alive=30, auto_reconnect=True):
"""
Add a sender to the client to send EventData objects to an EventHub.
:param partition: Optionally specify a particular partition to send to.
If omitted, the events will be distributed to available partitions via
round-robin.
:type partition: str
:param operation: An optional operation to be appended to the hostname in the target URL.
The value must start with `/` character.
:type operation: str
:param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is
queued. Default value is 60 seconds. If set to 0, there will be no timeout.
:type send_timeout: int
:param keep_alive: The time interval in seconds between pinging the connection to keep it alive during
periods of inactivity. The default value is 30 seconds. If set to `None`, the connection will not
be pinged.
:type keep_alive: int
:param auto_reconnect: Whether to automatically reconnect the sender if a retryable error occurs.
Default value is `True`.
:rtype: ~azure.eventhub.sender.Sender
"""
target = "amqps://{}{}".format(self.address.hostname, self.address.path)
if operation:
target = target + operation
handler = Sender(
self, target, partition=partition, send_timeout=send_timeout,
keep_alive=keep_alive, auto_reconnect=auto_reconnect)
self.clients.append(handler)
return handler
|
Add a sender to the client to send EventData objects to an EventHub.
:param partition: Optionally specify a particular partition to send to.
If omitted, the events will be distributed to available partitions via
round-robin.
:type partition: str
:param operation: An optional operation to be appended to the hostname in the target URL.
The value must start with `/` character.
:type operation: str
:param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is
queued. Default value is 60 seconds. If set to 0, there will be no timeout.
:type send_timeout: int
:param keep_alive: The time interval in seconds between pinging the connection to keep it alive during
periods of inactivity. The default value is 30 seconds. If set to `None`, the connection will not
be pinged.
:type keep_alive: int
:param auto_reconnect: Whether to automatically reconnect the sender if a retryable error occurs.
Default value is `True`.
:rtype: ~azure.eventhub.sender.Sender
|
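A hedged usage sketch for `add_sender` above, following the azure-eventhub 1.x client layout; the connection string, hub name and partition are placeholders, and the exact constructor is an assumption.
from azure.eventhub import EventHubClient, EventData  # assumed 1.x package layout

client = EventHubClient.from_connection_string("<connection-string>", eventhub="<hub-name>")
sender = client.add_sender(partition="0", send_timeout=30)  # omit partition for round-robin
client.run()  # opens the added senders
try:
    sender.send(EventData(b"hello"))
finally:
    client.stop()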
def event_params(segments, params, band=None, n_fft=None, slopes=None,
prep=None, parent=None):
"""Compute event parameters.
Parameters
----------
segments : instance of wonambi.trans.select.Segments
list of segments, with time series and metadata
params : dict of bool, or str
'dur', 'minamp', 'maxamp', 'ptp', 'rms', 'power', 'peakpf', 'energy',
'peakef'. If 'all', a dict will be created with these keys and all
values as True, so that all parameters are returned.
band : tuple of float
band of interest for power and energy
n_fft : int
length of FFT. if shorter than input signal, signal is truncated; if
longer, signal is zero-padded to length
slopes : dict of bool
'avg_slope', 'max_slope', 'prep', 'invert'
prep : dict of bool
same keys as params. if True, segment['trans_data'] will be used as dat
parent : QMainWindow
for use with GUI only
Returns
-------
list of dict
list of segments, with time series, metadata and parameters
"""
if parent is not None:
progress = QProgressDialog('Computing parameters', 'Abort',
0, len(segments) - 1, parent)
progress.setWindowModality(Qt.ApplicationModal)
param_keys = ['dur', 'minamp', 'maxamp', 'ptp', 'rms', 'power', 'peakpf',
'energy', 'peakef']
if params == 'all':
params = {k: 1 for k in param_keys}
if prep is None:
prep = {k: 0 for k in param_keys}
if band is None:
band = (None, None)
params_out = []
evt_output = False
for i, seg in enumerate(segments):
out = dict(seg)
dat = seg['data']
if params['dur']:
out['dur'] = float(dat.number_of('time')) / dat.s_freq
evt_output = True
if params['minamp']:
dat1 = dat
if prep['minamp']:
dat1 = seg['trans_data']
out['minamp'] = math(dat1, operator=_amin, axis='time')
evt_output = True
if params['maxamp']:
dat1 = dat
if prep['maxamp']:
dat1 = seg['trans_data']
out['maxamp'] = math(dat1, operator=_amax, axis='time')
evt_output = True
if params['ptp']:
dat1 = dat
if prep['ptp']:
dat1 = seg['trans_data']
out['ptp'] = math(dat1, operator=_ptp, axis='time')
evt_output = True
if params['rms']:
dat1 = dat
if prep['rms']:
dat1 = seg['trans_data']
out['rms'] = math(dat1, operator=(square, _mean, sqrt),
axis='time')
evt_output = True
for pw, pk in [('power', 'peakpf'), ('energy', 'peakef')]:
if params[pw] or params[pk]:
evt_output = True
if prep[pw] or prep[pk]:
prep_pw, prep_pk = band_power(seg['trans_data'], band,
scaling=pw, n_fft=n_fft)
if not (prep[pw] and prep[pk]):
raw_pw, raw_pk = band_power(dat, band,
scaling=pw, n_fft=n_fft)
if prep[pw]:
out[pw] = prep_pw
else:
out[pw] = raw_pw
if prep[pk]:
out[pk] = prep_pk
else:
out[pk] = raw_pk
if slopes:
evt_output = True
out['slope'] = {}
dat1 = dat
if slopes['prep']:
dat1 = seg['trans_data']
if slopes['invert']:
dat1 = math(dat1, operator=negative, axis='time')
if slopes['avg_slope'] and slopes['max_slope']:
level = 'all'
elif slopes['avg_slope']:
level = 'average'
else:
level = 'maximum'
for chan in dat1.axis['chan'][0]:
d = dat1(chan=chan)[0]
out['slope'][chan] = get_slopes(d, dat.s_freq, level=level)
if evt_output:
timeline = dat.axis['time'][0]
out['start'] = timeline[0]
out['end'] = timeline[-1]
params_out.append(out)
if parent:
progress.setValue(i)
if progress.wasCanceled():
msg = 'Analysis canceled by user.'
parent.statusBar().showMessage(msg)
return
if parent:
progress.close()
return params_out
|
Compute event parameters.
Parameters
----------
segments : instance of wonambi.trans.select.Segments
list of segments, with time series and metadata
params : dict of bool, or str
'dur', 'minamp', 'maxamp', 'ptp', 'rms', 'power', 'peakpf', 'energy',
'peakef'. If 'all', a dict will be created with these keys and all
values as True, so that all parameters are returned.
band : tuple of float
band of interest for power and energy
n_fft : int
length of FFT. if shorter than input signal, signal is truncated; if
longer, signal is zero-padded to length
slopes : dict of bool
'avg_slope', 'max_slope', 'prep', 'invert'
prep : dict of bool
same keys as params. if True, segment['trans_data'] will be used as dat
parent : QMainWindow
for use with GUI only
Returns
-------
list of dict
list of segments, with time series, metadata and parameters
|
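A hedged call sketch for `event_params` above; `segments` is assumed to be a wonambi Segments instance prepared elsewhere, and every parameter key is listed because the function indexes all of them.
# Sketch only: `segments` comes from wonambi's event selection, not shown here.
params = {'dur': True, 'ptp': True, 'power': True, 'peakpf': False,
          'minamp': False, 'maxamp': False, 'rms': False,
          'energy': False, 'peakef': False}
results = event_params(segments, params, band=(10, 16), n_fft=256)
for seg in results:
    print(seg['start'], seg['end'], seg['dur'], seg['power'])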
def remove_builder(cls, builder_name: str):
"""Remove a registered builder `builder_name`.
No reason to use this except for tests.
"""
cls.builders.pop(builder_name, None)
for hook_spec in cls.hooks.values():
hook_spec.pop(builder_name, None)
|
Remove a registered builder `builder_name`.
No reason to use this except for tests.
|
def return_hdr(self):
"""Return the header for further use.
Returns
-------
subj_id : str
subject identification code
start_time : datetime
start time of the dataset
s_freq : float
sampling frequency
chan_name : list of str
list of all the channels
n_samples : int
number of samples in the dataset
orig : dict
additional information taken directly from the header
"""
orig = {}
for xml_file in self.filename.glob('*.xml'):
if xml_file.stem[0] != '.':
orig[xml_file.stem] = parse_xml(str(xml_file))
signals = sorted(self.filename.glob('signal*.bin'))
for signal in signals:
block_hdr, i_data = read_all_block_hdr(signal)
self._signal.append(signal)
self._block_hdr.append(block_hdr)
self._i_data.append(i_data)
n_samples = asarray([x['n_samples'][0] for x in block_hdr], 'q')
self._n_samples.append(n_samples)
try:
subj_id = orig['subject'][0][0]['name']
except KeyError:
subj_id = ''
try:
start_time = datetime.strptime(orig['info'][0]['recordTime'][:26],
'%Y-%m-%dT%H:%M:%S.%f')
except KeyError:
start_time = DEFAULT_DATETIME
self.start_time = start_time
videos = (list(self.filename.glob('*.mp4')) + # as described in specs
list(self.filename.glob('*.mov'))) # actual example
videos = [x for x in videos if x.stem[0] != '.'] # remove hidden files
if len(videos) > 1:
lg.warning('More than one video present: ' + ', '.join(str(v) for v in videos))
self._videos = videos
# it only works if they have all the same sampling frequency
s_freq = [x[0]['freq'][0] for x in self._block_hdr]
assert all([x == s_freq[0] for x in s_freq])
SIGNAL = 0
s_freq = self._block_hdr[SIGNAL][0]['freq'][0]
n_samples = sum(self._n_samples[SIGNAL])
chan_name, self._nchan_signal1 = _read_chan_name(orig)
self._orig = orig
return subj_id, start_time, s_freq, chan_name, n_samples, orig
|
Return the header for further use.
Returns
-------
subj_id : str
subject identification code
start_time : datetime
start time of the dataset
s_freq : float
sampling frequency
chan_name : list of str
list of all the channels
n_samples : int
number of samples in the dataset
orig : dict
additional information taken directly from the header
|
def makeDigraph(automaton, inputAsString=repr,
outputAsString=repr,
stateAsString=repr):
"""
Produce a L{graphviz.Digraph} object from an automaton.
"""
digraph = graphviz.Digraph(graph_attr={'pack': 'true',
'dpi': '100'},
node_attr={'fontname': 'Menlo'},
edge_attr={'fontname': 'Menlo'})
for state in automaton.states():
if state is automaton.initialState:
stateShape = "bold"
fontName = "Menlo-Bold"
else:
stateShape = ""
fontName = "Menlo"
digraph.node(stateAsString(state),
fontname=fontName,
shape="ellipse",
style=stateShape,
color="blue")
for n, eachTransition in enumerate(automaton.allTransitions()):
inState, inputSymbol, outState, outputSymbols = eachTransition
thisTransition = "t{}".format(n)
inputLabel = inputAsString(inputSymbol)
port = "tableport"
table = tableMaker(inputLabel, [outputAsString(outputSymbol)
for outputSymbol in outputSymbols],
port=port)
digraph.node(thisTransition,
label=_gvhtml(table), margin="0.2", shape="none")
digraph.edge(stateAsString(inState),
'{}:{}:w'.format(thisTransition, port),
arrowhead="none")
digraph.edge('{}:{}:e'.format(thisTransition, port),
stateAsString(outState))
return digraph
|
Produce a L{graphviz.Digraph} object from an automaton.
|
def transpose_note(note, transpose, scale="C"):
""" Transpose a note
:param str note: note to transpose
:param int transpose: amount to transpose by
:param str scale: key scale
:rtype: str
:return: transposed note
"""
val = note_to_val(note)
val += transpose
return val_to_note(val, scale)
|
Transpose a note
:param str note: note to transpose
:param int transpose: amount to transpose by
:param str scale: key scale
:rtype: str
:return: transposed note
|
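A brief usage sketch for `transpose_note`; the exact spelling of the returned note depends on the `val_to_note` helper, so the outputs in the comments are indicative only.
print(transpose_note("C", 2))              # two semitones up, e.g. "D"
print(transpose_note("A", -2, scale="G"))  # two semitones down, spelled against the G scale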
def app_routes(app):
""" list of route of an app
"""
_routes = []
for rule in app.url_map.iter_rules():
_routes.append({
'path': rule.rule,
'name': rule.endpoint,
'methods': list(rule.methods)
})
return jsonify({'routes': _routes})
|
List the routes of an app.
|
def forward_ad(node, wrt, preserve_result=False, check_dims=True):
"""Perform forward-mode AD on an AST.
This function analyses the AST to determine which variables are active and
proceeds by taking the naive derivative. Before returning the primal and
adjoint it annotates push and pop statements as such.
Args:
node: A `FunctionDef` AST node.
wrt: A tuple of argument indices with respect to which we take the
derivative.
preserve_result: A boolean indicating whether the original
non-differentiated function value should be returned
check_dims: A boolean indicating whether the provided derivatives should
have the same shape as their corresponding arguments.
Returns:
mod: A `Module` node containing the naive primal and adjoint of the
function which can be fed to the `split` and `joint` functions.
required: A list of tuples of functions and argument indices. These
functions were called by the function but did not have an adjoint.
"""
if not isinstance(node, gast.FunctionDef):
raise TypeError
# Activity analysis
cfg_obj = cfg.CFG.build_cfg(node)
cfg.Active(range(len(node.args.args))).visit(cfg_obj.entry)
# Build forward mode function
fad = ForwardAD(wrt, preserve_result, check_dims)
node = fad.visit(node)
# Annotate stacks
node = annotate.find_stacks(node)
# Clean up naive forward-mode fcode
node = gast.Module([node])
anno.clearanno(node)
return node, fad.required
|
Perform forward-mode AD on an AST.
This function analyses the AST to determine which variables are active and
proceeds by taking the naive derivative. Before returning the primal and
adjoint it annotates push and pop statements as such.
Args:
node: A `FunctionDef` AST node.
wrt: A tuple of argument indices with respect to which we take the
derivative.
preserve_result: A boolean indicating whether the original
non-differentiated function value should be returned
check_dims: A boolean indicating whether the provided derivatives should
have the same shape as their corresponding arguments.
Returns:
mod: A `Module` node containing the naive primal and adjoint of the
function which can be fed to the `split` and `joint` functions.
required: A list of tuples of functions and argument indices. These
functions were called by the function but did not have an adjoint.
|
def parse(self, target):
""" Parse nested rulesets
and save it in cache.
"""
if isinstance(target, ContentNode):
if target.name:
self.parent = target
self.name.parse(self)
self.name += target.name
target.ruleset.append(self)
self.root.cache['rset'][str(self.name).split()[0]].add(self)
super(Ruleset, self).parse(target)
|
Parse nested rulesets
and save it in cache.
|
def delete_category(category_id):
"""Delete a Category with id = category_id.
:param category_id: PYBOSSA Category ID
:type category_id: integer
:returns: True -- the response status code
"""
try:
res = _pybossa_req('delete', 'category', category_id)
if type(res).__name__ == 'bool':
return True
else:
return res
except: # pragma: no cover
raise
|
Delete a Category with id = category_id.
:param category_id: PYBOSSA Category ID
:type category_id: integer
:returns: True -- the response status code
|
def update_virtual_meta(self):
"""Will read back the virtual column etc, written by :func:`DataFrame.write_virtual_meta`. This will be done when opening a DataFrame."""
import astropy.units
try:
path = os.path.join(self.get_private_dir(create=False), "virtual_meta.yaml")
if os.path.exists(path):
meta_info = vaex.utils.read_json_or_yaml(path)
if 'virtual_columns' not in meta_info:
return
self.virtual_columns.update(meta_info["virtual_columns"])
self.variables.update(meta_info["variables"])
self.ucds.update(meta_info["ucds"])
self.descriptions.update(meta_info["descriptions"])
units = {key: astropy.units.Unit(value) for key, value in meta_info["units"].items()}
self.units.update(units)
except Exception:
logger.exception("non fatal error")
|
Will read back the virtual column etc, written by :func:`DataFrame.write_virtual_meta`. This will be done when opening a DataFrame.
|
def install_handler(self, event_type, handler, user_handle=None):
"""Installs handlers for event callbacks in this resource.
:param event_type: Logical event identifier.
:param handler: Interpreted as a valid reference to a handler to be installed by a client application.
:param user_handle: A value specified by an application that can be used for identifying handlers
uniquely for an event type.
:returns: user handle (a ctypes object)
"""
return self.visalib.install_visa_handler(self.session, event_type, handler, user_handle)
|
Installs handlers for event callbacks in this resource.
:param event_type: Logical event identifier.
:param handler: Interpreted as a valid reference to a handler to be installed by a client application.
:param user_handle: A value specified by an application that can be used for identifying handlers
uniquely for an event type.
:returns: user handle (a ctypes object)
|
def linewidth(self, linewidth=None):
"""Returns or sets (if a value is provided) the width of the series'
line.
:param Number linewidth: If given, the series' linewidth will be set to\
this.
:rtype: ``Number``"""
if linewidth is None:
return self._linewidth
else:
if not is_numeric(linewidth):
raise TypeError(
"linewidth must be number, not '%s'" % str(linewidth)
)
self._linewidth = linewidth
|
Returns or sets (if a value is provided) the width of the series'
line.
:param Number linewidth: If given, the series' linewidth will be set to\
this.
:rtype: ``Number``
|
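`linewidth` acts as both getter and setter; a short hedged sketch where `series` stands for any object exposing this method.
series.linewidth(2.5)      # set the line width
print(series.linewidth())  # get it back -> 2.5
series.linewidth("thick")  # raises TypeError: linewidth must be number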
def declare(self, queue='', virtual_host='/', passive=False, durable=False,
auto_delete=False, arguments=None):
"""Declare a Queue.
:param str queue: Queue name
:param str virtual_host: Virtual host name
:param bool passive: Do not create
:param bool durable: Durable queue
:param bool auto_delete: Automatically delete when not in use
:param dict|None arguments: Queue key/value arguments
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
if passive:
return self.get(queue, virtual_host=virtual_host)
queue_payload = json.dumps(
{
'durable': durable,
'auto_delete': auto_delete,
'arguments': arguments or {},
'vhost': virtual_host
}
)
return self.http_client.put(
API_QUEUE % (
quote(virtual_host, ''),
queue
),
payload=queue_payload)
|
Declare a Queue.
:param str queue: Queue name
:param str virtual_host: Virtual host name
:param bool passive: Do not create
:param bool durable: Durable queue
:param bool auto_delete: Automatically delete when not in use
:param dict|None arguments: Queue key/value arguments
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
|
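A hedged usage sketch for `declare` above, assuming an amqpstorm-style management client; the import path, URL and credentials are assumptions.
from amqpstorm.management import ManagementApi  # assumed import path

api = ManagementApi('http://localhost:15672', 'guest', 'guest')
api.queue.declare('my_queue', virtual_host='/', durable=True,
                  arguments={'x-message-ttl': 60000})
info = api.queue.declare('my_queue', virtual_host='/', passive=True)  # passive=True only fetches
print(info)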
def run(self):
"""Build extensions in build directory, then copy if --inplace"""
old_inplace, self.inplace = self.inplace, 0
_build_ext.run(self)
self.inplace = old_inplace
if old_inplace:
self.copy_extensions_to_source()
|
Build extensions in build directory, then copy if --inplace
|
def updateData(self, state_data, action_data, reward_data):
""" Updates the data used by the renderer.
"""
# self.dataLock.acquire()
self.state_data[:, self.updates] = state_data
self.action_data[:, self.updates] = action_data
self.reward_data[0, self.updates] = reward_data
self.updates += 1
self._render()
|
Updates the data used by the renderer.
|
def file_key_retire( blockchain_id, file_key, config_path=CONFIG_PATH, wallet_keys=None ):
"""
Retire the given key. Move it to the head of the old key bundle list
@file_key should be data returned by file_key_lookup
Return {'status': True} on success
Return {'error': ...} on error
"""
config_dir = os.path.dirname(config_path)
url = file_url_expired_keys( blockchain_id )
proxy = blockstack_client.get_default_proxy( config_path=config_path )
old_key_bundle_res = blockstack_client.data_get( url, wallet_keys=wallet_keys, proxy=proxy )
if 'error' in old_key_bundle_res:
log.warn('Failed to get old key bundle: %s' % old_key_bundle_res['error'])
old_key_list = []
else:
old_key_list = old_key_bundle_res['data']['old_keys']
for old_key in old_key_list:
if old_key['key_id'] == file_key['key_id']:
# already present
log.warning("Key %s is already retired" % file_key['key_id'])
return {'status': True}
old_key_list.insert(0, file_key )
res = blockstack_client.data_put( url, {'old_keys': old_key_list}, wallet_keys=wallet_keys, proxy=proxy )
if 'error' in res:
log.error("Failed to append to expired key bundle: %s" % res['error'])
return {'error': 'Failed to append to expired key list'}
return {'status': True}
|
Retire the given key. Move it to the head of the old key bundle list
@file_key should be data returned by file_key_lookup
Return {'status': True} on success
Return {'error': ...} on error
|
def sum_sp_values(self):
"""
return system level values (spa + spb)
input:
"values": {
"spa": 385,
"spb": 505
},
return:
"values": {
"0": 890
},
"""
if self.values is None:
ret = IdValues()
else:
ret = IdValues({'0': sum(int(x) for x in self.values.values())})
return ret
|
return system level values (spa + spb)
input:
"values": {
"spa": 385,
"spb": 505
},
return:
"values": {
"0": 890
},
|
def prepare_data(data_dir, fileroot, block_pct_tokens_thresh=0.1):
"""
Prepare data for a single HTML + gold standard blocks example, uniquely
identified by ``fileroot``.
Args:
data_dir (str)
fileroot (str)
block_pct_tokens_thresh (float): must be in [0.0, 1.0]
Returns:
Tuple[str, Tuple[np.array[int], np.array[int], List[str]], Tuple[np.array[int], np.array[int], List[str]]]:
The first element is simply the raw html as a string. The second and
third elements are 3-tuples for content and comments, respectively,
where the first element is a numpy array of 1s and 0s whose values
indicate whether a given block is considered non-content; the second
element is a numpy integer array whose values are
the total number of tokens in each block; and the third element is
a flat list of content or comment tokens as strings, concatenated
from all blocks.
See Also:
:func:`prepare_all_data`
"""
if not 0.0 <= block_pct_tokens_thresh <= 1.0:
raise ValueError('block_pct_tokens_thresh must be in the range [0.0, 1.0]')
html = read_html_file(data_dir, fileroot)
blocks = read_gold_standard_blocks_file(data_dir, fileroot, split_blocks=True)
content_blocks = []
comments_blocks = []
for block in blocks:
block_split = block.split('\t')
num_block_tokens = len(block_split[2].split())
# total number of tokens in block is used as weights
content_blocks.append(
(float(block_split[0]), num_block_tokens, block_split[3].split()))
comments_blocks.append(
(float(block_split[1]), num_block_tokens, block_split[4].split()))
parsed_content_blocks = _parse_content_or_comments_blocks(
content_blocks, block_pct_tokens_thresh)
parsed_comments_blocks = _parse_content_or_comments_blocks(
comments_blocks, block_pct_tokens_thresh)
return (html, parsed_content_blocks, parsed_comments_blocks)
|
Prepare data for a single HTML + gold standard blocks example, uniquely
identified by ``fileroot``.
Args:
data_dir (str)
fileroot (str)
block_pct_tokens_thresh (float): must be in [0.0, 1.0]
Returns:
Tuple[str, Tuple[np.array[int], np.array[int], List[str]], Tuple[np.array[int], np.array[int], List[str]]]:
The first element is simply the raw html as a string. The second and
third elements are 3-tuples for content and comments, respectively,
where the first element is a numpy array of 1s and 0s whose values
indicate whether a given block is considered non-content; the second
element is a numpy integer array whose values are
the total number of tokens in each block; and the third element is
a flat list of content or comment tokens as strings, concatenated
from all blocks.
See Also:
:func:`prepare_all_data`
|
def req_withdraw(self, address, amount, currency, fee=0, addr_tag="", _async=False):
"""
Request a cryptocurrency withdrawal.
:param address:
:param amount:
:param currency: btc, ltc, bcc, eth, etc. (currencies supported by Huobi Pro)
:param fee:
:param addr_tag:
:return: {
"status": "ok",
"data": 700
}
"""
params = {
'address': address,
'amount': amount,
'currency': currency,
'fee': fee,
'addr-tag': addr_tag
}
path = '/v1/dw/withdraw/api/create'
return api_key_post(params, path, _async=_async)
|
Request a cryptocurrency withdrawal.
:param address:
:param amount:
:param currency: btc, ltc, bcc, eth, etc. (currencies supported by Huobi Pro)
:param fee:
:param addr_tag:
:return: {
"status": "ok",
"data": 700
}
|
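A hedged usage sketch for `req_withdraw`; `client` stands for whatever object exposes the method, with API keys and host configured elsewhere, and the address is a placeholder.
resp = client.req_withdraw(
    address="<withdrawal-address>",  # placeholder
    amount=0.5,
    currency="btc",
    fee=0.0001,
)
print(resp)  # per the docstring, e.g. {"status": "ok", "data": 700}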
def Liu(Tb, Tc, Pc):
r'''Calculates enthalpy of vaporization at the normal boiling point using
the Liu [1]_ correlation, and a chemical's critical temperature, pressure
and boiling point.
The enthalpy of vaporization is given by:
.. math::
\Delta H_{vap} = RT_b \left[ \frac{T_b}{220}\right]^{0.0627} \frac{
(1-T_{br})^{0.38} \ln(P_c/P_A)}{1-T_{br} + 0.38 T_{br} \ln T_{br}}
Parameters
----------
Tb : float
Boiling temperature of the fluid [K]
Tc : float
Critical temperature of fluid [K]
Pc : float
Critical pressure of fluid [Pa]
Returns
-------
Hvap : float
Enthalpy of vaporization, [J/mol]
Notes
-----
This formulation can be adjusted for lower boiling points, due to the use
of a rationalized pressure relationship. The formulation is taken from
the original article.
A correction for alcohols and organic acids based on carbon number,
which only modifies the boiling point, is available but not implemented.
No sample calculations are available in the article.
Internal units: Pa and K
Examples
--------
Same problem as in Perry's examples
>>> Liu(294.0, 466.0, 5.55E6)
26378.566319606754
References
----------
.. [1] LIU, ZHI-YONG. "Estimation of Heat of Vaporization of Pure Liquid at
Its Normal Boiling Temperature." Chemical Engineering Communications
184, no. 1 (February 1, 2001): 221-28. doi:10.1080/00986440108912849.
'''
Tbr = Tb/Tc
return R*Tb*(Tb/220.)**0.0627*(1. - Tbr)**0.38*log(Pc/101325.) \
/ (1 - Tbr + 0.38*Tbr*log(Tbr))
|
r'''Calculates enthalpy of vaporization at the normal boiling point using
the Liu [1]_ correlation, and a chemical's critical temperature, pressure
and boiling point.
The enthalpy of vaporization is given by:
.. math::
\Delta H_{vap} = RT_b \left[ \frac{T_b}{220}\right]^{0.0627} \frac{
(1-T_{br})^{0.38} \ln(P_c/P_A)}{1-T_{br} + 0.38 T_{br} \ln T_{br}}
Parameters
----------
Tb : float
Boiling temperature of the fluid [K]
Tc : float
Critical temperature of fluid [K]
Pc : float
Critical pressure of fluid [Pa]
Returns
-------
Hvap : float
Enthalpy of vaporization, [J/mol]
Notes
-----
This formulation can be adjusted for lower boiling points, due to the use
of a rationalized pressure relationship. The formulation is taken from
the original article.
A correction for alcohols and organic acids based on carbon number,
which only modifies the boiling point, is available but not implemented.
No sample calculations are available in the article.
Internal units: Pa and K
Examples
--------
Same problem as in Perry's examples
>>> Liu(294.0, 466.0, 5.55E6)
26378.566319606754
References
----------
.. [1] LIU, ZHI-YONG. "Estimation of Heat of Vaporization of Pure Liquid at
Its Normal Boiling Temperature." Chemical Engineering Communications
184, no. 1 (February 1, 2001): 221-28. doi:10.1080/00986440108912849.
|
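A self-contained sketch of the Liu correlation above using only the standard library; R is the molar gas constant, and the last digits of the result depend on the exact value of R used.
from math import log

R = 8.314462618  # J/(mol*K)

def liu_hvap(Tb, Tc, Pc):
    # Enthalpy of vaporization at the normal boiling point, Liu (2001).
    Tbr = Tb / Tc
    return (R * Tb * (Tb / 220.)**0.0627 * (1. - Tbr)**0.38 * log(Pc / 101325.)
            / (1. - Tbr + 0.38 * Tbr * log(Tbr)))

print(liu_hvap(294.0, 466.0, 5.55E6))  # ~26378.6 J/mol, matching the docstring example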
def get_argument_starttime(self):
"""
Helper function to get starttime argument.
Raises exception if argument is missing.
Returns the starttime argument.
"""
try:
starttime = self.get_argument(constants.PARAM_STARTTIME)
return starttime
except tornado.web.MissingArgumentError as e:
raise Exception(e.log_message)
|
Helper function to get starttime argument.
Raises exception if argument is missing.
Returns the starttime argument.
|
def _parse_q2r(self, f):
"""Parse q2r output file
The format of q2r output is described at the mailing list below:
http://www.democritos.it/pipermail/pw_forum/2005-April/002408.html
http://www.democritos.it/pipermail/pw_forum/2008-September/010099.html
http://www.democritos.it/pipermail/pw_forum/2009-August/013613.html
https://www.mail-archive.com/pw_forum@pwscf.org/msg24388.html
"""
natom, dim, epsilon, borns = self._parse_parameters(f)
fc_dct = {'fc': self._parse_fc(f, natom, dim),
'dimension': dim,
'dielectric': epsilon,
'born': borns}
return fc_dct
|
Parse q2r output file
The format of q2r output is described at the mailing list below:
http://www.democritos.it/pipermail/pw_forum/2005-April/002408.html
http://www.democritos.it/pipermail/pw_forum/2008-September/010099.html
http://www.democritos.it/pipermail/pw_forum/2009-August/013613.html
https://www.mail-archive.com/pw_forum@pwscf.org/msg24388.html
|
def remove_namespace(self, ns_uri):
"""Removes the indicated namespace from this set."""
if not self.contains_namespace(ns_uri):
return
ni = self.__ns_uri_map.pop(ns_uri)
for prefix in ni.prefixes:
del self.__prefix_map[prefix]
|
Removes the indicated namespace from this set.
|
def add_arguments(parser, default_level=logging.INFO):
"""
Add arguments to an ArgumentParser or OptionParser for purposes of
grabbing a logging level.
"""
adder = (
getattr(parser, 'add_argument', None)
or getattr(parser, 'add_option')
)
adder(
'-l', '--log-level', default=default_level, type=log_level,
help="Set log level (DEBUG, INFO, WARNING, ERROR)")
|
Add arguments to an ArgumentParser or OptionParser for purposes of
grabbing a logging level.
|
def _parse_arguments():
"""Return a parser context result."""
parser = argparse.ArgumentParser(description="CMake AST Dumper")
parser.add_argument("filename", nargs=1, metavar=("FILE"),
help="read FILE")
return parser.parse_args()
|
Return a parser context result.
|
def score_x_of_a_kind_yatzy(dice: List[int], min_same_faces: int) -> int:
"""Similar to yahtzee, but only return the sum of the dice that satisfy min_same_faces
"""
for die, count in Counter(dice).most_common(1):
if count >= min_same_faces:
return die * min_same_faces
return 0
|
Similar to yahtzee, but only return the sum of the dice that satisfy min_same_faces
|
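A few quick calls to `score_x_of_a_kind_yatzy` above; note that only `min_same_faces` dice are scored even when more of them match.
print(score_x_of_a_kind_yatzy([5, 5, 5, 2, 1], min_same_faces=3))  # 15 (three fives)
print(score_x_of_a_kind_yatzy([4, 4, 4, 4, 1], min_same_faces=3))  # 12 (only three fours counted)
print(score_x_of_a_kind_yatzy([6, 5, 4, 3, 2], min_same_faces=3))  # 0  (no three of a kind)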
def do_mkdir(self, line):
"""mkdir DIRECTORY...
Creates one or more directories.
"""
args = self.line_to_args(line)
for filename in args:
filename = resolve_path(filename)
if not mkdir(filename):
print_err('Unable to create %s' % filename)
|
mkdir DIRECTORY...
Creates one or more directories.
|
def handle_http_error(self, response, custom_messages=None,
raise_for_status=False):
"""Converts service errors to Python exceptions
Parameters
----------
response : requests.Response
A service response.
custom_messages : dict, optional
A mapping of custom exception messages to HTTP status codes.
raise_for_status : bool, optional
If True, the requests library provides Python exceptions.
Returns
-------
None
"""
if not custom_messages:
custom_messages = {}
if response.status_code in custom_messages.keys():
raise errors.HTTPError(custom_messages[response.status_code])
if raise_for_status:
response.raise_for_status()
|
Converts service errors to Python exceptions
Parameters
----------
response : requests.Response
A service response.
custom_messages : dict, optional
A mapping of custom exception messages to HTTP status codes.
raise_for_status : bool, optional
If True, the requests library provides Python exceptions.
Returns
-------
None
|
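A hedged usage sketch for `handle_http_error`; `service` stands for any object exposing the method, and the URL and custom messages are illustrative.
import requests  # the method expects a requests.Response

response = requests.get("https://example.com/api/items/42")
service.handle_http_error(
    response,
    custom_messages={404: "Item 42 does not exist", 403: "Insufficient permissions"},
    raise_for_status=True,  # fall back to requests' own exceptions for other error codes
)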
def from_point(cls, point, network=BitcoinMainNet, **kwargs):
"""Create a PublicKey from a point on the SECP256k1 curve.
:param point: A point on the SECP256k1 curve.
:type point: SECP256k1.point
"""
verifying_key = VerifyingKey.from_public_point(point, curve=SECP256k1)
return cls.from_verifying_key(verifying_key, network=network, **kwargs)
|
Create a PublicKey from a point on the SECP256k1 curve.
:param point: A point on the SECP256k1 curve.
:type point: SECP256k1.point
|
def _split_index(self, key):
"""
Partitions key into key and deep dimension groups. If only key
indices are supplied, the data is indexed with an empty tuple.
Keys with fewer indices than there are dimensions will be padded.
"""
if not isinstance(key, tuple):
key = (key,)
elif key == ():
return (), ()
if key[0] is Ellipsis:
num_pad = self.ndims - len(key) + 1
key = (slice(None),) * num_pad + key[1:]
elif len(key) < self.ndims:
num_pad = self.ndims - len(key)
key = key + (slice(None),) * num_pad
map_slice = key[:self.ndims]
if self._check_key_type:
map_slice = self._apply_key_type(map_slice)
if len(key) == self.ndims:
return map_slice, ()
else:
return map_slice, key[self.ndims:]
|
Partitions key into key and deep dimension groups. If only key
indices are supplied, the data is indexed with an empty tuple.
Keys with fewer indices than there are dimensions will be padded.
|
def is_bday(date, bday=None):
"""
Return true iff the given date is a business day.
Parameters
----------
date : :class:`pandas.Timestamp`
Any value that can be converted to a pandas Timestamp--e.g.,
'2012-05-01', dt.datetime(2012, 5, 1, 3)
bday : :class:`pandas.tseries.offsets.CustomBusinessDay`
Defaults to `CustomBusinessDay(calendar=USFederalHolidayCalendar())`.
Pass this parameter in performance-sensitive contexts, such
as when calling this function in a loop. The creation of the `CustomBusinessDay`
object is the performance bottleneck of this function.
Cf. `pandas.tseries.offsets.CustomBusinessDay
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#custom-business-days-experimental>`_.
Returns
-------
val : bool
True iff `date` is a business day
"""
_date = Timestamp(date)
if bday is None:
bday = CustomBusinessDay(calendar=USFederalHolidayCalendar())
return _date == (_date + bday) - bday
|
Return true iff the given date is a business day.
Parameters
----------
date : :class:`pandas.Timestamp`
Any value that can be converted to a pandas Timestamp--e.g.,
'2012-05-01', dt.datetime(2012, 5, 1, 3)
bday : :class:`pandas.tseries.offsets.CustomBusinessDay`
Defaults to `CustomBusinessDay(calendar=USFederalHolidayCalendar())`.
Pass this parameter in performance-sensitive contexts, such
as when calling this function in a loop. The creation of the `CustomBusinessDay`
object is the performance bottleneck of this function.
Cf. `pandas.tseries.offsets.CustomBusinessDay
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#custom-business-days-experimental>`_.
Returns
-------
val : bool
True iff `date` is a business day
|
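As the docstring advises, building the `CustomBusinessDay` once and reusing it avoids the main performance cost; a short sketch.
from pandas.tseries.holiday import USFederalHolidayCalendar
from pandas.tseries.offsets import CustomBusinessDay

bday = CustomBusinessDay(calendar=USFederalHolidayCalendar())
dates = ['2012-05-01', '2012-05-05', '2012-07-04']  # Tuesday, Saturday, Independence Day
print([is_bday(d, bday=bday) for d in dates])       # [True, False, False]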
def delete(self, *args, **kwargs):
"""
Deletes the video from youtube
Raises:
OperationError
"""
api = Api()
# Authentication is required for deletion
api.authenticate()
# Send API request, raises OperationError on unsuccessful deletion
api.delete_video(self.video_id)
# Call the super method
return super(Video, self).delete(*args, **kwargs)
|
Deletes the video from youtube
Raises:
OperationError
|
def mount(nbd, root=None):
'''
Pass in the nbd connection device location, mount all partitions and return
a dict of mount points
CLI Example:
.. code-block:: bash
salt '*' qemu_nbd.mount /dev/nbd0
'''
__salt__['cmd.run'](
'partprobe {0}'.format(nbd),
python_shell=False,
)
ret = {}
if root is None:
root = os.path.join(
tempfile.gettempdir(),
'nbd',
os.path.basename(nbd)
)
for part in glob.glob('{0}p*'.format(nbd)):
m_pt = os.path.join(root, os.path.basename(part))
time.sleep(1)
mnt = __salt__['mount.mount'](m_pt, part, True)
if mnt is not True:
continue
ret[m_pt] = part
return ret
|
Pass in the nbd connection device location, mount all partitions and return
a dict of mount points
CLI Example:
.. code-block:: bash
salt '*' qemu_nbd.mount /dev/nbd0
|
def where(self, inplace=False, **kwargs):
"""Return indices over every dimension that met the conditions.
Condition syntax:
*attribute* = value
Return indices that satisfy the condition where the attribute is equal
to the value
e.g. type_array = 'H'
*attribute* = list(value1, value2)
Return indices that satisfy the condition where the attribute is equal
to any of the value in the list.
e.g. type_array = ['H', 'O']
*dimension_index* = value: int
*dimension_index* = value: list(int)
Return only elements that correspond to the index in the specified dimension:
atom_index = 0
atom_index = [0, 1]
"""
masks = {k: np.ones(v, dtype='bool') for k,v in self.dimensions.items()}
def index_to_mask(index, n):
val = np.zeros(n, dtype='bool')
val[index] = True
return val
def masks_and(dict1, dict2):
return {k: dict1[k] & index_to_mask(dict2[k], len(dict1[k])) for k in dict1 }
for key in kwargs:
value = kwargs[key]
if key.endswith('_index'):
if isinstance(value, int):
value = [value]
dim = key[:-len('_index')]
m = self._propagate_dim(value, dim)
masks = masks_and(masks, m)
else:
attribute = self.get_attribute(key)
if isinstance(value, list):
mask = reduce(operator.or_, [attribute.value == m for m in value])
else:
mask = attribute.value == value
m = self._propagate_dim(mask, attribute.dim)
masks = masks_and(masks, m)
return masks
|
Return indices over every dimension that meet the conditions.
Condition syntax:
*attribute* = value
Return indices that satisfy the condition where the attribute is equal
to the value
e.g. type_array = 'H'
*attribute* = list(value1, value2)
Return indices that satisfy the condition where the attribute is equal
to any of the value in the list.
e.g. type_array = ['H', 'O']
*dimension_index* = value: int
*dimension_index* = value: list(int)
Return only elements that correspond to the index in the specified dimension:
atom_index = 0
atom_index = [0, 1]
|
def parse(cls, expression):
"""
Parse the given console command definition into a dict.
:param expression: The expression to parse
:type expression: str
:rtype: dict
"""
parsed = {"name": None, "arguments": [], "options": []}
if not expression.strip():
raise ValueError("Console command signature is empty.")
expression = expression.replace(os.linesep, "")
matches = re.match(r"[^\s]+", expression)
if not matches:
raise ValueError("Unable to determine command name from signature.")
name = matches.group(0)
parsed["name"] = name
tokens = re.findall(r"\{\s*(.*?)\s*\}", expression)
if tokens:
parsed.update(cls._parameters(tokens))
return parsed
|
Parse the given console command definition into a dict.
:param expression: The expression to parse
:type expression: str
:rtype: dict
|
def generate(self, id_or_uri):
"""
Generates and returns a random range.
Args:
id_or_uri:
ID or URI of range.
Returns:
dict: A dict containing a list with IDs.
"""
uri = self._client.build_uri(id_or_uri) + "/generate"
return self._client.get(uri)
|
Generates and returns a random range.
Args:
id_or_uri:
ID or URI of range.
Returns:
dict: A dict containing a list with IDs.
|
def parser(key = "default"):
"""Returns the parser for the given key, (e.g. 'ssh')"""
#Make sure we have a parser for that key. If we don't, then set
#one up if we know what parameters to use; otherwise return the
#default parser.
if key not in _parsers:
if key == "ssh":
_parsers["ssh"] = CodeParser(True, False)
else:
key = "default"
return _parsers[key]
|
Returns the parser for the given key, (e.g. 'ssh')
|
def _reads_per_position(bam_in, loci_file, out_dir):
"""
Create input for compute entropy
"""
data = Counter()
a = pybedtools.BedTool(bam_in)
b = pybedtools.BedTool(loci_file)
c = a.intersect(b, s=True, bed=True, wo=True)
for line in c:
end = int(line[1]) + 1 + int(line[2]) if line[5] == "+" else int(line[1]) + 1
start = int(line[1]) + 1 if line[5] == "+" else int(line[1]) + 1 + int(line[2])
side5 = "%s\t5p\t%s" % (line[15], start)
side3 = "%s\t3p\t%s" % (line[15], end)
data[side5] += 1
data[side3] += 1
counts_reads = op.join(out_dir, 'locus_readpos.counts')
with open(counts_reads, 'w') as out_handle:
for k in data:
print(k, file=out_handle, end="")
return counts_reads
|
Create input for compute entropy
|
def _parse_sentencetree(self, tree, parent_node_id=None, ignore_traces=True):
"""parse a sentence Tree into this document graph"""
def get_nodelabel(node):
if isinstance(node, nltk.tree.Tree):
return node.label()
elif isinstance(node, unicode):
return node.encode('utf-8')
else:
raise ValueError("Unexpected node type: {0}, {1}".format(type(node), node))
root_node_id = self._node_id
self.node[root_node_id]['label'] = get_nodelabel(tree)
for subtree in tree:
self._node_id += 1
node_label = get_nodelabel(subtree)
# unescape the node label, if necessary
node_label = PTB_BRACKET_UNESCAPE.get(node_label, node_label)
# TODO: refactor this, so we don't need to query this all the time
if ignore_traces and node_label == '-NONE-': # ignore tokens annotated for traces
continue
if isinstance(subtree, nltk.tree.Tree):
if len(subtree) > 1: # subtree is a syntactic category
node_attrs = {'label': node_label,
self.ns+':cat': node_label}
layers = {self.ns, self.ns+':syntax'}
else: # subtree represents a token and its POS tag
node_attrs = {'label': node_label}
layers = {self.ns}
edge_type = dg.EdgeTypes.dominance_relation
self.add_node(self._node_id, layers=layers,
attr_dict=node_attrs)
self.add_edge(root_node_id, self._node_id, edge_type=edge_type)
else: # isinstance(subtree, unicode); subtree is a token
# we'll have to modify the parent node of a token, since
# in NLTK Trees, even a leaf node (with its POS tag) is
# represented as a Tree (an iterator over a single unicode
# string), e.g. ``Tree('NNS', ['prices'])``
pos_tag = self.node[parent_node_id]['label']
token_attrs = {
'label': node_label, self.ns+':token': node_label,
self.ns+':pos': pos_tag}
self.node[parent_node_id].update(token_attrs)
self.tokens.append(parent_node_id)
if isinstance(subtree, nltk.tree.Tree):
self._parse_sentencetree(subtree, parent_node_id=self._node_id)
|
parse a sentence Tree into this document graph
|
def add_annotation(
self,
subj: URIRef,
pred: URIRef,
obj: Union[Literal, URIRef],
a_p: URIRef ,
a_o: Union[Literal, URIRef],
) -> BNode:
""" Adds annotation to rdflib graph.
The annotation axiom will be filled in if this is a new annotation for the triple.
Args:
subj: Entity subject to be annotated
pred: Entity's predicate anchor to be annotated
obj: Entity's object anchor to be annotated
a_p: Annotation predicate
a_o: Annotation object
Returns:
A BNode which is an address to the location in the RDF graph that is storing the
annotation information.
"""
bnode: BNode = self.triple2annotation_bnode.get( (subj, pred, obj) )
if not bnode:
a_s: BNode = BNode()
self.triple2annotation_bnode[(subj, pred, obj)]: BNode = a_s
self.g.add((a_s, RDF.type, OWL.Axiom))
self.g.add((a_s, OWL.annotatedSource, self.process_subj_or_pred(subj)))
self.g.add((a_s, OWL.annotatedProperty,self.process_subj_or_pred(pred)))
self.g.add((a_s, OWL.annotatedTarget, self.process_obj(obj)))
else:
a_s: BNode = bnode
self.g.add((a_s, self.process_subj_or_pred(a_p), self.process_obj(a_o)))
return bnode
|
Adds annotation to rdflib graph.
The annotation axiom will be filled in if this is a new annotation for the triple.
Args:
subj: Entity subject to be annotated
pred: Entity's predicate anchor to be annotated
obj: Entity's object anchor to be annotated
a_p: Annotation predicate
a_o: Annotation object
Returns:
A BNode which is an address to the location in the RDF graph that is storing the
annotation information.
|
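A short rdflib sketch of the structure `add_annotation` builds: a blank node typed as owl:Axiom that reifies the annotated triple and carries the annotation pair; the example namespace and predicate are placeholders.
from rdflib import Graph, Literal, Namespace, BNode, RDF, RDFS, OWL

EX = Namespace("http://example.org/")  # placeholder namespace
g = Graph()

subj, pred, obj = EX.gene1, RDFS.label, Literal("BRCA1")
axiom = BNode()
g.add((axiom, RDF.type, OWL.Axiom))
g.add((axiom, OWL.annotatedSource, subj))
g.add((axiom, OWL.annotatedProperty, pred))
g.add((axiom, OWL.annotatedTarget, obj))
g.add((axiom, EX.hasEvidence, Literal("PMID:123456")))  # the a_p / a_o pair, placeholder values

print(g.serialize(format="turtle"))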
def current_changed(self, index):
"""Stack index has changed"""
# count = self.get_stack_count()
# for btn in (self.filelist_btn, self.previous_btn, self.next_btn):
# btn.setEnabled(count > 1)
editor = self.get_current_editor()
if editor.lsp_ready and not editor.document_opened:
editor.document_did_open()
if index != -1:
editor.setFocus()
logger.debug("Set focus to: %s" % editor.filename)
else:
self.reset_statusbar.emit()
self.opened_files_list_changed.emit()
self.stack_history.refresh()
self.stack_history.remove_and_append(index)
# Needed to avoid an error generated after moving/renaming
# files outside Spyder while in debug mode.
# See issue 8749.
try:
logger.debug("Current changed: %d - %s" %
(index, self.data[index].editor.filename))
except IndexError:
pass
self.update_plugin_title.emit()
if editor is not None:
# Needed in order to handle the close of files open in a directory
# that has been renamed. See issue 5157
try:
self.current_file_changed.emit(self.data[index].filename,
editor.get_position('cursor'))
except IndexError:
pass
|
Stack index has changed
|
def calibrate_signal(signal, resp, fs, frange):
"""Given original signal and recording, spits out a calibrated signal"""
# remove dc offset from recorded response (synthesized orignal shouldn't have one)
dc = np.mean(resp)
resp = resp - dc
npts = len(signal)
f0 = np.ceil(frange[0] / (float(fs) / npts))
f1 = np.floor(frange[1] / (float(fs) / npts))
y = resp
# y = y/np.amax(y) # normalize
Y = np.fft.rfft(y)
x = signal
# x = x/np.amax(x) # normalize
X = np.fft.rfft(x)
H = Y / X
# still issues warning because all of Y/X is executed to selected answers from
# H = np.where(X.real!=0, Y/X, 1)
# H[:f0].real = 1
# H[f1:].real = 1
# H = smooth(H)
A = X / H
return np.fft.irfft(A)
|
Given original signal and recording, spits out a calibrated signal
|
def set_latency(self, latency):
"""Set client latency."""
self._client['config']['latency'] = latency
yield from self._server.client_latency(self.identifier, latency)
|
Set client latency.
|
def backup(file_name, jail=None, chroot=None, root=None):
'''
Export installed packages into yaml+mtree file
CLI Example:
.. code-block:: bash
salt '*' pkg.backup /tmp/pkg
jail
Backup packages from the specified jail. Note that this will run the
command within the jail, and so the path to the backup file will be
relative to the root of the jail
CLI Example:
.. code-block:: bash
salt '*' pkg.backup /tmp/pkg jail=<jail name or id>
chroot
Backup packages from the specified chroot (ignored if ``jail`` is
specified). Note that this will run the command within the chroot, and
so the path to the backup file will be relative to the root of the
chroot.
root
Backup packages from the specified root (ignored if ``jail`` is
specified). Note that this will run the command within the root, and
so the path to the backup file will be relative to the root of the
root.
CLI Example:
.. code-block:: bash
salt '*' pkg.backup /tmp/pkg chroot=/path/to/chroot
'''
ret = __salt__['cmd.run'](
_pkg(jail, chroot, root) + ['backup', '-d', file_name],
output_loglevel='trace',
python_shell=False
)
return ret.split('...')[1]
|
Export installed packages into yaml+mtree file
CLI Example:
.. code-block:: bash
salt '*' pkg.backup /tmp/pkg
jail
Backup packages from the specified jail. Note that this will run the
command within the jail, and so the path to the backup file will be
relative to the root of the jail
CLI Example:
.. code-block:: bash
salt '*' pkg.backup /tmp/pkg jail=<jail name or id>
chroot
Backup packages from the specified chroot (ignored if ``jail`` is
specified). Note that this will run the command within the chroot, and
so the path to the backup file will be relative to the root of the
chroot.
root
Backup packages from the specified root (ignored if ``jail`` is
specified). Note that this will run the command within the root, and
so the path to the backup file will be relative to the root of the
root.
CLI Example:
.. code-block:: bash
salt '*' pkg.backup /tmp/pkg chroot=/path/to/chroot
|
def has_role(item):
"""A :func:`.check` that is added that checks if the member invoking the
command has the role specified via the name or ID specified.
If a string is specified, you must give the exact name of the role, including
caps and spelling.
If an integer is specified, you must give the exact snowflake ID of the role.
If the message is invoked in a private message context then the check will
return ``False``.
This check raises one of two special exceptions, :exc:`.MissingRole` if the user
is missing a role, or :exc:`.NoPrivateMessage` if it is used in a private message.
Both inherit from :exc:`.CheckFailure`.
.. versionchanged:: 1.1.0
Raise :exc:`.MissingRole` or :exc:`.NoPrivateMessage`
instead of generic :exc:`.CheckFailure`
Parameters
-----------
item: Union[:class:`int`, :class:`str`]
The name or ID of the role to check.
"""
def predicate(ctx):
if not isinstance(ctx.channel, discord.abc.GuildChannel):
raise NoPrivateMessage()
if isinstance(item, int):
role = discord.utils.get(ctx.author.roles, id=item)
else:
role = discord.utils.get(ctx.author.roles, name=item)
if role is None:
raise MissingRole(item)
return True
return check(predicate)
|
A :func:`.check` that verifies the member invoking the command has the
role specified via its name or ID.
If a string is specified, you must give the exact name of the role, including
caps and spelling.
If an integer is specified, you must give the exact snowflake ID of the role.
If the message is invoked in a private message context then the check will
return ``False``.
This check raises one of two special exceptions, :exc:`.MissingRole` if the user
is missing a role, or :exc:`.NoPrivateMessage` if it is used in a private message.
Both inherit from :exc:`.CheckFailure`.
.. versionchanged:: 1.1.0
Raise :exc:`.MissingRole` or :exc:`.NoPrivateMessage`
instead of generic :exc:`.CheckFailure`
Parameters
-----------
item: Union[:class:`int`, :class:`str`]
The name or ID of the role to check.
|
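A usage sketch for the `has_role` check above in a discord.py 1.x style command; the prefix, role name and command body are placeholders.
from discord.ext import commands

bot = commands.Bot(command_prefix="!")

@bot.command()
@commands.has_role("Moderator")  # or an integer role ID
async def purge(ctx, count: int = 10):
    await ctx.send("Would purge {} messages here.".format(count))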
def certify_date(value, required=True):
"""
Certifier for datetime.date values.
:param value:
The value to be certified.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierTypeError:
The type is invalid
"""
if certify_required(
value=value,
required=required,
):
return
if not isinstance(value, date):
raise CertifierTypeError(
message="expected timestamp (date∂), but value is of type {cls!r}".format(
cls=value.__class__.__name__),
value=value,
required=required,
)
|
Certifier for datetime.date values.
:param value:
The value to be certified.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierTypeError:
The type is invalid
|
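A brief usage sketch for `certify_date`, assuming `certify_required` short-circuits only when a missing value is acceptable.
from datetime import date

certify_date(date(2019, 7, 1))        # passes silently
certify_date(None, required=False)    # allowed when not required
certify_date("2019-07-01")            # raises CertifierTypeError: a string is not a date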
def _header_string(basis_dict):
'''Creates a header with information about a basis set
Information includes description, revision, etc, but not references
'''
tw = textwrap.TextWrapper(initial_indent='', subsequent_indent=' ' * 20)
header = '-' * 70 + '\n'
header += ' Basis Set Exchange\n'
header += ' Version ' + version() + '\n'
header += ' ' + _main_url + '\n'
header += '-' * 70 + '\n'
header += ' Basis set: ' + basis_dict['name'] + '\n'
header += tw.fill(' Description: ' + basis_dict['description']) + '\n'
header += ' Role: ' + basis_dict['role'] + '\n'
header += tw.fill(' Version: {} ({})'.format(basis_dict['version'],
basis_dict['revision_description'])) + '\n'
header += '-' * 70 + '\n'
return header
|
Creates a header with information about a basis set
Information includes description, revision, etc, but not references
|
def dumps(self, obj, salt=None):
"""Returns a signed string serialized with the internal serializer.
The return value can be either a byte or unicode string depending
on the format of the internal serializer.
"""
payload = want_bytes(self.dump_payload(obj))
rv = self.make_signer(salt).sign(payload)
if self.is_text_serializer:
rv = rv.decode('utf-8')
return rv
|
Returns a signed string serialized with the internal serializer.
The return value can be either a byte or unicode string depending
on the format of the internal serializer.
|
def prepare_request_body(self,
private_key=None,
subject=None,
issuer=None,
audience=None,
expires_at=None,
issued_at=None,
extra_claims=None,
body='',
scope=None,
include_client_id=False,
**kwargs):
"""Create and add a JWT assertion to the request body.
:param private_key: Private key used for signing and encrypting.
Must be given as a string.
:param subject: (sub) The principal that is the subject of the JWT,
i.e. which user is the token requested on behalf of.
For example, ``foo@example.com``.
:param issuer: (iss) The JWT MUST contain an "iss" (issuer) claim that
contains a unique identifier for the entity that issued
the JWT. For example, ``your-client@provider.com``.
:param audience: (aud) A value identifying the authorization server as an
intended audience, e.g.
``https://provider.com/oauth2/token``.
:param expires_at: A unix expiration timestamp for the JWT. Defaults
to an hour from now, i.e. ``time.time() + 3600``.
:param issued_at: A unix timestamp of when the JWT was created.
Defaults to now, i.e. ``time.time()``.
:param extra_claims: A dict of additional claims to include in the JWT.
:param body: Existing request body (URL encoded string) to embed parameters
into. This may contain extra parameters. Default ''.
:param scope: The scope of the access request.
:param include_client_id: `True` to send the `client_id` in the
body of the upstream request. This is required
if the client is not authenticating with the
authorization server as described in
`Section 3.2.1`_. False otherwise (default).
:type include_client_id: Boolean
:param not_before: A unix timestamp after which the JWT may be used.
Not included unless provided. *
:param jwt_id: A unique JWT token identifier. Not included unless
provided. *
:param kwargs: Extra credentials to include in the token request.
Parameters marked with a `*` above are not explicit arguments in the
function signature, but are specially documented arguments for items
appearing in the generic `**kwargs` keyworded input.
The "scope" parameter may be used, as defined in the Assertion
Framework for OAuth 2.0 Client Authentication and Authorization Grants
[I-D.ietf-oauth-assertions] specification, to indicate the requested
scope.
Authentication of the client is optional, as described in
`Section 3.2.1`_ of OAuth 2.0 [RFC6749] and consequently, the
"client_id" is only needed when a form of client authentication that
relies on the parameter is used.
The following non-normative example demonstrates an Access Token
Request with a JWT as an authorization grant (with extra line breaks
for display purposes only):
.. code-block: http
POST /token.oauth2 HTTP/1.1
Host: as.example.com
Content-Type: application/x-www-form-urlencoded
grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer
&assertion=eyJhbGciOiJFUzI1NiJ9.
eyJpc3Mi[...omitted for brevity...].
J9l-ZhwP[...omitted for brevity...]
.. _`Section 3.2.1`: https://tools.ietf.org/html/rfc6749#section-3.2.1
"""
import jwt
key = private_key or self.private_key
if not key:
raise ValueError('An encryption key must be supplied to make JWT'
' token requests.')
claim = {
'iss': issuer or self.issuer,
'aud': audience or self.audience,
'sub': subject or self.subject,
'exp': int(expires_at or time.time() + 3600),
'iat': int(issued_at or time.time()),
}
for attr in ('iss', 'aud', 'sub'):
if claim[attr] is None:
raise ValueError(
'Claim must include %s but none was given.' % attr)
if 'not_before' in kwargs:
claim['nbf'] = kwargs.pop('not_before')
if 'jwt_id' in kwargs:
claim['jti'] = kwargs.pop('jwt_id')
claim.update(extra_claims or {})
assertion = jwt.encode(claim, key, 'RS256')
assertion = to_unicode(assertion)
kwargs['client_id'] = self.client_id
kwargs['include_client_id'] = include_client_id
return prepare_token_request(self.grant_type,
body=body,
assertion=assertion,
scope=scope,
**kwargs)
|
Create and add a JWT assertion to the request body.
:param private_key: Private key used for signing and encrypting.
Must be given as a string.
:param subject: (sub) The principal that is the subject of the JWT,
i.e. which user is the token requested on behalf of.
For example, ``foo@example.com``.
:param issuer: (iss) The JWT MUST contain an "iss" (issuer) claim that
contains a unique identifier for the entity that issued
the JWT. For example, ``your-client@provider.com``.
:param audience: (aud) A value identifying the authorization server as an
intended audience, e.g.
``https://provider.com/oauth2/token``.
:param expires_at: A unix expiration timestamp for the JWT. Defaults
to an hour from now, i.e. ``time.time() + 3600``.
:param issued_at: A unix timestamp of when the JWT was created.
Defaults to now, i.e. ``time.time()``.
:param extra_claims: A dict of additional claims to include in the JWT.
:param body: Existing request body (URL encoded string) to embed parameters
into. This may contain extra parameters. Default ''.
:param scope: The scope of the access request.
:param include_client_id: `True` to send the `client_id` in the
body of the upstream request. This is required
if the client is not authenticating with the
authorization server as described in
`Section 3.2.1`_. False otherwise (default).
:type include_client_id: Boolean
:param not_before: A unix timestamp after which the JWT may be used.
Not included unless provided. *
:param jwt_id: A unique JWT token identifier. Not included unless
provided. *
:param kwargs: Extra credentials to include in the token request.
Parameters marked with a `*` above are not explicit arguments in the
function signature, but are specially documented arguments for items
appearing in the generic `**kwargs` keyworded input.
The "scope" parameter may be used, as defined in the Assertion
Framework for OAuth 2.0 Client Authentication and Authorization Grants
[I-D.ietf-oauth-assertions] specification, to indicate the requested
scope.
Authentication of the client is optional, as described in
`Section 3.2.1`_ of OAuth 2.0 [RFC6749] and consequently, the
"client_id" is only needed when a form of client authentication that
relies on the parameter is used.
The following non-normative example demonstrates an Access Token
Request with a JWT as an authorization grant (with extra line breaks
for display purposes only):
.. code-block: http
POST /token.oauth2 HTTP/1.1
Host: as.example.com
Content-Type: application/x-www-form-urlencoded
grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer
&assertion=eyJhbGciOiJFUzI1NiJ9.
eyJpc3Mi[...omitted for brevity...].
J9l-ZhwP[...omitted for brevity...]
.. _`Section 3.2.1`: https://tools.ietf.org/html/rfc6749#section-3.2.1
|
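A rough sketch of the claim set this method builds and signs, assuming PyJWT is installed; the key and URLs below are placeholders, not values from the original code:

import time
import jwt  # PyJWT

claim = {
    'iss': 'your-client@provider.com',           # placeholder issuer
    'aud': 'https://provider.com/oauth2/token',  # placeholder audience
    'sub': 'foo@example.com',                    # placeholder subject
    'exp': int(time.time() + 3600),
    'iat': int(time.time()),
}
# 'RS256' requires an RSA private key in PEM format (placeholder variable here).
assertion = jwt.encode(claim, rsa_private_key_pem, algorithm='RS256')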
def _cache_key_select_daterange(method, self, field_id, field_title, style=None):
"""
This function returns the key used to decide if method select_daterange has to be recomputed
"""
key = update_timer(), field_id, field_title, style
return key
|
This function returns the key used to decide if method select_daterange has to be recomputed
|
def get_plugin_conf(self, phase, name):
"""
Return the configuration for a plugin.
Raises KeyError if there are no plugins of that type.
Raises IndexError if the named plugin is not listed.
"""
match = [x for x in self.template[phase] if x.get('name') == name]
return match[0]
|
Return the configuration for a plugin.
Raises KeyError if there are no plugins of that type.
Raises IndexError if the named plugin is not listed.
|
def filter_exclude_downhole(self, threshold, filt=True):
"""
Exclude all points down-hole (after) the first excluded data.
Parameters
----------
        threshold : int
            The minimum number of contiguous excluded data points
            that must exist before downhole exclusion occurs.
        filt : valid filter string or bool
Which filter to consider. If True, applies to currently active
filters.
"""
f = self.filt.grab_filt(filt)
if self.n == 1:
nfilt = filters.exclude_downhole(f, threshold)
else:
nfilt = []
for i in range(self.n):
nf = self.ns == i + 1
nfilt.append(filters.exclude_downhole(f & nf, threshold))
nfilt = np.apply_along_axis(any, 0, nfilt)
self.filt.add(name='downhole_excl_{:.0f}'.format(threshold),
filt=nfilt,
info='Exclude data downhole of {:.0f} consecutive filtered points.'.format(threshold),
params=(threshold, filt))
|
Exclude all points down-hole (after) the first excluded data.
Parameters
----------
threshold : int
    The minimum number of contiguous excluded data points
    that must exist before downhole exclusion occurs.
filt : valid filter string or bool
Which filter to consider. If True, applies to currently active
filters.
|
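The helper `filters.exclude_downhole` is not shown in this snippet; a rough numpy sketch of the behaviour the docstring describes (mask everything after the first run of at least `threshold` consecutive excluded points) could look like:

import numpy as np

def exclude_downhole_sketch(filt, threshold):
    # filt: boolean array where False marks an already-excluded point.
    filt = np.asarray(filt, dtype=bool)
    out = filt.copy()
    run = 0
    for i, excluded in enumerate(~filt):
        run = run + 1 if excluded else 0
        if run >= threshold:
            out[i - threshold + 1:] = False  # exclude from the start of that run onward
            break
    return out

print(exclude_downhole_sketch([True, True, False, False, True, True], threshold=2))
# [ True  True False False False False]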
def populate_observable(self, time, kind, dataset, **kwargs):
"""
TODO: add documentation
"""
if kind in ['mesh', 'orb']:
return
if time==self.time and dataset in self.populated_at_time and 'pblum' not in kind:
# then we've already computed the needed columns
# TODO: handle the case of intensities already computed by
# /different/ dataset (ie RVs computed first and filling intensities
# and then lc requesting intensities with SAME passband/atm)
return
new_mesh_cols = getattr(self, '_populate_{}'.format(kind.lower()))(dataset, **kwargs)
for key, col in new_mesh_cols.items():
self.mesh.update_columns_dict({'{}:{}'.format(key, dataset): col})
self.populated_at_time.append(dataset)
|
TODO: add documentation
|
def _find_valid_index(self, how):
"""
        Retrieves the index of the first or last valid value.
Parameters
----------
how : {'first', 'last'}
Use this parameter to change between the first or last valid index.
Returns
-------
idx_first_valid : type of index
"""
assert how in ['first', 'last']
if len(self) == 0: # early stop
return None
is_valid = ~self.isna()
if self.ndim == 2:
is_valid = is_valid.any(1) # reduce axis 1
if how == 'first':
idxpos = is_valid.values[::].argmax()
if how == 'last':
idxpos = len(self) - 1 - is_valid.values[::-1].argmax()
chk_notna = is_valid.iat[idxpos]
idx = self.index[idxpos]
if not chk_notna:
return None
return idx
|
Retrieves the index of the first or last valid value.
Parameters
----------
how : {'first', 'last'}
Use this parameter to change between the first or last valid index.
Returns
-------
idx_first_valid : type of index
|
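This helper appears to back pandas' public `first_valid_index`/`last_valid_index`; a quick illustration of those methods:

import numpy as np
import pandas as pd

s = pd.Series([np.nan, np.nan, 3.0, np.nan, 5.0])
print(s.first_valid_index())  # 2
print(s.last_valid_index())   # 4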
def load_neurons(neurons,
neuron_loader=load_neuron,
name=None,
population_class=Population,
ignored_exceptions=()):
'''Create a population object from all morphologies in a directory\
    or from morphologies in a list of file names
Parameters:
neurons: directory path or list of neuron file paths
neuron_loader: function taking a filename and returning a neuron
population_class: class representing populations
name (str): optional name of population. By default 'Population' or\
filepath basename depending on whether neurons is list or\
directory path respectively.
Returns:
neuron population object
'''
if isinstance(neurons, (list, tuple)):
files = neurons
name = name if name is not None else 'Population'
elif isinstance(neurons, StringType):
files = get_files_by_path(neurons)
name = name if name is not None else os.path.basename(neurons)
ignored_exceptions = tuple(ignored_exceptions)
pop = []
for f in files:
try:
pop.append(neuron_loader(f))
except NeuroMError as e:
if isinstance(e, ignored_exceptions):
L.info('Ignoring exception "%s" for file %s',
e, os.path.basename(f))
continue
raise
return population_class(pop, name=name)
|
Create a population object from all morphologies in a directory\
or from morphologies in a list of file names
Parameters:
neurons: directory path or list of neuron file paths
neuron_loader: function taking a filename and returning a neuron
population_class: class representing populations
name (str): optional name of population. By default 'Population' or\
filepath basename depending on whether neurons is list or\
directory path respectively.
Returns:
neuron population object
|
def potential_cloud_pixels(self):
"""Determine potential cloud pixels (PCPs)
        Combine basic spectral tests to get a preliminary cloud mask
First pass, section 3.1.1 in Zhu and Woodcock 2012
Equation 6 (Zhu and Woodcock, 2012)
Parameters
----------
ndvi: ndarray
ndsi: ndarray
blue: ndarray
green: ndarray
red: ndarray
nir: ndarray
swir1: ndarray
swir2: ndarray
cirrus: ndarray
tirs1: ndarray
Output
------
ndarray:
potential cloud mask, boolean
"""
eq1 = self.basic_test()
eq2 = self.whiteness_test()
eq3 = self.hot_test()
eq4 = self.nirswir_test()
if self.sat == 'LC8':
cir = self.cirrus_test()
return (eq1 & eq2 & eq3 & eq4) | cir
else:
return eq1 & eq2 & eq3 & eq4
|
Determine potential cloud pixels (PCPs)
Combine basic spectral tests to get a preliminary cloud mask
First pass, section 3.1.1 in Zhu and Woodcock 2012
Equation 6 (Zhu and Woodcock, 2012)
Parameters
----------
ndvi: ndarray
ndsi: ndarray
blue: ndarray
green: ndarray
red: ndarray
nir: ndarray
swir1: ndarray
swir2: ndarray
cirrus: ndarray
tirs1: ndarray
Output
------
ndarray:
potential cloud mask, boolean
|
def parse_fn(fn):
""" This parses the file name and returns the coordinates of the tile
Parameters
-----------
fn : str
Filename of a GEOTIFF
Returns
--------
coords = [LLC.lat, LLC.lon, URC.lat, URC.lon]
"""
try:
parts = os.path.splitext(os.path.split(fn)[-1])[0].replace('o', '.')\
.split('_')[:2]
coords = [float(crds)
for crds in re.split('[NSEW]', parts[0] + parts[1])[1:]]
    except Exception:  # malformed filename falls back to NaNs
coords = [np.nan] * 4
return coords
|
This parses the file name and returns the coordinates of the tile
Parameters
-----------
fn : str
Filename of a GEOTIFF
Returns
--------
coords = [LLC.lat, LLC.lon, URC.lat, URC.lon]
|
def select_valid_methods_P(self, T, P):
        r'''Method to obtain a sorted list of methods which are valid at `T` and `P`
according to `test_method_validity`. Considers either only user methods
if forced is True, or all methods. User methods are first tested
according to their listed order, and unless forced is True, then all
methods are tested and sorted by their order in `ranked_methods`.
Parameters
----------
T : float
Temperature at which to test methods, [K]
P : float
Pressure at which to test methods, [Pa]
Returns
-------
sorted_valid_methods_P : list
Sorted lists of methods valid at T and P according to
`test_method_validity`
'''
# Same as select_valid_methods but with _P added to variables
if self.forced_P:
considered_methods = list(self.user_methods_P)
else:
considered_methods = list(self.all_methods_P)
if self.user_methods_P:
[considered_methods.remove(i) for i in self.user_methods_P]
preferences = sorted([self.ranked_methods_P.index(i) for i in considered_methods])
sorted_methods = [self.ranked_methods_P[i] for i in preferences]
if self.user_methods_P:
[sorted_methods.insert(0, i) for i in reversed(self.user_methods_P)]
sorted_valid_methods_P = []
for method in sorted_methods:
if self.test_method_validity_P(T, P, method):
sorted_valid_methods_P.append(method)
return sorted_valid_methods_P
|
r'''Method to obtain a sorted list of methods which are valid at `T` and `P`
according to `test_method_validity`. Considers either only user methods
if forced is True, or all methods. User methods are first tested
according to their listed order, and unless forced is True, then all
methods are tested and sorted by their order in `ranked_methods`.
Parameters
----------
T : float
Temperature at which to test methods, [K]
P : float
Pressure at which to test methods, [Pa]
Returns
-------
sorted_valid_methods_P : list
Sorted lists of methods valid at T and P according to
`test_method_validity`
|
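A standalone sketch of the ordering rule described above (user methods first, in their listed order, then the remaining methods in ranked order), using made-up method names and ignoring the validity filtering:

def order_methods_sketch(user_methods, ranked_methods):
    # Hypothetical helper mirroring only the sort order described in the docstring.
    others = [m for m in ranked_methods if m not in user_methods]
    return list(user_methods) + others

print(order_methods_sketch(['COOLPROP'], ['DIPPR_PERRY', 'COOLPROP', 'IDEAL']))
# ['COOLPROP', 'DIPPR_PERRY', 'IDEAL']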
def ResetConsoleColor() -> bool:
"""
Reset to the default text color on console window.
    Return bool, True if succeeded, otherwise False.
"""
if sys.stdout:
sys.stdout.flush()
    return bool(ctypes.windll.kernel32.SetConsoleTextAttribute(_ConsoleOutputHandle, _DefaultConsoleColor))
|
Reset to the default text color on console window.
Return bool, True if succeeded, otherwise False.
|
def random(key: str, index: Index, index_map: IndexMap=None) -> pd.Series:
"""Produces an indexed `pandas.Series` of uniformly distributed random numbers.
The index passed in typically corresponds to a subset of rows in a
`pandas.DataFrame` for which a probabilistic draw needs to be made.
Parameters
----------
key :
A string used to create a seed for the random number generation.
index :
The index used for the returned series.
index_map :
A mapping between the provided index (which may contain ints, floats,
datetimes or any arbitrary combination of them) and an integer index
into the random number array.
Returns
-------
pd.Series
A series of random numbers indexed by the provided index.
"""
if len(index) > 0:
random_state = np.random.RandomState(seed=get_hash(key))
# Generate a random number for every simulant.
#
# NOTE: We generate a full set of random numbers for the population
# even when we may only need a few. This ensures consistency in outcomes
# across simulations.
# See Also:
# 1. https://en.wikipedia.org/wiki/Variance_reduction
# 2. Untangling Uncertainty with Common Random Numbers: A Simulation Study; A.Flaxman, et. al., Summersim 2017
sample_size = index_map.map_size if index_map is not None else index.max() + 1
try:
draw_index = index_map[index]
except (IndexError, TypeError):
draw_index = index
raw_draws = random_state.random_sample(sample_size)
return pd.Series(raw_draws[draw_index], index=index)
return pd.Series(index=index)
|
Produces an indexed `pandas.Series` of uniformly distributed random numbers.
The index passed in typically corresponds to a subset of rows in a
`pandas.DataFrame` for which a probabilistic draw needs to be made.
Parameters
----------
key :
A string used to create a seed for the random number generation.
index :
The index used for the returned series.
index_map :
A mapping between the provided index (which may contain ints, floats,
datetimes or any arbitrary combination of them) and an integer index
into the random number array.
Returns
-------
pd.Series
A series of random numbers indexed by the provided index.
|
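A minimal sketch of the common-random-numbers idea, assuming `get_hash` is something like a stable integer hash of the key (the real helper is not shown here):

import hashlib
import numpy as np
import pandas as pd

def get_hash_sketch(key):
    # Stand-in for the real get_hash: a stable 32-bit seed derived from the key.
    return int(hashlib.md5(key.encode()).hexdigest(), 16) % (2 ** 32)

index = pd.Index([3, 7, 11])
state = np.random.RandomState(seed=get_hash_sketch('coin_flip'))
draws = state.random_sample(index.max() + 1)  # draw for the whole population
print(pd.Series(draws[index], index=index))   # identical values for the same key on every run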
def instance(self, counter=None, pipeline_counter=None):
"""Returns all the information regarding a specific stage run
See the `Go stage instance documentation`__ for examples.
.. __: http://api.go.cd/current/#get-stage-instance
Args:
counter (int): The stage instance to fetch.
If falsey returns the latest stage instance from :meth:`history`.
pipeline_counter (int): The pipeline instance for which to fetch
the stage. If falsey returns the latest pipeline instance.
Returns:
Response: :class:`gocd.api.response.Response` object
"""
pipeline_counter = pipeline_counter or self.pipeline_counter
pipeline_instance = None
if not pipeline_counter:
pipeline_instance = self.server.pipeline(self.pipeline_name).instance()
self.pipeline_counter = int(pipeline_instance['counter'])
if not counter:
if pipeline_instance is None:
pipeline_instance = (
self.server
.pipeline(self.pipeline_name)
.instance(pipeline_counter)
)
for stages in pipeline_instance['stages']:
if stages['name'] == self.stage_name:
return self.instance(
counter=int(stages['counter']),
pipeline_counter=pipeline_counter
)
return self._get('/instance/{pipeline_counter:d}/{counter:d}'
.format(pipeline_counter=pipeline_counter, counter=counter))
|
Returns all the information regarding a specific stage run
See the `Go stage instance documentation`__ for examples.
.. __: http://api.go.cd/current/#get-stage-instance
Args:
counter (int): The stage instance to fetch.
If falsey returns the latest stage instance from :meth:`history`.
pipeline_counter (int): The pipeline instance for which to fetch
the stage. If falsey returns the latest pipeline instance.
Returns:
Response: :class:`gocd.api.response.Response` object
|
def raw_search(self, *args, **kwargs):
"""
        Find a set of emails matching each regular expression passed in against the (RFC822) content.
        Args:
            *args: list of regular expressions.
        Kwargs:
            limit (int) - Limit on how many of the most recent emails to search through.
            date (datetime) - If specified, it will avoid checking messages older
            than this date.
"""
limit = 50
try:
limit = kwargs['limit']
except KeyError:
pass
# Get first X messages.
self._mail.select("inbox")
# apply date filter.
try:
date = kwargs['date']
date_str = date.strftime("%d-%b-%Y")
_, email_ids = self._mail.search(None, '(SINCE "%s")' % date_str)
except KeyError:
_, email_ids = self._mail.search(None, 'ALL')
# Above call returns email IDs as an array containing 1 str
email_ids = email_ids[0].split()
matching_uids = []
for _ in range(1, min(limit, len(email_ids))):
email_id = email_ids.pop()
rfc_body = self._mail.fetch(email_id, "(RFC822)")[1][0][1]
match = True
for expr in args:
if re.search(expr, rfc_body) is None:
match = False
break
if match:
uid = re.search(
"UID\\D*(\\d+)\\D*", self._mail.fetch(email_id, 'UID')[1][0]).group(1)
matching_uids.append(uid)
return matching_uids
|
Find a set of emails matching each regular expression passed in against the (RFC822) content.
Args:
    *args: list of regular expressions.
Kwargs:
    limit (int) - Limit on how many of the most recent emails to search through.
    date (datetime) - If specified, it will avoid checking messages older
    than this date.
|
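The underlying imaplib calls look roughly like this; the host, credentials and search pattern are placeholders, not values from the original class:

import imaplib
import re
from datetime import datetime

mail = imaplib.IMAP4_SSL('imap.example.com')      # placeholder host
mail.login('user@example.com', 'app-password')    # placeholder credentials
mail.select('inbox')
date_str = datetime(2024, 1, 1).strftime('%d-%b-%Y')
_, email_ids = mail.search(None, '(SINCE "%s")' % date_str)
for email_id in email_ids[0].split()[-50:]:       # roughly the 50 most recent matches
    rfc_body = mail.fetch(email_id, '(RFC822)')[1][0][1]
    if re.search(b'Subject: Invoice', rfc_body):
        print(email_id)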
def scheme_chunker(text, getreffs):
""" This is the scheme chunker which will resolve the reference giving a callback (getreffs) and a text object with its metadata
:param text: Text Object representing either an edition or a translation
:type text: MyCapytains.resources.inventory.Text
:param getreffs: callback function which retrieves a list of references
:type getreffs: function
:return: List of urn references with their human readable version
:rtype: [(str, str)]
"""
level = len(text.citation)
types = [citation.name for citation in text.citation]
if types == ["book", "poem", "line"]:
level = 2
elif types == ["book", "line"]:
return line_chunker(text, getreffs)
return [tuple([reff.split(":")[-1]]*2) for reff in getreffs(level=level)]
|
This is the scheme chunker which will resolve the reference, given a callback (getreffs) and a text object with its metadata
:param text: Text Object representing either an edition or a translation
:type text: MyCapytains.resources.inventory.Text
:param getreffs: callback function which retrieves a list of references
:type getreffs: function
:return: List of urn references with their human readable version
:rtype: [(str, str)]
|
def xml_replace(filename, **replacements):
"""Read the content of an XML template file (XMLT), apply the given
`replacements` to its substitution markers, and write the result into
an XML file with the same name but ending with `xml` instead of `xmlt`.
First, we write an XMLT file, containing a regular HTML comment, a
readily defined element `e1`, and some other elements with
substitutions markers. Substitution markers are HTML comments
starting and ending with the `|` character:
>>> from hydpy import xml_replace, TestIO
>>> with TestIO():
... with open('test1.xmlt', 'w') as templatefile:
... _ = templatefile.write(
... '<!--a normal comment-->\\n'
... '<e1>element 1</e1>\\n'
... '<e2><!--|e2|--></e2>\\n'
... '<e3><!--|e3_|--></e3>\\n'
... '<e4><!--|e4=element 4|--></e4>\\n'
... '<e2><!--|e2|--></e2>')
Function |xml_replace| can both be called within a Python session and
from a command line. We start with the first type of application.
Each substitution marker must be met by a keyword argument unless
it holds a default value (`e4`). All arguments are converted to
a |str| object (`e3`). Template files can use the same substitution
marker multiple times (`e2`):
>>> with TestIO():
... xml_replace('test1', e2='E2', e3_=3, e4='ELEMENT 4')
template file: test1.xmlt
target file: test1.xml
replacements:
e2 --> E2 (given argument)
e3_ --> 3 (given argument)
e4 --> ELEMENT 4 (given argument)
e2 --> E2 (given argument)
>>> with TestIO():
... with open('test1.xml') as targetfile:
... print(targetfile.read())
<!--a normal comment-->
<e1>element 1</e1>
<e2>E2</e2>
<e3>3</e3>
<e4>ELEMENT 4</e4>
<e2>E2</e2>
Without custom values, |xml_replace| applies predefined default
values, if available (`e4`):
>>> with TestIO():
... xml_replace('test1', e2='E2', e3_=3) # doctest: +ELLIPSIS
template file: test1.xmlt
target file: test1.xml
replacements:
e2 --> E2 (given argument)
e3_ --> 3 (given argument)
e4 --> element 4 (default argument)
e2 --> E2 (given argument)
>>> with TestIO():
... with open('test1.xml') as targetfile:
... print(targetfile.read())
<!--a normal comment-->
<e1>element 1</e1>
<e2>E2</e2>
<e3>3</e3>
<e4>element 4</e4>
<e2>E2</e2>
Missing and useless keyword arguments result in errors:
>>> with TestIO():
... xml_replace('test1', e2='E2')
Traceback (most recent call last):
...
RuntimeError: While trying to replace the markers `e2, e3_, and e4` \
of the XML template file `test1.xmlt` with the available keywords `e2`, \
the following error occurred: Marker `e3_` cannot be replaced.
>>> with TestIO():
... xml_replace('test1', e2='e2', e3_='E3', e4='e4', e5='e5')
Traceback (most recent call last):
...
RuntimeError: While trying to replace the markers `e2, e3_, and e4` \
of the XML template file `test1.xmlt` with the available keywords `e2, e3_, \
e4, and e5`, the following error occurred: Keyword(s) `e5` cannot be used.
Using different default values for the same substitution marker
is not allowed:
>>> from hydpy import pub, TestIO, xml_replace
>>> with TestIO():
... with open('test2.xmlt', 'w') as templatefile:
... _ = templatefile.write(
... '<e4><!--|e4=element 4|--></e4>\\n'
... '<e4><!--|e4=ELEMENT 4|--></e4>')
>>> with TestIO():
... xml_replace('test2', e4=4)
template file: test2.xmlt
target file: test2.xml
replacements:
e4 --> 4 (given argument)
e4 --> 4 (given argument)
>>> with TestIO():
... with open('test2.xml') as targetfile:
... print(targetfile.read())
<e4>4</e4>
<e4>4</e4>
>>> with TestIO():
... xml_replace('test2')
Traceback (most recent call last):
...
RuntimeError: Template file `test2.xmlt` defines different default values \
for marker `e4`.
As mentioned above, function |xml_replace| is registered as a "script
function" and can thus be used via command line:
>>> pub.scriptfunctions['xml_replace'].__name__
'xml_replace'
>>> pub.scriptfunctions['xml_replace'].__module__
'hydpy.exe.replacetools'
Use script |hyd| to execute function |xml_replace|:
>>> from hydpy import run_subprocess
>>> with TestIO():
... run_subprocess(
... 'hyd.py xml_replace test1 e2="Element 2" e3_=3')
template file: test1.xmlt
target file: test1.xml
replacements:
e2 --> Element 2 (given argument)
e3_ --> 3 (given argument)
e4 --> element 4 (default argument)
e2 --> Element 2 (given argument)
>>> with TestIO():
... with open('test1.xml') as targetfile:
... print(targetfile.read())
<!--a normal comment-->
<e1>element 1</e1>
<e2>Element 2</e2>
<e3>3</e3>
<e4>element 4</e4>
<e2>Element 2</e2>
"""
keywords = set(replacements.keys())
templatename = f'{filename}.xmlt'
targetname = f'{filename}.xml'
print(f'template file: {templatename}')
print(f'target file: {targetname}')
print('replacements:')
with open(templatename) as templatefile:
templatebody = templatefile.read()
parts = templatebody.replace('<!--|', '|-->').split('|-->')
defaults = {}
for idx, part in enumerate(parts):
if idx % 2:
subparts = part.partition('=')
if subparts[2]:
parts[idx] = subparts[0]
if subparts[0] not in replacements:
if ((subparts[0] in defaults) and
(defaults[subparts[0]] != str(subparts[2]))):
raise RuntimeError(
f'Template file `{templatename}` defines '
f'different default values for marker '
f'`{subparts[0]}`.')
defaults[subparts[0]] = str(subparts[2])
markers = parts[1::2]
try:
unused_keywords = keywords.copy()
for idx, part in enumerate(parts):
if idx % 2:
argument_info = 'given argument'
newpart = replacements.get(part)
if newpart is None:
argument_info = 'default argument'
newpart = defaults.get(part)
if newpart is None:
raise RuntimeError(
f'Marker `{part}` cannot be replaced.')
print(f' {part} --> {newpart} ({argument_info})')
parts[idx] = str(newpart)
unused_keywords.discard(part)
targetbody = ''.join(parts)
if unused_keywords:
raise RuntimeError(
f'Keyword(s) `{objecttools.enumeration(unused_keywords)}` '
f'cannot be used.')
with open(targetname, 'w') as targetfile:
targetfile.write(targetbody)
except BaseException:
objecttools.augment_excmessage(
f'While trying to replace the markers '
f'`{objecttools.enumeration(sorted(set(markers)))}` of the '
f'XML template file `{templatename}` with the available '
f'keywords `{objecttools.enumeration(sorted(keywords))}`')
|
Read the content of an XML template file (XMLT), apply the given
`replacements` to its substitution markers, and write the result into
an XML file with the same name but ending with `xml` instead of `xmlt`.
First, we write an XMLT file, containing a regular HTML comment, a
readily defined element `e1`, and some other elements with
substitutions markers. Substitution markers are HTML comments
starting and ending with the `|` character:
>>> from hydpy import xml_replace, TestIO
>>> with TestIO():
... with open('test1.xmlt', 'w') as templatefile:
... _ = templatefile.write(
... '<!--a normal comment-->\\n'
... '<e1>element 1</e1>\\n'
... '<e2><!--|e2|--></e2>\\n'
... '<e3><!--|e3_|--></e3>\\n'
... '<e4><!--|e4=element 4|--></e4>\\n'
... '<e2><!--|e2|--></e2>')
Function |xml_replace| can both be called within a Python session and
from a command line. We start with the first type of application.
Each substitution marker must be met by a keyword argument unless
it holds a default value (`e4`). All arguments are converted to
a |str| object (`e3`). Template files can use the same substitution
marker multiple times (`e2`):
>>> with TestIO():
... xml_replace('test1', e2='E2', e3_=3, e4='ELEMENT 4')
template file: test1.xmlt
target file: test1.xml
replacements:
e2 --> E2 (given argument)
e3_ --> 3 (given argument)
e4 --> ELEMENT 4 (given argument)
e2 --> E2 (given argument)
>>> with TestIO():
... with open('test1.xml') as targetfile:
... print(targetfile.read())
<!--a normal comment-->
<e1>element 1</e1>
<e2>E2</e2>
<e3>3</e3>
<e4>ELEMENT 4</e4>
<e2>E2</e2>
Without custom values, |xml_replace| applies predefined default
values, if available (`e4`):
>>> with TestIO():
... xml_replace('test1', e2='E2', e3_=3) # doctest: +ELLIPSIS
template file: test1.xmlt
target file: test1.xml
replacements:
e2 --> E2 (given argument)
e3_ --> 3 (given argument)
e4 --> element 4 (default argument)
e2 --> E2 (given argument)
>>> with TestIO():
... with open('test1.xml') as targetfile:
... print(targetfile.read())
<!--a normal comment-->
<e1>element 1</e1>
<e2>E2</e2>
<e3>3</e3>
<e4>element 4</e4>
<e2>E2</e2>
Missing and useless keyword arguments result in errors:
>>> with TestIO():
... xml_replace('test1', e2='E2')
Traceback (most recent call last):
...
RuntimeError: While trying to replace the markers `e2, e3_, and e4` \
of the XML template file `test1.xmlt` with the available keywords `e2`, \
the following error occurred: Marker `e3_` cannot be replaced.
>>> with TestIO():
... xml_replace('test1', e2='e2', e3_='E3', e4='e4', e5='e5')
Traceback (most recent call last):
...
RuntimeError: While trying to replace the markers `e2, e3_, and e4` \
of the XML template file `test1.xmlt` with the available keywords `e2, e3_, \
e4, and e5`, the following error occurred: Keyword(s) `e5` cannot be used.
Using different default values for the same substitution marker
is not allowed:
>>> from hydpy import pub, TestIO, xml_replace
>>> with TestIO():
... with open('test2.xmlt', 'w') as templatefile:
... _ = templatefile.write(
... '<e4><!--|e4=element 4|--></e4>\\n'
... '<e4><!--|e4=ELEMENT 4|--></e4>')
>>> with TestIO():
... xml_replace('test2', e4=4)
template file: test2.xmlt
target file: test2.xml
replacements:
e4 --> 4 (given argument)
e4 --> 4 (given argument)
>>> with TestIO():
... with open('test2.xml') as targetfile:
... print(targetfile.read())
<e4>4</e4>
<e4>4</e4>
>>> with TestIO():
... xml_replace('test2')
Traceback (most recent call last):
...
RuntimeError: Template file `test2.xmlt` defines different default values \
for marker `e4`.
As mentioned above, function |xml_replace| is registered as a "script
function" and can thus be used via command line:
>>> pub.scriptfunctions['xml_replace'].__name__
'xml_replace'
>>> pub.scriptfunctions['xml_replace'].__module__
'hydpy.exe.replacetools'
Use script |hyd| to execute function |xml_replace|:
>>> from hydpy import run_subprocess
>>> with TestIO():
... run_subprocess(
... 'hyd.py xml_replace test1 e2="Element 2" e3_=3')
template file: test1.xmlt
target file: test1.xml
replacements:
e2 --> Element 2 (given argument)
e3_ --> 3 (given argument)
e4 --> element 4 (default argument)
e2 --> Element 2 (given argument)
>>> with TestIO():
... with open('test1.xml') as targetfile:
... print(targetfile.read())
<!--a normal comment-->
<e1>element 1</e1>
<e2>Element 2</e2>
<e3>3</e3>
<e4>element 4</e4>
<e2>Element 2</e2>
|
def _check_repo_sign_utils_support(name):
'''
Check for specified command name in search path
'''
if salt.utils.path.which(name):
return True
else:
raise CommandExecutionError(
'utility \'{0}\' needs to be installed or made available in search path'.format(name)
)
|
Check for specified command name in search path
|
def geturl(environ, query=True, path=True, use_server_name=False):
"""Rebuilds a request URL (from PEP 333).
    You may want to choose to use the environment variables
    server_name and server_port instead of http_host in some cases.
    The parameter use_server_name allows you to choose.
:param query: Is QUERY_STRING included in URI (default: True)
:param path: Is path included in URI (default: True)
:param use_server_name: If SERVER_NAME/_HOST should be used instead of
HTTP_HOST
"""
url = [environ['wsgi.url_scheme'] + '://']
if use_server_name:
url.append(environ['SERVER_NAME'])
if environ['wsgi.url_scheme'] == 'https':
if environ['SERVER_PORT'] != '443':
url.append(':' + environ['SERVER_PORT'])
else:
if environ['SERVER_PORT'] != '80':
url.append(':' + environ['SERVER_PORT'])
else:
url.append(environ['HTTP_HOST'])
if path:
url.append(getpath(environ))
if query and environ.get('QUERY_STRING'):
url.append('?' + environ['QUERY_STRING'])
return ''.join(url)
|
Rebuilds a request URL (from PEP 333).
You may want to choose to use the environment variables
server_name and server_port instead of http_host in some cases.
The parameter use_server_name allows you to choose.
:param query: Is QUERY_STRING included in URI (default: True)
:param path: Is path included in URI (default: True)
:param use_server_name: If SERVER_NAME/_HOST should be used instead of
HTTP_HOST
|
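For instance, with a typical WSGI environ (and assuming `getpath`, not shown here, joins SCRIPT_NAME and PATH_INFO), the pieces combine like this:

environ = {
    'wsgi.url_scheme': 'https',
    'HTTP_HOST': 'example.com',
    'SERVER_NAME': 'internal-host',
    'SERVER_PORT': '8443',
    'SCRIPT_NAME': '/app',
    'PATH_INFO': '/users',
    'QUERY_STRING': 'page=2',
}
# geturl(environ)                        -> 'https://example.com/app/users?page=2'
# geturl(environ, use_server_name=True)  -> 'https://internal-host:8443/app/users?page=2'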
def _load(self):
"""
Function load.
:return: Response content
:raises: NotFoundError
"""
try:
get = requests.get(self._ref,
verify=self.http_verify,
auth=self.auth,
timeout=self.timeout)
except requests.exceptions.RequestException as err:
raise NotFoundError(err)
return get.content
|
Function load.
:return: Response content
:raises: NotFoundError
|
def store_magic_envelope_doc(self, payload):
"""Get the Magic Envelope, trying JSON first."""
try:
json_payload = json.loads(decode_if_bytes(payload))
except ValueError:
# XML payload
xml = unquote(decode_if_bytes(payload))
xml = xml.lstrip().encode("utf-8")
logger.debug("diaspora.protocol.store_magic_envelope_doc: xml payload: %s", xml)
self.doc = etree.fromstring(xml)
else:
logger.debug("diaspora.protocol.store_magic_envelope_doc: json payload: %s", json_payload)
self.doc = self.get_json_payload_magic_envelope(json_payload)
|
Get the Magic Envelope, trying JSON first.
|
def all_subclasses(cls):
"""Generator yielding all subclasses of `cls` recursively"""
for subcls in cls.__subclasses__():
yield subcls
for subsubcls in all_subclasses(subcls):
yield subsubcls
|
Generator yielding all subclasses of `cls` recursively
|
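For example, using the generator above:

class Base: pass
class A(Base): pass
class B(Base): pass
class C(A): pass

print([cls.__name__ for cls in all_subclasses(Base)])
# ['A', 'C', 'B']  (each subclass is followed by its own subclasses)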
def get_interfaces(self):
"""Return interfaces details."""
result = {}
interfaces = junos_views.junos_iface_table(self.device)
interfaces.get()
interfaces_logical = junos_views.junos_logical_iface_table(self.device)
interfaces_logical.get()
# convert all the tuples to our pre-defined dict structure
def _convert_to_dict(interfaces):
            # calling .items() here won't work.
# The dictionary values will end up being tuples instead of dictionaries
interfaces = dict(interfaces)
for iface, iface_data in interfaces.items():
result[iface] = {
"is_up": iface_data["is_up"],
# For physical interfaces <admin-status> will always be there, so just
# return the value interfaces[iface]['is_enabled']
# For logical interfaces if <iff-down> is present interface is disabled,
# otherwise interface is enabled
"is_enabled": (
True
if iface_data["is_enabled"] is None
else iface_data["is_enabled"]
),
"description": (iface_data["description"] or ""),
"last_flapped": float((iface_data["last_flapped"] or -1)),
"mac_address": napalm.base.helpers.convert(
napalm.base.helpers.mac,
iface_data["mac_address"],
py23_compat.text_type(iface_data["mac_address"]),
),
"speed": -1,
"mtu": 0,
}
# result[iface]['last_flapped'] = float(result[iface]['last_flapped'])
match_mtu = re.search(r"(\w+)", str(iface_data["mtu"]) or "")
mtu = napalm.base.helpers.convert(int, match_mtu.group(0), 0)
result[iface]["mtu"] = mtu
match = re.search(r"(\d+|[Aa]uto)(\w*)", iface_data["speed"] or "")
if match and match.group(1).lower() == "auto":
match = re.search(
r"(\d+)(\w*)", iface_data["negotiated_speed"] or ""
)
if match is None:
continue
speed_value = napalm.base.helpers.convert(int, match.group(1), -1)
if speed_value == -1:
continue
speed_unit = match.group(2)
if speed_unit.lower() == "gbps":
speed_value *= 1000
result[iface]["speed"] = speed_value
return result
result = _convert_to_dict(interfaces)
result.update(_convert_to_dict(interfaces_logical))
return result
|
Return interfaces details.
|
def _prm_store_from_dict(self, fullname, store_dict, hdf5_group, store_flags, kwargs):
"""Stores a `store_dict`"""
for key, data_to_store in store_dict.items():
# self._logger.log(1, 'SUB-Storing %s [%s]', key, str(store_dict[key]))
original_hdf5_group = None
flag = store_flags[key]
if '.' in key:
original_hdf5_group = hdf5_group
split_key = key.split('.')
key = split_key.pop()
for inner_key in split_key:
hdf5_group, newly_created = self._all_create_or_get_group(inner_key,
hdf5_group)
if newly_created:
setattr(hdf5_group._v_attrs, HDF5StorageService.STORAGE_TYPE,
HDF5StorageService.NESTED_GROUP)
else:
store_type = self._all_get_from_attrs(hdf5_group, HDF5StorageService.STORAGE_TYPE)
if store_type != HDF5StorageService.NESTED_GROUP:
                        raise ValueError('You want to nest results but `%s` is already '
                                         'of type `%s`!' % (hdf5_group._v_name, store_type))
# Iterate through the data and store according to the storage flags
if key in hdf5_group:
# We won't change any data that is found on disk
self._logger.debug(
'Found %s already in hdf5 node of %s, so I will ignore it.' %
(key, fullname))
continue
if flag == HDF5StorageService.TABLE:
# self._logger.log(1, 'SUB-Storing %s TABLE', key)
self._prm_write_into_pytable(key, data_to_store, hdf5_group, fullname,
**kwargs)
elif flag == HDF5StorageService.DICT:
# self._logger.log(1, 'SUB-Storing %s DICT', key)
self._prm_write_dict_as_table(key, data_to_store, hdf5_group, fullname,
**kwargs)
elif flag == HDF5StorageService.ARRAY:
# self._logger.log(1, 'SUB-Storing %s ARRAY', key)
self._prm_write_into_array(key, data_to_store, hdf5_group, fullname,
**kwargs)
elif flag in (HDF5StorageService.CARRAY,
HDF5StorageService.EARRAY,
HDF5StorageService.VLARRAY):
self._prm_write_into_other_array(key, data_to_store,
hdf5_group, fullname,
flag=flag, **kwargs)
elif flag in (HDF5StorageService.SERIES,
HDF5StorageService.FRAME,
# HDF5StorageService.PANEL
):
# self._logger.log(1, 'SUB-Storing %s PANDAS', key)
self._prm_write_pandas_data(key, data_to_store, hdf5_group, fullname,
flag, **kwargs)
elif flag == HDF5StorageService.SHARED_DATA:
pass # Shared data needs to be explicitly created and is not stored on
# the fly
else:
raise RuntimeError('You shall not pass!')
if original_hdf5_group is not None:
hdf5_group = original_hdf5_group
|
Stores a `store_dict`
|
def cart_to_polar(arr_c):
"""Return cartesian vectors in their polar representation.
Parameters
----------
arr_c: array, shape (a1, a2, ..., d)
Cartesian vectors, with last axis indexing the dimension.
Returns
-------
arr_p: array, shape of arr_c
Polar vectors, using (radius, inclination, azimuth) convention.
"""
if arr_c.shape[-1] == 1:
arr_p = arr_c.copy()
elif arr_c.shape[-1] == 2:
arr_p = np.empty_like(arr_c)
arr_p[..., 0] = vector_mag(arr_c)
arr_p[..., 1] = np.arctan2(arr_c[..., 1], arr_c[..., 0])
elif arr_c.shape[-1] == 3:
arr_p = np.empty_like(arr_c)
arr_p[..., 0] = vector_mag(arr_c)
arr_p[..., 1] = np.arccos(arr_c[..., 2] / arr_p[..., 0])
arr_p[..., 2] = np.arctan2(arr_c[..., 1], arr_c[..., 0])
else:
raise Exception('Invalid vector for polar representation')
return arr_p
|
Return cartesian vectors in their polar representation.
Parameters
----------
arr_c: array, shape (a1, a2, ..., d)
Cartesian vectors, with last axis indexing the dimension.
Returns
-------
arr_p: array, shape of arr_c
Polar vectors, using (radius, inclination, azimuth) convention.
|
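A quick check of the 2-D branch; `vector_mag` is not shown in the snippet and is assumed here to be the Euclidean norm over the last axis:

import numpy as np

def vector_mag(arr):
    # Assumed helper: Euclidean norm over the last axis.
    return np.sqrt(np.sum(np.square(arr), axis=-1))

arr_c = np.array([[1.0, 0.0], [0.0, 2.0]])
arr_p = np.empty_like(arr_c)
arr_p[..., 0] = vector_mag(arr_c)                          # radius
arr_p[..., 1] = np.arctan2(arr_c[..., 1], arr_c[..., 0])   # azimuth
print(arr_p)  # [[1. 0.] [2. 1.5707963...]]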
def add_property(self, name, value):
# type: (str, object) -> bool
"""
Adds a property to the framework **if it is not yet set**.
If the property already exists (same name), then nothing is done.
Properties can't be updated.
:param name: The property name
:param value: The value to set
:return: True if the property was stored, else False
"""
with self.__properties_lock:
if name in self.__properties:
# Already stored property
return False
self.__properties[name] = value
return True
|
Adds a property to the framework **if it is not yet set**.
If the property already exists (same name), then nothing is done.
Properties can't be updated.
:param name: The property name
:param value: The value to set
:return: True if the property was stored, else False
|
def delete_connection(self, name, reason=None):
"""
Closes an individual connection. Give an optional reason
:param name: The connection name
:type name: str
        :param reason: An optional reason why the connection was deleted
:type reason: str
"""
headers = {'X-Reason': reason} if reason else {}
self._api_delete(
'/api/connections/{0}'.format(
urllib.parse.quote_plus(name)
),
headers=headers,
)
|
Closes an individual connection, with an optional reason.
:param name: The connection name
:type name: str
:param reason: An optional reason why the connection was deleted
:type reason: str
|
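This appears to target a RabbitMQ-style management API; an equivalent raw call with requests might look like this, with placeholder host, credentials and connection name:

import requests
from urllib.parse import quote_plus

name = '127.0.0.1:5672 -> 127.0.0.1:54321'  # connection name as reported by the broker
resp = requests.delete(
    'http://localhost:15672/api/connections/' + quote_plus(name),
    auth=('guest', 'guest'),                 # placeholder credentials
    headers={'X-Reason': 'closing idle connection'},
)
resp.raise_for_status()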
def _update_mean_in_window(self):
"""
        Compute mean in window the slow way. Useful for the first step.
Considers all values in window
See Also
--------
_add_observation_to_means : fast update of mean for single observation addition
_remove_observation_from_means : fast update of mean for single observation removal
"""
self._mean_x_in_window = numpy.mean(self._x_in_window)
self._mean_y_in_window = numpy.mean(self._y_in_window)
|
Compute mean in window the slow way. Useful for the first step.
Considers all values in window
See Also
--------
_add_observation_to_means : fast update of mean for single observation addition
_remove_observation_from_means : fast update of mean for single observation removal
|
def delete(self, *args):
"""Remove the key from the request cache and from memcache."""
cache = get_cache()
key = self.get_cache_key(*args)
if key in cache:
del cache[key]
|
Remove the key from the request cache and from memcache.
|
def R_op(self, inputs, eval_points):
"""Apply the adjoint of the Jacobian at ``inputs`` to ``eval_points``.
This is the symbolic counterpart of ODL's ::
op.derivative(x).adjoint(v)
See `grad` for its usage.
Parameters
----------
inputs : 1-element list of `theano.tensor.var.TensorVariable`
Symbolic input to the gradient, the point at which the
Jacobian is computed.
eval_points : 1-element list of `theano.tensor.var.TensorVariable`
Symbolic input to the adjoint of the Jacobian, i.e., the
variable to which the Jacobian adjoint should be applied.
Returns
-------
outputs : 1-element list of `theano.tensor.var.TensorVariable`
Symbolic result of the application of the Jacobian adjoint.
It uses a wrapper class ``OdlDerivativeAdjointAsTheanoROp``
for ``(x, v) --> op.derivative(x).adjoint(v)``.
"""
        # ODL weights its spaces, Theano does not, so we need to handle this
try:
dom_weight = self.operator.domain.weighting.const
except AttributeError:
dom_weight = 1.0
try:
ran_weight = self.operator.range.weighting.const
except AttributeError:
ran_weight = 1.0
scale = dom_weight / ran_weight
op = self
        class TheanoJacobianAdjoint(theano.Op):
            """Wrap ``op.derivative(x).adjoint(v)`` into a Theano Op.
            This Op has two inputs, ``x`` and ``v``, where ``x``
            is the point at which the Jacobian is taken, and ``v`` the
            tensor to which its adjoint is applied. There is only one output,
            which is of the same type as ``v`` (and ``x``).
            """
            __props__ = ()
def make_node(self, x, v):
"""Create a node for the computation graph."""
x = theano.tensor.as_tensor_variable(x)
v = theano.tensor.as_tensor_variable(v)
return theano.Apply(self, [x, v], [x.type()])
def perform(self, node, inputs_storage, output_storage):
"""Evaluate this node's computation.
This method computes ::
op.derivative(x).adjoint(v)
"""
x = inputs_storage[0]
v = inputs_storage[1]
out = output_storage[0]
out[0] = np.asarray(op.operator.derivative(x).adjoint(v))
if scale != 1.0:
out[0] *= scale
def infer_shape(self, node, input_shapes):
"""Return a list of output shapes based on ``input_shapes``."""
# Need to convert to native to avoid error in theano from
# future.int
return [tuple(native(si) for si in op.operator.domain.shape)]
r_op = TheanoJacobianAdjoint()
r_op_apply = r_op(inputs[0], eval_points[0])
return [r_op_apply]
|
Apply the adjoint of the Jacobian at ``inputs`` to ``eval_points``.
This is the symbolic counterpart of ODL's ::
op.derivative(x).adjoint(v)
See `grad` for its usage.
Parameters
----------
inputs : 1-element list of `theano.tensor.var.TensorVariable`
Symbolic input to the gradient, the point at which the
Jacobian is computed.
eval_points : 1-element list of `theano.tensor.var.TensorVariable`
Symbolic input to the adjoint of the Jacobian, i.e., the
variable to which the Jacobian adjoint should be applied.
Returns
-------
outputs : 1-element list of `theano.tensor.var.TensorVariable`
Symbolic result of the application of the Jacobian adjoint.
It uses a wrapper class ``OdlDerivativeAdjointAsTheanoROp``
for ``(x, v) --> op.derivative(x).adjoint(v)``.
|
def convert_surrogate_pair(match):
"""
Convert a surrogate pair to the single codepoint it represents.
This implements the formula described at:
http://en.wikipedia.org/wiki/Universal_Character_Set_characters#Surrogates
"""
pair = match.group(0)
codept = 0x10000 + (ord(pair[0]) - 0xd800) * 0x400 + (ord(pair[1]) - 0xdc00)
return chr(codept)
|
Convert a surrogate pair to the single codepoint it represents.
This implements the formula described at:
http://en.wikipedia.org/wiki/Universal_Character_Set_characters#Surrogates
|
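Worked example of the formula for the pair U+D83D U+DE00, which encodes U+1F600:

pair = '\ud83d\ude00'
codept = 0x10000 + (ord(pair[0]) - 0xd800) * 0x400 + (ord(pair[1]) - 0xdc00)
print(hex(codept))  # 0x1f600
print(chr(codept))  # the single emoji character U+1F600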
def parse(args):
"""
Define the available arguments
"""
from tzlocal import get_localzone
try:
timezone = get_localzone()
if isinstance(timezone, pytz.BaseTzInfo):
timezone = timezone.zone
except Exception: # pragma: no cover
timezone = 'UTC'
if timezone == 'local':
timezone = 'UTC'
parser = argparse.ArgumentParser(description="""Bootstrap a django CMS project.
Major usage modes:
- wizard: djangocms -w -p /path/whatever project_name: ask for all the options through a
CLI wizard.
- batch: djangocms project_name: runs with the default values plus any
additional option provided (see below) with no question asked.
- config file: djangocms_installer --config-file /path/to/config.ini project_name: reads values
from an ini-style config file.
Check https://djangocms-installer.readthedocs.io/en/latest/usage.html for detailed usage
information.
""", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--config-file', dest='config_file', action='store',
default=None,
help='Configuration file for djangocms_installer')
parser.add_argument('--config-dump', dest='config_dump', action='store',
default=None,
help='Dump configuration file with current args')
parser.add_argument('--db', '-d', dest='db', action=DbAction,
default='sqlite://localhost/project.db',
help='Database configuration (in URL format). '
'Example: sqlite://localhost/project.db')
parser.add_argument('--i18n', '-i', dest='i18n', action='store',
choices=('yes', 'no'),
default='yes', help='Activate Django I18N / L10N setting; this is '
                                            'automatically activated if more than one '
                                            'language is provided')
parser.add_argument('--use-tz', '-z', dest='use_timezone', action='store',
choices=('yes', 'no'),
default='yes', help='Activate Django timezone support')
parser.add_argument('--timezone', '-t', dest='timezone',
required=False, default=timezone,
action='store', help='Optional default time zone. Example: Europe/Rome')
parser.add_argument('--reversion', '-e', dest='reversion', action='store',
choices=('yes', 'no'),
default='yes', help='Install and configure reversion support '
'(only for django CMS 3.2 and 3.3)')
parser.add_argument('--permissions', dest='permissions', action='store',
choices=('yes', 'no'),
default='no', help='Activate CMS permission management')
parser.add_argument('--pip-options', help='pass custom pip options', default='')
parser.add_argument('--languages', '-l', dest='languages', action='append',
help='Languages to enable. Option can be provided multiple times, or as a '
'comma separated list. Only language codes supported by Django can '
'be used here. Example: en, fr-FR, it-IT')
parser.add_argument('--django-version', dest='django_version', action='store',
choices=data.DJANGO_SUPPORTED,
default=data.DJANGO_DEFAULT, help='Django version')
parser.add_argument('--cms-version', '-v', dest='cms_version', action='store',
choices=data.DJANGOCMS_SUPPORTED,
default=data.DJANGOCMS_DEFAULT, help='django CMS version')
parser.add_argument('--parent-dir', '-p', dest='project_directory',
default='',
action='store', help='Optional project parent directory')
parser.add_argument('--bootstrap', dest='bootstrap', action='store',
choices=('yes', 'no'),
default='no', help='Use Twitter Bootstrap Theme')
parser.add_argument('--templates', dest='templates', action='store',
default='no', help='Use custom template set')
parser.add_argument('--starting-page', dest='starting_page', action='store',
choices=('yes', 'no'),
default='no', help='Load a starting page with examples after installation '
'(english language only). Choose "no" if you use a '
'custom template set.')
parser.add_argument(dest='project_name', action='store',
help='Name of the project to be created')
# Command that lists the supported plugins in verbose description
parser.add_argument('--list-plugins', '-P', dest='plugins', action='store_true',
                        help='List plugins that are going to be installed and configured')
    # Command that dumps the requirements that would be installed
parser.add_argument('--dump-requirements', '-R', dest='dump_reqs', action='store_true',
help='It dumps the requirements that would be installed according to '
'parameters given. Together with --requirements argument is useful '
'for customizing the virtualenv')
# Advanced options. These have a predefined default and are not asked
# by config wizard.
parser.add_argument('--no-input', '-q', dest='noinput', action='store_true',
default=True, help='Don\'t run the configuration wizard, just use the '
'provided values')
parser.add_argument('--wizard', '-w', dest='wizard', action='store_true',
default=False, help='Run the configuration wizard')
parser.add_argument('--verbose', dest='verbose', action='store_true',
default=False,
help='Be more verbose and don\'t swallow subcommands output')
parser.add_argument('--filer', '-f', dest='filer', action='store_true',
default=True, help='Install and configure django-filer plugins '
'- Always enabled')
parser.add_argument('--requirements', '-r', dest='requirements_file', action='store',
default=None, help='Externally defined requirements file')
parser.add_argument('--no-deps', '-n', dest='no_deps', action='store_true',
default=False, help='Don\'t install package dependencies')
parser.add_argument('--no-plugins', dest='no_plugins', action='store_true',
default=False, help='Don\'t install plugins')
parser.add_argument('--no-db-driver', dest='no_db_driver', action='store_true',
default=False, help='Don\'t install database package')
parser.add_argument('--no-sync', '-m', dest='no_sync', action='store_true',
default=False, help='Don\'t run syncdb / migrate after bootstrapping')
parser.add_argument('--no-user', '-u', dest='no_user', action='store_true',
default=False, help='Don\'t create the admin user')
parser.add_argument('--template', dest='template', action='store',
default=None, help='The path or URL to load the django project '
'template from.')
parser.add_argument('--extra-settings', dest='extra_settings', action='store',
default=None, help='The path to an file that contains extra settings.')
parser.add_argument('--skip-empty-check', '-s', dest='skip_project_dir_check',
action='store_true',
default=False, help='Skip the check if project dir is empty.')
parser.add_argument('--delete-project-dir', '-c', dest='delete_project_dir',
action='store_true',
default=False, help='Delete project directory on creation failure.')
parser.add_argument('--utc', dest='utc',
action='store_true',
default=False, help='Use UTC timezone.')
if '--utc' in args:
for action in parser._positionals._actions:
if action.dest == 'timezone':
action.default = 'UTC'
# If config_args then pretend that config args came from the stdin and run parser again.
config_args = ini.parse_config_file(parser, args)
args = parser.parse_args(config_args + args)
if not args.wizard:
args.noinput = True
else:
args.noinput = False
if not args.project_directory:
args.project_directory = args.project_name
args.project_directory = os.path.abspath(args.project_directory)
# First of all, check if the project name is valid
if not validate_project(args.project_name):
sys.stderr.write(
'Project name "{0}" is not a valid app name, or it\'s already defined. '
'Please use only numbers, letters and underscores.\n'.format(args.project_name)
)
sys.exit(3)
# Checking the given path
setattr(args, 'project_path', os.path.join(args.project_directory, args.project_name).strip())
if not args.skip_project_dir_check:
if (os.path.exists(args.project_directory) and
[path for path in os.listdir(args.project_directory) if not path.startswith('.')]):
sys.stderr.write(
'Path "{0}" already exists and is not empty, please choose a different one\n'
'If you want to use this path anyway use the -s flag to skip this check.\n'
''.format(args.project_directory)
)
sys.exit(4)
if os.path.exists(args.project_path):
sys.stderr.write(
'Path "{0}" already exists, please choose a different one\n'.format(args.project_path)
)
sys.exit(4)
if args.config_dump and os.path.isfile(args.config_dump):
sys.stdout.write(
'Cannot dump because given configuration file "{0}" exists.\n'.format(args.config_dump)
)
sys.exit(8)
args = _manage_args(parser, args)
# what do we want here?!
# * if languages are given as multiple arguments, let's use it as is
# * if no languages are given, use a default and stop handling it further
# * if languages are given as a comma-separated list, split it and use the
# resulting list.
if not args.languages:
try:
args.languages = [locale.getdefaultlocale()[0].split('_')[0]]
except Exception: # pragma: no cover
args.languages = ['en']
elif isinstance(args.languages, six.string_types):
args.languages = args.languages.split(',')
elif len(args.languages) == 1 and isinstance(args.languages[0], six.string_types):
args.languages = args.languages[0].split(',')
args.languages = [lang.strip().lower() for lang in args.languages]
if len(args.languages) > 1:
args.i18n = 'yes'
args.aldryn = False
args.filer = True
# Convert version to numeric format for easier checking
try:
django_version, cms_version = supported_versions(args.django_version, args.cms_version)
cms_package = data.PACKAGE_MATRIX.get(
cms_version, data.PACKAGE_MATRIX[data.DJANGOCMS_LTS]
)
except RuntimeError as e: # pragma: no cover
sys.stderr.write(compat.unicode(e))
sys.exit(6)
if django_version is None: # pragma: no cover
sys.stderr.write(
'Please provide a Django supported version: {0}. Only Major.Minor '
'version selector is accepted\n'.format(', '.join(data.DJANGO_SUPPORTED))
)
sys.exit(6)
if cms_version is None: # pragma: no cover
sys.stderr.write(
'Please provide a django CMS supported version: {0}. Only Major.Minor '
'version selector is accepted\n'.format(', '.join(data.DJANGOCMS_SUPPORTED))
)
sys.exit(6)
default_settings = '{}.settings'.format(args.project_name)
env_settings = os.environ.get('DJANGO_SETTINGS_MODULE', default_settings)
if env_settings != default_settings:
sys.stderr.write(
'`DJANGO_SETTINGS_MODULE` is currently set to \'{0}\' which is not compatible with '
'djangocms installer.\nPlease unset `DJANGO_SETTINGS_MODULE` and re-run the installer '
'\n'.format(env_settings)
)
sys.exit(10)
if not getattr(args, 'requirements_file'):
requirements = []
# django CMS version check
if args.cms_version == 'develop':
requirements.append(cms_package)
warnings.warn(data.VERSION_WARNING.format('develop', 'django CMS'))
elif args.cms_version == 'rc': # pragma: no cover
requirements.append(cms_package)
elif args.cms_version == 'beta': # pragma: no cover
requirements.append(cms_package)
warnings.warn(data.VERSION_WARNING.format('beta', 'django CMS'))
else:
requirements.append(cms_package)
if args.cms_version in ('rc', 'develop'):
requirements.extend(data.REQUIREMENTS['cms-master'])
elif LooseVersion(cms_version) >= LooseVersion('3.6'):
requirements.extend(data.REQUIREMENTS['cms-3.6'])
elif LooseVersion(cms_version) >= LooseVersion('3.5'):
requirements.extend(data.REQUIREMENTS['cms-3.5'])
elif LooseVersion(cms_version) >= LooseVersion('3.4'):
requirements.extend(data.REQUIREMENTS['cms-3.4'])
if not args.no_db_driver:
requirements.append(args.db_driver)
if not args.no_plugins:
if args.cms_version in ('rc', 'develop'):
requirements.extend(data.REQUIREMENTS['plugins-master'])
elif LooseVersion(cms_version) >= LooseVersion('3.6'):
requirements.extend(data.REQUIREMENTS['plugins-3.6'])
elif LooseVersion(cms_version) >= LooseVersion('3.5'):
requirements.extend(data.REQUIREMENTS['plugins-3.5'])
elif LooseVersion(cms_version) >= LooseVersion('3.4'):
requirements.extend(data.REQUIREMENTS['plugins-3.4'])
requirements.extend(data.REQUIREMENTS['filer'])
if args.aldryn: # pragma: no cover
requirements.extend(data.REQUIREMENTS['aldryn'])
# Django version check
if args.django_version == 'develop': # pragma: no cover
requirements.append(data.DJANGO_DEVELOP)
warnings.warn(data.VERSION_WARNING.format('develop', 'Django'))
elif args.django_version == 'beta': # pragma: no cover
requirements.append(data.DJANGO_BETA)
warnings.warn(data.VERSION_WARNING.format('beta', 'Django'))
else:
requirements.append('Django<{0}'.format(less_than_version(django_version)))
if django_version == '1.8':
requirements.extend(data.REQUIREMENTS['django-1.8'])
elif django_version == '1.9':
requirements.extend(data.REQUIREMENTS['django-1.9'])
elif django_version == '1.10':
requirements.extend(data.REQUIREMENTS['django-1.10'])
elif django_version == '1.11':
requirements.extend(data.REQUIREMENTS['django-1.11'])
elif django_version == '2.0':
requirements.extend(data.REQUIREMENTS['django-2.0'])
elif django_version == '2.1':
requirements.extend(data.REQUIREMENTS['django-2.1'])
requirements.extend(data.REQUIREMENTS['default'])
setattr(args, 'requirements', '\n'.join(requirements).strip())
# Convenient shortcuts
setattr(args, 'cms_version', cms_version)
setattr(args, 'django_version', django_version)
setattr(args, 'settings_path',
os.path.join(args.project_directory, args.project_name, 'settings.py').strip())
setattr(args, 'urlconf_path',
os.path.join(args.project_directory, args.project_name, 'urls.py').strip())
if args.config_dump:
ini.dump_config_file(args.config_dump, args, parser)
return args
|
Define the available arguments
|
def uintersect1d(arr1, arr2, assume_unique=False):
"""Find the sorted unique elements of the two input arrays.
A wrapper around numpy.intersect1d that preserves units. All input arrays
must have the same units. See the documentation of numpy.intersect1d for
full details.
Examples
--------
>>> from unyt import cm
>>> A = [1, 2, 3]*cm
>>> B = [2, 3, 4]*cm
>>> uintersect1d(A, B)
unyt_array([2, 3], 'cm')
"""
v = np.intersect1d(arr1, arr2, assume_unique=assume_unique)
v = _validate_numpy_wrapper_units(v, [arr1, arr2])
return v
|
Find the sorted unique elements of the two input arrays.
A wrapper around numpy.intersect1d that preserves units. All input arrays
must have the same units. See the documentation of numpy.intersect1d for
full details.
Examples
--------
>>> from unyt import cm
>>> A = [1, 2, 3]*cm
>>> B = [2, 3, 4]*cm
>>> uintersect1d(A, B)
unyt_array([2, 3], 'cm')
|
def is_blackout(self) -> bool:
"""Does this alert match a blackout period?"""
if not current_app.config['NOTIFICATION_BLACKOUT']:
if self.severity in current_app.config['BLACKOUT_ACCEPT']:
return False
return db.is_blackout_period(self)
|
Does this alert match a blackout period?
|
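A standalone sketch (a plain dict with assumed values stands in for the Flask application config) of the two settings the check above consults: with NOTIFICATION_BLACKOUT disabled, alerts whose severity is listed in BLACKOUT_ACCEPT are never treated as blacked out.
config = {
    'NOTIFICATION_BLACKOUT': False,            # assumed: suppress notifications only, keep alerts open
    'BLACKOUT_ACCEPT': ['critical', 'major'],  # assumed severities that bypass a blackout
}
severity = 'critical'
not_blacked_out = not config['NOTIFICATION_BLACKOUT'] and severity in config['BLACKOUT_ACCEPT']
print(not_blacked_out)  # True -> is_blackout() would return False for this alert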
def file_ops(staticfied, args):
"""Write to stdout or a file"""
destination = args.o or args.output
if destination:
with open(destination, 'w') as file:
file.write(staticfied)
else:
print(staticfied)
|
Write to stdout or a file
|
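A hypothetical invocation of file_ops above (assuming the function is in scope); types.SimpleNamespace stands in for the argparse namespace carrying the -o / --output options.
from types import SimpleNamespace

args = SimpleNamespace(o=None, output=None)               # neither output flag given
file_ops('<html><body>static page</body></html>', args)  # falls back to printing on stdout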
def to_dict(self):
"""
Encode the name, the status of all checks, and the current overall status.
"""
# evaluate checks
checks = {
key: HealthResult.evaluate(func, self.graph)
for key, func in self.checks.items()
}
dct = dict(
# return the service name helps for routing debugging
name=self.name,
ok=all(checks.values()),
)
if checks:
dct["checks"] = {
key: checks[key].to_dict()
for key in sorted(checks.keys())
}
return dct
|
Encode the name, the status of all checks, and the current overall status.
|
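A sketch of the dict shape the method above returns, with a hypothetical service name and check names; the per-check entries come from HealthResult.to_dict(), whose exact fields depend on that class, so only a minimal assumed shape is shown here.
example = {
    'name': 'example-service',      # assumed service name
    'ok': True,                     # True only if every check evaluated truthy
    'checks': {
        'database': {'ok': True},   # assumed per-check payload
        'queue': {'ok': True},
    },
}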
def __restrictIndex(self, i):
"""Provides list-like handling of a record index with a clearer
error message if the index is out of bounds."""
if self.numRecords:
rmax = self.numRecords - 1
if abs(i) > rmax:
raise IndexError("Shape or Record index out of range.")
if i < 0: i = range(self.numRecords)[i]
return i
|
Provides list-like handling of a record index with a clearer
error message if the index is out of bounds.
|
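The same negative-index normalisation shown standalone, with an assumed record count of 10:
num_records = 10
i = -1
if abs(i) > num_records - 1:
    raise IndexError("Shape or Record index out of range.")
if i < 0:
    i = range(num_records)[i]  # range supports negative indexing: -1 -> 9
print(i)  # 9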
def _removecleaner(self, cleaner):
"""
Remove the cleaner from the list if it already exists. Returns True if
the cleaner was removed.
"""
oldlen = len(self._old_cleaners)
self._old_cleaners = [
oldc for oldc in self._old_cleaners
if not oldc.issame(cleaner)
]
return len(self._old_cleaners) != oldlen
|
Remove the cleaner from the list if it already exists. Returns True if
the cleaner was removed.
|
def insert_from_segwizard(self, fileobj, instruments, name, version = None, comment = None):
"""
Parse the contents of the file object fileobj as a
segwizard-format segment list, and insert the result as a
new list of "active" segments into this LigolwSegments
object. A new entry will be created in the segment_definer
table for the segment list, and instruments, name and
comment are used to populate the entry's metadata. Note
that the "valid" segments are left empty, nominally
indicating that there are no periods of validity.
"""
self.add(LigolwSegmentList(active = segmentsUtils.fromsegwizard(fileobj, coltype = LIGOTimeGPS), instruments = instruments, name = name, version = version, comment = comment))
|
Parse the contents of the file object fileobj as a
segwizard-format segment list, and insert the result as a
new list of "active" segments into this LigolwSegments
object. A new entry will be created in the segment_definer
table for the segment list, and instruments, name and
comment are used to populate the entry's metadata. Note
that the "valid" segments are left empty, nominally
indicating that there are no periods of validity.
|
def share(self, base=None, keys=None, by=None, **kwargs):
"""
Share the formatoptions of one plotter with all the others
This method shares specified formatoptions from `base` with all the
plotters in this instance.
Parameters
----------
base: None, Plotter, xarray.DataArray, InteractiveList, or list of them
The source of the plotter that shares its formatoptions with the
others. It can be None (then the first instance in this project
is used), a :class:`~psyplot.plotter.Plotter` or any data object
with a *psy* attribute. If `by` is not None, then it is expected
that `base` is a list of data objects for each figure/axes
%(Plotter.share.parameters.keys)s
by: {'fig', 'figure', 'ax', 'axes'}
Share the formatoptions only with the others on the same
``'figure'`` or the same ``'axes'``. In this case, base must either
be ``None`` or a list of the types specified for `base`
%(Plotter.share.parameters.no_keys|plotters)s
See Also
--------
psyplot.plotter.share"""
if by is not None:
if base is not None:
if hasattr(base, 'psy') or isinstance(base, Plotter):
base = [base]
if by.lower() in ['ax', 'axes']:
bases = {ax: p[0] for ax, p in six.iteritems(
Project(base).axes)}
elif by.lower() in ['fig', 'figure']:
bases = {fig: p[0] for fig, p in six.iteritems(
Project(base).figs)}
else:
raise ValueError(
"*by* must be out of {'fig', 'figure', 'ax', 'axes'}. "
"Not %s" % (by, ))
else:
bases = {}
projects = self.axes if by.lower() in ['ax', 'axes'] else self.figs
for obj, p in projects.items():
p.share(bases.get(obj), keys, **kwargs)
else:
plotters = self.plotters
if not plotters:
return
if base is None:
if len(plotters) == 1:
return
base = plotters[0]
plotters = plotters[1:]
elif not isinstance(base, Plotter):
base = getattr(getattr(base, 'psy', base), 'plotter', base)
base.share(plotters, keys=keys, **kwargs)
|
Share the formatoptions of one plotter with all the others
This method shares specified formatoptions from `base` with all the
plotters in this instance.
Parameters
----------
base: None, Plotter, xarray.DataArray, InteractiveList, or list of them
The source of the plotter that shares its formatoptions with the
others. It can be None (then the first instance in this project
is used), a :class:`~psyplot.plotter.Plotter` or any data object
with a *psy* attribute. If `by` is not None, then it is expected
that `base` is a list of data objects for each figure/axes
%(Plotter.share.parameters.keys)s
by: {'fig', 'figure', 'ax', 'axes'}
Share the formatoptions only with the others on the same
``'figure'`` or the same ``'axes'``. In this case, base must either
be ``None`` or a list of the types specified for `base`
%(Plotter.share.parameters.no_keys|plotters)s
See Also
--------
psyplot.plotter.share
|
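A generic sketch of the grouping applied when by='axes': plotters are bucketed by their axes (plain strings here instead of matplotlib axes or psyplot objects, all names assumed) and the first entry of each bucket acts as the base that shares its formatoptions with the rest.
plotters = [('ax1', 'p1'), ('ax1', 'p2'), ('ax2', 'p3')]  # (axes, plotter) pairs
groups = {}
for ax, p in plotters:
    groups.setdefault(ax, []).append(p)
for ax, members in groups.items():
    base, others = members[0], members[1:]
    print(base, 'shares formatoptions with', others, 'on', ax)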
def childgroup(self, field):
"""
Return a list of fields stored by row regarding the configured grid
:param field: The original field this widget is attached to
"""
grid = getattr(self, "grid", None)
named_grid = getattr(self, "named_grid", None)
if grid is not None:
childgroup = self._childgroup(field.children, grid)
elif named_grid is not None:
childgroup = self._childgroup_by_name(field.children, named_grid)
else:
raise AttributeError(u"Missing the grid or named_grid argument")
return childgroup
|
Return a list of fields stored by row regarding the configured grid
:param field: The original field this widget is attached to
|
def md_to_pdf(input_name, output_name):
"""
Converts an input MarkDown file to a PDF of the given output name.
Parameters
==========
input_name : String
Relative file location of the input file to where this function is being called.
output_name : String
Relative file location of the output file to where this function is being called. Note that .pdf can be omitted.
Examples
========
Suppose we have a directory as follows:
data/
doc.md
To convert the document:
>>> from aide_document import convert
>>> convert.md_to_pdf('data/doc.md', 'data/doc.pdf')
.pdf can also be omitted from the second argument.
"""
if output_name[-4:] == '.pdf':
os.system("pandoc " + input_name + " -o " + output_name)
else:
os.system("pandoc " + input_name + " -o " + output_name + ".pdf" )
|
Converts an input MarkDown file to a PDF of the given output name.
Parameters
==========
input_name : String
Relative file location of the input file to where this function is being called.
output_name : String
Relative file location of the output file to where this function is being called. Note that .pdf can be omitted.
Examples
========
Suppose we have a directory as follows:
data/
doc.md
To convert the document:
>>> from aide_document import convert
>>> convert.md_to_pdf('data/doc.md', 'data/doc.pdf')
.pdf can also be omitted from the second argument.
|
def search_seqs(self, seqrec, in_seq, locus, run=0, partial_ann=None):
"""
search_seqs - method for annotating a BioPython sequence without alignment
:param seqrec: The reference sequence
:type seqrec: SeqRecord
:param locus: The gene locus associated with the sequence.
:type locus: str
:param in_seq: The input sequence
:type in_seq: SeqRecord
:param run: The number of runs that have been done
:type run: int
:param partial_ann: A partial annotation from a previous step
:type partial_ann: :ref:`ann`
:rtype: :ref:`ann`
Example usage:
>>> from Bio.Seq import Seq
>>> from seqann.seq_search import SeqSearch
>>> inseq = Seq('AGAGACTCTCCCGAGGATTTCGTGTACCAGTTTAAGGCCATGTGCTACTTCACC')
>>> sqsrch = SeqSearch()
>>> ann = sqsrch.search_seqs(refseqs, inseq)
"""
# Extract out the sequences and feature names
# from the reference sequences
# The mapped features will be subtracted from seq_covered
# so the final seq_covered number will reflect the remaining
# number of base pairs that haven't been mapped.
#
# The coordinates and mapping will help determine what positions
# in the sequence have been mapped and to what features. The
# missing blocks variable will be generated using these.
structures = get_structures()
seq_covered = len(in_seq.seq)
coordinates = dict(map(lambda x: [x, 1],
[i for i in range(0, len(in_seq.seq)+1)]))
mapping = dict(map(lambda x: [x, 1],
[i for i in range(0, len(in_seq.seq)+1)]))
ambig_map = {}
found_feats = {}
feat_missing = {}
method = "nt_search" if not partial_ann else partial_ann.method
# If the partial annotation is provided
# then make the found_feats equal to
# what has already been annotated
feats = get_features(seqrec)
if partial_ann:
found_feats = partial_ann.features
if self.verbose and self.verbosity > 4:
self.logger.info("Found partial features:")
for f in found_feats:
self.logger.info(f)
# Skip references that only have features
# that have already been annotated
if len([f for f in feats if f in found_feats]) == len(feats):
if self.verbose:
self.logger.info("Skipping incomplete refseq")
return partial_ann
if self.verbose and self.verbosity > 1:
self.logger.info("Using partial annotation | "
+ locus + " "
+ str(len(partial_ann.features)))
coordinates = dict(map(lambda l: [l, 1],
[item for sublist
in partial_ann.blocks
for item in sublist]))
seq_covered = partial_ann.covered
mapping = partial_ann.mapping
if self.verbose and self.verbosity > 2:
self.logger.info("Partial sequence coverage = "
+ str(seq_covered))
self.logger.info("Partial sequence method = "
+ method)
added_feat = {}
deleted_coords = {}
for feat_name in sorted(feats,
key=lambda k: structures[locus][k]):
# skip if partial annotation is provided
# and the feat name is not one of the
# missing features
if partial_ann and feat_name not in partial_ann.refmissing:
if self.verbose and self.verbosity > 1:
self.logger.info("Skipping " + feat_name
+ " - Already annotated")
continue
if self.verbose and self.verbosity > 1:
self.logger.info("Running seqsearch for " + feat_name)
# Search for the reference feature sequence in the
# input sequence. Record the coordinates if it's
# found and if it's found in multiple spots. If it
# is not found, then record that feature as missing.
seq_search = nt_search(str(in_seq.seq), str(feats[feat_name]))
if len(seq_search) == 2:
if self.verbose and self.verbosity > 0:
self.logger.info("Found exact match for " + feat_name)
seq_covered -= len(str(feats[feat_name]))
end = int(len(str(feats[feat_name])) + seq_search[1])
if feat_name == 'three_prime_UTR' \
and len(str(in_seq.seq)) > end:
end = len(str(in_seq.seq))
# If the feature is found and it's a five_prime_UTR then
# the start should always be 0, so insertions at the
# beginning of the sequence will be found.
start = seq_search[1] if feat_name != 'five_prime_UTR' else 0
si = seq_search[1]+1 if seq_search[1] != 0 and \
feat_name != 'five_prime_UTR' else 0
# check if this feature has already been mapped
mapcheck = set([0 if i in coordinates else 1
for i in range(si, end+1)])
# Don't map features if they are out of order
skip = False
if found_feats and len(found_feats) > 0:
for f in found_feats:
o1 = structures[locus][feat_name]
o2 = structures[locus][f]
loctyp = loctype(found_feats[f].location.start,
found_feats[f].location.end,
start, end)
if o1 < o2 and loctyp:
skip = True
if self.verbose:
self.logger.info("Skipping map for "
+ feat_name)
elif o2 < o1 and not loctyp:
skip = True
if self.verbose:
self.logger.info("Skipping map for "
+ feat_name)
if 1 not in mapcheck and not skip:
for i in range(si, end+1):
if i in coordinates:
if feat_name == "exon_8" or feat_name == 'three_prime_UTR':
deleted_coords.update({i: coordinates[i]})
del coordinates[i]
else:
if self.verbose:
self.logger.error("seqsearch - shouldn't be here "
+ locus + " - "
+ " - " + feat_name)
mapping[i] = feat_name
found_feats.update({feat_name:
SeqFeature(
FeatureLocation(
ExactPosition(start),
ExactPosition(end), strand=1),
type=feat_name)})
if feat_name == "exon_8" or feat_name == 'three_prime_UTR':
added_feat.update({feat_name: feats[feat_name]})
if self.verbose and self.verbosity > 3:
self.logger.info("Coordinates | Start = " + str(start) + " - End = " + str(end))
elif(len(seq_search) > 2):
if self.verbose and self.verbosity > 1:
self.logger.info("Found " + str(len(seq_search))
+ " matches for " + feat_name)
new_seq = [seq_search[0]]
for i in range(1, len(seq_search)):
tnp = seq_search[i]+1
if seq_search[i] in coordinates or tnp in coordinates:
new_seq.append(seq_search[i])
seq_search = new_seq
if(partial_ann and feat_name == "exon_8" and run > 0):
missing_feats = sorted(list(partial_ann.missing.keys()))
# * HARD CODED LOGIC * #
# > exon8 in class I maps to multiple spots in a sequence,
# often in the 3' UTR. These features need to be mapped
# last to make sure exon8 is not mapped incorrectly.
if(missing_feats == ['exon_8', 'three_prime_UTR']
and len(seq_search) <= 3):
if self.verbose and self.verbosity > 0:
self.logger.info("Resolving exon_8")
seq_covered -= len(str(feats[feat_name]))
end = int(len(str(feats[feat_name])) + seq_search[1])
# If the feature is found and it's a five_prime_UTR then
# the start should always be 0, so insertions at the
# beginning of the sequence will be found.
start = seq_search[1]
si = seq_search[1]+1 if seq_search[1] != 0 else 0
# check if this feature has already been mapped
mapcheck = set([0 if i in coordinates else 1
for i in range(si, end+1)])
for i in range(si, end+1):
if i in coordinates:
del coordinates[i]
else:
if self.verbose:
self.logger.error("seqsearch - shouldn't be here "
+ locus + " - "
+ " - " + feat_name)
mapping[i] = feat_name
found_feats.update({feat_name:
SeqFeature(
FeatureLocation(
ExactPosition(start),
ExactPosition(end), strand=1),
type=feat_name)})
if self.verbose and self.verbosity > 0:
self.logger.info("Coordinates | Start = " + str(start) + " - End = " + str(end))
else:
if self.verbose and self.verbosity > 0:
self.logger.info("Adding ambig feature " + feat_name)
feat_missing.update({feat_name: feats[feat_name]})
ambig_map.update({feat_name:
seq_search[1:len(seq_search)]})
else:
if self.verbose and self.verbosity > 0:
self.logger.info("Adding ambig feature " + feat_name)
feat_missing.update({feat_name: feats[feat_name]})
ambig_map.update({feat_name: seq_search[1:len(seq_search)]})
else:
if self.verbose and self.verbosity > 1:
self.logger.info("No match for " + feat_name)
feat_missing.update({feat_name: feats[feat_name]})
blocks = getblocks(coordinates)
exact_matches = list(found_feats.keys())
# * HARD CODED LOGIC * #
# >
#
# HLA-DRB1 exon3 exact match - with intron1 and 3 missing
if('exon_3' in exact_matches and run == 99 and locus == 'HLA-DRB1'
and 'exon_2' in feat_missing and (len(blocks) == 1 or len(blocks) == 2)):
for b in blocks:
x = b[len(b)-1]
if x == max(list(mapping.keys())):
featname = "intron_3"
found_feats.update({featname:
SeqFeature(
FeatureLocation(
ExactPosition(b[0]-1),
ExactPosition(b[len(b)-1]),
strand=1),
type=featname)})
else:
featname = "exon_2"
found_feats.update({featname:
SeqFeature(
FeatureLocation(
ExactPosition(b[0]),
ExactPosition(b[len(b)-1]),
strand=1),
type=featname)})
seq_covered -= len(b)
if self.verbose and self.verbosity > 1:
self.logger.info("Successfully annotated class II DRB1 sequence")
return Annotation(features=found_feats,
covered=seq_covered,
seq=in_seq,
missing=feat_missing,
ambig=ambig_map,
method=method,
mapping=mapping,
exact_match=exact_matches)
# If it's a class II sequence and
# exon_2 is an exact match
# * HARD CODED LOGIC * #
# > It's common for exon2 to be fully sequenced
# but intron_2 and intron_1 to be partially sequenced,
# which can make it hard to map those stretches to features.
# If there are two missing blocks that is small enough
# and they are before and after exon2, then it's very
# very likely to be intron_2 and intron_1.
if 'exon_2' in exact_matches and len(blocks) == 2 \
and is_classII(locus) and seq_covered < 300:
if self.verbose and self.verbosity > 1:
self.logger.info("Running search for class II sequence")
r = True
for b in blocks:
x = b[len(b)-1]
if x == max(list(mapping.keys())):
x = b[0]-1
else:
x += 1
f = mapping[x]
if f != 'exon_2':
r = False
if r:
for b in blocks:
x = b[len(b)-1]
if x == max(list(mapping.keys())):
featname = "intron_2"
found_feats.update({featname:
SeqFeature(
FeatureLocation(
ExactPosition(b[0]-1),
ExactPosition(b[len(b)-1]),
strand=1),
type=featname)})
else:
featname = "intron_1"
found_feats.update({featname:
SeqFeature(
FeatureLocation(
ExactPosition(b[0]),
ExactPosition(b[len(b)-1]),
strand=1),
type=featname)})
seq_covered -= len(b)
if self.verbose and self.verbosity > 1:
self.logger.info("Successfully annotated class II sequence")
return Annotation(features=found_feats,
covered=seq_covered,
seq=in_seq,
missing=feat_missing,
ambig=ambig_map,
method=method,
mapping=mapping,
exact_match=exact_matches)
annotated_feats, mb, mapping = self._resolve_unmapped(blocks,
feat_missing,
ambig_map,
mapping,
found_feats,
locus,
seq_covered
)
# * HARD CODED LOGIC * #
if(not mb and blocks and len(feat_missing.keys()) == 0
and len(ambig_map.keys()) == 0):
mb = blocks
if mb:
# Unmap exon 8
if locus in ['HLA-C', 'HLA-A'] and len(in_seq.seq) < 3000 \
and 'exon_8' in exact_matches:
for i in deleted_coords:
mapping[i] = 1
coordinates.update(deleted_coords)
mb = getblocks(coordinates)
feat_missing.update(added_feat)
# Delete from found features
del exact_matches[exact_matches.index('exon_8')]
del found_feats['exon_8']
if 'exon_8' in annotated_feats:
del annotated_feats['exon_8']
if 'three_prime_UTR' in found_feats:
del found_feats['three_prime_UTR']
if 'three_prime_UTR' in annotated_feats:
del annotated_feats['three_prime_UTR']
refmissing = [f for f in structures[locus]
if f not in annotated_feats]
if self.verbose and self.verbosity > 1:
self.logger.info("* Annotation not complete *")
# Print out which features the refseq was missing
if self.verbose and self.verbosity > 2:
self.logger.info("Refseq was missing these features = " + ",".join(list(refmissing)))
# Print out what features were ambig matches
if self.verbose and self.verbosity > 1 and len(ambig_map) > 1:
self.logger.info("Features with ambig matches = " + ",".join(list(ambig_map)))
# Print out what features were exact matches
if self.verbose and self.verbosity > 2 and len(exact_matches) > 1:
self.logger.info("Features exact matches = " + ",".join(list(exact_matches)))
# Print out what features have been annotated
if self.verbose and self.verbosity > 1 and len(annotated_feats) > 1:
self.logger.info("Features annotated = " + ",".join(list(annotated_feats)))
# Print out what features are missing
if self.verbose and self.verbosity > 1 and len(feat_missing) > 1:
self.logger.info("Features missing = " + ",".join(list(feat_missing)))
annotation = Annotation(features=annotated_feats,
covered=seq_covered,
seq=in_seq,
missing=feat_missing,
ambig=ambig_map,
blocks=mb,
method=method,
refmissing=refmissing,
mapping=mapping,
exact_match=exact_matches,
annotation=None)
else:
mb = None
# Unmap exon 8
if locus in ['HLA-C', 'HLA-A'] and len(in_seq.seq) < 600 \
and 'exon_8' in exact_matches \
and 'three_prime_UTR' in annotated_feats\
and 'three_prime_UTR' not in exact_matches:
for i in deleted_coords:
mapping[i] = 1
coordinates.update(deleted_coords)
mb = getblocks(coordinates)
feat_missing.update(added_feat)
del exact_matches[exact_matches.index('exon_8')]
del found_feats['exon_8']
if 'exon_8' in annotated_feats:
del annotated_feats['exon_8']
if 'three_prime_UTR' in found_feats:
del found_feats['three_prime_UTR']
if 'three_prime_UTR' in annotated_feats:
del annotated_feats['three_prime_UTR']
if self.verbose:
self.logger.info("* No missing blocks after seq_search *")
# Print out what features were ambig matches
if self.verbose and self.verbosity > 0 and len(ambig_map) > 1:
self.logger.info("Features with ambig matches = " + ",".join(list(ambig_map)))
# Print out what features were exact matches
if self.verbose and self.verbosity > 0 and len(exact_matches) > 1:
self.logger.info("Features exact matches = " + ",".join(list(exact_matches)))
# Print out what features have been annotated
if self.verbose and self.verbosity > 0 and len(annotated_feats) > 1:
self.logger.info("Features annotated = " + ",".join(list(annotated_feats)))
# Print out what features are missing
if self.verbose and self.verbosity > 0 and len(feat_missing) > 1:
self.logger.info("Features missing = " + ",".join(list(feat_missing)))
annotation = Annotation(features=annotated_feats,
covered=seq_covered,
seq=in_seq,
missing=feat_missing,
ambig=ambig_map,
method=method,
blocks=mb,
mapping=mapping,
exact_match=exact_matches,
annotation=None)
return annotation
|
search_seqs - method for annotating a BioPython sequence without alignment
:param seqrec: The reference sequence
:type seqrec: SeqRecord
:param locus: The gene locus associated with the sequence.
:type locus: str
:param in_seq: The input sequence
:type in_seq: SeqRecord
:param run: The number of runs that have been done
:type run: int
:param partial_ann: A partial annotation from a previous step
:type partial_ann: :ref:`ann`
:rtype: :ref:`ann`
Example usage:
>>> from Bio.Seq import Seq
>>> from seqann.seq_search import SeqSearch
>>> inseq = Seq('AGAGACTCTCCCGAGGATTTCGTGTACCAGTTTAAGGCCATGTGCTACTTCACC')
>>> sqsrch = SeqSearch()
>>> ann = sqsrch.search_seqs(refseqs, inseq)
|