| code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) |
|---|---|
def includeme(config):
"""
Add pyramid_htmlmin in your pyramid include list.
"""
log.info('Loading htmlmin pyramid plugin')
for key, val in config.registry.settings.items():
if key.startswith('htmlmin.'):
log.debug('Setup %s = %s' % (key, val))
htmlmin_opts[key[8:]] = asbool(val)
if key.startswith('pyramid_htmlmin.'):
log.debug('Setup %s = %s' % (key, val))
opts[key[16:]] = asbool(val)
config.add_tween('pyramid_htmlmin.htmlmin_tween_factory', under=INGRESS)
|
Add pyramid_htmlmin in your pyramid include list.
|
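A minimal sketch of how these prefixes might be exercised when wiring up the plugin; the specific option names below (beyond the 'htmlmin.' and 'pyramid_htmlmin.' prefixes the code strips) are assumptions for illustration.
```python
# Hypothetical Pyramid setup exercising the prefixes handled by includeme().
from pyramid.config import Configurator

settings = {
    'htmlmin.remove_comments': 'true',    # forwarded to htmlmin as a boolean (name assumed)
    'pyramid_htmlmin.enabled': 'true',    # plugin-level option (name assumed)
}
config = Configurator(settings=settings)
config.include('pyramid_htmlmin')  # calls includeme(config) and registers the tween
```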
def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None, **args):
'''
Delete a cache security group.
Example:
.. code-block:: bash
salt myminion boto3_elasticache.delete_cache_security_group myelasticachesg
'''
return _delete_resource(name, name_param='CacheSecurityGroupName',
desc='cache security group', res_type='cache_security_group',
region=region, key=key, keyid=keyid, profile=profile, **args)
|
Delete a cache security group.
Example:
.. code-block:: bash
salt myminion boto3_elasticache.delete_cache_security_group myelasticachesg
|
def Right(self, n = 1, dl = 0):
"""右方向键n次
"""
self.Delay(dl)
self.keyboard.tap_key(self.keyboard.right_key, n)
|
Press the right arrow key n times.
|
def flush(self):
"""
Flush the compressor. This will emit the remaining output data, but
will not destroy the compressor. It can be used, for example, to ensure
that given chunks of content will decompress immediately.
"""
chunks = []
chunks.append(self._compress(b'', lib.BROTLI_OPERATION_FLUSH))
while lib.BrotliEncoderHasMoreOutput(self._encoder) == lib.BROTLI_TRUE:
chunks.append(self._compress(b'', lib.BROTLI_OPERATION_FLUSH))
return b''.join(chunks)
|
Flush the compressor. This will emit the remaining output data, but
will not destroy the compressor. It can be used, for example, to ensure
that given chunks of content will decompress immediately.
|
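A short usage sketch, assuming the brotlipy-style `Compressor`/`Decompressor` objects this method appears to belong to.
```python
import brotli  # assuming the brotlipy package that exposes Compressor/Decompressor

compressor = brotli.Compressor()
chunk = compressor.compress(b'hello ')
chunk += compressor.flush()  # emit pending output without finishing the stream

decompressor = brotli.Decompressor()
print(decompressor.decompress(chunk))  # b'hello ' is available immediately
```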
def sphlat(r, colat, lons):
"""
Convert from spherical coordinates to latitudinal coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/sphlat_c.html
:param r: Distance of the point from the origin.
:type r: float
:param colat: Angle of the point from positive z axis (radians).
:type colat: float
:param lons: Angle of the point from the XZ plane (radians).
:type lons: float
:return:
Distance of a point from the origin,
Angle of the point from the XZ plane in radians,
Angle of the point from the XY plane in radians.
:rtype: tuple
"""
r = ctypes.c_double(r)
colat = ctypes.c_double(colat)
lons = ctypes.c_double(lons)
radius = ctypes.c_double()
lon = ctypes.c_double()
lat = ctypes.c_double()
libspice.sphlat_c(r, colat, lons, ctypes.byref(radius), ctypes.byref(lon),
ctypes.byref(lat))
return radius.value, lon.value, lat.value
|
Convert from spherical coordinates to latitudinal coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/sphlat_c.html
:param r: Distance of the point from the origin.
:type r: float
:param colat: Angle of the point from positive z axis (radians).
:type colat: float
:param lons: Angle of the point from the XZ plane (radians).
:type lons: float
:return:
Distance of a point from the origin,
Angle of the point from the XZ plane in radians,
Angle of the point from the XY plane in radians.
:rtype: tuple
|
def _create_storage(storage_service, trajectory=None, **kwargs):
"""Creates a service from a constructor and checks which kwargs are not used"""
kwargs_copy = kwargs.copy()
kwargs_copy['trajectory'] = trajectory
matching_kwargs = get_matching_kwargs(storage_service, kwargs_copy)
storage_service = storage_service(**matching_kwargs)
unused_kwargs = set(kwargs.keys()) - set(matching_kwargs.keys())
return storage_service, unused_kwargs
|
Creates a service from a constructor and checks which kwargs are not used
|
def lmean (inlist):
"""
Returns the arithmetic mean of the values in the passed list.
Assumes a '1D' list, but will function on the 1st dim of an array(!).
Usage: lmean(inlist)
"""
sum = 0
for item in inlist:
sum = sum + item
return sum/float(len(inlist))
|
Returns the arithmetic mean of the values in the passed list.
Assumes a '1D' list, but will function on the 1st dim of an array(!).
Usage: lmean(inlist)
|
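A quick sanity check of the function above.
```python
# The mean of 1..4 is 2.5.
values = [1, 2, 3, 4]
print(lmean(values))  # 2.5
```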
def node_link_graph(data, directed=False, attrs=_attrs):
"""Return graph from node-link data format.
Parameters
----------
data : dict
node-link formatted graph data
directed : bool
If True, and direction not specified in data, return a directed graph.
attrs : dict
A dictionary that contains three keys 'id', 'source', 'target'.
The corresponding values provide the attribute names for storing
Dynetx-internal graph data. Default value:
:samp:`dict(id='id', source='source', target='target')`.
Returns
-------
G : DyNetx graph
A DyNetx graph object
Examples
--------
>>> from dynetx.readwrite import json_graph
>>> G = dn.DynGraph([(1,2)])
>>> data = json_graph.node_link_data(G)
>>> H = json_graph.node_link_graph(data)
See Also
--------
node_link_data
"""
directed = data.get('directed', directed)
graph = dn.DynGraph()
if directed:
graph = graph.to_directed()
id_ = attrs['id']
mapping = []
graph.graph = data.get('graph', {})
c = count()
for d in data['nodes']:
node = d.get(id_, next(c))
mapping.append(node)
nodedata = dict((make_str(k), v) for k, v in d.items() if k != id_)
graph.add_node(node, **nodedata)
for d in data['links']:
graph.add_interaction(d['source'], d["target"], d['time'])
return graph
|
Return graph from node-link data format.
Parameters
----------
data : dict
node-link formatted graph data
directed : bool
If True, and direction not specified in data, return a directed graph.
attrs : dict
A dictionary that contains three keys 'id', 'source', 'target'.
The corresponding values provide the attribute names for storing
Dynetx-internal graph data. Default value:
:samp:`dict(id='id', source='source', target='target')`.
Returns
-------
G : DyNetx graph
A DyNetx graph object
Examples
--------
>>> from dynetx.readwrite import json_graph
>>> G = dn.DynGraph([(1,2)])
>>> data = json_graph.node_link_data(G)
>>> H = json_graph.node_link_graph(data)
See Also
--------
node_link_data
|
def add_missing_row(
df: pd.DataFrame,
id_cols: List[str],
reference_col: str,
complete_index: Union[Dict[str, str], List[str]] = None,
method: str = None,
cols_to_keep: List[str] = None
) -> pd.DataFrame:
"""
Add missing rows to a df based on a reference column
---
### Parameters
*mandatory :*
- `id_cols` (*list of str*): names of the columns used to create each group
- `reference_col` (*str*): name of the column used to identify missing rows
*optional :*
- `complete_index` (*list* or *dict*): [A, B, C] a list of values used to add missing rows.
It can also be a dict to declare a date range.
By default, use all values of reference_col.
- `method` (*str*): by default all missing rows are added. The possible values are :
- `"between"` : add missing rows having their value between min and max values for each group,
- `"between_and_after"` : add missing rows having their value bigger than min value for each group.
- `"between_and_before"` : add missing rows having their value smaller than max values for each group.
- `cols_to_keep` (*list of str*): name of other columns to keep, linked to the reference_col.
---
### Example
**Input**
YEAR | MONTH | NAME
:---:|:---:|:--:
2017|1|A
2017|2|A
2017|3|A
2017|1|B
2017|3|B
```cson
add_missing_row:
id_cols: ['NAME']
reference_col: 'MONTH'
```
**Output**
YEAR | MONTH | NAME
:---:|:---:|:--:
2017|1|A
2017|2|A
2017|3|A
2017|1|B
2017|2|B
2017|3|B
"""
if cols_to_keep is None:
cols_for_index = [reference_col]
else:
cols_for_index = [reference_col] + cols_to_keep
check_params_columns_duplicate(id_cols + cols_for_index)
if method == 'between' or method == 'between_and_after':
df['start'] = df.groupby(id_cols)[reference_col].transform(min)
id_cols += ['start']
if method == 'between' or method == 'between_and_before':
df['end'] = df.groupby(id_cols)[reference_col].transform(max)
id_cols += ['end']
names = id_cols + cols_for_index
new_df = df.set_index(names)
index_values = df.groupby(id_cols).sum().index.values
if complete_index is None:
complete_index = df.groupby(cols_for_index).sum().index.values
elif isinstance(complete_index, dict):
if complete_index['type'] == 'date':
freq = complete_index['freq']
date_format = complete_index['format']
start = complete_index['start']
end = complete_index['end']
if isinstance(freq, dict):
freq = pd.DateOffset(**{k: int(v) for k, v in freq.items()})
complete_index = pd.date_range(start=start, end=end, freq=freq)
complete_index = complete_index.strftime(date_format)
else:
raise ParamsValueError(f'Unknown complete index type: '
f'{complete_index["type"]}')
if not isinstance(index_values[0], tuple):
index_values = [(x,) for x in index_values]
if not isinstance(complete_index[0], tuple):
complete_index = [(x,) for x in complete_index]
new_tuples_index = [x + y for x in index_values for y in complete_index]
new_index = pd.MultiIndex.from_tuples(new_tuples_index, names=names)
new_df = new_df.reindex(new_index).reset_index()
if method == 'between' or method == 'between_and_after':
new_df = new_df[new_df[reference_col] >= new_df['start']]
del new_df['start']
if method == 'between' or method == 'between_and_before':
new_df = new_df[new_df[reference_col] <= new_df['end']]
del new_df['end']
return new_df
|
Add missing rows to a df based on a reference column
---
### Parameters
*mandatory :*
- `id_cols` (*list of str*): names of the columns used to create each group
- `reference_col` (*str*): name of the column used to identify missing rows
*optional :*
- `complete_index` (*list* or *dict*): [A, B, C] a list of values used to add missing rows.
It can also be a dict to declare a date range.
By default, use all values of reference_col.
- `method` (*str*): by default all missing rows are added. The possible values are :
- `"between"` : add missing rows having their value between min and max values for each group,
- `"between_and_after"` : add missing rows having their value bigger than min value for each group.
- `"between_and_before"` : add missing rows having their value smaller than max values for each group.
- `cols_to_keep` (*list of str*): name of other columns to keep, linked to the reference_col.
---
### Example
**Input**
YEAR | MONTH | NAME
:---:|:---:|:--:
2017|1|A
2017|2|A
2017|3|A
2017|1|B
2017|3|B
```cson
add_missing_row:
id_cols: ['NAME']
reference_col: 'MONTH'
```
**Output**
YEAR | MONTH | NAME
:---:|:---:|:--:
2017|1|A
2017|2|A
2017|3|A
2017|1|B
2017|2|B
2017|3|B
|
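A sketch of the documented example called directly from Python rather than through the cson config; the DataFrame mirrors the input table above.
```python
import pandas as pd

df = pd.DataFrame({
    'YEAR': [2017, 2017, 2017, 2017, 2017],
    'MONTH': [1, 2, 3, 1, 3],
    'NAME': ['A', 'A', 'A', 'B', 'B'],
})
completed = add_missing_row(df, id_cols=['NAME'], reference_col='MONTH')
# Group 'B' gains the missing MONTH == 2 row, as in the output table above.
```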
def watch(self, *keys):
"""
Put the pipeline into immediate execution mode.
Does not actually watch any keys.
"""
if self.explicit_transaction:
raise RedisError("Cannot issue a WATCH after a MULTI")
self.watching = True
for key in keys:
self._watched_keys[key] = deepcopy(self.mock_redis.redis.get(self.mock_redis._encode(key)))
|
Put the pipeline into immediate execution mode.
Does not actually watch any keys.
|
def _parse_options(self, argv, location):
"""Parse the options part of an argument list.
IN:
argv <list str>:
List of arguments. Will be altered.
location <str>:
A user friendly string describing where this data came from.
"""
observed = []
while argv:
if argv[0].startswith('--'):
name = argv.pop(0)[2:]
# '--' means end of options.
if not name:
break
if name not in self.options:
raise InvalidOption(name)
option = self.options[name]
if not option.recurring:
if option in observed:
raise OptionRecurrenceError(name)
observed.append(option)
option.parse(argv, name, location)
elif argv[0].startswith('-'):
# A single - is not an abbreviation block, but the first positional arg.
if argv[0] == '-':
break
block = argv.pop(0)[1:]
# Abbrevs for options that take values go last in the block.
for abbreviation in block[:-1]:
if self.abbreviations[abbreviation].nargs != 0:
raise BadAbbreviationBlock(abbreviation, block, "options that require value arguments must be last in abbreviation blocks")
# Parse individual options.
for abbreviation in block:
option = self.abbreviations[abbreviation]
if not option.recurring:
if option in observed:
raise OptionRecurrenceError(option.name)
observed.append(option)
option.parse(argv, '-' + abbreviation, location)
# only arguments that start with -- or - can be Options.
else:
break
|
Parse the options part of an argument list.
IN:
argv <list str>:
List of arguments. Will be altered.
location <str>:
A user friendly string describing where this data came from.
|
def expand_effect_repertoire(self, new_purview=None):
"""See |Subsystem.expand_repertoire()|."""
return self.subsystem.expand_effect_repertoire(
self.effect.repertoire, new_purview)
|
See |Subsystem.expand_repertoire()|.
|
def eval_model(model, test, add_eval_metrics={}):
"""Evaluate model's performance on the test-set.
# Arguments
model: Keras model
test: test-dataset. Tuple of inputs `x` and target `y` - `(x, y)`.
add_eval_metrics: Additional evaluation metrics to use. Can be a dictionary or a list of functions
accepting arguments: `y_true`, `y_predicted`. Alternatively, you can provide names of functions from
the `concise.eval_metrics` module.
# Returns
dictionary with evaluation metrics
"""
# evaluate the model
logger.info("Evaluate...")
# - model_metrics
model_metrics_values = model.evaluate(test[0], test[1], verbose=0,
batch_size=test[1].shape[0])
# evaluation is done in a single pass to have more precise metrics
model_metrics = dict(zip(_listify(model.metrics_names),
_listify(model_metrics_values)))
# - eval_metrics
y_true = test[1]
y_pred = model.predict(test[0], verbose=0)
eval_metrics = {k: v(y_true, y_pred) for k, v in add_eval_metrics.items()}
# handle the case where the two metrics names intersect
# - omit duplicates from eval_metrics
intersected_keys = set(model_metrics).intersection(set(eval_metrics))
if len(intersected_keys) > 0:
logger.warning("Some metric names intersect: {0}. Ignoring the add_eval_metrics ones".
format(intersected_keys))
eval_metrics = _delete_keys(eval_metrics, intersected_keys)
return merge_dicts(model_metrics, eval_metrics)
|
Evaluate model's performance on the test-set.
# Arguments
model: Keras model
test: test-dataset. Tuple of inputs `x` and target `y` - `(x, y)`.
add_eval_metrics: Additional evaluation metrics to use. Can be a dictionary or a list of functions
accepting arguments: `y_true`, `y_predicted`. Alternatively, you can provide names of functions from
the `concise.eval_metrics` module.
# Returns
dictionary with evaluation metrics
|
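A hedged usage sketch; `model` is assumed to be a compiled Keras model and `(x_test, y_test)` a held-out split, neither of which is shown here.
```python
from sklearn.metrics import mean_squared_error  # any callable(y_true, y_pred) works

def report_metrics(model, x_test, y_test):
    """Evaluate a compiled Keras model on a held-out split with one extra metric."""
    metrics = eval_model(model, (x_test, y_test),
                         add_eval_metrics={'mse_sklearn': mean_squared_error})
    print(metrics)  # Keras metrics merged with the additional callables' results
```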
def _gaussian(x, amp, loc, std):
'''This is a simple Gaussian.
Parameters
----------
x : np.array
The items at which the Gaussian is evaluated.
amp : float
The amplitude of the Gaussian.
loc : float
The central value of the Gaussian.
std : float
The standard deviation of the Gaussian.
Returns
-------
np.array
Returns the Gaussian evaluated at the items in `x`, using the provided
parameters of `amp`, `loc`, and `std`.
'''
return amp * np.exp(-((x - loc)*(x - loc))/(2.0*std*std))
|
This is a simple Gaussian.
Parameters
----------
x : np.array
The items at which the Gaussian is evaluated.
amp : float
The amplitude of the Gaussian.
loc : float
The central value of the Gaussian.
std : float
The standard deviation of the Gaussian.
Returns
-------
np.array
Returns the Gaussian evaluated at the items in `x`, using the provided
parameters of `amp`, `loc`, and `std`.
|
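A small numeric check of the formula.
```python
import numpy as np

x = np.linspace(-3.0, 3.0, 7)
y = _gaussian(x, amp=2.0, loc=0.0, std=1.0)
# The curve peaks at amp (2.0) where x == loc and decays symmetrically.
print(y.max(), x[np.argmax(y)])  # 2.0 0.0
```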
def remove_stream_handlers(logger=None):
"""
Remove only stream handlers from the specified logger
:param logger: logging name or object to modify, defaults to root logger
"""
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
new_handlers = []
for handler in logger.handlers:
# FileHandler is a subclass of StreamHandler so
# 'if not a StreamHandler' does not work
if (isinstance(handler, logging.FileHandler) or
isinstance(handler, logging.NullHandler) or
(isinstance(handler, logging.Handler) and not
isinstance(handler, logging.StreamHandler))):
new_handlers.append(handler)
logger.handlers = new_handlers
|
Remove only stream handlers from the specified logger
:param logger: logging name or object to modify, defaults to root logger
|
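A small usage sketch showing that file handlers survive while plain stream handlers are dropped.
```python
import logging
import sys

demo_logger = logging.getLogger('remove_stream_handlers_demo')
demo_logger.addHandler(logging.StreamHandler(sys.stderr))
demo_logger.addHandler(logging.FileHandler('demo.log'))

remove_stream_handlers(demo_logger)
print(demo_logger.handlers)  # only the FileHandler remains
```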
def aggregate_detail(slug_list, with_data_table=False):
"""Template Tag to display multiple metrics.
* ``slug_list`` -- A list of slugs to display
* ``with_data_table`` -- if True, prints the raw data in a table.
"""
r = get_r()
metrics_data = []
granularities = r._granularities()
# XXX converting granularities into their key-name for metrics.
keys = ['seconds', 'minutes', 'hours', 'day', 'week', 'month', 'year']
key_mapping = {gran: key for gran, key in zip(GRANULARITIES, keys)}
keys = [key_mapping[gran] for gran in granularities]
# Our metrics data is of the form:
#
# (slug, {time_period: value, ... }).
#
# Let's convert this to (slug, list_of_values) so that the list of
# values is in the same order as the granularities
for slug, data in r.get_metrics(slug_list):
values = [data[t] for t in keys]
metrics_data.append((slug, values))
return {
'chart_id': "metric-aggregate-{0}".format("-".join(slug_list)),
'slugs': slug_list,
'metrics': metrics_data,
'with_data_table': with_data_table,
'granularities': [g.title() for g in keys],
}
|
Template Tag to display multiple metrics.
* ``slug_list`` -- A list of slugs to display
* ``with_data_table`` -- if True, prints the raw data in a table.
|
def find_last_true(sorted_list, true_criterion):
"""
Suppose we have a list of items [item1, item2, ..., itemN].
:type sorted_list: list
:param sorted_list: an iterable object that supports indexing
:param true_criterion: a callable returning True or False for a given item
If we do a mapping::
>>> def true_criterion(item):
... return item <= 6
>>> [true_criterion(item) for item in sorted_list]
[True, True, ... True(last true), False, False, ... False]
this function returns the index of last true item.
We could run the mapping for all items and then binary search for the
index, but sometimes the mapping function is expensive. This method
avoids running the mapping function for every item.
Example::
array = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
index = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
criterion = def true_criterion(x): return x <= 6
boolean = [1, 1, 1, 1, 1, 1, 1, 0, 0, 0]
Solution::
# first, we check index = int((0 + 9)/2.0) = 4, it's True.
# Then check array[4 + 1], it's still True.
# Then we jump to int((4 + 9)/2.0) = 6, it's True.
# Then check array[6 + 1], it's False. So array[6] is the one we need.
>>> find_last_true([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], true_criterion)
6
**Detailed description**
Purpose: given a sorted sequence of elements where, scanning from front to
back, the leading elements all satisfy some condition and from some point
onward they no longer do, this function returns the last element that still
satisfies the condition. When evaluating the condition is itself expensive,
this saves a lot of computation. For example, to find from which page in a
series of web pages (page1 to page999) a 404 error starts appearing: if that
page is number 400, testing one by one takes 400 tries, whereas a binary
search over 0 - 999 needs only about 9 trials (2 ** 9 = 512).
Algorithm:
Check the middle element. If it is False, check the middle of all unchecked
elements on the left; if it is True, check the middle of all unchecked
elements on the right. Repeat until the checked element is True and the next
one is False, which means we have found it.
Worked example::
    sorted array [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    index        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    condition    less than or equal to 6
    truth table  [1, 1, 1, 1, 1, 1, 1, 0, 0, 0]
Solution::
    First check ``index = int((0+9)/2.0) = 4``: True.
    Check array[4+1]: also True, so jump to ``int((4+9)/2.0) = 6``: True.
    Then check array[6+1]: False, so array[6] is the one we need.
"""
# exam first item, if not true, then impossible to find result
if not true_criterion(sorted_list[0]):
raise ValueError
# exam last item, if true, it is the one.
if true_criterion(sorted_list[-1]):
return sorted_list[-1]
lower, upper = 0, len(sorted_list) - 1
index = int((lower + upper) / 2.0)
while 1:
if true_criterion(sorted_list[index]):
if true_criterion(sorted_list[index + 1]):
lower = index
index = int((index + upper) / 2.0)
else:
return index
else:
upper = index
index = int((lower + index) / 2.0)
|
Suppose we have a list of items [item1, item2, ..., itemN].
:type sorted_list: list
:param sorted_list: an iterable object that supports indexing
:param true_criterion: a callable returning True or False for a given item
If we do a mapping::
>>> def true_criterion(item):
... return item <= 6
>>> [true_criterion(item) for item in sorted_list]
[True, True, ... True(last true), False, False, ... False]
this function returns the index of last true item.
We could run the mapping for all items and then binary search for the
index, but sometimes the mapping function is expensive. This method
avoids running the mapping function for every item.
Example::
array = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
index = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
criterion = def true_criterion(x): return x <= 6
boolean = [1, 1, 1, 1, 1, 1, 1, 0, 0, 0]
Solution::
# first, we check index = int((0 + 9)/2.0) = 4, it's True.
# Then check array[4 + 1], it's still True.
# Then we jump to int((4 + 9)/2.0) = 6, it's True.
# Then check array[6 + 1], it's False. So array[6] is the one we need.
>>> find_last_true([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], true_criterion)
6
**Detailed description**
Purpose: given a sorted sequence of elements where, scanning from front to
back, the leading elements all satisfy some condition and from some point
onward they no longer do, this function returns the last element that still
satisfies the condition. When evaluating the condition is itself expensive,
this saves a lot of computation. For example, to find from which page in a
series of web pages (page1 to page999) a 404 error starts appearing: if that
page is number 400, testing one by one takes 400 tries, whereas a binary
search over 0 - 999 needs only about 9 trials (2 ** 9 = 512).
Algorithm:
Check the middle element. If it is False, check the middle of all unchecked
elements on the left; if it is True, check the middle of all unchecked
elements on the right. Repeat until the checked element is True and the next
one is False, which means we have found it.
Worked example::
    sorted array [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    index        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    condition    less than or equal to 6
    truth table  [1, 1, 1, 1, 1, 1, 1, 0, 0, 0]
Solution::
    First check ``index = int((0+9)/2.0) = 4``: True.
    Check array[4+1]: also True, so jump to ``int((4+9)/2.0) = 6``: True.
    Then check array[6+1]: False, so array[6] is the one we need.
|
def goto(reference_beats,
estimated_beats,
goto_threshold=0.35,
goto_mu=0.2,
goto_sigma=0.2):
"""Calculate Goto's score, a binary 1 or 0 depending on some specific
heuristic criteria
Examples
--------
>>> reference_beats = mir_eval.io.load_events('reference.txt')
>>> reference_beats = mir_eval.beat.trim_beats(reference_beats)
>>> estimated_beats = mir_eval.io.load_events('estimated.txt')
>>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)
>>> goto_score = mir_eval.beat.goto(reference_beats, estimated_beats)
Parameters
----------
reference_beats : np.ndarray
reference beat times, in seconds
estimated_beats : np.ndarray
query beat times, in seconds
goto_threshold : float
Threshold of beat error for a beat to be "correct"
(Default value = 0.35)
goto_mu : float
The mean of the beat errors in the continuously correct
track must be less than this
(Default value = 0.2)
goto_sigma : float
The std of the beat errors in the continuously correct track must
be less than this
(Default value = 0.2)
Returns
-------
goto_score : float
Either 1.0 or 0.0 if some specific criteria are met
"""
validate(reference_beats, estimated_beats)
# When estimated beats are empty, no beats are correct; metric is 0
if estimated_beats.size == 0 or reference_beats.size == 0:
return 0.
# Error for each beat
beat_error = np.ones(reference_beats.shape[0])
# Flag for whether the reference and estimated beats are paired
paired = np.zeros(reference_beats.shape[0])
# Keep track of Goto's three criteria
goto_criteria = 0
for n in range(1, reference_beats.shape[0]-1):
# Get previous inter-reference-beat-interval
previous_interval = 0.5*(reference_beats[n] - reference_beats[n-1])
# Window start - in the middle of the current beat and the previous
window_min = reference_beats[n] - previous_interval
# Next inter-reference-beat-interval
next_interval = 0.5*(reference_beats[n+1] - reference_beats[n])
# Window end - in the middle of the current beat and the next
window_max = reference_beats[n] + next_interval
# Get estimated beats in the window
beats_in_window = np.logical_and((estimated_beats >= window_min),
(estimated_beats < window_max))
# False negative/positive
if beats_in_window.sum() == 0 or beats_in_window.sum() > 1:
paired[n] = 0
beat_error[n] = 1
else:
# Single beat is paired!
paired[n] = 1
# Get offset of the estimated beat and the reference beat
offset = estimated_beats[beats_in_window] - reference_beats[n]
# Scale by previous or next interval
if offset < 0:
beat_error[n] = offset/previous_interval
else:
beat_error[n] = offset/next_interval
# Get indices of incorrect beats
incorrect_beats = np.flatnonzero(np.abs(beat_error) > goto_threshold)
# All beats are correct (first and last will be 0 so always correct)
if incorrect_beats.shape[0] < 3:
# Get the track of correct beats
track = beat_error[incorrect_beats[0] + 1:incorrect_beats[-1] - 1]
goto_criteria = 1
else:
# Get the track of maximal length
track_len = np.max(np.diff(incorrect_beats))
track_start = np.flatnonzero(np.diff(incorrect_beats) == track_len)[0]
# Is the track length at least 25% of the song?
if track_len - 1 > .25*(reference_beats.shape[0] - 2):
goto_criteria = 1
start_beat = incorrect_beats[track_start]
end_beat = incorrect_beats[track_start + 1]
track = beat_error[start_beat:end_beat + 1]
# If we have a track
if goto_criteria:
# Are mean and std of the track less than the required thresholds?
if np.mean(np.abs(track)) < goto_mu \
and np.std(track, ddof=1) < goto_sigma:
goto_criteria = 3
# If all criteria are met, score is 100%!
return 1.0*(goto_criteria == 3)
|
Calculate Goto's score, a binary 1 or 0 depending on some specific
heuristic criteria
Examples
--------
>>> reference_beats = mir_eval.io.load_events('reference.txt')
>>> reference_beats = mir_eval.beat.trim_beats(reference_beats)
>>> estimated_beats = mir_eval.io.load_events('estimated.txt')
>>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)
>>> goto_score = mir_eval.beat.goto(reference_beats, estimated_beats)
Parameters
----------
reference_beats : np.ndarray
reference beat times, in seconds
estimated_beats : np.ndarray
query beat times, in seconds
goto_threshold : float
Threshold of beat error for a beat to be "correct"
(Default value = 0.35)
goto_mu : float
The mean of the beat errors in the continuously correct
track must be less than this
(Default value = 0.2)
goto_sigma : float
The std of the beat errors in the continuously correct track must
be less than this
(Default value = 0.2)
Returns
-------
goto_score : float
Either 1.0 or 0.0 if some specific criteria are met
|
def h2i(self, pkt, seconds):
"""Convert the number of seconds since 1-Jan-70 UTC to the packed
representation."""
if seconds is None:
seconds = 0
tmp_short = (seconds >> 32) & 0xFFFF
tmp_int = seconds & 0xFFFFFFFF
return struct.pack("!HI", tmp_short, tmp_int)
|
Convert the number of seconds since 1-Jan-70 UTC to the packed
representation.
|
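A standalone illustration of the packing arithmetic: a 48-bit timestamp split into a 16-bit high part and a 32-bit low part.
```python
import struct

seconds = 0x000100000002  # example 48-bit value
packed = struct.pack('!HI', (seconds >> 32) & 0xFFFF, seconds & 0xFFFFFFFF)
assert packed == b'\x00\x01\x00\x00\x00\x02'  # 6 bytes: high 16 bits, then low 32 bits
```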
def minimize(self, time, variables, **kwargs):
"""
Performs an optimization step.
Args:
time: Time tensor.
variables: List of variables to optimize.
**kwargs: Additional optimizer-specific arguments. The following arguments are used
by some optimizers:
- arguments: Dict of arguments for callables, like fn_loss.
- fn_loss: A callable returning the loss of the current model.
- fn_reference: A callable returning the reference values, in case of a comparative
loss.
- fn_kl_divergence: A callable returning the KL-divergence relative to the
current model.
- sampled_loss: A sampled loss (integer).
- return_estimated_improvement: Returns the estimated improvement resulting from
the natural gradient calculation if true.
- source_variables: List of source variables to synchronize with.
- global_variables: List of global variables to apply the proposed optimization
step to.
Returns:
The optimization operation.
"""
# # Add training variable gradient histograms/scalars to summary output
# # if 'gradients' in self.summary_labels:
# if any(k in self.summary_labels for k in ['gradients', 'gradients_histogram', 'gradients_scalar']):
# valid = True
# if isinstance(self, tensorforce.core.optimizers.TFOptimizer):
# gradients = self.optimizer.compute_gradients(kwargs['fn_loss']())
# elif isinstance(self.optimizer, tensorforce.core.optimizers.TFOptimizer):
# # This section handles "Multi_step" and may handle others
# # if failure is found, add another elif to handle that case
# gradients = self.optimizer.optimizer.compute_gradients(kwargs['fn_loss']())
# else:
# # Didn't find proper gradient information
# valid = False
# # Valid gradient data found, create summary data items
# if valid:
# for grad, var in gradients:
# if grad is not None:
# if any(k in self.summary_labels for k in ('gradients', 'gradients_scalar')):
# axes = list(range(len(grad.shape)))
# mean, var = tf.nn.moments(grad, axes)
# tf.contrib.summary.scalar(name='gradients/' + var.name + "/mean", tensor=mean)
# tf.contrib.summary.scalar(name='gradients/' + var.name + "/variance", tensor=var)
# if any(k in self.summary_labels for k in ('gradients', 'gradients_histogram')):
# tf.contrib.summary.histogram(name='gradients/' + var.name, tensor=grad)
deltas = self.step(time=time, variables=variables, **kwargs)
with tf.control_dependencies(control_inputs=deltas):
return tf.no_op()
|
Performs an optimization step.
Args:
time: Time tensor.
variables: List of variables to optimize.
**kwargs: Additional optimizer-specific arguments. The following arguments are used
by some optimizers:
- arguments: Dict of arguments for callables, like fn_loss.
- fn_loss: A callable returning the loss of the current model.
- fn_reference: A callable returning the reference values, in case of a comparative
loss.
- fn_kl_divergence: A callable returning the KL-divergence relative to the
current model.
- sampled_loss: A sampled loss (integer).
- return_estimated_improvement: Returns the estimated improvement resulting from
the natural gradient calculation if true.
- source_variables: List of source variables to synchronize with.
- global_variables: List of global variables to apply the proposed optimization
step to.
Returns:
The optimization operation.
|
def backup(self, backup_name, folder_key=None, folder_name=None):
"""Copies the google spreadsheet to the backup_name and folder specified.
Args:
backup_name (str): The name of the backup document to create.
folder_key (Optional) (str): The key of a folder that the new copy will
be moved to.
folder_name (Optional) (str): Like folder_key, references the folder to move a
backup to. If the folder can't be found, sheetsync will create it.
"""
folder = self._find_or_create_folder(folder_key, folder_name)
drive_service = self.drive_service
try:
source_rsrc = drive_service.files().get(fileId=self.document_key).execute()
except Exception as e:
logger.exception("Google API error. %s", e)
raise e
backup = self._create_new_or_copy(source_doc=source_rsrc,
target_name=backup_name,
folder=folder,
sheet_description="backup")
backup_key = backup['id']
return backup_key
|
Copies the google spreadsheet to the backup_name and folder specified.
Args:
backup_name (str): The name of the backup document to create.
folder_key (Optional) (str): The key of a folder that the new copy will
be moved to.
folder_name (Optional) (str): Like folder_key, references the folder to move a
backup to. If the folder can't be found, sheetsync will create it.
|
def _database_create(self, engine, database):
"""Create a new database and return a new url representing
a connection to the new database
"""
logger.info('Creating database "%s" in "%s"', database, engine)
database_operation(engine, 'create', database)
url = copy(engine.url)
url.database = database
return str(url)
|
Create a new database and return a new url representing
a connection to the new database
|
def get_job(self, job_id):
"""GetJob
https://apidocs.joyent.com/manta/api.html#GetJob
with the added sugar that it will retrieve the archived job if it has
been archived, per:
https://apidocs.joyent.com/manta/jobs-reference.html#job-completion-and-archival
"""
try:
return RawMantaClient.get_job(self, job_id)
except errors.MantaAPIError as ex:
if ex.res.status != 404:
raise
# Job was archived, try to retrieve the archived data.
mpath = "/%s/jobs/%s/job.json" % (self.account, job_id)
content = self.get_object(mpath, accept='application/json')
try:
return json.loads(content)
except ValueError:
raise errors.MantaError('invalid job data: %r' % content)
|
GetJob
https://apidocs.joyent.com/manta/api.html#GetJob
with the added sugar that it will retrieve the archived job if it has
been archived, per:
https://apidocs.joyent.com/manta/jobs-reference.html#job-completion-and-archival
|
def get_device_by_name(self, device_name):
"""Search the list of connected devices by name.
device_name param is the string name of the device
"""
# Find the device for the vera device name we are interested in
found_device = None
for device in self.get_devices():
if device.name == device_name:
found_device = device
# found the first (and should be only) one so we will finish
break
if found_device is None:
logger.debug('Did not find device with name {}'.format(device_name))
return found_device
|
Search the list of connected devices by name.
device_name param is the string name of the device
|
def add_stock(self, product_id, sku_info, quantity):
"""
Increase stock.
:param product_id: Product ID
:param sku_info: SKU info in the format "id1:vid1;id2:vid2"; if the product has a single uniform spec, pass an empty string here
:param quantity: Quantity of stock to add
:return: The returned JSON data packet
"""
return self._post(
'merchant/stock/add',
data={
"product_id": product_id,
"sku_info": sku_info,
"quantity": quantity
}
)
|
Increase stock.
:param product_id: Product ID
:param sku_info: SKU info in the format "id1:vid1;id2:vid2"; if the product has a single uniform spec, pass an empty string here
:param quantity: Quantity of stock to add
:return: The returned JSON data packet
|
def calc_tc_v1(self):
"""Adjust the measured air temperature to the altitude of the
individual zones.
Required control parameters:
|NmbZones|
|TCAlt|
|ZoneZ|
|ZRelT|
Required input sequence:
|hland_inputs.T|
Calculated flux sequences:
|TC|
Basic equation:
:math:`TC = T - TCAlt \\cdot (ZoneZ-ZRelT)`
Examples:
Prepare two zones, the first one lying at the reference
height and the second one 200 meters above:
>>> from hydpy.models.hland import *
>>> parameterstep('1d')
>>> nmbzones(2)
>>> zrelt(2.0)
>>> zonez(2.0, 4.0)
Applying the usual temperature lapse rate of 0.6°C/100m does
not affect the temperature of the first zone but reduces the
temperature of the second zone by 1.2°C:
>>> tcalt(0.6)
>>> inputs.t = 5.0
>>> model.calc_tc_v1()
>>> fluxes.tc
tc(5.0, 3.8)
"""
con = self.parameters.control.fastaccess
inp = self.sequences.inputs.fastaccess
flu = self.sequences.fluxes.fastaccess
for k in range(con.nmbzones):
flu.tc[k] = inp.t-con.tcalt[k]*(con.zonez[k]-con.zrelt)
|
Adjust the measured air temperature to the altitude of the
individual zones.
Required control parameters:
|NmbZones|
|TCAlt|
|ZoneZ|
|ZRelT|
Required input sequence:
|hland_inputs.T|
Calculated flux sequences:
|TC|
Basic equation:
:math:`TC = T - TCAlt \\cdot (ZoneZ-ZRelT)`
Examples:
Prepare two zones, the first one lying at the reference
height and the second one 200 meters above:
>>> from hydpy.models.hland import *
>>> parameterstep('1d')
>>> nmbzones(2)
>>> zrelt(2.0)
>>> zonez(2.0, 4.0)
Applying the usual temperature lapse rate of 0.6°C/100m does
not affect the temperature of the first zone but reduces the
temperature of the second zone by 1.2°C:
>>> tcalt(0.6)
>>> inputs.t = 5.0
>>> model.calc_tc_v1()
>>> fluxes.tc
tc(5.0, 3.8)
|
def _send_consumer_aware_request(self, group, payloads, encoder_fn, decoder_fn):
"""
Send a list of requests to the consumer coordinator for the group
specified using the supplied encode/decode functions. As the payloads
that use consumer-aware requests do not contain the group (e.g.
OffsetFetchRequest), all payloads must be for a single group.
Arguments:
group: the name of the consumer group (str) the payloads are for
payloads: list of object-like entities with topic (str) and
partition (int) attributes; payloads with duplicate
topic+partition are not supported.
encoder_fn: a method to encode the list of payloads to a request body,
must accept client_id, correlation_id, and payloads as
keyword arguments
decoder_fn: a method to decode a response body into response objects.
The response objects must be object-like and have topic
and partition attributes
Returns:
List of response objects in the same order as the supplied payloads
"""
# encoders / decoders do not maintain ordering currently
# so we need to keep this so we can rebuild order before returning
original_ordering = [(p.topic, p.partition) for p in payloads]
broker = self._get_coordinator_for_group(group)
# Send the list of request payloads and collect the responses and
# errors
responses = {}
request_id = self._next_id()
log.debug('Request %s to %s: %s', request_id, broker, payloads)
request = encoder_fn(client_id=self.client_id,
correlation_id=request_id, payloads=payloads)
# Send the request, recv the response
try:
host, port, afi = get_ip_port_afi(broker.host)
conn = self._get_conn(host, broker.port, afi)
except KafkaConnectionError as e:
log.warning('KafkaConnectionError attempting to send request %s '
'to server %s: %s', request_id, broker, e)
for payload in payloads:
topic_partition = (payload.topic, payload.partition)
responses[topic_partition] = FailedPayloadsError(payload)
# No exception, try to get response
else:
future = conn.send(request_id, request)
while not future.is_done:
for r, f in conn.recv():
f.success(r)
# decoder_fn=None signal that the server is expected to not
# send a response. This probably only applies to
# ProduceRequest w/ acks = 0
if decoder_fn is None:
log.debug('Request %s does not expect a response '
'(skipping conn.recv)', request_id)
for payload in payloads:
topic_partition = (payload.topic, payload.partition)
responses[topic_partition] = None
return []
if future.failed():
log.warning('Error attempting to receive a '
'response to request %s from server %s: %s',
request_id, broker, future.exception)
for payload in payloads:
topic_partition = (payload.topic, payload.partition)
responses[topic_partition] = FailedPayloadsError(payload)
else:
response = future.value
_resps = []
for payload_response in decoder_fn(response):
topic_partition = (payload_response.topic,
payload_response.partition)
responses[topic_partition] = payload_response
_resps.append(payload_response)
log.debug('Response %s: %s', request_id, _resps)
# Return responses in the same order as provided
return [responses[tp] for tp in original_ordering]
|
Send a list of requests to the consumer coordinator for the group
specified using the supplied encode/decode functions. As the payloads
that use consumer-aware requests do not contain the group (e.g.
OffsetFetchRequest), all payloads must be for a single group.
Arguments:
group: the name of the consumer group (str) the payloads are for
payloads: list of object-like entities with topic (str) and
partition (int) attributes; payloads with duplicate
topic+partition are not supported.
encoder_fn: a method to encode the list of payloads to a request body,
must accept client_id, correlation_id, and payloads as
keyword arguments
decoder_fn: a method to decode a response body into response objects.
The response objects must be object-like and have topic
and partition attributes
Returns:
List of response objects in the same order as the supplied payloads
|
def _process_execute_error(self, msg):
""" Process a reply for an execution request that resulted in an error.
"""
content = msg['content']
# If a SystemExit is passed along, this means exit() was called. The
# IPython %exit magic's '-k' value (or 'True') indicates the kernel
# should be kept running.
if content['ename']=='SystemExit':
keepkernel = content['evalue']=='-k' or content['evalue']=='True'
self._keep_kernel_on_exit = keepkernel
self.exit_requested.emit(self)
else:
traceback = ''.join(content['traceback'])
self._append_plain_text(traceback)
|
Process a reply for an execution request that resulted in an error.
|
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
random_state, scale, X_idx_sorted, X_csc=None, X_csr=None):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == numpy.bool
loss = self.loss_
# whether to use dropout in next iteration
do_dropout = self.dropout_rate > 0. and 0 < i < len(scale) - 1
for k in range(loss.K):
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=self.criterion,
splitter='best',
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
min_impurity_split=self.min_impurity_split,
min_impurity_decrease=self.min_impurity_decrease,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state,
presort=self.presort)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(numpy.float64)
X = X_csr if X_csr is not None else X
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False, X_idx_sorted=X_idx_sorted)
# add tree to ensemble
self.estimators_[i, k] = tree
# update tree leaves
if do_dropout:
# select base learners to be dropped for next iteration
drop_model, n_dropped = _sample_binomial_plus_one(self.dropout_rate, i + 1, random_state)
# adjust scaling factor of tree that is going to be trained in next iteration
scale[i + 1] = 1. / (n_dropped + 1.)
y_pred[:, k] = 0
for m in range(i + 1):
if drop_model[m] == 1:
# adjust scaling factor of dropped trees
scale[m] *= n_dropped / (n_dropped + 1.)
else:
# pseudoresponse of next iteration (without contribution of dropped trees)
y_pred[:, k] += self.learning_rate * scale[m] * self.estimators_[m, k].predict(X).ravel()
else:
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
return y_pred
|
Fit another stage of ``n_classes_`` trees to the boosting model.
|
def server(self):
"""Creates and returns a ServerConnection object."""
conn = self.connection_class(self)
with self.mutex:
self.connections.append(conn)
return conn
|
Creates and returns a ServerConnection object.
|
def _goto(self, pose, duration, wait, accurate):
""" Goes to a given cartesian pose.
:param matrix pose: homogeneous matrix representing the target position
:param float duration: move duration
:param bool wait: whether to wait for the end of the move
:param bool accurate: trade-off between accurate solution and computation time. By default, use the not so accurate but fast version.
"""
kwargs = {}
if not accurate:
kwargs['max_iter'] = 3
q0 = self.convert_to_ik_angles(self.joints_position)
q = self.inverse_kinematics(pose, initial_position=q0, **kwargs)
joints = self.convert_from_ik_angles(q)
last = self.motors[-1]
for m, pos in list(zip(self.motors, joints)):
m.goto_position(pos, duration,
wait=False if m != last else wait)
|
Goes to a given cartesian pose.
:param matrix pose: homogeneous matrix representing the target position
:param float duration: move duration
:param bool wait: whether to wait for the end of the move
:param bool accurate: trade-off between accurate solution and computation time. By default, use the not so accurate but fast version.
|
def obfn_g0var(self):
"""Variable to be evaluated in computing
:meth:`.ADMMTwoBlockCnstrnt.obfn_g0`, depending on the ``AuxVarObj``
option value.
"""
return self.var_y0() if self.opt['AuxVarObj'] else \
self.cnst_A0(None, self.Xf) - self.cnst_c0()
|
Variable to be evaluated in computing
:meth:`.ADMMTwoBlockCnstrnt.obfn_g0`, depending on the ``AuxVarObj``
option value.
|
def post_cleanup(self):
"""\
remove any divs that look like non-content,
clusters of links, or paras with no gusto
"""
targetNode = self.article.top_node
node = self.add_siblings(targetNode)
for e in self.parser.getChildren(node):
e_tag = self.parser.getTag(e)
if e_tag != 'p':
if self.is_highlink_density(e) \
or self.is_table_and_no_para_exist(e) \
or not self.is_nodescore_threshold_met(node, e):
self.parser.remove(e)
return node
|
\
remove any divs that look like non-content,
clusters of links, or paras with no gusto
|
def _truncate_to_field(model, field_name, value):
"""
Shorten data to fit in the specified model field.
If the data were too big for the field, it would cause a failure to
insert, so we shorten it, truncating in the middle (because
valuable information often shows up at the end).
"""
field = model._meta.get_field(field_name) # pylint: disable=protected-access
if len(value) > field.max_length:
midpoint = field.max_length // 2
len_after_midpoint = field.max_length - midpoint
first = value[:midpoint]
sep = '...'
last = value[len(value) - len_after_midpoint + len(sep):]
value = sep.join([first, last])
return value
|
Shorten data to fit in the specified model field.
If the data were too big for the field, it would cause a failure to
insert, so we shorten it, truncating in the middle (because
valuable information often shows up at the end).
|
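A plain-Python sketch of the middle-truncation arithmetic, without the Django field lookup; the max_length of 10 is hypothetical.
```python
value, max_length, sep = 'abcdefghijklmnopqrstuvwxyz', 10, '...'
midpoint = max_length // 2                                  # 5
len_after_midpoint = max_length - midpoint                  # 5
first = value[:midpoint]                                    # 'abcde'
last = value[len(value) - len_after_midpoint + len(sep):]   # 'yz'
print(sep.join([first, last]))                              # 'abcde...yz' (10 chars)
```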
def list(self, service_rec=None, host_rec=None, hostfilter=None):
"""
List a specific service or all services
:param service_rec: t_services.id
:param host_rec: t_hosts.id
:param hostfilter: Valid hostfilter or None
:return: [(svc.t_services.id, svc.t_services.f_hosts_id, svc.t_hosts.f_ipaddr,
svc.t_hosts.f_hostname, svc.t_services.f_proto,
svc.t_services.f_number, svc.t_services.f_status, svc.t_services.f_name,
svc.t_services.f_banner), ...]
"""
return self.send.service_list(service_rec, host_rec, hostfilter)
|
List a specific service or all services
:param service_rec: t_services.id
:param host_rec: t_hosts.id
:param hostfilter: Valid hostfilter or None
:return: [(svc.t_services.id, svc.t_services.f_hosts_id, svc.t_hosts.f_ipaddr,
svc.t_hosts.f_hostname, svc.t_services.f_proto,
svc.t_services.f_number, svc.t_services.f_status, svc.t_services.f_name,
svc.t_services.f_banner), ...]
|
def _get_headers(self):
"""Get all the headers we're going to need:
1. Authorization
2. Content-Type
3. User-agent
Note that the User-agent string contains the library name, the
library version, and the python version. This will help us track
what people are using, and where we should concentrate our
development efforts."""
user_agent = __api_lib_name__ + '/' + __version__ + '/' + \
PYTHON_VERSION
headers = {'User-Agent': user_agent,
'Content-Type': 'application/x-www-form-urlencoded'}
if self.key:
headers['Authorization'] = 'Bearer ' + self.key
return headers
|
Get all the headers we're going to need:
1. Authorization
2. Content-Type
3. User-agent
Note that the User-agent string contains the library name, the
library version, and the python version. This will help us track
what people are using, and where we should concentrate our
development efforts.
|
def _set_mldVlan(self, v, load=False):
"""
Setter method for mldVlan, mapped from YANG variable /interface_vlan/interface/vlan/ipv6/mldVlan (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_mldVlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mldVlan() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=mldVlan.mldVlan, is_container='container', presence=False, yang_name="mldVlan", rest_name="mld", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Multicast Listener Discovery (MLD) Snooping', u'callpoint': u'MldsVlan', u'cli-incomplete-no': None, u'alt-name': u'mld'}}, namespace='urn:brocade.com:mgmt:brocade-mld-snooping', defining_module='brocade-mld-snooping', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mldVlan must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=mldVlan.mldVlan, is_container='container', presence=False, yang_name="mldVlan", rest_name="mld", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Multicast Listener Discovery (MLD) Snooping', u'callpoint': u'MldsVlan', u'cli-incomplete-no': None, u'alt-name': u'mld'}}, namespace='urn:brocade.com:mgmt:brocade-mld-snooping', defining_module='brocade-mld-snooping', yang_type='container', is_config=True)""",
})
self.__mldVlan = t
if hasattr(self, '_set'):
self._set()
|
Setter method for mldVlan, mapped from YANG variable /interface_vlan/interface/vlan/ipv6/mldVlan (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_mldVlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mldVlan() directly.
|
def collection(data, bins=10, *args, **kwargs):
"""Create histogram collection with shared binnning."""
from physt.histogram_collection import HistogramCollection
if hasattr(data, "columns"):
data = {column: data[column] for column in data.columns}
return HistogramCollection.multi_h1(data, bins, **kwargs)
|
Create histogram collection with shared binning.
|
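A hedged usage sketch; the column names are made up for illustration and physt is assumed to be installed.
```python
import numpy as np
import pandas as pd

frame = pd.DataFrame({
    'signal': np.random.normal(0.0, 1.0, size=500),
    'background': np.random.normal(0.5, 2.0, size=500),
})
histograms = collection(frame, bins=20)  # one histogram per column, shared bin edges
```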
def exec_func_src3(func, globals_, sentinal=None, verbose=False,
start=None, stop=None):
"""
execs a func and returns requested local vars.
Does not modify globals unless update=True (or in IPython)
SeeAlso:
ut.execstr_funckw
"""
import utool as ut
sourcecode = ut.get_func_sourcecode(func, stripdef=True, stripret=True)
if sentinal is not None:
sourcecode = ut.replace_between_tags(sourcecode, '', sentinal)
if start is not None or stop is not None:
sourcecode = '\n'.join(sourcecode.splitlines()[slice(start, stop)])
if verbose:
print(ut.color_text(sourcecode, 'python'))
six.exec_(sourcecode, globals_)
|
execs a func and returns requested local vars.
Does not modify globals unless update=True (or in IPython)
SeeAlso:
ut.execstr_funckw
|
def next_conkey(self, conkey):
"""Return the next <conkey><n> based on conkey as a
string. Example, if 'startcond3' and 'startcond5' exist, this
will return 'startcond6' if 'startcond5' value is not None,
else startcond5 is returned.
It is assumed conkey is a valid condition key.
.. warning::
Under construction. There is work to do on this function in
combination with pack.add_condition. But now it's time for
bed.
"""
if conkey in self.conditions:
return conkey # Explicit conkey
conkeys = self.sorted_conkeys(prefix=conkey) # Might be empty.
if not conkeys:
# A trailing number given that does not already exist.
# accept possible gap from previous number.
return conkey
for candidate in conkeys:
if self.conditions[candidate] is None:
return candidate
i = self.cond_int(candidate) # The last one.
return re.sub(r'\d+', str(i + 1), candidate)
|
Return the next <conkey><n> based on conkey as a
string. Example, if 'startcond3' and 'startcond5' exist, this
will return 'startcond6' if 'startcond5' value is not None,
else startcond5 is returned.
It is assumed conkey is a valid condition key.
.. warning::
Under construction. There is work to do on this function in
combination with pack.add_condition. But now it's time for
bed.
|
def make_measurement(name,
channels,
lumi=1.0, lumi_rel_error=0.1,
output_prefix='./histfactory',
POI=None,
const_params=None,
verbose=False):
"""
Create a Measurement from a list of Channels
"""
if verbose:
llog = log['make_measurement']
llog.info("creating measurement {0}".format(name))
if not isinstance(channels, (list, tuple)):
channels = [channels]
# Create the measurement
meas = Measurement('measurement_{0}'.format(name), '')
meas.SetOutputFilePrefix(output_prefix)
if POI is not None:
if isinstance(POI, string_types):
if verbose:
llog.info("setting POI {0}".format(POI))
meas.SetPOI(POI)
else:
if verbose:
llog.info("adding POIs {0}".format(', '.join(POI)))
for p in POI:
meas.AddPOI(p)
if verbose:
llog.info("setting lumi={0:f} +/- {1:f}".format(lumi, lumi_rel_error))
meas.lumi = lumi
meas.lumi_rel_error = lumi_rel_error
for channel in channels:
if verbose:
llog.info("adding channel {0}".format(channel.GetName()))
meas.AddChannel(channel)
if const_params is not None:
if verbose:
llog.info("adding constant parameters {0}".format(
', '.join(const_params)))
for param in const_params:
meas.AddConstantParam(param)
return meas
|
Create a Measurement from a list of Channels
|
def set_value(self, control, value=None):
"""Set a value on the controller
If percent is True all controls will accept a value between -1.0 and 1.0
If not then:
Triggers are 0 to 255
Axis are -32768 to 32767
Control List:
AxisLx , Left Stick X-Axis
AxisLy , Left Stick Y-Axis
AxisRx , Right Stick X-Axis
AxisRy , Right Stick Y-Axis
BtnBack , Menu/Back Button
BtnStart , Start Button
BtnA , A Button
BtnB , B Button
BtnX , X Button
BtnY , Y Button
BtnThumbL , Left Thumbstick Click
BtnThumbR , Right Thumbstick Click
BtnShoulderL , Left Shoulder Button
BtnShoulderR , Right Shoulder Button
Dpad , Set Dpad Value (0 = Off, Use DPAD_### Constants)
TriggerL , Left Trigger
TriggerR , Right Trigger
"""
func = getattr(_xinput, 'Set' + control)
if 'Axis' in control:
target_type = c_short
if self.percent:
target_value = int(32767 * value)
else:
target_value = value
elif 'Btn' in control:
target_type = c_bool
target_value = bool(value)
elif 'Trigger' in control:
target_type = c_byte
if self.percent:
target_value = int(255 * value)
else:
target_value = value
elif 'Dpad' in control:
target_type = c_int
target_value = int(value)
func(c_uint(self.id), target_type(target_value))
|
Set a value on the controller
If percent is True all controls will accept a value between -1.0 and 1.0
If not then:
Triggers are 0 to 255
Axis are -32768 to 32767
Control List:
AxisLx , Left Stick X-Axis
AxisLy , Left Stick Y-Axis
AxisRx , Right Stick X-Axis
AxisRy , Right Stick Y-Axis
BtnBack , Menu/Back Button
BtnStart , Start Button
BtnA , A Button
BtnB , B Button
BtnX , X Button
BtnY , Y Button
BtnThumbL , Left Thumbstick Click
BtnThumbR , Right Thumbstick Click
BtnShoulderL , Left Shoulder Button
BtnShoulderR , Right Shoulder Button
Dpad , Set Dpad Value (0 = Off, Use DPAD_### Constants)
TriggerL , Left Trigger
TriggerR , Right Trigger
|
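A hedged usage sketch; the controller class and constructor are not shown here, so the snippet only assumes an object exposing set_value() as above (e.g. created with percent=True).
```python
def demo_inputs(controller):
    """Drive a few controls through set_value(); `controller` is any object
    exposing the method above."""
    controller.set_value('AxisLx', 0.5)     # left stick halfway to the right
    controller.set_value('BtnA', 1)         # press the A button
    controller.set_value('TriggerR', 1.0)   # fully pull the right trigger
    controller.set_value('Dpad', 0)         # release the D-pad
```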
def geom_find_group(g, atwts, pr_ax, mom, tt, \
nmax=_DEF.SYMM_MATCH_NMAX, \
tol=_DEF.SYMM_MATCH_TOL, \
dig=_DEF.SYMM_ATWT_ROUND_DIGITS,
avmax=_DEF.SYMM_AVG_MAX):
""" [Find all(?) proper rotation axes (n > 1) and reflection planes.]
.. todo:: Complete geom_find_axes docstring INCLUDING NEW HEADER LINE
DEPENDS on principal axes and moments being sorted such that:
I_A <= I_B <= I_C
Logic flow developed using:
1) http://symmetry.otterbein.edu/common/images/flowchart.pdf
Accessed 6 Mar 2015 (flow chart)
2) Largent et al. J Comp Chem 22: 1637-1642 (2012).
doi: 10.1002/jcc.22995
Helpful examples and descriptions of point groups from:
1) Wilson, Decius & Cross. "Molecular Vibrations." New York:
Dover (1980), pp 82-85.
2) "Molecular Structures of Organic Compounds -- Symmetry of
Molecules." Website of Prof. Dr. Stefan Immel, TU Darmstadt.
http://http://csi.chemie.tu-darmstadt.de/ak/immel/script/
redirect.cgi?filename=http://csi.chemie.tu-darmstadt.de/ak/
immel/tutorials/symmetry/index7.html. Accessed 6 Mar 2015.
Rotational symmetry numbers defined per:
Irikura, K. K. "Thermochemistry: Appendix B: Essential Statistical
Thermodynamics." Table II. NIST Computational Chemistry Comparison
& Benchmark Database. Online resource: http://cccbdb.nist.gov/
thermo.asp. Accessed 6 Mar 2015.
"""
#!TODO: Implement principal axes threshold checking to tell if a
# not-strictly spherical top is far enough from spherical to ignore
# looking for cubic groups. Ugh. Doesn't find the reflection planes
# in NH3. Going to have to explicitly deal with top type, since axes
# *must* be principal axes of the molecule, and off-principal axes
# will definitely never be symmetry elements.
# If asymmetric, only do pr_ax
# If symmetric, do the unique pr_ax and projections of atoms and
# midpoints normal to that axis
# If spherical, do everything, since every axis is inertially valid.
# If linear, pretty much just checking for inversion center to tell
# between C*v and D*h
# Imports
import numpy as np, itertools as itt
from scipy import linalg as spla
from ..const import PRM, EnumTopType as ETT
from itertools import combinations as nCr
from collections import namedtuple
from ..error import SymmError
# Define the Axis class
Axis = namedtuple('Axis', 'vector order refl')
# First, look for linear; exploit the top type, as linear should never
# be mis-attributed
if tt == ETT.LINEAR:
# Check for plane of symmetry; if there, D*h; if not, C*v
#!TODO: Once symmetry element reporting structure is established,
# revise here to report the molecular axis as the symmetry element.
if geom_symm_match(g, atwts, pr_ax[:,0], 0., True) < tol:
# Has symmetry plane; D*h
group = "D*h"
symm_fac = 2
return group, symm_fac
else:
# No symmetry plane; C*v
group = "C*v"
symm_fac = 1
return group, symm_fac
## end if
## end if
# Then, check for an atom
if tt == ETT.ATOM:
# Simple return
group= "Kh"
symm_fac = 1
return group, symm_fac
## end if
# Generally, trust that the top classification is going to be more
# rigorous than the symmetry identification. Thus, Spherical
# will almost certainly indicate a cubic group; Symmetrical, whether
# oblate or prolate, will indicate either a cubic group or a non-cubic
# with a principal rotation axis of order > 2; and Asymmetrical leaves
# room for any group to be found.
# (move much of this comment to the docstring once it's working)
# Vectorize the geometry and atwts
g = make_nd_vec(g, nd=None, t=np.float64, norm=False)
atwts = make_nd_vec(atwts, nd=None, t=np.float64, norm=False)
# Also make coordinate-split geometry
g_coord = g.reshape((g.shape[0] // 3, 3))
# Handle Spherical case
if tt == ETT.SPHERICAL:
# Build the list of atom midpoint axes
ax_midpts = []
for atwt in np.unique(atwts):
# Retrieve the sub-geometry
g_atwt = g_subset(g, atwts, atwt, dig)
# Only have axes to store if more than one atom
if g_atwt.shape[0] > 3:
# Reshape to grouped coordinates (row vectors)
g_atwt = g_atwt.reshape((g_atwt.shape[0] // 3, 3))
# Iterate over all unique index tuples of pairs
for tup in nCr(range(g_atwt.shape[0]), 2):
# Just vector-add the appropriate atomic
# coordinates; no need to normalize.
ax_midpts.append(np.add(*g_atwt[tup,:]))
## next tup
## end if more than one matched atom
## next atwt, to index all midpoint axes in the system
# Convert to 2-D array
ax_midpts = np.array(ax_midpts)
# Know for a fact that it should be a cubic group. Start looking at
# atom-wise vectors until an order > 1 axis is found.
order = i = 0
while order < 2 and i < g_coord.shape[0]:
# Store the axis
ax = g_coord[i,:]
# Only check if norm is large enough
if spla.norm(ax) > PRM.ZERO_VEC_TOL:
order, refl = geom_check_axis(g, atwts, ax, nmax, \
tol)
## end if
# Increment
i += 1
## loop
# At this point, check to see if nothing found (could happen, e.g.
# in C60 buckyball) and, if not, search midpoints between like
# atoms, again until an order > 1 axis is found.
# Otherwise, store the axis information as the initial reference.
if order >= 2:
# Found a good axis. Store as Axis.
ref_Axis = Axis(vector=ax, order=order, refl=refl)
else:
# No good axis found along atom positions. Search midpoints.
i = 0
while order < 2 and i < len(ax_midpts):
# Store the axis
ax = ax_midpts[i,:]
# Only check if norm is large enough
if spla.norm(ax) > PRM.ZERO_VEC_TOL:
order, refl = geom_check_axis(g, atwts, ax, \
nmax, tol)
## end if
# Increment
i += 1
## loop
# If nothing found here, raise exception
if order < 2:
raise SymmError(SymmError.NOTFOUND,
"Cubic point group not found in spherical top " +
"molecule.", "geom_find_group()")
## end if
# Store the found vector as Axis
ref_Axis = Axis(vector=ax, order=order, refl=refl)
## end if
#!RESUME: Search for other axes depending on the order of the axis found.
return ref_Axis
## end if order < 2, triggering check of atom pairs
# Leftover from originally not trusting top type
## # Must actually search for axes &c.
## #
## # Initialize the container for the principal axes
## Axes_pr = []
## for ax in [pr_ax[:,i] for i in range(3)]:
## order, refl = geom_check_axis(g, atwts, ax, nmax, tol)
## if order > 1 or refl:
## Axes_pr.append(Axis(vector=ax, order=order, refl=refl))
## ## end if
## ## next ax
## return Axes_pr
##
## # What is the max order found?
## # If < 3, asym or sph
## # If >=3, sym or sph; if multiple >2 then sph definitely
##
# Not doing it this way (brute force) any more.
## # Initialize the axes list to the principal axes (matrix of column
## # vectors)
## ax_list = pr_ax
##
## # Vectorize the geometry
## g = make_nd_vec(g, nd=None, t=np.float64, norm=False)
##
## # Break into 3-vectors
## g_vecs = np.array(np.split(g, g.shape[0] // 3))
##
## # Add all the atom displacements to the axes list
## ax_list = np.column_stack((ax_list, g_vecs.T))
##
## # In each block of atom types, add axes up to 5th-order midpoints
## for atwt in np.unique(atwts):
## # Retrieve the sub-geometry
## g_atwt = g_subset(g, atwts, atwt, dig)
##
## # Reshape to grouped coordinates (row vectors)
## g_atwt = g_atwt.reshape((g_atwt.shape[0] // 3, 3))
##
## # If more than one atom with the given weight, start at pairs
## # and go up from there
## if g_atwt.shape[0] >= 2:
## for grp_order in range(2, 1 + min(avmax, g_atwt.shape[0])):
## # Retrieve all unique index tuples for the indicated order
## for tup in nCr(range(g_atwt.shape[0]), grp_order):
## # Just vector-add the appropriate atomic coordinates.
## # No need to normalize or anything.
## ax_list = np.column_stack((ax_list, \
## reduce(np.add,[g_atwt[i,:] for i in tup]).T))
## ## next tup
## ## next order
## ## end if
## ## next atwt
##
## # Scrub any collinear axes down to uniques
## # Filter parallel axes
## i = 0
## while i < ax_list.shape[1] - 1:
## j = i + 1
## while j < ax_list.shape[1]:
## # For ANY collinear axes, remove until only one remains.
## v1 = ax_list[:,i]
## v2 = ax_list[:,j]
## if 1 - np.abs(np.dot(v1, v2) / spla.norm(v1) / spla.norm(v2)) \
## < PRM.NON_PARALLEL_TOL:
## # Strip the duplicate vector
## ax_list = np.column_stack((
## [ax_list[:,c] for c in \
## range(ax_list.shape[1]) if c <> j]
## ))
##
## # Decrement j so that nothing is skipped
## j -= 1
##
## # Increment j
## j += 1
## ## loop j
##
## # Increment i
## i += 1
## ## loop i
##
## # Cull any too-small axes
## i = 0
## while i < ax_list.shape[1]:
## # Store vector
## v = ax_list[:,i]
##
## # Check magnitude
## if spla.norm(v) < PRM.ZERO_VEC_TOL:
## # Strip if too small of magnitude
## ax_list = np.column_stack((
## [ax_list[:,c] for c in \
## range(ax_list.shape[1]) if c <> i]
## ))
##
## # Decrement counter to maintain position in reduced array
## i -= 1
## ## end if
##
## # Increment counter
## i +=1
## ## loop
##
## # Search all remaining axes for rotations and reflections
## prop_list = []
## for v in [ax_list[:,i] for i in range(ax_list.shape[1])]:
## order = geom_find_rotsymm(g, atwts, v, \
## False, nmax, tol)[0]
## #print("Prin: " + str(v))
## if order > 1:
## # Rotational axis worth reporting is found. Check reflection
## if geom_symm_match(g, atwts, v, 0, True) < tol:
## # Does have a reflection
## prop_list.append((v,order,True))
## else:
## # No reflection
## prop_list.append((v,order,False))
## ## end if
## else:
## # No rotation, but check for reflection
## if geom_symm_match(g, atwts, v, 0, True) < tol:
## # Has a reflection; do report
## prop_list.append((v,1,True))
## ## end if
## ## end if
## ## next v
##
## # Then test all rotations for 2x-order impropers
##
## # Finally test for inversion center
##
## # Then search the point group catalog and assign
# prop_list only exists in the commented-out code above; fail explicitly instead of hitting a NameError
raise NotImplementedError("geom_find_group: symmetric/asymmetric tops are not handled yet")
|
[Find all(?) proper rotation axes (n > 1) and reflection planes.]
.. todo:: Complete geom_find_axes docstring INCLUDING NEW HEADER LINE
DEPENDS on principal axes and moments being sorted such that:
I_A <= I_B <= I_C
Logic flow developed using:
1) http://symmetry.otterbein.edu/common/images/flowchart.pdf
Accessed 6 Mar 2015 (flow chart)
2) Largent et al. J Comp Chem 22: 1637-1642 (2012).
doi: 10.1002/jcc.22995
Helpful examples and descriptions of point groups from:
1) Wilson, Decius & Cross. "Molecular Vibrations." New York:
Dover (1980), pp 82-85.
2) "Molecular Structures of Organic Compounds -- Symmetry of
Molecules." Website of Prof. Dr. Stefan Immel, TU Darmstadt.
http://http://csi.chemie.tu-darmstadt.de/ak/immel/script/
redirect.cgi?filename=http://csi.chemie.tu-darmstadt.de/ak/
immel/tutorials/symmetry/index7.html. Accessed 6 Mar 2015.
Rotational symmetry numbers defined per:
Irikura, K. K. "Thermochemistry: Appendix B: Essential Statistical
Thermodynamics." Table II. NIST Computational Chemistry Comparison
& Benchmark Database. Online resource: http://cccbdb.nist.gov/
thermo.asp. Accessed 6 Mar 2015.
|
def fragment_fromstring(html, create_parent=False, base_url=None,
parser=None, **kw):
"""
Parses a single HTML element; it is an error if there is more than
one element, or if anything but whitespace precedes or follows the
element.
If ``create_parent`` is true (or is a tag name) then a parent node
will be created to encapsulate the HTML in a single element. In this
case, leading or trailing text is also allowed, as are multiple elements
as result of the parsing.
Passing a ``base_url`` will set the document's ``base_url`` attribute
(and the tree's docinfo.URL).
"""
if parser is None:
parser = html_parser
accept_leading_text = bool(create_parent)
elements = fragments_fromstring(
html, parser=parser, no_leading_text=not accept_leading_text,
base_url=base_url, **kw)
if create_parent:
if not isinstance(create_parent, basestring):
create_parent = 'div'
new_root = Element(create_parent)
if elements:
if isinstance(elements[0], basestring):
new_root.text = elements[0]
del elements[0]
new_root.extend(elements)
return new_root
if not elements:
raise etree.ParserError('No elements found')
if len(elements) > 1:
raise etree.ParserError(
"Multiple elements found (%s)"
% ', '.join([_element_name(e) for e in elements]))
el = elements[0]
if el.tail and el.tail.strip():
raise etree.ParserError(
"Element followed by text: %r" % el.tail)
el.tail = None
return el
|
Parses a single HTML element; it is an error if there is more than
one element, or if anything but whitespace precedes or follows the
element.
If ``create_parent`` is true (or is a tag name) then a parent node
will be created to encapsulate the HTML in a single element. In this
case, leading or trailing text is also allowed, as are multiple elements
as result of the parsing.
Passing a ``base_url`` will set the document's ``base_url`` attribute
(and the tree's docinfo.URL).
|
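A hedged usage sketch for the fragment parser above, assuming it is the fragment_fromstring exported by lxml.html (which the code mirrors) and that lxml is installed:

from lxml.html import fragment_fromstring

el = fragment_fromstring('<p>Hello <b>world</b></p>')
print(el.tag)   # 'p'

# With create_parent, leading text and multiple elements are allowed; they are
# wrapped in a new parent element (a <div> by default), and the leading text
# becomes the wrapper's .text.
wrapped = fragment_fromstring('intro <p>one</p><p>two</p>', create_parent=True)
print(wrapped.tag, len(wrapped))   # div 2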
def finalize(self):
"""finalize for StatisticsConsumer"""
super(StatisticsConsumer, self).finalize()
# run statistics on timewave slice w at grid point g
# self.result = [(g, self.statistics(w)) for g, w in zip(self.grid, self.result)]
# self.result = zip(self.grid, (self.statistics(w) for w in self.result))
self.result = zip(self.grid, map(self.statistics, self.result))
|
finalize for StatisticsConsumer
|
def _reverse_rounding_method(method):
"""
Reverse meaning of ``method`` between positive and negative.
"""
if method is RoundingMethods.ROUND_UP:
return RoundingMethods.ROUND_DOWN
if method is RoundingMethods.ROUND_DOWN:
return RoundingMethods.ROUND_UP
if method is RoundingMethods.ROUND_HALF_UP:
return RoundingMethods.ROUND_HALF_DOWN
if method is RoundingMethods.ROUND_HALF_DOWN:
return RoundingMethods.ROUND_HALF_UP
if method in \
(RoundingMethods.ROUND_TO_ZERO, RoundingMethods.ROUND_HALF_ZERO):
return method
raise BasesAssertError('unknown method')
|
Reverse meaning of ``method`` between positive and negative.
|
def preserve_shape(func):
"""Preserve shape of the image."""
@wraps(func)
def wrapped_function(img, *args, **kwargs):
shape = img.shape
result = func(img, *args, **kwargs)
result = result.reshape(shape)
return result
return wrapped_function
|
Preserve shape of the image.
|
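A small self-contained demonstration of the decorator pattern above: a function that flattens its input internally still returns an array in the caller's original shape. The normalize example is illustrative only.

import numpy as np
from functools import wraps

def preserve_shape(func):
    """Restore the input array's shape on the result (mirrors the snippet above)."""
    @wraps(func)
    def wrapped_function(img, *args, **kwargs):
        shape = img.shape
        result = func(img, *args, **kwargs)
        return result.reshape(shape)
    return wrapped_function

@preserve_shape
def normalize(img):
    flat = img.ravel().astype(float)   # flattening would normally lose the shape
    return (flat - flat.min()) / (np.ptp(flat) or 1.0)

out = normalize(np.arange(12).reshape(3, 4))
print(out.shape)   # (3, 4), not (12,)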
def _function_add_node(self, cfg_node, function_addr):
"""
Adds node to function manager, converting address to CodeNode if
possible
:param CFGNode cfg_node: A CFGNode instance.
:param int function_addr: Address of the current function.
:return: None
"""
snippet = self._to_snippet(cfg_node=cfg_node)
self.kb.functions._add_node(function_addr, snippet)
|
Adds node to function manager, converting address to CodeNode if
possible
:param CFGNode cfg_node: A CFGNode instance.
:param int function_addr: Address of the current function.
:return: None
|
def move(self, key, folder):
"""Move the specified key to folder.
folder must be an MdFolder instance. MdFolders can be obtained
through the 'folders' method call.
"""
# Basically this is a sophisticated __delitem__
# We need the path so we can make it in the new folder
path, host, flags = self._exists(key)
self._invalidate_cache()
# Now, move the message file to the new folder
newpath = joinpath(
folder.base,
folder.get_name(),
"cur", # we should probably move it to new if it's in new
basename(path)
)
self.filesystem.rename(path, newpath)
# And update the caches in the new folder
folder._invalidate_cache()
|
Move the specified key to folder.
folder must be an MdFolder instance. MdFolders can be obtained
through the 'folders' method call.
|
def root_mean_square(X):
''' root mean square for each variable in the segmented time series '''
segment_width = X.shape[1]
return np.sqrt(np.sum(X * X, axis=1) / segment_width)
|
root mean square for each variable in the segmented time series
|
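A quick worked example for the RMS helper above, assuming X holds one segment per row with shape (n_segments, segment_width):

import numpy as np

def root_mean_square(X):
    segment_width = X.shape[1]
    return np.sqrt(np.sum(X * X, axis=1) / segment_width)

X = np.array([[3.0, 3.0, 3.0, 3.0],      # constant segment  -> RMS 3.0
              [1.0, -1.0, 1.0, -1.0]])   # alternating sign  -> RMS 1.0
print(root_mean_square(X))               # [3. 1.]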
def add_url_rule(
self,
path: str,
endpoint: Optional[str]=None,
view_func: Optional[Callable]=None,
methods: Optional[Iterable[str]]=None,
defaults: Optional[dict]=None,
host: Optional[str]=None,
subdomain: Optional[str]=None,
*,
provide_automatic_options: Optional[bool]=None,
is_websocket: bool=False,
strict_slashes: bool=True,
) -> None:
"""Add a route/url rule to the application.
This is designed to be used on the application directly. An
example usage,
.. code-block:: python
def route():
...
app.add_url_rule('/', route)
Arguments:
path: The path to route on, should start with a ``/``.
func: Callable that returns a response.
methods: List of HTTP verbs the function routes.
endpoint: Optional endpoint name, if not present the
function name is used.
defaults: A dictionary of variables to provide automatically, use
to provide a simpler default path for a route, e.g. to allow
for ``/book`` rather than ``/book/0``,
.. code-block:: python
@app.route('/book', defaults={'page': 0})
@app.route('/book/<int:page>')
def book(page):
...
host: The full host name for this route (should include subdomain
if needed) - cannot be used with subdomain.
subdomain: A subdomain for this specific route.
provide_automatic_options: Optionally False to prevent
OPTION handling.
strict_slashes: Strictly match the trailing slash present in the
path. Will redirect a leaf (no slash) to a branch (with slash).
"""
endpoint = endpoint or _endpoint_from_view_func(view_func)
handler = ensure_coroutine(view_func)
if methods is None:
methods = getattr(view_func, 'methods', ['GET'])
methods = cast(Set[str], set(methods))
required_methods = set(getattr(view_func, 'required_methods', set()))
if provide_automatic_options is None:
automatic_options = getattr(view_func, 'provide_automatic_options', None)
if automatic_options is None:
automatic_options = 'OPTIONS' not in methods
else:
automatic_options = provide_automatic_options
if automatic_options:
required_methods.add('OPTIONS')
methods.update(required_methods)
if not self.url_map.host_matching and (host is not None or subdomain is not None):
raise RuntimeError('Cannot use host or subdomain without host matching enabled.')
if host is not None and subdomain is not None:
raise ValueError('Cannot set host and subdomain, please choose one or the other')
if subdomain is not None:
if self.config['SERVER_NAME'] is None:
raise RuntimeError('SERVER_NAME config is required to use subdomain in a route.')
host = f"{subdomain}.{self.config['SERVER_NAME']}"
elif host is None and self.url_map.host_matching:
host = self.config['SERVER_NAME']
if host is None:
raise RuntimeError(
'Cannot add a route with host matching enabled without either a specified '
'host or a config SERVER_NAME',
)
self.url_map.add(
self.url_rule_class(
path, methods, endpoint, host=host, provide_automatic_options=automatic_options,
defaults=defaults, is_websocket=is_websocket, strict_slashes=strict_slashes,
),
)
if handler is not None:
old_handler = self.view_functions.get(endpoint)
if getattr(old_handler, '_quart_async_wrapper', False):
old_handler = old_handler.__wrapped__ # type: ignore
if old_handler is not None and old_handler != view_func:
raise AssertionError(f"Handler is overwriting existing for endpoint {endpoint}")
self.view_functions[endpoint] = handler
|
Add a route/url rule to the application.
This is designed to be used on the application directly. An
example usage,
.. code-block:: python
def route():
...
app.add_url_rule('/', route)
Arguments:
path: The path to route on, should start with a ``/``.
func: Callable that returns a response.
methods: List of HTTP verbs the function routes.
endpoint: Optional endpoint name, if not present the
function name is used.
defaults: A dictionary of variables to provide automatically, use
to provide a simpler default path for a route, e.g. to allow
for ``/book`` rather than ``/book/0``,
.. code-block:: python
@app.route('/book', defaults={'page': 0})
@app.route('/book/<int:page>')
def book(page):
...
host: The full host name for this route (should include subdomain
if needed) - cannot be used with subdomain.
subdomain: A subdomain for this specific route.
provide_automatic_options: Optionally False to prevent
OPTION handling.
strict_slashes: Strictly match the trailing slash present in the
path. Will redirect a leaf (no slash) to a branch (with slash).
|
def send_text(self, text):
"""Send a plain text message to the room."""
return self.client.api.send_message(self.room_id, text)
|
Send a plain text message to the room.
|
def uniform_discr_frompartition(partition, dtype=None, impl='numpy', **kwargs):
"""Return a uniformly discretized L^p function space.
Parameters
----------
partition : `RectPartition`
Uniform partition to be used for discretization.
It defines the domain and the functions and the grid for
discretization.
dtype : optional
Data type for the discretized space, must be understood by the
`numpy.dtype` constructor. The default for ``None`` depends on the
``impl`` backend, usually it is ``'float64'`` or ``'float32'``.
impl : string, optional
Implementation of the data storage arrays
kwargs :
Additional keyword parameters, see `uniform_discr` for details.
Returns
-------
discr : `DiscreteLp`
The uniformly discretized function space.
Examples
--------
>>> part = odl.uniform_partition(0, 1, 10)
>>> uniform_discr_frompartition(part)
uniform_discr(0.0, 1.0, 10)
See Also
--------
uniform_discr : implicit uniform Lp discretization
uniform_discr_fromspace : uniform Lp discretization from an existing
function space
odl.discr.partition.uniform_partition :
partition of the function domain
"""
if not isinstance(partition, RectPartition):
raise TypeError('`partition` {!r} is not a `RectPartition` instance'
''.format(partition))
if not partition.is_uniform:
raise ValueError('`partition` is not uniform')
if dtype is not None:
dtype = np.dtype(dtype)
fspace = FunctionSpace(partition.set, out_dtype=dtype)
ds_type = tspace_type(fspace, impl, dtype)
if dtype is None:
dtype = ds_type.default_dtype()
weighting = kwargs.pop('weighting', None)
exponent = kwargs.pop('exponent', 2.0)
if weighting is None and is_numeric_dtype(dtype):
if exponent == float('inf') or partition.ndim == 0:
weighting = 1.0
else:
weighting = partition.cell_volume
tspace = ds_type(partition.shape, dtype, exponent=exponent,
weighting=weighting)
return DiscreteLp(fspace, partition, tspace, **kwargs)
|
Return a uniformly discretized L^p function space.
Parameters
----------
partition : `RectPartition`
Uniform partition to be used for discretization.
It defines the domain and the functions and the grid for
discretization.
dtype : optional
Data type for the discretized space, must be understood by the
`numpy.dtype` constructor. The default for ``None`` depends on the
``impl`` backend, usually it is ``'float64'`` or ``'float32'``.
impl : string, optional
Implementation of the data storage arrays
kwargs :
Additional keyword parameters, see `uniform_discr` for details.
Returns
-------
discr : `DiscreteLp`
The uniformly discretized function space.
Examples
--------
>>> part = odl.uniform_partition(0, 1, 10)
>>> uniform_discr_frompartition(part)
uniform_discr(0.0, 1.0, 10)
See Also
--------
uniform_discr : implicit uniform Lp discretization
uniform_discr_fromspace : uniform Lp discretization from an existing
function space
odl.discr.partition.uniform_partition :
partition of the function domain
|
def _determine_rotated_logfile(self):
"""
We suspect the logfile has been rotated, so try to guess what the
rotated filename is, and return it.
"""
rotated_filename = self._check_rotated_filename_candidates()
if rotated_filename and exists(rotated_filename):
if stat(rotated_filename).st_ino == self._offset_file_inode:
return rotated_filename
# if the inode hasn't changed, then the file shrank; this is expected with copytruncate,
# otherwise print a warning
if stat(self.filename).st_ino == self._offset_file_inode:
if self.copytruncate:
return rotated_filename
else:
sys.stderr.write(
"[pygtail] [WARN] file size of %s shrank, and copytruncate support is "
"disabled (expected at least %d bytes, was %d bytes).\n" %
(self.filename, self._offset, stat(self.filename).st_size))
return None
|
We suspect the logfile has been rotated, so try to guess what the
rotated filename is, and return it.
|
def fieldAlphaHistogram(
self, name, q='*:*', fq=None, nbins=10, includequeries=True
):
"""Generates a histogram of values from a string field. Output is:
[[low, high, count, query], ... ]. Bin edges are determined by equal division
of the field values.
"""
oldpersist = self.persistent
self.persistent = True
bins = []
qbin = []
fvals = []
try:
# get total number of values for the field
# TODO: this is a slow mechanism to retrieve the number of distinct values
# Need to replace this with something more efficient.
## Can probably replace with a range of alpha chars - need to check on
## case sensitivity
fvals = self.fieldValues(name, q, fq, maxvalues=-1)
nvalues = len(fvals[name]) // 2  # integer division; fvals alternates value/count entries
if nvalues < nbins:
nbins = nvalues
if nvalues == nbins:
# Use equivalence instead of range queries to retrieve the values
for i in range(0, nbins):
bin = [fvals[name][i * 2], fvals[name][i * 2], 0]
binq = '%s:%s' % (name, self.prepareQueryTerm(name, bin[0]))
qbin.append(binq)
bins.append(bin)
else:
delta = nvalues // nbins
if delta == 1:
# Use equivalence queries, except the last one which includes the
# remainder of terms
for i in range(0, nbins - 2):
bin = [fvals[name][i * 2], fvals[name][i * 2], 0]
binq = '%s:%s' % (name, self.prepareQueryTerm(name, bin[0]))
qbin.append(binq)
bins.append(bin)
term = fvals[name][(nbins - 1) * 2]
bin = [term, fvals[name][((nvalues - 1) * 2)], 0]
binq = '%s:[%s TO *]' % (name, self.prepareQueryTerm(name, term))
qbin.append(binq)
bins.append(bin)
else:
# Use range for all terms
# now need to page through all the values and get those at the edges
coffset = 0.0
delta = float(nvalues) / float(nbins)
for i in range(0, nbins):
idxl = int(coffset) * 2
idxu = (int(coffset + delta) * 2) - 2
bin = [fvals[name][idxl], fvals[name][idxu], 0]
# logging.info(str(bin))
binq = ''
try:
if i == 0:
binq = '%s:[* TO %s]' % (
name,
self.prepareQueryTerm(name, bin[1]),
)
elif i == nbins - 1:
binq = '%s:[%s TO *]' % (
name,
self.prepareQueryTerm(name, bin[0]),
)
else:
binq = '%s:[%s TO %s]' % (
name,
self.prepareQueryTerm(name, bin[0]),
self.prepareQueryTerm(name, bin[1]),
)
except Exception:
self.logger.exception('Exception 1 in fieldAlphaHistogram:')
qbin.append(binq)
bins.append(bin)
coffset = coffset + delta
# now execute the facet query request
params = {
'q': q,
'rows': '0',
'facet': 'true',
'facet.field': name,
'facet.limit': '1',
'facet.mincount': 1,
'wt': 'python',
}
request = urllib.parse.urlencode(params, doseq=True)
for sq in qbin:
try:
request = request + '&%s' % urllib.parse.urlencode(
{'facet.query': self.encoder(sq)[0]}
)
except Exception:
self.logger.exception('Exception 2 in fieldAlphaHistogram')
rsp = self.doPost(self.solrBase + '', request, self.formheaders)
data = eval(rsp.read())
for i in range(0, len(bins)):
v = data['facet_counts']['facet_queries'][qbin[i]]
bins[i][2] = v
if includequeries:
bins[i].append(qbin[i])
finally:
self.persistent = oldpersist
if not self.persistent:
self.conn.close()
return bins
|
Generates a histogram of values from a string field. Output is:
[[low, high, count, query], ... ]. Bin edges are determined by equal division
of the field values.
|
def get_managed_policy_document(policy_arn, policy_metadata=None, client=None, **kwargs):
"""Retrieve the currently active (i.e. 'default') policy version document for a policy.
:param policy_arn:
:param policy_metadata: This is a previously fetched managed policy response from boto/cloudaux.
This is used to prevent unnecessary API calls to get the initial policy default version id.
:param client:
:param kwargs:
:return:
"""
if not policy_metadata:
policy_metadata = client.get_policy(PolicyArn=policy_arn)
policy_document = client.get_policy_version(PolicyArn=policy_arn,
VersionId=policy_metadata['Policy']['DefaultVersionId'])
return policy_document['PolicyVersion']['Document']
|
Retrieve the currently active (i.e. 'default') policy version document for a policy.
:param policy_arn:
:param policy_metadata: This is a previously fetched managed policy response from boto/cloudaux.
This is used to prevent unnecessary API calls to get the initial policy default version id.
:param client:
:param kwargs:
:return:
|
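A hedged boto3 usage sketch of the two IAM calls the helper above wraps; the ARN is a placeholder and valid AWS credentials with iam:GetPolicy / iam:GetPolicyVersion permissions are assumed:

import boto3

policy_arn = 'arn:aws:iam::aws:policy/ReadOnlyAccess'   # placeholder ARN

iam = boto3.client('iam')
metadata = iam.get_policy(PolicyArn=policy_arn)
version = iam.get_policy_version(
    PolicyArn=policy_arn,
    VersionId=metadata['Policy']['DefaultVersionId'])
document = version['PolicyVersion']['Document']   # the currently active ('default') document
print(document.get('Version'))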
def unit_overlap(evaluated_model, reference_model):
"""
Computes unit overlap of two text documents. Documents
have to be represented as TF models of non-empty documents.
:returns float:
0 <= overlap <= 1, where 0 means no match and 1 means
exactly the same.
"""
if not (isinstance(evaluated_model, TfModel) and isinstance(reference_model, TfModel)):
raise ValueError(
"Arguments has to be instances of 'sumy.models.TfDocumentModel'")
terms1 = frozenset(evaluated_model.terms)
terms2 = frozenset(reference_model.terms)
if not terms1 and not terms2:
raise ValueError(
"Documents can't be empty. Please pass the valid documents.")
common_terms_count = len(terms1 & terms2)
return common_terms_count / (len(terms1) + len(terms2) - common_terms_count)
|
Computes unit overlap of two text documents. Documents
have to be represented as TF models of non-empty documents.
:returns float:
0 <= overlap <= 1, where 0 means no match and 1 means
exactly the same.
|
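The measure above is the Jaccard index over term sets, |A ∩ B| / |A ∪ B|, written with the inclusion-exclusion denominator. A tiny self-contained check with hand-picked term sets standing in for the TF models:

terms1 = frozenset({'summarization', 'text', 'model'})
terms2 = frozenset({'text', 'model', 'evaluation', 'metric'})

common = len(terms1 & terms2)
overlap = common / (len(terms1) + len(terms2) - common)
print(overlap)                                                  # 0.4
print(overlap == len(terms1 & terms2) / len(terms1 | terms2))   # True, i.e. Jaccard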
def register_model(cls, model):
"""
Register a model class according to its remote name
Args:
model: the model to register
"""
rest_name = model.rest_name
resource_name = model.resource_name
if rest_name not in cls._model_rest_name_registry:
cls._model_rest_name_registry[rest_name] = [model]
cls._model_resource_name_registry[resource_name] = [model]
elif model not in cls._model_rest_name_registry[rest_name]:
cls._model_rest_name_registry[rest_name].append(model)
cls._model_resource_name_registry[resource_name].append(model)
|
Register a model class according to its remote name
Args:
model: the model to register
|
def write_dltime (self, url_data):
"""Write url_data.dltime."""
self.writeln(u"<tr><td>"+self.part("dltime")+u"</td><td>"+
(_("%.3f seconds") % url_data.dltime)+
u"</td></tr>")
|
Write url_data.dltime.
|
def knob_end(self):
""" Coordinates of the end of the knob residue (atom in side-chain furthest from CB atom.
Returns CA coordinates for GLY.
"""
side_chain_atoms = self.knob_residue.side_chain
if not side_chain_atoms:
return self.knob_residue['CA']._vector  # return coordinates, matching the other branches
distances = [distance(self.knob_residue['CB'], x) for x in side_chain_atoms]
max_d = max(distances)
knob_end_atoms = [atom for atom, d in zip(side_chain_atoms, distances) if d == max_d]
if len(knob_end_atoms) == 1:
return knob_end_atoms[0]._vector
else:
return numpy.mean([x._vector for x in knob_end_atoms], axis=0)
|
Coordinates of the end of the knob residue (atom in side-chain furthest from CB atom).
Returns CA coordinates for GLY.
|
def directional_hamming_distance(reference_intervals, estimated_intervals):
"""Compute the directional hamming distance between reference and
estimated intervals as defined by [#harte2010towards]_ and used for MIREX
'OverSeg', 'UnderSeg' and 'MeanSeg' measures.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> overseg = 1 - mir_eval.chord.directional_hamming_distance(
... ref_intervals, est_intervals)
>>> underseg = 1 - mir_eval.chord.directional_hamming_distance(
... est_intervals, ref_intervals)
>>> seg = min(overseg, underseg)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2), dtype=float
Reference chord intervals to score against.
estimated_intervals : np.ndarray, shape=(m, 2), dtype=float
Estimated chord intervals to score against.
Returns
-------
directional hamming distance : float
directional hamming distance between reference intervals and
estimated intervals.
"""
util.validate_intervals(estimated_intervals)
util.validate_intervals(reference_intervals)
# make sure chord intervals do not overlap
if len(reference_intervals) > 1 and (reference_intervals[:-1, 1] >
reference_intervals[1:, 0]).any():
raise ValueError('Chord Intervals must not overlap')
est_ts = np.unique(estimated_intervals.flatten())
seg = 0.
for start, end in reference_intervals:
dur = end - start
between_start_end = est_ts[(est_ts >= start) & (est_ts < end)]
seg_ts = np.hstack([start, between_start_end, end])
seg += dur - np.diff(seg_ts).max()
return seg / (reference_intervals[-1, 1] - reference_intervals[0, 0])
|
Compute the directional hamming distance between reference and
estimated intervals as defined by [#harte2010towards]_ and used for MIREX
'OverSeg', 'UnderSeg' and 'MeanSeg' measures.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> overseg = 1 - mir_eval.chord.directional_hamming_distance(
... ref_intervals, est_intervals)
>>> underseg = 1 - mir_eval.chord.directional_hamming_distance(
... est_intervals, ref_intervals)
>>> seg = min(overseg, underseg)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2), dtype=float
Reference chord intervals to score against.
estimated_intervals : np.ndarray, shape=(m, 2), dtype=float
Estimated chord intervals to score against.
Returns
-------
directional hamming distance : float
directional hamming distance between reference intervals and
estimated intervals.
|
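A standalone numpy sketch of the same segmentation measure on toy intervals (not mir_eval itself): for every reference interval, the penalty is its duration minus the longest stretch covered by a single estimated segment, normalised by the total duration.

import numpy as np

ref = np.array([[0.0, 4.0], [4.0, 8.0]])   # two reference segments
est = np.array([[0.0, 3.0], [3.0, 8.0]])   # one boundary misplaced by 1 s

est_ts = np.unique(est.flatten())
seg = 0.0
for start, end in ref:
    inner = est_ts[(est_ts >= start) & (est_ts < end)]
    seg_ts = np.hstack([start, inner, end])
    seg += (end - start) - np.diff(seg_ts).max()   # uncovered part of this interval
print(seg / (ref[-1, 1] - ref[0, 0]))              # 0.125, i.e. 1 s penalty over 8 s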
def _fix_repo_url(repo_url):
"""Add empty credentials to a repo URL if not set, but only for HTTP/HTTPS.
This is to make git not hang while trying to read the username and
password from standard input."""
parsed = urlparse.urlparse(repo_url)
if parsed.scheme not in ('http', 'https'):
# Fix only for http and https.
return repo_url
username = parsed.username or ""
password = parsed.password or ""
port = ":" + parsed.port if parsed.port else ""
netloc = "".join((username, ":", password, "@", parsed.hostname, port))
part_list = list(parsed)
part_list[1] = netloc
return urlparse.urlunparse(part_list)
|
Add empty credentials to a repo URL if not set, but only for HTTP/HTTPS.
This is to make git not hang while trying to read the username and
password from standard input.
|
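A self-contained Python 3 sketch of the same trick (the snippet above uses the Python 2 urlparse module): an empty "username:password@" pair is spliced into the netloc so git will not block waiting for credentials on stdin.

from urllib import parse as urlparse

def fix_repo_url(repo_url):
    parsed = urlparse.urlparse(repo_url)
    if parsed.scheme not in ('http', 'https'):
        return repo_url                       # leave ssh:// and friends untouched
    username = parsed.username or ""
    password = parsed.password or ""
    port = ":" + str(parsed.port) if parsed.port else ""
    netloc = "".join((username, ":", password, "@", parsed.hostname, port))
    return urlparse.urlunparse(parsed._replace(netloc=netloc))

print(fix_repo_url("https://example.com:8443/repo.git"))
# https://:@example.com:8443/repo.git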
def get_rendition_size(self, spec, output_scale, crop):
"""
Wrapper to determine the overall rendition size and cropping box
Returns tuple of (size,box)
"""
if crop:
# Use the cropping rectangle size
_, _, width, height = crop
else:
# Use the original image size
width = self._record.width
height = self._record.height
mode = spec.get('resize', 'fit')
if mode == 'fit':
return self.get_rendition_fit_size(spec, width, height, output_scale)
if mode == 'fill':
return self.get_rendition_fill_size(spec, width, height, output_scale)
if mode == 'stretch':
return self.get_rendition_stretch_size(spec, width, height, output_scale)
raise ValueError("Unknown resize mode {}".format(mode))
|
Wrapper to determine the overall rendition size and cropping box
Returns tuple of (size,box)
|
def _check_operators(self):
"""Check Operators
This method checks if the input operators have a "cost" method
Raises
------
ValueError
For invalid operators type
ValueError
For operators without "cost" method
"""
if not isinstance(self._operators, (list, tuple, np.ndarray)):
raise TypeError(('Input operators must be provided as a list, '
'not {}').format(type(self._operators)))
for op in self._operators:
if not hasattr(op, 'cost'):
raise ValueError('Operators must contain "cost" method.')
op.cost = check_callable(op.cost)
|
Check Operators
This method checks if the input operators have a "cost" method
Raises
------
ValueError
For invalid operators type
ValueError
For operators without "cost" method
|
def decode(data):
'''
str -> bytes
'''
if riemann.network.CASHADDR_PREFIX is None:
raise ValueError('Network {} does not support cashaddresses.'
.format(riemann.get_current_network_name()))
if data.find(riemann.network.CASHADDR_PREFIX) != 0:
raise ValueError('Malformed cashaddr. Cannot locate prefix: {}'
.format(riemann.network.CASHADDR_PREFIX))
# the data is everything after the colon
prefix, data = data.split(':')
decoded = b32decode(data)
if not verify_checksum(prefix, decoded):
raise ValueError('Bad cash address checksum')
converted = convertbits(decoded, 5, 8)
return bytes(converted[:-6])
|
str -> bytes
|
def _make_fake_message(self, user_id, page_id, payload):
"""
Creates a fake message for the given user_id. It contains a postback
with the given payload.
"""
event = {
'sender': {
'id': user_id,
},
'recipient': {
'id': page_id,
},
'postback': {
'payload': ujson.dumps(payload),
},
}
return FacebookMessage(event, self, False)
|
Creates a fake message for the given user_id. It contains a postback
with the given payload.
|
def _main(self):
"""
process
"""
probes = self.config.get('probes', None)
if not probes:
raise ValueError('no probes specified')
for probe_config in self.config['probes']:
probe = plugin.get_probe(probe_config, self.plugin_context)
# FIXME - needs to check for output defined in plugin
if 'output' not in probe_config:
raise ValueError("no output specified")
# get all output targets and start / join them
for output_name in probe_config['output']:
output = plugin.get_output(output_name, self.plugin_context)
if not output.started:
output.start()
self.joins.append(output)
probe._emit.append(output)
probe.start()
self.joins.append(probe)
vaping.io.joinall(self.joins)
return 0
|
process
|
def validate_rc():
""" Before we execute any actions, let's validate our .vacationrc. """
transactions = rc.read()
if not transactions:
print('Your .vacationrc file is empty! Set days and rate.')
return False
transactions = sort(unique(transactions))
return validate_setup(transactions)
|
Before we execute any actions, let's validate our .vacationrc.
|
def parse(data):
"""
Parse the given ChangeLog data into a list of Hashes.
@param [String] data File data from the ChangeLog.md
@return [Array<Hash>] Parsed data, e.g. [{ 'version' => ..., 'url' => ..., 'date' => ..., 'content' => ...}, ...]
"""
sections = re.compile("^## .+$", re.MULTILINE).split(data)
headings = re.findall("^## .+?$", data, re.MULTILINE)
sections.pop(0)
parsed = []
def func(h, s):
p = parse_heading(h)
p["content"] = s
parsed.append(p)
list(map(func, headings, sections))
return parsed
|
Parse the given ChangeLog data into a list of Hashes.
@param [String] data File data from the ChangeLog.md
@return [Array<Hash>] Parsed data, e.g. [{ 'version' => ..., 'url' => ..., 'date' => ..., 'content' => ...}, ...]
|
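A small self-contained sketch of the same splitting approach on an inline Markdown changelog; parse_heading is not shown above, so a simplified stand-in that only extracts the version string is used here.

import re

data = """# Changelog
## 1.1.0 (2024-01-05)
Added things.
## 1.0.0 (2023-12-01)
Initial release.
"""

sections = re.compile(r"^## .+$", re.MULTILINE).split(data)
headings = re.findall(r"^## .+?$", data, re.MULTILINE)
sections.pop(0)                      # drop the preamble before the first "## " heading

def parse_heading(heading):          # simplified stand-in for the real parser
    return {"version": heading.lstrip("# ").split()[0]}

parsed = [dict(parse_heading(h), content=s.strip())
          for h, s in zip(headings, sections)]
print(parsed[0])   # {'version': '1.1.0', 'content': 'Added things.'}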
def get_file_str(path, saltenv='base'):
'''
Download a file from a URL to the Minion cache directory and return the
contents of that file
Returns ``False`` if Salt was unable to cache a file from a URL.
CLI Example:
.. code-block:: bash
salt '*' cp.get_file_str salt://my/file
'''
fn_ = cache_file(path, saltenv)
if isinstance(fn_, six.string_types):
try:
with salt.utils.files.fopen(fn_, 'r') as fp_:
return fp_.read()
except IOError:
return False
return fn_
|
Download a file from a URL to the Minion cache directory and return the
contents of that file
Returns ``False`` if Salt was unable to cache a file from a URL.
CLI Example:
.. code-block:: bash
salt '*' cp.get_file_str salt://my/file
|
def to_wire(self, file, compress=None, origin=None, **kw):
"""Convert the RRset to wire format."""
return super(RRset, self).to_wire(self.name, file, compress, origin,
self.deleting, **kw)
|
Convert the RRset to wire format.
|
def set_lacp_fallback(self, name, mode=None):
"""Configures the Port-Channel lacp_fallback
Args:
name(str): The Port-Channel interface name
mode(str): The Port-Channel LACP fallback setting
Valid values are 'disabled', 'static', 'individual':
* static - Fallback to static LAG mode
* individual - Fallback to individual ports
* disabled - Disable LACP fallback
Returns:
True if the operation succeeds otherwise False is returned
"""
if mode not in ['disabled', 'static', 'individual']:
return False
disable = True if mode == 'disabled' else False
commands = ['interface %s' % name]
commands.append(self.command_builder('port-channel lacp fallback',
value=mode, disable=disable))
return self.configure(commands)
|
Configures the Port-Channel lacp_fallback
Args:
name(str): The Port-Channel interface name
mode(str): The Port-Channel LACP fallback setting
Valid values are 'disabled', 'static', 'individual':
* static - Fallback to static LAG mode
* individual - Fallback to individual ports
* disabled - Disable LACP fallback
Returns:
True if the operation succeeds otherwise False is returned
|
def vote_choice_address(self) -> List[str]:
'''calculate the addresses on which the vote is cast.'''
if self.vote_id is None:
raise Exception("vote_id is required")
addresses = []
vote_init_txid = unhexlify(self.vote_id)
for choice in self.choices:
vote_cast_privkey = sha256(vote_init_txid + bytes(
list(self.choices).index(choice))
).hexdigest()
addresses.append(Kutil(network=self.deck.network,
privkey=bytearray.fromhex(vote_cast_privkey)).address)
return addresses
|
calculate the addresses on which the vote is cast.
|
def _get_elements(complex_type, root):
"""Get attribute elements
"""
found_elements = []
element = findall(root, '{%s}complexType' % XS_NAMESPACE,
attribute_name='name', attribute_value=complex_type)[0]
found_elements = findall(element, '{%s}element' % XS_NAMESPACE)
return found_elements
|
Get attribute elements
|
def on_menu_exit(self, event):
"""
Exit the GUI
"""
# also delete appropriate copy file
try:
self.help_window.Destroy()
except:
pass
if '-i' in sys.argv:
self.Destroy()
try:
sys.exit() # can raise TypeError if wx inspector was used
except Exception as ex:
if isinstance(ex, TypeError):
pass
else:
raise ex
|
Exit the GUI
|
def convert_op(self, op):
"""
Converts NeuroML arithmetic/logical operators to python equivalents.
@param op: NeuroML operator
@type op: string
@return: Python operator
@rtype: string
"""
if op == '.gt.':
return '>'
elif op == '.ge.' or op == '.geq.':
return '>='
elif op == '.lt.':
return '<'
elif op == '.le.':
return '<='
elif op == '.eq.':
return '=='
elif op == '.neq.':
return '!='
elif op == '.ne.': # .neq. is preferred!
return '!='
elif op == '^':
return '**'
elif op == '.and.':
return 'and'
elif op == '.or.':
return 'or'
else:
return op
|
Converts NeuroML arithmetic/logical operators to python equivalents.
@param op: NeuroML operator
@type op: string
@return: Python operator
@rtype: string
|
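The if/elif chain above is equivalent to a dictionary lookup with a pass-through fallback; a sketch of that alternative, using the operator spellings from the code above:

_OP_MAP = {
    '.gt.': '>', '.ge.': '>=', '.geq.': '>=',
    '.lt.': '<', '.le.': '<=',
    '.eq.': '==', '.neq.': '!=', '.ne.': '!=',
    '^': '**', '.and.': 'and', '.or.': 'or',
}

def convert_op(op):
    # Unknown operators pass through unchanged, matching the original's else branch.
    return _OP_MAP.get(op, op)

print(convert_op('.geq.'), convert_op('^'), convert_op('+'))   # >= ** +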
def get_rendered_fields(self, ctx=None):
'''
:param ctx: rendering context in which the method was called
:return: ordered list of the fields that will be rendered
'''
if ctx is None:
ctx = RenderContext()
ctx.push(self)
current = self._fields[self._field_idx]
res = current.get_rendered_fields(ctx)
ctx.pop()
return res
|
:param ctx: rendering context in which the method was called
:return: ordered list of the fields that will be rendered
|
def _get_shaperecords(self, num_fill_bits,
num_line_bits, shape_number):
"""Return an array of SHAPERECORDS."""
shape_records = []
bc = BitConsumer(self._src)
while True:
type_flag = bc.u_get(1)
if type_flag:
# edge record
straight_flag = bc.u_get(1)
num_bits = bc.u_get(4)
if straight_flag:
record = _make_object('StraightEdgeRecord')
record.TypeFlag = 1
record.StraightFlag = 1
record.NumBits = num_bits
record.GeneralLineFlag = general_line_flag = bc.u_get(1)
if general_line_flag:
record.DeltaX = bc.s_get(num_bits + 2)
record.DeltaY = bc.s_get(num_bits + 2)
else:
record.VertLineFlag = vert_line_flag = bc.s_get(1)
if vert_line_flag:
record.DeltaY = bc.s_get(num_bits + 2)
else:
record.DeltaX = bc.s_get(num_bits + 2)
else:
record = _make_object('CurvedEdgeRecord')
record.TypeFlag = 1
record.StraightFlag = 0
record.NumBits = num_bits
record.ControlDeltaX = bc.s_get(num_bits + 2)
record.ControlDeltaY = bc.s_get(num_bits + 2)
record.AnchorDeltaX = bc.s_get(num_bits + 2)
record.AnchorDeltaY = bc.s_get(num_bits + 2)
else:
# non edge record
record = _make_object('StyleChangeRecord')
record.TypeFlag = 0
five_bits = [bc.u_get(1) for _ in range(5)]
if not any(five_bits):
# the five bits are zero, this is an EndShapeRecord
break
# we're not done, store the proper flags
(record.StateNewStyles, record.StateLineStyle,
record.StateFillStyle1, record.StateFillStyle0,
record.StateMoveTo) = five_bits
if record.StateMoveTo:
record.MoveBits = move_bits = bc.u_get(5)
record.MoveDeltaX = bc.s_get(move_bits)
record.MoveDeltaY = bc.s_get(move_bits)
if record.StateFillStyle0:
record.FillStyle0 = bc.u_get(num_fill_bits)
if record.StateFillStyle1:
record.FillStyle1 = bc.u_get(num_fill_bits)
if record.StateLineStyle:
record.LineStyle = bc.u_get(num_line_bits)
if record.StateNewStyles:
record.FillStyles = self._get_struct_fillstylearray(
shape_number)
record.LineStyles = self._get_struct_linestylearray(
shape_number)
# these two not only belong to the record, but also
# modifies the number of bits read in the future
# if shape number bigs enough (didn't find this in the
# spec, but works for now, maybe '2' is not the limit...)
if shape_number > 2:
record.NumFillBits = num_fill_bits = bc.u_get(4)
record.NumLineBits = num_line_bits = bc.u_get(4)
else:
record.NumFillBits = bc.u_get(4)
record.NumLineBits = bc.u_get(4)
# reset the BC here, as the structures just read work at
# byte level
bc = BitConsumer(self._src)
shape_records.append(record)
return shape_records
|
Return an array of SHAPERECORDS.
|
def get_scale_fac(fig, fiducial_width=8, fiducial_height=7):
"""Gets a factor to scale fonts by for the given figure. The scale
factor is relative to a figure with dimensions
(`fiducial_width`, `fiducial_height`).
"""
width, height = fig.get_size_inches()
return (width*height/(fiducial_width*fiducial_height))**0.5
|
Gets a factor to scale fonts by for the given figure. The scale
factor is relative to a figure with dimensions
(`fiducial_width`, `fiducial_height`).
|
def fetch(version='bayestar2017'):
"""
Downloads the specified version of the Bayestar dust map.
Args:
version (Optional[:obj:`str`]): The map version to download. Valid versions are
:obj:`'bayestar2017'` (Green, Schlafly, Finkbeiner et al. 2018) and
:obj:`'bayestar2015'` (Green, Schlafly, Finkbeiner et al. 2015). Defaults
to :obj:`'bayestar2017'`.
Raises:
:obj:`ValueError`: The requested version of the map does not exist.
:obj:`DownloadError`: Either no matching file was found under the given DOI, or
the MD5 sum of the file was not as expected.
:obj:`requests.exceptions.HTTPError`: The given DOI does not exist, or there
was a problem connecting to the Dataverse.
"""
doi = {
'bayestar2015': '10.7910/DVN/40C44C',
'bayestar2017': '10.7910/DVN/LCYHJG'
}
# Raise an error if the specified version of the map does not exist
try:
doi = doi[version]
except KeyError as err:
raise ValueError('Version "{}" does not exist. Valid versions are: {}'.format(
version,
', '.join(['"{}"'.format(k) for k in doi.keys()])
))
requirements = {
'bayestar2015': {'contentType': 'application/x-hdf'},
'bayestar2017': {'filename': 'bayestar2017.h5'}
}[version]
local_fname = os.path.join(data_dir(), 'bayestar', '{}.h5'.format(version))
# Download the data
fetch_utils.dataverse_download_doi(
doi,
local_fname,
file_requirements=requirements)
|
Downloads the specified version of the Bayestar dust map.
Args:
version (Optional[:obj:`str`]): The map version to download. Valid versions are
:obj:`'bayestar2017'` (Green, Schlafly, Finkbeiner et al. 2018) and
:obj:`'bayestar2015'` (Green, Schlafly, Finkbeiner et al. 2015). Defaults
to :obj:`'bayestar2017'`.
Raises:
:obj:`ValueError`: The requested version of the map does not exist.
:obj:`DownloadError`: Either no matching file was found under the given DOI, or
the MD5 sum of the file was not as expected.
:obj:`requests.exceptions.HTTPError`: The given DOI does not exist, or there
was a problem connecting to the Dataverse.
|
def timeinfo(self):
"""Time series data of the time step.
Set to None if no time series data is available for this time step.
"""
if self.istep not in self.sdat.tseries.index:
return None
return self.sdat.tseries.loc[self.istep]
|
Time series data of the time step.
Set to None if no time series data is available for this time step.
|
def latitude(self, latitude):
"""Setter for latiutde."""
if not (-90 <= latitude <= 90):
raise ValueError('latitude was {}, but has to be in [-90, 90]'
.format(latitude))
self._latitude = latitude
|
Setter for latitude.
|
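The setter above is presumably one half of a property; a minimal self-contained sketch of the full pattern (the Position class name is hypothetical), validating on every assignment:

class Position:
    def __init__(self, latitude=0.0):
        self.latitude = latitude          # routed through the setter below

    @property
    def latitude(self):
        return self._latitude

    @latitude.setter
    def latitude(self, latitude):
        if not (-90 <= latitude <= 90):
            raise ValueError('latitude was {}, but has to be in [-90, 90]'
                             .format(latitude))
        self._latitude = latitude

p = Position(45.0)
try:
    p.latitude = -91.0
except ValueError as err:
    print(err)   # latitude was -91.0, but has to be in [-90, 90]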
def resource_to_url(resource, request=None, quote=False):
"""
Converts the given resource to a URL.
:param request: Request object (required for the host name part of the
URL). If this is not given, the current request is used.
:param bool quote: If set, the URL returned will be quoted.
"""
if request is None:
request = get_current_request()
# cnv = request.registry.getAdapter(request, IResourceUrlConverter)
reg = get_current_registry()
cnv = reg.getAdapter(request, IResourceUrlConverter)
return cnv.resource_to_url(resource, quote=quote)
|
Converts the given resource to a URL.
:param request: Request object (required for the host name part of the
URL). If this is not given, the current request is used.
:param bool quote: If set, the URL returned will be quoted.
|
def spades(args):
"""
%prog spades folder
Run automated SPADES.
"""
from jcvi.formats.fastq import readlen
p = OptionParser(spades.__doc__)
opts, args = p.parse_args(args)
if len(args) == 0:
sys.exit(not p.print_help())
folder, = args
for p, pf in iter_project(folder):
rl = readlen([p[0], "--silent"])
# <http://spades.bioinf.spbau.ru/release3.1.0/manual.html#sec3.4>
kmers = None
# check the longer read length first so the 250 bp branch is reachable
if rl >= 250:
kmers = "21,33,55,77,99,127"
elif rl >= 150:
kmers = "21,33,55,77"
cmd = "spades.py"
if kmers:
cmd += " -k {0}".format(kmers)
cmd += " --careful"
cmd += " --pe1-1 {0} --pe1-2 {1}".format(*p)
cmd += " -o {0}_spades".format(pf)
print(cmd)
|
%prog spades folder
Run automated SPADES.
|
def _fetch(self, params, required, defaults):
"""Make the NVP request and store the response."""
defaults.update(params)
pp_params = self._check_and_update_params(required, defaults)
pp_string = self.signature + urlencode(pp_params)
response = self._request(pp_string)
response_params = self._parse_response(response)
log.debug('PayPal Request:\n%s\n', pprint.pformat(defaults))
log.debug('PayPal Response:\n%s\n', pprint.pformat(response_params))
# Gather all NVP parameters to pass to a new instance.
nvp_params = {}
tmpd = defaults.copy()
tmpd.update(response_params)
for k, v in tmpd.items():
if k in self.NVP_FIELDS:
nvp_params[str(k)] = v
# PayPal timestamp has to be formatted.
if 'timestamp' in nvp_params:
nvp_params['timestamp'] = paypaltime2datetime(nvp_params['timestamp'])
nvp_obj = PayPalNVP(**nvp_params)
nvp_obj.init(self.request, params, response_params)
nvp_obj.save()
return nvp_obj
|
Make the NVP request and store the response.
|
def _check_response(response, expected):
"""
Checks if the expected response code matches the actual response code.
If they're not equal, raises the appropriate exception
Args:
response: The HTTP response object whose status_code is checked
expected: (int) Expected status code
"""
response_code = response.status_code
if expected == response_code:
return
if response_code < 400:
raise ex.UnexpectedResponseCodeException(response.text)
elif response_code == 401:
raise ex.UnauthorizedException(response.text)
elif response_code == 400:
raise ex.BadRequestException(response.text)
elif response_code == 403:
raise ex.ForbiddenException(response.text)
elif response_code == 404:
raise ex.NotFoundException(response.text)
elif response_code == 429:
raise ex.RateLimitedException(response.text)
else:
raise ex.InternalServerErrorException(response.text)
|
Checks if the expected response code matches the actual response code.
If they're not equal, raises the appropriate exception
Args:
response: The HTTP response object whose status_code is checked
expected: (int) Expected status code
|
def class_in_progress(stack=None):
"""True if currently inside a class definition, else False."""
if stack is None:
stack = inspect.stack()
for frame in stack:
statement_list = frame[4]
if statement_list is None:
continue
if statement_list[0].strip().startswith('class '):
return True
return False
|
True if currently inside a class definition, else False.
|
async def delete(self, request, resource=None, **kwargs):
"""Delete a resource."""
if resource is None:
raise RESTNotFound(reason='Resource not found')
self.collection.remove(resource)
|
Delete a resource.
|
def data_files(self):
"""Returns a python list of all (sharded) data subset files.
Returns:
python list of all (sharded) data set files.
Raises:
ValueError: if there are not data_files matching the subset.
"""
tf_record_pattern = os.path.join(FLAGS.data_dir, '%s-*' % self.subset)
data_files = tf.gfile.Glob(tf_record_pattern)
if not data_files:
print('No files found for dataset %s/%s at %s' % (self.name,
self.subset,
FLAGS.data_dir))
self.download_message()
exit(-1)
return data_files
|
Returns a python list of all (sharded) data subset files.
Returns:
python list of all (sharded) data set files.
Raises:
ValueError: if there are not data_files matching the subset.
|
def populate(self):
"""Populates a new cache.
"""
if self.exists:
raise CacheAlreadyExistsException('location: %s' % self.cache_uri)
self._populate_setup()
with closing(self.graph):
with self._download_metadata_archive() as metadata_archive:
for fact in self._iter_metadata_triples(metadata_archive):
self._add_to_graph(fact)
|
Populates a new cache.
|
def time_col_turbulent(EnergyDis, ConcAl, ConcClay, coag, material,
DiamTarget, DIM_FRACTAL):
"""Calculate single collision time for turbulent flow mediated collisions.
Calculated as a function of floc size.
"""
return((1/6) * (6/np.pi)**(1/9) * EnergyDis**(-1/3) * DiamTarget**(2/3)
* frac_vol_floc_initial(ConcAl, ConcClay, coag, material)**(-8/9)
* (DiamTarget / material.Diameter)**((8*(DIM_FRACTAL-3)) / 9)
)
|
Calculate single collision time for turbulent flow mediated collisions.
Calculated as a function of floc size.
|
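Reading the expression off the code above, the single-collision time appears to be, in LaTeX form (with \varepsilon the energy dissipation rate EnergyDis, d the target floc diameter DiamTarget, d_p the primary particle diameter material.Diameter, \phi_0 the initial floc volume fraction, and D_f the fractal dimension DIM_FRACTAL):

t_c = \frac{1}{6}\left(\frac{6}{\pi}\right)^{1/9} \varepsilon^{-1/3}\, d^{2/3}\, \phi_0^{-8/9} \left(\frac{d}{d_p}\right)^{8(D_f - 3)/9}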
def registerFilter(self, column, patterns, is_regex=False,
ignore_case=False):
"""Register filter on a column of table.
@param column: The column name.
@param patterns: A single pattern or a list of patterns used for
matching column values.
@param is_regex: The patterns will be treated as regex if True, the
column values will be tested for equality with the
patterns otherwise.
@param ignore_case: Case insensitive matching will be used if True.
"""
if isinstance(patterns, basestring):
patt_list = (patterns,)
elif isinstance(patterns, (tuple, list)):
patt_list = list(patterns)
else:
raise ValueError("The patterns parameter must either be as string "
"or a tuple / list of strings.")
if is_regex:
if ignore_case:
flags = re.IGNORECASE
else:
flags = 0
patt_exprs = [re.compile(pattern, flags) for pattern in patt_list]
else:
if ignore_case:
patt_exprs = [pattern.lower() for pattern in patt_list]
else:
patt_exprs = patt_list
self._filters[column] = (patt_exprs, is_regex, ignore_case)
|
Register filter on a column of table.
@param column: The column name.
@param patterns: A single pattern or a list of patterns used for
matching column values.
@param is_regex: The patterns will be treated as regex if True, the
column values will be tested for equality with the
patterns otherwise.
@param ignore_case: Case insensitive matching will be used if True.
|
def derenzo_sources(space, min_pt=None, max_pt=None):
"""Create the PET/SPECT Derenzo sources phantom.
The Derenzo phantom contains a series of circles of decreasing size.
In 3d the phantom is simply the 2d phantom extended in the z direction as
cylinders.
Parameters
----------
space : `DiscreteLp`
Space in which the phantom should be created, must be 2- or
3-dimensional. If ``space.shape`` is 1 in an axis, a corresponding
slice of the phantom is created (instead of squashing the whole
phantom into the slice).
min_pt, max_pt : array-like, optional
If provided, use these vectors to determine the bounding box of the
phantom instead of ``space.min_pt`` and ``space.max_pt``.
It is currently required that ``min_pt >= space.min_pt`` and
``max_pt <= space.max_pt``, i.e., shifting or scaling outside the
original space is not allowed.
Providing one of them results in a shift, e.g., for ``min_pt``::
new_min_pt = min_pt
new_max_pt = space.max_pt + (min_pt - space.min_pt)
Providing both results in a scaled version of the phantom.
Returns
-------
phantom : ``space`` element
The Derenzo source phantom in the given space.
"""
if space.ndim == 2:
return ellipsoid_phantom(space, _derenzo_sources_2d(), min_pt, max_pt)
if space.ndim == 3:
return ellipsoid_phantom(
space, cylinders_from_ellipses(_derenzo_sources_2d()),
min_pt, max_pt)
else:
raise ValueError('dimension not 2 or 3, no phantom available')
|
Create the PET/SPECT Derenzo sources phantom.
The Derenzo phantom contains a series of circles of decreasing size.
In 3d the phantom is simply the 2d phantom extended in the z direction as
cylinders.
Parameters
----------
space : `DiscreteLp`
Space in which the phantom should be created, must be 2- or
3-dimensional. If ``space.shape`` is 1 in an axis, a corresponding
slice of the phantom is created (instead of squashing the whole
phantom into the slice).
min_pt, max_pt : array-like, optional
If provided, use these vectors to determine the bounding box of the
phantom instead of ``space.min_pt`` and ``space.max_pt``.
It is currently required that ``min_pt >= space.min_pt`` and
``max_pt <= space.max_pt``, i.e., shifting or scaling outside the
original space is not allowed.
Providing one of them results in a shift, e.g., for ``min_pt``::
new_min_pt = min_pt
new_max_pt = space.max_pt + (min_pt - space.min_pt)
Providing both results in a scaled version of the phantom.
Returns
-------
phantom : ``space`` element
The Derenzo source phantom in the given space.
|
def add_ones(a):
"""Adds a column of 1s at the end of the array"""
arr = N.ones((a.shape[0],a.shape[1]+1))
arr[:,:-1] = a
return arr
|
Adds a column of 1s at the end of the array
|
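The helper above is equivalent to horizontally stacking a ones column; a quick numpy check:

import numpy as np

a = np.array([[1.0, 2.0],
              [3.0, 4.0]])

# Same result as add_ones(a): append a column of ones on the right.
arr = np.hstack([a, np.ones((a.shape[0], 1))])
print(arr)
# [[1. 2. 1.]
#  [3. 4. 1.]]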
def call_pre_hook(awsclient, cloudformation):
"""Invoke the pre_hook BEFORE the config is read.
:param awsclient:
:param cloudformation:
"""
# TODO: this is deprecated!! move this to glomex_config_reader
# no config available
if not hasattr(cloudformation, 'pre_hook'):
# hook is not present
return
hook_func = getattr(cloudformation, 'pre_hook')
if not hook_func.func_code.co_argcount:
hook_func() # for compatibility with existing templates
else:
log.error('pre_hook cannot have any arguments. The pre_hook is ' +
'executed BEFORE the config is read')
|
Invoke the pre_hook BEFORE the config is read.
:param awsclient:
:param cloudformation:
|
def get_daemon_stats(self, details=False):
"""Send a HTTP request to the satellite (GET /get_daemon_stats)
:return: Daemon statistics
:rtype: dict
"""
logger.debug("Get daemon statistics for %s, %s %s", self.name, self.alive, self.reachable)
return self.con.get('stats%s' % ('?details=1' if details else ''))
|
Send a HTTP request to the satellite (GET /get_daemon_stats)
:return: Daemon statistics
:rtype: dict
|
def list_items(path_to_directory, pattern, wanted):
"""All items in the given path which match the given glob and are wanted"""
if not path_to_directory:
return set()
needed = make_needed(pattern, path_to_directory, wanted)
return [os.path.join(path_to_directory, name)
for name in _names_in_directory(path_to_directory)
if needed(name)]
|
All items in the given path which match the given glob and are wanted
|
def get_sidecar(fname, allowedfileformats='default'):
"""
Loads sidecar or creates one
"""
if allowedfileformats == 'default':
allowedfileformats = ['.tsv', '.nii.gz']
for f in allowedfileformats:
fname = fname.split(f)[0]
fname += '.json'
if os.path.exists(fname):
with open(fname) as fs:
sidecar = json.load(fs)
else:
sidecar = {}
if 'filestatus' not in sidecar:
sidecar['filestatus'] = {}
sidecar['filestatus']['reject'] = False
sidecar['filestatus']['reason'] = []
return sidecar
|
Loads sidecar or creates one
|
def increase(self, infile):
'''Increase: swap a byte sequence at an arbitrary position with an arbitrary byte sequence of larger size
'''
gf = infile[31:]
index = gf.index(random.choice(gf))
index_len = len(gf[index])
large_size_index = random.choice([gf.index(g) for g in gf if len(g) > index_len])
gf[index], gf[large_size_index] = gf[large_size_index], gf[index]
return infile[:31] + gf
|
Increase: swap a byte sequence at an arbitrary position with an arbitrary byte sequence of larger size
|
def getReferenceSetByName(self, name):
"""
Returns the reference set with the specified name.
"""
if name not in self._referenceSetNameMap:
raise exceptions.ReferenceSetNameNotFoundException(name)
return self._referenceSetNameMap[name]
|
Returns the reference set with the specified name.
|