| code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) |
|---|---|
def parseService(self, yadis_url, uri, type_uris, service_element):
"""Set the state of this object based on the contents of the
service element."""
self.type_uris = type_uris
self.server_url = uri
self.used_yadis = True
if not self.isOPIdentifier():
# XXX: This has crappy implications for Service elements
# that contain both 'server' and 'signon' Types. But
# that's a pathological configuration anyway, so I don't
# think I care.
self.local_id = findOPLocalIdentifier(service_element,
self.type_uris)
self.claimed_id = yadis_url
|
Set the state of this object based on the contents of the
service element.
|
def set_subparsers_args(self, *args, **kwargs):
"""
Sets args and kwargs that are passed when creating a subparsers group
in an argparse.ArgumentParser i.e. when calling
argparse.ArgumentParser.add_subparsers
"""
self.subparsers_args = args
self.subparsers_kwargs = kwargs
|
Sets args and kwargs that are passed when creating a subparsers group
in an argparse.ArgumentParser i.e. when calling
argparse.ArgumentParser.add_subparsers
|
def add_thermodynamic(self, em=1000):
"""Apply thermodynamic constraints to the model.
Adding these constraints restricts the solution space to only
contain solutions that have no internal loops [Schilling00]_. This is
solved as a MILP problem as described in [Muller13]_. The time to solve
a problem with thermodynamic constraints is usually much longer than a
normal FBA problem.
The ``em`` parameter is the upper bound on the delta mu reaction
variables. This parameter has to be balanced based on the model size
since setting the value too low can result in the correct solutions
being infeasible and setting the value too high can result in
numerical instability which again makes the correct solutions
infeasible. The default value should work in all cases as long as the
model is not unusually large.
"""
internal = set(r for r in self._model.reactions
if not self._model.is_exchange(r))
# Reaction fluxes
v = self._v
# Indicator variable
alpha = self._prob.namespace(internal, types=lp.VariableType.Binary)
# Delta mu is the stoichiometrically weighted sum of the compound mus.
dmu = self._prob.namespace(internal)
for reaction_id in self._model.reactions:
if not self._model.is_exchange(reaction_id):
flux = v(reaction_id)
alpha_r = alpha(reaction_id)
dmu_r = dmu(reaction_id)
lower, upper = self._model.limits[reaction_id]
# Constrain the reaction to a direction determined by alpha
# and constrain the delta mu to a value in [-em; -1] if
# alpha is one, otherwise in [1; em].
self._prob.add_linear_constraints(
flux >= lower * (1 - alpha_r),
flux <= upper * alpha_r,
dmu_r >= -em * alpha_r + (1 - alpha_r),
dmu_r <= em * (1 - alpha_r) - alpha_r)
# Define mu variables
mu = self._prob.namespace(self._model.compounds)
tdbalance_lhs = {reaction_id: 0
for reaction_id in self._model.reactions}
for spec, value in iteritems(self._model.matrix):
compound, reaction_id = spec
if not self._model.is_exchange(reaction_id):
tdbalance_lhs[reaction_id] += mu(compound) * value
for reaction_id, lhs in iteritems(tdbalance_lhs):
if not self._model.is_exchange(reaction_id):
self._prob.add_linear_constraints(lhs == dmu(reaction_id))
|
Apply thermodynamic constraints to the model.
Adding these constraints restricts the solution space to only
contain solutions that have no internal loops [Schilling00]_. This is
solved as a MILP problem as described in [Muller13]_. The time to solve
a problem with thermodynamic constraints is usually much longer than a
normal FBA problem.
The ``em`` parameter is the upper bound on the delta mu reaction
variables. This parameter has to be balanced based on the model size
since setting the value too low can result in the correct solutions
being infeasible and setting the value too high can result in
numerical instability which again makes the correct solutions
infeasible. The default value should work in all cases as long as the
model is not unusually large.
|
async def async_get_current_program(channel, no_cache=False):
'''
Get the current program info
'''
chan = await async_determine_channel(channel)
guide = await async_get_program_guide(chan, no_cache)
if not guide:
_LOGGER.warning('Could not retrieve TV program for %s', channel)
return
now = datetime.datetime.now()
for prog in guide:
start = prog.get('start_time')
end = prog.get('end_time')
if now > start and now < end:
return prog
|
Get the current program info
|
def setItemPolicy(self, item, policy):
"""Sets the policy of the given item"""
index = item._combobox_indices[self.ColAction].get(policy, 0)
self._updateItemComboBoxIndex(item, self.ColAction, index)
combobox = self.itemWidget(item, self.ColAction)
if combobox:
combobox.setCurrentIndex(index)
|
Sets the policy of the given item
|
def get_users(profile='grafana'):
'''
List all users.
profile
Configuration profile used to connect to the Grafana instance.
Default is 'grafana'.
CLI Example:
.. code-block:: bash
salt '*' grafana4.get_users
'''
if isinstance(profile, string_types):
profile = __salt__['config.option'](profile)
response = requests.get(
'{0}/api/users'.format(profile['grafana_url']),
auth=_get_auth(profile),
headers=_get_headers(profile),
timeout=profile.get('grafana_timeout', 3),
)
if response.status_code >= 400:
response.raise_for_status()
return response.json()
|
List all users.
profile
Configuration profile used to connect to the Grafana instance.
Default is 'grafana'.
CLI Example:
.. code-block:: bash
salt '*' grafana4.get_users
|
def make_qr(content, error=None, version=None, mode=None, mask=None,
encoding=None, eci=False, boost_error=True):
"""\
Creates a QR Code (never a Micro QR Code).
See :py:func:`make` for a description of the parameters.
:rtype: QRCode
"""
return make(content, error=error, version=version, mode=mode, mask=mask,
encoding=encoding, eci=eci, micro=False, boost_error=boost_error)
|
\
Creates a QR Code (never a Micro QR Code).
See :py:func:`make` for a description of the parameters.
:rtype: QRCode
|
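This helper matches the signature of the segno package's `make_qr`; assuming that library, a minimal usage sketch:

```python
# Minimal usage sketch, assuming the segno package (pip install segno)
# provides make_qr/make with the signature shown above.
import segno

qr = segno.make_qr("Hello, world")   # always a full QR Code, never Micro
qr.save("hello.png", scale=4)        # render to a PNG file
print(qr.version, qr.error)
```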
def get_or_create_model_key(self):
"""
Get or create key for the model.
Returns
~~~~~~~
(model_key, boolean) tuple
"""
model_cache_info = model_cache_backend.retrieve_model_cache_info(self.model._meta.db_table)
if not model_cache_info:
return uuid.uuid4().hex, True
return model_cache_info.table_key, False
|
Get or create key for the model.
Returns
~~~~~~~
(model_key, boolean) tuple
|
def compile_pycos(toc):
"""Given a TOC or equivalent list of tuples, generates all the required
pyc/pyo files, writing in a local directory if required, and returns the
list of tuples with the updated pathnames.
"""
global BUILDPATH
# For those modules that need to be rebuilt, use the build directory
# PyInstaller creates during the build process.
basepath = os.path.join(BUILDPATH, "localpycos")
new_toc = []
for (nm, fnm, typ) in toc:
# Trim the terminal "c" or "o"
source_fnm = fnm[:-1]
# If the source is newer than the compiled, or the compiled doesn't
# exist, we need to perform a build ourselves.
if mtime(source_fnm) > mtime(fnm):
try:
py_compile.compile(source_fnm)
except IOError:
# If we're compiling on a system directory, probably we don't
# have write permissions; thus we compile to a local directory
# and change the TOC entry accordingly.
ext = os.path.splitext(fnm)[1]
if "__init__" not in fnm:
# If it's a normal module, use last part of the qualified
# name as module name and the first as leading path
leading, mod_name = nm.split(".")[:-1], nm.split(".")[-1]
else:
# In case of a __init__ module, use all the qualified name
# as leading path and use "__init__" as the module name
leading, mod_name = nm.split("."), "__init__"
leading = os.path.join(basepath, *leading)
if not os.path.exists(leading):
os.makedirs(leading)
fnm = os.path.join(leading, mod_name + ext)
py_compile.compile(source_fnm, fnm)
new_toc.append((nm, fnm, typ))
return new_toc
|
Given a TOC or equivalent list of tuples, generates all the required
pyc/pyo files, writing in a local directory if required, and returns the
list of tuples with the updated pathnames.
|
def contents(self):
"""Get svg string
"""
c = self._header[:]
c.append(' font-weight="{}"'.format(self.font_weight))
c.append(' font-family="{}"'.format(self.font_family))
c.append(' width="{}" height="{}"'.format(*self.screen_size))
sclw = self.original_size[0] * self.scale_factor
sclh = self.original_size[1] * self.scale_factor
longside = max([sclw, sclh])
width = round(longside + self.margin * 2, 2)
height = round(longside + self.margin * 2, 2)
xleft = round(-self.margin - (longside - sclw) / 2, 2)
ytop = round(-self.margin - (longside - sclh) / 2, 2)
c.append(' viewBox="{} {} {} {}">\n'.format(
xleft, ytop, width, height))
if self.bgcolor is not None:
c.append('<rect x="{}" y="{}" width="{}" height="{}" fill="{}" \
/>\n'.format(xleft, ytop, width, height, self.bgcolor))
c.extend(self._elems)
c.append("</svg>")
return "".join(c)
|
Get svg string
|
def _seg(chars):
"""按是否是汉字进行分词"""
s = '' # 保存一个词
ret = [] # 分词结果
flag = 0 # 上一个字符是什么? 0: 汉字, 1: 不是汉字
for n, c in enumerate(chars):
if RE_HANS.match(c):  # Han character; establish the initial flag
if n == 0:  # first character
flag = 0
if flag == 0:
s += c
else:  # previous character was non-Han; close the segment
ret.append(s)
flag = 0
s = c
else:  # non-Han character
if n == 0:  # first character; establish the initial flag
flag = 1
if flag == 1:
s += c
else:  # previous character was Han; close the segment
ret.append(s)
flag = 1
s = c
ret.append(s)  # final segment
return ret
|
Split the input into runs of Han (Chinese) vs. non-Han characters.
|
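A self-contained sketch of the same Han/non-Han run-splitting technique; the simplified `RE_HANS` pattern below is an assumption, since the original module defines its own (broader) pattern:

```python
# Standalone sketch: split a string into runs of Han vs. non-Han characters.
# RE_HANS below is a simplified stand-in covering only the basic CJK block.
import re

RE_HANS = re.compile(r'[\u4e00-\u9fff]')  # assumption, not the original pattern

def seg(chars):
    ret, s, flag = [], '', None  # flag: True if the current run is Han
    for c in chars:
        is_han = bool(RE_HANS.match(c))
        if flag is None or is_han == flag:
            s += c
        else:
            ret.append(s)
            s = c
        flag = is_han
    if s:
        ret.append(s)
    return ret

print(seg('abc中文def'))  # ['abc', '中文', 'def']
```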
def delete(ctx, short_name):
"""Delete a specific subscription by short name"""
wva = get_wva(ctx)
subscription = wva.get_subscription(short_name)
subscription.delete()
|
Delete a specific subscription by short name
|
def transform_audio(self, y):
'''Apply the scale transform to the tempogram
Parameters
----------
y : np.ndarray
The audio buffer
Returns
-------
data : dict
data['temposcale'] : np.ndarray, shape=(n_frames, n_fmt)
The scale transform magnitude coefficients
'''
data = super(TempoScale, self).transform_audio(y)
data['temposcale'] = np.abs(fmt(data.pop('tempogram'),
axis=1,
n_fmt=self.n_fmt)).astype(np.float32)[self.idx]
return data
|
Apply the scale transform to the tempogram
Parameters
----------
y : np.ndarray
The audio buffer
Returns
-------
data : dict
data['temposcale'] : np.ndarray, shape=(n_frames, n_fmt)
The scale transform magnitude coefficients
|
def default_namespace(self, value):
"""
Setter for **self.__default_namespace** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
"default_namespace", value)
self.__default_namespace = value
|
Setter for **self.__default_namespace** attribute.
:param value: Attribute value.
:type value: unicode
|
def handle_msg(self, msg):
"""BGP message handler.
BGP message handling is shared between protocol instance and peer. Peer
only handles limited messages under suitable state. Here we handle
KEEPALIVE, UPDATE and ROUTE_REFRESH messages. UPDATE and ROUTE_REFRESH
messages are handled only after session is established.
"""
if msg.type == BGP_MSG_KEEPALIVE:
# If we receive a Keep Alive message in open_confirm state, we
# transition to established state.
if self.state.bgp_state == const.BGP_FSM_OPEN_CONFIRM:
self.state.bgp_state = const.BGP_FSM_ESTABLISHED
self._enqueue_init_updates()
elif msg.type == BGP_MSG_UPDATE:
assert self.state.bgp_state == const.BGP_FSM_ESTABLISHED
# Will try to process this UPDATE message further
self._handle_update_msg(msg)
elif msg.type == BGP_MSG_ROUTE_REFRESH:
# If it's a route-refresh message
assert self.state.bgp_state == const.BGP_FSM_ESTABLISHED
self._handle_route_refresh_msg(msg)
else:
# Open/Notification messages are currently handled by protocol and
# nothing is done inside peer, so should not see them here.
raise ValueError('Peer does not support handling of %s'
' message during %s state' %
(msg, self.state.bgp_state))
|
BGP message handler.
BGP message handling is shared between protocol instance and peer. Peer
only handles limited messages under suitable state. Here we handle
KEEPALIVE, UPDATE and ROUTE_REFRESH messages. UPDATE and ROUTE_REFRESH
messages are handled only after session is established.
|
def get_price(item):
"""Finds the price with the default locationGroupId"""
the_price = "No Default Pricing"
for price in item.get('prices', []):
if not price.get('locationGroupId'):
the_price = "%0.4f" % float(price['hourlyRecurringFee'])
return the_price
|
Finds the price with the default locationGroupId
|
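A quick check of the selection rule above with a hand-built (hypothetical) item dict; only the price entry without a `locationGroupId` is used:

```python
# Hypothetical item dict illustrating the default-pricing rule: entries with
# a locationGroupId are skipped, the one without it supplies the price.
item = {
    'prices': [
        {'locationGroupId': 509, 'hourlyRecurringFee': '0.2500'},
        {'hourlyRecurringFee': '0.1000'},            # default pricing
    ]
}
print(get_price(item))   # -> '0.1000'
```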
def gevent_monkey_patch_report(self):
"""
Report effective gevent monkey patching on the logs.
"""
try:
import gevent.socket
import socket
if gevent.socket.socket is socket.socket:
self.log("gevent monkey patching is active")
return True
else:
self.notify_user("gevent monkey patching failed.")
except ImportError:
self.notify_user("gevent is not installed, monkey patching failed.")
return False
|
Report effective gevent monkey patching on the logs.
|
def _format_job_instance(job):
'''
Format the job instance correctly
'''
ret = {'Function': job.get('fun', 'unknown-function'),
'Arguments': salt.utils.json.loads(job.get('arg', '[]')),
# unlikely but safeguard from invalid returns
'Target': job.get('tgt', 'unknown-target'),
'Target-type': job.get('tgt_type', 'list'),
'User': job.get('user', 'root')}
# TODO: Add Metadata support when it is merged from develop
return ret
|
Format the job instance correctly
|
def size(self, source):
'''Get the size component of the given s3url. If it is a
directory, combine the sizes of all the files under
that directory. Subdirectories will not be counted unless
--recursive option is set.
'''
result = []
for src in self.source_expand(source):
size = 0
for f in self.s3walk(src):
size += f['size']
result.append((src, size))
return result
|
Get the size component of the given s3url. If it is a
directory, combine the sizes of all the files under
that directory. Subdirectories will not be counted unless
--recursive option is set.
|
def trace(function, *args, **k) :
"""Decorates a function by tracing the begining and
end of the function execution, if doTrace global is True"""
if doTrace : print ("> "+function.__name__, args, k)
result = function(*args, **k)
if doTrace : print ("< "+function.__name__, args, k, "->", result)
return result
|
Decorates a function by tracing the beginning and
end of the function execution, if doTrace global is True
|
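Usage sketch for the tracer above; `doTrace` is the module-level switch the helper reads, set here only for the example:

```python
# Usage sketch: doTrace is the module-level flag the trace() helper checks.
doTrace = True

def add(a, b):
    return a + b

trace(add, 2, 3)
# > add (2, 3) {}
# < add (2, 3) {} -> 5
```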
def list(self, *args, **kwargs):
"""
List networks. Similar to the ``docker networks ls`` command.
Args:
names (:py:class:`list`): List of names to filter by.
ids (:py:class:`list`): List of ids to filter by.
filters (dict): Filters to be processed on the network list.
Available filters:
- ``driver=[<driver-name>]`` Matches a network's driver.
- ``label=[<key>]`` or ``label=[<key>=<value>]``.
- ``type=["custom"|"builtin"]`` Filters networks by type.
greedy (bool): Fetch more details for each network individually.
You might want this to get the containers attached to them.
Returns:
(list of :py:class:`Network`) The networks on the server.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
greedy = kwargs.pop('greedy', False)
resp = self.client.api.networks(*args, **kwargs)
networks = [self.prepare_model(item) for item in resp]
if greedy and version_gte(self.client.api._version, '1.28'):
for net in networks:
net.reload()
return networks
|
List networks. Similar to the ``docker networks ls`` command.
Args:
names (:py:class:`list`): List of names to filter by.
ids (:py:class:`list`): List of ids to filter by.
filters (dict): Filters to be processed on the network list.
Available filters:
- ``driver=[<driver-name>]`` Matches a network's driver.
- ``label=[<key>]`` or ``label=[<key>=<value>]``.
- ``type=["custom"|"builtin"]`` Filters networks by type.
greedy (bool): Fetch more details for each network individually.
You might want this to get the containers attached to them.
Returns:
(list of :py:class:`Network`) The networks on the server.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
def step1ab(self):
"""step1ab() gets rid of plurals and -ed or -ing. e.g.
caresses -> caress
ponies -> poni
ties -> ti
caress -> caress
cats -> cat
feed -> feed
agreed -> agree
disabled -> disable
matting -> mat
mating -> mate
meeting -> meet
milling -> mill
messing -> mess
meetings -> meet
"""
if self.b[self.k] == "s":
if self.ends("sses"):
self.k = self.k - 2
elif self.ends("ies"):
self.setto("i")
elif self.b[self.k - 1] != "s":
self.k = self.k - 1
if self.ends("eed"):
if self.m() > 0:
self.k = self.k - 1
elif (self.ends("ed") or self.ends("ing")) and self.vowelinstem():
self.k = self.j
if self.ends("at"):
self.setto("ate")
elif self.ends("bl"):
self.setto("ble")
elif self.ends("iz"):
self.setto("ize")
elif self.doublec(self.k):
self.k = self.k - 1
ch = self.b[self.k]
if ch == "l" or ch == "s" or ch == "z":
self.k = self.k + 1
elif self.m() == 1 and self.cvc(self.k):
self.setto("e")
|
step1ab() gets rid of plurals and -ed or -ing. e.g.
caresses -> caress
ponies -> poni
ties -> ti
caress -> caress
cats -> cat
feed -> feed
agreed -> agree
disabled -> disable
matting -> mat
mating -> mate
meeting -> meet
milling -> mill
messing -> mess
meetings -> meet
|
def _compute_zs_mat(sz:TensorImageSize, scale:float, squish:float,
invert:bool, row_pct:float, col_pct:float)->AffineMatrix:
"Utility routine to compute zoom/squish matrix."
orig_ratio = math.sqrt(sz[1]/sz[0])
for s,r,i in zip(scale,squish, invert):
s,r = 1/math.sqrt(s),math.sqrt(r)
if s * r <= 1 and s / r <= 1: #Test if we are completely inside the picture
w,h = (s/r, s*r) if i else (s*r,s/r)
col_c = (1-w) * (2*col_pct - 1)
row_c = (1-h) * (2*row_pct - 1)
return _get_zoom_mat(w, h, col_c, row_c)
#Fallback, hack to emulate a center crop without cropping anything yet.
if orig_ratio > 1: return _get_zoom_mat(1/orig_ratio**2, 1, 0, 0.)
else: return _get_zoom_mat(1, orig_ratio**2, 0, 0.)
|
Utility routine to compute zoom/squish matrix.
|
def _set_data(self, data, offset=None, copy=False):
"""Internal method for set_data.
"""
# Copy if needed, check/normalize shape
data = np.array(data, copy=copy)
data = self._normalize_shape(data)
# Maybe resize to purge DATA commands?
if offset is None:
self._resize(data.shape)
elif all([i == 0 for i in offset]) and data.shape == self._shape:
self._resize(data.shape)
# Convert offset to something usable
offset = offset or tuple([0 for i in range(self._ndim)])
assert len(offset) == self._ndim
# Check if data fits
for i in range(len(data.shape)-1):
if offset[i] + data.shape[i] > self._shape[i]:
raise ValueError("Data is too large")
# Send GLIR command
self._glir.command('DATA', self._id, offset, data)
|
Internal method for set_data.
|
def _generate_struct_deserializer(self, struct):
"""Emits the deserialize method for the serialization object for the given struct."""
struct_name = fmt_class_prefix(struct)
with self.block_func(
func='deserialize',
args=fmt_func_args_declaration([('valueDict',
'NSDictionary<NSString *, id> *')]),
return_type='{} *'.format(struct_name),
class_func=True):
if not struct.all_fields and not struct.has_enumerated_subtypes():
self.emit('#pragma unused(valueDict)')
def emit_struct_deserialize_logic(struct):
for field in struct.all_fields:
data_type, nullable = unwrap_nullable(field.data_type)
input_value = 'valueDict[@"{}"]'.format(field.name)
if is_primitive_type(data_type):
deserialize_call = input_value
else:
deserialize_call = self._fmt_serialization_call(
field.data_type, input_value, False)
if nullable or field.has_default:
default_value = fmt_default_value(
field) if field.has_default else 'nil'
if is_primitive_type(data_type):
deserialize_call = '{} ?: {}'.format(
input_value, default_value)
else:
deserialize_call = '{} ? {} : {}'.format(
input_value, deserialize_call, default_value)
self.emit('{}{} = {};'.format(
fmt_type(field.data_type),
fmt_var(field.name), deserialize_call))
self.emit()
deserialized_obj_args = [(fmt_var(f.name), fmt_var(f.name))
for f in struct.all_fields]
init_call = fmt_func_call(
caller=fmt_alloc_call(caller=struct_name),
callee=self._cstor_name_from_fields(struct.all_fields),
args=fmt_func_args(deserialized_obj_args))
self.emit('return {};'.format(init_call))
if not struct.has_enumerated_subtypes():
emit_struct_deserialize_logic(struct)
else:
for tags, subtype in struct.get_all_subtypes_with_tags():
assert len(tags) == 1, tags
tag = tags[0]
base_string = 'if ([valueDict[@".tag"] isEqualToString:@"{}"])'
with self.block(base_string.format(tag)):
caller = fmt_serial_class(fmt_class_prefix(subtype))
args = fmt_func_args([('value', 'valueDict')])
deserialize_call = fmt_func_call(
caller=caller, callee='deserialize', args=args)
self.emit('return {};'.format(deserialize_call))
self.emit()
if struct.is_catch_all():
emit_struct_deserialize_logic(struct)
else:
description_str = (
'[NSString stringWithFormat:@"Tag has an invalid '
'value: \\\"%@\\\".", valueDict[@".tag"]]')
self._generate_throw_error('InvalidTag', description_str)
self.emit()
|
Emits the deserialize method for the serialization object for the given struct.
|
def read_string(self, len):
"""Reads a string of a given length from the packet"""
format = '!' + str(len) + 's'
length = struct.calcsize(format)
info = struct.unpack(format,
self.data[self.offset:self.offset + length])
self.offset += length
return info[0]
|
Reads a string of a given length from the packet
|
def __dtw_calc_accu_cost(C, D, D_steps, step_sizes_sigma,
weights_mul, weights_add, max_0, max_1): # pragma: no cover
'''Calculate the accumulated cost matrix D.
Use dynamic programming to calculate the accumulated costs.
Parameters
----------
C : np.ndarray [shape=(N, M)]
pre-computed cost matrix
D : np.ndarray [shape=(N, M)]
accumulated cost matrix
D_steps : np.ndarray [shape=(N, M)]
steps which were used for calculating D
step_sizes_sigma : np.ndarray [shape=[n, 2]]
Specifies allowed step sizes as used by the dtw.
weights_add : np.ndarray [shape=[n, ]]
Additive weights to penalize certain step sizes.
weights_mul : np.ndarray [shape=[n, ]]
Multiplicative weights to penalize certain step sizes.
max_0 : int
maximum number of steps in step_sizes_sigma in dim 0.
max_1 : int
maximum number of steps in step_sizes_sigma in dim 1.
Returns
-------
D : np.ndarray [shape=(N,M)]
accumulated cost matrix.
D[N,M] is the total alignment cost.
When doing subsequence DTW, D[N,:] indicates a matching function.
D_steps : np.ndarray [shape=(N,M)]
steps which were used for calculating D.
See Also
--------
dtw
'''
for cur_n in range(max_0, D.shape[0]):
for cur_m in range(max_1, D.shape[1]):
# accumulate costs
for cur_step_idx, cur_w_add, cur_w_mul in zip(range(step_sizes_sigma.shape[0]),
weights_add, weights_mul):
cur_D = D[cur_n - step_sizes_sigma[cur_step_idx, 0],
cur_m - step_sizes_sigma[cur_step_idx, 1]]
cur_C = cur_w_mul * C[cur_n - max_0, cur_m - max_1]
cur_C += cur_w_add
cur_cost = cur_D + cur_C
# check if cur_cost is smaller than the one stored in D
if cur_cost < D[cur_n, cur_m]:
D[cur_n, cur_m] = cur_cost
# save step-index
D_steps[cur_n, cur_m] = cur_step_idx
return D, D_steps
|
Calculate the accumulated cost matrix D.
Use dynamic programming to calculate the accumulated costs.
Parameters
----------
C : np.ndarray [shape=(N, M)]
pre-computed cost matrix
D : np.ndarray [shape=(N, M)]
accumulated cost matrix
D_steps : np.ndarray [shape=(N, M)]
steps which were used for calculating D
step_sizes_sigma : np.ndarray [shape=[n, 2]]
Specifies allowed step sizes as used by the dtw.
weights_add : np.ndarray [shape=[n, ]]
Additive weights to penalize certain step sizes.
weights_mul : np.ndarray [shape=[n, ]]
Multiplicative weights to penalize certain step sizes.
max_0 : int
maximum number of steps in step_sizes_sigma in dim 0.
max_1 : int
maximum number of steps in step_sizes_sigma in dim 1.
Returns
-------
D : np.ndarray [shape=(N,M)]
accumulated cost matrix.
D[N,M] is the total alignment cost.
When doing subsequence DTW, D[N,:] indicates a matching function.
D_steps : np.ndarray [shape=(N,M)]
steps which were used for calculating D.
See Also
--------
dtw
|
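This helper is the inner accumulation loop behind librosa's public DTW; assuming that package is installed, the public entry point can be exercised directly on a pre-computed cost matrix:

```python
# Usage sketch via the public API (assuming librosa is installed); the helper
# above performs the accumulation step inside librosa.sequence.dtw.
import numpy as np
import librosa

C = np.random.rand(20, 30)             # pre-computed cost matrix
D, wp = librosa.sequence.dtw(C=C)      # accumulated cost + warping path
print(D.shape, D[-1, -1], wp.shape)
```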
def split_grads_by_size(threshold_size, device_grads):
"""Break gradients into two sets according to tensor size.
Args:
threshold_size: int size cutoff for small vs large tensor.
device_grads: List of lists of (gradient, variable) tuples. The outer
list is over devices. The inner list is over individual gradients.
Returns:
small_grads: Subset of device_grads where shape is <= threshold_size
elements.
large_grads: Subset of device_grads where shape is > threshold_size
elements.
"""
small_grads = []
large_grads = []
for dl in device_grads:
small_dl = []
large_dl = []
for (g, v) in dl:
tensor_size = g.get_shape().num_elements()
if tensor_size <= threshold_size:
small_dl.append([g, v])
else:
large_dl.append([g, v])
if small_dl:
small_grads.append(small_dl)
if large_dl:
large_grads.append(large_dl)
return small_grads, large_grads
|
Break gradients into two sets according to tensor size.
Args:
threshold_size: int size cutoff for small vs large tensor.
device_grads: List of lists of (gradient, variable) tuples. The outer
list is over devices. The inner list is over individual gradients.
Returns:
small_grads: Subset of device_grads where shape is <= threshold_size
elements.
large_grads: Subset of device_grads where shape is > threshold_size
elements.
|
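A framework-free sketch of the same bucketing, with numpy arrays standing in for TF gradient tensors (an assumption purely for illustration; `.size` plays the role of `get_shape().num_elements()`):

```python
# Framework-free sketch of the small/large gradient split, using numpy arrays
# in place of TF tensors (illustration only).
import numpy as np

def split_by_size(threshold_size, device_grads):
    small_grads, large_grads = [], []
    for dl in device_grads:
        small_dl = [[g, v] for g, v in dl if g.size <= threshold_size]
        large_dl = [[g, v] for g, v in dl if g.size > threshold_size]
        if small_dl:
            small_grads.append(small_dl)
        if large_dl:
            large_grads.append(large_dl)
    return small_grads, large_grads

grads = [[(np.zeros(10), 'w1'), (np.zeros(1000), 'w2')]]
small, large = split_by_size(100, grads)
print(len(small[0]), len(large[0]))   # 1 1
```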
def fuse_batchnorm_weights(gamma, beta, mean, var, epsilon):
# https://github.com/Tencent/ncnn/blob/master/src/layer/batchnorm.cpp
""" float sqrt_var = sqrt(var_data[i]);
a_data[i] = bias_data[i] - slope_data[i] * mean_data[i] / sqrt_var;
b_data[i] = slope_data[i] / sqrt_var;
...
ptr[i] = b * ptr[i] + a;
"""
scale = gamma / np.sqrt(var + epsilon)
bias = beta - gamma * mean / np.sqrt(var + epsilon)
return [scale, bias]
|
float sqrt_var = sqrt(var_data[i]);
a_data[i] = bias_data[i] - slope_data[i] * mean_data[i] / sqrt_var;
b_data[i] = slope_data[i] / sqrt_var;
...
ptr[i] = b * ptr[i] + a;
|
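A small numpy check (with made-up values) that the fused scale/bias pair from the function above reproduces the explicit batch-norm expression:

```python
# Numerical check (made-up values): the fused scale/bias must reproduce
# gamma * (x - mean) / sqrt(var + eps) + beta.
import numpy as np

gamma, beta = np.array([1.5, 0.5]), np.array([0.1, -0.2])
mean, var, eps = np.array([0.3, -1.0]), np.array([2.0, 0.5]), 1e-5
x = np.array([0.7, 0.2])

scale, bias = fuse_batchnorm_weights(gamma, beta, mean, var, eps)
fused = scale * x + bias
explicit = gamma * (x - mean) / np.sqrt(var + eps) + beta
print(np.allclose(fused, explicit))   # True
```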
def _unpickle_channel(raw):
"""Try and unpickle a channel with sensible error handling
"""
try:
return pickle.loads(raw)
except (ValueError, pickle.UnpicklingError, EOFError, TypeError,
IndexError) as exc:
# maybe not pickled
if isinstance(raw, bytes):
raw = raw.decode('utf-8')
try: # test if this is a valid channel name
Channel.MATCH.match(raw)
except ValueError:
raise exc
return raw
|
Try and unpickle a channel with sensible error handling
|
def getCert(username, password,
certHost=_SERVER,
certfile=None,
certQuery=_PROXY):
"""Access the cadc certificate server."""
if certfile is None:
certfile = tempfile.NamedTemporaryFile()
# Add the username and password.
# If we knew the realm, we could use it instead of ``None``.
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
top_level_url = "http://" + certHost
logging.debug(top_level_url)
password_mgr.add_password(None, top_level_url, username, password)
handler = urllib2.HTTPBasicAuthHandler(password_mgr)
logging.debug(str(handler))
# create "opener" (OpenerDirector instance)
opener = urllib2.build_opener(handler)
# Install the opener.
urllib2.install_opener(opener)
# build the url that will 'GET' a certificate using user_id/password info
url = "http://" + certHost + certQuery
logging.debug(url)
r = None
try:
r = opener.open(url)
except urllib2.HTTPError as e:
logging.debug(url)
logging.debug(str(e))
return False
logging.debug(str(r))
if r is not None:
while True:
buf = r.read()
logging.debug(buf)
if not buf:
break
certfile.write(buf)
r.close()
return certfile
|
Access the cadc certificate server.
|
def _process_health_pill_value(self,
wall_time,
step,
device_name,
output_slot,
node_name,
tensor_proto,
node_name_set=None):
"""Creates a HealthPillEvent containing various properties of a health pill.
Args:
wall_time: The wall time in seconds.
step: The session run step of the event.
device_name: The name of the node's device.
output_slot: The numeric output slot.
node_name: The name of the node (without the output slot).
tensor_proto: A tensor proto of data.
node_name_set: An optional set of node names that are relevant. If not
provided, no filtering by relevance occurs.
Returns:
An event_accumulator.HealthPillEvent. Or None if one could not be created.
"""
if node_name_set and node_name not in node_name_set:
# This event is not relevant.
return None
# Since we seek health pills for a specific step, this function
# returns 1 health pill per node per step. The wall time is the
# seconds since the epoch.
elements = list(tensor_util.make_ndarray(tensor_proto))
return HealthPillEvent(
wall_time=wall_time,
step=step,
device_name=device_name,
output_slot=output_slot,
node_name=node_name,
dtype=repr(tf.as_dtype(elements[12])),
shape=elements[14:],
value=elements)
|
Creates a HealthPillEvent containing various properties of a health pill.
Args:
wall_time: The wall time in seconds.
step: The session run step of the event.
device_name: The name of the node's device.
output_slot: The numeric output slot.
node_name: The name of the node (without the output slot).
tensor_proto: A tensor proto of data.
node_name_set: An optional set of node names that are relevant. If not
provided, no filtering by relevance occurs.
Returns:
An event_accumulator.HealthPillEvent. Or None if one could not be created.
|
def iplot_histogram(data, figsize=None, number_to_keep=None,
sort='asc', legend=None):
""" Create a histogram representation.
Graphical representation of the input array using a vertical bars
style graph.
Args:
data (list or dict): This is either a list of dicts or a single
dict containing the values to represent (ex. {'001' : 130})
figsize (tuple): Figure size in pixels.
number_to_keep (int): The number of terms to plot; the rest is
grouped into a single bar called 'other values'
sort (string): Could be 'asc' or 'desc'
legend (list): A list of strings to use for labels of the data.
The number of entries must match the length of data.
Raises:
VisualizationError: When legend is provided and the length doesn't
match the input data.
"""
# HTML
html_template = Template("""
<p>
<div id="histogram_$divNumber"></div>
</p>
""")
# JavaScript
javascript_template = Template("""
<script>
requirejs.config({
paths: {
qVisualization: "https://qvisualization.mybluemix.net/q-visualizations"
}
});
require(["qVisualization"], function(qVisualizations) {
qVisualizations.plotState("histogram_$divNumber",
"histogram",
$executions,
$options);
});
</script>
""")
# Process data and execute
div_number = str(time.time())
div_number = re.sub('[.]', '', div_number)
# set default figure size if none provided
if figsize is None:
figsize = (7, 5)
options = {'number_to_keep': 0 if number_to_keep is None else number_to_keep,
'sort': sort,
'show_legend': 0,
'width': int(figsize[0]),
'height': int(figsize[1])}
if legend:
options['show_legend'] = 1
data_to_plot = []
if isinstance(data, dict):
data = [data]
if legend and len(legend) != len(data):
raise VisualizationError("Length of legendL (%s) doesn't match number "
"of input executions: %s" %
(len(legend), len(data)))
for item, execution in enumerate(data):
exec_data = process_data(execution, options['number_to_keep'])
out_dict = {'data': exec_data}
if legend:
out_dict['name'] = legend[item]
data_to_plot.append(out_dict)
html = html_template.substitute({
'divNumber': div_number
})
javascript = javascript_template.substitute({
'divNumber': div_number,
'executions': data_to_plot,
'options': options
})
display(HTML(html + javascript))
|
Create a histogram representation.
Graphical representation of the input array using a vertical bars
style graph.
Args:
data (list or dict): This is either a list of dicts or a single
dict containing the values to represent (ex. {'001' : 130})
figsize (tuple): Figure size in pixels.
number_to_keep (int): The number of terms to plot; the rest is
grouped into a single bar called 'other values'
sort (string): Could be 'asc' or 'desc'
legend (list): A list of strings to use for labels of the data.
The number of entries must match the length of data.
Raises:
VisualizationError: When legend is provided and the length doesn't
match the input data.
|
def new_data(self, mem, addr, data):
"""Callback for when new memory data has been fetched"""
done = False
if mem.id == self.id:
if addr == LocoMemory.MEM_LOCO_INFO:
self.nr_of_anchors = data[0]
if self.nr_of_anchors == 0:
done = True
else:
self.anchor_data = \
[AnchorData() for _ in range(self.nr_of_anchors)]
self._request_page(0)
else:
page = int((addr - LocoMemory.MEM_LOCO_ANCHOR_BASE) /
LocoMemory.MEM_LOCO_ANCHOR_PAGE_SIZE)
self.anchor_data[page].set_from_mem_data(data)
next_page = page + 1
if next_page < self.nr_of_anchors:
self._request_page(next_page)
else:
done = True
if done:
self.valid = True
if self._update_finished_cb:
self._update_finished_cb(self)
self._update_finished_cb = None
|
Callback for when new memory data has been fetched
|
def nvmlDeviceSetEccMode(handle, mode):
r"""
/**
* Set the ECC mode for the device.
*
* For Kepler &tm; or newer fully supported devices.
* Only applicable to devices with ECC.
* Requires \a NVML_INFOROM_ECC version 1.0 or higher.
* Requires root/admin permissions.
*
* The ECC mode determines whether the GPU enables its ECC support.
*
* This operation takes effect after the next reboot.
*
* See \ref nvmlEnableState_t for details on available modes.
*
* @param device The identifier of the target device
* @param ecc The target ECC mode
*
* @return
* - \ref NVML_SUCCESS if the ECC mode was set
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a ecc is invalid
* - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
* - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*
* @see nvmlDeviceGetEccMode()
*/
nvmlReturn_t DECLDIR nvmlDeviceSetEccMode
"""
fn = _nvmlGetFunctionPointer("nvmlDeviceSetEccMode")
ret = fn(handle, _nvmlEnableState_t(mode))
_nvmlCheckReturn(ret)
return None
|
r"""
/**
* Set the ECC mode for the device.
*
* For Kepler &tm; or newer fully supported devices.
* Only applicable to devices with ECC.
* Requires \a NVML_INFOROM_ECC version 1.0 or higher.
* Requires root/admin permissions.
*
* The ECC mode determines whether the GPU enables its ECC support.
*
* This operation takes effect after the next reboot.
*
* See \ref nvmlEnableState_t for details on available modes.
*
* @param device The identifier of the target device
* @param ecc The target ECC mode
*
* @return
* - \ref NVML_SUCCESS if the ECC mode was set
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a ecc is invalid
* - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
* - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*
* @see nvmlDeviceGetEccMode()
*/
nvmlReturn_t DECLDIR nvmlDeviceSetEccMode
|
def opened(self, block_identifier: BlockSpecification) -> bool:
""" Returns if the channel is opened. """
return self.token_network.channel_is_opened(
participant1=self.participant1,
participant2=self.participant2,
block_identifier=block_identifier,
channel_identifier=self.channel_identifier,
)
|
Returns if the channel is opened.
|
def buildcontent(self):
"""Build HTML content only, no header or body tags. To be useful this
will usually require the attribute `jquery_on_ready` to be set which
will wrap the js in $(function(){<regular_js>};)
"""
self.buildcontainer()
# if the subclass has a method buildjs this method will be
# called instead of the method defined here
# when this subclass method is entered it does call
# the method buildjschart defined here
self.buildjschart()
self.htmlcontent = self.template_content_nvd3.render(chart=self)
|
Build HTML content only, no header or body tags. To be useful this
will usually require the attribute `jquery_on_ready` to be set which
will wrap the js in $(function(){<regular_js>};)
|
def basic_stats(self):
"""Return a markdown representation of simple statistics."""
comment_score = sum(comment.score for comment in self.comments)
if self.comments:
comment_duration = (self.comments[-1].created_utc -
self.comments[0].created_utc)
comment_rate = self._rate(len(self.comments), comment_duration)
else:
comment_rate = 0
submission_duration = self.max_date - self.min_date
submission_rate = self._rate(len(self.submissions),
submission_duration)
submission_score = sum(sub.score for sub in self.submissions.values())
values = [('Total', len(self.submissions), len(self.comments)),
('Rate (per day)', '{:.2f}'.format(submission_rate),
'{:.2f}'.format(comment_rate)),
('Unique Redditors', len(self.submitters),
len(self.commenters)),
('Combined Score', submission_score, comment_score)]
retval = 'Period: {:.2f} days\n\n'.format(submission_duration / 86400.)
retval += '||Submissions|Comments|\n:-:|--:|--:\n'
for quad in values:
retval += '__{}__|{}|{}\n'.format(*quad)
return retval + '\n'
|
Return a markdown representation of simple statistics.
|
def _create_ring(self, nodes):
"""Generate a ketama compatible continuum/ring.
"""
for node_name, node_conf in nodes:
for w in range(0, node_conf['vnodes'] * node_conf['weight']):
self._distribution[node_name] += 1
self._ring[self.hashi('%s-%s' % (node_name, w))] = node_name
self._keys = sorted(self._ring.keys())
|
Generate a ketama compatible continuum/ring.
|
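A self-contained sketch of the same ketama-style construction; the md5-based `hashi` and the node layout are assumptions, since the surrounding class and its hash function are not shown:

```python
# Standalone ketama-style ring sketch. The md5-based hash and node config
# are assumptions; the original class supplies its own hashi() and settings.
import bisect
import hashlib

def hashi(key):
    return int(hashlib.md5(key.encode('utf-8')).hexdigest(), 16)

nodes = {'cache-1': {'vnodes': 40, 'weight': 1},
         'cache-2': {'vnodes': 40, 'weight': 2}}

ring = {}
for name, conf in nodes.items():
    for w in range(conf['vnodes'] * conf['weight']):
        ring[hashi('%s-%s' % (name, w))] = name
keys = sorted(ring)

def get_node(key):
    # walk clockwise to the first ring point at or after hashi(key)
    idx = bisect.bisect(keys, hashi(key)) % len(keys)
    return ring[keys[idx]]

print(get_node('some-cache-key'))
```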
def visualRect(self, index):
"""
Returns the visual rectangle for the given index.
:param index | <QModelIndex>
:return <QtCore.QRect>
"""
rect = super(XTreeWidget, self).visualRect(index)
item = self.itemFromIndex(index)
if not rect.isNull() and item and item.isFirstColumnSpanned():
vpos = self.viewport().mapFromParent(QtCore.QPoint(0, 0))
rect.setX(vpos.x())
rect.setWidth(self.width())
return rect
return rect
|
Returns the visual rectangle for the given index.
:param index | <QModelIndex>
:return <QtCore.QRect>
|
def zero_level_calibrate(self, duration, t0=0.0):
"""Performs zero-level calibration from the chosen time interval.
This changes the previously loaded data in-place.
Parameters
--------------------
duration : float
Number of timeunits to use for calibration
t0 : float
Starting time for calibration
Returns
----------------------
gyro_data : (3, N) float ndarray
The calibrated data (note that it is also changed in-place!)
"""
t1 = t0 + duration
indices = np.flatnonzero((self.timestamps >= t0) & (self.timestamps <= t1))
m = np.mean(self.gyro_data[:, indices], axis=1)
self.gyro_data -= m.reshape(3,1)
return self.gyro_data
|
Performs zero-level calibration from the chosen time interval.
This changes the previously loaded data in-place.
Parameters
--------------------
duration : float
Number of timeunits to use for calibration
t0 : float
Starting time for calibration
Returns
----------------------
gyro_data : (3, N) float ndarray
The calibrated data (note that it is also changed in-place!)
|
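The core of the calibration above is just subtracting the per-axis mean over a chosen window; a small numpy illustration on synthetic data:

```python
# Synthetic illustration of zero-level calibration: subtract the per-axis
# mean of the samples that fall inside [t0, t0 + duration].
import numpy as np

timestamps = np.linspace(0.0, 10.0, 1001)
gyro_data = 0.05 + 0.01 * np.random.randn(3, timestamps.size)  # constant bias

t0, duration = 0.0, 2.0
idx = np.flatnonzero((timestamps >= t0) & (timestamps <= t0 + duration))
bias = np.mean(gyro_data[:, idx], axis=1)
gyro_data -= bias.reshape(3, 1)

print(np.abs(gyro_data[:, idx].mean(axis=1)) < 1e-12)  # bias removed in window
```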
def inference(self, observed_arr):
'''
Draws samples from the `true` distribution.
Args:
observed_arr: `np.ndarray` of observed data points.
Returns:
`np.ndarray` of inferred data points.
'''
if observed_arr.ndim < 4:
# Add rank for channel.
observed_arr = np.expand_dims(observed_arr, axis=1)
self.__add_channel_flag = True
else:
self.__add_channel_flag = False
return super().inference(observed_arr)
|
Draws samples from the `true` distribution.
Args:
observed_arr: `np.ndarray` of observed data points.
Returns:
`np.ndarray` of inferred data points.
|
def start(self, *args, **kwargs):
"""Start the task.
This is:
* not threadsafe
* assumed to be called in the gtk mainloop
"""
args = (self.counter,) + args
thread = threading.Thread(
target=self._work_callback,
args=args, kwargs=kwargs
)
thread.setDaemon(self.daemon)
thread.start()
|
Start the task.
This is:
* not threadsafe
* assumed to be called in the gtk mainloop
|
def orient_directed_graph(self, data, graph):
"""Run the algorithm on a directed_graph.
Args:
data (pandas.DataFrame): DataFrame containing the data
graph (networkx.DiGraph): Skeleton of the graph to orient
Returns:
networkx.DiGraph: Solution on the given skeleton.
.. warning::
The algorithm is run on the skeleton of the given graph.
"""
warnings.warn("The algorithm is ran on the skeleton of the given graph.")
return self.orient_undirected_graph(data, nx.Graph(graph))
|
Run the algorithm on a directed_graph.
Args:
data (pandas.DataFrame): DataFrame containing the data
graph (networkx.DiGraph): Skeleton of the graph to orient
Returns:
networkx.DiGraph: Solution on the given skeleton.
.. warning::
The algorithm is run on the skeleton of the given graph.
|
def nvmlUnitGetHandleByIndex(index):
r"""
/**
* Acquire the handle for a particular unit, based on its index.
*
* For S-class products.
*
* Valid indices are derived from the \a unitCount returned by \ref nvmlUnitGetCount().
* For example, if \a unitCount is 2 the valid indices are 0 and 1, corresponding to UNIT 0 and UNIT 1.
*
* The order in which NVML enumerates units has no guarantees of consistency between reboots.
*
* @param index The index of the target unit, >= 0 and < \a unitCount
* @param unit Reference in which to return the unit handle
*
* @return
* - \ref NVML_SUCCESS if \a unit has been set
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a unit is NULL
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlUnitGetHandleByIndex
"""
"""
/**
* Acquire the handle for a particular unit, based on its index.
*
* For S-class products.
*
* Valid indices are derived from the \a unitCount returned by \ref nvmlUnitGetCount().
* For example, if \a unitCount is 2 the valid indices are 0 and 1, corresponding to UNIT 0 and UNIT 1.
*
* The order in which NVML enumerates units has no guarantees of consistency between reboots.
*
* @param index The index of the target unit, >= 0 and < \a unitCount
* @param unit Reference in which to return the unit handle
*
* @return
* - \ref NVML_SUCCESS if \a unit has been set
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a unit is NULL
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
"""
c_index = c_uint(index)
unit = c_nvmlUnit_t()
fn = _nvmlGetFunctionPointer("nvmlUnitGetHandleByIndex")
ret = fn(c_index, byref(unit))
_nvmlCheckReturn(ret)
return bytes_to_str(unit)
|
r"""
/**
* Acquire the handle for a particular unit, based on its index.
*
* For S-class products.
*
* Valid indices are derived from the \a unitCount returned by \ref nvmlUnitGetCount().
* For example, if \a unitCount is 2 the valid indices are 0 and 1, corresponding to UNIT 0 and UNIT 1.
*
* The order in which NVML enumerates units has no guarantees of consistency between reboots.
*
* @param index The index of the target unit, >= 0 and < \a unitCount
* @param unit Reference in which to return the unit handle
*
* @return
* - \ref NVML_SUCCESS if \a unit has been set
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a unit is NULL
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlUnitGetHandleByIndex
|
def figure_tensor(func, **tf_pyfunc_kwargs):
'''Decorate matplotlib drawing routines.
This decorator is meant to decorate functions that return matplotlib
figures. The decorated function has to have the following signature
def decorated(*args, **kwargs) -> figure or iterable of figures
where `*args` can be any positional argument and `**kwargs` are any
keyword arguments. The decorated function returns a tensor of shape
`[NumFigures, Height, Width, 3]` of type `tf.uint8`.
The drawing code is invoked during running of TensorFlow sessions,
at a time when all positional tensor arguments have been evaluated
by the session. The decorated function is then passed the tensor values.
All non tensor arguments remain unchanged.
'''
name = tf_pyfunc_kwargs.pop('name', func.__name__)
@wraps(func)
def wrapper(*func_args, **func_kwargs):
tf_args = PositionalTensorArgs(func_args)
def pyfnc_callee(*tensor_values, **unused):
try:
figs = as_list(func(*tf_args.mix_args(tensor_values), **func_kwargs))
for f in figs:
f.canvas.draw()
return figure_buffer(figs)
except Exception:
print('-'*5 + 'tfmpl caught exception' + '-'*5)
print(traceback.format_exc())
print('-'*20)
raise
return tf.py_func(pyfnc_callee, tf_args.tensor_args, tf.uint8, name=name, **tf_pyfunc_kwargs)
return wrapper
|
Decorate matplotlib drawing routines.
This decorator is meant to decorate functions that return matplotlib
figures. The decorated function has to have the following signature
def decorated(*args, **kwargs) -> figure or iterable of figures
where `*args` can be any positional argument and `**kwargs` are any
keyword arguments. The decorated function returns a tensor of shape
`[NumFigures, Height, Width, 3]` of type `tf.uint8`.
The drawing code is invoked during running of TensorFlow sessions,
at a time when all positional tensor arguments have been evaluated
by the session. The decorated function is then passed the tensor values.
All non tensor arguments remain unchanged.
|
def _check_feature_types(self):
""" Checks that feature types are a subset of allowed feature types. (`None` is handled
:raises: ValueError
"""
if self.default_feature_type is not None and self.default_feature_type not in self.allowed_feature_types:
raise ValueError('Default feature type parameter must be one of the allowed feature types')
for feature_type in self.feature_collection:
if feature_type is not None and feature_type not in self.allowed_feature_types:
raise ValueError('Feature type has to be one of {}, but {} found'.format(self.allowed_feature_types,
feature_type))
|
Checks that feature types are a subset of allowed feature types. (`None` is handled separately.)
:raises: ValueError
|
def update(self):
"""Update the AMP list."""
# Init new stats
stats = self.get_init_value()
if self.input_method == 'local':
for k, v in iteritems(self.glances_amps.update()):
stats.append({'key': k,
'name': v.NAME,
'result': v.result(),
'refresh': v.refresh(),
'timer': v.time_until_refresh(),
'count': v.count(),
'countmin': v.count_min(),
'countmax': v.count_max()})
else:
# Not available in SNMP mode
pass
# Update the stats
self.stats = stats
return self.stats
|
Update the AMP list.
|
def sync_objects_out(self, force=False):
"""Synchronize from objects to records, and records to files"""
self.log('---- Sync Objects Out ----')
from ambry.bundle.files import BuildSourceFile
self.dstate = self.STATES.BUILDING
for f in self.build_source_files.list_records():
self.log('Sync: {}'.format(f.record.path))
f.objects_to_record()
self.commit()
|
Synchronize from objects to records, and records to files
|
def to_example_dict(encoder, inputs, mask, outputs):
"""Convert single h5 record to an example dict."""
# Inputs
bases = []
input_ids = []
last_idx = -1
for row in np.argwhere(inputs):
idx, base_id = row
idx, base_id = int(idx), int(base_id)
assert idx > last_idx # if not, means 2 True values in 1 row
# Some rows are all False. Those rows are mapped to UNK_ID.
while idx != last_idx + 1:
bases.append(encoder.UNK)
last_idx += 1
bases.append(encoder.BASES[base_id])
last_idx = idx
assert len(inputs) == len(bases)
input_ids = encoder.encode(bases)
input_ids.append(text_encoder.EOS_ID)
# Targets: mask and output
targets_mask = [float(v) for v in mask]
# The output is (n, m); store targets_shape so that it can be reshaped
# properly on the other end.
targets = [float(v) for v in outputs.flatten()]
targets_shape = [int(dim) for dim in outputs.shape]
assert mask.shape[0] == outputs.shape[0]
example_keys = ["inputs", "targets_mask", "targets", "targets_shape"]
ex_dict = dict(
zip(example_keys, [input_ids, targets_mask, targets, targets_shape]))
return ex_dict
|
Convert single h5 record to an example dict.
|
def add_rednoise(psr,A,gamma,components=10,seed=None):
"""Add red noise with P(f) = A^2 / (12 pi^2) (f year)^-gamma,
using `components` Fourier bases.
Optionally take a pseudorandom-number-generator seed."""
if seed is not None:
N.random.seed(seed)
t = psr.toas()
minx, maxx = N.min(t), N.max(t)
x = (t - minx) / (maxx - minx)
T = (day/year) * (maxx - minx)
size = 2*components
F = N.zeros((psr.nobs,size),'d')
f = N.zeros(size,'d')
for i in range(components):
F[:,2*i] = N.cos(2*math.pi*(i+1)*x)
F[:,2*i+1] = N.sin(2*math.pi*(i+1)*x)
f[2*i] = f[2*i+1] = (i+1) / T
norm = A**2 * year**2 / (12 * math.pi**2 * T)
prior = norm * f**(-gamma)
y = N.sqrt(prior) * N.random.randn(size)
psr.stoas[:] += (1.0/day) * N.dot(F,y)
|
Add red noise with P(f) = A^2 / (12 pi^2) (f year)^-gamma,
using `components` Fourier bases.
Optionally take a pseudorandom-number-generator seed.
|
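A standalone numpy sketch of the same Fourier-basis red-noise draw on synthetic TOAs; the `day`/`year` constants (in seconds) and the synthetic TOAs are assumptions, since the module's own constants and pulsar object are not shown:

```python
# Standalone sketch of the red-noise draw on synthetic TOAs. The day/year
# constants and the TOAs are assumptions for illustration only.
import math
import numpy as np

day, year = 86400.0, 365.25 * 86400.0
A, gamma, components = 1e-14, 4.33, 10

t = np.sort(np.random.uniform(0.0, 1000.0, 500))       # TOAs in days
x = (t - t.min()) / (t.max() - t.min())
T = (day / year) * (t.max() - t.min())                  # span in years

size = 2 * components
F = np.zeros((t.size, size))
f = np.zeros(size)
for i in range(components):
    F[:, 2 * i] = np.cos(2 * math.pi * (i + 1) * x)
    F[:, 2 * i + 1] = np.sin(2 * math.pi * (i + 1) * x)
    f[2 * i] = f[2 * i + 1] = (i + 1) / T

prior = (A ** 2 * year ** 2 / (12 * math.pi ** 2 * T)) * f ** (-gamma)
delay = (1.0 / day) * F.dot(np.sqrt(prior) * np.random.randn(size))
print(delay.shape)   # (500,) residuals in days
```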
def get_getter(cls, prop_name, # @NoSelf
user_getter=None, getter_takes_name=False):
"""Returns a function wich is a getter for a property.
prop_name is the name off the property.
user_getter is an optional function doing the work. If
specified, that function will be called instead of getting
the attribute whose name is in 'prop_name'.
If user_getter is specified with a False value for
getter_takes_name (default), then the method is used to get
the value of the property. If True is specified for
getter_takes_name, then the user_getter is called by
passing the property name (i.e. it is considered a general
method which receives the property name whose value has to
be returned.)
"""
if user_getter:
if getter_takes_name: # wraps the property name
_deps = type(cls)._get_old_style_getter_deps(cls, prop_name,
user_getter)
def _getter(self, deps=_deps):
return user_getter(self, prop_name)
else:
_getter = user_getter
return _getter
def _getter(self): # @DuplicatedSignature
return getattr(self, PROP_NAME % {'prop_name' : prop_name})
return _getter
|
Returns a function which is a getter for a property.
prop_name is the name of the property.
user_getter is an optional function doing the work. If
specified, that function will be called instead of getting
the attribute whose name is in 'prop_name'.
If user_getter is specified with a False value for
getter_takes_name (default), then the method is used to get
the value of the property. If True is specified for
getter_takes_name, then the user_getter is called by
passing the property name (i.e. it is considered a general
method which receives the property name whose value has to
be returned.)
|
def validate(self, **kwargs):
"""
Validates a data file
:param file_path: path to file to be loaded.
:param data: pre loaded YAML object (optional).
:return: Bool to indicate the validity of the file.
"""
default_data_schema = json.load(open(self.default_schema_file, 'r'))
# even though we are using the yaml package to load,
# it supports JSON and YAML
data = kwargs.pop("data", None)
file_path = kwargs.pop("file_path", None)
if file_path is None:
raise LookupError("file_path argument must be supplied")
if data is None:
try:
data = yaml.load(open(file_path, 'r'), Loader=Loader)
except Exception as e:
self.add_validation_message(ValidationMessage(file=file_path, message=
'There was a problem parsing the file.\n' + e.__str__()))
return False
try:
if 'type' in data:
custom_schema = self.load_custom_schema(data['type'])
json_validate(data, custom_schema)
else:
json_validate(data, default_data_schema)
except ValidationError as ve:
self.add_validation_message(
ValidationMessage(file=file_path,
message=ve.message + ' in ' + str(ve.instance)))
if self.has_errors(file_path):
return False
else:
return True
|
Validates a data file
:param file_path: path to file to be loaded.
:param data: pre loaded YAML object (optional).
:return: Bool to indicate the validity of the file.
|
def apply(self, data, path=None, applicator=None):
"""
Apply permissions in this set to the provided data, effectively
removing all keys from it that are not permissioned to be viewed
Arguments:
data -- dict of data
Returns:
Cleaned data
"""
if applicator:
applicator.pset = self
else:
applicator = Applicator(self)
return applicator.apply(data, path=path)
|
Apply permissions in this set to the provided data, effectively
removing all keys from it that are not permissioned to be viewed
Arguments:
data -- dict of data
Returns:
Cleaned data
|
def parse_iso8601(text):
"""
ISO 8601 compliant parser.
:param text: The string to parse
:type text: str
:rtype: datetime.datetime or datetime.time or datetime.date
"""
parsed = _parse_iso8601_duration(text)
if parsed is not None:
return parsed
m = ISO8601_DT.match(text)
if not m:
raise ParserError("Invalid ISO 8601 string")
ambiguous_date = False
is_date = False
is_time = False
year = 0
month = 1
day = 1
minute = 0
second = 0
microsecond = 0
tzinfo = None
if m:
if m.group("date"):
# A date has been specified
is_date = True
if m.group("isocalendar"):
# We have a ISO 8601 string defined
# by week number
if (
m.group("weeksep")
and not m.group("weekdaysep")
and m.group("isoweekday")
):
raise ParserError("Invalid date string: {}".format(text))
if not m.group("weeksep") and m.group("weekdaysep"):
raise ParserError("Invalid date string: {}".format(text))
try:
date = _get_iso_8601_week(
m.group("isoyear"), m.group("isoweek"), m.group("isoweekday")
)
except ParserError:
raise
except ValueError:
raise ParserError("Invalid date string: {}".format(text))
year = date["year"]
month = date["month"]
day = date["day"]
else:
# We have a classic date representation
year = int(m.group("year"))
if not m.group("monthday"):
# No month and day
month = 1
day = 1
else:
if m.group("month") and m.group("day"):
# Month and day
if not m.group("daysep") and len(m.group("day")) == 1:
# Ordinal day
ordinal = int(m.group("month") + m.group("day"))
leap = is_leap(year)
months_offsets = MONTHS_OFFSETS[leap]
if ordinal > months_offsets[13]:
raise ParserError("Ordinal day is out of range")
for i in range(1, 14):
if ordinal <= months_offsets[i]:
day = ordinal - months_offsets[i - 1]
month = i - 1
break
else:
month = int(m.group("month"))
day = int(m.group("day"))
else:
# Only month
if not m.group("monthsep"):
# The date looks like 201207
# which is invalid for a date
# But it might be a time in the form hhmmss
ambiguous_date = True
month = int(m.group("month"))
day = 1
if not m.group("time"):
# No time has been specified
if ambiguous_date:
# We can "safely" assume that the ambiguous date
# was actually a time in the form hhmmss
hhmmss = "{}{:0>2}".format(str(year), str(month))
return datetime.time(int(hhmmss[:2]), int(hhmmss[2:4]), int(hhmmss[4:]))
return datetime.date(year, month, day)
if ambiguous_date:
raise ParserError("Invalid date string: {}".format(text))
if is_date and not m.group("timesep"):
raise ParserError("Invalid date string: {}".format(text))
if not is_date:
is_time = True
# Grabbing hh:mm:ss
hour = int(m.group("hour"))
minsep = m.group("minsep")
if m.group("minute"):
minute = int(m.group("minute"))
elif minsep:
raise ParserError("Invalid ISO 8601 time part")
secsep = m.group("secsep")
if secsep and not minsep and m.group("minute"):
# minute/second separator but no hour/minute separator
raise ParserError("Invalid ISO 8601 time part")
if m.group("second"):
if not secsep and minsep:
# No minute/second separator but hour/minute separator
raise ParserError("Invalid ISO 8601 time part")
second = int(m.group("second"))
elif secsep:
raise ParserError("Invalid ISO 8601 time part")
# Grabbing subseconds, if any
if m.group("subsecondsection"):
# Limiting to 6 chars
subsecond = m.group("subsecond")[:6]
microsecond = int("{:0<6}".format(subsecond))
# Grabbing timezone, if any
tz = m.group("tz")
if tz:
if tz == "Z":
offset = 0
else:
negative = True if tz.startswith("-") else False
tz = tz[1:]
if ":" not in tz:
if len(tz) == 2:
tz = "{}00".format(tz)
off_hour = tz[0:2]
off_minute = tz[2:4]
else:
off_hour, off_minute = tz.split(":")
offset = ((int(off_hour) * 60) + int(off_minute)) * 60
if negative:
offset = -1 * offset
tzinfo = FixedTimezone(offset)
if is_time:
return datetime.time(hour, minute, second, microsecond)
return datetime.datetime(
year, month, day, hour, minute, second, microsecond, tzinfo=tzinfo
)
|
ISO 8601 compliant parser.
:param text: The string to parse
:type text: str
:rtype: datetime.datetime or datetime.time or datetime.date
|
def config(filename):
"""
Construct `Config` object and return a list.
:param filename: A string containing the path to the YAML file.
:return: list
"""
Config = collections.namedtuple('Config', [
'git',
'lock_file',
'version',
'name',
'src',
'dst',
'files',
'post_commands',
])
return [Config(**d) for d in _get_config_generator(filename)]
|
Construct `Config` object and return a list.
:param filename: A string containing the path to the YAML file.
:return: list
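As a rough sketch of what each element of the returned list looks like, assuming `_get_config_generator(filename)` yields one dict per entry with exactly the fields above (all values below are made up):

import collections

Config = collections.namedtuple('Config', [
    'git', 'lock_file', 'version', 'name', 'src', 'dst', 'files', 'post_commands',
])
example = Config(git='https://example.com/mylib.git', lock_file='example.lock',
                 version='master', name='mylib', src='*', dst='vendor/mylib/',
                 files=[], post_commands=[])
print(example.name, example.version)  # attribute access instead of dict lookups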
|
def process_pure_python(self, content):
"""
        content is a list of strings. It is the unedited directive content.
        This runs it line by line in the InteractiveShell, prepending
        prompts as needed and capturing stderr and stdout, then returns
        the content as a list as if it were IPython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
|
content is a list of strings. It is the unedited directive content.
This runs it line by line in the InteractiveShell, prepending
prompts as needed and capturing stderr and stdout, then returns
the content as a list as if it were IPython code
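To make the prompt decoration concrete, here is the same formatting applied by hand to a two-line input, assuming the IPython-style input prompt 'In [%d]:' for self.promptin (an assumption; the directive allows this to be configured):

fmtin = 'In [%d]:'
ct = 1
line1 = 'def f(x):'
line2 = '    return x + 1'
continuation = u' %s:' % ''.join(['.'] * (len(str(ct)) + 2))
print(u'%s %s' % (fmtin % ct, line1))    # prints "In [1]: def f(x):"
print(u'%s %s' % (continuation, line2))  # prints " ...:     return x + 1"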
|
def config_unset(name,
value_regex=None,
repo=None,
user=None,
password=None,
output_encoding=None,
**kwargs):
r'''
.. versionadded:: 2015.8.0
Ensure that the named config key is not present
name
The name of the configuration key to unset. This value can be a regex,
but the regex must match the entire key name. For example, ``foo\.``
would not match all keys in the ``foo`` section, it would be necessary
to use ``foo\..+`` to do so.
value_regex
Regex indicating the values to unset for the matching key(s)
.. note::
This option behaves differently depending on whether or not ``all``
is set to ``True``. If it is, then all values matching the regex
will be deleted (this is the only way to delete multiple values
from a multivar). If ``all`` is set to ``False``, then this state
will fail if the regex matches more than one value in a multivar.
all : False
If ``True``, unset all matches
repo
Location of the git repository for which the config value should be
set. Required unless ``global`` is set to ``True``.
user
User under which to run git commands. By default, commands are run by
the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
global : False
If ``True``, this will set a global git config option
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
**Examples:**
.. code-block:: yaml
# Value matching 'baz'
mylocalrepo:
git.config_unset:
- name: foo.bar
- value_regex: 'baz'
- repo: /path/to/repo
# Ensure entire multivar is unset
mylocalrepo:
git.config_unset:
- name: foo.bar
- all: True
# Ensure all variables in 'foo' section are unset, including multivars
mylocalrepo:
git.config_unset:
- name: 'foo\..+'
- all: True
# Ensure that global config value is unset
mylocalrepo:
git.config_unset:
- name: foo.bar
- global: True
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'No matching keys are set'}
# Sanitize kwargs and make sure that no invalid ones were passed. This
# allows us to accept 'global' as an argument to this function without
# shadowing global(), while also not allowing unwanted arguments to be
# passed.
kwargs = salt.utils.args.clean_kwargs(**kwargs)
global_ = kwargs.pop('global', False)
all_ = kwargs.pop('all', False)
if kwargs:
return _fail(
ret,
salt.utils.args.invalid_kwargs(kwargs, raise_exc=False)
)
if not global_ and not repo:
return _fail(
ret,
'Non-global config options require the \'repo\' argument to be '
'set'
)
if not isinstance(name, six.string_types):
name = six.text_type(name)
if value_regex is not None:
if not isinstance(value_regex, six.string_types):
value_regex = six.text_type(value_regex)
# Ensure that the key regex matches the full key name
key = '^' + name.lstrip('^').rstrip('$') + '$'
# Get matching keys/values
pre_matches = __salt__['git.config_get_regexp'](
cwd=repo,
key=key,
value_regex=value_regex,
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding,
**{'global': global_}
)
if not pre_matches:
# No changes need to be made
return ret
# Perform sanity check on the matches. We can't proceed if the value_regex
# matches more than one value in a given key, and 'all' is not set to True
if not all_:
greedy_matches = ['{0} ({1})'.format(x, ', '.join(y))
for x, y in six.iteritems(pre_matches)
if len(y) > 1]
if greedy_matches:
if value_regex is not None:
return _fail(
ret,
'Multiple values are matched by value_regex for the '
'following keys (set \'all\' to True to force removal): '
'{0}'.format('; '.join(greedy_matches))
)
else:
return _fail(
ret,
'Multivar(s) matched by the key expression (set \'all\' '
'to True to force removal): {0}'.format(
'; '.join(greedy_matches)
)
)
if __opts__['test']:
ret['changes'] = pre_matches
return _neutral_test(
ret,
'{0} key(s) would have value(s) unset'.format(len(pre_matches))
)
if value_regex is None:
pre = pre_matches
else:
# Get all keys matching the key expression, so we can accurately report
# on changes made.
pre = __salt__['git.config_get_regexp'](
cwd=repo,
key=key,
value_regex=None,
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding,
**{'global': global_}
)
failed = []
# Unset the specified value(s). There is no unset for regexes so loop
# through the pre_matches dict and unset each matching key individually.
for key_name in pre_matches:
try:
__salt__['git.config_unset'](
cwd=repo,
key=name,
value_regex=value_regex,
all=all_,
user=user,
password=password,
output_encoding=output_encoding,
**{'global': global_}
)
except CommandExecutionError as exc:
msg = 'Failed to unset \'{0}\''.format(key_name)
if value_regex is not None:
                msg += ' using value_regex \'{0}\''.format(value_regex)
msg += ': ' + _strip_exc(exc)
log.error(msg)
failed.append(key_name)
if failed:
return _fail(
ret,
'Error(s) occurred unsetting values for the following keys (see '
'the minion log for details): {0}'.format(', '.join(failed))
)
post = __salt__['git.config_get_regexp'](
cwd=repo,
key=key,
value_regex=None,
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding,
**{'global': global_}
)
for key_name in pre:
if key_name not in post:
ret['changes'][key_name] = pre[key_name]
unset = [x for x in pre[key_name] if x not in post[key_name]]
if unset:
ret['changes'][key_name] = unset
if value_regex is None:
post_matches = post
else:
post_matches = __salt__['git.config_get_regexp'](
cwd=repo,
key=key,
value_regex=value_regex,
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding,
**{'global': global_}
)
if post_matches:
failed = ['{0} ({1})'.format(x, ', '.join(y))
for x, y in six.iteritems(post_matches)]
return _fail(
ret,
'Failed to unset value(s): {0}'.format('; '.join(failed))
)
ret['comment'] = 'Value(s) successfully unset'
return ret
|
r'''
.. versionadded:: 2015.8.0
Ensure that the named config key is not present
name
The name of the configuration key to unset. This value can be a regex,
but the regex must match the entire key name. For example, ``foo\.``
would not match all keys in the ``foo`` section, it would be necessary
to use ``foo\..+`` to do so.
value_regex
Regex indicating the values to unset for the matching key(s)
.. note::
This option behaves differently depending on whether or not ``all``
is set to ``True``. If it is, then all values matching the regex
will be deleted (this is the only way to delete multiple values
from a multivar). If ``all`` is set to ``False``, then this state
will fail if the regex matches more than one value in a multivar.
all : False
If ``True``, unset all matches
repo
Location of the git repository for which the config value should be
set. Required unless ``global`` is set to ``True``.
user
User under which to run git commands. By default, commands are run by
the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
global : False
If ``True``, this will set a global git config option
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
**Examples:**
.. code-block:: yaml
# Value matching 'baz'
mylocalrepo:
git.config_unset:
- name: foo.bar
- value_regex: 'baz'
- repo: /path/to/repo
# Ensure entire multivar is unset
mylocalrepo:
git.config_unset:
- name: foo.bar
- all: True
# Ensure all variables in 'foo' section are unset, including multivars
mylocalrepo:
git.config_unset:
- name: 'foo\..+'
- all: True
# Ensure that global config value is unset
mylocalrepo:
git.config_unset:
- name: foo.bar
- global: True
|
def simple_memoize(callable_object):
"""Simple memoization for functions without keyword arguments.
    This is useful for mapping code objects to modules in this context.
    inspect.getmodule() requires a number of system calls, which may slow down
    the tracing considerably. Caching the mapping from code objects (there is
    *one* code object for each function, regardless of how many simultaneous
    activation records there are) to their modules avoids repeating those calls.
In this context we can ignore keyword arguments, but a generic memoizer
ought to take care of that as well.
"""
cache = dict()
def wrapper(*rest):
if rest not in cache:
cache[rest] = callable_object(*rest)
return cache[rest]
return wrapper
|
Simple memoization for functions without keyword arguments.
This is useful for mapping code objects to modules in this context.
inspect.getmodule() requires a number of system calls, which may slow down
the tracing considerably. Caching the mapping from code objects (there is
*one* code object for each function, regardless of how many simultaneous
activation records there are) to their modules avoids repeating those calls.
In this context we can ignore keyword arguments, but a generic memoizer
ought to take care of that as well.
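A quick usage sketch; the wrapped function and the call counter are invented for illustration:

calls = {'n': 0}

def slow_double(x):
    calls['n'] += 1
    return 2 * x

fast_double = simple_memoize(slow_double)
assert fast_double(21) == 42
assert fast_double(21) == 42
assert calls['n'] == 1  # the second call is served from the cache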
|
def _get_friends_count(session, user_id):
"""
https://vk.com/dev/friends.get
"""
response = session.fetch('friends.get', user_id=user_id, count=1)
return response["count"]
|
https://vk.com/dev/friends.get
|
def get_properties(self, mode, name_list=None):
"""Return properties as list of 2-tuples (name, value).
If mode is 'name', then None is returned for the value.
name
the property name in Clark notation.
value
may have different types, depending on the status:
- string or unicode: for standard property values.
- etree.Element: for complex values.
- DAVError in case of errors.
- None: if mode == 'name'.
@param mode: "allprop", "name", or "named"
@param name_list: list of property names in Clark Notation (required for mode 'named')
This default implementation basically calls self.get_property_names() to
        get the list of names, then calls self.get_property_value on each of them.
"""
assert mode in ("allprop", "name", "named")
if mode in ("allprop", "name"):
# TODO: 'allprop' could have nameList, when <include> option is
# implemented
assert name_list is None
name_list = self.get_property_names(mode == "allprop")
else:
assert name_list is not None
propList = []
namesOnly = mode == "name"
for name in name_list:
try:
if namesOnly:
propList.append((name, None))
else:
value = self.get_property_value(name)
propList.append((name, value))
except DAVError as e:
propList.append((name, e))
except Exception as e:
propList.append((name, as_DAVError(e)))
if self.provider.verbose >= 2:
traceback.print_exc(10, sys.stdout)
return propList
|
Return properties as list of 2-tuples (name, value).
If mode is 'name', then None is returned for the value.
name
the property name in Clark notation.
value
may have different types, depending on the status:
- string or unicode: for standard property values.
- etree.Element: for complex values.
- DAVError in case of errors.
- None: if mode == 'name'.
@param mode: "allprop", "name", or "named"
@param name_list: list of property names in Clark Notation (required for mode 'named')
This default implementation basically calls self.get_property_names() to
get the list of names, then calls self.get_property_value on each of them.
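To make the return shape concrete, an illustrative (not runnable against a real provider) picture of the 2-tuples; the property names and values are invented:

# mode == "name": values are always None
#     [("{DAV:}displayname", None), ("{DAV:}getcontentlength", None)]
# mode == "named" with name_list=["{DAV:}displayname", "{custom:}owner"]:
#     [("{DAV:}displayname", "report.txt"), ("{custom:}owner", <DAVError>)]
# Errors are reported in-band as the value for that name, never raised out of this method.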
|
def handle_pkg_optional_fields(self, package, package_node):
"""
Write package optional fields.
"""
self.handle_package_literal_optional(package, package_node, self.spdx_namespace.versionInfo, 'version')
self.handle_package_literal_optional(package, package_node, self.spdx_namespace.packageFileName, 'file_name')
self.handle_package_literal_optional(package, package_node, self.spdx_namespace.supplier, 'supplier')
self.handle_package_literal_optional(package, package_node, self.spdx_namespace.originator, 'originator')
self.handle_package_literal_optional(package, package_node, self.spdx_namespace.sourceInfo, 'source_info')
self.handle_package_literal_optional(package, package_node, self.spdx_namespace.licenseComments, 'license_comment')
self.handle_package_literal_optional(package, package_node, self.spdx_namespace.summary, 'summary')
self.handle_package_literal_optional(package, package_node, self.spdx_namespace.description, 'description')
if package.has_optional_field('check_sum'):
checksum_node = self.create_checksum_node(package.check_sum)
self.graph.add((package_node, self.spdx_namespace.checksum, checksum_node))
if package.has_optional_field('homepage'):
homepage_node = URIRef(self.to_special_value(package.homepage))
homepage_triple = (package_node, self.doap_namespace.homepage, homepage_node)
self.graph.add(homepage_triple)
|
Write package optional fields.
|
def check_num_columns_in_param_list_arrays(param_list):
"""
Ensure that each array in param_list, that is not None, has the same number
of columns. Raises a helpful ValueError if otherwise.
Parameters
----------
param_list : list of ndarrays or None.
Returns
-------
None.
"""
try:
num_columns = param_list[0].shape[1]
assert all([x is None or (x.shape[1] == num_columns)
for x in param_list])
except AssertionError:
msg = "param_list arrays should have equal number of columns."
raise ValueError(msg)
return None
|
Ensure that each array in param_list, that is not None, has the same number
of columns. Raises a helpful ValueError if otherwise.
Parameters
----------
param_list : list of ndarrays or None.
Returns
-------
None.
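A quick demonstration of the check, assuming the function above and NumPy are importable:

import numpy as np

ok = [np.zeros((3, 4)), None, np.ones((5, 4))]
check_num_columns_in_param_list_arrays(ok)   # passes silently: all arrays have 4 columns

bad = [np.zeros((3, 4)), np.ones((5, 2))]
try:
    check_num_columns_in_param_list_arrays(bad)
except ValueError as err:
    print(err)  # param_list arrays should have equal number of columns.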
|
def reverse(self, search: str):
"""Return reverse DNS lookup information we have for the given IPv{4,6} address with history of changes. Multiple reverse DNS entries may match. We return all of them.
"""
url_path = "/api/reverse/{search}".format(search=search)
return self._request(path=url_path)
|
Return reverse DNS lookup information we have for the given IPv{4,6} address with history of changes. Multiple reverse DNS entries may match. We return all of them.
|
def delete_folder(self, folder_id, folder_etag=None, recursive=None):
'''Delete specified folder.
Pass folder_etag to avoid race conditions (raises error 412).
recursive keyword does just what it says on the tin.'''
return self( join('folders', folder_id),
dict(recursive=recursive), method='delete',
headers={'If-Match': folder_etag} if folder_etag else dict() )
|
Delete specified folder.
Pass folder_etag to avoid race conditions (raises error 412).
recursive keyword does just what it says on the tin.
|
def sieve(self, name=None, sample_rate=None, sample_range=None,
exact_match=False, **others):
"""Find all `Channels <Channel>` in this list matching the
specified criteria.
Parameters
----------
name : `str`, or regular expression
any part of the channel name against which to match
            (or full name if `exact_match=True` is given)
sample_rate : `float`
rate (number of samples per second) to match exactly
sample_range : 2-`tuple`
            `[low, high]` closed interval of rates to match within
exact_match : `bool`
return channels matching `name` exactly, default: `False`
Returns
-------
new : `ChannelList`
a new `ChannelList` containing the matching channels
"""
# format name regex
if isinstance(name, Pattern):
flags = name.flags
name = name.pattern
else:
flags = 0
        # only build a name regex if a name filter was given
        if name is not None:
            if exact_match:
                name = name if name.startswith(r'\A') else r"\A%s" % name
                name = name if name.endswith(r'\Z') else r"%s\Z" % name
            name_regexp = re.compile(name, flags=flags)
        matched = list(self)
        if name is not None:
            matched = [entry for entry in matched if
                       name_regexp.search(entry.name) is not None]
if sample_rate is not None:
sample_rate = (sample_rate.value if
isinstance(sample_rate, units.Quantity) else
float(sample_rate))
matched = [entry for entry in matched if entry.sample_rate and
entry.sample_rate.value == sample_rate]
if sample_range is not None:
matched = [entry for entry in matched if
sample_range[0] <= entry.sample_rate.value <=
sample_range[1]]
for attr, val in others.items():
if val is not None:
matched = [entry for entry in matched if
(hasattr(entry, attr) and
getattr(entry, attr) == val)]
return self.__class__(matched)
|
Find all `Channels <Channel>` in this list matching the
specified criteria.
Parameters
----------
name : `str`, or regular expression
any part of the channel name against which to match
(or full name if `exact_match=True` is given)
sample_rate : `float`
rate (number of samples per second) to match exactly
sample_range : 2-`tuple`
`[low, high]` closed interval of rates to match within
exact_match : `bool`
return channels matching `name` exactly, default: `False`
Returns
-------
new : `ChannelList`
a new `ChannelList` containing the matching channels
|
def _on_connection_close(self, connection, reply_code_or_reason, reply_text=None):
"""
Callback invoked when a previously-opened connection is closed.
Args:
connection (pika.connection.SelectConnection): The connection that
was just closed.
reply_code_or_reason (int|Exception): The reason why the channel
was closed. In older versions of pika, this is the AMQP code.
reply_text (str): The human-readable reason the connection was
closed (only in older versions of pika)
"""
self._channel = None
if isinstance(reply_code_or_reason, pika_errs.ConnectionClosed):
reply_code = reply_code_or_reason.reply_code
reply_text = reply_code_or_reason.reply_text
elif isinstance(reply_code_or_reason, int):
reply_code = reply_code_or_reason
else:
reply_code = 0
reply_text = str(reply_code_or_reason)
if reply_code == 200:
# Normal shutdown, exit the consumer.
_log.info("Server connection closed (%s), shutting down", reply_text)
connection.ioloop.stop()
else:
_log.warning(
"Connection to %s closed unexpectedly (%d): %s",
connection.params.host,
reply_code,
reply_text,
)
self.call_later(1, self.reconnect)
|
Callback invoked when a previously-opened connection is closed.
Args:
connection (pika.connection.SelectConnection): The connection that
was just closed.
reply_code_or_reason (int|Exception): The reason why the channel
was closed. In older versions of pika, this is the AMQP code.
reply_text (str): The human-readable reason the connection was
closed (only in older versions of pika)
|
def filepattern(self, *args, **kwargs):
"""Returns a list of filepatterns, one for each problem."""
return [p.filepattern(*args, **kwargs) for p in self.problems]
|
Returns a list of filepatterns, one for each problem.
|
def main(argv=None):
""" Call transliterator from a command line.
python transliterator.py text inputFormat outputFormat
... writes the transliterated text to stdout
text -- the text to be transliterated OR the name of a file containing the text
inputFormat -- the name of the character block or transliteration scheme that
the text is to be transliterated FROM, e.g. 'CYRILLIC', 'IAST'.
Not case-sensitive
outputFormat -- the name of the character block or transliteration scheme that
the text is to be transliterated TO, e.g. 'CYRILLIC', 'IAST'.
Not case-sensitive
"""
print (transliterate('jaya gaNeza! zrIrAmajayam', 'harvardkyoto', 'devanagari'))
if argv is None:
argv = sys.argv
try:
text, inFormat, outFormat = argv[1:4]
except ValueError:
print (main.__doc__)
return 2
inFormat = inFormat.upper()
outFormat = outFormat.upper()
# try assuming "text" is a filename
try:
f = open(text)
except IOError:
# it wasn't, so it must be the actual text
print (transliterate(text, inFormat, outFormat))
return 0
else:
i = 1
for text in f.readlines():
if len(text) > 0 and not text.startswith('#'):
print (transliterate(text, inFormat, outFormat).strip('\n'))
i = i + 1
f.close()
return 0
|
Call transliterator from a command line.
python transliterator.py text inputFormat outputFormat
... writes the transliterated text to stdout
text -- the text to be transliterated OR the name of a file containing the text
inputFormat -- the name of the character block or transliteration scheme that
the text is to be transliterated FROM, e.g. 'CYRILLIC', 'IAST'.
Not case-sensitive
outputFormat -- the name of the character block or transliteration scheme that
the text is to be transliterated TO, e.g. 'CYRILLIC', 'IAST'.
Not case-sensitive
|
def get_commits(self, repo, organization='llnl'):
"""
Retrieves the number of commits to a repo in the organization. If it is
the first time getting commits for a repo, it will get all commits and
save them to JSON. If there are previous commits saved, it will only get
commits that have not been saved to disk since the last date of commits.
"""
#JSON
path = ('../github-data/' + organization + '/' + repo.name + '/commits')
is_only_today = False
if not os.path.exists(path): #no previous path, get all commits
all_commits = repo.iter_commits()
is_only_today = True
else:
            files = sorted(os.listdir(path))  # os.listdir order is arbitrary; sort so the newest dated file is last
date = str(files[-1][:-5])
if date == str(datetime.date.today()):
#most recent date is actually today, get previous most recent date
if len(files) > 2:
date = str(files[-2][:-5])
else:
#This means there is only one file, today. Retrieve every commit
all_commits = repo.iter_commits()
is_only_today = True
if not is_only_today:#there's a previous saved JSON that's not today
all_commits = repo.iter_commits(since=date)
for commit in all_commits:
self.commits_json[repo.name].append(commit.to_json())
#for csv
count = 0
for commit in repo.iter_commits():
count += 1
return count
|
Retrieves the number of commits to a repo in the organization. If it is
the first time getting commits for a repo, it will get all commits and
save them to JSON. If there are previous commits saved, it will only get
commits that have not been saved to disk since the last date of commits.
|
def get(self, request, path):
"""Return HTML (or other related content) for Meteor."""
if path == 'meteor_runtime_config.js':
config = {
'DDP_DEFAULT_CONNECTION_URL': request.build_absolute_uri('/'),
'PUBLIC_SETTINGS': self.meteor_settings.get('public', {}),
'ROOT_URL': request.build_absolute_uri(
'%s/' % (
self.runtime_config.get('ROOT_URL_PATH_PREFIX', ''),
),
),
'ROOT_URL_PATH_PREFIX': '',
}
# Use HTTPS instead of HTTP if SECURE_SSL_REDIRECT is set
if config['DDP_DEFAULT_CONNECTION_URL'].startswith('http:') \
and settings.SECURE_SSL_REDIRECT:
config['DDP_DEFAULT_CONNECTION_URL'] = 'https:%s' % (
config['DDP_DEFAULT_CONNECTION_URL'].split(':', 1)[1],
)
config.update(self.runtime_config)
return HttpResponse(
'__meteor_runtime_config__ = %s;' % dumps(config),
content_type='text/javascript',
)
try:
file_path, content_type = self.url_map[path]
with open(file_path, 'r') as content:
return HttpResponse(
content.read(),
content_type=content_type,
)
except KeyError:
return HttpResponse(self.html)
|
Return HTML (or other related content) for Meteor.
|
def send_to_azure(instance, data, thread_number, sub_commit, table_info, nb_threads):
"""
data = {
"table_name" : 'name_of_the_azure_schema' + '.' + 'name_of_the_azure_table' #Must already exist,
"columns_name" : [first_column_name,second_column_name,...,last_column_name],
"rows" : [[first_raw_value,second_raw_value,...,last_raw_value],...]
}
"""
rows = data["rows"]
if not rows:
return 0
columns_name = data["columns_name"]
table_name = data["table_name"] + "_" + str(thread_number)
print(C.HEADER + "Create table %s..." % table_name + C.ENDC)
create_table_from_info(instance, table_info, table_name)
print(C.OKGREEN + "Create table %s...OK" % table_name + C.ENDC)
small_batch_size = int(2099 / len(columns_name))
cnxn = connect(instance)
cursor = cnxn.cursor()
# Initialize counters
boolean = True
total_rows = len(rows)
question_mark_pattern = "(%s)" % ",".join(["?" for i in range(len(rows[0]))])
counter = 0
while boolean:
temp_row = []
question_mark_list = []
for i in range(small_batch_size):
if rows:
temp_row.append(rows.pop())
question_mark_list.append(question_mark_pattern)
else:
boolean = False
continue
counter = counter + len(temp_row)
# percent = round(float(counter * 100) / total_rows)
threads_state = eval(read_file("threads_state_%s" % str(thread_number)))
threads_state["iteration"] = counter
write_in_file("threads_state_%s" % str(thread_number), str(threads_state))
# print(threads_state)
if sub_commit:
suffix = "rows sent"
# print("Thread %s : %s %% rows sent" % (str(thread_number), str(percent)))
else:
suffix = "rows prepared to be sent"
print_progress_bar_multi_threads(nb_threads, suffix=suffix)
# print("Thread %s : %s %% rows prepared to be sent" % (str(thread_number), str(percent)))
data_values_str = ','.join(question_mark_list)
columns_name_str = ", ".join(columns_name)
inserting_request = '''INSERT INTO %s (%s) VALUES %s ;''' % (table_name, columns_name_str, data_values_str)
final_data = [y for x in temp_row for y in x]
if final_data:
cursor.execute(inserting_request, final_data)
if sub_commit:
commit_function(cnxn)
if not sub_commit:
commit_function(cnxn)
cursor.close()
cnxn.close()
return 0
|
data = {
"table_name" : 'name_of_the_azure_schema' + '.' + 'name_of_the_azure_table' #Must already exist,
"columns_name" : [first_column_name,second_column_name,...,last_column_name],
"rows" : [[first_raw_value,second_raw_value,...,last_raw_value],...]
}
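The batching logic is easiest to see without a database. The stand-alone sketch below rebuilds the INSERT statement and the flattened parameter list exactly as the function does; the table name and values are made up:

columns_name = ["id", "city", "population"]
rows = [[1, "Paris", 2140526], [2, "Lyon", 515695]]

small_batch_size = int(2099 / len(columns_name))  # stay under the ~2100-parameter limit per execute
question_mark_pattern = "(%s)" % ",".join(["?" for _ in rows[0]])
question_mark_list = [question_mark_pattern for _ in rows]

inserting_request = '''INSERT INTO %s (%s) VALUES %s ;''' % (
    "myschema.cities_0", ", ".join(columns_name), ",".join(question_mark_list))
final_data = [value for row in rows for value in row]  # flattened, in the same order as the '?'

print(inserting_request)  # INSERT INTO myschema.cities_0 (id, city, population) VALUES (?,?,?),(?,?,?) ;
print(final_data)         # [1, 'Paris', 2140526, 2, 'Lyon', 515695]
print(small_batch_size)   # 699 -> at most 699 rows per execute with 3 columns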
|
def get_function(self, name):
"""
Get a ValueRef pointing to the function named *name*.
NameError is raised if the symbol isn't found.
"""
p = ffi.lib.LLVMPY_GetNamedFunction(self, _encode_string(name))
if not p:
raise NameError(name)
return ValueRef(p, 'function', dict(module=self))
|
Get a ValueRef pointing to the function named *name*.
NameError is raised if the symbol isn't found.
|
def handle_ChannelClose(self, frame):
""" AMQP server closed the channel with an error """
# By docs:
# The response to receiving a Close after sending Close must be to
# send Close-Ok.
#
# No need for additional checks
self.sender.send_CloseOK()
exc = exceptions._get_exception_type(frame.payload.reply_code)
self._close_all(exc)
|
AMQP server closed the channel with an error
|
def factory(ec, code=None, token=None, refresh=None, **kwargs):
"""
Create a token handler
:param code:
:param token:
:param refresh:
:return: TokenHandler instance
"""
TTYPE = {'code': 'A', 'token': 'T', 'refresh': 'R'}
args = {}
if code:
args['code_handler'] = init_token_handler(ec, code, TTYPE['code'])
if token:
args['access_token_handler'] = init_token_handler(ec, token, TTYPE['token'])
if refresh:
        args['refresh_token_handler'] = init_token_handler(ec, refresh, TTYPE['refresh'])
return TokenHandler(**args)
|
Create a token handler
:param code:
:param token:
:param refresh:
:return: TokenHandler instance
|
def __get_untitled_file_name(self):
"""
Returns an untitled file name.
:return: Untitled file name.
:rtype: unicode
"""
untitledNameId = Editor._Editor__untitled_name_id
for file in self.list_files():
if not os.path.dirname(file) == self.__default_session_directory:
continue
search = re.search(r"\d+", os.path.basename(file))
if not search:
continue
untitledNameId = max(int(search.group(0)), untitledNameId) + 1
name = "{0} {1}.{2}".format(self.__default_file_name, untitledNameId, self.__default_file_extension)
Editor._Editor__untitled_name_id += 1
LOGGER.debug("> Next untitled file name: '{0}'.".format(name))
return name
|
Returns an untitled file name.
:return: Untitled file name.
:rtype: unicode
|
def create_dataset(self, name, **kwargs):
"""Create an array.
Parameters
----------
name : string
Array name.
data : array_like, optional
Initial data.
shape : int or tuple of ints
Array shape.
chunks : int or tuple of ints, optional
Chunk shape. If not provided, will be guessed from `shape` and
`dtype`.
dtype : string or dtype, optional
NumPy dtype.
compressor : Codec, optional
Primary compressor.
fill_value : object
Default value to use for uninitialized portions of the array.
order : {'C', 'F'}, optional
Memory layout to be used within each chunk.
synchronizer : zarr.sync.ArraySynchronizer, optional
Array synchronizer.
filters : sequence of Codecs, optional
Sequence of filters to use to encode chunk data prior to
compression.
overwrite : bool, optional
If True, replace any existing array or group with the given name.
cache_metadata : bool, optional
If True, array configuration metadata will be cached for the
lifetime of the object. If False, array metadata will be reloaded
prior to all data access and modification operations (may incur
overhead depending on storage and data access pattern).
Returns
-------
a : zarr.core.Array
Examples
--------
>>> import zarr
>>> g1 = zarr.group()
>>> d1 = g1.create_dataset('foo', shape=(10000, 10000),
... chunks=(1000, 1000))
>>> d1
<zarr.core.Array '/foo' (10000, 10000) float64>
>>> d2 = g1.create_dataset('bar/baz/qux', shape=(100, 100, 100),
... chunks=(100, 10, 10))
>>> d2
<zarr.core.Array '/bar/baz/qux' (100, 100, 100) float64>
"""
return self._write_op(self._create_dataset_nosync, name, **kwargs)
|
Create an array.
Parameters
----------
name : string
Array name.
data : array_like, optional
Initial data.
shape : int or tuple of ints
Array shape.
chunks : int or tuple of ints, optional
Chunk shape. If not provided, will be guessed from `shape` and
`dtype`.
dtype : string or dtype, optional
NumPy dtype.
compressor : Codec, optional
Primary compressor.
fill_value : object
Default value to use for uninitialized portions of the array.
order : {'C', 'F'}, optional
Memory layout to be used within each chunk.
synchronizer : zarr.sync.ArraySynchronizer, optional
Array synchronizer.
filters : sequence of Codecs, optional
Sequence of filters to use to encode chunk data prior to
compression.
overwrite : bool, optional
If True, replace any existing array or group with the given name.
cache_metadata : bool, optional
If True, array configuration metadata will be cached for the
lifetime of the object. If False, array metadata will be reloaded
prior to all data access and modification operations (may incur
overhead depending on storage and data access pattern).
Returns
-------
a : zarr.core.Array
Examples
--------
>>> import zarr
>>> g1 = zarr.group()
>>> d1 = g1.create_dataset('foo', shape=(10000, 10000),
... chunks=(1000, 1000))
>>> d1
<zarr.core.Array '/foo' (10000, 10000) float64>
>>> d2 = g1.create_dataset('bar/baz/qux', shape=(100, 100, 100),
... chunks=(100, 10, 10))
>>> d2
<zarr.core.Array '/bar/baz/qux' (100, 100, 100) float64>
|
def _verify(function):
"""decorator to ensure pin is properly set up"""
# @functools.wraps
def wrapped(pin, *args, **kwargs):
pin = int(pin)
if pin not in _open:
ppath = gpiopath(pin)
if not os.path.exists(ppath):
log.debug("Creating Pin {0}".format(pin))
with _export_lock:
with open(pjoin(gpio_root, 'export'), 'w') as f:
_write(f, pin)
value = open(pjoin(ppath, 'value'), FMODE)
direction = open(pjoin(ppath, 'direction'), FMODE)
_open[pin] = PinState(value=value, direction=direction)
return function(pin, *args, **kwargs)
return wrapped
|
decorator to ensure pin is properly set up
|
def get_type(self):
"""
Return the type of the field
:rtype: string
"""
        if self.type_idx_value is None:
self.type_idx_value = self.CM.get_type(self.type_idx)
return self.type_idx_value
|
Return the type of the field
:rtype: string
|
async def issue_cmd(self, cmd, value, retry=3):
"""
Issue a command, then await and return the return value.
This method is a coroutine
"""
async with self._cmd_lock:
if not self.connected:
_LOGGER.debug(
"Serial transport closed, not sending command %s", cmd)
return
while not self._cmdq.empty():
_LOGGER.debug("Clearing leftover message from command queue:"
" %s", await self._cmdq.get())
_LOGGER.debug("Sending command: %s with value %s", cmd, value)
self.transport.write(
'{}={}\r\n'.format(cmd, value).encode('ascii'))
if cmd == OTGW_CMD_REPORT:
expect = r'^{}:\s*([A-Z]{{2}}|{}=[^$]+)$'.format(cmd, value)
else:
expect = r'^{}:\s*([^$]+)$'.format(cmd)
async def send_again(err):
"""Resend the command."""
nonlocal retry
_LOGGER.warning("Command %s failed with %s, retrying...", cmd,
err)
retry -= 1
self.transport.write(
'{}={}\r\n'.format(cmd, value).encode('ascii'))
async def process(msg):
"""Process a possible response."""
_LOGGER.debug("Got possible response for command %s: %s", cmd,
msg)
if msg in OTGW_ERRS:
# Some errors appear by themselves on one line.
if retry == 0:
raise OTGW_ERRS[msg]
await send_again(msg)
return
if cmd == OTGW_CMD_MODE and value == 'R':
# Device was reset, msg contains build info
while not re.match(
r'OpenTherm Gateway \d+\.\d+\.\d+', msg):
msg = await self._cmdq.get()
return True
match = re.match(expect, msg)
if match:
if match.group(1) in OTGW_ERRS:
# Some errors are considered a response.
if retry == 0:
raise OTGW_ERRS[match.group(1)]
await send_again(msg)
return
ret = match.group(1)
if cmd == OTGW_CMD_SUMMARY and ret == '1':
# Expects a second line
part2 = await self._cmdq.get()
ret = [ret, part2]
return ret
if re.match(r'Error 0[1-4]', msg):
_LOGGER.warning("Received %s. If this happens during a "
"reset of the gateway it can be safely "
"ignored.", msg)
return
_LOGGER.warning("Unknown message in command queue: %s", msg)
await send_again(msg)
while True:
msg = await self._cmdq.get()
ret = await process(msg)
if ret is not None:
return ret
|
Issue a command, then await and return the return value.
This method is a coroutine
|
def respond(self, text, sessionID = "general"):
"""
Generate a response to the user input.
:type text: str
:param text: The string to be mapped
:rtype: str
"""
text = self.__normalize(text)
previousText = self.__normalize(self.conversation[sessionID][-2])
text_correction = self.__correction(text)
current_topic = self.topic[sessionID]
current_topic_order = current_topic.split(".")
while current_topic_order:
            try:
                return self.__response_on_topic(text, previousText, text_correction, current_topic, sessionID)
            except ValueError:
                pass
            current_topic_order.pop()
            current_topic = ".".join(current_topic_order)
        try:
            return self.__response_on_topic(text, previousText, text_correction, current_topic, sessionID)
        except ValueError:
            return "Sorry I couldn't find anything relevant"
|
Generate a response to the user input.
:type text: str
:param text: The string to be mapped
:rtype: str
|
def read_igor_D_gene_parameters(params_file_name):
"""Load genD from file.
genD is a list of genomic D information. Each element is a list of the name
of the D allele and the germline sequence.
Parameters
----------
params_file_name : str
File name for a IGOR parameter file.
Returns
-------
genD : list
List of genomic D information.
"""
params_file = open(params_file_name, 'r')
D_gene_info = {}
in_D_gene_sec = False
for line in params_file:
if line.startswith('#GeneChoice;D_gene;'):
in_D_gene_sec = True
elif in_D_gene_sec:
if line[0] == '%':
split_line = line[1:].split(';')
D_gene_info[split_line[0]] = [split_line[1] , int(split_line[2])]
else:
break
params_file.close()
genD = [[]]*len(D_gene_info.keys())
for D_gene in D_gene_info.keys():
genD[D_gene_info[D_gene][1]] = [D_gene, D_gene_info[D_gene][0]]
return genD
|
Load genD from file.
genD is a list of genomic D information. Each element is a list of the name
of the D allele and the germline sequence.
Parameters
----------
params_file_name : str
File name for a IGOR parameter file.
Returns
-------
genD : list
List of genomic D information.
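A minimal sketch of the section this parser reads, with made-up gene records; only the '#GeneChoice;D_gene;' prefix and the '%name;sequence;index' lines matter to the code above:

sample = (
    "#GeneChoice;D_gene;Undefined_side;vdj\n"
    "%TRBD1*01;GGGACAGGGGGC;0\n"
    "%TRBD2*01;GGGACTAGCGGGGGG;1\n"
)
with open("demo_params.txt", "w") as handle:
    handle.write(sample)

genD = read_igor_D_gene_parameters("demo_params.txt")
print(genD)  # [['TRBD1*01', 'GGGACAGGGGGC'], ['TRBD2*01', 'GGGACTAGCGGGGGG']]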
|
def _scaled_int(s):
r"""Convert a 3 byte string to a signed integer value."""
s = bytearray(s) # For Python 2
# Get leftmost bit (sign) as 1 (if 0) or -1 (if 1)
sign = 1 - ((s[0] & 0x80) >> 6)
# Combine remaining bits
int_val = (((s[0] & 0x7f) << 16) | (s[1] << 8) | s[2])
log.debug('Source: %s Int: %x Sign: %d', ' '.join(hex(c) for c in s), int_val, sign)
# Return scaled and with proper sign
return (sign * int_val) / 10000.
|
r"""Convert a 3 byte string to a signed integer value.
|
def emit_java_headers(target, source, env):
"""Create and return lists of Java stub header files that will
be created from a set of class files.
"""
class_suffix = env.get('JAVACLASSSUFFIX', '.class')
classdir = env.get('JAVACLASSDIR')
if not classdir:
try:
s = source[0]
except IndexError:
classdir = '.'
else:
try:
classdir = s.attributes.java_classdir
except AttributeError:
classdir = '.'
classdir = env.Dir(classdir).rdir()
if str(classdir) == '.':
c_ = None
else:
c_ = str(classdir) + os.sep
slist = []
for src in source:
try:
classname = src.attributes.java_classname
except AttributeError:
classname = str(src)
if c_ and classname[:len(c_)] == c_:
classname = classname[len(c_):]
if class_suffix and classname[-len(class_suffix):] == class_suffix:
classname = classname[:-len(class_suffix)]
classname = SCons.Tool.javac.classname(classname)
s = src.rfile()
s.attributes.java_classname = classname
slist.append(s)
s = source[0].rfile()
if not hasattr(s.attributes, 'java_classdir'):
s.attributes.java_classdir = classdir
if target[0].__class__ is SCons.Node.FS.File:
tlist = target
else:
if not isinstance(target[0], SCons.Node.FS.Dir):
target[0].__class__ = SCons.Node.FS.Dir
target[0]._morph()
tlist = []
for s in source:
fname = s.attributes.java_classname.replace('.', '_') + '.h'
t = target[0].File(fname)
t.attributes.java_lookupdir = target[0]
tlist.append(t)
return tlist, source
|
Create and return lists of Java stub header files that will
be created from a set of class files.
|
def use_astropy_helpers(**kwargs):
"""
Ensure that the `astropy_helpers` module is available and is importable.
This supports automatic submodule initialization if astropy_helpers is
included in a project as a git submodule, or will download it from PyPI if
necessary.
Parameters
----------
path : str or None, optional
A filesystem path relative to the root of the project's source code
that should be added to `sys.path` so that `astropy_helpers` can be
imported from that path.
If the path is a git submodule it will automatically be initialized
and/or updated.
The path may also be to a ``.tar.gz`` archive of the astropy_helpers
source distribution. In this case the archive is automatically
unpacked and made temporarily available on `sys.path` as a ``.egg``
archive.
If `None` skip straight to downloading.
download_if_needed : bool, optional
If the provided filesystem path is not found an attempt will be made to
download astropy_helpers from PyPI. It will then be made temporarily
available on `sys.path` as a ``.egg`` archive (using the
        ``setup_requires`` feature of setuptools). If the ``--offline`` option
is given at the command line the value of this argument is overridden
to `False`.
index_url : str, optional
If provided, use a different URL for the Python package index than the
main PyPI server.
use_git : bool, optional
If `False` no git commands will be used--this effectively disables
support for git submodules. If the ``--no-git`` option is given at the
command line the value of this argument is overridden to `False`.
auto_upgrade : bool, optional
By default, when installing a package from a non-development source
        distribution ah_bootstrap will try to automatically check for patch
releases to astropy-helpers on PyPI and use the patched version over
any bundled versions. Setting this to `False` will disable that
functionality. If the ``--offline`` option is given at the command line
the value of this argument is overridden to `False`.
offline : bool, optional
        If `True`, disable all actions that require an internet connection,
including downloading packages from the package index and fetching
        updates to any git submodule. Defaults to `False`.
"""
global BOOTSTRAPPER
config = BOOTSTRAPPER.config
config.update(**kwargs)
# Create a new bootstrapper with the updated configuration and run it
BOOTSTRAPPER = _Bootstrapper(**config)
BOOTSTRAPPER.run()
|
Ensure that the `astropy_helpers` module is available and is importable.
This supports automatic submodule initialization if astropy_helpers is
included in a project as a git submodule, or will download it from PyPI if
necessary.
Parameters
----------
path : str or None, optional
A filesystem path relative to the root of the project's source code
that should be added to `sys.path` so that `astropy_helpers` can be
imported from that path.
If the path is a git submodule it will automatically be initialized
and/or updated.
The path may also be to a ``.tar.gz`` archive of the astropy_helpers
source distribution. In this case the archive is automatically
unpacked and made temporarily available on `sys.path` as a ``.egg``
archive.
If `None` skip straight to downloading.
download_if_needed : bool, optional
If the provided filesystem path is not found an attempt will be made to
download astropy_helpers from PyPI. It will then be made temporarily
available on `sys.path` as a ``.egg`` archive (using the
``setup_requires`` feature of setuptools). If the ``--offline`` option
is given at the command line the value of this argument is overridden
to `False`.
index_url : str, optional
If provided, use a different URL for the Python package index than the
main PyPI server.
use_git : bool, optional
If `False` no git commands will be used--this effectively disables
support for git submodules. If the ``--no-git`` option is given at the
command line the value of this argument is overridden to `False`.
auto_upgrade : bool, optional
By default, when installing a package from a non-development source
distribution ah_bootstrap will try to automatically check for patch
releases to astropy-helpers on PyPI and use the patched version over
any bundled versions. Setting this to `False` will disable that
functionality. If the ``--offline`` option is given at the command line
the value of this argument is overridden to `False`.
offline : bool, optional
If `True`, disable all actions that require an internet connection,
including downloading packages from the package index and fetching
updates to any git submodule. Defaults to `False`.
|
def compare_values(values0, values1):
"""Compares all the values of a single registry key."""
values0 = {v[0]: v[1:] for v in values0}
values1 = {v[0]: v[1:] for v in values1}
created = [(k, v[0], v[1]) for k, v in values1.items() if k not in values0]
deleted = [(k, v[0], v[1]) for k, v in values0.items() if k not in values1]
modified = [(k, v[0], v[1]) for k, v in values0.items()
if v != values1.get(k, None)]
return created, deleted, modified
|
Compares all the values of a single registry key.
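A small, made-up example with registry values given as (name, type, data) tuples, showing all three result lists (note that, as written, a deleted key also satisfies the "modified" condition):

values0 = [('Version', 'REG_SZ', '1.0'), ('Path', 'REG_SZ', 'old')]
values1 = [('Path', 'REG_SZ', 'new'), ('Owner', 'REG_SZ', 'admin')]
created, deleted, modified = compare_values(values0, values1)
print(created)   # [('Owner', 'REG_SZ', 'admin')]
print(deleted)   # [('Version', 'REG_SZ', '1.0')]
print(modified)  # [('Version', 'REG_SZ', '1.0'), ('Path', 'REG_SZ', 'old')]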
|
def calculate_mvgd_stats(nw):
"""
MV Statistics for an arbitrary network
Parameters
----------
nw: :any:`list` of NetworkDing0
The MV grid(s) to be studied
Returns
-------
mvgd_stats : pandas.DataFrame
Dataframe containing several statistical numbers about the MVGD
"""
##############################
omega = 2 * pi * 50
# close circuit breakers
nw.control_circuit_breakers(mode='close')
##############################
# Collect info from nw into dataframes
# define dictionaries for collection
trafos_dict = {}
generators_dict = {}
branches_dict = {}
ring_dict = {}
LA_dict = {}
other_nodes_dict = {}
lv_branches_dict = {}
# initiate indexes
trafos_idx = 0
gen_idx = 0
branch_idx = 0
ring_idx = 0
LA_idx = 0
lv_branches_idx = 0
# loop over MV grid districts
for district in nw.mv_grid_districts():
# node of MV station
root = district.mv_grid.station()
###################################
# get impedance of path to each terminal node
# and get thermal capacity of first segment of path to each terminal node
# store properties of terminal nodes in dictionaries
# properties are e.g. impedance of path, length of path, thermal limit of first segment of path
mv_impedances = {}
mvlv_impedances = {}
mv_path_lengths = {}
mvlv_path_lengths = {}
mv_thermal_limits = {} # I_max of first segment on MV for each MV path
lv_thermal_limits = {} # I_max of first segment on LV for each LV path
mvlv_thermal_limits = {} # I_max of first segment on MV for each MVLV path
n_outgoing_LV = 0
n_stations_LV = 0
n_outgoing_MV = 0
G = district.mv_grid._graph
for node in G.nodes():
if isinstance(node, MVStationDing0):
n_outgoing_MV += len(list(G.neighbors(node)))
continue
mv_impedance = 0
mv_path_length = 0
if not isinstance(node, MVCableDistributorDing0) and not isinstance(node, CircuitBreakerDing0):
if not nx.has_path(G, root, node):
continue
#print(node, node.lv_load_area.is_aggregated) # only debug
else:
path = nx.shortest_path(G, root, node)
for i in range(len(path) - 1):
mv_impedance += np.sqrt(
(G.adj[path[i]][path[i + 1]]['branch'].type[
'L'] * 1e-3 * omega * \
G.adj[path[i]][path[i + 1]][
'branch'].length) ** 2. + \
(G.adj[path[i]][path[i + 1]]['branch'].type[
'R'] * \
G.adj[path[i]][path[i + 1]][
'branch'].length) ** 2.)
mv_path_length += G.adj[path[i]][path[i + 1]][
'branch'].length
mv_impedances[node] = mv_impedance
mv_path_lengths[node] = mv_path_length
mv_thermal_limit = G.adj[path[0]][path[1]]['branch'].type['I_max_th']
mv_thermal_limits[node] = mv_thermal_limit
if isinstance(node, LVStationDing0):
# add impedance of transformers in LV station
lvstation_impedance = 0.
for trafo in node.transformers():
lvstation_impedance += 1. / np.hypot(trafo.r,trafo.x) # transformers operating in parallel
if lvstation_impedance > 0.: # avoid dividing by zero
lvstation_impedance = 1. / lvstation_impedance
else:
lvstation_impedance = 0.
# identify LV nodes belonging to LV station
for lv_LA in district.lv_load_areas():
for lv_dist in lv_LA.lv_grid_districts():
if lv_dist.lv_grid._station == node:
G_lv = lv_dist.lv_grid._graph
# loop over all LV terminal nodes belonging to LV station
for lv_node in G_lv.nodes():
if isinstance(lv_node, GeneratorDing0) or isinstance(lv_node, LVLoadDing0):
path = nx.shortest_path(G_lv, node, lv_node)
lv_impedance = lvstation_impedance
lv_path_length = 0.
for i in range(len(path)-1):
lv_impedance += np.sqrt((G_lv.adj[path[i]][path[i+1]]['branch'].type['L'] * 1e-3 * omega * \
G_lv.adj[path[i]][path[i+1]]['branch'].length)**2. + \
(G_lv.adj[path[i]][path[i+1]]['branch'].type['R'] * \
G_lv.adj[path[i]][path[i+1]]['branch'].length)**2.)
lv_path_length += G_lv.adj[path[i]][path[i+1]]['branch'].length
lv_thermal_limit = G_lv.adj[path[0]][path[1]]['branch'].type['I_max_th']
mvlv_impedances[lv_node] = mv_impedance + lv_impedance
mvlv_path_lengths[lv_node] = mv_path_length + lv_path_length
lv_thermal_limits[lv_node] = lv_thermal_limit
mvlv_thermal_limits[lv_node] = mv_thermal_limit
elif isinstance(lv_node, LVStationDing0):
n_outgoing_LV += len(list(G_lv.neighbors(lv_node)))
n_stations_LV += 1
# compute mean values by looping over terminal nodes
sum_impedances = 0.
sum_thermal_limits = 0.
sum_path_lengths = 0.
n_terminal_nodes_MV = 0
# terminal nodes on MV
for terminal_node in mv_impedances.keys(): # neglect LVStations here because already part of MVLV paths below
if not isinstance(terminal_node, LVStationDing0) and not isinstance(terminal_node, MVStationDing0):
sum_impedances += mv_impedances[terminal_node]
sum_thermal_limits += mv_thermal_limits[terminal_node]
sum_path_lengths += mv_path_lengths[terminal_node]
n_terminal_nodes_MV += 1
sum_thermal_limits_LV = 0.
n_terminal_nodes_LV = 0
# terminal nodes on LV
for terminal_node in mvlv_impedances.keys():
sum_impedances += mvlv_impedances[terminal_node]
sum_thermal_limits += mvlv_thermal_limits[terminal_node]
sum_thermal_limits_LV += lv_thermal_limits[terminal_node]
sum_path_lengths += mvlv_path_lengths[terminal_node]
n_terminal_nodes_LV += 1
n_terminal_nodes = n_terminal_nodes_MV + n_terminal_nodes_LV
if n_terminal_nodes < 1:
mean_impedance = np.nan
mean_thermal_limit = np.nan
mean_path_length = np.nan
else:
mean_impedance = sum_impedances / n_terminal_nodes
mean_thermal_limit = sum_thermal_limits / n_terminal_nodes
mean_path_length = sum_path_lengths / n_terminal_nodes
if n_terminal_nodes_LV < 1:
mean_thermal_limit_LV = np.nan
else:
mean_thermal_limit_LV = sum_thermal_limits_LV / n_terminal_nodes_LV
number_outgoing_LV = n_outgoing_LV # / n_stations_LV
number_outgoing_MV = n_outgoing_MV
###################################
# compute path lengths (written by Miguel)
max_mv_path = 0
max_mvlv_path = 0
# rings
nodes_in_rings = []
branches_in_rings = []
for ring in district.mv_grid.rings_full_data():
ring_idx += 1
# generation cap
ring_gen = 0
for node in ring[2]:
nodes_in_rings.append(node)
if isinstance(node, GeneratorDing0):
ring_gen += node.capacity
# length
ring_length = 0
for branch in ring[1]:
branches_in_rings.append(branch)
ring_length += branch.length / 1e3
ring_dict[ring_idx] = {
'grid_id': district.mv_grid.id_db,
'ring_length': ring_length,
'ring_capacity': ring_gen,
}
# transformers in main station
for trafo in district.mv_grid.station().transformers():
trafos_idx += 1
trafos_dict[trafos_idx] = {
'grid_id': district.mv_grid.id_db,
's_max_a': trafo.s_max_a}
# Generators and other MV special nodes
cd_count = 0
LVs_count = 0
cb_count = 0
lv_trafo_count = 0
lv_trafo_cap = 0
for node in district.mv_grid._graph.nodes():
mv_path_length = 0
mvlv_path_length = 0
if isinstance(node, GeneratorDing0):
gen_idx += 1
isolation = not node in nodes_in_rings
subtype = node.subtype
if subtype == None:
subtype = 'other'
generators_dict[gen_idx] = {
'grid_id': district.mv_grid.id_db,
'type': node.type,
'sub_type': node.type + '/' + subtype,
'gen_cap': node.capacity,
'v_level': node.v_level,
'isolation': isolation,
}
mv_path_length = district.mv_grid.graph_path_length(
node_source=root,
node_target=node)
elif isinstance(node, MVCableDistributorDing0):
cd_count += 1
elif isinstance(node, LVStationDing0):
LVs_count += 1
lv_trafo_count += len([trafo for trafo in node.transformers()])
lv_trafo_cap += np.sum([trafo.s_max_a for trafo in node.transformers()])
if not node.lv_load_area.is_aggregated:
mv_path_length = district.mv_grid.graph_path_length(
node_source=root,
node_target=node)
max_lv_path = 0
for lv_LA in district.lv_load_areas():
for lv_dist in lv_LA.lv_grid_districts():
if lv_dist.lv_grid._station == node:
for lv_node in lv_dist.lv_grid._graph.nodes():
lv_path_length = lv_dist.lv_grid.graph_path_length(
node_source=node,
node_target=lv_node)
max_lv_path = max(max_lv_path, lv_path_length)
mvlv_path_length = mv_path_length + max_lv_path
elif isinstance(node, CircuitBreakerDing0):
cb_count += 1
max_mv_path = max(max_mv_path, mv_path_length / 1000)
max_mvlv_path = max(max_mvlv_path, mvlv_path_length / 1000)
other_nodes_dict[district.mv_grid.id_db] = {
'CD_count': cd_count,
'LV_count': LVs_count,
'CB_count': cb_count,
'MVLV_trafo_count': lv_trafo_count,
'MVLV_trafo_cap': lv_trafo_cap,
'max_mv_path': max_mv_path,
'max_mvlv_path': max_mvlv_path,
'mean_impedance': mean_impedance,
'mean_thermal_limit': mean_thermal_limit,
'mean_thermal_limit_LV': mean_thermal_limit_LV,
'mean_path_length': mean_path_length / 1.e3,
'number_outgoing_LV': number_outgoing_LV,
'number_outgoing_MV': number_outgoing_MV
}
# branches
for branch in district.mv_grid.graph_edges():
branch_idx += 1
br_in_ring = branch['branch'] in branches_in_rings
branches_dict[branch_idx] = {
'grid_id': district.mv_grid.id_db,
'length': branch['branch'].length / 1e3,
'type_name': branch['branch'].type['name'],
'type_kind': branch['branch'].kind,
'in_ring': br_in_ring,
}
# Load Areas
for LA in district.lv_load_areas():
LA_idx += 1
LA_dict[LA_idx] = {
'grid_id': district.mv_grid.id_db,
'is_agg': LA.is_aggregated,
'is_sat': LA.is_satellite,
# 'peak_gen':LA.peak_generation,
}
LA_pop = 0
residential_peak_load = 0
retail_peak_load = 0
industrial_peak_load = 0
agricultural_peak_load = 0
lv_gen_level_6 = 0
lv_gen_level_7 = 0
for lv_district in LA.lv_grid_districts():
                LA_pop += lv_district.population
residential_peak_load += lv_district.peak_load_residential
retail_peak_load += lv_district.peak_load_retail
industrial_peak_load += lv_district.peak_load_industrial
agricultural_peak_load += lv_district.peak_load_agricultural
# generation capacity
for g in lv_district.lv_grid.generators():
if g.v_level == 6:
lv_gen_level_6 += g.capacity
elif g.v_level == 7:
lv_gen_level_7 += g.capacity
# branches lengths
for br in lv_district.lv_grid.graph_edges():
lv_branches_idx += 1
lv_branches_dict[lv_branches_idx] = {
'grid_id': district.mv_grid.id_db,
'length': br['branch'].length / 1e3,
'type_name': br['branch'].type.to_frame().columns[0], # why is it different as for MV grids?
'type_kind': br['branch'].kind,
}
LA_dict[LA_idx].update({
'population': LA_pop,
'residential_peak_load': residential_peak_load,
'retail_peak_load': retail_peak_load,
'industrial_peak_load': industrial_peak_load,
'agricultural_peak_load': agricultural_peak_load,
'total_peak_load': residential_peak_load + retail_peak_load + \
industrial_peak_load + agricultural_peak_load,
'lv_generation': lv_gen_level_6 + lv_gen_level_7,
'lv_gens_lvl_6': lv_gen_level_6,
'lv_gens_lvl_7': lv_gen_level_7,
})
# geographic
        # WGS84 (conformal) to ETRS (equidistant) projection
proj = partial(
pyproj.transform,
# pyproj.Proj(init='epsg:3035'), # source coordinate system
# pyproj.Proj(init='epsg:4326')) # destination coordinate system
pyproj.Proj(init='epsg:4326'), # source coordinate system
pyproj.Proj(init='epsg:3035')) # destination coordinate system
district_geo = transform(proj, district.geo_data)
other_nodes_dict[district.mv_grid.id_db].update({'Dist_area': district_geo.area})
mvgd_stats = pd.DataFrame.from_dict({}, orient='index')
###################################
# built dataframes from dictionaries
trafos_df = pd.DataFrame.from_dict(trafos_dict, orient='index')
generators_df = pd.DataFrame.from_dict(generators_dict, orient='index')
other_nodes_df = pd.DataFrame.from_dict(other_nodes_dict, orient='index')
branches_df = pd.DataFrame.from_dict(branches_dict, orient='index')
lv_branches_df = pd.DataFrame.from_dict(lv_branches_dict, orient='index')
ring_df = pd.DataFrame.from_dict(ring_dict, orient='index')
LA_df = pd.DataFrame.from_dict(LA_dict, orient='index')
###################################
# Aggregated data HV/MV Trafos
if not trafos_df.empty:
mvgd_stats = pd.concat([mvgd_stats, trafos_df.groupby('grid_id').count()['s_max_a']], axis=1)
mvgd_stats = pd.concat([mvgd_stats, trafos_df.groupby('grid_id').sum()[['s_max_a']]], axis=1)
mvgd_stats.columns = ['N° of HV/MV Trafos', 'Trafos HV/MV Acc s_max_a']
###################################
# Aggregated data Generators
if not generators_df.empty:
# MV generation per sub_type
mv_generation = generators_df.groupby(['grid_id', 'sub_type'])['gen_cap'].sum().to_frame().unstack(level=-1)
mv_generation.columns = ['Gen. Cap. of MV ' + _[1] if isinstance(_, tuple) else _
for _ in mv_generation.columns]
mvgd_stats = pd.concat([mvgd_stats, mv_generation], axis=1)
# MV generation at V levels
mv_generation = generators_df.groupby(
['grid_id', 'v_level'])['gen_cap'].sum().to_frame().unstack(level=-1)
mv_generation.columns = ['Gen. Cap. of MV at v_level ' + str(_[1])
if isinstance(_, tuple) else _
for _ in mv_generation.columns]
mvgd_stats = pd.concat([mvgd_stats, mv_generation], axis=1)
# Isolated generators
mv_generation = generators_df[generators_df['isolation']].groupby(
['grid_id'])['gen_cap'].count().to_frame() # .unstack(level=-1)
mv_generation.columns = ['N° of isolated MV Generators']
mvgd_stats = pd.concat([mvgd_stats, mv_generation], axis=1)
###################################
# Aggregated data of other nodes
if not other_nodes_df.empty:
# print(other_nodes_df['CD_count'].to_frame())
mvgd_stats['N° of Cable Distr'] = other_nodes_df['CD_count'].to_frame().astype(int)
mvgd_stats['N° of LV Stations'] = other_nodes_df['LV_count'].to_frame().astype(int)
mvgd_stats['N° of Circuit Breakers'] = other_nodes_df['CB_count'].to_frame().astype(int)
mvgd_stats['District Area'] = other_nodes_df['Dist_area'].to_frame()
mvgd_stats['N° of MV/LV Trafos'] = other_nodes_df['MVLV_trafo_count'].to_frame().astype(int)
mvgd_stats['Trafos MV/LV Acc s_max_a'] = other_nodes_df['MVLV_trafo_cap'].to_frame()
mvgd_stats['Length of MV max path'] = other_nodes_df['max_mv_path'].to_frame()
mvgd_stats['Length of MVLV max path'] = other_nodes_df['max_mvlv_path'].to_frame()
mvgd_stats['Impedance Z of path to terminal node (mean value)'] = \
other_nodes_df['mean_impedance'].to_frame()
mvgd_stats['I_max of first segment of path from MV station to terminal node (mean value)'] = \
other_nodes_df['mean_thermal_limit'].to_frame()
mvgd_stats['I_max of first segment of path from LV station to terminal node (mean value)'] = \
other_nodes_df['mean_thermal_limit_LV'].to_frame()
mvgd_stats['Length of path from MV station to terminal node (mean value)'] = \
other_nodes_df['mean_path_length'].to_frame()
mvgd_stats['Number of lines and cables going out from LV stations'] = \
other_nodes_df['number_outgoing_LV'].to_frame()
mvgd_stats['Number of lines and cables going out from MV stations'] = \
other_nodes_df['number_outgoing_MV'].to_frame()
###################################
# Aggregated data of MV Branches
if not branches_df.empty:
# km of underground cable
branches_data = branches_df[branches_df['type_kind'] == 'cable'].groupby(
['grid_id'])['length'].sum().to_frame()
branches_data.columns = ['Length of MV underground cables']
mvgd_stats = pd.concat([mvgd_stats, branches_data], axis=1)
# km of overhead lines
branches_data = branches_df[branches_df['type_kind'] == 'line'].groupby(
['grid_id'])['length'].sum().to_frame()
branches_data.columns = ['Length of MV overhead lines']
mvgd_stats = pd.concat([mvgd_stats, branches_data], axis=1)
# km of different wire types
branches_data = branches_df.groupby(
['grid_id', 'type_name'])['length'].sum().to_frame().unstack(level=-1)
branches_data.columns = ['Length of MV type ' + _[1] if isinstance(_, tuple) else _
for _ in branches_data.columns]
mvgd_stats = pd.concat([mvgd_stats, branches_data], axis=1)
# branches not in ring
total_br = branches_df.groupby(['grid_id'])['length'].count().to_frame()
ring_br = branches_df[branches_df['in_ring']].groupby(
['grid_id'])['length'].count().to_frame()
branches_data = total_br - ring_br
total_br.columns = ['N° of MV branches']
mvgd_stats = pd.concat([mvgd_stats, total_br], axis=1)
branches_data.columns = ['N° of MV branches not in a ring']
mvgd_stats = pd.concat([mvgd_stats, branches_data], axis=1)
###################################
# Aggregated data of LV Branches
if not lv_branches_df.empty:
# km of underground cable
lv_branches_data = lv_branches_df[lv_branches_df['type_kind'] == 'cable'].groupby(
['grid_id'])['length'].sum().to_frame()
lv_branches_data.columns = ['Length of LV underground cables']
mvgd_stats = pd.concat([mvgd_stats, lv_branches_data], axis=1)
# km of overhead lines
lv_branches_data = lv_branches_df[lv_branches_df['type_kind'] == 'line'].groupby(
['grid_id'])['length'].sum().to_frame()
lv_branches_data.columns = ['Length of LV overhead lines']
mvgd_stats = pd.concat([mvgd_stats, lv_branches_data], axis=1)
# km of different wire types
lv_branches_data = lv_branches_df.groupby(
['grid_id', 'type_name'])['length'].sum().to_frame().unstack(level=-1)
lv_branches_data.columns = ['Length of LV type ' + _[1] if isinstance(_, tuple) else _
for _ in lv_branches_data.columns]
mvgd_stats = pd.concat([mvgd_stats, lv_branches_data], axis=1)
# n° of branches
total_lv_br = lv_branches_df.groupby(['grid_id'])['length'].count().to_frame()
total_lv_br.columns = ['N° of LV branches']
mvgd_stats = pd.concat([mvgd_stats, total_lv_br], axis=1)
###################################
# Aggregated data of Rings
if not ring_df.empty:
# N° of rings
ring_data = ring_df.groupby(['grid_id'])['grid_id'].count().to_frame()
ring_data.columns = ['N° of MV Rings']
mvgd_stats = pd.concat([mvgd_stats, ring_data], axis=1)
# min,max,mean km of all rings
ring_data = ring_df.groupby(['grid_id'])['ring_length'].min().to_frame()
ring_data.columns = ['Length of MV Ring min']
mvgd_stats = pd.concat([mvgd_stats, ring_data], axis=1)
ring_data = ring_df.groupby(['grid_id'])['ring_length'].max().to_frame()
ring_data.columns = ['Length of MV Ring max']
mvgd_stats = pd.concat([mvgd_stats, ring_data], axis=1)
ring_data = ring_df.groupby(['grid_id'])['ring_length'].mean().to_frame()
ring_data.columns = ['Length of MV Ring mean']
mvgd_stats = pd.concat([mvgd_stats, ring_data], axis=1)
# km of all rings
ring_data = ring_df.groupby(['grid_id'])['ring_length'].sum().to_frame()
ring_data.columns = ['Length of MV Rings total']
mvgd_stats = pd.concat([mvgd_stats, ring_data], axis=1)
# km of non-ring
non_ring_data = branches_df.groupby(['grid_id'])['length'].sum().to_frame()
non_ring_data.columns = ['Length of MV Rings total']
ring_data = non_ring_data - ring_data
ring_data.columns = ['Length of MV Non-Rings total']
mvgd_stats = pd.concat([mvgd_stats, ring_data.round(1).abs()], axis=1)
# rings generation capacity
ring_data = ring_df.groupby(['grid_id'])['ring_capacity'].sum().to_frame()
ring_data.columns = ['Gen. Cap. Connected to MV Rings']
mvgd_stats = pd.concat([mvgd_stats, ring_data], axis=1)
###################################
# Aggregated data of Load Areas
if not LA_df.empty:
LA_data = LA_df.groupby(['grid_id'])['population'].count().to_frame()
LA_data.columns = ['N° of Load Areas']
mvgd_stats = pd.concat([mvgd_stats, LA_data], axis=1)
LA_data = LA_df.groupby(['grid_id'])[
    ['population', 'residential_peak_load', 'retail_peak_load',
     'industrial_peak_load', 'agricultural_peak_load', 'total_peak_load',
     'lv_generation', 'lv_gens_lvl_6', 'lv_gens_lvl_7']].sum()
LA_data.columns = ['LA Total Population',
'LA Total LV Peak Load Residential',
'LA Total LV Peak Load Retail',
'LA Total LV Peak Load Industrial',
'LA Total LV Peak Load Agricultural',
'LA Total LV Peak Load total',
'LA Total LV Gen. Cap.',
'Gen. Cap. of LV at v_level 6',
'Gen. Cap. of LV at v_level 7',
]
mvgd_stats = pd.concat([mvgd_stats, LA_data], axis=1)
###################################
# Aggregated data of Aggregated Load Areas
if not LA_df.empty:
agg_LA_data = LA_df[LA_df['is_agg']].groupby(
['grid_id'])['population'].count().to_frame()
agg_LA_data.columns = ['N° of Load Areas - Aggregated']
mvgd_stats = pd.concat([mvgd_stats, agg_LA_data], axis=1)
sat_LA_data = LA_df[LA_df['is_sat']].groupby(
['grid_id'])['population'].count().to_frame()
sat_LA_data.columns = ['N° of Load Areas - Satellite']
mvgd_stats = pd.concat([mvgd_stats, sat_LA_data], axis=1)
agg_LA_data = LA_df[LA_df['is_agg']].groupby(['grid_id'])[
    ['population', 'lv_generation', 'total_peak_load']].sum()
agg_LA_data.columns = ['LA Aggregated Population',
'LA Aggregated LV Gen. Cap.', 'LA Aggregated LV Peak Load total'
]
mvgd_stats = pd.concat([mvgd_stats, agg_LA_data], axis=1)
###################################
mvgd_stats = mvgd_stats.fillna(0)
mvgd_stats = mvgd_stats[sorted(mvgd_stats.columns.tolist())]
return mvgd_stats
|
MV Statistics for an arbitrary network
Parameters
----------
nw: :any:`list` of NetworkDing0
The MV grid(s) to be studied
Returns
-------
mvgd_stats : pandas.DataFrame
Dataframe containing several statistical numbers about the MVGD
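A minimal usage sketch for the statistics routine documented above; the entry-point name calculate_mvgd_stats is an assumption, since the function signature itself is not shown in this excerpt:
# Hypothetical usage; `calculate_mvgd_stats` and the way `nw` was built are assumptions.
stats = calculate_mvgd_stats(nw)   # nw: list of NetworkDing0 objects
print(stats.shape)                 # one row per MV grid district, one column per metric
print(stats['N° of Load Areas'].sum())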
|
def extract_atoms(molecule):
"""Return a string with all atoms in molecule"""
if molecule == '':
return molecule
try:
return float(molecule)
except BaseException:
pass
atoms = ''
if not molecule[0].isalpha():
i = 0
while not molecule[i].isalpha():
i += 1
prefactor = float(molecule[:i])
if prefactor < 0:
prefactor = abs(prefactor)
sign = '-'
else:
sign = ''
molecule = molecule[i:]
else:
prefactor = 1
sign = ''
for k in range(len(molecule)):
if molecule[k].isdigit():
for j in range(int(molecule[k]) - 1):
atoms += molecule[k - 1]
else:
atoms += molecule[k]
if prefactor % 1 == 0:
atoms *= int(prefactor)
elif prefactor % 1 == 0.5:
atoms_sort = sorted(atoms)
N = len(atoms)
atoms = ''
for n in range(N):
for m in range(int(prefactor - 0.5)):
atoms += atoms_sort[n]
if n % 2 == 0:
atoms += atoms_sort[n]
return sign + ''.join(sorted(atoms))
|
Return a string with all atoms in molecule
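A few illustrative calls may make the parsing rules clearer; the expected results below were traced from the implementation above rather than taken from an external reference:
extract_atoms('')        # -> ''
extract_atoms('H2O')     # -> 'HHO'     (digit repeats the preceding atom; result is sorted)
extract_atoms('2CO2')    # -> 'CCOOOO'  (integer prefactor repeats the whole atom set)
extract_atoms('1.5H2')   # -> 'HHH'     (half-integer prefactors are handled separately)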
|
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
# TODO: Check how the `last_insert_id` is being used in the upper layers
# in context of multithreaded access, compare with other backends
# IDENT_CURRENT: http://msdn2.microsoft.com/en-us/library/ms175098.aspx
# SCOPE_IDENTITY: http://msdn2.microsoft.com/en-us/library/ms190315.aspx
# @@IDENTITY: http://msdn2.microsoft.com/en-us/library/ms187342.aspx
# IDENT_CURRENT is not limited by scope and session; it is limited to
# a specified table. IDENT_CURRENT returns the value generated for
# a specific table in any session and any scope.
# SCOPE_IDENTITY and @@IDENTITY return the last identity values that
# are generated in any table in the current session. However,
# SCOPE_IDENTITY returns values inserted only within the current scope;
# @@IDENTITY is not limited to a specific scope.
table_name = self.quote_name(table_name)
cursor.execute("SELECT CAST(IDENT_CURRENT(%s) as bigint)", [table_name])
return cursor.fetchone()[0]
|
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
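A hedged calling sketch; `ops` stands for the backend's DatabaseOperations instance, `cursor` for an open cursor, and the table and column names are placeholders:
# Sketch only; names are illustrative, not taken from this document.
cursor.execute("INSERT INTO my_table (name) VALUES (%s)", ["example"])
new_id = ops.last_insert_id(cursor, "my_table", "id")
# internally issues: SELECT CAST(IDENT_CURRENT('[my_table]') as bigint)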
|
def is_module_on_std_lib_path(cls, module):
"""
Sometimes .py files are symlinked to the real python files, such as the case of virtual
env. However the .pyc files are created under the virtual env directory rather than
the path in cls.STANDARD_LIB_PATH. Hence this function checks for both.
:param module: a module
:return: True if module is on interpreter's stdlib path. False otherwise.
"""
module_file_real_path = os.path.realpath(module.__file__)
if module_file_real_path.startswith(cls.STANDARD_LIB_PATH):
return True
elif os.path.splitext(module_file_real_path)[1] == '.pyc':
py_file_real_path = os.path.realpath(os.path.splitext(module_file_real_path)[0] + '.py')
return py_file_real_path.startswith(cls.STANDARD_LIB_PATH)
return False
|
Sometimes .py files are symlinked to the real python files, such as the case of virtual
env. However the .pyc files are created under the virtual env directory rather than
the path in cls.STANDARD_LIB_PATH. Hence this function checks for both.
:param module: a module
:return: True if module is on interpreter's stdlib path. False otherwise.
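A small illustrative check; `ModuleScanner` is a stand-in name for whatever class defines this classmethod:
import os
import pandas  # any third-party package installed outside the stdlib path
ModuleScanner.is_module_on_std_lib_path(os)      # expected True for a stdlib module
ModuleScanner.is_module_on_std_lib_path(pandas)  # expected False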
|
def format_cert_name(env='', account='', region='', certificate=None):
"""Format the SSL certificate name into ARN for ELB.
Args:
env (str): Account environment name
account (str): Account number for ARN
region (str): AWS Region.
certificate (str): Name of SSL certificate
Returns:
str: Fully qualified ARN for SSL certificate
None: Certificate is not desired
"""
cert_name = None
if certificate:
if certificate.startswith('arn'):
LOG.info("Full ARN provided...skipping lookup.")
cert_name = certificate
else:
generated_cert_name = generate_custom_cert_name(env, region, account, certificate)
if generated_cert_name:
LOG.info("Found generated certificate %s from template", generated_cert_name)
cert_name = generated_cert_name
else:
LOG.info("Using default certificate name logic")
cert_name = ('arn:aws:iam::{account}:server-certificate/{name}'.format(
account=account, name=certificate))
LOG.debug('Certificate name: %s', cert_name)
return cert_name
|
Format the SSL certificate name into ARN for ELB.
Args:
env (str): Account environment name
account (str): Account number for ARN
region (str): AWS Region.
certificate (str): Name of SSL certificate
Returns:
str: Fully qualified ARN for SSL certificate
None: Certificate is not desired
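A usage sketch under assumed inputs; the account number and certificate names below are placeholders:
# Placeholder values for illustration only.
format_cert_name(env='dev', account='123456789012', region='us-east-1', certificate='my-cert')
# -> 'arn:aws:iam::123456789012:server-certificate/my-cert'  (when no templated name is found)
format_cert_name(certificate='arn:aws:acm:us-east-1:123456789012:certificate/abc')
# -> the ARN is returned unchanged
format_cert_name(certificate=None)
# -> None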
|
def _annotate_validations(eval_files, data):
"""Add annotations about potential problem regions to validation VCFs.
"""
for key in ["tp", "tp-calls", "fp", "fn"]:
if eval_files.get(key):
eval_files[key] = annotation.add_genome_context(eval_files[key], data)
return eval_files
|
Add annotations about potential problem regions to validation VCFs.
|
def sine(w, A=1, phi=0, offset=0):
''' Return a driver function that can advance a sequence of sine values.
.. code-block:: none
value = A * sin(w*i + phi) + offset
Args:
w (float) : a frequency for the sine driver
A (float) : an amplitude for the sine driver
phi (float) : a phase offset to start the sine driver with
offset (float) : a global offset to add to the driver values
'''
from math import sin
def f(i):
return A * sin(w*i + phi) + offset
return partial(force, sequence=_advance(f))
|
Return a driver function that can advance a sequence of sine values.
.. code-block:: none
value = A * sin(w*i + phi) + offset
Args:
w (float) : a frequency for the sine driver
A (float) : an amplitude for the sine driver
phi (float) : a phase offset to start the sine driver with
offset (float) : a global offset to add to the driver values
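A small numeric sketch of the underlying formula, independent of the driver machinery it is wrapped in here:
from math import sin
w, A, phi, offset = 0.1, 2.0, 0.0, 1.0
values = [A * sin(w * i + phi) + offset for i in range(5)]
# i = 0..4 -> [1.0, 1.1997, 1.3973, 1.5910, 1.7788] (approximately)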
|
def store_text_log_summary_artifact(job, text_log_summary_artifact):
"""
Store the contents of the text log summary artifact
"""
step_data = json.loads(
text_log_summary_artifact['blob'])['step_data']
result_map = {v: k for (k, v) in TextLogStep.RESULTS}
with transaction.atomic():
for step in step_data['steps']:
name = step['name'][:TextLogStep._meta.get_field('name').max_length]
# process start/end times if we have them
# we currently don't support timezones in treeherder, so
# just ignore that when importing/updating the bug to avoid
# a ValueError (though by default the text log summaries
# we produce should have time expressed in UTC anyway)
time_kwargs = {}
for tkey in ('started', 'finished'):
if step.get(tkey):
time_kwargs[tkey] = dateutil.parser.parse(
step[tkey], ignoretz=True)
log_step = TextLogStep.objects.create(
job=job,
started_line_number=step['started_linenumber'],
finished_line_number=step['finished_linenumber'],
name=name,
result=result_map[step['result']],
**time_kwargs)
if step.get('errors'):
for error in step['errors']:
TextLogError.objects.create(
step=log_step,
line_number=error['linenumber'],
line=astral_filter(error['line']))
# get error summary immediately (to warm the cache)
error_summary.get_error_summary(job)
|
Store the contents of the text log summary artifact
|
def change_attributes(self, bounds, radii, colors):
"""Reinitialize the buffers, to accomodate the new
attributes. This is used to change the number of cylinders to
be displayed.
"""
self.n_cylinders = len(bounds)
self.is_empty = self.n_cylinders == 0
if self.is_empty:
self.bounds = bounds
self.radii = radii
self.colors = colors
return # Do nothing
# We pass the starting position 8 times, and each of these has
# a mapping to the bounding box corner.
self.bounds = np.array(bounds, dtype='float32')
vertices, directions = self._gen_bounds(self.bounds)
self.radii = np.array(radii, dtype='float32')
prim_radii = self._gen_radii(self.radii)
self.colors = np.array(colors, dtype='uint8')
prim_colors = self._gen_colors(self.colors)
local = np.array([
# First face -- front
0.0, 0.0, 0.0,
0.0, 1.0, 0.0,
1.0, 1.0, 0.0,
0.0, 0.0, 0.0,
1.0, 1.0, 0.0,
1.0, 0.0, 0.0,
# Second face -- back
0.0, 0.0, 1.0,
0.0, 1.0, 1.0,
1.0, 1.0, 1.0,
0.0, 0.0, 1.0,
1.0, 1.0, 1.0,
1.0, 0.0, 1.0,
# Third face -- left
0.0, 0.0, 0.0,
0.0, 0.0, 1.0,
0.0, 1.0, 1.0,
0.0, 0.0, 0.0,
0.0, 1.0, 1.0,
0.0, 1.0, 0.0,
# Fourth face -- right
1.0, 0.0, 0.0,
1.0, 0.0, 1.0,
1.0, 1.0, 1.0,
1.0, 0.0, 0.0,
1.0, 1.0, 1.0,
1.0, 1.0, 0.0,
# Fifth face -- up
0.0, 1.0, 0.0,
0.0, 1.0, 1.0,
1.0, 1.0, 1.0,
0.0, 1.0, 0.0,
1.0, 1.0, 1.0,
1.0, 1.0, 0.0,
# Sixth face -- down
0.0, 0.0, 0.0,
0.0, 0.0, 1.0,
1.0, 0.0, 1.0,
0.0, 0.0, 0.0,
1.0, 0.0, 1.0,
1.0, 0.0, 0.0,
]).astype('float32')
local = np.tile(local, self.n_cylinders)
self._verts_vbo = VertexBuffer(vertices,GL_DYNAMIC_DRAW)
self._directions_vbo = VertexBuffer(directions, GL_DYNAMIC_DRAW)
self._local_vbo = VertexBuffer(local,GL_DYNAMIC_DRAW)
self._color_vbo = VertexBuffer(prim_colors, GL_DYNAMIC_DRAW)
self._radii_vbo = VertexBuffer(prim_radii, GL_DYNAMIC_DRAW)
|
Reinitialize the buffers, to accommodate the new
attributes. This is used to change the number of cylinders to
be displayed.
|
def buffer_read(library, session, count):
"""Reads data from device or interface through the use of a formatted I/O read buffer.
Corresponds to viBufRead function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param count: Number of bytes to be read.
:return: data read, return value of the library call.
:rtype: bytes, :class:`pyvisa.constants.StatusCode`
"""
buffer = create_string_buffer(count)
return_count = ViUInt32()
ret = library.viBufRead(session, buffer, count, byref(return_count))
return buffer.raw[:return_count.value], ret
|
Reads data from device or interface through the use of a formatted I/O read buffer.
Corresponds to viBufRead function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param count: Number of bytes to be read.
:return: data read, return value of the library call.
:rtype: bytes, :class:`pyvisa.constants.StatusCode`
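A hedged calling sketch; `lib` and `session` are assumed to be the ctypes-wrapped VISA library and an open session handle obtained elsewhere:
# `lib` and `session` are assumptions, not defined in this excerpt.
data, status = buffer_read(lib, session, 1024)
print(len(data), "bytes read, status:", status)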
|
def strsettings(self, indent=0, maxindent=25, width=0):
"""Return user friendly help on positional arguments.
indent is the number of spaces preceeding the text on each line.
The indent of the documentation is dependent on the length of the
longest label that is shorter than maxindent. A label longer than
maxindent will be printed on its own line.
width is maximum allowed page width, use self.width if 0.
"""
out = []
makelabel = lambda name: ' ' * indent + name + ': '
settingsindent = _autoindent([makelabel(s) for s in self.options], indent, maxindent)
for name in self.option_order:
option = self.options[name]
label = makelabel(name)
settingshelp = "%s(%s): %s" % (option.formatname, option.strvalue, option.location)
wrapped = self._wrap_labelled(label, settingshelp, settingsindent, width)
out.extend(wrapped)
return '\n'.join(out)
|
Return user-friendly help on positional arguments.
indent is the number of spaces preceding the text on each line.
The indent of the documentation is dependent on the length of the
longest label that is shorter than maxindent. A label longer than
maxindent will be printed on its own line.
width is maximum allowed page width, use self.width if 0.
|
def get(self, field_paths=None, transaction=None):
"""Retrieve a snapshot of the current document.
See :meth:`~.firestore_v1beta1.client.Client.field_path` for
more information on **field paths**.
If a ``transaction`` is used and it already has write operations
added, this method cannot be used (i.e. read-after-write is not
allowed).
Args:
field_paths (Optional[Iterable[str, ...]]): An iterable of field
paths (``.``-delimited list of field names) to use as a
projection of document fields in the returned results. If
no value is provided, all fields will be returned.
transaction (Optional[~.firestore_v1beta1.transaction.\
Transaction]): An existing transaction that this reference
will be retrieved in.
Returns:
~.firestore_v1beta1.document.DocumentSnapshot: A snapshot of
the current document. If the document does not exist at
the time of `snapshot`, the snapshot `reference`, `data`,
`update_time`, and `create_time` attributes will all be
`None` and `exists` will be `False`.
"""
if isinstance(field_paths, six.string_types):
raise ValueError("'field_paths' must be a sequence of paths, not a string.")
if field_paths is not None:
mask = common_pb2.DocumentMask(field_paths=sorted(field_paths))
else:
mask = None
firestore_api = self._client._firestore_api
try:
document_pb = firestore_api.get_document(
self._document_path,
mask=mask,
transaction=_helpers.get_transaction_id(transaction),
metadata=self._client._rpc_metadata,
)
except exceptions.NotFound:
data = None
exists = False
create_time = None
update_time = None
else:
data = _helpers.decode_dict(document_pb.fields, self._client)
exists = True
create_time = document_pb.create_time
update_time = document_pb.update_time
return DocumentSnapshot(
reference=self,
data=data,
exists=exists,
read_time=None, # No server read_time available
create_time=create_time,
update_time=update_time,
)
|
Retrieve a snapshot of the current document.
See :meth:`~.firestore_v1beta1.client.Client.field_path` for
more information on **field paths**.
If a ``transaction`` is used and it already has write operations
added, this method cannot be used (i.e. read-after-write is not
allowed).
Args:
field_paths (Optional[Iterable[str, ...]]): An iterable of field
paths (``.``-delimited list of field names) to use as a
projection of document fields in the returned results. If
no value is provided, all fields will be returned.
transaction (Optional[~.firestore_v1beta1.transaction.\
Transaction]): An existing transaction that this reference
will be retrieved in.
Returns:
~.firestore_v1beta1.document.DocumentSnapshot: A snapshot of
the current document. If the document does not exist at
the time of `snapshot`, the snapshot `reference`, `data`,
`update_time`, and `create_time` attributes will all be
`None` and `exists` will be `False`.
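A usage sketch against an assumed document reference; the collection, document, and field names are placeholders:
# Placeholder names; `client` is assumed to be a firestore_v1beta1 Client.
doc_ref = client.collection('users').document('alice')
snapshot = doc_ref.get(field_paths=['name', 'address.city'])
if snapshot.exists:
    print(snapshot.to_dict())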
|
def revise(csp, Xi, Xj, removals):
"Return true if we remove a value."
revised = False
for x in csp.curr_domains[Xi][:]:
# If Xi=x conflicts with Xj=y for every possible y, eliminate Xi=x
if every(lambda y: not csp.constraints(Xi, x, Xj, y),
csp.curr_domains[Xj]):
csp.prune(Xi, x, removals)
revised = True
return revised
|
Return true if we remove a value.
|
def get_eventhub_info(self):
"""
Get details on the specified EventHub.
Keys in the details dictionary include:
-'name'
-'type'
-'created_at'
-'partition_count'
-'partition_ids'
:rtype: dict
"""
alt_creds = {
"username": self._auth_config.get("iot_username"),
"password":self._auth_config.get("iot_password")}
try:
mgmt_auth = self._create_auth(**alt_creds)
mgmt_client = uamqp.AMQPClient(self.mgmt_target, auth=mgmt_auth, debug=self.debug)
mgmt_client.open()
mgmt_msg = Message(application_properties={'name': self.eh_name})
response = mgmt_client.mgmt_request(
mgmt_msg,
constants.READ_OPERATION,
op_type=b'com.microsoft:eventhub',
status_code_field=b'status-code',
description_fields=b'status-description')
eh_info = response.get_data()
output = {}
if eh_info:
output['name'] = eh_info[b'name'].decode('utf-8')
output['type'] = eh_info[b'type'].decode('utf-8')
output['created_at'] = datetime.datetime.fromtimestamp(float(eh_info[b'created_at'])/1000)
output['partition_count'] = eh_info[b'partition_count']
output['partition_ids'] = [p.decode('utf-8') for p in eh_info[b'partition_ids']]
return output
finally:
mgmt_client.close()
|
Get details on the specified EventHub.
Keys in the details dictionary include:
-'name'
-'type'
-'created_at'
-'partition_count'
-'partition_ids'
:rtype: dict
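A hedged usage sketch; how the Event Hub client is constructed is assumed and not shown in this excerpt:
# `client` is assumed to be an already-constructed client exposing this method.
info = client.get_eventhub_info()
print(info['name'], info['partition_count'])
for pid in info['partition_ids']:
    print("partition:", pid)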
|