code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def broadcast(*sinks_):
    """The |broadcast| decorator creates a |push| object that receives a
    message by ``yield`` and then forwards that message to every sink in
    ``sinks_``.
    .. |broadcast| replace:: :py:func:`broadcast`
    """
    @push
    def bc():
        # Instantiate each sink coroutine once, then fan every received
        # message out to all of them.
        targets = [sink() for sink in sinks_]
        while True:
            message = yield
            for target in targets:
                target.send(message)
    return bc
|
The |broadcast| decorator creates a |push| object that receives a
message by ``yield`` and then sends this message on to all the given sinks.
.. |broadcast| replace:: :py:func:`broadcast`
|
def _sb_decoder(self):
    """
    Figures out what to do with a received telnet sub-negotiation block.

    Reads the raw bytes from ``self.telnet_sb_buffer``, handles TTYPE
    (terminal type) and NAWS (window size) blocks, then clears the buffer.
    NOTE: Python 2 code (print statements, byte-string indexing).
    """
    #print "at decoder"
    bloc = self.telnet_sb_buffer
    if len(bloc) > 2:
        # TTYPE IS: remainder of the block is the terminal type string.
        if bloc[0] == TTYPE and bloc[1] == IS:
            self.terminal_type = bloc[2:]
            #print "Terminal type = '%s'" % self.terminal_type
        # NAWS: option byte + 4 payload bytes (2 for columns, 2 for rows).
        if bloc[0] == NAWS:
            if len(bloc) != 5:
                print "Bad length on NAWS SB:", len(bloc)
            else:
                # Each dimension is a 16-bit big-endian value.
                self.columns = (256 * ord(bloc[1])) + ord(bloc[2])
                self.rows = (256 * ord(bloc[3])) + ord(bloc[4])
            #print "Screen is %d x %d" % (self.columns, self.rows)
        # NOTE(review): buffer is cleared only when len(bloc) > 2 here;
        # confirm short blocks are intentionally left buffered.
        self.telnet_sb_buffer = ''
|
Figures out what to do with a received sub-negotiation block.
|
def set_calibrated_weights(self):
    """Push the calibrated weights into the scenario's simulation(s) as input."""
    weight_period = self.period
    scenario = self.survey_scenario
    assert scenario.simulation is not None
    # The baseline simulation is optional; skip it when absent.
    candidates = (scenario.simulation, scenario.baseline_simulation)
    for sim in candidates:
        if sim is None:
            continue
        sim.set_input(self.weight_name, weight_period, self.weight)
|
Modify the weights to use the calibrated weights
|
async def states(self, country: str) -> list:
    """Return a list of supported states in a country."""
    payload = await self._request(
        'get', 'states', params={'country': country})
    return [entry['state'] for entry in payload['data']]
|
Return a list of supported states in a country.
|
def call_listen(self, chunks, running):
    '''
    Find all of the listen routines and call the associated mod_watch runs.

    ``chunks`` are low-data state chunks; ``running`` is the dict of state
    run results, which is updated in place with mod_watch results and any
    reference errors, then returned.
    '''
    listeners = []
    # Map (state, __id__, name) -> chunk for quick reference lookups.
    crefs = {}
    for chunk in chunks:
        crefs[(chunk['state'], chunk['__id__'], chunk['name'])] = chunk
        if 'listen' in chunk:
            listeners.append({(chunk['state'], chunk['__id__'], chunk['name']): chunk['listen']})
        if 'listen_in' in chunk:
            # listen_in inverts the relationship: this chunk listens to the
            # referenced one, recorded with the placeholder name 'lookup'.
            for l_in in chunk['listen_in']:
                for key, val in six.iteritems(l_in):
                    listeners.append({(key, val, 'lookup'): [{chunk['state']: chunk['__id__']}]})
    mod_watchers = []
    errors = {}
    for l_dict in listeners:
        for key, val in six.iteritems(l_dict):
            for listen_to in val:
                if not isinstance(listen_to, dict):
                    # Bare string reference: resolve it against chunk ids
                    # and names; unresolved references are silently skipped.
                    found = False
                    for chunk in chunks:
                        if chunk['__id__'] == listen_to or \
                                chunk['name'] == listen_to:
                            listen_to = {chunk['state']: chunk['__id__']}
                            found = True
                    if not found:
                        continue
                for lkey, lval in six.iteritems(listen_to):
                    if not any(lkey == cref[0] and lval in cref for cref in crefs):
                        # Referenced state does not exist: record an error
                        # entry keyed with a listen tag.
                        rerror = {_l_tag(lkey, lval):
                            {
                                'comment': 'Referenced state {0}: {1} does not exist'.format(lkey, lval),
                                'name': 'listen_{0}:{1}'.format(lkey, lval),
                                'result': False,
                                'changes': {}
                            }}
                        errors.update(rerror)
                        continue
                    to_tags = [
                        _gen_tag(data) for cref, data in six.iteritems(crefs) if lkey == cref[0] and lval in cref
                    ]
                    for to_tag in to_tags:
                        if to_tag not in running:
                            continue
                        if running[to_tag]['changes']:
                            # NOTE(review): from here on the code checks
                            # key[0]/key[1] (the *listener* key from the
                            # outer loop), not lkey/lval — looks
                            # inconsistent but is kept as-is.
                            if not any(key[0] == cref[0] and key[1] in cref for cref in crefs):
                                rerror = {_l_tag(key[0], key[1]):
                                          {'comment': 'Referenced state {0}: {1} does not exist'.format(key[0], key[1]),
                                           'name': 'listen_{0}:{1}'.format(key[0], key[1]),
                                           'result': False,
                                           'changes': {}}}
                                errors.update(rerror)
                                continue
                            new_chunks = [data for cref, data in six.iteritems(crefs) if key[0] == cref[0] and key[1] in cref]
                            for chunk in new_chunks:
                                # Build a mod_watch low chunk from the
                                # listener chunk, stripping requisites so
                                # it runs unconditionally.
                                low = chunk.copy()
                                low['sfun'] = chunk['fun']
                                low['fun'] = 'mod_watch'
                                low['__id__'] = 'listener_{0}'.format(low['__id__'])
                                for req in STATE_REQUISITE_KEYWORDS:
                                    if req in low:
                                        low.pop(req)
                                mod_watchers.append(low)
    ret = self.call_chunks(mod_watchers)
    running.update(ret)
    # Assign run numbers to the error entries so they sort after real runs.
    for err in errors:
        errors[err]['__run_num__'] = self.__run_num
        self.__run_num += 1
    running.update(errors)
    return running
|
Find all of the listen routines and call the associated mod_watch runs
|
def form_valid(self, post_form, attachment_formset, **kwargs):
    """ Processes valid forms.
    Renders a preview when requested; otherwise saves the Post (and its
    attachments, when provided) and redirects to the success page.
    """
    if self.preview:
        context = self.get_context_data(
            preview=True, post_form=post_form, attachment_formset=attachment_formset,
            **kwargs
        )
        return self.render_to_response(context)
    # This is not a preview ; the object is going to be saved
    self.forum_post = post_form.save()
    if attachment_formset is not None:
        attachment_formset.post = self.forum_post
        attachment_formset.save()
    messages.success(self.request, self.success_message)
    if not self.forum_post.approved:
        messages.warning(self.request, self.approval_required_message)
    return HttpResponseRedirect(self.get_success_url())
|
Processes valid forms.
Called if all forms are valid. Creates a Post instance along with associated attachments if
required and then redirects to a success page.
|
def is_action_available(self, action):
    """Determines whether *action* is available, i.e. whether executing it
    would change the state.

    The board is rotated so that the action maps onto a left move, then the
    left-move availability check is applied.
    """
    rotated = np.rot90(self._state, action)
    return self._is_action_available_left(rotated)
|
Determines whether action is available.
That is, executing it would change the state.
|
def _AddStopTimeObjectUnordered(self, stoptime, schedule):
"""Add StopTime object to this trip.
The trip isn't checked for duplicate sequence numbers so it must be
validated later."""
stop_time_class = self.GetGtfsFactory().StopTime
cursor = schedule._connection.cursor()
insert_query = "INSERT INTO stop_times (%s) VALUES (%s);" % (
','.join(stop_time_class._SQL_FIELD_NAMES),
','.join(['?'] * len(stop_time_class._SQL_FIELD_NAMES)))
cursor = schedule._connection.cursor()
cursor.execute(
insert_query, stoptime.GetSqlValuesTuple(self.trip_id))
|
Add StopTime object to this trip.
The trip isn't checked for duplicate sequence numbers so it must be
validated later.
|
def ls(args):
    """
    List sites
    ----------
    Show the list of installed sites.
    ::
        usage: makesite ls [-h] [-v] [-p PATH]
    optional arguments:
        -p PATH, --path PATH path to the makesite sites installation dir;
                             you can also set it in the $makesite_home
                             env variable.
    Examples: ::
        makesite ls
    """
    assert args.path, "Not finded MAKESITE HOME."
    print_header("Installed sites:")
    for installed_site in gen_sites(args.path):
        LOGGER.debug(installed_site.get_info())
    return True
|
List sites
----------
Show list of installed sites.
::
usage: makesite ls [-h] [-v] [-p PATH]
Show list of installed sites.
optional arguments:
-p PATH, --path PATH path to the makesite sites installation dir. You can set it
in $makesite_home env variable.
Examples: ::
makesite ls
|
def declare_queue(self, queue_name='', passive=False, durable=False,
                  exclusive=False, auto_delete=False, arguments=None):
    """Declare a queue on the current channel.
    :param queue_name: queue name ('' lets the broker generate one)
    :param passive: only check whether the queue exists
    :param durable: whether the queue survives broker restarts
    :param exclusive: restrict the queue to this connection
    :param auto_delete: delete the queue when no longer used
    :param arguments: extra broker-specific arguments
    :return: the (possibly server-generated) queue name
    """
    declared = self._channel.queue_declare(
        queue=queue_name,
        passive=passive,
        durable=durable,
        exclusive=exclusive,
        auto_delete=auto_delete,
        arguments=arguments,
    )
    return declared.method.queue
|
Declare a queue
:param queue_name: the queue name
:param passive:
:param durable:
:param exclusive:
:param auto_delete:
:param arguments:
:return: the randomly named callback queue generated by the pika framework
|
def _expand_slice(self, indices):
"""
Expands slices containing steps into a list.
"""
keys = list(self.data.keys())
expanded = []
for idx, ind in enumerate(indices):
if isinstance(ind, slice) and ind.step is not None:
dim_ind = slice(ind.start, ind.stop)
if dim_ind == slice(None):
condition = self._all_condition()
elif dim_ind.start is None:
condition = self._upto_condition(dim_ind)
elif dim_ind.stop is None:
condition = self._from_condition(dim_ind)
else:
condition = self._range_condition(dim_ind)
dim_vals = unique_iterator(k[idx] for k in keys)
expanded.append(set([k for k in dim_vals if condition(k)][::int(ind.step)]))
else:
expanded.append(ind)
return tuple(expanded)
|
Expands slices containing steps into a list.
|
def get_grid_points_by_rotations(address_orig,
                                 reciprocal_rotations,
                                 mesh,
                                 is_shift=None,
                                 is_dense=False):
    """Returns grid points obtained after rotating input grid address
    Parameters
    ----------
    address_orig : array_like
        Grid point address to be rotated.
        dtype='intc', shape=(3,)
    reciprocal_rotations : array_like
        Rotation matrices {R} with respect to reciprocal basis vectors.
        Defined by q'=Rq.
        dtype='intc', shape=(rotations, 3, 3)
    mesh : array_like
        dtype='intc', shape=(3,)
    is_shift : array_like, optional
        With (1) or without (0) half grid shifts with respect to grid
        intervals sampled along reciprocal basis vectors. Default is None,
        which gives [0, 0, 0].
    is_dense : bool, optional
        rot_grid_points is returned with dtype='uintp' if True. Otherwise
        its dtype='intc'. Default is False.
    Returns
    -------
    rot_grid_points : ndarray
        Grid points obtained after rotating input grid address
        dtype='intc' or 'uintp', shape=(rotations,)
    """
    _set_no_error()
    if is_shift is None:
        shift = np.zeros(3, dtype='intc')
    else:
        shift = np.array(is_shift, dtype='intc')
    # Output buffer filled in place by the C extension.
    rot_grid_points = np.zeros(len(reciprocal_rotations), dtype='uintp')
    spg.grid_points_by_rotations(
        rot_grid_points,
        np.array(address_orig, dtype='intc'),
        np.array(reciprocal_rotations, dtype='intc', order='C'),
        np.array(mesh, dtype='intc'),
        shift)
    if not is_dense:
        return np.array(rot_grid_points, dtype='intc')
    return rot_grid_points
|
Returns grid points obtained after rotating input grid address
Parameters
----------
address_orig : array_like
Grid point address to be rotated.
dtype='intc', shape=(3,)
reciprocal_rotations : array_like
Rotation matrices {R} with respect to reciprocal basis vectors.
Defined by q'=Rq.
dtype='intc', shape=(rotations, 3, 3)
mesh : array_like
dtype='intc', shape=(3,)
is_shift : array_like, optional
With (1) or without (0) half grid shifts with respect to grid intervals
sampled along reciprocal basis vectors. Default is None, which
gives [0, 0, 0].
is_dense : bool, optional
rot_grid_points is returned with dtype='uintp' if True. Otherwise
its dtype='intc'. Default is False.
Returns
-------
rot_grid_points : ndarray
Grid points obtained after rotating input grid address
dtype='intc' or 'uintp', shape=(rotations,)
|
def add_task(self, task_id, backend, category, backend_args,
             archive_args=None, sched_args=None):
    """Add and schedule a task.
    :param task_id: id of the task
    :param backend: name of the backend
    :param category: category of the items to fetch
    :param backend_args: args needed to initialize the backend
    :param archive_args: args needed to initialize the archive
    :param sched_args: scheduling args for this task
    :returns: the task created
    :raises ValueError: when any of the given args are invalid
    :raises AlreadyExistsError: when a task with `task_id` already exists
    """
    # Validation errors (ValueError) and duplicate tasks
    # (AlreadyExistsError) propagate naturally; the original
    # `except X as e: raise e` blocks added nothing but a mangled
    # traceback.
    archiving_cfg = self.__parse_archive_args(archive_args)
    scheduling_cfg = self.__parse_schedule_args(sched_args)
    self.__validate_args(task_id, backend, category, backend_args)
    task = self._tasks.add(task_id, backend, category, backend_args,
                           archiving_cfg=archiving_cfg,
                           scheduling_cfg=scheduling_cfg)
    self._scheduler.schedule_task(task.task_id)
    return task
|
Add and schedule a task.
:param task_id: id of the task
:param backend: name of the backend
:param category: category of the items to fetch
:param backend_args: args needed to initialize the backend
:param archive_args: args needed to initialize the archive
:param sched_args: scheduling args for this task
:returns: the task created
|
def kmodels(wordlen: int, k: int, input=None, output=None):
    """Return a circuit taking a wordlen bitvector where only k
    valuations return True. Uses encoding from [1].
    Note that this is equivalent to (~x < k).
    - TODO: Add automated simplification so that the circuits
      are equiv.
    [1]: Chakraborty, Supratik, et al. "From Weighted to Unweighted Model
    Counting." IJCAI. 2015.
    """
    assert 0 <= k < 2**wordlen
    output = _fresh() if output is None else output
    input = _fresh() if input is None else input
    input_names = named_indexes(wordlen, input)
    bit_exprs = map(aiger.atom, input_names)
    seen_one = False
    acc = aiger.atom(False)
    bits = encode_int(wordlen, k, signed=False)
    for bit_expr, bit in zip(bit_exprs, bits):
        seen_one |= bit
        if not seen_one:  # Skip until the first 1 bit of k.
            continue
        acc = (acc | bit_expr) if bit else (acc & bit_expr)
    return aigbv.AIGBV(
        aig=acc.aig,
        input_map=frozenset([(input, tuple(input_names))]),
        output_map=frozenset([(output, (acc.output,))]),
    )
|
Return a circuit taking a wordlen bitvector where only k
valuations return True. Uses encoding from [1].
Note that this is equivalent to (~x < k).
- TODO: Add automated simplification so that the circuits
are equiv.
[1]: Chakraborty, Supratik, et al. "From Weighted to Unweighted Model
Counting." IJCAI. 2015.
|
def arguments_to_lists(function):
    """
    Decorator for a function that converts all arguments to lists.
    :param function: target function
    :return: target function with only lists as parameters
    """
    import functools

    # functools.wraps preserves the target's __name__/__doc__, which the
    # original wrapper silently discarded.
    @functools.wraps(function)
    def l_function(*args, **kwargs):
        l_args = [_to_list(arg) for arg in args]
        l_kwargs = {key: _to_list(value) for key, value in kwargs.items()}
        return function(*l_args, **l_kwargs)
    return l_function
|
Decorator for a function that converts all arguments to lists.
:param function: target function
:return: target function with only lists as parameters
|
def find_by_id(self, story, params=None, **options):
    """Returns the full record for a single story.
    Parameters
    ----------
    story : {Id} Globally unique identifier for the story.
    [params] : {Object} Parameters for the request
    """
    # Use None instead of a mutable default dict, which would be shared
    # across all calls.
    if params is None:
        params = {}
    path = "/stories/%s" % (story)
    return self.client.get(path, params, **options)
|
Returns the full record for a single story.
Parameters
----------
story : {Id} Globally unique identifier for the story.
[params] : {Object} Parameters for the request
|
def delete_doc_by_id(self, collection, doc_id, **kwargs):
    """
    :param str collection: The name of the collection for the request
    :param str doc_id: ID of the document to be deleted; '*' deletes
        everything.
    Deletes items from Solr by issuing a delete-by-query on the ID. ::
        >>> solr.delete_doc_by_id('SolrClient_unittest','changeme')
    """
    # Quote IDs containing spaces so the query stays a single term.
    query_id = '"{}"'.format(doc_id) if ' ' in doc_id else doc_id
    payload = {"delete": {"query": 'id:{}'.format(query_id)}}
    resp, con_inf = self.transport.send_request(method='POST',
                                                endpoint='update',
                                                collection=collection,
                                                data=json.dumps(payload),
                                                **kwargs)
    return resp
|
:param str collection: The name of the collection for the request
:param str id: ID of the document to be deleted. Can specify '*' to delete everything.
Deletes items from Solr based on the ID. ::
>>> solr.delete_doc_by_id('SolrClient_unittest','changeme')
|
def bel_process_belrdf():
    """Process BEL RDF and return INDRA Statements."""
    # CORS preflight: reply with an empty body.
    if request.method == 'OPTIONS':
        return {}
    raw_body = request.body.read().decode('utf-8')
    payload = json.loads(raw_body)
    processor = bel.process_belrdf(payload.get('belrdf'))
    return _stmts_from_proc(processor)
|
Process BEL RDF and return INDRA Statements.
|
def version():
    """Display full version information."""
    # Local CLI and API versions first.
    click.echo('Tower CLI %s' % __version__)
    click.echo('API %s' % CUR_API_VERSION)
    # Then ask the Tower/AWX server for its configuration; a connection
    # failure is surfaced as a CLI error.
    try:
        r = client.get('/config/')
    except RequestException as ex:
        raise exc.TowerCLIError('Could not connect to Ansible Tower.\n%s' %
                                six.text_type(ex))
    config = r.json()
    # An 'open' license means the server is AWX rather than Ansible Tower.
    license_type = config.get('license_info', {}).get('license_type', 'open')
    server_type = 'AWX' if license_type == 'open' else 'Ansible Tower'
    click.echo('%s %s' % (server_type, config['version']))
    # Print out Ansible version of server
    click.echo('Ansible %s' % config['ansible_version'])
|
Display full version information.
|
def _set_link_local_route_oif_type(self, v, load=False):
    """
    Setter method for link_local_route_oif_type, mapped from YANG variable /rbridge_id/vrf/address_family/ipv6/unicast/ipv6/route/link_local_static_route_nh/link_local_route_oif_type (enumeration)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_link_local_route_oif_type is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_link_local_route_oif_type() directly.
    """
    # Generated (pyangbind-style) setter: wraps the value in a YANGDynClass
    # restricted to the interface-type enumeration, or raises ValueError.
    # Keys of an instantiated list entry may not be reassigned directly.
    parent = getattr(self, "_parent", None)
    if parent is not None and load is False:
        raise AttributeError("Cannot set keys directly when" +
                             " within an instantiated list")
    # Unwrap a previously-wrapped value to its underlying type first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'management': {'value': 3}, u've': {'value': 4}, u'fortygigabitethernet': {'value': 6}, u'gigabitethernet': {'value': 2}, u'tengigabitethernet': {'value': 1}, u'hundredgigabitethernet': {'value': 7}, u'null': {'value': 5}},), is_leaf=True, yang_name="link-local-route-oif-type", rest_name="link-local-route-oif-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Outgoing interface type'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='enumeration', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """link_local_route_oif_type must be of a type compatible with enumeration""",
            'defined-type': "brocade-ipv6-rtm:enumeration",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'management': {'value': 3}, u've': {'value': 4}, u'fortygigabitethernet': {'value': 6}, u'gigabitethernet': {'value': 2}, u'tengigabitethernet': {'value': 1}, u'hundredgigabitethernet': {'value': 7}, u'null': {'value': 5}},), is_leaf=True, yang_name="link-local-route-oif-type", rest_name="link-local-route-oif-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Outgoing interface type'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='enumeration', is_config=True)""",
        })
    self.__link_local_route_oif_type = t
    # Notify the containing object that one of its members changed.
    if hasattr(self, '_set'):
        self._set()
|
Setter method for link_local_route_oif_type, mapped from YANG variable /rbridge_id/vrf/address_family/ipv6/unicast/ipv6/route/link_local_static_route_nh/link_local_route_oif_type (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_link_local_route_oif_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_link_local_route_oif_type() directly.
|
def number_to_dp(number: Optional[float],
                 dp: int,
                 default: Optional[str] = "",
                 en_dash_for_minus: bool = True) -> str:
    """
    Format *number* to ``dp`` decimal places, optionally rendering minus
    signs as a UTF-8 en dash. ``None`` maps to *default*; infinities map
    to the infinity symbol.
    """
    if number is None:
        return default
    if number == float("inf"):
        return u"∞"
    if number == float("-inf"):
        formatted = u"-∞"
    else:
        formatted = u"{:.{precision}f}".format(number, precision=dp)
    if not en_dash_for_minus:
        return formatted
    return formatted.replace("-", u"–")  # hyphen becomes en dash for minus sign
|
Format number to ``dp`` decimal places, optionally using a UTF-8 en dash
for minus signs.
|
def refresh_information(self, accept=MEDIA_TYPE_TAXII_V20):
    """Update the properties of this API Root.
    This invokes the ``Get API Root Information`` endpoint.
    """
    response = self._conn.get(self.url, headers={"Accept": accept})
    # Keep the raw response around before unpacking it into attributes.
    self.__raw = response
    self._populate_fields(**response)
    self._loaded_information = True
|
Update the properties of this API Root.
This invokes the ``Get API Root Information`` endpoint.
|
def _wr_ver_n_key(self, fout_txt, verbose):
"""Write GO DAG version and key indicating presence of GO ID in a list."""
with open(fout_txt, 'w') as prt:
self._prt_ver_n_key(prt, verbose)
print(' WROTE: {TXT}'.format(TXT=fout_txt))
|
Write GO DAG version and key indicating presence of GO ID in a list.
|
def pad_chunk_columns(chunk):
    """Given a set of items to be inserted, ensure every record carries the
    same column set by filling missing columns with ``None``.

    Records are modified in place; the same list is returned.
    """
    all_columns = set()
    for record in chunk:
        all_columns.update(record.keys())
    for record in chunk:
        for column in all_columns.difference(record):
            record[column] = None
    return chunk
|
Given a set of items to be inserted, make sure they all have the
same columns by padding columns with None if they are missing.
|
def inheritsFrom(self, target_name):
    ''' Return true if this target inherits from the named target (directly
        or indirectly. Also returns true if this target is the named
        target. Otherwise return false.
    '''
    for t in self.hierarchy:
        # The None-guard must cover BOTH clauses: the original's
        # `t and A or B` bound as `(t and A) or B`, so a None entry in
        # the hierarchy crashed on t.description in the second clause.
        if t and (t.getName() == target_name or
                  target_name in t.description.get('inherits', {})):
            return True
    return False
|
Return true if this target inherits from the named target (directly
or indirectly. Also returns true if this target is the named
target. Otherwise return false.
|
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'results') and self.results is not None:
_dict['results'] = [x._to_dict() for x in self.results]
if hasattr(self, 'count') and self.count is not None:
_dict['count'] = self.count
return _dict
|
Return a json dictionary representing this model.
|
def IsAllSpent(self):
    """
    Flag indicating if all balance is spent.
    Returns:
        bool: True when no item is still in the Confirmed state.
    """
    return all(item != CoinState.Confirmed for item in self.Items)
|
Flag indicating if all balance is spent.
Returns:
bool:
|
def _parse_kexgss_continue(self, m):
    """
    Parse the SSH2_MSG_KEXGSS_CONTINUE message.
    :param `.Message` m: The content of the SSH2_MSG_KEXGSS_CONTINUE
        message
    """
    if self.transport.server_mode:
        # Nothing to do on the server side for a CONTINUE message.
        return
    # Client side: feed the server token into the GSS context and send
    # the next token back in a new CONTINUE message.
    srv_token = m.get_string()
    reply = Message()
    reply.add_byte(c_MSG_KEXGSS_CONTINUE)
    reply.add_string(
        self.kexgss.ssh_init_sec_context(
            target=self.gss_host, recv_token=srv_token
        )
    )
    self.transport.send_message(reply)
    self.transport._expect_packet(
        MSG_KEXGSS_CONTINUE, MSG_KEXGSS_COMPLETE, MSG_KEXGSS_ERROR
    )
|
Parse the SSH2_MSG_KEXGSS_CONTINUE message.
:param `.Message` m: The content of the SSH2_MSG_KEXGSS_CONTINUE
message
|
def db(self):
    """Return the correct KV store for this execution (created lazily)."""
    if self._db is not None:
        return self._db
    db_type = self.tcex.default_args.tc_playbook_db_type
    if db_type == 'Redis':
        from .tcex_redis import TcExRedis
        self._db = TcExRedis(
            self.tcex.default_args.tc_playbook_db_path,
            self.tcex.default_args.tc_playbook_db_port,
            self.tcex.default_args.tc_playbook_db_context,
        )
    elif db_type == 'TCKeyValueAPI':
        from .tcex_key_value import TcExKeyValue
        self._db = TcExKeyValue(self.tcex)
    else:
        raise RuntimeError(u'Invalid DB Type: ({})'.format(db_type))
    return self._db
|
Return the correct KV store for this execution.
|
def group_membership_show(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/group_memberships#show-membership"
api_path = "/api/v2/group_memberships/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs)
|
https://developer.zendesk.com/rest_api/docs/core/group_memberships#show-membership
|
def relabel(self, column_label, new_label):
    """Changes the label(s) of column(s) specified by ``column_label`` to
    labels in ``new_label``.
    Args:
        ``column_label`` -- (single str or array of str) The label(s) of
            columns to be changed to ``new_label``.
        ``new_label`` -- (single str or array of str): The label name(s)
            of columns to replace ``column_label``.
    Raises:
        ``ValueError`` -- if ``column_label`` is not in table, or if
            ``column_label`` and ``new_label`` are not of equal length.
        ``TypeError`` -- if ``column_label`` and/or ``new_label`` is not
            ``str``.
    Returns:
        Original table with ``new_label`` in place of ``column_label``.
    >>> table = Table().with_columns(
    ...     'points', make_array(1, 2, 3),
    ...     'id',     make_array(12345, 123, 5123))
    >>> table.relabel('id', 'yolo')
    points | yolo
    1      | 12,345
    2      | 123
    3      | 5,123
    >>> table.relabel(make_array('points', 'yolo'),
    ...   make_array('red', 'blue'))
    red  | blue
    1    | 12,345
    2    | 123
    3    | 5,123
    >>> table.relabel(make_array('red', 'green', 'blue'),
    ...   make_array('cyan', 'magenta', 'yellow', 'key'))
    Traceback (most recent call last):
        ...
    ValueError: Invalid arguments. column_label and new_label must be of equal length.
    """
    if isinstance(column_label, numbers.Integral):
        column_label = self._as_label(column_label)
    if isinstance(column_label, str) and isinstance(new_label, str):
        column_label, new_label = [column_label], [new_label]
    if len(column_label) != len(new_label):
        raise ValueError('Invalid arguments. column_label and new_label '
            'must be of equal length.')
    old_to_new = dict(zip(column_label, new_label)) # maps old labels to new ones
    for label in column_label:
        if not (label in self.labels):
            raise ValueError('Invalid labels. Column labels must '
                'already exist in table in order to be replaced.')
    rewrite = lambda s: old_to_new[s] if s in old_to_new else s
    columns = [(rewrite(s), c) for s, c in self._columns.items()]
    self._columns = collections.OrderedDict(columns)
    # Iterate over a snapshot: the loop body pops from and inserts into
    # self._formats, and mutating a dict while iterating it directly is
    # undefined (the original iterated self._formats itself).
    for label in list(self._formats):
        # TODO(denero) Error when old and new columns share a name
        if label in column_label:
            formatter = self._formats.pop(label)
            self._formats[old_to_new[label]] = formatter
    return self
|
Changes the label(s) of column(s) specified by ``column_label`` to
labels in ``new_label``.
Args:
``column_label`` -- (single str or array of str) The label(s) of
columns to be changed to ``new_label``.
``new_label`` -- (single str or array of str): The label name(s)
of columns to replace ``column_label``.
Raises:
``ValueError`` -- if ``column_label`` is not in table, or if
``column_label`` and ``new_label`` are not of equal length.
``TypeError`` -- if ``column_label`` and/or ``new_label`` is not
``str``.
Returns:
Original table with ``new_label`` in place of ``column_label``.
>>> table = Table().with_columns(
... 'points', make_array(1, 2, 3),
... 'id', make_array(12345, 123, 5123))
>>> table.relabel('id', 'yolo')
points | yolo
1 | 12,345
2 | 123
3 | 5,123
>>> table.relabel(make_array('points', 'yolo'),
... make_array('red', 'blue'))
red | blue
1 | 12,345
2 | 123
3 | 5,123
>>> table.relabel(make_array('red', 'green', 'blue'),
... make_array('cyan', 'magenta', 'yellow', 'key'))
Traceback (most recent call last):
...
ValueError: Invalid arguments. column_label and new_label must be of equal length.
|
def get_events(self):
    """Return the events that have occurred since the last call.
    This method should be called regularly to get all occurring
    events. There are three different Event types/classes
    which can be returned:
    - DeviceStateChangedEvent, if any device changed it's state
      due to an applied action or just because of other reasons
    - CommandExecutionStateChangedEvent, a executed command goes
      through several phases which can be followed
    - ExecutionStateChangedEvent, ******** todo
    :return: an array of Events or empty array
    :rtype: list
    raises ValueError in case of protocol issues
    :Seealso:
        - apply_actions
        - launch_action_group
        - get_history
    """
    header = BASE_HEADERS.copy()
    header['Cookie'] = self.__cookie
    request = requests.post(BASE_URL + 'getEvents',
                            headers=header,
                            timeout=10)
    if request.status_code != 200:
        # Session expired: log in again and retry, RETURNING the retried
        # call's result (the original discarded it and returned None).
        self.__logged_in = False
        self.login()
        return self.get_events()
    try:
        result = request.json()
    except ValueError as error:
        # str(error): concatenating the exception object itself raised a
        # TypeError in the original, masking the real protocol error.
        raise Exception(
            "Not a valid result for getEvent," +
            " protocol error: " + str(error))
    return self._get_events(result)
|
Return a set of events.
Which have occurred since the last call of this method.
This method should be called regularly to get all occurring
Events. There are three different Event types/classes
which can be returned:
- DeviceStateChangedEvent, if any device changed it's state
due to an applied action or just because of other reasons
- CommandExecutionStateChangedEvent, a executed command goes
through several phases which can be followed
- ExecutionStateChangedEvent, ******** todo
:return: an array of Events or empty array
:rtype: list
raises ValueError in case of protocol issues
:Seealso:
- apply_actions
- launch_action_group
- get_history
|
def confirm_reservation(self, username, domain, password, email=None):
    """Confirm a reservation for a username.
    The default implementation just calls :py:func:`~xmpp_backends.base.XmppBackendBase.set_password` and
    optionally :py:func:`~xmpp_backends.base.XmppBackendBase.set_email`.
    """
    self.set_password(username=username, domain=domain, password=password)
    if email is None:
        return
    self.set_email(username=username, domain=domain, email=email)
|
Confirm a reservation for a username.
The default implementation just calls :py:func:`~xmpp_backends.base.XmppBackendBase.set_password` and
optionally :py:func:`~xmpp_backends.base.XmppBackendBase.set_email`.
|
def _dict_to_object(desired_type: Type[T], contents_dict: Dict[str, Any], logger: Logger,
                    options: Dict[str, Dict[str, Any]], conversion_finder: ConversionFinder = None,
                    is_dict_of_dicts: bool = False) -> T:
    """
    Utility method to create an object from a dictionary of constructor arguments. Constructor arguments that dont have
    the correct type are intelligently converted if possible
    :param desired_type: the type of object to build
    :param contents_dict: maps constructor argument names to (possibly unconverted) values
    :param logger: logger used for conversion diagnostics
    :param options: parsing/conversion options passed through to converters
    :param conversion_finder: used to locate converters for mistyped attribute values
    :param is_dict_of_dicts: when True, each attribute value is itself a dict to recurse into
    :return: an instance of desired_type
    """
    # collect pep-484 information in the constructor to be able to understand what is required
    constructor_args_types_and_opt = get_constructor_attributes_types(desired_type)
    try:
        # for each attribute, convert the types of its parsed values if required
        dict_for_init = dict()
        for attr_name, provided_attr_value in contents_dict.items():
            # check if this attribute name is required by the constructor
            if attr_name in constructor_args_types_and_opt.keys():
                # check the theoretical type wanted by the constructor
                attr_type_required = constructor_args_types_and_opt[attr_name][0]
                # resolve forward references
                attr_type_required = resolve_forward_ref(attr_type_required)
                if not is_dict_of_dicts:
                    if is_valid_pep484_type_hint(attr_type_required):
                        # this will not fail if type information is not present;the attribute will only be used 'as is'
                        full_attr_name = get_pretty_type_str(desired_type) + '.' + attr_name
                        dict_for_init[attr_name] = ConversionFinder.try_convert_value(conversion_finder, full_attr_name,
                                                                                     provided_attr_value,
                                                                                     attr_type_required, logger,
                                                                                     options)
                    else:
                        warn("Constructor for type <{t}> has no valid PEP484 Type hint for attribute {att}, trying to "
                             "use the parsed value in the dict directly".format(t=get_pretty_type_str(desired_type),
                                                                                att=attr_name))
                        dict_for_init[attr_name] = provided_attr_value
                else:
                    # in that mode, the attribute value itself is a dict, so the attribute needs to be built from that
                    # dict first
                    if isinstance(provided_attr_value, dict):
                        # recurse : try to build this attribute from the dictionary provided. We need to know the type
                        # for this otherwise we wont be able to call the constructor :)
                        if (attr_type_required is None) or (attr_type_required is Parameter.empty):
                            raise TypeInformationRequiredError.create_for_object_attributes(desired_type, attr_name,
                                                                                           attr_type_required)
                        elif not is_valid_pep484_type_hint(attr_type_required):
                            raise InvalidPEP484TypeHint.create_for_object_attributes(desired_type, attr_name,
                                                                                     attr_type_required)
                        else:
                            # we can build the attribute from the sub-dict
                            dict_for_init[attr_name] = dict_to_object(attr_type_required, provided_attr_value,
                                                                      logger, options,
                                                                      conversion_finder=conversion_finder)
                    else:
                        raise ValueError('Error while trying to build object of type ' + str(desired_type) + ' from a '
                                         'dictionary of dictionaries. Entry \'' + attr_name + '\' is not a dictionary')
            else:
                # Use ==, not `is`: identity comparison of string literals is
                # an implementation detail (and a SyntaxWarning on modern CPython).
                if is_dict_of_dicts and attr_name == 'DEFAULT':
                    # -- tolerate but ignore - this is probably due to a configparser
                    # warning('Property name \'' + attr_name + '\' is not an attribute of the object constructor. <'
                    #         + get_pretty_type_str(desired_type) + '> constructor attributes are : '
                    #         + list(set(constructor_args_types.keys()) - {'self'}) + '. However it is named DEFAULT')
                    pass
                else:
                    # the dictionary entry does not correspond to a valid attribute of the object
                    raise InvalidAttributeNameForConstructorError.create(desired_type,
                                                                         list(set(constructor_args_types_and_opt.keys()) - {'self'}),
                                                                         attr_name)
        # create the object using its constructor
        try:
            return desired_type(**dict_for_init)
        except Exception as e:
            # Wrap into an Exception
            raise ObjectInstantiationException.create(desired_type, dict_for_init, e)
    except TypeError as e:
        raise CaughtTypeErrorDuringInstantiation.create(desired_type, contents_dict, e)
|
Utility method to create an object from a dictionary of constructor arguments. Constructor arguments that dont have
the correct type are intelligently converted if possible
:param desired_type:
:param contents_dict:
:param logger:
:param options:
:param conversion_finder:
:param is_dict_of_dicts:
:return:
|
def set_trace(context):
    """
    Start a pdb set_trace inside of the template with the context available as
    'context'. Uses ipdb if available.

    Prints the names available in the current template context, exposes them
    to the debugger session, and returns an empty string so nothing leaks
    into the rendered template output.
    """
    try:
        import ipdb as pdb
    except ImportError:
        import pdb
        print("For best results, pip install ipdb.")
    print("Variables that are available in the current context:")
    # Kept as a local so it is callable from inside the debugger session.
    render = lambda s: template.Template(s).render(context)
    availables = get_variables(context)
    pprint(availables)
    print('Type `availables` to show this list.')
    print('Type <variable_name> to access one.')
    print('Use render("template string") to test template rendering')
    # Cram context variables into the local scope
    # NOTE(review): assigning through locals() inside a function is not
    # guaranteed to create real local variables on CPython -- confirm this
    # actually exposes the names under the targeted interpreter.
    for var in availables:
        locals()[var] = context[var]
    pdb.set_trace()
    return ''
|
Start a pdb set_trace inside of the template with the context available as
'context'. Uses ipdb if available.
|
def fix_version(context):
    """Rewrite the ``version`` line of ``metadata.txt``.

    Reads the file, replaces every line starting with ``version`` by
    ``version=<new_version>``, and writes the file back.  Relevant context
    dict item for both prerelease and postrelease: ``new_version``.
    """
    if not prerequisites_ok():
        return
    contents = codecs.open('metadata.txt', 'rU', 'utf-8').readlines()
    updated = []
    for line in contents:
        if line.startswith('version'):
            updated.append('version=%s\n' % context['new_version'])
            time.sleep(1)
        else:
            updated.append(line)
    codecs.open('metadata.txt', 'w', 'utf-8').writelines(updated)
|
Fix the version in metadata.txt
Relevant context dict item for both prerelease and postrelease:
``new_version``.
|
def _remove(self, obj):
"""Python 2.4 compatibility."""
for idx, item in enumerate(self._queue):
if item == obj:
del self._queue[idx]
break
|
Python 2.4 compatibility.
|
def conditionally_create_profile(role_name, service_type):
    """
    Check that there is a 1:1 correspondence with an InstanceProfile having the same name
    as the role, and that the role is contained in it. Create InstanceProfile and attach to role if needed.

    Honors CONTEXT.commit: when falsy this is a dry run and no mutating AWS
    calls are issued (creation/attachment are only printed).

    :param role_name: name shared by the IAM role and its instance profile
    :param service_type: service type string; only types listed in
        INSTANCE_PROFILE_SERVICE_TYPES are given an instance profile
    """
    # make instance profile if this service_type gets an instance profile
    if service_type not in INSTANCE_PROFILE_SERVICE_TYPES:
        print_if_verbose("service type: {} not eligible for instance profile".format(service_type))
        return
    instance_profile = get_instance_profile(role_name)
    if not instance_profile:
        print("Create instance profile: {}".format(role_name))
        if CONTEXT.commit:
            try:
                instance_profile = CLIENTS["iam"].create_instance_profile(InstanceProfileName=role_name)
            except ClientError as error:
                fail("Exception creating instance profile named: {} {}".format(role_name, sys.exc_info(), error))
    else:
        print_if_verbose("instance profile already exists: {}".format(role_name))
    # attach instance profile to role; test 'if instance_profile' because we drop through to here in a dry run
    if instance_profile and not instance_profile_contains_role(instance_profile, role_name):
        print("Add role: {} to instance profile: {}".format(role_name, role_name))
        if CONTEXT.commit:
            try:
                CLIENTS["iam"].add_role_to_instance_profile(InstanceProfileName=role_name, RoleName=role_name)
            except ClientError as error:
                fail("Exception adding role to instance profile: {} {}".format(role_name, sys.exc_info(), error))
    else:
        print_if_verbose("instance profile already contains role: {}".format(role_name))
|
Check that there is a 1:1 correspondence with an InstanceProfile having the same name
as the role, and that the role is contained in it. Create InstanceProfile and attach to role if needed.
|
def build_dated_queryset(self):
    """
    Build pages for all years in the queryset.

    Fetches the dated queryset, derives the list of years it spans, and
    delegates the per-year page building to ``build_year``.
    """
    qs = self.get_dated_queryset()
    years = self.get_date_list(qs)
    # Plain loop instead of a list comprehension: the comprehension built a
    # throwaway list purely for its side effects.
    for dt in years:
        self.build_year(dt)
|
Build pages for all years in the queryset.
|
def init_logger(self):
    """Configure the root logger and attach every enabled handler.

    The logger itself accepts all records (DEBUG); filtering is done per
    handler.  One handler is created per destination whose minimum level
    is configured: console, rotating file, syslog and email.
    """
    # Every record reaches this logger; the handlers filter by level.
    self.logger.setLevel(logging.DEBUG)
    self.logger.propagate = False
    # Logging to console
    if self.min_log_level_to_print:
        self._create_handler(logging.StreamHandler,
                             self.min_log_level_to_print)
    # Logging to file
    if self.min_log_level_to_save:
        self._create_handler(logging.handlers.TimedRotatingFileHandler,
                             self.min_log_level_to_save)
    # Logging to syslog
    if self.min_log_level_to_syslog:
        self._create_handler(logging.handlers.SysLogHandler,
                             self.min_log_level_to_syslog)
    # Logging to email
    if self.min_log_level_to_mail:
        self._create_handler(AlkiviEmailHandler,
                             self.min_log_level_to_mail)
    return
|
Create configuration for the root logger.
|
def basicConfig(level=logging.WARNING, transient_level=logging.NOTSET):
    """Install a transient stderr handler on the root logger.

    Replica of ``logging.basicConfig`` that wires up a
    ``TransientStreamHandler`` instead of a plain stream handler.
    """
    # Root must let everything through so the handler can do the filtering.
    logging.root.setLevel(transient_level)
    handler = TransientStreamHandler(level=level)
    handler.setFormatter(logging.Formatter(
        "%(asctime)s [%(levelname)s] [%(name)s:%(lineno)d] %(message)s"))
    logging.root.addHandler(handler)
|
Shortcut for setting up transient logging
I am a replica of ``logging.basicConfig`` which installs a
transient logging handler to stderr.
|
def create(self, user_id, name, department=None, position=None,
           mobile=None, gender=0, tel=None, email=None,
           weixin_id=None, extattr=None):
    """
    Create a member.

    https://work.weixin.qq.com/api/doc#90000/90135/90195
    """
    # optionaldict silently drops entries whose value is None.
    payload = optionaldict()
    for key, value in (
        ('userid', user_id),
        ('name', name),
        ('gender', gender),
        ('department', department),
        ('position', position),
        ('mobile', mobile),
        ('tel', tel),
        ('email', email),
        ('weixinid', weixin_id),
        ('extattr', extattr),
    ):
        payload[key] = value
    return self._post(
        'user/create',
        data=payload
    )
|
εε»Ίζε
https://work.weixin.qq.com/api/doc#90000/90135/90195
|
def get_api_versions(call=None, kwargs=None):  # pylint: disable=unused-argument
    '''
    Get a resource type api versions

    Queries the Azure Resource Manager provider named by
    ``kwargs['resource_provider']`` and returns the API versions supported
    for ``kwargs['resource_type']``.  Returns an empty list when the lookup
    fails with a CloudError.

    :param call: unused; present for salt-cloud's calling convention.
    :param kwargs: must contain ``resource_provider`` and ``resource_type``.
    :raises SaltCloudSystemExit: when a required kwarg is missing.
    '''
    if kwargs is None:
        kwargs = {}
    if 'resource_provider' not in kwargs:
        raise SaltCloudSystemExit(
            'A resource_provider must be specified'
        )
    if 'resource_type' not in kwargs:
        raise SaltCloudSystemExit(
            'A resource_type must be specified'
        )
    api_versions = []
    try:
        resconn = get_conn(client_type='resource')
        provider_query = resconn.providers.get(
            resource_provider_namespace=kwargs['resource_provider']
        )
        # A provider exposes many resource types; pick out the requested one.
        for resource in provider_query.resource_types:
            if six.text_type(resource.resource_type) == kwargs['resource_type']:
                resource_dict = resource.as_dict()
                api_versions = resource_dict['api_versions']
    except CloudError as exc:
        # Log through the azurearm helper and fall through to return [].
        __utils__['azurearm.log_cloud_error']('resource', exc.message)
    return api_versions
|
Get a resource type api versions
|
def get_transfer_role(chain_state: ChainState, secrethash: SecretHash) -> Optional[str]:
    """
    Return 'initiator', 'mediator' or 'target' depending on the role this
    node plays in the transfer identified by *secrethash*.  Returns None
    when no transfer task is registered for the given secret hash.
    """
    task_map = chain_state.payment_mapping.secrethashes_to_task
    task = task_map.get(secrethash)
    return role_from_transfer_task(task) if task else None
|
Returns 'initiator', 'mediator' or 'target' to signify the role the node has
in a transfer. If a transfer task is not found for the secrethash then the
function returns None
|
def compile_highstate(self):
    '''
    Compile and return the highstate; on any failure return the collected
    list of errors instead.
    '''
    errors = []
    top = self.get_top()
    errors.extend(self.verify_tops(top))
    matches = self.top_matches(top)
    high, render_errors = self.render_highstate(matches)
    errors.extend(render_errors)
    if errors:
        return errors
    return high
|
Return just the highstate or the errors
|
def eeg_microstates_plot(method, path="", extension=".png", show_sensors_position=False, show_sensors_name=False, plot=True, save=True, dpi=150, contours=0, colorbar=False, separate=False):
    """
    Plot the microstates.

    Creates one topographic map per microstate (segments labelled "Bad" are
    skipped), saves each map to disk, and optionally assembles them into a
    single combined montage image.

    Parameters
    ----------
    method : dict
        Result dict holding at least ``data``, ``microstates`` (or
        ``microstates_good_fit``), ``raw.info_example`` and the naming
        metadata used to build the output file names.
    path : str
        Output directory prefix for the saved images.
    extension : str
        Image file extension (e.g. ".png").
    save : bool
        When truthy, also write the combined montage image.
    separate : bool
        When falsy (or when ``save`` is falsy), delete the per-microstate
        images after use.

    Returns
    -------
    list
        The matplotlib figures, one per (non-"Bad") microstate.
    """
    # Generate and store figures
    figures = []
    names = []
    # Prefer the refit ("good fit") microstates when available.
    try:
        microstates = method["microstates_good_fit"]
    except KeyError:
        microstates = method["microstates"]
    # Create individual plot for each microstate
    for microstate in set(microstates):
        if microstate != "Bad":
            values = np.mean(method["data"][np.where(microstates == microstate)], axis=0)
            values = np.array(values, ndmin=2).T
            evoked = mne.EvokedArray(values, method["raw.info_example"], 0)
            fig = evoked.plot_topomap(times=0, title=microstate, size=6, contours=contours, time_format="", show=plot, colorbar=colorbar, show_names=show_sensors_name, sensors=show_sensors_position)
            figures.append(fig)
            # Save separate figures
            name = path + "microstate_%s_%s%s%s_%s%i_%s%s" %(microstate, method["data_scale"], method["data_normalize"], method["data_smoothing"], method["feature_reduction_method"], method["n_features"], method["clustering_method"], extension)
            fig.savefig(name, dpi=dpi)
            names.append(name)
    # Save combined plot
    if save:
        # Use the first image to determine the tile size of the montage.
        image_template = PIL.Image.open(names[0])
        X, Y = image_template.size
        image_template.close()
        combined = PIL.Image.new('RGB', (int(X*len(set(microstates))/2), int( Y*len(set(microstates))/2)))
        fig = 0
        for x in np.arange(0, len(set(microstates))/2*int(X), int(X)):
            for y in np.arange(0, len(set(microstates))/2*int(Y), int(Y)):
                # The grid may have more cells than images: stop pasting when
                # we run out instead of swallowing every error (the previous
                # bare ``except`` hid real failures).
                if fig < len(names):
                    newfig = PIL.Image.open(names[fig])
                    combined.paste(newfig, (int(x), int(y)))
                    newfig.close()
                fig += 1
        combined_name = path + "microstates_%s%s%s_%s%i_%s%s" %(method["data_scale"], method["data_normalize"], method["data_smoothing"], method["feature_reduction_method"], method["n_features"], method["clustering_method"], extension)
        combined.save(combined_name)
    # Delete the separate plots when only the combined image is wanted
    if not separate or not save:
        for name in names:
            os.remove(name)
    return figures
|
Plot the microstates.
|
def analytic_file(self, new_status, old_status=None):
    """
    Generate :code:`Analytic/*` files based on the given old and
    new statuses.

    The target subdirectory/filename is chosen by matching *new_status*
    against the global PyFunceble status lists (up, potentially_up,
    suspicious, otherwise potentially_down), the matching hosts files are
    regenerated, and a line for the tested subject is appended via Prints.
    Only runs when a file is being tested (PyFunceble.INTERN["file_to_test"]).

    :param new_status: The new status of the domain.
    :type new_status: str
    :param old_status: The old status of the domain.
    :type old_status: str
    """
    if not old_status:
        # The old status is not given.
        # We set the old status as the one given globally.
        old_status = self.domain_status
    if "file_to_test" in PyFunceble.INTERN and PyFunceble.INTERN["file_to_test"]:
        # We are not testing as an imported module.
        # We partially construct the path to the file to write/print.
        output = (
            self.output_parent_dir
            + PyFunceble.OUTPUTS["analytic"]["directories"]["parent"]
            + "%s%s"
        )
        if new_status.lower() in PyFunceble.STATUS["list"]["up"]:
            # The new status is in the list of up status.
            # We complete the output directory.
            output = output % (
                PyFunceble.OUTPUTS["analytic"]["directories"]["up"],
                PyFunceble.OUTPUTS["analytic"]["filenames"]["up"],
            )
            # We generate the hosts file.
            Generate("HTTP_Active").info_files()
        elif new_status.lower() in PyFunceble.STATUS["list"]["potentially_up"]:
            # The new status is in the list of down status.
            # We complete the output directory.
            output = output % (
                PyFunceble.OUTPUTS["analytic"]["directories"]["potentially_up"],
                PyFunceble.OUTPUTS["analytic"]["filenames"]["potentially_up"],
            )
            # We generate the hosts file.
            Generate("potentially_up").info_files()
        elif new_status.lower() in PyFunceble.STATUS["list"]["suspicious"]:
            # The new status is in the list of suspicious status.
            # We complete the output directory.
            output = output % (
                PyFunceble.OUTPUTS["analytic"]["directories"]["suspicious"],
                PyFunceble.OUTPUTS["analytic"]["filenames"]["suspicious"],
            )
            # We generate the hosts file.
            Generate("suspicious").info_files()
        else:
            # The new status is in the list of up and down status.
            # We complete the output directory.
            output = output % (
                PyFunceble.OUTPUTS["analytic"]["directories"]["potentially_down"],
                PyFunceble.OUTPUTS["analytic"]["filenames"]["potentially_down"],
            )
            # We generate the hosts files.
            Generate("potentially_down").info_files()
        # We print the information on file.
        Prints(
            [
                self.tested,
                old_status,
                PyFunceble.INTERN["http_code"],
                PyFunceble.CURRENT_TIME,
            ],
            "HTTP",
            output,
            True,
        ).data()
|
Generate :code:`Analytic/*` files based on the given old and
new statuses.
:param new_status: The new status of the domain.
:type new_status: str
:param old_status: The old status of the domain.
:type old_status: str
|
def com_daltonmaag_check_ufolint(font):
    """Run ufolint on UFO source directory.

    Yields FAIL with ufolint's output when the lint fails, ERROR when the
    ufolint executable cannot be launched, and PASS otherwise.
    """
    import subprocess
    try:
        subprocess.check_output(["ufolint", font], stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as error:
        message = ("ufolint failed the UFO source. Output follows :"
                   "\n\n{}\n").format(error.output.decode())
        yield FAIL, message
    except OSError:
        yield ERROR, "ufolint is not available!"
    else:
        yield PASS, "ufolint passed the UFO source."
|
Run ufolint on UFO source directory.
|
def calculate_query_times(**kwargs):
    """
    Calculates aggregate query times from all iteration times

    Kwargs:
        total_times(list): List of total time calculations
        execution_times(list): List of execution_time calculations
        results_iter_times(list): List of results_iter_time calculations
        connect_times(list): List of connect_time calculations

    Returns:
        query_execution(dict): Aggregated query times keyed as
            ``<prefix>_{avg,min,max,85}`` for every series, plus
            ``execution_time_25`` and ``execution_time_std``.
            (Exceptions from missing kwargs propagate to the caller.)
    """
    def _series_stats(prefix, values):
        # Common avg/min/max/85th-percentile block, rounded to one decimal.
        return {
            prefix + "_avg": round(numpy.mean(values), 1),
            prefix + "_min": round(numpy.min(values), 1),
            prefix + "_max": round(numpy.max(values), 1),
            prefix + "_85": round(numpy.percentile(values, 85), 1),
        }

    execution_times = kwargs["execution_times"]
    query_execution = {}
    query_execution.update(_series_stats("total_time", kwargs["total_times"]))
    query_execution.update(_series_stats("execution_time", execution_times))
    # The execution series additionally reports spread statistics.
    query_execution["execution_time_25"] = round(
        numpy.percentile(execution_times, 25), 1
    )
    query_execution["execution_time_std"] = round(
        numpy.std(execution_times), 1
    )
    query_execution.update(_series_stats("connect_time", kwargs["connect_times"]))
    query_execution.update(
        _series_stats("results_iter_time", kwargs["results_iter_times"])
    )
    return query_execution
|
Calculates aggregate query times from all iteration times
Kwargs:
total_times(list): List of total time calculations
execution_times(list): List of execution_time calculations
results_iter_times(list): List of results_iter_time calculations
connect_times(list): List of connect_time calculations
Returns:
query_execution(dict): Query times
False(bool): The query failed. Exception should be logged.
|
def intersection_update(self, other):
    """Keep only the items that are present in both sets.

    @param other: the collection of items with which to update the set
    @type other: Set object
    """
    if not isinstance(other, Set):
        raise ValueError('other must be a Set instance')
    if self is other:
        return
    # Iterate over a snapshot so removing entries does not disturb the
    # iteration over the underlying list.
    snapshot = list(self.items)
    for candidate in snapshot:
        if candidate not in other.items:
            self.items.remove(candidate)
|
Update the set, removing any elements from other which are not
in both sets.
@param other: the collection of items with which to update the set
@type other: Set object
|
def parents(self, node):
    """Retrieve/calculate the set of parents for the given node.

    Nodes without an 'index' entry have no way to look up parents and
    yield an empty list.
    """
    if 'index' not in node:
        return []
    index = node['index']()
    return list(meliaeloader.children(node, index, 'parents'))
|
Retrieve/calculate the set of parents for the given node
|
def init_db(drop_all=False, bind=engine):
    """Initialize the database, optionally dropping existing tables.

    :param drop_all: when True, drop all known tables before re-creating them.
    :param bind: SQLAlchemy engine/connection the DDL is executed against.
    :returns: the module-level session.
    :raises OperationalError: re-raised after printing a hint when password
        authentication for the "dallinger" user failed.
    """
    try:
        if drop_all:
            Base.metadata.drop_all(bind=bind)
        Base.metadata.create_all(bind=bind)
    except OperationalError as err:
        msg = 'password authentication failed for user "dallinger"'
        # NOTE(review): `err.message` is a Python 2 idiom; on Python 3 this
        # attribute lookup itself raises AttributeError -- confirm the
        # supported interpreter / SQLAlchemy version.
        if msg in err.message:
            sys.stderr.write(db_user_warning)
        raise
    return session
|
Initialize the database, optionally dropping existing tables.
|
def choose_database_name(metadata, config):
    """
    Choose the database name to use.

    An explicitly configured name always wins.  Otherwise the name is
    derived from the service name, with a ``_test_db`` suffix while unit
    testing so that a test run can never drop a real database by accident.
    """
    explicit_name = config.database_name
    if explicit_name is not None:
        # we allow -- but do not encourage -- database name configuration
        return explicit_name
    suffix = "test_db" if metadata.testing else "db"
    return f"{metadata.name}_{suffix}"
|
Choose the database name to use.
As a default, databases should be named after the service that uses them. In addition,
database names should be different between unit testing and runtime so that there is
no chance of a unit test dropping a real database by accident.
|
def hashable_to_uuid(hashable_):
    """
    TODO: ensure that python2 and python3 agree on hashes of the same
    information
    Args:
        hashable_ (hashable): hashables are bytes-like objects
           An object that supports the Buffer Protocol, like bytes, bytearray
           or memoryview. Bytes-like objects can be used for various operations
           that expect binary data, such as compression, saving to a binary
           file or sending over a socket. Some operations need the binary data
           to be mutable, in which case not all bytes-like objects can apply.
    Returns:
        UUID: uuid_
    CommandLine:
        python -m utool.util_hash --test-hashable_to_uuid
    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_hash import *  # NOQA
        >>> import utool as ut
        >>> hashables = [
        >>>     'foobar',
        >>>     'foobar'.encode('utf-8'),
        >>>     u'foobar',
        >>>     10,
        >>>     [1, 2, 3],
        >>> ]
        >>> uuids = []
        >>> for hashable_ in hashables:
        >>>     uuid_ = hashable_to_uuid(hashable_)
        >>>     uuids.append(uuid_)
        >>> result = ut.repr4(ut.lmap(str, uuids), strvals=True, nobr=True)
        >>> print(result)
        8843d7f9-2416-211d-e9eb-b963ff4ce281,
        8843d7f9-2416-211d-e9eb-b963ff4ce281,
        8843d7f9-2416-211d-e9eb-b963ff4ce281,
        e864ece8-8880-43b6-7277-c8b2cefe96ad,
        a01eda32-e4e0-b139-3274-e91d1b3e9ecf,
    """
    normalized = _ensure_hashable_bytes(hashable_)
    try:
        sha1_digest = hashlib.sha1(normalized).digest()
    except TypeError:
        print('hashable_ = %r' % (hashable_,))
        print('bytes_ = %r' % (normalized,))
        raise
    # A UUID holds exactly 16 bytes, so truncate the 20-byte SHA-1 digest.
    return uuid.UUID(bytes=sha1_digest[0:16])
|
TODO: ensure that python2 and python3 agree on hashes of the same
information
Args:
hashable_ (hashable): hashables are bytes-like objects
An object that supports the Buffer Protocol, like bytes, bytearray
or memoryview. Bytes-like objects can be used for various operations
that expect binary data, such as compression, saving to a binary
file or sending over a socket. Some operations need the binary data
to be mutable, in which case not all bytes-like objects can apply.
Returns:
UUID: uuid_
CommandLine:
python -m utool.util_hash --test-hashable_to_uuid
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_hash import * # NOQA
>>> import utool as ut
>>> hashables = [
>>> 'foobar',
>>> 'foobar'.encode('utf-8'),
>>> u'foobar',
>>> 10,
>>> [1, 2, 3],
>>> ]
>>> uuids = []
>>> for hashable_ in hashables:
>>> uuid_ = hashable_to_uuid(hashable_)
>>> uuids.append(uuid_)
>>> result = ut.repr4(ut.lmap(str, uuids), strvals=True, nobr=True)
>>> print(result)
8843d7f9-2416-211d-e9eb-b963ff4ce281,
8843d7f9-2416-211d-e9eb-b963ff4ce281,
8843d7f9-2416-211d-e9eb-b963ff4ce281,
e864ece8-8880-43b6-7277-c8b2cefe96ad,
a01eda32-e4e0-b139-3274-e91d1b3e9ecf,
|
def number_observer(t=None, targets=None):
    """
    Return a number observer whose concrete type depends on *t*:
    ``None`` gives a NumberObserver, a number gives a
    FixedIntervalNumberObserver, and an iterable of times gives a
    TimingNumberObserver.

    Parameters
    ----------
    t : float, list or tuple, optional. default None
        A timing of the observation. See above.
    targets : list or tuple, optional. default None
        A list of strings suggesting Species observed.

    Returns
    -------
    obs : NumberObserver, FixedIntervalNumberObserver or TimingNumberObserver
    """
    from ecell4_base.core import NumberObserver, FixedIntervalNumberObserver, TimingNumberObserver
    if t is None:
        return NumberObserver(targets)
    if isinstance(t, numbers.Number):
        return FixedIntervalNumberObserver(t, targets)
    if hasattr(t, '__iter__'):
        if targets is None:
            return TimingNumberObserver(t)
        return TimingNumberObserver(t, targets)
    raise TypeError("An invalid type was given. Either number or iterable is required.")
|
Return a number observer. If t is None, return NumberObserver. If t is a number,
return FixedIntervalNumberObserver. If t is an iterable (a list of numbers), return
TimingNumberObserver.
Parameters
----------
t : float, list or tuple, optional. default None
A timing of the observation. See above.
targets : list or tuple, optional. default None
A list of strings suggesting Species observed.
Returns
-------
obs : NumberObserver, FixedIntervalNumberObserver or TimingNumberObserver
|
def save(self, *args, **kwargs):
    """
    Save the instance, deriving the natural key from the slug first.

    **uid**: :code:`electiontype:{name}`
    """
    self.uid = 'electiontype:{}'.format(self.slug)
    super(ElectionType, self).save(*args, **kwargs)
|
**uid**: :code:`electiontype:{name}`
|
def gen_salt_and_hash(val=None):
    """ Generate a salt & hash

    If no string is provided then a random string will be
    used to hash & referred to as `val`.

    The salt will always be randomly generated & the hash
    will be a sha256 hex value of the `val` & the salt as
    a concatenated string. It follows the guidance here:
    crackstation.net/hashing-security.htm#properhashing

    :param val: str
    :return: tuple of strings (salt, hash)
    """
    if not val:
        val = random_str()
    str_salt = random_str()
    # hashlib.sha256 requires bytes on Python 3; the previous code passed a
    # str and raised TypeError there.  Encoding as UTF-8 keeps the digest
    # identical for ASCII input (NOTE(review): confirm no Python 2 callers
    # pass non-ASCII byte strings).
    str_hash = hashlib.sha256((val + str_salt).encode('utf-8')).hexdigest()
    return str_salt, str_hash
|
Generate a salt & hash
If no string is provided then a random string will be
used to hash & referred to as `val`.
The salt will always be randomly generated & the hash
will be a sha256 hex value of the `val` & the salt as
a concatenated string. It follows the guidance here:
crackstation.net/hashing-security.htm#properhashing
:param val: str
:return: tuple of strings (salt, hash)
|
def rectwv_coeff_add_longslit_model(rectwv_coeff, geometry, debugplot=0):
    """Compute longslit_model coefficients for RectWaveCoeff object.

    For every valid slitlet, fits the variation of each wavelength
    calibration coefficient and each rectification coefficient as a
    polynomial of the slitlet's y0_reference_middle, then evaluates those
    fits to store smoothed "longslit model" coefficients back into each
    slitlet entry.

    Parameters
    ----------
    rectwv_coeff : RectWaveCoeff instance
        Rectification and wavelength calibration coefficients for a
        particular CSU configuration corresponding to a longslit
        observation.
    geometry : TBD
        Passed through to the plotting helper; presumably the window
        geometry for the debug plots -- TODO confirm.
    debugplot : int
        Debugging level for messages and plots. For details see
        'numina.array.display.pause_debugplot.py'.

    Returns
    -------
    rectwv_coeff : RectWaveCoeff instance
        Updated object with longslit_model coefficients computed.
        (The input object is modified in place and also returned.)

    """
    logger = logging.getLogger(__name__)

    # check grism and filter
    grism_name = rectwv_coeff.tags['grism']
    logger.info('Grism: ' + grism_name)
    filter_name = rectwv_coeff.tags['filter']
    logger.info('Filter: ' + filter_name)

    # list of slitlets to be computed
    # (slitlets are numbered 1..EMIR_NBARS; drop the ones flagged missing)
    list_valid_islitlets = list(range(1, EMIR_NBARS + 1))
    for idel in rectwv_coeff.missing_slitlets:
        list_valid_islitlets.remove(idel)
    if abs(debugplot) >= 10:
        print('>>> valid slitlet numbers:\n', list_valid_islitlets)

    # ---

    # check that the CSU configuration corresponds to longslit
    csu_bar_slit_center_list = []
    for islitlet in list_valid_islitlets:
        csu_bar_slit_center_list.append(
            rectwv_coeff.contents[islitlet - 1]['csu_bar_slit_center']
        )
    if abs(debugplot) >= 10:
        logger.debug('Checking csu_bar_slit_center values:')
        summary(np.array(csu_bar_slit_center_list), debug=True)
        pause_debugplot(debugplot)

    # ---

    # polynomial coefficients corresponding to the wavelength calibration

    # step 0: determine poldeg_refined, checking that it is the same for
    # all the slitlets
    poldeg_refined_list = []
    for islitlet in list_valid_islitlets:
        poldeg_refined_list.append(
            len(rectwv_coeff.contents[islitlet - 1]['wpoly_coeff']) - 1
        )
    # remove duplicates
    poldeg_refined_list = list(set(poldeg_refined_list))
    if len(poldeg_refined_list) != 1:
        raise ValueError('Unexpected different poldeg_refined found: ' +
                         str(poldeg_refined_list))
    poldeg_refined = poldeg_refined_list[0]

    # step 1: compute variation of each coefficient as a function of
    # y0_reference_middle of each slitlet
    # NOTE(review): fit degrees (2 here, 5 for the rectification
    # coefficients below) are hard-coded -- confirm they are intentional.
    list_poly = []
    for i in range(poldeg_refined + 1):
        xp = []
        yp = []
        for islitlet in list_valid_islitlets:
            tmp_dict = rectwv_coeff.contents[islitlet - 1]
            wpoly_coeff = tmp_dict['wpoly_coeff']
            if wpoly_coeff is not None:
                xp.append(tmp_dict['y0_reference_middle'])
                yp.append(wpoly_coeff[i])
        poly, yres, reject = polfit_residuals_with_sigma_rejection(
            x=np.array(xp),
            y=np.array(yp),
            deg=2,
            times_sigma_reject=5,
            xlabel='y0_rectified',
            ylabel='coeff[' + str(i) + ']',
            title="Fit to refined wavelength calibration coefficients",
            geometry=geometry,
            debugplot=debugplot
        )
        list_poly.append(poly)

    # step 2: use the variation of each polynomial coefficient with
    # y0_reference_middle to infer the expected wavelength calibration
    # polynomial for each rectifified slitlet
    for islitlet in list_valid_islitlets:
        tmp_dict = rectwv_coeff.contents[islitlet - 1]
        y0_reference_middle = tmp_dict['y0_reference_middle']
        list_new_coeff = []
        for i in range(poldeg_refined + 1):
            new_coeff = list_poly[i](y0_reference_middle)
            list_new_coeff.append(new_coeff)
        tmp_dict['wpoly_coeff_longslit_model'] = list_new_coeff

    # ---

    # rectification transformation coefficients aij and bij

    # step 0: determine order_fmap, checking that it is the same for
    # all the slitlets
    order_fmap_list = []
    for islitlet in list_valid_islitlets:
        order_fmap_list.append(
            rectwv_coeff.contents[islitlet - 1]['ttd_order']
        )
    # remove duplicates
    order_fmap_list = list(set(order_fmap_list))
    if len(order_fmap_list) != 1:
        raise ValueError('Unexpected different order_fmap found')
    order_fmap = order_fmap_list[0]

    # step 1: compute variation of each coefficient as a function of
    # y0_reference_middle of each slitlet
    # (four families: direct/inverse transformations, aij/bij each)
    list_poly_ttd_aij = []
    list_poly_ttd_bij = []
    list_poly_tti_aij = []
    list_poly_tti_bij = []
    ncoef_ttd = ncoef_fmap(order_fmap)
    for i in range(ncoef_ttd):
        xp = []
        yp_ttd_aij = []
        yp_ttd_bij = []
        yp_tti_aij = []
        yp_tti_bij = []
        for islitlet in list_valid_islitlets:
            tmp_dict = rectwv_coeff.contents[islitlet - 1]
            ttd_aij = tmp_dict['ttd_aij']
            ttd_bij = tmp_dict['ttd_bij']
            tti_aij = tmp_dict['tti_aij']
            tti_bij = tmp_dict['tti_bij']
            if ttd_aij is not None:
                xp.append(tmp_dict['y0_reference_middle'])
                yp_ttd_aij.append(ttd_aij[i])
                yp_ttd_bij.append(ttd_bij[i])
                yp_tti_aij.append(tti_aij[i])
                yp_tti_bij.append(tti_bij[i])
        poly, yres, reject = polfit_residuals_with_sigma_rejection(
            x=np.array(xp),
            y=np.array(yp_ttd_aij),
            deg=5,
            times_sigma_reject=5,
            xlabel='y0_rectified',
            ylabel='ttd_aij[' + str(i) + ']',
            geometry=geometry,
            debugplot=debugplot
        )
        list_poly_ttd_aij.append(poly)
        poly, yres, reject = polfit_residuals_with_sigma_rejection(
            x=np.array(xp),
            y=np.array(yp_ttd_bij),
            deg=5,
            times_sigma_reject=5,
            xlabel='y0_rectified',
            ylabel='ttd_bij[' + str(i) + ']',
            geometry=geometry,
            debugplot=debugplot
        )
        list_poly_ttd_bij.append(poly)
        poly, yres, reject = polfit_residuals_with_sigma_rejection(
            x=np.array(xp),
            y=np.array(yp_tti_aij),
            deg=5,
            times_sigma_reject=5,
            xlabel='y0_rectified',
            ylabel='tti_aij[' + str(i) + ']',
            geometry=geometry,
            debugplot=debugplot
        )
        list_poly_tti_aij.append(poly)
        poly, yres, reject = polfit_residuals_with_sigma_rejection(
            x=np.array(xp),
            y=np.array(yp_tti_bij),
            deg=5,
            times_sigma_reject=5,
            xlabel='y0_rectified',
            ylabel='tti_bij[' + str(i) + ']',
            geometry=geometry,
            debugplot=debugplot
        )
        list_poly_tti_bij.append(poly)

    # step 2: use the variation of each coefficient with y0_reference_middle
    # to infer the expected rectification transformation for each slitlet
    for islitlet in list_valid_islitlets:
        tmp_dict = rectwv_coeff.contents[islitlet - 1]
        y0_reference_middle = tmp_dict['y0_reference_middle']
        tmp_dict['ttd_order_longslit_model'] = order_fmap
        ttd_aij_longslit_model = []
        ttd_bij_longslit_model = []
        tti_aij_longslit_model = []
        tti_bij_longslit_model = []
        for i in range(ncoef_ttd):
            new_coeff = list_poly_ttd_aij[i](y0_reference_middle)
            ttd_aij_longslit_model.append(new_coeff)
            new_coeff = list_poly_ttd_bij[i](y0_reference_middle)
            ttd_bij_longslit_model.append(new_coeff)
            new_coeff = list_poly_tti_aij[i](y0_reference_middle)
            tti_aij_longslit_model.append(new_coeff)
            new_coeff = list_poly_tti_bij[i](y0_reference_middle)
            tti_bij_longslit_model.append(new_coeff)
        tmp_dict['ttd_aij_longslit_model'] = ttd_aij_longslit_model
        tmp_dict['ttd_bij_longslit_model'] = ttd_bij_longslit_model
        tmp_dict['tti_aij_longslit_model'] = tti_aij_longslit_model
        tmp_dict['tti_bij_longslit_model'] = tti_bij_longslit_model

    # ---

    # update uuid and meta_info in output JSON structure
    rectwv_coeff.uuid = str(uuid4())
    rectwv_coeff.meta_info['creation_date'] = datetime.now().isoformat()

    # return updated object
    return rectwv_coeff
|
Compute longslit_model coefficients for RectWaveCoeff object.
Parameters
----------
rectwv_coeff : RectWaveCoeff instance
Rectification and wavelength calibration coefficients for a
particular CSU configuration corresponding to a longslit
observation.
geometry : TBD
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'.
Returns
-------
rectwv_coeff : RectWaveCoeff instance
Updated object with longslit_model coefficients computed.
|
def mark_quality(self, start_time, length, qual_name):
    """Mark signal quality, only add the new ones.

    Removes any previous quality marker drawn by this method at the epoch's
    position, then draws a black rectangle when the quality is 'Poor'
    (other values leave the bar empty).

    Parameters
    ----------
    start_time : int
        start time in s of the epoch being scored.
    length : int
        duration in s of the epoch being scored.
    qual_name : str
        one of the stages defined in global stages.
    """
    y_pos = BARS['quality']['pos0']
    height = 10

    # the -1 is really important, otherwise we stay on the edge of the rect
    old_score = self.scene.itemAt(start_time + length / 2,
                                  y_pos + height - 1,
                                  self.transform())

    # check we are not removing the black border
    # (markers drawn by this method are the only items with NoPen)
    if old_score is not None and old_score.pen() == NoPen:
        lg.debug('Removing old score at {}'.format(start_time))
        self.scene.removeItem(old_score)
        self.idx_annot.remove(old_score)

    if qual_name == 'Poor':
        rect = QGraphicsRectItem(start_time, y_pos, length, height)
        rect.setPen(NoPen)
        rect.setBrush(Qt.black)
        self.scene.addItem(rect)
        self.idx_annot.append(rect)
|
Mark signal quality, only add the new ones.
Parameters
----------
start_time : int
start time in s of the epoch being scored.
length : int
duration in s of the epoch being scored.
qual_name : str
one of the stages defined in global stages.
|
def to_representation(self, instance):
    """
    Serialize the EnterpriseCustomerCatalog object.

    Arguments:
        instance (EnterpriseCustomerCatalog): The EnterpriseCustomerCatalog to serialize.

    Returns:
        dict: The EnterpriseCustomerCatalog converted to a dict.
    """
    request = self.context['request']
    enterprise_customer = instance.enterprise_customer
    representation = super(EnterpriseCustomerCatalogDetailSerializer, self).to_representation(instance)

    # Pull the catalog's search results from the discovery service.
    paginated_content = instance.get_paginated_content(request.GET)
    search_results = paginated_content['results']

    # Per content type, how to derive the enterprise enrollment URL.
    enrollment_url_builders = {
        'course': lambda entry: instance.get_course_enrollment_url(entry['key']),
        'courserun': lambda entry: instance.get_course_run_enrollment_url(entry['key']),
        'program': lambda entry: instance.get_program_enrollment_url(entry['uuid']),
    }
    for entry in search_results:
        marketing_url = entry.get('marketing_url')
        if marketing_url:
            entry['marketing_url'] = utils.update_query_parameters(
                marketing_url, utils.get_enterprise_utm_context(enterprise_customer)
            )
        builder = enrollment_url_builders.get(entry['content_type'])
        if builder is not None:
            entry['enrollment_url'] = builder(entry)

    # Build absolute pagination links relative to the current request.
    page = int(request.GET.get('page', '1'))
    request_uri = request.build_absolute_uri()
    previous_url = None
    if paginated_content['previous']:
        previous_url = utils.update_query_parameters(request_uri, {'page': page - 1})
    next_url = None
    if paginated_content['next']:
        next_url = utils.update_query_parameters(request_uri, {'page': page + 1})

    representation['count'] = paginated_content['count']
    representation['previous'] = previous_url
    representation['next'] = next_url
    representation['results'] = search_results
    return representation
|
Serialize the EnterpriseCustomerCatalog object.
Arguments:
instance (EnterpriseCustomerCatalog): The EnterpriseCustomerCatalog to serialize.
Returns:
dict: The EnterpriseCustomerCatalog converted to a dict.
|
def base_taskname(taskname, packagename=None):
    """
    Extract the base name of the task.

    Many tasks in the `drizzlepac` have "compound" names such as
    'drizzlepac.sky'. This function returns the portion of the input
    `taskname` to the right of the right-most dot, or the input string
    unchanged when it contains no dot.

    Parameters
    ----------
    taskname : str, None
        Full task name. Non-string inputs (including `None`) are
        returned unchanged.

    packagename : str, None (Default = None)
        Expected package name. A compound task name is assumed to be
        `packagename` + '.' + base name. When `packagename` is given,
        the text to the left of the right-most dot must equal it;
        a mismatch raises `AssertionError`. This is a development-time
        sanity check; no check is performed when it is `None`.

    Raises
    ------
    AssertionError
        Raised when the package name derived from the input `taskname`
        does not match the supplied `packagename`.
    """
    if not isinstance(taskname, str):
        return taskname

    dot_pos = taskname.rfind('.')
    if dot_pos < 0:
        base, derived_pkg = taskname, ''
    else:
        base = taskname[dot_pos + 1:]
        derived_pkg = taskname[:dot_pos]

    assert packagename is None or packagename == derived_pkg

    return base
|
Extract the base name of the task.
Many tasks in the `drizzlepac` have "compound" names such as
'drizzlepac.sky'. This function will search for the presence of a dot
in the input `taskname` and if found, it will return the string
to the right of the right-most dot. If a dot is not found, it will return
the input string.
Parameters
----------
taskname : str, None
Full task name. If it is `None`, :py:func:`base_taskname` will
return `None`\ .
packagename : str, None (Default = None)
Package name. It is assumed that a compound task name is formed by
concatenating `packagename` + '.' + `taskname`\ . If `packagename`
is not `None`, :py:func:`base_taskname` will check that the string
to the left of the right-most dot matches `packagename` and will
raise an `AssertionError` if the package name derived from the
input `taskname` does not match the supplied `packagename`\ . This
is intended as a check for discrepancies that may arise
during the development of the tasks. If `packagename` is `None`,
no such check will be performed.
Raises
------
AssertionError
Raised when package name derived from the input `taskname` does not
match the supplied `packagename`
|
def __get_query_filters(cls, filters=None, inverse=False):
    """
    Convert a dict with the filters to be applied ({"name1": "value1", "name2": "value2"})
    to a list of query objects which can be used together in a query using boolean
    combination logic.

    Filter names starting with '*' are "inverse" filters: they are emitted
    only when `inverse` is True, while all other names are emitted only
    when `inverse` is False.

    :param filters: dict with the filters to be applied (default: no filters)
    :param inverse: if True include only the inverse filters (the ones starting with *)
    :return: a list of es_dsl 'MatchPhrase' Query objects
             Ex: [MatchPhrase(name1="value1"), MatchPhrase(name2="value2"), ..]
             Dict representation of the object: {'match_phrase': {'field': 'home'}}
    """
    query_filters = []

    # `filters=None` avoids the mutable-default-argument pitfall; an
    # empty/missing dict simply yields no filters.
    for name in (filters or {}):
        # startswith() also tolerates empty filter names (name[0] raised
        # IndexError before).
        is_inverse_filter = name.startswith('*')
        if is_inverse_filter != inverse:
            # Skip filters that do not match the requested mode.
            continue
        field_name = name[1:] if is_inverse_filter else name
        params = {field_name: filters[name]}
        # trying to use es_dsl only and not creating hard coded queries
        query_filters.append(Q('match_phrase', **params))

    return query_filters
|
Convert a dict with the filters to be applied ({"name1":"value1", "name2":"value2"})
to a list of query objects which can be used together in a query using boolean
combination logic.
:param filters: dict with the filters to be applied
:param inverse: if True include all the inverse filters (the one starting with *)
:return: a list of es_dsl 'MatchPhrase' Query objects
Ex: [MatchPhrase(name1="value1"), MatchPhrase(name2="value2"), ..]
Dict representation of the object: {'match_phrase': {'field': 'home'}}
|
def blit(self, surface, pos=(0, 0)):
    """
    Blits a surface on the screen at pos

    Pixels that would land outside the screen are silently clipped.

    :param surface: Surface to blit
    :param pos: Top left corner to start blitting
    :type surface: Surface
    :type pos: tuple
    """
    offset_x, offset_y = pos
    for src_x in range(surface.width):
        for src_y in range(surface.height):
            dest = (src_x + offset_x, src_y + offset_y)
            # Only copy pixels whose destination is actually visible.
            if self.point_on_screen(dest):
                self.matrix[dest[0]][dest[1]] = surface.matrix[src_x][src_y]
|
Blits a surface on the screen at pos
:param surface: Surface to blit
:param pos: Top left corner to start blitting
:type surface: Surface
:type pos: tuple
|
def disable_metrics_collection(self, as_group, metrics=None):
    """
    Disables monitoring of group metrics for the Auto Scaling group
    specified in AutoScalingGroupName. You can specify the list of affected
    metrics with the Metrics parameter.
    """
    request_params = {'AutoScalingGroupName': as_group}
    # Only attach the Metrics list parameter when metrics were given.
    if metrics:
        self.build_list_params(request_params, metrics, 'Metrics')
    return self.get_status('DisableMetricsCollection', request_params)
|
Disables monitoring of group metrics for the Auto Scaling group
specified in AutoScalingGroupName. You can specify the list of affected
metrics with the Metrics parameter.
|
def router_main(self):
    '''
    Main method for router; we stay in a loop in this method, receiving
    packets until the end of time.
    '''
    while True:
        try:
            timestamp, dev, pkt = self.net.recv_packet(timeout=1.0)
        except NoPackets:
            # Receive timed out with nothing to read; just poll again.
            log_debug("No packets available in recv_packet")
            continue
        except Shutdown:
            # Shutdown signal ends the receive loop.
            log_debug("Got shutdown signal")
            break
        log_debug("Got a packet: {}".format(str(pkt)))
|
Main method for router; we stay in a loop in this method, receiving
packets until the end of time.
|
def _validate_pending_children(self):
    """
    Validate the content of the pending_children set, asserting if an
    internal error is found.

    This is strictly a debugging aid for the Taskmaster; it is not used
    in normal operation.

    The pending_children set detects cycles in the dependency graph:
    a "pending child" is a node found in the "pending" state while the
    dependencies of one of its parents are being checked. A pending
    child appears either when the Taskmaster has looped back through a
    genuine cycle, or — benignly — when a diamond-shaped DAG is being
    evaluated in parallel (a shared child is reached again via a second
    parent while still pending). Only a fraction of the nodes ever
    become a pending child of another node, and entries are removed as
    soon as a node leaves the pending state, so the set stays small.
    The two cases can be told apart at the end of the build: pending
    children due to a diamond-shaped DAG will have been built (or
    failed), whereas nodes involved in a cycle remain pending.

    Invariants asserted for every pending child:

    * its state is pending or executing;
    * at least one parent is waiting on it;
    * every waiting parent holds a positive reference count.
    """
    for child in self.pending_children:
        assert child.state in (NODE_PENDING, NODE_EXECUTING), \
            (str(child), StateString[child.state])
        assert len(child.waiting_parents) != 0, \
            (str(child), len(child.waiting_parents))
        for parent in child.waiting_parents:
            assert parent.ref_count > 0, \
                (str(child), str(parent), parent.ref_count)
|
Validate the content of the pending_children set. Assert if an
internal error is found.
This function is used strictly for debugging the taskmaster by
checking that no invariants are violated. It is not used in
normal operation.
The pending_children set is used to detect cycles in the
dependency graph. We call a "pending child" a child that is
found in the "pending" state when checking the dependencies of
its parent node.
A pending child can occur when the Taskmaster completes a loop
through a cycle. For example, let's imagine a graph made of
three nodes (A, B and C) making a cycle. The evaluation starts
at node A. The Taskmaster first considers whether node A's
child B is up-to-date. Then, recursively, node B needs to
check whether node C is up-to-date. This leaves us with a
dependency graph looking like::
Next candidate \
\
Node A (Pending) --> Node B(Pending) --> Node C (NoState)
^ |
| |
+-------------------------------------+
Now, when the Taskmaster examines the Node C's child Node A,
it finds that Node A is in the "pending" state. Therefore,
Node A is a pending child of node C.
Pending children indicate that the Taskmaster has potentially
looped back through a cycle. We say potentially because it could
also occur when a DAG is evaluated in parallel. For example,
consider the following graph::
Node A (Pending) --> Node B(Pending) --> Node C (Pending) --> ...
| ^
| |
+----------> Node D (NoState) --------+
/
Next candidate /
The Taskmaster first evaluates the nodes A, B, and C and
starts building some children of node C. Assuming, that the
maximum parallel level has not been reached, the Taskmaster
will examine Node D. It will find that Node C is a pending
child of Node D.
In summary, evaluating a graph with a cycle will always
involve a pending child at one point. A pending child might
indicate either a cycle or a diamond-shaped DAG. Only a
fraction of the nodes ends-up being a "pending child" of
another node. This keeps the pending_children set small in
practice.
We can differentiate between the two cases if we wait until
the end of the build. At this point, all the pending children
nodes due to a diamond-shaped DAG will have been properly
built (or will have failed to build). But, the pending
children involved in a cycle will still be in the pending
state.
The taskmaster removes nodes from the pending_children set as
soon as a pending_children node moves out of the pending
state. This also helps to keep the pending_children set small.
|
def import_locations(self, cells_file):
    """Parse OpenCellID.org data files.

    ``import_locations()`` returns a dictionary with keys containing the
    OpenCellID.org_ database identifier, and values consisting of
    a ``Cell`` objects.

    It expects cell files in the following format::

        22747,52.0438995361328,-0.2246370017529,234,33,2319,647,0,1,
        2008-04-05 21:32:40,2008-04-05 21:32:40
        22995,52.3305015563965,-0.2255620062351,234,10,20566,4068,0,1,
        2008-04-05 21:32:59,2008-04-05 21:32:59
        23008,52.3506011962891,-0.2234109938145,234,10,10566,4068,0,1,
        2008-04-05 21:32:59,2008-04-05 21:32:59

    The above file processed by ``import_locations()`` will return the
    following ``dict`` object::

        {23008: Cell(23008, 52.3506011963, -0.223410993814, 234, 10, 10566,
                     4068, 0, 1, datetime.datetime(2008, 4, 5, 21, 32, 59),
                     datetime.datetime(2008, 4, 5, 21, 32, 59)),
         22747: Cell(22747, 52.0438995361, -0.224637001753, 234, 33, 2319,
                     647, 0, 1, datetime.datetime(2008, 4, 5, 21, 32, 40),
                     datetime.datetime(2008, 4, 5, 21, 32, 40)),
         22995: Cell(22995, 52.3305015564, -0.225562006235, 234, 10, 20566,
                     4068, 0, 1, datetime.datetime(2008, 4, 5, 21, 32, 59),
                     datetime.datetime(2008, 4, 5, 21, 32, 59))}

    Args:
        cells_file (iter): Cell data to read
    Returns:
        dict: Cell data with their associated database identifier

    .. _OpenCellID.org: http://opencellid.org/
    """
    self._cells_file = cells_file
    field_names = ('ident', 'latitude', 'longitude', 'mcc', 'mnc', 'lac',
                   'cellid', 'crange', 'samples', 'created', 'updated')

    def parse_date(string):
        # Timestamp format used by the OpenCellID export.
        return datetime.datetime.strptime(string, '%Y-%m-%d %H:%M:%S')

    field_parsers = (int, float, float, int, int, int, int, int, int,
                     parse_date, parse_date)
    data = utils.prepare_csv_read(cells_file, field_names)
    for row in data:
        try:
            cell = dict((name, parser(row[name]))
                        for name, parser in zip(field_names, field_parsers))
        except ValueError:
            if r"\N" in row.values():
                # A few entries are incomplete, and when that occurs the
                # export includes the string "\N" to denote missing data.
                # Skip just this entry and keep processing the rest of the
                # file (a ``break`` here previously aborted the whole
                # import at the first incomplete row).
                logging.debug('Skipping incomplete entry %r', row)
                continue
            else:
                raise utils.FileFormatError('opencellid.org')
        else:
            # Key by the parsed integer identifier, matching the documented
            # return value (``row['ident']`` is still the raw CSV string).
            self[cell['ident']] = Cell(**cell)
|
Parse OpenCellID.org data files.
``import_locations()`` returns a dictionary with keys containing the
OpenCellID.org_ database identifier, and values consisting of
a ``Cell`` objects.
It expects cell files in the following format::
22747,52.0438995361328,-0.2246370017529,234,33,2319,647,0,1,
2008-04-05 21:32:40,2008-04-05 21:32:40
22995,52.3305015563965,-0.2255620062351,234,10,20566,4068,0,1,
2008-04-05 21:32:59,2008-04-05 21:32:59
23008,52.3506011962891,-0.2234109938145,234,10,10566,4068,0,1,
2008-04-05 21:32:59,2008-04-05 21:32:59
The above file processed by ``import_locations()`` will return the
following ``dict`` object::
{23008: Cell(23008, 52.3506011963, -0.223410993814, 234, 10, 10566,
4068, 0, 1, datetime.datetime(2008, 4, 5, 21, 32, 59),
datetime.datetime(2008, 4, 5, 21, 32, 59)),
22747: Cell(22747, 52.0438995361, -0.224637001753, 234, 33, 2319,
647, 0, 1, datetime.datetime(2008, 4, 5, 21, 32, 40),
datetime.datetime(2008, 4, 5, 21, 32, 40)),
22995: Cell(22995, 52.3305015564, -0.225562006235, 234, 10, 20566,
4068, 0, 1, datetime.datetime(2008, 4, 5, 21, 32, 59),
datetime.datetime(2008, 4, 5, 21, 32, 59))}
Args:
cells_file (iter): Cell data to read
Returns:
dict: Cell data with their associated database identifier
.. _OpenCellID.org: http://opencellid.org/
|
def doesnt_have(self, relation, boolean='and', extra=None):
    """
    Add a relationship count to the query, matching records that have
    no related records for the given relation.

    :param relation: The relation to count
    :type relation: str

    :param boolean: The boolean value
    :type boolean: str

    :param extra: The extra query
    :type extra: Builder or callable

    :rtype: Builder
    """
    # "doesn't have" is simply "has fewer than one" related record.
    return self.has(relation, '<', 1, boolean, extra)
|
Add a relationship count to the query.
:param relation: The relation to count
:type relation: str
:param boolean: The boolean value
:type boolean: str
:param extra: The extra query
:type extra: Builder or callable
:rtype: Builder
|
def entity(self, entity_id, get_files=False, channel=None,
           include_stats=True, includes=None):
    '''Get the default data for any entity (e.g. bundle or charm).

    @param entity_id The entity's id either as a reference or a string
    @param get_files Whether to fetch the files for the charm or not.
    @param channel Optional channel name.
    @param include_stats Optionally disable stats collection.
    @param includes An optional list of meta info to include, as a
        sequence of strings. If None, the default include list is used.
    '''
    if includes is None:
        # Copy so the module-level default list is never mutated.
        includes = list(DEFAULT_INCLUDES)
    # Append optional metadata sections on demand, without duplicates.
    for extra, wanted in (('manifest', get_files), ('stats', include_stats)):
        if wanted and extra not in includes:
            includes.append(extra)
    return self._meta(entity_id, includes, channel=channel)
|
Get the default data for any entity (e.g. bundle or charm).
@param entity_id The entity's id either as a reference or a string
@param get_files Whether to fetch the files for the charm or not.
@param channel Optional channel name.
@param include_stats Optionally disable stats collection.
@param includes An optional list of meta info to include, as a
sequence of strings. If None, the default include list is used.
|
def to_even_columns(data, headers=None):
    """
    Nicely format the 2-dimensional list into evenly spaced columns.

    :param data: 2-dimensional list of strings (rows of words)
    :param headers: optional flat list of column header strings; when
        given, a header row and a dashed separator line are prepended
    :return: the formatted table as a single newline-terminated string
    """
    result = ''
    # Column width is the longest word anywhere in the table, plus padding.
    col_width = max(len(word) for row in data for word in row) + 2  # padding
    if headers:
        # Headers are a flat list of strings, so measure each header as a
        # whole word.  (The previous code iterated the *characters* of
        # each header, so headers longer than the data words never
        # widened the columns and came out mis-aligned.)
        header_width = max(len(word) for word in headers) + 2
        if header_width > col_width:
            col_width = header_width
        result += "".join(word.ljust(col_width) for word in headers) + "\n"
        result += '-' * col_width * len(headers) + "\n"
    for row in data:
        result += "".join(word.ljust(col_width) for word in row) + "\n"
    return result
|
Nicely format the 2-dimensional list into evenly spaced columns
|
def _argument_adapter(callback):
"""Returns a function that when invoked runs ``callback`` with one arg.
If the function returned by this function is called with exactly
one argument, that argument is passed to ``callback``. Otherwise
the args tuple and kwargs dict are wrapped in an `Arguments` object.
"""
def wrapper(*args, **kwargs):
if kwargs or len(args) > 1:
callback(Arguments(args, kwargs))
elif args:
callback(args[0])
else:
callback(None)
return wrapper
|
Returns a function that when invoked runs ``callback`` with one arg.
If the function returned by this function is called with exactly
one argument, that argument is passed to ``callback``. Otherwise
the args tuple and kwargs dict are wrapped in an `Arguments` object.
|
def get_next_step(self):
    """Find the proper step when user clicks the Next button.

    :returns: The step to be switched to
    :rtype: WizardStep instance or None
    """
    selected = self.parent.step_kw_purpose.selected_purpose()
    if selected == layer_purpose_hazard:
        # Hazard layers go to the hazard category step next.
        return self.parent.step_kw_hazard_category
    if is_raster_layer(self.parent.layer):
        # Raster layers select a band before the layer mode step.
        return self.parent.step_kw_band_selector
    return self.parent.step_kw_layermode
|
Find the proper step when user clicks the Next button.
:returns: The step to be switched to
:rtype: WizardStep instance or None
|
def get_two_parameters(self, regex_exp, parameters):
    """
    Get two parameters from a given regex expression.

    Raise an exception if more than two were found, or if either of the
    two is missing.

    :param regex_exp: regex used to split out the parameters
    :param parameters: the raw parameter string
    :return: the two parameters, upper-cased
    """
    first, second, leftover = self.get_parameters(regex_exp, parameters)
    # Any non-whitespace remainder means too many arguments were given.
    if leftover is not None and leftover.strip():
        raise iarm.exceptions.ParsingError("Extra arguments found: {}".format(leftover))
    if not first:
        raise iarm.exceptions.ParsingError("Missing first positional argument")
    if not second:
        raise iarm.exceptions.ParsingError("Missing second positional argument")
    return first.upper(), second.upper()
|
Get two parameters from a given regex expression
Raise an exception if more than two were found
:param regex_exp:
:param parameters:
:return:
|
def _unescape(self, msg):
"""
Removes double quotes that were used to escape double quotes. Expects
a string without its delimiting quotes, or a number. Returns a new
unescaped string.
"""
if isinstance(msg, (int, float, long)):
return msg
unescaped = ''
i = 0
while i < len(msg):
unescaped += msg[i]
if msg[i] == '"':
i+=1
i+=1
return unescaped
|
Removes double quotes that were used to escape double quotes. Expects
a string without its delimiting quotes, or a number. Returns a new
unescaped string.
|
def _ensure_unicode_string(string):
    """Returns a unicode string for string.

    :param string:
        The input string.
    :type string:
        `basestring`
    :returns:
        A unicode string.
    :rtype:
        `unicode`
    """
    if isinstance(string, six.text_type):
        return string
    # Byte strings are decoded assuming UTF-8.
    return string.decode('utf-8')
|
Returns a unicode string for string.
:param string:
The input string.
:type string:
`basestring`
:returns:
A unicode string.
:rtype:
`unicode`
|
def from_secrets_file(client_secrets, storage=None, flags=None,
                      storage_path=None, api_version="v3", readonly=False,
                      http_client=None, ga_hook=None):
    """Create a client for a web or installed application.

    Create a client with a client secrets file.

    Args:
      client_secrets: str, path to the client secrets file (downloadable from
        Google API Console)
      storage: oauth2client.client.Storage, a Storage implementation to store
        credentials.
      flags: passed through to the OAuth flow when it has to run.
      storage_path: str, path to a file storage.
      api_version: str, analytics API version to build (default "v3").
      readonly: bool, default False, if True only readonly access is requested
        from GA.
      http_client: httplib2.Http, Override the default http client used.
      ga_hook: function, a hook that is called every time a query is made
        against GA.
    """
    if readonly:
        scope = GOOGLE_API_SCOPE_READONLY
    else:
        scope = GOOGLE_API_SCOPE
    flow = flow_from_clientsecrets(client_secrets, scope=scope)
    storage = _get_storage(storage, storage_path)
    credentials = storage.get()
    # Only run the interactive OAuth flow when no valid cached
    # credentials are available.
    if credentials is None or credentials.invalid:
        credentials = run_flow(flow, storage, flags)
    return Client(_build(credentials, api_version, http_client), ga_hook)
|
Create a client for a web or installed application.
Create a client with a client secrets file.
Args:
client_secrets: str, path to the client secrets file (downloadable from
Google API Console)
storage: oauth2client.client.Storage, a Storage implementation to store
credentials.
storage_path: str, path to a file storage.
readonly: bool, default False, if True only readonly access is requested
from GA.
http_client: httplib2.Http, Override the default http client used.
ga_hook: function, a hook that is called every time a query is made
against GA.
|
def shutdown(self):
    """
    Send the shutdown message to the Connection.

    :return: True if the shutdown completed successfully (i.e. both sides
             have sent closure alerts), False otherwise (in which case you
             call :meth:`recv` or :meth:`send` when the connection becomes
             readable/writeable).
    """
    result = _lib.SSL_shutdown(self._ssl)
    if result > 0:
        # Both sides have sent their closure alerts.
        return True
    if result == 0:
        # Shutdown is not complete yet; the caller should retry later.
        return False
    # Negative result: translate the OpenSSL error into an exception.
    self._raise_ssl_error(self._ssl, result)
|
Send the shutdown message to the Connection.
:return: True if the shutdown completed successfully (i.e. both sides
have sent closure alerts), False otherwise (in which case you
call :meth:`recv` or :meth:`send` when the connection becomes
readable/writeable).
|
def _evalTimeStr(self, datetimeString, sourceTime):
    """
    Evaluate text passed by L{_partialParseTimeStr()}
    """
    text = datetimeString.strip()
    sourceTime = self._evalDT(datetimeString, sourceTime)
    if text in self.ptc.re_values['now']:
        self.currentContext.updateAccuracy(pdtContext.ACU_NOW)
        return sourceTime
    # Otherwise the given string is a natural language time string like
    # lunch, midnight, etc
    sTime = self.ptc.getSource(text, sourceTime)
    if sTime:
        sourceTime = sTime
        self.currentContext.updateAccuracy(pdtContext.ACU_HALFDAY)
    return sourceTime
|
Evaluate text passed by L{_partialParseTimeStr()}
|
def diffusionAddCountsFromSource(grph, source, target, nodeType = 'citations', extraType = None, diffusionLabel = 'DiffusionCount', extraKeys = None, countsDict = None, extraMapping = None):
    """Does a diffusion using [diffusionCount()](#metaknowledge.diffusion.diffusionCount) and updates _grph_ with it, using the nodes in the graph as keys in the diffusion, i.e. the source. The name of the attribute the counts are added to is given by _diffusionLabel_. If the graph is not composed of citations from the source and instead is another tag _nodeType_ needs to be given the tag string.

    # Parameters

    _grph_ : `networkx Graph`

    > The graph to be updated

    _source_ : `RecordCollection`

    > The `RecordCollection` that created _grph_

    _target_ : `RecordCollection`

    > The `RecordCollection` that will be counted

    _nodeType_ : `optional [str]`

    > default `'citations'`, the tag that contains the values used to create _grph_

    _extraType_ : `optional [str]`

    > default `None`, forwarded to `diffusionCount()` as its `extraValue`; when given, per-key counts are also written onto each node

    _diffusionLabel_ : `optional [str]`

    > default `'DiffusionCount'`, the name of the node attribute the counts are written to

    _extraKeys_ : `optional [list]`

    > default `None`, keys initialised to `0` on every node before counting (only used when _extraType_ is given)

    _countsDict_ : `optional [dict]`

    > default `None`, a precomputed counts dict; when `None` one is produced by `diffusionCount()`

    _extraMapping_ : `optional [callable]`

    > default `None`, forwarded unchanged to `diffusionCount()`

    # Returns

    `dict[:int]`

    > The counts dictionary used to add values to _grph_. *Note* _grph_ is modified by the function and the return is done in case you need it.
    """
    progArgs = (0, "Starting to add counts to graph")
    if metaknowledge.VERBOSE_MODE:
        progKwargs = {'dummy' : False}
    else:
        progKwargs = {'dummy' : True}
    with _ProgressBar(*progArgs, **progKwargs) as PBar:
        PBar.updateVal(0, 'Getting counts')
        if countsDict is None:
            countsDict = diffusionCount(source, target, sourceType = nodeType, extraValue = extraType, _ProgBar = PBar, extraMapping = extraMapping)
        try:
            # Graph nodes are strings; if the counts are keyed by non-string
            # objects rebuild the dict with string keys so lookups below match.
            if not isinstance(countsDict.keys().__iter__().__next__(), str):
                PBar.updateVal(.5, "Prepping the counts")
                newCountsDict = {}
                while True:
                    try:
                        k, v = countsDict.popitem()
                    except KeyError:
                        # popitem() on an empty dict signals the end.
                        break
                    newCountsDict[str(k)] = v
                countsDict = newCountsDict
        except StopIteration:
            # countsDict is empty; nothing to convert.
            pass
        count = 0
        for n in grph.nodes_iter():
            PBar.updateVal(.5 + .5 * (count / len(grph)), "Adding count for '{}'".format(n))
            if extraType is not None:
                # Initialise the requested extra keys, then copy over any
                # per-key counts recorded for this node.
                if extraKeys:
                    for key in extraKeys:
                        grph.node[n][key] = 0
                grph.node[n][diffusionLabel] = 0
                try:
                    for k, v in countsDict[n].items():
                        if k == 'TargetCount':
                            grph.node[n][diffusionLabel] = v
                        else:
                            if k:
                                grph.node[n][k] = v
                except KeyError:
                    # Node never occurred in the target; count stays 0.
                    grph.node[n][diffusionLabel] = 0
            else:
                grph.node[n][diffusionLabel] = countsDict.get(n, 0)
            count += 1
        PBar.finish("Done adding diffusion counts to a graph")
    return countsDict
|
Does a diffusion using [diffusionCount()](#metaknowledge.diffusion.diffusionCount) and updates _grph_ with it, using the nodes in the graph as keys in the diffusion, i.e. the source. The name of the attribute the counts are added to is given by _diffusionLabel_. If the graph is not composed of citations from the source and instead is another tag _nodeType_ needs to be given the tag string.
# Parameters
_grph_ : `networkx Graph`
> The graph to be updated
_source_ : `RecordCollection`
> The `RecordCollection` that created _grph_
_target_ : `RecordCollection`
> The `RecordCollection` that will be counted
_nodeType_ : `optional [str]`
> default `'citations'`, the tag that constants the values used to create _grph_
# Returns
`dict[:int]`
> The counts dictionary used to add values to _grph_. *Note* _grph_ is modified by the function and the return is done in case you need it.
|
def get_library(self, username, status=None):
    """Fetches a user's library.

    :param str username: The user to get the library from.
    :param str status: only return the items with the supplied status.
        Can be one of `currently-watching`, `plan-to-watch`, `completed`,
        `on-hold` or `dropped`.
    :returns: List of Library objects.
    """
    response = self._query_('/users/%s/library' % username, 'GET',
                            params={'status': status})
    return [LibraryEntry(entry) for entry in response.json()]
|
Fetches a user's library.
:param str username: The user to get the library from.
:param str status: only return the items with the supplied status.
Can be one of `currently-watching`, `plan-to-watch`, `completed`,
`on-hold` or `dropped`.
:returns: List of Library objects.
|
def _let_to_py_ast(ctx: GeneratorContext, node: Let) -> GeneratedPyAST:
    """Return a Python AST Node for a `let*` expression.

    Each binding is compiled in order: its init form is generated, assigned
    to a fresh munged/uniquified Python variable, and the binding symbol is
    registered in a new symbol table scope so later bindings and the body
    can resolve it.  The body is compiled as a synthetic `do`, its value
    captured in a generated result variable that becomes the expression
    node of the returned AST.
    """
    assert node.op == NodeOp.LET
    with ctx.new_symbol_table("let"):
        let_body_ast: List[ast.AST] = []
        for binding in node.bindings:
            init_node = binding.init
            # Every let* binding is expected to carry an init form (asserted).
            assert init_node is not None
            init_ast = gen_py_ast(ctx, init_node)
            # Unique, Python-safe name for this binding.
            binding_name = genname(munge(binding.name))
            let_body_ast.extend(init_ast.dependencies)
            let_body_ast.append(
                ast.Assign(
                    targets=[ast.Name(id=binding_name, ctx=ast.Store())],
                    value=init_ast.node,
                )
            )
            # Register the binding so subsequent init forms and the body
            # resolve the symbol to the generated Python name.
            ctx.symbol_table.new_symbol(
                sym.symbol(binding.name), binding_name, LocalType.LET
            )
        let_result_name = genname("let_result")
        body_ast = _synthetic_do_to_py_ast(ctx, node.body)
        let_body_ast.extend(map(statementize, body_ast.dependencies))
        # Capture the body's value so the caller can reference it by name.
        let_body_ast.append(
            ast.Assign(
                targets=[ast.Name(id=let_result_name, ctx=ast.Store())],
                value=body_ast.node,
            )
        )
        return GeneratedPyAST(
            node=ast.Name(id=let_result_name, ctx=ast.Load()), dependencies=let_body_ast
        )
|
Return a Python AST Node for a `let*` expression.
|
def show(self, index):
    """
    This class overrides this method

    Updates the item text to reflect whether selecting it returns to the
    parent menu or exits entirely, then defers to the base class.
    """
    if self.menu and self.menu.parent:
        self.text = "Return to %s menu" % self.menu.parent.title
    else:
        # No parent menu: selecting this item leaves the application.
        self.text = "Exit"
    return super(ExitItem, self).show(index)
|
This class overrides this method
|
def get_meta(self, key=None):
    """Get metadata value for collection.

    With no key, return a dict of all mapped metadata; with a key,
    return that single mapped value ("tag" is served directly).
    """
    if self.is_fake:
        # Fake collections carry no metadata.
        return {}
    if key == "tag":
        return self.tag
    if key is None:
        # Map every available info key to its value.
        return {
            info_key: self.meta_mappings.map_get(self.journal.info, info_key)[1]
            for info_key in self.journal.info.keys()
        }
    mapped_key, value = self.meta_mappings.map_get(self.journal.info, key)
    return value
|
Get metadata value for collection.
|
def parse_template(input_filename, output_filename=''):
    """ Parses a template file
        Replaces all occurrences of @@problem_id@@ by the value
        of the 'problem_id' key in data dictionary

        input_filename: file to parse
        output_filename: if not specified, overwrite input file

        Raises ValueError when the loaded input data has no 'input' key.
    """
    data = load_input()

    with open(input_filename, 'rb') as file:
        template = file.read().decode("utf-8")

    # Check if 'input' in data
    if 'input' not in data:
        raise ValueError("Could not find 'input' in data")

    # Parse template
    for field in data['input']:
        # File-upload fields are dicts carrying both a filename and the
        # uploaded content; plain fields are substituted directly.
        is_file_field = (isinstance(data['input'][field], dict)
                         and "filename" in data['input'][field]
                         and "value" in data['input'][field])
        subs = ["filename", "value"] if is_file_field else [""]
        for sub in subs:
            displayed_field = field + (":" if sub else "") + sub
            regex = re.compile("@([^@]*)@" + displayed_field + '@([^@]*)@')
            for prefix, postfix in set(regex.findall(template)):
                if sub == "value":
                    # Use a context manager so the uploaded-content file
                    # handle is closed promptly instead of leaked.
                    with open(data['input'][field][sub], 'rb') as value_file:
                        text = value_file.read().decode('utf-8')
                elif sub:
                    text = data['input'][field][sub]
                else:
                    text = data['input'][field]
                # Repeat the surrounding prefix/postfix on every line of a
                # multi-line substitution value.
                rep = "\n".join([prefix + v + postfix for v in text.splitlines()])
                template = template.replace("@{0}@{1}@{2}@".format(prefix, displayed_field, postfix), rep)

    if output_filename == '':
        output_filename = input_filename

    # Ensure directory of resulting file exists
    try:
        os.makedirs(os.path.dirname(output_filename))
    except OSError:
        # Directory already exists (or path has no directory component);
        # the write below will surface any real problem.
        pass

    # Write file
    with open(output_filename, 'wb') as file:
        file.write(template.encode("utf-8"))
|
Parses a template file
Replaces all occurrences of @@problem_id@@ by the value
of the 'problem_id' key in data dictionary
input_filename: file to parse
output_filename: if not specified, overwrite input file
|
def _validate_arguments(self):
    """method to sanitize model parameters

    Validates basis, n_splines, spline_order and by, raising ValueError
    on invalid values.

    Parameters
    ---------
    None

    Returns
    -------
    self : the validated term (returned to allow chaining; the previous
        docstring incorrectly said ``None``)
    """
    super(SplineTerm, self)._validate_arguments()
    if self.basis not in self._bases:
        raise ValueError("basis must be one of {}, "\
                         "but found: {}".format(self._bases, self.basis))
    # n_splines must be a non-negative int
    self.n_splines = check_param(self.n_splines, param_name='n_splines',
                                 dtype='int', constraint='>= 0')
    # spline_order must be a non-negative int
    self.spline_order = check_param(self.spline_order,
                                    param_name='spline_order',
                                    dtype='int', constraint='>= 0')
    # n_splines + spline_order: need more splines than the order
    if not self.n_splines > self.spline_order:
        raise ValueError('n_splines must be > spline_order. '\
                         'found: n_splines = {} and spline_order = {}'\
                         .format(self.n_splines, self.spline_order))
    # by: optional feature index, non-negative when given
    if self.by is not None:
        self.by = check_param(self.by,
                              param_name='by',
                              dtype='int', constraint='>= 0')
    return self
|
method to sanitize model parameters
Parameters
---------
None
Returns
-------
self (the validated term; the code returns ``self``, not ``None``)
|
def _getScalesRand(self):
    """
    Internal function for parameter initialization

    Return a vector of random scales
    """
    if self.P > 1:
        pieces = []
        for term_i in range(self.n_randEffs):
            term_scales = sp.randn(self.diag[term_i].shape[0])
            if self.jitter[term_i] > 0:
                # Append the square root of the jitter as an extra scale.
                term_scales = sp.concatenate(
                    (term_scales, sp.array([sp.sqrt(self.jitter[term_i])])))
            pieces.append(term_scales)
        return sp.concatenate(pieces)
    return sp.randn(self.vd.getNumberScales())
|
Internal function for parameter initialization
Return a vector of random scales
|
def get_process_by_id(self, process_id):
    """GetProcessById.

    [Preview API] Get a process by ID.

    :param str process_id: ID for a process.
    :rtype: :class:`<Process> <azure.devops.v5_1.core.models.Process>`
    """
    route_values = {}
    if process_id is not None:
        route_values['processId'] = self._serialize.url(
            'process_id', process_id, 'str')
    # Issue the GET and deserialize the response payload.
    response = self._send(
        http_method='GET',
        location_id='93878975-88c5-4e6a-8abb-7ddd77a8a7d8',
        version='5.1-preview.1',
        route_values=route_values,
    )
    return self._deserialize('Process', response)
|
GetProcessById.
[Preview API] Get a process by ID.
:param str process_id: ID for a process.
:rtype: :class:`<Process> <azure.devops.v5_1.core.models.Process>`
|
def get_hashhash(self, username):
    """
    Return a SHA-256 hex digest of the stored htpasswd hash for *username*.
    """
    stored_hash = self.users.get_hash(username)
    return hashlib.sha256(stored_hash).hexdigest()
|
Generate a digest of the htpasswd hash
|
def do_stack(self, arg):
    """
    [~thread] k - show the stack trace
    [~thread] stack - show the stack trace
    """
    # XXX TODO add depth parameter
    if arg:
        raise CmdError("too many arguments")
    pid, tid = self.get_process_and_thread_ids_from_prefix()
    target_thread = self.get_process(pid).get_thread(tid)
    try:
        trace = target_thread.get_stack_trace_with_labels()
        if trace:
            print(CrashDump.dump_stack_trace_with_labels(trace))
        else:
            print("No stack trace available for thread (%d)" % tid)
    except WindowsError:
        print("Can't get stack trace for thread (%d)" % tid)
|
[~thread] k - show the stack trace
[~thread] stack - show the stack trace
|
def _get_healthmgr_cmd(self):
    ''' get the command to start the topology health manager processes '''
    main_class = 'org.apache.heron.healthmgr.HealthManager'
    java_binary = os.path.join(self.heron_java_home, 'bin/java')
    # We could not rely on the default -Xmx setting, which could be very big,
    # for instance, the default -Xmx in Twitter mesos machine is around 18GB
    jvm_opts = [
        '-Xmx1024M',
        '-XX:+PrintCommandLineFlags',
        '-verbosegc',
        '-XX:+PrintGCDetails',
        '-XX:+PrintGCTimeStamps',
        '-XX:+PrintGCDateStamps',
        '-XX:+PrintGCCause',
        '-XX:+UseGCLogFileRotation',
        '-XX:NumberOfGCLogFiles=5',
        '-XX:GCLogFileSize=100M',
        '-XX:+PrintPromotionFailure',
        '-XX:+PrintTenuringDistribution',
        '-XX:+PrintHeapAtGC',
        '-XX:+HeapDumpOnOutOfMemoryError',
        '-XX:+UseConcMarkSweepGC',
        '-XX:+PrintCommandLineFlags',
        '-Xloggc:log-files/gc.healthmgr.log',
        '-Djava.net.preferIPv4Stack=true',
    ]
    healthmgr_args = [
        "--cluster", self.cluster,
        "--role", self.role,
        "--environment", self.environment,
        "--topology_name", self.topology_name,
        "--metricsmgr_port", self.metrics_manager_port,
    ]
    healthmgr_cmd = ([java_binary] + jvm_opts +
                     ['-cp', self.health_manager_classpath, main_class] +
                     healthmgr_args)
    return Command(healthmgr_cmd, self.shell_env)
|
get the command to start the topology health manager processes
|
def to_eaf(self, skipempty=True, pointlength=0.1):
    """Convert the object to an pympi.Elan.Eaf object
    :param int pointlength: Length of respective interval from points in
                            seconds
    :param bool skipempty: Skip the empty annotations
    :returns: :class:`pympi.Elan.Eaf` object
    :raises ImportError: If the Eaf module can't be loaded.
    :raises ValueError: If the pointlength is not strictly positive.
    """
    from pympi.Elan import Eaf
    eaf_out = Eaf()
    if pointlength <= 0:
        raise ValueError('Pointlength should be strictly positive')
    for tier in self.get_tiers():
        eaf_out.add_tier(tier.name)
        for ann in tier.get_intervals(True):
            if tier.tier_type == 'TextTier':
                # Points carry only (time, value); synthesize an interval.
                ann = (ann[0], ann[0] + pointlength, ann[1])
            begin, end, value = ann
            if not skipempty or value.strip():
                eaf_out.add_annotation(tier.name,
                                       int(round(begin * 1000)),
                                       int(round(end * 1000)),
                                       value)
    return eaf_out
|
Convert the object to an pympi.Elan.Eaf object
:param int pointlength: Length of respective interval from points in
seconds
:param bool skipempty: Skip the empty annotations
:returns: :class:`pympi.Elan.Eaf` object
:raises ImportError: If the Eaf module can't be loaded.
:raises ValueError: If the pointlength is not strictly positive.
|
def hostname(self):
    """Get the hostname that this connection is associated with"""
    from six.moves.urllib.parse import urlparse
    netloc = urlparse(self._base_url).netloc
    host, _, _port = netloc.partition(':')
    return host
|
Get the hostname that this connection is associated with
|
def _expand_formula_(formula_string):
"""
Accounts for the many ways a user may write a formula string, and returns an expanded chemical formula string.
Assumptions:
-The Chemical Formula string it is supplied is well-written, and has no hanging parethneses
-The number of repeats occurs after the elemental symbol or ) ] character EXCEPT in the case of a hydrate where it is assumed to be in front of the first element
-All hydrates explicitly use the Β· symbol
-Only (, (,[, ], ., Β· are "important" symbols to intrepreting the string.
-IONS ARE NOT HANDLED
:param formula_string: a messy chemical formula string
:return: a non-emperical but expanded formula string
"""
formula_string = re.sub(r'[^A-Za-z0-9\(\)\[\]\Β·\.]+', '', formula_string)
hydrate_pos = formula_string.find('Β·')
if hydrate_pos >= 0:
formula_string = _expand_hydrate_(hydrate_pos, formula_string)
search_result = re.search(
r'(?:[\(\[]([A-Za-z0-9]+)[\)\]](\d*))',
formula_string)
if search_result is None:
return formula_string
this_start = search_result.start()
this_end = search_result.end()
this_string = search_result.group()
this_expansion_array = re.findall(
r'(?:[\(\[]([A-Za-z0-9]+)[\)\]](\d*))', this_string)
for a in this_expansion_array:
if a[1] == "":
a = (a[0], 1)
parenth_expanded = ""
multiplier = float(a[1])
element_array = re.findall('[A-Z][^A-Z]*', a[0])
for e in element_array:
occurance_array = re.findall('[0-9][^0-9]*', e)
if len(occurance_array) == 0:
occurance_array.append(1)
for o in occurance_array:
symbol = re.findall('[A-Z][a-z]*', e)
total_num = float(o) * multiplier
if total_num.is_integer():
total_num = int(total_num)
total_str = str(total_num)
if total_str == "1":
total_str = ""
new_string = symbol[0] + total_str
parenth_expanded += new_string
formula_string = formula_string[0:this_start] + \
parenth_expanded + formula_string[this_end:]
return _expand_formula_(formula_string)
|
Accounts for the many ways a user may write a formula string, and returns an expanded chemical formula string.
Assumptions:
-The chemical formula string supplied is well-written and has no hanging parentheses
-The number of repeats occurs after the elemental symbol or ) ] character EXCEPT in the case of a hydrate where it is assumed to be in front of the first element
-All hydrates explicitly use the Β· symbol
-Only (, ), [, ], ., Β· are "important" symbols to interpreting the string.
-IONS ARE NOT HANDLED
:param formula_string: a messy chemical formula string
:return: a non-empirical but expanded formula string
|
def _ffn_layer_multi_inputs(inputs_list,
                            hparams,
                            ffn_layer_type="dense",
                            name="ffn",
                            kernel_initializer=None,
                            bias_initializer=None,
                            activation=None,
                            pad_remover=None,
                            preprocess=False,
                            postprocess=False):
  """Implements a Feed-forward layer with multiple inputs, pad-removing, etc.
  Args:
    inputs_list: list of input tensors; the first entry is the "main" input
      whose hidden size and shape determine the output.
    hparams: hyper-parameters
    ffn_layer_type: dense / dense_dropconnect/ dense_relu_dense
    name: name
    kernel_initializer: kernel initializer
    bias_initializer: bias initializer
    activation: activation function
    pad_remover: pad remover
    preprocess: if preprocess the input (applied only when there is exactly
      one input)
    postprocess: if postprocess the output (residual/dropout per hparams)
  Returns:
    a tensor
  Raises:
    ValueError: Unknown ffn_layer type.
  """
  # need at least one input; the first one drives output size and shape
  num_inputs = len(inputs_list)
  assert num_inputs > 0
  # layer_preprocess is only meaningful for a single stream; with several
  # inputs the caller is expected to have preprocessed them already
  if preprocess and num_inputs == 1:
    inputs_list[0] = common_layers.layer_preprocess(inputs_list[0], hparams)
  if postprocess:
    # keep the untouched main input around for the residual connection
    original_inputs = inputs_list[0]
  # the output size is the hidden size of the main inputs
  main_input = inputs_list[0]
  original_shape = common_layers.shape_list(main_input)
  assert hparams.hidden_size == common_layers.shape_list(main_input)[-1]
  # all the inputs are in the same shape with main inputs
  for inputs in inputs_list:
    main_input.get_shape().assert_is_compatible_with(inputs.get_shape())
  def remove_pads(x):
    original_shape = common_layers.shape_list(x)
    # Collapse `x` across examples, and remove padding positions.
    x = tf.reshape(x, tf.concat([[-1], original_shape[2:]], axis=0))
    x = tf.expand_dims(pad_remover.remove(x), axis=0)
    return x
  if pad_remover:
    for i, inputs in enumerate(inputs_list):
      inputs_list[i] = remove_pads(inputs)
  # multiple inputs are fused by concatenation along the feature axis
  ffn_inputs = inputs_list[0]
  if len(inputs_list) != 1:
    ffn_inputs = tf.concat(inputs_list, axis=-1)
  if ffn_layer_type == "dense":
    output = common_layers.dense(
        ffn_inputs,
        hparams.hidden_size,
        name=name,
        activation=activation,
        use_bias=True,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer)
  elif ffn_layer_type == "dense_dropconnect":
    output = common_layers.dense_dropconnect(
        ffn_inputs,
        hparams.hidden_size,
        name=name,
        dropconnect_dropout=hparams.dropconnect_dropout,
        output_activation=activation)
    postprocess = False  # no dropout on the output unit
  elif ffn_layer_type == "dense_relu_dense":
    output = common_layers.dense_relu_dense(
        ffn_inputs,
        hparams.filter_size,
        hparams.hidden_size,
        name=name,
        dropout=hparams.relu_dropout,
        output_activation=activation,
    )
  else:
    raise ValueError("Unknown ffn_layer type: %s" % ffn_layer_type)
  if pad_remover:
    # Restore `output` to the original shape of `x`, including padding.
    output = tf.reshape(
        pad_remover.restore(tf.squeeze(output, axis=0)), original_shape)
  if postprocess:
    if num_inputs == 1:
      output = common_layers.layer_postprocess(original_inputs, output, hparams)
    else:  # only dropout (no residual)
      # strip the residual-add step ("a") from the postprocess sequence,
      # since a residual over concatenated inputs would be shape-mismatched
      hp = copy.copy(hparams)
      hp.layer_postprocess_sequence = hp.layer_postprocess_sequence.replace(
          "a", "")
      output = common_layers.layer_postprocess(original_inputs, output, hp)
  return output
|
Implements a Feed-forward layer with multiple inputs, pad-removing, etc.
Args:
inputs_list: list of input tensors
hparams: hyper-parameters
ffn_layer_type: dense / dense_dropconnect/ dense_relu_dense
name: name
kernel_initializer: kernel initializer
bias_initializer: bias initializer
activation: activation function
pad_remover: pad remover
preprocess: if preprocess the input
postprocess: if postprocess the output
Returns:
a tensor
Raises:
ValueError: Unknown ffn_layer type.
|
def remove_exit(self):
    """
    Remove the exit item if necessary. Used to make sure we only remove the exit item, not something else.
    Returns:
        bool: True if item needed to be removed, False otherwise.
    """
    # Only pop when the final item is *identically* the exit item.
    last_is_exit = bool(self.items) and self.items[-1] is self.exit_item
    if last_is_exit:
        self.items.pop()
    return last_is_exit
|
Remove the exit item if necessary. Used to make sure we only remove the exit item, not something else.
Returns:
bool: True if item needed to be removed, False otherwise.
|
def to_yaml(template, clean_up=False, long_form=False):
    """
    Assume the input is JSON and convert to YAML
    """
    parsed = load_json(template)
    cleaned = clean(parsed) if clean_up else parsed
    return dump_yaml(cleaned, clean_up, long_form)
|
Assume the input is JSON and convert to YAML
|
def recent_comments(context):
    """
    Dashboard widget for displaying recent comments.
    """
    num_latest = context["settings"].COMMENTS_NUM_LATEST
    newest_first = (ThreadedComment.objects.select_related("user")
                    .order_by("-id"))
    context["comments"] = newest_first[:num_latest]
    return context
|
Dashboard widget for displaying recent comments.
|
def write(self, frames):
    """
    Write the frames to the target HDF5 file, using the format used by
    ``pd.Panel.to_hdf``
    Parameters
    ----------
    frames : iter[(int, DataFrame)] or dict[int -> DataFrame]
        An iterable or other mapping of sid to the corresponding OHLCV
        pricing data.
    """
    store = HDFStore(self._path, 'w',
                     complevel=self._complevel, complib=self._complib)
    with store:
        pd.Panel.from_dict(dict(frames)).to_hdf(store, 'updates')
    # Stamp the format version directly onto the file root.
    with tables.open_file(self._path, mode='r+') as h5file:
        h5file.set_node_attr('/', 'version', 0)
|
Write the frames to the target HDF5 file, using the format used by
``pd.Panel.to_hdf``
Parameters
----------
frames : iter[(int, DataFrame)] or dict[int -> DataFrame]
An iterable or other mapping of sid to the corresponding OHLCV
pricing data.
|
def _refresh_html_home(self):
    """
    Refresh the self._parent.html['home'] object, which provides the
    status of whether zones are scheduled to start automatically
    (program_toggle).
    """
    req = self._parent.client.get(HOME_ENDPOINT)
    status = req.status_code
    if status == 403:
        # Session expired: re-authenticate, then retry via update().
        self._parent.login()
        self.update()
    elif status == 200:
        self._parent.html['home'] = generate_soup_html(req.text)
    else:
        req.raise_for_status()
|
Function to refresh the self._parent.html['home'] object
which provides the status if zones are scheduled to
start automatically (program_toggle).
|
def handle_message_registered(self, msg_data, host):
    """Processes messages that have been delivered by a registered client.
    Args:
        msg_data (dict): The unserialized packet data delivered from the
            listener; processed based on the packet's method.
        host (tuple): The (address, host) tuple of the source message.
    Returns:
        A response that will be sent back to the client via the listener,
        or None for acknowledgement-only methods.
    """
    response = None
    method = msg_data["method"]
    if method == "EVENT":
        cuuid, euuid = msg_data["cuuid"], msg_data["euuid"]
        logger.debug("<%s> <euuid:%s> Event message received" % (cuuid, euuid))
        response = self.event(cuuid,
                              host,
                              euuid,
                              msg_data["event_data"],
                              msg_data["timestamp"],
                              msg_data["priority"])
    elif method in ("OK EVENT", "OK NOTIFY"):
        cuuid, euuid = msg_data["cuuid"], msg_data["euuid"]
        if method == "OK EVENT":
            logger.debug("<%s> <euuid:%s> Event confirmation message received"
                         % (cuuid, euuid))
        else:
            logger.debug("<%s> <euuid:%s> Ok notify received" % (cuuid, euuid))
        # Both acknowledgements retire the pending event the same way.
        try:
            del self.event_uuids[euuid]
        except KeyError:
            logger.warning("<%s> <euuid:%s> Euuid does not exist in event "
                           "buffer. Key was removed before we could process "
                           "it." % (cuuid, euuid))
    return response
|
Processes messages that have been delivered by a registered client.
Args:
msg (string): The raw packet data delivered from the listener. This
data will be unserialized and then processed based on the packet's
method.
host (tuple): The (address, host) tuple of the source message.
Returns:
A response that will be sent back to the client via the listener.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.