def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'dialog_node') and self.dialog_node is not None:
_dict['dialog_node'] = self.dialog_node
if hasattr(self, 'description') and self.description is not None:
_dict['description'] = self.description
if hasattr(self, 'conditions') and self.conditions is not None:
_dict['conditions'] = self.conditions
if hasattr(self, 'parent') and self.parent is not None:
_dict['parent'] = self.parent
        if hasattr(self, 'previous_sibling') and self.previous_sibling is not None:
_dict['previous_sibling'] = self.previous_sibling
if hasattr(self, 'output') and self.output is not None:
_dict['output'] = self.output._to_dict()
if hasattr(self, 'context') and self.context is not None:
_dict['context'] = self.context
if hasattr(self, 'metadata') and self.metadata is not None:
_dict['metadata'] = self.metadata
if hasattr(self, 'next_step') and self.next_step is not None:
_dict['next_step'] = self.next_step._to_dict()
if hasattr(self, 'title') and self.title is not None:
_dict['title'] = self.title
if hasattr(self, 'node_type') and self.node_type is not None:
_dict['type'] = self.node_type
if hasattr(self, 'event_name') and self.event_name is not None:
_dict['event_name'] = self.event_name
if hasattr(self, 'variable') and self.variable is not None:
_dict['variable'] = self.variable
if hasattr(self, 'actions') and self.actions is not None:
_dict['actions'] = [x._to_dict() for x in self.actions]
if hasattr(self, 'digress_in') and self.digress_in is not None:
_dict['digress_in'] = self.digress_in
if hasattr(self, 'digress_out') and self.digress_out is not None:
_dict['digress_out'] = self.digress_out
        if hasattr(self, 'digress_out_slots') and self.digress_out_slots is not None:
_dict['digress_out_slots'] = self.digress_out_slots
if hasattr(self, 'user_label') and self.user_label is not None:
_dict['user_label'] = self.user_label
if hasattr(self, 'disabled') and self.disabled is not None:
_dict['disabled'] = self.disabled
if hasattr(self, 'created') and self.created is not None:
_dict['created'] = datetime_to_string(self.created)
if hasattr(self, 'updated') and self.updated is not None:
_dict['updated'] = datetime_to_string(self.updated)
return _dict
|
Return a json dictionary representing this model.
|
def aggregate_weights(weights, drop_date=False):
"""
Transforms list of tuples of weights into pandas.DataFrame of weights.
Parameters:
-----------
weights: list
A list of tuples consisting of the generic instrument name,
the tradeable contract as a string, the weight on this contract as a
float and the date as a pandas.Timestamp.
drop_date: boolean
Whether to drop the date from the multiIndex
Returns
-------
A pandas.DataFrame of loadings of generic contracts on tradeable
instruments for a given date. The columns are generic instrument names and
the index is strings representing instrument names.
"""
dwts = pd.DataFrame(weights,
columns=["generic", "contract", "weight", "date"])
dwts = dwts.pivot_table(index=['date', 'contract'],
columns=['generic'], values='weight', fill_value=0)
dwts = dwts.astype(float)
dwts = dwts.sort_index()
if drop_date:
dwts.index = dwts.index.levels[-1]
return dwts
|
Transforms list of tuples of weights into pandas.DataFrame of weights.
Parameters:
-----------
weights: list
A list of tuples consisting of the generic instrument name,
the tradeable contract as a string, the weight on this contract as a
float and the date as a pandas.Timestamp.
drop_date: boolean
Whether to drop the date from the multiIndex
Returns
-------
A pandas.DataFrame of loadings of generic contracts on tradeable
instruments for a given date. The columns are generic instrument names and
the index is strings representing instrument names.
|
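A quick sketch of how the row tuples map to the pivoted frame; the generic/contract names below are hypothetical, and aggregate_weights is assumed importable from the module above.
import pandas as pd
# hypothetical roll weights for one date: CL1 splits 50/50 across two contracts
weights = [
    ("CL1", "CLZ16", 0.5, pd.Timestamp("2016-10-20")),
    ("CL1", "CLF17", 0.5, pd.Timestamp("2016-10-20")),
]
wts = aggregate_weights(weights)
# columns: ["CL1"]; index: MultiIndex of (date, contract); missing combos filled with 0
print(wts)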
def init(names, host=None, saltcloud_mode=False, quiet=False, **kwargs):
'''
Initialize a new container
.. code-block:: bash
salt-run lxc.init name host=minion_id [cpuset=cgroups_cpuset] \\
[cpushare=cgroups_cpushare] [memory=cgroups_memory] \\
[template=lxc_template_name] [clone=original name] \\
        [profile=lxc_profile] [network_profile=network_profile] \\
[nic=network_profile] [nic_opts=nic_opts] \\
[start=(true|false)] [seed=(true|false)] \\
[install=(true|false)] [config=minion_config] \\
[snapshot=(true|false)]
names
Name of the containers, supports a single name or a comma delimited
list of names.
host
Minion on which to initialize the container **(required)**
path
path to the container parent
default: /var/lib/lxc (system default)
.. versionadded:: 2015.8.0
saltcloud_mode
init the container with the saltcloud opts format instead
See lxc.init_interface module documentation
cpuset
cgroups cpuset.
cpushare
cgroups cpu shares.
memory
cgroups memory limit, in MB
.. versionchanged:: 2015.5.0
If no value is passed, no limit is set. In earlier Salt versions,
not passing this value causes a 1024MB memory limit to be set, and
it was necessary to pass ``memory=0`` to set no limit.
template
Name of LXC template on which to base this container
clone
Clone this container from an existing container
profile
A LXC profile (defined in config or pillar).
network_profile
Network profile to use for the container
.. versionadded:: 2015.5.2
nic
.. deprecated:: 2015.5.0
Use ``network_profile`` instead
nic_opts
Extra options for network interfaces. E.g.:
``{"eth0": {"mac": "aa:bb:cc:dd:ee:ff", "ipv4": "10.1.1.1", "ipv6": "2001:db8::ff00:42:8329"}}``
start
Start the newly created container.
seed
Seed the container with the minion config and autosign its key.
Default: true
install
If salt-minion is not already installed, install it. Default: true
config
Optional config parameters. By default, the id is set to
the name of the container.
'''
path = kwargs.get('path', None)
if quiet:
log.warning("'quiet' argument is being deprecated."
' Please migrate to --quiet')
ret = {'comment': '', 'result': True}
if host is None:
# TODO: Support selection of host based on available memory/cpu/etc.
ret['comment'] = 'A host must be provided'
ret['result'] = False
return ret
if isinstance(names, six.string_types):
names = names.split(',')
if not isinstance(names, list):
ret['comment'] = 'Container names are not formed as a list'
ret['result'] = False
return ret
# check that the host is alive
client = salt.client.get_local_client(__opts__['conf_file'])
alive = False
try:
if client.cmd(host, 'test.ping', timeout=20).get(host, None):
alive = True
except (TypeError, KeyError):
pass
if not alive:
ret['comment'] = 'Host {0} is not reachable'.format(host)
ret['result'] = False
return ret
log.info('Searching for LXC Hosts')
data = __salt__['lxc.list'](host, quiet=True, path=path)
for host, containers in six.iteritems(data):
for name in names:
if name in sum(six.itervalues(containers), []):
log.info(
'Container \'%s\' already exists on host \'%s\', init '
'can be a NO-OP', name, host
)
if host not in data:
ret['comment'] = 'Host \'{0}\' was not found'.format(host)
ret['result'] = False
return ret
kw = salt.utils.args.clean_kwargs(**kwargs)
pub_key = kw.get('pub_key', None)
priv_key = kw.get('priv_key', None)
explicit_auth = pub_key and priv_key
approve_key = kw.get('approve_key', True)
seeds = {}
seed_arg = kwargs.get('seed', True)
if approve_key and not explicit_auth:
skey = salt.key.Key(__opts__)
all_minions = skey.all_keys().get('minions', [])
for name in names:
seed = seed_arg
if name in all_minions:
try:
if client.cmd(name, 'test.ping', timeout=20).get(name, None):
seed = False
except (TypeError, KeyError):
pass
seeds[name] = seed
kv = salt.utils.virt.VirtKey(host, name, __opts__)
if kv.authorize():
log.info('Container key will be preauthorized')
else:
ret['comment'] = 'Container key preauthorization failed'
ret['result'] = False
return ret
log.info('Creating container(s) \'%s\' on host \'%s\'', names, host)
cmds = []
for name in names:
args = [name]
kw = salt.utils.args.clean_kwargs(**kwargs)
if saltcloud_mode:
kw = copy.deepcopy(kw)
kw['name'] = name
saved_kwargs = kw
kw = client.cmd(
host, 'lxc.cloud_init_interface', args + [kw],
tgt_type='list', timeout=600).get(host, {})
kw.update(saved_kwargs)
name = kw.pop('name', name)
# be sure not to seed an already seeded host
kw['seed'] = seeds.get(name, seed_arg)
if not kw['seed']:
kw.pop('seed_cmd', '')
cmds.append(
(host,
name,
client.cmd_iter(host, 'lxc.init', args, kwarg=kw, timeout=600)))
done = ret.setdefault('done', [])
errors = ret.setdefault('errors', _OrderedDict())
for ix, acmd in enumerate(cmds):
hst, container_name, cmd = acmd
containers = ret.setdefault(hst, [])
herrs = errors.setdefault(hst, _OrderedDict())
serrs = herrs.setdefault(container_name, [])
sub_ret = next(cmd)
error = None
        if isinstance(sub_ret, dict) and hst in sub_ret:
            j_ret = sub_ret[hst]
container = j_ret.get('ret', {})
if container and isinstance(container, dict):
if not container.get('result', False):
error = container
else:
error = 'Invalid return for {0}: {1} {2}'.format(
container_name, container, sub_ret)
else:
error = sub_ret
if not error:
error = 'unknown error (no return)'
if error:
ret['result'] = False
serrs.append(error)
else:
            container['container_name'] = container_name
containers.append(container)
done.append(container)
# marking ping status as True only and only if we have at
# least provisioned one container
ret['ping_status'] = bool(len(done))
# for all provisioned containers, last job is to verify
# - the key status
# - we can reach them
for container in done:
# explicitly check and update
# the minion key/pair stored on the master
container_name = container['container_name']
key = os.path.join(__opts__['pki_dir'], 'minions', container_name)
if explicit_auth:
fcontent = ''
if os.path.exists(key):
with salt.utils.files.fopen(key) as fic:
fcontent = salt.utils.stringutils.to_unicode(fic.read()).strip()
pub_key = salt.utils.stringutils.to_unicode(pub_key)
if pub_key.strip() != fcontent:
with salt.utils.files.fopen(key, 'w') as fic:
fic.write(salt.utils.stringutils.to_str(pub_key))
fic.flush()
mid = j_ret.get('mid', None)
if not mid:
continue
def testping(**kw):
mid_ = kw['mid']
ping = client.cmd(mid_, 'test.ping', timeout=20)
time.sleep(1)
if ping:
return 'OK'
raise Exception('Unresponsive {0}'.format(mid_))
ping = salt.utils.cloud.wait_for_fun(testping, timeout=21, mid=mid)
if ping != 'OK':
ret['ping_status'] = False
ret['result'] = False
# if no lxc detected as touched (either inited or verified)
# we result to False
if not done:
ret['result'] = False
if not quiet:
__jid_event__.fire_event({'message': ret}, 'progress')
return ret
|
Initialize a new container
.. code-block:: bash
salt-run lxc.init name host=minion_id [cpuset=cgroups_cpuset] \\
[cpushare=cgroups_cpushare] [memory=cgroups_memory] \\
[template=lxc_template_name] [clone=original name] \\
    [profile=lxc_profile] [network_profile=network_profile] \\
[nic=network_profile] [nic_opts=nic_opts] \\
[start=(true|false)] [seed=(true|false)] \\
[install=(true|false)] [config=minion_config] \\
[snapshot=(true|false)]
names
Name of the containers, supports a single name or a comma delimited
list of names.
host
Minion on which to initialize the container **(required)**
path
path to the container parent
default: /var/lib/lxc (system default)
.. versionadded:: 2015.8.0
saltcloud_mode
init the container with the saltcloud opts format instead
See lxc.init_interface module documentation
cpuset
cgroups cpuset.
cpushare
cgroups cpu shares.
memory
cgroups memory limit, in MB
.. versionchanged:: 2015.5.0
If no value is passed, no limit is set. In earlier Salt versions,
not passing this value causes a 1024MB memory limit to be set, and
it was necessary to pass ``memory=0`` to set no limit.
template
Name of LXC template on which to base this container
clone
Clone this container from an existing container
profile
A LXC profile (defined in config or pillar).
network_profile
Network profile to use for the container
.. versionadded:: 2015.5.2
nic
.. deprecated:: 2015.5.0
Use ``network_profile`` instead
nic_opts
Extra options for network interfaces. E.g.:
``{"eth0": {"mac": "aa:bb:cc:dd:ee:ff", "ipv4": "10.1.1.1", "ipv6": "2001:db8::ff00:42:8329"}}``
start
Start the newly created container.
seed
Seed the container with the minion config and autosign its key.
Default: true
install
If salt-minion is not already installed, install it. Default: true
config
Optional config parameters. By default, the id is set to
the name of the container.
|
def sources_to_nr_vars(sources):
"""
Converts a source type to number of sources mapping into
a source numbering variable to number of sources mapping.
If, for example, we have 'point', 'gaussian' and 'sersic'
source types, then passing the following dict as an argument
sources_to_nr_vars({'point':10, 'gaussian': 20})
will return an OrderedDict
{'npsrc': 10, 'ngsrc': 20, 'nssrc': 0 }
"""
sources = default_sources(**sources)
try:
return OrderedDict((SOURCE_VAR_TYPES[name], nr)
for name, nr in sources.iteritems())
except KeyError as e:
        raise KeyError((
            "No source type '%s' is "
            'registered. Valid source types '
            'are %s') % (e, SOURCE_VAR_TYPES.keys()))
|
Converts a source type to number of sources mapping into
a source numbering variable to number of sources mapping.
If, for example, we have 'point', 'gaussian' and 'sersic'
source types, then passing the following dict as an argument
sources_to_nr_vars({'point':10, 'gaussian': 20})
will return an OrderedDict
{'npsrc': 10, 'ngsrc': 20, 'nssrc': 0 }
|
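A minimal sketch of the same mapping logic, with hypothetical stand-ins for SOURCE_VAR_TYPES and default_sources (neither is shown in the snippet above):
from collections import OrderedDict
# hypothetical registry and defaulting helper mirroring the docstring example
SOURCE_VAR_TYPES = OrderedDict([('point', 'npsrc'),
                                ('gaussian', 'ngsrc'),
                                ('sersic', 'nssrc')])
def default_sources(**kwargs):
    # unspecified source types default to zero counts
    sources = OrderedDict((name, 0) for name in SOURCE_VAR_TYPES)
    sources.update(kwargs)
    return sources
sources = default_sources(point=10, gaussian=20)
print(OrderedDict((SOURCE_VAR_TYPES[name], nr) for name, nr in sources.items()))
# OrderedDict([('npsrc', 10), ('ngsrc', 20), ('nssrc', 0)])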
def connect_from(self, vertex, weight=1):
"""
Connect another vertex to this one.
Args:
vertex (Vertex): vertex to connect from.
weight (int): weight of the edge.
Returns:
Edge: the newly created edge.
"""
for edge in self.edges_in:
if vertex == edge.vertex_out:
return edge
return Edge(vertex, self, weight)
|
Connect another vertex to this one.
Args:
vertex (Vertex): vertex to connect from.
weight (int): weight of the edge.
Returns:
Edge: the newly created edge.
|
def f_measure(precision, recall, beta=1.0):
"""Compute the f-measure from precision and recall scores.
Parameters
----------
precision : float in (0, 1]
Precision
recall : float in (0, 1]
Recall
beta : float > 0
Weighting factor for f-measure
(Default value = 1.0)
Returns
-------
f_measure : float
The weighted f-measure
"""
if precision == 0 and recall == 0:
return 0.0
return (1 + beta**2)*precision*recall/((beta**2)*precision + recall)
|
Compute the f-measure from precision and recall scores.
Parameters
----------
precision : float in (0, 1]
Precision
recall : float in (0, 1]
Recall
beta : float > 0
Weighting factor for f-measure
(Default value = 1.0)
Returns
-------
f_measure : float
The weighted f-measure
|
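A worked example with beta=1, where the formula reduces to the harmonic mean of precision and recall:
precision, recall, beta = 0.8, 0.6, 1.0
f1 = (1 + beta**2) * precision * recall / ((beta**2) * precision + recall)
# (2 * 0.8 * 0.6) / (0.8 + 0.6) = 0.96 / 1.4
print(round(f1, 4))  # 0.6857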
def get_distinct_values_from_cols(self, l_col_list):
"""
returns the list of distinct combinations in a dataset
based on the columns in the list. Note that this is
currently implemented as MAX permutations of the combo
    so it is not guaranteed to have values in each case.
"""
uniq_vals = []
for l_col_name in l_col_list:
#print('col_name: ' + l_col_name)
uniq_vals.append(set(self.get_col_data_by_name(l_col_name)))
#print(' unique values = ', uniq_vals)
#print(' unique values[0] = ', uniq_vals[0])
#print(' unique values[1] = ', uniq_vals[1])
if len(l_col_list) == 0:
return []
    elif len(l_col_list) == 1:
        return sorted(uniq_vals[0])
elif len(l_col_list) == 2:
res = []
res = [(a, b) for a in uniq_vals[0] for b in uniq_vals[1]]
return res
    else:
        print("TODO ")
        return -44
|
returns the list of distinct combinations in a dataset
based on the columns in the list. Note that this is
currently implemented as MAX permutations of the combo
so it is not guaranteed to have values in each case.
|
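The two-column case is a plain Cartesian product of the per-column value sets, which is why the docstring warns that not every combination need occur in the data; a small illustration:
uniq_vals = [{'red', 'blue'}, {1, 2}]
combos = [(a, b) for a in uniq_vals[0] for b in uniq_vals[1]]
# 2 x 2 = 4 pairs, e.g. ('red', 1), even if the dataset never contains that row
print(len(combos))  # 4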
def _create_hosting_device_templates_from_config(self):
"""To be called late during plugin initialization so that any hosting
    device templates defined in the config file are properly inserted in
the DB.
"""
hdt_dict = config.get_specific_config('cisco_hosting_device_template')
attr_info = ciscohostingdevicemanager.RESOURCE_ATTRIBUTE_MAP[
ciscohostingdevicemanager.DEVICE_TEMPLATES]
adm_context = bc.context.get_admin_context()
for hdt_uuid, kv_dict in hdt_dict.items():
# ensure hdt_uuid is properly formatted
hdt_uuid = config.uuidify(hdt_uuid)
try:
self.get_hosting_device_template(adm_context, hdt_uuid)
is_create = False
except ciscohostingdevicemanager.HostingDeviceTemplateNotFound:
is_create = True
kv_dict['id'] = hdt_uuid
kv_dict['tenant_id'] = self.l3_tenant_id()
config.verify_resource_dict(kv_dict, True, attr_info)
hdt = {ciscohostingdevicemanager.DEVICE_TEMPLATE: kv_dict}
try:
if is_create:
self.create_hosting_device_template(adm_context, hdt)
else:
self.update_hosting_device_template(adm_context,
kv_dict['id'], hdt)
except n_exc.NeutronException:
with excutils.save_and_reraise_exception():
LOG.error('Invalid hosting device template definition '
'in configuration file for template = %s',
hdt_uuid)
|
To be called late during plugin initialization so that any hosting
device templates defined in the config file are properly inserted in
the DB.
|
def send_response(self, response):
"""Send a unicode object as reply to the most recently-issued command
"""
response_bytes = response.encode(config.CODEC)
        log.debug("About to send response: %r", response_bytes)
self.socket.send(response_bytes)
|
Send a unicode object as reply to the most recently-issued command
|
def transform_cb(self, setting, value):
"""Handle callback related to changes in transformations."""
self.make_callback('transform')
# whence=0 because need to calculate new extents for proper
# cutout for rotation (TODO: always make extents consider
# room for rotation)
whence = 0
self.redraw(whence=whence)
|
Handle callback related to changes in transformations.
|
def solution(self, x0, y0):
""" Create a solution function ``y(x)`` such that ``y(x0) = y0``.
A list of solution values ``[y(x0), y(x1) ...]`` is returned if the
function is called with a list ``[x0, x1 ...]`` of ``x`` values.
"""
def soln(x):
if numpy.size(x) > 1:
x = [soln.x] + list(x)
ans = self(soln.y, interval=x)
soln.x = x[-1]
soln.y = ans[-1]
return ans
else:
soln.y = self(soln.y, interval=(soln.x, x))
soln.x = x
return soln.y
soln.x = x0
soln.y = y0
return soln
|
Create a solution function ``y(x)`` such that ``y(x0) = y0``.
A list of solution values ``[y(x0), y(x1) ...]`` is returned if the
function is called with a list ``[x0, x1 ...]`` of ``x`` values.
|
def cmd_create(self, name, auto=False):
"""Create a new migration."""
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
if auto:
auto = self.models
router.create(name, auto=auto)
|
Create a new migration.
|
def astra_parallel_3d_geom_to_vec(geometry):
"""Create vectors for ASTRA projection geometries from ODL geometry.
The 3D vectors are used to create an ASTRA projection geometry for
parallel beam geometries, see ``'parallel3d_vec'`` in the
`ASTRA projection geometry documentation`_.
Each row of the returned vectors corresponds to a single projection
and consists of ::
(rayX, rayY, rayZ, dX, dY, dZ, uX, uY, uZ, vX, vY, vZ)
with
- ``ray``: the ray direction
- ``d`` : the center of the detector
- ``u`` : the vector from detector pixel ``(0,0)`` to ``(0,1)``
- ``v`` : the vector from detector pixel ``(0,0)`` to ``(1,0)``
Parameters
----------
geometry : `Geometry`
ODL projection geometry from which to create the ASTRA geometry.
Returns
-------
vectors : `numpy.ndarray`
Array of shape ``(num_angles, 12)`` containing the vectors.
References
----------
.. _ASTRA projection geometry documentation:
http://www.astra-toolbox.com/docs/geom3d.html#projection-geometries
"""
angles = geometry.angles
mid_pt = geometry.det_params.mid_pt
vectors = np.zeros((angles.shape[-1], 12))
# Ray direction = -(detector-to-source normal vector)
vectors[:, 0:3] = -geometry.det_to_src(angles, mid_pt)
# Center of the detector in 3D space
vectors[:, 3:6] = geometry.det_point_position(angles, mid_pt)
# Vectors from detector pixel (0, 0) to (1, 0) and (0, 0) to (0, 1)
# `det_axes` gives shape (N, 2, 3), swap to get (2, N, 3)
det_axes = moveaxis(geometry.det_axes(angles), -2, 0)
px_sizes = geometry.det_partition.cell_sides
# Swap detector axes to have better memory layout in projection data.
# ASTRA produces `(v, theta, u)` layout, and to map to ODL layout
# `(theta, u, v)` a complete roll must be performed, which is the
    # worst case (completely discontiguous).
# Instead we swap `u` and `v`, resulting in the effective ASTRA result
# `(u, theta, v)`. Here we only need to swap axes 0 and 1, which
# keeps at least contiguous blocks in `v`.
vectors[:, 9:12] = det_axes[0] * px_sizes[0]
vectors[:, 6:9] = det_axes[1] * px_sizes[1]
# ASTRA has (z, y, x) axis convention, in contrast to (x, y, z) in ODL,
# so we need to adapt to this by changing the order.
new_ind = []
for i in range(4):
new_ind += [2 + 3 * i, 1 + 3 * i, 0 + 3 * i]
vectors = vectors[:, new_ind]
return vectors
|
Create vectors for ASTRA projection geometries from ODL geometry.
The 3D vectors are used to create an ASTRA projection geometry for
parallel beam geometries, see ``'parallel3d_vec'`` in the
`ASTRA projection geometry documentation`_.
Each row of the returned vectors corresponds to a single projection
and consists of ::
(rayX, rayY, rayZ, dX, dY, dZ, uX, uY, uZ, vX, vY, vZ)
with
- ``ray``: the ray direction
- ``d`` : the center of the detector
- ``u`` : the vector from detector pixel ``(0,0)`` to ``(0,1)``
- ``v`` : the vector from detector pixel ``(0,0)`` to ``(1,0)``
Parameters
----------
geometry : `Geometry`
ODL projection geometry from which to create the ASTRA geometry.
Returns
-------
vectors : `numpy.ndarray`
Array of shape ``(num_angles, 12)`` containing the vectors.
References
----------
.. _ASTRA projection geometry documentation:
http://www.astra-toolbox.com/docs/geom3d.html#projection-geometries
|
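The final (x, y, z) to (z, y, x) reindexing can be checked in isolation; a toy row makes the per-triple flip visible:
import numpy as np
vectors = np.arange(12, dtype=float).reshape(1, 12)  # one fake row of 4 triples
new_ind = []
for i in range(4):
    new_ind += [2 + 3 * i, 1 + 3 * i, 0 + 3 * i]
# each (x, y, z) triple is reversed to (z, y, x)
print(vectors[:, new_ind])  # [[ 2.  1.  0.  5.  4.  3.  8.  7.  6. 11. 10.  9.]]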
def events_system(self):
"""
Get all system events. Uses GET to /events/system interface.
:Returns: (list) Events
"""
# TODO Add paging to this
response = self._get(url.events_system)
self._check_response(response, 200)
return self._create_response(response).get("events")
|
Get all system events. Uses GET to /events/system interface.
:Returns: (list) Events
|
def translate_changes(initial_change):
"""Translate rope.base.change.Change instances to dictionaries.
See Refactor.get_changes for an explanation of the resulting
dictionary.
"""
agenda = [initial_change]
result = []
while agenda:
change = agenda.pop(0)
if isinstance(change, rope_change.ChangeSet):
agenda.extend(change.changes)
elif isinstance(change, rope_change.ChangeContents):
result.append({'action': 'change',
'file': change.resource.real_path,
'contents': change.new_contents,
'diff': change.get_description()})
elif isinstance(change, rope_change.CreateFile):
result.append({'action': 'create',
'type': 'file',
'file': change.resource.real_path})
elif isinstance(change, rope_change.CreateFolder):
result.append({'action': 'create',
'type': 'directory',
'path': change.resource.real_path})
elif isinstance(change, rope_change.MoveResource):
result.append({'action': 'move',
'type': ('directory'
if change.new_resource.is_folder()
else 'file'),
'source': change.resource.real_path,
'destination': change.new_resource.real_path})
elif isinstance(change, rope_change.RemoveResource):
if change.resource.is_folder():
result.append({'action': 'delete',
'type': 'directory',
'path': change.resource.real_path})
else:
result.append({'action': 'delete',
'type': 'file',
'file': change.resource.real_path})
return result
|
Translate rope.base.change.Change instances to dictionaries.
See Refactor.get_changes for an explanation of the resulting
dictionary.
|
def object_as_dict(obj):
"""Turn an SQLAlchemy model into a dict of field names and values.
Based on https://stackoverflow.com/a/37350445/1579058
"""
return {c.key: getattr(obj, c.key)
for c in inspect(obj).mapper.column_attrs}
|
Turn an SQLAlchemy model into a dict of field names and values.
Based on https://stackoverflow.com/a/37350445/1579058
|
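A self-contained sketch of the same pattern against a throwaway model (the model name and fields are hypothetical; assumes SQLAlchemy 1.4+):
from sqlalchemy import Column, Integer, String, inspect
from sqlalchemy.orm import declarative_base
Base = declarative_base()
class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)
user = User(id=1, name='ada')
# column_attrs walks only the mapped columns, so unmapped attributes are skipped
print({c.key: getattr(user, c.key) for c in inspect(user).mapper.column_attrs})
# {'id': 1, 'name': 'ada'}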
def p_arr_decl_initialized(p):
""" var_arr_decl : DIM idlist LP bound_list RP typedef RIGHTARROW const_vector
| DIM idlist LP bound_list RP typedef EQ const_vector
"""
def check_bound(boundlist, remaining):
""" Checks if constant vector bounds matches the array one
"""
lineno = p.lineno(8)
if not boundlist: # Returns on empty list
if not isinstance(remaining, list):
return True # It's OK :-)
            syntax_error(lineno, 'Unexpected extra vector dimensions. It should be %i' % len(remaining))
            return False
if not isinstance(remaining, list):
syntax_error(lineno, 'Mismatched vector size. Missing %i extra dimension(s)' % len(boundlist))
return False
if len(remaining) != boundlist[0].count:
syntax_error(lineno, 'Mismatched vector size. Expected %i elements, got %i.' % (boundlist[0].count,
len(remaining)))
return False # It's wrong. :-(
for row in remaining:
if not check_bound(boundlist[1:], row):
return False
return True
if p[8] is None:
p[0] = None
return
if check_bound(p[4].children, p[8]):
id_, lineno = p[2][0]
SYMBOL_TABLE.declare_array(id_, lineno, p[6], p[4], default_value=p[8])
p[0] = None
|
var_arr_decl : DIM idlist LP bound_list RP typedef RIGHTARROW const_vector
| DIM idlist LP bound_list RP typedef EQ const_vector
|
def remove(self, nodes):
"""Remove a node and its edges."""
nodes = nodes if isinstance(nodes, list) else [nodes]
for node in nodes:
k = self.id(node)
self.edges = list(filter(lambda e: e[0] != k and e[1] != k, self.edges))
del self.nodes[k]
|
Remove a node and its edges.
|
def indexXY(self, index):
"""Returns the top left coordinates of the item for the given index
:param index: index for the item
:type index: :qtdoc:`QModelIndex`
:returns: (int, int) -- (x, y) view coordinates of item
"""
rect = self.visualRect(index)
return rect.x(), rect.y()
|
Returns the top left coordinates of the item for the given index
:param index: index for the item
:type index: :qtdoc:`QModelIndex`
:returns: (int, int) -- (x, y) view coordinates of item
|
def is_contiguous(self):
"""Return offset and size of contiguous data, else None."""
if self._keyframe is None:
raise RuntimeError('keyframe not set')
if self._keyframe.is_contiguous:
return self._offsetscounts[0][0], self._keyframe.is_contiguous[1]
return None
|
Return offset and size of contiguous data, else None.
|
def __serve_forever(self):
"""Main client loop."""
# No need to update the server list
# It's done by the GlancesAutoDiscoverListener class (autodiscover.py)
        # Or defined statically in the configuration file (module static_list.py)
# For each server in the list, grab elementary stats (CPU, LOAD, MEM, OS...)
thread_list = {}
        while not self.screen.is_end:
logger.debug("Iter through the following server list: {}".format(self.get_servers_list()))
for v in self.get_servers_list():
key = v["key"]
thread = thread_list.get(key, None)
                if thread is None or not thread.is_alive():
thread = threading.Thread(target=self.__update_stats, args=[v])
thread_list[key] = thread
thread.start()
# Update the screen (list or Glances client)
if self.screen.active_server is None:
# Display the Glances browser
self.screen.update(self.get_servers_list())
else:
# Display the active server
self.__display_server(self.get_servers_list()[self.screen.active_server])
# exit key pressed
for thread in thread_list.values():
thread.join()
|
Main client loop.
|
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: AvailablePhoneNumberCountryContext for this AvailablePhoneNumberCountryInstance
:rtype: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryContext
"""
if self._context is None:
self._context = AvailablePhoneNumberCountryContext(
self._version,
account_sid=self._solution['account_sid'],
country_code=self._solution['country_code'],
)
return self._context
|
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: AvailablePhoneNumberCountryContext for this AvailablePhoneNumberCountryInstance
:rtype: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryContext
|
def submit_all(self):
"""
:returns: an IterResult object
"""
for args in self.task_args:
self.submit(*args)
return self.get_results()
|
:returns: an IterResult object
|
def fast_kde(x, y, gridsize=(200,200), extents=None, nocorrelation=False, weights=None):
"""
Performs a gaussian kernel density estimate over a regular grid using a
convolution of the gaussian kernel with a 2D histogram of the data.
This function is typically several orders of magnitude faster than
scipy.stats.kde.gaussian_kde for large (>1e7) numbers of points and
produces an essentially identical result.
Input:
x: The x-coords of the input data points
y: The y-coords of the input data points
gridsize: (default: 200x200) A (nx,ny) tuple of the size of the output
grid
extents: (default: extent of input data) A (xmin, xmax, ymin, ymax)
tuple of the extents of output grid
nocorrelation: (default: False) If True, the correlation between the
        x and y coords will be ignored when performing the KDE.
weights: (default: None) An array of the same shape as x & y that
weighs each sample (x_i, y_i) by each value in weights (w_i).
Defaults to an array of ones the same size as x & y.
Output:
A gridded 2D kernel density estimate of the input points.
"""
#---- Setup --------------------------------------------------------------
x, y = np.asarray(x), np.asarray(y)
x, y = np.squeeze(x), np.squeeze(y)
if x.size != y.size:
raise ValueError('Input x & y arrays must be the same size!')
nx, ny = gridsize
n = x.size
if weights is None:
# Default: Weight all points equally
weights = np.ones(n)
else:
weights = np.squeeze(np.asarray(weights))
if weights.size != x.size:
raise ValueError('Input weights must be an array of the same size'
' as input x & y arrays!')
# Default extents are the extent of the data
if extents is None:
xmin, xmax = x.min(), x.max()
ymin, ymax = y.min(), y.max()
else:
xmin, xmax, ymin, ymax = list(map(float, extents))
dx = (xmax - xmin) / (nx - 1)
dy = (ymax - ymin) / (ny - 1)
#---- Preliminary Calculations -------------------------------------------
# First convert x & y over to pixel coordinates
# (Avoiding np.digitize due to excessive memory usage in numpy < v1.5!)
# http://stackoverflow.com/q/8805601/
xyi = np.vstack((x,y)).T
xyi -= [xmin, ymin]
xyi /= [dx, dy]
xyi = np.floor(xyi, xyi).T
# Next, make a 2D histogram of x & y
# Avoiding np.histogram2d due to excessive memory usage with many points
# http://stackoverflow.com/q/8805601/
grid = sp.sparse.coo_matrix((weights, xyi), shape=(nx, ny)).toarray()
# Calculate the covariance matrix (in pixel coords)
cov = np.cov(xyi)
if nocorrelation:
cov[1,0] = 0
cov[0,1] = 0
# Scaling factor for bandwidth
scotts_factor = np.power(n, -1.0 / 6) # For 2D
#---- Make the gaussian kernel -------------------------------------------
# First, determine how big the kernel needs to be
std_devs = np.diag(np.sqrt(cov))
    kern_nx, kern_ny = np.round(scotts_factor * 2 * np.pi * std_devs).astype(int)
# Determine the bandwidth to use for the gaussian kernel
inv_cov = np.linalg.inv(cov * scotts_factor**2)
# x & y (pixel) coords of the kernel grid, with <x,y> = <0,0> in center
    xx = np.arange(kern_nx, dtype=float) - kern_nx / 2.0
    yy = np.arange(kern_ny, dtype=float) - kern_ny / 2.0
xx, yy = np.meshgrid(xx, yy)
# Then evaluate the gaussian function on the kernel grid
kernel = np.vstack((xx.flatten(), yy.flatten()))
kernel = np.dot(inv_cov, kernel) * kernel
kernel = np.sum(kernel, axis=0) / 2.0
kernel = np.exp(-kernel)
kernel = kernel.reshape((kern_ny, kern_nx))
#---- Produce the kernel density estimate --------------------------------
# Convolve the gaussian kernel with the 2D histogram, producing a gaussian
# kernel density estimate on a regular grid
grid = sp.signal.convolve2d(grid, kernel, mode='same', boundary='fill').T
### ADW: Commented out for
### # Normalization factor to divide result by so that units are in the same
### # units as scipy.stats.kde.gaussian_kde's output.
### norm_factor = 2 * np.pi * cov * scotts_factor**2
### norm_factor = np.linalg.det(norm_factor)
### norm_factor = n * dx * dy * np.sqrt(norm_factor)
###
### # Normalize the result
### grid /= norm_factor
return grid
|
Performs a gaussian kernel density estimate over a regular grid using a
convolution of the gaussian kernel with a 2D histogram of the data.
This function is typically several orders of magnitude faster than
scipy.stats.kde.gaussian_kde for large (>1e7) numbers of points and
produces an essentially identical result.
Input:
x: The x-coords of the input data points
y: The y-coords of the input data points
gridsize: (default: 200x200) A (nx,ny) tuple of the size of the output
grid
extents: (default: extent of input data) A (xmin, xmax, ymin, ymax)
tuple of the extents of output grid
nocorrelation: (default: False) If True, the correlation between the
x and y coords will be ignored when performing the KDE.
weights: (default: None) An array of the same shape as x & y that
weighs each sample (x_i, y_i) by each value in weights (w_i).
Defaults to an array of ones the same size as x & y.
Output:
A gridded 2D kernel density estimate of the input points.
|
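Typical usage, assuming fast_kde above is importable alongside numpy:
import numpy as np
rng = np.random.RandomState(0)
x = rng.randn(100000)
y = 0.5 * x + rng.randn(100000)
# density is a regular grid spanning the data extents
density = fast_kde(x, y, gridsize=(128, 128))
print(density.shape)  # (128, 128)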
def zone(self) -> Optional[str]:
"""Zone the device is assigned to."""
if self._device_category == DC_BASEUNIT:
return None
return '{:02x}-{:02x}'.format(self._group_number, self._unit_number)
|
Zone the device is assigned to.
|
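The zone string is just the group and unit numbers rendered as zero-padded hex, e.g.:
# group 3, unit 10 -> '03-0a'
print('{:02x}-{:02x}'.format(3, 10))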
def compiled_quil(self):
"""
If the Quil program associated with the Job was compiled (e.g., to translate it to the
QPU's natural gateset) return this compiled program.
:rtype: Optional[Program]
"""
prog = self._raw.get("program", {}).get("compiled-quil", None)
if prog is not None:
return parse_program(prog)
else:
# if we failed too early to even get a "compiled-quil" field,
# then alert the user to that problem instead
if self._raw['status'] == 'ERROR':
return self.result()
|
If the Quil program associated with the Job was compiled (e.g., to translate it to the
QPU's natural gateset) return this compiled program.
:rtype: Optional[Program]
|
def line(self, lines):
"""Creates a POLYLINE shape.
Lines is a collection of lines, each made up of a list of xy values."""
shapeType = POLYLINE
self._shapeparts(parts=lines, shapeType=shapeType)
|
Creates a POLYLINE shape.
Lines is a collection of lines, each made up of a list of xy values.
|
def readlink(self, path):
"""
Return the target of a symbolic link (shortcut). You can use
L{symlink} to create these. The result may be either an absolute or
relative pathname.
@param path: path of the symbolic link file
@type path: str
@return: target path
@rtype: str
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'readlink(%r)' % path)
t, msg = self._request(CMD_READLINK, path)
if t != CMD_NAME:
raise SFTPError('Expected name response')
count = msg.get_int()
if count == 0:
return None
if count != 1:
raise SFTPError('Readlink returned %d results' % count)
return _to_unicode(msg.get_string())
|
Return the target of a symbolic link (shortcut). You can use
L{symlink} to create these. The result may be either an absolute or
relative pathname.
@param path: path of the symbolic link file
@type path: str
@return: target path
@rtype: str
|
def write_xml(xml, output_file=None):
"""Outputs the XML content into a file."""
gen_filename = "requirements-{:%Y%m%d%H%M%S}.xml".format(datetime.datetime.now())
utils.write_xml(xml, output_loc=output_file, filename=gen_filename)
|
Outputs the XML content into a file.
|
def missing_parameter_values(self, parameter_values):
"""
Checks if the given input contains values for all parameters used by this template
:param dict parameter_values: Dictionary of values for each parameter used in the template
:return list: List of names of parameters that are missing.
:raises InvalidParameterValues: When parameter values is not a valid dictionary
"""
if not self._is_valid_parameter_values(parameter_values):
raise InvalidParameterValues("Parameter values are required to process a policy template")
return list(set(self.parameters.keys()) - set(parameter_values.keys()))
|
Checks if the given input contains values for all parameters used by this template
:param dict parameter_values: Dictionary of values for each parameter used in the template
:return list: List of names of parameters that are missing.
:raises InvalidParameterValues: When parameter values is not a valid dictionary
|
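The missing-parameter check is a plain set difference between declared and supplied names; a sketch with hypothetical parameter names:
template_parameters = {'AccountId', 'Region', 'Partition'}
parameter_values = {'AccountId': '123456789012', 'Region': 'us-east-1'}
missing = list(template_parameters - set(parameter_values.keys()))
print(missing)  # ['Partition']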
def getDataset(self, itemId):
"""gets a dataset class"""
if self._url.lower().find('datasets') > -1:
url = self._url
else:
url = self._url + "/datasets"
return OpenDataItem(url=url,
itemId=itemId,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
|
gets a dataset class
|
def get_profile(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets the specified profile.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.ProfileServiceClient()
>>>
>>> name = client.profile_path('[PROJECT]', '[TENANT]', '[PROFILE]')
>>>
>>> response = client.get_profile(name)
Args:
name (str): Required.
Resource name of the profile to get.
The format is
"projects/{project\_id}/tenants/{tenant\_id}/profiles/{profile\_id}",
for example, "projects/api-test-project/tenants/foo/profiles/bar".
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.talent_v4beta1.types.Profile` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_profile" not in self._inner_api_calls:
self._inner_api_calls[
"get_profile"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_profile,
default_retry=self._method_configs["GetProfile"].retry,
default_timeout=self._method_configs["GetProfile"].timeout,
client_info=self._client_info,
)
request = profile_service_pb2.GetProfileRequest(name=name)
return self._inner_api_calls["get_profile"](
request, retry=retry, timeout=timeout, metadata=metadata
)
|
Gets the specified profile.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.ProfileServiceClient()
>>>
>>> name = client.profile_path('[PROJECT]', '[TENANT]', '[PROFILE]')
>>>
>>> response = client.get_profile(name)
Args:
name (str): Required.
Resource name of the profile to get.
The format is
"projects/{project\_id}/tenants/{tenant\_id}/profiles/{profile\_id}",
for example, "projects/api-test-project/tenants/foo/profiles/bar".
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.talent_v4beta1.types.Profile` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
|
def get_collection_instance(klass, api_client = None, request_api=True, **kwargs):
"""
    instantiates the collection lookup of json type klass
:param klass: json file name
:param api_client: transportation api
:param request_api: if True uses the default APIClient
"""
_type = klass
if api_client is None and request_api:
api_client = api.APIClient()
if isinstance(klass, dict):
_type = klass['type']
obj = CollectionResource(_type, api_client, **kwargs)
return obj
#
# /**
# * magic method for mapping all kinds of method calls to addFilter
# * @param string $method method name
# * @param array $args array of arguments
# * @return SaleskingCollection
# * @throws BadMethodCallException
# * @since 1.0.0
# */
# public function __call($method, array $args) {
# try {
# $this->addFilter($method,$args[0]);
# return $this;
# }
# catch (SaleskingException $e)
# {
# if($e->getCode() == "FILTER_NOTEXISTING")
# {
# throw new BadMethodCallException('Call to undefined method :'.$method);
# }
#
# throw $e;
# }
# }
def sort(self, direction = "ASC"):
    """
    set the sort direction for the query
    ['ASC','DESC']
    """
    direction = direction.upper()
    if direction in ['ASC', 'DESC']:
        self.sort = direction
    else:
        raise SaleskingException("SORT_INVALIDDIRECTION", "Invalid sorting direction - please choose either ASC or DESC")
def sort_by(self, property):
    """
    set the sort-by property for the query
    """
    seek = u"sort_by"
    # make sure that the api supports sorting for this kind of object
    if seek in self.schema['links']['instances']['properties']:
        # make sure that we have a valid property
        if property in self.schema['links']['instances']['properties']['sort_by']['enum']:
            self.sort_by = property
            return self
        else:
            raise SaleskingException("SORTBY_INVALIDPROPERTY", "Invalid property for sorting")
    else:
        raise SaleskingException("SORTBY_CANNOTSORT", "object type doesn't support sorting")
|
instantiates the collection lookup of json type klass
:param klass: json file name
:param api_client: transportation api
:param request_api: if True uses the default APIClient
|
def stickers_translate_get(self, api_key, s, **kwargs):
"""
Sticker Translate Endpoint
The translate API draws on search, but uses the Giphy `special sauce` to handle translating from one vocabulary to another. In this case, words and phrases to GIFs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.stickers_translate_get(api_key, s, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str api_key: Giphy API Key. (required)
:param str s: Search term. (required)
:return: InlineResponse2001
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.stickers_translate_get_with_http_info(api_key, s, **kwargs)
else:
(data) = self.stickers_translate_get_with_http_info(api_key, s, **kwargs)
return data
|
Sticker Translate Endpoint
The translate API draws on search, but uses the Giphy `special sauce` to handle translating from one vocabulary to another. In this case, words and phrases to GIFs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.stickers_translate_get(api_key, s, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str api_key: Giphy API Key. (required)
:param str s: Search term. (required)
:return: InlineResponse2001
If the method is called asynchronously,
returns the request thread.
|
def _competition(self, x):
"""!
@brief Calculates neuron winner (distance, neuron index).
@param[in] x (list): Input pattern from the input data set, for example it can be coordinates of point.
@return (uint) Returns index of neuron that is winner.
"""
index = 0
minimum = euclidean_distance_square(self._weights[0], x)
for i in range(1, self._size, 1):
candidate = euclidean_distance_square(self._weights[i], x)
if candidate < minimum:
index = i
minimum = candidate
return index
|
!
@brief Calculates neuron winner (distance, neuron index).
@param[in] x (list): Input pattern from the input data set, for example it can be coordinates of point.
@return (uint) Returns index of neuron that is winner.
|
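The winner search is a linear argmin over squared Euclidean distances; a dependency-free sketch (euclidean_distance_square is re-implemented here for illustration):
def euclidean_distance_square(a, b):
    return sum((ai - bi) ** 2 for ai, bi in zip(a, b))
weights = [[0.0, 0.0], [1.0, 1.0], [0.2, 0.1]]
x = [0.25, 0.2]
winner = min(range(len(weights)), key=lambda i: euclidean_distance_square(weights[i], x))
print(winner)  # 2, the neuron closest to x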
def _kill_process(self, pid, cgroups=None, sig=signal.SIGKILL):
"""
        Try to send signal to given process, either directly or with sudo.
Because we cannot send signals to the sudo process itself,
this method checks whether the target is the sudo process
and redirects the signal to sudo's child in this case.
"""
if self._user is not None:
if not cgroups:
cgroups = find_cgroups_of_process(pid)
# In case we started a tool with sudo, we cannot kill the started
# process itself, because sudo always runs as root.
# So if we are asked to kill the started process itself (the first
# process in the cgroup), we instead kill the child of sudo
# (the second process in the cgroup).
pids = cgroups.get_all_tasks(FREEZER)
try:
if pid == next(pids):
pid = next(pids)
except StopIteration:
# pids seems to not have enough values
pass
finally:
pids.close()
self._kill_process0(pid, sig)
|
Try to send signal to given process, either directly or with sudo.
Because we cannot send signals to the sudo process itself,
this method checks whether the target is the sudo process
and redirects the signal to sudo's child in this case.
|
def _read_register(self, reg):
"""Read 16 bit register value."""
self.buf[0] = reg
with self.i2c_device as i2c:
i2c.write(self.buf, end=1, stop=False)
i2c.readinto(self.buf, end=2)
return self.buf[0] << 8 | self.buf[1]
|
Read 16 bit register value.
|
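The return line combines the two bytes read from the device big-endian; in isolation:
buf = bytearray([0x12, 0x34])
# high byte shifted left 8 bits, OR'd with the low byte
assert buf[0] << 8 | buf[1] == 0x1234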
def remove_cache(self, namespace, key=None):
"""Remove all cached values for the specified namespace,
optionally specifying a key"""
if key is None:
self.cursor.execute('DELETE FROM gauged_cache '
'WHERE namespace = %s', (namespace,))
else:
self.cursor.execute('DELETE FROM gauged_cache '
'WHERE namespace = %s and `key` = %s',
(namespace, key))
|
Remove all cached values for the specified namespace,
optionally specifying a key
|
def coroutine(func):
"""
A decorator to wrap a generator function into a callable interface.
>>> @coroutine
... def sum(count):
... sum = 0
... for _ in range(0, count):
... # note that generator arguments are passed as a tuple, hence `num, = ...` instead of `num = ...`
... num, = yield sum
... sum += num
... yield sum
...
>>> add = sum(2)
>>> add(2)
2
>>> add(3)
5
>>> add(4)
Traceback (most recent call last):
...
StopIteration
As you can see, this lets you keep state between calls easily, as expected from a generator, while calling the
function looks like a function. The same without `@coroutine` would look like this:
>>> def sum(count):
... sum = 0
... for _ in range(0, count):
... num = yield sum
... sum += num
... yield sum
...
>>> add = sum(2)
>>> next(add) # initial next call is necessary
0
>>> add.send(2) # to call the function, next or send must be used
2
>>> add.send(3)
5
>>> add.send(4)
Traceback (most recent call last):
...
StopIteration
Here is an example that shows how to translate traditional functions to use this decorator:
>>> def foo(a, b):
... # do some foo
... return a + b
...
>>> def bar(c):
... # do some bar
... return 2*c
...
>>> foo(1, 2)
3
>>> bar(3)
6
>>> @coroutine
... def func_maker():
... a, b = yield
... # do some foo
... c, = yield foo(a, b)
... # do some bar
... yield bar(c)
...
>>> func_once = func_maker()
>>> func_once(1, 2)
3
>>> func_once(3)
6
    The two differences are that a) using traditional functions, ``foo`` and ``bar`` don't share any context and b) using
    the decorator, both calls use the same function name, and calling the function is limited to twice (in this case).
"""
def decorator(*args, **kwargs):
generator = func(*args, **kwargs)
next(generator)
return lambda *args: generator.send(args)
return decorator
|
A decorator to wrap a generator function into a callable interface.
>>> @coroutine
... def sum(count):
... sum = 0
... for _ in range(0, count):
... # note that generator arguments are passed as a tuple, hence `num, = ...` instead of `num = ...`
... num, = yield sum
... sum += num
... yield sum
...
>>> add = sum(2)
>>> add(2)
2
>>> add(3)
5
>>> add(4)
Traceback (most recent call last):
...
StopIteration
As you can see, this lets you keep state between calls easily, as expected from a generator, while calling the
function looks like a function. The same without `@coroutine` would look like this:
>>> def sum(count):
... sum = 0
... for _ in range(0, count):
... num = yield sum
... sum += num
... yield sum
...
>>> add = sum(2)
>>> next(add) # initial next call is necessary
0
>>> add.send(2) # to call the function, next or send must be used
2
>>> add.send(3)
5
>>> add.send(4)
Traceback (most recent call last):
...
StopIteration
Here is an example that shows how to translate traditional functions to use this decorator:
>>> def foo(a, b):
... # do some foo
... return a + b
...
>>> def bar(c):
... # do some bar
... return 2*c
...
>>> foo(1, 2)
3
>>> bar(3)
6
>>> @coroutine
... def func_maker():
... a, b = yield
... # do some foo
... c, = yield foo(a, b)
... # do some bar
... yield bar(c)
...
>>> func_once = func_maker()
>>> func_once(1, 2)
3
>>> func_once(3)
6
The two differences are that a) using traditional functions, ``foo`` and ``bar`` don't share any context and b) using
the decorator, both calls use the same function name, and calling the function is limited to twice (in this case).
|
def endswith(self, search_str):
"""Check whether the provided string exists in Journal file.
Only checks the last 5 lines of the journal file. This method is
usually used when tracking a journal from an active Revit session.
Args:
search_str (str): string to search for
Returns:
bool: if True the search string is found
"""
for entry in reversed(list(open(self._jrnl_file, 'r'))[-5:]):
if search_str in entry:
return True
return False
|
Check whether the provided string exists in Journal file.
Only checks the last 5 lines of the journal file. This method is
usually used when tracking a journal from an active Revit session.
Args:
search_str (str): string to search for
Returns:
bool: if True the search string is found
|
def purge(vm_, dirs=False, removables=None, **kwargs):
'''
Recursively destroy and delete a persistent virtual machine, pass True for
    dirs to also delete the directories containing the virtual machine disk
images - USE WITH EXTREME CAUTION!
Pass removables=False to avoid deleting cdrom and floppy images. To avoid
disruption, the default but dangerous value is True. This will be changed
to the safer False default value in Sodium.
:param vm_: domain name
:param dirs: pass True to remove containing directories
:param removables: pass True to remove removable devices
.. versionadded:: 2019.2.0
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.purge <domain> removables=False
'''
conn = __get_conn(**kwargs)
dom = _get_domain(conn, vm_)
disks = _get_disks(dom)
if removables is None:
salt.utils.versions.warn_until(
'Sodium',
'removables argument default value is True, but will be changed '
'to False by default in {version}. Please set to True to maintain '
'the current behavior in the future.'
)
removables = True
if VIRT_STATE_NAME_MAP.get(dom.info()[0], 'unknown') != 'shutdown' and dom.destroy() != 0:
return False
directories = set()
for disk in disks:
if not removables and disks[disk]['type'] in ['cdrom', 'floppy']:
continue
elif disks[disk].get('zfs', False):
# TODO create solution for 'dataset is busy'
time.sleep(3)
fs_name = disks[disk]['file'][len('/dev/zvol/'):]
log.info('Destroying VM ZFS volume %s', fs_name)
__salt__['zfs.destroy'](
name=fs_name,
force=True)
else:
os.remove(disks[disk]['file'])
directories.add(os.path.dirname(disks[disk]['file']))
if dirs:
for dir_ in directories:
shutil.rmtree(dir_)
if getattr(libvirt, 'VIR_DOMAIN_UNDEFINE_NVRAM', False):
# This one is only in 1.2.8+
try:
dom.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_NVRAM)
except libvirt.libvirtError:
dom.undefine()
else:
dom.undefine()
conn.close()
return True
|
Recursively destroy and delete a persistent virtual machine, pass True for
dirs to also delete the directories containing the virtual machine disk
images - USE WITH EXTREME CAUTION!
Pass removables=False to avoid deleting cdrom and floppy images. To avoid
disruption, the default but dangerous value is True. This will be changed
to the safer False default value in Sodium.
:param vm_: domain name
:param dirs: pass True to remove containing directories
:param removables: pass True to remove removable devices
.. versionadded:: 2019.2.0
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.purge <domain> removables=False
|
def _infer_all_output_dims(self, inputs):
"""Calculate the output shape for `inputs` after a deconvolution.
Args:
inputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16` or `tf.float32`.
Returns:
output_shape: A tensor of shape (`batch_size`, `conv_output_shape`).
"""
# Use tensorflow shape op to manipulate inputs shape, so that unknown batch
# size - which can happen when using input placeholders - is handled
    # correctly.
batch_size = tf.expand_dims(tf.shape(inputs)[0], 0)
out_channels = (self.output_channels,)
# Height dim needs to be added to everything for 1D Conv
# as we'll be using the 2D Conv Transpose op.
if self._n == 1:
out_shape = (1,) + self.output_shape
else:
out_shape = self.output_shape
if self._data_format.startswith("NC"):
out_shape_tuple = out_channels + out_shape
elif self._data_format.startswith("N") and self._data_format.endswith("C"):
out_shape_tuple = out_shape + out_channels
output_shape = tf.concat([batch_size, out_shape_tuple], 0)
return output_shape
|
Calculate the output shape for `inputs` after a deconvolution.
Args:
inputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16` or `tf.float32`.
Returns:
output_shape: A tensor of shape (`batch_size`, `conv_output_shape`).
|
def more_than_one_error(self, field):
"""Logs a more than one error.
field is the field/property that has more than one defined.
"""
msg = 'More than one {0} defined.'.format(field)
self.logger.log(msg)
self.error = True
|
Logs a more than one error.
field is the field/property that has more than one defined.
|
def from_dynacRepr(cls, pynacRepr):
"""
        Construct an ``AccGap`` instance from the Pynac lattice element
"""
pynacList = pynacRepr[1][0]
L = float(pynacList[3])
TTF = float(pynacList[4])
TTFprime = float(pynacList[5])
TTFprimeprime = float(pynacList[13])
EField = float(pynacList[10])
phase = float(pynacList[11])
F = float(pynacList[14])
atten = float(pynacList[15])
gap = cls(L, TTF, TTFprime, TTFprimeprime, EField, phase, F, atten)
gap.gapID = Param(val = int(pynacList[0]), unit = None)
gap.energy = Param(val = float(pynacList[1]), unit = 'MeV')
gap.beta = Param(val = float(pynacList[2]), unit = None)
gap.S = Param(val = float(pynacList[6]), unit = None)
gap.SP = Param(val = float(pynacList[7]), unit = None)
gap.quadLength = Param(val = float(pynacList[8]), unit = 'cm')
gap.quadStrength = Param(val = float(pynacList[9]), unit = 'kG/cm')
gap.accumLen = Param(val = float(pynacList[12]), unit = 'cm')
return gap
|
Construct an ``AccGap`` instance from the Pynac lattice element
|
def distrib_release():
"""
Get the release number of the distribution.
Example::
from burlap.system import distrib_id, distrib_release
if distrib_id() == 'CentOS' and distrib_release() == '6.1':
print(u"CentOS 6.2 has been released. Please upgrade.")
"""
with settings(hide('running', 'stdout')):
kernel = (run('uname -s') or '').strip().lower()
if kernel == LINUX:
return run('lsb_release -r --short')
elif kernel == SUNOS:
return run('uname -v')
|
Get the release number of the distribution.
Example::
from burlap.system import distrib_id, distrib_release
if distrib_id() == 'CentOS' and distrib_release() == '6.1':
print(u"CentOS 6.2 has been released. Please upgrade.")
|
def t_IDENTIFIER(self, t):
r'\#?[a-zA-Z_][a-zA-Z_0-9]*'
t.type = SpecParser.reserved.get(t.value, 'IDENTIFIER')
return t
|
\#?[a-zA-Z_][a-zA-Z_0-9]*
|
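For context, a rule like this conventionally sits next to a reserved-word table; a hedged sketch of that PLY convention (the keyword set here is hypothetical):
reserved = {'if': 'IF', 'else': 'ELSE', 'while': 'WHILE'}  # lexeme -> token type
tokens = ['IDENTIFIER'] + list(reserved.values())          # fallback is 'IDENTIFIER'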
def write_document(self, document: BioCDocument):
"""Encode and write a single document."""
tree = self.encoder.encode(document)
self.__writer.send(tree)
|
Encode and write a single document.
|
def get_hash(fName, readSize, dire=pDir()):
"""
Create an MD5 hash from the first and last ``readSize`` KiB of the file;
return -1 if the file is missing or smaller than two chunks.
"""
if not fileExists(fName, dire):
return -1
readSize = readSize * 1024 # bytes to be read
fName = os.path.join(dire, fName) # name coupled with path
with open(fName, 'rb') as f:
size = os.path.getsize(fName)
if size < readSize * 2:
return -1
data = f.read(readSize)
f.seek(-readSize, os.SEEK_END)
data += f.read(readSize)
return md5(data).hexdigest()
|
Create an MD5 hash from the first and last ``readSize`` KiB of the file;
return -1 if the file is missing or smaller than two chunks.
|
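A usage sketch (path, size and directory are hypothetical): hash the first and last 64 KiB of a large file, treating -1 as "missing or smaller than two chunks".
digest = get_hash('movie.mkv', 64, '/tmp')
if digest == -1:
    print('file missing or too small to fingerprint')
else:
    print(digest)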
def redact_secrets(line):
"""
Returns a sanitized string for any ``line`` that looks like it contains a
secret (i.e. matches SECRET_PATTERN).
"""
def redact(match):
if match.group(2) in SECRET_WHITELIST:
return match.group(0)
return match.group(1) + 'TOO_TOO_SEXY'
return SECRET_PATTERN.sub(redact, line)
|
Returns a sanitized string for any ``line`` that looks like it contains a
secret (i.e. matches SECRET_PATTERN).
|
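A standalone sketch, assuming a plausible pattern and whitelist; the real SECRET_PATTERN and SECRET_WHITELIST are defined elsewhere in the project and may differ.
import re
# Hypothetical stand-ins: group(1) is the key plus separator, group(2) the key alone.
SECRET_PATTERN = re.compile(r'((\w*(?:password|token|secret)\w*)\s*[:=]\s*)\S+', re.I)
SECRET_WHITELIST = ('password_hint',)
print(redact_secrets('db_password = hunter2'))  # -> 'db_password = TOO_TOO_SEXY'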
def pager(__text: str, *, pager: Optional[str] = 'less'):
"""Pass output through pager.
See :manpage:`less(1)`, if you wish to configure the default pager. For
example, you may wish to check ``FRSX`` options.
Args:
__text: Text to page
pager: Pager to use
"""
if pager:
run([pager, ], input=__text.encode())
else:
print(__text)
|
Pass output through pager.
See :manpage:`less(1)`, if you wish to configure the default pager. For
example, you may wish to check ``FRSX`` options.
Args:
__text: Text to page
pager: Pager to use
|
def has_active_subscription(self, plan=None):
"""
Checks to see if this customer has an active subscription to the given plan.
:param plan: The plan for which to check for an active subscription. If plan is None and
there exists only one active subscription, this method will check if that subscription
is valid. Calling this method with no plan and multiple valid subscriptions for this customer will
throw an exception.
:type plan: Plan or string (plan ID)
:returns: True if there exists an active subscription, False otherwise.
:throws: TypeError if ``plan`` is None and more than one active subscription exists for this customer.
"""
if plan is None:
valid_subscriptions = self._get_valid_subscriptions()
if len(valid_subscriptions) == 0:
return False
elif len(valid_subscriptions) == 1:
return True
else:
raise TypeError(
"plan cannot be None if more than one valid subscription exists for this customer."
)
else:
# Convert Plan to id
if isinstance(plan, StripeModel):
plan = plan.id
return any(
[
subscription.is_valid()
for subscription in self.subscriptions.filter(plan__id=plan)
]
)
|
Checks to see if this customer has an active subscription to the given plan.
:param plan: The plan for which to check for an active subscription. If plan is None and
there exists only one active subscription, this method will check if that subscription
is valid. Calling this method with no plan and multiple valid subscriptions for this customer will
throw an exception.
:type plan: Plan or string (plan ID)
:returns: True if there exists an active subscription, False otherwise.
:throws: TypeError if ``plan`` is None and more than one active subscription exists for this customer.
|
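A usage sketch in dj-stripe style (the customer lookup and plan id are hypothetical):
customer = Customer.objects.get(subscriber=request.user)
if customer.has_active_subscription(plan='plan_basic_monthly'):
    enable_premium_features()  # hypothetical helper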
def run(self):
"""Listener method that keeps pulling new messages."""
t_last_click = -1
while True:
d = self.device.read(13)
if d is not None and self._enabled:
if d[0] == 1: ## readings from 6-DoF sensor
self.y = convert(d[1], d[2])
self.x = convert(d[3], d[4])
self.z = convert(d[5], d[6]) * -1.0
self.roll = convert(d[7], d[8])
self.pitch = convert(d[9], d[10])
self.yaw = convert(d[11], d[12])
self._control = [
self.x,
self.y,
self.z,
self.roll,
self.pitch,
self.yaw,
]
elif d[0] == 3: ## readings from the side buttons
# press left button
if d[1] == 1:
t_click = time.time()
elapsed_time = t_click - t_last_click
t_last_click = t_click
self.single_click_and_hold = True
# release left button
if d[1] == 0:
self.single_click_and_hold = False
# right button is for reset
if d[1] == 2:
self._reset_state = 1
self._enabled = False
self._reset_internal_state()
|
Listener method that keeps pulling new messages.
|
def count_of_certain_kind(kind):
'''
Get the count of posts of a certain kind.
'''
recs = TabPost.select().where(TabPost.kind == kind)
return recs.count()
|
Get the count of posts of a certain kind.
|
def browse(fileNames=None,
inspectorFullName=None,
select=None,
profile=DEFAULT_PROFILE,
resetProfile=False, # TODO: should probably be moved to the main program
resetAllProfiles=False, # TODO: should probably be moved to the main program
resetRegistry=False): # TODO: should probably be moved to the main program
""" Opens the main window(s) for the persistent settings of the given profile,
and executes the application.
:param fileNames: List of file names that will be added to the repository
:param inspectorFullName: The full path name of the inspector that will be loaded
:param select: a path of the repository item that will be selected at start-up.
:param profile: the name of the profile that will be loaded
:param resetProfile: if True, the profile will be reset to its standard settings.
:param resetAllProfiles: if True, all profiles will be reset to their standard settings.
:param resetRegistry: if True, the registry will be reset to its standard settings.
:return:
"""
# Imported here so this module can be imported without Qt being installed.
from argos.qt import QtWidgets, QtCore
from argos.application import ArgosApplication
from argos.repo.testdata import createArgosTestData
try:
QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps)
except Exception as ex:
logger.debug("AA_UseHighDpiPixmaps not available in PyQt4: {}".format(ex))
# Create
argosApp = ArgosApplication()
if resetProfile:
argosApp.deleteProfile(profile)
if resetAllProfiles:
argosApp.deleteAllProfiles()
if resetRegistry:
argosApp.deleteRegistries()
# Must be called before opening the files so that file formats are auto-detected.
argosApp.loadOrInitRegistries()
# Load data in common repository before windows are created.
argosApp.loadFiles(fileNames)
if DEBUGGING:
argosApp.repo.insertItem(createArgosTestData())
# Create windows for this profile.
argosApp.loadProfile(profile=profile, inspectorFullName=inspectorFullName)
if select:
for mainWindow in argosApp.mainWindows:
mainWindow.trySelectRtiByPath(select)
return argosApp.execute()
|
Opens the main window(s) for the persistent settings of the given profile,
and executes the application.
:param fileNames: List of file names that will be added to the repository
:param inspectorFullName: The full path name of the inspector that will be loaded
:param select: a path of the repository item that will be selected at start-up.
:param profile: the name of the profile that will be loaded
:param resetProfile: if True, the profile will be reset to its standard settings.
:param resetAllProfiles: if True, all profiles will be reset to their standard settings.
:param resetRegistry: if True, the registry will be reset to its standard settings.
:return:
|
def iter_symbols(code):
"""Yield names and strings used by `code` and its nested code objects"""
for name in code.co_names:
yield name
for const in code.co_consts:
if isinstance(const, six.string_types):
yield const
elif isinstance(const, CodeType):
for name in iter_symbols(const):
yield name
|
Yield names and strings used by `code` and its nested code objects
|
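A quick demonstration on a compiled snippet: names come from co_names, string constants from co_consts, recursing into nested code objects.
code = compile("import os\ndef f():\n    return os.path.join('a', 'b')", '<demo>', 'exec')
print(sorted(set(iter_symbols(code))))  # includes 'os', 'path', 'join', 'a', 'b'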
def set_weight(self, weight):
"""Set weight of each instance.
Parameters
----------
weight : list, numpy 1-D array, pandas Series or None
Weight to be set for each data point.
Returns
-------
self : Dataset
Dataset with set weight.
"""
if weight is not None and np.all(weight == 1):
weight = None
self.weight = weight
if self.handle is not None and weight is not None:
weight = list_to_1d_numpy(weight, name='weight')
self.set_field('weight', weight)
return self
|
Set weight of each instance.
Parameters
----------
weight : list, numpy 1-D array, pandas Series or None
Weight to be set for each data point.
Returns
-------
self : Dataset
Dataset with set weight.
|
def stop(self, devices):
"""Power-Off one or more running devices.
"""
for device in devices:
self.logger.info('Stopping: %s', device.id)
try:
device.power_off()
except packet.baseapi.Error:
raise PacketManagerException('Unable to stop instance "{}"'.format(device.id))
|
Power-Off one or more running devices.
|
def fw_retry_failures(self):
"""Top level retry routine called. """
if not self.fw_init:
return
try:
self.fw_retry_failures_create()
self.fw_retry_failures_delete()
except Exception as exc:
LOG.error("Exception in retry failures %s", str(exc))
|
Top-level routine that retries failed firewall create and delete operations.
|
def from_dict(cls, data):
"""Transforms a Python dictionary to an Input object.
Note:
Optionally, this method can also serialize a Cryptoconditions-
Fulfillment that is not yet signed.
Args:
data (dict): The Input to be transformed.
Returns:
:class:`~bigchaindb.common.transaction.Input`
Raises:
InvalidSignature: If an Input's URI couldn't be parsed.
"""
fulfillment = data['fulfillment']
if not isinstance(fulfillment, (Fulfillment, type(None))):
try:
fulfillment = Fulfillment.from_uri(data['fulfillment'])
except ASN1DecodeError:
# TODO Remove as it is legacy code, and simply fall back on
# ASN1DecodeError
raise InvalidSignature("Fulfillment URI couldn't be parsed")
except TypeError:
# NOTE: See comment about this special case in
# `Input.to_dict`
fulfillment = _fulfillment_from_details(data['fulfillment'])
fulfills = TransactionLink.from_dict(data['fulfills'])
return cls(fulfillment, data['owners_before'], fulfills)
|
Transforms a Python dictionary to an Input object.
Note:
Optionally, this method can also serialize a Cryptoconditions-
Fulfillment that is not yet signed.
Args:
data (dict): The Input to be transformed.
Returns:
:class:`~bigchaindb.common.transaction.Input`
Raises:
InvalidSignature: If an Input's URI couldn't be parsed.
|
def connect_to_images(region=None, public=True):
"""Creates a client for working with Images."""
return _create_client(ep_name="image", region=region, public=public)
|
Creates a client for working with Images.
|
def alpha_view(qimage):
"""Returns alpha view of a given 32-bit color QImage_'s memory.
The result is a 2D numpy.uint8 array, equivalent to
byte_view(qimage)[...,3]. The image must have 32 bit pixel size,
i.e. be RGB32, ARGB32, or ARGB32_Premultiplied. Note that it is
not enforced that the given qimage has a format that actually
*uses* the alpha channel -- for Format_RGB32, the alpha channel
usually contains 255 everywhere.
For your convenience, `qimage` may also be a filename, see
`Loading and Saving Images`_ in the documentation.
:param qimage: image whose memory shall be accessed via NumPy
:type qimage: QImage_ with 32-bit pixel type
:rtype: numpy.ndarray_ with shape (height, width) and dtype uint8"""
bytes = byte_view(qimage, byteorder = None)
if bytes.shape[2] != 4:
raise ValueError("For alpha_view, the image must have 32 bit pixel size (use RGB32, ARGB32, or ARGB32_Premultiplied)")
return bytes[...,_bgra[3]]
|
Returns alpha view of a given 32-bit color QImage_'s memory.
The result is a 2D numpy.uint8 array, equivalent to
byte_view(qimage)[...,3]. The image must have 32 bit pixel size,
i.e. be RGB32, ARGB32, or ARGB32_Premultiplied. Note that it is
not enforced that the given qimage has a format that actually
*uses* the alpha channel -- for Format_RGB32, the alpha channel
usually contains 255 everywhere.
For your convenience, `qimage` may also be a filename, see
`Loading and Saving Images`_ in the documentation.
:param qimage: image whose memory shall be accessed via NumPy
:type qimage: QImage_ with 32-bit pixel type
:rtype: numpy.ndarray_ with shape (height, width) and dtype uint8
|
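A usage sketch (assumes PyQt5 plus the surrounding byte_view helper are available): fill an ARGB32 image with a half-transparent pixel value and read back its alpha plane.
from PyQt5.QtGui import QImage
img = QImage(4, 3, QImage.Format_ARGB32)
img.fill(0x80FF0000)  # A=0x80, R=0xFF
alpha = alpha_view(img)
print(alpha.shape, int(alpha[0, 0]))  # (3, 4) 128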
def add_dicts(*args):
"""
Adds two or more dicts together. Common keys will have their values added.
For example::
>>> t1 = {'a':1, 'b':2}
>>> t2 = {'b':1, 'c':3}
>>> t3 = {'d':4}
>>> add_dicts(t1, t2, t3)
{'a': 1, 'c': 3, 'b': 3, 'd': 4}
"""
counters = [Counter(arg) for arg in args]
return dict(reduce(operator.add, counters))
|
Adds two or more dicts together. Common keys will have their values added.
For example::
>>> t1 = {'a':1, 'b':2}
>>> t2 = {'b':1, 'c':3}
>>> t3 = {'d':4}
>>> add_dicts(t1, t2, t3)
{'a': 1, 'c': 3, 'b': 3, 'd': 4}
|
def Deserialize(self, reader):
"""
Deserialize full object.
Args:
reader (neocore.IO.BinaryReader):
"""
super(SpentCoinState, self).Deserialize(reader)
self.TransactionHash = reader.ReadUInt256()
self.TransactionHeight = reader.ReadUInt32()
count = reader.ReadVarInt()
items = [0] * count
for i in range(0, count):
index = reader.ReadUInt16()
height = reader.ReadUInt32()
items[i] = SpentCoinItem(index=index, height=height)
self.Items = items
|
Deserialize full object.
Args:
reader (neocore.IO.BinaryReader):
|
def clone(self):
"""
Create a complete copy of self.
:returns: A MaterialPackage that is identical to self.
"""
result = copy.copy(self)
result.compound_masses = copy.deepcopy(self.compound_masses)
return result
|
Create a complete copy of self.
:returns: A MaterialPackage that is identical to self.
|
def parse_version(v):
"""
Take a string version and convert it to a tuple (for easier comparison), e.g.:
"1.2.3" --> (1, 2, 3)
"1.2" --> (1, 2, 0)
"1" --> (1, 0, 0)
"""
parts = v.split(".")
# Pad the list to make sure there is three elements so that we get major, minor, point
# comparisons that default to "0" if not given. I.e. "1.2" --> (1, 2, 0)
parts = (parts + 3 * ['0'])[:3]
return tuple(int(x) for x in parts)
|
Take a string version and convert it to a tuple (for easier comparison), e.g.:
"1.2.3" --> (1, 2, 3)
"1.2" --> (1, 2, 0)
"1" --> (1, 0, 0)
|
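The tuple form exists because lexicographic string comparison misorders versions; a two-line check:
assert '1.10' < '1.9'                                # string order, misleading
assert parse_version('1.10') > parse_version('1.9')  # (1, 10, 0) > (1, 9, 0)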
def create(
cls,
path,
template_engine=None,
output_filename=None,
output_ext=None,
view_name=None
):
"""Create the relevant subclass of StatikView based on the given path variable and
parameters."""
# if it's a complex view
if isinstance(path, dict):
return StatikViewComplexPath(
path,
template_engine,
output_filename=output_filename,
output_ext=output_ext,
view_name=view_name
)
elif isinstance(path, basestring):
return StatikViewSimplePath(
path,
output_filename=output_filename,
output_ext=output_ext,
view_name=view_name
)
else:
raise ValueError(
"Unrecognised structure for \"path\" configuration in view: %s" % view_name
)
|
Create the relevant subclass of StatikView based on the given path variable and
parameters.
|
def colon_subscripts(u):
"""
Array colon subscripts foo(1:10) and colon expressions 1:10 look
too similar to each other. Now is the time to find out who is who.
"""
if u.__class__ in (node.arrayref,node.cellarrayref):
for w in u.args:
if w.__class__ is node.expr and w.op == ":":
w._replace(op="::")
|
Array colon subscripts foo(1:10) and colon expressions 1:10 look
too similar to each other. Now is the time to find out who is who.
|
def remove(self, key, column_path, timestamp, consistency_level):
"""
Remove data from the row specified by key at the granularity specified by column_path, and the given timestamp. Note
that all the values in column_path besides column_path.column_family are truly optional: you can remove the entire
row by just specifying the ColumnFamily, or you can remove a SuperColumn or a single Column by specifying those levels too.
Parameters:
- key
- column_path
- timestamp
- consistency_level
"""
self._seqid += 1
d = self._reqs[self._seqid] = defer.Deferred()
self.send_remove(key, column_path, timestamp, consistency_level)
return d
|
Remove data from the row specified by key at the granularity specified by column_path, and the given timestamp. Note
that all the values in column_path besides column_path.column_family are truly optional: you can remove the entire
row by just specifying the ColumnFamily, or you can remove a SuperColumn or a single Column by specifying those levels too.
Parameters:
- key
- column_path
- timestamp
- consistency_level
|
def fig_to_geojson(fig=None, **kwargs):
"""
Returns a figure's GeoJSON representation as a dictionary
All arguments passed to fig_to_html()
Returns
-------
GeoJSON dictionary
"""
if fig is None:
fig = plt.gcf()
renderer = LeafletRenderer(**kwargs)
exporter = Exporter(renderer)
exporter.run(fig)
return renderer.geojson()
|
Returns a figure's GeoJSON representation as a dictionary
All arguments passed to fig_to_html()
Returns
-------
GeoJSON dictionary
|
def add_type(self, type: type, serialize: Callable[[Any], str], unserialize: Callable[[str], Any]) -> None:
"""
Adds serialization support for a new type.
:param type: The type to add support for.
:param serialize: A callable that takes an object of type ``type`` and returns a string.
:param unserialize: A callable that takes a string and returns an object of type ``type``.
"""
self.types.append(HierarkeyType(type=type, serialize=serialize, unserialize=unserialize))
|
Adds serialization support for a new type.
:param type: The type to add support for.
:param serialize: A callable that takes an object of type ``type`` and returns a string.
:param unserialize: A callable that takes a string and returns an object of type ``type``.
|
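A usage sketch (the registry object is hypothetical): register Decimal support so settings of that type round-trip through their string form.
from decimal import Decimal
registry.add_type(Decimal, serialize=str, unserialize=Decimal)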
def setText(self, text: str):
"""
Undo safe wrapper for the native ``setText`` method.
|Args|
* ``text`` (**str**): text to insert at the specified position.
|Returns|
**None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
undoObj = UndoSetText(self, text)
self.qteUndoStack.push(undoObj)
|
Undo safe wrapper for the native ``setText`` method.
|Args|
* ``text`` (**str**): text to insert at the specified position.
|Returns|
**None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
|
def controldata(self):
""" return the contents of pg_controldata, or non-True value if pg_controldata call failed """
result = {}
# Don't try to call pg_controldata during backup restore
if self._version_file_exists() and self.state != 'creating replica':
try:
env = {'LANG': 'C', 'LC_ALL': 'C', 'PATH': os.getenv('PATH')}
if os.getenv('SYSTEMROOT') is not None:
env['SYSTEMROOT'] = os.getenv('SYSTEMROOT')
data = subprocess.check_output([self._pgcommand('pg_controldata'), self._data_dir], env=env)
if data:
data = data.decode('utf-8').splitlines()
# pg_controldata output depends on the major version. Some parameters are prefixed by 'Current '
result = {l.split(':')[0].replace('Current ', '', 1): l.split(':', 1)[1].strip() for l in data
if l and ':' in l}
except subprocess.CalledProcessError:
logger.exception("Error when calling pg_controldata")
return result
|
return the contents of pg_controldata, or non-True value if pg_controldata call failed
|
def listidentifiers(**kwargs):
"""Create OAI-PMH response for verb ListIdentifiers."""
e_tree, e_listidentifiers = verb(**kwargs)
result = get_records(**kwargs)
for record in result.items:
pid = oaiid_fetcher(record['id'], record['json']['_source'])
header(
e_listidentifiers,
identifier=pid.pid_value,
datestamp=record['updated'],
sets=record['json']['_source'].get('_oai', {}).get('sets', []),
)
resumption_token(e_listidentifiers, result, **kwargs)
return e_tree
|
Create OAI-PMH response for verb ListIdentifiers.
|
def forwardMessage(self, chat_id, from_chat_id, message_id,
disable_notification=None):
""" See: https://core.telegram.org/bots/api#forwardmessage """
p = _strip(locals())
return self._api_request('forwardMessage', _rectify(p))
|
See: https://core.telegram.org/bots/api#forwardmessage
|
def coerce(self, value):
"""
Takes one or two values in the domain and returns a LinearOrderedCell
with the same domain
"""
if isinstance(value, LinearOrderedCell) and (self.domain == value.domain or \
list_diff(self.domain, value.domain) == []):
# is LinearOrderedCell with same domain
return value
elif value in self.domain:
return LinearOrderedCell(self.domain, value, value)
elif isinstance(value, (list, tuple)) and all(v in self.domain for v in value):
if len(value) == 1:
return LinearOrderedCell(self.domain, value[0], value[0])
elif len(value) == 2:
return LinearOrderedCell(self.domain, *value)
else:
sorted_vals = sorted(value, key=lambda x: self.to_i(x))
return LinearOrderedCell(self.domain, sorted_vals[0], sorted_vals[-1])
else:
raise Exception("Cannot coerce %s into LinearOrderedCell" % (str(value)))
|
Takes one or two values in the domain and returns a LinearOrderedCell
with the same domain
|
def delete_subnet_group(name, region=None, key=None, keyid=None,
profile=None):
'''
Delete an RDS subnet group.
CLI example::
salt myminion boto_rds.delete_subnet_group my-subnet-group \
region=us-east-1
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return {'results': bool(conn)}
r = conn.delete_db_subnet_group(DBSubnetGroupName=name)
return {'deleted': bool(r), 'message':
'Deleted RDS subnet group {0}.'.format(name)}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
|
Delete an RDS subnet group.
CLI example::
salt myminion boto_rds.delete_subnet_group my-subnet-group \
region=us-east-1
|
def gen_lazy_function(self):
"""
Will be called by Node at instantiation.
"""
# If value argument to __init__ was None, draw value from random
# method.
if self._value is None:
# Use random function if provided
if self._random is not None:
self.value = self._random(**self._parents.value)
# Otherwise leave initial value at None and warn.
else:
raise ValueError(
'Stochastic ' +
self.__name__ +
"'s value initialized to None; no initial value or random method provided.")
arguments = {}
arguments.update(self.parents)
arguments['value'] = self
arguments = DictContainer(arguments)
self._logp = LazyFunction(fun=self._logp_fun,
arguments=arguments,
ultimate_args=self.extended_parents | set(
[self]),
cache_depth=self._cache_depth)
self._logp.force_compute()
self._logp_partial_gradients = {}
for parameter, function in six.iteritems(self._logp_partial_gradient_functions):
lazy_logp_partial_gradient = LazyFunction(fun=function,
arguments=arguments,
ultimate_args=self.extended_parents | set(
[self]),
cache_depth=self._cache_depth)
# lazy_logp_partial_gradient.force_compute()
self._logp_partial_gradients[parameter] = lazy_logp_partial_gradient
|
Will be called by Node at instantiation.
|
def clear(self):
"""
Clears all of the build variables.
"""
for variable in self._project.variables.list(all=True):
variable.delete()
|
Clears all of the build variables.
|
def _get_default_values(self, default_values=None):
"""Gets the default values set for a resource"""
if not default_values:
default_values = self.DEFAULT_VALUES
if default_values:
api_version = str(self._connection._apiVersion)
values = default_values.get(api_version, {}).copy()
else:
values = {}
return values
|
Gets the default values set for a resource
|
def answerPreCheckoutQuery(self, pre_checkout_query_id, ok,
error_message=None):
""" See: https://core.telegram.org/bots/api#answerprecheckoutquery """
p = _strip(locals())
return self._api_request('answerPreCheckoutQuery', _rectify(p))
|
See: https://core.telegram.org/bots/api#answerprecheckoutquery
|
def get_internal_urls(self):
"""
URLs, which may point to edeposit, aleph, kramerius and so on.
Fields ``856u40``, ``998a`` and ``URLu``.
Returns:
list: List of internal URLs.
"""
internal_urls = self.get_subfields("856", "u", i1="4", i2="0")
internal_urls.extend(self.get_subfields("998", "a"))
internal_urls.extend(self.get_subfields("URL", "u"))
return map(lambda x: x.replace("&", "&"), internal_urls)
|
URLs, which may point to edeposit, aleph, kramerius and so on.
Fields ``856u40``, ``998a`` and ``URLu``.
Returns:
list: List of internal URLs.
|
def hardware_flexport_flexport_type_instance(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hardware = ET.SubElement(config, "hardware", xmlns="urn:brocade.com:mgmt:brocade-hardware")
flexport = ET.SubElement(hardware, "flexport")
id_key = ET.SubElement(flexport, "id")
id_key.text = kwargs.pop('id')
flexport_type = ET.SubElement(flexport, "flexport_type")
instance = ET.SubElement(flexport_type, "instance")
instance.text = kwargs.pop('instance')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
def getResultFromProcess(res, tempname, process):
"""Get a value from process, return tuple of value, res if succesful"""
if not isinstance(res, (UndefinedValue, Exception)):
value = getRepresentation(tempname, process)
return value, res
else:
return res, str(res)
|
Get a value from process; return a tuple of (value, res) if successful
|
def parse_bdstoken(content):
'''Parse the bdstoken and related info out of the page.
This info lives in the <script> at the bottom of the page and only appears
on pages served after authorization.
For compatibility, the cssselect module is no longer used for this parsing.
@return the bdstoken
'''
bdstoken = ''
bds_re = re.compile(r'"bdstoken"\s*:\s*"([^"]+)"', re.IGNORECASE)
bds_match = bds_re.search(content)
if bds_match:
bdstoken = bds_match.group(1)
return bdstoken
|
Parse the bdstoken and related info out of the page.
This info lives in the <script> at the bottom of the page and only appears
on pages served after authorization.
For compatibility, the cssselect module is no longer used for this parsing.
@return the bdstoken
|
def speed_rms(Temperature,element,isotope):
r"""This function calculates the average speed (in meters per second)
of an atom in a vapour assuming a Maxwell-Boltzmann velocity distribution.
This is simply
sqrt(8*k_B*T/m/pi)
where k_B is Boltzmann's constant, T is the temperature (in Kelvins) and
m is the mass of the atom (in kilograms).
>>> print speed_rms(25+273.15,"Rb",85)
295.945034349
>>> print speed_rms(25+273.15,"Cs",133)
236.550383496
"""
atom = Atom(element, isotope)
return sqrt(3*Temperature*k_B/atom.mass)
|
r"""This function calculates the average speed (in meters per second)
of an atom in a vapour assuming a Maxwell-Boltzmann velocity distribution.
This is simply
sqrt(8*k_B*T/m/pi)
where k_B is Boltzmann's constant, T is the temperature (in Kelvins) and
m is the mass of the atom (in kilograms).
>>> print speed_rms(25+273.15,"Rb",85)
295.945034349
>>> print speed_rms(25+273.15,"Cs",133)
236.550383496
|
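A standalone sanity check of v_rms = sqrt(3*k_B*T/m) against the first doctest, using SI constants so the Atom class is not needed:
from math import sqrt
k_B = 1.380649e-23                      # Boltzmann constant, J/K
m_Rb85 = 84.911789 * 1.66053906660e-27  # mass of 85Rb, kg
print(sqrt(3 * (25 + 273.15) * k_B / m_Rb85))  # ~295.9 m/s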
def restore(self, hist_uid):
'''
Restore by ID
'''
if not self.check_post_role()['ADMIN']:
return False
histinfo = MWikiHist.get_by_uid(hist_uid)
if not histinfo:
return False
postinfo = MWiki.get_by_uid(histinfo.wiki_id)
cur_cnt = tornado.escape.xhtml_unescape(postinfo.cnt_md)
old_cnt = tornado.escape.xhtml_unescape(histinfo.cnt_md)
MWiki.update_cnt(
histinfo.wiki_id,
{'cnt_md': old_cnt, 'user_name': self.userinfo.user_name}
)
MWikiHist.update_cnt(
histinfo.uid,
{'cnt_md': cur_cnt, 'user_name': postinfo.user_name}
)
if postinfo.kind == '1':
self.redirect('/wiki/{0}'.format(postinfo.title))
elif postinfo.kind == '2':
self.redirect('/page/{0}.html'.format(postinfo.uid))
|
Restore by ID
|
def main(argv, reactor=None):
"""Run the client GUI.
Typical use:
>>> sys.exit(main(sys.argv))
@param argv: The arguments to run it with, e.g. sys.argv.
@param reactor: The reactor to use. Must be compatible with gtk as this
module uses gtk APIs.
@return exitcode: The exit code it returned, as per sys.exit.
"""
if reactor is None:
from twisted.internet import gtk2reactor
gtk2reactor.install()
from twisted.internet import reactor
try:
AWSStatusIndicator(reactor)
gobject.set_application_name("aws-status")
reactor.run()
except ValueError:
# In this case, the user cancelled, and the exception bubbled to here.
pass
|
Run the client GUI.
Typical use:
>>> sys.exit(main(sys.argv))
@param argv: The arguments to run it with, e.g. sys.argv.
@param reactor: The reactor to use. Must be compatible with gtk as this
module uses gtk APIs.
@return exitcode: The exit code it returned, as per sys.exit.
|
def close(self):
"""
Close the connection and purge the receive buffer.
"""
# Close the underlying socket
if self._sock:
with utils.ignore_except():
self._sock.close()
self._sock = None
# Purge the message buffers
self._recvbuf = []
self._recvbuf_partial = ''
|
Close the connection and purge the receive buffer.
|
def upload_image(self,
image_file,
referer_url=None,
title=None,
desc=None,
created_at=None,
collection_id=None):
"""Upload an image
:param image_file: File-like object of an image file
:param referer_url: Referer site URL
:param title: Site title
:param desc: Comment
:param created_at: Image's created time in unix time
:param collection_id: Collection ID
"""
url = self.upload_url + '/api/upload'
data = {}
if referer_url is not None:
data['referer_url'] = referer_url
if title is not None:
data['title'] = title
if desc is not None:
data['desc'] = desc
if created_at is not None:
data['created_at'] = str(created_at)
if collection_id is not None:
data['collection_id'] = collection_id
files = {
'imagedata': image_file
}
response = self._request_url(
url, 'post', data=data, files=files, with_access_token=True)
headers, result = self._parse_and_check(response)
return Image.from_dict(result)
|
Upload an image
:param image_file: File-like object of an image file
:param referer_url: Referer site URL
:param title: Site title
:param desc: Comment
:param created_at: Image's created time in unix time
:param collection_id: Collection ID
|
def create_or_replace_primary_key(self,
table: str,
fieldnames: Sequence[str]) -> int:
"""Make a primary key, or replace it if it exists."""
# *** create_or_replace_primary_key: Uses code specific to MySQL
sql = """
SELECT COUNT(*)
FROM information_schema.table_constraints
WHERE table_name=?
AND table_schema={}
AND constraint_name='PRIMARY'
""".format(self.get_current_schema_expr())
# http://forums.mysql.com/read.php?10,114742,114748#msg-114748
row = self.fetchone(sql, table)
has_pk_already = True if row[0] >= 1 else False
drop_pk_if_exists = " DROP PRIMARY KEY," if has_pk_already else ""
fieldlist = ",".join([self.delimit(f) for f in fieldnames])
sql = ("ALTER TABLE " + self.delimit(table) +
drop_pk_if_exists +
" ADD PRIMARY KEY(" + fieldlist + ")")
# http://stackoverflow.com/questions/8859353
return self.db_exec(sql)
|
Make a primary key, or replace it if it exists.
|
def ok(self):
"""
Returns True if OK to use, else False
"""
try:
v = int(self._value)
if v < 0:
return False
else:
return True
except (TypeError, ValueError):
return False
|
Returns True if OK to use, else False
|
def Start(self):
"""Retrieve all the clients for the AbstractClientStatsCollectors."""
try:
self.stats = {}
self.BeginProcessing()
processed_count = 0
if data_store.RelationalDBEnabled():
for client_info_batch in _IterateAllClients(
recency_window=self.recency_window):
for client_info in client_info_batch:
self.ProcessClientFullInfo(client_info)
processed_count += len(client_info_batch)
self.Log("Processed %d clients.", processed_count)
self.HeartBeat()
else:
root_children = aff4.FACTORY.Open(
aff4.ROOT_URN, token=self.token).OpenChildren(mode="r")
for batch in collection.Batch(root_children, CLIENT_READ_BATCH_SIZE):
for child in batch:
if not isinstance(child, aff4_grr.VFSGRRClient):
continue
last_ping = child.Get(child.Schema.PING)
self.ProcessLegacyClient(last_ping, child)
processed_count += 1
# This flow is not dead: we don't want to run out of lease time.
self.HeartBeat()
self.FinishProcessing()
for fd in itervalues(self.stats):
fd.Close()
logging.info("%s: processed %d clients.", self.__class__.__name__,
processed_count)
except Exception as e: # pylint: disable=broad-except
logging.exception("Error while calculating stats: %s", e)
raise
|
Retrieve all the clients for the AbstractClientStatsCollectors.
|
def adapt_files(solver):
"""
Rename and remove files whenever necessary.
"""
print("adapting {0}'s files".format(solver))
root = os.path.join('solvers', solver)
for arch in to_extract[solver]:
arch = os.path.join(root, arch)
extract_archive(arch, solver, put_inside=True)
for fnames in to_move[solver]:
old = os.path.join(root, fnames[0])
new = os.path.join(root, fnames[1])
os.rename(old, new)
for f in to_remove[solver]:
f = os.path.join(root, f)
if os.path.isdir(f):
shutil.rmtree(f)
else:
os.remove(f)
|
Rename and remove files whenever necessary.
|
def get_match(sport, team1, team2):
"""
Get live scores for a single match
:param sport: the sport being played
:type sport: string
:param team1: first team participating in the match
:type team1: string
:param team2: second team participating in the match
:type team2: string
:return: A specific match
:rtype: Match
"""
sport = sport.lower()
team1_pattern = re.compile(team1, re.I)
team2_pattern = re.compile(team2, re.I)
matches = get_sport(sport)
for match in matches:
# Group each team's home/away alternatives explicitly ('and' binds tighter than 'or').
if (re.search(team1_pattern, match.home_team) or re.search(team1_pattern, match.away_team)) \
and (re.search(team2_pattern, match.away_team) or re.search(team2_pattern, match.home_team)):
return match
raise errors.MatchError(sport, [team1, team2])
|
Get live scores for a single match
:param sport: the sport being played
:type sport: string
:param team1: first team participating in the match
:type team1: string
:param team2: second team participating in the match
:type team2: string
:return: A specific match
:rtype: Match
|
def assign(self, pm):
"""Reassign pixmap or xpm string array to wrapper"""
if isinstance(pm, QPixmap):
self._pm = pm
else: # assume xpm string list to be decoded on-demand
self._xpmstr = pm
self._pm = None
self._icon = None
|
Reassign pixmap or xpm string array to wrapper
|
def ssh_version():
'''
Returns the version of the installed ssh command
'''
# This function needs more granular checks and to be validated against
# older versions of ssh
ret = subprocess.Popen(
['ssh', '-V'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
try:
version_parts = ret[1].split(b',')[0].split(b'_')[1]
parts = []
# Split e.g. b'7.9p1' on b'.' so each numeric component is parsed separately.
for part in version_parts.split(b'.'):
try:
parts.append(int(part))
except ValueError:
return tuple(parts)
return tuple(parts)
except IndexError:
return (2, 0)
|
Returns the version of the installed ssh command
|
def set_up(self, test_args=(), clear=True, debug=False):
"""
Sets properties right before calling run.
``test_args`` The arguments to pass to the test runner.
``clear`` Boolean. Set to True if we should clear console before
running the tests.
``debug`` Boolean. Set to True if we want to print debugging
information.
"""
self.test_args = test_args
self.debug, self.clear = debug, clear
|
Sets properties right before calling run.
``test_args`` The arguments to pass to the test runner.
``clear`` Boolean. Set to True if we should clear console before
running the tests.
``debug`` Boolean. Set to True if we want to print debugging
information.
|
def execute():
""" Ensure provisioning """
boto_server_error_retries = 3
# Ensure provisioning
for table_name, table_key in sorted(dynamodb.get_tables_and_gsis()):
try:
table_num_consec_read_checks = \
CHECK_STATUS['tables'][table_name]['reads']
except KeyError:
table_num_consec_read_checks = 0
try:
table_num_consec_write_checks = \
CHECK_STATUS['tables'][table_name]['writes']
except KeyError:
table_num_consec_write_checks = 0
try:
# The return var shows how many times the scale-down criteria
# has been met. This is coupled with a var in config,
# "num_intervals_scale_down", to delay the scale-down
table_num_consec_read_checks, table_num_consec_write_checks = \
table.ensure_provisioning(
table_name,
table_key,
table_num_consec_read_checks,
table_num_consec_write_checks)
CHECK_STATUS['tables'][table_name] = {
'reads': table_num_consec_read_checks,
'writes': table_num_consec_write_checks
}
gsi_names = set()
# Add regexp table names
for gst_instance in dynamodb.table_gsis(table_name):
gsi_name = gst_instance[u'IndexName']
try:
gsi_keys = get_table_option(table_key, 'gsis').keys()
except AttributeError:
# Continue if there are no GSIs configured
continue
for gsi_key in gsi_keys:
try:
if re.match(gsi_key, gsi_name):
logger.debug(
'Table {0} GSI {1} matches '
'GSI config key {2}'.format(
table_name, gsi_name, gsi_key))
gsi_names.add((gsi_name, gsi_key))
except re.error:
logger.error('Invalid regular expression: "{0}"'.format(
gsi_key))
sys.exit(1)
for gsi_name, gsi_key in sorted(gsi_names):
unique_gsi_name = ':'.join([table_name, gsi_name])
try:
gsi_num_consec_read_checks = \
CHECK_STATUS['gsis'][unique_gsi_name]['reads']
except KeyError:
gsi_num_consec_read_checks = 0
try:
gsi_num_consec_write_checks = \
CHECK_STATUS['gsis'][unique_gsi_name]['writes']
except KeyError:
gsi_num_consec_write_checks = 0
gsi_num_consec_read_checks, gsi_num_consec_write_checks = \
gsi.ensure_provisioning(
table_name,
table_key,
gsi_name,
gsi_key,
gsi_num_consec_read_checks,
gsi_num_consec_write_checks)
CHECK_STATUS['gsis'][unique_gsi_name] = {
'reads': gsi_num_consec_read_checks,
'writes': gsi_num_consec_write_checks
}
except JSONResponseError as error:
exception = error.body['__type'].split('#')[1]
if exception == 'ResourceNotFoundException':
logger.error('{0} - Table {1} does not exist anymore'.format(
table_name,
table_name))
continue
except BotoServerError as error:
if boto_server_error_retries > 0:
logger.error(
'Unknown boto error. Status: "{0}". '
'Reason: "{1}". Message: {2}'.format(
error.status,
error.reason,
error.message))
logger.error(
'Please bug report if this error persists')
boto_server_error_retries -= 1
continue
else:
raise
# Sleep between the checks
if not get_global_option('run_once'):
logger.debug('Sleeping {0} seconds until next check'.format(
get_global_option('check_interval')))
time.sleep(get_global_option('check_interval'))
|
Ensure provisioning
|
def infile(self):
"""Path of the input file"""
return os.path.join(OPTIONS['base_dir'],
'{0}.{1}'.format(self.name, OPTIONS['in_ext']))
|
Path of the input file
|
def p_rule(self, rule):
'''rule : GUIDELINE
| REGULATION'''
if len(rule[1]) == 4:
# This is a guideline
rule[0] = Guideline(rule[1][1], rule[1][2], rule[1][3])
else:
# This is a regulation
indentsize = rule[1][0]
number = rule[1][1]
text = rule[1][2]
parent = None
# If we just "un"nested, shrink the current rule to our level
if self.prev_indent > indentsize:
self.current_rule = self.current_rule[0:indentsize+1]
# We just added a nested level, the parent is the list's last elem
if self.prev_indent < indentsize:
parent = self.current_rule[-1]
# Else, if we are nested the parent is the one before the last elem
elif len(self.current_rule) > 1:
parent = self.current_rule[-2]
# Else if we are not nested, then we are a root rule and parent is none
# (do nothing as parent is initialized to none)
# Create the regulation node
reg = Regulation(number, text, parent)
# Let our parent know it has a new child; if we don't have a parent
# let's create an item in the article rules list
if parent:
parent.add_child(reg)
else:
rule[0] = reg
# Unless we nested, pop and replace the last rule by ourself
# If we added a nesting level, we just need to add ourself
if self.prev_indent >= indentsize:
self.current_rule.pop()
self.current_rule.append(reg)
self.prev_indent = indentsize
|
rule : GUIDELINE
| REGULATION
|