text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def get_meta_lang(self):
"""\
Extract content language from meta
"""
# we have a lang attribute in html
attr = self.parser.getAttribute(self.article.doc, attr='lang')
if attr is None:
# look up for a Content-Language in meta
items = [
{'tag': 'meta', 'attr': 'http-equiv', 'value': 'content-language'},
{'tag': 'meta', 'attr': 'name', 'value': 'lang'}
]
for item in items:
meta = self.parser.getElementsByTag(self.article.doc, **item)
if meta:
attr = self.parser.getAttribute(meta[0], attr='content')
break
if attr:
value = attr[:2]
if re.search(RE_LANG, value):
return value.lower()
return None | [
"def",
"get_meta_lang",
"(",
"self",
")",
":",
"# we have a lang attribute in html",
"attr",
"=",
"self",
".",
"parser",
".",
"getAttribute",
"(",
"self",
".",
"article",
".",
"doc",
",",
"attr",
"=",
"'lang'",
")",
"if",
"attr",
"is",
"None",
":",
"# look... | 34.583333 | 18.625 |
def _image_gradients(self, input_csvlines, label, image_column_name):
"""Compute gradients from prob of label to image. Used by integrated gradients (probe)."""
with tf.Graph().as_default() as g, tf.Session() as sess:
logging_level = tf.logging.get_verbosity()
try:
tf.logging.set_verbosity(tf.logging.ERROR)
meta_graph_pb = tf.saved_model.loader.load(
sess=sess,
tags=[tf.saved_model.tag_constants.SERVING],
export_dir=self._model_dir)
finally:
tf.logging.set_verbosity(logging_level)
signature = meta_graph_pb.signature_def['serving_default']
input_alias_map = {name: tensor_info_proto.name
for (name, tensor_info_proto) in signature.inputs.items()}
output_alias_map = {name: tensor_info_proto.name
for (name, tensor_info_proto) in signature.outputs.items()}
csv_tensor_name = list(input_alias_map.values())[0]
# The image tensor is already built into ML Workbench graph.
float_image = g.get_tensor_by_name("import/gradients_%s:0" % image_column_name)
if label not in output_alias_map:
raise ValueError('The label "%s" does not exist in output map.' % label)
prob = g.get_tensor_by_name(output_alias_map[label])
grads = tf.gradients(prob, float_image)[0]
grads_values = sess.run(fetches=grads, feed_dict={csv_tensor_name: input_csvlines})
return grads_values | [
"def",
"_image_gradients",
"(",
"self",
",",
"input_csvlines",
",",
"label",
",",
"image_column_name",
")",
":",
"with",
"tf",
".",
"Graph",
"(",
")",
".",
"as_default",
"(",
")",
"as",
"g",
",",
"tf",
".",
"Session",
"(",
")",
"as",
"sess",
":",
"lo... | 50.0625 | 27.4375 |
def createEditor(self, delegate, parent, option):
""" Creates a FloatCtiEditor.
For the parameters see the AbstractCti constructor documentation.
"""
return FloatCtiEditor(self, delegate, parent=parent) | [
"def",
"createEditor",
"(",
"self",
",",
"delegate",
",",
"parent",
",",
"option",
")",
":",
"return",
"FloatCtiEditor",
"(",
"self",
",",
"delegate",
",",
"parent",
"=",
"parent",
")"
] | 46.8 | 13.2 |
def call_actions(
self,
service_name,
actions,
expansions=None,
raise_job_errors=True,
raise_action_errors=True,
timeout=None,
**kwargs
):
"""
Build and send a single job request with one or more actions.
Returns a list of action responses, one for each action in the same order as provided, or raises an exception
if any action response is an error (unless `raise_action_errors` is passed as `False`) or if the job response
is an error (unless `raise_job_errors` is passed as `False`).
This method performs expansions if the Client is configured with an expansion converter.
:param service_name: The name of the service to call
:type service_name: union[str, unicode]
:param actions: A list of `ActionRequest` objects and/or dicts that can be converted to `ActionRequest` objects
:type actions: iterable[union[ActionRequest, dict]]
:param expansions: A dictionary representing the expansions to perform
:type expansions: dict
:param raise_job_errors: Whether to raise a JobError if the job response contains errors (defaults to `True`)
:type raise_job_errors: bool
:param raise_action_errors: Whether to raise a CallActionError if any action responses contain errors (defaults
to `True`)
:type raise_action_errors: bool
:param timeout: If provided, this will override the default transport timeout values to; requests will expire
after this number of seconds plus some buffer defined by the transport, and the client will not
block waiting for a response for longer than this amount of time.
:type timeout: int
:param switches: A list of switch value integers
:type switches: list
:param correlation_id: The request correlation ID
:type correlation_id: union[str, unicode]
:param continue_on_error: Whether to continue executing further actions once one action has returned errors
:type continue_on_error: bool
:param context: A dictionary of extra values to include in the context header
:type context: dict
:param control_extra: A dictionary of extra values to include in the control header
:type control_extra: dict
:return: The job response
:rtype: JobResponse
:raise: ConnectionError, InvalidField, MessageSendError, MessageSendTimeout, MessageTooLarge,
MessageReceiveError, MessageReceiveTimeout, InvalidMessage, JobError, CallActionError
"""
return self.call_actions_future(
service_name,
actions,
expansions,
raise_job_errors,
raise_action_errors,
timeout,
**kwargs
).result() | [
"def",
"call_actions",
"(",
"self",
",",
"service_name",
",",
"actions",
",",
"expansions",
"=",
"None",
",",
"raise_job_errors",
"=",
"True",
",",
"raise_action_errors",
"=",
"True",
",",
"timeout",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return"... | 47.583333 | 29.216667 |
def make_process_header(self, slug, typ, version, source_uri, description, inputs):
"""Generate a process definition header.
:param str slug: process' slug
:param str typ: process' type
:param str version: process' version
:param str source_uri: url to the process definition
:param str description: process' description
:param dict inputs: process' inputs
"""
node = addnodes.desc()
signode = addnodes.desc_signature(slug, '')
node.append(signode)
node['objtype'] = node['desctype'] = typ
signode += addnodes.desc_annotation(typ, typ, classes=['process-type'])
signode += addnodes.desc_addname('', '')
signode += addnodes.desc_name(slug + ' ', slug + ' ')
paramlist = addnodes.desc_parameterlist()
for field_schema, _, _ in iterate_schema({}, inputs, ''):
field_type = field_schema['type']
field_name = field_schema['name']
field_default = field_schema.get('default', None)
field_default = '' if field_default is None else '={}'.format(field_default)
param = addnodes.desc_parameter('', '', noemph=True)
param += nodes.emphasis(field_type, field_type, classes=['process-type'])
# separate by non-breaking space in the output
param += nodes.strong(text='\xa0\xa0' + field_name)
paramlist += param
signode += paramlist
signode += nodes.reference('', nodes.Text('[Source: v{}]'.format(version)),
refuri=source_uri, classes=['viewcode-link'])
desc = nodes.paragraph()
desc += nodes.Text(description, description)
return [node, desc] | [
"def",
"make_process_header",
"(",
"self",
",",
"slug",
",",
"typ",
",",
"version",
",",
"source_uri",
",",
"description",
",",
"inputs",
")",
":",
"node",
"=",
"addnodes",
".",
"desc",
"(",
")",
"signode",
"=",
"addnodes",
".",
"desc_signature",
"(",
"s... | 38.155556 | 23.066667 |
def clear(self, ts=None):
"""Clear all session in prepare phase.
:param ts: timestamp used locate the namespace
"""
sp_key = "%s:session_prepare" % self.namespace(ts or int(time.time()))
return self.r.delete(sp_key) | [
"def",
"clear",
"(",
"self",
",",
"ts",
"=",
"None",
")",
":",
"sp_key",
"=",
"\"%s:session_prepare\"",
"%",
"self",
".",
"namespace",
"(",
"ts",
"or",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
")",
"return",
"self",
".",
"r",
".",
"delete",
... | 35.714286 | 15.857143 |
def is_prefix(pre_path, path):
"""Return True if pre_path is a path-prefix of path."""
pre_path = pre_path.strip('.')
path = path.strip('.')
return not pre_path or path.startswith(pre_path + '.') | [
"def",
"is_prefix",
"(",
"pre_path",
",",
"path",
")",
":",
"pre_path",
"=",
"pre_path",
".",
"strip",
"(",
"'.'",
")",
"path",
"=",
"path",
".",
"strip",
"(",
"'.'",
")",
"return",
"not",
"pre_path",
"or",
"path",
".",
"startswith",
"(",
"pre_path",
... | 41.4 | 9.6 |
def delete_classification_node(self, project, structure_group, path=None, reclassify_id=None):
"""DeleteClassificationNode.
Delete an existing classification node.
:param str project: Project ID or project name
:param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
:param str path: Path of the classification node.
:param int reclassify_id: Id of the target classification node for reclassification.
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if structure_group is not None:
route_values['structureGroup'] = self._serialize.url('structure_group', structure_group, 'TreeStructureGroup')
if path is not None:
route_values['path'] = self._serialize.url('path', path, 'str')
query_parameters = {}
if reclassify_id is not None:
query_parameters['$reclassifyId'] = self._serialize.query('reclassify_id', reclassify_id, 'int')
self._send(http_method='DELETE',
location_id='5a172953-1b41-49d3-840a-33f79c3ce89f',
version='5.0',
route_values=route_values,
query_parameters=query_parameters) | [
"def",
"delete_classification_node",
"(",
"self",
",",
"project",
",",
"structure_group",
",",
"path",
"=",
"None",
",",
"reclassify_id",
"=",
"None",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"project",
"is",
"not",
"None",
":",
"route_values",
"[",
... | 57.521739 | 24 |
def get_undefined_annotations(graph: BELGraph) -> Set[str]:
"""Get all annotations that aren't actually defined.
:return: The set of all undefined annotations
"""
return {
exc.annotation
for _, exc, _ in graph.warnings
if isinstance(exc, UndefinedAnnotationWarning)
} | [
"def",
"get_undefined_annotations",
"(",
"graph",
":",
"BELGraph",
")",
"->",
"Set",
"[",
"str",
"]",
":",
"return",
"{",
"exc",
".",
"annotation",
"for",
"_",
",",
"exc",
",",
"_",
"in",
"graph",
".",
"warnings",
"if",
"isinstance",
"(",
"exc",
",",
... | 30.7 | 16 |
def split(pattern, string, maxsplit=0, flags=0):
"""Split the source string by the occurrences of the pattern,
returning a list containing the resulting substrings."""
return _compile(pattern, flags).split(string, maxsplit) | [
"def",
"split",
"(",
"pattern",
",",
"string",
",",
"maxsplit",
"=",
"0",
",",
"flags",
"=",
"0",
")",
":",
"return",
"_compile",
"(",
"pattern",
",",
"flags",
")",
".",
"split",
"(",
"string",
",",
"maxsplit",
")"
] | 58 | 6.75 |
def get_factors(self, unique_R, inds, centers, widths):
"""Calculate factors based on centers and widths
Parameters
----------
unique_R : a list of array,
Each element contains unique value in one dimension of
scanner coordinate matrix R.
inds : a list of array,
Each element contains the indices to reconstruct one
dimension of original cooridnate matrix from the unique
array.
centers : 2D array, with shape [K, n_dim]
The centers of factors.
widths : 1D array, with shape [K, 1]
The widths of factors.
Returns
-------
F : 2D array, with shape [n_voxel,self.K]
The latent factors from fMRI data.
"""
F = np.zeros((len(inds[0]), self.K))
tfa_extension.factor(
F,
centers,
widths,
unique_R[0],
unique_R[1],
unique_R[2],
inds[0],
inds[1],
inds[2])
return F | [
"def",
"get_factors",
"(",
"self",
",",
"unique_R",
",",
"inds",
",",
"centers",
",",
"widths",
")",
":",
"F",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"inds",
"[",
"0",
"]",
")",
",",
"self",
".",
"K",
")",
")",
"tfa_extension",
".",
"fa... | 24.209302 | 21.255814 |
async def get_version(self, tp, params):
"""
Loads version from the stream / version database
# TODO: instance vs. tp.
:param tp:
:param params:
:return:
"""
tw = TypeWrapper(tp, params)
if not tw.is_versioned():
# self.registry.set_tr()
return TypeWrapper.ELEMENTARY_RES
# If not in the DB, load from archive at current position
if not self.version_db.is_versioned(tw):
tr = await load_uvarint(self.iobj)
ver = await load_uvarint(self.iobj)
self.version_db.set_version(tw, tr, ver)
else:
tr, ver = self.version_db.get_version(tw)
obj_id = None if tr == 0 else await load_uvarint(self.iobj)
self.registry.set_tr(obj_id)
return ver | [
"async",
"def",
"get_version",
"(",
"self",
",",
"tp",
",",
"params",
")",
":",
"tw",
"=",
"TypeWrapper",
"(",
"tp",
",",
"params",
")",
"if",
"not",
"tw",
".",
"is_versioned",
"(",
")",
":",
"# self.registry.set_tr()",
"return",
"TypeWrapper",
".",
"ELE... | 30.807692 | 16.115385 |
def label_clusters(image, min_cluster_size=50, min_thresh=1e-6, max_thresh=1, fully_connected=False):
"""
This will give a unique ID to each connected
component 1 through N of size > min_cluster_size
ANTsR function: `labelClusters`
Arguments
---------
image : ANTsImage
input image e.g. a statistical map
min_cluster_size : integer
throw away clusters smaller than this value
min_thresh : scalar
threshold to a statistical map
max_thresh : scalar
threshold to a statistical map
fully_connected : boolean
boolean sets neighborhood connectivity pattern
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> image = ants.image_read( ants.get_ants_data('r16') )
>>> timageFully = ants.label_clusters( image, 10, 128, 150, True )
>>> timageFace = ants.label_clusters( image, 10, 128, 150, False )
"""
dim = image.dimension
clust = threshold_image(image, min_thresh, max_thresh)
temp = int(fully_connected)
args = [dim, clust, clust, min_cluster_size, temp]
processed_args = _int_antsProcessArguments(args)
libfn = utils.get_lib_fn('LabelClustersUniquely')
libfn(processed_args)
return clust | [
"def",
"label_clusters",
"(",
"image",
",",
"min_cluster_size",
"=",
"50",
",",
"min_thresh",
"=",
"1e-6",
",",
"max_thresh",
"=",
"1",
",",
"fully_connected",
"=",
"False",
")",
":",
"dim",
"=",
"image",
".",
"dimension",
"clust",
"=",
"threshold_image",
... | 28.813953 | 21.093023 |
def train_local(self, closest_point, label_vector_description=None, N=None,
pivot=True, **kwargs):
"""
Train the model in a Cannon-like fashion using the grid points as labels
and the intensities as normalsied rest-frame fluxes within some local
regime.
"""
lv = self._cannon_label_vector if label_vector_description is None else\
self._interpret_label_vector(label_vector_description)
# By default we will train to the nearest 10% of the grid.
# If grid subset is a fraction, scale it to real numbers.
if N is None:
N = self._configuration.get("settings", {}).get("grid_subset",
0.10)
if 1 >= N > 0:
N = int(np.round(N * self.grid_points.size))
logger.debug("Using {} nearest points for local Cannon model".format(N))
# Use closest N points.
dtype = [(name, '<f8') for name in self.grid_points.dtype.names]
grid_points \
= self.grid_points.astype(dtype).view(float).reshape(-1, len(dtype))
distance = np.sum(np.abs(grid_points - np.array(closest_point))/
np.ptp(grid_points, axis=0), axis=1)
grid_indices = np.argsort(distance)[:N]
lv_array, _, offsets = _build_label_vector_array(
self.grid_points[grid_indices], lv, pivot=pivot)
return self._train(lv_array, grid_indices, offsets, lv, **kwargs) | [
"def",
"train_local",
"(",
"self",
",",
"closest_point",
",",
"label_vector_description",
"=",
"None",
",",
"N",
"=",
"None",
",",
"pivot",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"lv",
"=",
"self",
".",
"_cannon_label_vector",
"if",
"label_vector_... | 43.242424 | 26.030303 |
def identical_blocks(self):
"""
:return A list of all block matches that appear to be identical
"""
identical_blocks = []
for (func_a, func_b) in self.function_matches:
identical_blocks.extend(self.get_function_diff(func_a, func_b).identical_blocks)
return identical_blocks | [
"def",
"identical_blocks",
"(",
"self",
")",
":",
"identical_blocks",
"=",
"[",
"]",
"for",
"(",
"func_a",
",",
"func_b",
")",
"in",
"self",
".",
"function_matches",
":",
"identical_blocks",
".",
"extend",
"(",
"self",
".",
"get_function_diff",
"(",
"func_a"... | 40.75 | 16.25 |
def show_vpnservice(self, vpnservice, **_params):
"""Fetches information of a specific VPN service."""
return self.get(self.vpnservice_path % (vpnservice), params=_params) | [
"def",
"show_vpnservice",
"(",
"self",
",",
"vpnservice",
",",
"*",
"*",
"_params",
")",
":",
"return",
"self",
".",
"get",
"(",
"self",
".",
"vpnservice_path",
"%",
"(",
"vpnservice",
")",
",",
"params",
"=",
"_params",
")"
] | 61.666667 | 15 |
def get_rendering_cache_key(placeholder_name, contentitem):
"""
Return a cache key for the content item output.
.. seealso::
The :func:`ContentItem.clear_cache() <fluent_contents.models.ContentItem.clear_cache>` function
can be used to remove the cache keys of a retrieved object.
"""
if not contentitem.pk:
return None
return "contentitem.@{0}.{1}.{2}".format(
placeholder_name,
contentitem.plugin.type_name, # always returns the upcasted name.
contentitem.pk, # already unique per language_code
) | [
"def",
"get_rendering_cache_key",
"(",
"placeholder_name",
",",
"contentitem",
")",
":",
"if",
"not",
"contentitem",
".",
"pk",
":",
"return",
"None",
"return",
"\"contentitem.@{0}.{1}.{2}\"",
".",
"format",
"(",
"placeholder_name",
",",
"contentitem",
".",
"plugin"... | 36.0625 | 23.8125 |
def listen(self):
"""Server-side cookie exchange
This method reads datagrams from the socket and initiates cookie
exchange, upon whose successful conclusion one can then proceed to
the accept method. Alternatively, accept can be called directly, in
which case it will call this method. In order to prevent denial-of-
service attacks, only a small, constant set of computing resources
are used during the listen phase.
On some platforms, listen must be called so that packets will be
forwarded to accepted connections. Doing so is therefore recommened
in all cases for portable code.
Return value: a peer address if a datagram from a new peer was
encountered, None if a datagram for a known peer was forwarded
"""
if not hasattr(self, "_listening"):
raise InvalidSocketError("listen called on non-listening socket")
self._pending_peer_address = None
try:
peer_address = self._udp_demux.service()
except socket.timeout:
peer_address = None
except socket.error as sock_err:
if sock_err.errno != errno.EWOULDBLOCK:
_logger.exception("Unexpected socket error in listen")
raise
peer_address = None
if not peer_address:
_logger.debug("Listen returning without peer")
return
# The demux advises that a datagram from a new peer may have arrived
if type(peer_address) is tuple:
# For this type of demux, the write BIO must be pointed at the peer
BIO_dgram_set_peer(self._wbio.value, peer_address)
self._udp_demux.forward()
self._listening_peer_address = peer_address
self._check_nbio()
self._listening = True
try:
_logger.debug("Invoking DTLSv1_listen for ssl: %d",
self._ssl.raw)
dtls_peer_address = DTLSv1_listen(self._ssl.value)
except openssl_error() as err:
if err.ssl_error == SSL_ERROR_WANT_READ:
# This method must be called again to forward the next datagram
_logger.debug("DTLSv1_listen must be resumed")
return
elif err.errqueue and err.errqueue[0][0] == ERR_WRONG_VERSION_NUMBER:
_logger.debug("Wrong version number; aborting handshake")
raise
elif err.errqueue and err.errqueue[0][0] == ERR_COOKIE_MISMATCH:
_logger.debug("Mismatching cookie received; aborting handshake")
raise
elif err.errqueue and err.errqueue[0][0] == ERR_NO_SHARED_CIPHER:
_logger.debug("No shared cipher; aborting handshake")
raise
_logger.exception("Unexpected error in DTLSv1_listen")
raise
finally:
self._listening = False
self._listening_peer_address = None
if type(peer_address) is tuple:
_logger.debug("New local peer: %s", dtls_peer_address)
self._pending_peer_address = peer_address
else:
self._pending_peer_address = dtls_peer_address
_logger.debug("New peer: %s", self._pending_peer_address)
return self._pending_peer_address | [
"def",
"listen",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"_listening\"",
")",
":",
"raise",
"InvalidSocketError",
"(",
"\"listen called on non-listening socket\"",
")",
"self",
".",
"_pending_peer_address",
"=",
"None",
"try",
":",
"pe... | 43.666667 | 21.84 |
def abu_flux_chart(self, cycle, ilabel=True, imlabel=True,
imagic=False, boxstable=True, lbound=(-12,0),
plotaxis=[0,0,0,0], which_flux=None, prange=None,
profile='charged', show=True):
'''
Plots an abundance and flux chart
Parameters
----------
cycle : string, integer or list
The cycle we are looking in. If it is a list of cycles,
this method will then do a plot for each of these cycles
and save them all to a file.
ilabel : boolean, optional
Elemental labels off/on. The default is True.
imlabel : boolean, optional
Label for isotopic masses off/on. The default is True.
imagic : boolean, optional
Turn lines for magic numbers off/on. The default is False.
boxstable : boolean, optional
Plot the black boxes around the stable elements. The
defaults is True.
lbound : tuple, optional
Boundaries for colour spectrum ploted. The default is
(-12,0).
plotaxis : list, optional
Set axis limit. If [0, 0, 0, 0] the complete range in (N,Z)
will be plotted. It equates to [xMin, xMax, Ymin, Ymax].
The default is [0, 0, 0, 0].
which_flux : integer, optional
Set to 0 for nucleosynthesis flux plot. Set to 1 for
energy flux plot. Setting wich_flux to 0 is equivelent to
setting it to 0. The default is None.
prange : integer, optional
Range of fluxes to be considered, if prange is None then
the plot range is set to 8. The default is None.
profile : string, optional
'charged' is ideal setting to show charged particle
reactions flow. 'neutron' is ideal setting for neutron
captures flows. The default is 'charged'.
show : boolean, optional
Boolean of if the plot should be displayed. Useful with
saving multiple plots using abu_chartMulti. The default is
True.
'''
#######################################################################
#### plot options
# Set axis limit: If default [0,0,0,0] the complete range in (N,Z) will
# be plotted, i.e. all isotopes, else specify the limits in
# plotaxis = [xmin,xmax,ymin,ymax]
#######################################################################
# read data file
#inpfile = cycle
#ff = fdic.ff(inpfile)
# with the flux implementation I am not using mass range for now.
# It may be introduced eventually.
mass_range = None
if str(cycle.__class__)=="<type 'list'>":
self.abu_chartMulti(cycle, mass_range,ilabel,imlabel,imlabel_fontsize,imagic,boxstable,\
lbound,plotaxis)
return
plotType=self._classTest()
#if mass_range!=None and mass_range[0]>mass_range[1]:
#print 'Please input a proper mass range'
#print 'Returning None'
#return None
if plotType=='se':
cycle=self.se.findCycle(cycle)
nin=zeros(len(self.se.A))
zin=zeros(len(self.se.Z))
for i in range(len(nin)):
nin[i]=self.se.A[i]
zin[i]=self.se.Z[i]
for i in range(len(nin)):
nin[i]=nin[i]-zin[i]
yin=self.get(cycle, 'iso_massf')
isom=self.se.isomeric_states
masses = self.se.get(cycle,'mass')
if mass_range != None:
masses = self.se.get(cycle,'mass')
masses.sort()
if mass_range != None:
tmpyps=[]
masses = self.se.get(cycle,'mass')
masses = self.se.get(cycle,'mass')
masses.sort()
for i in range(len(masses)):
if (masses[i] >mass_range[0] and masses[i]<mass_range[1]) or\
(masses[i]==mass_range[0] or masses[i]==mass_range[1]):
tmpyps.append(yin[i])
yin=tmpyps
tmp=zeros(len(yin[0]))
for i in range(len(yin)):
for j in range(len(yin[i])):
tmp[j]+=yin[i][j]
tmp=old_div(tmp,len(yin))
yin=tmp
elif plotType=='PPN':
ain=self.get('A',cycle)
zin=self.get('Z',cycle)
nin=ain-zin
yin=self.get('ABUNDANCE_MF',cycle)
isom=self.get('ISOM',cycle)
if mass_range != None:
tmpA=[]
tmpZ=[]
tmpIsom=[]
tmpyps=[]
for i in range(len(nin)):
if (ain[i] >mass_range[0] and ain[i]<mass_range[1])\
or (ain[i]==mass_range[0] or ain[i]==mass_range[1]):
tmpA.append(nin[i])
tmpZ.append(zin[i])
tmpIsom.append(isom[i])
tmpyps.append(yin[i])
zin=tmpZ
nin=tmpA
yin=tmpyps
isom=tmpIsom
else:
print('This method, abu_chart, is not supported by this class')
print('Returning None')
return None
# in case we call from ipython -pylab, turn interactive on at end again
turnoff=False
if not show:
try:
ioff()
turnoff=True
except NameError:
turnoff=False
nnmax = int(max(nin))+1
nzmax = int(max(zin))+1
nnmax_plot = nnmax
nzmax_plot = nzmax
nzycheck = zeros([nnmax,nzmax,3])
nzycheck_plot = zeros([nnmax,nzmax,3])
for i in range(len(nin)):
if isom[i]==1:
ni = int(nin[i])
zi = int(zin[i])
nzycheck[ni,zi,0] = 1
nzycheck[ni,zi,1] = yin[i]
nzycheck_plot[ni,zi,0] = 1
#######################################################################
# elemental names: elname(i) is the name of element with Z=i
elname=self.elements_names
#### create plot
## define axis and plot style (colormap, size, fontsize etc.)
if plotaxis==[0,0,0,0]:
xdim=10
ydim=6
else:
dx = plotaxis[1]-plotaxis[0]
dy = plotaxis[3]-plotaxis[2]
ydim = 6
xdim = ydim*dx/dy
params = {'axes.labelsize': 15,
'text.fontsize': 12,
'legend.fontsize': 15,
'xtick.labelsize': 15,
'ytick.labelsize': 15,
'text.usetex': True}
#pl.rcParams.update(params) #May cause Error, someting to do with tex
#fig=pl.figure(figsize=(xdim,ydim),dpi=100)
fig=pl.figure()
if profile == 'charged':
ax1 = fig.add_subplot(1, 2, 1)
elif profile == 'neutron':
ax1 = fig.add_subplot(2, 1, 1)
#axx = 0.10
#axy = 0.10
#axw = 0.85
#axh = 0.8
#ax1=pl.axes([axx,axy,axw,axh])
# Tick marks
xminorlocator = MultipleLocator(1)
xmajorlocator = MultipleLocator(5)
ax1.xaxis.set_major_locator(xmajorlocator)
ax1.xaxis.set_minor_locator(xminorlocator)
yminorlocator = MultipleLocator(1)
ymajorlocator = MultipleLocator(5)
ax1.yaxis.set_major_locator(ymajorlocator)
ax1.yaxis.set_minor_locator(yminorlocator)
# color map choice for abundances
#cmapa = cm.jet
cmapa = cm.summer
# color map choice for arrows
cmapr = cm.summer
# if a value is below the lower limit its set to white
cmapa.set_under(color='w')
cmapr.set_under(color='w')
# set value range for abundance colors (log10(Y))
norma = colors.Normalize(vmin=lbound[0],vmax=lbound[1])
# set x- and y-axis scale aspect ratio to 1
#ax1.set_aspect('equal')
#print time,temp and density on top
temp = ' '#'%8.3e' %ff['temp']
time = ' '#'%8.3e' %ff['time']
dens = ' '#'%8.3e' %ff['dens']
#May cause Error, someting to do with tex
'''
#box1 = TextArea("t : " + time + " s~~/~~T$_{9}$ : " + temp + "~~/~~$\\rho_{b}$ : " \
# + dens + ' g/cm$^{3}$', textprops=dict(color="k"))
anchored_box = AnchoredOffsetbox(loc=3,
child=box1, pad=0.,
frameon=False,
bbox_to_anchor=(0., 1.02),
bbox_transform=ax.transAxes,
borderpad=0.,
)
ax.add_artist(anchored_box)
'''
## Colour bar plotted
patches = []
color = []
for i in range(nzmax):
for j in range(nnmax):
if nzycheck[j,i,0]==1:
xy = j-0.5,i-0.5
rect = Rectangle(xy,1,1,)
# abundance
yab = nzycheck[j,i,1]
if yab == 0:
yab=1e-99
col =log10(yab)
patches.append(rect)
color.append(col)
p = PatchCollection(patches, cmap=cmapa, norm=norma)
p.set_array(array(color))
p.set_zorder(1)
ax1.add_collection(p)
cb = pl.colorbar(p)
# colorbar label
if profile == 'neutron':
cb.set_label('log$_{10}$(X)')
# plot file name
graphname = 'abundance-flux-chart'+str(cycle)
# Add black frames for stable isotopes
if boxstable:
for i in range(len(self.stable_el)):
if i == 0:
continue
tmp = self.stable_el[i]
try:
zz= self.elements_names.index(tmp[0]) #charge
except:
continue
for j in range(len(tmp)):
if j == 0:
continue
nn = int(tmp[j]) #atomic mass
nn=nn-zz
xy = nn-0.5,zz-0.5
rect = Rectangle(xy,1,1,ec='k',fc='None',fill='False',lw=4.)
rect.set_zorder(2)
ax1.add_patch(rect)
# decide which array to take for label positions
iarr = 0
# plot element labels
if ilabel:
for z in range(nzmax):
try:
nmin = min(argwhere(nzycheck[:,z,iarr]))[0]-1
nmax = max(argwhere(nzycheck[:,z,iarr]))[0]+1
ax1.text(nmin,z,elname[z],horizontalalignment='center',verticalalignment='center',\
fontsize='small',clip_on=True)
ax1.text(nmax,z,elname[z],horizontalalignment='center',verticalalignment='center',\
fontsize='small',clip_on=True)
except ValueError:
continue
# plot mass numbers
if imlabel:
for z in range(nzmax):
for n in range(nnmax):
a = z+n
if nzycheck[n,z,iarr]==1:
ax1.text(n,z,a,horizontalalignment='center',verticalalignment='center',\
fontsize='x-small',clip_on=True)
# plot lines at magic numbers
if imagic:
ixymagic=[2, 8, 20, 28, 50, 82, 126]
nmagic = len(ixymagic)
for magic in ixymagic:
if magic<=nzmax:
try:
xnmin = min(argwhere(nzycheck[:,magic,iarr]))[0]
xnmax = max(argwhere(nzycheck[:,magic,iarr]))[0]
line = ax1.plot([xnmin,xnmax],[magic,magic],lw=3.,color='r',ls='-')
except ValueError:
dummy=0
if magic<=nnmax:
try:
yzmin = min(argwhere(nzycheck[magic,:,iarr]))[0]
yzmax = max(argwhere(nzycheck[magic,:,iarr]))[0]
line = ax1.plot([magic,magic],[yzmin,yzmax],lw=3.,color='r',ls='-')
except ValueError:
dummy=0
# set axis limits
if plotaxis==[0,0,0,0]:
xmax=max(nin)
ymax=max(zin)
ax1.axis([-0.5,xmax+0.5,-0.5,ymax+0.5])
else:
ax1.axis(plotaxis)
# set x- and y-axis label
ax1.set_ylabel('Proton number')
if profile == 'charged':
ax1.set_xlabel('Neutron number')
#pl.title('Isotopic Chart for cycle '+str(int(cycle)))
#
# here below I read data from the flux_*****.DAT file.
#
file_name = 'flux_'+str(cycle).zfill(5)+'.DAT'
print(file_name)
f = open(file_name)
lines = f.readline()
lines = f.readlines()
f.close()
print_max_flux_in_plot = False
# color map choice for fluxes
#cmapa = cm.jet
cmapa = cm.autumn
# color map choice for arrows
cmapr = cm.autumn
# starting point of arrow
coord_x_1 = []
coord_y_1 = []
# ending point of arrow (option 1)
coord_x_2 = []
coord_y_2 = []
# ending point of arrow (option 2)
coord_x_3 = []
coord_y_3 = []
# fluxes
flux_read = []
flux_log10 = []
if which_flux == None or which_flux == 0:
print('chart for nucleosynthesis fluxes [dYi/dt]')
line_to_read = 9
elif which_flux == 1:
print('chart for energy fluxes')
line_to_read = 10
elif which_flux > 1:
print("you have only option 0 or 1, not larger than 1")
single_line = []
for i in range(len(lines)):
single_line.append(lines[i].split())
coord_y_1.append(int(single_line[i][1]))
coord_x_1.append(int(single_line[i][2])-coord_y_1[i])
coord_y_2.append(int(single_line[i][5]))
coord_x_2.append(int(single_line[i][6])-coord_y_2[i])
coord_y_3.append(int(single_line[i][7]))
coord_x_3.append(int(single_line[i][8])-coord_y_3[i])
try:
flux_read.append(float(single_line[i][line_to_read]))
except ValueError: # this is done to avoid format issues like 3.13725-181...
flux_read.append(1.0E-99)
flux_log10.append(log10(flux_read[i]+1.0e-99))
print(file_name,' read!')
# I need to select smaller sample, with only fluxes inside plotaxis.
if plotaxis!=[0,0,0,0]:
coord_y_1_small=[]
coord_x_1_small=[]
coord_y_2_small=[]
coord_x_2_small=[]
coord_y_3_small=[]
coord_x_3_small=[]
flux_log10_small = []
for i in range(len(flux_log10)):
I_am_in = 0
if coord_y_1[i] > plotaxis[2] and coord_y_1[i] < plotaxis[3] and coord_x_1[i] > plotaxis[0] and coord_x_1[i] < plotaxis[1]:
I_am_in = 1
coord_y_1_small.append(int(coord_y_1[i]))
coord_x_1_small.append(int(coord_x_1[i]))
coord_y_2_small.append(int(coord_y_2[i]))
coord_x_2_small.append(int(coord_x_2[i]))
coord_y_3_small.append(int(coord_y_3[i]))
coord_x_3_small.append(int(coord_x_3[i]))
flux_log10_small.append(flux_log10[i])
if coord_y_3[i] > plotaxis[2] and coord_y_3[i] < plotaxis[3] and coord_x_3[i] > plotaxis[0] and coord_x_3[i] < plotaxis[1] and I_am_in == 0:
I_am_in = 1
coord_y_1_small.append(int(coord_y_1[i]))
coord_x_1_small.append(int(coord_x_1[i]))
coord_y_2_small.append(int(coord_y_2[i]))
coord_x_2_small.append(int(coord_x_2[i]))
coord_y_3_small.append(int(coord_y_3[i]))
coord_x_3_small.append(int(coord_x_3[i]))
flux_log10_small.append(flux_log10[i])
# elemental labels off/on [0/1]
ilabel = 1
# label for isotopic masses off/on [0/1]
imlabel = 1
# turn lines for magic numbers off/on [0/1]
imagic = 0
# flow is plotted over "prange" dex. If flow < maxflow-prange it is not plotted
if prange == None:
print('plot range given by default')
prange = 8.
#############################################
#print flux_log10_small
# we should scale prange on plot_axis range, not on max_flux!
max_flux = max(flux_log10)
ind_max_flux = flux_log10.index(max_flux)
if plotaxis!=[0,0,0,0]:
max_flux_small = max(flux_log10_small)
if plotaxis==[0,0,0,0]:
nzmax = int(max(max(coord_y_1),max(coord_y_2),max(coord_y_3)))+1
nnmax = int(max(max(coord_x_1),max(coord_x_2),max(coord_x_3)))+1
coord_x_1_small = coord_x_1
coord_x_2_small = coord_x_2
coord_x_3_small = coord_x_3
coord_y_1_small = coord_y_1
coord_y_2_small = coord_y_2
coord_y_3_small = coord_y_3
flux_log10_small= flux_log10
max_flux_small = max_flux
else:
nzmax = int(max(max(coord_y_1_small),max(coord_y_2_small),max(coord_y_3_small)))+1
nnmax = int(max(max(coord_x_1_small),max(coord_x_2_small),max(coord_x_3_small)))+1
for i in range(nzmax):
for j in range(nnmax):
if nzycheck[j,i,0]==1:
xy = j-0.5,i-0.5
rect = Rectangle(xy,1,1,)
patches.append(rect)
nzycheck = zeros([nnmax_plot,nzmax,3])
coord_x_out = zeros(len(coord_x_2_small), dtype='int')
coord_y_out = zeros(len(coord_y_2_small),dtype='int')
for i in range(len(flux_log10_small)):
nzycheck[coord_x_1_small[i],coord_y_1_small[i],0] = 1
nzycheck[coord_x_1_small[i],coord_y_1_small[i],1] = flux_log10_small[i]
if coord_x_2_small[i] >= coord_x_3_small[i]:
coord_x_out[i] = coord_x_2_small[i]
coord_y_out[i] = coord_y_2_small[i]
nzycheck[coord_x_out[i],coord_y_out[i],0] = 1
nzycheck[coord_x_out[i],coord_y_out[i],1] = flux_log10_small[i]
elif coord_x_2_small[i] < coord_x_3_small[i]:
coord_x_out[i] = coord_x_3_small[i]
coord_y_out[i] = coord_y_3_small[i]
nzycheck[coord_x_out[i],coord_y_out[i],0] = 1
nzycheck[coord_x_out[i],coord_y_out[i],1] = flux_log10_small[i]
if flux_log10_small[i]>max_flux_small-prange:
nzycheck[coord_x_1_small[i],coord_y_1_small[i],2] = 1
nzycheck[coord_x_out[i],coord_y_out[i],2] = 1
#### create plot
if profile == 'charged':
ax2 = fig.add_subplot(1, 2, 2)
elif profile == 'neutron':
ax2 = fig.add_subplot(2, 1, 2)
# Tick marks
xminorlocator = MultipleLocator(1)
xmajorlocator = MultipleLocator(5)
ax2.xaxis.set_major_locator(xmajorlocator)
ax2.xaxis.set_minor_locator(xminorlocator)
yminorlocator = MultipleLocator(1)
ymajorlocator = MultipleLocator(5)
ax2.yaxis.set_major_locator(ymajorlocator)
ax2.yaxis.set_minor_locator(yminorlocator)
## define axis and plot style (colormap, size, fontsize etc.)
if plotaxis==[0,0,0,0]:
xdim=10
ydim=6
else:
dx = plotaxis[1]-plotaxis[0]
dy = plotaxis[3]-plotaxis[2]
ydim = 6
xdim = ydim*dx/dy
format = 'pdf'
# set x- and y-axis scale aspect ratio to 1
#ax2.set_aspect('equal')
# Add black frames for stable isotopes
# Add black frames for stable isotopes
if boxstable:
for i in range(len(self.stable_el)):
if i == 0:
continue
tmp = self.stable_el[i]
try:
zz= self.elements_names.index(tmp[0]) #charge
except:
continue
for j in range(len(tmp)):
if j == 0:
continue
nn = int(tmp[j]) #atomic mass
nn=nn-zz
xy = nn-0.5,zz-0.5
rect = Rectangle(xy,1,1,ec='k',fc='None',fill='False',lw=4.)
rect.set_zorder(2)
ax2.add_patch(rect)
apatches = []
acolor = []
m = old_div(0.8,prange)
vmax=ceil(max(flux_log10_small))
vmin=max(flux_log10_small)-prange
b=-vmin*m+0.1
normr = colors.Normalize(vmin=vmin,vmax=vmax)
ymax=0.
xmax=0.
for i in range(len(flux_log10_small)):
x = coord_x_1_small[i]
y = coord_y_1_small[i]
dx = coord_x_out[i]-coord_x_1_small[i]
dy = coord_y_out[i]-coord_y_1_small[i]
if flux_log10_small[i]>=vmin:
arrowwidth = flux_log10_small[i]*m+b
arrow = Arrow(x,y,dx,dy, width=arrowwidth)
if xmax<x:
xmax=x
if ymax<y:
ymax=y
acol = flux_log10_small[i]
apatches.append(arrow)
acolor.append(acol)
xy = x-0.5,y-0.5
rect = Rectangle(xy,1,1,ec='k',fc='None',fill='False',lw=1.)
patches.append(rect)
xy = x+dx-0.5,y+dy-0.5
rect = Rectangle(xy,1,1,ec='k',fc='None',fill='False',lw=1.)
patches.append(rect)
p = PatchCollection(patches,norm=0,facecolor='w')
p.set_zorder(1)
ax2.add_collection(p)
a = PatchCollection(apatches, cmap=cmapr, norm=normr)
a.set_array(array(acolor))
a.set_zorder(3)
ax2.add_collection(a)
cb = pl.colorbar(a)
# colorbar label
cb.set_label('log$_{10}$($x$)')
if profile == 'neutron':
cb.set_label('log$_{10}$(f)')
# decide which array to take for label positions
iarr = 2
# plot element labels
for z in range(nzmax):
try:
nmin = min(argwhere(nzycheck_plot[:,z,iarr-2]))[0]-1
nmax = max(argwhere(nzycheck_plot[:,z,iarr-2]))[0]+1
ax2.text(nmin,z,elname[z],horizontalalignment='center',verticalalignment='center',fontsize='small',clip_on=True)
ax2.text(nmax,z,elname[z],horizontalalignment='center',verticalalignment='center',fontsize='small',clip_on=True)
except ValueError:
continue
# plot mass numbers
if imlabel:
for z in range(nzmax):
for n in range(nnmax_plot):
a = z+n
if nzycheck_plot[n,z,iarr-2]==1:
ax2.text(n,z,a,horizontalalignment='center',verticalalignment='center',fontsize='x-small',clip_on=True)
# plot lines at magic numbers
if imagic==1:
ixymagic=[2, 8, 20, 28, 50, 82, 126]
nmagic = len(ixymagic)
for magic in ixymagic:
if magic<=nzmax:
try:
xnmin = min(argwhere(nzycheck[:,magic,iarr-2]))[0]
xnmax = max(argwhere(nzycheck[:,magic,iarr-2]))[0]
line = ax2.plot([xnmin,xnmax],[magic,magic],lw=3.,color='r',ls='-')
except ValueError:
dummy=0
if magic<=nnmax:
try:
yzmin = min(argwhere(nzycheck[magic,:,iarr-2]))[0]
yzmax = max(argwhere(nzycheck[magic,:,iarr-2]))[0]
line = ax2.plot([magic,magic],[yzmin,yzmax],lw=3.,color='r',ls='-')
except ValueError:
dummy=0
# set axis limits
if plotaxis==[0,0,0,0]:
ax2.axis([-0.5,xmax+0.5,-0.5,ymax+0.5])
else:
ax2.axis(plotaxis)
# set x- and y-axis label
ax2.set_xlabel('Neutron number')
if profile == 'neutron':
ax2.set_ylabel('Proton number')
if which_flux == None or which_flux == 0:
max_flux_label="max flux = "+str('{0:.4f}'.format(max_flux))
elif which_flux == 1:
max_flux_label="max energy flux = "+str('{0:.4f}'.format(max_flux))
if print_max_flux_in_plot:
ax2.text(plotaxis[1]-1.8,plotaxis[2]+0.1,max_flux_label,fontsize=10.)
#fig.savefig(graphname)
print(graphname,'is done')
if show:
pl.show()
if turnoff:
ion()
return | [
"def",
"abu_flux_chart",
"(",
"self",
",",
"cycle",
",",
"ilabel",
"=",
"True",
",",
"imlabel",
"=",
"True",
",",
"imagic",
"=",
"False",
",",
"boxstable",
"=",
"True",
",",
"lbound",
"=",
"(",
"-",
"12",
",",
"0",
")",
",",
"plotaxis",
"=",
"[",
... | 35.857765 | 18.522496 |
def reshape(self, shape: tf.TensorShape) -> 'TensorFluent':
'''Returns a TensorFluent for the reshape operation with given `shape`.
Args:
shape: The output's shape.
Returns:
A TensorFluent wrapping the reshape operation.
'''
t = tf.reshape(self.tensor, shape)
scope = self.scope.as_list()
batch = self.batch
return TensorFluent(t, scope, batch=batch) | [
"def",
"reshape",
"(",
"self",
",",
"shape",
":",
"tf",
".",
"TensorShape",
")",
"->",
"'TensorFluent'",
":",
"t",
"=",
"tf",
".",
"reshape",
"(",
"self",
".",
"tensor",
",",
"shape",
")",
"scope",
"=",
"self",
".",
"scope",
".",
"as_list",
"(",
")... | 32.923077 | 20.615385 |
def parse(string, language=None):
"""
Return a solution to the equation in the input string.
"""
if language:
string = replace_word_tokens(string, language)
tokens = tokenize(string)
postfix = to_postfix(tokens)
return evaluate_postfix(postfix) | [
"def",
"parse",
"(",
"string",
",",
"language",
"=",
"None",
")",
":",
"if",
"language",
":",
"string",
"=",
"replace_word_tokens",
"(",
"string",
",",
"language",
")",
"tokens",
"=",
"tokenize",
"(",
"string",
")",
"postfix",
"=",
"to_postfix",
"(",
"to... | 24.727273 | 15.090909 |
def tagMap(self):
""""Return a :class:`~pyasn1.type.tagmap.TagMap` object mapping
ASN.1 tags to ASN.1 objects contained within callee.
"""
try:
return self._tagMap
except AttributeError:
self._tagMap = tagmap.TagMap(
{self.tagSet: self},
{eoo.endOfOctets.tagSet: eoo.endOfOctets},
self
)
return self._tagMap | [
"def",
"tagMap",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"_tagMap",
"except",
"AttributeError",
":",
"self",
".",
"_tagMap",
"=",
"tagmap",
".",
"TagMap",
"(",
"{",
"self",
".",
"tagSet",
":",
"self",
"}",
",",
"{",
"eoo",
".",
"e... | 29 | 16.866667 |
def render_config(config: Config, indent: str = "") -> str:
"""
Pretty-print a config in sort-of-JSON+comments.
"""
# Add four spaces to the indent.
new_indent = indent + " "
return "".join([
# opening brace + newline
"{\n",
# "type": "...", (if present)
f'{new_indent}"type": "{config.typ3}",\n' if config.typ3 else '',
# render each item
"".join(_render(item, new_indent) for item in config.items),
# indent and close the brace
indent,
"}\n"
]) | [
"def",
"render_config",
"(",
"config",
":",
"Config",
",",
"indent",
":",
"str",
"=",
"\"\"",
")",
"->",
"str",
":",
"# Add four spaces to the indent.",
"new_indent",
"=",
"indent",
"+",
"\" \"",
"return",
"\"\"",
".",
"join",
"(",
"[",
"# opening brace + n... | 31.611111 | 15.833333 |
def chunks(self, size=32, alignment=1):
"""Iterate over all segments and return chunks of the data aligned as
given by `alignment`. `size` must be a multiple of
`alignment`. Each chunk is returned as a named two-tuple of
its address and data.
"""
if (size % alignment) != 0:
raise Error(
'size {} is not a multiple of alignment {}'.format(
size,
alignment))
for segment in self:
for chunk in segment.chunks(size, alignment):
yield chunk | [
"def",
"chunks",
"(",
"self",
",",
"size",
"=",
"32",
",",
"alignment",
"=",
"1",
")",
":",
"if",
"(",
"size",
"%",
"alignment",
")",
"!=",
"0",
":",
"raise",
"Error",
"(",
"'size {} is not a multiple of alignment {}'",
".",
"format",
"(",
"size",
",",
... | 33.823529 | 17.117647 |
def combine_duplicate_stmts(stmts):
"""Combine evidence from duplicate Statements.
Statements are deemed to be duplicates if they have the same key
returned by the `matches_key()` method of the Statement class. This
generally means that statements must be identical in terms of their
arguments and can differ only in their associated `Evidence` objects.
This function keeps the first instance of each set of duplicate
statements and merges the lists of Evidence from all of the other
statements.
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
Set of statements to de-duplicate.
Returns
-------
list of :py:class:`indra.statements.Statement`
Unique statements with accumulated evidence across duplicates.
Examples
--------
De-duplicate and combine evidence for two statements differing only
in their evidence lists:
>>> map2k1 = Agent('MAP2K1')
>>> mapk1 = Agent('MAPK1')
>>> stmt1 = Phosphorylation(map2k1, mapk1, 'T', '185',
... evidence=[Evidence(text='evidence 1')])
>>> stmt2 = Phosphorylation(map2k1, mapk1, 'T', '185',
... evidence=[Evidence(text='evidence 2')])
>>> uniq_stmts = Preassembler.combine_duplicate_stmts([stmt1, stmt2])
>>> uniq_stmts
[Phosphorylation(MAP2K1(), MAPK1(), T, 185)]
>>> sorted([e.text for e in uniq_stmts[0].evidence]) # doctest:+IGNORE_UNICODE
['evidence 1', 'evidence 2']
"""
# Helper function to get a list of evidence matches keys
def _ev_keys(sts):
ev_keys = []
for stmt in sts:
for ev in stmt.evidence:
ev_keys.append(ev.matches_key())
return ev_keys
# Iterate over groups of duplicate statements
unique_stmts = []
for _, duplicates in Preassembler._get_stmt_matching_groups(stmts):
ev_keys = set()
# Get the first statement and add the evidence of all subsequent
# Statements to it
duplicates = list(duplicates)
start_ev_keys = _ev_keys(duplicates)
for stmt_ix, stmt in enumerate(duplicates):
if stmt_ix is 0:
new_stmt = stmt.make_generic_copy()
if len(duplicates) == 1:
new_stmt.uuid = stmt.uuid
raw_text = [None if ag is None else ag.db_refs.get('TEXT')
for ag in stmt.agent_list(deep_sorted=True)]
raw_grounding = [None if ag is None else ag.db_refs
for ag in stmt.agent_list(deep_sorted=True)]
for ev in stmt.evidence:
ev_key = ev.matches_key() + str(raw_text) + \
str(raw_grounding)
if ev_key not in ev_keys:
# In case there are already agents annotations, we
# just add a new key for raw_text, otherwise create
# a new key
if 'agents' in ev.annotations:
ev.annotations['agents']['raw_text'] = raw_text
ev.annotations['agents']['raw_grounding'] = \
raw_grounding
else:
ev.annotations['agents'] = \
{'raw_text': raw_text,
'raw_grounding': raw_grounding}
if 'prior_uuids' not in ev.annotations:
ev.annotations['prior_uuids'] = []
ev.annotations['prior_uuids'].append(stmt.uuid)
new_stmt.evidence.append(ev)
ev_keys.add(ev_key)
end_ev_keys = _ev_keys([new_stmt])
if len(end_ev_keys) != len(start_ev_keys):
logger.debug('%d redundant evidences eliminated.' %
(len(start_ev_keys) - len(end_ev_keys)))
# This should never be None or anything else
assert isinstance(new_stmt, Statement)
unique_stmts.append(new_stmt)
return unique_stmts | [
"def",
"combine_duplicate_stmts",
"(",
"stmts",
")",
":",
"# Helper function to get a list of evidence matches keys",
"def",
"_ev_keys",
"(",
"sts",
")",
":",
"ev_keys",
"=",
"[",
"]",
"for",
"stmt",
"in",
"sts",
":",
"for",
"ev",
"in",
"stmt",
".",
"evidence",
... | 46.714286 | 19.604396 |
def projects(self):
"""*All child projects of this taskpaper object*
**Usage:**
Given a taskpaper document object (`doc`), to get a list of the project objects found within the document use:
.. code-block:: python
docProjects = doc.projects
The same is true of project objects which may contain sub-projects:
.. code-block:: python
aProject = docProjects[0]
subProjects = aProject.projects
"""
return self._get_object(
regex=re.compile(
r'((?<=\n)|(?<=^))(?P<title>(?!\[Searches\]|- )\S.*?:(?!\S)) *(?P<tagString>( *?@[^(\s]+(\([^)]*\))?)+)?(?P<content>(\n(( |\t)+\S.*)|\n( |\t)*|\n)+)', re.UNICODE),
objectType="project",
content=None
) | [
"def",
"projects",
"(",
"self",
")",
":",
"return",
"self",
".",
"_get_object",
"(",
"regex",
"=",
"re",
".",
"compile",
"(",
"r'((?<=\\n)|(?<=^))(?P<title>(?!\\[Searches\\]|- )\\S.*?:(?!\\S)) *(?P<tagString>( *?@[^(\\s]+(\\([^)]*\\))?)+)?(?P<content>(\\n(( |\\t)+\\S.*)|\\n( |\\t)*... | 33.708333 | 28.25 |
def log_uniform(low, high, size:Optional[List[int]]=None)->FloatOrTensor:
"Draw 1 or shape=`size` random floats from uniform dist: min=log(`low`), max=log(`high`)."
res = uniform(log(low), log(high), size)
return exp(res) if size is None else res.exp_() | [
"def",
"log_uniform",
"(",
"low",
",",
"high",
",",
"size",
":",
"Optional",
"[",
"List",
"[",
"int",
"]",
"]",
"=",
"None",
")",
"->",
"FloatOrTensor",
":",
"res",
"=",
"uniform",
"(",
"log",
"(",
"low",
")",
",",
"log",
"(",
"high",
")",
",",
... | 65.5 | 25.5 |
def get_selections(self):
"""Get current model selection status in state machine selection (filtered according the purpose of the widget)
and tree selection of the widget"""
sm_selection, sm_filtered_selected_model_set = self.get_state_machine_selection()
tree_selection, selected_model_list = self.get_view_selection()
return tree_selection, selected_model_list, sm_selection, sm_filtered_selected_model_set | [
"def",
"get_selections",
"(",
"self",
")",
":",
"sm_selection",
",",
"sm_filtered_selected_model_set",
"=",
"self",
".",
"get_state_machine_selection",
"(",
")",
"tree_selection",
",",
"selected_model_list",
"=",
"self",
".",
"get_view_selection",
"(",
")",
"return",
... | 73.833333 | 25.166667 |
def vn(x):
"""
value or none, returns none if x is an empty list
"""
if x == []:
return None
if isinstance(x, list):
return '|'.join(x)
if isinstance(x, datetime):
return x.isoformat()
return x | [
"def",
"vn",
"(",
"x",
")",
":",
"if",
"x",
"==",
"[",
"]",
":",
"return",
"None",
"if",
"isinstance",
"(",
"x",
",",
"list",
")",
":",
"return",
"'|'",
".",
"join",
"(",
"x",
")",
"if",
"isinstance",
"(",
"x",
",",
"datetime",
")",
":",
"ret... | 17.818182 | 17.454545 |
def get_upcoming_events_within_the_current_week(self):
'''Returns the events from the calendar for the next days_to_look_ahead days.'''
now = datetime.now(tz=self.timezone) # timezone?
start_time = datetime(year=now.year, month=now.month, day=now.day, hour=now.hour, minute=now.minute, second=now.second, tzinfo=self.timezone)
end_time = start_time + timedelta(days = 6 - now.weekday())
end_time = datetime(year = end_time.year, month = end_time.month, day = end_time.day, hour=23, minute=59, second=59, tzinfo=self.timezone)
assert(end_time.weekday() == 6)
start_time = start_time.isoformat()
end_time = end_time.isoformat()
return self.get_events(start_time, end_time) | [
"def",
"get_upcoming_events_within_the_current_week",
"(",
"self",
")",
":",
"now",
"=",
"datetime",
".",
"now",
"(",
"tz",
"=",
"self",
".",
"timezone",
")",
"# timezone?",
"start_time",
"=",
"datetime",
"(",
"year",
"=",
"now",
".",
"year",
",",
"month",
... | 73.3 | 33.7 |
async def get(self, key, *, dc=None, watch=None, consistency=None):
"""Returns the specified key
Parameters:
key (str): Key to fetch
watch (Blocking): Do a blocking query
consistency (Consistency): Force consistency
Returns:
ObjectMeta: where value is the queried kv value
Object will look like::
{
"CreateIndex": 100,
"ModifyIndex": 200,
"LockIndex": 200,
"Key": "zip",
"Flags": 0,
"Value": b"my data",
"Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e"
}
**CreateIndex** is the internal index value that represents when
the entry was created.
**ModifyIndex** is the last index that modified this key.
This index corresponds to the X-Consul-Index header value that is
returned in responses, and it can be used to establish blocking
queries. You can even perform blocking queries against entire
subtrees of the KV store.
**LockIndex** is the number of times this key has successfully been
acquired in a lock. If the lock is held, the Session key provides
the session that owns the lock.
**Key** is simply the full path of the entry.
**Flags** is an opaque unsigned integer that can be attached to each
entry. Clients can choose to use this however makes sense for their
application.
**Value** is a :class:`~aioconsul.typing.Payload` object,
it depends on **Flags**.
"""
response = await self._read(key,
dc=dc,
watch=watch,
consistency=consistency)
result = response.body[0]
result["Value"] = decode_value(result["Value"], result["Flags"])
return consul(result, meta=extract_meta(response.headers)) | [
"async",
"def",
"get",
"(",
"self",
",",
"key",
",",
"*",
",",
"dc",
"=",
"None",
",",
"watch",
"=",
"None",
",",
"consistency",
"=",
"None",
")",
":",
"response",
"=",
"await",
"self",
".",
"_read",
"(",
"key",
",",
"dc",
"=",
"dc",
",",
"watc... | 38.176471 | 21.215686 |
def discrete(self):
"""
Set sequence to be discrete.
:rtype: Column
:Example:
>>> # Table schema is create table test(f1 double, f2 string)
>>> # Original continuity: f1=CONTINUOUS, f2=CONTINUOUS
>>> # Now we want to set ``f1`` and ``f2`` into continuous
>>> new_ds = df.discrete('f1 f2')
"""
field_name = self.name
new_df = copy_df(self)
new_df._perform_operation(op.FieldContinuityOperation({field_name: False}))
return new_df | [
"def",
"discrete",
"(",
"self",
")",
":",
"field_name",
"=",
"self",
".",
"name",
"new_df",
"=",
"copy_df",
"(",
"self",
")",
"new_df",
".",
"_perform_operation",
"(",
"op",
".",
"FieldContinuityOperation",
"(",
"{",
"field_name",
":",
"False",
"}",
")",
... | 30.529412 | 20.411765 |
def on_release_key(key, callback, suppress=False):
"""
Invokes `callback` for KEY_UP event related to the given key. For details see `hook`.
"""
return hook_key(key, lambda e: e.event_type == KEY_DOWN or callback(e), suppress=suppress) | [
"def",
"on_release_key",
"(",
"key",
",",
"callback",
",",
"suppress",
"=",
"False",
")",
":",
"return",
"hook_key",
"(",
"key",
",",
"lambda",
"e",
":",
"e",
".",
"event_type",
"==",
"KEY_DOWN",
"or",
"callback",
"(",
"e",
")",
",",
"suppress",
"=",
... | 49.4 | 22.6 |
def _format_linedata(linedata, indent, indent_width):
"""Format specific linedata into a pleasant layout.
"linedata" is a list of 2-tuples of the form:
(<item-display-string>, <item-docstring>)
"indent" is a string to use for one level of indentation
"indent_width" is a number of columns by which the
formatted data will be indented when printed.
The <item-display-string> column is held to 30 columns.
"""
lines = []
WIDTH = 78 - indent_width
SPACING = 2
NAME_WIDTH_LOWER_BOUND = 13
NAME_WIDTH_UPPER_BOUND = 30
NAME_WIDTH = max([len(s) for s, d in linedata])
if NAME_WIDTH < NAME_WIDTH_LOWER_BOUND:
NAME_WIDTH = NAME_WIDTH_LOWER_BOUND
elif NAME_WIDTH > NAME_WIDTH_UPPER_BOUND:
NAME_WIDTH = NAME_WIDTH_UPPER_BOUND
DOC_WIDTH = WIDTH - NAME_WIDTH - SPACING
for namestr, doc in linedata:
line = indent + namestr
if len(namestr) <= NAME_WIDTH:
line += ' ' * (NAME_WIDTH + SPACING - len(namestr))
else:
lines.append(line)
line = indent + ' ' * (NAME_WIDTH + SPACING)
line += _summarize_doc(doc, DOC_WIDTH)
lines.append(line.rstrip())
return lines | [
"def",
"_format_linedata",
"(",
"linedata",
",",
"indent",
",",
"indent_width",
")",
":",
"lines",
"=",
"[",
"]",
"WIDTH",
"=",
"78",
"-",
"indent_width",
"SPACING",
"=",
"2",
"NAME_WIDTH_LOWER_BOUND",
"=",
"13",
"NAME_WIDTH_UPPER_BOUND",
"=",
"30",
"NAME_WIDT... | 36.636364 | 14.393939 |
def rename(blocks, scope, stype):
""" Rename all sub-blocks moved under another
block. (mixins)
Args:
lst (list): block list
scope (object): Scope object
"""
for p in blocks:
if isinstance(p, stype):
p.tokens[0].parse(scope)
if p.tokens[1]:
scope.push()
scope.current = p.tokens[0]
rename(p.tokens[1], scope, stype)
scope.pop() | [
"def",
"rename",
"(",
"blocks",
",",
"scope",
",",
"stype",
")",
":",
"for",
"p",
"in",
"blocks",
":",
"if",
"isinstance",
"(",
"p",
",",
"stype",
")",
":",
"p",
".",
"tokens",
"[",
"0",
"]",
".",
"parse",
"(",
"scope",
")",
"if",
"p",
".",
"... | 29.933333 | 10.066667 |
def filter_bam(job, job_vars):
"""
Performs filtering on the transcriptome bam
job_vars: tuple Tuple of dictionaries: input_args and ids
"""
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
cores = input_args['cpu_count']
sudo = input_args['sudo']
# I/O
transcriptome_bam = return_input_paths(job, work_dir, ids, 'transcriptome.bam')
output = os.path.join(work_dir, 'filtered.bam')
# Command
parameters = ['sam-filter',
'--strip-indels',
'--max-insert', '1000',
'--mapq', '1',
'--in', docker_path(transcriptome_bam),
'--out', docker_path(output)]
docker_call(tool='quay.io/ucsc_cgl/ubu:1.2--02806964cdf74bf5c39411b236b4c4e36d026843',
tool_parameters=parameters, work_dir=os.path.dirname(output), java_opts='-Xmx30g', sudo=sudo)
# Write to FileStore
ids['filtered.bam'] = job.fileStore.writeGlobalFile(output)
# Run child job
return job.addChildJobFn(rsem, job_vars, cores=cores, disk='30 G').rv() | [
"def",
"filter_bam",
"(",
"job",
",",
"job_vars",
")",
":",
"input_args",
",",
"ids",
"=",
"job_vars",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"cores",
"=",
"input_args",
"[",
"'cpu_count'",
"]",
"sudo",
"=",
"input_args... | 41.346154 | 18.730769 |
def unescape(text):
"""
Removes HTML or XML character references and entities from a text string.
:param text: The HTML (or XML) source text.
:return: The plain text, as a Unicode string, if necessary.
"""
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16))
else:
return unichr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
return re.sub("&#?\w+;", fixup, text) | [
"def",
"unescape",
"(",
"text",
")",
":",
"def",
"fixup",
"(",
"m",
")",
":",
"text",
"=",
"m",
".",
"group",
"(",
"0",
")",
"if",
"text",
"[",
":",
"2",
"]",
"==",
"\"&#\"",
":",
"# character reference",
"try",
":",
"if",
"text",
"[",
":",
"3"... | 29.62963 | 17.111111 |
def officialPriceWS(symbols=None, on_data=None):
'''https://iextrading.com/developer/docs/#official-price'''
symbols = _strToList(symbols)
sendinit = ({'symbols': symbols, 'channels': ['official-price']},)
return _stream(_wsURL('deep'), sendinit, on_data) | [
"def",
"officialPriceWS",
"(",
"symbols",
"=",
"None",
",",
"on_data",
"=",
"None",
")",
":",
"symbols",
"=",
"_strToList",
"(",
"symbols",
")",
"sendinit",
"=",
"(",
"{",
"'symbols'",
":",
"symbols",
",",
"'channels'",
":",
"[",
"'official-price'",
"]",
... | 53.4 | 16.2 |
def create_from_cellranger(indir: str, outdir: str = None, genome: str = None) -> str:
"""
Create a .loom file from 10X Genomics cellranger output
Args:
indir (str): path to the cellranger output folder (the one that contains 'outs')
outdir (str): output folder wher the new loom file should be saved (default to indir)
genome (str): genome build to load (e.g. 'mm10'; if None, determine species from outs folder)
Returns:
path (str): Full path to the created loom file.
Remarks:
The resulting file will be named ``{sampleID}.loom``, where the sampleID is the one given by cellranger.
"""
if outdir is None:
outdir = indir
sampleid = os.path.split(os.path.abspath(indir))[-1]
matrix_folder = os.path.join(indir, 'outs', 'filtered_gene_bc_matrices')
if os.path.exists(matrix_folder):
if genome is None:
genome = [f for f in os.listdir(matrix_folder) if not f.startswith(".")][0]
matrix_folder = os.path.join(matrix_folder, genome)
matrix = mmread(os.path.join(matrix_folder, "matrix.mtx")).astype("float32").todense()
genelines = open(os.path.join(matrix_folder, "genes.tsv"), "r").readlines()
bclines = open(os.path.join(matrix_folder, "barcodes.tsv"), "r").readlines()
else: # cellranger V3 file locations
if genome is None:
genome = "" # Genome is not visible from V3 folder
matrix_folder = os.path.join(indir, 'outs', 'filtered_feature_bc_matrix')
matrix = mmread(os.path.join(matrix_folder, "matrix.mtx.gz")).astype("float32").todense()
genelines = [ l.decode() for l in gzip.open(os.path.join(matrix_folder, "features.tsv.gz"), "r").readlines() ]
bclines = [ l.decode() for l in gzip.open(os.path.join(matrix_folder, "barcodes.tsv.gz"), "r").readlines() ]
accession = np.array([x.split("\t")[0] for x in genelines]).astype("str")
gene = np.array([x.split("\t")[1].strip() for x in genelines]).astype("str")
cellids = np.array([sampleid + ":" + x.strip() for x in bclines]).astype("str")
col_attrs = {"CellID": cellids}
row_attrs = {"Accession": accession, "Gene": gene}
tsne_file = os.path.join(indir, "outs", "analysis", "tsne", "projection.csv")
# In cellranger V2 the file moved one level deeper
if not os.path.exists(tsne_file):
tsne_file = os.path.join(indir, "outs", "analysis", "tsne", "2_components", "projection.csv")
if os.path.exists(tsne_file):
tsne = np.loadtxt(tsne_file, usecols=(1, 2), delimiter=',', skiprows=1)
col_attrs["X"] = tsne[:, 0].astype('float32')
col_attrs["Y"] = tsne[:, 1].astype('float32')
clusters_file = os.path.join(indir, "outs", "analysis", "clustering", "graphclust", "clusters.csv")
if os.path.exists(clusters_file):
labels = np.loadtxt(clusters_file, usecols=(1, ), delimiter=',', skiprows=1)
col_attrs["ClusterID"] = labels.astype('int') - 1
path = os.path.join(outdir, sampleid + ".loom")
create(path, matrix, row_attrs, col_attrs, file_attrs={"Genome": genome})
return path | [
"def",
"create_from_cellranger",
"(",
"indir",
":",
"str",
",",
"outdir",
":",
"str",
"=",
"None",
",",
"genome",
":",
"str",
"=",
"None",
")",
"->",
"str",
":",
"if",
"outdir",
"is",
"None",
":",
"outdir",
"=",
"indir",
"sampleid",
"=",
"os",
".",
... | 49.086207 | 29.913793 |
async def patch_register(self, register: Dict, request: 'Request'):
"""
Store all options in the "choices" sub-register. We store both the
text and the potential intent, in order to match both regular
quick reply clicks but also the user typing stuff on his keyboard that
matches more or less the content of quick replies.
"""
register['choices'] = {
o.slug: {
'intent': o.intent.key if o.intent else None,
'text': await render(o.text, request),
} for o in self.options
if isinstance(o, QuickRepliesList.TextOption)
}
return register | [
"async",
"def",
"patch_register",
"(",
"self",
",",
"register",
":",
"Dict",
",",
"request",
":",
"'Request'",
")",
":",
"register",
"[",
"'choices'",
"]",
"=",
"{",
"o",
".",
"slug",
":",
"{",
"'intent'",
":",
"o",
".",
"intent",
".",
"key",
"if",
... | 38.764706 | 21.117647 |
def firmware_download_input_protocol_type_ftp_protocol_ftp_file(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
firmware_download = ET.Element("firmware_download")
config = firmware_download
input = ET.SubElement(firmware_download, "input")
protocol_type = ET.SubElement(input, "protocol-type")
ftp_protocol = ET.SubElement(protocol_type, "ftp-protocol")
ftp = ET.SubElement(ftp_protocol, "ftp")
file = ET.SubElement(ftp, "file")
file.text = kwargs.pop('file')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"firmware_download_input_protocol_type_ftp_protocol_ftp_file",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"firmware_download",
"=",
"ET",
".",
"Element",
"(",
"\"firmware_download\"",
")",
"c... | 43.4 | 14 |
def is_present(self, locator, search_object=None):
"""
Determines whether an element is present on the page, retrying once if unable to locate
@type locator: webdriverwrapper.support.locator.Locator
@param locator: the locator or css string used to query the element
@type search_object: webdriverwrapper.WebElementWrapper
@param search_object: Optional WebElement to start search with.
If null, search will be on self.driver
"""
all_elements = self._find_immediately(locator, search_object=search_object)
if all_elements is not None and len(all_elements) > 0:
return True
else:
return False | [
"def",
"is_present",
"(",
"self",
",",
"locator",
",",
"search_object",
"=",
"None",
")",
":",
"all_elements",
"=",
"self",
".",
"_find_immediately",
"(",
"locator",
",",
"search_object",
"=",
"search_object",
")",
"if",
"all_elements",
"is",
"not",
"None",
... | 48.5 | 29.625 |
def dfs_present(path):
'''
Check if a file or directory is present on the distributed FS.
CLI Example:
.. code-block:: bash
salt '*' hadoop.dfs_present /some_random_file
Returns True if the file is present
'''
cmd_return = _hadoop_cmd('dfs', 'stat', path)
match = 'No such file or directory'
return False if match in cmd_return else True | [
"def",
"dfs_present",
"(",
"path",
")",
":",
"cmd_return",
"=",
"_hadoop_cmd",
"(",
"'dfs'",
",",
"'stat'",
",",
"path",
")",
"match",
"=",
"'No such file or directory'",
"return",
"False",
"if",
"match",
"in",
"cmd_return",
"else",
"True"
] | 24.733333 | 22.866667 |
def get_maintenance_response(request):
"""
Return a '503 Service Unavailable' maintenance response.
"""
if settings.MAINTENANCE_MODE_REDIRECT_URL:
return redirect(settings.MAINTENANCE_MODE_REDIRECT_URL)
context = {}
if settings.MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT:
try:
get_request_context_func = import_string(
settings.MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT)
except ImportError:
raise ImproperlyConfigured(
'settings.MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT '
'is not a valid function path.'
)
context = get_request_context_func(request=request)
if django.VERSION < (1, 8):
kwargs = {'context_instance': RequestContext(request, context)}
else:
kwargs = {'context': context}
response = render(request, settings.MAINTENANCE_MODE_TEMPLATE,
status=settings.MAINTENANCE_MODE_STATUS_CODE,
**kwargs)
response['Retry-After'] = settings.MAINTENANCE_MODE_RETRY_AFTER
add_never_cache_headers(response)
return response | [
"def",
"get_maintenance_response",
"(",
"request",
")",
":",
"if",
"settings",
".",
"MAINTENANCE_MODE_REDIRECT_URL",
":",
"return",
"redirect",
"(",
"settings",
".",
"MAINTENANCE_MODE_REDIRECT_URL",
")",
"context",
"=",
"{",
"}",
"if",
"settings",
".",
"MAINTENANCE_... | 34.5 | 19.75 |
def download_google_images(path:PathOrStr, search_term:str, size:str='>400*300', n_images:int=10, format:str='jpg',
max_workers:int=defaults.cpus, timeout:int=4) -> FilePathList:
"""
Search for `n_images` images on Google, matching `search_term` and `size` requirements,
download them into `path`/`search_term` and verify them, using `max_workers` threads.
"""
label_path = Path(path)/search_term
search_url = _search_url(search_term, size=size, format=format)
if n_images <= 100: img_tuples = _fetch_img_tuples(search_url, format=format, n_images=n_images)
else: img_tuples = _fetch_img_tuples_webdriver(search_url, format=format, n_images=n_images)
downloaded_images = _download_images(label_path, img_tuples, max_workers=max_workers, timeout=timeout)
if len(downloaded_images) == 0: raise RuntimeError(f"Couldn't download any images.")
verify_images(label_path, max_workers=max_workers)
return get_image_files(label_path) | [
"def",
"download_google_images",
"(",
"path",
":",
"PathOrStr",
",",
"search_term",
":",
"str",
",",
"size",
":",
"str",
"=",
"'>400*300'",
",",
"n_images",
":",
"int",
"=",
"10",
",",
"format",
":",
"str",
"=",
"'jpg'",
",",
"max_workers",
":",
"int",
... | 71.5 | 36.642857 |
def round(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the round function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the round function.
'''
return cls._unary_op(x, tf.round, tf.float32) | [
"def",
"round",
"(",
"cls",
",",
"x",
":",
"'TensorFluent'",
")",
"->",
"'TensorFluent'",
":",
"return",
"cls",
".",
"_unary_op",
"(",
"x",
",",
"tf",
".",
"round",
",",
"tf",
".",
"float32",
")"
] | 28.9 | 22.5 |
def add_or_update(uid, post_data):
'''
Add or update the data by the given ID of post.
'''
catinfo = MCategory.get_by_uid(uid)
if catinfo:
MCategory.update(uid, post_data)
else:
TabTag.create(
uid=uid,
name=post_data['name'],
slug=post_data['slug'],
order=post_data['order'],
kind=post_data['kind'] if 'kind' in post_data else '1',
pid=post_data['pid'],
)
return uid | [
"def",
"add_or_update",
"(",
"uid",
",",
"post_data",
")",
":",
"catinfo",
"=",
"MCategory",
".",
"get_by_uid",
"(",
"uid",
")",
"if",
"catinfo",
":",
"MCategory",
".",
"update",
"(",
"uid",
",",
"post_data",
")",
"else",
":",
"TabTag",
".",
"create",
... | 31.647059 | 14.705882 |
def arg_bool(name, default=False):
""" Fetch a query argument, as a boolean. """
v = request.args.get(name, '')
if not len(v):
return default
return v in BOOL_TRUISH | [
"def",
"arg_bool",
"(",
"name",
",",
"default",
"=",
"False",
")",
":",
"v",
"=",
"request",
".",
"args",
".",
"get",
"(",
"name",
",",
"''",
")",
"if",
"not",
"len",
"(",
"v",
")",
":",
"return",
"default",
"return",
"v",
"in",
"BOOL_TRUISH"
] | 30.666667 | 10.833333 |
def ensure_type(val, dtype, ndim, name, length=None, can_be_none=False, shape=None,
warn_on_cast=True, add_newaxis_on_deficient_ndim=False):
"""Typecheck the size, shape and dtype of a numpy array, with optional
casting.
Parameters
----------
val : {np.ndaraay, None}
The array to check
dtype : {nd.dtype, str}
The dtype you'd like the array to have
ndim : int
The number of dimensions you'd like the array to have
name : str
name of the array. This is used when throwing exceptions, so that
we can describe to the user which array is messed up.
length : int, optional
How long should the array be?
can_be_none : bool
Is ``val == None`` acceptable?
shape : tuple, optional
What should be shape of the array be? If the provided tuple has
Nones in it, those will be semantically interpreted as matching
any length in that dimension. So, for example, using the shape
spec ``(None, None, 3)`` will ensure that the last dimension is of
length three without constraining the first two dimensions
warn_on_cast : bool, default=True
Raise a warning when the dtypes don't match and a cast is done.
add_newaxis_on_deficient_ndim : bool, default=True
Add a new axis to the beginining of the array if the number of
dimensions is deficient by one compared to your specification. For
instance, if you're trying to get out an array of ``ndim == 3``,
but the user provides an array of ``shape == (10, 10)``, a new axis will
be created with length 1 in front, so that the return value is of
shape ``(1, 10, 10)``.
Notes
-----
The returned value will always be C-contiguous.
Returns
-------
typechecked_val : np.ndarray, None
If `val=None` and `can_be_none=True`, then this will return None.
Otherwise, it will return val (or a copy of val). If the dtype wasn't right,
it'll be casted to the right shape. If the array was not C-contiguous, it'll
be copied as well.
"""
if can_be_none and val is None:
return None
if not isinstance(val, np.ndarray):
# special case: if the user is looking for a 1d array, and
# they request newaxis upconversion, and provided a scalar
# then we should reshape the scalar to be a 1d length-1 array
if add_newaxis_on_deficient_ndim and ndim == 1 and np.isscalar(val):
val = np.array([val])
else:
raise TypeError(("%s must be numpy array. "
" You supplied type %s" % (name, type(val))))
if warn_on_cast and val.dtype != dtype:
warnings.warn("Casting %s dtype=%s to %s " % (name, val.dtype, dtype),
TypeCastPerformanceWarning)
if not val.ndim == ndim:
if add_newaxis_on_deficient_ndim and val.ndim + 1 == ndim:
val = val[np.newaxis, ...]
else:
raise ValueError(("%s must be ndim %s. "
"You supplied %s" % (name, ndim, val.ndim)))
val = np.ascontiguousarray(val, dtype=dtype)
if length is not None and len(val) != length:
raise ValueError(("%s must be length %s. "
"You supplied %s" % (name, length, len(val))))
if shape is not None:
# the shape specified given by the user can look like (None, None 3)
# which indicates that ANY length is accepted in dimension 0 or
# dimension 1
sentenel = object()
error = ValueError(("%s must be shape %s. You supplied "
"%s" % (name, str(shape).replace('None', 'Any'), val.shape)))
for a, b in zip_longest(val.shape, shape, fillvalue=sentenel):
if a is sentenel or b is sentenel:
# if the sentenel was reached, it means that the ndim didn't
# match or something. this really shouldn't happen
raise error
if b is None:
# if the user's shape spec has a None in it, it matches anything
continue
if a != b:
# check for equality
raise error
return val | [
"def",
"ensure_type",
"(",
"val",
",",
"dtype",
",",
"ndim",
",",
"name",
",",
"length",
"=",
"None",
",",
"can_be_none",
"=",
"False",
",",
"shape",
"=",
"None",
",",
"warn_on_cast",
"=",
"True",
",",
"add_newaxis_on_deficient_ndim",
"=",
"False",
")",
... | 42.191919 | 23.676768 |
def parse_subtags(subtags, expect=EXTLANG):
"""
Parse everything that comes after the language tag: scripts, regions,
variants, and assorted extensions.
"""
# We parse the parts of a language code recursively: each step of
# language code parsing handles one component of the code, recurses
# to handle the rest of the code, and adds what it found onto the
# list of things that were in the rest of the code.
#
# This could just as well have been iterative, but the loops would have
# been convoluted.
#
# So here's the base case.
if not subtags:
return []
# There's a subtag that comes next. We need to find out what it is.
#
# The primary thing that distinguishes different types of subtags is
# length, but the subtags also come in a specified order. The 'expect'
# parameter keeps track of where we are in that order. expect=REGION,
# for example, means we're expecting a region code, or anything later
# (because everything but the language is optional).
subtag = subtags[0]
tag_length = len(subtag)
# In the usual case, our goal is to recognize what kind of tag this is,
# and set it in 'tagtype' -- as an integer, so we can compare where it
# should go in order. You can see the enumerated list of tagtypes above,
# where the SUBTAG_TYPES global is defined.
tagtype = None
if tag_length == 0 or tag_length > 8:
# Unless you're inside a private use tag or something -- in which case,
# you're not in this function at the moment -- every component needs to
# be between 1 and 8 characters.
subtag_error(subtag, '1-8 characters')
elif tag_length == 1:
# A one-character subtag introduces an extension, which can itself have
# sub-subtags, so we dispatch to a different function at this point.
#
# We don't need to check anything about the order, because extensions
# necessarily come last.
return parse_extension(subtags)
elif tag_length == 2:
if subtag.isalpha():
# Two-letter alphabetic subtags are regions. These are the only
# two-character subtags after the language.
tagtype = REGION
elif tag_length == 3:
if subtag.isalpha():
# Three-letter alphabetic subtags are 'extended languages'.
# It's allowed for there to be up to three of them in a row, so we
# need another function to enforce that. Before we dispatch to that
# function, though, we need to check whether we're in the right
# place in order.
if expect <= EXTLANG:
return parse_extlang(subtags)
else:
order_error(subtag, EXTLANG, expect)
elif subtag.isdigit():
# Three-digit subtags are broad regions, such as Latin America
# (419).
tagtype = REGION
elif tag_length == 4:
if subtag.isalpha():
# Four-letter alphabetic subtags are scripts.
tagtype = SCRIPT
elif subtag[0].isdigit():
# Four-character subtags that start with a digit are variants.
tagtype = VARIANT
else:
# Tags of length 5-8 are variants.
tagtype = VARIANT
# That's the end of the big elif block for figuring out what kind of
# subtag we have based on its length. Now we should do something with that
# kind of subtag.
if tagtype is None:
# We haven't recognized a type of tag. This subtag just doesn't fit the
# standard.
subtag_error(subtag)
elif tagtype < expect:
# We got a tag type that was supposed to appear earlier in the order.
order_error(subtag, tagtype, expect)
else:
# We've recognized a subtag of a particular type. If it's a region or
# script, we expect the next subtag to be a strictly later type, because
# there can be at most one region and one script. Otherwise, we expect
# the next subtag to be the type we got or later.
if tagtype in (SCRIPT, REGION):
expect = tagtype + 1
else:
expect = tagtype
# Get the name of this subtag type instead of its integer value.
typename = SUBTAG_TYPES[tagtype]
# Some subtags are conventionally written with capitalization. Apply
# those conventions.
if tagtype == SCRIPT:
subtag = subtag.title()
elif tagtype == REGION:
subtag = subtag.upper()
# Recurse on the remaining subtags.
return [(typename, subtag)] + parse_subtags(subtags[1:], expect) | [
"def",
"parse_subtags",
"(",
"subtags",
",",
"expect",
"=",
"EXTLANG",
")",
":",
"# We parse the parts of a language code recursively: each step of",
"# language code parsing handles one component of the code, recurses",
"# to handle the rest of the code, and adds what it found onto the",
... | 39.128205 | 23.299145 |
def setup_logging(logfile, print_log_location=True, debug=False):
'''
Set up logging using the built-in ``logging`` package.
A stream handler is added to all logs, so that logs at or above
``logging.INFO`` level are printed to screen as well as written
to the log file.
Arguments:
logfile (str): Path to the log file. If the parent directory
does not exist, it will be created. Required.
print_log_location (bool): If ``True``, the log path will be
written to the log upon initialization. Default is ``True``.
debug (bool): If true, the log level will be set to ``logging.DEBUG``.
If ``False``, the log level will be set to ``logging.INFO``.
Default is ``False``.
'''
log_dir = os.path.dirname(logfile)
make_dir(log_dir)
fmt = '[%(levelname)s] %(name)s %(asctime)s %(message)s'
if debug:
logging.basicConfig(filename=logfile,
filemode='w',
format=fmt,
level=logging.DEBUG)
else:
logging.basicConfig(filename=logfile,
filemode='w',
format=fmt,
level=logging.INFO)
logger = logging.getLogger('log')
logger = add_stream_handler(logger)
if print_log_location:
logger.info('LOG LOCATION: {}'.format(logfile)) | [
"def",
"setup_logging",
"(",
"logfile",
",",
"print_log_location",
"=",
"True",
",",
"debug",
"=",
"False",
")",
":",
"log_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"logfile",
")",
"make_dir",
"(",
"log_dir",
")",
"fmt",
"=",
"'[%(levelname)s] %(n... | 37.72973 | 20.324324 |
def loadWeights(self, filename, mode='pickle'):
"""
Loads weights from a file in pickle, plain, or tlearn mode.
"""
# modes: pickle, plain/conx, tlearn
if mode == 'pickle':
import pickle
fp = open(filename, "r")
mylist = pickle.load(fp)
fp.close()
self.unArrayify(mylist)
elif mode in ['plain', 'conx']:
arr = []
fp = open(filename, "r")
lines = fp.readlines()
for line in lines:
line = line.strip()
if line == '' or line[0] == '#':
pass
else:
data = list(map( float, line.split()))
arr.extend( data )
self.unArrayify( arr )
fp.close()
elif mode == 'tlearn':
fp = open(filename, "r")
fp.readline() # NETWORK CONFIGURED BY
fp.readline() # # weights after %d sweeps
fp.readline() # # WEIGHTS
cnt = 1
for lto in self.layers:
if lto.type != 'Input':
for j in range(lto.size):
fp.readline() # TO NODE %d
lto.weight[j] = float(fp.readline())
for lfrom in self.layers:
try:
connection = self.getConnection(lfrom.name, lto.name)
for i in range(connection.fromLayer.size):
connection.weight[i][j] = float( fp.readline() )
except NetworkError:
for i in range(lfrom.size):
# 0.0
fp.readline()
cnt += 1
fp.close()
elif mode == 'nbench':
# reads weights and constructs network
fp = open(filename, "r")
line = fp.readline()
while line[:8] != "Ninputs:":
line = fp.readline()
itxt, inputs, ttxt, total, otxt, outputs = line.split()
inputs, total, outputs = int(inputs), int(total), int(outputs)
# cascor's total value is the bias + inputs + hiddens
hiddens = total - inputs - 1
# create the layers:
self.addLayer("input", inputs)
for i in range(hiddens):
self.addLayer("hidden%d" % i, 1)
self.addLayer("output", outputs)
# connect input to all hiddens, output:
for i in range(hiddens):
self.connect("input", "hidden%d" % i)
self.connect("input", "output")
# connect all hiddens to all later hiddens:
for i in range(hiddens - 1):
for j in range(i + 1, hiddens):
if j != i:
self.connect("hidden%d" % i, "hidden%d" % j )
# connect all hiddens to outputs:
for i in range(hiddens):
self.connect("hidden%d" % i, "output")
# now, let's set the weights:
for outcount in range(outputs):
while line[:9] != "# Output:":
line = fp.readline()
line = fp.readline() # $
line = fp.readline() # bias, input to output, hidden to output?
data = ""
while line and line[0] != "#":
data += " " + line.strip()
line = fp.readline()
weights = list(map(float, data.split()))
self["output"].weight[outcount] = weights[0] # bias
next = 1
for i in range(self["input"].size):
self["input", "output"].weight[i][outcount] = weights[next]
next += 1
for h in range(hiddens):
for i in range(self["hidden%d" % h].size): # normally just 1
self["hidden%d" % h, "output"].weight[i][outcount] = weights[next]
next += 1
# now, for each hidden "layer":
while line and line[0] != "$":
line = fp.readline()
line = fp.readline()
for hidcount in range(hiddens):
weights = []
while line and line[0] != "$" and line[0] != "#": # next line is a weight line
weights.extend( list(map(float, line.split()))) # bias, input to hidden, hidden to hidden?
line = fp.readline()
self[("hidden%d" % hidcount)].weight[0] = weights[0] # bias first
next = 1
for i in range(self["input"].size):
for j in range(self["hidden%d" % hidcount].size): # normally just 1
self["input", ("hidden%d" % hidcount)].weight[i][j] = weights[next]
next += 1
for h in range(hidcount): # all those hids leading up to this one
for i in range(self["hidden%d" % h].size): # normally just 1
for j in range(self["hidden%d" % hidcount].size): # normally just 1
self[("hidden%d" % h), ("hidden%d" % hidcount)].weight[i][j] = weights[next]
next += 1
line = fp.readline() # $
line = fp.readline() # beginning of weights
else:
raise ValueError('Unknown mode in loadWeights()', mode) | [
"def",
"loadWeights",
"(",
"self",
",",
"filename",
",",
"mode",
"=",
"'pickle'",
")",
":",
"# modes: pickle, plain/conx, tlearn",
"if",
"mode",
"==",
"'pickle'",
":",
"import",
"pickle",
"fp",
"=",
"open",
"(",
"filename",
",",
"\"r\"",
")",
"mylist",
"=",
... | 46.794872 | 14.350427 |
def is_link_inline(cls, tag, attribute):
'''Return whether the link is likely to be inline object.'''
if tag in cls.TAG_ATTRIBUTES \
and attribute in cls.TAG_ATTRIBUTES[tag]:
attr_flags = cls.TAG_ATTRIBUTES[tag][attribute]
return attr_flags & cls.ATTR_INLINE
return attribute != 'href' | [
"def",
"is_link_inline",
"(",
"cls",
",",
"tag",
",",
"attribute",
")",
":",
"if",
"tag",
"in",
"cls",
".",
"TAG_ATTRIBUTES",
"and",
"attribute",
"in",
"cls",
".",
"TAG_ATTRIBUTES",
"[",
"tag",
"]",
":",
"attr_flags",
"=",
"cls",
".",
"TAG_ATTRIBUTES",
"... | 42.25 | 14.25 |
def is_edge_highlighted(graph: BELGraph, u, v, k) -> bool:
"""Returns if the given edge is highlighted.
:param graph: A BEL graph
:return: Does the edge contain highlight information?
:rtype: bool
"""
return EDGE_HIGHLIGHT in graph[u][v][k] | [
"def",
"is_edge_highlighted",
"(",
"graph",
":",
"BELGraph",
",",
"u",
",",
"v",
",",
"k",
")",
"->",
"bool",
":",
"return",
"EDGE_HIGHLIGHT",
"in",
"graph",
"[",
"u",
"]",
"[",
"v",
"]",
"[",
"k",
"]"
] | 32.75 | 13.625 |
def register(self, typ):
""" register a plugin """
# should be able to combine class/instance namespace, and inherit from either
# would need to store meta or rely on copy ctor
def _func(cls):
if typ in self._class:
raise ValueError("duplicated type name '%s'" % typ)
cls.plugin_type = typ
self._class[typ] = cls
return cls
return _func | [
"def",
"register",
"(",
"self",
",",
"typ",
")",
":",
"# should be able to combine class/instance namespace, and inherit from either",
"# would need to store meta or rely on copy ctor",
"def",
"_func",
"(",
"cls",
")",
":",
"if",
"typ",
"in",
"self",
".",
"_class",
":",
... | 39.090909 | 16.090909 |
def params_section(thing, doc, header_level):
"""
Generate markdown for Parameters section.
Parameters
----------
thing : functuon
Function to produce parameters from
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
class_doc = doc["Parameters"]
return type_list(
inspect.signature(thing),
class_doc,
"#" * (header_level + 1) + " Parameters\n\n",
) | [
"def",
"params_section",
"(",
"thing",
",",
"doc",
",",
"header_level",
")",
":",
"lines",
"=",
"[",
"]",
"class_doc",
"=",
"doc",
"[",
"\"Parameters\"",
"]",
"return",
"type_list",
"(",
"inspect",
".",
"signature",
"(",
"thing",
")",
",",
"class_doc",
"... | 21.076923 | 18.384615 |
def submit(self, command, blocksize, job_name="parsl.auto"):
''' Submits the command onto an Local Resource Manager job of blocksize parallel elements.
Submit returns an ID that corresponds to the task that was just submitted.
If tasks_per_node < 1:
1/tasks_per_node is provisioned
If tasks_per_node == 1:
A single node is provisioned
If tasks_per_node > 1 :
tasks_per_node * blocksize number of nodes are provisioned.
Args:
- command :(String) Commandline invocation to be made on the remote side.
- blocksize :(float) - Not really used for local
Kwargs:
- job_name (String): Name for job, must be unique
Returns:
- None: At capacity, cannot provision more
- job_id: (string) Identifier for the job
'''
job_name = "{0}.{1}".format(job_name, time.time())
# Set script path
script_path = "{0}/{1}.sh".format(self.script_dir, job_name)
script_path = os.path.abspath(script_path)
wrap_command = self.launcher(command, self.tasks_per_node, self.nodes_per_block)
self._write_submit_script(wrap_command, script_path)
job_id, proc = self.channel.execute_no_wait('bash {0}'.format(script_path), 3)
self.resources[job_id] = {'job_id': job_id, 'status': 'RUNNING', 'blocksize': blocksize, 'proc': proc}
return job_id | [
"def",
"submit",
"(",
"self",
",",
"command",
",",
"blocksize",
",",
"job_name",
"=",
"\"parsl.auto\"",
")",
":",
"job_name",
"=",
"\"{0}.{1}\"",
".",
"format",
"(",
"job_name",
",",
"time",
".",
"time",
"(",
")",
")",
"# Set script path",
"script_path",
"... | 35.825 | 30.075 |
def filtered_search(
self,
id_list: List,
negated_classes: List,
limit: Optional[int] = 100,
taxon_filter: Optional[int] = None,
category_filter: Optional[str] = None,
method: Optional[SimAlgorithm] = SimAlgorithm.PHENODIGM) -> SimResult:
"""
Owlsim2 filtered search, resolves taxon and category to a namespace,
calls search_by_attribute_set, and converts to SimResult object
"""
if len(negated_classes) > 0:
logging.warning("Owlsim2 does not support negation, ignoring neg classes")
namespace_filter = self._get_namespace_filter(taxon_filter, category_filter)
owlsim_results = search_by_attribute_set(self.url, tuple(id_list), limit, namespace_filter)
return self._simsearch_to_simresult(owlsim_results, method) | [
"def",
"filtered_search",
"(",
"self",
",",
"id_list",
":",
"List",
",",
"negated_classes",
":",
"List",
",",
"limit",
":",
"Optional",
"[",
"int",
"]",
"=",
"100",
",",
"taxon_filter",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"category_filter... | 47.555556 | 22.777778 |
def do_scan_results(sk, if_index, driver_id, results):
"""Retrieve the results of a successful scan (SSIDs and data about them).
This function does not require root privileges. It eventually calls a callback that actually decodes data about
SSIDs but this function kicks that off.
May exit the program (sys.exit()) if a fatal error occurs.
Positional arguments:
sk -- nl_sock class instance (from nl_socket_alloc()).
if_index -- interface index (integer).
driver_id -- nl80211 driver ID from genl_ctrl_resolve() (integer).
results -- dictionary to populate with results. Keys are BSSIDs (MAC addresses) and values are dicts of data.
Returns:
0 on success or a negative error code.
"""
msg = nlmsg_alloc()
genlmsg_put(msg, 0, 0, driver_id, 0, NLM_F_DUMP, nl80211.NL80211_CMD_GET_SCAN, 0)
nla_put_u32(msg, nl80211.NL80211_ATTR_IFINDEX, if_index)
cb = libnl.handlers.nl_cb_alloc(libnl.handlers.NL_CB_DEFAULT)
libnl.handlers.nl_cb_set(cb, libnl.handlers.NL_CB_VALID, libnl.handlers.NL_CB_CUSTOM, callback_dump, results)
_LOGGER.debug('Sending NL80211_CMD_GET_SCAN...')
ret = nl_send_auto(sk, msg)
if ret >= 0:
_LOGGER.debug('Retrieving NL80211_CMD_GET_SCAN response...')
ret = nl_recvmsgs(sk, cb)
return ret | [
"def",
"do_scan_results",
"(",
"sk",
",",
"if_index",
",",
"driver_id",
",",
"results",
")",
":",
"msg",
"=",
"nlmsg_alloc",
"(",
")",
"genlmsg_put",
"(",
"msg",
",",
"0",
",",
"0",
",",
"driver_id",
",",
"0",
",",
"NLM_F_DUMP",
",",
"nl80211",
".",
... | 45.714286 | 26 |
def parse_line(p_string):
"""
Parses a single line as can be encountered in a todo.txt file.
First checks whether the standard elements are present, such as priority,
creation date, completeness check and the completion date.
Then the rest of the analyzed for any occurrences of contexts, projects or
tags.
Returns an dictionary with the default values as shown below.
"""
result = {
'completed': False,
'completionDate': None,
'priority': None,
'creationDate': None,
'text': "",
'projects': [],
'contexts': [],
'tags': {},
}
completed_head = _COMPLETED_HEAD_MATCH.match(p_string)
normal_head = _NORMAL_HEAD_MATCH.match(p_string)
rest = p_string
if completed_head:
result['completed'] = True
completion_date = completed_head.group('completionDate')
try:
result['completionDate'] = date_string_to_date(completion_date)
except ValueError:
pass
creation_date = completed_head.group('creationDate')
try:
result['creationDate'] = date_string_to_date(creation_date)
except ValueError:
pass
rest = completed_head.group('rest')
elif normal_head:
result['priority'] = normal_head.group('priority')
creation_date = normal_head.group('creationDate')
try:
result['creationDate'] = date_string_to_date(creation_date)
except ValueError:
pass
rest = normal_head.group('rest')
for word in rest.split():
project = _PROJECT_MATCH.match(word)
if project:
result['projects'].append(project.group(1))
context = _CONTEXT_MATCH.match(word)
if context:
result['contexts'].append(context.group(1))
tag = _TAG_MATCH.match(word)
if tag:
tag_name = tag.group('tag')
tag_value = tag.group('value')
try:
result['tags'][tag_name].append(tag_value)
except KeyError:
result['tags'][tag_name] = [tag_value]
else:
result['text'] += word + ' '
# strip trailing space from resulting text
result['text'] = result['text'][:-1]
return result | [
"def",
"parse_line",
"(",
"p_string",
")",
":",
"result",
"=",
"{",
"'completed'",
":",
"False",
",",
"'completionDate'",
":",
"None",
",",
"'priority'",
":",
"None",
",",
"'creationDate'",
":",
"None",
",",
"'text'",
":",
"\"\"",
",",
"'projects'",
":",
... | 27.8375 | 22.2125 |
def do_execute(self):
"""
The actual execution of the actor.
:return: None if successful, otherwise error message
:rtype: str
"""
expr = str(self.resolve_option("expression"))
expr = expr.replace("{X}", str(self.input.payload))
self._output.append(Token(eval(expr)))
return None | [
"def",
"do_execute",
"(",
"self",
")",
":",
"expr",
"=",
"str",
"(",
"self",
".",
"resolve_option",
"(",
"\"expression\"",
")",
")",
"expr",
"=",
"expr",
".",
"replace",
"(",
"\"{X}\"",
",",
"str",
"(",
"self",
".",
"input",
".",
"payload",
")",
")",... | 31 | 14.636364 |
def remove_container(self, container, v=False, link=False, force=False):
"""
Remove a container. Similar to the ``docker rm`` command.
Args:
container (str): The container to remove
v (bool): Remove the volumes associated with the container
link (bool): Remove the specified link and not the underlying
container
force (bool): Force the removal of a running container (uses
``SIGKILL``)
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {'v': v, 'link': link, 'force': force}
res = self._delete(
self._url("/containers/{0}", container), params=params
)
self._raise_for_status(res) | [
"def",
"remove_container",
"(",
"self",
",",
"container",
",",
"v",
"=",
"False",
",",
"link",
"=",
"False",
",",
"force",
"=",
"False",
")",
":",
"params",
"=",
"{",
"'v'",
":",
"v",
",",
"'link'",
":",
"link",
",",
"'force'",
":",
"force",
"}",
... | 37.714286 | 20.285714 |
def count_variants_barplot(data):
""" Return HTML for the Variant Counts barplot """
keys = OrderedDict()
keys['snps'] = {'name': 'SNPs'}
keys['mnps'] = {'name': 'MNPs'}
keys['insertions'] = {'name': 'Insertions'}
keys['deletions'] = {'name': 'Deletions'}
keys['complex'] = {'name': 'Complex'}
keys['symbolic'] = {'name': 'Symbolic'}
keys['mixed'] = {'name': 'Mixed'}
keys['nocalls'] = {'name': 'No-calls'}
plot_conf = {
'id': 'gatk_varianteval_variant_plot',
'title': 'GATK VariantEval: Variant Counts',
'ylab': '# Variants',
'cpswitch_counts_label': 'Number of Variants'
}
return bargraph.plot(data, keys, plot_conf) | [
"def",
"count_variants_barplot",
"(",
"data",
")",
":",
"keys",
"=",
"OrderedDict",
"(",
")",
"keys",
"[",
"'snps'",
"]",
"=",
"{",
"'name'",
":",
"'SNPs'",
"}",
"keys",
"[",
"'mnps'",
"]",
"=",
"{",
"'name'",
":",
"'MNPs'",
"}",
"keys",
"[",
"'inser... | 36.052632 | 10.578947 |
def _nextSequence(cls, name=None):
"""Return a new sequence number for insertion in self._sqlTable.
Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary
key 'john_id', sequence name blapp_john_id_seq) you must give
the full sequence name as an optional argument to _nextSequence)
"""
if not name:
name = cls._sqlSequence
if not name:
# Assume it's tablename_primarykey_seq
if len(cls._sqlPrimary) <> 1:
raise "Could not guess sequence name for multi-primary-key"
primary = cls._sqlPrimary[0]
name = '%s_%s_seq' % (cls._sqlTable, primary.replace('.','_'))
# Don't have . as a tablename or column name! =)
curs = cls.cursor()
curs.execute("SELECT nextval('%s')" % name)
value = curs.fetchone()[0]
curs.close()
return value | [
"def",
"_nextSequence",
"(",
"cls",
",",
"name",
"=",
"None",
")",
":",
"if",
"not",
"name",
":",
"name",
"=",
"cls",
".",
"_sqlSequence",
"if",
"not",
"name",
":",
"# Assume it's tablename_primarykey_seq",
"if",
"len",
"(",
"cls",
".",
"_sqlPrimary",
")",... | 42.954545 | 16.454545 |
def _get_tmaster_with_watch(self, topologyName, callback, isWatching):
"""
Helper function to get pplan with
a callback. The future watch is placed
only if isWatching is True.
"""
path = self.get_tmaster_path(topologyName)
if isWatching:
LOG.info("Adding data watch for path: " + path)
# pylint: disable=unused-variable, unused-argument
@self.client.DataWatch(path)
def watch_tmaster(data, stats):
""" invoke callback to watch tmaster """
if data:
tmaster = TMasterLocation()
tmaster.ParseFromString(data)
callback(tmaster)
else:
callback(None)
# Returning False will result in no future watches
# being triggered. If isWatching is True, then
# the future watches will be triggered.
return isWatching | [
"def",
"_get_tmaster_with_watch",
"(",
"self",
",",
"topologyName",
",",
"callback",
",",
"isWatching",
")",
":",
"path",
"=",
"self",
".",
"get_tmaster_path",
"(",
"topologyName",
")",
"if",
"isWatching",
":",
"LOG",
".",
"info",
"(",
"\"Adding data watch for p... | 31.92 | 13.52 |
def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
"""Return quaternion from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> q = quaternion_from_euler(1, 2, 3, 'ryxz')
>>> np.allclose(q, [0.435953, 0.310622, -0.718287, 0.444435])
True
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
except (AttributeError, KeyError):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis + 1
j = _NEXT_AXIS[i + parity - 1] + 1
k = _NEXT_AXIS[i - parity] + 1
if frame:
ai, ak = ak, ai
if parity:
aj = -aj
ai /= 2.0
aj /= 2.0
ak /= 2.0
ci = math.cos(ai)
si = math.sin(ai)
cj = math.cos(aj)
sj = math.sin(aj)
ck = math.cos(ak)
sk = math.sin(ak)
cc = ci * ck
cs = ci * sk
sc = si * ck
ss = si * sk
q = np.empty((4, ))
if repetition:
q[0] = cj * (cc - ss)
q[i] = cj * (cs + sc)
q[j] = sj * (cc + ss)
q[k] = sj * (cs - sc)
else:
q[0] = cj * cc + sj * ss
q[i] = cj * sc - sj * cs
q[j] = cj * ss + sj * cc
q[k] = cj * cs - sj * sc
if parity:
q[j] *= -1.0
return q | [
"def",
"quaternion_from_euler",
"(",
"ai",
",",
"aj",
",",
"ak",
",",
"axes",
"=",
"'sxyz'",
")",
":",
"try",
":",
"firstaxis",
",",
"parity",
",",
"repetition",
",",
"frame",
"=",
"_AXES2TUPLE",
"[",
"axes",
".",
"lower",
"(",
")",
"]",
"except",
"(... | 23.472727 | 20.745455 |
def make_input(self,
seq_idx: int,
word_vec_prev: mx.sym.Symbol,
decoder_state: mx.sym.Symbol) -> AttentionInput:
"""
Returns AttentionInput to be fed into the attend callable returned by the on() method.
:param seq_idx: Decoder time step.
:param word_vec_prev: Embedding of previously predicted ord
:param decoder_state: Current decoder state
:return: Attention input.
"""
query = decoder_state
if self._input_previous_word:
# (batch_size, num_target_embed + rnn_num_hidden)
query = mx.sym.concat(word_vec_prev, decoder_state, dim=1,
name='%sconcat_prev_word_%d' % (self.prefix, seq_idx))
return AttentionInput(seq_idx=seq_idx, query=query) | [
"def",
"make_input",
"(",
"self",
",",
"seq_idx",
":",
"int",
",",
"word_vec_prev",
":",
"mx",
".",
"sym",
".",
"Symbol",
",",
"decoder_state",
":",
"mx",
".",
"sym",
".",
"Symbol",
")",
"->",
"AttentionInput",
":",
"query",
"=",
"decoder_state",
"if",
... | 45.555556 | 18.666667 |
def resolve_xref(self, env, fromdocname, builder,
                 typ, target, node, contnode):
    # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node  # NOQA
    """Resolve the pending_xref *node* with the given *typ* and *target*.

    This method should return a new node, to replace the xref node,
    containing the *contnode* which is the markup content of the
    cross-reference.

    If no resolution can be found, None can be returned; the xref node will
    then given to the :event:`missing-reference` event, and if that yields no
    resolution, replaced by *contnode*.

    The method can also raise :exc:`sphinx.environment.NoUri` to suppress
    the :event:`missing-reference` event being emitted.
    """
    # First registered object whose name matches the target wins.
    match = next(
        ((fullname, docname)
         for fullname, (docname, _objtype) in self.data['objects'].items()
         if fullname.name == target),
        None)
    if match is None:
        return None
    fullname, docname = match
    return make_refnode(builder, fromdocname, docname,
                        fullname2id(fullname), contnode, fullname.name)
"def",
"resolve_xref",
"(",
"self",
",",
"env",
",",
"fromdocname",
",",
"builder",
",",
"typ",
",",
"target",
",",
"node",
",",
"contnode",
")",
":",
"# type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA",
"for",
"f... | 52.5 | 27.9 |
def GetMessages(self, formatter_mediator, event):
    """Determines the formatted message strings for an event object.

    Args:
      formatter_mediator (FormatterMediator): mediates the interactions
          between formatters and other components, such as storage and Windows
          EventLog resources.
      event (EventObject): event.

    Returns:
      tuple(str, str): formatted message string and short message string.

    Raises:
      WrongFormatter: if the event object cannot be formatted by the formatter.
    """
    if event.data_type != self.DATA_TYPE:
        raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
            event.data_type))

    event_values = event.CopyToDict()

    # Render each NTFS file reference as "<lower 48 bits>-<upper 16 bits>".
    for reference_key in ('file_reference', 'parent_file_reference'):
        reference = event_values.get(reference_key, None)
        if reference:
            event_values[reference_key] = '{0:d}-{1:d}'.format(
                reference & 0xffffffffffff, reference >> 48)

    def _DescribeFlags(flags, flag_descriptions):
        # Join the descriptions of every set bit, in ascending bitmask order.
        return ', '.join(
            description
            for bitmask, description in sorted(flag_descriptions.items())
            if bitmask & flags)

    event_values['update_reason'] = _DescribeFlags(
        event_values.get('update_reason_flags', 0), self._USN_REASON_FLAGS)
    event_values['update_source'] = _DescribeFlags(
        event_values.get('update_source_flags', 0), self._USN_SOURCE_FLAGS)

    return self._ConditionalFormatMessages(event_values)
"def",
"GetMessages",
"(",
"self",
",",
"formatter_mediator",
",",
"event",
")",
":",
"if",
"self",
".",
"DATA_TYPE",
"!=",
"event",
".",
"data_type",
":",
"raise",
"errors",
".",
"WrongFormatter",
"(",
"'Unsupported data type: {0:s}.'",
".",
"format",
"(",
"e... | 37.854167 | 24.083333 |
def catsimPopulation(tag, mc_source_id_start=1, n=5000, n_chunk=100, config='simulate_population.yaml'):
    """
    Simulate a population of satellite galaxies and write the results as
    FITS files under the directory named `tag`.

    tag = Output directory name, also used as a filename token
    mc_source_id_start = First MC_SOURCE_ID value to assign (must be >= 1)
    n = Number of satellites to simulation
    n_chunk = Number of satellites in a file chunk
    config = Path to a YAML config file, or an already-loaded config dict

    Writes (all under `tag/`): chunked object catalogs, a population
    metadata table, and a gzipped HEALPix coverage mask.
    """
    assert mc_source_id_start >= 1, "Starting mc_source_id must be >= 1"
    assert n % n_chunk == 0, "Total number of satellites must be divisible by the chunk size"
    nside_pix = 256 # NSIDE = 128 -> 27.5 arcmin, NSIDE = 256 -> 13.7 arcmin

    if not os.path.exists(tag): os.makedirs(tag)
    if isinstance(config,str): config = yaml.load(open(config))
    assert config['survey'] in ['des', 'ps1']

    # Input survey maps: extinction, footprint, magnitude limits, density.
    infile_ebv = config['ebv']
    infile_fracdet = config['fracdet']
    infile_maglim_g = config['maglim_g']
    infile_maglim_r = config['maglim_r']
    infile_density = config['stellar_density']

    # Sampling ranges for the satellite population (distance in kpc,
    # stellar mass in m_solar, r_physical in kpc per the comment below).
    range_distance = config.get('range_distance',[5., 500.])
    range_stellar_mass = config.get('range_stellar_mass',[1.e1, 1.e6])
    range_r_physical = config.get('range_r_physical',[1.e-3, 2.0])

    m_density = np.load(infile_density)
    nside_density = healpy.npix2nside(len(m_density))
    m_fracdet = read_map(infile_fracdet, nest=False) #.astype(np.float16)
    nside_fracdet = healpy.npix2nside(len(m_fracdet))
    m_maglim_g = read_map(infile_maglim_g, nest=False) #.astype(np.float16)
    m_maglim_r = read_map(infile_maglim_r, nest=False) #.astype(np.float16)
    m_ebv = read_map(infile_ebv, nest=False) #.astype(np.float16)
    #m_foreground = healpy.read_map(infile_foreground)

    # Footprint mask: keep pixels with detection fraction above one half.
    mask = (m_fracdet > 0.5)

    kwargs = dict(range_distance = range_distance,
                  range_stellar_mass = range_stellar_mass,
                  range_r_physical = range_r_physical)
    print kwargs

    # r_physical is azimuthally-averaged half-light radius, kpc
    simulation_area, lon_population, lat_population, distance_population, stellar_mass_population, r_physical_population = ugali.simulation.population.satellitePopulation(mask, nside_pix, n, **kwargs)

    # Per-satellite summary quantities, filled in by the loop below.
    n_g22_population = np.tile(np.nan, n)
    n_g24_population = np.tile(np.nan, n)
    abs_mag_population = np.tile(np.nan, n)
    surface_brightness_population = np.tile(np.nan, n)
    ellipticity_population = np.tile(np.nan, n)
    position_angle_population = np.tile(np.nan, n)
    age_population = np.tile(np.nan, n)
    metal_z_population = np.tile(np.nan, n)
    mc_source_id_population = np.arange(mc_source_id_start, mc_source_id_start + n)
    #cut_difficulty_population = np.tile(False, n)
    # Bitmask per satellite; only satellites with difficulty == 0 have their
    # stars kept in the output catalog (see flag meanings below).
    difficulty_population = np.tile(0, n)

    # Per-star arrays, accumulated across all surviving satellites.
    lon_array = []
    lat_array = []
    mag_1_array = []
    mag_2_array = []
    mag_1_error_array = []
    mag_2_error_array = []
    mag_extinction_1_array = []
    mag_extinction_2_array = []
    mc_source_id_array = []
    for ii, mc_source_id in enumerate(mc_source_id_population):
        print '  Simulating satellite (%i/%i) ... MC_SOURCE_ID = %i'%(ii + 1, n, mc_source_id)
        print '  distance=%.2e, stellar_mass=%.2e, rhalf=%.2e'%(distance_population[ii],stellar_mass_population[ii],r_physical_population[ii])
        lon, lat, mag_1, mag_2, mag_1_error, mag_2_error, mag_extinction_1, mag_extinction_2, n_g22, n_g24, abs_mag, surface_brightness, ellipticity, position_angle, age, metal_z, flag_too_extended = catsimSatellite(config,
                                                                                                                                                                                                                      lon_population[ii],
                                                                                                                                                                                                                      lat_population[ii],
                                                                                                                                                                                                                      distance_population[ii],
                                                                                                                                                                                                                      stellar_mass_population[ii],
                                                                                                                                                                                                                      r_physical_population[ii],
                                                                                                                                                                                                                      m_maglim_g,
                                                                                                                                                                                                                      m_maglim_r,
                                                                                                                                                                                                                      m_ebv)
        print '  ', len(lon)

        n_g22_population[ii] = n_g22
        n_g24_population[ii] = n_g24
        abs_mag_population[ii] = abs_mag
        surface_brightness_population[ii] = surface_brightness
        ellipticity_population[ii] = ellipticity
        position_angle_population[ii] = position_angle
        age_population[ii] = age
        metal_z_population[ii] = metal_z

        #print "Difficulty masking..."
        # These objects are too extended and are not simulated
        if (flag_too_extended):
            difficulty_population[ii] |= 0b0001
        # We assume that these objects would be easily detected and
        # remove them to reduce data volume
        if (surface_brightness_population[ii]<25.)&(n_g22_population[ii]>1e2):
            difficulty_population[ii] |= 0b0010
        if (surface_brightness_population[ii]<28.)&(n_g22_population[ii]>1e4):
            difficulty_population[ii] |= 0b0100
        if (surface_brightness_population[ii]<30.)&(n_g22_population[ii]>1e5):
            difficulty_population[ii] |= 0b1000
        #cut_easy = (surface_brightness_population[ii]<25.)&(n_g22_population[ii]>1.e2) \
        #           | ((surface_brightness_population[ii] < 30.) & (n_g24_population[ii] > 1.e4)) \
        #           | ((surface_brightness_population[ii] < 31.) & (n_g24_population[ii] > 1.e5))
        #cut_hard = (surface_brightness_population[ii] > 35.) | (n_g24_population[ii] < 1.)
        #cut_difficulty_population[ii] = ~cut_easy & ~cut_hard
        #if cut_easy:
        #    difficulty_population[ii] += 1 # TOO EASY
        #if cut_hard:
        #    difficulty_population[ii] += 2 # TOO HARD
        #if flag_too_extended:
        #    difficulty_population[ii] += 3 # TOO EXTENDED
        if difficulty_population[ii] == 0:
            lon_array.append(lon)
            lat_array.append(lat)
            mag_1_array.append(mag_1)
            mag_2_array.append(mag_2)
            mag_1_error_array.append(mag_1_error)
            mag_2_error_array.append(mag_2_error)
            mag_extinction_1_array.append(mag_extinction_1)
            mag_extinction_2_array.append(mag_extinction_2)
            mc_source_id_array.append(np.tile(mc_source_id, len(lon)))

    # Concatenate all the arrays
    print "Concatenating arrays..."
    lon_array = np.concatenate(lon_array)
    lat_array = np.concatenate(lat_array)
    mag_1_array = np.concatenate(mag_1_array)
    mag_2_array = np.concatenate(mag_2_array)
    mag_1_error_array = np.concatenate(mag_1_error_array)
    mag_2_error_array = np.concatenate(mag_2_error_array)
    mag_extinction_1_array = np.concatenate(mag_extinction_1_array)
    mag_extinction_2_array = np.concatenate(mag_extinction_2_array)
    mc_source_id_array = np.concatenate(mc_source_id_array)

    # Now do the masking all at once
    # Each star survives with probability equal to the local detection
    # fraction (Monte Carlo thinning against the fracdet map).
    print "Fracdet masking..."
    pix_array = ugali.utils.healpix.angToPix(nside_fracdet, lon_array, lat_array)
    cut_fracdet = (np.random.uniform(size=len(lon_array)) < m_fracdet[pix_array])

    lon_array = lon_array[cut_fracdet]
    lat_array = lat_array[cut_fracdet]
    mag_1_array = mag_1_array[cut_fracdet]
    mag_2_array = mag_2_array[cut_fracdet]
    mag_1_error_array = mag_1_error_array[cut_fracdet]
    mag_2_error_array = mag_2_error_array[cut_fracdet]
    mag_extinction_1_array = mag_extinction_1_array[cut_fracdet]
    mag_extinction_2_array = mag_extinction_2_array[cut_fracdet]
    mc_source_id_array = mc_source_id_array[cut_fracdet]

    # Cut out the entries that are easily detectable
    """
    lon_population = lon_population[cut_difficulty_population]
    lat_population = lat_population[cut_difficulty_population]
    distance_population = distance_population[cut_difficulty_population]
    stellar_mass_population = stellar_mass_population[cut_difficulty_population]
    r_physical_population = r_physical_population[cut_difficulty_population]
    n_g24_population = n_g24_population[cut_difficulty_population]
    abs_mag_population = abs_mag_population[cut_difficulty_population]
    surface_brightness_population = surface_brightness_population[cut_difficulty_population]
    ellipticity_population = ellipticity_population[cut_difficulty_population]
    position_angle_population = position_angle_population[cut_difficulty_population]
    age_population = age_population[cut_difficulty_population]
    metal_z_population = metal_z_population[cut_difficulty_population]
    mc_source_id_population = mc_source_id_population[cut_difficulty_population]
    """

    # Create bonus columns
    print "Creating bonus columns..."
    distance_modulus_population = ugali.utils.projector.distanceToDistanceModulus(distance_population)
    hpix_32_population = ugali.utils.healpix.angToPix(32, lon_population, lat_population) # Make sure this matches the dataset

    # Local stellar density
    pixarea = healpy.nside2pixarea(nside_density, degrees=True) * 60.**2 # arcmin^2
    density_population = m_density[ugali.utils.healpix.angToPix(nside_density, lon_population, lat_population)] / pixarea # arcmin^-2

    # Average fracdet within the azimuthally averaged half-light radius
    #m_fracdet_zero = np.where(m_fracdet >= 0., m_fracdet, 0.)
    #m_fracdet_zero = m_fracdet
    r_half = np.degrees(np.arctan2(r_physical_population, distance_population)) # Azimuthally averaged half-light radius in degrees
    fracdet_half_population = meanFracdet(m_fracdet, lon_population, lat_population, r_half)
    fracdet_core_population = meanFracdet(m_fracdet, lon_population, lat_population, 0.1)
    fracdet_wide_population = meanFracdet(m_fracdet, lon_population, lat_population, 0.5)

    # Magnitude limits
    nside_maglim = healpy.npix2nside(len(m_maglim_g))
    pix_population = ugali.utils.healpix.angToPix(nside_maglim, lon_population, lat_population)
    maglim_g_population = m_maglim_g[pix_population]
    maglim_r_population = m_maglim_r[pix_population]

    # E(B-V)
    nside_ebv = healpy.npix2nside(len(m_ebv))
    pix_population = ugali.utils.healpix.angToPix(nside_ebv, lon_population, lat_population)
    ebv_population = m_ebv[pix_population]

    # Survey
    survey_population = np.tile(config['survey'], len(lon_population))

    # Number of surviving catalog stars
    n_catalog_population = np.histogram(mc_source_id_array, bins=np.arange(mc_source_id_population[0] - 0.5, mc_source_id_population[-1] + 0.51))[0]

    # Faked-up coadd_object_ids
    # Negative IDs, derived from MC_SOURCE_ID, so synthetic objects can never
    # collide with real survey coadd object ids.
    coadd_object_id_array = []
    for mc_source_id in mc_source_id_population:
        coadd_object_id_array.append((1000000 * mc_source_id) + 1 + np.arange(np.sum(mc_source_id == mc_source_id_array)))
    coadd_object_id_array = -1 * np.concatenate(coadd_object_id_array) # Assign negative numbers to distinguish from real objects

    # Catalog output file
    # for ii in range(0, len(d.formats)): print '\'%s\': [ , \'%s\'],'%(d.names[ii], d.formats[ii])
    # See:
    # https://github.com/sidneymau/simple/blob/master/search_algorithm.py
    # https://github.com/sidneymau/simple/blob/master/config.yaml
    # /home/s1/kadrlica/projects/y3a2/dsphs/v2/skim/ , e.g., /home/s1/kadrlica/projects/y3a2/dsphs/v2/skim/y3a2_ngmix_cm_11755.fits
    #default_array = np.tile(np.nan, len(mc_source_id_array)) # To recognize that those values are synthetic filler
    default_array = np.tile(-9999., len(mc_source_id_array))

    """
    # Column name, data, fits format
    # Y3A2 pre-Gold
    key_map = {'CM_MAG_ERR_G': [mag_1_error_array, 'D'],
               'CM_MAG_ERR_R': [mag_2_error_array, 'D'],
               'CM_MAG_G': [mag_1_array, 'D'],
               'CM_MAG_R': [mag_2_array, 'D'],
               'CM_T': [default_array, 'D'],
               'CM_T_ERR': [default_array, 'D'],
               'COADD_OBJECT_ID': [coadd_object_id_array, 'K'],
               'DEC': [lat_array, 'D'],
               'FLAGS': [default_array, 'K'],
               'PSF_MAG_ERR_G': [mag_1_error_array, 'D'],
               'PSF_MAG_ERR_R': [mag_2_error_array, 'D'],
               'PSF_MAG_G': [mag_1_array, 'D'],
               'PSF_MAG_R': [mag_2_array, 'D'],
               'RA': [lon_array, 'D'],
               'SEXTRACTOR_FLAGS_G': [np.tile(0, len(mc_source_id_array)), 'I'],
               'SEXTRACTOR_FLAGS_R': [np.tile(0, len(mc_source_id_array)), 'I'],
               'WAVG_MAG_PSF_G': [mag_1_array, 'E'],
               'WAVG_MAG_PSF_R': [mag_2_array, 'E'],
               'WAVG_MAGERR_PSF_G': [mag_1_error_array, 'E'],
               'WAVG_MAGERR_PSF_R': [mag_2_error_array, 'E'],
               'WAVG_SPREAD_MODEL_I': [default_array, 'E'],
               'WAVG_SPREADERR_MODEL_I': [default_array, 'E'],
               'EXT_SFD98_G': [default_array, 'E'],
               'EXT_SFD98_R': [default_array, 'E'],
               'CM_MAG_SFD_G': [mag_1_array, 'D'],
               'CM_MAG_SFD_R': [mag_2_array, 'D'],
               'FLAG_FOOTPRINT': [np.tile(1, len(mc_source_id_array)), 'J'],
               'FLAG_FOREGROUND': [np.tile(0, len(mc_source_id_array)), 'J'],
               'EXTENDED_CLASS_MASH': [np.tile(0, len(mc_source_id_array)), 'K'],
               'PSF_MAG_SFD_G': [mag_1_array, 'D'],
               'PSF_MAG_SFD_R': [mag_2_array, 'D'],
               'WAVG_MAG_PSF_SFD_G': [mag_1_array, 'E'],
               'WAVG_MAG_PSF_SFD_R': [mag_2_array, 'E']}
    """

    # Survey-specific catalog schema: (column name, [data, FITS format]).
    if config['survey'] == 'des':
        # Y3 Gold v2.0
        key_map = odict([
                ('COADD_OBJECT_ID', [coadd_object_id_array, 'K']),
                ('RA', [lon_array, 'D']),
                ('DEC', [lat_array, 'D']),
                ('SOF_PSF_MAG_CORRECTED_G', [mag_1_array, 'D']),
                ('SOF_PSF_MAG_CORRECTED_R', [mag_2_array, 'D']),
                ('SOF_PSF_MAG_ERR_G', [mag_1_error_array, 'D']),
                ('SOF_PSF_MAG_ERR_R', [mag_2_error_array, 'D']),
                ('A_SED_SFD98_G', [mag_extinction_1_array, 'E']),
                ('A_SED_SFD98_R', [mag_extinction_2_array, 'E']),
                ('WAVG_MAG_PSF_G', [mag_1_array+mag_extinction_1_array, 'E']),
                ('WAVG_MAG_PSF_R', [mag_2_array+mag_extinction_2_array, 'E']),
                ('WAVG_MAGERR_PSF_G', [mag_1_error_array, 'E']),
                ('WAVG_MAGERR_PSF_R', [mag_2_error_array, 'E']),
                ('WAVG_SPREAD_MODEL_I', [default_array, 'E']),
                ('WAVG_SPREADERR_MODEL_I', [default_array, 'E']),
                ('SOF_CM_T', [default_array, 'D']),
                ('SOF_CM_T_ERR', [default_array, 'D']),
                ('FLAGS_GOLD', [np.tile(0, len(mc_source_id_array)), 'J']),
                ('EXTENDED_CLASS_MASH_SOF', [np.tile(0, len(mc_source_id_array)), 'I']),
                ])
    elif config['survey'] == 'ps1':
        # PS1
        key_map = odict([
                ('OBJID', [coadd_object_id_array, 'K']),
                ('RA', [lon_array, 'D']),
                ('DEC', [lat_array, 'D']),
                #('UNIQUEPSPSOBID', [coadd_object_id_array, 'K']),
                #('OBJINFOFLAG', [default_array, 'E']),
                #('QUALITYFLAG', [np.tile(16, len(mc_source_id_array)), 'I']),
                #('NSTACKDETECTIONS', [np.tile(99, len(mc_source_id_array)), 'I']),
                #('NDETECTIONS', [np.tile(99, len(mc_source_id_array)), 'I']),
                #('NG', [default_array, 'E']),
                #('NR', [default_array, 'E']),
                #('NI', [default_array, 'E']),
                ('GFPSFMAG', [mag_1_array+mag_extinction_1_array, 'E']),
                ('RFPSFMAG', [mag_2_array+mag_extinction_2_array, 'E']),
                #('IFPSFMAG', [np.tile(0., len(mc_source_id_array)), 'E'], # Too pass star selection
                ('GFPSFMAGERR', [mag_1_error_array, 'E']),
                ('RFPSFMAGERR', [mag_2_error_array, 'E']),
                #('IFPSFMAGERR', [default_array, 'E']),
                #('GFKRONMAG', [mag_1_array, 'E']),
                #('RFKRONMAG', [mag_2_array, 'E']),
                #('IFKRONMAG', [np.tile(0., len(mc_source_id_array)), 'E'], # Too pass star selection
                #('GFKRONMAGERR', [mag_1_error_array, 'E']),
                #('RFKRONMAGERR', [mag_2_error_array, 'E']),
                #('IFKRONMAGERR', [default_array, 'E']),
                #('GFLAGS', [np.tile(0, len(mc_source_id_array)), 'I']),
                #('RFLAGS', [np.tile(0, len(mc_source_id_array)), 'I']),
                #('IFLAGS', [np.tile(0, len(mc_source_id_array)), 'I']),
                #('GINFOFLAG', [np.tile(0, len(mc_source_id_array)), 'I']),
                #('RINFOFLAG', [np.tile(0, len(mc_source_id_array)), 'I']),
                #('IINFOFLAG', [np.tile(0, len(mc_source_id_array)), 'I']),
                #('GINFOFLAG2', [np.tile(0, len(mc_source_id_array)), 'I']),
                #('RINFOFLAG2', [np.tile(0, len(mc_source_id_array)), 'I']),
                #('IINFOFLAG2', [np.tile(0, len(mc_source_id_array)), 'I']),
                #('GINFOFLAG3', [np.tile(0, len(mc_source_id_array)), 'I']),
                #('RINFOFLAG3', [np.tile(0, len(mc_source_id_array)), 'I']),
                #('IINFOFLAG3', [np.tile(0, len(mc_source_id_array)), 'I']),
                #('PRIMARYDETECTION', [default_array, 'E']),
                #('BESTDETECTION', [default_array, 'E']),
                #('EBV', [default_array, 'E']),
                #('EXTSFD_G', [mag_extinction_1_array 'E']),
                #('EXTSFD_R', [mag_extinction_2_array, 'E']),
                #('EXTSFD_I', [default_array, 'E']),
                ('GFPSFMAG_SFD', [mag_1_array, 'E']),
                ('RFPSFMAG_SFD', [mag_2_array, 'E']),
                ('EXTENDED_CLASS', [np.tile(0, len(mc_source_id_array)), 'I']),
                ])
    key_map['MC_SOURCE_ID'] = [mc_source_id_array, 'K']

    # Write the object catalog in chunks of n_chunk satellites each.
    print "Writing catalog files..."
    columns = []
    for key in key_map:
        columns.append(pyfits.Column(name=key, format=key_map[key][1], array=key_map[key][0]))
    tbhdu = pyfits.BinTableHDU.from_columns(columns)
    tbhdu.header.set('AREA', simulation_area, 'Simulation area (deg^2)')

    for mc_source_id_chunk in np.split(np.arange(mc_source_id_start, mc_source_id_start + n), n / n_chunk):
        print '  writing MC_SOURCE_ID values from %i to %i'%(mc_source_id_chunk[0], mc_source_id_chunk[-1])
        cut_chunk = np.in1d(mc_source_id_array, mc_source_id_chunk)
        outfile = '%s/sim_catalog_%s_mc_source_id_%07i-%07i.fits'%(tag, tag, mc_source_id_chunk[0], mc_source_id_chunk[-1])
        header = copy.deepcopy(tbhdu.header)
        header.set('IDMIN',mc_source_id_chunk[0], 'Minimum MC_SOURCE_ID')
        header.set('IDMAX',mc_source_id_chunk[-1], 'Maximum MC_SOURCE_ID')
        pyfits.writeto(outfile, tbhdu.data[cut_chunk], header, clobber=True)

    # Population metadata output file
    # One row per simulated satellite (including the "difficult" ones that
    # were excluded from the object catalog above).
    print "Writing population metadata file..."
    tbhdu = pyfits.BinTableHDU.from_columns([
        pyfits.Column(name='RA', format='E', array=lon_population, unit='deg'),
        pyfits.Column(name='DEC', format='E', array=lat_population, unit='deg'),
        pyfits.Column(name='DISTANCE', format='E', array=distance_population, unit='kpc'),
        pyfits.Column(name='DISTANCE_MODULUS', format='E', array=distance_modulus_population, unit='kpc'),
        pyfits.Column(name='STELLAR_MASS', format='E', array=stellar_mass_population, unit='m_solar'),
        pyfits.Column(name='R_PHYSICAL', format='E', array=r_physical_population, unit='kpc'),
        pyfits.Column(name='N_G22', format='J', array=n_g22_population, unit=''),
        pyfits.Column(name='N_G24', format='J', array=n_g24_population, unit=''),
        pyfits.Column(name='N_CATALOG', format='J', array=n_catalog_population, unit=''),
        pyfits.Column(name='DIFFICULTY', format='J', array=difficulty_population, unit=''),
        pyfits.Column(name='ABS_MAG', format='E', array=abs_mag_population, unit='mag'),
        pyfits.Column(name='SURFACE_BRIGHTNESS', format='E', array=surface_brightness_population, unit='mag arcsec^-2'),
        pyfits.Column(name='ELLIPTICITY', format='E', array=ellipticity_population, unit=''),
        pyfits.Column(name='POSITION_ANGLE', format='E', array=position_angle_population, unit='deg'),
        pyfits.Column(name='AGE', format='E', array=age_population, unit='deg'),
        pyfits.Column(name='METAL_Z', format='E', array=metal_z_population, unit=''),
        pyfits.Column(name='MC_SOURCE_ID', format='K', array=mc_source_id_population, unit=''),
        pyfits.Column(name='HPIX_32', format='E', array=hpix_32_population, unit=''),
        pyfits.Column(name='DENSITY', format='E', array=density_population, unit='arcmin^-2'),
        pyfits.Column(name='FRACDET_HALF', format='E', array=fracdet_half_population, unit=''),
        pyfits.Column(name='FRACDET_CORE', format='E', array=fracdet_core_population, unit=''),
        pyfits.Column(name='FRACDET_WIDE', format='E', array=fracdet_wide_population, unit=''),
        pyfits.Column(name='MAGLIM_G', format='E', array=maglim_g_population, unit='mag'),
        pyfits.Column(name='MAGLIM_R', format='E', array=maglim_r_population, unit='mag'),
        pyfits.Column(name='EBV', format='E', array=ebv_population, unit='mag'),
        pyfits.Column(name='SURVEY', format='A12', array=survey_population, unit=''),
    ])
    tbhdu.header.set('AREA', simulation_area, 'Simulation area (deg^2)')
    tbhdu.writeto('%s/sim_population_%s_mc_source_id_%07i-%07i.fits'%(tag, tag, mc_source_id_start, mc_source_id_start + n - 1), clobber=True)
    # 5284.2452461023322

    # Mask output file
    print "Writing population mask file..."
    outfile_mask = '%s/sim_mask_%s_cel_nside_%i.fits'%(tag, tag, healpy.npix2nside(len(mask)))
    if not os.path.exists(outfile_mask):
        healpy.write_map(outfile_mask, mask.astype(int), nest=True, coord='C', overwrite=True)
os.system('gzip -f %s'%(outfile_mask)) | [
"def",
"catsimPopulation",
"(",
"tag",
",",
"mc_source_id_start",
"=",
"1",
",",
"n",
"=",
"5000",
",",
"n_chunk",
"=",
"100",
",",
"config",
"=",
"'simulate_population.yaml'",
")",
":",
"assert",
"mc_source_id_start",
">=",
"1",
",",
"\"Starting mc_source_id mu... | 56.260101 | 29.734848 |
def handle(self, *args, **options):
    """Call sync_subscriber on Subscribers without customers associated to them."""
    queryset = get_subscriber_model().objects.filter(djstripe_customers__isnull=True)
    total = queryset.count()
    for index, subscriber in enumerate(queryset, start=1):
        # Report progress as an integer percentage of the total.
        percent = int(round(100 * (float(index) / float(total))))
        print(
            "[{0}/{1} {2}%] Syncing {3} [{4}]".format(
                index, total, percent, subscriber.email, subscriber.pk
            )
        )
        sync_subscriber(subscriber)
"def",
"handle",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"options",
")",
":",
"qs",
"=",
"get_subscriber_model",
"(",
")",
".",
"objects",
".",
"filter",
"(",
"djstripe_customers__isnull",
"=",
"True",
")",
"count",
"=",
"0",
"total",
"=",
"qs",
... | 33.357143 | 20.428571 |
def _setup_conn_old(**kwargs):
    '''
    Setup kubernetes API connection singleton the old way

    Reads connection settings from Salt config (``kubernetes.*`` options),
    allows per-call overrides via ``kwargs``, and mutates the module-level
    ``kubernetes.client.configuration`` singleton in place.
    '''
    host = __salt__['config.option']('kubernetes.api_url',
                                    'http://localhost:8080')
    username = __salt__['config.option']('kubernetes.user')
    password = __salt__['config.option']('kubernetes.password')
    # Inline (base64) certificate data vs. file-path alternatives; the
    # file-path variant takes precedence below.
    ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data')
    client_cert = __salt__['config.option']('kubernetes.client-certificate-data')
    client_key = __salt__['config.option']('kubernetes.client-key-data')
    ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file')
    client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file')
    client_key_file = __salt__['config.option']('kubernetes.client-key-file')

    # Override default API settings when settings are provided
    if 'api_url' in kwargs:
        host = kwargs.get('api_url')
    if 'api_user' in kwargs:
        username = kwargs.get('api_user')
    if 'api_password' in kwargs:
        password = kwargs.get('api_password')
    if 'api_certificate_authority_file' in kwargs:
        ca_cert_file = kwargs.get('api_certificate_authority_file')
    if 'api_client_certificate_file' in kwargs:
        client_cert_file = kwargs.get('api_client_certificate_file')
    if 'api_client_key_file' in kwargs:
        client_key_file = kwargs.get('api_client_key_file')

    # NOTE(review): this comparison reads ``configuration.password`` while
    # the assignment below writes ``configuration.passwd`` -- if those are
    # distinct attributes, a password change alone never triggers the reset.
    # Confirm against the kubernetes client version in use.
    if (
            kubernetes.client.configuration.host != host or
            kubernetes.client.configuration.user != username or
            kubernetes.client.configuration.password != password):
        # Recreates API connection if settings are changed
        kubernetes.client.configuration.__init__()

    kubernetes.client.configuration.host = host
    kubernetes.client.configuration.user = username
    kubernetes.client.configuration.passwd = password

    # CA certificate: prefer the file path; otherwise decode the inline
    # base64 data into a temp file. NOTE(review): temp files are created
    # with delete=False and are never removed here -- they accumulate.
    if ca_cert_file:
        kubernetes.client.configuration.ssl_ca_cert = ca_cert_file
    elif ca_cert:
        with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca:
            ca.write(base64.b64decode(ca_cert))
            kubernetes.client.configuration.ssl_ca_cert = ca.name
    else:
        kubernetes.client.configuration.ssl_ca_cert = None

    # Client certificate: same file-over-inline precedence as the CA above.
    if client_cert_file:
        kubernetes.client.configuration.cert_file = client_cert_file
    elif client_cert:
        with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c:
            c.write(base64.b64decode(client_cert))
            kubernetes.client.configuration.cert_file = c.name
    else:
        kubernetes.client.configuration.cert_file = None

    # Client private key: same file-over-inline precedence.
    if client_key_file:
        kubernetes.client.configuration.key_file = client_key_file
    elif client_key:
        with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k:
            k.write(base64.b64decode(client_key))
            kubernetes.client.configuration.key_file = k.name
    else:
        kubernetes.client.configuration.key_file = None
return {} | [
"def",
"_setup_conn_old",
"(",
"*",
"*",
"kwargs",
")",
":",
"host",
"=",
"__salt__",
"[",
"'config.option'",
"]",
"(",
"'kubernetes.api_url'",
",",
"'http://localhost:8080'",
")",
"username",
"=",
"__salt__",
"[",
"'config.option'",
"]",
"(",
"'kubernetes.user'",... | 41.541667 | 24.347222 |
def handle_json_GET_routepatterns(self, params):
    """Given a route_id generate a list of patterns of the route. For each
    pattern include some basic information and a few sample trips.

    Params read: 'route' (route_id, required), 'time' (seconds since
    midnight; sample trips start at or after this), 'date' (optional
    service-date filter applied via IsActiveOn).

    Returns a sorted list of tuples:
    (name, pattern_id, start_sample_index, sample, num_after_sample,
     0/1 flag for any non-zero trip_type), where sample is a list of
    (start_time, trip_id) pairs.
    """
    schedule = self.server.schedule
    route = schedule.GetRoute(params.get('route', None))
    if not route:
        self.send_error(404)
        return
    time = int(params.get('time', 0))
    date = params.get('date', "")
    sample_size = 3  # For each pattern return the start time for this many trips
    pattern_id_trip_dict = route.GetPatternIdTripDict()
    patterns = []
    for pattern_id, trips in pattern_id_trip_dict.items():
        time_stops = trips[0].GetTimeStops()
        if not time_stops:
            continue
        has_non_zero_trip_type = False;
        # Iterating over a copy so we can remove from trips inside the loop
        trips_with_service = []
        for trip in trips:
            service_id = trip.service_id
            service_period = schedule.GetServicePeriod(service_id)
            # Drop trips whose service period is inactive on the given date.
            if date and not service_period.IsActiveOn(date):
                continue
            trips_with_service.append(trip)
            if trip['trip_type'] and trip['trip_type'] != '0':
                has_non_zero_trip_type = True
        # We're only interested in the trips that do run on the specified date
        trips = trips_with_service
        name = u'%s to %s, %d stops' % (time_stops[0][2].stop_name, time_stops[-1][2].stop_name, len(time_stops))
        transitfeed.SortListOfTripByTime(trips)
        num_trips = len(trips)
        if num_trips <= sample_size:
            # Few enough trips that they all fit in one sample window.
            start_sample_index = 0
            num_after_sample = 0
        else:
            # Will return sample_size trips that start after the 'time' param.
            # Linear search because I couldn't find a built-in way to do a binary
            # search with a custom key.
            start_sample_index = len(trips)
            for i, trip in enumerate(trips):
                if trip.GetStartTime() >= time:
                    start_sample_index = i
                    break
            num_after_sample = num_trips - (start_sample_index + sample_size)
            if num_after_sample < 0:
                # Less than sample_size trips start after 'time' so return all the
                # last sample_size trips.
                num_after_sample = 0
                start_sample_index = num_trips - sample_size
        sample = []
        for t in trips[start_sample_index:start_sample_index + sample_size]:
            sample.append( (t.GetStartTime(), t.trip_id) )
        patterns.append((name, pattern_id, start_sample_index, sample,
                         num_after_sample, (0,1)[has_non_zero_trip_type]))
    patterns.sort()
return patterns | [
"def",
"handle_json_GET_routepatterns",
"(",
"self",
",",
"params",
")",
":",
"schedule",
"=",
"self",
".",
"server",
".",
"schedule",
"route",
"=",
"schedule",
".",
"GetRoute",
"(",
"params",
".",
"get",
"(",
"'route'",
",",
"None",
")",
")",
"if",
"not... | 35.859155 | 20.915493 |
def _find_codopant(target, oxidation_state, allowed_elements=None):
    """
    Finds the element from "allowed elements" that (i) possesses the desired
    "oxidation state" and (ii) is closest in ionic radius to the target specie

    Args:
        target: (Specie) provides target ionic radius.
        oxidation_state: (float) codopant oxidation state.
        allowed_elements: ([str]) List of allowed elements. If None,
            all elements are tried.

    Returns:
        (Specie) with oxidation_state that has ionic radius closest to
        target.

    Raises:
        ValueError: if no candidate element has a known ionic radius at the
            requested oxidation state (``min`` of an empty sequence).
    """
    ref_radius = target.ionic_radius
    candidates = []
    symbols = allowed_elements or [el.symbol for el in Element]
    for sym in symbols:
        # Elements with no radius data for this oxidation state are skipped.
        # Bug fix: catch Exception rather than a bare ``except:`` so that
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        try:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                sp = Specie(sym, oxidation_state)
                r = sp.ionic_radius
                if r is not None:
                    candidates.append((r, sp))
        except Exception:
            pass
    # Candidate whose radius ratio to the target is closest to unity.
    return min(candidates, key=lambda l: abs(l[0] / ref_radius - 1))[1]
"def",
"_find_codopant",
"(",
"target",
",",
"oxidation_state",
",",
"allowed_elements",
"=",
"None",
")",
":",
"ref_radius",
"=",
"target",
".",
"ionic_radius",
"candidates",
"=",
"[",
"]",
"symbols",
"=",
"allowed_elements",
"or",
"[",
"el",
".",
"symbol",
... | 36.689655 | 19.655172 |
def sg_summary_loss(tensor, prefix='losses', name=None):
    r"""Register `tensor` to summary report as `loss`

    Args:
      tensor: A `Tensor` to log as loss
      prefix: A `string`. A prefix to display in the tensor board web UI.
      name: A `string`. A name to display in the tensor board web UI.

    Returns:
      None
    """
    # Display name is "<prefix>/<name>", falling back to a pretty-printed
    # tensor name when no explicit name is given.
    prefix = '' if prefix is None else prefix + '/'
    if name is None:
        name = prefix + _pretty_name(tensor)
    else:
        name = prefix + name
    # Log the mean as a scalar and the raw values as a histogram.
    _scalar(name, tf.reduce_mean(tensor))
    _histogram(name + '-h', tensor)
"def",
"sg_summary_loss",
"(",
"tensor",
",",
"prefix",
"=",
"'losses'",
",",
"name",
"=",
"None",
")",
":",
"# defaults",
"prefix",
"=",
"''",
"if",
"prefix",
"is",
"None",
"else",
"prefix",
"+",
"'/'",
"# summary name",
"name",
"=",
"prefix",
"+",
"_pr... | 32.555556 | 20.222222 |
def _call(self, cmd, get_output):
    """Calls a command through the SSH connection.

    Remote stderr gets printed to this program's stderr. Output is captured
    and may be returned.

    :param cmd: Command line to execute remotely (run via ``/bin/sh -c``).
    :param get_output: If True, accumulate and return remote stdout;
        otherwise stdout is read but discarded.
    :return: ``(exit_status, output)`` where ``output`` is a bytes string
        with trailing CR/LF stripped (empty when get_output is False).
    """
    server_err = self.server_logger()
    chan = self.get_client().get_transport().open_session()
    try:
        logger.debug("Invoking %r%s",
                     cmd, " (stdout)" if get_output else "")
        chan.exec_command('/bin/sh -c %s' % shell_escape(cmd))
        output = b''
        while True:
            # Block until the channel is readable.
            r, w, e = select.select([chan], [], [])
            if chan not in r:
                continue  # pragma: no cover
            recvd = False
            # Drain stderr first, forwarding each chunk to the server logger.
            while chan.recv_stderr_ready():
                data = chan.recv_stderr(1024)
                server_err.append(data)
                recvd = True
            # Then drain stdout, keeping it only when requested.
            while chan.recv_ready():
                data = chan.recv(1024)
                if get_output:
                    output += data
                recvd = True
            # Stop only once nothing was read this round AND the remote
            # process has exited -- ensures both streams are fully drained.
            if not recvd and chan.exit_status_ready():
                break
        output = output.rstrip(b'\r\n')
        return chan.recv_exit_status(), output
    finally:
        server_err.done()
chan.close() | [
"def",
"_call",
"(",
"self",
",",
"cmd",
",",
"get_output",
")",
":",
"server_err",
"=",
"self",
".",
"server_logger",
"(",
")",
"chan",
"=",
"self",
".",
"get_client",
"(",
")",
".",
"get_transport",
"(",
")",
".",
"open_session",
"(",
")",
"try",
"... | 37.314286 | 13.057143 |
def add_record(self, record_in):
    """
    Add a new record. Strip quotes from around strings.

    This will over-write if the key already exists, except
    for COMMENT and HISTORY fields.

    parameters
    -----------
    record:
        The record, either a dict with 'name' and 'value' entries, or a
        header card string, or a FITSRecord or FITSCard.
    """
    is_plain_record = (isinstance(record_in, dict)
                       and 'name' in record_in
                       and 'value' in record_in)
    if is_plain_record:
        record = dict(record_in)
    else:
        record = FITSRecord(record_in)

    key = record['name'].upper()

    # COMMENT/HISTORY/CONTINUE cards may repeat, so they are always
    # appended; any other existing key is overwritten in place.
    if key in self._record_map and key not in ('COMMENT', 'HISTORY', 'CONTINUE'):
        index = self._index_map[key]
        self._record_list[index] = record
    else:
        self._record_list.append(record)
        self._index_map[key] = len(self._record_list) - 1

    self._record_map[key] = record
"def",
"add_record",
"(",
"self",
",",
"record_in",
")",
":",
"if",
"(",
"isinstance",
"(",
"record_in",
",",
"dict",
")",
"and",
"'name'",
"in",
"record_in",
"and",
"'value'",
"in",
"record_in",
")",
":",
"record",
"=",
"{",
"}",
"record",
".",
"updat... | 33.452381 | 15.97619 |
def send_badge_messages(self, badge_award):
        """
        Deliver the badge's message (if any) to the awarded user.

        The Badge class may define ``user_message`` either as a plain
        value or as a callable taking the award; a missing attribute or
        a ``None`` result means nothing is sent.
        """
        raw = getattr(badge_award.badge, "user_message", None)
        # a callable builds the message from the award itself
        message = raw(badge_award) if callable(raw) else raw
        if message is not None:
            badge_award.user.message_set.create(message=message)
"def",
"send_badge_messages",
"(",
"self",
",",
"badge_award",
")",
":",
"user_message",
"=",
"getattr",
"(",
"badge_award",
".",
"badge",
",",
"\"user_message\"",
",",
"None",
")",
"if",
"callable",
"(",
"user_message",
")",
":",
"message",
"=",
"user_message... | 38.583333 | 13.75 |
def repeat(mode):
    """Change repeat mode of current player.

    :param mode: the new repeat mode (protobuf ``RepeatMode`` value)
    :return: a SendCommand message ready to be sent to the device
    """
    # BUG FIX: this previously built a ChangeShuffleMode command while
    # setting ``repeatMode``; the repeat option belongs to ChangeRepeatMode.
    message = command(protobuf.CommandInfo_pb2.ChangeRepeatMode)
    send_command = message.inner()
    # command is executed by the currently active (external) player
    send_command.options.externalPlayerCommand = True
    send_command.options.repeatMode = mode
    return message
"def",
"repeat",
"(",
"mode",
")",
":",
"message",
"=",
"command",
"(",
"protobuf",
".",
"CommandInfo_pb2",
".",
"ChangeShuffleMode",
")",
"send_command",
"=",
"message",
".",
"inner",
"(",
")",
"send_command",
".",
"options",
".",
"externalPlayerCommand",
"="... | 39.428571 | 13 |
def is_python_binding_installed(self):
        """Check if the Python binding has already installed.

        Two sources are consulted:

        - ``pip list`` (may be unavailable on old Pythons <= 2.6,
          raising InstallError);
        - as a fallback, the ``__init__.py`` files under the RPM Python
          library directories.
        """
        pip_failed = False
        installed = False
        try:
            installed = self.is_python_binding_installed_on_pip()
        except InstallError:
            # pip itself is missing (e.g. Python <= 2.6); use the fallback.
            pip_failed = True
        if installed and not pip_failed:
            return installed
        # Fallback: look for an installed __init__.py in the RPM dirs.
        for rpm_dir in self.python_lib_rpm_dirs:
            if os.path.isfile(os.path.join(rpm_dir, '__init__.py')):
                return True
        return installed
"def",
"is_python_binding_installed",
"(",
"self",
")",
":",
"is_installed",
"=",
"False",
"is_install_error",
"=",
"False",
"try",
":",
"is_installed",
"=",
"self",
".",
"is_python_binding_installed_on_pip",
"(",
")",
"except",
"InstallError",
":",
"# Consider a case... | 35.958333 | 15.291667 |
def update_pool(self, pool, body=None):
        """Updates a load balancer pool.

        Issues a PUT against the pool's resource path with the given body.
        """
        url = self.pool_path % (pool)
        return self.put(url, body=body)
"def",
"update_pool",
"(",
"self",
",",
"pool",
",",
"body",
"=",
"None",
")",
":",
"return",
"self",
".",
"put",
"(",
"self",
".",
"pool_path",
"%",
"(",
"pool",
")",
",",
"body",
"=",
"body",
")"
] | 47 | 6.666667 |
def apply_obb(self):
        """
        Align the path with its oriented bounding box.

        Transforms the current path so that its OBB is axis aligned
        and centered at the origin; only single-body geometry is
        supported. Returns the transform that was applied.
        """
        if len(self.root) != 1:
            raise ValueError('Not implemented for multibody geometry')
        # transform taking the single closed polygon's OBB to an
        # axis-aligned box centered at the origin
        matrix, bounds = polygons.polygon_obb(
            self.polygons_closed[self.root[0]])
        self.apply_transform(matrix)
        return matrix
"def",
"apply_obb",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"root",
")",
"==",
"1",
":",
"matrix",
",",
"bounds",
"=",
"polygons",
".",
"polygon_obb",
"(",
"self",
".",
"polygons_closed",
"[",
"self",
".",
"root",
"[",
"0",
"]",
"]",
... | 35.666667 | 12.333333 |
def url_quote(url):
    """Return *url* percent-encoded so it is safe to use as a URL."""
    try:
        return quote(url, safe=URL_SAFE)
    except KeyError:
        # quote() could not map some characters directly;
        # normalize via encode() and retry.
        return quote(encode(url), safe=URL_SAFE)
return quote(encode(url), safe=URL_SAFE) | [
"def",
"url_quote",
"(",
"url",
")",
":",
"try",
":",
"return",
"quote",
"(",
"url",
",",
"safe",
"=",
"URL_SAFE",
")",
"except",
"KeyError",
":",
"return",
"quote",
"(",
"encode",
"(",
"url",
")",
",",
"safe",
"=",
"URL_SAFE",
")"
] | 27.333333 | 13.5 |
def close(self):
        '''Stop running timers.'''
        # cancel any pending delayed call before marking ourselves stopped
        pending = self._call_later_handle
        if pending:
            pending.cancel()
        self._running = False
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_call_later_handle",
":",
"self",
".",
"_call_later_handle",
".",
"cancel",
"(",
")",
"self",
".",
"_running",
"=",
"False"
] | 26.333333 | 15 |
def nodes(self):
        """
        Return a list of child nodes of this engine. This can be
        used to iterate to obtain access to node level operations
        ::

            >>> print(list(engine.nodes))
            [Node(name=myfirewall node 1)]
            >>> engine.nodes.get(0)
            Node(name=myfirewall node 1)

        :return: nodes for this engine
        :rtype: SubElementCollection(Node)
        """
        # wrap the engine's 'nodes' relation in a Node sub-collection
        node_collection = sub_collection(self.get_relation('nodes'), Node)
        node_collection._load_from_engine(self, 'nodes')
        return node_collection
"def",
"nodes",
"(",
"self",
")",
":",
"resource",
"=",
"sub_collection",
"(",
"self",
".",
"get_relation",
"(",
"'nodes'",
")",
",",
"Node",
")",
"resource",
".",
"_load_from_engine",
"(",
"self",
",",
"'nodes'",
")",
"return",
"resource"
] | 29.761905 | 13.571429 |
def opener(mode='r'):
    """Factory returning a function that opens files with *mode*.

    The returned callable maps a path (or stream) to a file object:

    - ``sys.stdin`` / ``sys.stdout`` objects pass through unchanged;
    - ``'-'`` selects stdin for read modes and stdout otherwise;
    - ``*.bz2`` and ``*.gz`` paths open with the matching
      decompressing file type;
    - anything else goes through the builtin ``open``.

    Keyword Arguments:
    - mode -- A string indicating how the file is to be opened. Accepts
      the same values as the builtin open() function.
    """
    def open_file(f):
        if f is sys.stdout or f is sys.stdin:
            return f
        if f == '-':
            # '-' conventionally means "use a standard stream"
            return sys.stdin if 'r' in mode else sys.stdout
        if f.endswith('.bz2'):
            return bz2.BZ2File(f, mode)
        if f.endswith('.gz'):
            return gzip.open(f, mode)
        return open(f, mode)
    return open_file
"def",
"opener",
"(",
"mode",
"=",
"'r'",
")",
":",
"def",
"open_file",
"(",
"f",
")",
":",
"if",
"f",
"is",
"sys",
".",
"stdout",
"or",
"f",
"is",
"sys",
".",
"stdin",
":",
"return",
"f",
"elif",
"f",
"==",
"'-'",
":",
"return",
"sys",
".",
... | 31.043478 | 17.695652 |
def activate():
    '''Activate an existing user (validate their email confirmation)'''
    email = click.prompt('Email')
    user = User.objects(email=email).first()
    if not user:
        # FIX: this guard previously fell through; exit_with_error() is
        # expected to terminate, but return anyway (matching the guard
        # below) so the attribute accesses can never run on a None user.
        exit_with_error('Invalid user')
        return
    if user.confirmed_at is not None:
        exit_with_error('User email address already confirmed')
        return
    # mark the email address as confirmed right now
    user.confirmed_at = datetime.utcnow()
    user.save()
    success('User activated successfully')
"def",
"activate",
"(",
")",
":",
"email",
"=",
"click",
".",
"prompt",
"(",
"'Email'",
")",
"user",
"=",
"User",
".",
"objects",
"(",
"email",
"=",
"email",
")",
".",
"first",
"(",
")",
"if",
"not",
"user",
":",
"exit_with_error",
"(",
"'Invalid use... | 35.833333 | 14.333333 |
def getSpec(cls):
    """
    Return the Spec for ApicalTMPairRegion
    """
    # Build the three Spec sections separately and assemble them at the
    # end; the contents match the region's published interface exactly.
    inputs = {
        "activeColumns": {
            "description": ("An array of 0's and 1's representing the active "
                            "minicolumns, i.e. the input to the TemporalMemory"),
            "dataType": "Real32",
            "count": 0,
            "required": True,
            "regionLevel": True,
            "isDefaultInput": True,
            "requireSplitterMap": False
        },
        "resetIn": {
            "description": ("A boolean flag that indicates whether"
                            " or not the input vector received in this compute cycle"
                            " represents the first presentation in a"
                            " new temporal sequence."),
            "dataType": "Real32",
            "count": 1,
            "required": False,
            "regionLevel": True,
            "isDefaultInput": False,
            "requireSplitterMap": False
        },
        "basalInput": {
            "description": "An array of 0's and 1's representing basal input",
            "dataType": "Real32",
            "count": 0,
            "required": False,
            "regionLevel": True,
            "isDefaultInput": False,
            "requireSplitterMap": False
        },
        "basalGrowthCandidates": {
            "description": ("An array of 0's and 1's representing basal input " +
                            "that can be learned on new synapses on basal " +
                            "segments. If this input is a length-0 array, the " +
                            "whole basalInput is used."),
            "dataType": "Real32",
            "count": 0,
            "required": False,
            "regionLevel": True,
            "isDefaultInput": False,
            "requireSplitterMap": False
        },
        "apicalInput": {
            "description": ("An array of 0's and 1's representing top down input."
                            " The input will be provided to apical dendrites."),
            "dataType": "Real32",
            "count": 0,
            "required": False,
            "regionLevel": True,
            "isDefaultInput": False,
            "requireSplitterMap": False
        },
        "apicalGrowthCandidates": {
            "description": ("An array of 0's and 1's representing apical input " +
                            "that can be learned on new synapses on apical " +
                            "segments. If this input is a length-0 array, the " +
                            "whole apicalInput is used."),
            "dataType": "Real32",
            "count": 0,
            "required": False,
            "regionLevel": True,
            "isDefaultInput": False,
            "requireSplitterMap": False
        },
    }

    outputs = {
        "predictedCells": {
            "description": ("A binary output containing a 1 for every "
                            "cell that was predicted for this timestep."),
            "dataType": "Real32",
            "count": 0,
            "regionLevel": True,
            "isDefaultOutput": False
        },
        "predictedActiveCells": {
            "description": ("A binary output containing a 1 for every "
                            "cell that transitioned from predicted to active."),
            "dataType": "Real32",
            "count": 0,
            "regionLevel": True,
            "isDefaultOutput": False
        },
        "activeCells": {
            "description": ("A binary output containing a 1 for every "
                            "cell that is currently active."),
            "dataType": "Real32",
            "count": 0,
            "regionLevel": True,
            "isDefaultOutput": True
        },
        "winnerCells": {
            "description": ("A binary output containing a 1 for every "
                            "'winner' cell in the TM."),
            "dataType": "Real32",
            "count": 0,
            "regionLevel": True,
            "isDefaultOutput": False
        },
    }

    parameters = {
        # Input sizes (the network API doesn't provide these during initialize)
        "columnCount": {
            "description": ("The size of the 'activeColumns' input "
                            "(i.e. the number of columns)"),
            "accessMode": "Read",
            "dataType": "UInt32",
            "count": 1,
            "constraints": ""
        },
        "basalInputWidth": {
            "description": "The size of the 'basalInput' input",
            "accessMode": "Read",
            "dataType": "UInt32",
            "count": 1,
            "constraints": ""
        },
        "apicalInputWidth": {
            "description": "The size of the 'apicalInput' input",
            "accessMode": "Read",
            "dataType": "UInt32",
            "count": 1,
            "constraints": ""
        },
        "learn": {
            "description": "True if the TM should learn.",
            "accessMode": "ReadWrite",
            "dataType": "Bool",
            "count": 1,
            "defaultValue": "true"
        },
        "cellsPerColumn": {
            "description": "Number of cells per column",
            "accessMode": "Read",
            "dataType": "UInt32",
            "count": 1,
            "constraints": ""
        },
        "activationThreshold": {
            "description": ("If the number of active connected synapses on a "
                            "segment is at least this threshold, the segment "
                            "is said to be active."),
            "accessMode": "Read",
            "dataType": "UInt32",
            "count": 1,
            "constraints": ""
        },
        "reducedBasalThreshold": {
            "description": ("Activation threshold of basal segments for cells "
                            "with active apical segments (with apicalTiebreak "
                            "implementation). "),
            "accessMode": "Read",
            "dataType": "UInt32",
            "count": 1,
            "constraints": ""
        },
        "initialPermanence": {
            "description": "Initial permanence of a new synapse.",
            "accessMode": "Read",
            "dataType": "Real32",
            "count": 1,
            "constraints": ""
        },
        "connectedPermanence": {
            "description": ("If the permanence value for a synapse is greater "
                            "than this value, it is said to be connected."),
            "accessMode": "Read",
            "dataType": "Real32",
            "count": 1,
            "constraints": ""
        },
        "minThreshold": {
            "description": ("If the number of synapses active on a segment is at "
                            "least this threshold, it is selected as the best "
                            "matching cell in a bursting column."),
            "accessMode": "Read",
            "dataType": "UInt32",
            "count": 1,
            "constraints": ""
        },
        "sampleSize": {
            "description": ("The desired number of active synapses for an "
                            "active cell"),
            "accessMode": "Read",
            "dataType": "UInt32",
            "count": 1
        },
        "learnOnOneCell": {
            "description": ("If True, the winner cell for each column will be"
                            " fixed between resets."),
            "accessMode": "Read",
            "dataType": "Bool",
            "count": 1,
            "defaultValue": "false"
        },
        "maxSynapsesPerSegment": {
            "description": ("The maximum number of synapses per segment. Use -1 "
                            "for unlimited."),
            "accessMode": "Read",
            "dataType": "Int32",
            "count": 1
        },
        "maxSegmentsPerCell": {
            "description": "The maximum number of segments per cell",
            "accessMode": "Read",
            "dataType": "UInt32",
            "count": 1
        },
        "permanenceIncrement": {
            "description": ("Amount by which permanences of synapses are "
                            "incremented during learning."),
            "accessMode": "Read",
            "dataType": "Real32",
            "count": 1
        },
        "permanenceDecrement": {
            "description": ("Amount by which permanences of synapses are "
                            "decremented during learning."),
            "accessMode": "Read",
            "dataType": "Real32",
            "count": 1
        },
        "basalPredictedSegmentDecrement": {
            "description": ("Amount by which active permanences of synapses of "
                            "previously predicted but inactive segments are "
                            "decremented."),
            "accessMode": "Read",
            "dataType": "Real32",
            "count": 1
        },
        "apicalPredictedSegmentDecrement": {
            "description": ("Amount by which active permanences of synapses of "
                            "previously predicted but inactive segments are "
                            "decremented."),
            "accessMode": "Read",
            "dataType": "Real32",
            "count": 1
        },
        "seed": {
            "description": "Seed for the random number generator.",
            "accessMode": "Read",
            "dataType": "UInt32",
            "count": 1
        },
        "implementation": {
            "description": "Apical implementation",
            "accessMode": "Read",
            "dataType": "Byte",
            "count": 0,
            "constraints": ("enum: ApicalTiebreak, ApicalTiebreakCPP, ApicalDependent"),
            "defaultValue": "ApicalTiebreakCPP"
        },
    }

    return {
        "description": ApicalTMPairRegion.__doc__,
        "singleNodeOnly": True,
        "inputs": inputs,
        "outputs": outputs,
        "parameters": parameters,
    }
"def",
"getSpec",
"(",
"cls",
")",
":",
"spec",
"=",
"{",
"\"description\"",
":",
"ApicalTMPairRegion",
".",
"__doc__",
",",
"\"singleNodeOnly\"",
":",
"True",
",",
"\"inputs\"",
":",
"{",
"\"activeColumns\"",
":",
"{",
"\"description\"",
":",
"(",
"\"An array... | 34.113971 | 18.716912 |
def unscored_nodes_iter(self) -> BaseEntity:
        """Yield every node in the graph that does not carry a score tag."""
        for node, node_data in self.graph.nodes(data=True):
            if self.tag in node_data:
                continue
            yield node
"def",
"unscored_nodes_iter",
"(",
"self",
")",
"->",
"BaseEntity",
":",
"for",
"node",
",",
"data",
"in",
"self",
".",
"graph",
".",
"nodes",
"(",
"data",
"=",
"True",
")",
":",
"if",
"self",
".",
"tag",
"not",
"in",
"data",
":",
"yield",
"node"
] | 42.6 | 7.2 |
def cli_tempurl(context, method, path, seconds=None, use_container=False):
    """
    Generates a TempURL and sends that to the context.io_manager's
    stdout.

    See :py:mod:`swiftly.cli.tempurl` for context usage information.

    See :py:class:`CLITempURL` for more information.

    :param context: The :py:class:`swiftly.cli.context.CLIContext` to
        use.
    :param method: The method for the TempURL (GET, PUT, etc.)
    :param path: The path the TempURL should direct to.
    :param seconds: The number of seconds the TempURL should be good
        for. Default: 3600
    :param use_container: If True, will create a container level TempURL
        using X-Container-Meta-Temp-Url-Key instead of
        X-Account-Meta-Temp-Url-Key.
    """
    # FIX: contextlib.nested was deprecated in Python 2.7 and removed in
    # Python 3; the multi-context ``with`` statement is equivalent and
    # works on both.
    with context.io_manager.with_stdout() as fp, \
            context.client_manager.with_client() as client:
        method = method.upper()
        path = path.lstrip('/')
        seconds = seconds if seconds is not None else 3600
        if '/' not in path:
            raise ReturnCode(
                'invalid tempurl path %r; should have a / within it' % path)
        if use_container:
            # container-level keys live on the container itself
            key_type = 'container'
            container = path.split('/', 1)[0]
            status, reason, headers, contents = \
                client.head_container(container)
        else:
            key_type = 'account'
            status, reason, headers, contents = \
                client.head_account()
        if status // 100 != 2:
            raise ReturnCode(
                'obtaining X-%s-Meta-Temp-Url-Key: %s %s' %
                (key_type.title(), status, reason))
        key = headers.get('x-%s-meta-temp-url-key' % key_type)
        if not key:
            raise ReturnCode(
                'there is no X-%s-Meta-Temp-Url-Key set for this %s' %
                (key_type.title(), key_type))
        url = client.storage_url + '/' + path
        fp.write(generate_temp_url(method, url, seconds, key))
        fp.write('\n')
        fp.flush()
"def",
"cli_tempurl",
"(",
"context",
",",
"method",
",",
"path",
",",
"seconds",
"=",
"None",
",",
"use_container",
"=",
"False",
")",
":",
"with",
"contextlib",
".",
"nested",
"(",
"context",
".",
"io_manager",
".",
"with_stdout",
"(",
")",
",",
"conte... | 40.06 | 17.46 |
def add_schema(self, schema):
        """
        Merge in a JSON schema. This can be a ``dict`` or another
        ``SchemaBuilder``

        :param schema: a JSON Schema

        .. note::
            There is no schema validation. If you pass in a bad schema,
            you might get back a bad schema.
        """
        if isinstance(schema, SchemaBuilder):
            uri = schema.schema_uri
            schema = schema.to_schema()
            if uri is None:
                # the builder had no explicit $schema; drop the generated one
                del schema['$schema']
        elif isinstance(schema, SchemaNode):
            schema = schema.to_schema()

        if '$schema' in schema:
            # remember the first URI seen, then strip it from the payload
            self.schema_uri = self.schema_uri or schema['$schema']
            schema = dict(schema)
            del schema['$schema']

        self._root_node.add_schema(schema)
"def",
"add_schema",
"(",
"self",
",",
"schema",
")",
":",
"if",
"isinstance",
"(",
"schema",
",",
"SchemaBuilder",
")",
":",
"schema_uri",
"=",
"schema",
".",
"schema_uri",
"schema",
"=",
"schema",
".",
"to_schema",
"(",
")",
"if",
"schema_uri",
"is",
"... | 33.083333 | 12.75 |
def create_objects_from_iterables(obj, args: dict, iterables: Dict[str, Any], formatting_options: Dict[str, Any], key_index_name: str = "KeyIndex") -> Tuple[Any, Dict[str, Any], dict]:
    """ Create objects for each set of values based on the given arguments.

    The iterable values are available under a key index ``dataclass`` which is used to index the returned
    dictionary. The names of the fields are determined by the keys of iterables dictionary. The values are
    the newly created object. Note that the iterable values must be convertible to a str() so they can be
    included in the formatting dictionary.

    Each set of values is also included in the object args.

    As a basic example,

    .. code-block:: python

        >>> create_objects_from_iterables(
        ...     obj = obj,
        ...     args = {},
        ...     iterables = {"a" : ["a1","a2"], "b" : ["b1", "b2"]},
        ...     formatting_options = {}
        ... )
        (
            KeyIndex,
            {"a": ["a1", "a2"], "b": ["b1", "b2"]}
            {
                KeyIndex(a = "a1", b = "b1"): obj(a = "a1", b = "b1"),
                KeyIndex(a = "a1", b = "b2"): obj(a = "a1", b = "b2"),
                KeyIndex(a = "a2", b = "b1"): obj(a = "a2", b = "b1"),
                KeyIndex(a = "a2", b = "b2"): obj(a = "a2", b = "b2"),
            }
        )

    Args:
        obj (object): The object to be constructed.
        args: Arguments to be passed to the object to create it.
        iterables: Iterables to be used to create the objects, with entries of the form
            ``"name_of_iterable": iterable``.
        formatting_options: Values to be used in formatting strings in the arguments.
        key_index_name: Name of the iterable key index.
    Returns:
        (object, list, dict, dict): Roughly, (KeyIndex, iterables, objects). Specifically, the
            key_index is a new dataclass which defines the parameters used to create the object, iterables
            are the iterables used to create the objects, which names as keys and the iterables as values.
            The objects dictionary keys are KeyIndex objects which describe the iterable arguments passed to the
            object, while the values are the newly constructed arguments. See the example above.
    """
    # NOTE(review): ``args`` and ``formatting_options`` are mutated in place
    # below (one key per iterable name), so after this call the caller's
    # dicts hold the values from the final iteration.
    # Setup
    objects = {}
    names = list(iterables)
    logger.debug(f"iterables: {iterables}")
    # Create the key index object, where the name of each field is the name of each iterable.
    KeyIndex = create_key_index_object(
        key_index_name = key_index_name,
        iterables = iterables,
    )
    # ``itertools.product`` produces all possible permutations of the iterables values.
    # NOTE: Product preserves the order of the iterables values, which is important for properly
    #       assigning the values to the ``KeyIndex``.
    for values in itertools.product(*iterables.values()):
        logger.debug(f"Values: {values}")
        # Skip if we don't have a sufficient set of values to create an object.
        if not values:
            continue
        # Add in the values into the arguments and formatting options.
        # NOTE: We don't need a deep copy for the iterable values in the args and formatting options
        #       because the values will be overwritten for each object.
        for name, val in zip(names, values):
            # We want to keep the original value for the arguments.
            args[name] = val
            # Here, we convert the value, regardless of type, into a string that can be displayed.
            formatting_options[name] = str(val)
        # Apply formatting options
        # If we formatted in place, we would need to deepcopy the args to ensure that the iterable dependent
        # values in the formatted values are properly set for each iterable object individually.
        # However, by formatting into new variables, we can avoid a deepcopy, which greatly improves performance!
        # NOTE: We don't need a deep copy do this for iterable value names themselves because they will be overwritten
        #       for each object. They are set in the block above.
        object_args = copy.copy(args)
        logger.debug(f"object_args pre format: {object_args}")
        object_args = apply_formatting_dict(object_args, formatting_options)
        # Print our results for debugging purposes. However, we skip printing the full
        # config because it is quite long
        print_args = {k: v for k, v in object_args.items() if k != "config"}
        print_args["config"] = "..."
        logger.debug(f"Constructing obj \"{obj}\" with args: \"{print_args}\"")
        # Finally create the object.
        objects[KeyIndex(*values)] = obj(**object_args)
    # If nothing has been created at this point, then we are didn't iterating over anything and something
    # has gone wrong.
    if not objects:
        raise ValueError(iterables, "There appear to be no iterables to use in creating objects.")
    return (KeyIndex, iterables, objects)
"def",
"create_objects_from_iterables",
"(",
"obj",
",",
"args",
":",
"dict",
",",
"iterables",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
",",
"formatting_options",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
",",
"key_index_name",
":",
"str",
"=",
"\"KeyI... | 51.552083 | 31.114583 |
def pointTo(agent_host, ob, target_pitch, target_yaw, threshold):
    '''Steer the agent's view towards target_pitch/target_yaw.

    Sends turn/pitch rate commands proportional to the remaining angular
    error and returns True once the combined pitch+yaw error falls below
    ``threshold`` (also zeroing both rates).
    '''
    current_pitch = ob.get(u'Pitch', 0)
    current_yaw = ob.get(u'Yaw', 0)
    # angvel() converts the angular error into a turn-rate command
    agent_host.sendCommand("turn " + str(angvel(target_yaw, current_yaw, 50.0)))
    agent_host.sendCommand("pitch " + str(angvel(target_pitch, current_pitch, 50.0)))
    if abs(current_pitch - target_pitch) + abs(current_yaw - target_yaw) < threshold:
        # close enough: stop turning and report success
        agent_host.sendCommand("turn 0")
        agent_host.sendCommand("pitch 0")
        return True
    return False
"def",
"pointTo",
"(",
"agent_host",
",",
"ob",
",",
"target_pitch",
",",
"target_yaw",
",",
"threshold",
")",
":",
"pitch",
"=",
"ob",
".",
"get",
"(",
"u'Pitch'",
",",
"0",
")",
"yaw",
"=",
"ob",
".",
"get",
"(",
"u'Yaw'",
",",
"0",
")",
"delta_y... | 47 | 17.307692 |
def get_event(self, *etypes, timeout=None):
        """
        Return a single event object or block until an event is
        received and return it.

        - etypes(str): If defined, Slack event type(s) not matching
          the filter will be ignored. See https://api.slack.com/events for
          a listing of valid event types.
        - timeout(int): Max time, in seconds, to block waiting for new event
        """
        self._validate_etypes(*etypes)
        started_at = time.time()
        event = self._eventq.get(timeout=timeout)
        if isinstance(event, Exception):
            raise event
        self._stats['events_recieved'] += 1
        if not etypes or event.type in etypes:
            return event
        # filtered out: drop it and keep waiting on the remaining budget
        if timeout:
            timeout -= time.time() - started_at
        log.debug('ignoring filtered event: {}'.format(event.json))
        self._stats['events_dropped'] += 1
        return self.get_event(*etypes, timeout=timeout)
"def",
"get_event",
"(",
"self",
",",
"*",
"etypes",
",",
"timeout",
"=",
"None",
")",
":",
"self",
".",
"_validate_etypes",
"(",
"*",
"etypes",
")",
"start",
"=",
"time",
".",
"time",
"(",
")",
"e",
"=",
"self",
".",
"_eventq",
".",
"get",
"(",
... | 37.16 | 16.28 |
def save_hash(self, location, basedir, ext=None):
        """
        Save response body into file with special path
        builded from hash. That allows to lower number of files
        per directory.

        :param location: URL of file or something else. It is
            used to build the SHA1 hash.
        :param basedir: base directory to save the file. Note that
            file will not be saved directly to this directory but to
            some sub-directory of `basedir`
        :param ext: extension which should be appended to file name. The
            dot is inserted automatically between filename and extension.
        :returns: path to saved file relative to `basedir`
        """
        if isinstance(location, six.text_type):
            # hashing operates on bytes
            location = location.encode('utf-8')
        rel_path = hashed_path(location, ext=ext)
        abs_path = os.path.join(basedir, rel_path)
        if not os.path.exists(abs_path):
            dir_name = os.path.split(abs_path)[0]
            try:
                os.makedirs(dir_name)
            except OSError:
                # directory already exists (or a race created it)
                pass
            with open(abs_path, 'wb') as out:
                out.write(self._bytes_body)
        return rel_path
"def",
"save_hash",
"(",
"self",
",",
"location",
",",
"basedir",
",",
"ext",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"location",
",",
"six",
".",
"text_type",
")",
":",
"location",
"=",
"location",
".",
"encode",
"(",
"'utf-8'",
")",
"rel_path... | 38.575 | 17.375 |
def targets(tgt, tgt_type='range', **kwargs):
    '''
    Return the targets from a range query
    '''
    range_server = __opts__['range_server']
    range_conn = seco.range.Range(range_server)
    log.debug('Range connection to \'%s\' established', range_server)

    try:
        log.debug('Querying range for \'%s\'', tgt)
        hosts = range_conn.expand(tgt)
    except seco.range.RangeException as err:
        log.error('Range server exception: %s', err)
        return {}
    log.debug('Range responded with: \'%s\'', hosts)

    # Only raw range entries are supported; there is no target filtering
    # beyond what range itself returns :S
    dispatch = {
        'range': target_range,
        'glob': target_range,
        # 'glob': target_glob,
    }
    log.debug('Filtering using tgt_type: \'%s\'', tgt_type)
    try:
        targeted_hosts = dispatch[tgt_type](tgt, hosts)
    except KeyError:
        raise NotImplementedError
    log.debug('Targeting data for salt-ssh: \'%s\'', targeted_hosts)
    return targeted_hosts
return targeted_hosts | [
"def",
"targets",
"(",
"tgt",
",",
"tgt_type",
"=",
"'range'",
",",
"*",
"*",
"kwargs",
")",
":",
"r",
"=",
"seco",
".",
"range",
".",
"Range",
"(",
"__opts__",
"[",
"'range_server'",
"]",
")",
"log",
".",
"debug",
"(",
"'Range connection to \\'%s\\' est... | 31.125 | 23.75 |
def measured_voltage(self):
        """
        The measured voltage that the battery is supplying (in microvolts)
        """
        # get_attr_int returns (cached attribute handle, current value)
        self._measured_voltage, measured = self.get_attr_int(
            self._measured_voltage, 'voltage_now')
        return measured
return value | [
"def",
"measured_voltage",
"(",
"self",
")",
":",
"self",
".",
"_measured_voltage",
",",
"value",
"=",
"self",
".",
"get_attr_int",
"(",
"self",
".",
"_measured_voltage",
",",
"'voltage_now'",
")",
"return",
"value"
] | 39.833333 | 20.5 |
def delete(self, cascade=False, delete_shares=False):
        """
        Deletes the video.

        No-op when the video has no id; otherwise issues the
        delete_video call and clears ``self.id``.
        """
        if not self.id:
            return
        self.connection.post('delete_video', video_id=self.id,
                             cascade=cascade, delete_shares=delete_shares)
        self.id = None
"def",
"delete",
"(",
"self",
",",
"cascade",
"=",
"False",
",",
"delete_shares",
"=",
"False",
")",
":",
"if",
"self",
".",
"id",
":",
"self",
".",
"connection",
".",
"post",
"(",
"'delete_video'",
",",
"video_id",
"=",
"self",
".",
"id",
",",
"casc... | 34.125 | 13.625 |
def pop(self, identifier, default=None):
        """Pop a node of the AttrTree using its path string.

        Args:
            identifier: Path string of the node to return
            default: Value to return if no node is found

        Returns:
            The node that was removed from the AttrTree
        """
        if identifier not in self.children:
            return default
        node = self[identifier]
        del self[identifier]
        return node
"def",
"pop",
"(",
"self",
",",
"identifier",
",",
"default",
"=",
"None",
")",
":",
"if",
"identifier",
"in",
"self",
".",
"children",
":",
"item",
"=",
"self",
"[",
"identifier",
"]",
"self",
".",
"__delitem__",
"(",
"identifier",
")",
"return",
"ite... | 30.25 | 15.1875 |
def create_shortcuts(self):
        """Create the configurable keyboard shortcuts for this widget."""
        # Configurable: (callback, shortcut name) pairs, all in the
        # 'plots' context with this widget as parent.
        shortcut_specs = [
            (self.copy_figure, 'copy'),
            (self.go_previous_thumbnail, 'previous figure'),
            (self.go_next_thumbnail, 'next figure'),
        ]
        return [config_shortcut(callback, context='plots',
                                name=name, parent=self)
                for callback, name in shortcut_specs]
"def",
"create_shortcuts",
"(",
"self",
")",
":",
"# Configurable",
"copyfig",
"=",
"config_shortcut",
"(",
"self",
".",
"copy_figure",
",",
"context",
"=",
"'plots'",
",",
"name",
"=",
"'copy'",
",",
"parent",
"=",
"self",
")",
"prevfig",
"=",
"config_short... | 50.272727 | 22.545455 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.