| text (string, lengths 89–104k) | code_tokens (list of strings) | avg_line_len (float64, 7.91–980) | score (float64, 0–630) |
|---|---|---|---|
def maybe_coroutine(obj):
"""
If 'obj' is a coroutine and we're using Python3, wrap it in
ensureDeferred. Otherwise return the original object.
(This is intended to be inserted into all callback chains from user
code, in case that user code is Python 3 and uses 'async def'.)
"""
if six.PY3 and asyncio.iscoroutine(obj):
return defer.ensureDeferred(obj)
return obj | [
"def",
"maybe_coroutine",
"(",
"obj",
")",
":",
"if",
"six",
".",
"PY3",
"and",
"asyncio",
".",
"iscoroutine",
"(",
"obj",
")",
":",
"return",
"defer",
".",
"ensureDeferred",
"(",
"obj",
")",
"return",
"obj"
] | 34.272727 | 15 |
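A minimal usage sketch for `maybe_coroutine`, assuming Twisted is installed; `user_callback` is a hypothetical user-supplied `async def` callback that the wrapper lets participate in a Deferred chain:

```python
from twisted.internet import defer

async def user_callback(result):
    # hypothetical Python 3 user code written with 'async def'
    return result * 2

d = defer.succeed(21)
# maybe_coroutine converts the coroutine into a Deferred, so the chain keeps working
d.addCallback(lambda res: maybe_coroutine(user_callback(res)))
```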
def preparer(telefoon):
'''
Edit a phone value to a value that can be validated as a
phone number.
This takes the incoming value and:
Removes all whitespace (space, tab, newline, ...) characters
Removes the following characters: "/", "-", ".", ","
Strips leading zeros
If no "+" is present at the front of the country code, one is added
'''
if telefoon is None or telefoon == colander.null:
return colander.null
if 'landcode' in telefoon and telefoon.get('landcode') is not None:
landcode = telefoon.get('landcode')
value = re.sub(r'\s+', '', landcode).replace('.', '').replace('/', '').replace(',', '').replace('-', ''). \
lstrip('0')
telefoon['landcode'] = '+' + value if not value.startswith('+') else value  # startswith() also copes with an empty value
if 'nummer' in telefoon and telefoon.get('nummer') is not None:
nummer = telefoon.get('nummer')
value = re.sub(r'\s+', '', nummer).replace('.', '').replace('/', '').replace(',', '').replace('-', ''). \
lstrip('0')
telefoon['nummer'] = value
return telefoon | [
"def",
"preparer",
"(",
"telefoon",
")",
":",
"if",
"telefoon",
"is",
"None",
"or",
"telefoon",
"==",
"colander",
".",
"null",
":",
"return",
"colander",
".",
"null",
"if",
"'landcode'",
"in",
"telefoon",
"and",
"telefoon",
".",
"get",
"(",
"'landcode'",
... | 51.695652 | 24.478261 |
def validate_sdl(
document_ast: DocumentNode,
schema_to_extend: GraphQLSchema = None,
rules: Sequence[RuleType] = None,
) -> List[GraphQLError]:
"""Validate an SDL document."""
context = SDLValidationContext(document_ast, schema_to_extend)
if rules is None:
rules = specified_sdl_rules
visitors = [rule(context) for rule in rules]
visit(document_ast, ParallelVisitor(visitors))
return context.errors | [
"def",
"validate_sdl",
"(",
"document_ast",
":",
"DocumentNode",
",",
"schema_to_extend",
":",
"GraphQLSchema",
"=",
"None",
",",
"rules",
":",
"Sequence",
"[",
"RuleType",
"]",
"=",
"None",
",",
")",
"->",
"List",
"[",
"GraphQLError",
"]",
":",
"context",
... | 36 | 11.416667 |
def _integrateLinearOrbit(vxvv,pot,t,method,dt):
"""
NAME:
integrateLinearOrbit
PURPOSE:
integrate a one-dimensional orbit
INPUT:
vxvv - initial condition [x,vx]
pot - linearPotential or list of linearPotentials
t - list of times at which to output (0 has to be in this!)
method - 'odeint', 'leapfrog', 'dop853', or one of the C variants ('leapfrog_c', 'rk4_c', 'rk6_c', 'symplec4_c', 'symplec6_c', 'dopr54_c', 'dop853_c')
dt - fixed stepsize to use for the C integrators
OUTPUT:
[:,2] array of [x,vx] at each t
HISTORY:
2010-07-13- Written - Bovy (NYU)
2018-10-05- Added support for C integration - Bovy (UofT)
"""
#First check that the potential has C
if '_c' in method:
if not ext_loaded or not _check_c(pot):
if ('leapfrog' in method or 'symplec' in method):
method= 'leapfrog'
else:
method= 'odeint'
if not ext_loaded: # pragma: no cover
warnings.warn("Cannot use C integration because C extension not loaded (using %s instead)" % (method), galpyWarning)
else:
warnings.warn("Cannot use C integration because some of the potentials are not implemented in C (using %s instead)" % (method), galpyWarning)
if method.lower() == 'leapfrog':
return symplecticode.leapfrog(lambda x,t=t: _evaluatelinearForces(pot,x,
t=t),
nu.array(vxvv),
t,rtol=10.**-8)
elif method.lower() == 'dop853':
return dop853(func=_linearEOM, x=vxvv, t=t, args=(pot,))
elif ext_loaded and \
(method.lower() == 'leapfrog_c' or method.lower() == 'rk4_c' \
or method.lower() == 'rk6_c' or method.lower() == 'symplec4_c' \
or method.lower() == 'symplec6_c' or method.lower() == 'dopr54_c' \
or method.lower() == 'dop853_c'):
warnings.warn("Using C implementation to integrate orbits",
galpyWarningVerbose)
out, msg= integrateLinearOrbit_c(pot,nu.array(vxvv),t,method,dt=dt)
return out
elif method.lower() == 'odeint' or not ext_loaded:
return integrate.odeint(_linearEOM,vxvv,t,args=(pot,),rtol=10.**-8.) | [
"def",
"_integrateLinearOrbit",
"(",
"vxvv",
",",
"pot",
",",
"t",
",",
"method",
",",
"dt",
")",
":",
"#First check that the potential has C",
"if",
"'_c'",
"in",
"method",
":",
"if",
"not",
"ext_loaded",
"or",
"not",
"_check_c",
"(",
"pot",
")",
":",
"if... | 47 | 21 |
def _ref(self, param, base_name=None):
"""
Store a parameter schema and return a reference to it.
:param param:
Swagger parameter definition.
:param base_name:
Name that should be used for the reference.
:rtype: dict
:returns: JSON pointer to the original parameter definition.
"""
name = base_name or param.get('title', '') or param.get('name', '')
pointer = self.json_pointer + name
self.parameter_registry[name] = param
return {'$ref': pointer} | [
"def",
"_ref",
"(",
"self",
",",
"param",
",",
"base_name",
"=",
"None",
")",
":",
"name",
"=",
"base_name",
"or",
"param",
".",
"get",
"(",
"'title'",
",",
"''",
")",
"or",
"param",
".",
"get",
"(",
"'name'",
",",
"''",
")",
"pointer",
"=",
"sel... | 28.789474 | 19.526316 |
def remove_state_machine(self, state_machine_id):
"""Remove the state machine for a specified state machine id from the list of registered state machines.
:param state_machine_id: the id of the state machine to be removed
"""
import rafcon.core.singleton as core_singletons
removed_state_machine = None
if state_machine_id in self._state_machines:
logger.debug("Remove state machine with id {0}".format(state_machine_id))
removed_state_machine = self._state_machines.pop(state_machine_id)
else:
logger.error("There is no state_machine with state_machine_id: %s" % state_machine_id)
return removed_state_machine
# destroy execution history
removed_state_machine.destroy_execution_histories()
return removed_state_machine | [
"def",
"remove_state_machine",
"(",
"self",
",",
"state_machine_id",
")",
":",
"import",
"rafcon",
".",
"core",
".",
"singleton",
"as",
"core_singletons",
"removed_state_machine",
"=",
"None",
"if",
"state_machine_id",
"in",
"self",
".",
"_state_machines",
":",
"l... | 49 | 20.588235 |
def get_all_interface_details(devId, auth, url):
"""
function takes the devId of a specific device and issues a RESTful call to get the details
of all interfaces as known by the HP IMC Base Platform ICC module for the target device.
:param devId: int or str value of the devId of the target device
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: list of dict objects which contains the details of all interfaces on the target device
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.device import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> all_interface_details = get_all_interface_details('10', auth.creds, auth.url)
>>> assert type(all_interface_details) is list
>>> assert 'ifAlias' in all_interface_details[0]
"""
get_all_interface_details_url = "/imcrs/plat/res/device/" + str(devId) + "/interface/?start=0&size=1000&desc=false&total=false"
f_url = url + get_all_interface_details_url
# no request body is needed; the device id and options are encoded in the URL
r = requests.get(f_url, auth=auth, headers=HEADERS)
# r.status_code
try:
if r.status_code == 200:
dev_details = (json.loads(r.text))
return dev_details['interface']
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + " get_interface_details: An Error has occured" | [
"def",
"get_all_interface_details",
"(",
"devId",
",",
"auth",
",",
"url",
")",
":",
"get_all_interface_details_url",
"=",
"\"/imcrs/plat/res/device/\"",
"+",
"str",
"(",
"devId",
")",
"+",
"\"/interface/?start=0&size=1000&desc=false&total=false\"",
"f_url",
"=",
"url",
... | 37.418605 | 30.139535 |
def create_revlookup_query(self, *fulltext_searchterms, **keyvalue_searchterms):
'''
Create the part of the solr request that comes after the question mark,
e.g. ?URL=*dkrz*&CHECKSUM=*abc*. If allowed search keys are
configured, only these are used. If no allowed search keys are
specified, all key-value pairs are passed on to the reverse lookup
servlet.
:param fulltext_searchterms: Optional. Any term specified will be used
as search term. Not implemented yet, so will be ignored.
:param keyvalue_searchterms: Optional. Key-value pairs. Any key-value
pair will be used to search for the value in the field "key".
Wildcards accepted (refer to the documentation of the reverse
lookup servlet for syntax.)
:return: The query string, after the "?". If no valid search terms were
specified, None is returned.
'''
LOGGER.debug('create_revlookup_query...')
allowed_search_keys = self.__allowed_search_keys
only_search_for_allowed_keys = False
if len(allowed_search_keys) > 0:
only_search_for_allowed_keys = True
fulltext_searchterms_given = True
fulltext_searchterms = b2handle.util.remove_value_none_from_list(fulltext_searchterms)
if len(fulltext_searchterms) == 0:
fulltext_searchterms_given = False
if fulltext_searchterms_given:
msg = 'Full-text search is not implemented yet.'+\
' The provided searchterms '+str(fulltext_searchterms)+\
' can not be used.'
raise ReverseLookupException(msg=msg)
keyvalue_searchterms_given = True
keyvalue_searchterms = b2handle.util.remove_value_none_from_dict(keyvalue_searchterms)
if len(keyvalue_searchterms) == 0:
keyvalue_searchterms_given = False
if not keyvalue_searchterms_given and not fulltext_searchterms_given:
msg = 'No search terms have been specified. Please specify'+\
' at least one key-value-pair.'
raise ReverseLookupException(msg=msg)
counter = 0
query = '?'
for key, value in keyvalue_searchterms.items():
if only_search_for_allowed_keys and key not in allowed_search_keys:
msg = 'Cannot search for key "'+key+'". Only searches '+\
'for keys '+str(allowed_search_keys)+' are implemented.'
raise ReverseLookupException(msg=msg)
else:
query = query+'&'+key+'='+value
counter += 1
query = query.replace('?&', '?')
LOGGER.debug('create_revlookup_query: query: '+query)
if counter == 0: # unreachable?
msg = 'No valid search terms have been specified.'
raise ReverseLookupException(msg=msg)
return query | [
"def",
"create_revlookup_query",
"(",
"self",
",",
"*",
"fulltext_searchterms",
",",
"*",
"*",
"keyvalue_searchterms",
")",
":",
"LOGGER",
".",
"debug",
"(",
"'create_revlookup_query...'",
")",
"allowed_search_keys",
"=",
"self",
".",
"__allowed_search_keys",
"only_se... | 45.460317 | 22.603175 |
def timestamp_to_local_time(timestamp, timezone_name):
"""Convert epoch timestamp to a localized Delorean datetime object.
Arguments
---------
timestamp : int
The timestamp to convert.
timezone_name : str
The name of the timezone of the desired local time.
Returns
-------
delorean.Delorean
A localized Delorean datetime object.
"""
# first convert timestamp to UTC
utc_time = datetime.utcfromtimestamp(float(timestamp))
delo = Delorean(utc_time, timezone='UTC')
# shift d according to input timezone
localized_d = delo.shift(timezone_name)
return localized_d | [
"def",
"timestamp_to_local_time",
"(",
"timestamp",
",",
"timezone_name",
")",
":",
"# first convert timestamp to UTC",
"utc_time",
"=",
"datetime",
".",
"utcfromtimestamp",
"(",
"float",
"(",
"timestamp",
")",
")",
"delo",
"=",
"Delorean",
"(",
"utc_time",
",",
"... | 29.857143 | 15.095238 |
def wordlist2cognates(wordlist, source, expert='expert', ref='cogid'):
"""Turn a wordlist into a cognate set list, using the cldf parameters."""
for k in wordlist:
yield dict(
Form_ID=wordlist[k, 'lid'],
ID=k,
Form=wordlist[k, 'ipa'],
Cognateset_ID='{0}-{1}'.format(
slug(wordlist[k, 'concept']), wordlist[k, ref]),
Cognate_Detection_Method=expert,
Source=source) | [
"def",
"wordlist2cognates",
"(",
"wordlist",
",",
"source",
",",
"expert",
"=",
"'expert'",
",",
"ref",
"=",
"'cogid'",
")",
":",
"for",
"k",
"in",
"wordlist",
":",
"yield",
"dict",
"(",
"Form_ID",
"=",
"wordlist",
"[",
"k",
",",
"'lid'",
"]",
",",
"... | 41.545455 | 12.909091 |
def options(f):
"""
Shared options, used by all bartender commands
"""
f = click.option('--config', envvar='VODKA_HOME', default=click.get_app_dir('vodka'), help="location of config file")(f)
return f | [
"def",
"options",
"(",
"f",
")",
":",
"f",
"=",
"click",
".",
"option",
"(",
"'--config'",
",",
"envvar",
"=",
"'VODKA_HOME'",
",",
"default",
"=",
"click",
".",
"get_app_dir",
"(",
"'vodka'",
")",
",",
"help",
"=",
"\"location of config file\"",
")",
"(... | 30.714286 | 26.714286 |
def get(self,id):
'''Return the semantic tag related to the given tag id
:returns: a semantic tag or None
:rtype: ckan.model.semantictag.TagSemanticTag object or None
'''
query = meta.Session.query(TagSemanticTag).filter(TagSemanticTag.id==id)
return query.first() | [
"def",
"get",
"(",
"self",
",",
"id",
")",
":",
"query",
"=",
"meta",
".",
"Session",
".",
"query",
"(",
"TagSemanticTag",
")",
".",
"filter",
"(",
"TagSemanticTag",
".",
"id",
"==",
"id",
")",
"return",
"query",
".",
"first",
"(",
")"
] | 30.111111 | 26.111111 |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self,
'credential_type') and self.credential_type is not None:
_dict['credential_type'] = self.credential_type
if hasattr(self, 'client_id') and self.client_id is not None:
_dict['client_id'] = self.client_id
if hasattr(self, 'enterprise_id') and self.enterprise_id is not None:
_dict['enterprise_id'] = self.enterprise_id
if hasattr(self, 'url') and self.url is not None:
_dict['url'] = self.url
if hasattr(self, 'username') and self.username is not None:
_dict['username'] = self.username
if hasattr(self,
'organization_url') and self.organization_url is not None:
_dict['organization_url'] = self.organization_url
if hasattr(self, 'site_collection_path'
) and self.site_collection_path is not None:
_dict['site_collection.path'] = self.site_collection_path
if hasattr(self, 'client_secret') and self.client_secret is not None:
_dict['client_secret'] = self.client_secret
if hasattr(self, 'public_key_id') and self.public_key_id is not None:
_dict['public_key_id'] = self.public_key_id
if hasattr(self, 'private_key') and self.private_key is not None:
_dict['private_key'] = self.private_key
if hasattr(self, 'passphrase') and self.passphrase is not None:
_dict['passphrase'] = self.passphrase
if hasattr(self, 'password') and self.password is not None:
_dict['password'] = self.password
if hasattr(self, 'gateway_id') and self.gateway_id is not None:
_dict['gateway_id'] = self.gateway_id
if hasattr(self, 'source_version') and self.source_version is not None:
_dict['source_version'] = self.source_version
if hasattr(
self,
'web_application_url') and self.web_application_url is not None:
_dict['web_application_url'] = self.web_application_url
if hasattr(self, 'domain') and self.domain is not None:
_dict['domain'] = self.domain
if hasattr(self, 'endpoint') and self.endpoint is not None:
_dict['endpoint'] = self.endpoint
if hasattr(self, 'access_key_id') and self.access_key_id is not None:
_dict['access_key_id'] = self.access_key_id
if hasattr(self,
'secret_access_key') and self.secret_access_key is not None:
_dict['secret_access_key'] = self.secret_access_key
return _dict | [
"def",
"_to_dict",
"(",
"self",
")",
":",
"_dict",
"=",
"{",
"}",
"if",
"hasattr",
"(",
"self",
",",
"'credential_type'",
")",
"and",
"self",
".",
"credential_type",
"is",
"not",
"None",
":",
"_dict",
"[",
"'credential_type'",
"]",
"=",
"self",
".",
"c... | 55.145833 | 21.166667 |
def register_callbacks(self, on_create, on_modify, on_delete):
""" Register callbacks for file creation, modification, and deletion """
self.on_create = on_create
self.on_modify = on_modify
self.on_delete = on_delete | [
"def",
"register_callbacks",
"(",
"self",
",",
"on_create",
",",
"on_modify",
",",
"on_delete",
")",
":",
"self",
".",
"on_create",
"=",
"on_create",
"self",
".",
"on_modify",
"=",
"on_modify",
"self",
".",
"on_delete",
"=",
"on_delete"
] | 48.8 | 8 |
def is_merc_projection(srs):
""" Return true if the map projection matches that used by VEarth, Google, OSM, etc.
Is currently necessary for zoom-level shorthand for scale-denominator.
"""
if srs.lower() == '+init=epsg:900913':
return True
# observed
srs = dict([p.split('=') for p in srs.split() if '=' in p])
# expected
# note, common optional modifiers like +no_defs, +over, and +wkt
# are not pairs and should not prevent matching
gym = '+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null'
gym = dict([p.split('=') for p in gym.split() if '=' in p])
for p in gym:
if srs.get(p, None) != gym.get(p, None):
return False
return True | [
"def",
"is_merc_projection",
"(",
"srs",
")",
":",
"if",
"srs",
".",
"lower",
"(",
")",
"==",
"'+init=epsg:900913'",
":",
"return",
"True",
"# observed",
"srs",
"=",
"dict",
"(",
"[",
"p",
".",
"split",
"(",
"'='",
")",
"for",
"p",
"in",
"srs",
".",
... | 35 | 24.590909 |
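A quick self-check of the matcher above; the long string is an assumed spherical-Mercator proj4 definition with optional modifiers (`+no_defs +over`) that should not affect the match:

```python
assert is_merc_projection('+init=epsg:900913')

merc = ('+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 '
        '+x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs +over')
assert is_merc_projection(merc)

assert not is_merc_projection('+proj=longlat +datum=WGS84')
```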
def is_postponed_evaluation_enabled(node: astroid.node_classes.NodeNG) -> bool:
"""Check if the postponed evaluation of annotations is enabled"""
name = "annotations"
module = node.root()
stmt = module.locals.get(name)
return bool(
stmt
and isinstance(stmt[0], astroid.ImportFrom)
and stmt[0].modname == "__future__"
) | [
"def",
"is_postponed_evaluation_enabled",
"(",
"node",
":",
"astroid",
".",
"node_classes",
".",
"NodeNG",
")",
"->",
"bool",
":",
"name",
"=",
"\"annotations\"",
"module",
"=",
"node",
".",
"root",
"(",
")",
"stmt",
"=",
"module",
".",
"locals",
".",
"get... | 35.3 | 18.2 |
def out_degree_iter(self, nbunch=None, t=None):
"""Return an iterator for (node, out_degree) at time t.
The node out degree is the number of interactions outgoing from the node in a given timeframe.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None, an iterator over the degree of nodes on the flattened graph is returned.
Returns
-------
nd_iter : an iterator
The iterator returns two-tuples of (node, degree).
See Also
--------
degree
Examples
--------
>>> G = dn.DynDiGraph()
>>> G.add_interaction(0, 1, t=0)
>>> list(G.out_degree_iter(0, t=0))
[(0, 1)]
>>> list(G.out_degree_iter([0,1], t=0))
[(0, 1)]
"""
if nbunch is None:
nodes_nbrs = self._succ.items()
else:
nodes_nbrs = ((n, self._succ[n]) for n in self.nbunch_iter(nbunch))
if t is None:
for n, nbrs in nodes_nbrs:
deg = len(self._succ[n])
yield (n, deg)
else:
for n, nbrs in nodes_nbrs:
edges_t = len([v for v in nbrs.keys() if self.__presence_test(n, v, t)])
if edges_t > 0:
yield (n, edges_t)
else:
yield (n, 0) | [
"def",
"out_degree_iter",
"(",
"self",
",",
"nbunch",
"=",
"None",
",",
"t",
"=",
"None",
")",
":",
"if",
"nbunch",
"is",
"None",
":",
"nodes_nbrs",
"=",
"self",
".",
"_succ",
".",
"items",
"(",
")",
"else",
":",
"nodes_nbrs",
"=",
"(",
"(",
"n",
... | 30.44898 | 21.591837 |
def encrypt_key(key_object, password):
"""
<Purpose>
Return a string containing 'key_object' in encrypted form. Encrypted
strings may be safely saved to a file. The corresponding decrypt_key()
function can be applied to the encrypted string to restore the original key
object. 'key_object' is a key (e.g., RSAKEY_SCHEMA, ED25519KEY_SCHEMA).
This function relies on the pyca_crypto_keys.py module to perform the
actual encryption.
Encrypted keys use AES-256-CTR-Mode, and passwords are strengthened with
PBKDF2-HMAC-SHA256 (100K iterations by default, but may be overridden in
'securesystemslib.settings.PBKDF2_ITERATIONS' by the user).
http://en.wikipedia.org/wiki/Advanced_Encryption_Standard
http://en.wikipedia.org/wiki/CTR_mode#Counter_.28CTR.29
https://en.wikipedia.org/wiki/PBKDF2
>>> ed25519_key = generate_ed25519_key()
>>> password = 'secret'
>>> encrypted_key = encrypt_key(ed25519_key, password).encode('utf-8')
>>> securesystemslib.formats.ENCRYPTEDKEY_SCHEMA.matches(encrypted_key)
True
<Arguments>
key_object:
A key (containing also the private key portion) of the form
'securesystemslib.formats.ANYKEY_SCHEMA'
password:
The password, or passphrase, to encrypt the private part of the RSA
key. 'password' is not used directly as the encryption key, a stronger
encryption key is derived from it.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if 'key_object' cannot be
encrypted.
<Side Effects>
None.
<Returns>
An encrypted string of the form:
'securesystemslib.formats.ENCRYPTEDKEY_SCHEMA'.
"""
# Does 'key_object' have the correct format?
# This check will ensure 'key_object' has the appropriate number
# of objects and object types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if the check fails.
securesystemslib.formats.ANYKEY_SCHEMA.check_match(key_object)
# Does 'password' have the correct format?
securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
# Encrypted string of 'key_object'. The encrypted string may be safely saved
# to a file and stored offline.
encrypted_key = None
# Generate an encrypted string of 'key_object' using AES-256-CTR-Mode, where
# 'password' is strengthened with PBKDF2-HMAC-SHA256.
encrypted_key = securesystemslib.pyca_crypto_keys.encrypt_key(key_object, password)
return encrypted_key | [
"def",
"encrypt_key",
"(",
"key_object",
",",
"password",
")",
":",
"# Does 'key_object' have the correct format?",
"# This check will ensure 'key_object' has the appropriate number",
"# of objects and object types, and that all dict keys are properly named.",
"# Raise 'securesystemslib.except... | 37.537313 | 26.701493 |
def _get_segments(self):
"""
Subclasses may override this method.
"""
points = list(self.points)
segments = [[]]
lastWasOffCurve = False
firstIsMove = points[0].type == "move"
for point in points:
segments[-1].append(point)
if point.type != "offcurve":
segments.append([])
lastWasOffCurve = point.type == "offcurve"
if len(segments[-1]) == 0:
del segments[-1]
if lastWasOffCurve and firstIsMove:
# ignore trailing off curves
del segments[-1]
if lastWasOffCurve and not firstIsMove:
segment = segments.pop(-1)
segment.extend(segments[0])
del segments[0]
segments.append(segment)
if not lastWasOffCurve and not firstIsMove:
segment = segments.pop(0)
segments.append(segment)
# wrap into segments
wrapped = []
for points in segments:
s = self.segmentClass()
s._setPoints(points)
self._setContourInSegment(s)
wrapped.append(s)
return wrapped | [
"def",
"_get_segments",
"(",
"self",
")",
":",
"points",
"=",
"list",
"(",
"self",
".",
"points",
")",
"segments",
"=",
"[",
"[",
"]",
"]",
"lastWasOffCurve",
"=",
"False",
"firstIsMove",
"=",
"points",
"[",
"0",
"]",
".",
"type",
"==",
"\"move\"",
"... | 33.529412 | 7.411765 |
def affects(self, reglist):
""" Returns if this instruction affects any of the registers
in reglist.
"""
if isinstance(reglist, str):
reglist = [reglist]
reglist = single_registers(reglist)
return any(x in reglist for x in self.destroys)
"def",
"affects",
"(",
"self",
",",
"reglist",
")",
":",
"if",
"isinstance",
"(",
"reglist",
",",
"str",
")",
":",
"reglist",
"=",
"[",
"reglist",
"]",
"reglist",
"=",
"single_registers",
"(",
"reglist",
")",
"return",
"len",
"(",
"[",
"x",
"for",
"x... | 30.1 | 15.6 |
def dqdv_cycle(cycle, splitter=True, **kwargs):
"""Convenience functions for creating dq-dv data from given capacity and
voltage cycle.
Returns a (voltage, incremental_capacity) tuple of numpy arrays.
Args:
cycle (pandas.DataFrame): the cycle data ('voltage', 'capacity',
'direction' (1 or -1)).
splitter (bool): insert a np.NaN row between charge and discharge.
Returns:
(voltage, incremental_capacity) as numpy arrays, with a np.NaN
separator between charge and discharge if splitter is True.
Example:
>>> cycle_df = my_data.get_cap(
>>> ... 1,
>>> ... categorical_column=True,
>>> ... method = "forth-and-forth"
>>> ... )
>>> voltage, incremental = ica.dqdv_cycle(cycle_df)
"""
c_first = cycle.loc[cycle["direction"] == -1]
c_last = cycle.loc[cycle["direction"] == 1]
converter = Converter(**kwargs)
converter.set_data(c_first["capacity"], c_first["voltage"])
converter.inspect_data()
converter.pre_process_data()
converter.increment_data()
converter.post_process_data()
voltage_first = converter.voltage_processed
incremental_capacity_first = converter.incremental_capacity
if splitter:
voltage_first = np.append(voltage_first, np.NaN)
incremental_capacity_first = np.append(incremental_capacity_first,
np.NaN)
converter = Converter(**kwargs)
converter.set_data(c_last["capacity"], c_last["voltage"])
converter.inspect_data()
converter.pre_process_data()
converter.increment_data()
converter.post_process_data()
voltage_last = converter.voltage_processed[::-1]
incremental_capacity_last = converter.incremental_capacity[::-1]
voltage = np.concatenate((voltage_first,
voltage_last))
incremental_capacity = np.concatenate((incremental_capacity_first,
incremental_capacity_last))
return voltage, incremental_capacity | [
"def",
"dqdv_cycle",
"(",
"cycle",
",",
"splitter",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"c_first",
"=",
"cycle",
".",
"loc",
"[",
"cycle",
"[",
"\"direction\"",
"]",
"==",
"-",
"1",
"]",
"c_last",
"=",
"cycle",
".",
"loc",
"[",
"cycle",... | 36.534483 | 19.551724 |
def _checkSuccessorReadyToRunMultiplePredecessors(self, jobGraph, jobNode, successorJobStoreID):
"""Handle the special cases of checking if a successor job is
ready to run when there are multiple predecessors"""
# See implementation note at the top of this file for discussion of multiple predecessors
logger.debug("Successor job: %s of job: %s has multiple "
"predecessors", jobNode, jobGraph)
# Get the successor job graph, which is cached
if successorJobStoreID not in self.toilState.jobsToBeScheduledWithMultiplePredecessors:
self.toilState.jobsToBeScheduledWithMultiplePredecessors[successorJobStoreID] = self.jobStore.load(successorJobStoreID)
successorJobGraph = self.toilState.jobsToBeScheduledWithMultiplePredecessors[successorJobStoreID]
# Add the jobGraph as a finished predecessor to the successor
successorJobGraph.predecessorsFinished.add(jobGraph.jobStoreID)
# If the successor is in the set of successors of failed jobs
if successorJobStoreID in self.toilState.failedSuccessors:
if not self._handledFailedSuccessor(jobNode, jobGraph, successorJobStoreID):
return False
# If the successor job's predecessors have all not all completed then
# ignore the jobGraph as is not yet ready to run
assert len(successorJobGraph.predecessorsFinished) <= successorJobGraph.predecessorNumber
if len(successorJobGraph.predecessorsFinished) < successorJobGraph.predecessorNumber:
return False
else:
# Remove the successor job from the cache
self.toilState.jobsToBeScheduledWithMultiplePredecessors.pop(successorJobStoreID)
return True | [
"def",
"_checkSuccessorReadyToRunMultiplePredecessors",
"(",
"self",
",",
"jobGraph",
",",
"jobNode",
",",
"successorJobStoreID",
")",
":",
"# See implementation note at the top of this file for discussion of multiple predecessors",
"logger",
".",
"debug",
"(",
"\"Successor job: %s... | 60.241379 | 34.551724 |
def _setup_serializers(self):
"""
Auto set the return serializer based on Accept headers
http://docs.webob.org/en/latest/reference.html#header-getters
Intersection of requested types and supported types tells us if we
can in fact respond in one of the request formats
"""
acceptable_offers = self.request.accept.acceptable_offers(self.response.supported_mime_types)
if len(acceptable_offers) > 0:
best_accept_match = acceptable_offers[0][0]
else:
best_accept_match = self.response.default_serializer.content_type()
self.logger.info("%s determined as best match for accept header: %s" % (
best_accept_match,
self.request.accept
))
# if content_type is not acceptable it will raise UnsupportedVocabulary
self.response.content_type = best_accept_match | [
"def",
"_setup_serializers",
"(",
"self",
")",
":",
"acceptable_offers",
"=",
"self",
".",
"request",
".",
"accept",
".",
"acceptable_offers",
"(",
"self",
".",
"response",
".",
"supported_mime_types",
")",
"if",
"len",
"(",
"acceptable_offers",
")",
">",
"0",... | 41.5 | 25.115385 |
def __extract_features(self):
"""!
@brief Extracts features from CF-tree cluster.
"""
self.__features = []
if len(self.__tree.leafes) == 1:
# parameters are too general, copy all entries
for entry in self.__tree.leafes[0].entries:
self.__features.append(entry)
else:
# copy all leaf clustering features
for node in self.__tree.leafes:
self.__features.append(node.feature)
"def",
"__extract_features",
"(",
"self",
")",
":",
"self",
".",
"__features",
"=",
"[",
"]",
"if",
"(",
"len",
"(",
"self",
".",
"__tree",
".",
"leafes",
")",
"==",
"1",
")",
":",
"# parameters are too general, copy all entries\r",
"for",
"entry",
"in",
"... | 31.294118 | 15.470588 |
def ber(tp, tn, fp, fn):
"""Balanced Error Rate [0, 1]
:param int tp: number of true positives
:param int tn: number of true negatives
:param int fp: number of false positives
:param int fn: number of false negatives
:rtype: float
"""
return (fp / float(tn + fp) + fn / float(fn + tp)) / 2 | [
"def",
"ber",
"(",
"tp",
",",
"tn",
",",
"fp",
",",
"fn",
")",
":",
"return",
"(",
"fp",
"/",
"float",
"(",
"tn",
"+",
"fp",
")",
"+",
"fn",
"/",
"float",
"(",
"fn",
"+",
"tp",
")",
")",
"/",
"2"
] | 31.8 | 10.6 |
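A worked check of the formula with hypothetical confusion-matrix counts: the false-positive rate is 10/(50+10) ≈ 0.167, the false-negative rate is 20/(20+40) ≈ 0.333, and their mean is 0.25:

```python
assert abs(ber(tp=40, tn=50, fp=10, fn=20) - 0.25) < 1e-9
```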
async def update_rooms(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId"), "timeZoneNum": "+01:00"}
data = await self.request("selectRoombyHome", payload)
rooms = data.get('roomInfo', [])
for _room in rooms:
_id = _room.get('roomId')
room = self.rooms.get(_id, Room())
room.room_id = _id
room.comfort_temp = _room.get("comfortTemp")
room.away_temp = _room.get("awayTemp")
room.sleep_temp = _room.get("sleepTemp")
room.name = _room.get("roomName")
room.current_mode = _room.get("currentMode")
room.heat_status = _room.get("heatStatus")
room.home_name = data.get("homeName")
room.avg_temp = _room.get("avgTemp")
self.rooms[_id] = room
payload = {"roomId": _room.get("roomId"), "timeZoneNum": "+01:00"}
room_device = await self.request("selectDevicebyRoom", payload)
if room_device is None:
continue
heater_info = room_device.get('deviceInfo', [])
for _heater in heater_info:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
heater.independent_device = False
heater.can_change_temp = _heater.get('canChangeTemp')
heater.name = _heater.get('deviceName')
heater.room = room
self.heaters[_id] = heater | [
"async",
"def",
"update_rooms",
"(",
"self",
")",
":",
"homes",
"=",
"await",
"self",
".",
"get_home_list",
"(",
")",
"for",
"home",
"in",
"homes",
":",
"payload",
"=",
"{",
"\"homeId\"",
":",
"home",
".",
"get",
"(",
"\"homeId\"",
")",
",",
"\"timeZon... | 47.25 | 15.222222 |
def _get_distset(tgt):
'''
Get the distribution string for use with rpmbuild and mock
'''
# Centos adds 'centos' string to rpm names, removing that to have
# consistent naming on Centos and Redhat, and allow for Amazon naming
tgtattrs = tgt.split('-')
if tgtattrs[0] == 'amzn':
distset = '--define "dist .{0}1"'.format(tgtattrs[0])
elif tgtattrs[1] in ['6', '7']:
distset = '--define "dist .el{0}"'.format(tgtattrs[1])
else:
distset = ''
return distset | [
"def",
"_get_distset",
"(",
"tgt",
")",
":",
"# Centos adds 'centos' string to rpm names, removing that to have",
"# consistent naming on Centos and Redhat, and allow for Amazon naming",
"tgtattrs",
"=",
"tgt",
".",
"split",
"(",
"'-'",
")",
"if",
"tgtattrs",
"[",
"0",
"]",
... | 33.533333 | 23.4 |
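A few hypothetical build targets and the dist macros the helper produces for them:

```python
assert _get_distset('amzn-6') == '--define "dist .amzn1"'
assert _get_distset('epel-7-x86_64') == '--define "dist .el7"'
assert _get_distset('fedora-29-x86_64') == ''  # no dist override needed
```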
def xdg_config_dirs():
"""Returns a list of paths taken from the XDG_CONFIG_DIRS
and XDG_CONFIG_HOME environment variables if they exist
"""
paths = []
if 'XDG_CONFIG_HOME' in os.environ:
paths.append(os.environ['XDG_CONFIG_HOME'])
if 'XDG_CONFIG_DIRS' in os.environ:
paths.extend(os.environ['XDG_CONFIG_DIRS'].split(':'))
else:
paths.append('/etc/xdg')
paths.append('/etc')
return paths | [
"def",
"xdg_config_dirs",
"(",
")",
":",
"paths",
"=",
"[",
"]",
"if",
"'XDG_CONFIG_HOME'",
"in",
"os",
".",
"environ",
":",
"paths",
".",
"append",
"(",
"os",
".",
"environ",
"[",
"'XDG_CONFIG_HOME'",
"]",
")",
"if",
"'XDG_CONFIG_DIRS'",
"in",
"os",
"."... | 33.538462 | 13.692308 |
def sort(args):
"""
%prog sort fastafile
Sort a list of sequences and output with sorted IDs, etc.
"""
p = OptionParser(sort.__doc__)
p.add_option("--sizes", default=False, action="store_true",
help="Sort by decreasing size [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
fastafile, = args
sortedfastafile = fastafile.rsplit(".", 1)[0] + ".sorted.fasta"
f = Fasta(fastafile, index=False)
fw = must_open(sortedfastafile, "w")
if opts.sizes:
# Sort by decreasing size
sortlist = sorted(f.itersizes(), key=lambda x: (-x[1], x[0]))
logging.debug("Sort by size: max: {0}, min: {1}".\
format(sortlist[0], sortlist[-1]))
sortlist = [x for x, s in sortlist]
else:
sortlist = sorted(f.iterkeys())
for key in sortlist:
rec = f[key]
SeqIO.write([rec], fw, "fasta")
logging.debug("Sorted file written to `{0}`.".format(sortedfastafile))
fw.close()
return sortedfastafile | [
"def",
"sort",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"sort",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--sizes\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Sort by decreasing size [... | 28.567568 | 20.513514 |
def bokeh_shot_chart(data, x="LOC_X", y="LOC_Y", fill_color="#1f77b4",
scatter_size=10, fill_alpha=0.4, line_alpha=0.4,
court_line_color='gray', court_line_width=1,
hover_tool=False, tooltips=None, **kwargs):
# TODO: Settings for hover tooltip
"""
Returns a figure with both FGA and basketball court lines drawn onto it.
This function expects data to be a ColumnDataSource with the x and y values
named "LOC_X" and "LOC_Y". Otherwise specify x and y.
Parameters
----------
data : DataFrame
The DataFrame that contains the shot chart data.
x, y : str, optional
The x and y coordinates of the shots taken.
fill_color : str, optional
The fill color of the shots. Can be a Hex value.
scatter_size : int, optional
The size of the dots for the scatter plot.
fill_alpha : float, optional
Alpha value for the shots. Must be a floating point value between 0
(transparent) to 1 (opaque).
line_alpha : float, optional
Alpha value for the outer lines of the plotted shots. Must be a
floating point value between 0 (transparent) to 1 (opaque).
court_line_color : str, optional
The color of the court lines. Can be a Hex value.
court_line_width : float, optional
The linewidth of the court lines in pixels.
hover_tool : boolean, optional
If ``True``, creates hover tooltip for the plot.
tooltips : List of tuples, optional
Provides the information for the hover tooltip.
Returns
-------
fig : Figure
The Figure object with the shot chart plotted on it.
"""
source = ColumnDataSource(data)
fig = figure(width=700, height=658, x_range=[-250, 250],
y_range=[422.5, -47.5], min_border=0, x_axis_type=None,
y_axis_type=None, outline_line_color="black", **kwargs)
fig.scatter(x, y, source=source, size=scatter_size, color=fill_color,
alpha=fill_alpha, line_alpha=line_alpha)
bokeh_draw_court(fig, line_color=court_line_color,
line_width=court_line_width)
if hover_tool:
hover = HoverTool(renderers=[fig.renderers[0]], tooltips=tooltips)
fig.add_tools(hover)
return fig | [
"def",
"bokeh_shot_chart",
"(",
"data",
",",
"x",
"=",
"\"LOC_X\"",
",",
"y",
"=",
"\"LOC_Y\"",
",",
"fill_color",
"=",
"\"#1f77b4\"",
",",
"scatter_size",
"=",
"10",
",",
"fill_alpha",
"=",
"0.4",
",",
"line_alpha",
"=",
"0.4",
",",
"court_line_color",
"=... | 37.262295 | 22.245902 |
def extract_paragraphs(xml_string):
"""Returns list of paragraphs in an NLM XML.
Parameters
----------
xml_string : str
String containing valid NLM XML.
Returns
-------
list of str
List of extracted paragraphs in an NLM XML
"""
tree = etree.fromstring(xml_string.encode('utf-8'))
paragraphs = []
# In NLM xml, all plaintext is within <p> tags, and is the only thing
# that can be contained in <p> tags. To handle the possibility of namespaces,
# use a regex to search for tags either of the form 'p' or '{<namespace>}p'
for element in tree.iter():
if isinstance(element.tag, basestring) and \
re.search('(^|})(p|title)$', element.tag) and element.text:  # alternation group, not a character class, so 'title' can match
paragraph = ' '.join(element.itertext())
paragraphs.append(paragraph)
return paragraphs | [
"def",
"extract_paragraphs",
"(",
"xml_string",
")",
":",
"tree",
"=",
"etree",
".",
"fromstring",
"(",
"xml_string",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"paragraphs",
"=",
"[",
"]",
"# In NLM xml, all plaintext is within <p> tags, and is the only thing",
"# that... | 33.4 | 20.72 |
def findSequencesOnDisk(cls, pattern, include_hidden=False, strictPadding=False):
"""
Yield the sequences found in the given directory.
Examples:
>>> findSequencesOnDisk('/path/to/files')
The `pattern` can also specify glob-like shell wildcards including the following:
* ``?`` - 1 wildcard character
* ``*`` - any number of wildcard characters
* ``{foo,bar}`` - either 'foo' or 'bar'
Exact frame ranges are not considered, and padding characters are converted to
wildcards (``#`` or ``@``)
Examples:
>>> findSequencesOnDisk('/path/to/files/image_stereo_{left,right}.#.jpg')
>>> findSequencesOnDisk('/path/to/files/imag?_*_{left,right}.@@@.jpg', strictPadding=True)
Args:
pattern (str): directory to scan, or pattern to filter in directory
include_hidden (bool): if true, show .hidden files as well
strictPadding (bool): if True, ignore files with padding length different from pattern
Returns:
list:
"""
# reserve some functions we're going to need quick access to
_not_hidden = lambda f: not f.startswith('.')
_match_pattern = None
_filter_padding = None
_join = os.path.join
seq = None
dirpath = pattern
# Support the pattern defining a filter for the files
# in the existing directory
if not os.path.isdir(pattern):
dirpath, filepat = os.path.split(pattern)
if not os.path.isdir(dirpath):
return []
# Start building a regex for filtering files
seq = cls(filepat)
patt = seq.basename().replace('.', r'\.')
if seq.padding():
patt += r'\d+'
if seq.extension():
patt += seq.extension()
# Convert braces groups into regex capture groups
view = bytearray(patt)
matches = re.finditer(r'{(.*?)(?:,(.*?))*}', patt)
for match in reversed(list(matches)):
i, j = match.span()
view[i:j] = '(%s)' % '|'.join([m.strip() for m in match.groups() if m is not None])  # a lone '{foo}' yields a None group
view = view.replace('*', '.*')
view = view.replace('?', '.')
view += '$'
try:
_match_pattern = re.compile(str(view)).match
except re.error:
msg = 'Invalid file pattern: {}'.format(filepat)
raise FileSeqException(msg)
if seq.padding() and strictPadding:
_filter_padding = functools.partial(cls._filterByPaddingNum, num=seq.zfill())
# Get just the immediate files under the dir.
# Avoids testing the os.listdir() for files as
# a second step.
ret = next(os.walk(dirpath), None)
files = ret[-1] if ret else []
# collapse some generators to get us the files that match our regex
if not include_hidden:
files = ifilter(_not_hidden, files)
# Filter by files that match the provided file pattern
if _match_pattern:
files = ifilter(_match_pattern, files)
# Filter by files that match the frame padding in the file pattern
if _filter_padding:
# returns a generator
files = _filter_padding(files)
# Ensure our dirpath ends with a path separator, so
# that we can control which sep is used during the
# os.path.join
sep = utils._getPathSep(dirpath)
if not dirpath.endswith(sep):
dirpath += sep
files = (_join(dirpath, f) for f in files)
files = list(files)
seqs = list(FileSequence.yield_sequences_in_list(files))
if _filter_padding and seq:
pad = cls.conformPadding(seq.padding())
# strict padding should preserve the original padding
# characters in the found sequences.
for s in seqs:
s.setPadding(pad)
return seqs | [
"def",
"findSequencesOnDisk",
"(",
"cls",
",",
"pattern",
",",
"include_hidden",
"=",
"False",
",",
"strictPadding",
"=",
"False",
")",
":",
"# reserve some functions we're going to need quick access to",
"_not_hidden",
"=",
"lambda",
"f",
":",
"not",
"f",
".",
"sta... | 36.550459 | 21.321101 |
def get_edge_string(self, i):
"""Return a string based on the bond order"""
order = self.orders[i]
if order == 0:
return Graph.get_edge_string(self, i)
else:
# pad with zeros to make sure that string sort is identical to number sort
return "%03i" % order | [
"def",
"get_edge_string",
"(",
"self",
",",
"i",
")",
":",
"order",
"=",
"self",
".",
"orders",
"[",
"i",
"]",
"if",
"order",
"==",
"0",
":",
"return",
"Graph",
".",
"get_edge_string",
"(",
"self",
",",
"i",
")",
"else",
":",
"# pad with zeros to make ... | 39.375 | 16 |
def _configure_logging(self, logger_dict=None):
"""
Configures the logging module with a given dictionary, which in most cases was loaded from a configuration
file.
If no dictionary is provided, it falls back to a default configuration.
See `Python docs
<https://docs.python.org/3.5/library/logging.config.html#logging.config.dictConfig>`_ for more information.
:param logger_dict: dictionary for logger.
"""
self.log.debug("Configure logging")
# Let's be sure, that for our log no handlers are registered anymore
for handler in self.log.handlers:
self.log.removeHandler(handler)
if logger_dict is None:
self.log.debug("No logger dictionary defined. Doing default logger configuration")
formatter = logging.Formatter("%(name)s - %(asctime)s - [%(levelname)s] - %(module)s - %(message)s")
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setLevel(logging.WARNING)
stream_handler.setFormatter(formatter)
self.log.addHandler(stream_handler)
self.log.setLevel(logging.WARNING)
else:
self.log.debug("Logger dictionary defined. Loading dictConfig for logging")
logging.config.dictConfig(logger_dict)
self.log.debug("dictConfig loaded") | [
"def",
"_configure_logging",
"(",
"self",
",",
"logger_dict",
"=",
"None",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Configure logging\"",
")",
"# Let's be sure, that for our log no handlers are registered anymore",
"for",
"handler",
"in",
"self",
".",
"log"... | 46.827586 | 25.310345 |
def unpack(self, column_name_prefix = "X", column_types=None, na_value=None, limit=None):
"""
Convert an SArray of list, array, or dict type to an SFrame with
multiple columns.
`unpack` expands an SArray using the values of each list/array/dict as
elements in a new SFrame of multiple columns. For example, an SArray of
lists each of length 4 will be expanded into an SFrame of 4 columns,
one for each list element. An SArray of lists/arrays of varying size
will be expand to a number of columns equal to the longest list/array.
An SArray of dictionaries will be expanded into as many columns as
there are keys.
When unpacking an SArray of list or array type, new columns are named:
`column_name_prefix`.0, `column_name_prefix`.1, etc. If unpacking a
column of dict type, unpacked columns are named
`column_name_prefix`.key1, `column_name_prefix`.key2, etc.
When unpacking an SArray of list or dictionary types, missing values in
the original element remain as missing values in the resultant columns.
If the `na_value` parameter is specified, all values equal to this
given value are also replaced with missing values. In an SArray of
array.array type, NaN is interpreted as a missing value.
:py:func:`turicreate.SFrame.pack_columns()` is the reverse effect of unpack
Parameters
----------
column_name_prefix: str, optional
If provided, unpacked column names would start with the given prefix.
column_types: list[type], optional
Column types for the unpacked columns. If not provided, column
types are automatically inferred from first 100 rows. Defaults to
None.
na_value: optional
Convert all values that are equal to `na_value` to
missing value if specified.
limit: list, optional
Limits the set of list/array/dict keys to unpack.
For list/array SArrays, 'limit' must contain integer indices.
For dict SArray, 'limit' must contain dictionary keys.
Returns
-------
out : SFrame
A new SFrame that contains all unpacked columns
Examples
--------
To unpack a dict SArray
>>> sa = SArray([{ 'word': 'a', 'count': 1},
... { 'word': 'cat', 'count': 2},
... { 'word': 'is', 'count': 3},
... { 'word': 'coming','count': 4}])
Normal case of unpacking SArray of type dict:
>>> sa.unpack(column_name_prefix=None)
Columns:
count int
word str
<BLANKLINE>
Rows: 4
<BLANKLINE>
Data:
+-------+--------+
| count | word |
+-------+--------+
| 1 | a |
| 2 | cat |
| 3 | is |
| 4 | coming |
+-------+--------+
[4 rows x 2 columns]
<BLANKLINE>
Unpack only keys with 'word':
>>> sa.unpack(limit=['word'])
Columns:
X.word str
<BLANKLINE>
Rows: 4
<BLANKLINE>
Data:
+--------+
| X.word |
+--------+
| a |
| cat |
| is |
| coming |
+--------+
[4 rows x 1 columns]
<BLANKLINE>
>>> sa2 = SArray([
... [1, 0, 1],
... [1, 1, 1],
... [0, 1]])
Convert all zeros to missing values:
>>> sa2.unpack(column_types=[int, int, int], na_value=0)
Columns:
X.0 int
X.1 int
X.2 int
<BLANKLINE>
Rows: 3
<BLANKLINE>
Data:
+------+------+------+
| X.0 | X.1 | X.2 |
+------+------+------+
| 1 | None | 1 |
| 1 | 1 | 1 |
| None | 1 | None |
+------+------+------+
[3 rows x 3 columns]
<BLANKLINE>
"""
from .sframe import SFrame as _SFrame
if self.dtype not in [dict, array.array, list]:
raise TypeError("Only SArray of dict/list/array type supports unpack")
if column_name_prefix is None:
column_name_prefix = ""
if not(isinstance(column_name_prefix, six.string_types)):
raise TypeError("'column_name_prefix' must be a string")
# validate 'limit'
if limit is not None:
if (not _is_non_string_iterable(limit)):
raise TypeError("'limit' must be a list")
name_types = set([type(i) for i in limit])
if (len(name_types) != 1):
raise TypeError("'limit' contains values that are different types")
# limit value should be numeric if unpacking sarray.array value
if (self.dtype != dict) and (name_types.pop() != int):
raise TypeError("'limit' must contain integer values.")
if len(set(limit)) != len(limit):
raise ValueError("'limit' contains duplicate values")
if (column_types is not None):
if not _is_non_string_iterable(column_types):
raise TypeError("column_types must be a list")
for column_type in column_types:
if (column_type not in (int, float, str, list, dict, array.array)):
raise TypeError("column_types contains unsupported types. Supported types are ['float', 'int', 'list', 'dict', 'str', 'array.array']")
if limit is not None:
if len(limit) != len(column_types):
raise ValueError("limit and column_types do not have the same length")
elif self.dtype == dict:
raise ValueError("if 'column_types' is given, 'limit' has to be provided to unpack dict type.")
else:
limit = range(len(column_types))
else:
head_rows = self.head(100).dropna()
lengths = [len(i) for i in head_rows]
if len(lengths) == 0 or max(lengths) == 0:
raise RuntimeError("Cannot infer number of items from the SArray, SArray may be empty. please explicitly provide column types")
# infer column types for dict type at server side, for list and array, infer from client side
if self.dtype != dict:
length = max(lengths)
if limit is None:
limit = range(length)
else:
# adjust the length
length = len(limit)
if self.dtype == array.array:
column_types = [float for i in range(length)]
else:
column_types = list()
for i in limit:
t = [(x[i] if ((x is not None) and len(x) > i) else None) for x in head_rows]
column_types.append(infer_type_of_list(t))
with cython_context():
if (self.dtype == dict and column_types is None):
limit = limit if limit is not None else []
return _SFrame(_proxy=self.__proxy__.unpack_dict(column_name_prefix.encode('utf-8'), limit, na_value))
else:
return _SFrame(_proxy=self.__proxy__.unpack(column_name_prefix.encode('utf-8'), limit, column_types, na_value)) | [
"def",
"unpack",
"(",
"self",
",",
"column_name_prefix",
"=",
"\"X\"",
",",
"column_types",
"=",
"None",
",",
"na_value",
"=",
"None",
",",
"limit",
"=",
"None",
")",
":",
"from",
".",
"sframe",
"import",
"SFrame",
"as",
"_SFrame",
"if",
"self",
".",
"... | 37.156566 | 24.419192 |
def write_event(self, event):
"""Writes an event proto to disk.
This method is threadsafe with respect to invocations of itself.
Args:
event: The event proto.
Raises:
IOError: If writing the event proto to disk fails.
"""
self._lock.acquire()
try:
self._events_writer.WriteEvent(event)
self._event_count += 1
if self._always_flush:
# We flush on every event within the integration test.
self._events_writer.Flush()
if self._event_count == self._check_this_often:
# Every so often, we check whether the size of the file is too big.
self._event_count = 0
# Flush to get an accurate size check.
self._events_writer.Flush()
file_path = os.path.join(self._events_directory,
self.get_current_file_name())
if not tf.io.gfile.exists(file_path):
# The events file does not exist. Perhaps the user had manually
# deleted it after training began. Create a new one.
self._events_writer.Close()
self._events_writer = self._create_events_writer(
self._events_directory)
elif tf.io.gfile.stat(file_path).length > self._single_file_size_cap_bytes:
# The current events file has gotten too big. Close the previous
# events writer. Make a new one.
self._events_writer.Close()
self._events_writer = self._create_events_writer(
self._events_directory)
except IOError as err:
logger.error(
"Writing to %s failed: %s", self.get_current_file_name(), err)
finally:
# release the lock even if an unexpected exception escapes the try block
self._lock.release()
"def",
"write_event",
"(",
"self",
",",
"event",
")",
":",
"self",
".",
"_lock",
".",
"acquire",
"(",
")",
"try",
":",
"self",
".",
"_events_writer",
".",
"WriteEvent",
"(",
"event",
")",
"self",
".",
"_event_count",
"+=",
"1",
"if",
"self",
".",
"_a... | 36.727273 | 19.272727 |
def get_active_tasks(self):
"""Return a list of UUIDs of active tasks."""
current_tasks = self.celery.control.inspect().active() or dict()
return [
task.get('id') for host in current_tasks.values() for task in host] | [
"def",
"get_active_tasks",
"(",
"self",
")",
":",
"current_tasks",
"=",
"self",
".",
"celery",
".",
"control",
".",
"inspect",
"(",
")",
".",
"active",
"(",
")",
"or",
"dict",
"(",
")",
"return",
"[",
"task",
".",
"get",
"(",
"'id'",
")",
"for",
"h... | 49.4 | 21.6 |
def url_for(self, attr=None, filter_value=None,
service_type=None, endpoint_type="publicURL",
service_name=None, volume_service_name=None):
"""Fetches the public URL from the given service for
a particular endpoint attribute. If none given, returns
the first. See tests for sample service catalog."""
matching_endpoints = []
# We don't always get a service catalog back ...
if "serviceCatalog" not in self.catalog["access"]:
return None
# Full catalog ...
catalog = self.catalog["access"]["serviceCatalog"]
for service in catalog:
if service.get("type") != service_type:
continue
endpoints = service["endpoints"]
for endpoint in endpoints:
if not filter_value or endpoint.get(attr) == filter_value:
endpoint["serviceName"] = service.get("name")
matching_endpoints.append(endpoint)
if not matching_endpoints:
raise exc.EndpointNotFound()
elif len(matching_endpoints) > 1:
raise exc.AmbiguousEndpoints(endpoints=matching_endpoints)
else:
return matching_endpoints[0][endpoint_type] | [
"def",
"url_for",
"(",
"self",
",",
"attr",
"=",
"None",
",",
"filter_value",
"=",
"None",
",",
"service_type",
"=",
"None",
",",
"endpoint_type",
"=",
"\"publicURL\"",
",",
"service_name",
"=",
"None",
",",
"volume_service_name",
"=",
"None",
")",
":",
"m... | 43.928571 | 15.392857 |
def fit_model(y, x, yMaxLag, xMaxLag, includesOriginalX=True, noIntercept=False, sc=None):
"""
Fit an autoregressive model with additional exogenous variables. The model predicts a value
at time t of a dependent variable, Y, as a function of previous values of Y, and a combination
of previous values of exogenous regressors X_i, and current values of exogenous regressors X_i.
This is a generalization of an AR model, which is simply an ARX with no exogenous regressors.
The fitting procedure here is the same, using least squares. Note that all lags up to the
maxlag are included. In the case of the dependent variable the max lag is 'yMaxLag', while
for the exogenous variables the max lag is 'xMaxLag', with which each column in the original
matrix provided is lagged accordingly.
Parameters
----------
y:
the dependent variable, time series as a Numpy array
x:
a matrix of exogenous variables as a Numpy array
yMaxLag:
the maximum lag order for the dependent variable
xMaxLag:
the maximum lag order for exogenous variables
includesOriginalX:
a boolean flag indicating if the non-lagged exogenous variables should
be included. Default is true
noIntercept:
a boolean flag indicating if the intercept should be dropped. Default is
false
Returns an ARXModel, which is an autoregressive model with exogenous variables.
"""
assert sc is not None, "Missing SparkContext"
jvm = sc._jvm
jmodel = jvm.com.cloudera.sparkts.models.AutoregressionX.fitModel(_nparray2breezevector(sc, y.toArray()), _nparray2breezematrix(sc, x.toArray()), yMaxLag, xMaxLag, includesOriginalX, noIntercept)
return ARXModel(jmodel=jmodel, sc=sc) | [
"def",
"fit_model",
"(",
"y",
",",
"x",
",",
"yMaxLag",
",",
"xMaxLag",
",",
"includesOriginalX",
"=",
"True",
",",
"noIntercept",
"=",
"False",
",",
"sc",
"=",
"None",
")",
":",
"assert",
"sc",
"!=",
"None",
",",
"\"Missing SparkContext\"",
"jvm",
"=",
... | 50.885714 | 34.028571 |
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(ExampleCollector, self).get_default_config()
config.update({
'path': 'example'
})
return config | [
"def",
"get_default_config",
"(",
"self",
")",
":",
"config",
"=",
"super",
"(",
"ExampleCollector",
",",
"self",
")",
".",
"get_default_config",
"(",
")",
"config",
".",
"update",
"(",
"{",
"'path'",
":",
"'example'",
"}",
")",
"return",
"config"
] | 27.888889 | 13 |
def setCurrentProfile(self, profile):
"""
Sets the current profile to the given profile.
:param profile | <XViewProfile>
"""
try:
index = self._profiles.index(profile)
except ValueError:
index = -1
self._profileCombo.setCurrentIndex(index) | [
"def",
"setCurrentProfile",
"(",
"self",
",",
"profile",
")",
":",
"try",
":",
"index",
"=",
"self",
".",
"_profiles",
".",
"index",
"(",
"profile",
")",
"except",
"ValueError",
":",
"index",
"=",
"-",
"1",
"self",
".",
"_profileCombo",
".",
"setCurrentI... | 27.75 | 13.75 |
def __get_WIOD_SEA_extension(root_path, year, data_sheet='DATA'):
""" Utility function to get the extension data from the SEA file in WIOD
This function is based on the structure in the WIOD_SEA_July14 file.
Missing values are set to zero.
The function works if the SEA file is either in path or in a subfolder
named 'SEA'.
Parameters
----------
root_path : string
Path to the WIOD data or the path with the SEA data.
year : str or int
Year to return for the extension
data_sheet : string, optional
Worksheet with the SEA data in the excel file
Returns
-------
SEA data as extension for the WIOD MRIO
"""
sea_ext = '.xlsx'
sea_start = 'WIOD_SEA'
_SEA_folder = os.path.join(root_path, 'SEA')
if not os.path.exists(_SEA_folder):
_SEA_folder = root_path
sea_folder_content = [ff for ff in os.listdir(_SEA_folder)
if os.path.splitext(ff)[-1] == sea_ext and
ff[:8] == sea_start]
if sea_folder_content:
# read data
sea_file = os.path.join(_SEA_folder, sorted(sea_folder_content)[0])
df_sea = pd.read_excel(sea_file,
sheet_name=data_sheet,
header=0,
index_col=[0, 1, 2, 3])
# fix years
ic_sea = df_sea.columns.tolist()
ic_sea = [yystr.lstrip('_') for yystr in ic_sea]
df_sea.columns = ic_sea
try:
ds_sea = df_sea[str(year)]
except KeyError:
warnings.warn(
'SEA extension does not include data for the '
'year {} - SEA-Extension not included'.format(year),
ParserWarning)
return None, None
# get useful data (employment)
mt_sea = ['EMP', 'EMPE', 'H_EMP', 'H_EMPE']
ds_use_sea = pd.concat(
[ds_sea.xs(key=vari, level='Variable', drop_level=False)
for vari in mt_sea])
ds_use_sea.drop(labels='TOT', level='Code', inplace=True)
ds_use_sea.reset_index('Description', drop=True, inplace=True)
# RoW not included in SEA but needed to get it consistent for
# all countries. Just add a dummy with 0 for all accounts.
if 'RoW' not in ds_use_sea.index.get_level_values('Country'):
ds_RoW = ds_use_sea.xs('USA',
level='Country', drop_level=False)
ds_RoW.loc[:] = 0  # .loc replaces the long-deprecated .ix indexer
df_RoW = ds_RoW.reset_index()
df_RoW['Country'] = 'RoW'
ds_use_sea = pd.concat(
[ds_use_sea.reset_index(), df_RoW]).set_index(
['Country', 'Code', 'Variable'])
ds_use_sea.fillna(value=0, inplace=True)
df_use_sea = ds_use_sea.unstack(level=['Country', 'Code'])[str(year)]
df_use_sea.index.names = IDX_NAMES['VA_row_single']
df_use_sea.columns.names = IDX_NAMES['F_col']
df_use_sea = df_use_sea.astype('float')
df_unit = pd.DataFrame(
data=[ # this data must be in the same order as mt_sea
'thousand persons',
'thousand persons',
'mill hours',
'mill hours',
],
columns=['unit'],
index=df_use_sea.index)
return df_use_sea, df_unit
else:
warnings.warn(
'SEA extension raw data file not found - '
'SEA-Extension not included', ParserWarning)
return None, None | [
"def",
"__get_WIOD_SEA_extension",
"(",
"root_path",
",",
"year",
",",
"data_sheet",
"=",
"'DATA'",
")",
":",
"sea_ext",
"=",
"'.xlsx'",
"sea_start",
"=",
"'WIOD_SEA'",
"_SEA_folder",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_path",
",",
"'SEA'",
")",
... | 36.459184 | 18.959184 |
def send_notification(self, method, args, kwargs):
"""Send a notification."""
msg = dumps([1, method, args, kwargs])
self.send(msg) | [
"def",
"send_notification",
"(",
"self",
",",
"method",
",",
"args",
",",
"kwargs",
")",
":",
"msg",
"=",
"dumps",
"(",
"[",
"1",
",",
"method",
",",
"args",
",",
"kwargs",
"]",
")",
"self",
".",
"send",
"(",
"msg",
")"
] | 38 | 8.5 |
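A sketch of the notification frame built above, assuming `dumps` is JSON serialization (the original may equally use msgpack); the leading 1 tags the message as a notification:

import json

def dumps(payload):
    # Assumed serializer; swap in msgpack.packb or similar if that is what
    # the original transport actually uses.
    return json.dumps(payload)

msg = dumps([1, "status_update", ["job-42"], {"state": "done"}])
print(msg)  # [1, "status_update", ["job-42"], {"state": "done"}]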
def getParams(self):
""" get params """
rv = np.array([])
        if self.n_terms > 0:
rv = np.concatenate([np.reshape(self.B[term_i],self.B[term_i].size, order='F') for term_i in range(self.n_terms)])
return rv | [
"def",
"getParams",
"(",
"self",
")",
":",
"rv",
"=",
"np",
".",
"array",
"(",
"[",
"]",
")",
"if",
"self",
".",
"n_terms",
">",
"0",
":",
"rv",
"=",
"np",
".",
"concatenate",
"(",
"[",
"np",
".",
"reshape",
"(",
"self",
".",
"B",
"[",
"term_... | 40 | 26.333333 |
def is_protected(self):
"""
Determine if the function is protected using a check on msg.sender
Only detects if msg.sender is directly used in a condition
        For example, it won't work for:
address a = msg.sender
require(a == owner)
Returns
(bool)
"""
if self.is_constructor:
return True
conditional_vars = self.all_conditional_solidity_variables_read(include_loop=False)
args_vars = self.all_solidity_variables_used_as_args()
return SolidityVariableComposed('msg.sender') in conditional_vars + args_vars | [
"def",
"is_protected",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_constructor",
":",
"return",
"True",
"conditional_vars",
"=",
"self",
".",
"all_conditional_solidity_variables_read",
"(",
"include_loop",
"=",
"False",
")",
"args_vars",
"=",
"self",
".",
"all... | 37.235294 | 21.470588 |
def mtf_image_transformer_cifar_4x():
"""Data parallel CIFAR parameters."""
hparams = mtf_image_transformer_base_cifar()
hparams.mesh_shape = "batch:32"
hparams.layout = "batch:batch"
hparams.batch_size = 128
return hparams | [
"def",
"mtf_image_transformer_cifar_4x",
"(",
")",
":",
"hparams",
"=",
"mtf_image_transformer_base_cifar",
"(",
")",
"hparams",
".",
"mesh_shape",
"=",
"\"batch:32\"",
"hparams",
".",
"layout",
"=",
"\"batch:batch\"",
"hparams",
".",
"batch_size",
"=",
"128",
"retu... | 32.714286 | 8.857143 |
def registered(self, driver, frameworkId, masterInfo):
"""
Invoked when the scheduler successfully registers with a Mesos master
"""
log.debug("Registered with framework ID %s", frameworkId.value)
# Save the framework ID
self.frameworkId = frameworkId.value | [
"def",
"registered",
"(",
"self",
",",
"driver",
",",
"frameworkId",
",",
"masterInfo",
")",
":",
"log",
".",
"debug",
"(",
"\"Registered with framework ID %s\"",
",",
"frameworkId",
".",
"value",
")",
"# Save the framework ID",
"self",
".",
"frameworkId",
"=",
... | 42.714286 | 13.571429 |
def _convert_to_var(self, graph, var_res):
"""
        Create tf.Variables from a dictionary of numpy arrays
        var_res: dictionary of numpy arrays whose keys name the resulting variables
"""
with graph.as_default():
var = {}
for key, value in var_res.items():
if value is not None:
var[key] = tf.Variable(value, name="tf_%s" % key)
else:
var[key] = None
return var | [
"def",
"_convert_to_var",
"(",
"self",
",",
"graph",
",",
"var_res",
")",
":",
"with",
"graph",
".",
"as_default",
"(",
")",
":",
"var",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"var_res",
".",
"items",
"(",
")",
":",
"if",
"value",
"is",
... | 34.285714 | 15.142857 |
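A self-contained sketch of the same conversion, assuming a TF1-style graph-mode API (names such as 'weights' and 'bias' are illustrative):

import numpy as np
import tensorflow as tf

graph = tf.Graph()
var_res = {'weights': np.ones((2, 2), dtype=np.float32), 'bias': None}
with graph.as_default():
    var = {}
    for key, value in var_res.items():
        # None entries pass through unchanged, as in the method above.
        var[key] = tf.Variable(value, name="tf_%s" % key) if value is not None else None
print(var['weights'])  # <tf.Variable 'tf_weights:0' shape=(2, 2) dtype=float32>
print(var['bias'])     # None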
def get_network_instances(self, name=""):
""" get_network_instances implementation for NX-OS """
# command 'show vrf detail' returns all VRFs with detailed information
# format: list of dictionaries with keys such as 'vrf_name' and 'rd'
command = "show vrf detail"
vrf_table_raw = self._get_command_table(command, "TABLE_vrf", "ROW_vrf")
# command 'show vrf interface' returns all interfaces including their assigned VRF
# format: list of dictionaries with keys 'if_name', 'vrf_name', 'vrf_id' and 'soo'
command = "show vrf interface"
intf_table_raw = self._get_command_table(command, "TABLE_if", "ROW_if")
# create a dictionary with key = 'vrf_name' and value = list of interfaces
vrf_intfs = defaultdict(list)
for intf in intf_table_raw:
vrf_intfs[intf["vrf_name"]].append(py23_compat.text_type(intf["if_name"]))
vrfs = {}
for vrf in vrf_table_raw:
vrf_name = py23_compat.text_type(vrf.get("vrf_name"))
vrfs[vrf_name] = {}
vrfs[vrf_name]["name"] = vrf_name
# differentiate between VRF type 'DEFAULT_INSTANCE' and 'L3VRF'
if vrf_name == "default":
vrfs[vrf_name]["type"] = "DEFAULT_INSTANCE"
else:
vrfs[vrf_name]["type"] = "L3VRF"
vrfs[vrf_name]["state"] = {
"route_distinguisher": py23_compat.text_type(vrf.get("rd"))
}
# convert list of interfaces (vrf_intfs[vrf_name]) to expected format
# format = dict with key = interface name and empty values
vrfs[vrf_name]["interfaces"] = {}
vrfs[vrf_name]["interfaces"]["interface"] = dict.fromkeys(
vrf_intfs[vrf_name], {}
)
# if name of a specific VRF was passed as an argument
# only return results for this particular VRF
if name:
if name in vrfs.keys():
return {py23_compat.text_type(name): vrfs[name]}
else:
return {}
# else return results for all VRFs
else:
return vrfs | [
"def",
"get_network_instances",
"(",
"self",
",",
"name",
"=",
"\"\"",
")",
":",
"# command 'show vrf detail' returns all VRFs with detailed information",
"# format: list of dictionaries with keys such as 'vrf_name' and 'rd'",
"command",
"=",
"\"show vrf detail\"",
"vrf_table_raw",
"... | 41.764706 | 23.568627 |
def assignParameters(self,**kwds):
'''
Assign an arbitrary number of attributes to this agent.
Parameters
----------
**kwds : keyword arguments
Any number of keyword arguments of the form key=value. Each value
            will be assigned to the attribute of that name on self.
Returns
-------
none
'''
for key in kwds:
setattr(self,key,kwds[key]) | [
"def",
"assignParameters",
"(",
"self",
",",
"*",
"*",
"kwds",
")",
":",
"for",
"key",
"in",
"kwds",
":",
"setattr",
"(",
"self",
",",
"key",
",",
"kwds",
"[",
"key",
"]",
")"
] | 27 | 23.125 |
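A self-contained sketch of the behaviour (the parameter names below are illustrative):

class Agent(object):
    def assignParameters(self, **kwds):
        for key in kwds:
            setattr(self, key, kwds[key])

a = Agent()
a.assignParameters(DiscFac=0.96, CRRA=2.0)
print(a.DiscFac, a.CRRA)  # 0.96 2.0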
def forward_moves(self, position):
"""
Finds possible moves one step and two steps in front
of Pawn.
        :type position: Board
:rtype: list
"""
if position.is_square_empty(self.square_in_front(self.location)):
"""
If square in front is empty add the move
"""
if self.would_move_be_promotion():
for move in self.create_promotion_moves(notation_const.PROMOTE):
yield move
else:
yield self.create_move(end_loc=self.square_in_front(self.location),
status=notation_const.MOVEMENT)
if self.on_home_row() and \
position.is_square_empty(self.two_squares_in_front(self.location)):
"""
If pawn is on home row and two squares in front of the pawn is empty
add the move
"""
yield self.create_move(
end_loc=self.square_in_front(self.square_in_front(self.location)),
status=notation_const.MOVEMENT
) | [
"def",
"forward_moves",
"(",
"self",
",",
"position",
")",
":",
"if",
"position",
".",
"is_square_empty",
"(",
"self",
".",
"square_in_front",
"(",
"self",
".",
"location",
")",
")",
":",
"\"\"\"\n If square in front is empty add the move\n \"\"\""... | 39 | 18.655172 |
def sync(self, *buckets):
"""Sync either a list of buckets or the entire connection.
Force all API calls to S3 and populate the database with the current
state of S3.
:param \*string \*buckets: Buckets to sync
"""
if buckets:
for _bucket in buckets:
for key in mimicdb.backend.smembers(tpl.bucket % _bucket):
mimicdb.backend.delete(tpl.key % (_bucket, key))
mimicdb.backend.delete(tpl.bucket % _bucket)
bucket = self.get_bucket(_bucket, force=True)
for key in bucket.list(force=True):
mimicdb.backend.sadd(tpl.bucket % bucket.name, key.name)
mimicdb.backend.hmset(tpl.key % (bucket.name, key.name), dict(size=key.size, md5=key.etag.strip('"')))
else:
for bucket in mimicdb.backend.smembers(tpl.connection):
for key in mimicdb.backend.smembers(tpl.bucket % bucket):
mimicdb.backend.delete(tpl.key % (bucket, key))
mimicdb.backend.delete(tpl.bucket % bucket)
for bucket in self.get_all_buckets(force=True):
for key in bucket.list(force=True):
mimicdb.backend.sadd(tpl.bucket % bucket.name, key.name)
mimicdb.backend.hmset(tpl.key % (bucket.name, key.name), dict(size=key.size, md5=key.etag.strip('"'))) | [
"def",
"sync",
"(",
"self",
",",
"*",
"buckets",
")",
":",
"if",
"buckets",
":",
"for",
"_bucket",
"in",
"buckets",
":",
"for",
"key",
"in",
"mimicdb",
".",
"backend",
".",
"smembers",
"(",
"tpl",
".",
"bucket",
"%",
"_bucket",
")",
":",
"mimicdb",
... | 45.193548 | 29.032258 |
def _quote(self, value, multiline=True):
"""
Return a safely quoted version of a value.
Raise a ConfigObjError if the value cannot be safely quoted.
If multiline is ``True`` (default) then use triple quotes
if necessary.
* Don't quote values that don't need it.
* Recursively quote members of a list and return a comma joined list.
* Multiline is ``False`` for lists.
* Obey list syntax for empty and single member lists.
If ``list_values=False`` then the value is only quoted if it contains
a ``\\n`` (is multiline) or '#'.
If ``write_empty_values`` is set, and the value is an empty string, it
won't be quoted.
"""
if multiline and self.write_empty_values and value == '':
# Only if multiline is set, so that it is used for values not
# keys, and not values that are part of a list
return ''
if multiline and isinstance(value, (list, tuple)):
if not value:
return ','
elif len(value) == 1:
return self._quote(value[0], multiline=False) + ','
return ', '.join([self._quote(val, multiline=False)
for val in value])
if not isinstance(value, string_types):
if self.stringify:
value = str(value)
else:
raise TypeError('Value "%s" is not a string.' % value)
if not value:
return '""'
no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value
need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value ))
hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value)
check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote
if check_for_single:
if not self.list_values:
# we don't quote if ``list_values=False``
quot = noquot
# for normal values either single or double quotes will do
elif '\n' in value:
# will only happen if multiline is off - e.g. '\n' in key
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
elif ((value[0] not in wspace_plus) and
(value[-1] not in wspace_plus) and
(',' not in value)):
quot = noquot
else:
quot = self._get_single_quote(value)
else:
# if value has '\n' or "'" *and* '"', it will need triple quotes
quot = self._get_triple_quote(value)
if quot == noquot and '#' in value and self.list_values:
quot = self._get_single_quote(value)
return quot % value | [
"def",
"_quote",
"(",
"self",
",",
"value",
",",
"multiline",
"=",
"True",
")",
":",
"if",
"multiline",
"and",
"self",
".",
"write_empty_values",
"and",
"value",
"==",
"''",
":",
"# Only if multiline is set, so that it is used for values not",
"# keys, and not values ... | 41.731343 | 22.925373 |
def create_response(self, status=201):
"""Generate a Response object for a POST request. By default, the newly created
object will be passed to the specified ResponseHandler and will be serialized
as the response body.
"""
self.response = self.get_response_handler()
self.response.process(self.obj)
return self._response(self.response.get_response_data(), status=status) | [
"def",
"create_response",
"(",
"self",
",",
"status",
"=",
"201",
")",
":",
"self",
".",
"response",
"=",
"self",
".",
"get_response_handler",
"(",
")",
"self",
".",
"response",
".",
"process",
"(",
"self",
".",
"obj",
")",
"return",
"self",
".",
"_res... | 46.666667 | 16.555556 |
def _read_projections(folder, indices):
"""Read mayo projections from a folder."""
datasets = []
# Get the relevant file names
file_names = sorted([f for f in os.listdir(folder) if f.endswith(".dcm")])
if len(file_names) == 0:
raise ValueError('No DICOM files found in {}'.format(folder))
file_names = file_names[indices]
data_array = None
for i, file_name in enumerate(tqdm.tqdm(file_names,
'Loading projection data')):
# read the file
dataset = dicom.read_file(folder + '/' + file_name)
# Get some required data
rows = dataset.NumberofDetectorRows
cols = dataset.NumberofDetectorColumns
hu_factor = dataset.HUCalibrationFactor
rescale_intercept = dataset.RescaleIntercept
rescale_slope = dataset.RescaleSlope
# Load the array as bytes
proj_array = np.array(np.frombuffer(dataset.PixelData, 'H'),
dtype='float32')
proj_array = proj_array.reshape([rows, cols], order='F').T
# Rescale array
proj_array *= rescale_slope
proj_array += rescale_intercept
proj_array /= hu_factor
# Store results
if data_array is None:
# We need to load the first dataset before we know the shape
data_array = np.empty((len(file_names), cols, rows),
dtype='float32')
data_array[i] = proj_array[:, ::-1]
datasets.append(dataset)
return datasets, data_array | [
"def",
"_read_projections",
"(",
"folder",
",",
"indices",
")",
":",
"datasets",
"=",
"[",
"]",
"# Get the relevant file names",
"file_names",
"=",
"sorted",
"(",
"[",
"f",
"for",
"f",
"in",
"os",
".",
"listdir",
"(",
"folder",
")",
"if",
"f",
".",
"ends... | 33.173913 | 19.73913 |
def extract(cls, keystr):
""" for #{key} returns key """
regex = r'#{\s*(%s)\s*}' % cls.ALLOWED_KEY
return re.match(regex, keystr).group(1) | [
"def",
"extract",
"(",
"cls",
",",
"keystr",
")",
":",
"regex",
"=",
"r'#{\\s*(%s)\\s*}'",
"%",
"cls",
".",
"ALLOWED_KEY",
"return",
"re",
".",
"match",
"(",
"regex",
",",
"keystr",
")",
".",
"group",
"(",
"1",
")"
] | 40 | 8 |
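A runnable sketch of the same extraction; `ALLOWED_KEY` below is an assumption, since the class attribute is not shown:

import re

ALLOWED_KEY = r'[A-Za-z_][A-Za-z0-9_]*'  # assumed; the real class defines this
regex = r'#{\s*(%s)\s*}' % ALLOWED_KEY
print(re.match(regex, '#{ user_name }').group(1))  # user_name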
def key_func(*keys, **kwargs):
"""Creates a "key function" based on given keys.
Resulting function will perform lookup using specified keys, in order,
on the object passed to it as an argument.
For example, ``key_func('a', 'b')(foo)`` is equivalent to ``foo['a']['b']``.
:param keys: Lookup keys
:param default: Optional keyword argument specifying default value
that will be returned when some lookup key is not present
:return: Unary key function
"""
ensure_argcount(keys, min_=1)
ensure_keyword_args(kwargs, optional=('default',))
keys = list(map(ensure_string, keys))
if 'default' in kwargs:
default = kwargs['default']
def getitems(obj):
for key in keys:
try:
obj = obj[key]
except KeyError:
return default
return obj
else:
if len(keys) == 1:
getitems = operator.itemgetter(keys[0])
else:
def getitems(obj):
for key in keys:
obj = obj[key]
return obj
return getitems | [
"def",
"key_func",
"(",
"*",
"keys",
",",
"*",
"*",
"kwargs",
")",
":",
"ensure_argcount",
"(",
"keys",
",",
"min_",
"=",
"1",
")",
"ensure_keyword_args",
"(",
"kwargs",
",",
"optional",
"=",
"(",
"'default'",
",",
")",
")",
"keys",
"=",
"list",
"(",... | 30.297297 | 18.486486 |
def folderitem(self, obj, item, index):
"""Service triggered each time an item is iterated in folderitems.
The use of this service prevents the extra-loops in child objects.
:obj: the instance of the class to be foldered
:item: dict containing the properties of the object to be used by
the template
:index: current index of the item
"""
cat = obj.getCategoryTitle()
cat_order = self.an_cats_order.get(cat)
if self.do_cats:
# category groups entries
item["category"] = cat
if (cat, cat_order) not in self.categories:
self.categories.append((cat, cat_order))
# Category
category = obj.getCategory()
if category:
title = category.Title()
url = category.absolute_url()
item["Category"] = title
item["replace"]["Category"] = get_link(url, value=title)
# Calculation
calculation = obj.getCalculation()
if calculation:
title = calculation.Title()
url = calculation.absolute_url()
item["Calculation"] = title
item["replace"]["Calculation"] = get_link(url, value=title)
# Methods
methods = obj.getMethods()
if methods:
links = map(
lambda m: get_link(
m.absolute_url(), value=m.Title(), css_class="link"),
methods)
item["replace"]["Methods"] = ", ".join(links)
# Max time allowed
maxtime = obj.MaxTimeAllowed
if maxtime:
item["MaxTimeAllowed"] = self.format_maxtime(maxtime)
# Price
item["Price"] = self.format_price(obj.Price)
# Duplicate Variation
dup_variation = obj.DuplicateVariation
if dup_variation:
item["DuplicateVariation"] = self.format_duplication_variation(
dup_variation)
# Icons
after_icons = ""
if obj.getAccredited():
after_icons += get_image(
"accredited.png", title=_("Accredited"))
if obj.getAttachmentOption() == "r":
after_icons += get_image(
"attach_reqd.png", title=_("Attachment required"))
if obj.getAttachmentOption() == "n":
after_icons += get_image(
"attach_no.png", title=_("Attachment not permitted"))
if after_icons:
item["after"]["Title"] = after_icons
return item | [
"def",
"folderitem",
"(",
"self",
",",
"obj",
",",
"item",
",",
"index",
")",
":",
"cat",
"=",
"obj",
".",
"getCategoryTitle",
"(",
")",
"cat_order",
"=",
"self",
".",
"an_cats_order",
".",
"get",
"(",
"cat",
")",
"if",
"self",
".",
"do_cats",
":",
... | 34.704225 | 16.661972 |
def start(self):
"""Commence audio processing.
If successful, the stream is considered active.
"""
err = _pa.Pa_StartStream(self._stream)
if err == _pa.paStreamIsNotStopped:
return
self._handle_error(err) | [
"def",
"start",
"(",
"self",
")",
":",
"err",
"=",
"_pa",
".",
"Pa_StartStream",
"(",
"self",
".",
"_stream",
")",
"if",
"err",
"==",
"_pa",
".",
"paStreamIsNotStopped",
":",
"return",
"self",
".",
"_handle_error",
"(",
"err",
")"
] | 25.7 | 15.9 |
def _float(value):
"""Conversion of state vector field, with automatic unit handling
"""
if "[" in value:
# There is a unit field
value, sep, unit = value.partition("[")
unit = sep + unit
# As defined in the CCSDS Orbital Data Message Blue Book, the unit should
        # be the same as defined in table 3-3, i.e. km and km/s for position and
# velocity respectively. Thus, there should be no other conversion to make
if unit in ("[km]", "[km/s]"):
multiplier = 1000
elif unit == "[s]":
multiplier = 1
else:
raise ValueError("Unknown unit for this field", unit)
else:
# if no unit is provided, the default is km, and km/s
multiplier = 1000
return float(value) * multiplier | [
"def",
"_float",
"(",
"value",
")",
":",
"if",
"\"[\"",
"in",
"value",
":",
"# There is a unit field",
"value",
",",
"sep",
",",
"unit",
"=",
"value",
".",
"partition",
"(",
"\"[\"",
")",
"unit",
"=",
"sep",
"+",
"unit",
"# As defined in the CCSDS Orbital Da... | 36.227273 | 20.318182 |
def dump(obj, file_path, prettify=False):
"""
Dumps a data structure to the filesystem as TOML.
    The given value must be either a dict (possibly containing nested dicts) or a TOML file constructed by this module.
"""
with open(file_path, 'w') as fp:
fp.write(dumps(obj)) | [
"def",
"dump",
"(",
"obj",
",",
"file_path",
",",
"prettify",
"=",
"False",
")",
":",
"with",
"open",
"(",
"file_path",
",",
"'w'",
")",
"as",
"fp",
":",
"fp",
".",
"write",
"(",
"dumps",
"(",
"obj",
")",
")"
] | 35 | 17.25 |
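Usage of the helper above; note that the `prettify` flag is accepted but ignored by the body shown:

config = {'server': {'host': 'localhost', 'port': 8080}}
dump(config, 'config.toml')                  # writes dumps(config) to disk
dump(config, 'config.toml', prettify=True)   # currently identical output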
def correct_word(word_string):
'''
Finds all valid one and two letter corrections for word_string, returning the word
with the highest relative probability as type str.
'''
if word_string is None:
return ""
elif isinstance(word_string, str):
return max(find_candidates(word_string), key=find_word_prob)
else:
raise InputError("string or none type variable not passed as argument to correct_word") | [
"def",
"correct_word",
"(",
"word_string",
")",
":",
"if",
"word_string",
"is",
"None",
":",
"return",
"\"\"",
"elif",
"isinstance",
"(",
"word_string",
",",
"str",
")",
":",
"return",
"max",
"(",
"find_candidates",
"(",
"word_string",
")",
",",
"key",
"="... | 39.818182 | 26.181818 |
def get_characteristic_subpattern(subpatterns):
"""Picks the most characteristic from a list of linear patterns
Current order used is:
names > common_names > common_chars
"""
if not isinstance(subpatterns, list):
return subpatterns
    if len(subpatterns) == 1:
return subpatterns[0]
# first pick out the ones containing variable names
subpatterns_with_names = []
subpatterns_with_common_names = []
    common_names = ['in', 'for', 'if', 'not', 'None']
subpatterns_with_common_chars = []
common_chars = "[]().,:"
for subpattern in subpatterns:
if any(rec_test(subpattern, lambda x: type(x) is str)):
if any(rec_test(subpattern,
lambda x: isinstance(x, str) and x in common_chars)):
subpatterns_with_common_chars.append(subpattern)
elif any(rec_test(subpattern,
lambda x: isinstance(x, str) and x in common_names)):
subpatterns_with_common_names.append(subpattern)
else:
subpatterns_with_names.append(subpattern)
if subpatterns_with_names:
subpatterns = subpatterns_with_names
elif subpatterns_with_common_names:
subpatterns = subpatterns_with_common_names
elif subpatterns_with_common_chars:
subpatterns = subpatterns_with_common_chars
# of the remaining subpatterns pick out the longest one
return max(subpatterns, key=len) | [
"def",
"get_characteristic_subpattern",
"(",
"subpatterns",
")",
":",
"if",
"not",
"isinstance",
"(",
"subpatterns",
",",
"list",
")",
":",
"return",
"subpatterns",
"if",
"len",
"(",
"subpatterns",
")",
"==",
"1",
":",
"return",
"subpatterns",
"[",
"0",
"]",... | 40.138889 | 13.861111 |
def _update(qs):
"""
Increment the sort_order in a queryset.
Handle IntegrityErrors caused by unique constraints.
"""
try:
with transaction.atomic():
qs.update(sort_order=models.F('sort_order') + 1)
except IntegrityError:
for obj in qs.order_by('-sort_order'):
qs.filter(pk=obj.pk).update(sort_order=models.F('sort_order') + 1) | [
"def",
"_update",
"(",
"qs",
")",
":",
"try",
":",
"with",
"transaction",
".",
"atomic",
"(",
")",
":",
"qs",
".",
"update",
"(",
"sort_order",
"=",
"models",
".",
"F",
"(",
"'sort_order'",
")",
"+",
"1",
")",
"except",
"IntegrityError",
":",
"for",
... | 35.083333 | 17.25 |
def gender(self, iso5218: bool = False,
symbol: bool = False) -> Union[str, int]:
"""Get a random gender.
Get a random title of gender, code for the representation
of human sexes is an international standard that defines a
representation of human sexes through a language-neutral single-digit
code or symbol of gender.
:param iso5218:
Codes for the representation of human sexes is an international
standard (0 - not known, 1 - male, 2 - female, 9 - not applicable).
:param symbol: Symbol of gender.
:return: Title of gender.
:Example:
Male
"""
if iso5218:
return self.random.choice([0, 1, 2, 9])
if symbol:
return self.random.choice(GENDER_SYMBOLS)
return self.random.choice(self._data['gender']) | [
"def",
"gender",
"(",
"self",
",",
"iso5218",
":",
"bool",
"=",
"False",
",",
"symbol",
":",
"bool",
"=",
"False",
")",
"->",
"Union",
"[",
"str",
",",
"int",
"]",
":",
"if",
"iso5218",
":",
"return",
"self",
".",
"random",
".",
"choice",
"(",
"[... | 34.28 | 21.56 |
def _load(self):
"""Load the MODIS RSR data for the band requested"""
if self.is_sw or self.platform_name == 'EOS-Aqua':
scale = 0.001
else:
scale = 1.0
detector = read_modis_response(self.requested_band_filename, scale)
self.rsr = detector
if self._sort:
self.sort() | [
"def",
"_load",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_sw",
"or",
"self",
".",
"platform_name",
"==",
"'EOS-Aqua'",
":",
"scale",
"=",
"0.001",
"else",
":",
"scale",
"=",
"1.0",
"detector",
"=",
"read_modis_response",
"(",
"self",
".",
"requested_... | 34.2 | 18.4 |
def get_url_for_id(client_site_url, apikey, resource_id):
"""Return the URL for the given resource ID.
Contacts the client site's API to get the URL for the ID and returns it.
:raises CouldNotGetURLError: if getting the URL fails for any reason
"""
# TODO: Handle invalid responses from the client site.
url = client_site_url + u"deadoralive/get_url_for_resource_id"
params = {"resource_id": resource_id}
response = requests.get(url, headers=dict(Authorization=apikey),
params=params)
if not response.ok:
raise CouldNotGetURLError(
u"Couldn't get URL for resource {id}: {code} {reason}".format(
id=resource_id, code=response.status_code,
reason=response.reason))
return response.json() | [
"def",
"get_url_for_id",
"(",
"client_site_url",
",",
"apikey",
",",
"resource_id",
")",
":",
"# TODO: Handle invalid responses from the client site.",
"url",
"=",
"client_site_url",
"+",
"u\"deadoralive/get_url_for_resource_id\"",
"params",
"=",
"{",
"\"resource_id\"",
":",
... | 39.5 | 20.45 |
def set_primary_heartbeat(self, interface_id):
"""
Set this interface as the primary heartbeat for this engine.
This will 'unset' the current primary heartbeat and move to
specified interface_id.
Clusters and Master NGFW Engines only.
:param str,int interface_id: interface specified for primary mgmt
:raises InterfaceNotFound: specified interface is not found
:raises UpdateElementFailed: failed modifying interfaces
:return: None
"""
self.interface.set_unset(interface_id, 'primary_heartbeat')
self._engine.update() | [
"def",
"set_primary_heartbeat",
"(",
"self",
",",
"interface_id",
")",
":",
"self",
".",
"interface",
".",
"set_unset",
"(",
"interface_id",
",",
"'primary_heartbeat'",
")",
"self",
".",
"_engine",
".",
"update",
"(",
")"
] | 43.571429 | 17.857143 |
def _handleUssd(self, lines):
""" Handler for USSD event notification line(s) """
if self._ussdSessionEvent:
# A sendUssd() call is waiting for this response - parse it
self._ussdResponse = self._parseCusdResponse(lines)
# Notify waiting thread
self._ussdSessionEvent.set() | [
"def",
"_handleUssd",
"(",
"self",
",",
"lines",
")",
":",
"if",
"self",
".",
"_ussdSessionEvent",
":",
"# A sendUssd() call is waiting for this response - parse it",
"self",
".",
"_ussdResponse",
"=",
"self",
".",
"_parseCusdResponse",
"(",
"lines",
")",
"# Notify wa... | 47.285714 | 10.857143 |
def _mass(self,R,z=0.,t=0.):
"""
NAME:
_mass
PURPOSE:
evaluate the mass within R for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
t - time
OUTPUT:
the mass enclosed
HISTORY:
2013-XX-XX - Written - Bovy (IAS)
"""
        if z is None: r = R
        else: r = nu.sqrt(R**2. + z**2.)
return 2.*nu.pi*self.rc**(3.-self.alpha)*special.gammainc(1.5-self.alpha/2.,(r/self.rc)**2.)*special.gamma(1.5-self.alpha/2.) | [
"def",
"_mass",
"(",
"self",
",",
"R",
",",
"z",
"=",
"0.",
",",
"t",
"=",
"0.",
")",
":",
"if",
"z",
"is",
"None",
":",
"r",
"=",
"R",
"else",
":",
"r",
"=",
"nu",
".",
"sqrt",
"(",
"R",
"**",
"2.",
"+",
"z",
"**",
"2.",
")",
"return",... | 31.166667 | 19.055556 |
def add_status_job(self, job_func, name=None, timeout=3):
"""Adds a job to be included during calls to the `/status` endpoint.
:param job_func: the status function.
:param name: the name used in the JSON response for the given status
function. The name of the function is the default.
:param timeout: the time limit before the job status is set to
"timeout exceeded".
"""
job_name = job_func.__name__ if name is None else name
job = (job_name, timeout, job_func)
self._jobs.append(job) | [
"def",
"add_status_job",
"(",
"self",
",",
"job_func",
",",
"name",
"=",
"None",
",",
"timeout",
"=",
"3",
")",
":",
"job_name",
"=",
"job_func",
".",
"__name__",
"if",
"name",
"is",
"None",
"else",
"name",
"job",
"=",
"(",
"job_name",
",",
"timeout",
... | 48.666667 | 16.416667 |
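A hedged usage sketch; `health` stands for an instance of the class above and the job body is illustrative:

def database_up():
    # A real check would ping the dependency; truthy means healthy.
    return True

health.add_status_job(database_up)                        # reported as "database_up"
health.add_status_job(database_up, name="db", timeout=1)  # reported as "db"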
def cmd_gateway_find(network, iface, host, tcp, dport, timeout, verbose):
"""
Try to reach an external IP using any host has a router.
Useful to find routers in your network.
First, uses arping to detect alive hosts and obtain MAC addresses.
Later, create a network packet and put each MAC address as destination.
    Last, print the devices that correctly forwarded the packets.
Example:
\b
# habu.find.gateway 192.168.0.0/24
192.168.0.1 a4:08:f5:19:17:a4 Sagemcom
192.168.0.7 b0:98:2b:5d:22:70 Sagemcom
192.168.0.8 b0:98:2b:5d:1f:e8 Sagemcom
"""
if verbose:
logging.basicConfig(level=logging.INFO, format='%(message)s')
conf.verb = False
if iface:
conf.iface = iface
res, unans = srp(Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst=network), timeout=2)
neighbors = set()
for _, pkt in res:
neighbors.add((pkt['Ether'].src, pkt['Ether'].psrc))
    for mac, ip in neighbors:
if tcp:
res, unans = srp(Ether(dst=mac)/IP(dst=host)/TCP(dport=dport), timeout=timeout)
else:
res, unans = srp(Ether(dst=mac)/IP(dst=host)/ICMP(), timeout=timeout)
        for _, pkt in res:
if pkt:
if verbose:
print(pkt.show())
else:
print(ip, mac, conf.manufdb._get_manuf(mac)) | [
"def",
"cmd_gateway_find",
"(",
"network",
",",
"iface",
",",
"host",
",",
"tcp",
",",
"dport",
",",
"timeout",
",",
"verbose",
")",
":",
"if",
"verbose",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"INFO",
",",
"format",
"="... | 28.319149 | 25.510638 |
def _set_axis(self,traces,on=None,side='right',title=''):
"""
Sets the axis in which each trace should appear
If the axis doesn't exist then a new axis is created
Parameters:
-----------
traces : list(str)
List of trace names
on : string
The axis in which the traces should be placed.
If this is not indicated then a new axis will be
created
side : string
Side where the axis will be placed
'left'
'right'
title : string
Sets the title of the axis
Applies only to new axis
"""
fig={}
fig_cpy=fig_to_dict(self).copy()
fig['data']=fig_cpy['data']
fig['layout']=fig_cpy['layout']
fig=Figure(fig)
traces=make_list(traces)
def update_data(trace,y):
anchor=fig.axis['def'][y]['anchor'] if 'anchor' in fig.axis['def'][y] else 'x1'
idx=fig.trace_dict[trace] if isinstance(trace,str) else trace
fig['data'][idx]['xaxis']=anchor
fig['data'][idx]['yaxis']=y
for trace in traces:
if on:
if on not in fig.axis['def']:
raise Exception('"on" axis does not exists: {0}'.format(on))
update_data(trace,y=on)
else:
curr_x,curr_y=fig.axis['ref'][trace]
domain='[0.0, 1.0]' if 'domain' not in fig.axis['def'][curr_y] else str(fig.axis['def'][curr_y]['domain'])
try:
new_axis=fig.axis['dom']['y'][domain][side]
except KeyError:
axis=fig.axis['def'][curr_y].copy()
### check overlaying values
axis.update(title=title,overlaying=curr_y,side=side,anchor=curr_x)
axis_idx=str(fig.axis['len']['y']+1)
fig['layout']['yaxis{0}'.format(axis_idx)]=axis
new_axis='y{0}'.format(axis_idx)
update_data(trace,y=new_axis)
for k in list(fig.axis['def'].keys()):
id='{0}axis{1}'.format(k[0],k[-1:])
if k not in fig.axis['ref_axis']:
try:
del fig['layout'][id]
except KeyError:
pass
return fig | [
"def",
"_set_axis",
"(",
"self",
",",
"traces",
",",
"on",
"=",
"None",
",",
"side",
"=",
"'right'",
",",
"title",
"=",
"''",
")",
":",
"fig",
"=",
"{",
"}",
"fig_cpy",
"=",
"fig_to_dict",
"(",
"self",
")",
".",
"copy",
"(",
")",
"fig",
"[",
"'... | 27.507937 | 19.698413 |
async def login(self, client_name, request, redirect_uri=None, **params):
"""Process login with OAuth.
:param client_name: A name one of configured clients
:param request: Web request
:param redirect_uri: An URI for authorization redirect
"""
client = self.client(client_name, logger=self.app.logger)
redirect_uri = redirect_uri or self.cfg.redirect_uri or '%s://%s%s' % (
request.scheme, request.host, request.path)
session = await self.app.ps.session(request)
if isinstance(client, OAuth1Client):
oauth_verifier = request.query.get('oauth_verifier')
if not oauth_verifier:
# Get request credentials
token, secret = await client.get_request_token(
oauth_callback=redirect_uri)
# Save the credentials in current user session
session['oauth_token'] = token
session['oauth_token_secret'] = secret
url = client.get_authorize_url()
raise muffin.HTTPFound(url)
# Check request_token
oauth_token = request.query.get('oauth_token')
if session['oauth_token'] != oauth_token:
raise muffin.HTTPForbidden(reason='Invalid token.')
client.oauth_token = oauth_token
client.oauth_token_secret = session.get('oauth_token_secret')
# Get access tokens
return client, await client.get_access_token(oauth_verifier)
if isinstance(client, OAuth2Client):
code = request.query.get('code')
if not code:
# Authorize an user
state = sha1(str(random()).encode('ascii')).hexdigest()
session['oauth_secret'] = state
url = client.get_authorize_url(
redirect_uri=redirect_uri, state=state, **params)
raise muffin.HTTPFound(url)
# Check state
state = request.query.get('state')
oauth_secret = session.pop('oauth_secret', '')
if oauth_secret != state:
raise muffin.HTTPForbidden(reason='Invalid token "%s".' % oauth_secret)
# Get access token
return client, await client.get_access_token(code, redirect_uri=redirect_uri)
return client | [
"async",
"def",
"login",
"(",
"self",
",",
"client_name",
",",
"request",
",",
"redirect_uri",
"=",
"None",
",",
"*",
"*",
"params",
")",
":",
"client",
"=",
"self",
".",
"client",
"(",
"client_name",
",",
"logger",
"=",
"self",
".",
"app",
".",
"log... | 38.716667 | 21.1 |
def get_project_logs(self, request):
""" Get logs from log service.
        An unsuccessful operation will raise a LogException.
:type request: GetProjectLogsRequest
:param request: the GetProjectLogs request parameters class.
:return: GetLogsResponse
:raise: LogException
"""
headers = {}
params = {}
if request.get_query() is not None:
params['query'] = request.get_query()
project = request.get_project()
resource = "/logs"
(resp, header) = self._send("GET", project, None, resource, params, headers)
return GetLogsResponse(resp, header) | [
"def",
"get_project_logs",
"(",
"self",
",",
"request",
")",
":",
"headers",
"=",
"{",
"}",
"params",
"=",
"{",
"}",
"if",
"request",
".",
"get_query",
"(",
")",
"is",
"not",
"None",
":",
"params",
"[",
"'query'",
"]",
"=",
"request",
".",
"get_query... | 35.842105 | 14.842105 |
def Handle(self, unused_args, token=None):
"""Build the data structure representing the config."""
sections = {}
for descriptor in config.CONFIG.type_infos:
if descriptor.section in sections:
continue
section_data = {}
for parameter in self._ListParametersInSection(descriptor.section):
section_data[parameter] = ApiConfigOption().InitFromConfigOption(
parameter)
sections[descriptor.section] = section_data
result = ApiGetConfigResult()
for section_name in sorted(sections):
section = sections[section_name]
api_section = ApiConfigSection(name=section_name)
api_section.options = []
for param_name in sorted(section):
api_section.options.append(section[param_name])
result.sections.append(api_section)
return result | [
"def",
"Handle",
"(",
"self",
",",
"unused_args",
",",
"token",
"=",
"None",
")",
":",
"sections",
"=",
"{",
"}",
"for",
"descriptor",
"in",
"config",
".",
"CONFIG",
".",
"type_infos",
":",
"if",
"descriptor",
".",
"section",
"in",
"sections",
":",
"co... | 31.192308 | 18.461538 |
def enrich_internal_unqualified_edges(graph, subgraph):
"""Add the missing unqualified edges between entities in the subgraph that are contained within the full graph.
:param pybel.BELGraph graph: The full BEL graph
:param pybel.BELGraph subgraph: The query BEL subgraph
"""
for u, v in itt.combinations(subgraph, 2):
if not graph.has_edge(u, v):
continue
for k in graph[u][v]:
if k < 0:
subgraph.add_edge(u, v, key=k, **graph[u][v][k]) | [
"def",
"enrich_internal_unqualified_edges",
"(",
"graph",
",",
"subgraph",
")",
":",
"for",
"u",
",",
"v",
"in",
"itt",
".",
"combinations",
"(",
"subgraph",
",",
"2",
")",
":",
"if",
"not",
"graph",
".",
"has_edge",
"(",
"u",
",",
"v",
")",
":",
"co... | 38.615385 | 16 |
def parsing_token_generator(data_dir, tmp_dir, train, source_vocab_size,
target_vocab_size):
"""Generator for parsing as a sequence-to-sequence task that uses tokens.
This generator assumes the files parsing_{train,dev}.trees, which contain
trees in WSJ format.
Args:
data_dir: path to the data directory.
tmp_dir: path to temporary storage directory.
train: whether we're training or not.
source_vocab_size: source vocab size.
target_vocab_size: target vocab size.
Returns:
A generator to a dictionary of inputs and outputs.
"""
# TODO(lukaszkaiser): Correct these calls to generate vocabularies. No data
# sources are being passed.
del (data_dir, tmp_dir, train, source_vocab_size, target_vocab_size)
assert False, "Vocabulary generation not implemented" | [
"def",
"parsing_token_generator",
"(",
"data_dir",
",",
"tmp_dir",
",",
"train",
",",
"source_vocab_size",
",",
"target_vocab_size",
")",
":",
"# TODO(lukaszkaiser): Correct these calls to generate vocabularies. No data",
"# sources are being passed.",
"del",
"(",
"data_dir",
"... | 38.619048 | 18.809524 |
def _get_repo_details(saltenv):
'''
Return repo details for the specified saltenv as a namedtuple
'''
contextkey = 'winrepo._get_repo_details.{0}'.format(saltenv)
if contextkey in __context__:
(winrepo_source_dir, local_dest, winrepo_file) = __context__[contextkey]
else:
winrepo_source_dir = __opts__['winrepo_source_dir']
dirs = [__opts__['cachedir'], 'files', saltenv]
url_parts = _urlparse(winrepo_source_dir)
dirs.append(url_parts.netloc)
dirs.extend(url_parts.path.strip('/').split('/'))
local_dest = os.sep.join(dirs)
winrepo_file = os.path.join(local_dest, 'winrepo.p') # Default
# Check for a valid windows file name
if not re.search(r'[\/:*?"<>|]',
__opts__['winrepo_cachefile'],
flags=re.IGNORECASE):
winrepo_file = os.path.join(
local_dest,
__opts__['winrepo_cachefile']
)
else:
log.error(
'minion configuration option \'winrepo_cachefile\' has been '
'ignored as its value (%s) is invalid. Please ensure this '
'option is set to a valid filename.',
__opts__['winrepo_cachefile']
)
# Do some safety checks on the repo_path as its contents can be removed,
# this includes check for bad coding
system_root = os.environ.get('SystemRoot', r'C:\Windows')
if not salt.utils.path.safe_path(
path=local_dest,
allow_path='\\'.join([system_root, 'TEMP'])):
raise CommandExecutionError(
'Attempting to delete files from a possibly unsafe location: '
'{0}'.format(local_dest)
)
__context__[contextkey] = (winrepo_source_dir, local_dest, winrepo_file)
try:
os.makedirs(local_dest)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise CommandExecutionError(
'Failed to create {0}: {1}'.format(local_dest, exc)
)
winrepo_age = -1
try:
stat_result = os.stat(winrepo_file)
mtime = stat_result.st_mtime
winrepo_age = time.time() - mtime
except OSError as exc:
if exc.errno != errno.ENOENT:
raise CommandExecutionError(
'Failed to get age of {0}: {1}'.format(winrepo_file, exc)
)
except AttributeError:
# Shouldn't happen but log if it does
log.warning('st_mtime missing from stat result %s', stat_result)
except TypeError:
# Shouldn't happen but log if it does
log.warning('mtime of %s (%s) is an invalid type', winrepo_file, mtime)
repo_details = collections.namedtuple(
'RepoDetails',
('winrepo_source_dir', 'local_dest', 'winrepo_file', 'winrepo_age')
)
return repo_details(winrepo_source_dir, local_dest, winrepo_file, winrepo_age) | [
"def",
"_get_repo_details",
"(",
"saltenv",
")",
":",
"contextkey",
"=",
"'winrepo._get_repo_details.{0}'",
".",
"format",
"(",
"saltenv",
")",
"if",
"contextkey",
"in",
"__context__",
":",
"(",
"winrepo_source_dir",
",",
"local_dest",
",",
"winrepo_file",
")",
"=... | 37.948052 | 20.077922 |
def refresh(self):
""" Refreshing a Library or individual item causes the metadata for the item to be
refreshed, even if it already has metadata. You can think of refreshing as
"update metadata for the requested item even if it already has some". You should
refresh a Library or individual item if:
* You've changed the Library Metadata Agent.
* You've added "Local Media Assets" (such as artwork, theme music, external
subtitle files, etc.)
* You want to freshen the item posters, summary, etc.
* There's a problem with the poster image that's been downloaded.
* Items are missing posters or other downloaded information. This is possible if
the refresh process is interrupted (the Server is turned off, internet
connection dies, etc).
"""
key = '%s/refresh' % self.key
self._server.query(key, method=self._server._session.put) | [
"def",
"refresh",
"(",
"self",
")",
":",
"key",
"=",
"'%s/refresh'",
"%",
"self",
".",
"key",
"self",
".",
"_server",
".",
"query",
"(",
"key",
",",
"method",
"=",
"self",
".",
"_server",
".",
"_session",
".",
"put",
")"
] | 58.176471 | 25.176471 |
def expand_classes_glob(classes, salt_data):
'''
Expand the list of `classes` to no longer include any globbing.
:param iterable(str) classes: Iterable of classes
:param dict salt_data: configuration data
:return: Expanded list of classes with resolved globbing
:rtype: list(str)
'''
all_classes = []
expanded_classes = []
saltclass_path = salt_data['path']
for _class in classes:
all_classes.extend(match_class_glob(_class, saltclass_path))
for _class in all_classes:
if _class not in expanded_classes:
expanded_classes.append(_class)
return expanded_classes | [
"def",
"expand_classes_glob",
"(",
"classes",
",",
"salt_data",
")",
":",
"all_classes",
"=",
"[",
"]",
"expanded_classes",
"=",
"[",
"]",
"saltclass_path",
"=",
"salt_data",
"[",
"'path'",
"]",
"for",
"_class",
"in",
"classes",
":",
"all_classes",
".",
"ext... | 29.666667 | 20.047619 |
def request_param_update(self, var_id):
"""Place a param update request on the queue"""
self._useV2 = self.cf.platform.get_protocol_version() >= 4
pk = CRTPPacket()
pk.set_header(CRTPPort.PARAM, READ_CHANNEL)
if self._useV2:
pk.data = struct.pack('<H', var_id)
else:
pk.data = struct.pack('<B', var_id)
        logger.debug('Requesting update for param [%d]', var_id)
self.request_queue.put(pk) | [
"def",
"request_param_update",
"(",
"self",
",",
"var_id",
")",
":",
"self",
".",
"_useV2",
"=",
"self",
".",
"cf",
".",
"platform",
".",
"get_protocol_version",
"(",
")",
">=",
"4",
"pk",
"=",
"CRTPPacket",
"(",
")",
"pk",
".",
"set_header",
"(",
"CRT... | 42.818182 | 13.454545 |
def from_xdr_object(cls, op_xdr_object):
"""Creates a :class:`SetOptions` object from an XDR Operation
object.
"""
if not op_xdr_object.sourceAccount:
source = None
else:
source = encode_check(
'account', op_xdr_object.sourceAccount[0].ed25519).decode()
if not op_xdr_object.body.setOptionsOp.inflationDest:
inflation_dest = None
else:
inflation_dest = encode_check(
'account', op_xdr_object.body.setOptionsOp.inflationDest[0]
.ed25519).decode()
clear_flags = op_xdr_object.body.setOptionsOp.clearFlags # list
set_flags = op_xdr_object.body.setOptionsOp.setFlags
master_weight = op_xdr_object.body.setOptionsOp.masterWeight
low_threshold = op_xdr_object.body.setOptionsOp.lowThreshold
med_threshold = op_xdr_object.body.setOptionsOp.medThreshold
high_threshold = op_xdr_object.body.setOptionsOp.highThreshold
home_domain = op_xdr_object.body.setOptionsOp.homeDomain
if op_xdr_object.body.setOptionsOp.signer:
key = op_xdr_object.body.setOptionsOp.signer[0].key
if key.type == Xdr.const.SIGNER_KEY_TYPE_ED25519:
signer_address = encode_check('account', key.ed25519).decode()
signer_type = 'ed25519PublicKey'
if key.type == Xdr.const.SIGNER_KEY_TYPE_PRE_AUTH_TX:
signer_address = key.preAuthTx
signer_type = 'preAuthTx'
if key.type == Xdr.const.SIGNER_KEY_TYPE_HASH_X:
signer_address = key.hashX
signer_type = 'hashX'
signer_weight = op_xdr_object.body.setOptionsOp.signer[0].weight
else:
signer_address = None
signer_type = None
signer_weight = None
return cls(
source=source,
inflation_dest=inflation_dest,
clear_flags=clear_flags,
set_flags=set_flags,
master_weight=master_weight,
low_threshold=low_threshold,
med_threshold=med_threshold,
high_threshold=high_threshold,
home_domain=home_domain,
signer_address=signer_address,
signer_type=signer_type,
signer_weight=signer_weight) | [
"def",
"from_xdr_object",
"(",
"cls",
",",
"op_xdr_object",
")",
":",
"if",
"not",
"op_xdr_object",
".",
"sourceAccount",
":",
"source",
"=",
"None",
"else",
":",
"source",
"=",
"encode_check",
"(",
"'account'",
",",
"op_xdr_object",
".",
"sourceAccount",
"[",... | 40.403509 | 16.508772 |
def convenience_calc_fisher_approx(self, params):
"""
Calculates the BHHH approximation of the Fisher Information Matrix for
this model / dataset.
"""
shapes, intercepts, betas = self.convenience_split_params(params)
args = [betas,
self.design,
self.alt_id_vector,
self.rows_to_obs,
self.rows_to_alts,
self.choice_vector,
self.utility_transform,
self.calc_dh_d_shape,
self.calc_dh_dv,
self.calc_dh_d_alpha,
intercepts,
shapes,
self.ridge,
self.weights]
return cc.calc_fisher_info_matrix(*args) | [
"def",
"convenience_calc_fisher_approx",
"(",
"self",
",",
"params",
")",
":",
"shapes",
",",
"intercepts",
",",
"betas",
"=",
"self",
".",
"convenience_split_params",
"(",
"params",
")",
"args",
"=",
"[",
"betas",
",",
"self",
".",
"design",
",",
"self",
... | 32.043478 | 13.086957 |
def write_Note(file, note, bpm=120, repeat=0, verbose=False):
"""Expect a Note object from mingus.containers and save it into a MIDI
    file specified by file.
You can set the velocity and channel in Note.velocity and Note.channel.
"""
m = MidiFile()
t = MidiTrack(bpm)
m.tracks = [t]
while repeat >= 0:
t.set_deltatime('\x00')
t.play_Note(note)
t.set_deltatime("\x48")
t.stop_Note(note)
repeat -= 1
return m.write_file(file, verbose) | [
"def",
"write_Note",
"(",
"file",
",",
"note",
",",
"bpm",
"=",
"120",
",",
"repeat",
"=",
"0",
",",
"verbose",
"=",
"False",
")",
":",
"m",
"=",
"MidiFile",
"(",
")",
"t",
"=",
"MidiTrack",
"(",
"bpm",
")",
"m",
".",
"tracks",
"=",
"[",
"t",
... | 30.875 | 16.1875 |
def parse_photo(data):
"""
Parse a ``MeetupPhoto`` from the given response data.
Returns
-------
    A ``pythonkc_meetups.types.MeetupPhoto``.
"""
return MeetupPhoto(
id=data.get('photo_id', data.get('id', None)),
url=data.get('photo_link', None),
highres_url=data.get('highres_link', None),
thumb_url=data.get('thumb_link', None)
) | [
"def",
"parse_photo",
"(",
"data",
")",
":",
"return",
"MeetupPhoto",
"(",
"id",
"=",
"data",
".",
"get",
"(",
"'photo_id'",
",",
"data",
".",
"get",
"(",
"'id'",
",",
"None",
")",
")",
",",
"url",
"=",
"data",
".",
"get",
"(",
"'photo_link'",
",",... | 25.333333 | 17.466667 |
def setViewTypes(self, viewTypes, window=None):
"""
Sets the view types that can be used for this widget. If the optional \
window member is supplied, then the registerToWindow method will be \
called for each view.
:param viewTypes | [<sublcass of XView>, ..]
window | <QMainWindow> || <QDialog> || None
"""
if window:
for viewType in self._viewTypes:
viewType.unregisterFromWindow(window)
self._viewTypes = viewTypes[:]
self._panelMenu = None
self._pluginMenu = None
if window:
for viewType in viewTypes:
viewType.registerToWindow(window) | [
"def",
"setViewTypes",
"(",
"self",
",",
"viewTypes",
",",
"window",
"=",
"None",
")",
":",
"if",
"window",
":",
"for",
"viewType",
"in",
"self",
".",
"_viewTypes",
":",
"viewType",
".",
"unregisterFromWindow",
"(",
"window",
")",
"self",
".",
"_viewTypes"... | 36.05 | 16.35 |
def autorenew_deactivate(cls, fqdn):
"""Activate deautorenew"""
fqdn = fqdn.lower()
result = cls.call('domain.autorenew.deactivate', fqdn)
return result | [
"def",
"autorenew_deactivate",
"(",
"cls",
",",
"fqdn",
")",
":",
"fqdn",
"=",
"fqdn",
".",
"lower",
"(",
")",
"result",
"=",
"cls",
".",
"call",
"(",
"'domain.autorenew.deactivate'",
",",
"fqdn",
")",
"return",
"result"
] | 25.714286 | 19.714286 |
def parents(self, node, relations=None):
"""
        Return all direct parents of the specified node.
Wraps networkx by default.
Arguments
---------
node: string
identifier for node in ontology
relations: list of strings
list of relation (object property) IDs used to filter
"""
g = self.get_graph()
if node in g:
parents = list(g.predecessors(node))
if relations is None:
return parents
else:
rset = set(relations)
return [p for p in parents if len(self.child_parent_relations(node, p, graph=g).intersection(rset)) > 0 ]
else:
return [] | [
"def",
"parents",
"(",
"self",
",",
"node",
",",
"relations",
"=",
"None",
")",
":",
"g",
"=",
"self",
".",
"get_graph",
"(",
")",
"if",
"node",
"in",
"g",
":",
"parents",
"=",
"list",
"(",
"g",
".",
"predecessors",
"(",
"node",
")",
")",
"if",
... | 29.625 | 18.541667 |
def shell_django(session: DjangoSession, backend: ShellBackend):
"""
    This command includes a Django DB session
"""
namespace = {
'session': session
}
namespace.update(backend.get_namespace())
embed(user_ns=namespace, header=backend.header) | [
"def",
"shell_django",
"(",
"session",
":",
"DjangoSession",
",",
"backend",
":",
"ShellBackend",
")",
":",
"namespace",
"=",
"{",
"'session'",
":",
"session",
"}",
"namespace",
".",
"update",
"(",
"backend",
".",
"get_namespace",
"(",
")",
")",
"embed",
"... | 29.444444 | 12.777778 |
def GetCursorPos() -> tuple:
"""
GetCursorPos from Win32.
    Get the current mouse cursor position.
    Return a tuple of two ints, (x, y).
"""
point = ctypes.wintypes.POINT(0, 0)
ctypes.windll.user32.GetCursorPos(ctypes.byref(point))
return point.x, point.y | [
"def",
"GetCursorPos",
"(",
")",
"->",
"tuple",
":",
"point",
"=",
"ctypes",
".",
"wintypes",
".",
"POINT",
"(",
"0",
",",
"0",
")",
"ctypes",
".",
"windll",
".",
"user32",
".",
"GetCursorPos",
"(",
"ctypes",
".",
"byref",
"(",
"point",
")",
")",
"... | 30.111111 | 6.555556 |
def handle(self, object, *args, **kw):
'''
Calls each plugin in this PluginSet with the specified object,
arguments, and keywords in the standard group plugin order. The
return value from each successive invoked plugin is passed as the
first parameter to the next plugin. The final return value is the
object returned from the last plugin.
If this plugin set is empty (i.e. no plugins exist or matched the
spec), then a ValueError exception is thrown.
'''
if not bool(self):
if not self.spec or self.spec == SPEC_ALL:
raise ValueError('No plugins available in group %r' % (self.group,))
raise ValueError(
'No plugins in group %r matched %r' % (self.group, self.spec))
for plugin in self.plugins:
object = plugin.handle(object, *args, **kw)
return object | [
"def",
"handle",
"(",
"self",
",",
"object",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"if",
"not",
"bool",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"spec",
"or",
"self",
".",
"spec",
"==",
"SPEC_ALL",
":",
"raise",
"ValueError",
... | 43.052632 | 21.473684 |
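The pipeline semantics are easy to demonstrate with two stand-in plugins (a self-contained sketch; real plugins would come from the registry):

class Upper(object):
    def handle(self, obj, *args, **kw):
        return obj.upper()

class Exclaim(object):
    def handle(self, obj, *args, **kw):
        return obj + '!'

# Each plugin receives the previous plugin's return value.
obj = 'hello'
for plugin in [Upper(), Exclaim()]:
    obj = plugin.handle(obj)
print(obj)  # HELLO!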
def open_channel(
self,
kind,
dest_addr=None,
src_addr=None,
window_size=None,
max_packet_size=None,
timeout=None,
):
"""
Request a new channel to the server. `Channels <.Channel>` are
socket-like objects used for the actual transfer of data across the
session. You may only request a channel after negotiating encryption
(using `connect` or `start_client`) and authenticating.
        .. note:: Modifying the window and packet sizes might have adverse
effects on the channel created. The default values are the same
as in the OpenSSH code base and have been battle tested.
:param str kind:
the kind of channel requested (usually ``"session"``,
``"forwarded-tcpip"``, ``"direct-tcpip"``, or ``"x11"``)
:param tuple dest_addr:
the destination address (address + port tuple) of this port
forwarding, if ``kind`` is ``"forwarded-tcpip"`` or
``"direct-tcpip"`` (ignored for other channel types)
:param src_addr: the source address of this port forwarding, if
``kind`` is ``"forwarded-tcpip"``, ``"direct-tcpip"``, or ``"x11"``
:param int window_size:
optional window size for this session.
:param int max_packet_size:
optional max packet size for this session.
:param float timeout:
optional timeout opening a channel, default 3600s (1h)
:return: a new `.Channel` on success
:raises:
`.SSHException` -- if the request is rejected, the session ends
            prematurely or there is a timeout opening a channel
.. versionchanged:: 1.15
Added the ``window_size`` and ``max_packet_size`` arguments.
"""
if not self.active:
raise SSHException("SSH session not active")
timeout = 3600 if timeout is None else timeout
self.lock.acquire()
try:
window_size = self._sanitize_window_size(window_size)
max_packet_size = self._sanitize_packet_size(max_packet_size)
chanid = self._next_channel()
m = Message()
m.add_byte(cMSG_CHANNEL_OPEN)
m.add_string(kind)
m.add_int(chanid)
m.add_int(window_size)
m.add_int(max_packet_size)
if (kind == "forwarded-tcpip") or (kind == "direct-tcpip"):
m.add_string(dest_addr[0])
m.add_int(dest_addr[1])
m.add_string(src_addr[0])
m.add_int(src_addr[1])
elif kind == "x11":
m.add_string(src_addr[0])
m.add_int(src_addr[1])
chan = Channel(chanid)
self._channels.put(chanid, chan)
self.channel_events[chanid] = event = threading.Event()
self.channels_seen[chanid] = True
chan._set_transport(self)
chan._set_window(window_size, max_packet_size)
finally:
self.lock.release()
self._send_user_message(m)
start_ts = time.time()
while True:
event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is None:
e = SSHException("Unable to open channel.")
raise e
if event.is_set():
break
elif start_ts + timeout < time.time():
raise SSHException("Timeout opening channel.")
chan = self._channels.get(chanid)
if chan is not None:
return chan
e = self.get_exception()
if e is None:
e = SSHException("Unable to open channel.")
raise e | [
"def",
"open_channel",
"(",
"self",
",",
"kind",
",",
"dest_addr",
"=",
"None",
",",
"src_addr",
"=",
"None",
",",
"window_size",
"=",
"None",
",",
"max_packet_size",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
")",
":",
"if",
"not",
"self",
".",
... | 39.351064 | 17.5 |
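A hedged sketch of opening a `direct-tcpip` channel through an authenticated transport; host names, credentials and ports below are illustrative:

import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('gateway.example.com', username='user', password='secret')
transport = client.get_transport()
chan = transport.open_channel(
    'direct-tcpip',
    dest_addr=('internal.example.com', 80),  # where the server should connect
    src_addr=('127.0.0.1', 0),               # reported origin of the request
)
chan.sendall(b'GET / HTTP/1.0\r\nHost: internal.example.com\r\n\r\n')
print(chan.recv(4096))
chan.close()
client.close()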
def circuit_drawer(circuit,
scale=0.7,
filename=None,
style=None,
output='text',
interactive=False,
line_length=None,
plot_barriers=True,
reverse_bits=False,
justify=None):
"""Draw a quantum circuit to different formats (set by output parameter):
0. text: ASCII art TextDrawing that can be printed in the console.
1. latex: high-quality images, but heavy external software dependencies
2. matplotlib: purely in Python with no external dependencies
Args:
circuit (QuantumCircuit): the quantum circuit to draw
scale (float): scale of image to draw (shrink if < 1)
filename (str): file path to save image to
style (dict or str): dictionary of style or file name of style file.
This option is only used by the `mpl`, `latex`, and `latex_source`
            output types. If a str is passed in, it is treated as the path to
            a JSON file, which will be opened, parsed, and then used just like
            the input dict.
        output (str): Select the output method to use for drawing the circuit.
Valid choices are `text`, `latex`, `latex_source`, `mpl`. Note if
one is not specified it will use latex and if that fails fallback
to mpl. However this behavior is deprecated and in a future release
the default will change.
interactive (bool): when set true show the circuit in a new window
(for `mpl` this depends on the matplotlib backend being used
supporting this). Note when used with either the `text` or the
`latex_source` output type this has no effect and will be silently
ignored.
line_length (int): Sets the length of the lines generated by `text`
            output type. This is useful when the drawing does not fit in the
console. If None (default), it will try to guess the console width
using shutil.get_terminal_size(). However, if you're running in
jupyter the default line length is set to 80 characters. If you
don't want pagination at all, set `line_length=-1`.
reverse_bits (bool): When set to True reverse the bit order inside
registers for the output visualization.
plot_barriers (bool): Enable/disable drawing barriers in the output
circuit. Defaults to True.
justify (string): Options are `left`, `right` or `none`, if anything
else is supplied it defaults to left justified. It refers to where
gates should be placed in the output circuit if there is an option.
`none` results in each gate being placed in its own column. Currently
only supported by text drawer.
Returns:
PIL.Image: (output `latex`) an in-memory representation of the image
of the circuit diagram.
matplotlib.figure: (output `mpl`) a matplotlib figure object for the
circuit diagram.
String: (output `latex_source`). The LaTeX source code.
TextDrawing: (output `text`). A drawing that can be printed as ascii art
Raises:
VisualizationError: when an invalid output method is selected
        ImportError: when the output method requires libraries that are not installed.
.. _style-dict-doc:
The style dict kwarg contains numerous options that define the style of the
output circuit visualization. While the style dict is used by the `mpl`,
    `latex`, and `latex_source` outputs, some of its options are only used
    by the `mpl` output. These options are defined below; an option used only
    by the `mpl` output is marked as such:
textcolor (str): The color code to use for text. Defaults to
`'#000000'` (`mpl` only)
subtextcolor (str): The color code to use for subtext. Defaults to
`'#000000'` (`mpl` only)
linecolor (str): The color code to use for lines. Defaults to
`'#000000'` (`mpl` only)
        creglinecolor (str): The color code to use for classical register
            lines. Defaults to `'#778899'` (`mpl` only)
        gatetextcolor (str): The color code to use for gate text. Defaults to
            `'#000000'` (`mpl` only)
gatefacecolor (str): The color code to use for gates. Defaults to
`'#ffffff'` (`mpl` only)
barrierfacecolor (str): The color code to use for barriers. Defaults to
`'#bdbdbd'` (`mpl` only)
backgroundcolor (str): The color code to use for the background.
Defaults to `'#ffffff'` (`mpl` only)
fontsize (int): The font size to use for text. Defaults to 13 (`mpl`
only)
subfontsize (int): The font size to use for subtext. Defaults to 8
(`mpl` only)
displaytext (dict): A dictionary of the text to use for each element
type in the output visualization. The default values are:
{
'id': 'id',
'u0': 'U_0',
'u1': 'U_1',
'u2': 'U_2',
'u3': 'U_3',
'x': 'X',
'y': 'Y',
'z': 'Z',
'h': 'H',
's': 'S',
'sdg': 'S^\\dagger',
't': 'T',
'tdg': 'T^\\dagger',
'rx': 'R_x',
'ry': 'R_y',
'rz': 'R_z',
'reset': '\\left|0\\right\\rangle'
}
You must specify all the necessary values if using this. There is
no provision for passing an incomplete dict in. (`mpl` only)
        displaycolor (dict): The color codes to use for each circuit element.
            By default, all values are set to the value of `gatefacecolor`,
            and the keys are the same as `displaytext`. Also, just like
            `displaytext`, there is no provision for an incomplete dict passed
            in. (`mpl` only)
latexdrawerstyle (bool): When set to True enable latex mode which will
draw gates like the `latex` output modes. (`mpl` only)
usepiformat (bool): When set to True use radians for output (`mpl`
only)
fold (int): The number of circuit elements to fold the circuit at.
Defaults to 20 (`mpl` only)
cregbundle (bool): If set True bundle classical registers (`mpl` only)
showindex (bool): If set True draw an index. (`mpl` only)
compress (bool): If set True draw a compressed circuit (`mpl` only)
figwidth (int): The maximum width (in inches) for the output figure.
(`mpl` only)
dpi (int): The DPI to use for the output image. Defaults to 150 (`mpl`
only)
margin (list): `mpl` only
creglinestyle (str): The style of line to use for classical registers.
Choices are `'solid'`, `'doublet'`, or any valid matplotlib
            `linestyle` kwarg value. Defaults to `doublet` (`mpl` only)
"""
image = None
if output == 'text':
return _text_circuit_drawer(circuit, filename=filename,
line_length=line_length,
reverse_bits=reverse_bits,
plotbarriers=plot_barriers,
justify=justify)
elif output == 'latex':
image = _latex_circuit_drawer(circuit, scale=scale,
filename=filename, style=style,
plot_barriers=plot_barriers,
reverse_bits=reverse_bits,
justify=justify)
elif output == 'latex_source':
return _generate_latex_source(circuit,
filename=filename, scale=scale,
style=style,
plot_barriers=plot_barriers,
reverse_bits=reverse_bits,
justify=justify)
elif output == 'mpl':
image = _matplotlib_circuit_drawer(circuit, scale=scale,
filename=filename, style=style,
plot_barriers=plot_barriers,
reverse_bits=reverse_bits,
justify=justify)
else:
raise exceptions.VisualizationError(
'Invalid output type %s selected. The only valid choices '
'are latex, latex_source, text, and mpl' % output)
if image and interactive:
image.show()
return image | [
"def",
"circuit_drawer",
"(",
"circuit",
",",
"scale",
"=",
"0.7",
",",
"filename",
"=",
"None",
",",
"style",
"=",
"None",
",",
"output",
"=",
"'text'",
",",
"interactive",
"=",
"False",
",",
"line_length",
"=",
"None",
",",
"plot_barriers",
"=",
"True"... | 51.171598 | 23.39645 |
def empty_like(self, shape):
"""
Make an empty LabelArray with the same categories as ``self``, filled
with ``self.missing_value``.
"""
return type(self).from_codes_and_metadata(
codes=np.full(
shape,
self.reverse_categories[self.missing_value],
dtype=unsigned_int_dtype_with_size_in_bytes(self.itemsize),
),
categories=self.categories,
reverse_categories=self.reverse_categories,
missing_value=self.missing_value,
) | [
"def",
"empty_like",
"(",
"self",
",",
"shape",
")",
":",
"return",
"type",
"(",
"self",
")",
".",
"from_codes_and_metadata",
"(",
"codes",
"=",
"np",
".",
"full",
"(",
"shape",
",",
"self",
".",
"reverse_categories",
"[",
"self",
".",
"missing_value",
"... | 37.2 | 15.2 |
def create_cli(create_app=None):
"""Create CLI for ``inveniomanage`` command.
:param create_app: Flask application factory.
:returns: Click command group.
.. versionadded: 1.0.0
"""
def create_cli_app(info):
"""Application factory for CLI app.
Internal function for creating the CLI. When invoked via
``inveniomanage`` FLASK_APP must be set.
"""
if create_app is None:
# Fallback to normal Flask behavior
info.create_app = None
app = info.load_app()
else:
app = create_app(debug=get_debug_flag())
return app
@click.group(cls=FlaskGroup, create_app=create_cli_app)
def cli(**params):
"""Command Line Interface for Invenio."""
pass
return cli | [
"def",
"create_cli",
"(",
"create_app",
"=",
"None",
")",
":",
"def",
"create_cli_app",
"(",
"info",
")",
":",
"\"\"\"Application factory for CLI app.\n\n Internal function for creating the CLI. When invoked via\n ``inveniomanage`` FLASK_APP must be set.\n \"\"\"",
... | 27.642857 | 16.5 |
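A hypothetical usage sketch for the factory above, assuming a Flask application factory of this shape; the module layout and names are illustrative only.

from flask import Flask

def create_app(debug=False):
    app = Flask(__name__)
    app.debug = debug
    return app

# Build the CLI around our factory; FlaskGroup exposes `run`, `shell`, etc.
cli = create_cli(create_app=create_app)

if __name__ == '__main__':
    cli()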
def postloop(self):
"""Take care of any unfinished business.
Despite the claims in the Cmd documentation, Cmd.postloop() is not a stub.
"""
cmd.Cmd.postloop(self) # Clean up command completion
d1_cli.impl.util.print_info("Exiting...") | [
"def",
"postloop",
"(",
"self",
")",
":",
"cmd",
".",
"Cmd",
".",
"postloop",
"(",
"self",
")",
"# Clean up command completion",
"d1_cli",
".",
"impl",
".",
"util",
".",
"print_info",
"(",
"\"Exiting...\"",
")"
] | 33.75 | 21.625 |
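A minimal, self-contained sketch of the hook shown above: cmd.Cmd calls postloop() once when cmdloop() exits, making it a natural place for cleanup. This standalone example prints directly instead of using the d1_cli helper.

import cmd

class MiniShell(cmd.Cmd):
    prompt = '> '

    def do_quit(self, arg):
        """Exit the shell."""
        return True  # returning True stops cmdloop()

    def postloop(self):
        cmd.Cmd.postloop(self)  # clean up command completion state
        print("Exiting...")

# MiniShell().cmdloop()  # type `quit` to trigger postloop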
def _check_repos(self, repos):
"""Check if repodata urls are valid."""
self._checking_repos = []
self._valid_repos = []
for repo in repos:
worker = self.download_is_valid_url(repo)
worker.sig_finished.connect(self._repos_checked)
worker.repo = repo
self._checking_repos.append(repo) | [
"def",
"_check_repos",
"(",
"self",
",",
"repos",
")",
":",
"self",
".",
"_checking_repos",
"=",
"[",
"]",
"self",
".",
"_valid_repos",
"=",
"[",
"]",
"for",
"repo",
"in",
"repos",
":",
"worker",
"=",
"self",
".",
"download_is_valid_url",
"(",
"repo",
... | 35.4 | 12.9 |
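The method above fans repo URLs out to asynchronous download workers and collects results through a finished signal. A rough thread-based sketch of the same pattern, with the Qt-style worker/signal machinery replaced by concurrent.futures and all names illustrative:

from concurrent.futures import ThreadPoolExecutor
import urllib.request

def is_valid_url(repo):
    # Simple GET-based reachability probe; any network error marks it invalid.
    try:
        with urllib.request.urlopen(repo, timeout=5) as resp:
            return repo, resp.getcode() == 200
    except Exception:
        return repo, False

def check_repos(repos):
    valid_repos = []
    with ThreadPoolExecutor(max_workers=4) as pool:
        for repo, ok in pool.map(is_valid_url, repos):
            if ok:  # the original collects these in a _repos_checked callback
                valid_repos.append(repo)
    return valid_repos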
def transformer_librispeech_v2():
"""HParams for training ASR model on LibriSpeech V2."""
hparams = transformer_base()
hparams.max_length = 1240000
hparams.max_input_seq_length = 1550
hparams.max_target_seq_length = 350
hparams.batch_size = 16
hparams.num_decoder_layers = 4
hparams.num_encoder_layers = 6
hparams.hidden_size = 384
hparams.learning_rate = 0.15
hparams.daisy_chain_variables = False
hparams.filter_size = 1536
hparams.num_heads = 2
hparams.ffn_layer = "conv_relu_conv"
hparams.conv_first_kernel = 9
hparams.weight_decay = 0
hparams.layer_prepostprocess_dropout = 0.2
hparams.relu_dropout = 0.2
return hparams | [
"def",
"transformer_librispeech_v2",
"(",
")",
":",
"hparams",
"=",
"transformer_base",
"(",
")",
"hparams",
".",
"max_length",
"=",
"1240000",
"hparams",
".",
"max_input_seq_length",
"=",
"1550",
"hparams",
".",
"max_target_seq_length",
"=",
"350",
"hparams",
"."... | 29.227273 | 11.909091 |
def replace(self):
"""Replace the current reftrack
:returns: None
:rtype: None
:raises: None
"""
tfi = self.get_taskfileinfo_selection()
if tfi:
self.reftrack.replace(tfi) | [
"def",
"replace",
"(",
"self",
",",
")",
":",
"tfi",
"=",
"self",
".",
"get_taskfileinfo_selection",
"(",
")",
"if",
"tfi",
":",
"self",
".",
"reftrack",
".",
"replace",
"(",
"tfi",
")"
] | 23.3 | 15.1 |
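A hypothetical standalone restatement of the guard-then-delegate pattern used above: fetch the current selection and act only when one actually exists.

def replace_selected(get_selection, reftrack):
    tfi = get_selection()  # returns a TaskFileInfo or None
    if tfi:
        reftrack.replace(tfi)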