text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def addPixmap(self, pixmap):
"""
Adds the pixmap to the list for this slider.
:param pixmap | <QPixmap> || <str>
"""
scene = self.scene()
scene.addItem(XImageItem(pixmap))
self.recalculate() | [
"def",
"addPixmap",
"(",
"self",
",",
"pixmap",
")",
":",
"scene",
"=",
"self",
".",
"scene",
"(",
")",
"scene",
".",
"addItem",
"(",
"XImageItem",
"(",
"pixmap",
")",
")",
"self",
".",
"recalculate",
"(",
")"
] | 28.888889 | 10 |
async def _check_resolver_ans(
self, dns_answer_list, record_name,
record_data_list, record_ttl, record_type_code):
"""Check if resolver answer is equal to record data.
Args:
dns_answer_list (list): DNS answer list contains record objects.
record_name (str): Record name.
record_data_list (list): List of data values for the record.
record_ttl (int): Record time-to-live info.
record_type_code (int): Record type code.
Returns:
boolean indicating if DNS answer data is equal to record data.
"""
type_filtered_list = [
ans for ans in dns_answer_list if ans.qtype == record_type_code
]
# check to see that type_filtered_lst has
# the same number of records as record_data_list
if len(type_filtered_list) != len(record_data_list):
return False
# check each record data is equal to the given data
for rec in type_filtered_list:
conditions = [rec.name == record_name,
rec.ttl == record_ttl,
rec.data in record_data_list]
# if ans record data is not equal
# to the given data return False
if not all(conditions):
return False
return True | [
"async",
"def",
"_check_resolver_ans",
"(",
"self",
",",
"dns_answer_list",
",",
"record_name",
",",
"record_data_list",
",",
"record_ttl",
",",
"record_type_code",
")",
":",
"type_filtered_list",
"=",
"[",
"ans",
"for",
"ans",
"in",
"dns_answer_list",
"if",
"ans"... | 37.055556 | 19.416667 |
def clean(self, value):
"""Clean
Uses the valid method to check which type the value is, and then calls
the correct version of clean on that node
Arguments:
value {mixed} -- The value to clean
Returns:
mixed
"""
# If the value is None and it's optional, return as is
if value is None and self._optional:
return None
# Go through each of the nodes
for i in range(len(self._nodes)):
# If it's valid
if self._nodes[i].valid(value):
# Use it's clean
return self._nodes[i].clean(value)
# Something went wrong
raise ValueError('value', value) | [
"def",
"clean",
"(",
"self",
",",
"value",
")",
":",
"# If the value is None and it's optional, return as is",
"if",
"value",
"is",
"None",
"and",
"self",
".",
"_optional",
":",
"return",
"None",
"# Go through each of the nodes",
"for",
"i",
"in",
"range",
"(",
"l... | 20.142857 | 21.178571 |
def _is_type_compatible(a, b):
"""helper for interval_range to check type compat of start/end/freq"""
is_ts_compat = lambda x: isinstance(x, (Timestamp, DateOffset))
is_td_compat = lambda x: isinstance(x, (Timedelta, DateOffset))
return ((is_number(a) and is_number(b)) or
(is_ts_compat(a) and is_ts_compat(b)) or
(is_td_compat(a) and is_td_compat(b)) or
com._any_none(a, b)) | [
"def",
"_is_type_compatible",
"(",
"a",
",",
"b",
")",
":",
"is_ts_compat",
"=",
"lambda",
"x",
":",
"isinstance",
"(",
"x",
",",
"(",
"Timestamp",
",",
"DateOffset",
")",
")",
"is_td_compat",
"=",
"lambda",
"x",
":",
"isinstance",
"(",
"x",
",",
"(",
... | 52.5 | 12.75 |
def list(self, date_created_before=values.unset, date_created=values.unset,
date_created_after=values.unset, limit=None, page_size=None):
"""
Lists MediaInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param datetime date_created_before: The `YYYY-MM-DD` value of the resources to read
:param datetime date_created: The `YYYY-MM-DD` value of the resources to read
:param datetime date_created_after: The `YYYY-MM-DD` value of the resources to read
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.message.media.MediaInstance]
"""
return list(self.stream(
date_created_before=date_created_before,
date_created=date_created,
date_created_after=date_created_after,
limit=limit,
page_size=page_size,
)) | [
"def",
"list",
"(",
"self",
",",
"date_created_before",
"=",
"values",
".",
"unset",
",",
"date_created",
"=",
"values",
".",
"unset",
",",
"date_created_after",
"=",
"values",
".",
"unset",
",",
"limit",
"=",
"None",
",",
"page_size",
"=",
"None",
")",
... | 56.37037 | 29.777778 |
def key_for_request(self, method, url, **kwargs):
""" Return a cache key from a given set of request parameters.
Default behavior is to return a complete URL for all GET
requests, and None otherwise.
Can be overriden if caching of non-get requests is desired.
"""
if method != 'get':
return None
return requests.Request(url=url, params=kwargs.get('params', {})).prepare().url | [
"def",
"key_for_request",
"(",
"self",
",",
"method",
",",
"url",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"method",
"!=",
"'get'",
":",
"return",
"None",
"return",
"requests",
".",
"Request",
"(",
"url",
"=",
"url",
",",
"params",
"=",
"kwargs",
"."... | 37.25 | 22.166667 |
def wbmax(self, value=None):
""" Corresponds to IDD Field `wbmax`
Extreme maximum wet-bulb temperature
Args:
value (float): value for IDD Field `wbmax`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `wbmax`'.format(value))
self._wbmax = value | [
"def",
"wbmax",
"(",
"self",
",",
"value",
"=",
"None",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"try",
":",
"value",
"=",
"float",
"(",
"value",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"'value {} need to be of type float '"... | 33.190476 | 19.380952 |
def analyze_fa(fa):
"""
analyze fa (names, insertions) and convert fasta to prodigal/cmscan safe file
- find insertions (masked sequence)
- make upper case
- assign names to id number
"""
if fa.name == '<stdin>':
safe = 'temp.id'
else:
safe = '%s.id' % (fa.name)
safe = open(safe, 'w')
sequences = {} # sequences[id] = sequence
insertions = {} # insertions[id] = [[start, stop], [start, stop], ...]
count = 0
id2name = {}
names = []
for seq in parse_fasta(fa):
id = '%010d' % (count,)
name = seq[0].split('>', 1)[1]
id2name[id] = name
id2name[name] = id
names.append(name)
insertions[id] = insertions_from_masked(seq[1])
sequences[id] = seq
print('\n'.join(['>%s' % (id), seq[1].upper()]), file=safe)
count += 1
safe.close()
lookup = open('%s.id.lookup' % (fa.name), 'w')
for i in list(id2name.items()):
print('\t'.join(i), file=lookup)
lookup.close()
return safe.name, sequences, id2name, names, insertions | [
"def",
"analyze_fa",
"(",
"fa",
")",
":",
"if",
"fa",
".",
"name",
"==",
"'<stdin>'",
":",
"safe",
"=",
"'temp.id'",
"else",
":",
"safe",
"=",
"'%s.id'",
"%",
"(",
"fa",
".",
"name",
")",
"safe",
"=",
"open",
"(",
"safe",
",",
"'w'",
")",
"sequen... | 31.848485 | 15.30303 |
def remove_absolute_impute__r2(X, y, model_generator, method_name, num_fcounts=11):
""" Remove Absolute (impute)
xlabel = "Max fraction of features removed"
ylabel = "1 - R^2"
transform = "one_minus"
sort_order = 9
"""
return __run_measure(measures.remove_impute, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.r2_score) | [
"def",
"remove_absolute_impute__r2",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"num_fcounts",
"=",
"11",
")",
":",
"return",
"__run_measure",
"(",
"measures",
".",
"remove_impute",
",",
"X",
",",
"y",
",",
"model_generator",
",",
"... | 45.25 | 23.625 |
def path_to_filename(pathfile):
'''
Takes a path filename string and returns the split between the path and the filename
if filename is not given, filename = ''
if path is not given, path = './'
'''
path = pathfile[:pathfile.rfind('/') + 1]
if path == '':
path = './'
filename = pathfile[pathfile.rfind('/') + 1:len(pathfile)]
if '.' not in filename:
path = pathfile
filename = ''
if (filename == '') and (path[len(path) - 1] != '/'):
path += '/'
return path, filename | [
"def",
"path_to_filename",
"(",
"pathfile",
")",
":",
"path",
"=",
"pathfile",
"[",
":",
"pathfile",
".",
"rfind",
"(",
"'/'",
")",
"+",
"1",
"]",
"if",
"path",
"==",
"''",
":",
"path",
"=",
"'./'",
"filename",
"=",
"pathfile",
"[",
"pathfile",
".",
... | 24.045455 | 24.590909 |
def write_case_data(self, file):
""" Writes the header to file.
"""
case_sheet = self.book.add_sheet("Case")
case_sheet.write(0, 0, "Name")
case_sheet.write(0, 1, self.case.name)
case_sheet.write(1, 0, "base_mva")
case_sheet.write(1, 1, self.case.base_mva) | [
"def",
"write_case_data",
"(",
"self",
",",
"file",
")",
":",
"case_sheet",
"=",
"self",
".",
"book",
".",
"add_sheet",
"(",
"\"Case\"",
")",
"case_sheet",
".",
"write",
"(",
"0",
",",
"0",
",",
"\"Name\"",
")",
"case_sheet",
".",
"write",
"(",
"0",
... | 38.125 | 4.5 |
def my_psd(x,NFFT=2**10,Fs=1):
"""
A local version of NumPy's PSD function that returns the plot arrays.
A mlab.psd wrapper function that returns two ndarrays;
makes no attempt to auto plot anything.
Parameters
----------
x : ndarray input signal
NFFT : a power of two, e.g., 2**10 = 1024
Fs : the sampling rate in Hz
Returns
-------
Px : ndarray of the power spectrum estimate
f : ndarray of frequency values
Notes
-----
This function makes it easier to overlay spectrum plots because
you have better control over the axis scaling than when using psd()
in the autoscale mode.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm import digitalcom as dc
>>> from numpy import log10
>>> x,b, data = dc.NRZ_bits(10000,10)
>>> Px,f = dc.my_psd(x,2**10,10)
>>> plt.plot(f, 10*log10(Px))
>>> plt.show()
"""
Px,f = pylab.mlab.psd(x,NFFT,Fs)
return Px.flatten(), f | [
"def",
"my_psd",
"(",
"x",
",",
"NFFT",
"=",
"2",
"**",
"10",
",",
"Fs",
"=",
"1",
")",
":",
"Px",
",",
"f",
"=",
"pylab",
".",
"mlab",
".",
"psd",
"(",
"x",
",",
"NFFT",
",",
"Fs",
")",
"return",
"Px",
".",
"flatten",
"(",
")",
",",
"f"
... | 27.138889 | 18.416667 |
def get_session_data(ctx, username, password, salt, server_public, private, preset):
"""Print out client session data."""
session = SRPClientSession(
SRPContext(username, password, prime=preset[0], generator=preset[1]),
private=private)
session.process(server_public, salt, base64=True)
click.secho('Client session key: %s' % session.key_b64)
click.secho('Client session key proof: %s' % session.key_proof_b64)
click.secho('Client session key hash: %s' % session.key_proof_hash_b64) | [
"def",
"get_session_data",
"(",
"ctx",
",",
"username",
",",
"password",
",",
"salt",
",",
"server_public",
",",
"private",
",",
"preset",
")",
":",
"session",
"=",
"SRPClientSession",
"(",
"SRPContext",
"(",
"username",
",",
"password",
",",
"prime",
"=",
... | 46.727273 | 25.818182 |
def parse(self, file):
'''
Method the programmer should call when ready to parse a file.
:param file: exact file path of the file to be processed
:return: PieceTree object representing the file in memory
'''
parser = make_parser()
self.clear()
class Extractor(xml.sax.ContentHandler):
def __init__(self, parent):
self.parent = parent
def startElement(self, name, attrs):
attribs = {}
for attrname in attrs.getNames():
attrvalue = attrs.get(attrname)
attribs[attrname] = attrvalue
self.parent.StartTag(name, attribs)
def characters(self, text):
self.parent.NewData(text)
def endElement(self, name):
self.parent.EndTag(name)
parser.setContentHandler(Extractor(self))
# OFFLINE MODE
parser.setFeature(handler.feature_external_ges, False)
fob = open(file, 'r')
parser.parse(fob)
return self.piece | [
"def",
"parse",
"(",
"self",
",",
"file",
")",
":",
"parser",
"=",
"make_parser",
"(",
")",
"self",
".",
"clear",
"(",
")",
"class",
"Extractor",
"(",
"xml",
".",
"sax",
".",
"ContentHandler",
")",
":",
"def",
"__init__",
"(",
"self",
",",
"parent",
... | 33.1875 | 17.1875 |
def global_defaults():
"""
Default configuration values and behavior toggles.
Fabric only extends this method in order to make minor adjustments and
additions to Invoke's `~invoke.config.Config.global_defaults`; see its
documentation for the base values, such as the config subtrees
controlling behavior of ``run`` or how ``tasks`` behave.
For Fabric-specific modifications and additions to the Invoke-level
defaults, see our own config docs at :ref:`default-values`.
.. versionadded:: 2.0
"""
# TODO: hrm should the run-related things actually be derived from the
# runner_class? E.g. Local defines local stuff, Remote defines remote
# stuff? Doesn't help with the final config tree tho...
# TODO: as to that, this is a core problem, Fabric wants split
# local/remote stuff, eg replace_env wants to be False for local and
# True remotely; shell wants to differ depending on target (and either
# way, does not want to use local interrogation for remote)
# TODO: is it worth moving all of our 'new' settings to a discrete
# namespace for cleanliness' sake? e.g. ssh.port, ssh.user etc.
# It wouldn't actually simplify this code any, but it would make it
# easier for users to determine what came from which library/repo.
defaults = InvokeConfig.global_defaults()
ours = {
# New settings
"connect_kwargs": {},
"forward_agent": False,
"gateway": None,
"load_ssh_configs": True,
"port": 22,
"run": {"replace_env": True},
"runners": {"remote": Remote},
"ssh_config_path": None,
"tasks": {"collection_name": "fabfile"},
# TODO: this becomes an override/extend once Invoke grows execution
# timeouts (which should be timeouts.execute)
"timeouts": {"connect": None},
"user": get_local_user(),
}
merge_dicts(defaults, ours)
return defaults | [
"def",
"global_defaults",
"(",
")",
":",
"# TODO: hrm should the run-related things actually be derived from the",
"# runner_class? E.g. Local defines local stuff, Remote defines remote",
"# stuff? Doesn't help with the final config tree tho...",
"# TODO: as to that, this is a core problem, Fabric w... | 46.954545 | 21.5 |
def get_nearest(self, lat, lng, skip_cache=False):
"""
Calls `postcodes.get_nearest` but checks correctness of `lat`
and `long`, and by default utilises a local cache.
:param skip_cache: optional argument specifying whether to skip
the cache and make an explicit request.
:raises IllegalPointException: if the latitude or longitude
are out of bounds.
:returns: a dict of the nearest postcode's data.
"""
lat, lng = float(lat), float(lng)
self._check_point(lat, lng)
return self._lookup(skip_cache, get_nearest, lat, lng) | [
"def",
"get_nearest",
"(",
"self",
",",
"lat",
",",
"lng",
",",
"skip_cache",
"=",
"False",
")",
":",
"lat",
",",
"lng",
"=",
"float",
"(",
"lat",
")",
",",
"float",
"(",
"lng",
")",
"self",
".",
"_check_point",
"(",
"lat",
",",
"lng",
")",
"retu... | 41.125 | 20.375 |
def run(self, default=None):
"""Parse the command line arguments.
default:
Name of default command to run if no arguments are passed.
"""
parent, *sys_args = sys.argv
self.parent = Path(parent).stem
cmd_name = default
if sys_args:
cmd_name, *sys_args = sys_args
if cmd_name is None or cmd_name.lstrip("-") in HELP_COMMANDS:
self.show_help_root()
return
command = self.commands.get(cmd_name)
if command is None:
self.show_error(f"command `{cmd_name}` not found")
self.show_help_root()
return
args, opts = parse_args(sys_args)
return command.run(*args, **opts) | [
"def",
"run",
"(",
"self",
",",
"default",
"=",
"None",
")",
":",
"parent",
",",
"",
"*",
"sys_args",
"=",
"sys",
".",
"argv",
"self",
".",
"parent",
"=",
"Path",
"(",
"parent",
")",
".",
"stem",
"cmd_name",
"=",
"default",
"if",
"sys_args",
":",
... | 28.76 | 17.44 |
def _set_ospf1(self, v, load=False):
"""
Setter method for ospf1, mapped from YANG variable /routing_system/interface/ve/ip/interface_vlan_ospf_conf/ospf1 (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ospf1 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ospf1() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=ospf1.ospf1, is_container='container', presence=False, yang_name="ospf1", rest_name="ospf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Open Shortest Path First (OSPF).', u'cli-incomplete-no': None, u'display-when': u'/vcsmode/vcs-mode = "false"', u'sort-priority': u'130', u'alt-name': u'ospf'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ospf1 must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=ospf1.ospf1, is_container='container', presence=False, yang_name="ospf1", rest_name="ospf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Open Shortest Path First (OSPF).', u'cli-incomplete-no': None, u'display-when': u'/vcsmode/vcs-mode = "false"', u'sort-priority': u'130', u'alt-name': u'ospf'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)""",
})
self.__ospf1 = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_ospf1",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
... | 82.681818 | 39.636364 |
def get_catalogs_by_query(self, catalog_query):
"""Gets a list of ``Catalogs`` matching the given catalog query.
arg: catalog_query (osid.cataloging.CatalogQuery): the
catalog query
return: (osid.cataloging.CatalogList) - the returned
``CatalogList``
raise: NullArgument - ``catalog_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``catalog_query`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinQuerySession.get_bins_by_query_template
if self._catalog_session is not None:
return self._catalog_session.get_catalogs_by_query(catalog_query)
query_terms = dict(catalog_query._query_terms)
collection = JSONClientValidated('cataloging',
collection='Catalog',
runtime=self._runtime)
result = collection.find(query_terms).sort('_id', DESCENDING)
return objects.CatalogList(result, runtime=self._runtime) | [
"def",
"get_catalogs_by_query",
"(",
"self",
",",
"catalog_query",
")",
":",
"# Implemented from template for",
"# osid.resource.BinQuerySession.get_bins_by_query_template",
"if",
"self",
".",
"_catalog_session",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_catalog_se... | 48.88 | 20.04 |
def choose_locale(self, locale: Text) -> Text:
"""
Returns the best matching locale in what is available.
:param locale: Locale to match
:return: Locale to use
"""
if locale not in self._choice_cache:
locales = self.list_locales()
best_choice = locales[0]
best_level = 0
for candidate in locales:
cmp = compare_locales(locale, candidate)
if cmp > best_level:
best_choice = candidate
best_level = cmp
self._choice_cache[locale] = best_choice
return self._choice_cache[locale] | [
"def",
"choose_locale",
"(",
"self",
",",
"locale",
":",
"Text",
")",
"->",
"Text",
":",
"if",
"locale",
"not",
"in",
"self",
".",
"_choice_cache",
":",
"locales",
"=",
"self",
".",
"list_locales",
"(",
")",
"best_choice",
"=",
"locales",
"[",
"0",
"]"... | 26.916667 | 16.083333 |
def get_JWT(url, address=None):
"""
Given a URL, fetch and decode the JWT it points to.
If address is given, then authenticate the JWT with the address.
Return None if we could not fetch it, or unable to authenticate it.
NOTE: the URL must be usable by the requests library
"""
jwt_txt = None
jwt = None
log.debug("Try {}".format(url))
# special case: handle file://
urlinfo = urllib2.urlparse.urlparse(url)
if urlinfo.scheme == 'file':
# points to a path on disk
try:
with open(urlinfo.path, 'r') as f:
jwt_txt = f.read()
except Exception as e:
if BLOCKSTACK_TEST:
log.exception(e)
log.warning("Failed to read {}".format(url))
return None
else:
# http(s) URL or similar
try:
resp = requests.get(url)
assert resp.status_code == 200, 'Bad status code on {}: {}'.format(url, resp.status_code)
jwt_txt = resp.text
except Exception as e:
if BLOCKSTACK_TEST:
log.exception(e)
log.warning("Unable to resolve {}".format(url))
return None
try:
# one of two things are possible:
# * this is a JWT string
# * this is a serialized JSON string whose first item is a dict that has 'token' as key,
# and that key is a JWT string.
try:
jwt_txt = json.loads(jwt_txt)[0]['token']
except:
pass
jwt = jsontokens.decode_token(jwt_txt)
except Exception as e:
if BLOCKSTACK_TEST:
log.exception(e)
log.warning("Unable to decode token at {}".format(url))
return None
try:
# must be well-formed
assert isinstance(jwt, dict)
assert 'payload' in jwt, jwt
assert isinstance(jwt['payload'], dict)
assert 'issuer' in jwt['payload'], jwt
assert isinstance(jwt['payload']['issuer'], dict)
assert 'publicKey' in jwt['payload']['issuer'], jwt
assert virtualchain.ecdsalib.ecdsa_public_key(str(jwt['payload']['issuer']['publicKey']))
except AssertionError as ae:
if BLOCKSTACK_TEST or BLOCKSTACK_DEBUG:
log.exception(ae)
log.warning("JWT at {} is malformed".format(url))
return None
if address is not None:
public_key = str(jwt['payload']['issuer']['publicKey'])
addrs = [virtualchain.address_reencode(virtualchain.ecdsalib.ecdsa_public_key(keylib.key_formatting.decompress(public_key)).address()),
virtualchain.address_reencode(virtualchain.ecdsalib.ecdsa_public_key(keylib.key_formatting.compress(public_key)).address())]
if virtualchain.address_reencode(address) not in addrs:
# got a JWT, but it doesn't match the address
log.warning("Found JWT at {}, but its public key has addresses {} and {} (expected {})".format(url, addrs[0], addrs[1], address))
return None
verifier = jsontokens.TokenVerifier()
if not verifier.verify(jwt_txt, public_key):
# got a JWT, and the address matches, but the signature does not
log.warning("Found JWT at {}, but it was not signed by {} ({})".format(url, public_key, address))
return None
return jwt | [
"def",
"get_JWT",
"(",
"url",
",",
"address",
"=",
"None",
")",
":",
"jwt_txt",
"=",
"None",
"jwt",
"=",
"None",
"log",
".",
"debug",
"(",
"\"Try {}\"",
".",
"format",
"(",
"url",
")",
")",
"# special case: handle file://",
"urlinfo",
"=",
"urllib2",
"."... | 34.347368 | 24.221053 |
async def _dump_container(self, writer, container, container_type, params=None):
"""
Dumps container of elements to the writer.
:param writer:
:param container:
:param container_type:
:param params:
:return:
"""
await self._dump_container_size(writer, len(container), container_type)
elem_type = container_elem_type(container_type, params)
for idx, elem in enumerate(container):
try:
self.tracker.push_index(idx)
await self.dump_field(
writer, elem, elem_type, params[1:] if params else None
)
self.tracker.pop()
except Exception as e:
raise helpers.ArchiveException(e, tracker=self.tracker) from e | [
"async",
"def",
"_dump_container",
"(",
"self",
",",
"writer",
",",
"container",
",",
"container_type",
",",
"params",
"=",
"None",
")",
":",
"await",
"self",
".",
"_dump_container_size",
"(",
"writer",
",",
"len",
"(",
"container",
")",
",",
"container_type... | 34.391304 | 20.043478 |
def call_backward(self, proj_data, out=None):
"""Run an ASTRA back-projection on the given data using the GPU.
Parameters
----------
proj_data : ``proj_space`` element
Projection data to which the back-projector is applied.
out : ``reco_space`` element, optional
Element of the reconstruction space to which the result is written.
If ``None``, an element in ``reco_space`` is created.
Returns
-------
out : ``reco_space`` element
Reconstruction data resulting from the application of the
back-projector. If ``out`` was provided, the returned object is a
reference to it.
"""
with self._mutex:
assert proj_data in self.proj_space
if out is not None:
assert out in self.reco_space
else:
out = self.reco_space.element()
# Copy data to GPU memory
if self.geometry.ndim == 2:
astra.data2d.store(self.sino_id, proj_data.asarray())
elif self.geometry.ndim == 3:
shape = (-1,) + self.geometry.det_partition.shape
reshaped_proj_data = proj_data.asarray().reshape(shape)
swapped_proj_data = np.ascontiguousarray(
np.swapaxes(reshaped_proj_data, 0, 1))
astra.data3d.store(self.sino_id, swapped_proj_data)
# Run algorithm
astra.algorithm.run(self.algo_id)
# Copy result to CPU memory
out[:] = self.out_array
# Fix scaling to weight by pixel/voxel size
out *= astra_cuda_bp_scaling_factor(
self.proj_space, self.reco_space, self.geometry)
return out | [
"def",
"call_backward",
"(",
"self",
",",
"proj_data",
",",
"out",
"=",
"None",
")",
":",
"with",
"self",
".",
"_mutex",
":",
"assert",
"proj_data",
"in",
"self",
".",
"proj_space",
"if",
"out",
"is",
"not",
"None",
":",
"assert",
"out",
"in",
"self",
... | 38.130435 | 18.847826 |
def avail_modules(desc=False):
'''
List available modules in registered Powershell module repositories.
:param desc: If ``True``, the verbose description will be returned.
:type desc: ``bool``
CLI Example:
.. code-block:: bash
salt 'win01' psget.avail_modules
salt 'win01' psget.avail_modules desc=True
'''
cmd = 'Find-Module'
modules = _pshell(cmd)
names = []
if desc:
names = {}
for module in modules:
if desc:
names[module['Name']] = module['Description']
continue
names.append(module['Name'])
return names | [
"def",
"avail_modules",
"(",
"desc",
"=",
"False",
")",
":",
"cmd",
"=",
"'Find-Module'",
"modules",
"=",
"_pshell",
"(",
"cmd",
")",
"names",
"=",
"[",
"]",
"if",
"desc",
":",
"names",
"=",
"{",
"}",
"for",
"module",
"in",
"modules",
":",
"if",
"d... | 24.24 | 22.96 |
def parse_ports(ports_text):
"""Parse ports text
e.g. ports_text = "12345,13000-15000,20000-30000"
"""
ports_set = set()
for bit in ports_text.split(','):
if '-' in bit:
low, high = bit.split('-', 1)
ports_set = ports_set.union(range(int(low), int(high) + 1))
else:
ports_set.add(int(bit))
return sorted(list(ports_set)) | [
"def",
"parse_ports",
"(",
"ports_text",
")",
":",
"ports_set",
"=",
"set",
"(",
")",
"for",
"bit",
"in",
"ports_text",
".",
"split",
"(",
"','",
")",
":",
"if",
"'-'",
"in",
"bit",
":",
"low",
",",
"high",
"=",
"bit",
".",
"split",
"(",
"'-'",
"... | 29.615385 | 13.461538 |
def helper_for_plot_data(self, X, plot_limits, visible_dims, fixed_inputs, resolution):
"""
Figure out the data, free_dims and create an Xgrid for
the prediction.
This is only implemented for two dimensions for now!
"""
#work out what the inputs are for plotting (1D or 2D)
if fixed_inputs is None:
fixed_inputs = []
fixed_dims = get_fixed_dims(fixed_inputs)
free_dims = get_free_dims(self, visible_dims, fixed_dims)
if len(free_dims) == 1:
#define the frame on which to plot
resolution = resolution or 200
Xnew, xmin, xmax = x_frame1D(X[:,free_dims], plot_limits=plot_limits, resolution=resolution)
Xgrid = np.zeros((Xnew.shape[0],self.input_dim))
Xgrid[:,free_dims] = Xnew
for i,v in fixed_inputs:
Xgrid[:,i] = v
x = Xgrid
y = None
elif len(free_dims) == 2:
#define the frame for plotting on
resolution = resolution or 35
Xnew, x, y, xmin, xmax = x_frame2D(X[:,free_dims], plot_limits, resolution)
Xgrid = np.zeros((Xnew.shape[0], self.input_dim))
Xgrid[:,free_dims] = Xnew
#xmin = Xgrid.min(0)[free_dims]
#xmax = Xgrid.max(0)[free_dims]
for i,v in fixed_inputs:
Xgrid[:,i] = v
else:
raise TypeError("calculated free_dims {} from visible_dims {} and fixed_dims {} is neither 1D nor 2D".format(free_dims, visible_dims, fixed_dims))
return fixed_dims, free_dims, Xgrid, x, y, xmin, xmax, resolution | [
"def",
"helper_for_plot_data",
"(",
"self",
",",
"X",
",",
"plot_limits",
",",
"visible_dims",
",",
"fixed_inputs",
",",
"resolution",
")",
":",
"#work out what the inputs are for plotting (1D or 2D)",
"if",
"fixed_inputs",
"is",
"None",
":",
"fixed_inputs",
"=",
"[",... | 41.25 | 19.472222 |
def make_request(name, params=None, version="V001", key=None, api_type="web",
fetcher=get_page, base=None, language="en_us"):
"""
Make an API request
"""
params = params or {}
params["key"] = key or API_KEY
params["language"] = language
if not params["key"]:
raise ValueError("API key not set, please set DOTA2_API_KEY")
url = url_map("%s%s/%s/" % (base or BASE_URL, name, version), params)
return fetcher(url) | [
"def",
"make_request",
"(",
"name",
",",
"params",
"=",
"None",
",",
"version",
"=",
"\"V001\"",
",",
"key",
"=",
"None",
",",
"api_type",
"=",
"\"web\"",
",",
"fetcher",
"=",
"get_page",
",",
"base",
"=",
"None",
",",
"language",
"=",
"\"en_us\"",
")"... | 30.666667 | 21.333333 |
def ancestors(self):
"""A list of this browse node's ancestors in the browse node tree.
:return:
List of :class:`~.AmazonBrowseNode` objects.
"""
ancestors = []
node = self.ancestor
while node is not None:
ancestors.append(node)
node = node.ancestor
return ancestors | [
"def",
"ancestors",
"(",
"self",
")",
":",
"ancestors",
"=",
"[",
"]",
"node",
"=",
"self",
".",
"ancestor",
"while",
"node",
"is",
"not",
"None",
":",
"ancestors",
".",
"append",
"(",
"node",
")",
"node",
"=",
"node",
".",
"ancestor",
"return",
"anc... | 29 | 14.083333 |
def batch_create_read_session_streams(
self,
session,
requested_streams,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates additional streams for a ReadSession. This API can be used to
dynamically adjust the parallelism of a batch processing task upwards by
adding additional workers.
Example:
>>> from google.cloud import bigquery_storage_v1beta1
>>>
>>> client = bigquery_storage_v1beta1.BigQueryStorageClient()
>>>
>>> # TODO: Initialize `session`:
>>> session = {}
>>>
>>> # TODO: Initialize `requested_streams`:
>>> requested_streams = 0
>>>
>>> response = client.batch_create_read_session_streams(session, requested_streams)
Args:
session (Union[dict, ~google.cloud.bigquery_storage_v1beta1.types.ReadSession]): Required. Must be a non-expired session obtained from a call to
CreateReadSession. Only the name field needs to be set.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigquery_storage_v1beta1.types.ReadSession`
requested_streams (int): Required. Number of new streams requested. Must be positive.
Number of added streams may be less than this, see CreateReadSessionRequest
for more information.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.bigquery_storage_v1beta1.types.BatchCreateReadSessionStreamsResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "batch_create_read_session_streams" not in self._inner_api_calls:
self._inner_api_calls[
"batch_create_read_session_streams"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.batch_create_read_session_streams,
default_retry=self._method_configs[
"BatchCreateReadSessionStreams"
].retry,
default_timeout=self._method_configs[
"BatchCreateReadSessionStreams"
].timeout,
client_info=self._client_info,
)
request = storage_pb2.BatchCreateReadSessionStreamsRequest(
session=session, requested_streams=requested_streams
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("session.name", session.name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( # pragma: no cover
routing_header
)
metadata.append(routing_metadata) # pragma: no cover
return self._inner_api_calls["batch_create_read_session_streams"](
request, retry=retry, timeout=timeout, metadata=metadata
) | [
"def",
"batch_create_read_session_streams",
"(",
"self",
",",
"session",
",",
"requested_streams",
",",
"retry",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"timeout",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".... | 44.738636 | 26.329545 |
def exons(context, build):
"""Delete all exons in the database"""
LOG.info("Running scout delete exons")
adapter = context.obj['adapter']
adapter.drop_exons(build) | [
"def",
"exons",
"(",
"context",
",",
"build",
")",
":",
"LOG",
".",
"info",
"(",
"\"Running scout delete exons\"",
")",
"adapter",
"=",
"context",
".",
"obj",
"[",
"'adapter'",
"]",
"adapter",
".",
"drop_exons",
"(",
"build",
")"
] | 29.166667 | 11.833333 |
def _setPath(cls):
""" Sets the path of the custom configuration file
"""
cls._path = os.path.join(os.environ['NTA_DYNAMIC_CONF_DIR'],
cls.customFileName) | [
"def",
"_setPath",
"(",
"cls",
")",
":",
"cls",
".",
"_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"environ",
"[",
"'NTA_DYNAMIC_CONF_DIR'",
"]",
",",
"cls",
".",
"customFileName",
")"
] | 38.2 | 10.8 |
def attach(cls, training_job_name, sagemaker_session=None, model_channel_name='model'):
"""Attach to an existing training job.
Create an Estimator bound to an existing training job, each subclass is responsible to implement
``_prepare_init_params_from_job_description()`` as this method delegates the actual conversion of a training
job description to the arguments that the class constructor expects. After attaching, if the training job has a
Complete status, it can be ``deploy()`` ed to create a SageMaker Endpoint and return a ``Predictor``.
If the training job is in progress, attach will block and display log messages
from the training job, until the training job completes.
Args:
training_job_name (str): The name of the training job to attach to.
sagemaker_session (sagemaker.session.Session): Session object which manages interactions with
Amazon SageMaker APIs and any other AWS services needed. If not specified, the estimator creates one
using the default AWS configuration chain.
model_channel_name (str): Name of the channel where pre-trained model data will be downloaded (default:
'model'). If no channel with the same name exists in the training job, this option will be ignored.
Examples:
>>> my_estimator.fit(wait=False)
>>> training_job_name = my_estimator.latest_training_job.name
Later on:
>>> attached_estimator = Estimator.attach(training_job_name)
>>> attached_estimator.deploy()
Returns:
Instance of the calling ``Estimator`` Class with the attached training job.
"""
estimator = super(Framework, cls).attach(training_job_name, sagemaker_session, model_channel_name)
# pylint gets confused thinking that estimator is an EstimatorBase instance, but it actually
# is a Framework or any of its derived classes. We can safely ignore the no-member errors.
estimator.uploaded_code = UploadedCode(
estimator.source_dir, estimator.entry_point) # pylint: disable=no-member
return estimator | [
"def",
"attach",
"(",
"cls",
",",
"training_job_name",
",",
"sagemaker_session",
"=",
"None",
",",
"model_channel_name",
"=",
"'model'",
")",
":",
"estimator",
"=",
"super",
"(",
"Framework",
",",
"cls",
")",
".",
"attach",
"(",
"training_job_name",
",",
"sa... | 60.444444 | 40.472222 |
def MaxPooling(
inputs,
pool_size,
strides=None,
padding='valid',
data_format='channels_last'):
"""
Same as `tf.layers.MaxPooling2D`. Default strides is equal to pool_size.
"""
if strides is None:
strides = pool_size
layer = tf.layers.MaxPooling2D(pool_size, strides, padding=padding, data_format=data_format)
ret = layer.apply(inputs, scope=tf.get_variable_scope())
return tf.identity(ret, name='output') | [
"def",
"MaxPooling",
"(",
"inputs",
",",
"pool_size",
",",
"strides",
"=",
"None",
",",
"padding",
"=",
"'valid'",
",",
"data_format",
"=",
"'channels_last'",
")",
":",
"if",
"strides",
"is",
"None",
":",
"strides",
"=",
"pool_size",
"layer",
"=",
"tf",
... | 33.428571 | 18.142857 |
def ReadPreprocessingInformation(self, knowledge_base):
"""Reads preprocessing information.
The preprocessing information contains the system configuration which
contains information about various system specific configuration data,
for example the user accounts.
Args:
knowledge_base (KnowledgeBase): is used to store the preprocessing
information.
Raises:
IOError: if the storage type does not support writing preprocessing
information or when the storage writer is closed.
OSError: if the storage type does not support writing preprocessing
information or when the storage writer is closed.
"""
self._RaiseIfNotWritable()
if self._storage_type != definitions.STORAGE_TYPE_SESSION:
raise IOError('Preprocessing information not supported by storage type.') | [
"def",
"ReadPreprocessingInformation",
"(",
"self",
",",
"knowledge_base",
")",
":",
"self",
".",
"_RaiseIfNotWritable",
"(",
")",
"if",
"self",
".",
"_storage_type",
"!=",
"definitions",
".",
"STORAGE_TYPE_SESSION",
":",
"raise",
"IOError",
"(",
"'Preprocessing inf... | 39.571429 | 25.380952 |
def bifurcate_base(cls, newick):
""" Rewrites a newick string so that the base is a bifurcation
(rooted tree) """
t = cls(newick)
t._tree.resolve_polytomies()
return t.newick | [
"def",
"bifurcate_base",
"(",
"cls",
",",
"newick",
")",
":",
"t",
"=",
"cls",
"(",
"newick",
")",
"t",
".",
"_tree",
".",
"resolve_polytomies",
"(",
")",
"return",
"t",
".",
"newick"
] | 34.833333 | 7.666667 |
def to_utf8(x):
"""
Tries to utf-8 encode x when possible
If x is a string returns it encoded, otherwise tries to iter x and
encode utf-8 all strings it contains, returning a list.
"""
if isinstance(x, basestring):
return x.encode('utf-8') if isinstance(x, unicode) else x
try:
l = iter(x)
except TypeError:
return x
return [to_utf8(i) for i in l] | [
"def",
"to_utf8",
"(",
"x",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"basestring",
")",
":",
"return",
"x",
".",
"encode",
"(",
"'utf-8'",
")",
"if",
"isinstance",
"(",
"x",
",",
"unicode",
")",
"else",
"x",
"try",
":",
"l",
"=",
"iter",
"(",... | 28.428571 | 17.857143 |
def _input_as_parameters(self, data):
""" Set the input path (a fasta filepath)
"""
# The list of values which can be passed on a per-run basis
allowed_values = ['--input', '--uc', '--fastapairs',
'--uc2clstr', '--output', '--mergesort']
unsupported_parameters = set(data.keys()) - set(allowed_values)
if unsupported_parameters:
raise ApplicationError(
"Unsupported parameter(s) passed when calling uclust: %s" %
' '.join(unsupported_parameters))
for v in allowed_values:
# turn the parameter off so subsequent runs are not
# affected by parameter settings from previous runs
self.Parameters[v].off()
if v in data:
# turn the parameter on if specified by the user
self.Parameters[v].on(data[v])
return '' | [
"def",
"_input_as_parameters",
"(",
"self",
",",
"data",
")",
":",
"# The list of values which can be passed on a per-run basis",
"allowed_values",
"=",
"[",
"'--input'",
",",
"'--uc'",
",",
"'--fastapairs'",
",",
"'--uc2clstr'",
",",
"'--output'",
",",
"'--mergesort'",
... | 40.909091 | 18.545455 |
def update_group(self, ID, data):
"""Update a Group."""
# http://teampasswordmanager.com/docs/api-groups/#update_group
log.info('Update group %s with %s' % (ID, data))
self.put('groups/%s.json' % ID, data) | [
"def",
"update_group",
"(",
"self",
",",
"ID",
",",
"data",
")",
":",
"# http://teampasswordmanager.com/docs/api-groups/#update_group",
"log",
".",
"info",
"(",
"'Update group %s with %s'",
"%",
"(",
"ID",
",",
"data",
")",
")",
"self",
".",
"put",
"(",
"'groups... | 46.6 | 11.6 |
def aes_cbc_pkcs7_decrypt(key, data, iv):
"""
Decrypts AES ciphertext in CBC mode using a 128, 192 or 256 bit key
:param key:
The encryption key - a byte string either 16, 24 or 32 bytes long
:param data:
The ciphertext - a byte string
:param iv:
The initialization vector - a byte string 16-bytes long
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the plaintext
"""
if len(key) not in [16, 24, 32]:
raise ValueError(pretty_message(
'''
key must be either 16, 24 or 32 bytes (128, 192 or 256 bits)
long - is %s
''',
len(key)
))
if len(iv) != 16:
raise ValueError(pretty_message(
'''
iv must be 16 bytes long - is %s
''',
len(iv)
))
return _decrypt(Security.kSecAttrKeyTypeAES, key, data, iv, Security.kSecPaddingPKCS7Key) | [
"def",
"aes_cbc_pkcs7_decrypt",
"(",
"key",
",",
"data",
",",
"iv",
")",
":",
"if",
"len",
"(",
"key",
")",
"not",
"in",
"[",
"16",
",",
"24",
",",
"32",
"]",
":",
"raise",
"ValueError",
"(",
"pretty_message",
"(",
"'''\n key must be either 16, ... | 27.65 | 23.95 |
def circ_corrcc(x, y, tail='two-sided'):
"""Correlation coefficient between two circular variables.
Parameters
----------
x : np.array
First circular variable (expressed in radians)
y : np.array
Second circular variable (expressed in radians)
tail : string
Specify whether to return 'one-sided' or 'two-sided' p-value.
Returns
-------
r : float
Correlation coefficient
pval : float
Uncorrected p-value
Notes
-----
Adapted from the CircStats MATLAB toolbox (Berens 2009).
Use the np.deg2rad function to convert angles from degrees to radians.
Please note that NaN are automatically removed.
Examples
--------
Compute the r and p-value of two circular variables
>>> from pingouin import circ_corrcc
>>> x = [0.785, 1.570, 3.141, 3.839, 5.934]
>>> y = [0.593, 1.291, 2.879, 3.892, 6.108]
>>> r, pval = circ_corrcc(x, y)
>>> print(r, pval)
0.942 0.06579836070349088
"""
from scipy.stats import norm
x = np.asarray(x)
y = np.asarray(y)
# Check size
if x.size != y.size:
raise ValueError('x and y must have the same length.')
# Remove NA
x, y = remove_na(x, y, paired=True)
n = x.size
# Compute correlation coefficient
x_sin = np.sin(x - circmean(x))
y_sin = np.sin(y - circmean(y))
# Similar to np.corrcoef(x_sin, y_sin)[0][1]
r = np.sum(x_sin * y_sin) / np.sqrt(np.sum(x_sin**2) * np.sum(y_sin**2))
# Compute T- and p-values
tval = np.sqrt((n * (x_sin**2).mean() * (y_sin**2).mean())
/ np.mean(x_sin**2 * y_sin**2)) * r
# Approximately distributed as a standard normal
pval = 2 * norm.sf(abs(tval))
pval = pval / 2 if tail == 'one-sided' else pval
return np.round(r, 3), pval | [
"def",
"circ_corrcc",
"(",
"x",
",",
"y",
",",
"tail",
"=",
"'two-sided'",
")",
":",
"from",
"scipy",
".",
"stats",
"import",
"norm",
"x",
"=",
"np",
".",
"asarray",
"(",
"x",
")",
"y",
"=",
"np",
".",
"asarray",
"(",
"y",
")",
"# Check size",
"i... | 27.546875 | 20.96875 |
def alter(self, id_filter, name, description):
"""Change Filter by the identifier.
:param id_filter: Identifier of the Filter. Integer value and greater than zero.
:param name: Name. String with a maximum of 50 characters and respect [a-zA-Z\_-]
:param description: Description. String with a maximum of 50 characters and respect [a-zA-Z\_-]
:return: None
:raise InvalidParameterError: Filter identifier is null and invalid.
:raise InvalidParameterError: The value of name or description is invalid.
:raise FilterNotFoundError: Filter not registered.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_filter):
raise InvalidParameterError(
u'The identifier of Filter is invalid or was not informed.')
filter_map = dict()
filter_map['name'] = name
filter_map['description'] = description
url = 'filter/' + str(id_filter) + '/'
code, xml = self.submit({'filter': filter_map}, 'PUT', url)
return self.response(code, xml) | [
"def",
"alter",
"(",
"self",
",",
"id_filter",
",",
"name",
",",
"description",
")",
":",
"if",
"not",
"is_valid_int_param",
"(",
"id_filter",
")",
":",
"raise",
"InvalidParameterError",
"(",
"u'The identifier of Filter is invalid or was not informed.'",
")",
"filter_... | 40.689655 | 26.413793 |
def tag_labels(self):
"""Tag named entity labels in the ``words`` layer."""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
if self.__ner_tagger is None:
self.__ner_tagger = load_default_ner_tagger()
self.__ner_tagger.tag_document(self)
return self | [
"def",
"tag_labels",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_tagged",
"(",
"ANALYSIS",
")",
":",
"self",
".",
"tag_analysis",
"(",
")",
"if",
"self",
".",
"__ner_tagger",
"is",
"None",
":",
"self",
".",
"__ner_tagger",
"=",
"load_default_ner... | 38.75 | 9.125 |
def filterAcceptsRow(self, row, parentindex):
"""Return True, if the filter accepts the given row of the parent
:param row: the row to filter
:type row: :class:`int`
:param parentindex: the parent index
:type parentindex: :class:`QtCore.QModelIndex`
:returns: True, if the filter accepts the row
:rtype: :class:`bool`
:raises: None
"""
if not super(ReftrackSortFilterModel, self).filterAcceptsRow(row, parentindex):
return False
if parentindex.isValid():
m = parentindex.model()
else:
m = self.sourceModel()
i = m.index(row, 18, parentindex)
reftrack = i.data(REFTRACK_OBJECT_ROLE)
if not reftrack:
return True
else:
return self.filter_accept_reftrack(reftrack) | [
"def",
"filterAcceptsRow",
"(",
"self",
",",
"row",
",",
"parentindex",
")",
":",
"if",
"not",
"super",
"(",
"ReftrackSortFilterModel",
",",
"self",
")",
".",
"filterAcceptsRow",
"(",
"row",
",",
"parentindex",
")",
":",
"return",
"False",
"if",
"parentindex... | 34.5 | 14.583333 |
def get_base_level(text, upper_is_rtl=False):
"""Get the paragraph base embedding level. Returns 0 for LTR,
1 for RTL.
`text` a unicode object.
Set `upper_is_rtl` to True to treat upper case chars as strong 'R'
for debugging (default: False).
"""
base_level = None
prev_surrogate = False
# P2
for _ch in text:
# surrogate in case of ucs2
if _IS_UCS2 and (_SURROGATE_MIN <= ord(_ch) <= _SURROGATE_MAX):
prev_surrogate = _ch
continue
elif prev_surrogate:
_ch = prev_surrogate + _ch
prev_surrogate = False
# treat upper as RTL ?
if upper_is_rtl and _ch.isupper():
base_level = 1
break
bidi_type = bidirectional(_ch)
if bidi_type in ('AL', 'R'):
base_level = 1
break
elif bidi_type == 'L':
base_level = 0
break
# P3
if base_level is None:
base_level = 0
return base_level | [
"def",
"get_base_level",
"(",
"text",
",",
"upper_is_rtl",
"=",
"False",
")",
":",
"base_level",
"=",
"None",
"prev_surrogate",
"=",
"False",
"# P2",
"for",
"_ch",
"in",
"text",
":",
"# surrogate in case of ucs2",
"if",
"_IS_UCS2",
"and",
"(",
"_SURROGATE_MIN",
... | 22.25 | 20.659091 |
def usnjrnl_timeline(self):
"""Iterates over the changes occurred within the filesystem.
Yields UsnJrnlEvent namedtuples containing:
file_reference_number: known in Unix FS as inode.
path: full path of the file.
size: size of the file in bytes if recoverable.
allocated: whether the file exists or it has been deleted.
timestamp: timespamp of the change.
changes: list of changes applied to the file.
attributes: list of file attributes.
"""
filesystem_content = defaultdict(list)
self.logger.debug("Extracting Update Sequence Number journal.")
journal = self._read_journal()
for dirent in self._visit_filesystem():
filesystem_content[dirent.inode].append(dirent)
self.logger.debug("Generating timeline.")
yield from generate_timeline(journal, filesystem_content) | [
"def",
"usnjrnl_timeline",
"(",
"self",
")",
":",
"filesystem_content",
"=",
"defaultdict",
"(",
"list",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Extracting Update Sequence Number journal.\"",
")",
"journal",
"=",
"self",
".",
"_read_journal",
"(",
")",
... | 36.56 | 20.2 |
def differing_blocks(self):
"""
:returns: A list of block matches which appear to differ
"""
differing_blocks = []
for (block_a, block_b) in self._block_matches:
if not self.blocks_probably_identical(block_a, block_b):
differing_blocks.append((block_a, block_b))
return differing_blocks | [
"def",
"differing_blocks",
"(",
"self",
")",
":",
"differing_blocks",
"=",
"[",
"]",
"for",
"(",
"block_a",
",",
"block_b",
")",
"in",
"self",
".",
"_block_matches",
":",
"if",
"not",
"self",
".",
"blocks_probably_identical",
"(",
"block_a",
",",
"block_b",
... | 39.333333 | 13.111111 |
def competition_submit_cli(self,
file_name,
message,
competition,
competition_opt=None,
quiet=False):
""" submit a competition using the client. Arguments are same as for
competition_submit, except for extra arguments provided here.
Parameters
==========
competition_opt: an alternative competition option provided by cli
"""
competition = competition or competition_opt
try:
submit_result = self.competition_submit(file_name, message,
competition, quiet)
except ApiException as e:
if e.status == 404:
print('Could not find competition - please verify that you '
'entered the correct competition ID and that the '
'competition is still accepting submissions.')
return None
else:
raise e
return submit_result | [
"def",
"competition_submit_cli",
"(",
"self",
",",
"file_name",
",",
"message",
",",
"competition",
",",
"competition_opt",
"=",
"None",
",",
"quiet",
"=",
"False",
")",
":",
"competition",
"=",
"competition",
"or",
"competition_opt",
"try",
":",
"submit_result"... | 44.6 | 16.56 |
def proper_kwargs(self, section, kwargs):
"""Returns kwargs updated with proper meta variables (like __assistant__).
If this method is run repeatedly with the same section and the same kwargs,
it always modifies kwargs in the same way.
"""
kwargs['__section__'] = section
kwargs['__assistant__'] = self
kwargs['__env__'] = copy.deepcopy(os.environ)
kwargs['__files__'] = [self._files]
kwargs['__files_dir__'] = [self.files_dir]
kwargs['__sourcefiles__'] = [self.path]
# if any of the following fails, DA should keep running
for i in ['system_name', 'system_version', 'distro_name', 'distro_version']:
try:
val = getattr(utils, 'get_' + i)()
except:
val = ''
kwargs['__' + i + '__'] = val | [
"def",
"proper_kwargs",
"(",
"self",
",",
"section",
",",
"kwargs",
")",
":",
"kwargs",
"[",
"'__section__'",
"]",
"=",
"section",
"kwargs",
"[",
"'__assistant__'",
"]",
"=",
"self",
"kwargs",
"[",
"'__env__'",
"]",
"=",
"copy",
".",
"deepcopy",
"(",
"os... | 46.333333 | 12.722222 |
def parse_compounds(compound_info, case_id, variant_type):
"""Get a list with compounds objects for this variant.
Arguments:
compound_info(str): A Variant dictionary
case_id (str): unique family id
variant_type(str): 'research' or 'clinical'
Returns:
compounds(list(dict)): A list of compounds
"""
# We need the case to construct the correct id
compounds = []
if compound_info:
for family_info in compound_info.split(','):
splitted_entry = family_info.split(':')
# This is the family id
if splitted_entry[0] == case_id:
for compound in splitted_entry[1].split('|'):
splitted_compound = compound.split('>')
compound_obj = {}
compound_name = splitted_compound[0]
compound_obj['variant'] = generate_md5_key(compound_name.split('_') +
[variant_type, case_id])
try:
compound_score = float(splitted_compound[1])
except (TypeError, IndexError):
compound_score = 0.0
compound_obj['score'] = compound_score
compound_obj['display_name'] = compound_name
compounds.append(compound_obj)
return compounds | [
"def",
"parse_compounds",
"(",
"compound_info",
",",
"case_id",
",",
"variant_type",
")",
":",
"# We need the case to construct the correct id",
"compounds",
"=",
"[",
"]",
"if",
"compound_info",
":",
"for",
"family_info",
"in",
"compound_info",
".",
"split",
"(",
"... | 38.888889 | 19.916667 |
def on_press(self, event):
'on but-ton press we will see if the mouse is over us and store data'
if event.inaxes != self.ax:
return
# contains, attrd = self.rect.contains(event)
# if not contains: return
# print('event contains', self.rect.xy)
# x0, y0 = self.rect.xy
self.press = [event.xdata], [event.ydata], event.button | [
"def",
"on_press",
"(",
"self",
",",
"event",
")",
":",
"if",
"event",
".",
"inaxes",
"!=",
"self",
".",
"ax",
":",
"return",
"# contains, attrd = self.rect.contains(event)\r",
"# if not contains: return\r",
"# print('event contains', self.rect.xy)\r",
"# x0, y0 = self.rect... | 43.444444 | 15 |
def _getTimeStamps(self, table):
''' get time stamps '''
timeStamps = []
for th in table.thead.tr.contents:
if '\n' != th:
timeStamps.append(th.getText())
return timeStamps[1:] | [
"def",
"_getTimeStamps",
"(",
"self",
",",
"table",
")",
":",
"timeStamps",
"=",
"[",
"]",
"for",
"th",
"in",
"table",
".",
"thead",
".",
"tr",
".",
"contents",
":",
"if",
"'\\n'",
"!=",
"th",
":",
"timeStamps",
".",
"append",
"(",
"th",
".",
"getT... | 29.625 | 13.125 |
def drop_zombies(feed: "Feed") -> "Feed":
"""
In the given "Feed", drop stops with no stop times,
trips with no stop times, shapes with no trips,
routes with no trips, and services with no trips, in that order.
Return the resulting "Feed".
"""
feed = feed.copy()
# Drop stops of location type 0 that lack stop times
ids = feed.stop_times["stop_id"].unique()
f = feed.stops
cond = f["stop_id"].isin(ids)
if "location_type" in f.columns:
cond |= f["location_type"] != 0
feed.stops = f[cond].copy()
# Drop trips with no stop times
ids = feed.stop_times["trip_id"].unique()
f = feed.trips
feed.trips = f[f["trip_id"].isin(ids)]
# Drop shapes with no trips
ids = feed.trips["shape_id"].unique()
f = feed.shapes
if f is not None:
feed.shapes = f[f["shape_id"].isin(ids)]
# Drop routes with no trips
ids = feed.trips["route_id"].unique()
f = feed.routes
feed.routes = f[f["route_id"].isin(ids)]
# Drop services with no trips
ids = feed.trips["service_id"].unique()
if feed.calendar is not None:
f = feed.calendar
feed.calendar = f[f["service_id"].isin(ids)]
if feed.calendar_dates is not None:
f = feed.calendar_dates
feed.calendar_dates = f[f["service_id"].isin(ids)]
return feed | [
"def",
"drop_zombies",
"(",
"feed",
":",
"\"Feed\"",
")",
"->",
"\"Feed\"",
":",
"feed",
"=",
"feed",
".",
"copy",
"(",
")",
"# Drop stops of location type 0 that lack stop times",
"ids",
"=",
"feed",
".",
"stop_times",
"[",
"\"stop_id\"",
"]",
".",
"unique",
... | 30.348837 | 14.162791 |
def add(self, it: Signature) -> bool:
""" Add it to the Set """
if isinstance(it, Scope):
it.state = StateScope.EMBEDDED
txt = it.internal_name()
it.set_parent(self)
if self.is_namespace:
txt = it.internal_name()
if txt == "":
txt = '_' + str(len(self._hsig))
if txt in self._hsig:
raise KeyError("Already exists %s" % txt)
self._hsig[txt] = it
self.__update_count()
return True | [
"def",
"add",
"(",
"self",
",",
"it",
":",
"Signature",
")",
"->",
"bool",
":",
"if",
"isinstance",
"(",
"it",
",",
"Scope",
")",
":",
"it",
".",
"state",
"=",
"StateScope",
".",
"EMBEDDED",
"txt",
"=",
"it",
".",
"internal_name",
"(",
")",
"it",
... | 32.8 | 9.266667 |
def patch_module(module, name, replacement, original=UNSPECIFIED, aliases=True, location=None, **_bogus_options):
"""
Low-level attribute patcher.
:param module module: Object to patch.
:param str name: Attribute to patch
:param replacement: The replacement value.
:param original: The original value (in case the object beeing patched uses descriptors or is plain weird).
:param bool aliases: If ``True`` patch all the attributes that have the same original value.
:returns: An :obj:`aspectlib.Rollback` object.
"""
rollback = Rollback()
seen = False
original = getattr(module, name) if original is UNSPECIFIED else original
location = module.__name__ if hasattr(module, '__name__') else type(module).__module__
target = module.__name__ if hasattr(module, '__name__') else type(module).__name__
try:
replacement.__module__ = location
except (TypeError, AttributeError):
pass
for alias in dir(module):
logdebug("alias:%s (%s)", alias, name)
if hasattr(module, alias):
obj = getattr(module, alias)
logdebug("- %s:%s (%s)", obj, original, obj is original)
if obj is original:
if aliases or alias == name:
logdebug("= saving %s on %s.%s ...", replacement, target, alias)
setattr(module, alias, replacement)
rollback.merge(lambda alias=alias: setattr(module, alias, original))
if alias == name:
seen = True
elif alias == name:
if ismethod(obj):
logdebug("= saving %s on %s.%s ...", replacement, target, alias)
setattr(module, alias, replacement)
rollback.merge(lambda alias=alias: setattr(module, alias, original))
seen = True
else:
raise AssertionError("%s.%s = %s is not %s." % (module, alias, obj, original))
if not seen:
warnings.warn('Setting %s.%s to %s. There was no previous definition, probably patching the wrong module.' % (
target, name, replacement
))
logdebug("= saving %s on %s.%s ...", replacement, target, name)
setattr(module, name, replacement)
rollback.merge(lambda: setattr(module, name, original))
return rollback | [
"def",
"patch_module",
"(",
"module",
",",
"name",
",",
"replacement",
",",
"original",
"=",
"UNSPECIFIED",
",",
"aliases",
"=",
"True",
",",
"location",
"=",
"None",
",",
"*",
"*",
"_bogus_options",
")",
":",
"rollback",
"=",
"Rollback",
"(",
")",
"seen... | 46.74 | 23.78 |
def _related_field_data(field, obj):
"""Returns relation ``field`` as a dict.
Dict contains related pk info and some meta information
for reconstructing objects.
"""
data = _basic_field_data(field, obj)
relation_info = {
Field.REL_DB_TABLE: field.rel.to._meta.db_table,
Field.REL_APP: field.rel.to._meta.app_label,
Field.REL_MODEL: field.rel.to.__name__
}
data[Field.TYPE] = FieldType.REL
data[Field.REL] = relation_info
return data | [
"def",
"_related_field_data",
"(",
"field",
",",
"obj",
")",
":",
"data",
"=",
"_basic_field_data",
"(",
"field",
",",
"obj",
")",
"relation_info",
"=",
"{",
"Field",
".",
"REL_DB_TABLE",
":",
"field",
".",
"rel",
".",
"to",
".",
"_meta",
".",
"db_table"... | 32.2 | 12.933333 |
def _discover(**kwargs):
"""Yields info about station servers announcing themselves via multicast."""
query = station_server.MULTICAST_QUERY
for host, response in multicast.send(query, **kwargs):
try:
result = json.loads(response)
except ValueError:
_LOG.warn('Received bad JSON over multicast from %s: %s', host, response)
try:
yield StationInfo(result['cell'], host, result['port'],
result['station_id'], 'ONLINE',
result.get('test_description'),
result['test_name'])
except KeyError:
if 'last_activity_time_millis' in result:
_LOG.debug('Received old station API response on multicast. Ignoring.')
else:
_LOG.warn('Received bad multicast response from %s: %s', host, response) | [
"def",
"_discover",
"(",
"*",
"*",
"kwargs",
")",
":",
"query",
"=",
"station_server",
".",
"MULTICAST_QUERY",
"for",
"host",
",",
"response",
"in",
"multicast",
".",
"send",
"(",
"query",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"result",
"=",
... | 44.555556 | 19.333333 |
def precision(self, label=None):
"""
Returns precision or precision for a given label (category) if specified.
"""
if label is None:
return self.call("precision")
else:
return self.call("precision", float(label)) | [
"def",
"precision",
"(",
"self",
",",
"label",
"=",
"None",
")",
":",
"if",
"label",
"is",
"None",
":",
"return",
"self",
".",
"call",
"(",
"\"precision\"",
")",
"else",
":",
"return",
"self",
".",
"call",
"(",
"\"precision\"",
",",
"float",
"(",
"la... | 33.625 | 13.375 |
def convert_coord_object(coord):
"""Convert ModestMaps.Core.Coordinate -> raw_tiles.tile.Tile"""
assert isinstance(coord, Coordinate)
coord = coord.container()
return Tile(int(coord.zoom), int(coord.column), int(coord.row)) | [
"def",
"convert_coord_object",
"(",
"coord",
")",
":",
"assert",
"isinstance",
"(",
"coord",
",",
"Coordinate",
")",
"coord",
"=",
"coord",
".",
"container",
"(",
")",
"return",
"Tile",
"(",
"int",
"(",
"coord",
".",
"zoom",
")",
",",
"int",
"(",
"coor... | 47 | 9.2 |
def dump_molecule(self, filepath=None, include_coms=False, **kwargs):
    """
    Dump a :class:`Molecule` to a file (PDB or XYZ).
    Kwargs are passed to :func:`pywindow.io_tools.Output.dump2file()`.
    For validation purposes an overlay of window centres and COMs can also
    be dumped as:
    He - for the centre of mass
    Ne - for the centre of the optimised cavity
    Ar - for the centres of each found window
    Parameters
    ----------
    filepath : :class:`str`
        The filepath for the dumped file. If :class:`None`, the file is
        dumped locally with :attr:`molecule_id` as filename.
        (default=None)
    include_coms : :class:`bool`
        If True, dump also with an overlay of window centres and COMs.
        (default=False)
    Returns
    -------
    None : :class:`NoneType`
    """
    # If no filepath is provided we create one.
    if filepath is None:
        # Default name: "<parent_system>_<molecule_id>.pdb" in the CWD.
        filepath = "_".join(
            (str(self.parent_system), str(self.molecule_id)))
        filepath = '/'.join((os.getcwd(), filepath))
        filepath = '.'.join((filepath, 'pdb'))
    # Check if there is an 'atom_ids' keyword in the self.mol dict.
    # Otherwise pass to the dump2file atom_ids='elements'.
    if 'atom_ids' not in self.mol.keys():
        atom_ids = 'elements'
    else:
        atom_ids = 'atom_ids'
    # Dump molecule into a file.
    # If coms are to be included additional steps are required.
    # First deepcopy the molecule
    if include_coms is True:
        # Work on a deep copy so the stored molecule is never mutated.
        mmol = deepcopy(self.mol)
        # add centre of mass (centre of not optimised pore) as 'He'.
        mmol['elements'] = np.concatenate(
            (mmol['elements'], np.array(['He'])))
        if 'atom_ids' not in self.mol.keys():
            pass
        else:
            mmol['atom_ids'] = np.concatenate(
                (mmol['atom_ids'], np.array(['He'])))
        mmol['coordinates'] = np.concatenate(
            (mmol['coordinates'],
             np.array([self.properties['centre_of_mass']])))
        # add centre of pore optimised as 'Ne'.
        mmol['elements'] = np.concatenate(
            (mmol['elements'], np.array(['Ne'])))
        if 'atom_ids' not in self.mol.keys():
            pass
        else:
            mmol['atom_ids'] = np.concatenate(
                (mmol['atom_ids'], np.array(['Ne'])))
        mmol['coordinates'] = np.concatenate(
            (mmol['coordinates'], np.array(
                [self.properties['pore_diameter_opt']['centre_of_mass']])))
        # add centre of windows as 'Ar'.
        # NOTE(review): assumes find_windows() already populated
        # self.properties['windows'] — confirm the expected call order.
        if self.properties['windows']['centre_of_mass'] is not None:
            range_ = range(
                len(self.properties['windows']['centre_of_mass']))
            for com in range_:
                mmol['elements'] = np.concatenate(
                    (mmol['elements'], np.array(['Ar'])))
                if 'atom_ids' not in self.mol.keys():
                    pass
                else:
                    # Windows are numbered from 1: Ar1, Ar2, ...
                    mmol['atom_ids'] = np.concatenate(
                        (mmol['atom_ids'],
                         np.array(['Ar{0}'.format(com + 1)])))
                mmol['coordinates'] = np.concatenate(
                    (mmol['coordinates'], np.array([
                        self.properties['windows']['centre_of_mass'][com]
                    ])))
        self._Output.dump2file(mmol, filepath, atom_ids=atom_ids, **kwargs)
    else:
        self._Output.dump2file(
            self.mol, filepath, atom_ids=atom_ids, **kwargs)
"def",
"dump_molecule",
"(",
"self",
",",
"filepath",
"=",
"None",
",",
"include_coms",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"# If no filepath is provided we create one.",
"if",
"filepath",
"is",
"None",
":",
"filepath",
"=",
"\"_\"",
".",
"join",
... | 40.402174 | 18.880435 |
def get(self, terser):
    """Look up a value in the HL7 dictionary.

    The terser may be fully qualified (e.g. ``OBR[1]-10-01``) or in its
    simplest aliased form (e.g. ``OBR-10-1`` when a single OBR segment is
    present in the message).

    :return: the stored value, a list of matching alias keys for a
        partial terser, or None when nothing matches.
    """
    # Resolve an alias to its fully qualified key when needed.
    if terser not in self.data and terser in self.aliasKeys:
        lookup = self.aliasKeys[terser]
    else:
        lookup = terser
    found = self.data.get(lookup, None)
    if found:
        return found
    # Partial terser: collect every alias key sharing the prefix.
    # (A simple startswith test; a regexp would be more powerful but slower.)
    matches = [alias for alias in self.aliasKeys if alias.startswith(lookup)]
    return matches if matches else None
"def",
"get",
"(",
"self",
",",
"terser",
")",
":",
"key",
"=",
"terser",
"# if the expression in not found n the qualified names",
"# find the alias",
"if",
"terser",
"not",
"in",
"self",
".",
"data",
"and",
"terser",
"in",
"self",
".",
"aliasKeys",
":",
"key",... | 38.384615 | 23.384615 |
def runner(opts, utils=None, context=None, whitelist=None):
    '''
    Directly call a function inside a loader directory
    '''
    utils = {} if utils is None else utils
    context = {} if context is None else context
    loader = LazyLoader(
        _module_dirs(opts, 'runners', 'runner', ext_type_dirs='runner_dirs'),
        opts,
        tag='runners',
        pack={'__utils__': utils, '__context__': context},
        whitelist=whitelist,
    )
    # TODO: change from __salt__ to something else, we overload __salt__ too much
    loader.pack['__salt__'] = loader
    return loader
"def",
"runner",
"(",
"opts",
",",
"utils",
"=",
"None",
",",
"context",
"=",
"None",
",",
"whitelist",
"=",
"None",
")",
":",
"if",
"utils",
"is",
"None",
":",
"utils",
"=",
"{",
"}",
"if",
"context",
"is",
"None",
":",
"context",
"=",
"{",
"}",... | 31 | 23.333333 |
def _custom_rdd_reduce(self, reduce_func):
"""Provides a custom RDD reduce which preserves ordering if the RDD has
been sorted. This is useful for us because we need this functionality
as many pandas operations support sorting the results. The standard
reduce in PySpark does not have this property. Note that when PySpark
no longer does partition reduces locally this code will also need to
be updated."""
def accumulating_iter(iterator):
acc = None
for obj in iterator:
if acc is None:
acc = obj
else:
acc = reduce_func(acc, obj)
if acc is not None:
yield acc
vals = self._rdd.mapPartitions(accumulating_iter).collect()
return reduce(accumulating_iter, vals) | [
"def",
"_custom_rdd_reduce",
"(",
"self",
",",
"reduce_func",
")",
":",
"def",
"accumulating_iter",
"(",
"iterator",
")",
":",
"acc",
"=",
"None",
"for",
"obj",
"in",
"iterator",
":",
"if",
"acc",
"is",
"None",
":",
"acc",
"=",
"obj",
"else",
":",
"acc... | 46.666667 | 15.388889 |
def get_details(var):
    """
    Given a variable inside the context, obtain the attributes/callables,
    their values where possible, and the module name and class name if possible
    """
    details = {}
    # Record module/class metadata when the object exposes it.
    module_name = getattr(var, '__module__', '')
    class_name = getattr(getattr(var, '__class__', ''), '__name__', '')
    if module_name:
        details['META_module_name'] = module_name
    if class_name:
        details['META_class_name'] = class_name
    # Keep only attributes whose value could actually be resolved.
    for attribute in get_attributes(var):
        resolved = _get_detail_value(var, attribute)
        if resolved is not None:
            details[attribute] = resolved
    return details
"def",
"get_details",
"(",
"var",
")",
":",
"var_data",
"=",
"{",
"}",
"# Obtain module and class details if available and add them in",
"module",
"=",
"getattr",
"(",
"var",
",",
"'__module__'",
",",
"''",
")",
"kls",
"=",
"getattr",
"(",
"getattr",
"(",
"var",... | 36.111111 | 15.222222 |
def vcirc(self, R, phi=None):
    """
    NAME:
       vcirc
    PURPOSE:
       calculate the circular velocity at R in potential Pot
    INPUT:
       Pot - Potential instance or list of such instances
       R - Galactocentric radius (can be Quantity)
       phi= (None) azimuth to use for non-axisymmetric potentials
    OUTPUT:
       circular rotation velocity
    HISTORY:
       2011-10-09 - Written - Bovy (IAS)
       2016-06-15 - Added phi= keyword for non-axisymmetric potential - Bovy (UofT)
    """
    # v_c^2 = -R * F_R, evaluated in internal units.
    radial_force = self.Rforce(R, phi=phi, use_physical=False)
    return nu.sqrt(-R * radial_force)
"def",
"vcirc",
"(",
"self",
",",
"R",
",",
"phi",
"=",
"None",
")",
":",
"return",
"nu",
".",
"sqrt",
"(",
"R",
"*",
"-",
"self",
".",
"Rforce",
"(",
"R",
",",
"phi",
"=",
"phi",
",",
"use_physical",
"=",
"False",
")",
")"
] | 23.193548 | 26.096774 |
def oauth_client_create(self, name, redirect_uri, **kwargs):
    """
    Make a new OAuth Client and return it

    :param name: the label for the new client
    :param redirect_uri: the OAuth redirect URI for the client
    :param kwargs: any additional fields accepted by the API
    :returns: the newly created OAuthClient
    :raises UnexpectedResponseError: if the API response lacks an ``id``
    """
    params = {
        "label": name,
        "redirect_uri": redirect_uri,
    }
    params.update(kwargs)
    result = self.client.post('/account/oauth-clients', data=params)
    # Idiom fix: "'id' not in result" instead of "not 'id' in result".
    if 'id' not in result:
        raise UnexpectedResponseError('Unexpected response when creating OAuth Client!',
                                      json=result)
    c = OAuthClient(self.client, result['id'], result)
    return c
"def",
"oauth_client_create",
"(",
"self",
",",
"name",
",",
"redirect_uri",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"{",
"\"label\"",
":",
"name",
",",
"\"redirect_uri\"",
":",
"redirect_uri",
",",
"}",
"params",
".",
"update",
"(",
"kwargs",
... | 30.555556 | 20.444444 |
def _call_handler(self, key, insert_text):
    """
    Callback to handler.
    """
    if isinstance(key, tuple):
        # A tuple of keys: dispatch each one individually.
        for single_key in key:
            self._call_handler(single_key, insert_text)
        return
    if key == Keys.BracketedPaste:
        # Entering bracketed-paste mode: start buffering pasted text.
        self._in_bracketed_paste = True
        self._paste_buffer = ''
    else:
        self.feed_key_callback(KeyPress(key, insert_text))
"def",
"_call_handler",
"(",
"self",
",",
"key",
",",
"insert_text",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"tuple",
")",
":",
"for",
"k",
"in",
"key",
":",
"self",
".",
"_call_handler",
"(",
"k",
",",
"insert_text",
")",
"else",
":",
"if",
... | 32.692308 | 10.076923 |
def _fullname(o):
"""Return the fully-qualified name of a function."""
return o.__module__ + "." + o.__name__ if o.__module__ else o.__name__ | [
"def",
"_fullname",
"(",
"o",
")",
":",
"return",
"o",
".",
"__module__",
"+",
"\".\"",
"+",
"o",
".",
"__name__",
"if",
"o",
".",
"__module__",
"else",
"o",
".",
"__name__"
] | 49 | 19 |
def validate(self, signed_value, max_age=None):
    """Check whether ``signed_value`` carries a valid signature.

    Returns True if the signature exists and is valid (and, when
    ``max_age`` is given, not expired); False otherwise.
    """
    try:
        self.unsign(signed_value, max_age=max_age)
    except BadSignature:
        return False
    return True
"def",
"validate",
"(",
"self",
",",
"signed_value",
",",
"max_age",
"=",
"None",
")",
":",
"try",
":",
"self",
".",
"unsign",
"(",
"signed_value",
",",
"max_age",
"=",
"max_age",
")",
"return",
"True",
"except",
"BadSignature",
":",
"return",
"False"
] | 40 | 11.75 |
def set_zones_device_assignment(self, internal_devices, external_devices) -> dict:
    """Set the devices for the security zones.

    Args:
        internal_devices(List[Device]): devices to use for the internal zone
        external_devices(List[Device]): devices to use for the external (hull) zone
    Returns:
        the result of _restCall
    """
    assignment = {
        "zonesDeviceAssignment": {
            "INTERNAL": [device.id for device in internal_devices],
            "EXTERNAL": [device.id for device in external_devices],
        }
    }
    return self._restCall(
        "home/security/setZonesDeviceAssignment", body=json.dumps(assignment)
    )
"def",
"set_zones_device_assignment",
"(",
"self",
",",
"internal_devices",
",",
"external_devices",
")",
"->",
"dict",
":",
"internal",
"=",
"[",
"x",
".",
"id",
"for",
"x",
"in",
"internal_devices",
"]",
"external",
"=",
"[",
"x",
".",
"id",
"for",
"x",
... | 49 | 26.6 |
def deliver_tx(self, raw_transaction):
    """Validate the transaction before mutating the state.

    Args:
        raw_transaction: a raw string (in bytes) transaction.
    """
    self.abort_if_abci_chain_is_not_synced()
    logger.debug('deliver_tx: %s', raw_transaction)
    transaction = self.bigchaindb.is_valid_transaction(
        decode_transaction(raw_transaction), self.block_transactions)
    # Invalid transactions are rejected without touching block state.
    if not transaction:
        logger.debug('deliver_tx: INVALID')
        return ResponseDeliverTx(code=CodeTypeError)
    logger.debug('storing tx')
    self.block_txn_ids.append(transaction.id)
    self.block_transactions.append(transaction)
    return ResponseDeliverTx(code=CodeTypeOk)
"def",
"deliver_tx",
"(",
"self",
",",
"raw_transaction",
")",
":",
"self",
".",
"abort_if_abci_chain_is_not_synced",
"(",
")",
"logger",
".",
"debug",
"(",
"'deliver_tx: %s'",
",",
"raw_transaction",
")",
"transaction",
"=",
"self",
".",
"bigchaindb",
".",
"is_... | 36.047619 | 18.380952 |
def init_random(X, n_clusters, random_state):
    """K-means initialization using randomly chosen points"""
    logger.info("Initializing randomly")
    # Draw n_clusters row indices, sorted for stable fancy indexing.
    seed_indices = sorted(draw_seed(random_state, 0, len(X), size=n_clusters))
    return X[seed_indices].compute()
"def",
"init_random",
"(",
"X",
",",
"n_clusters",
",",
"random_state",
")",
":",
"logger",
".",
"info",
"(",
"\"Initializing randomly\"",
")",
"idx",
"=",
"sorted",
"(",
"draw_seed",
"(",
"random_state",
",",
"0",
",",
"len",
"(",
"X",
")",
",",
"size",... | 43.833333 | 11 |
def _meanOmega_num_approx(self,dangle,tdisrupt,higherorder=False):
    """Compute the numerator going into meanOmega using the direct
    integration of the spline representation.

    Parameters: dangle (parallel angle offset), tdisrupt (disruption
    time), higherorder (include the higher-order correction term).
    Returns the scalar numerator value.

    NOTE(review): relies on self._kick_interpdOpar_poly being a
    piecewise-polynomial (spline) kick representation and on
    self._sortedSigOEig[2] being the relevant frequency variance —
    confirm against the class setup code.
    """
    # First construct the breakpoints for this dangle
    Oparb= (dangle-self._kick_interpdOpar_poly.x)/self._timpact
    # Find the lower limit of the integration in the pw-linear-kick approx.
    lowbindx,lowx= self.minOpar(dangle,tdisrupt,_return_raw=True)
    lowbindx= numpy.arange(len(Oparb)-1)[lowbindx]
    Oparb[lowbindx+1]= Oparb[lowbindx]-lowx
    # Now integrate between breakpoints
    # (Gaussian-weighted piecewise integral, truncated at lowbindx+1.)
    out= numpy.sum(((Oparb[:-1]
                     +(self._meandO+self._kick_interpdOpar_poly.c[-1]
                       -Oparb[:-1])/
                     (1.+self._kick_interpdOpar_poly.c[-2]*self._timpact))
                    *self._density_par_approx(dangle,tdisrupt,
                                              _return_array=True)
                    +numpy.sqrt(self._sortedSigOEig[2]/2./numpy.pi)/
                    (1.+self._kick_interpdOpar_poly.c[-2]*self._timpact)**2.
                    *(numpy.exp(-0.5*(Oparb[:-1]
                                      -self._kick_interpdOpar_poly.c[-1]
                                      -(1.+self._kick_interpdOpar_poly.c[-2]*self._timpact)
                                      *(Oparb-numpy.roll(Oparb,-1))[:-1]
                                      -self._meandO)**2.
                                /self._sortedSigOEig[2])
                      -numpy.exp(-0.5*(Oparb[:-1]-self._kick_interpdOpar_poly.c[-1]
                                       -self._meandO)**2.
                                 /self._sortedSigOEig[2])))[:lowbindx+1])
    if higherorder:
        # Add higher-order contribution
        out+= self._meanOmega_num_approx_higherorder(Oparb,lowbindx)
    # Add integration to infinity
    # (closed-form Gaussian tail integral above the first breakpoint)
    out+= 0.5*(numpy.sqrt(2./numpy.pi)*numpy.sqrt(self._sortedSigOEig[2])\
               *numpy.exp(-0.5*(self._meandO-Oparb[0])**2.\
                          /self._sortedSigOEig[2])
               +self._meandO
               *(1.+special.erf((self._meandO-Oparb[0])
                                /numpy.sqrt(2.*self._sortedSigOEig[2]))))
    return out
"def",
"_meanOmega_num_approx",
"(",
"self",
",",
"dangle",
",",
"tdisrupt",
",",
"higherorder",
"=",
"False",
")",
":",
"# First construct the breakpoints for this dangle",
"Oparb",
"=",
"(",
"dangle",
"-",
"self",
".",
"_kick_interpdOpar_poly",
".",
"x",
")",
"/... | 62.324324 | 23.297297 |
def end(self, *args, **kwargs):
    """Write the given chunk, flush it to the client, and then
    terminate the connection."""
    # Forward everything to send(), then shut the connection down.
    self.send(*args, **kwargs)
    self.close()
"def",
"end",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"send",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"close",
"(",
")"
] | 29 | 7.857143 |
def put_httpsconf(self, name, certid, forceHttps):
    """Update a domain's HTTPS certificate configuration.

    Docs: https://developer.qiniu.com/fusion/api/4246/the-domain-name#11

    Args:
        name: the domain name
        certid: certificate id, obtained from upload or the certificate list
        forceHttps: whether to force redirection to HTTPS
    Returns:
        {}
    """
    payload = json.dumps({"certid": certid, "forceHttps": forceHttps})
    endpoint = '{0}/domain/{1}/httpsconf'.format(self.server, name)
    return self.__put(endpoint, payload)
"def",
"put_httpsconf",
"(",
"self",
",",
"name",
",",
"certid",
",",
"forceHttps",
")",
":",
"req",
"=",
"{",
"}",
"req",
".",
"update",
"(",
"{",
"\"certid\"",
":",
"certid",
"}",
")",
"req",
".",
"update",
"(",
"{",
"\"forceHttps\"",
":",
"forceHt... | 28.210526 | 17.894737 |
def _get_request_token(self):
    """
    Obtain a temporary request token to authorize an access token and to
    sign the request to obtain the access token
    """
    if self.request_token is not None:
        # Already fetched earlier in this flow; reuse it.
        return self.request_token
    # Build the query string, including the absolute callback URL.
    get_params = {}
    if self.parameters:
        get_params.update(self.parameters)
    get_params['oauth_callback'] = self.request.build_absolute_uri(
        self.callback_url)
    rt_url = self.request_token_url + '?' + urlencode(get_params)
    oauth = OAuth1(self.consumer_key, client_secret=self.consumer_secret)
    response = requests.post(url=rt_url, auth=oauth)
    if response.status_code not in [200, 201]:
        raise OAuthError(
            _('Invalid response while obtaining request token'
              ' from "%s".') % get_token_prefix(self.request_token_url))
    self.request_token = dict(parse_qsl(response.text))
    # Stash the token on the session for the later access-token exchange.
    session_key = 'oauth_%s_request_token' % get_token_prefix(
        self.request_token_url)
    self.request.session[session_key] = self.request_token
    return self.request_token
"def",
"_get_request_token",
"(",
"self",
")",
":",
"if",
"self",
".",
"request_token",
"is",
"None",
":",
"get_params",
"=",
"{",
"}",
"if",
"self",
".",
"parameters",
":",
"get_params",
".",
"update",
"(",
"self",
".",
"parameters",
")",
"get_params",
... | 48.791667 | 15.291667 |
def parse_nargs(self, nargs):
    """Decode argparse's multi-type ``nargs`` encoding into the
    (min_args, max_args) pair of values this action may consume.
    A ``max_args`` of None means unbounded; PARSER leaves both at 0."""
    self.min_args = 0
    self.max_args = 0
    if nargs is None:
        # Plain option: exactly one value.
        self.min_args = 1
        self.max_args = 1
    elif nargs == argparse.OPTIONAL:
        self.max_args = 1
    elif nargs == argparse.ZERO_OR_MORE:
        self.max_args = None
    elif nargs in (argparse.ONE_OR_MORE, argparse.REMAINDER):
        self.min_args = 1
        self.max_args = None
    elif nargs != argparse.PARSER:
        # A literal integer count: exactly that many values.
        self.min_args = nargs
        self.max_args = nargs
"def",
"parse_nargs",
"(",
"self",
",",
"nargs",
")",
":",
"self",
".",
"max_args",
"=",
"self",
".",
"min_args",
"=",
"0",
"if",
"nargs",
"is",
"None",
":",
"self",
".",
"max_args",
"=",
"self",
".",
"min_args",
"=",
"1",
"elif",
"nargs",
"==",
"a... | 42.666667 | 7.333333 |
def initialize_base(self, es):
    """Set parameters and the evolution-path state variable based on the
    dimension, mueff and possibly further options."""
    ## meta_parameters.cs_exponent == 1.0
    exponent = 1.0
    ## meta_parameters.cs_multiplier == 1.0
    mueff = es.sp.mueff
    self.cs = 1.0 * (mueff + 2)**exponent / (
        es.N**exponent + (mueff + 3)**exponent)
    # Evolution path starts at the origin.
    self.ps = np.zeros(es.N)
    self.is_initialized_base = True
    return self
"def",
"initialize_base",
"(",
"self",
",",
"es",
")",
":",
"## meta_parameters.cs_exponent == 1.0",
"b",
"=",
"1.0",
"## meta_parameters.cs_multiplier == 1.0",
"self",
".",
"cs",
"=",
"1.0",
"*",
"(",
"es",
".",
"sp",
".",
"mueff",
"+",
"2",
")",
"**",
"b",... | 35.333333 | 13.25 |
def get_hooks(self):
    """
    :calls: `GET /orgs/:owner/hooks <http://developer.github.com/v3/orgs/hooks>`_
    :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Hook.Hook`
    """
    hooks_url = self.url + "/hooks"
    return github.PaginatedList.PaginatedList(
        github.Hook.Hook, self._requester, hooks_url, None)
"def",
"get_hooks",
"(",
"self",
")",
":",
"return",
"github",
".",
"PaginatedList",
".",
"PaginatedList",
"(",
"github",
".",
"Hook",
".",
"Hook",
",",
"self",
".",
"_requester",
",",
"self",
".",
"url",
"+",
"\"/hooks\"",
",",
"None",
")"
] | 34.454545 | 19 |
def _set_internal_value(self, new_internal_value):
"""
This is supposed to be only used by fitting engines
:param new_internal_value: new value in internal representation
:return: none
"""
if new_internal_value != self._internal_value:
self._internal_value = new_internal_value
# Call callbacks if any
for callback in self._callbacks:
callback(self) | [
"def",
"_set_internal_value",
"(",
"self",
",",
"new_internal_value",
")",
":",
"if",
"new_internal_value",
"!=",
"self",
".",
"_internal_value",
":",
"self",
".",
"_internal_value",
"=",
"new_internal_value",
"# Call callbacks if any",
"for",
"callback",
"in",
"self"... | 25.823529 | 21.470588 |
def process_delta(delta):
    """Print a summary line for one webhook delta.

    In a real application this is where each change notification would be
    processed (fetching message IDs, updating a database, and so on).
    Because this is just an example project, we only print the delta's
    type, timestamp and object id so you can see what is being sent.
    """
    event_time = datetime.datetime.utcfromtimestamp(delta["date"])
    print(" * {type} at {date} with ID {object_id}".format(
        type=delta["type"],
        date=event_time,
        object_id=delta["object_data"]["id"]))
"def",
"process_delta",
"(",
"delta",
")",
":",
"kwargs",
"=",
"{",
"\"type\"",
":",
"delta",
"[",
"\"type\"",
"]",
",",
"\"date\"",
":",
"datetime",
".",
"datetime",
".",
"utcfromtimestamp",
"(",
"delta",
"[",
"\"date\"",
"]",
")",
",",
"\"object_id\"",
... | 38.882353 | 21.823529 |
def labeled_intervals(intervals, labels, label_set=None,
                      base=None, height=None, extend_labels=True,
                      ax=None, tick=True, **kwargs):
    '''Plot labeled intervals with each label on its own row.
    Parameters
    ----------
    intervals : np.ndarray, shape=(n, 2)
        segment intervals, in the format returned by
        :func:`mir_eval.io.load_intervals` or
        :func:`mir_eval.io.load_labeled_intervals`.
    labels : list, shape=(n,)
        reference segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    label_set : list
        An (ordered) list of labels to determine the plotting order.
        If not provided, the labels will be inferred from
        ``ax.get_yticklabels()``.
        If no ``yticklabels`` exist, then the sorted set of unique values
        in ``labels`` is taken as the label set.
    base : np.ndarray, shape=(n,), optional
        Vertical positions of each label.
        By default, labels are positioned at integers
        ``np.arange(len(labels))``.
    height : scalar or np.ndarray, shape=(n,), optional
        Height for each label.
        If scalar, the same value is applied to all labels.
        By default, each label has ``height=1``.
    extend_labels : bool
        If ``False``, only values of ``labels`` that also exist in
        ``label_set`` will be shown.
        If ``True``, all labels are shown, with those in `labels` but
        not in `label_set` appended to the top of the plot.
        A horizontal line is drawn to indicate the separation between
        values in or out of ``label_set``.
    ax : matplotlib.pyplot.axes
        An axis handle on which to draw the intervals.
        If none is provided, a new set of axes is created.
    tick : bool
        If ``True``, sets tick positions and labels on the y-axis.
    kwargs
        Additional keyword arguments to pass to
        `matplotlib.collection.BrokenBarHCollection`.
    Returns
    -------
    ax : matplotlib.pyplot.axes._subplots.AxesSubplot
        A handle to the (possibly constructed) plot axes
    '''
    # Get the axes handle
    ax, _ = __get_axes(ax=ax)
    # Make sure we have a numpy array
    intervals = np.atleast_2d(intervals)
    if label_set is None:
        # If we have non-empty pre-existing tick labels, use them
        label_set = [_.get_text() for _ in ax.get_yticklabels()]
        # If none of the label strings have content, treat it as empty
        if not any(label_set):
            label_set = []
    else:
        label_set = list(label_set)
    # Put additional labels at the end, in order
    if extend_labels:
        ticks = label_set + sorted(set(labels) - set(label_set))
    elif label_set:
        ticks = label_set
    else:
        ticks = sorted(set(labels))
    style = dict(linewidth=1)
    # Pull the next color from the axes' property cycle.
    style.update(next(ax._get_patches_for_fill.prop_cycler))
    # Swap color -> facecolor here so we preserve edgecolor on rects
    style['facecolor'] = style.pop('color')
    style.update(kwargs)
    if base is None:
        base = np.arange(len(ticks))
    if height is None:
        height = 1
    if np.isscalar(height):
        height = height * np.ones_like(base)
    # Map each displayed label to its (y-position, height) row.
    seg_y = dict()
    for ybase, yheight, lab in zip(base, height, ticks):
        seg_y[lab] = (ybase, yheight)
    # Collect (start, duration) spans per label; labels outside the
    # displayed set are silently dropped.
    xvals = defaultdict(list)
    for ival, lab in zip(intervals, labels):
        if lab not in seg_y:
            continue
        xvals[lab].append((ival[0], ival[1] - ival[0]))
    for lab in seg_y:
        ax.add_collection(BrokenBarHCollection(xvals[lab], seg_y[lab],
                                               **style))
        # Pop the label after the first time we see it, so we only get
        # one legend entry
        style.pop('label', None)
    # Draw a line separating the new labels from pre-existing labels
    if label_set != ticks:
        ax.axhline(len(label_set), color='k', alpha=0.5)
    if tick:
        ax.grid(True, axis='y')
        ax.set_yticks([])
        ax.set_yticks(base)
        ax.set_yticklabels(ticks, va='bottom')
        ax.yaxis.set_major_formatter(IntervalFormatter(base, ticks))
    # Expand axis limits to cover everything that was drawn.
    if base.size:
        __expand_limits(ax, [base.min(), (base + height).max()], which='y')
    if intervals.size:
        __expand_limits(ax, [intervals.min(), intervals.max()], which='x')
    return ax
"def",
"labeled_intervals",
"(",
"intervals",
",",
"labels",
",",
"label_set",
"=",
"None",
",",
"base",
"=",
"None",
",",
"height",
"=",
"None",
",",
"extend_labels",
"=",
"True",
",",
"ax",
"=",
"None",
",",
"tick",
"=",
"True",
",",
"*",
"*",
"kwa... | 32.409091 | 21.424242 |
def make_html_items(self, items):
    """Convert a field's content into some valid HTML.

    Items with source lines are rendered as code via ``make_html_code``;
    all other items as paragraphs via ``make_html_para``. The rendered
    pieces are joined with newlines.
    """
    lines = []
    for item in items:
        if item.lines:
            lines.append(self.make_html_code(item.lines))
        else:
            lines.append(self.make_html_para(item.words))
    # Portability fix: str.join works on both Python 2 and 3;
    # string.join() was removed in Python 3.
    return '\n'.join(lines)
"def",
"make_html_items",
"(",
"self",
",",
"items",
")",
":",
"lines",
"=",
"[",
"]",
"for",
"item",
"in",
"items",
":",
"if",
"item",
".",
"lines",
":",
"lines",
".",
"append",
"(",
"self",
".",
"make_html_code",
"(",
"item",
".",
"lines",
")",
"... | 35.6 | 16.8 |
def get_context(self):
    """Populate ``context_data`` from the ``context`` tuple.

    ``context`` is not required, but when defined it must be a tuple of
    model references; each is resolved to a class and an instance is
    stored under the snake_case form of the class name.
    """
    if not self.context:
        return
    assert isinstance(self.context, tuple), \
        'Expected a Tuple not {0}'.format(type(self.context))
    for model in self.context:
        model_cls = utils.get_model_class(model)
        key = utils.camel_to_snake(model_cls.__name__)
        self.context_data[key] = self.get_instance_of(model_cls)
"def",
"get_context",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"context",
":",
"return",
"else",
":",
"assert",
"isinstance",
"(",
"self",
".",
"context",
",",
"tuple",
")",
",",
"'Expected a Tuple not {0}'",
".",
"format",
"(",
"type",
"(",
"sel... | 38.857143 | 15.285714 |
def user_segment(self):
    """
    | Comment: The id of the user segment to which this section belongs
    """
    # Only resolvable when both an API handle and a segment id exist.
    if not (self.api and self.user_segment_id):
        return None
    return self.api._get_user_segment(self.user_segment_id)
"def",
"user_segment",
"(",
"self",
")",
":",
"if",
"self",
".",
"api",
"and",
"self",
".",
"user_segment_id",
":",
"return",
"self",
".",
"api",
".",
"_get_user_segment",
"(",
"self",
".",
"user_segment_id",
")"
] | 38.833333 | 14.166667 |
def read(author, kind):
    """Fill in missing arguments from the license cache.

    Looks for a '.license' file in the CACHE_DIRECTORY to read any
    arguments that were not passed to the license utility.

    Arguments:
        author (str): The author passed, if any.
        kind (str): The kind of license passed, if any.
    Throws:
        LicenseError, if there was a cache miss or I/O error.
    """
    if not os.path.exists(CACHE_PATH):
        raise LicenseError('No cache found. You must '
                           'supply at least -a and -k.')
    cache = read_cache()
    # Fall back to cached values only for the arguments that are missing.
    author = read_author(cache) if author is None else author
    kind = read_kind(cache) if kind is None else kind
    return author, kind
"def",
"read",
"(",
"author",
",",
"kind",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"CACHE_PATH",
")",
":",
"raise",
"LicenseError",
"(",
"'No cache found. You must '",
"'supply at least -a and -k.'",
")",
"cache",
"=",
"read_cache",
"(",
... | 26.357143 | 21.5 |
def do_groupby(environment, value, attribute):
    """Group a sequence of objects by a common attribute.
    If you for example have a list of dicts or objects that represent persons
    with `gender`, `first_name` and `last_name` attributes and you want to
    group all users by genders you can do something like the following
    snippet:
    .. sourcecode:: html+jinja
        <ul>
        {% for group in persons|groupby('gender') %}
            <li>{{ group.grouper }}<ul>
            {% for person in group.list %}
                <li>{{ person.first_name }} {{ person.last_name }}</li>
            {% endfor %}</ul></li>
        {% endfor %}
        </ul>
    Additionally it's possible to use tuple unpacking for the grouper and
    list:
    .. sourcecode:: html+jinja
        <ul>
        {% for grouper, list in persons|groupby('gender') %}
            ...
        {% endfor %}
        </ul>
    As you can see the item we're grouping by is stored in the `grouper`
    attribute and the `list` contains all the objects that have this grouper
    in common.
    .. versionchanged:: 2.6
       It's now possible to use dotted notation to group by the child
       attribute of another attribute.
    """
    key_fn = make_attrgetter(environment, attribute)
    # itertools.groupby only merges adjacent keys, so sort first.
    grouped = groupby(sorted(value, key=key_fn), key_fn)
    return [_GroupTuple(grouper, list(members))
            for grouper, members in grouped]
"def",
"do_groupby",
"(",
"environment",
",",
"value",
",",
"attribute",
")",
":",
"expr",
"=",
"make_attrgetter",
"(",
"environment",
",",
"attribute",
")",
"return",
"[",
"_GroupTuple",
"(",
"key",
",",
"list",
"(",
"values",
")",
")",
"for",
"key",
",... | 32.829268 | 23.560976 |
def list(self, query_criteria=None, order_criteria=None):
    '''
    a generator method to list records in table which match query criteria
    :param query_criteria: dictionary with schema dot-path field names and query qualifiers
    :param order_criteria: list of single keypair dictionaries with field names to order by
    :return: generator object with string of primary key
    an example of how to construct the query_criteria argument:
    query_criteria = {
    '.path.to.number': {
    'min_value': 4.5
    },
    '.path.to.string': {
    'discrete_values': [ 'pond', 'lake', 'stream', 'brook' ]
    }
    }
    NOTE:   sql only supports a limited number of query conditions and all list
    fields in a record are stored as a blob. this method constructs a
    sql query which contains clauses wherever the query conditions can
    be translated one-to-one into sql keywords and returns the entire
    record of each qualifying record. once sql returns its results, the
    remaining query conditions are applied to the record and only those
    results which match all conditions are yield by the generator. as
    such, depending upon the conditions selected, this method acts more
    or less like a SCAN of the entire database. if no sql supported
    conditions are provided, the method will look through all records.
    native SQL supported conditions
    float, integer & strings:
    value_exists
    equal_to
    discrete_values
    excluded_values
    greater_than
    less_than
    max_value
    min_value
    booleans:
    value_exists
    equal_to
    lists:
    value_exists
    NOTE:   the full list of all criteria are found in the reference page for the
    jsonmodel module as well as the query-rules.json file included in the
    module.
    http://collectiveacuity.github.io/jsonModel/reference/#query-criteria
    an example of how to construct the order_criteria argument:
    order_criteria = [
    { '.path.to.number': 'descend' },
    { '.path.to.string': '' }
    ]
    NOTE:   results can be ordered either by ascending or descending values. to
    order in ascending order, leave the value for the field empty. any value
    for the field key automatically is interpreted as descending order
    '''
    title = '%s.list' % self.__class__.__name__
    from sqlalchemy import desc as order_desc
    # validate inputs
    if query_criteria:
        self.model.query(query_criteria)
    else:
        query_criteria = {}
    if order_criteria:
        object_title = '%s(%s=%s)' % (title, 'order_criteria', str(order_criteria))
        self.fields.validate(order_criteria, '.order_criteria', object_title)
        # every ordering key must exist in the record schema
        for i in range(len(order_criteria)):
            criterion = order_criteria[i]
            for key, value in criterion.items():
                criteria_key = key
                if key.find('.') != 0:
                    criteria_key = '.%s' % key
                if criteria_key not in self.model.keyMap.keys():
                    raise ValueError('%s(order_criteria=[...]) item %s key %s does not exist in record_schema.' % (title, i, key))
    else:
        order_criteria = []
    # construct select statement with sql supported conditions
    # http://docs.sqlalchemy.org/en/latest/orm/tutorial.html#common-filter-operators
    select_object = self.table.select()
    for key, value in query_criteria.items():
        # normalize to both the bare column name and the dot-path map key
        record_key = key
        map_key = key
        if key.find('.') == 0:
            record_key = key[1:]
        else:
            map_key = '.%s' % key
        if record_key:
            # keys inside list items cannot be translated to sql clauses
            if self.item_key.findall(record_key):
                pass
            else:
                test_value = value
                if not isinstance(value, dict):
                    test_value = { 'equal_to': value }
                column_object = getattr(self.table.c, record_key)
                for k, v in test_value.items():
                    if k == 'value_exists':
                        if self.model.keyMap[map_key]['value_datatype'] in ('string', 'number', 'boolean', 'list'):
                            if v:
                                select_object = select_object.where(column_object!=None)
                            else:
                                select_object = select_object.where(column_object==None)
                    else:
                        # only scalar datatypes support the remaining operators
                        if self.model.keyMap[map_key]['value_datatype'] in ('string', 'number', 'boolean'):
                            if k == 'equal_to':
                                select_object = select_object.where(column_object==v)
                            elif k == 'discrete_values':
                                select_object = select_object.where(column_object.in_(v))
                            elif k == 'excluded_values':
                                select_object = select_object.where(~column_object.in_(v))
                            elif k == 'greater_than':
                                select_object = select_object.where(column_object.__gt__(v))
                            elif k == 'less_than':
                                select_object = select_object.where(column_object.__lt__(v))
                            elif k == 'max_value':
                                select_object = select_object.where(column_object.__le__(v))
                            elif k == 'min_value':
                                select_object = select_object.where(column_object.__ge__(v))
    # add order criteria
    for criterion in order_criteria:
        key, value = next(iter(criterion.items()))
        record_key = key
        if key.find('.') == 0:
            record_key = key[1:]
        if record_key:
            # list-item keys cannot be ordered by sql either
            if self.item_key.findall(record_key):
                pass
            else:
                column_object = getattr(self.table.c, record_key)
                # any non-empty value means descending order
                if value:
                    select_object = select_object.order_by(order_desc(column_object))
                else:
                    select_object = select_object.order_by(column_object)
    # execute query on database
    # print(select_object)
    for record in self.session.execute(select_object).fetchall():
        record_details = self._reconstruct_record(record)
    # filter results with non-sql supported conditions
        if query_criteria:
            if self.model.query(query_criteria, record_details):
                yield record_details
        else:
            yield record_details
"def",
"list",
"(",
"self",
",",
"query_criteria",
"=",
"None",
",",
"order_criteria",
"=",
"None",
")",
":",
"title",
"=",
"'%s.list'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"from",
"sqlalchemy",
"import",
"desc",
"as",
"order_desc",
"# validate i... | 45.69375 | 24.25625 |
def translate(ra, dec, r, theta):
    """
    Translate a given point a distance r in the (initial) direction theta, along a great circle.

    Parameters
    ----------
    ra, dec : float
        The initial point of interest (degrees).
    r, theta : float
        The distance and initial direction to translate (degrees).

    Returns
    -------
    ra, dec : float
        The translated position (degrees).
    """
    # Convert once and reuse; the repeated np.radians calls of a naive
    # implementation all produce identical values anyway.
    dec_rad = np.radians(dec)
    r_rad = np.radians(r)
    theta_rad = np.radians(theta)
    sin_dec, cos_dec = np.sin(dec_rad), np.cos(dec_rad)
    sin_r, cos_r = np.sin(r_rad), np.cos(r_rad)
    # Spherical law of cosines gives the new declination.
    dec_out = np.degrees(np.arcsin(sin_dec * cos_r + cos_dec * sin_r * np.cos(theta_rad)))
    # Four-parts formula gives the offset in right ascension.
    y = np.sin(theta_rad) * sin_r * cos_dec
    x = cos_r - sin_dec * np.sin(np.radians(dec_out))
    return ra + np.degrees(np.arctan2(y, x)), dec_out
"def",
"translate",
"(",
"ra",
",",
"dec",
",",
"r",
",",
"theta",
")",
":",
"factor",
"=",
"np",
".",
"sin",
"(",
"np",
".",
"radians",
"(",
"dec",
")",
")",
"*",
"np",
".",
"cos",
"(",
"np",
".",
"radians",
"(",
"r",
")",
")",
"factor",
"... | 33.6 | 24.48 |
def setRpms(self, package, build, build_ts, rpms):
    """Register (or replace) the build record for *package*.

    Stores the build identifier, its timestamp and the list of rpm
    files under the package name.
    """
    record = {"build": build, "build_ts": build_ts, "rpms": rpms}
    self._builds[package] = record
"def",
"setRpms",
"(",
"self",
",",
"package",
",",
"build",
",",
"build_ts",
",",
"rpms",
")",
":",
"self",
".",
"_builds",
"[",
"package",
"]",
"=",
"{",
"\"build\"",
":",
"build",
",",
"\"build_ts\"",
":",
"build_ts",
",",
"\"rpms\"",
":",
"rpms",
... | 40 | 12 |
def create_address(self, account_id, **params):
    """https://developers.coinbase.com/api/v2#create-address"""
    raw = self._post('v2', 'accounts', account_id, 'addresses', data=params)
    address = self._make_api_object(raw, Address)
    return address
"def",
"create_address",
"(",
"self",
",",
"account_id",
",",
"*",
"*",
"params",
")",
":",
"response",
"=",
"self",
".",
"_post",
"(",
"'v2'",
",",
"'accounts'",
",",
"account_id",
",",
"'addresses'",
",",
"data",
"=",
"params",
")",
"return",
"self",
... | 63.5 | 16.75 |
def _validate_nbf(claims, leeway=0):
"""Validates that the 'nbf' claim is valid.
The "nbf" (not before) claim identifies the time before which the JWT
MUST NOT be accepted for processing. The processing of the "nbf"
claim requires that the current date/time MUST be after or equal to
the not-before date/time listed in the "nbf" claim. Implementers MAY
provide for some small leeway, usually no more than a few minutes, to
account for clock skew. Its value MUST be a number containing a
NumericDate value. Use of this claim is OPTIONAL.
Args:
claims (dict): The claims dictionary to validate.
leeway (int): The number of seconds of skew that is allowed.
"""
if 'nbf' not in claims:
return
try:
nbf = int(claims['nbf'])
except ValueError:
raise JWTClaimsError('Not Before claim (nbf) must be an integer.')
now = timegm(datetime.utcnow().utctimetuple())
if nbf > (now + leeway):
raise JWTClaimsError('The token is not yet valid (nbf)') | [
"def",
"_validate_nbf",
"(",
"claims",
",",
"leeway",
"=",
"0",
")",
":",
"if",
"'nbf'",
"not",
"in",
"claims",
":",
"return",
"try",
":",
"nbf",
"=",
"int",
"(",
"claims",
"[",
"'nbf'",
"]",
")",
"except",
"ValueError",
":",
"raise",
"JWTClaimsError",... | 36.571429 | 24.928571 |
def asum(data, axis=None, mapper=None, blen=None, storage=None,
         create='array', **kwargs):
    """Compute the sum of *data*, optionally along *axis*.

    Delegates to ``reduce_axis`` using :func:`numpy.sum` as the per-block
    reducer and :func:`numpy.add` to combine the block results.
    """
    return reduce_axis(
        data,
        axis=axis,
        reducer=np.sum,
        block_reducer=np.add,
        mapper=mapper,
        blen=blen,
        storage=storage,
        create=create,
        **kwargs
    )
"def",
"asum",
"(",
"data",
",",
"axis",
"=",
"None",
",",
"mapper",
"=",
"None",
",",
"blen",
"=",
"None",
",",
"storage",
"=",
"None",
",",
"create",
"=",
"'array'",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"reduce_axis",
"(",
"data",
",",
... | 52.166667 | 16.166667 |
def _fix_deps_repos(self, dependencies):
"""Fix store deps include in repository
"""
requires = []
for dep in dependencies:
if dep in self.repo_pkg_names:
requires.append(dep)
return requires | [
"def",
"_fix_deps_repos",
"(",
"self",
",",
"dependencies",
")",
":",
"requires",
"=",
"[",
"]",
"for",
"dep",
"in",
"dependencies",
":",
"if",
"dep",
"in",
"self",
".",
"repo_pkg_names",
":",
"requires",
".",
"append",
"(",
"dep",
")",
"return",
"requir... | 31.5 | 6.25 |
async def status_by_state(self, state: str) -> dict:
    """Return the CDC status for the specified state.

    Scans the raw CDC payload for the first entry whose key contains
    *state*; an empty dict means no matching entry was found.
    """
    data = await self.raw_cdc_data()
    for key, value in data.items():
        if state in key:
            return adjust_status(value)
    return {}
"async",
"def",
"status_by_state",
"(",
"self",
",",
"state",
":",
"str",
")",
"->",
"dict",
":",
"data",
"=",
"await",
"self",
".",
"raw_cdc_data",
"(",
")",
"try",
":",
"info",
"=",
"next",
"(",
"(",
"v",
"for",
"k",
",",
"v",
"in",
"data",
"."... | 31.5 | 18.3 |
def parse(self, lint_target):  # type: (AbstractLintTarget) -> Dict[str, Any]
    """ Parse vim script file and return the AST. """
    # Decode the raw bytes first, then normalize CRLF line endings so the
    # parser only ever sees LF-terminated lines.
    decoded = Decoder(default_decoding_strategy).decode(lint_target.read())
    normalized = decoded.replace('\r\n', '\n')
    return self.parse_string(normalized)
"def",
"parse",
"(",
"self",
",",
"lint_target",
")",
":",
"# type: (AbstractLintTarget) -> Dict[str, Any]",
"decoder",
"=",
"Decoder",
"(",
"default_decoding_strategy",
")",
"decoded",
"=",
"decoder",
".",
"decode",
"(",
"lint_target",
".",
"read",
"(",
")",
")",... | 51.714286 | 20.714286 |
def maximum_independent_set(G, sampler=None, lagrange=2.0, **sampler_args):
    """Return an approximate maximum independent set of ``G``.

    An independent set is a set of nodes whose induced subgraph of ``G``
    contains no edges; a maximum independent set is an independent set of
    largest possible size. The problem is encoded as a QUBO whose ground
    states correspond to maximum independent sets, and the sampler is used
    to sample from it.

    Parameters
    ----------
    G : NetworkX graph
        The graph in which to look for a maximum independent set.
    sampler
        A binary quadratic model sampler exposing ``sample_qubo`` and
        ``sample_ising`` methods that return an iterable of samples in
        order of increasing energy. If not given, a default sampler must
        have been registered via the ``set_default_sampler`` function.
    lagrange : optional (default 2)
        Lagrange parameter weighting the constraints (no edges within the
        set) against the objective (largest set possible).
    sampler_args
        Additional keyword parameters passed through to the sampler.

    Returns
    -------
    indep_nodes : list
        Nodes forming a maximum independent set, as determined by the
        given sampler.

    Example
    -------
    Find a maximum independent set of a Chimera unit cell using a
    `dimod <https://github.com/dwavesystems/dimod>`_ sampler.

    >>> import dimod
    >>> sampler = dimod.SimulatedAnnealingSampler()
    >>> G = dnx.chimera_graph(1, 1, 4)
    >>> indep_nodes = dnx.maximum_independent_set(G, sampler)

    Notes
    -----
    Samplers by their nature may not return the optimal solution; this
    function does not attempt to confirm the quality of the returned
    sample.

    References
    ----------
    `Independent Set on Wikipedia <https://en.wikipedia.org/wiki/Independent_set_(graph_theory)>`_

    `QUBO on Wikipedia <https://en.wikipedia.org/wiki/Quadratic_unconstrained_binary_optimization>`_

    .. [AL] Lucas, A. (2014). Ising formulations of many NP problems.
       Frontiers in Physics, Volume 2, Article 5.
    """
    # The unweighted problem is the weighted one with uniform weights,
    # so delegate with a weight of None.
    return maximum_weighted_independent_set(
        G, None, sampler, lagrange, **sampler_args)
"def",
"maximum_independent_set",
"(",
"G",
",",
"sampler",
"=",
"None",
",",
"lagrange",
"=",
"2.0",
",",
"*",
"*",
"sampler_args",
")",
":",
"return",
"maximum_weighted_independent_set",
"(",
"G",
",",
"None",
",",
"sampler",
",",
"lagrange",
",",
"*",
"... | 35.724638 | 26.927536 |
def debug(self, command):
    """
    Posts a debug message adding a timestamp and logging level to it for both file and console handlers.

    Console output is only produced when the console handler is at DEBUG
    level; the file logger always records the message. The console is
    redrawn through the logger's redraw-rate mechanism rather than
    immediately (call 'flush' to force an immediate redraw).

    :param command: The command object that holds all the necessary information from the remote process.
    """
    if self.console_level == logging.DEBUG:
        header = self.get_format().replace('{L}', 'DEBUG')
        self.append_message('{}\t{}\n'.format(header, command.text))
        # Mark the console dirty and request a redraw so the new
        # message shows up.
        self.changes_made = True
        self.redraw()
    self.log.debug('\t{}'.format(command.text))
"def",
"debug",
"(",
"self",
",",
"command",
")",
":",
"if",
"self",
".",
"console_level",
"==",
"logging",
".",
"DEBUG",
":",
"message",
"=",
"self",
".",
"get_format",
"(",
")",
"message",
"=",
"message",
".",
"replace",
"(",
"'{L}'",
",",
"'DEBUG'",... | 47.909091 | 30.090909 |
def find_val(self, eq, val):
    """Return the name of the equation having the given value

    Searches equation array *eq* ('f', 'g' or 'q') for *val* and returns
    a ``(name, index)`` pair for the first match, or ``None`` when *eq*
    is unknown or no entry matches.
    """
    if eq not in ('f', 'g', 'q'):
        return
    # 'f' and 'q' entries are named in unamex; 'g' entries in unamey.
    key = 'unamex' if eq in ('f', 'q') else 'unamey'
    names = self.system.varname.__dict__[key]
    for idx, (name, entry) in enumerate(zip(names, self.__dict__[eq])):
        if entry == val:
            return name, idx
    return
"def",
"find_val",
"(",
"self",
",",
"eq",
",",
"val",
")",
":",
"if",
"eq",
"not",
"in",
"(",
"'f'",
",",
"'g'",
",",
"'q'",
")",
":",
"return",
"elif",
"eq",
"in",
"(",
"'f'",
",",
"'q'",
")",
":",
"key",
"=",
"'unamex'",
"elif",
"eq",
"=="... | 31.142857 | 16.285714 |
def master(cls, cluster_id_label):
    """
    Show the details of the master of the cluster with id/label `cluster_id_label`.

    When the cluster is not up, the raw status dict is returned instead.
    """
    cluster_status = cls.status(cluster_id_label)
    if cluster_status.get("state") != 'UP':
        return cluster_status
    # First node whose role is "master".
    masters = [node for node in cluster_status.get("nodes")
               if node["role"] == "master"]
    return masters[0]
"def",
"master",
"(",
"cls",
",",
"cluster_id_label",
")",
":",
"cluster_status",
"=",
"cls",
".",
"status",
"(",
"cluster_id_label",
")",
"if",
"cluster_status",
".",
"get",
"(",
"\"state\"",
")",
"==",
"'UP'",
":",
"return",
"list",
"(",
"filter",
"(",
... | 42.777778 | 18.111111 |
def _add_group_columns(data, gdf):
    """
    Add group columns to data with a value from the grouped dataframe

    It is assumed that the grouped dataframe contains a single group

    Parameters
    ----------
    data : pandas.DataFrame
        Frame to receive the group columns; modified in place and
        also returned.
    gdf : GroupedDataFrame
        Grouped frame holding a single group; its group columns and
        their values/dtypes are copied into *data*.

    >>> data = pd.DataFrame({
    ...     'x': [5, 6, 7]})
    >>> gdf = GroupedDataFrame({
    ...     'g': list('aaa'),
    ...     'x': range(3)}, groups=['g'])
    >>> _add_group_columns(data, gdf)
    g x
    0 a 5
    1 a 6
    2 a 7
    """
    n = len(data)
    # Plain (ungrouped) dataframes pass through untouched.
    if isinstance(gdf, GroupedDataFrame):
        for i, col in enumerate(gdf.plydata_groups):
            if col not in data:
                # Broadcast the single group value to every row of data.
                group_values = [gdf[col].iloc[0]] * n
                # Need to be careful and maintain the dtypes
                # of the group columns
                if pdtypes.is_categorical_dtype(gdf[col]):
                    # Rebuild the categorical so the full category set and
                    # ordering survive, not just the one value present.
                    col_values = pd.Categorical(
                        group_values,
                        categories=gdf[col].cat.categories,
                        ordered=gdf[col].cat.ordered
                    )
                else:
                    col_values = pd.Series(
                        group_values,
                        index=data.index,
                        dtype=gdf[col].dtype
                    )
                # Group columns come first
                data.insert(i, col, col_values)
    return data
"def",
"_add_group_columns",
"(",
"data",
",",
"gdf",
")",
":",
"n",
"=",
"len",
"(",
"data",
")",
"if",
"isinstance",
"(",
"gdf",
",",
"GroupedDataFrame",
")",
":",
"for",
"i",
",",
"col",
"in",
"enumerate",
"(",
"gdf",
".",
"plydata_groups",
")",
"... | 33.410256 | 14.025641 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.