text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def lbest_idx(state, idx):
""" lbest Neighbourhood topology function.
Neighbourhood size is determined by state.params['n_s'].
Args:
state: cipy.algorithms.pso.State: The state of the PSO algorithm.
idx: int: index of the particle in the swarm.
Returns:
int: The index of the lbest particle.
"""
swarm = state.swarm
n_s = state.params['n_s']
cmp = comparator(swarm[0].best_fitness)
indices = __lbest_indices__(len(swarm), n_s, idx)
best = None
for i in indices:
if best is None or cmp(swarm[i].best_fitness, swarm[best].best_fitness):
best = i
return best | [
"def",
"lbest_idx",
"(",
"state",
",",
"idx",
")",
":",
"swarm",
"=",
"state",
".",
"swarm",
"n_s",
"=",
"state",
".",
"params",
"[",
"'n_s'",
"]",
"cmp",
"=",
"comparator",
"(",
"swarm",
"[",
"0",
"]",
".",
"best_fitness",
")",
"indices",
"=",
"__lbest_indices__",
"(",
"len",
"(",
"swarm",
")",
",",
"n_s",
",",
"idx",
")",
"best",
"=",
"None",
"for",
"i",
"in",
"indices",
":",
"if",
"best",
"is",
"None",
"or",
"cmp",
"(",
"swarm",
"[",
"i",
"]",
".",
"best_fitness",
",",
"swarm",
"[",
"best",
"]",
".",
"best_fitness",
")",
":",
"best",
"=",
"i",
"return",
"best"
] | 30 | 20.809524 |
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the GetUsageAllocation request payload and
decode it into its constituent parts.
Args:
input_stream (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if the data attribute is missing from the
encoded payload.
"""
super(GetUsageAllocationRequestPayload, self).read(
input_stream,
kmip_version=kmip_version
)
local_stream = utils.BytearrayStream(input_stream.read(self.length))
if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):
self._unique_identifier = primitives.TextString(
tag=enums.Tags.UNIQUE_IDENTIFIER
)
self._unique_identifier.read(
local_stream,
kmip_version=kmip_version
)
if self.is_tag_next(enums.Tags.USAGE_LIMITS_COUNT, local_stream):
self._usage_limits_count = primitives.LongInteger(
tag=enums.Tags.USAGE_LIMITS_COUNT
)
self._usage_limits_count.read(
local_stream,
kmip_version=kmip_version
)
self.is_oversized(local_stream) | [
"def",
"read",
"(",
"self",
",",
"input_stream",
",",
"kmip_version",
"=",
"enums",
".",
"KMIPVersion",
".",
"KMIP_1_0",
")",
":",
"super",
"(",
"GetUsageAllocationRequestPayload",
",",
"self",
")",
".",
"read",
"(",
"input_stream",
",",
"kmip_version",
"=",
"kmip_version",
")",
"local_stream",
"=",
"utils",
".",
"BytearrayStream",
"(",
"input_stream",
".",
"read",
"(",
"self",
".",
"length",
")",
")",
"if",
"self",
".",
"is_tag_next",
"(",
"enums",
".",
"Tags",
".",
"UNIQUE_IDENTIFIER",
",",
"local_stream",
")",
":",
"self",
".",
"_unique_identifier",
"=",
"primitives",
".",
"TextString",
"(",
"tag",
"=",
"enums",
".",
"Tags",
".",
"UNIQUE_IDENTIFIER",
")",
"self",
".",
"_unique_identifier",
".",
"read",
"(",
"local_stream",
",",
"kmip_version",
"=",
"kmip_version",
")",
"if",
"self",
".",
"is_tag_next",
"(",
"enums",
".",
"Tags",
".",
"USAGE_LIMITS_COUNT",
",",
"local_stream",
")",
":",
"self",
".",
"_usage_limits_count",
"=",
"primitives",
".",
"LongInteger",
"(",
"tag",
"=",
"enums",
".",
"Tags",
".",
"USAGE_LIMITS_COUNT",
")",
"self",
".",
"_usage_limits_count",
".",
"read",
"(",
"local_stream",
",",
"kmip_version",
"=",
"kmip_version",
")",
"self",
".",
"is_oversized",
"(",
"local_stream",
")"
] | 38.463415 | 20.365854 |
def domains(request):
"""
A page with number of services and layers faceted on domains.
"""
url = ''
query = '*:*&facet=true&facet.limit=-1&facet.pivot=domain_name,service_id&wt=json&indent=true&rows=0'
if settings.SEARCH_TYPE == 'elasticsearch':
url = '%s/select?q=%s' % (settings.SEARCH_URL, query)
if settings.SEARCH_TYPE == 'solr':
url = '%s/solr/hypermap/select?q=%s' % (settings.SEARCH_URL, query)
LOGGER.debug(url)
response = urllib2.urlopen(url)
data = response.read().replace('\n', '')
# stats
layers_count = Layer.objects.all().count()
services_count = Service.objects.all().count()
template = loader.get_template('aggregator/index.html')
context = RequestContext(request, {
'data': data,
'layers_count': layers_count,
'services_count': services_count,
})
return HttpResponse(template.render(context)) | [
"def",
"domains",
"(",
"request",
")",
":",
"url",
"=",
"''",
"query",
"=",
"'*:*&facet=true&facet.limit=-1&facet.pivot=domain_name,service_id&wt=json&indent=true&rows=0'",
"if",
"settings",
".",
"SEARCH_TYPE",
"==",
"'elasticsearch'",
":",
"url",
"=",
"'%s/select?q=%s'",
"%",
"(",
"settings",
".",
"SEARCH_URL",
",",
"query",
")",
"if",
"settings",
".",
"SEARCH_TYPE",
"==",
"'solr'",
":",
"url",
"=",
"'%s/solr/hypermap/select?q=%s'",
"%",
"(",
"settings",
".",
"SEARCH_URL",
",",
"query",
")",
"LOGGER",
".",
"debug",
"(",
"url",
")",
"response",
"=",
"urllib2",
".",
"urlopen",
"(",
"url",
")",
"data",
"=",
"response",
".",
"read",
"(",
")",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
"# stats",
"layers_count",
"=",
"Layer",
".",
"objects",
".",
"all",
"(",
")",
".",
"count",
"(",
")",
"services_count",
"=",
"Service",
".",
"objects",
".",
"all",
"(",
")",
".",
"count",
"(",
")",
"template",
"=",
"loader",
".",
"get_template",
"(",
"'aggregator/index.html'",
")",
"context",
"=",
"RequestContext",
"(",
"request",
",",
"{",
"'data'",
":",
"data",
",",
"'layers_count'",
":",
"layers_count",
",",
"'services_count'",
":",
"services_count",
",",
"}",
")",
"return",
"HttpResponse",
"(",
"template",
".",
"render",
"(",
"context",
")",
")"
] | 39 | 15.695652 |
def encode_file_header(boundary, paramname, filesize, filename=None,
filetype=None):
"""Returns the leading data for a multipart/form-data field that contains
file data.
``boundary`` is the boundary string used throughout a single request to
separate variables.
``paramname`` is the name of the variable in this request.
``filesize`` is the size of the file data.
``filename`` if specified is the filename to give to this field. This
field is only useful to the server for determining the original filename.
``filetype`` if specified is the MIME type of this file.
The actual file data should be sent after this header has been sent.
"""
return MultipartParam(paramname, filesize=filesize, filename=filename,
filetype=filetype).encode_hdr(boundary) | [
"def",
"encode_file_header",
"(",
"boundary",
",",
"paramname",
",",
"filesize",
",",
"filename",
"=",
"None",
",",
"filetype",
"=",
"None",
")",
":",
"return",
"MultipartParam",
"(",
"paramname",
",",
"filesize",
"=",
"filesize",
",",
"filename",
"=",
"filename",
",",
"filetype",
"=",
"filetype",
")",
".",
"encode_hdr",
"(",
"boundary",
")"
] | 37.818182 | 27.181818 |
def _CheckCacheFileForMatch(self, cache_filename, scopes):
"""Checks the cache file to see if it matches the given credentials.
Args:
cache_filename: Cache filename to check.
scopes: Scopes for the desired credentials.
Returns:
List of scopes (if cache matches) or None.
"""
creds = { # Credentials metadata dict.
'scopes': sorted(list(scopes)) if scopes else None,
'svc_acct_name': self.__service_account_name,
}
cache_file = _MultiProcessCacheFile(cache_filename)
try:
cached_creds_str = cache_file.LockedRead()
if not cached_creds_str:
return None
cached_creds = json.loads(cached_creds_str)
if creds['svc_acct_name'] == cached_creds['svc_acct_name']:
if creds['scopes'] in (None, cached_creds['scopes']):
return cached_creds['scopes']
except KeyboardInterrupt:
raise
except: # pylint: disable=bare-except
# Treat exceptions as a cache miss.
pass | [
"def",
"_CheckCacheFileForMatch",
"(",
"self",
",",
"cache_filename",
",",
"scopes",
")",
":",
"creds",
"=",
"{",
"# Credentials metadata dict.",
"'scopes'",
":",
"sorted",
"(",
"list",
"(",
"scopes",
")",
")",
"if",
"scopes",
"else",
"None",
",",
"'svc_acct_name'",
":",
"self",
".",
"__service_account_name",
",",
"}",
"cache_file",
"=",
"_MultiProcessCacheFile",
"(",
"cache_filename",
")",
"try",
":",
"cached_creds_str",
"=",
"cache_file",
".",
"LockedRead",
"(",
")",
"if",
"not",
"cached_creds_str",
":",
"return",
"None",
"cached_creds",
"=",
"json",
".",
"loads",
"(",
"cached_creds_str",
")",
"if",
"creds",
"[",
"'svc_acct_name'",
"]",
"==",
"cached_creds",
"[",
"'svc_acct_name'",
"]",
":",
"if",
"creds",
"[",
"'scopes'",
"]",
"in",
"(",
"None",
",",
"cached_creds",
"[",
"'scopes'",
"]",
")",
":",
"return",
"cached_creds",
"[",
"'scopes'",
"]",
"except",
"KeyboardInterrupt",
":",
"raise",
"except",
":",
"# pylint: disable=bare-except",
"# Treat exceptions as a cache miss.",
"pass"
] | 39.142857 | 17.535714 |
def kunc_p(v, v0, k0, k0p, order=5):
"""
calculate Kunc EOS
see Dorogokupets 2015 for detail
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param k0: bulk modulus at reference conditions
:param k0p: pressure derivative of bulk modulus at reference conditions
:param order: order for the Kunc equation
:return: pressure in GPa
"""
return cal_p_kunc(v, [v0, k0, k0p], order=order,
uncertainties=isuncertainties([v, v0, k0, k0p])) | [
"def",
"kunc_p",
"(",
"v",
",",
"v0",
",",
"k0",
",",
"k0p",
",",
"order",
"=",
"5",
")",
":",
"return",
"cal_p_kunc",
"(",
"v",
",",
"[",
"v0",
",",
"k0",
",",
"k0p",
"]",
",",
"order",
"=",
"order",
",",
"uncertainties",
"=",
"isuncertainties",
"(",
"[",
"v",
",",
"v0",
",",
"k0",
",",
"k0p",
"]",
")",
")"
] | 36.642857 | 12.928571 |
def save(self, project):
'''
Saves an AnswerFactory Project
Args:
project (dict): Dictionary specifying an AnswerFactory Project.
Returns:
AnswerFactory Project id
'''
# test if this is a create vs. an update
if 'id' in project and project['id'] is not None:
# update -> use put op
self.logger.debug('Updating existing project: ' + json.dumps(project))
url = '%(base_url)s/%(project_id)s' % {
'base_url': self.base_url, 'project_id': project['id']
}
r = self.gbdx_connection.put(url, json=project)
try:
r.raise_for_status()
except:
print(r.text)
raise
# updates only get the Accepted response -> return the original project id
return project['id']
else:
self.logger.debug('Creating new project: ' + json.dumps(project))
# create -> use post op
url = self.base_url
r = self.gbdx_connection.post(url, json=project)
try:
r.raise_for_status()
except:
print(r.text)
raise
project_json = r.json()
# create returns the saved project -> return the project id that's saved
return project_json['id'] | [
"def",
"save",
"(",
"self",
",",
"project",
")",
":",
"# test if this is a create vs. an update",
"if",
"'id'",
"in",
"project",
"and",
"project",
"[",
"'id'",
"]",
"is",
"not",
"None",
":",
"# update -> use put op",
"self",
".",
"logger",
".",
"debug",
"(",
"'Updating existing project: '",
"+",
"json",
".",
"dumps",
"(",
"project",
")",
")",
"url",
"=",
"'%(base_url)s/%(project_id)s'",
"%",
"{",
"'base_url'",
":",
"self",
".",
"base_url",
",",
"'project_id'",
":",
"project",
"[",
"'id'",
"]",
"}",
"r",
"=",
"self",
".",
"gbdx_connection",
".",
"put",
"(",
"url",
",",
"json",
"=",
"project",
")",
"try",
":",
"r",
".",
"raise_for_status",
"(",
")",
"except",
":",
"print",
"(",
"r",
".",
"text",
")",
"raise",
"# updates only get the Accepted response -> return the original project id",
"return",
"project",
"[",
"'id'",
"]",
"else",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'Creating new project: '",
"+",
"json",
".",
"dumps",
"(",
"project",
")",
")",
"# create -> use post op",
"url",
"=",
"self",
".",
"base_url",
"r",
"=",
"self",
".",
"gbdx_connection",
".",
"post",
"(",
"url",
",",
"json",
"=",
"project",
")",
"try",
":",
"r",
".",
"raise_for_status",
"(",
")",
"except",
":",
"print",
"(",
"r",
".",
"text",
")",
"raise",
"project_json",
"=",
"r",
".",
"json",
"(",
")",
"# create returns the saved project -> return the project id that's saved",
"return",
"project_json",
"[",
"'id'",
"]"
] | 35.128205 | 20.717949 |
def plot(self,xdata,ydata=[],logScale=False,disp=True,**kwargs):
'''Graphs a line plot.
xdata: list of independent variable data. Can optionally include a header, see testGraph.py in https://github.com/Dfenestrator/GooPyCharts for an example.
ydata: list of dependent variable data. Can be multidimensional. If xdata includes a header, include a header list on ydata as well.
logScale: set to True to set the y axis to log scale.
disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot.
**kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code.
'''
#combine data into proper format
#check if only 1 vector was sent, then plot against a count
if ydata:
data = combineData(xdata,ydata,self.xlabel)
else:
data = combineData(range(len(xdata)),xdata,self.xlabel)
#determine log scale parameter
if logScale:
logScaleStr = 'true'
else:
logScaleStr = 'false'
#Include other options, supplied by **kwargs
other = ''
for option in kwargs:
other += option + ': ' + kwargs[option] + ',\n'
#input argument format to template is in dictionary format (see template for where variables are inserted)
argDict = { 'data': str(data),
'title':self.title,
'functionName':slugify(self.title),
'height': self.height,
'width': self.width,
'logScaleFlag': logScaleStr,
'ylabel': self.ylabel,
'plotType': 'LineChart',
'numFig': self.numFig,
'other': other}
self.javascript = templateType(xdata) % argDict
if disp:
self.dispFile() | [
"def",
"plot",
"(",
"self",
",",
"xdata",
",",
"ydata",
"=",
"[",
"]",
",",
"logScale",
"=",
"False",
",",
"disp",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"#combine data into proper format",
"#check if only 1 vector was sent, then plot against a count",
"if",
"ydata",
":",
"data",
"=",
"combineData",
"(",
"xdata",
",",
"ydata",
",",
"self",
".",
"xlabel",
")",
"else",
":",
"data",
"=",
"combineData",
"(",
"range",
"(",
"len",
"(",
"xdata",
")",
")",
",",
"xdata",
",",
"self",
".",
"xlabel",
")",
"#determine log scale parameter",
"if",
"logScale",
":",
"logScaleStr",
"=",
"'true'",
"else",
":",
"logScaleStr",
"=",
"'false'",
"#Include other options, supplied by **kwargs",
"other",
"=",
"''",
"for",
"option",
"in",
"kwargs",
":",
"other",
"+=",
"option",
"+",
"': '",
"+",
"kwargs",
"[",
"option",
"]",
"+",
"',\\n'",
"#input argument format to template is in dictionary format (see template for where variables are inserted)",
"argDict",
"=",
"{",
"'data'",
":",
"str",
"(",
"data",
")",
",",
"'title'",
":",
"self",
".",
"title",
",",
"'functionName'",
":",
"slugify",
"(",
"self",
".",
"title",
")",
",",
"'height'",
":",
"self",
".",
"height",
",",
"'width'",
":",
"self",
".",
"width",
",",
"'logScaleFlag'",
":",
"logScaleStr",
",",
"'ylabel'",
":",
"self",
".",
"ylabel",
",",
"'plotType'",
":",
"'LineChart'",
",",
"'numFig'",
":",
"self",
".",
"numFig",
",",
"'other'",
":",
"other",
"}",
"self",
".",
"javascript",
"=",
"templateType",
"(",
"xdata",
")",
"%",
"argDict",
"if",
"disp",
":",
"self",
".",
"dispFile",
"(",
")"
] | 43.4 | 26.822222 |
def mps_device_memory_limit():
"""
Returns the memory size in bytes that can be effectively allocated on the
MPS device that will be used, or None if no suitable device is available.
"""
lib = _load_tcmps_lib()
if lib is None:
return None
c_size = _ctypes.c_uint64()
ret = lib.TCMPSMetalDeviceMemoryLimit(_ctypes.byref(c_size))
return c_size.value if ret == 0 else None | [
"def",
"mps_device_memory_limit",
"(",
")",
":",
"lib",
"=",
"_load_tcmps_lib",
"(",
")",
"if",
"lib",
"is",
"None",
":",
"return",
"None",
"c_size",
"=",
"_ctypes",
".",
"c_uint64",
"(",
")",
"ret",
"=",
"lib",
".",
"TCMPSMetalDeviceMemoryLimit",
"(",
"_ctypes",
".",
"byref",
"(",
"c_size",
")",
")",
"return",
"c_size",
".",
"value",
"if",
"ret",
"==",
"0",
"else",
"None"
] | 33.583333 | 18.083333 |
def render_error_debug(request, exception, is_html):
'''Render the ``exception`` traceback
'''
error = Html('div', cn='well well-lg') if is_html else []
for trace in format_traceback(exception):
counter = 0
for line in trace.split('\n'):
if line.startswith(' '):
counter += 1
line = line[2:]
if line:
if is_html:
line = Html('p', escape(line), cn='text-danger')
if counter:
line.css({'margin-left': '%spx' % (20*counter)})
error.append(line)
if is_html:
error = Html('div', Html('h1', request.response.status), error)
return error | [
"def",
"render_error_debug",
"(",
"request",
",",
"exception",
",",
"is_html",
")",
":",
"error",
"=",
"Html",
"(",
"'div'",
",",
"cn",
"=",
"'well well-lg'",
")",
"if",
"is_html",
"else",
"[",
"]",
"for",
"trace",
"in",
"format_traceback",
"(",
"exception",
")",
":",
"counter",
"=",
"0",
"for",
"line",
"in",
"trace",
".",
"split",
"(",
"'\\n'",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"' '",
")",
":",
"counter",
"+=",
"1",
"line",
"=",
"line",
"[",
"2",
":",
"]",
"if",
"line",
":",
"if",
"is_html",
":",
"line",
"=",
"Html",
"(",
"'p'",
",",
"escape",
"(",
"line",
")",
",",
"cn",
"=",
"'text-danger'",
")",
"if",
"counter",
":",
"line",
".",
"css",
"(",
"{",
"'margin-left'",
":",
"'%spx'",
"%",
"(",
"20",
"*",
"counter",
")",
"}",
")",
"error",
".",
"append",
"(",
"line",
")",
"if",
"is_html",
":",
"error",
"=",
"Html",
"(",
"'div'",
",",
"Html",
"(",
"'h1'",
",",
"request",
".",
"response",
".",
"status",
")",
",",
"error",
")",
"return",
"error"
] | 37.526316 | 16.157895 |
def get_signature(self, signature):
"""Retrieve one signature, discriminated by name or id.
Note that signature name is not case sensitive.
:param: a zobjects.Signature describing the signature
like "Signature(name='my-sig')"
:returns: a zobjects.Signature object, filled with the signature if no
signature is matching, returns None.
"""
resp = self.request_list('GetSignatures')
# GetSignature does not allow to filter the results, so we do it by
# hand...
if resp and (len(resp) > 0):
for sig_dict in resp:
sig = zobjects.Signature.from_dict(sig_dict)
if hasattr(signature, 'id'):
its_this_one = (sig.id == signature.id)
elif hasattr(signature, 'name'):
its_this_one = (sig.name.upper() == signature.name.upper())
else:
raise ValueError('should mention one of id,name')
if its_this_one:
return sig
else:
return None | [
"def",
"get_signature",
"(",
"self",
",",
"signature",
")",
":",
"resp",
"=",
"self",
".",
"request_list",
"(",
"'GetSignatures'",
")",
"# GetSignature does not allow to filter the results, so we do it by",
"# hand...",
"if",
"resp",
"and",
"(",
"len",
"(",
"resp",
")",
">",
"0",
")",
":",
"for",
"sig_dict",
"in",
"resp",
":",
"sig",
"=",
"zobjects",
".",
"Signature",
".",
"from_dict",
"(",
"sig_dict",
")",
"if",
"hasattr",
"(",
"signature",
",",
"'id'",
")",
":",
"its_this_one",
"=",
"(",
"sig",
".",
"id",
"==",
"signature",
".",
"id",
")",
"elif",
"hasattr",
"(",
"signature",
",",
"'name'",
")",
":",
"its_this_one",
"=",
"(",
"sig",
".",
"name",
".",
"upper",
"(",
")",
"==",
"signature",
".",
"name",
".",
"upper",
"(",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'should mention one of id,name'",
")",
"if",
"its_this_one",
":",
"return",
"sig",
"else",
":",
"return",
"None"
] | 38.928571 | 19.142857 |
def augment_send(self, send_func):
"""
:param send_func:
a function that sends messages, such as :meth:`.Bot.send\*`
:return:
a function that wraps around ``send_func`` and examines whether the
sent message contains an inline keyboard with callback data. If so,
future callback query originating from the sent message will be captured.
"""
def augmented(*aa, **kw):
sent = send_func(*aa, **kw)
if self._enable_chat and self._contains_callback_data(kw):
self.capture_origin(message_identifier(sent))
return sent
return augmented | [
"def",
"augment_send",
"(",
"self",
",",
"send_func",
")",
":",
"def",
"augmented",
"(",
"*",
"aa",
",",
"*",
"*",
"kw",
")",
":",
"sent",
"=",
"send_func",
"(",
"*",
"aa",
",",
"*",
"*",
"kw",
")",
"if",
"self",
".",
"_enable_chat",
"and",
"self",
".",
"_contains_callback_data",
"(",
"kw",
")",
":",
"self",
".",
"capture_origin",
"(",
"message_identifier",
"(",
"sent",
")",
")",
"return",
"sent",
"return",
"augmented"
] | 36.722222 | 22.833333 |
def set_last_position(self, last_position):
"""
Called from the manager, it is in charge of updating the last position of data commited
by the writer, in order to have resume support
"""
if last_position:
if isinstance(last_position, six.string_types):
last_key = last_position
else:
last_key = last_position.get('last_key', '')
self.last_position = dict(last_key=last_key)
self.collection_scanner.set_startafter(last_key)
else:
self.last_position = {} | [
"def",
"set_last_position",
"(",
"self",
",",
"last_position",
")",
":",
"if",
"last_position",
":",
"if",
"isinstance",
"(",
"last_position",
",",
"six",
".",
"string_types",
")",
":",
"last_key",
"=",
"last_position",
"else",
":",
"last_key",
"=",
"last_position",
".",
"get",
"(",
"'last_key'",
",",
"''",
")",
"self",
".",
"last_position",
"=",
"dict",
"(",
"last_key",
"=",
"last_key",
")",
"self",
".",
"collection_scanner",
".",
"set_startafter",
"(",
"last_key",
")",
"else",
":",
"self",
".",
"last_position",
"=",
"{",
"}"
] | 41.357143 | 15.5 |
def set(self, value):
"""Set a GValue.
The value is converted to the type of the GValue, if possible, and
assigned.
"""
# logger.debug('GValue.set: value = %s', value)
gtype = self.gvalue.g_type
fundamental = gobject_lib.g_type_fundamental(gtype)
if gtype == GValue.gbool_type:
gobject_lib.g_value_set_boolean(self.gvalue, value)
elif gtype == GValue.gint_type:
gobject_lib.g_value_set_int(self.gvalue, int(value))
elif gtype == GValue.guint64_type:
gobject_lib.g_value_set_uint64(self.gvalue, value)
elif gtype == GValue.gdouble_type:
gobject_lib.g_value_set_double(self.gvalue, value)
elif fundamental == GValue.genum_type:
gobject_lib.g_value_set_enum(self.gvalue,
GValue.to_enum(gtype, value))
elif fundamental == GValue.gflags_type:
gobject_lib.g_value_set_flags(self.gvalue, value)
elif gtype == GValue.gstr_type:
gobject_lib.g_value_set_string(self.gvalue, _to_bytes(value))
elif gtype == GValue.refstr_type:
vips_lib.vips_value_set_ref_string(self.gvalue, _to_bytes(value))
elif fundamental == GValue.gobject_type:
gobject_lib.g_value_set_object(self.gvalue, value.pointer)
elif gtype == GValue.array_int_type:
if isinstance(value, numbers.Number):
value = [value]
array = ffi.new('int[]', value)
vips_lib.vips_value_set_array_int(self.gvalue, array, len(value))
elif gtype == GValue.array_double_type:
if isinstance(value, numbers.Number):
value = [value]
array = ffi.new('double[]', value)
vips_lib.vips_value_set_array_double(self.gvalue, array,
len(value))
elif gtype == GValue.array_image_type:
if isinstance(value, pyvips.Image):
value = [value]
vips_lib.vips_value_set_array_image(self.gvalue, len(value))
array = vips_lib.vips_value_get_array_image(self.gvalue, ffi.NULL)
for i, image in enumerate(value):
gobject_lib.g_object_ref(image.pointer)
array[i] = image.pointer
elif gtype == GValue.blob_type:
# we need to set the blob to a copy of the string that vips_lib
# can own
memory = glib_lib.g_malloc(len(value))
ffi.memmove(memory, value, len(value))
# this is horrible!
#
# * in API mode, we must have 8.6+ and use set_blob_free to
# attach the metadata to avoid leaks
# * pre-8.6, we just pass a NULL free pointer and live with the
# leak
#
# this is because in API mode you can't pass a builtin (what
# vips_lib.g_free() becomes) as a parameter to ffi.callback(), and
# vips_value_set_blob() needs a callback for arg 2
#
# additionally, you can't make a py def which calls g_free() and
# then use the py def in the callback, since libvips will trigger
# these functions during cleanup, and py will have shut down by
# then and you'll get a segv
if at_least_libvips(8, 6):
vips_lib.vips_value_set_blob_free(self.gvalue,
memory, len(value))
else:
if pyvips.API_mode:
vips_lib.vips_value_set_blob(self.gvalue,
ffi.NULL, memory, len(value))
else:
vips_lib.vips_value_set_blob(self.gvalue,
glib_lib.g_free,
memory, len(value))
else:
raise Error('unsupported gtype for set {0}, fundamental {1}'.
format(type_name(gtype), type_name(fundamental))) | [
"def",
"set",
"(",
"self",
",",
"value",
")",
":",
"# logger.debug('GValue.set: value = %s', value)",
"gtype",
"=",
"self",
".",
"gvalue",
".",
"g_type",
"fundamental",
"=",
"gobject_lib",
".",
"g_type_fundamental",
"(",
"gtype",
")",
"if",
"gtype",
"==",
"GValue",
".",
"gbool_type",
":",
"gobject_lib",
".",
"g_value_set_boolean",
"(",
"self",
".",
"gvalue",
",",
"value",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"gint_type",
":",
"gobject_lib",
".",
"g_value_set_int",
"(",
"self",
".",
"gvalue",
",",
"int",
"(",
"value",
")",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"guint64_type",
":",
"gobject_lib",
".",
"g_value_set_uint64",
"(",
"self",
".",
"gvalue",
",",
"value",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"gdouble_type",
":",
"gobject_lib",
".",
"g_value_set_double",
"(",
"self",
".",
"gvalue",
",",
"value",
")",
"elif",
"fundamental",
"==",
"GValue",
".",
"genum_type",
":",
"gobject_lib",
".",
"g_value_set_enum",
"(",
"self",
".",
"gvalue",
",",
"GValue",
".",
"to_enum",
"(",
"gtype",
",",
"value",
")",
")",
"elif",
"fundamental",
"==",
"GValue",
".",
"gflags_type",
":",
"gobject_lib",
".",
"g_value_set_flags",
"(",
"self",
".",
"gvalue",
",",
"value",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"gstr_type",
":",
"gobject_lib",
".",
"g_value_set_string",
"(",
"self",
".",
"gvalue",
",",
"_to_bytes",
"(",
"value",
")",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"refstr_type",
":",
"vips_lib",
".",
"vips_value_set_ref_string",
"(",
"self",
".",
"gvalue",
",",
"_to_bytes",
"(",
"value",
")",
")",
"elif",
"fundamental",
"==",
"GValue",
".",
"gobject_type",
":",
"gobject_lib",
".",
"g_value_set_object",
"(",
"self",
".",
"gvalue",
",",
"value",
".",
"pointer",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"array_int_type",
":",
"if",
"isinstance",
"(",
"value",
",",
"numbers",
".",
"Number",
")",
":",
"value",
"=",
"[",
"value",
"]",
"array",
"=",
"ffi",
".",
"new",
"(",
"'int[]'",
",",
"value",
")",
"vips_lib",
".",
"vips_value_set_array_int",
"(",
"self",
".",
"gvalue",
",",
"array",
",",
"len",
"(",
"value",
")",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"array_double_type",
":",
"if",
"isinstance",
"(",
"value",
",",
"numbers",
".",
"Number",
")",
":",
"value",
"=",
"[",
"value",
"]",
"array",
"=",
"ffi",
".",
"new",
"(",
"'double[]'",
",",
"value",
")",
"vips_lib",
".",
"vips_value_set_array_double",
"(",
"self",
".",
"gvalue",
",",
"array",
",",
"len",
"(",
"value",
")",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"array_image_type",
":",
"if",
"isinstance",
"(",
"value",
",",
"pyvips",
".",
"Image",
")",
":",
"value",
"=",
"[",
"value",
"]",
"vips_lib",
".",
"vips_value_set_array_image",
"(",
"self",
".",
"gvalue",
",",
"len",
"(",
"value",
")",
")",
"array",
"=",
"vips_lib",
".",
"vips_value_get_array_image",
"(",
"self",
".",
"gvalue",
",",
"ffi",
".",
"NULL",
")",
"for",
"i",
",",
"image",
"in",
"enumerate",
"(",
"value",
")",
":",
"gobject_lib",
".",
"g_object_ref",
"(",
"image",
".",
"pointer",
")",
"array",
"[",
"i",
"]",
"=",
"image",
".",
"pointer",
"elif",
"gtype",
"==",
"GValue",
".",
"blob_type",
":",
"# we need to set the blob to a copy of the string that vips_lib",
"# can own",
"memory",
"=",
"glib_lib",
".",
"g_malloc",
"(",
"len",
"(",
"value",
")",
")",
"ffi",
".",
"memmove",
"(",
"memory",
",",
"value",
",",
"len",
"(",
"value",
")",
")",
"# this is horrible!",
"#",
"# * in API mode, we must have 8.6+ and use set_blob_free to",
"# attach the metadata to avoid leaks",
"# * pre-8.6, we just pass a NULL free pointer and live with the",
"# leak",
"#",
"# this is because in API mode you can't pass a builtin (what",
"# vips_lib.g_free() becomes) as a parameter to ffi.callback(), and",
"# vips_value_set_blob() needs a callback for arg 2",
"#",
"# additionally, you can't make a py def which calls g_free() and",
"# then use the py def in the callback, since libvips will trigger",
"# these functions during cleanup, and py will have shut down by",
"# then and you'll get a segv",
"if",
"at_least_libvips",
"(",
"8",
",",
"6",
")",
":",
"vips_lib",
".",
"vips_value_set_blob_free",
"(",
"self",
".",
"gvalue",
",",
"memory",
",",
"len",
"(",
"value",
")",
")",
"else",
":",
"if",
"pyvips",
".",
"API_mode",
":",
"vips_lib",
".",
"vips_value_set_blob",
"(",
"self",
".",
"gvalue",
",",
"ffi",
".",
"NULL",
",",
"memory",
",",
"len",
"(",
"value",
")",
")",
"else",
":",
"vips_lib",
".",
"vips_value_set_blob",
"(",
"self",
".",
"gvalue",
",",
"glib_lib",
".",
"g_free",
",",
"memory",
",",
"len",
"(",
"value",
")",
")",
"else",
":",
"raise",
"Error",
"(",
"'unsupported gtype for set {0}, fundamental {1}'",
".",
"format",
"(",
"type_name",
"(",
"gtype",
")",
",",
"type_name",
"(",
"fundamental",
")",
")",
")"
] | 44.733333 | 20.455556 |
def find_related_modules(package, related_name_re='.+',
ignore_exceptions=False):
"""Find matching modules using a package and a module name pattern."""
warnings.warn('find_related_modules has been deprecated.',
DeprecationWarning)
package_elements = package.rsplit(".", 1)
try:
if len(package_elements) == 2:
pkg = __import__(package_elements[0], globals(), locals(), [
package_elements[1]])
pkg = getattr(pkg, package_elements[1])
else:
pkg = __import__(package_elements[0], globals(), locals(), [])
pkg_path = pkg.__path__
except AttributeError:
return []
# Find all modules named according to related_name
p = re.compile(related_name_re)
modules = []
for name in find_modules(package, include_packages=True):
if p.match(name.split('.')[-1]):
try:
modules.append(import_string(name, silent=ignore_exceptions))
except Exception as e:
if not ignore_exceptions:
raise e
return modules | [
"def",
"find_related_modules",
"(",
"package",
",",
"related_name_re",
"=",
"'.+'",
",",
"ignore_exceptions",
"=",
"False",
")",
":",
"warnings",
".",
"warn",
"(",
"'find_related_modules has been deprecated.'",
",",
"DeprecationWarning",
")",
"package_elements",
"=",
"package",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"try",
":",
"if",
"len",
"(",
"package_elements",
")",
"==",
"2",
":",
"pkg",
"=",
"__import__",
"(",
"package_elements",
"[",
"0",
"]",
",",
"globals",
"(",
")",
",",
"locals",
"(",
")",
",",
"[",
"package_elements",
"[",
"1",
"]",
"]",
")",
"pkg",
"=",
"getattr",
"(",
"pkg",
",",
"package_elements",
"[",
"1",
"]",
")",
"else",
":",
"pkg",
"=",
"__import__",
"(",
"package_elements",
"[",
"0",
"]",
",",
"globals",
"(",
")",
",",
"locals",
"(",
")",
",",
"[",
"]",
")",
"pkg_path",
"=",
"pkg",
".",
"__path__",
"except",
"AttributeError",
":",
"return",
"[",
"]",
"# Find all modules named according to related_name",
"p",
"=",
"re",
".",
"compile",
"(",
"related_name_re",
")",
"modules",
"=",
"[",
"]",
"for",
"name",
"in",
"find_modules",
"(",
"package",
",",
"include_packages",
"=",
"True",
")",
":",
"if",
"p",
".",
"match",
"(",
"name",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
")",
":",
"try",
":",
"modules",
".",
"append",
"(",
"import_string",
"(",
"name",
",",
"silent",
"=",
"ignore_exceptions",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"not",
"ignore_exceptions",
":",
"raise",
"e",
"return",
"modules"
] | 37.4 | 17.866667 |
def handle(cls, value, **kwargs):
"""Use a value from the environment or fall back to a default if the
environment doesn't contain the variable.
Format of value:
<env_var>::<default value>
For example:
Groups: ${default app_security_groups::sg-12345,sg-67890}
If `app_security_groups` is defined in the environment, its defined
value will be returned. Otherwise, `sg-12345,sg-67890` will be the
returned value.
This allows defaults to be set at the config file level.
"""
try:
env_var_name, default_val = value.split("::", 1)
except ValueError:
raise ValueError("Invalid value for default: %s. Must be in "
"<env_var>::<default value> format." % value)
if env_var_name in kwargs['context'].environment:
return kwargs['context'].environment[env_var_name]
else:
return default_val | [
"def",
"handle",
"(",
"cls",
",",
"value",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"env_var_name",
",",
"default_val",
"=",
"value",
".",
"split",
"(",
"\"::\"",
",",
"1",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for default: %s. Must be in \"",
"\"<env_var>::<default value> format.\"",
"%",
"value",
")",
"if",
"env_var_name",
"in",
"kwargs",
"[",
"'context'",
"]",
".",
"environment",
":",
"return",
"kwargs",
"[",
"'context'",
"]",
".",
"environment",
"[",
"env_var_name",
"]",
"else",
":",
"return",
"default_val"
] | 33.310345 | 24.862069 |
def cli(env, identifier, path, name):
"""Adds an attachment to an existing ticket."""
mgr = SoftLayer.TicketManager(env.client)
ticket_id = helpers.resolve_id(mgr.resolve_ids, identifier, 'ticket')
if path is None:
raise exceptions.ArgumentError("Missing argument --path")
if not os.path.exists(path):
raise exceptions.ArgumentError("%s not exist" % path)
if name is None:
name = os.path.basename(path)
attached_file = mgr.upload_attachment(ticket_id=ticket_id,
file_path=path,
file_name=name)
env.fout("File attached: \n%s" % attached_file) | [
"def",
"cli",
"(",
"env",
",",
"identifier",
",",
"path",
",",
"name",
")",
":",
"mgr",
"=",
"SoftLayer",
".",
"TicketManager",
"(",
"env",
".",
"client",
")",
"ticket_id",
"=",
"helpers",
".",
"resolve_id",
"(",
"mgr",
".",
"resolve_ids",
",",
"identifier",
",",
"'ticket'",
")",
"if",
"path",
"is",
"None",
":",
"raise",
"exceptions",
".",
"ArgumentError",
"(",
"\"Missing argument --path\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"raise",
"exceptions",
".",
"ArgumentError",
"(",
"\"%s not exist\"",
"%",
"path",
")",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
"attached_file",
"=",
"mgr",
".",
"upload_attachment",
"(",
"ticket_id",
"=",
"ticket_id",
",",
"file_path",
"=",
"path",
",",
"file_name",
"=",
"name",
")",
"env",
".",
"fout",
"(",
"\"File attached: \\n%s\"",
"%",
"attached_file",
")"
] | 35.157895 | 21.315789 |
def js_on_change(self, event, *callbacks):
''' Attach a ``CustomJS`` callback to an arbitrary BokehJS model event.
On the BokehJS side, change events for model properties have the
form ``"change:property_name"``. As a convenience, if the event name
passed to this method is also the name of a property on the model,
then it will be prefixed with ``"change:"`` automatically:
.. code:: python
# these two are equivalent
source.js_on_change('data', callback)
source.js_on_change('change:data', callback)
However, there are other kinds of events that can be useful to respond
to, in addition to property change events. For example to run a
callback whenever data is streamed to a ``ColumnDataSource``, use the
``"stream"`` event on the source:
.. code:: python
source.js_on_change('streaming', callback)
'''
if len(callbacks) == 0:
raise ValueError("js_on_change takes an event name and one or more callbacks, got only one parameter")
# handle any CustomJS callbacks here
from bokeh.models.callbacks import CustomJS
if not all(isinstance(x, CustomJS) for x in callbacks):
raise ValueError("not all callback values are CustomJS instances")
if event in self.properties():
event = "change:%s" % event
if event not in self.js_property_callbacks:
self.js_property_callbacks[event] = []
for callback in callbacks:
if callback in self.js_property_callbacks[event]:
continue
self.js_property_callbacks[event].append(callback) | [
"def",
"js_on_change",
"(",
"self",
",",
"event",
",",
"*",
"callbacks",
")",
":",
"if",
"len",
"(",
"callbacks",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"js_on_change takes an event name and one or more callbacks, got only one parameter\"",
")",
"# handle any CustomJS callbacks here",
"from",
"bokeh",
".",
"models",
".",
"callbacks",
"import",
"CustomJS",
"if",
"not",
"all",
"(",
"isinstance",
"(",
"x",
",",
"CustomJS",
")",
"for",
"x",
"in",
"callbacks",
")",
":",
"raise",
"ValueError",
"(",
"\"not all callback values are CustomJS instances\"",
")",
"if",
"event",
"in",
"self",
".",
"properties",
"(",
")",
":",
"event",
"=",
"\"change:%s\"",
"%",
"event",
"if",
"event",
"not",
"in",
"self",
".",
"js_property_callbacks",
":",
"self",
".",
"js_property_callbacks",
"[",
"event",
"]",
"=",
"[",
"]",
"for",
"callback",
"in",
"callbacks",
":",
"if",
"callback",
"in",
"self",
".",
"js_property_callbacks",
"[",
"event",
"]",
":",
"continue",
"self",
".",
"js_property_callbacks",
"[",
"event",
"]",
".",
"append",
"(",
"callback",
")"
] | 40.780488 | 25.02439 |
def get(
self, uri, host=None, strict_slashes=None, version=None, name=None
):
"""
Add an API URL under the **GET** *HTTP* method
:param uri: URL to be tagged to **GET** method of *HTTP*
:param host: Host IP or FQDN for the service to use
:param strict_slashes: Instruct :class:`sanic.app.Sanic` to check
if the request URLs need to terminate with a */*
:param version: API Version
:param name: Unique name that can be used to identify the Route
:return: Object decorated with :func:`route` method
"""
return self.route(
uri,
methods=frozenset({"GET"}),
host=host,
strict_slashes=strict_slashes,
version=version,
name=name,
) | [
"def",
"get",
"(",
"self",
",",
"uri",
",",
"host",
"=",
"None",
",",
"strict_slashes",
"=",
"None",
",",
"version",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"return",
"self",
".",
"route",
"(",
"uri",
",",
"methods",
"=",
"frozenset",
"(",
"{",
"\"GET\"",
"}",
")",
",",
"host",
"=",
"host",
",",
"strict_slashes",
"=",
"strict_slashes",
",",
"version",
"=",
"version",
",",
"name",
"=",
"name",
",",
")"
] | 35.863636 | 19.318182 |
def is_connected(self):
r"""Check if the graph is connected (cached).
A graph is connected if and only if there exists a (directed) path
between any two vertices.
Returns
-------
connected : bool
True if the graph is connected, False otherwise.
Notes
-----
For undirected graphs, starting at a vertex and trying to visit all the
others is enough.
For directed graphs, one needs to check that a vertex can both be
visited by all the others and visit all the others.
Examples
--------
Connected graph:
>>> graph = graphs.Graph([
... [0, 3, 0, 0],
... [3, 0, 4, 0],
... [0, 4, 0, 2],
... [0, 0, 2, 0],
... ])
>>> graph.is_connected()
True
Disconnected graph:
>>> graph = graphs.Graph([
... [0, 3, 0, 0],
... [3, 0, 4, 0],
... [0, 0, 0, 2],
... [0, 0, 2, 0],
... ])
>>> graph.is_connected()
False
"""
if self._connected is not None:
return self._connected
adjacencies = [self.W]
if self.is_directed():
adjacencies.append(self.W.T)
for adjacency in adjacencies:
visited = np.zeros(self.n_vertices, dtype=np.bool)
stack = set([0])
while stack:
vertex = stack.pop()
if visited[vertex]:
continue
visited[vertex] = True
neighbors = adjacency[vertex].nonzero()[1]
stack.update(neighbors)
if not np.all(visited):
self._connected = False
return self._connected
self._connected = True
return self._connected | [
"def",
"is_connected",
"(",
"self",
")",
":",
"if",
"self",
".",
"_connected",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_connected",
"adjacencies",
"=",
"[",
"self",
".",
"W",
"]",
"if",
"self",
".",
"is_directed",
"(",
")",
":",
"adjacencies",
".",
"append",
"(",
"self",
".",
"W",
".",
"T",
")",
"for",
"adjacency",
"in",
"adjacencies",
":",
"visited",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"n_vertices",
",",
"dtype",
"=",
"np",
".",
"bool",
")",
"stack",
"=",
"set",
"(",
"[",
"0",
"]",
")",
"while",
"stack",
":",
"vertex",
"=",
"stack",
".",
"pop",
"(",
")",
"if",
"visited",
"[",
"vertex",
"]",
":",
"continue",
"visited",
"[",
"vertex",
"]",
"=",
"True",
"neighbors",
"=",
"adjacency",
"[",
"vertex",
"]",
".",
"nonzero",
"(",
")",
"[",
"1",
"]",
"stack",
".",
"update",
"(",
"neighbors",
")",
"if",
"not",
"np",
".",
"all",
"(",
"visited",
")",
":",
"self",
".",
"_connected",
"=",
"False",
"return",
"self",
".",
"_connected",
"self",
".",
"_connected",
"=",
"True",
"return",
"self",
".",
"_connected"
] | 24.657534 | 20.191781 |
def confindr_reporter(self, analysistype='confindr'):
"""
Creates a final report of all the ConFindr results
"""
# Initialise the data strings
data = 'Strain,Genus,NumContamSNVs,ContamStatus,PercentContam,PercentContamSTD\n'
with open(os.path.join(self.reportpath, analysistype + '.csv'), 'w') as report:
# Iterate through all the results
for sample in self.runmetadata.samples:
data += '{str},{genus},{numcontamsnv},{status},{pc},{pcs}\n'.format(
str=sample.name,
genus=sample.confindr.genus,
numcontamsnv=sample.confindr.num_contaminated_snvs,
status=sample.confindr.contam_status,
pc=sample.confindr.percent_contam,
pcs=sample.confindr.percent_contam_std
)
# Write the string to the report
report.write(data) | [
"def",
"confindr_reporter",
"(",
"self",
",",
"analysistype",
"=",
"'confindr'",
")",
":",
"# Initialise the data strings",
"data",
"=",
"'Strain,Genus,NumContamSNVs,ContamStatus,PercentContam,PercentContamSTD\\n'",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"reportpath",
",",
"analysistype",
"+",
"'.csv'",
")",
",",
"'w'",
")",
"as",
"report",
":",
"# Iterate through all the results",
"for",
"sample",
"in",
"self",
".",
"runmetadata",
".",
"samples",
":",
"data",
"+=",
"'{str},{genus},{numcontamsnv},{status},{pc},{pcs}\\n'",
".",
"format",
"(",
"str",
"=",
"sample",
".",
"name",
",",
"genus",
"=",
"sample",
".",
"confindr",
".",
"genus",
",",
"numcontamsnv",
"=",
"sample",
".",
"confindr",
".",
"num_contaminated_snvs",
",",
"status",
"=",
"sample",
".",
"confindr",
".",
"contam_status",
",",
"pc",
"=",
"sample",
".",
"confindr",
".",
"percent_contam",
",",
"pcs",
"=",
"sample",
".",
"confindr",
".",
"percent_contam_std",
")",
"# Write the string to the report",
"report",
".",
"write",
"(",
"data",
")"
] | 49.526316 | 16.789474 |
def layer_mapproxy(request, catalog_slug, layer_uuid, path_info):
"""
Get Layer with matching catalog and uuid
"""
layer = get_object_or_404(Layer,
uuid=layer_uuid,
catalog__slug=catalog_slug)
# for WorldMap layers we need to use the url of the layer
if layer.service.type == 'Hypermap:WorldMap':
layer.service.url = layer.url
# Set up a mapproxy app for this particular layer
mp, yaml_config = get_mapproxy(layer)
query = request.META['QUERY_STRING']
if len(query) > 0:
path_info = path_info + '?' + query
params = {}
headers = {
'X-Script-Name': '/registry/{0}/layer/{1}/map/'.format(catalog_slug, layer.id),
'X-Forwarded-Host': request.META['HTTP_HOST'],
'HTTP_HOST': request.META['HTTP_HOST'],
'SERVER_NAME': request.META['SERVER_NAME'],
}
if path_info == '/config':
response = HttpResponse(yaml_config, content_type='text/plain')
return response
# Get a response from MapProxy as if it was running standalone.
mp_response = mp.get(path_info, params, headers)
# Create a Django response from the MapProxy WSGI response.
response = HttpResponse(mp_response.body, status=mp_response.status_int)
for header, value in mp_response.headers.iteritems():
response[header] = value
return response | [
"def",
"layer_mapproxy",
"(",
"request",
",",
"catalog_slug",
",",
"layer_uuid",
",",
"path_info",
")",
":",
"layer",
"=",
"get_object_or_404",
"(",
"Layer",
",",
"uuid",
"=",
"layer_uuid",
",",
"catalog__slug",
"=",
"catalog_slug",
")",
"# for WorldMap layers we need to use the url of the layer",
"if",
"layer",
".",
"service",
".",
"type",
"==",
"'Hypermap:WorldMap'",
":",
"layer",
".",
"service",
".",
"url",
"=",
"layer",
".",
"url",
"# Set up a mapproxy app for this particular layer",
"mp",
",",
"yaml_config",
"=",
"get_mapproxy",
"(",
"layer",
")",
"query",
"=",
"request",
".",
"META",
"[",
"'QUERY_STRING'",
"]",
"if",
"len",
"(",
"query",
")",
">",
"0",
":",
"path_info",
"=",
"path_info",
"+",
"'?'",
"+",
"query",
"params",
"=",
"{",
"}",
"headers",
"=",
"{",
"'X-Script-Name'",
":",
"'/registry/{0}/layer/{1}/map/'",
".",
"format",
"(",
"catalog_slug",
",",
"layer",
".",
"id",
")",
",",
"'X-Forwarded-Host'",
":",
"request",
".",
"META",
"[",
"'HTTP_HOST'",
"]",
",",
"'HTTP_HOST'",
":",
"request",
".",
"META",
"[",
"'HTTP_HOST'",
"]",
",",
"'SERVER_NAME'",
":",
"request",
".",
"META",
"[",
"'SERVER_NAME'",
"]",
",",
"}",
"if",
"path_info",
"==",
"'/config'",
":",
"response",
"=",
"HttpResponse",
"(",
"yaml_config",
",",
"content_type",
"=",
"'text/plain'",
")",
"return",
"response",
"# Get a response from MapProxy as if it was running standalone.",
"mp_response",
"=",
"mp",
".",
"get",
"(",
"path_info",
",",
"params",
",",
"headers",
")",
"# Create a Django response from the MapProxy WSGI response.",
"response",
"=",
"HttpResponse",
"(",
"mp_response",
".",
"body",
",",
"status",
"=",
"mp_response",
".",
"status_int",
")",
"for",
"header",
",",
"value",
"in",
"mp_response",
".",
"headers",
".",
"iteritems",
"(",
")",
":",
"response",
"[",
"header",
"]",
"=",
"value",
"return",
"response"
] | 34.04878 | 20.926829 |
def _normalized(self, data):
"""
Does a normalization of sorts on image type data so that values
that should be integers are converted from strings
"""
int_keys = ('frames', 'width', 'height', 'size')
for key in int_keys:
if key not in data:
continue
try:
data[key] = int(data[key])
except ValueError:
pass # Ignored
return data | [
"def",
"_normalized",
"(",
"self",
",",
"data",
")",
":",
"int_keys",
"=",
"(",
"'frames'",
",",
"'width'",
",",
"'height'",
",",
"'size'",
")",
"for",
"key",
"in",
"int_keys",
":",
"if",
"key",
"not",
"in",
"data",
":",
"continue",
"try",
":",
"data",
"[",
"key",
"]",
"=",
"int",
"(",
"data",
"[",
"key",
"]",
")",
"except",
"ValueError",
":",
"pass",
"# Ignored",
"return",
"data"
] | 26.823529 | 17.647059 |
def _spintaylor_aligned_prec_swapper(**p):
"""
SpinTaylorF2 is only single spin, it also struggles with anti-aligned spin
waveforms. This construct chooses between the aligned-twospin TaylorF2 model
and the precessing singlespin SpinTaylorF2 models. If aligned spins are
given, use TaylorF2, if nonaligned spins are given use SpinTaylorF2. In
the case of nonaligned doublespin systems the code will fail at the
waveform generator level.
"""
orig_approximant = p['approximant']
if p['spin2x'] == 0 and p['spin2y'] == 0 and p['spin1x'] == 0 and \
p['spin1y'] == 0:
p['approximant'] = 'TaylorF2'
else:
p['approximant'] = 'SpinTaylorF2'
hp, hc = _lalsim_fd_waveform(**p)
p['approximant'] = orig_approximant
return hp, hc | [
"def",
"_spintaylor_aligned_prec_swapper",
"(",
"*",
"*",
"p",
")",
":",
"orig_approximant",
"=",
"p",
"[",
"'approximant'",
"]",
"if",
"p",
"[",
"'spin2x'",
"]",
"==",
"0",
"and",
"p",
"[",
"'spin2y'",
"]",
"==",
"0",
"and",
"p",
"[",
"'spin1x'",
"]",
"==",
"0",
"and",
"p",
"[",
"'spin1y'",
"]",
"==",
"0",
":",
"p",
"[",
"'approximant'",
"]",
"=",
"'TaylorF2'",
"else",
":",
"p",
"[",
"'approximant'",
"]",
"=",
"'SpinTaylorF2'",
"hp",
",",
"hc",
"=",
"_lalsim_fd_waveform",
"(",
"*",
"*",
"p",
")",
"p",
"[",
"'approximant'",
"]",
"=",
"orig_approximant",
"return",
"hp",
",",
"hc"
] | 46.277778 | 18.055556 |
def QA_util_datetime_to_strdate(dt):
"""
:param dt: pythone datetime.datetime
:return: 1999-02-01 string type
"""
strdate = "%04d-%02d-%02d" % (dt.year, dt.month, dt.day)
return strdate | [
"def",
"QA_util_datetime_to_strdate",
"(",
"dt",
")",
":",
"strdate",
"=",
"\"%04d-%02d-%02d\"",
"%",
"(",
"dt",
".",
"year",
",",
"dt",
".",
"month",
",",
"dt",
".",
"day",
")",
"return",
"strdate"
] | 29.285714 | 7.285714 |
def GetAmi(ec2, ami_spec):
""" Get the boto ami object given a AmiSpecification object. """
images = ec2.get_all_images(owners=[ami_spec.owner_id] )
requested_image = None
for image in images:
if image.name == ami_spec.ami_name:
requested_image = image
break
return requested_image | [
"def",
"GetAmi",
"(",
"ec2",
",",
"ami_spec",
")",
":",
"images",
"=",
"ec2",
".",
"get_all_images",
"(",
"owners",
"=",
"[",
"ami_spec",
".",
"owner_id",
"]",
")",
"requested_image",
"=",
"None",
"for",
"image",
"in",
"images",
":",
"if",
"image",
".",
"name",
"==",
"ami_spec",
".",
"ami_name",
":",
"requested_image",
"=",
"image",
"break",
"return",
"requested_image"
] | 35.111111 | 12.777778 |
def _update_config_sets(self,directory,files=None):
"""
Loads set information from file and updates on flickr,
only reads first line. Format is comma separated eg.
travel, 2010, South Africa, Pretoria
If files is None, will update all files in DB, otherwise
will only update files that are in the flickr DB and files list
"""
if not self._connectToFlickr():
print("%s - Couldn't connect to flickr"%(directory))
return False
# Load sets from SET_FILE
_sets=self._load_sets(directory)
# Connect to flickr and get dicionary of photosets
psets=self._getphotosets()
db = self._loadDB(directory)
# To create a set, one needs to pass it the primary
# photo to use, let's open the DB and load the first
# photo
primary_pid=db[db.keys()[0]]['photoid']
# Loop through all sets, create if it doesn't exist
for myset in _sets:
if myset not in psets:
logger.info('set [%s] not in flickr sets, will create set'%(myset))
self._createphotoset(myset,primary_pid)
# Now reaload photosets from flickr
psets=self._getphotosets()
# --- Load DB of photos, and update them all with new tags
for fn in db:
# --- If file list provided, skip files not in the list
if files and fn not in files:
continue
pid=db[fn]['photoid']
# Get all the photosets this photo belongs to
psets_for_photo=self._getphotosets_forphoto(pid)
for myset in _sets:
if myset in psets_for_photo:
logger.debug("%s - Already in photoset [%s] - skipping"%(fn,myset))
continue
logger.info("%s [flickr] Adding to set [%s]" %(fn,myset))
psid=psets[myset]['id']
logger.debug("%s - Adding to photoset %s"%(fn,psid))
resp=self.flickr.photosets_addPhoto(photoset_id=psid,photo_id=pid)
if resp.attrib['stat']!='ok':
logger.error("%s - flickr: photos_addPhoto failed with status: %s",\
resp.attrib['stat']);
return False
# Go through all sets flickr says this photo belongs to and
# remove from those sets if they don't appear in SET_FILE
for pset in psets_for_photo:
if pset not in _sets:
psid=psets[pset]['id']
logger.info("%s [flickr] Removing from set [%s]" %(fn,pset))
logger.debug("%s - Removing from photoset %s"%(fn,psid))
resp=self.flickr.photosets_removePhoto(photoset_id=psid,photo_id=pid)
if resp.attrib['stat']!='ok':
logger.error("%s - flickr: photossets_removePhoto failed with status: %s",\
resp.attrib['stat']);
return False
return True | [
"def",
"_update_config_sets",
"(",
"self",
",",
"directory",
",",
"files",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_connectToFlickr",
"(",
")",
":",
"print",
"(",
"\"%s - Couldn't connect to flickr\"",
"%",
"(",
"directory",
")",
")",
"return",
"False",
"# Load sets from SET_FILE",
"_sets",
"=",
"self",
".",
"_load_sets",
"(",
"directory",
")",
"# Connect to flickr and get dicionary of photosets",
"psets",
"=",
"self",
".",
"_getphotosets",
"(",
")",
"db",
"=",
"self",
".",
"_loadDB",
"(",
"directory",
")",
"# To create a set, one needs to pass it the primary",
"# photo to use, let's open the DB and load the first",
"# photo",
"primary_pid",
"=",
"db",
"[",
"db",
".",
"keys",
"(",
")",
"[",
"0",
"]",
"]",
"[",
"'photoid'",
"]",
"# Loop through all sets, create if it doesn't exist",
"for",
"myset",
"in",
"_sets",
":",
"if",
"myset",
"not",
"in",
"psets",
":",
"logger",
".",
"info",
"(",
"'set [%s] not in flickr sets, will create set'",
"%",
"(",
"myset",
")",
")",
"self",
".",
"_createphotoset",
"(",
"myset",
",",
"primary_pid",
")",
"# Now reaload photosets from flickr",
"psets",
"=",
"self",
".",
"_getphotosets",
"(",
")",
"# --- Load DB of photos, and update them all with new tags",
"for",
"fn",
"in",
"db",
":",
"# --- If file list provided, skip files not in the list",
"if",
"files",
"and",
"fn",
"not",
"in",
"files",
":",
"continue",
"pid",
"=",
"db",
"[",
"fn",
"]",
"[",
"'photoid'",
"]",
"# Get all the photosets this photo belongs to",
"psets_for_photo",
"=",
"self",
".",
"_getphotosets_forphoto",
"(",
"pid",
")",
"for",
"myset",
"in",
"_sets",
":",
"if",
"myset",
"in",
"psets_for_photo",
":",
"logger",
".",
"debug",
"(",
"\"%s - Already in photoset [%s] - skipping\"",
"%",
"(",
"fn",
",",
"myset",
")",
")",
"continue",
"logger",
".",
"info",
"(",
"\"%s [flickr] Adding to set [%s]\"",
"%",
"(",
"fn",
",",
"myset",
")",
")",
"psid",
"=",
"psets",
"[",
"myset",
"]",
"[",
"'id'",
"]",
"logger",
".",
"debug",
"(",
"\"%s - Adding to photoset %s\"",
"%",
"(",
"fn",
",",
"psid",
")",
")",
"resp",
"=",
"self",
".",
"flickr",
".",
"photosets_addPhoto",
"(",
"photoset_id",
"=",
"psid",
",",
"photo_id",
"=",
"pid",
")",
"if",
"resp",
".",
"attrib",
"[",
"'stat'",
"]",
"!=",
"'ok'",
":",
"logger",
".",
"error",
"(",
"\"%s - flickr: photos_addPhoto failed with status: %s\"",
",",
"resp",
".",
"attrib",
"[",
"'stat'",
"]",
")",
"return",
"False",
"# Go through all sets flickr says this photo belongs to and",
"# remove from those sets if they don't appear in SET_FILE",
"for",
"pset",
"in",
"psets_for_photo",
":",
"if",
"pset",
"not",
"in",
"_sets",
":",
"psid",
"=",
"psets",
"[",
"pset",
"]",
"[",
"'id'",
"]",
"logger",
".",
"info",
"(",
"\"%s [flickr] Removing from set [%s]\"",
"%",
"(",
"fn",
",",
"pset",
")",
")",
"logger",
".",
"debug",
"(",
"\"%s - Removing from photoset %s\"",
"%",
"(",
"fn",
",",
"psid",
")",
")",
"resp",
"=",
"self",
".",
"flickr",
".",
"photosets_removePhoto",
"(",
"photoset_id",
"=",
"psid",
",",
"photo_id",
"=",
"pid",
")",
"if",
"resp",
".",
"attrib",
"[",
"'stat'",
"]",
"!=",
"'ok'",
":",
"logger",
".",
"error",
"(",
"\"%s - flickr: photossets_removePhoto failed with status: %s\"",
",",
"resp",
".",
"attrib",
"[",
"'stat'",
"]",
")",
"return",
"False",
"return",
"True"
] | 41.861111 | 21.111111 |
def _conn_string_odbc(self, db_key, instance=None, conn_key=None, db_name=None):
''' Return a connection string to use with odbc
'''
if instance:
dsn, host, username, password, database, driver = self._get_access_info(instance, db_key, db_name)
elif conn_key:
dsn, host, username, password, database, driver = conn_key.split(":")
conn_str = ''
if dsn:
conn_str = 'DSN={};'.format(dsn)
if driver:
conn_str += 'DRIVER={};'.format(driver)
if host:
conn_str += 'Server={};'.format(host)
if database:
conn_str += 'Database={};'.format(database)
if username:
conn_str += 'UID={};'.format(username)
self.log.debug("Connection string (before password) {}".format(conn_str))
if password:
conn_str += 'PWD={};'.format(password)
return conn_str | [
"def",
"_conn_string_odbc",
"(",
"self",
",",
"db_key",
",",
"instance",
"=",
"None",
",",
"conn_key",
"=",
"None",
",",
"db_name",
"=",
"None",
")",
":",
"if",
"instance",
":",
"dsn",
",",
"host",
",",
"username",
",",
"password",
",",
"database",
",",
"driver",
"=",
"self",
".",
"_get_access_info",
"(",
"instance",
",",
"db_key",
",",
"db_name",
")",
"elif",
"conn_key",
":",
"dsn",
",",
"host",
",",
"username",
",",
"password",
",",
"database",
",",
"driver",
"=",
"conn_key",
".",
"split",
"(",
"\":\"",
")",
"conn_str",
"=",
"''",
"if",
"dsn",
":",
"conn_str",
"=",
"'DSN={};'",
".",
"format",
"(",
"dsn",
")",
"if",
"driver",
":",
"conn_str",
"+=",
"'DRIVER={};'",
".",
"format",
"(",
"driver",
")",
"if",
"host",
":",
"conn_str",
"+=",
"'Server={};'",
".",
"format",
"(",
"host",
")",
"if",
"database",
":",
"conn_str",
"+=",
"'Database={};'",
".",
"format",
"(",
"database",
")",
"if",
"username",
":",
"conn_str",
"+=",
"'UID={};'",
".",
"format",
"(",
"username",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Connection string (before password) {}\"",
".",
"format",
"(",
"conn_str",
")",
")",
"if",
"password",
":",
"conn_str",
"+=",
"'PWD={};'",
".",
"format",
"(",
"password",
")",
"return",
"conn_str"
] | 36.48 | 24.8 |
def write(self, path, data, offset=0, timeout=0):
"""write data at path
path is a string, data binary; it is responsability of the caller
ensure proper encoding.
"""
# fixme: check of path type delayed to str2bytez
if not isinstance(data, (bytes, bytearray, )):
raise TypeError("'data' argument must be binary")
ret, rdata = self.sendmess(MSG_WRITE, str2bytez(path) + data,
size=len(data), offset=offset,
timeout=timeout)
assert not rdata, (ret, rdata)
if ret < 0:
raise OwnetError(-ret, self.errmess[-ret], path) | [
"def",
"write",
"(",
"self",
",",
"path",
",",
"data",
",",
"offset",
"=",
"0",
",",
"timeout",
"=",
"0",
")",
":",
"# fixme: check of path type delayed to str2bytez",
"if",
"not",
"isinstance",
"(",
"data",
",",
"(",
"bytes",
",",
"bytearray",
",",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"'data' argument must be binary\"",
")",
"ret",
",",
"rdata",
"=",
"self",
".",
"sendmess",
"(",
"MSG_WRITE",
",",
"str2bytez",
"(",
"path",
")",
"+",
"data",
",",
"size",
"=",
"len",
"(",
"data",
")",
",",
"offset",
"=",
"offset",
",",
"timeout",
"=",
"timeout",
")",
"assert",
"not",
"rdata",
",",
"(",
"ret",
",",
"rdata",
")",
"if",
"ret",
"<",
"0",
":",
"raise",
"OwnetError",
"(",
"-",
"ret",
",",
"self",
".",
"errmess",
"[",
"-",
"ret",
"]",
",",
"path",
")"
] | 39.176471 | 19.411765 |
def render_revalidation_failure(self, failed_step, form, **kwargs):
"""
When a step fails, we have to redirect the user to the first failing
step.
"""
self.storage.current_step = failed_step
return redirect(self.url_name, step=failed_step) | [
"def",
"render_revalidation_failure",
"(",
"self",
",",
"failed_step",
",",
"form",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"storage",
".",
"current_step",
"=",
"failed_step",
"return",
"redirect",
"(",
"self",
".",
"url_name",
",",
"step",
"=",
"failed_step",
")"
] | 40.142857 | 16.142857 |
def find_disulfide_bridges(self, representatives_only=True):
"""Run Biopython's disulfide bridge finder and store found bridges.
Annotations are stored in the protein structure's chain sequence at:
``<chain_prop>.seq_record.annotations['SSBOND-biopython']``
Args:
representative_only (bool): If analysis should only be run on the representative structure
"""
for g in tqdm(self.genes):
g.protein.find_disulfide_bridges(representative_only=representatives_only) | [
"def",
"find_disulfide_bridges",
"(",
"self",
",",
"representatives_only",
"=",
"True",
")",
":",
"for",
"g",
"in",
"tqdm",
"(",
"self",
".",
"genes",
")",
":",
"g",
".",
"protein",
".",
"find_disulfide_bridges",
"(",
"representative_only",
"=",
"representatives_only",
")"
] | 43.666667 | 28.666667 |
def _is_collinear(self, x, y):
"""
Checks if first three points are collinear
"""
pts = np.column_stack([x[:3], y[:3], np.ones(3)])
return np.linalg.det(pts) == 0.0 | [
"def",
"_is_collinear",
"(",
"self",
",",
"x",
",",
"y",
")",
":",
"pts",
"=",
"np",
".",
"column_stack",
"(",
"[",
"x",
"[",
":",
"3",
"]",
",",
"y",
"[",
":",
"3",
"]",
",",
"np",
".",
"ones",
"(",
"3",
")",
"]",
")",
"return",
"np",
".",
"linalg",
".",
"det",
"(",
"pts",
")",
"==",
"0.0"
] | 33.166667 | 6.166667 |
def to_nullable_boolean(value):
"""
Converts value into boolean or returns None when conversion is not possible.
:param value: the value to convert.
:return: boolean value or None when convertion is not supported.
"""
# Shortcuts
if value == None:
return None
if type(value) == type(True):
return value
str_value = str(value).lower()
# All true values
if str_value in ['1', 'true', 't', 'yes', 'y']:
return True
# All false values
if str_value in ['0', 'frue', 'f', 'no', 'n']:
return False
# Everything else:
return None | [
"def",
"to_nullable_boolean",
"(",
"value",
")",
":",
"# Shortcuts",
"if",
"value",
"==",
"None",
":",
"return",
"None",
"if",
"type",
"(",
"value",
")",
"==",
"type",
"(",
"True",
")",
":",
"return",
"value",
"str_value",
"=",
"str",
"(",
"value",
")",
".",
"lower",
"(",
")",
"# All true values",
"if",
"str_value",
"in",
"[",
"'1'",
",",
"'true'",
",",
"'t'",
",",
"'yes'",
",",
"'y'",
"]",
":",
"return",
"True",
"# All false values",
"if",
"str_value",
"in",
"[",
"'0'",
",",
"'frue'",
",",
"'f'",
",",
"'no'",
",",
"'n'",
"]",
":",
"return",
"False",
"# Everything else:",
"return",
"None"
] | 27.916667 | 18.666667 |
def set_option(self, name, value):
"""
Sets an option from an SConscript file.
"""
if not name in self.settable:
raise SCons.Errors.UserError("This option is not settable from a SConscript file: %s"%name)
if name == 'num_jobs':
try:
value = int(value)
if value < 1:
raise ValueError
except ValueError:
raise SCons.Errors.UserError("A positive integer is required: %s"%repr(value))
elif name == 'max_drift':
try:
value = int(value)
except ValueError:
raise SCons.Errors.UserError("An integer is required: %s"%repr(value))
elif name == 'duplicate':
try:
value = str(value)
except ValueError:
raise SCons.Errors.UserError("A string is required: %s"%repr(value))
if not value in SCons.Node.FS.Valid_Duplicates:
raise SCons.Errors.UserError("Not a valid duplication style: %s" % value)
# Set the duplicate style right away so it can affect linking
# of SConscript files.
SCons.Node.FS.set_duplicate(value)
elif name == 'diskcheck':
try:
value = diskcheck_convert(value)
except ValueError as v:
raise SCons.Errors.UserError("Not a valid diskcheck value: %s"%v)
if 'diskcheck' not in self.__dict__:
# No --diskcheck= option was specified on the command line.
# Set this right away so it can affect the rest of the
# file/Node lookups while processing the SConscript files.
SCons.Node.FS.set_diskcheck(value)
elif name == 'stack_size':
try:
value = int(value)
except ValueError:
raise SCons.Errors.UserError("An integer is required: %s"%repr(value))
elif name == 'md5_chunksize':
try:
value = int(value)
except ValueError:
raise SCons.Errors.UserError("An integer is required: %s"%repr(value))
elif name == 'warn':
if SCons.Util.is_String(value):
value = [value]
value = self.__SConscript_settings__.get(name, []) + value
SCons.Warnings.process_warn_strings(value)
self.__SConscript_settings__[name] = value | [
"def",
"set_option",
"(",
"self",
",",
"name",
",",
"value",
")",
":",
"if",
"not",
"name",
"in",
"self",
".",
"settable",
":",
"raise",
"SCons",
".",
"Errors",
".",
"UserError",
"(",
"\"This option is not settable from a SConscript file: %s\"",
"%",
"name",
")",
"if",
"name",
"==",
"'num_jobs'",
":",
"try",
":",
"value",
"=",
"int",
"(",
"value",
")",
"if",
"value",
"<",
"1",
":",
"raise",
"ValueError",
"except",
"ValueError",
":",
"raise",
"SCons",
".",
"Errors",
".",
"UserError",
"(",
"\"A positive integer is required: %s\"",
"%",
"repr",
"(",
"value",
")",
")",
"elif",
"name",
"==",
"'max_drift'",
":",
"try",
":",
"value",
"=",
"int",
"(",
"value",
")",
"except",
"ValueError",
":",
"raise",
"SCons",
".",
"Errors",
".",
"UserError",
"(",
"\"An integer is required: %s\"",
"%",
"repr",
"(",
"value",
")",
")",
"elif",
"name",
"==",
"'duplicate'",
":",
"try",
":",
"value",
"=",
"str",
"(",
"value",
")",
"except",
"ValueError",
":",
"raise",
"SCons",
".",
"Errors",
".",
"UserError",
"(",
"\"A string is required: %s\"",
"%",
"repr",
"(",
"value",
")",
")",
"if",
"not",
"value",
"in",
"SCons",
".",
"Node",
".",
"FS",
".",
"Valid_Duplicates",
":",
"raise",
"SCons",
".",
"Errors",
".",
"UserError",
"(",
"\"Not a valid duplication style: %s\"",
"%",
"value",
")",
"# Set the duplicate style right away so it can affect linking",
"# of SConscript files.",
"SCons",
".",
"Node",
".",
"FS",
".",
"set_duplicate",
"(",
"value",
")",
"elif",
"name",
"==",
"'diskcheck'",
":",
"try",
":",
"value",
"=",
"diskcheck_convert",
"(",
"value",
")",
"except",
"ValueError",
"as",
"v",
":",
"raise",
"SCons",
".",
"Errors",
".",
"UserError",
"(",
"\"Not a valid diskcheck value: %s\"",
"%",
"v",
")",
"if",
"'diskcheck'",
"not",
"in",
"self",
".",
"__dict__",
":",
"# No --diskcheck= option was specified on the command line.",
"# Set this right away so it can affect the rest of the",
"# file/Node lookups while processing the SConscript files.",
"SCons",
".",
"Node",
".",
"FS",
".",
"set_diskcheck",
"(",
"value",
")",
"elif",
"name",
"==",
"'stack_size'",
":",
"try",
":",
"value",
"=",
"int",
"(",
"value",
")",
"except",
"ValueError",
":",
"raise",
"SCons",
".",
"Errors",
".",
"UserError",
"(",
"\"An integer is required: %s\"",
"%",
"repr",
"(",
"value",
")",
")",
"elif",
"name",
"==",
"'md5_chunksize'",
":",
"try",
":",
"value",
"=",
"int",
"(",
"value",
")",
"except",
"ValueError",
":",
"raise",
"SCons",
".",
"Errors",
".",
"UserError",
"(",
"\"An integer is required: %s\"",
"%",
"repr",
"(",
"value",
")",
")",
"elif",
"name",
"==",
"'warn'",
":",
"if",
"SCons",
".",
"Util",
".",
"is_String",
"(",
"value",
")",
":",
"value",
"=",
"[",
"value",
"]",
"value",
"=",
"self",
".",
"__SConscript_settings__",
".",
"get",
"(",
"name",
",",
"[",
"]",
")",
"+",
"value",
"SCons",
".",
"Warnings",
".",
"process_warn_strings",
"(",
"value",
")",
"self",
".",
"__SConscript_settings__",
"[",
"name",
"]",
"=",
"value"
] | 43.178571 | 18.5 |
def commit(self, message=None):
    """Execute the ``gerrit review`` command for this review.

    :param message: Optional text to attach as a comment for this action.
    :returns: True when the remote command produced no stderr output,
        False otherwise.
    """
    flags = filters.Items()
    # Collect the flag strings first, then feed them to the Items object
    # in the same order the original options were checked.
    pending = []
    if self._status:
        pending.append(self._status)
    if self._code_review is not None:
        pending.append("code-review %s" % self._code_review)
    if self._verified is not None:
        pending.append("verified %s" % self._verified)
    if message:
        pending.append("message '%s'" % message)
    for flag in pending:
        flags.add_flags(flag)
    command = ' '.join(['gerrit', 'review', str(flags), str(self._review)])
    stdin, stdout, stderr = self.client.exec_command(command)
    # NOTE(flaper87): Log error messages
    errors = [line for line in stderr]
    # True if success
    return not errors
"def",
"commit",
"(",
"self",
",",
"message",
"=",
"None",
")",
":",
"flags",
"=",
"filters",
".",
"Items",
"(",
")",
"if",
"self",
".",
"_status",
":",
"flags",
".",
"add_flags",
"(",
"self",
".",
"_status",
")",
"if",
"self",
".",
"_code_review",
"is",
"not",
"None",
":",
"flags",
".",
"add_flags",
"(",
"\"code-review %s\"",
"%",
"self",
".",
"_code_review",
")",
"if",
"self",
".",
"_verified",
"is",
"not",
"None",
":",
"flags",
".",
"add_flags",
"(",
"\"verified %s\"",
"%",
"self",
".",
"_verified",
")",
"if",
"message",
":",
"flags",
".",
"add_flags",
"(",
"\"message '%s'\"",
"%",
"message",
")",
"query",
"=",
"[",
"'gerrit'",
",",
"'review'",
",",
"str",
"(",
"flags",
")",
",",
"str",
"(",
"self",
".",
"_review",
")",
"]",
"results",
"=",
"self",
".",
"client",
".",
"exec_command",
"(",
"' '",
".",
"join",
"(",
"query",
")",
")",
"stdin",
",",
"stdout",
",",
"stderr",
"=",
"results",
"# NOTE(flaper87): Log error messages",
"error",
"=",
"[",
"]",
"for",
"line",
"in",
"stderr",
":",
"error",
".",
"append",
"(",
"line",
")",
"# True if success",
"return",
"not",
"error"
] | 25.411765 | 19.882353 |
def dependencies(self, sort=False):
    """Return all dependencies required to use this object.

    The last item in the returned list is *self*.

    :param sort: when True, order each node's direct dependencies so
        that functions come first, then variables grouped by ``vtype``.
    """
    if sort:
        def sort_key(obj):
            # Non-Variable deps (functions) sort before Variables,
            # which are themselves ordered by their vtype.
            return (1, obj.vtype) if isinstance(obj, Variable) else (0, 0)
        ordered = sorted(self._deps, key=sort_key)
    else:
        ordered = self._deps
    result = []
    for child in ordered:
        result.extend(child.dependencies(sort=sort))
    result.append(self)
    return result
"def",
"dependencies",
"(",
"self",
",",
"sort",
"=",
"False",
")",
":",
"alldeps",
"=",
"[",
"]",
"if",
"sort",
":",
"def",
"key",
"(",
"obj",
")",
":",
"# sort deps such that we get functions, variables, self.",
"if",
"not",
"isinstance",
"(",
"obj",
",",
"Variable",
")",
":",
"return",
"(",
"0",
",",
"0",
")",
"else",
":",
"return",
"(",
"1",
",",
"obj",
".",
"vtype",
")",
"deps",
"=",
"sorted",
"(",
"self",
".",
"_deps",
",",
"key",
"=",
"key",
")",
"else",
":",
"deps",
"=",
"self",
".",
"_deps",
"for",
"dep",
"in",
"deps",
":",
"alldeps",
".",
"extend",
"(",
"dep",
".",
"dependencies",
"(",
"sort",
"=",
"sort",
")",
")",
"alldeps",
".",
"append",
"(",
"self",
")",
"return",
"alldeps"
] | 31.857143 | 14.619048 |
def ignore_cxx(self) -> bool:
        """Consume comments and whitespace characters.

        Repeatedly skips runs of whitespace, ``//`` line comments and
        ``/* ... */`` block comments until a pass makes no progress.
        Returns the result of validating (or, on an unterminated block
        comment, restoring) the saved stream context.
        """
        self._stream.save_context()
        while not self.read_eof():
            # Remember where this pass started so we can detect progress.
            idxref = self._stream.index
            # 1) skip a run of whitespace characters
            if self._stream.peek_char in " \t\v\f\r\n":
                while (not self.read_eof()
                       and self._stream.peek_char in " \t\v\f\r\n"):
                    self._stream.incpos()
            # 2) skip a // line comment up to the end of the line
            if self.peek_text("//"):
                while not self.read_eof() and not self.peek_char("\n"):
                    self._stream.incpos()
                if not self.read_char("\n") and self.read_eof():
                    # A line comment cut off by EOF is still valid input.
                    return self._stream.validate_context()
            # 3) skip a /* ... */ block comment
            if self.peek_text("/*"):
                while not self.read_eof() and not self.peek_text("*/"):
                    self._stream.incpos()
                if not self.read_text("*/") and self.read_eof():
                    # Unterminated block comment: roll the stream back.
                    # NOTE(review): asymmetric with the // branch above —
                    # presumably deliberate (unterminated /* is an error).
                    return self._stream.restore_context()
            if idxref == self._stream.index:
                # Nothing was consumed this pass; stop looping.
                break
        return self._stream.validate_context()
"def",
"ignore_cxx",
"(",
"self",
")",
"->",
"bool",
":",
"self",
".",
"_stream",
".",
"save_context",
"(",
")",
"while",
"not",
"self",
".",
"read_eof",
"(",
")",
":",
"idxref",
"=",
"self",
".",
"_stream",
".",
"index",
"if",
"self",
".",
"_stream",
".",
"peek_char",
"in",
"\" \\t\\v\\f\\r\\n\"",
":",
"while",
"(",
"not",
"self",
".",
"read_eof",
"(",
")",
"and",
"self",
".",
"_stream",
".",
"peek_char",
"in",
"\" \\t\\v\\f\\r\\n\"",
")",
":",
"self",
".",
"_stream",
".",
"incpos",
"(",
")",
"if",
"self",
".",
"peek_text",
"(",
"\"//\"",
")",
":",
"while",
"not",
"self",
".",
"read_eof",
"(",
")",
"and",
"not",
"self",
".",
"peek_char",
"(",
"\"\\n\"",
")",
":",
"self",
".",
"_stream",
".",
"incpos",
"(",
")",
"if",
"not",
"self",
".",
"read_char",
"(",
"\"\\n\"",
")",
"and",
"self",
".",
"read_eof",
"(",
")",
":",
"return",
"self",
".",
"_stream",
".",
"validate_context",
"(",
")",
"if",
"self",
".",
"peek_text",
"(",
"\"/*\"",
")",
":",
"while",
"not",
"self",
".",
"read_eof",
"(",
")",
"and",
"not",
"self",
".",
"peek_text",
"(",
"\"*/\"",
")",
":",
"self",
".",
"_stream",
".",
"incpos",
"(",
")",
"if",
"not",
"self",
".",
"read_text",
"(",
"\"*/\"",
")",
"and",
"self",
".",
"read_eof",
"(",
")",
":",
"return",
"self",
".",
"_stream",
".",
"restore_context",
"(",
")",
"if",
"idxref",
"==",
"self",
".",
"_stream",
".",
"index",
":",
"break",
"return",
"self",
".",
"_stream",
".",
"validate_context",
"(",
")"
] | 43.954545 | 11.090909 |
def snapshots(self, space_id, environment_id, resource_id, resource_kind='entries'):
    """
    Provides access to snapshot management methods.

    API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/snapshots

    :param space_id: ID of the space the resource lives in.
    :param environment_id: ID of the environment.
    :param resource_id: ID of the parent resource.
    :param resource_kind: kind of parent resource ('entries' by default).
    :return: :class:`SnapshotsProxy <contentful_management.snapshots_proxy.SnapshotsProxy>` object.
    :rtype: contentful.snapshots_proxy.SnapshotsProxy
    """
    return SnapshotsProxy(
        self,
        space_id,
        environment_id,
        resource_id,
        resource_kind,
    )
"def",
"snapshots",
"(",
"self",
",",
"space_id",
",",
"environment_id",
",",
"resource_id",
",",
"resource_kind",
"=",
"'entries'",
")",
":",
"return",
"SnapshotsProxy",
"(",
"self",
",",
"space_id",
",",
"environment_id",
",",
"resource_id",
",",
"resource_kind",
")"
] | 51.631579 | 42.684211 |
def get_proxy_session(self):
    """Get a ``ProxySession`` that acquires authentication credentials
    on behalf of a service client.

    :return: a proxy session for this service
    :rtype: ``osid.proxy.ProxySession``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``Unimplemented`` -- ``supports_proxy()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_proxy()`` is ``true``.*
    """
    if not self.supports_proxy():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise  # OperationFailed()
    try:
        return sessions.ProxySession()
    except AttributeError:
        raise  # OperationFailed()
"def",
"get_proxy_session",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"supports_proxy",
"(",
")",
":",
"raise",
"Unimplemented",
"(",
")",
"try",
":",
"from",
".",
"import",
"sessions",
"except",
"ImportError",
":",
"raise",
"# OperationFailed()",
"try",
":",
"session",
"=",
"sessions",
".",
"ProxySession",
"(",
")",
"except",
"AttributeError",
":",
"raise",
"# OperationFailed()",
"return",
"session"
] | 37.409091 | 17.409091 |
def datetime_to_ns(then):
    """Transform a :any:`datetime.datetime` into a NationStates-style
    string.

    For example "6 days ago", "105 minutes ago", etc.

    :param then: the (naive, UTC) moment in the past to describe.
    :returns: a human-readable relative-time string; the Unix epoch
        maps to the special value ``'Antiquity'``.
    """
    if then == datetime(1970, 1, 1, 0, 0):
        return 'Antiquity'
    now = datetime.utcnow()
    delta = now - then
    seconds = delta.total_seconds()
    # Successively carve the interval into years/days/hours/minutes.
    years, seconds = divmod(seconds, 60*60*24*365)
    days, seconds = divmod(seconds, 60*60*24)
    hours, seconds = divmod(seconds, 60*60)
    minutes, seconds = divmod(seconds, 60)
    years = int(years)
    days = int(days)
    hours = int(hours)
    minutes = int(minutes)
    seconds = round(seconds)
    if years > 1:
        if days > 1:
            return f'{years} years {days} days ago'
        elif days == 1:
            # BUG FIX: these two returns were plain strings missing the
            # f prefix, so the literal text '{years} ...' was returned.
            return f'{years} years 1 day ago'
        return f'{years} years ago'
    if years == 1:
        if days > 1:
            return f'1 year {days} days ago'
        elif days == 1:
            return '1 year 1 day ago'
        return '1 year ago'
    if days > 3:
        return f'{days} days ago'
    if days > 1:
        if hours > 1:
            return f'{days} days {hours} hours ago'
        elif hours == 1:
            return f'{days} days 1 hour ago'
        return f'{days} days ago'
    if days == 1:
        if hours > 1:
            return f'1 day {hours} hours ago'
        elif hours == 1:
            return '1 day 1 hour ago'
        return '1 day ago'
    if hours > 1:
        return f'{hours} hours ago'
    if hours == 1:
        # Between 1h and 2h we report minutes ("105 minutes ago").
        return f'{minutes + 60} minutes ago'
    if minutes > 1:
        return f'{minutes} minutes ago'
    if minutes == 1:
        return '1 minute ago'
    return 'Seconds ago'
"def",
"datetime_to_ns",
"(",
"then",
")",
":",
"if",
"then",
"==",
"datetime",
"(",
"1970",
",",
"1",
",",
"1",
",",
"0",
",",
"0",
")",
":",
"return",
"'Antiquity'",
"now",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"delta",
"=",
"now",
"-",
"then",
"seconds",
"=",
"delta",
".",
"total_seconds",
"(",
")",
"# There's gotta be a better way to do this...",
"years",
",",
"seconds",
"=",
"divmod",
"(",
"seconds",
",",
"60",
"*",
"60",
"*",
"24",
"*",
"365",
")",
"days",
",",
"seconds",
"=",
"divmod",
"(",
"seconds",
",",
"60",
"*",
"60",
"*",
"24",
")",
"hours",
",",
"seconds",
"=",
"divmod",
"(",
"seconds",
",",
"60",
"*",
"60",
")",
"minutes",
",",
"seconds",
"=",
"divmod",
"(",
"seconds",
",",
"60",
")",
"years",
"=",
"int",
"(",
"years",
")",
"days",
"=",
"int",
"(",
"days",
")",
"hours",
"=",
"int",
"(",
"hours",
")",
"minutes",
"=",
"int",
"(",
"minutes",
")",
"seconds",
"=",
"round",
"(",
"seconds",
")",
"if",
"years",
">",
"1",
":",
"if",
"days",
">",
"1",
":",
"return",
"f'{years} years {days} days ago'",
"elif",
"days",
"==",
"1",
":",
"return",
"'{years} years 1 day ago'",
"return",
"'{years} years ago'",
"if",
"years",
"==",
"1",
":",
"if",
"days",
">",
"1",
":",
"return",
"f'1 year {days} days ago'",
"elif",
"days",
"==",
"1",
":",
"return",
"'1 year 1 day ago'",
"return",
"'1 year ago'",
"if",
"days",
">",
"3",
":",
"return",
"f'{days} days ago'",
"if",
"days",
">",
"1",
":",
"if",
"hours",
">",
"1",
":",
"return",
"f'{days} days {hours} hours ago'",
"elif",
"hours",
"==",
"1",
":",
"return",
"f'{days} days 1 hour ago'",
"return",
"f'{days} days ago'",
"if",
"days",
"==",
"1",
":",
"if",
"hours",
">",
"1",
":",
"return",
"f'1 day {hours} hours ago'",
"elif",
"hours",
"==",
"1",
":",
"return",
"'1 day 1 hour ago'",
"return",
"'1 day ago'",
"if",
"hours",
">",
"1",
":",
"return",
"f'{hours} hours ago'",
"if",
"hours",
"==",
"1",
":",
"return",
"f'{minutes + 60} minutes ago'",
"if",
"minutes",
">",
"1",
":",
"return",
"f'{minutes} minutes ago'",
"if",
"minutes",
"==",
"1",
":",
"return",
"'1 minute ago'",
"return",
"'Seconds ago'"
] | 26.809524 | 16.111111 |
def query_versions(self, version=None):
    """Check the specified version and resolve special alias values
    (e.g. "latest") to the newest matching concrete release.

    :param version: version string or alias; returned as-is when it is
        not one of the known special values.
    :returns: a list with a single resolved version, or all directory
        entries when the alias maps to an empty/falsy filter value.
    """
    if version not in RELEASE_AND_CANDIDATE_LATEST_VERSIONS:
        # Concrete version (or None): nothing to resolve.
        return [version]
    parser = self._create_directory_parser(urljoin(self.base_url, 'releases/'))
    if not version:
        return parser.entries
    candidates = parser.filter(RELEASE_AND_CANDIDATE_LATEST_VERSIONS[version])
    from distutils.version import LooseVersion
    candidates.sort(key=LooseVersion)
    # Highest version wins.
    return [candidates[-1]]
"def",
"query_versions",
"(",
"self",
",",
"version",
"=",
"None",
")",
":",
"if",
"version",
"not",
"in",
"RELEASE_AND_CANDIDATE_LATEST_VERSIONS",
":",
"return",
"[",
"version",
"]",
"url",
"=",
"urljoin",
"(",
"self",
".",
"base_url",
",",
"'releases/'",
")",
"parser",
"=",
"self",
".",
"_create_directory_parser",
"(",
"url",
")",
"if",
"version",
":",
"versions",
"=",
"parser",
".",
"filter",
"(",
"RELEASE_AND_CANDIDATE_LATEST_VERSIONS",
"[",
"version",
"]",
")",
"from",
"distutils",
".",
"version",
"import",
"LooseVersion",
"versions",
".",
"sort",
"(",
"key",
"=",
"LooseVersion",
")",
"return",
"[",
"versions",
"[",
"-",
"1",
"]",
"]",
"else",
":",
"return",
"parser",
".",
"entries"
] | 41.071429 | 15.714286 |
def cmd_build(conf: Config, run_tests: bool=False):
    """Build requested targets, and their dependencies.

    :param conf: build configuration used to resolve targets.
    :param run_tests: when True, also run tests while building the graph.
    """
    ctx = BuildContext(conf)
    populate_targets_graph(ctx, conf)
    ctx.build_graph(run_tests=run_tests)
    ctx.write_artifacts_metadata()
"def",
"cmd_build",
"(",
"conf",
":",
"Config",
",",
"run_tests",
":",
"bool",
"=",
"False",
")",
":",
"build_context",
"=",
"BuildContext",
"(",
"conf",
")",
"populate_targets_graph",
"(",
"build_context",
",",
"conf",
")",
"build_context",
".",
"build_graph",
"(",
"run_tests",
"=",
"run_tests",
")",
"build_context",
".",
"write_artifacts_metadata",
"(",
")"
] | 48 | 5.666667 |
def close_connection(self, connection, force=False):
    """Overriding the baseclass function, this routine will decline to
    close a connection at the end of a transaction context.  This allows
    for reuse of connections.

    :param connection: the pooled connection object.
    :param force: when True, actually close the connection and drop it
        from the pool; when False, do nothing so it can be reused.
    """
    if not force:
        # Decline to close: keep the connection available for reuse.
        return
    try:
        connection.close()
    except self.operational_exceptions:
        self.config.logger.error('ConnectionFactory - failed closing')
    # Remove the pooled entry for this connection, if present.
    # BUG FIX: the original deleted self.pool[name] unconditionally after
    # the search loop, which removed an arbitrary entry (or raised
    # NameError on an empty pool) when the connection was not pooled.
    # Also, iteritems() is Python-2-only; items() works everywhere.
    for name, conn in list(self.pool.items()):
        if conn is connection:
            del self.pool[name]
            break
"def",
"close_connection",
"(",
"self",
",",
"connection",
",",
"force",
"=",
"False",
")",
":",
"if",
"force",
":",
"try",
":",
"connection",
".",
"close",
"(",
")",
"except",
"self",
".",
"operational_exceptions",
":",
"self",
".",
"config",
".",
"logger",
".",
"error",
"(",
"'ConnectionFactory - failed closing'",
")",
"for",
"name",
",",
"conn",
"in",
"self",
".",
"pool",
".",
"iteritems",
"(",
")",
":",
"if",
"conn",
"is",
"connection",
":",
"break",
"del",
"self",
".",
"pool",
"[",
"name",
"]",
"else",
":",
"pass"
] | 40.333333 | 15.666667 |
def sep(s):
    """Find the path separator used in this string, or os.sep if none."""
    match = re.search(r"[\\/]", s)
    return match.group(0) if match else os.sep
"def",
"sep",
"(",
"s",
")",
":",
"sep_match",
"=",
"re",
".",
"search",
"(",
"r\"[\\\\/]\"",
",",
"s",
")",
"if",
"sep_match",
":",
"the_sep",
"=",
"sep_match",
".",
"group",
"(",
"0",
")",
"else",
":",
"the_sep",
"=",
"os",
".",
"sep",
"return",
"the_sep"
] | 28.25 | 15.875 |
def list_project(self, offset=0, size=100):
    """ list the project
    Unsuccessful opertaion will cause an LogException.

    :type offset: int
    :param offset: the offset of all the matched names

    :type size: int
    :param size: the max return names count, -1 means return all data

    :return: ListProjectResponse

    :raise: LogException
    """
    offset = int(offset)
    size = int(size)
    # need to use extended method to get more
    if size == -1 or size > MAX_LIST_PAGING_SIZE:
        return list_more(self.list_project, offset, size, MAX_LIST_PAGING_SIZE)
    params = {'offset': str(offset), 'size': str(size)}
    headers = {}
    resource = "/"
    (resp, header) = self._send("GET", None, None, resource, params, headers)
    return ListProjectResponse(resp, header)
"def",
"list_project",
"(",
"self",
",",
"offset",
"=",
"0",
",",
"size",
"=",
"100",
")",
":",
"# need to use extended method to get more\r",
"if",
"int",
"(",
"size",
")",
"==",
"-",
"1",
"or",
"int",
"(",
"size",
")",
">",
"MAX_LIST_PAGING_SIZE",
":",
"return",
"list_more",
"(",
"self",
".",
"list_project",
",",
"int",
"(",
"offset",
")",
",",
"int",
"(",
"size",
")",
",",
"MAX_LIST_PAGING_SIZE",
")",
"headers",
"=",
"{",
"}",
"params",
"=",
"{",
"}",
"resource",
"=",
"\"/\"",
"params",
"[",
"'offset'",
"]",
"=",
"str",
"(",
"offset",
")",
"params",
"[",
"'size'",
"]",
"=",
"str",
"(",
"size",
")",
"(",
"resp",
",",
"header",
")",
"=",
"self",
".",
"_send",
"(",
"\"GET\"",
",",
"None",
",",
"None",
",",
"resource",
",",
"params",
",",
"headers",
")",
"return",
"ListProjectResponse",
"(",
"resp",
",",
"header",
")"
] | 33.653846 | 21.307692 |
def residmap(self, prefix='', **kwargs):
    """Generate 2-D spatial residual maps using the current ROI
    model and the convolution kernel defined with the `model`
    argument.

    Parameters
    ----------
    prefix : str
        String that will be prefixed to the output residual map files.

    {options}

    Returns
    -------
    maps : dict
        A dictionary containing the `~fermipy.utils.Map` objects
        for the residual significance and amplitude.

    """
    timer = Timer.create(start=True)
    self.logger.info('Generating residual maps')
    schema = ConfigSchema(self.defaults['residmap'])
    config = schema.create_config(self.config['residmap'], **kwargs)
    # Default properties of the test source model.
    for key, default in (('Index', 2.0),
                         ('SpectrumType', 'PowerLaw'),
                         ('SpatialModel', 'PointSource'),
                         ('Prefactor', 1E-13)):
        config['model'].setdefault(key, default)
    maps = self._make_residual_map(prefix, **config)
    if config['make_plots']:
        plotter = plotting.AnalysisPlotter(self.config['plotting'],
                                           fileio=self.config['fileio'],
                                           logging=self.config['logging'])
        plotter.make_residmap_plots(maps, self.roi)
    self.logger.info('Finished residual maps')
    outfile = utils.format_filename(self.workdir, 'residmap',
                                    prefix=[maps['name']])
    if config['write_fits']:
        maps['file'] = os.path.basename(outfile) + '.fits'
        self._make_residmap_fits(maps, outfile + '.fits')
    if config['write_npy']:
        np.save(outfile + '.npy', maps)
    self.logger.info('Execution time: %.2f s', timer.elapsed_time)
    return maps
"def",
"residmap",
"(",
"self",
",",
"prefix",
"=",
"''",
",",
"*",
"*",
"kwargs",
")",
":",
"timer",
"=",
"Timer",
".",
"create",
"(",
"start",
"=",
"True",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'Generating residual maps'",
")",
"schema",
"=",
"ConfigSchema",
"(",
"self",
".",
"defaults",
"[",
"'residmap'",
"]",
")",
"config",
"=",
"schema",
".",
"create_config",
"(",
"self",
".",
"config",
"[",
"'residmap'",
"]",
",",
"*",
"*",
"kwargs",
")",
"# Defining default properties of test source model",
"config",
"[",
"'model'",
"]",
".",
"setdefault",
"(",
"'Index'",
",",
"2.0",
")",
"config",
"[",
"'model'",
"]",
".",
"setdefault",
"(",
"'SpectrumType'",
",",
"'PowerLaw'",
")",
"config",
"[",
"'model'",
"]",
".",
"setdefault",
"(",
"'SpatialModel'",
",",
"'PointSource'",
")",
"config",
"[",
"'model'",
"]",
".",
"setdefault",
"(",
"'Prefactor'",
",",
"1E-13",
")",
"o",
"=",
"self",
".",
"_make_residual_map",
"(",
"prefix",
",",
"*",
"*",
"config",
")",
"if",
"config",
"[",
"'make_plots'",
"]",
":",
"plotter",
"=",
"plotting",
".",
"AnalysisPlotter",
"(",
"self",
".",
"config",
"[",
"'plotting'",
"]",
",",
"fileio",
"=",
"self",
".",
"config",
"[",
"'fileio'",
"]",
",",
"logging",
"=",
"self",
".",
"config",
"[",
"'logging'",
"]",
")",
"plotter",
".",
"make_residmap_plots",
"(",
"o",
",",
"self",
".",
"roi",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'Finished residual maps'",
")",
"outfile",
"=",
"utils",
".",
"format_filename",
"(",
"self",
".",
"workdir",
",",
"'residmap'",
",",
"prefix",
"=",
"[",
"o",
"[",
"'name'",
"]",
"]",
")",
"if",
"config",
"[",
"'write_fits'",
"]",
":",
"o",
"[",
"'file'",
"]",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"outfile",
")",
"+",
"'.fits'",
"self",
".",
"_make_residmap_fits",
"(",
"o",
",",
"outfile",
"+",
"'.fits'",
")",
"if",
"config",
"[",
"'write_npy'",
"]",
":",
"np",
".",
"save",
"(",
"outfile",
"+",
"'.npy'",
",",
"o",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'Execution time: %.2f s'",
",",
"timer",
".",
"elapsed_time",
")",
"return",
"o"
] | 34.018182 | 24.236364 |
def baltree(ntips, treeheight=1.0):
    """
    Returns a balanced tree topology.
    """
    if ntips % 2:
        # only an even tip count can be split evenly
        raise ToytreeError("balanced trees must have even number of tips.")
    # start from a single cherry (two tips)
    rtree = toytree.tree()
    rtree.treenode.add_child(name="0")
    rtree.treenode.add_child(name="1")
    # repeatedly split the smallest clade so the topology stays balanced
    for tip in range(2, ntips):
        target = return_small_clade(rtree.treenode)
        target.add_child(name=target.name)
        target.add_child(name=str(tip))
        # the split node becomes an unnamed internal node
        target.name = None
    # relabel tips in postorder so names are sequential
    leaves = (n for n in rtree.treenode.traverse("postorder") if n.is_leaf())
    for idx, leaf in enumerate(leaves):
        leaf.name = str(idx)
    # rebuild as a toytree, make ultrametric, and scale to treeheight
    tre = toytree.tree(rtree.write(tree_format=9))
    tre = tre.mod.make_ultrametric()
    scaled = tre.mod.node_scale_root_height(treeheight)
    scaled._coords.update()
    return scaled
"def",
"baltree",
"(",
"ntips",
",",
"treeheight",
"=",
"1.0",
")",
":",
"# require even number of tips",
"if",
"ntips",
"%",
"2",
":",
"raise",
"ToytreeError",
"(",
"\"balanced trees must have even number of tips.\"",
")",
"# make first cherry",
"rtree",
"=",
"toytree",
".",
"tree",
"(",
")",
"rtree",
".",
"treenode",
".",
"add_child",
"(",
"name",
"=",
"\"0\"",
")",
"rtree",
".",
"treenode",
".",
"add_child",
"(",
"name",
"=",
"\"1\"",
")",
"# add tips in a balanced way",
"for",
"i",
"in",
"range",
"(",
"2",
",",
"ntips",
")",
":",
"# get node to split",
"node",
"=",
"return_small_clade",
"(",
"rtree",
".",
"treenode",
")",
"# add two children",
"node",
".",
"add_child",
"(",
"name",
"=",
"node",
".",
"name",
")",
"node",
".",
"add_child",
"(",
"name",
"=",
"str",
"(",
"i",
")",
")",
"# rename ancestral node",
"node",
".",
"name",
"=",
"None",
"# rename tips so names are in order",
"idx",
"=",
"0",
"for",
"node",
"in",
"rtree",
".",
"treenode",
".",
"traverse",
"(",
"\"postorder\"",
")",
":",
"if",
"node",
".",
"is_leaf",
"(",
")",
":",
"node",
".",
"name",
"=",
"str",
"(",
"idx",
")",
"idx",
"+=",
"1",
"# get toytree from newick ",
"tre",
"=",
"toytree",
".",
"tree",
"(",
"rtree",
".",
"write",
"(",
"tree_format",
"=",
"9",
")",
")",
"tre",
"=",
"tre",
".",
"mod",
".",
"make_ultrametric",
"(",
")",
"self",
"=",
"tre",
".",
"mod",
".",
"node_scale_root_height",
"(",
"treeheight",
")",
"self",
".",
"_coords",
".",
"update",
"(",
")",
"return",
"self"
] | 29.74359 | 14.666667 |
def read_namespaced_network_policy(self, name, namespace, **kwargs):
    """
    read the specified NetworkPolicy
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_network_policy(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the NetworkPolicy (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
    :param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
    :return: V1beta1NetworkPolicy
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the same helper; the
    # helper itself returns either the data or the request thread.
    return self.read_namespaced_network_policy_with_http_info(name, namespace, **kwargs)
"def",
"read_namespaced_network_policy",
"(",
"self",
",",
"name",
",",
"namespace",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"read_namespaced_network_policy_with_http_info",
"(",
"name",
",",
"namespace",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"read_namespaced_network_policy_with_http_info",
"(",
"name",
",",
"namespace",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | 58.666667 | 30.25 |
def __unLock(self):
    """Release the request lock so commands can be sent to the projector again."""
    self._isLocked = False
    self._timer = 0
    self._operation = False
"def",
"__unLock",
"(",
"self",
")",
":",
"self",
".",
"_operation",
"=",
"False",
"self",
".",
"_timer",
"=",
"0",
"self",
".",
"_isLocked",
"=",
"False"
] | 30.8 | 11.4 |
def plot_validate(self, figure_list):
    """
    Plot the validation data contained in self.data, which should be a
    dictionary or a deque of dictionaries (for the latter the last
    entry is used), onto axes derived from figure_list.
    """
    self._plot_validate(self.get_axes_layout_validate(figure_list))
"def",
"plot_validate",
"(",
"self",
",",
"figure_list",
")",
":",
"axes_list",
"=",
"self",
".",
"get_axes_layout_validate",
"(",
"figure_list",
")",
"self",
".",
"_plot_validate",
"(",
"axes_list",
")"
] | 37.75 | 16.25 |
def _merge_extra_filerefs(*args):
    '''
    Takes a list of filerefs and returns a merged list
    '''
    merged = []
    for item in args:
        if isinstance(item, six.string_types):
            # comma-separated string: split into individual refs
            if item:
                merged.extend(item.split(','))
        elif isinstance(item, list) and item:
            merged.extend(item)
    return ','.join(merged)
"def",
"_merge_extra_filerefs",
"(",
"*",
"args",
")",
":",
"ret",
"=",
"[",
"]",
"for",
"arg",
"in",
"args",
":",
"if",
"isinstance",
"(",
"arg",
",",
"six",
".",
"string_types",
")",
":",
"if",
"arg",
":",
"ret",
".",
"extend",
"(",
"arg",
".",
"split",
"(",
"','",
")",
")",
"elif",
"isinstance",
"(",
"arg",
",",
"list",
")",
":",
"if",
"arg",
":",
"ret",
".",
"extend",
"(",
"arg",
")",
"return",
"','",
".",
"join",
"(",
"ret",
")"
] | 26.769231 | 16.461538 |
def _decode_embedded_dict(src):
    '''
    Convert enbedded bytes to strings if possible.
    Dict helper.
    '''
    def _try_decode(value):
        # Best-effort decode; keep raw bytes on failure.
        try:
            return value.decode()
        except UnicodeError:
            return value

    decoded = {}
    for key, val in six.iteritems(src):
        if isinstance(val, dict):
            val = _decode_embedded_dict(val)
        elif isinstance(val, list):
            val = _decode_embedded_list(val)
        elif isinstance(val, bytes):
            val = _try_decode(val)
        if isinstance(key, bytes):
            key = _try_decode(key)
        decoded[key] = val
    return decoded
"def",
"_decode_embedded_dict",
"(",
"src",
")",
":",
"output",
"=",
"{",
"}",
"for",
"key",
",",
"val",
"in",
"six",
".",
"iteritems",
"(",
"src",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"dict",
")",
":",
"val",
"=",
"_decode_embedded_dict",
"(",
"val",
")",
"elif",
"isinstance",
"(",
"val",
",",
"list",
")",
":",
"val",
"=",
"_decode_embedded_list",
"(",
"val",
")",
"# pylint: disable=redefined-variable-type",
"elif",
"isinstance",
"(",
"val",
",",
"bytes",
")",
":",
"try",
":",
"val",
"=",
"val",
".",
"decode",
"(",
")",
"except",
"UnicodeError",
":",
"pass",
"if",
"isinstance",
"(",
"key",
",",
"bytes",
")",
":",
"try",
":",
"key",
"=",
"key",
".",
"decode",
"(",
")",
"except",
"UnicodeError",
":",
"pass",
"output",
"[",
"key",
"]",
"=",
"val",
"return",
"output"
] | 29.565217 | 15.73913 |
def pre_delete(cls, sender, instance, *args, **kwargs):
    """Deletes the CC email marketing campaign associated with me.

    Raises (via raise_for_status) when the remote delete fails.
    """
    response = ConstantContact().delete_email_marketing_campaign(instance)
    response.raise_for_status()
"def",
"pre_delete",
"(",
"cls",
",",
"sender",
",",
"instance",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"cc",
"=",
"ConstantContact",
"(",
")",
"response",
"=",
"cc",
".",
"delete_email_marketing_campaign",
"(",
"instance",
")",
"response",
".",
"raise_for_status",
"(",
")"
] | 44 | 8.833333 |
def get_provider(self, name):
    """Return the provider registered under *name*, instantiating it
    lazily on first use (Jinja2 templating is heavy, so only
    instantiate it if necessary) and caching it in ``self.providers``.
    """
    try:
        return self.providers[name]
    except KeyError:
        provider = self.provider_classes[name](self)
        self.providers[name] = provider
        return provider
"def",
"get_provider",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"not",
"in",
"self",
".",
"providers",
":",
"cls",
"=",
"self",
".",
"provider_classes",
"[",
"name",
"]",
"# instantiate the provider",
"self",
".",
"providers",
"[",
"name",
"]",
"=",
"cls",
"(",
"self",
")",
"return",
"self",
".",
"providers",
"[",
"name",
"]"
] | 44.875 | 3.625 |
def run_json(self):
    """
    Run checks on self.files, printing a json object
    containing information relevant to the CS50 IDE plugin at the end.
    """
    report = {}
    for file in self.files:
        try:
            results = self._check(file)
        except Error as e:
            report[file] = {"error": e.msg}
        else:
            diff_html = "\n".join(self.html_diff(results.original, results.styled))
            report[file] = {
                "score": results.score,
                "comments": results.comment_ratio >= results.COMMENT_MIN,
                "diff": "<pre>{}</pre>".format(diff_html),
            }
    json.dump(report, sys.stdout, indent=4)
    print()
"def",
"run_json",
"(",
"self",
")",
":",
"checks",
"=",
"{",
"}",
"for",
"file",
"in",
"self",
".",
"files",
":",
"try",
":",
"results",
"=",
"self",
".",
"_check",
"(",
"file",
")",
"except",
"Error",
"as",
"e",
":",
"checks",
"[",
"file",
"]",
"=",
"{",
"\"error\"",
":",
"e",
".",
"msg",
"}",
"else",
":",
"checks",
"[",
"file",
"]",
"=",
"{",
"\"score\"",
":",
"results",
".",
"score",
",",
"\"comments\"",
":",
"results",
".",
"comment_ratio",
">=",
"results",
".",
"COMMENT_MIN",
",",
"\"diff\"",
":",
"\"<pre>{}</pre>\"",
".",
"format",
"(",
"\"\\n\"",
".",
"join",
"(",
"self",
".",
"html_diff",
"(",
"results",
".",
"original",
",",
"results",
".",
"styled",
")",
")",
")",
",",
"}",
"json",
".",
"dump",
"(",
"checks",
",",
"sys",
".",
"stdout",
",",
"indent",
"=",
"4",
")",
"print",
"(",
")"
] | 34.136364 | 18.681818 |
def from_raw(self, file_names=None, **kwargs):
"""Load a raw data-file.
Args:
file_names (list of raw-file names): uses CellpyData.file_names if
None. If the list contains more than one file name, then the
runs will be merged together.
"""
# This function only loads one test at a time (but could contain several
# files). The function from_res() also implements loading several
# datasets (using list of lists as input).
if file_names:
self.file_names = file_names
if not isinstance(file_names, (list, tuple)):
self.file_names = [file_names, ]
# file_type = self.tester
raw_file_loader = self.loader
set_number = 0
test = None
counter = 0
self.logger.debug("start iterating through file(s)")
for f in self.file_names:
self.logger.debug("loading raw file:")
self.logger.debug(f"{f}")
new_tests = raw_file_loader(f, **kwargs)
if new_tests:
if test is not None:
self.logger.debug("continuing reading files...")
_test = self._append(test[set_number], new_tests[set_number])
if not _test:
self.logger.warning(f"EMPTY TEST: {f}")
continue
test[set_number] = _test
self.logger.debug("added this test - started merging")
for j in range(len(new_tests[set_number].raw_data_files)):
raw_data_file = new_tests[set_number].raw_data_files[j]
file_size = new_tests[set_number].raw_data_files_length[j]
test[set_number].raw_data_files.append(raw_data_file)
test[set_number].raw_data_files_length.append(file_size)
counter += 1
if counter > 10:
self.logger.debug("ERROR? Too many files to merge")
raise ValueError("Too many files to merge - "
"could be a p2-p3 zip thing")
else:
self.logger.debug("getting data from first file")
if new_tests[set_number].no_data:
self.logger.debug("NO DATA")
else:
test = new_tests
else:
self.logger.debug("NOTHING LOADED")
self.logger.debug("finished loading the raw-files")
test_exists = False
if test:
if test[0].no_data:
self.logging.debug("the first dataset (or only dataset) loaded from the raw data file is empty")
else:
test_exists = True
if test_exists:
if not prms.Reader.sorted_data:
self.logger.debug("sorting data")
test[set_number] = self._sort_data(test[set_number])
self.datasets.append(test[set_number])
else:
self.logger.warning("No new datasets added!")
self.number_of_datasets = len(self.datasets)
self.status_datasets = self._validate_datasets()
self._invent_a_name()
return self | [
"def",
"from_raw",
"(",
"self",
",",
"file_names",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# This function only loads one test at a time (but could contain several",
"# files). The function from_res() also implements loading several",
"# datasets (using list of lists as input).",
"if",
"file_names",
":",
"self",
".",
"file_names",
"=",
"file_names",
"if",
"not",
"isinstance",
"(",
"file_names",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"self",
".",
"file_names",
"=",
"[",
"file_names",
",",
"]",
"# file_type = self.tester",
"raw_file_loader",
"=",
"self",
".",
"loader",
"set_number",
"=",
"0",
"test",
"=",
"None",
"counter",
"=",
"0",
"self",
".",
"logger",
".",
"debug",
"(",
"\"start iterating through file(s)\"",
")",
"for",
"f",
"in",
"self",
".",
"file_names",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"loading raw file:\"",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"f\"{f}\"",
")",
"new_tests",
"=",
"raw_file_loader",
"(",
"f",
",",
"*",
"*",
"kwargs",
")",
"if",
"new_tests",
":",
"if",
"test",
"is",
"not",
"None",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"continuing reading files...\"",
")",
"_test",
"=",
"self",
".",
"_append",
"(",
"test",
"[",
"set_number",
"]",
",",
"new_tests",
"[",
"set_number",
"]",
")",
"if",
"not",
"_test",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"f\"EMPTY TEST: {f}\"",
")",
"continue",
"test",
"[",
"set_number",
"]",
"=",
"_test",
"self",
".",
"logger",
".",
"debug",
"(",
"\"added this test - started merging\"",
")",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"new_tests",
"[",
"set_number",
"]",
".",
"raw_data_files",
")",
")",
":",
"raw_data_file",
"=",
"new_tests",
"[",
"set_number",
"]",
".",
"raw_data_files",
"[",
"j",
"]",
"file_size",
"=",
"new_tests",
"[",
"set_number",
"]",
".",
"raw_data_files_length",
"[",
"j",
"]",
"test",
"[",
"set_number",
"]",
".",
"raw_data_files",
".",
"append",
"(",
"raw_data_file",
")",
"test",
"[",
"set_number",
"]",
".",
"raw_data_files_length",
".",
"append",
"(",
"file_size",
")",
"counter",
"+=",
"1",
"if",
"counter",
">",
"10",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"ERROR? Too many files to merge\"",
")",
"raise",
"ValueError",
"(",
"\"Too many files to merge - \"",
"\"could be a p2-p3 zip thing\"",
")",
"else",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"getting data from first file\"",
")",
"if",
"new_tests",
"[",
"set_number",
"]",
".",
"no_data",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"NO DATA\"",
")",
"else",
":",
"test",
"=",
"new_tests",
"else",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"NOTHING LOADED\"",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"finished loading the raw-files\"",
")",
"test_exists",
"=",
"False",
"if",
"test",
":",
"if",
"test",
"[",
"0",
"]",
".",
"no_data",
":",
"self",
".",
"logging",
".",
"debug",
"(",
"\"the first dataset (or only dataset) loaded from the raw data file is empty\"",
")",
"else",
":",
"test_exists",
"=",
"True",
"if",
"test_exists",
":",
"if",
"not",
"prms",
".",
"Reader",
".",
"sorted_data",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"sorting data\"",
")",
"test",
"[",
"set_number",
"]",
"=",
"self",
".",
"_sort_data",
"(",
"test",
"[",
"set_number",
"]",
")",
"self",
".",
"datasets",
".",
"append",
"(",
"test",
"[",
"set_number",
"]",
")",
"else",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"\"No new datasets added!\"",
")",
"self",
".",
"number_of_datasets",
"=",
"len",
"(",
"self",
".",
"datasets",
")",
"self",
".",
"status_datasets",
"=",
"self",
".",
"_validate_datasets",
"(",
")",
"self",
".",
"_invent_a_name",
"(",
")",
"return",
"self"
] | 42.363636 | 20.792208 |
def get_attachments_by_name(self, name, check_regex, find_first=False):
"""
Gets all attachments by name for the mail.
:param name: The name of the attachment to look for.
:type name: str
:param check_regex: Checks the name for a regular expression.
:type check_regex: bool
:param find_first: If set to True it will only find the first match and then quit.
:type find_first: bool
:returns: a list of tuples each containing name and payload
where the attachments name matches the given name.
:rtype: list of tuple
"""
attachments = []
for part in self.mail.walk():
mail_part = MailPart(part)
if mail_part.is_attachment():
found_attachment = mail_part.has_matching_name(name) if check_regex \
else mail_part.has_equal_name(name)
if found_attachment:
file_name, file_payload = mail_part.get_file()
self.log.info('Found attachment: {}'.format(file_name))
attachments.append((file_name, file_payload))
if find_first:
break
return attachments | [
"def",
"get_attachments_by_name",
"(",
"self",
",",
"name",
",",
"check_regex",
",",
"find_first",
"=",
"False",
")",
":",
"attachments",
"=",
"[",
"]",
"for",
"part",
"in",
"self",
".",
"mail",
".",
"walk",
"(",
")",
":",
"mail_part",
"=",
"MailPart",
"(",
"part",
")",
"if",
"mail_part",
".",
"is_attachment",
"(",
")",
":",
"found_attachment",
"=",
"mail_part",
".",
"has_matching_name",
"(",
"name",
")",
"if",
"check_regex",
"else",
"mail_part",
".",
"has_equal_name",
"(",
"name",
")",
"if",
"found_attachment",
":",
"file_name",
",",
"file_payload",
"=",
"mail_part",
".",
"get_file",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Found attachment: {}'",
".",
"format",
"(",
"file_name",
")",
")",
"attachments",
".",
"append",
"(",
"(",
"file_name",
",",
"file_payload",
")",
")",
"if",
"find_first",
":",
"break",
"return",
"attachments"
] | 42.103448 | 19.482759 |
def search_anime(self, query):
"""Fuzzy searches the Anime Database for the query.
:param str query: The text to fuzzy search.
:returns: List of Anime Objects. This list can be empty.
"""
r = self._query_('/search/anime', 'GET',
params={'query': query})
results = [Anime(item) for item in r.json()]
return results | [
"def",
"search_anime",
"(",
"self",
",",
"query",
")",
":",
"r",
"=",
"self",
".",
"_query_",
"(",
"'/search/anime'",
",",
"'GET'",
",",
"params",
"=",
"{",
"'query'",
":",
"query",
"}",
")",
"results",
"=",
"[",
"Anime",
"(",
"item",
")",
"for",
"item",
"in",
"r",
".",
"json",
"(",
")",
"]",
"return",
"results"
] | 32.166667 | 17.666667 |
def filter_google_songs(songs, include_filters=None, exclude_filters=None, all_includes=False, all_excludes=False):
"""Match a Google Music song dict against a set of metadata filters.
Parameters:
songs (list): Google Music song dicts to filter.
include_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Musicmanager client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values don't match any of the given patterns.
exclude_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Musicmanager client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values match any of the given patterns.
all_includes (bool): If ``True``, all include_filters criteria must match to include a song.
all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song.
Returns:
A list of Google Music song dicts matching criteria and
a list of Google Music song dicts filtered out using filter criteria.
::
(matched, filtered)
"""
matched_songs = []
filtered_songs = []
if include_filters or exclude_filters:
for song in songs:
if _check_filters(
song, include_filters=include_filters, exclude_filters=exclude_filters,
all_includes=all_includes, all_excludes=all_excludes):
matched_songs.append(song)
else:
filtered_songs.append(song)
else:
matched_songs += songs
return matched_songs, filtered_songs | [
"def",
"filter_google_songs",
"(",
"songs",
",",
"include_filters",
"=",
"None",
",",
"exclude_filters",
"=",
"None",
",",
"all_includes",
"=",
"False",
",",
"all_excludes",
"=",
"False",
")",
":",
"matched_songs",
"=",
"[",
"]",
"filtered_songs",
"=",
"[",
"]",
"if",
"include_filters",
"or",
"exclude_filters",
":",
"for",
"song",
"in",
"songs",
":",
"if",
"_check_filters",
"(",
"song",
",",
"include_filters",
"=",
"include_filters",
",",
"exclude_filters",
"=",
"exclude_filters",
",",
"all_includes",
"=",
"all_includes",
",",
"all_excludes",
"=",
"all_excludes",
")",
":",
"matched_songs",
".",
"append",
"(",
"song",
")",
"else",
":",
"filtered_songs",
".",
"append",
"(",
"song",
")",
"else",
":",
"matched_songs",
"+=",
"songs",
"return",
"matched_songs",
",",
"filtered_songs"
] | 37 | 29.813953 |
def generate_random_nhs_number() -> int:
"""
Returns a random valid NHS number, as an ``int``.
"""
check_digit = 10 # NHS numbers with this check digit are all invalid
while check_digit == 10:
digits = [random.randint(1, 9)] # don't start with a zero
digits.extend([random.randint(0, 9) for _ in range(8)])
# ... length now 9
check_digit = nhs_check_digit(digits)
# noinspection PyUnboundLocalVariable
digits.append(check_digit)
return int("".join([str(d) for d in digits])) | [
"def",
"generate_random_nhs_number",
"(",
")",
"->",
"int",
":",
"check_digit",
"=",
"10",
"# NHS numbers with this check digit are all invalid",
"while",
"check_digit",
"==",
"10",
":",
"digits",
"=",
"[",
"random",
".",
"randint",
"(",
"1",
",",
"9",
")",
"]",
"# don't start with a zero",
"digits",
".",
"extend",
"(",
"[",
"random",
".",
"randint",
"(",
"0",
",",
"9",
")",
"for",
"_",
"in",
"range",
"(",
"8",
")",
"]",
")",
"# ... length now 9",
"check_digit",
"=",
"nhs_check_digit",
"(",
"digits",
")",
"# noinspection PyUnboundLocalVariable",
"digits",
".",
"append",
"(",
"check_digit",
")",
"return",
"int",
"(",
"\"\"",
".",
"join",
"(",
"[",
"str",
"(",
"d",
")",
"for",
"d",
"in",
"digits",
"]",
")",
")"
] | 40.615385 | 11.230769 |
def parse_text(text: str, schema: dict) -> Any:
"""
Validate and parse the BMA answer from websocket
:param text: the bma answer
:param schema: dict for jsonschema
:return: the json data
"""
try:
data = json.loads(text)
jsonschema.validate(data, schema)
except (TypeError, json.decoder.JSONDecodeError):
raise jsonschema.ValidationError("Could not parse json")
return data | [
"def",
"parse_text",
"(",
"text",
":",
"str",
",",
"schema",
":",
"dict",
")",
"->",
"Any",
":",
"try",
":",
"data",
"=",
"json",
".",
"loads",
"(",
"text",
")",
"jsonschema",
".",
"validate",
"(",
"data",
",",
"schema",
")",
"except",
"(",
"TypeError",
",",
"json",
".",
"decoder",
".",
"JSONDecodeError",
")",
":",
"raise",
"jsonschema",
".",
"ValidationError",
"(",
"\"Could not parse json\"",
")",
"return",
"data"
] | 28 | 15.2 |
def filter_publisher_references(root, head, update):
"""Remove references from ``update`` if there are any in ``head``.
This is useful when merging a record from a publisher with an update form arXiv,
as arXiv should never overwrite references from the publisher.
"""
if 'references' in head:
root = _remove_if_present(root, 'references')
update = _remove_if_present(update, 'references')
return root, head, update | [
"def",
"filter_publisher_references",
"(",
"root",
",",
"head",
",",
"update",
")",
":",
"if",
"'references'",
"in",
"head",
":",
"root",
"=",
"_remove_if_present",
"(",
"root",
",",
"'references'",
")",
"update",
"=",
"_remove_if_present",
"(",
"update",
",",
"'references'",
")",
"return",
"root",
",",
"head",
",",
"update"
] | 40.545455 | 19.545455 |
def parse(representation):
"""Attempts to parse an ISO8601 formatted ``representation`` string,
which could be of any valid ISO8601 format (date, time, duration, interval).
Return value is specific to ``representation``.
"""
representation = str(representation).upper().strip()
if '/' in representation:
return parse_interval(representation)
if representation[0] is 'P':
return parse_duration(representation)
return parse_date(representation) | [
"def",
"parse",
"(",
"representation",
")",
":",
"representation",
"=",
"str",
"(",
"representation",
")",
".",
"upper",
"(",
")",
".",
"strip",
"(",
")",
"if",
"'/'",
"in",
"representation",
":",
"return",
"parse_interval",
"(",
"representation",
")",
"if",
"representation",
"[",
"0",
"]",
"is",
"'P'",
":",
"return",
"parse_duration",
"(",
"representation",
")",
"return",
"parse_date",
"(",
"representation",
")"
] | 32 | 18.2 |
def _synoname_strip_punct(self, word):
"""Return a word with punctuation stripped out.
Parameters
----------
word : str
A word to strip punctuation from
Returns
-------
str
The word stripped of punctuation
Examples
--------
>>> pe = Synoname()
>>> pe._synoname_strip_punct('AB;CD EF-GH$IJ')
'ABCD EFGHIJ'
"""
stripped = ''
for char in word:
if char not in set(',-./:;"&\'()!{|}?$%*+<=>[\\]^_`~'):
stripped += char
return stripped.strip() | [
"def",
"_synoname_strip_punct",
"(",
"self",
",",
"word",
")",
":",
"stripped",
"=",
"''",
"for",
"char",
"in",
"word",
":",
"if",
"char",
"not",
"in",
"set",
"(",
"',-./:;\"&\\'()!{|}?$%*+<=>[\\\\]^_`~'",
")",
":",
"stripped",
"+=",
"char",
"return",
"stripped",
".",
"strip",
"(",
")"
] | 23.88 | 19.48 |
def element_text_should_be(self, locator, expected, message=''):
"""Verifies element identified by ``locator`` exactly contains text ``expected``.
In contrast to `Element Should Contain Text`, this keyword does not try
a substring match but an exact match on the element identified by ``locator``.
``message`` can be used to override the default error message.
New in AppiumLibrary 1.4.
"""
self._info("Verifying element '%s' contains exactly text '%s'."
% (locator, expected))
element = self._element_find(locator, True, True)
actual = element.text
if expected != actual:
if not message:
message = "The text of element '%s' should have been '%s' but "\
"in fact it was '%s'." % (locator, expected, actual)
raise AssertionError(message) | [
"def",
"element_text_should_be",
"(",
"self",
",",
"locator",
",",
"expected",
",",
"message",
"=",
"''",
")",
":",
"self",
".",
"_info",
"(",
"\"Verifying element '%s' contains exactly text '%s'.\"",
"%",
"(",
"locator",
",",
"expected",
")",
")",
"element",
"=",
"self",
".",
"_element_find",
"(",
"locator",
",",
"True",
",",
"True",
")",
"actual",
"=",
"element",
".",
"text",
"if",
"expected",
"!=",
"actual",
":",
"if",
"not",
"message",
":",
"message",
"=",
"\"The text of element '%s' should have been '%s' but \"",
"\"in fact it was '%s'.\"",
"%",
"(",
"locator",
",",
"expected",
",",
"actual",
")",
"raise",
"AssertionError",
"(",
"message",
")"
] | 47.631579 | 22.684211 |
def calc_coverage(ref, start, end, length, nucs):
"""
calculate coverage for positions in range start -> end
"""
ref = ref[start - 1:end]
bases = 0
for pos in ref:
for base, count in list(pos.items()):
if base in nucs:
bases += count
return float(bases)/float(length) | [
"def",
"calc_coverage",
"(",
"ref",
",",
"start",
",",
"end",
",",
"length",
",",
"nucs",
")",
":",
"ref",
"=",
"ref",
"[",
"start",
"-",
"1",
":",
"end",
"]",
"bases",
"=",
"0",
"for",
"pos",
"in",
"ref",
":",
"for",
"base",
",",
"count",
"in",
"list",
"(",
"pos",
".",
"items",
"(",
")",
")",
":",
"if",
"base",
"in",
"nucs",
":",
"bases",
"+=",
"count",
"return",
"float",
"(",
"bases",
")",
"/",
"float",
"(",
"length",
")"
] | 29.181818 | 10.636364 |
def find_value_in_object(attr, obj):
"""Return values for any key coincidence with attr in obj or any other
nested dict.
"""
# Carry on inspecting inside the list or tuple
if isinstance(obj, (collections.Iterator, list)):
for item in obj:
yield from find_value_in_object(attr, item)
# Final object (dict or entity) inspect inside
elif isinstance(obj, collections.Mapping):
# If result is found, inspect inside and return inner results
if attr in obj:
# If it is iterable, just return the inner elements (avoid nested
# lists)
if isinstance(obj[attr], (collections.Iterator, list)):
for item in obj[attr]:
yield item
# If not, return just the objects
else:
yield obj[attr]
# Carry on inspecting inside the object
for item in obj.values():
if item:
yield from find_value_in_object(attr, item) | [
"def",
"find_value_in_object",
"(",
"attr",
",",
"obj",
")",
":",
"# Carry on inspecting inside the list or tuple",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"collections",
".",
"Iterator",
",",
"list",
")",
")",
":",
"for",
"item",
"in",
"obj",
":",
"yield",
"from",
"find_value_in_object",
"(",
"attr",
",",
"item",
")",
"# Final object (dict or entity) inspect inside",
"elif",
"isinstance",
"(",
"obj",
",",
"collections",
".",
"Mapping",
")",
":",
"# If result is found, inspect inside and return inner results",
"if",
"attr",
"in",
"obj",
":",
"# If it is iterable, just return the inner elements (avoid nested",
"# lists)",
"if",
"isinstance",
"(",
"obj",
"[",
"attr",
"]",
",",
"(",
"collections",
".",
"Iterator",
",",
"list",
")",
")",
":",
"for",
"item",
"in",
"obj",
"[",
"attr",
"]",
":",
"yield",
"item",
"# If not, return just the objects",
"else",
":",
"yield",
"obj",
"[",
"attr",
"]",
"# Carry on inspecting inside the object",
"for",
"item",
"in",
"obj",
".",
"values",
"(",
")",
":",
"if",
"item",
":",
"yield",
"from",
"find_value_in_object",
"(",
"attr",
",",
"item",
")"
] | 32.9 | 19 |
async def _on_report_notification(self, event):
"""Callback function called when a report event is received.
Args:
event (dict): The report_event
"""
conn_string = event.get('connection_string')
report = self._report_parser.deserialize_report(event.get('serialized_report'))
self.notify_event(conn_string, 'report', report) | [
"async",
"def",
"_on_report_notification",
"(",
"self",
",",
"event",
")",
":",
"conn_string",
"=",
"event",
".",
"get",
"(",
"'connection_string'",
")",
"report",
"=",
"self",
".",
"_report_parser",
".",
"deserialize_report",
"(",
"event",
".",
"get",
"(",
"'serialized_report'",
")",
")",
"self",
".",
"notify_event",
"(",
"conn_string",
",",
"'report'",
",",
"report",
")"
] | 34.181818 | 21 |
def has_source_contents(self, src_id):
"""Checks if some sources exist."""
return bool(rustcall(_lib.lsm_view_has_source_contents,
self._get_ptr(), src_id)) | [
"def",
"has_source_contents",
"(",
"self",
",",
"src_id",
")",
":",
"return",
"bool",
"(",
"rustcall",
"(",
"_lib",
".",
"lsm_view_has_source_contents",
",",
"self",
".",
"_get_ptr",
"(",
")",
",",
"src_id",
")",
")"
] | 49.5 | 9.75 |
def get_needs_provenance(parameters):
"""Get the provenance of minimum needs.
:param parameters: A dictionary of impact function parameters.
:type parameters: dict
:returns: A parameter of provenance
:rtype: TextParameter
"""
if 'minimum needs' not in parameters:
return None
needs = parameters['minimum needs']
provenance = [p for p in needs if p.name == tr('Provenance')]
if provenance:
return provenance[0]
return None | [
"def",
"get_needs_provenance",
"(",
"parameters",
")",
":",
"if",
"'minimum needs'",
"not",
"in",
"parameters",
":",
"return",
"None",
"needs",
"=",
"parameters",
"[",
"'minimum needs'",
"]",
"provenance",
"=",
"[",
"p",
"for",
"p",
"in",
"needs",
"if",
"p",
".",
"name",
"==",
"tr",
"(",
"'Provenance'",
")",
"]",
"if",
"provenance",
":",
"return",
"provenance",
"[",
"0",
"]",
"return",
"None"
] | 29.25 | 15.375 |
def compute_memory_contents_under_schedule(self, schedule):
"""The in-memory tensors present when executing each operation in schedule.
Simulates running operations in the order given by a schedule. Keeps track
of the tensors in memory at every point in time, and outputs a list (one
entry for each point in time) of all sets of all memory contents (i.e. a
frozenset of strings) ever seen in this execution.
It is assumed (but not checked) that schedule is a valid topological sort of
the operations in this graph.
Args:
schedule: A list of integer ids; the order to run operations in.
Returns:
a list of frozenset of strings, where the ith entry describes the tensors
in memory when executing operation i (where schedule[i] is an index into
get_all_operation_names()).
"""
out_degree = self._compute_initial_out_degree()
curr_memory_contents = set()
memory_contents_for_each_operation = []
for operation_id in schedule:
operation_name = self._operations[operation_id].name
# Allocate new memory to perform the computation at this node.
for output_name in self.get_operation_output_names(operation_name):
curr_memory_contents.add(output_name)
memory_contents_for_each_operation.append(frozenset(curr_memory_contents))
# Free any tensors which are no longer needed.
for output_name in self.get_operation_output_names(operation_name):
if out_degree[output_name] == 0:
curr_memory_contents.remove(output_name)
for input_name in self.get_operation_input_names(operation_name):
out_degree[input_name] -= 1
if out_degree[input_name] == 0:
curr_memory_contents.remove(input_name)
return memory_contents_for_each_operation | [
"def",
"compute_memory_contents_under_schedule",
"(",
"self",
",",
"schedule",
")",
":",
"out_degree",
"=",
"self",
".",
"_compute_initial_out_degree",
"(",
")",
"curr_memory_contents",
"=",
"set",
"(",
")",
"memory_contents_for_each_operation",
"=",
"[",
"]",
"for",
"operation_id",
"in",
"schedule",
":",
"operation_name",
"=",
"self",
".",
"_operations",
"[",
"operation_id",
"]",
".",
"name",
"# Allocate new memory to perform the computation at this node.",
"for",
"output_name",
"in",
"self",
".",
"get_operation_output_names",
"(",
"operation_name",
")",
":",
"curr_memory_contents",
".",
"add",
"(",
"output_name",
")",
"memory_contents_for_each_operation",
".",
"append",
"(",
"frozenset",
"(",
"curr_memory_contents",
")",
")",
"# Free any tensors which are no longer needed.",
"for",
"output_name",
"in",
"self",
".",
"get_operation_output_names",
"(",
"operation_name",
")",
":",
"if",
"out_degree",
"[",
"output_name",
"]",
"==",
"0",
":",
"curr_memory_contents",
".",
"remove",
"(",
"output_name",
")",
"for",
"input_name",
"in",
"self",
".",
"get_operation_input_names",
"(",
"operation_name",
")",
":",
"out_degree",
"[",
"input_name",
"]",
"-=",
"1",
"if",
"out_degree",
"[",
"input_name",
"]",
"==",
"0",
":",
"curr_memory_contents",
".",
"remove",
"(",
"input_name",
")",
"return",
"memory_contents_for_each_operation"
] | 42.926829 | 22.97561 |
def publishFeatureCollections(self, configs):
"""Publishes feature collections to a feature service.
Args:
configs (list): A list of JSON configuration feature service details to publish.
Returns:
dict: A dictionary of results objects.
"""
if self.securityhandler is None:
print ("Security handler required")
return
config = None
res = None
resItm = None
try:
res = []
if isinstance(configs, list):
for config in configs:
if 'ReplaceTag' in config:
resItm = {"ReplaceTag":config['ReplaceTag'] }
else:
resItm = {"ReplaceTag":"{FeatureService}" }
if 'Zip' in config:
resItm['FCInfo'] = self._publishFeatureCollection(config=config)
if not resItm['FCInfo'] is None and 'id' in resItm['FCInfo']:
print ("%s feature collection created" % resItm['FCInfo']['id'])
res.append(resItm)
else:
print (str(resItm['FCInfo']))
return res
except common.ArcRestHelperError as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "publishFeatureCollections",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
resItm = None
config = None
del resItm
del config
gc.collect() | [
"def",
"publishFeatureCollections",
"(",
"self",
",",
"configs",
")",
":",
"if",
"self",
".",
"securityhandler",
"is",
"None",
":",
"print",
"(",
"\"Security handler required\"",
")",
"return",
"config",
"=",
"None",
"res",
"=",
"None",
"resItm",
"=",
"None",
"try",
":",
"res",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"configs",
",",
"list",
")",
":",
"for",
"config",
"in",
"configs",
":",
"if",
"'ReplaceTag'",
"in",
"config",
":",
"resItm",
"=",
"{",
"\"ReplaceTag\"",
":",
"config",
"[",
"'ReplaceTag'",
"]",
"}",
"else",
":",
"resItm",
"=",
"{",
"\"ReplaceTag\"",
":",
"\"{FeatureService}\"",
"}",
"if",
"'Zip'",
"in",
"config",
":",
"resItm",
"[",
"'FCInfo'",
"]",
"=",
"self",
".",
"_publishFeatureCollection",
"(",
"config",
"=",
"config",
")",
"if",
"not",
"resItm",
"[",
"'FCInfo'",
"]",
"is",
"None",
"and",
"'id'",
"in",
"resItm",
"[",
"'FCInfo'",
"]",
":",
"print",
"(",
"\"%s feature collection created\"",
"%",
"resItm",
"[",
"'FCInfo'",
"]",
"[",
"'id'",
"]",
")",
"res",
".",
"append",
"(",
"resItm",
")",
"else",
":",
"print",
"(",
"str",
"(",
"resItm",
"[",
"'FCInfo'",
"]",
")",
")",
"return",
"res",
"except",
"common",
".",
"ArcRestHelperError",
"as",
"e",
":",
"raise",
"e",
"except",
"Exception",
"as",
"e",
":",
"line",
",",
"filename",
",",
"synerror",
"=",
"trace",
"(",
")",
"raise",
"common",
".",
"ArcRestHelperError",
"(",
"{",
"\"function\"",
":",
"\"publishFeatureCollections\"",
",",
"\"line\"",
":",
"line",
",",
"\"filename\"",
":",
"filename",
",",
"\"synerror\"",
":",
"synerror",
",",
"}",
")",
"finally",
":",
"resItm",
"=",
"None",
"config",
"=",
"None",
"del",
"resItm",
"del",
"config",
"gc",
".",
"collect",
"(",
")"
] | 29.517241 | 21.465517 |
def _calculate_distance(latlon1, latlon2):
"""Calculates the distance between two points on earth.
"""
lat1, lon1 = latlon1
lat2, lon2 = latlon2
dlon = lon2 - lon1
dlat = lat2 - lat1
R = 6371 # radius of the earth in kilometers
a = np.sin(dlat / 2)**2 + np.cos(lat1) * np.cos(lat2) * (np.sin(dlon / 2))**2
c = 2 * np.pi * R * np.arctan2(np.sqrt(a), np.sqrt(1 - a)) / 180
return c | [
"def",
"_calculate_distance",
"(",
"latlon1",
",",
"latlon2",
")",
":",
"lat1",
",",
"lon1",
"=",
"latlon1",
"lat2",
",",
"lon2",
"=",
"latlon2",
"dlon",
"=",
"lon2",
"-",
"lon1",
"dlat",
"=",
"lat2",
"-",
"lat1",
"R",
"=",
"6371",
"# radius of the earth in kilometers",
"a",
"=",
"np",
".",
"sin",
"(",
"dlat",
"/",
"2",
")",
"**",
"2",
"+",
"np",
".",
"cos",
"(",
"lat1",
")",
"*",
"np",
".",
"cos",
"(",
"lat2",
")",
"*",
"(",
"np",
".",
"sin",
"(",
"dlon",
"/",
"2",
")",
")",
"**",
"2",
"c",
"=",
"2",
"*",
"np",
".",
"pi",
"*",
"R",
"*",
"np",
".",
"arctan2",
"(",
"np",
".",
"sqrt",
"(",
"a",
")",
",",
"np",
".",
"sqrt",
"(",
"1",
"-",
"a",
")",
")",
"/",
"180",
"return",
"c"
] | 37.272727 | 16 |
def _update_dest_ip(self):
'''如果未指定DEST_IP,默认与RTSP使用相同IP'''
global DEST_IP
if not DEST_IP:
DEST_IP = self._sock.getsockname()[0]
PRINT('DEST_IP: %s\n'%DEST_IP, CYAN) | [
"def",
"_update_dest_ip",
"(",
"self",
")",
":",
"global",
"DEST_IP",
"if",
"not",
"DEST_IP",
":",
"DEST_IP",
"=",
"self",
".",
"_sock",
".",
"getsockname",
"(",
")",
"[",
"0",
"]",
"PRINT",
"(",
"'DEST_IP: %s\\n'",
"%",
"DEST_IP",
",",
"CYAN",
")"
] | 34.666667 | 11 |
def decode(string, encoding=None, errors=None):
"""Decode from specified encoding.
``encoding`` defaults to the preferred encoding.
``errors`` defaults to the preferred error handler.
"""
if encoding is None:
encoding = getpreferredencoding()
if errors is None:
errors = getpreferrederrors()
return string.decode(encoding, errors) | [
"def",
"decode",
"(",
"string",
",",
"encoding",
"=",
"None",
",",
"errors",
"=",
"None",
")",
":",
"if",
"encoding",
"is",
"None",
":",
"encoding",
"=",
"getpreferredencoding",
"(",
")",
"if",
"errors",
"is",
"None",
":",
"errors",
"=",
"getpreferrederrors",
"(",
")",
"return",
"string",
".",
"decode",
"(",
"encoding",
",",
"errors",
")"
] | 33.181818 | 10.363636 |
def _get_stddevs(self, C, stddev_types, num_sites, mag, c1_rrup,
log_phi_ss, mean_phi_ss):
"""
Return standard deviations
"""
phi_ss = _compute_phi_ss(C, mag, c1_rrup, log_phi_ss, mean_phi_ss)
stddevs = []
for stddev_type in stddev_types:
assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
if stddev_type == const.StdDev.TOTAL:
stddevs.append(np.sqrt(
C['tau'] * C['tau'] +
phi_ss * phi_ss) +
np.zeros(num_sites))
elif stddev_type == const.StdDev.INTRA_EVENT:
stddevs.append(phi_ss + np.zeros(num_sites))
elif stddev_type == const.StdDev.INTER_EVENT:
stddevs.append(C['tau'] + np.zeros(num_sites))
return stddevs | [
"def",
"_get_stddevs",
"(",
"self",
",",
"C",
",",
"stddev_types",
",",
"num_sites",
",",
"mag",
",",
"c1_rrup",
",",
"log_phi_ss",
",",
"mean_phi_ss",
")",
":",
"phi_ss",
"=",
"_compute_phi_ss",
"(",
"C",
",",
"mag",
",",
"c1_rrup",
",",
"log_phi_ss",
",",
"mean_phi_ss",
")",
"stddevs",
"=",
"[",
"]",
"for",
"stddev_type",
"in",
"stddev_types",
":",
"assert",
"stddev_type",
"in",
"self",
".",
"DEFINED_FOR_STANDARD_DEVIATION_TYPES",
"if",
"stddev_type",
"==",
"const",
".",
"StdDev",
".",
"TOTAL",
":",
"stddevs",
".",
"append",
"(",
"np",
".",
"sqrt",
"(",
"C",
"[",
"'tau'",
"]",
"*",
"C",
"[",
"'tau'",
"]",
"+",
"phi_ss",
"*",
"phi_ss",
")",
"+",
"np",
".",
"zeros",
"(",
"num_sites",
")",
")",
"elif",
"stddev_type",
"==",
"const",
".",
"StdDev",
".",
"INTRA_EVENT",
":",
"stddevs",
".",
"append",
"(",
"phi_ss",
"+",
"np",
".",
"zeros",
"(",
"num_sites",
")",
")",
"elif",
"stddev_type",
"==",
"const",
".",
"StdDev",
".",
"INTER_EVENT",
":",
"stddevs",
".",
"append",
"(",
"C",
"[",
"'tau'",
"]",
"+",
"np",
".",
"zeros",
"(",
"num_sites",
")",
")",
"return",
"stddevs"
] | 37.956522 | 18.304348 |
def unpack_reply(cls, header, payload):
"""Take already unpacked header and binary payload of received request reply and creates message instance
:param header: a namedtuple header object providing header information
:param payload: payload (BytesIO instance) of message
"""
reply = cls(
header.session_id, header.packet_count,
segments=tuple(ReplySegment.unpack_from(payload, expected_segments=header.num_segments)),
header=header
)
trace(reply)
return reply | [
"def",
"unpack_reply",
"(",
"cls",
",",
"header",
",",
"payload",
")",
":",
"reply",
"=",
"cls",
"(",
"header",
".",
"session_id",
",",
"header",
".",
"packet_count",
",",
"segments",
"=",
"tuple",
"(",
"ReplySegment",
".",
"unpack_from",
"(",
"payload",
",",
"expected_segments",
"=",
"header",
".",
"num_segments",
")",
")",
",",
"header",
"=",
"header",
")",
"trace",
"(",
"reply",
")",
"return",
"reply"
] | 45.666667 | 19.833333 |
def reset(self):
"""Resets the iterator to the beginning of the data."""
self.curr_idx = 0
#shuffle data in each bucket
random.shuffle(self.idx)
for i, buck in enumerate(self.sentences):
self.indices[i], self.sentences[i], self.characters[i], self.label[i] = shuffle(self.indices[i],
self.sentences[i],
self.characters[i],
self.label[i])
self.ndindex = []
self.ndsent = []
self.ndchar = []
self.ndlabel = []
#for each bucket of data
for i, buck in enumerate(self.sentences):
#append the lists with an array
self.ndindex.append(ndarray.array(self.indices[i], dtype=self.dtype))
self.ndsent.append(ndarray.array(self.sentences[i], dtype=self.dtype))
self.ndchar.append(ndarray.array(self.characters[i], dtype=self.dtype))
self.ndlabel.append(ndarray.array(self.label[i], dtype=self.dtype)) | [
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"curr_idx",
"=",
"0",
"#shuffle data in each bucket",
"random",
".",
"shuffle",
"(",
"self",
".",
"idx",
")",
"for",
"i",
",",
"buck",
"in",
"enumerate",
"(",
"self",
".",
"sentences",
")",
":",
"self",
".",
"indices",
"[",
"i",
"]",
",",
"self",
".",
"sentences",
"[",
"i",
"]",
",",
"self",
".",
"characters",
"[",
"i",
"]",
",",
"self",
".",
"label",
"[",
"i",
"]",
"=",
"shuffle",
"(",
"self",
".",
"indices",
"[",
"i",
"]",
",",
"self",
".",
"sentences",
"[",
"i",
"]",
",",
"self",
".",
"characters",
"[",
"i",
"]",
",",
"self",
".",
"label",
"[",
"i",
"]",
")",
"self",
".",
"ndindex",
"=",
"[",
"]",
"self",
".",
"ndsent",
"=",
"[",
"]",
"self",
".",
"ndchar",
"=",
"[",
"]",
"self",
".",
"ndlabel",
"=",
"[",
"]",
"#for each bucket of data",
"for",
"i",
",",
"buck",
"in",
"enumerate",
"(",
"self",
".",
"sentences",
")",
":",
"#append the lists with an array",
"self",
".",
"ndindex",
".",
"append",
"(",
"ndarray",
".",
"array",
"(",
"self",
".",
"indices",
"[",
"i",
"]",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
")",
"self",
".",
"ndsent",
".",
"append",
"(",
"ndarray",
".",
"array",
"(",
"self",
".",
"sentences",
"[",
"i",
"]",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
")",
"self",
".",
"ndchar",
".",
"append",
"(",
"ndarray",
".",
"array",
"(",
"self",
".",
"characters",
"[",
"i",
"]",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
")",
"self",
".",
"ndlabel",
".",
"append",
"(",
"ndarray",
".",
"array",
"(",
"self",
".",
"label",
"[",
"i",
"]",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
")"
] | 52.304348 | 28.782609 |
def convolve_image(self, image_array, blurring_array):
"""For a given 1D regular array and blurring array, convolve the two using this convolver.
Parameters
-----------
image_array : ndarray
1D array of the regular values which are to be blurred with the convolver's PSF.
blurring_array : ndarray
1D array of the blurring regular values which blur into the regular-array after PSF convolution.
"""
return self.convolve_jit(image_array, self.image_frame_indexes, self.image_frame_psfs, self.image_frame_lengths,
blurring_array, self.blurring_frame_indexes, self.blurring_frame_psfs,
self.blurring_frame_lengths) | [
"def",
"convolve_image",
"(",
"self",
",",
"image_array",
",",
"blurring_array",
")",
":",
"return",
"self",
".",
"convolve_jit",
"(",
"image_array",
",",
"self",
".",
"image_frame_indexes",
",",
"self",
".",
"image_frame_psfs",
",",
"self",
".",
"image_frame_lengths",
",",
"blurring_array",
",",
"self",
".",
"blurring_frame_indexes",
",",
"self",
".",
"blurring_frame_psfs",
",",
"self",
".",
"blurring_frame_lengths",
")"
] | 57.307692 | 30.769231 |
def _read_requirements(filename, extra_packages):
"""Returns a list of package requirements read from the file."""
requirements_file = open(filename).read()
hard_requirements = []
for line in requirements_file.splitlines():
if _is_requirement(line):
if line.find(';') > -1:
dep, condition = tuple(line.split(';'))
extra_packages[condition.strip()].append(dep.strip())
else:
hard_requirements.append(line.strip())
return hard_requirements, extra_packages | [
"def",
"_read_requirements",
"(",
"filename",
",",
"extra_packages",
")",
":",
"requirements_file",
"=",
"open",
"(",
"filename",
")",
".",
"read",
"(",
")",
"hard_requirements",
"=",
"[",
"]",
"for",
"line",
"in",
"requirements_file",
".",
"splitlines",
"(",
")",
":",
"if",
"_is_requirement",
"(",
"line",
")",
":",
"if",
"line",
".",
"find",
"(",
"';'",
")",
">",
"-",
"1",
":",
"dep",
",",
"condition",
"=",
"tuple",
"(",
"line",
".",
"split",
"(",
"';'",
")",
")",
"extra_packages",
"[",
"condition",
".",
"strip",
"(",
")",
"]",
".",
"append",
"(",
"dep",
".",
"strip",
"(",
")",
")",
"else",
":",
"hard_requirements",
".",
"append",
"(",
"line",
".",
"strip",
"(",
")",
")",
"return",
"hard_requirements",
",",
"extra_packages"
] | 41.692308 | 13.230769 |
def assert_create_update_delete_permission(f):
"""Access only by subjects with Create/Update/Delete permission and by trusted
infrastructure (CNs)."""
@functools.wraps(f)
def wrapper(request, *args, **kwargs):
d1_gmn.app.auth.assert_create_update_delete_permission(request)
return f(request, *args, **kwargs)
return wrapper | [
"def",
"assert_create_update_delete_permission",
"(",
"f",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"d1_gmn",
".",
"app",
".",
"auth",
".",
"assert_create_update_delete_permission",
"(",
"request",
")",
"return",
"f",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper"
] | 35.2 | 16 |
def normalize_rgb(r, g, b, a):
"""Transform a rgb[a] color to #hex[a].
"""
r = int(r, 10)
g = int(g, 10)
b = int(b, 10)
if a:
a = float(a) * 256
if r > 255 or g > 255 or b > 255 or (a and a > 255):
return None
color = '#%02x%02x%02x' % (r, g, b)
if a:
color += '%02x' % int(a)
return color | [
"def",
"normalize_rgb",
"(",
"r",
",",
"g",
",",
"b",
",",
"a",
")",
":",
"r",
"=",
"int",
"(",
"r",
",",
"10",
")",
"g",
"=",
"int",
"(",
"g",
",",
"10",
")",
"b",
"=",
"int",
"(",
"b",
",",
"10",
")",
"if",
"a",
":",
"a",
"=",
"float",
"(",
"a",
")",
"*",
"256",
"if",
"r",
">",
"255",
"or",
"g",
">",
"255",
"or",
"b",
">",
"255",
"or",
"(",
"a",
"and",
"a",
">",
"255",
")",
":",
"return",
"None",
"color",
"=",
"'#%02x%02x%02x'",
"%",
"(",
"r",
",",
"g",
",",
"b",
")",
"if",
"a",
":",
"color",
"+=",
"'%02x'",
"%",
"int",
"(",
"a",
")",
"return",
"color"
] | 24.285714 | 15.857143 |
def get_net(req):
"""Get the net of any 'next' and 'prev' querystrings."""
try:
nxt, prev = map(
int, (req.GET.get('cal_next', 0), req.GET.get('cal_prev', 0))
)
net = nxt - prev
except Exception:
net = 0
return net | [
"def",
"get_net",
"(",
"req",
")",
":",
"try",
":",
"nxt",
",",
"prev",
"=",
"map",
"(",
"int",
",",
"(",
"req",
".",
"GET",
".",
"get",
"(",
"'cal_next'",
",",
"0",
")",
",",
"req",
".",
"GET",
".",
"get",
"(",
"'cal_prev'",
",",
"0",
")",
")",
")",
"net",
"=",
"nxt",
"-",
"prev",
"except",
"Exception",
":",
"net",
"=",
"0",
"return",
"net"
] | 26.5 | 22.1 |
def set_extension(self, name, value):
"""
Sets the value for an extension using a fully constructed
asn1crypto.core.Asn1Value object. Normally this should not be needed,
and the convenience attributes should be sufficient.
See the definition of asn1crypto.ocsp.SingleResponseExtension and
asn1crypto.ocsp.ResponseDataExtension to determine the appropriate
object type for a given extension. Extensions are marked as critical
when RFC 6960 indicates so.
:param name:
A unicode string of an extension id name from
asn1crypto.ocsp.SingleResponseExtensionId or
asn1crypto.ocsp.ResponseDataExtensionId. If the extension is not one
defined in those classes, this must be an instance of one of the
classes instead of a unicode string.
:param value:
A value object per the specs defined by
asn1crypto.ocsp.SingleResponseExtension or
asn1crypto.ocsp.ResponseDataExtension
"""
if isinstance(name, str_cls):
response_data_extension_oids = set([
'nonce',
'extended_revoke',
'1.3.6.1.5.5.7.48.1.2',
'1.3.6.1.5.5.7.48.1.9'
])
single_response_extension_oids = set([
'crl',
'archive_cutoff',
'crl_reason',
'invalidity_date',
'certificate_issuer',
'1.3.6.1.5.5.7.48.1.3',
'1.3.6.1.5.5.7.48.1.6',
'2.5.29.21',
'2.5.29.24',
'2.5.29.29'
])
if name in response_data_extension_oids:
name = ocsp.ResponseDataExtensionId(name)
elif name in single_response_extension_oids:
name = ocsp.SingleResponseExtensionId(name)
else:
raise ValueError(_pretty_message(
'''
name must be a unicode string from
asn1crypto.ocsp.ResponseDataExtensionId or
asn1crypto.ocsp.SingleResponseExtensionId, not %s
''',
repr(name)
))
if isinstance(name, ocsp.ResponseDataExtensionId):
extension = ocsp.ResponseDataExtension({'extn_id': name})
elif isinstance(name, ocsp.SingleResponseExtensionId):
extension = ocsp.SingleResponseExtension({'extn_id': name})
else:
raise TypeError(_pretty_message(
'''
name must be a unicode string or an instance of
asn1crypto.ocsp.SingleResponseExtensionId or
asn1crypto.ocsp.ResponseDataExtensionId, not %s
''',
_type_name(name)
))
# We use native here to convert OIDs to meaningful names
name = extension['extn_id'].native
spec = extension.spec('extn_value')
if name == 'nonce':
raise ValueError(_pretty_message(
'''
The nonce value should be set via the .nonce attribute, not the
.set_extension() method
'''
))
if name == 'crl_reason':
raise ValueError(_pretty_message(
'''
The crl_reason value should be set via the certificate_status
parameter of the OCSPResponseBuilder() constructor, not the
.set_extension() method
'''
))
if name == 'certificate_issuer':
raise ValueError(_pretty_message(
'''
The certificate_issuer value should be set via the
.certificate_issuer attribute, not the .set_extension() method
'''
))
if not isinstance(value, spec) and value is not None:
raise TypeError(_pretty_message(
'''
value must be an instance of %s, not %s
''',
_type_name(spec),
_type_name(value)
))
if isinstance(extension, ocsp.ResponseDataExtension):
extn_dict = self._response_data_extensions
else:
extn_dict = self._single_response_extensions
if value is None:
if name in extn_dict:
del extn_dict[name]
else:
extn_dict[name] = value | [
"def",
"set_extension",
"(",
"self",
",",
"name",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"name",
",",
"str_cls",
")",
":",
"response_data_extension_oids",
"=",
"set",
"(",
"[",
"'nonce'",
",",
"'extended_revoke'",
",",
"'1.3.6.1.5.5.7.48.1.2'",
",",
"'1.3.6.1.5.5.7.48.1.9'",
"]",
")",
"single_response_extension_oids",
"=",
"set",
"(",
"[",
"'crl'",
",",
"'archive_cutoff'",
",",
"'crl_reason'",
",",
"'invalidity_date'",
",",
"'certificate_issuer'",
",",
"'1.3.6.1.5.5.7.48.1.3'",
",",
"'1.3.6.1.5.5.7.48.1.6'",
",",
"'2.5.29.21'",
",",
"'2.5.29.24'",
",",
"'2.5.29.29'",
"]",
")",
"if",
"name",
"in",
"response_data_extension_oids",
":",
"name",
"=",
"ocsp",
".",
"ResponseDataExtensionId",
"(",
"name",
")",
"elif",
"name",
"in",
"single_response_extension_oids",
":",
"name",
"=",
"ocsp",
".",
"SingleResponseExtensionId",
"(",
"name",
")",
"else",
":",
"raise",
"ValueError",
"(",
"_pretty_message",
"(",
"'''\n name must be a unicode string from\n asn1crypto.ocsp.ResponseDataExtensionId or\n asn1crypto.ocsp.SingleResponseExtensionId, not %s\n '''",
",",
"repr",
"(",
"name",
")",
")",
")",
"if",
"isinstance",
"(",
"name",
",",
"ocsp",
".",
"ResponseDataExtensionId",
")",
":",
"extension",
"=",
"ocsp",
".",
"ResponseDataExtension",
"(",
"{",
"'extn_id'",
":",
"name",
"}",
")",
"elif",
"isinstance",
"(",
"name",
",",
"ocsp",
".",
"SingleResponseExtensionId",
")",
":",
"extension",
"=",
"ocsp",
".",
"SingleResponseExtension",
"(",
"{",
"'extn_id'",
":",
"name",
"}",
")",
"else",
":",
"raise",
"TypeError",
"(",
"_pretty_message",
"(",
"'''\n name must be a unicode string or an instance of\n asn1crypto.ocsp.SingleResponseExtensionId or\n asn1crypto.ocsp.ResponseDataExtensionId, not %s\n '''",
",",
"_type_name",
"(",
"name",
")",
")",
")",
"# We use native here to convert OIDs to meaningful names",
"name",
"=",
"extension",
"[",
"'extn_id'",
"]",
".",
"native",
"spec",
"=",
"extension",
".",
"spec",
"(",
"'extn_value'",
")",
"if",
"name",
"==",
"'nonce'",
":",
"raise",
"ValueError",
"(",
"_pretty_message",
"(",
"'''\n The nonce value should be set via the .nonce attribute, not the\n .set_extension() method\n '''",
")",
")",
"if",
"name",
"==",
"'crl_reason'",
":",
"raise",
"ValueError",
"(",
"_pretty_message",
"(",
"'''\n The crl_reason value should be set via the certificate_status\n parameter of the OCSPResponseBuilder() constructor, not the\n .set_extension() method\n '''",
")",
")",
"if",
"name",
"==",
"'certificate_issuer'",
":",
"raise",
"ValueError",
"(",
"_pretty_message",
"(",
"'''\n The certificate_issuer value should be set via the\n .certificate_issuer attribute, not the .set_extension() method\n '''",
")",
")",
"if",
"not",
"isinstance",
"(",
"value",
",",
"spec",
")",
"and",
"value",
"is",
"not",
"None",
":",
"raise",
"TypeError",
"(",
"_pretty_message",
"(",
"'''\n value must be an instance of %s, not %s\n '''",
",",
"_type_name",
"(",
"spec",
")",
",",
"_type_name",
"(",
"value",
")",
")",
")",
"if",
"isinstance",
"(",
"extension",
",",
"ocsp",
".",
"ResponseDataExtension",
")",
":",
"extn_dict",
"=",
"self",
".",
"_response_data_extensions",
"else",
":",
"extn_dict",
"=",
"self",
".",
"_single_response_extensions",
"if",
"value",
"is",
"None",
":",
"if",
"name",
"in",
"extn_dict",
":",
"del",
"extn_dict",
"[",
"name",
"]",
"else",
":",
"extn_dict",
"[",
"name",
"]",
"=",
"value"
] | 35.192 | 19.896 |
def get_splits_in_period(self, start: Datum, end: Datum) -> List[Split]:
""" returns splits only up to the given date """
# from gnucash_portfolio.lib import generic
query = (
self.book.session.query(Split)
.join(Transaction)
.filter(Split.account == self.account,
Transaction.post_date >= start.value.date(),
Transaction.post_date <= end.value.date()
)
)
# sql = generic.get_sql(query)
return query.all() | [
"def",
"get_splits_in_period",
"(",
"self",
",",
"start",
":",
"Datum",
",",
"end",
":",
"Datum",
")",
"->",
"List",
"[",
"Split",
"]",
":",
"# from gnucash_portfolio.lib import generic",
"query",
"=",
"(",
"self",
".",
"book",
".",
"session",
".",
"query",
"(",
"Split",
")",
".",
"join",
"(",
"Transaction",
")",
".",
"filter",
"(",
"Split",
".",
"account",
"==",
"self",
".",
"account",
",",
"Transaction",
".",
"post_date",
">=",
"start",
".",
"value",
".",
"date",
"(",
")",
",",
"Transaction",
".",
"post_date",
"<=",
"end",
".",
"value",
".",
"date",
"(",
")",
")",
")",
"# sql = generic.get_sql(query)",
"return",
"query",
".",
"all",
"(",
")"
] | 38.357143 | 17.071429 |
def findSensor(self, sensors, sensor_name, device_type = None):
"""
Find a sensor in the provided list of sensors
@param sensors (list) - List of sensors to search in
@param sensor_name (string) - Name of sensor to find
@param device_type (string) - Device type of sensor to find, can be None
@return (string) - sensor_id of sensor or None if not found
"""
if device_type == None:
for sensor in sensors:
if sensor['name'] == sensor_name:
return sensor['id']
else:
for sensor in sensors:
if sensor['name'] == sensor_name and sensor['device_type'] == device_type:
return sensor['id']
return None | [
"def",
"findSensor",
"(",
"self",
",",
"sensors",
",",
"sensor_name",
",",
"device_type",
"=",
"None",
")",
":",
"if",
"device_type",
"==",
"None",
":",
"for",
"sensor",
"in",
"sensors",
":",
"if",
"sensor",
"[",
"'name'",
"]",
"==",
"sensor_name",
":",
"return",
"sensor",
"[",
"'id'",
"]",
"else",
":",
"for",
"sensor",
"in",
"sensors",
":",
"if",
"sensor",
"[",
"'name'",
"]",
"==",
"sensor_name",
"and",
"sensor",
"[",
"'device_type'",
"]",
"==",
"device_type",
":",
"return",
"sensor",
"[",
"'id'",
"]",
"return",
"None"
] | 38.904762 | 20.333333 |
def send_group_file(self, sender, receiver, media_id):
"""
发送群聊文件消息
:param sender: 发送人
:param receiver: 会话 ID
:param media_id: 文件id,可以调用上传素材文件接口获取, 文件须大于4字节
:return: 返回的 JSON 数据包
"""
return self.send_file(sender, 'group', receiver, media_id) | [
"def",
"send_group_file",
"(",
"self",
",",
"sender",
",",
"receiver",
",",
"media_id",
")",
":",
"return",
"self",
".",
"send_file",
"(",
"sender",
",",
"'group'",
",",
"receiver",
",",
"media_id",
")"
] | 29.7 | 15.3 |
def _get_roles_for_request(request, application):
""" Check the authentication of the current user. """
roles = application.get_roles_for_person(request.user)
if common.is_admin(request):
roles.add("is_admin")
roles.add('is_authorised')
return roles | [
"def",
"_get_roles_for_request",
"(",
"request",
",",
"application",
")",
":",
"roles",
"=",
"application",
".",
"get_roles_for_person",
"(",
"request",
".",
"user",
")",
"if",
"common",
".",
"is_admin",
"(",
"request",
")",
":",
"roles",
".",
"add",
"(",
"\"is_admin\"",
")",
"roles",
".",
"add",
"(",
"'is_authorised'",
")",
"return",
"roles"
] | 33.222222 | 16 |
def clear_socket(self):
'''
delete socket if you have it
'''
if hasattr(self, '_socket'):
if isinstance(self.poller.sockets, dict):
sockets = list(self.poller.sockets.keys())
for socket in sockets:
log.trace('Unregistering socket: %s', socket)
self.poller.unregister(socket)
else:
for socket in self.poller.sockets:
log.trace('Unregistering socket: %s', socket)
self.poller.unregister(socket[0])
del self._socket | [
"def",
"clear_socket",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'_socket'",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"poller",
".",
"sockets",
",",
"dict",
")",
":",
"sockets",
"=",
"list",
"(",
"self",
".",
"poller",
".",
"sockets",
".",
"keys",
"(",
")",
")",
"for",
"socket",
"in",
"sockets",
":",
"log",
".",
"trace",
"(",
"'Unregistering socket: %s'",
",",
"socket",
")",
"self",
".",
"poller",
".",
"unregister",
"(",
"socket",
")",
"else",
":",
"for",
"socket",
"in",
"self",
".",
"poller",
".",
"sockets",
":",
"log",
".",
"trace",
"(",
"'Unregistering socket: %s'",
",",
"socket",
")",
"self",
".",
"poller",
".",
"unregister",
"(",
"socket",
"[",
"0",
"]",
")",
"del",
"self",
".",
"_socket"
] | 39.6 | 15.6 |
def listen_tta(self, target, timeout):
"""Listen *timeout* seconds for a Type A activation at 106 kbps. The
``sens_res``, ``sdd_res``, and ``sel_res`` response data must
be provided and ``sdd_res`` must be a 4 byte UID that starts
with ``08h``. Depending on ``sel_res`` an activation may
return a target with a ``tt2_cmd``, ``tt4_cmd`` or ``atr_req``
attribute. The default RATS response sent for a Type 4 Tag
activation can be replaced with a ``rats_res`` attribute.
"""
return super(Device, self).listen_tta(target, timeout) | [
"def",
"listen_tta",
"(",
"self",
",",
"target",
",",
"timeout",
")",
":",
"return",
"super",
"(",
"Device",
",",
"self",
")",
".",
"listen_tta",
"(",
"target",
",",
"timeout",
")"
] | 53.545455 | 20.545455 |
def update_routing_table_from(self, *routers):
""" Try to update routing tables with the given routers.
:return: True if the routing table is successfully updated, otherwise False
"""
for router in routers:
new_routing_table = self.fetch_routing_table(router)
if new_routing_table is not None:
self.routing_table.update(new_routing_table)
return True
return False | [
"def",
"update_routing_table_from",
"(",
"self",
",",
"*",
"routers",
")",
":",
"for",
"router",
"in",
"routers",
":",
"new_routing_table",
"=",
"self",
".",
"fetch_routing_table",
"(",
"router",
")",
"if",
"new_routing_table",
"is",
"not",
"None",
":",
"self",
".",
"routing_table",
".",
"update",
"(",
"new_routing_table",
")",
"return",
"True",
"return",
"False"
] | 40.909091 | 16.454545 |
def _validate_type(self): # type: () -> None
"""Validation to ensure value is the correct type"""
if not isinstance(self._value, self._type):
title = '{} has an invalid type'.format(self._key_name())
description = '{} must be a {}'.format(self._key_name(), self._type.__name__)
self._add_error(title=title, description=description) | [
"def",
"_validate_type",
"(",
"self",
")",
":",
"# type: () -> None",
"if",
"not",
"isinstance",
"(",
"self",
".",
"_value",
",",
"self",
".",
"_type",
")",
":",
"title",
"=",
"'{} has an invalid type'",
".",
"format",
"(",
"self",
".",
"_key_name",
"(",
")",
")",
"description",
"=",
"'{} must be a {}'",
".",
"format",
"(",
"self",
".",
"_key_name",
"(",
")",
",",
"self",
".",
"_type",
".",
"__name__",
")",
"self",
".",
"_add_error",
"(",
"title",
"=",
"title",
",",
"description",
"=",
"description",
")"
] | 54.142857 | 22.714286 |
def add_dockwidget(self, child):
"""Add QDockWidget and toggleViewAction"""
dockwidget, location = child.create_dockwidget()
if CONF.get('main', 'vertical_dockwidget_titlebars'):
dockwidget.setFeatures(dockwidget.features()|
QDockWidget.DockWidgetVerticalTitleBar)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(child) | [
"def",
"add_dockwidget",
"(",
"self",
",",
"child",
")",
":",
"dockwidget",
",",
"location",
"=",
"child",
".",
"create_dockwidget",
"(",
")",
"if",
"CONF",
".",
"get",
"(",
"'main'",
",",
"'vertical_dockwidget_titlebars'",
")",
":",
"dockwidget",
".",
"setFeatures",
"(",
"dockwidget",
".",
"features",
"(",
")",
"|",
"QDockWidget",
".",
"DockWidgetVerticalTitleBar",
")",
"self",
".",
"addDockWidget",
"(",
"location",
",",
"dockwidget",
")",
"self",
".",
"widgetlist",
".",
"append",
"(",
"child",
")"
] | 52.75 | 13.875 |
def get_bad_rows_and_cols(df, validation_names, type_col_names,
value_col_names, verbose=False):
"""
Input: validated DataFrame, all validation names, names of the type columns,
names of the value columns, verbose (True or False).
Output: list of rows with bad values, list of columns with bad values,
list of missing (but required) columns.
"""
df["num"] = list(range(len(df)))
problems = df[validation_names.union(["num"])]
all_problems = problems.dropna(how='all', axis=0, subset=validation_names)
value_problems = problems.dropna(how='all', axis=0, subset=type_col_names.union(value_col_names))
all_problems = all_problems.dropna(how='all', axis=1)
value_problems = value_problems.dropna(how='all', axis=1)
if not len(problems):
return None, None, None
#
bad_cols = all_problems.columns
prefixes = ["value_pass_", "type_pass_"]
missing_prefix = "presence_pass_"
problem_cols = []
missing_cols = []
long_missing_cols = []
problem_rows = []
for col in bad_cols:
pre, stripped_col = extract_col_name(col)
for prefix in prefixes:
if col.startswith(prefix):
problem_cols.append(stripped_col)
continue
if col.startswith(missing_prefix):
missing_cols.append(stripped_col)
long_missing_cols.append(col)
if len(value_problems):
bad_rows = list(zip(list(value_problems["num"]), list(value_problems.index)))
else:
bad_rows = []
if verbose:
if bad_rows:
formatted_rows = ["row: {}, name: {}".format(row[0], row[1]) for row in bad_rows]
if len(bad_rows) > 5:
print("-W- these rows have problems:\n", "\n".join(formatted_rows[:5]), " ...")
print("(for full error output see error file)")
else:
print("-W- these rows have problems:", "\n".join(formatted_rows))
if problem_cols:
print("-W- these columns contain bad values:", ", ".join(set(problem_cols)))
if missing_cols:
print("-W- these required columns are missing:", ", ".join(missing_cols))
return bad_rows, problem_cols, missing_cols | [
"def",
"get_bad_rows_and_cols",
"(",
"df",
",",
"validation_names",
",",
"type_col_names",
",",
"value_col_names",
",",
"verbose",
"=",
"False",
")",
":",
"df",
"[",
"\"num\"",
"]",
"=",
"list",
"(",
"range",
"(",
"len",
"(",
"df",
")",
")",
")",
"problems",
"=",
"df",
"[",
"validation_names",
".",
"union",
"(",
"[",
"\"num\"",
"]",
")",
"]",
"all_problems",
"=",
"problems",
".",
"dropna",
"(",
"how",
"=",
"'all'",
",",
"axis",
"=",
"0",
",",
"subset",
"=",
"validation_names",
")",
"value_problems",
"=",
"problems",
".",
"dropna",
"(",
"how",
"=",
"'all'",
",",
"axis",
"=",
"0",
",",
"subset",
"=",
"type_col_names",
".",
"union",
"(",
"value_col_names",
")",
")",
"all_problems",
"=",
"all_problems",
".",
"dropna",
"(",
"how",
"=",
"'all'",
",",
"axis",
"=",
"1",
")",
"value_problems",
"=",
"value_problems",
".",
"dropna",
"(",
"how",
"=",
"'all'",
",",
"axis",
"=",
"1",
")",
"if",
"not",
"len",
"(",
"problems",
")",
":",
"return",
"None",
",",
"None",
",",
"None",
"#",
"bad_cols",
"=",
"all_problems",
".",
"columns",
"prefixes",
"=",
"[",
"\"value_pass_\"",
",",
"\"type_pass_\"",
"]",
"missing_prefix",
"=",
"\"presence_pass_\"",
"problem_cols",
"=",
"[",
"]",
"missing_cols",
"=",
"[",
"]",
"long_missing_cols",
"=",
"[",
"]",
"problem_rows",
"=",
"[",
"]",
"for",
"col",
"in",
"bad_cols",
":",
"pre",
",",
"stripped_col",
"=",
"extract_col_name",
"(",
"col",
")",
"for",
"prefix",
"in",
"prefixes",
":",
"if",
"col",
".",
"startswith",
"(",
"prefix",
")",
":",
"problem_cols",
".",
"append",
"(",
"stripped_col",
")",
"continue",
"if",
"col",
".",
"startswith",
"(",
"missing_prefix",
")",
":",
"missing_cols",
".",
"append",
"(",
"stripped_col",
")",
"long_missing_cols",
".",
"append",
"(",
"col",
")",
"if",
"len",
"(",
"value_problems",
")",
":",
"bad_rows",
"=",
"list",
"(",
"zip",
"(",
"list",
"(",
"value_problems",
"[",
"\"num\"",
"]",
")",
",",
"list",
"(",
"value_problems",
".",
"index",
")",
")",
")",
"else",
":",
"bad_rows",
"=",
"[",
"]",
"if",
"verbose",
":",
"if",
"bad_rows",
":",
"formatted_rows",
"=",
"[",
"\"row: {}, name: {}\"",
".",
"format",
"(",
"row",
"[",
"0",
"]",
",",
"row",
"[",
"1",
"]",
")",
"for",
"row",
"in",
"bad_rows",
"]",
"if",
"len",
"(",
"bad_rows",
")",
">",
"5",
":",
"print",
"(",
"\"-W- these rows have problems:\\n\"",
",",
"\"\\n\"",
".",
"join",
"(",
"formatted_rows",
"[",
":",
"5",
"]",
")",
",",
"\" ...\"",
")",
"print",
"(",
"\"(for full error output see error file)\"",
")",
"else",
":",
"print",
"(",
"\"-W- these rows have problems:\"",
",",
"\"\\n\"",
".",
"join",
"(",
"formatted_rows",
")",
")",
"if",
"problem_cols",
":",
"print",
"(",
"\"-W- these columns contain bad values:\"",
",",
"\", \"",
".",
"join",
"(",
"set",
"(",
"problem_cols",
")",
")",
")",
"if",
"missing_cols",
":",
"print",
"(",
"\"-W- these required columns are missing:\"",
",",
"\", \"",
".",
"join",
"(",
"missing_cols",
")",
")",
"return",
"bad_rows",
",",
"problem_cols",
",",
"missing_cols"
] | 44.14 | 19.66 |
def _read_result(self):
"""Parse read a response from the AGI and parse it.
:return dict: The AGI response parsed into a dict.
"""
response = yield from self.reader.readline()
return parse_agi_result(response.decode(self.encoding)[:-1]) | [
"def",
"_read_result",
"(",
"self",
")",
":",
"response",
"=",
"yield",
"from",
"self",
".",
"reader",
".",
"readline",
"(",
")",
"return",
"parse_agi_result",
"(",
"response",
".",
"decode",
"(",
"self",
".",
"encoding",
")",
"[",
":",
"-",
"1",
"]",
")"
] | 38.714286 | 16.428571 |
def options(self, context, module_options):
'''
LHOST IP hosting the handler
LPORT Handler port
PAYLOAD Payload to inject: reverse_http or reverse_https (default: reverse_https)
PROCID Process ID to inject into (default: current powershell process)
'''
self.met_payload = 'reverse_https'
self.procid = None
if not 'LHOST' in module_options or not 'LPORT' in module_options:
context.log.error('LHOST and LPORT options are required!')
exit(1)
if 'PAYLOAD' in module_options:
self.met_payload = module_options['PAYLOAD']
if 'PROCID' in module_options:
self.procid = module_options['PROCID']
self.lhost = module_options['LHOST']
self.lport = module_options['LPORT']
self.ps_script = obfs_ps_script('powersploit/CodeExecution/Invoke-Shellcode.ps1') | [
"def",
"options",
"(",
"self",
",",
"context",
",",
"module_options",
")",
":",
"self",
".",
"met_payload",
"=",
"'reverse_https'",
"self",
".",
"procid",
"=",
"None",
"if",
"not",
"'LHOST'",
"in",
"module_options",
"or",
"not",
"'LPORT'",
"in",
"module_options",
":",
"context",
".",
"log",
".",
"error",
"(",
"'LHOST and LPORT options are required!'",
")",
"exit",
"(",
"1",
")",
"if",
"'PAYLOAD'",
"in",
"module_options",
":",
"self",
".",
"met_payload",
"=",
"module_options",
"[",
"'PAYLOAD'",
"]",
"if",
"'PROCID'",
"in",
"module_options",
":",
"self",
".",
"procid",
"=",
"module_options",
"[",
"'PROCID'",
"]",
"self",
".",
"lhost",
"=",
"module_options",
"[",
"'LHOST'",
"]",
"self",
".",
"lport",
"=",
"module_options",
"[",
"'LPORT'",
"]",
"self",
".",
"ps_script",
"=",
"obfs_ps_script",
"(",
"'powersploit/CodeExecution/Invoke-Shellcode.ps1'",
")"
] | 36.4 | 23.84 |
def _em_conversion(orig_units, conv_data, to_units=None, unit_system=None):
"""Convert between E&M & MKS base units.
If orig_units is a CGS (or MKS) E&M unit, conv_data contains the
corresponding MKS (or CGS) unit and scale factor converting between them.
This must be done by replacing the expression of the original unit
with the new one in the unit expression and multiplying by the scale
factor.
"""
conv_unit, canonical_unit, scale = conv_data
if conv_unit is None:
conv_unit = canonical_unit
new_expr = scale * canonical_unit.expr
if unit_system is not None:
# we don't know the to_units, so we get it directly from the
# conv_data
to_units = Unit(conv_unit.expr, registry=orig_units.registry)
new_units = Unit(new_expr, registry=orig_units.registry)
conv = new_units.get_conversion_factor(to_units)
return to_units, conv | [
"def",
"_em_conversion",
"(",
"orig_units",
",",
"conv_data",
",",
"to_units",
"=",
"None",
",",
"unit_system",
"=",
"None",
")",
":",
"conv_unit",
",",
"canonical_unit",
",",
"scale",
"=",
"conv_data",
"if",
"conv_unit",
"is",
"None",
":",
"conv_unit",
"=",
"canonical_unit",
"new_expr",
"=",
"scale",
"*",
"canonical_unit",
".",
"expr",
"if",
"unit_system",
"is",
"not",
"None",
":",
"# we don't know the to_units, so we get it directly from the",
"# conv_data",
"to_units",
"=",
"Unit",
"(",
"conv_unit",
".",
"expr",
",",
"registry",
"=",
"orig_units",
".",
"registry",
")",
"new_units",
"=",
"Unit",
"(",
"new_expr",
",",
"registry",
"=",
"orig_units",
".",
"registry",
")",
"conv",
"=",
"new_units",
".",
"get_conversion_factor",
"(",
"to_units",
")",
"return",
"to_units",
",",
"conv"
] | 44.85 | 19.8 |
def pre_dissect(self, s):
"""
Decrypt, verify and decompress the message.
"""
if len(s) < 5:
raise Exception("Invalid record: header is too short.")
if isinstance(self.tls_session.rcs.cipher, Cipher_NULL):
self.deciphered_len = None
return s
else:
msglen = struct.unpack('!H', s[3:5])[0]
hdr, efrag, r = s[:5], s[5:5 + msglen], s[msglen + 5:]
frag, auth_tag = self._tls_auth_decrypt(efrag)
self.deciphered_len = len(frag)
return hdr + frag + auth_tag + r | [
"def",
"pre_dissect",
"(",
"self",
",",
"s",
")",
":",
"if",
"len",
"(",
"s",
")",
"<",
"5",
":",
"raise",
"Exception",
"(",
"\"Invalid record: header is too short.\"",
")",
"if",
"isinstance",
"(",
"self",
".",
"tls_session",
".",
"rcs",
".",
"cipher",
",",
"Cipher_NULL",
")",
":",
"self",
".",
"deciphered_len",
"=",
"None",
"return",
"s",
"else",
":",
"msglen",
"=",
"struct",
".",
"unpack",
"(",
"'!H'",
",",
"s",
"[",
"3",
":",
"5",
"]",
")",
"[",
"0",
"]",
"hdr",
",",
"efrag",
",",
"r",
"=",
"s",
"[",
":",
"5",
"]",
",",
"s",
"[",
"5",
":",
"5",
"+",
"msglen",
"]",
",",
"s",
"[",
"msglen",
"+",
"5",
":",
"]",
"frag",
",",
"auth_tag",
"=",
"self",
".",
"_tls_auth_decrypt",
"(",
"efrag",
")",
"self",
".",
"deciphered_len",
"=",
"len",
"(",
"frag",
")",
"return",
"hdr",
"+",
"frag",
"+",
"auth_tag",
"+",
"r"
] | 36.5 | 15.375 |
def get_checksum_by_target(self, target):
""" returns a checksum of a specific kind """
for csum in self.checksums:
if csum.target == target:
return csum
return None | [
"def",
"get_checksum_by_target",
"(",
"self",
",",
"target",
")",
":",
"for",
"csum",
"in",
"self",
".",
"checksums",
":",
"if",
"csum",
".",
"target",
"==",
"target",
":",
"return",
"csum",
"return",
"None"
] | 35.333333 | 7.166667 |
def calculateOptionPrice(self, reqId, contract, volatility, underPrice):
"""calculateOptionPrice(EClient self, TickerId reqId, Contract contract, double volatility, double underPrice)"""
return _swigibpy.EClient_calculateOptionPrice(self, reqId, contract, volatility, underPrice) | [
"def",
"calculateOptionPrice",
"(",
"self",
",",
"reqId",
",",
"contract",
",",
"volatility",
",",
"underPrice",
")",
":",
"return",
"_swigibpy",
".",
"EClient_calculateOptionPrice",
"(",
"self",
",",
"reqId",
",",
"contract",
",",
"volatility",
",",
"underPrice",
")"
] | 97.666667 | 30.666667 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.