text
stringlengths 89
104k
| code_tokens
list | avg_line_len
float64 7.91
980
| score
float64 0
630
|
|---|---|---|---|
def BinaryBool(pred):
    """Lift a two-argument predicate into the DSL as a ``Bool`` subclass.

    :param pred: callable ``pred(item, value)`` returning truthy on a match.
    :return: a ``Predicate`` class; instances hold a comparison ``value`` and,
        when called with data (scalar or list), return True if any item
        satisfies ``pred``.
    """
    class Predicate(Bool):
        def __init__(self, value, ignore_case=False):
            # Normalise the stored comparison value once, up front.
            self.value = caseless(value) if ignore_case else value
            self.ignore_case = ignore_case

        def __call__(self, data):
            # Accept a scalar or a list; treat a scalar as a one-item list.
            if not isinstance(data, list):
                data = [data]
            for d in data:
                try:
                    if pred(caseless(d) if self.ignore_case else d, self.value):
                        return True
                except Exception:
                    # Best-effort semantics: an item that cannot be compared
                    # simply does not match.  Was a bare ``except:``, which
                    # also trapped KeyboardInterrupt/SystemExit; ``Exception``
                    # keeps the intended swallow while letting those escape.
                    pass
            return False
    return Predicate
|
[
"def",
"BinaryBool",
"(",
"pred",
")",
":",
"class",
"Predicate",
"(",
"Bool",
")",
":",
"def",
"__init__",
"(",
"self",
",",
"value",
",",
"ignore_case",
"=",
"False",
")",
":",
"self",
".",
"value",
"=",
"caseless",
"(",
"value",
")",
"if",
"ignore_case",
"else",
"value",
"self",
".",
"ignore_case",
"=",
"ignore_case",
"def",
"__call__",
"(",
"self",
",",
"data",
")",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"data",
"=",
"[",
"data",
"]",
"for",
"d",
"in",
"data",
":",
"try",
":",
"if",
"pred",
"(",
"caseless",
"(",
"d",
")",
"if",
"self",
".",
"ignore_case",
"else",
"d",
",",
"self",
".",
"value",
")",
":",
"return",
"True",
"except",
":",
"pass",
"return",
"False",
"return",
"Predicate"
] | 34.888889
| 15.666667
|
def get_files_crc32(self):
    """Return the filename -> CRC32 mapping, computing it lazily.

    On first use (cache dict still empty) every file from ``get_files()``
    is run through ``_get_crc32`` to populate the cache.

    :returns: dict of filename: CRC32
    """
    # Only an exactly-empty dict triggers (re)computation.
    if self.files_crc32 == {}:
        for filename in self.get_files():
            self._get_crc32(filename)
    return self.files_crc32
|
[
"def",
"get_files_crc32",
"(",
"self",
")",
":",
"if",
"self",
".",
"files_crc32",
"==",
"{",
"}",
":",
"for",
"i",
"in",
"self",
".",
"get_files",
"(",
")",
":",
"self",
".",
"_get_crc32",
"(",
"i",
")",
"return",
"self",
".",
"files_crc32"
] | 26.545455
| 13.090909
|
def CaptureToImage(self, savePath: str, x: int = 0, y: int = 0, width: int = 0, height: int = 0) -> bool:
    """
    Capture control to a image file.
    savePath: str, should end with .bmp, .jpg, .jpeg, .png, .gif, .tif, .tiff.
    x, y: int, the point in control's internal position(from 0,0).
    width, height: int, image's width and height from x, y, use 0 for entire area.
                   If width(or height) < 0, image size will be control's width(or height) - width(or height).
    Return bool, True if succeed otherwise False.
    """
    snapshot = Bitmap()
    # Guard clause: if the grab from the control fails, there is nothing to save.
    if not snapshot.FromControl(self, x, y, width, height):
        return False
    return snapshot.ToFile(savePath)
|
[
"def",
"CaptureToImage",
"(",
"self",
",",
"savePath",
":",
"str",
",",
"x",
":",
"int",
"=",
"0",
",",
"y",
":",
"int",
"=",
"0",
",",
"width",
":",
"int",
"=",
"0",
",",
"height",
":",
"int",
"=",
"0",
")",
"->",
"bool",
":",
"bitmap",
"=",
"Bitmap",
"(",
")",
"if",
"bitmap",
".",
"FromControl",
"(",
"self",
",",
"x",
",",
"y",
",",
"width",
",",
"height",
")",
":",
"return",
"bitmap",
".",
"ToFile",
"(",
"savePath",
")",
"return",
"False"
] | 55
| 24.846154
|
def get_vnetwork_vswitches_input_datacenter(self, **kwargs):
    """Build the ``get_vnetwork_vswitches`` RPC element and dispatch it.

    Auto Generated Code.

    :param kwargs: must contain ``datacenter`` (str, required — ``pop``
        raises KeyError if missing); may contain ``callback`` (callable
        applied to the built element, defaults to ``self._callback``).
    :return: whatever the callback returns for the built element tree.
    """
    get_vnetwork_vswitches = ET.Element("get_vnetwork_vswitches")
    config = get_vnetwork_vswitches
    # Renamed from ``input`` (shadowed the builtin); the initial dead
    # ``config = ET.Element("config")`` assignment was dropped — it was
    # overwritten immediately and never used.
    input_el = ET.SubElement(get_vnetwork_vswitches, "input")
    datacenter = ET.SubElement(input_el, "datacenter")
    datacenter.text = kwargs.pop('datacenter')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
[
"def",
"get_vnetwork_vswitches_input_datacenter",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"get_vnetwork_vswitches",
"=",
"ET",
".",
"Element",
"(",
"\"get_vnetwork_vswitches\"",
")",
"config",
"=",
"get_vnetwork_vswitches",
"input",
"=",
"ET",
".",
"SubElement",
"(",
"get_vnetwork_vswitches",
",",
"\"input\"",
")",
"datacenter",
"=",
"ET",
".",
"SubElement",
"(",
"input",
",",
"\"datacenter\"",
")",
"datacenter",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'datacenter'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | 41.75
| 13.833333
|
def search_fast(self, text):
    """do a sloppy quick "search" via the json index"""
    # One GET against the package's JSON endpoint; no pagination, no retry.
    url = "{base_url}/{text}/json".format(base_url=self.base_url, text=text)
    resp = self.impl.get(url)
    info = resp.json()["info"]
    return info["package_url"]
|
[
"def",
"search_fast",
"(",
"self",
",",
"text",
")",
":",
"resp",
"=",
"self",
".",
"impl",
".",
"get",
"(",
"\"{base_url}/{text}/json\"",
".",
"format",
"(",
"base_url",
"=",
"self",
".",
"base_url",
",",
"text",
"=",
"text",
")",
")",
"return",
"resp",
".",
"json",
"(",
")",
"[",
"\"info\"",
"]",
"[",
"\"package_url\"",
"]"
] | 33.142857
| 20.142857
|
def add_step(self, step):
    """
    Adds a new step to the waterfall.
    :param step: Step to add
    :return: Waterfall dialog for fluent calls to `add_step()`.
    :raises TypeError: if step is falsy (e.g. None).
    """
    if step:
        self._steps.append(step)
        return self
    raise TypeError('WaterfallDialog.add_step(): step cannot be None.')
|
[
"def",
"add_step",
"(",
"self",
",",
"step",
")",
":",
"if",
"not",
"step",
":",
"raise",
"TypeError",
"(",
"'WaterfallDialog.add_step(): step cannot be None.'",
")",
"self",
".",
"_steps",
".",
"append",
"(",
"step",
")",
"return",
"self"
] | 29
| 17.333333
|
def check(context: click.Context, family: str):
    """Compare sex determinations and QC metrics for a family's analysis.

    NOTE(review): the previous docstring ("Delete an analysis log from the
    database") did not match the code.  What this command actually does:
    fetch the latest analysis for *family*, load its config, sample-info
    and qc-metrics files, collect per-sample sex predictions (pedigree,
    chanjo, peddy, plink) plus duplicate rates, and print them as a table.
    """
    # Latest stored analysis for this family; abort the CLI run if missing.
    analysis_obj = context.obj['store'].analyses(family=family).first()
    if analysis_obj is None:
        LOG.error('no analysis found')
        context.abort()
    config_path = Path(analysis_obj.config_path)
    if not config_path.exists():
        LOG.error(f"analysis config not found: {config_path}")
        context.abort()
    # Chain of files: analysis config -> sample info -> qc metrics; each
    # file holds the path of the next one.
    config_raw = ruamel.yaml.safe_load(config_path.open())
    config_data = files.parse_config(config_raw)
    sampleinfo_raw = ruamel.yaml.safe_load(Path(config_data['sampleinfo_path']).open())
    sampleinfo_data = files.parse_sampleinfo(sampleinfo_raw)
    qcmetrics_path = Path(sampleinfo_data['qcmetrics_path'])
    if not qcmetrics_path.exists():
        LOG.error(f"qc metrics not found: {str(qcmetrics_path)}")
        context.abort()
    qcmetrics_raw = ruamel.yaml.safe_load(qcmetrics_path.open())
    qcmetrics_data = files.parse_qcmetrics(qcmetrics_raw)
    # Column-oriented table: one list per column; rows are assumed to line
    # up by sample order across the three input files — TODO confirm.
    samples = {
        'sample': [],
        'type': [],
        'ped': [],
        'chanjo': [],
        'peddy': [],
        'plink': [],
        'duplicates': [],
    }
    for sample_data in config_data['samples']:
        LOG.debug(f"{sample_data['id']}: parse analysis config")
        samples['sample'].append(sample_data['id'])
        samples['type'].append(sample_data['type'])
    for sample_data in sampleinfo_data['samples']:
        LOG.debug(f"{sample_data['id']}: parse sample info")
        samples['ped'].append(sample_data['sex'])
        with Path(sample_data['chanjo_sexcheck']).open() as chanjo_handle:
            sexcheck_data = files.parse_chanjo_sexcheck(chanjo_handle)
        predicted_sex = sexcheck_data['predicted_sex']
        # Y/X coverage ratio is shown next to the chanjo sex call.
        xy_ratio = sexcheck_data['y_coverage'] / sexcheck_data['x_coverage']
        samples['chanjo'].append(f"{predicted_sex} ({xy_ratio:.3f})")
    for sample_data in qcmetrics_data['samples']:
        LOG.debug(f"{sample_data['id']}: parse qc metrics")
        samples['plink'].append(sample_data['plink_sex'])
        # 'duplicates' is presumably a fraction (x100 -> percent) — verify.
        duplicates_percent = sample_data['duplicates'] * 100
        samples['duplicates'].append(f"{duplicates_percent:.3f}%")
    peddy_path = Path(sampleinfo_data['peddy']['sex_check'])
    if peddy_path.exists():
        with peddy_path.open() as sexcheck_handle:
            peddy_data = files.parse_peddy_sexcheck(sexcheck_handle)
        for sample_id in samples['sample']:
            LOG.debug(f"{sample_id}: parse peddy")
            predicted_sex = peddy_data[sample_id]['predicted_sex']
            het_ratio = peddy_data[sample_id]['het_ratio']
            samples['peddy'].append(f"{predicted_sex} ({het_ratio})")
    else:
        # Best-effort: a missing peddy file only warns; the 'peddy' column
        # is then left empty rather than aborting the whole check.
        LOG.warning(f"missing peddy output: {peddy_path}")
    print(tabulate(samples, headers='keys', tablefmt='psql'))
|
[
"def",
"check",
"(",
"context",
":",
"click",
".",
"Context",
",",
"family",
":",
"str",
")",
":",
"analysis_obj",
"=",
"context",
".",
"obj",
"[",
"'store'",
"]",
".",
"analyses",
"(",
"family",
"=",
"family",
")",
".",
"first",
"(",
")",
"if",
"analysis_obj",
"is",
"None",
":",
"LOG",
".",
"error",
"(",
"'no analysis found'",
")",
"context",
".",
"abort",
"(",
")",
"config_path",
"=",
"Path",
"(",
"analysis_obj",
".",
"config_path",
")",
"if",
"not",
"config_path",
".",
"exists",
"(",
")",
":",
"LOG",
".",
"error",
"(",
"f\"analysis config not found: {config_path}\"",
")",
"context",
".",
"abort",
"(",
")",
"config_raw",
"=",
"ruamel",
".",
"yaml",
".",
"safe_load",
"(",
"config_path",
".",
"open",
"(",
")",
")",
"config_data",
"=",
"files",
".",
"parse_config",
"(",
"config_raw",
")",
"sampleinfo_raw",
"=",
"ruamel",
".",
"yaml",
".",
"safe_load",
"(",
"Path",
"(",
"config_data",
"[",
"'sampleinfo_path'",
"]",
")",
".",
"open",
"(",
")",
")",
"sampleinfo_data",
"=",
"files",
".",
"parse_sampleinfo",
"(",
"sampleinfo_raw",
")",
"qcmetrics_path",
"=",
"Path",
"(",
"sampleinfo_data",
"[",
"'qcmetrics_path'",
"]",
")",
"if",
"not",
"qcmetrics_path",
".",
"exists",
"(",
")",
":",
"LOG",
".",
"error",
"(",
"f\"qc metrics not found: {str(qcmetrics_path)}\"",
")",
"context",
".",
"abort",
"(",
")",
"qcmetrics_raw",
"=",
"ruamel",
".",
"yaml",
".",
"safe_load",
"(",
"qcmetrics_path",
".",
"open",
"(",
")",
")",
"qcmetrics_data",
"=",
"files",
".",
"parse_qcmetrics",
"(",
"qcmetrics_raw",
")",
"samples",
"=",
"{",
"'sample'",
":",
"[",
"]",
",",
"'type'",
":",
"[",
"]",
",",
"'ped'",
":",
"[",
"]",
",",
"'chanjo'",
":",
"[",
"]",
",",
"'peddy'",
":",
"[",
"]",
",",
"'plink'",
":",
"[",
"]",
",",
"'duplicates'",
":",
"[",
"]",
",",
"}",
"for",
"sample_data",
"in",
"config_data",
"[",
"'samples'",
"]",
":",
"LOG",
".",
"debug",
"(",
"f\"{sample_data['id']}: parse analysis config\"",
")",
"samples",
"[",
"'sample'",
"]",
".",
"append",
"(",
"sample_data",
"[",
"'id'",
"]",
")",
"samples",
"[",
"'type'",
"]",
".",
"append",
"(",
"sample_data",
"[",
"'type'",
"]",
")",
"for",
"sample_data",
"in",
"sampleinfo_data",
"[",
"'samples'",
"]",
":",
"LOG",
".",
"debug",
"(",
"f\"{sample_data['id']}: parse sample info\"",
")",
"samples",
"[",
"'ped'",
"]",
".",
"append",
"(",
"sample_data",
"[",
"'sex'",
"]",
")",
"with",
"Path",
"(",
"sample_data",
"[",
"'chanjo_sexcheck'",
"]",
")",
".",
"open",
"(",
")",
"as",
"chanjo_handle",
":",
"sexcheck_data",
"=",
"files",
".",
"parse_chanjo_sexcheck",
"(",
"chanjo_handle",
")",
"predicted_sex",
"=",
"sexcheck_data",
"[",
"'predicted_sex'",
"]",
"xy_ratio",
"=",
"sexcheck_data",
"[",
"'y_coverage'",
"]",
"/",
"sexcheck_data",
"[",
"'x_coverage'",
"]",
"samples",
"[",
"'chanjo'",
"]",
".",
"append",
"(",
"f\"{predicted_sex} ({xy_ratio:.3f})\"",
")",
"for",
"sample_data",
"in",
"qcmetrics_data",
"[",
"'samples'",
"]",
":",
"LOG",
".",
"debug",
"(",
"f\"{sample_data['id']}: parse qc metrics\"",
")",
"samples",
"[",
"'plink'",
"]",
".",
"append",
"(",
"sample_data",
"[",
"'plink_sex'",
"]",
")",
"duplicates_percent",
"=",
"sample_data",
"[",
"'duplicates'",
"]",
"*",
"100",
"samples",
"[",
"'duplicates'",
"]",
".",
"append",
"(",
"f\"{duplicates_percent:.3f}%\"",
")",
"peddy_path",
"=",
"Path",
"(",
"sampleinfo_data",
"[",
"'peddy'",
"]",
"[",
"'sex_check'",
"]",
")",
"if",
"peddy_path",
".",
"exists",
"(",
")",
":",
"with",
"peddy_path",
".",
"open",
"(",
")",
"as",
"sexcheck_handle",
":",
"peddy_data",
"=",
"files",
".",
"parse_peddy_sexcheck",
"(",
"sexcheck_handle",
")",
"for",
"sample_id",
"in",
"samples",
"[",
"'sample'",
"]",
":",
"LOG",
".",
"debug",
"(",
"f\"{sample_id}: parse peddy\"",
")",
"predicted_sex",
"=",
"peddy_data",
"[",
"sample_id",
"]",
"[",
"'predicted_sex'",
"]",
"het_ratio",
"=",
"peddy_data",
"[",
"sample_id",
"]",
"[",
"'het_ratio'",
"]",
"samples",
"[",
"'peddy'",
"]",
".",
"append",
"(",
"f\"{predicted_sex} ({het_ratio})\"",
")",
"else",
":",
"LOG",
".",
"warning",
"(",
"f\"missing peddy output: {peddy_path}\"",
")",
"print",
"(",
"tabulate",
"(",
"samples",
",",
"headers",
"=",
"'keys'",
",",
"tablefmt",
"=",
"'psql'",
")",
")"
] | 40.115942
| 21.927536
|
def wait_gone(self, timeout=None, raise_error=True):
    """Poll until the element disappears or the timeout elapses.

    Args:
        timeout (float): seconds to wait; None or <= 0 uses ``self.timeout``
        raise_error (bool): return bool or raise error
    Returns:
        bool: works when raise_error is False
    Raises:
        WDAElementNotDisappearError
    """
    if timeout is None or timeout <= 0:
        timeout = self.timeout
    deadline = time.time() + timeout
    # Busy-poll ``self.exists`` until it turns False or time runs out.
    while time.time() < deadline:
        if not self.exists:
            return True
    if not raise_error:
        return False
    raise WDAElementNotDisappearError("element not gone")
|
[
"def",
"wait_gone",
"(",
"self",
",",
"timeout",
"=",
"None",
",",
"raise_error",
"=",
"True",
")",
":",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"if",
"timeout",
"is",
"None",
"or",
"timeout",
"<=",
"0",
":",
"timeout",
"=",
"self",
".",
"timeout",
"while",
"start_time",
"+",
"timeout",
">",
"time",
".",
"time",
"(",
")",
":",
"if",
"not",
"self",
".",
"exists",
":",
"return",
"True",
"if",
"not",
"raise_error",
":",
"return",
"False",
"raise",
"WDAElementNotDisappearError",
"(",
"\"element not gone\"",
")"
] | 30.666667
| 13.809524
|
def _inherit_docstrings(parent, excluded=None):
    """Creates a decorator which overwrites a decorated class' __doc__
    attribute with parent's __doc__ attribute. Also overwrites __doc__ of
    methods and properties defined in the class with the __doc__ of matching
    methods and properties in parent.

    Args:
        parent (object): Class from which the decorated class inherits __doc__.
        excluded (list): List of parent objects from which the class does not
            inherit docstrings.  Defaults to no exclusions.  (Changed from a
            mutable ``[]`` default to ``None`` to avoid the shared-mutable-
            default pitfall; behavior for all callers is unchanged.)

    Returns:
        function: decorator which replaces the decorated class' documentation
        parent's documentation.
    """
    if excluded is None:
        excluded = []

    def decorator(cls):
        if parent not in excluded:
            cls.__doc__ = parent.__doc__
        for attr, obj in cls.__dict__.items():
            parent_obj = getattr(parent, attr, None)
            # Skip excluded attributes and anything on the parent that is
            # neither callable nor a property (covers missing -> None too).
            if parent_obj in excluded or (
                not callable(parent_obj) and not isinstance(parent_obj, property)
            ):
                continue
            if callable(obj):
                obj.__doc__ = parent_obj.__doc__
            elif isinstance(obj, property) and obj.fget is not None:
                # property objects are immutable; rebuild one with the
                # same accessors but the parent's docstring.
                p = property(obj.fget, obj.fset, obj.fdel, parent_obj.__doc__)
                setattr(cls, attr, p)
        return cls
    return decorator
|
[
"def",
"_inherit_docstrings",
"(",
"parent",
",",
"excluded",
"=",
"[",
"]",
")",
":",
"def",
"decorator",
"(",
"cls",
")",
":",
"if",
"parent",
"not",
"in",
"excluded",
":",
"cls",
".",
"__doc__",
"=",
"parent",
".",
"__doc__",
"for",
"attr",
",",
"obj",
"in",
"cls",
".",
"__dict__",
".",
"items",
"(",
")",
":",
"parent_obj",
"=",
"getattr",
"(",
"parent",
",",
"attr",
",",
"None",
")",
"if",
"parent_obj",
"in",
"excluded",
"or",
"(",
"not",
"callable",
"(",
"parent_obj",
")",
"and",
"not",
"isinstance",
"(",
"parent_obj",
",",
"property",
")",
")",
":",
"continue",
"if",
"callable",
"(",
"obj",
")",
":",
"obj",
".",
"__doc__",
"=",
"parent_obj",
".",
"__doc__",
"elif",
"isinstance",
"(",
"obj",
",",
"property",
")",
"and",
"obj",
".",
"fget",
"is",
"not",
"None",
":",
"p",
"=",
"property",
"(",
"obj",
".",
"fget",
",",
"obj",
".",
"fset",
",",
"obj",
".",
"fdel",
",",
"parent_obj",
".",
"__doc__",
")",
"setattr",
"(",
"cls",
",",
"attr",
",",
"p",
")",
"return",
"cls",
"return",
"decorator"
] | 38.848485
| 20.575758
|
def _union_indexes(indexes, sort=True):
    """
    Return the union of indexes.
    The behavior of sort and names is not consistent.
    Parameters
    ----------
    indexes : list of Index or list objects
    sort : bool, default True
        Whether the result index should come out sorted or not.
    Returns
    -------
    Index
    """
    if len(indexes) == 0:
        raise AssertionError('Must have at least 1 Index to union')
    # Single operand: nothing to union; a raw list is promoted to an Index
    # (sorted unconditionally, regardless of the ``sort`` argument).
    if len(indexes) == 1:
        result = indexes[0]
        if isinstance(result, list):
            result = Index(sorted(result))
        return result
    # ``kind`` drives the dispatch below: 'special', 'array' or 'list'.
    indexes, kind = _sanitize_and_check(indexes)
    def _unique_indices(inds):
        """
        Convert indexes to lists and concatenate them, removing duplicates.
        The final dtype is inferred.
        Parameters
        ----------
        inds : list of Index or list objects
        Returns
        -------
        Index
        """
        def conv(i):
            if isinstance(i, Index):
                i = i.tolist()
            return i
        return Index(
            lib.fast_unique_multiple_list([conv(i) for i in inds], sort=sort))
    if kind == 'special':
        result = indexes[0]
        # Prefer the bulk union when the index type supports it.
        if hasattr(result, 'union_many'):
            return result.union_many(indexes[1:])
        else:
            for other in indexes[1:]:
                result = result.union(other)
            return result
    elif kind == 'array':
        index = indexes[0]
        # If any index differs, fall back to the generic unique-union path;
        # note the early return from inside the loop.
        for other in indexes[1:]:
            if not index.equals(other):
                if sort is None:
                    # TODO: remove once pd.concat sort default changes
                    warnings.warn(_sort_msg, FutureWarning, stacklevel=8)
                    sort = True
                return _unique_indices(indexes)
        # All indexes equal: keep the first, reconciling only the name.
        name = _get_consensus_names(indexes)[0]
        if name != index.name:
            index = index._shallow_copy(name=name)
        return index
    else:  # kind='list'
        return _unique_indices(indexes)
|
[
"def",
"_union_indexes",
"(",
"indexes",
",",
"sort",
"=",
"True",
")",
":",
"if",
"len",
"(",
"indexes",
")",
"==",
"0",
":",
"raise",
"AssertionError",
"(",
"'Must have at least 1 Index to union'",
")",
"if",
"len",
"(",
"indexes",
")",
"==",
"1",
":",
"result",
"=",
"indexes",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"result",
",",
"list",
")",
":",
"result",
"=",
"Index",
"(",
"sorted",
"(",
"result",
")",
")",
"return",
"result",
"indexes",
",",
"kind",
"=",
"_sanitize_and_check",
"(",
"indexes",
")",
"def",
"_unique_indices",
"(",
"inds",
")",
":",
"\"\"\"\n Convert indexes to lists and concatenate them, removing duplicates.\n\n The final dtype is inferred.\n\n Parameters\n ----------\n inds : list of Index or list objects\n\n Returns\n -------\n Index\n \"\"\"",
"def",
"conv",
"(",
"i",
")",
":",
"if",
"isinstance",
"(",
"i",
",",
"Index",
")",
":",
"i",
"=",
"i",
".",
"tolist",
"(",
")",
"return",
"i",
"return",
"Index",
"(",
"lib",
".",
"fast_unique_multiple_list",
"(",
"[",
"conv",
"(",
"i",
")",
"for",
"i",
"in",
"inds",
"]",
",",
"sort",
"=",
"sort",
")",
")",
"if",
"kind",
"==",
"'special'",
":",
"result",
"=",
"indexes",
"[",
"0",
"]",
"if",
"hasattr",
"(",
"result",
",",
"'union_many'",
")",
":",
"return",
"result",
".",
"union_many",
"(",
"indexes",
"[",
"1",
":",
"]",
")",
"else",
":",
"for",
"other",
"in",
"indexes",
"[",
"1",
":",
"]",
":",
"result",
"=",
"result",
".",
"union",
"(",
"other",
")",
"return",
"result",
"elif",
"kind",
"==",
"'array'",
":",
"index",
"=",
"indexes",
"[",
"0",
"]",
"for",
"other",
"in",
"indexes",
"[",
"1",
":",
"]",
":",
"if",
"not",
"index",
".",
"equals",
"(",
"other",
")",
":",
"if",
"sort",
"is",
"None",
":",
"# TODO: remove once pd.concat sort default changes",
"warnings",
".",
"warn",
"(",
"_sort_msg",
",",
"FutureWarning",
",",
"stacklevel",
"=",
"8",
")",
"sort",
"=",
"True",
"return",
"_unique_indices",
"(",
"indexes",
")",
"name",
"=",
"_get_consensus_names",
"(",
"indexes",
")",
"[",
"0",
"]",
"if",
"name",
"!=",
"index",
".",
"name",
":",
"index",
"=",
"index",
".",
"_shallow_copy",
"(",
"name",
"=",
"name",
")",
"return",
"index",
"else",
":",
"# kind='list'",
"return",
"_unique_indices",
"(",
"indexes",
")"
] | 26.146667
| 18.973333
|
def get(self):
    """Return the sparkline."""
    line = sparklines(self.percents)[0]
    if self.__with_text:
        # Append the most recent non-None percentage, if any exists.
        values = [p for p in self.percents if p is not None]
        if values:
            line = '{}{:5.1f}%'.format(line, values[-1])
    return nativestr(line)
|
[
"def",
"get",
"(",
"self",
")",
":",
"ret",
"=",
"sparklines",
"(",
"self",
".",
"percents",
")",
"[",
"0",
"]",
"if",
"self",
".",
"__with_text",
":",
"percents_without_none",
"=",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"percents",
"if",
"x",
"is",
"not",
"None",
"]",
"if",
"len",
"(",
"percents_without_none",
")",
">",
"0",
":",
"ret",
"=",
"'{}{:5.1f}%'",
".",
"format",
"(",
"ret",
",",
"percents_without_none",
"[",
"-",
"1",
"]",
")",
"return",
"nativestr",
"(",
"ret",
")"
] | 43.25
| 16.125
|
def read(self, page):
    """Send a READ command to retrieve data from the tag.
    The *page* argument specifies the offset in multiples of 4
    bytes (i.e. page number 1 will return bytes 4 to 19). The data
    returned is a byte array of length 16 or None if the block is
    outside the readable memory range.
    Command execution errors raise :exc:`Type2TagCommandError`.
    """
    log.debug("read pages {0} to {1}".format(page, page+3))
    # READ frame: command byte 0x30 followed by a one-byte page number
    # (wrapped modulo 256).  Byte-string framing — Python 2 style str.
    data = self.transceive("\x30"+chr(page % 256), timeout=0.005)
    # A single-byte reply with all bits outside 0x05 clear (values 0,1,4,5)
    # is treated as a NAK — NOTE(review): presumably the Type 2 Tag NAK
    # codes; confirm against the NFC Forum spec.
    if len(data) == 1 and data[0] & 0xFA == 0x00:
        log.debug("received nak response")
        # Re-sense the target so later commands can still work; sel_req is
        # restored from a copy of sdd_res before sensing.
        self.target.sel_req = self.target.sdd_res[:]
        self._target = self.clf.sense(self.target)
        # If the tag is still present the page was invalid, otherwise the
        # transmission itself failed.
        raise Type2TagCommandError(
            INVALID_PAGE_ERROR if self.target else nfc.tag.RECEIVE_ERROR)
    # A READ must return exactly 4 pages = 16 bytes.
    if len(data) != 16:
        log.debug("invalid response " + hexlify(data))
        raise Type2TagCommandError(INVALID_RESPONSE_ERROR)
    return data
|
[
"def",
"read",
"(",
"self",
",",
"page",
")",
":",
"log",
".",
"debug",
"(",
"\"read pages {0} to {1}\"",
".",
"format",
"(",
"page",
",",
"page",
"+",
"3",
")",
")",
"data",
"=",
"self",
".",
"transceive",
"(",
"\"\\x30\"",
"+",
"chr",
"(",
"page",
"%",
"256",
")",
",",
"timeout",
"=",
"0.005",
")",
"if",
"len",
"(",
"data",
")",
"==",
"1",
"and",
"data",
"[",
"0",
"]",
"&",
"0xFA",
"==",
"0x00",
":",
"log",
".",
"debug",
"(",
"\"received nak response\"",
")",
"self",
".",
"target",
".",
"sel_req",
"=",
"self",
".",
"target",
".",
"sdd_res",
"[",
":",
"]",
"self",
".",
"_target",
"=",
"self",
".",
"clf",
".",
"sense",
"(",
"self",
".",
"target",
")",
"raise",
"Type2TagCommandError",
"(",
"INVALID_PAGE_ERROR",
"if",
"self",
".",
"target",
"else",
"nfc",
".",
"tag",
".",
"RECEIVE_ERROR",
")",
"if",
"len",
"(",
"data",
")",
"!=",
"16",
":",
"log",
".",
"debug",
"(",
"\"invalid response \"",
"+",
"hexlify",
"(",
"data",
")",
")",
"raise",
"Type2TagCommandError",
"(",
"INVALID_RESPONSE_ERROR",
")",
"return",
"data"
] | 38.148148
| 23.185185
|
def validate_stream(stream):
    """
    Check that the stream name is well-formed.
    """
    # Valid means: matches the stream regex AND is not over-length.
    is_valid = STREAM_REGEX.match(stream) and len(stream) <= MAX_STREAM_LENGTH
    if not is_valid:
        raise InvalidStreamName(stream)
|
[
"def",
"validate_stream",
"(",
"stream",
")",
":",
"if",
"not",
"STREAM_REGEX",
".",
"match",
"(",
"stream",
")",
"or",
"len",
"(",
"stream",
")",
">",
"MAX_STREAM_LENGTH",
":",
"raise",
"InvalidStreamName",
"(",
"stream",
")"
] | 31.333333
| 8.666667
|
def get_operation(cls, morph_type):
    """ Maps morphological operation type to function
    :param morph_type: Morphological operation type
    :type morph_type: MorphologicalOperations
    :return: function
    :raises KeyError: if morph_type is not one of the mapped members
    """
    operations = {
        cls.OPENING: skimage.morphology.opening,
        cls.CLOSING: skimage.morphology.closing,
        cls.DILATION: skimage.morphology.dilation,
        cls.EROSION: skimage.morphology.erosion,
        cls.MEDIAN: skimage.filters.rank.median
    }
    return operations[morph_type]
|
[
"def",
"get_operation",
"(",
"cls",
",",
"morph_type",
")",
":",
"return",
"{",
"cls",
".",
"OPENING",
":",
"skimage",
".",
"morphology",
".",
"opening",
",",
"cls",
".",
"CLOSING",
":",
"skimage",
".",
"morphology",
".",
"closing",
",",
"cls",
".",
"DILATION",
":",
"skimage",
".",
"morphology",
".",
"dilation",
",",
"cls",
".",
"EROSION",
":",
"skimage",
".",
"morphology",
".",
"erosion",
",",
"cls",
".",
"MEDIAN",
":",
"skimage",
".",
"filters",
".",
"rank",
".",
"median",
"}",
"[",
"morph_type",
"]"
] | 38.785714
| 13.642857
|
def mnist_tutorial_cw(train_start=0, train_end=60000, test_start=0,
                      test_end=10000, viz_enabled=VIZ_ENABLED,
                      nb_epochs=NB_EPOCHS, batch_size=BATCH_SIZE,
                      source_samples=SOURCE_SAMPLES,
                      learning_rate=LEARNING_RATE,
                      attack_iterations=ATTACK_ITERATIONS,
                      model_path=MODEL_PATH,
                      targeted=TARGETED):
    """
    MNIST tutorial for Carlini and Wagner's attack.

    Trains (or reloads) a small CNN on MNIST, then crafts adversarial
    examples with the Carlini-Wagner L2 attack and reports clean and
    adversarial accuracies.

    :param train_start: index of first training set example
    :param train_end: index of last training set example
    :param test_start: index of first test set example
    :param test_end: index of last test set example
    :param viz_enabled: (boolean) activate plots of adversarial examples
    :param nb_epochs: number of epochs to train model
    :param batch_size: size of training batches
    :param source_samples: number of test inputs to attack
    :param learning_rate: learning rate for training
    :param attack_iterations: number of optimization iterations the CW
        attack runs per example
    :param model_path: path to the model file
    :param targeted: should we run a targeted attack? or untargeted?
    :return: an AccuracyReport object
    """
    # Object used to keep track of (and return) key accuracies
    report = AccuracyReport()

    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)

    # Create TF session
    sess = tf.Session()
    print("Created TensorFlow session.")

    set_log_level(logging.DEBUG)

    # Get MNIST test data
    mnist = MNIST(train_start=train_start, train_end=train_end,
                  test_start=test_start, test_end=test_end)
    x_train, y_train = mnist.get_set('train')
    x_test, y_test = mnist.get_set('test')

    # Obtain Image Parameters
    img_rows, img_cols, nchannels = x_train.shape[1:4]
    nb_classes = y_train.shape[1]

    # Define input TF placeholder
    x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols,
                                          nchannels))
    y = tf.placeholder(tf.float32, shape=(None, nb_classes))
    nb_filters = 64

    # Define TF model graph
    model = ModelBasicCNN('model1', nb_classes, nb_filters)
    preds = model.get_logits(x)
    loss = CrossEntropy(model, smoothing=0.1)
    print("Defined TensorFlow model graph.")

    ###########################################################################
    # Training the model using TensorFlow
    ###########################################################################

    # Train an MNIST model
    train_params = {
        'nb_epochs': nb_epochs,
        'batch_size': batch_size,
        'learning_rate': learning_rate,
        'filename': os.path.split(model_path)[-1]
    }

    rng = np.random.RandomState([2017, 8, 30])
    # check if we've trained before, and if we have, use that pre-trained model
    if os.path.exists(model_path + ".meta"):
        tf_model_load(sess, model_path)
    else:
        train(sess, loss, x_train, y_train, args=train_params, rng=rng)
        saver = tf.train.Saver()
        saver.save(sess, model_path)

    # Evaluate the accuracy of the MNIST model on legitimate test examples
    eval_params = {'batch_size': batch_size}
    accuracy = model_eval(sess, x, y, preds, x_test, y_test, args=eval_params)
    assert x_test.shape[0] == test_end - test_start, x_test.shape
    print('Test accuracy on legitimate test examples: {0}'.format(accuracy))
    report.clean_train_clean_eval = accuracy

    ###########################################################################
    # Craft adversarial examples using Carlini and Wagner's approach
    ###########################################################################
    # For a targeted attack each source image is attacked once per wrong
    # class, hence nb_classes - 1 adversarial examples per sample.
    nb_adv_per_sample = str(nb_classes - 1) if targeted else '1'
    print('Crafting ' + str(source_samples) + ' * ' + nb_adv_per_sample +
          ' adversarial examples')
    print("This could take some time ...")

    # Instantiate a CW attack object
    cw = CarliniWagnerL2(model, sess=sess)

    if viz_enabled:
        # Pick the first test example of each class so the visualization
        # grid has exactly one source image per class.
        assert source_samples == nb_classes
        idxs = [np.where(np.argmax(y_test, axis=1) == i)[0][0]
                for i in range(nb_classes)]
    if targeted:
        if viz_enabled:
            # Initialize our array for grid visualization
            grid_shape = (nb_classes, nb_classes, img_rows, img_cols,
                          nchannels)
            grid_viz_data = np.zeros(grid_shape, dtype='f')

            # Replicate each source image nb_classes times, one copy per
            # target class.
            adv_inputs = np.array(
                [[instance] * nb_classes for instance in x_test[idxs]],
                dtype=np.float32)
        else:
            adv_inputs = np.array(
                [[instance] * nb_classes for
                 instance in x_test[:source_samples]], dtype=np.float32)

        # Identity matrix: row i is the one-hot target label for class i.
        one_hot = np.zeros((nb_classes, nb_classes))
        one_hot[np.arange(nb_classes), np.arange(nb_classes)] = 1

        adv_inputs = adv_inputs.reshape(
            (source_samples * nb_classes, img_rows, img_cols, nchannels))
        adv_ys = np.array([one_hot] * source_samples,
                          dtype=np.float32).reshape((source_samples *
                                                     nb_classes, nb_classes))
        yname = "y_target"
    else:
        if viz_enabled:
            # Initialize our array for grid visualization
            grid_shape = (nb_classes, 2, img_rows, img_cols, nchannels)
            grid_viz_data = np.zeros(grid_shape, dtype='f')

            adv_inputs = x_test[idxs]
        else:
            adv_inputs = x_test[:source_samples]

        adv_ys = None
        yname = "y"

    if targeted:
        cw_params_batch_size = source_samples * nb_classes
    else:
        cw_params_batch_size = source_samples
    cw_params = {'binary_search_steps': 1,
                 yname: adv_ys,
                 'max_iterations': attack_iterations,
                 'learning_rate': CW_LEARNING_RATE,
                 'batch_size': cw_params_batch_size,
                 'initial_const': 10}

    adv = cw.generate_np(adv_inputs,
                         **cw_params)

    eval_params = {'batch_size': np.minimum(nb_classes, source_samples)}
    if targeted:
        # For a targeted attack, accuracy w.r.t. the target labels is the
        # attack success rate.
        adv_accuracy = model_eval(
            sess, x, y, preds, adv, adv_ys, args=eval_params)
    else:
        if viz_enabled:
            err = model_eval(sess, x, y, preds, adv, y_test[idxs], args=eval_params)
            adv_accuracy = 1 - err
        else:
            err = model_eval(sess, x, y, preds, adv, y_test[:source_samples],
                             args=eval_params)
            adv_accuracy = 1 - err

    if viz_enabled:
        # Fill the visualization grid: rows are source images, columns are
        # targets (targeted) or the clean/adversarial pair (untargeted).
        for j in range(nb_classes):
            if targeted:
                for i in range(nb_classes):
                    grid_viz_data[i, j] = adv[i * nb_classes + j]
            else:
                grid_viz_data[j, 0] = adv_inputs[j]
                grid_viz_data[j, 1] = adv[j]
        print(grid_viz_data.shape)

    print('--------------------------------------')

    # Compute the number of adversarial examples that were successfully found
    print('Avg. rate of successful adv. examples {0:.4f}'.format(adv_accuracy))
    report.clean_train_adv_eval = 1. - adv_accuracy

    # Compute the average distortion introduced by the algorithm
    percent_perturbed = np.mean(np.sum((adv - adv_inputs)**2,
                                       axis=(1, 2, 3))**.5)
    print('Avg. L_2 norm of perturbations {0:.4f}'.format(percent_perturbed))

    # Close TF session
    sess.close()

    # Finally, block & display a grid of all the adversarial examples
    if viz_enabled:
        _ = grid_visual(grid_viz_data)

    return report
|
[
"def",
"mnist_tutorial_cw",
"(",
"train_start",
"=",
"0",
",",
"train_end",
"=",
"60000",
",",
"test_start",
"=",
"0",
",",
"test_end",
"=",
"10000",
",",
"viz_enabled",
"=",
"VIZ_ENABLED",
",",
"nb_epochs",
"=",
"NB_EPOCHS",
",",
"batch_size",
"=",
"BATCH_SIZE",
",",
"source_samples",
"=",
"SOURCE_SAMPLES",
",",
"learning_rate",
"=",
"LEARNING_RATE",
",",
"attack_iterations",
"=",
"ATTACK_ITERATIONS",
",",
"model_path",
"=",
"MODEL_PATH",
",",
"targeted",
"=",
"TARGETED",
")",
":",
"# Object used to keep track of (and return) key accuracies",
"report",
"=",
"AccuracyReport",
"(",
")",
"# Set TF random seed to improve reproducibility",
"tf",
".",
"set_random_seed",
"(",
"1234",
")",
"# Create TF session",
"sess",
"=",
"tf",
".",
"Session",
"(",
")",
"print",
"(",
"\"Created TensorFlow session.\"",
")",
"set_log_level",
"(",
"logging",
".",
"DEBUG",
")",
"# Get MNIST test data",
"mnist",
"=",
"MNIST",
"(",
"train_start",
"=",
"train_start",
",",
"train_end",
"=",
"train_end",
",",
"test_start",
"=",
"test_start",
",",
"test_end",
"=",
"test_end",
")",
"x_train",
",",
"y_train",
"=",
"mnist",
".",
"get_set",
"(",
"'train'",
")",
"x_test",
",",
"y_test",
"=",
"mnist",
".",
"get_set",
"(",
"'test'",
")",
"# Obtain Image Parameters",
"img_rows",
",",
"img_cols",
",",
"nchannels",
"=",
"x_train",
".",
"shape",
"[",
"1",
":",
"4",
"]",
"nb_classes",
"=",
"y_train",
".",
"shape",
"[",
"1",
"]",
"# Define input TF placeholder",
"x",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"shape",
"=",
"(",
"None",
",",
"img_rows",
",",
"img_cols",
",",
"nchannels",
")",
")",
"y",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"shape",
"=",
"(",
"None",
",",
"nb_classes",
")",
")",
"nb_filters",
"=",
"64",
"# Define TF model graph",
"model",
"=",
"ModelBasicCNN",
"(",
"'model1'",
",",
"nb_classes",
",",
"nb_filters",
")",
"preds",
"=",
"model",
".",
"get_logits",
"(",
"x",
")",
"loss",
"=",
"CrossEntropy",
"(",
"model",
",",
"smoothing",
"=",
"0.1",
")",
"print",
"(",
"\"Defined TensorFlow model graph.\"",
")",
"###########################################################################",
"# Training the model using TensorFlow",
"###########################################################################",
"# Train an MNIST model",
"train_params",
"=",
"{",
"'nb_epochs'",
":",
"nb_epochs",
",",
"'batch_size'",
":",
"batch_size",
",",
"'learning_rate'",
":",
"learning_rate",
",",
"'filename'",
":",
"os",
".",
"path",
".",
"split",
"(",
"model_path",
")",
"[",
"-",
"1",
"]",
"}",
"rng",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"[",
"2017",
",",
"8",
",",
"30",
"]",
")",
"# check if we've trained before, and if we have, use that pre-trained model",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"model_path",
"+",
"\".meta\"",
")",
":",
"tf_model_load",
"(",
"sess",
",",
"model_path",
")",
"else",
":",
"train",
"(",
"sess",
",",
"loss",
",",
"x_train",
",",
"y_train",
",",
"args",
"=",
"train_params",
",",
"rng",
"=",
"rng",
")",
"saver",
"=",
"tf",
".",
"train",
".",
"Saver",
"(",
")",
"saver",
".",
"save",
"(",
"sess",
",",
"model_path",
")",
"# Evaluate the accuracy of the MNIST model on legitimate test examples",
"eval_params",
"=",
"{",
"'batch_size'",
":",
"batch_size",
"}",
"accuracy",
"=",
"model_eval",
"(",
"sess",
",",
"x",
",",
"y",
",",
"preds",
",",
"x_test",
",",
"y_test",
",",
"args",
"=",
"eval_params",
")",
"assert",
"x_test",
".",
"shape",
"[",
"0",
"]",
"==",
"test_end",
"-",
"test_start",
",",
"x_test",
".",
"shape",
"print",
"(",
"'Test accuracy on legitimate test examples: {0}'",
".",
"format",
"(",
"accuracy",
")",
")",
"report",
".",
"clean_train_clean_eval",
"=",
"accuracy",
"###########################################################################",
"# Craft adversarial examples using Carlini and Wagner's approach",
"###########################################################################",
"nb_adv_per_sample",
"=",
"str",
"(",
"nb_classes",
"-",
"1",
")",
"if",
"targeted",
"else",
"'1'",
"print",
"(",
"'Crafting '",
"+",
"str",
"(",
"source_samples",
")",
"+",
"' * '",
"+",
"nb_adv_per_sample",
"+",
"' adversarial examples'",
")",
"print",
"(",
"\"This could take some time ...\"",
")",
"# Instantiate a CW attack object",
"cw",
"=",
"CarliniWagnerL2",
"(",
"model",
",",
"sess",
"=",
"sess",
")",
"if",
"viz_enabled",
":",
"assert",
"source_samples",
"==",
"nb_classes",
"idxs",
"=",
"[",
"np",
".",
"where",
"(",
"np",
".",
"argmax",
"(",
"y_test",
",",
"axis",
"=",
"1",
")",
"==",
"i",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"nb_classes",
")",
"]",
"if",
"targeted",
":",
"if",
"viz_enabled",
":",
"# Initialize our array for grid visualization",
"grid_shape",
"=",
"(",
"nb_classes",
",",
"nb_classes",
",",
"img_rows",
",",
"img_cols",
",",
"nchannels",
")",
"grid_viz_data",
"=",
"np",
".",
"zeros",
"(",
"grid_shape",
",",
"dtype",
"=",
"'f'",
")",
"adv_inputs",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"instance",
"]",
"*",
"nb_classes",
"for",
"instance",
"in",
"x_test",
"[",
"idxs",
"]",
"]",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"else",
":",
"adv_inputs",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"instance",
"]",
"*",
"nb_classes",
"for",
"instance",
"in",
"x_test",
"[",
":",
"source_samples",
"]",
"]",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"one_hot",
"=",
"np",
".",
"zeros",
"(",
"(",
"nb_classes",
",",
"nb_classes",
")",
")",
"one_hot",
"[",
"np",
".",
"arange",
"(",
"nb_classes",
")",
",",
"np",
".",
"arange",
"(",
"nb_classes",
")",
"]",
"=",
"1",
"adv_inputs",
"=",
"adv_inputs",
".",
"reshape",
"(",
"(",
"source_samples",
"*",
"nb_classes",
",",
"img_rows",
",",
"img_cols",
",",
"nchannels",
")",
")",
"adv_ys",
"=",
"np",
".",
"array",
"(",
"[",
"one_hot",
"]",
"*",
"source_samples",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
".",
"reshape",
"(",
"(",
"source_samples",
"*",
"nb_classes",
",",
"nb_classes",
")",
")",
"yname",
"=",
"\"y_target\"",
"else",
":",
"if",
"viz_enabled",
":",
"# Initialize our array for grid visualization",
"grid_shape",
"=",
"(",
"nb_classes",
",",
"2",
",",
"img_rows",
",",
"img_cols",
",",
"nchannels",
")",
"grid_viz_data",
"=",
"np",
".",
"zeros",
"(",
"grid_shape",
",",
"dtype",
"=",
"'f'",
")",
"adv_inputs",
"=",
"x_test",
"[",
"idxs",
"]",
"else",
":",
"adv_inputs",
"=",
"x_test",
"[",
":",
"source_samples",
"]",
"adv_ys",
"=",
"None",
"yname",
"=",
"\"y\"",
"if",
"targeted",
":",
"cw_params_batch_size",
"=",
"source_samples",
"*",
"nb_classes",
"else",
":",
"cw_params_batch_size",
"=",
"source_samples",
"cw_params",
"=",
"{",
"'binary_search_steps'",
":",
"1",
",",
"yname",
":",
"adv_ys",
",",
"'max_iterations'",
":",
"attack_iterations",
",",
"'learning_rate'",
":",
"CW_LEARNING_RATE",
",",
"'batch_size'",
":",
"cw_params_batch_size",
",",
"'initial_const'",
":",
"10",
"}",
"adv",
"=",
"cw",
".",
"generate_np",
"(",
"adv_inputs",
",",
"*",
"*",
"cw_params",
")",
"eval_params",
"=",
"{",
"'batch_size'",
":",
"np",
".",
"minimum",
"(",
"nb_classes",
",",
"source_samples",
")",
"}",
"if",
"targeted",
":",
"adv_accuracy",
"=",
"model_eval",
"(",
"sess",
",",
"x",
",",
"y",
",",
"preds",
",",
"adv",
",",
"adv_ys",
",",
"args",
"=",
"eval_params",
")",
"else",
":",
"if",
"viz_enabled",
":",
"err",
"=",
"model_eval",
"(",
"sess",
",",
"x",
",",
"y",
",",
"preds",
",",
"adv",
",",
"y_test",
"[",
"idxs",
"]",
",",
"args",
"=",
"eval_params",
")",
"adv_accuracy",
"=",
"1",
"-",
"err",
"else",
":",
"err",
"=",
"model_eval",
"(",
"sess",
",",
"x",
",",
"y",
",",
"preds",
",",
"adv",
",",
"y_test",
"[",
":",
"source_samples",
"]",
",",
"args",
"=",
"eval_params",
")",
"adv_accuracy",
"=",
"1",
"-",
"err",
"if",
"viz_enabled",
":",
"for",
"j",
"in",
"range",
"(",
"nb_classes",
")",
":",
"if",
"targeted",
":",
"for",
"i",
"in",
"range",
"(",
"nb_classes",
")",
":",
"grid_viz_data",
"[",
"i",
",",
"j",
"]",
"=",
"adv",
"[",
"i",
"*",
"nb_classes",
"+",
"j",
"]",
"else",
":",
"grid_viz_data",
"[",
"j",
",",
"0",
"]",
"=",
"adv_inputs",
"[",
"j",
"]",
"grid_viz_data",
"[",
"j",
",",
"1",
"]",
"=",
"adv",
"[",
"j",
"]",
"print",
"(",
"grid_viz_data",
".",
"shape",
")",
"print",
"(",
"'--------------------------------------'",
")",
"# Compute the number of adversarial examples that were successfully found",
"print",
"(",
"'Avg. rate of successful adv. examples {0:.4f}'",
".",
"format",
"(",
"adv_accuracy",
")",
")",
"report",
".",
"clean_train_adv_eval",
"=",
"1.",
"-",
"adv_accuracy",
"# Compute the average distortion introduced by the algorithm",
"percent_perturbed",
"=",
"np",
".",
"mean",
"(",
"np",
".",
"sum",
"(",
"(",
"adv",
"-",
"adv_inputs",
")",
"**",
"2",
",",
"axis",
"=",
"(",
"1",
",",
"2",
",",
"3",
")",
")",
"**",
".5",
")",
"print",
"(",
"'Avg. L_2 norm of perturbations {0:.4f}'",
".",
"format",
"(",
"percent_perturbed",
")",
")",
"# Close TF session",
"sess",
".",
"close",
"(",
")",
"# Finally, block & display a grid of all the adversarial examples",
"if",
"viz_enabled",
":",
"_",
"=",
"grid_visual",
"(",
"grid_viz_data",
")",
"return",
"report"
] | 35.835897
| 19.774359
|
def translate(self, dx, dy):
    """
    Shift every polygon in this set by a fixed offset.

    Parameters
    ----------
    dx : number
        distance to move in the x-direction
    dy : number
        distance to move in the y-direction

    Returns
    -------
    out : ``PolygonSet``
        This object.
    """
    offset = numpy.array((dx, dy))
    shifted = [pts + offset for pts in self.polygons]
    self.polygons = shifted
    return self
|
[
"def",
"translate",
"(",
"self",
",",
"dx",
",",
"dy",
")",
":",
"vec",
"=",
"numpy",
".",
"array",
"(",
"(",
"dx",
",",
"dy",
")",
")",
"self",
".",
"polygons",
"=",
"[",
"points",
"+",
"vec",
"for",
"points",
"in",
"self",
".",
"polygons",
"]",
"return",
"self"
] | 24.789474
| 17.526316
|
def validate(self, value):
    '''Return a boolean indicating if the value is a valid URI'''
    # An empty string is accepted when blank values are allowed.
    if self.blank and value == '':
        return True
    if not URIValidator.uri_regex.match(value):
        self.error_message = '%s is not a valid URI' % value
        return False
    # Remember the accepted value for later retrieval.
    self._choice = value
    return True
|
[
"def",
"validate",
"(",
"self",
",",
"value",
")",
":",
"if",
"self",
".",
"blank",
"and",
"value",
"==",
"''",
":",
"return",
"True",
"if",
"URIValidator",
".",
"uri_regex",
".",
"match",
"(",
"value",
")",
":",
"self",
".",
"_choice",
"=",
"value",
"return",
"True",
"else",
":",
"self",
".",
"error_message",
"=",
"'%s is not a valid URI'",
"%",
"value",
"return",
"False"
] | 35.9
| 16.1
|
def loadBinaryItemContainer(zippedfile, jsonHook):
    """Imports binaryItems from a zipfile generated by
    :func:`writeBinaryItemContainer`.

    :param zippedfile: can be either a path to a file (a string) or a
        file-like object
    :param jsonHook: a custom decoding function for JSON formated strings of
        the binaryItems stored in the zipfile.

    :returns: a dictionary containing binaryItems
        ``{binaryItem.id: binaryItem, ... }``
    """
    loadedItems = dict()
    with zipfile.ZipFile(zippedfile, 'r') as containerZip:
        #containerZip.read() returns bytes, so wrap the metadata entry in a
        #TextIOWrapper to decode it into a str object.
        metadataText = io.TextIOWrapper(
            containerZip.open('metadata'), encoding='utf-8'
        ).read()
        allMetadata = json.loads(metadataText, object_hook=jsonHook)
        #Entries must be consumed in the order they were written, i.e. the
        #numeric order of their (string) keys.
        orderedKeys = [str(num) for num in
                       sorted(int(key) for key in viewkeys(allMetadata))]
        binarydataFile = containerZip.open('binarydata')
        for key in orderedKeys:
            entry = allMetadata[key]
            binaryItem = entry[0]
            for arrayMetadata in entry[1]:
                #Each metadata record gives the byte span of one array in
                #the sequential 'binarydata' stream.
                numBytes = arrayMetadata['end'] - arrayMetadata['start']
                rawdata = binarydataFile.read(numBytes)
                array = _arrayFromBytes(rawdata, arrayMetadata)
                binaryItem.arrays[arrayMetadata['arrayKey']] = array
            loadedItems[binaryItem.id] = binaryItem
    return loadedItems
|
[
"def",
"loadBinaryItemContainer",
"(",
"zippedfile",
",",
"jsonHook",
")",
":",
"binaryItemContainer",
"=",
"dict",
"(",
")",
"with",
"zipfile",
".",
"ZipFile",
"(",
"zippedfile",
",",
"'r'",
")",
"as",
"containerZip",
":",
"#Convert the zipfile data into a str object, necessary since",
"#containerZip.read() returns a bytes object.",
"metadataText",
"=",
"io",
".",
"TextIOWrapper",
"(",
"containerZip",
".",
"open",
"(",
"'metadata'",
")",
",",
"encoding",
"=",
"'utf-8'",
")",
".",
"read",
"(",
")",
"allMetadata",
"=",
"json",
".",
"loads",
"(",
"metadataText",
",",
"object_hook",
"=",
"jsonHook",
")",
"metadataIndex",
"=",
"[",
"str",
"(",
"_",
")",
"for",
"_",
"in",
"sorted",
"(",
"[",
"int",
"(",
"i",
")",
"for",
"i",
"in",
"viewkeys",
"(",
"allMetadata",
")",
"]",
")",
"]",
"binarydataFile",
"=",
"containerZip",
".",
"open",
"(",
"'binarydata'",
")",
"for",
"index",
"in",
"metadataIndex",
":",
"binaryItem",
"=",
"allMetadata",
"[",
"index",
"]",
"[",
"0",
"]",
"for",
"binaryMetadata",
"in",
"allMetadata",
"[",
"index",
"]",
"[",
"1",
"]",
":",
"arrayKey",
"=",
"binaryMetadata",
"[",
"'arrayKey'",
"]",
"rawdata",
"=",
"binarydataFile",
".",
"read",
"(",
"binaryMetadata",
"[",
"'end'",
"]",
"-",
"binaryMetadata",
"[",
"'start'",
"]",
")",
"array",
"=",
"_arrayFromBytes",
"(",
"rawdata",
",",
"binaryMetadata",
")",
"binaryItem",
".",
"arrays",
"[",
"arrayKey",
"]",
"=",
"array",
"binaryItemContainer",
"[",
"binaryItem",
".",
"id",
"]",
"=",
"binaryItem",
"return",
"binaryItemContainer"
] | 48.666667
| 17.305556
|
def add_serverconnection_methods(cls):
    """Add a bunch of methods to an :class:`irc.client.SimpleIRCClient`
    to send commands and messages.

    Basically it wraps a bunch of methods from
    :class:`irc.client.ServerConnection` to be
    :meth:`irc.schedule.IScheduler.execute_after`.
    That way, you can easily send, even if the IRCClient is running in
    :class:`IRCClient.process_forever` in another thread.
    On the plus side you can use positional and keyword arguments instead of just positional ones.

    :param cls: The class to add the methods do.
    :type cls: :class:`irc.client.SimpleIRCClient`
    :returns: the modified class
    """
    wrapped_names = ['action', 'admin', 'cap', 'ctcp', 'ctcp_reply',
                     'globops', 'info', 'invite', 'ison', 'join',
                     'kick', 'links', 'list', 'lusers', 'mode',
                     'motd', 'names', 'nick', 'notice', 'oper', 'part',
                     'part', 'pass_', 'ping', 'pong', 'privmsg',
                     'privmsg_many', 'quit', 'send_raw', 'squit',
                     'stats', 'time', 'topic', 'trace', 'user', 'userhost',
                     'users', 'version', 'wallops', 'who', 'whois', 'whowas']
    for name in wrapped_names:
        wrapper = _wrap_execute_after(name)
        # Carry over the original docstring so help() stays useful.
        original = getattr(irc.client.ServerConnection, name)
        wrapper.__doc__ = original.__doc__
        setattr(cls, wrapper.__name__, wrapper)
    return cls
|
[
"def",
"add_serverconnection_methods",
"(",
"cls",
")",
":",
"methods",
"=",
"[",
"'action'",
",",
"'admin'",
",",
"'cap'",
",",
"'ctcp'",
",",
"'ctcp_reply'",
",",
"'globops'",
",",
"'info'",
",",
"'invite'",
",",
"'ison'",
",",
"'join'",
",",
"'kick'",
",",
"'links'",
",",
"'list'",
",",
"'lusers'",
",",
"'mode'",
",",
"'motd'",
",",
"'names'",
",",
"'nick'",
",",
"'notice'",
",",
"'oper'",
",",
"'part'",
",",
"'part'",
",",
"'pass_'",
",",
"'ping'",
",",
"'pong'",
",",
"'privmsg'",
",",
"'privmsg_many'",
",",
"'quit'",
",",
"'send_raw'",
",",
"'squit'",
",",
"'stats'",
",",
"'time'",
",",
"'topic'",
",",
"'trace'",
",",
"'user'",
",",
"'userhost'",
",",
"'users'",
",",
"'version'",
",",
"'wallops'",
",",
"'who'",
",",
"'whois'",
",",
"'whowas'",
"]",
"for",
"m",
"in",
"methods",
":",
"method",
"=",
"_wrap_execute_after",
"(",
"m",
")",
"f",
"=",
"getattr",
"(",
"irc",
".",
"client",
".",
"ServerConnection",
",",
"m",
")",
"method",
".",
"__doc__",
"=",
"f",
".",
"__doc__",
"setattr",
"(",
"cls",
",",
"method",
".",
"__name__",
",",
"method",
")",
"return",
"cls"
] | 44.566667
| 18.1
|
def pack(self, value=None):
    """Pack this structure updating the length and padding it.

    Refreshes the length field before delegating to the parent's pack,
    then pads the serialized bytes out to a whole final byte.
    """
    self._update_length()
    serialized = super().pack()
    return self._complete_last_byte(serialized)
|
[
"def",
"pack",
"(",
"self",
",",
"value",
"=",
"None",
")",
":",
"self",
".",
"_update_length",
"(",
")",
"packet",
"=",
"super",
"(",
")",
".",
"pack",
"(",
")",
"return",
"self",
".",
"_complete_last_byte",
"(",
"packet",
")"
] | 40.6
| 8
|
def post_note(self):
    """ Post note and return the URL of the posted note """
    # A falsy title (unset/empty) is normalized to None.
    title = self.args.note_title or None
    note = self.pump.Note(display_name=title,
                          content=self.args.note_content)
    # Address the note to our followers and make it public.
    note.to = self.pump.me.followers
    note.cc = self.pump.Public
    note.send()
    return note.id or None
|
[
"def",
"post_note",
"(",
"self",
")",
":",
"if",
"self",
".",
"args",
".",
"note_title",
":",
"note_title",
"=",
"self",
".",
"args",
".",
"note_title",
"else",
":",
"note_title",
"=",
"None",
"note_content",
"=",
"self",
".",
"args",
".",
"note_content",
"mynote",
"=",
"self",
".",
"pump",
".",
"Note",
"(",
"display_name",
"=",
"note_title",
",",
"content",
"=",
"note_content",
")",
"mynote",
".",
"to",
"=",
"self",
".",
"pump",
".",
"me",
".",
"followers",
"mynote",
".",
"cc",
"=",
"self",
".",
"pump",
".",
"Public",
"mynote",
".",
"send",
"(",
")",
"return",
"mynote",
".",
"id",
"or",
"None"
] | 32.571429
| 16.214286
|
def make_app(global_conf, full_stack=True, static_files=True, **app_conf):
    """Create a Pylons WSGI application and return it

    ``global_conf``
        The inherited configuration for this application. Normally from
        the [DEFAULT] section of the Paste ini file.

    ``full_stack``
        Whether this application provides a full WSGI stack (by default,
        meaning it handles its own exceptions and errors). Disable
        full_stack when this application is "managed" by another WSGI
        middleware.

    ``static_files``
        Whether this application serves its own static files; disable
        when another web server is responsible for serving them.

    ``app_conf``
        The application's local configuration. Normally specified in
        the [app:<name>] section of the Paste ini file (where <name>
        defaults to main).
    """
    # Configure the Pylons environment
    config = load_environment(global_conf, app_conf)

    # The Pylons WSGI app
    app = PylonsApp(config=config)

    # Routing/Session Middleware
    # NOTE: middleware order matters here -- each assignment wraps the
    # previous app, so later-added layers see the request first.
    app = RoutesMiddleware(app, config['routes.map'], singleton=False)
    app = SessionMiddleware(app, config)

    # CUSTOM MIDDLEWARE HERE (filtered by error handling middlewares)

    if asbool(full_stack):
        # Handle Python exceptions
        app = ErrorHandler(app, global_conf, **config['pylons.errorware'])

        # Display error documents for 401, 403, 404 status codes (and
        # 500 when debug is disabled)
        if asbool(config['debug']):
            app = StatusCodeRedirect(app)
        else:
            app = StatusCodeRedirect(app, [400, 401, 403, 404, 500])

    # Establish the Registry for this application
    app = RegistryManager(app)

    if asbool(static_files):
        # Serve static files; Cascade tries the static app first and falls
        # back to the Pylons app when no file matches.
        static_app = StaticURLParser(config['pylons.paths']['static_files'])
        app = Cascade([static_app, app])
    app.config = config
    return app
|
[
"def",
"make_app",
"(",
"global_conf",
",",
"full_stack",
"=",
"True",
",",
"static_files",
"=",
"True",
",",
"*",
"*",
"app_conf",
")",
":",
"# Configure the Pylons environment",
"config",
"=",
"load_environment",
"(",
"global_conf",
",",
"app_conf",
")",
"# The Pylons WSGI app",
"app",
"=",
"PylonsApp",
"(",
"config",
"=",
"config",
")",
"# Routing/Session Middleware",
"app",
"=",
"RoutesMiddleware",
"(",
"app",
",",
"config",
"[",
"'routes.map'",
"]",
",",
"singleton",
"=",
"False",
")",
"app",
"=",
"SessionMiddleware",
"(",
"app",
",",
"config",
")",
"# CUSTOM MIDDLEWARE HERE (filtered by error handling middlewares)",
"if",
"asbool",
"(",
"full_stack",
")",
":",
"# Handle Python exceptions",
"app",
"=",
"ErrorHandler",
"(",
"app",
",",
"global_conf",
",",
"*",
"*",
"config",
"[",
"'pylons.errorware'",
"]",
")",
"# Display error documents for 401, 403, 404 status codes (and",
"# 500 when debug is disabled)",
"if",
"asbool",
"(",
"config",
"[",
"'debug'",
"]",
")",
":",
"app",
"=",
"StatusCodeRedirect",
"(",
"app",
")",
"else",
":",
"app",
"=",
"StatusCodeRedirect",
"(",
"app",
",",
"[",
"400",
",",
"401",
",",
"403",
",",
"404",
",",
"500",
"]",
")",
"# Establish the Registry for this application",
"app",
"=",
"RegistryManager",
"(",
"app",
")",
"if",
"asbool",
"(",
"static_files",
")",
":",
"# Serve static files",
"static_app",
"=",
"StaticURLParser",
"(",
"config",
"[",
"'pylons.paths'",
"]",
"[",
"'static_files'",
"]",
")",
"app",
"=",
"Cascade",
"(",
"[",
"static_app",
",",
"app",
"]",
")",
"app",
".",
"config",
"=",
"config",
"return",
"app"
] | 34.472727
| 22.654545
|
def get_idx_by_name(self, name):
    # type: (str) -> Optional[int]
    """ get_idx_by_name returns the index of a matching registered header

    This implementation will prefer returning a static entry index
    whenever possible. If multiple matching header names are found in the
    static table, the entry with the lowest index number is returned.
    If no matching header is found, this method returns None.

    :param name: the header name to look up; matching is case-insensitive
    :return: the header index, or None if the name is not registered
    """
    name = name.lower()
    static_entries = type(self)._static_entries
    # Iterate the static keys in ascending order: a plain dict does not
    # guarantee key order, so sorting is required to honor the documented
    # "lowest index wins" contract.
    for key in sorted(static_entries):
        if static_entries[key].name() == name:
            return key
    # Dynamic entries are numbered right after the last static index.
    for idx, val in enumerate(self._dynamic_table):
        if val.name() == name:
            return type(self)._static_entries_last_idx + idx + 1
    return None
|
[
"def",
"get_idx_by_name",
"(",
"self",
",",
"name",
")",
":",
"# type: (str) -> Optional[int]",
"name",
"=",
"name",
".",
"lower",
"(",
")",
"for",
"key",
",",
"val",
"in",
"six",
".",
"iteritems",
"(",
"type",
"(",
"self",
")",
".",
"_static_entries",
")",
":",
"if",
"val",
".",
"name",
"(",
")",
"==",
"name",
":",
"return",
"key",
"for",
"idx",
",",
"val",
"in",
"enumerate",
"(",
"self",
".",
"_dynamic_table",
")",
":",
"if",
"val",
".",
"name",
"(",
")",
"==",
"name",
":",
"return",
"type",
"(",
"self",
")",
".",
"_static_entries_last_idx",
"+",
"idx",
"+",
"1",
"return",
"None"
] | 44.777778
| 18.222222
|
def tokenparser(fmt, keys=None, token_cache={}):
    """Divide the format string into tokens and parse them.
    Return stretchy token and list of [initialiser, length, value]
    initialiser is one of: hex, oct, bin, uint, int, se, ue, 0x, 0o, 0b etc.
    length is None if not known, as is value.
    If the token is in the keyword dictionary (keys) then it counts as a
    special case and isn't messed with.
    tokens must be of the form: [factor*][initialiser][:][length][=value]
    """
    # NOTE: the mutable default for token_cache is deliberate -- it serves as a
    # per-process memo shared across calls, bounded by CACHE_SIZE below.
    try:
        return token_cache[(fmt, keys)]
    except KeyError:
        token_key = (fmt, keys)
        # Very inefficient expanding of brackets.
        fmt = expand_brackets(fmt)
        # Split tokens by ',' and remove whitespace
        # The meta_tokens can either be ordinary single tokens or multiple
        # struct-format token strings.
        meta_tokens = (''.join(f.split()) for f in fmt.split(','))
        return_values = []
        stretchy_token = False
        for meta_token in meta_tokens:
            # See if it has a multiplicative factor
            m = MULTIPLICATIVE_RE.match(meta_token)
            if not m:
                factor = 1
            else:
                factor = int(m.group('factor'))
                meta_token = m.group('token')
            # See if it's a struct-like format
            tokens = structparser(meta_token)
            ret_vals = []
            for token in tokens:
                if keys and token in keys:
                    # Don't bother parsing it, it's a keyword argument
                    ret_vals.append([token, None, None])
                    continue
                value = length = None
                if token == '':
                    continue
                # Match literal tokens of the form 0x... 0o... and 0b...
                m = LITERAL_RE.match(token)
                if m:
                    name = m.group('name')
                    value = m.group('value')
                    ret_vals.append([name, length, value])
                    continue
                # Match everything else:
                m1 = TOKEN_RE.match(token)
                if not m1:
                    # and if you don't specify a 'name' then the default is 'uint':
                    m2 = DEFAULT_UINT.match(token)
                    if not m2:
                        raise ValueError("Don't understand token '{0}'.".format(token))
                if m1:
                    name = m1.group('name')
                    length = m1.group('len')
                    if m1.group('value'):
                        value = m1.group('value')
                else:
                    assert m2
                    name = 'uint'
                    length = m2.group('len')
                    if m2.group('value'):
                        value = m2.group('value')
                if name == 'bool':
                    if length is not None:
                        raise ValueError("You can't specify a length with bool tokens - they are always one bit.")
                    length = 1
                # Anything with an unknown length (except the self-delimiting
                # exp-Golomb codes) makes the whole format stretchy.
                if length is None and name not in ('se', 'ue', 'sie', 'uie'):
                    stretchy_token = True
                if length is not None:
                    # Try converting length to int, otherwise check it's a key.
                    try:
                        length = int(length)
                        if length < 0:
                            # 'Error' is the module's own exception class; it is
                            # caught just below to report negative lengths.
                            raise Error
                        # For the 'bytes' token convert length to bits.
                        if name == 'bytes':
                            length *= 8
                    except Error:
                        raise ValueError("Can't read a token with a negative length.")
                    except ValueError:
                        if not keys or length not in keys:
                            raise ValueError("Don't understand length '{0}' of token.".format(length))
                ret_vals.append([name, length, value])
            # This multiplies by the multiplicative factor, but this means that
            # we can't allow keyword values as multipliers (e.g. n*uint:8).
            # The only way to do this would be to return the factor in some fashion
            # (we can't use the key's value here as it would mean that we couldn't
            # sensibly continue to cache the function's results. (TODO).
            return_values.extend(ret_vals * factor)
        return_values = [tuple(x) for x in return_values]
        if len(token_cache) < CACHE_SIZE:
            token_cache[token_key] = stretchy_token, return_values
        return stretchy_token, return_values
|
[
"def",
"tokenparser",
"(",
"fmt",
",",
"keys",
"=",
"None",
",",
"token_cache",
"=",
"{",
"}",
")",
":",
"try",
":",
"return",
"token_cache",
"[",
"(",
"fmt",
",",
"keys",
")",
"]",
"except",
"KeyError",
":",
"token_key",
"=",
"(",
"fmt",
",",
"keys",
")",
"# Very inefficient expanding of brackets.",
"fmt",
"=",
"expand_brackets",
"(",
"fmt",
")",
"# Split tokens by ',' and remove whitespace",
"# The meta_tokens can either be ordinary single tokens or multiple",
"# struct-format token strings.",
"meta_tokens",
"=",
"(",
"''",
".",
"join",
"(",
"f",
".",
"split",
"(",
")",
")",
"for",
"f",
"in",
"fmt",
".",
"split",
"(",
"','",
")",
")",
"return_values",
"=",
"[",
"]",
"stretchy_token",
"=",
"False",
"for",
"meta_token",
"in",
"meta_tokens",
":",
"# See if it has a multiplicative factor",
"m",
"=",
"MULTIPLICATIVE_RE",
".",
"match",
"(",
"meta_token",
")",
"if",
"not",
"m",
":",
"factor",
"=",
"1",
"else",
":",
"factor",
"=",
"int",
"(",
"m",
".",
"group",
"(",
"'factor'",
")",
")",
"meta_token",
"=",
"m",
".",
"group",
"(",
"'token'",
")",
"# See if it's a struct-like format",
"tokens",
"=",
"structparser",
"(",
"meta_token",
")",
"ret_vals",
"=",
"[",
"]",
"for",
"token",
"in",
"tokens",
":",
"if",
"keys",
"and",
"token",
"in",
"keys",
":",
"# Don't bother parsing it, it's a keyword argument",
"ret_vals",
".",
"append",
"(",
"[",
"token",
",",
"None",
",",
"None",
"]",
")",
"continue",
"value",
"=",
"length",
"=",
"None",
"if",
"token",
"==",
"''",
":",
"continue",
"# Match literal tokens of the form 0x... 0o... and 0b...",
"m",
"=",
"LITERAL_RE",
".",
"match",
"(",
"token",
")",
"if",
"m",
":",
"name",
"=",
"m",
".",
"group",
"(",
"'name'",
")",
"value",
"=",
"m",
".",
"group",
"(",
"'value'",
")",
"ret_vals",
".",
"append",
"(",
"[",
"name",
",",
"length",
",",
"value",
"]",
")",
"continue",
"# Match everything else:",
"m1",
"=",
"TOKEN_RE",
".",
"match",
"(",
"token",
")",
"if",
"not",
"m1",
":",
"# and if you don't specify a 'name' then the default is 'uint':",
"m2",
"=",
"DEFAULT_UINT",
".",
"match",
"(",
"token",
")",
"if",
"not",
"m2",
":",
"raise",
"ValueError",
"(",
"\"Don't understand token '{0}'.\"",
".",
"format",
"(",
"token",
")",
")",
"if",
"m1",
":",
"name",
"=",
"m1",
".",
"group",
"(",
"'name'",
")",
"length",
"=",
"m1",
".",
"group",
"(",
"'len'",
")",
"if",
"m1",
".",
"group",
"(",
"'value'",
")",
":",
"value",
"=",
"m1",
".",
"group",
"(",
"'value'",
")",
"else",
":",
"assert",
"m2",
"name",
"=",
"'uint'",
"length",
"=",
"m2",
".",
"group",
"(",
"'len'",
")",
"if",
"m2",
".",
"group",
"(",
"'value'",
")",
":",
"value",
"=",
"m2",
".",
"group",
"(",
"'value'",
")",
"if",
"name",
"==",
"'bool'",
":",
"if",
"length",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"You can't specify a length with bool tokens - they are always one bit.\"",
")",
"length",
"=",
"1",
"if",
"length",
"is",
"None",
"and",
"name",
"not",
"in",
"(",
"'se'",
",",
"'ue'",
",",
"'sie'",
",",
"'uie'",
")",
":",
"stretchy_token",
"=",
"True",
"if",
"length",
"is",
"not",
"None",
":",
"# Try converting length to int, otherwise check it's a key.",
"try",
":",
"length",
"=",
"int",
"(",
"length",
")",
"if",
"length",
"<",
"0",
":",
"raise",
"Error",
"# For the 'bytes' token convert length to bits.",
"if",
"name",
"==",
"'bytes'",
":",
"length",
"*=",
"8",
"except",
"Error",
":",
"raise",
"ValueError",
"(",
"\"Can't read a token with a negative length.\"",
")",
"except",
"ValueError",
":",
"if",
"not",
"keys",
"or",
"length",
"not",
"in",
"keys",
":",
"raise",
"ValueError",
"(",
"\"Don't understand length '{0}' of token.\"",
".",
"format",
"(",
"length",
")",
")",
"ret_vals",
".",
"append",
"(",
"[",
"name",
",",
"length",
",",
"value",
"]",
")",
"# This multiplies by the multiplicative factor, but this means that",
"# we can't allow keyword values as multipliers (e.g. n*uint:8).",
"# The only way to do this would be to return the factor in some fashion",
"# (we can't use the key's value here as it would mean that we couldn't",
"# sensibly continue to cache the function's results. (TODO).",
"return_values",
".",
"extend",
"(",
"ret_vals",
"*",
"factor",
")",
"return_values",
"=",
"[",
"tuple",
"(",
"x",
")",
"for",
"x",
"in",
"return_values",
"]",
"if",
"len",
"(",
"token_cache",
")",
"<",
"CACHE_SIZE",
":",
"token_cache",
"[",
"token_key",
"]",
"=",
"stretchy_token",
",",
"return_values",
"return",
"stretchy_token",
",",
"return_values"
] | 42.11
| 15.85
|
def set_templates_dir(self):
        """Auto-connect slot activated when templates dir checkbox is toggled.

        When the checkbox is checked, the previously saved report-template
        path is restored into the line edit; otherwise the path is cleared.
        The custom-report splitter is enabled/disabled to match.
        """
        use_custom_dir = self.custom_templates_dir_checkbox.isChecked()
        if use_custom_dir:
            # Restore the last saved templates directory.
            template_path = setting(
                key='reportTemplatePath',
                default='',
                expected_type=str,
                qsettings=self.settings)
        else:
            # Custom templates disabled: blank out the stored path.
            template_path = ''
        self.leReportTemplatePath.setText(template_path)
        self.splitter_custom_report.setEnabled(use_custom_dir)
|
[
"def",
"set_templates_dir",
"(",
"self",
")",
":",
"is_checked",
"=",
"self",
".",
"custom_templates_dir_checkbox",
".",
"isChecked",
"(",
")",
"if",
"is_checked",
":",
"# Show previous templates dir",
"path",
"=",
"setting",
"(",
"key",
"=",
"'reportTemplatePath'",
",",
"default",
"=",
"''",
",",
"expected_type",
"=",
"str",
",",
"qsettings",
"=",
"self",
".",
"settings",
")",
"else",
":",
"# Set the template report dir to ''",
"path",
"=",
"''",
"self",
".",
"leReportTemplatePath",
".",
"setText",
"(",
"path",
")",
"self",
".",
"splitter_custom_report",
".",
"setEnabled",
"(",
"is_checked",
")"
] | 35.411765
| 12.294118
|
def main():
    """
    fetches hek data and makes thematic maps as requested
    """
    args = get_args()
    config = Config(args.config)

    # Load dates: either one per line in a file, or a single date string.
    if not os.path.isfile(args.dates):  # assume it's a date
        dates = [dateparser.parse(args.dates)]
    else:
        with open(args.dates) as f:
            dates = [dateparser.parse(line.split(" ")[0]) for line in f.readlines()]

    if args.verbose:
        print("Dates are:")
        for date in dates:
            print(date)

    for date in dates:
        if args.verbose:
            print('Processing {}'.format(date))
        # Fetch the SUVI 195 composite for this date (single-threaded).
        fetched = Fetcher(date, ['suvi-l2-ci195'],
                          suvi_composite_path=config.suvi_composite_path).fetch(multithread=False)
        suvi_data = fetched['suvi-l2-ci195']
        if suvi_data[0] is not None:
            config.expert = 'HEK'
            responses = query_hek(date)
            thmap = make_thmap(suvi_data, responses, config)
            out_name = "thmap_hek_{}.fits".format(date.strftime("%Y%m%d%H%M%S"))
            Outgest(os.path.join(args.output, out_name),
                    thmap, {"c195": suvi_data[0], "suvi-l2-ci195": suvi_data[0]}, args.config).save()
|
[
"def",
"main",
"(",
")",
":",
"args",
"=",
"get_args",
"(",
")",
"config",
"=",
"Config",
"(",
"args",
".",
"config",
")",
"# Load dates",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"args",
".",
"dates",
")",
":",
"with",
"open",
"(",
"args",
".",
"dates",
")",
"as",
"f",
":",
"dates",
"=",
"[",
"dateparser",
".",
"parse",
"(",
"line",
".",
"split",
"(",
"\" \"",
")",
"[",
"0",
"]",
")",
"for",
"line",
"in",
"f",
".",
"readlines",
"(",
")",
"]",
"else",
":",
"# assume it's a date",
"dates",
"=",
"[",
"dateparser",
".",
"parse",
"(",
"args",
".",
"dates",
")",
"]",
"if",
"args",
".",
"verbose",
":",
"print",
"(",
"\"Dates are:\"",
")",
"for",
"date",
"in",
"dates",
":",
"print",
"(",
"date",
")",
"for",
"date",
"in",
"dates",
":",
"if",
"args",
".",
"verbose",
":",
"print",
"(",
"'Processing {}'",
".",
"format",
"(",
"date",
")",
")",
"suvi_data",
"=",
"Fetcher",
"(",
"date",
",",
"[",
"'suvi-l2-ci195'",
"]",
",",
"suvi_composite_path",
"=",
"config",
".",
"suvi_composite_path",
")",
".",
"fetch",
"(",
"multithread",
"=",
"False",
")",
"[",
"'suvi-l2-ci195'",
"]",
"if",
"suvi_data",
"[",
"0",
"]",
"is",
"not",
"None",
":",
"config",
".",
"expert",
"=",
"'HEK'",
"responses",
"=",
"query_hek",
"(",
"date",
")",
"thmap",
"=",
"make_thmap",
"(",
"suvi_data",
",",
"responses",
",",
"config",
")",
"Outgest",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"output",
",",
"\"thmap_hek_{}.fits\"",
".",
"format",
"(",
"date",
".",
"strftime",
"(",
"\"%Y%m%d%H%M%S\"",
")",
")",
")",
",",
"thmap",
",",
"{",
"\"c195\"",
":",
"suvi_data",
"[",
"0",
"]",
",",
"\"suvi-l2-ci195\"",
":",
"suvi_data",
"[",
"0",
"]",
"}",
",",
"args",
".",
"config",
")",
".",
"save",
"(",
")"
] | 37.1
| 21.3
|
def shutdown(self):
        """Forcefully shutdown the entire pool, closing all non-executing
        connections.

        All connections are validated *before* any is touched, so a busy
        connection raises without leaving the pool partially closed (the
        original closed connections as it scanned, so an executing one found
        mid-loop left earlier connections already closed and removed).

        :raises: ConnectionBusyError
        """
        with self._lock:
            # Pass 1: fail fast if anything is still executing — close nothing.
            for cid, conn in self.connections.items():
                if conn.executing:
                    raise ConnectionBusyError(cid)
            # Pass 2: free, close and remove every connection.
            for cid in list(self.connections.keys()):
                conn = self.connections[cid]
                if conn.locked:
                    conn.free()
                conn.close()
                del self.connections[cid]
|
[
"def",
"shutdown",
"(",
"self",
")",
":",
"with",
"self",
".",
"_lock",
":",
"for",
"cid",
"in",
"list",
"(",
"self",
".",
"connections",
".",
"keys",
"(",
")",
")",
":",
"if",
"self",
".",
"connections",
"[",
"cid",
"]",
".",
"executing",
":",
"raise",
"ConnectionBusyError",
"(",
"cid",
")",
"if",
"self",
".",
"connections",
"[",
"cid",
"]",
".",
"locked",
":",
"self",
".",
"connections",
"[",
"cid",
"]",
".",
"free",
"(",
")",
"self",
".",
"connections",
"[",
"cid",
"]",
".",
"close",
"(",
")",
"del",
"self",
".",
"connections",
"[",
"cid",
"]"
] | 34.6
| 13.133333
|
def distance_to_current_waypoint():
    """
    Gets distance in metres to the current waypoint.
    It returns None for the first waypoint (Home location).
    """
    next_wp = vehicle.commands.next
    if next_wp == 0:
        # First waypoint is the Home location: no distance to report.
        return None
    # Mission commands are zero indexed.
    item = vehicle.commands[next_wp - 1]
    target = LocationGlobalRelative(item.x, item.y, item.z)
    return get_distance_metres(vehicle.location.global_frame, target)
|
[
"def",
"distance_to_current_waypoint",
"(",
")",
":",
"nextwaypoint",
"=",
"vehicle",
".",
"commands",
".",
"next",
"if",
"nextwaypoint",
"==",
"0",
":",
"return",
"None",
"missionitem",
"=",
"vehicle",
".",
"commands",
"[",
"nextwaypoint",
"-",
"1",
"]",
"#commands are zero indexed",
"lat",
"=",
"missionitem",
".",
"x",
"lon",
"=",
"missionitem",
".",
"y",
"alt",
"=",
"missionitem",
".",
"z",
"targetWaypointLocation",
"=",
"LocationGlobalRelative",
"(",
"lat",
",",
"lon",
",",
"alt",
")",
"distancetopoint",
"=",
"get_distance_metres",
"(",
"vehicle",
".",
"location",
".",
"global_frame",
",",
"targetWaypointLocation",
")",
"return",
"distancetopoint"
] | 38.2
| 17
|
def apply_completion(self, completion):
        """
        Insert a given completion.
        """
        assert isinstance(completion, Completion)

        # Cancel any completion that is currently active before applying
        # the new one.
        if self.complete_state:
            self.go_to_completion(None)
            self.complete_state = None

        # Remove the text between the completion's start position and the
        # cursor, then insert the completion's text in its place.
        self.delete_before_cursor(-completion.start_position)
        self.insert_text(completion.text)
|
[
"def",
"apply_completion",
"(",
"self",
",",
"completion",
")",
":",
"assert",
"isinstance",
"(",
"completion",
",",
"Completion",
")",
"# If there was already a completion active, cancel that one.",
"if",
"self",
".",
"complete_state",
":",
"self",
".",
"go_to_completion",
"(",
"None",
")",
"self",
".",
"complete_state",
"=",
"None",
"# Insert text from the given completion.",
"self",
".",
"delete_before_cursor",
"(",
"-",
"completion",
".",
"start_position",
")",
"self",
".",
"insert_text",
"(",
"completion",
".",
"text",
")"
] | 33.285714
| 12.142857
|
def get_attributes(name, region=None, key=None, keyid=None, profile=None):
    '''
    Check to see if attributes are set on an ELB.
    CLI example:
    .. code-block:: bash
        salt myminion boto_elb.get_attributes myelb
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    attempts_left = 30
    while attempts_left:
        try:
            lbattrs = conn.get_all_lb_attributes(name)
        except boto.exception.BotoServerError as e:
            # Back off and retry on throttling; anything else is fatal.
            if e.error_code == 'Throttling':
                log.debug("Throttled by AWS API, will retry in 5 seconds...")
                time.sleep(5)
                attempts_left -= 1
                continue
            log.error('ELB %s does not exist: %s', name, e.message)
            return {}
        al = lbattrs.access_log
        czlb = lbattrs.cross_zone_load_balancing
        cd = lbattrs.connection_draining
        cs = lbattrs.connecting_settings
        # Assemble the nested attribute dict (ordered, matching AWS groups).
        ret = odict.OrderedDict()
        ret['access_log'] = odict.OrderedDict(
            [('enabled', al.enabled),
             ('s3_bucket_name', al.s3_bucket_name),
             ('s3_bucket_prefix', al.s3_bucket_prefix),
             ('emit_interval', al.emit_interval)])
        ret['cross_zone_load_balancing'] = odict.OrderedDict(
            [('enabled', czlb.enabled)])
        ret['connection_draining'] = odict.OrderedDict(
            [('enabled', cd.enabled),
             ('timeout', cd.timeout)])
        ret['connecting_settings'] = odict.OrderedDict(
            [('idle_timeout', cs.idle_timeout)])
        return ret
    # Retries exhausted while being throttled.
    return {}
|
[
"def",
"get_attributes",
"(",
"name",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"retries",
"=",
"30",
"while",
"retries",
":",
"try",
":",
"lbattrs",
"=",
"conn",
".",
"get_all_lb_attributes",
"(",
"name",
")",
"ret",
"=",
"odict",
".",
"OrderedDict",
"(",
")",
"ret",
"[",
"'access_log'",
"]",
"=",
"odict",
".",
"OrderedDict",
"(",
")",
"ret",
"[",
"'cross_zone_load_balancing'",
"]",
"=",
"odict",
".",
"OrderedDict",
"(",
")",
"ret",
"[",
"'connection_draining'",
"]",
"=",
"odict",
".",
"OrderedDict",
"(",
")",
"ret",
"[",
"'connecting_settings'",
"]",
"=",
"odict",
".",
"OrderedDict",
"(",
")",
"al",
"=",
"lbattrs",
".",
"access_log",
"czlb",
"=",
"lbattrs",
".",
"cross_zone_load_balancing",
"cd",
"=",
"lbattrs",
".",
"connection_draining",
"cs",
"=",
"lbattrs",
".",
"connecting_settings",
"ret",
"[",
"'access_log'",
"]",
"[",
"'enabled'",
"]",
"=",
"al",
".",
"enabled",
"ret",
"[",
"'access_log'",
"]",
"[",
"'s3_bucket_name'",
"]",
"=",
"al",
".",
"s3_bucket_name",
"ret",
"[",
"'access_log'",
"]",
"[",
"'s3_bucket_prefix'",
"]",
"=",
"al",
".",
"s3_bucket_prefix",
"ret",
"[",
"'access_log'",
"]",
"[",
"'emit_interval'",
"]",
"=",
"al",
".",
"emit_interval",
"ret",
"[",
"'cross_zone_load_balancing'",
"]",
"[",
"'enabled'",
"]",
"=",
"czlb",
".",
"enabled",
"ret",
"[",
"'connection_draining'",
"]",
"[",
"'enabled'",
"]",
"=",
"cd",
".",
"enabled",
"ret",
"[",
"'connection_draining'",
"]",
"[",
"'timeout'",
"]",
"=",
"cd",
".",
"timeout",
"ret",
"[",
"'connecting_settings'",
"]",
"[",
"'idle_timeout'",
"]",
"=",
"cs",
".",
"idle_timeout",
"return",
"ret",
"except",
"boto",
".",
"exception",
".",
"BotoServerError",
"as",
"e",
":",
"if",
"e",
".",
"error_code",
"==",
"'Throttling'",
":",
"log",
".",
"debug",
"(",
"\"Throttled by AWS API, will retry in 5 seconds...\"",
")",
"time",
".",
"sleep",
"(",
"5",
")",
"retries",
"-=",
"1",
"continue",
"log",
".",
"error",
"(",
"'ELB %s does not exist: %s'",
",",
"name",
",",
"e",
".",
"message",
")",
"return",
"{",
"}",
"return",
"{",
"}"
] | 40.674419
| 21.651163
|
def set_locations(self, locations, values):
        """
        For a list of locations set the values.

        :param locations: list of index locations
        :param values: list of values or a single value
        :return: nothing
        """
        # Translate each location label into its positional index, then
        # delegate to set() which handles single vs list values.
        lookup = self._index
        self.set([lookup[loc] for loc in locations], values)
|
[
"def",
"set_locations",
"(",
"self",
",",
"locations",
",",
"values",
")",
":",
"indexes",
"=",
"[",
"self",
".",
"_index",
"[",
"x",
"]",
"for",
"x",
"in",
"locations",
"]",
"self",
".",
"set",
"(",
"indexes",
",",
"values",
")"
] | 29.636364
| 13.636364
|
def elemental_abund(self,cycle,zrange=[1,85],ylim=[0,0],title_items=None,
                        ref=-1,ref_filename=None,z_pin=None,pin=None,
                        pin_filename=None,zchi2=None,logeps=False,dilution=None,show_names=True,label='',
                        colour='black',plotlines=':',plotlabels=True,mark='x',**kwargs):
        '''
        Plot the decayed elemental abundance distribution (PPN).
        Plot the elemental abundance distribution (nugridse).
        (FH, 06/2014; SJ 07/2014)
        Parameters
        ----------
        cycle : string, integer or list
            The cycle of interest. If it is a list of cycles, this
            method will do a plot for each cycle and save them to a
            file.
        zrange : list, optional
            A 1x2 array containing the lower and upper atomic number
            limit
        ylim : list, optional
            A 1x2 array containing the lower and upper Y limits. If
            it is [0,0], then ylim will be determined automatically.
            The default is [0,0].
        title_items : list, optional
            A list of cycle attributes that will be added to the title.
            For possible cycle attributes see self.cattrs.
        ref : integer, optional
            ref = N: plot abundaces relative to cycle N abundance, similar to the
            'ref_filename' option.
            Cannot be active at the same time as
            the 'ref_filename' option.
        ref_filename : string, optional
            plot abundances relative to solar abundance. For this option,
            a cycle number for the 'ref' option must not be provided
        z_pin : int, optional
            Charge number for an element to be 'pinned'. An offset will be
            calculated from the difference between the cycle value and the
            value from the pinned reference.
            Can be used with the 'pin_filename' option to import an external
            abundance file in the same format as solar abundances.
            If no file is given, the reference can be either cycle N='ref'
            or the value from the 'ref_filename'.
        pin : float, optional
            A manually provided [X/Fe] abundance to pin the element selected with 'z_pin'
        pin_filename: string, optional
            use provided file to provide reference to pin an element to. An offset is
            calculated and used to shift the plot.
            The file requires header columns marked by '#', column spacing of ' ', and at minimum two columns
            containing:
            'Z': charge number
            '[X/Fe]': metallicity
        zchi2 : list, optional
            A 1x2 array containing atomic numbers of the elements
            for which chi2 test is done when plotType == 'PPN' and pin_filename != None
        logeps : boolean, optional
            Plots log eps instead of [X/Fe] charts.
        dilution : float, optional
            Provides the dilution factor for mixing nucleosynthesis products to the surface
            Cannot be active at the same time as the 'z_pin' option.
        label : string, optional
            The label for the abundance distribution
            The default is '' (i.e. do not show a label)
        show_names : boolean, optional
            Whether or not to show the element names on the figure.
        colour : string, optional
            In case you want to dictate marker and line colours. Takes cymkrgb
            single-character colours or any other colour string accepted by
            matplotlib. The default is '' (automatic colour selection)
        plotlines : string, optional
            In case you want to dictate line style. Takes MatPlotLib linestyles.
        mark : string, optional
            In case you want to dictate marker style. Takes MatPlotLib markers.
            Default is 'x'.
        kwargs : additional keyword arguments
            These arguments are equivalent to those of iso_abund, e.g.
            mass_range. Routines from iso_abund are called, to perform
            averages and get elemental abundances in the correct form.
        Output
        ------
        if plotType == 'PPN' and pin_filename != None
            chi2 : float
                chi-squared deviation of predicted abundances from observed ones
        if plotType == 'se'
            z_el : array
                proton number of elements being returned
            el_abu_plot : array
                elemental abundances (as you asked for them, could be ref to something else)
        This method adds the following data to the abu_vector instance:
        self.el_abu_hash : elemental abundance, dictionary
        self.el_name : element names, can be used as keys in el_abu_hash
        self.el_abu_log : array of log10 of elemental abundance as plotted, including any ref scaling
        '''
        #from . import utils
        from . import ascii_table as asci
        # Dispatch on the concrete class: 'PPN' (abu_vector) vs 'se' (nugridse).
        plotType=self._classTest()
        offset=0
        if ref_filename!=None:
            ref=-2
        if logeps==True:
            if zrange[0]!=1:
                print("To use logeps, the z range must be [1,X], otherwise the program will exit.")
                sys.exit()
            z_pin=1
            ref=-3
        if plotType=='PPN':
            self.get(cycle,decayed=True)
            z_el=unique(self.z_iso_to_plot)
            zmin_ind=min(where(z_el>=zrange[0])[0])
            zmax_ind=max(where(z_el<=zrange[1])[0])
            # extract some elemental quantities:
            a_el=[]; el_name=[]; el_abu=[]; el_abu_hash={}
            for z in z_el[zmin_ind:zmax_ind]:
                el=self.el_iso_to_plot[where(self.z_iso_to_plot==z)[0].tolist()[0]]
                X_el=self.abunds[where(self.el_iso_to_plot==el)[0].tolist()].sum() # take all iso abunds for one Z and sum
                a_el.append(self.a_iso_to_plot[where(self.z_iso_to_plot==z)[0].tolist()[0]])
                el_abu.append(X_el)
                el_name.append(el)
                el_abu_hash[el]=X_el
            fe_abund=self.abunds[where(self.el_iso_to_plot=='Fe')[0].tolist()].sum() # Fe abund is always needed to find [X/Fe]
            self.el_abu_hash = el_abu_hash
            self.el_name = el_name
            # if we have provided a solar abundance file
            if ref==-2:
                from . import utils
                utils.solar(ref_filename,1)
                el_abu_sun=np.array(utils.solar_elem_abund)
                el_abu_plot=np.zeros(len(el_abu))
                for zs in z_el[zmin_ind:zmax_ind]:
                    zelidx=where(z_el[zmin_ind:zmax_ind]==zs)[0]
                    zsolidx=int(zs-1)
                    if el_abu_sun[zsolidx] > 0. :
                        el_abu_plot[zelidx]=el_abu[zelidx[0]]/el_abu_sun[zsolidx]
                    else:
                        el_abu_plot[zelidx]=-1
            # if we have provided a reference cycle number
            elif ref>-1:
                self.get(ref,decayed=True)
                z_el_ref=unique(self.z_iso_to_plot)
                zmin_ind=min(where(z_el_ref>=zrange[0])[0])
                zmax_ind=max(where(z_el_ref<=zrange[1])[0])
                # extract some elemental quantities:
                a_el_ref=[]; el_name_ref=[]; el_abu_ref=[]; el_abu_hash_ref={}
                el_abu_plot=np.zeros(len(el_abu))
                for z_ref in z_el[zmin_ind:zmax_ind]:
                    el_ref=self.el_iso_to_plot[where(self.z_iso_to_plot==z_ref)[0].tolist()[0]]
                    X_el_ref=self.abunds[where(self.el_iso_to_plot==el_ref)[0].tolist()].sum()
                    a_el_ref.append(self.a_iso_to_plot[where(self.z_iso_to_plot==z_ref)[0].tolist()[0]])
                    el_abu_ref.append(X_el_ref)
                    el_name_ref.append(el_ref)
                    # NOTE(review): X_el here looks like a typo for X_el_ref -- confirm.
                    el_abu_hash_ref[el_ref]=X_el
                for i in range(len(el_abu)):
                    el_abu_plot[i-1]=el_abu[i-1]/el_abu_ref[i-1]
            # if we want to include observation data
            if pin_filename!=None:
                print('using the pin filename')
                obs_file=asci.readTable(pin_filename,header_char='#')
                xfe_sigma=[]
                el_abu_obs_log=[]
                z_ul=[]
                for z_i in z_el[zmin_ind:zmax_ind]:
                    # Prefer [X/H] columns when present, else fall back to [X/Fe].
                    try:
                        obs_file.data['[X/H]']
                        x_over='[X/H]'
                        sigma='sig_[X/H]'
                    # NOTE(review): bare except -- catches everything, not just a missing column.
                    except:
                        x_over='[X/Fe]'
                        sigma='sig_[X/Fe]'
                    zelidx=where(z_el[zmin_ind:zmax_ind]==z_i)[0]
                    zpinidx=where(obs_file.data['Z']==z_i)[0] #str()
                    if len(zpinidx)==0:
                        el_abu_obs_log.append([None])
                        xfe_sigma.append([None])
                        z_ul.append([None])
                    elif len(zpinidx)>1:
                        '''if any(obs_file.data['ul'][zpinidx].astype(int))==1:
                            print('hi')
                            tmp=obs_file.data['[X/Fe]'][zpinidx].astype(float)
                            z_ul.append(tmp.tolist())
                            el_abu_obs_log.append([None]*len(zpinidx))
                            xfe_sigma.append([None]*len(zpinidx))
                        else:'''
                        tmp=obs_file.data[x_over][zpinidx]#.astype(float) # array stores multiple values for a
                        el_abu_obs_log.append(tmp.tolist()) # single element
                        tmp=obs_file.data[sigma][zpinidx]#.astype(float)
                        xfe_sigma.append(tmp.tolist())
                        z_ul.append([None])
                    else:
                        if obs_file.data['ul'][zpinidx]==1: #.astype(int)
                            tmp=obs_file.data[x_over][zpinidx]#.astype(float)
                            z_ul.append(tmp.tolist())
                            tmp=obs_file.data[x_over][zpinidx]#.astype(float)
                            el_abu_obs_log.append([None])
                            xfe_sigma.append([None])
                        else:
                            tmp=obs_file.data[x_over][zpinidx][0]#.astype(float)
                            el_abu_obs_log.append([tmp])
                            tmp=obs_file.data[sigma][zpinidx][0]#.astype(float)
                            xfe_sigma.append([tmp])
                            z_ul.append([None])
            el_abu_obs=[]
            # set a pinned element for offset calculation and adjustment
            if z_pin!=None:
                print("Pinned element: "+str(z_pin))
                if pin_filename!=None:
                    # converting obervation data from log to standard form for compatibility
                    # with later code
                    for i in range(len(el_abu_obs_log)):
                        if all(el_abu_obs_log[i])==None:
                            el_abu_obs.append(None)
                        else:
                            el_abu_obs.append(np.power(10,el_abu_obs_log[i]))
                    el_abu_pin=el_abu_obs
                elif pin!=None:
                    print('using manual pin')
                    pin=np.power(10,pin)
                    el_abu_pin=np.zeros(len(el_abu))
                    for i in range(len(el_abu)):
                        el_abu_pin[i-1]=pin
                elif logeps==True:
                    print('finding log eps')
                    # NOTE(review): several entries below look garbled for standard
                    # atomic weights (e.g. 6.46 at Cu, 01.07 at Ru, 18.934 at Er/Tm,
                    # 24.38 at Tl, 30.74 at P) -- verify against a standard table.
                    atomic_mass=[1.008, 4.003, 6.94, 9.012, 10.81, 12.011, 14.007, 15.999, 18.998, 20.18, 22.99, 24.305, 26.982, 28.085, 30.74, 32.06, 35.45, 39.948, 39.098, 40.078, 44.956, 47.867, 50.942, 51.996, 54.938, 55.845, 58.933, 58.693, 6.46, 65.38, 69.723, 72.63, 74.922, 78.971, 79.904, 83.798, 85.468, 87.62, 88.906, 91.224, 92.906, 95.95, 97., 01.07, 102.906, 106.42, 107.868, 112.414, 114.818, 118.71, 121.76, 127.6, 126.904, 131.293, 132.905, 137.27, 138.905, 140.116, 140.908, 144.242, 145. , 150.36, 151.964, 157.25, 158.925, 162.5, 164.93, 167.259, 18.934, 173.045, 174.967, 178.49, 180.948, 183.84, 186.207, 190.23, 192.217, 195.084, 196.967, 200.592, 24.38, 207.2, 208.98, 209., 210., 222., 223., 226., 227., 232.038, 231.036, 238.029, 237., 244., 243., 247., 247., 251., 252., 257., 258., 259., 262., 267., 270., 269., 270., 270., 278., 281., 281., 285., 286., 289., 289., 293., 293., 294.] # this belongs in utils! (FH)
                    el_abu_pin=atomic_mass
                    el_abu_plot=np.zeros(len(el_abu))
                    for i in range(len(el_abu)):
                        el_abu_plot[i-1]=el_abu[i-1]/el_abu_pin[i-1]
                elif ref==-2:
                    print('using solar pin')
                    el_abu_pin=np.zeros(len(el_abu))
                    for i in range(len(el_abu)):
                        el_abu_pin[i-1]=el_abu[i-1]/el_abu_sun[i-1]
                elif ref>=0:
                    print("Error: A reference file or manual pin is required - the plot will fail")
                '''elif ref>=0:
                    print('using ref pin')
                    el_abu_pin=np.zeros(len(el_abu))
                    for i in range(len(el_abu)):nacon
                        el_abu_pin[i-1]=el_abu[i-1]/el_abu_ref[i-1]
                    print(el_abu)
                    print(el_abu_ref)
                    print(el_abu_pin)
                    print(el_abu_plot)'''
                # calculating the offset value
                zelidx=where(z_el[zmin_ind:zmax_ind]==z_pin)[0][0]
                offset=np.log10(el_abu_pin[zelidx])-np.log10(el_abu_plot[zelidx])
            if ref!=-1 and dilution==None:
                el_abu=el_abu_plot
            if dilution!=None:
                self.get(0,decayed=True)
                z_el_ini=unique(self.z_iso_to_plot)
                zmin_ind=min(where(z_el>=zrange[0])[0])
                zmax_ind=max(where(z_el<=zrange[1])[0])
                # extract some elemental quantities:
                a_el_ini=[]; el_name_ini=[]; el_abu_ini=[]; el_abu_hash_ini={}
                for z in z_el_ini:
                    el_ini=self.el_iso_to_plot[where(self.z_iso_to_plot==z)[0].tolist()[0]]
                    X_el_ini=self.abunds[where(self.el_iso_to_plot==el_ini)[0].tolist()].sum() # take all iso abunds for one Z and sum
                    a_el_ini.append(self.a_iso_to_plot[where(self.z_iso_to_plot==z)[0].tolist()[0]])
                    el_abu_ini.append(X_el_ini)
                    el_name_ini.append(el_ini)
                    # NOTE(review): 'el' here is stale from an earlier loop; 'el_ini'
                    # was probably intended as the key -- confirm.
                    el_abu_hash_ini[el]=X_el_ini
                el_abu_dilution=[]
                for i in range(len(el_abu)):
                    el_adjusted=(dilution*el_abu[i])+((1-dilution)*el_abu_ini[zmin_ind+i])
                    fe_adjusted=(dilution*fe_abund)+((1-dilution)*el_abu_ini[24])
                    num=el_adjusted*el_abu_sun[25]
                    iadd = 1
                    if z_el[zmin_ind+i] > 43:
                        iadd = 2
                    if z_el[zmin_ind+i] > 61:
                        iadd = 3
                    denom=fe_adjusted*el_abu_sun[zmin_ind+iadd+i]
                    el_abu_dilution.append(num/denom)
                #print(el_abu_dilution)
                el_abu=el_abu_dilution
            # plot an elemental abundance distribution with labels:
            self.el_abu_log = np.log10(el_abu)
            chi2 = 0.
            if pin_filename!=None: # plotting the observation data
                # using zip() to plot multiple values for a single element
                # also calculate and return chi squared
                for xi,yi,wi in zip(z_el[zmin_ind:zmax_ind],el_abu_obs_log,xfe_sigma):
                    #print(xi)
                    pl.scatter([xi]*len(yi),yi,marker='o',s=25,color='black')
                    if all(wi)!=None:
                        pl.errorbar([xi]*len(yi),yi,wi,color='black',capsize=5)
                    if zchi2 != None:
                        #if zchi2[0] <= xi and xi <= zchi2[1]:
                        if xi in zchi2:
                            zelidx=where(z_el[zmin_ind:zmax_ind]==xi)[0][0]
                            chi2 += (((sum(yi)/len(yi)) - (np.log10(el_abu[zelidx])+offset))/\
                                (sum(wi)/len(wi)))**2
                #pl.scatter(z_el[zmin_ind:zmax_ind],z_ul,label='Upper limits',marker='v',color='black')
                pl.scatter(z_el[zmin_ind:zmax_ind],z_ul,marker='v',color='black')
            # plotting simulation data
            pl.plot(z_el[zmin_ind:zmax_ind],np.log10(el_abu)+offset,label=label,\
                linestyle=plotlines,color=colour,marker=mark)#,np.log10(el_abu))#,**kwargs)
            j=0 # add labels
            if plotlabels==True:
                for z in z_el[zmin_ind:zmax_ind]:
                    pl.text(z+0.15,log10(el_abu[j])+offset+0.05,el_name[j])
                    j += 1
            if title_items is not None:
                pl.title(self._do_title_string(title_items,cycle))
            if ylim[0]==0 and ylim[1]==0:
                ylim[0]=max(-15.0,min(np.log10(el_abu)+offset))
                ylim[1]=max(ylim[0]+1.0,max(np.log10(el_abu)+offset))
            pl.ylim(ylim[0],ylim[1])
            pl.xlabel('Z')
            #pl.legend()
            pl.grid(True)
            ylab=['log X/X$_{'+str(ref)+'}$','log mass fraction','log X/X$_{ref}$','log$\epsilon$']
            if ref==-2:
                pl.ylabel(ylab[2])
            elif ref>-1:
                if plotlabels==True:
                    pl.annotate('Offset: '+str(offset),xy=(0.05,0.95),xycoords='axes fraction')
                pl.ylabel(ylab[0])
            elif logeps==True:
                pl.ylabel(ylab[3])
            else:
                pl.ylabel(ylab[1])
            return chi2
        elif plotType=='se':
            # get self.***_iso_to_plot by calling iso_abund function, which writes them
            self.iso_abund(cycle,elemaburtn=True,**kwargs)
            z_el=unique(self.se.Z)
            zmin_ind=min(where(z_el>=zrange[0])[0])
            zmax_ind=max(where(z_el<=zrange[1])[0])
            # extract some elemental quantities:
            a_el=[]; el_name=[]; el_abu=[]; el_abu_hash={}
            for z in z_el[zmin_ind:zmax_ind]:
                el=self.el_iso_to_plot[where(self.se.Z==z)[0].tolist()[0]]
                X_el=self.abunds[where(self.el_iso_to_plot==el)[0].tolist()].sum()
                a_el.append(self.a_iso_to_plot[where(self.z_iso_to_plot==z)[0].tolist()[0]])
                el_abu.append(X_el)
                el_name.append(el)
                el_abu_hash[el]=X_el
            # plot an elemental abundance distribution with labels:
            if ref==0:
                el_abu_plot=el_abu
                ylab='log mass fraction'
            elif ref==1:
                from . import utils
                if ref_filename=='':
                    raise IOError('You chose to plot relative to the solar abundance dist. However, you did not supply the solar abundance file!')
                else:
                    nuutils.solar(ref_filename,1)
                    menow = where(unique(nuutils.z_sol)==44.)[0][0]
                    print(1, menow, nuutils.solar_elem_abund[menow])
                    el_abu_sun=np.array(nuutils.solar_elem_abund)
                    print(2, el_abu_sun)
                    print(3, el_abu_sun[42])
                    el_abu_plot=np.zeros(len(el_abu))
                    for zs in z_el[zmin_ind:zmax_ind]:
                        zelidx=where(z_el[zmin_ind:zmax_ind]==zs)[0]
                        zsolidx=zs-1
                        if el_abu_sun[zsolidx] > 0. :
                            el_abu_plot[zelidx]=old_div(el_abu[zelidx],el_abu_sun[zsolidx])
                        else:
                            el_abu_plot[zelidx]=-1
                ylab='log X/X$_\odot$'
            else:
                raise IOError('Your choice of ref is not available yet. Please use another.')
            if label != '':
                if colour!='':
                    print("Plotting without color and label:")
                    pl.plot(z_el[zmin_ind:zmax_ind],np.log10(el_abu_plot),
                            'o-',label=label,color=colour,markeredgecolor='None')
                else:
                    pl.plot(z_el[zmin_ind:zmax_ind],np.log10(el_abu_plot)
                            ,'o-',label=label,markeredgecolor='None')
            else:
                if colour!='':
                    pl.plot(z_el[zmin_ind:zmax_ind],np.log10(el_abu_plot),
                            'o-',color=colour,markeredgecolor='None')
                else:
                    pl.plot(z_el[zmin_ind:zmax_ind],np.log10(el_abu_plot),
                            'o-',markeredgecolor='None')
            if show_names:
                j=0 # add labels
                for z in z_el[zmin_ind:zmax_ind]:
                    # pl.text(z+0.15,log10(el_abu_plot[j])+0.05,el_name[j])
                    if el_abu_plot[j] > 0.:
                        pl.text(z,log10(el_abu_plot[j])+0.5,el_name[j],
                                horizontalalignment='center')
                    j += 1
            if title_items is not None:
                pl.title(self._do_title_string(title_items,cycle))
            if ylim[0]==0 and ylim[1]==0:
                ylim[0]=max(-15.0,min(np.log10(el_abu_plot)))
                ylim[1]=max(ylim[0]+1.0,max(np.log10(el_abu_plot)))
            pl.ylim(ylim[0],ylim[1])
            pl.xlabel('Z')
            pl.ylabel(ylab)
            if label != '':
                pl.legend(loc='best').draw_frame(False)
            return z_el[zmin_ind:zmax_ind],el_abu_plot
        else:
            print('This method is not supported for '+plotType)
            return
        # NOTE(review): unreachable -- every branch above returns before this point.
        self.el_abu_hash = el_abu_hash
        self.el_name = el_name
        self.el_abu_log = np.log10(el_abu)
|
[
"def",
"elemental_abund",
"(",
"self",
",",
"cycle",
",",
"zrange",
"=",
"[",
"1",
",",
"85",
"]",
",",
"ylim",
"=",
"[",
"0",
",",
"0",
"]",
",",
"title_items",
"=",
"None",
",",
"ref",
"=",
"-",
"1",
",",
"ref_filename",
"=",
"None",
",",
"z_pin",
"=",
"None",
",",
"pin",
"=",
"None",
",",
"pin_filename",
"=",
"None",
",",
"zchi2",
"=",
"None",
",",
"logeps",
"=",
"False",
",",
"dilution",
"=",
"None",
",",
"show_names",
"=",
"True",
",",
"label",
"=",
"''",
",",
"colour",
"=",
"'black'",
",",
"plotlines",
"=",
"':'",
",",
"plotlabels",
"=",
"True",
",",
"mark",
"=",
"'x'",
",",
"*",
"*",
"kwargs",
")",
":",
"#from . import utils",
"from",
".",
"import",
"ascii_table",
"as",
"asci",
"plotType",
"=",
"self",
".",
"_classTest",
"(",
")",
"offset",
"=",
"0",
"if",
"ref_filename",
"!=",
"None",
":",
"ref",
"=",
"-",
"2",
"if",
"logeps",
"==",
"True",
":",
"if",
"zrange",
"[",
"0",
"]",
"!=",
"1",
":",
"print",
"(",
"\"To use logeps, the z range must be [1,X], otherwise the program will exit.\"",
")",
"sys",
".",
"exit",
"(",
")",
"z_pin",
"=",
"1",
"ref",
"=",
"-",
"3",
"if",
"plotType",
"==",
"'PPN'",
":",
"self",
".",
"get",
"(",
"cycle",
",",
"decayed",
"=",
"True",
")",
"z_el",
"=",
"unique",
"(",
"self",
".",
"z_iso_to_plot",
")",
"zmin_ind",
"=",
"min",
"(",
"where",
"(",
"z_el",
">=",
"zrange",
"[",
"0",
"]",
")",
"[",
"0",
"]",
")",
"zmax_ind",
"=",
"max",
"(",
"where",
"(",
"z_el",
"<=",
"zrange",
"[",
"1",
"]",
")",
"[",
"0",
"]",
")",
"# extract some elemental quantities:",
"a_el",
"=",
"[",
"]",
"el_name",
"=",
"[",
"]",
"el_abu",
"=",
"[",
"]",
"el_abu_hash",
"=",
"{",
"}",
"for",
"z",
"in",
"z_el",
"[",
"zmin_ind",
":",
"zmax_ind",
"]",
":",
"el",
"=",
"self",
".",
"el_iso_to_plot",
"[",
"where",
"(",
"self",
".",
"z_iso_to_plot",
"==",
"z",
")",
"[",
"0",
"]",
".",
"tolist",
"(",
")",
"[",
"0",
"]",
"]",
"X_el",
"=",
"self",
".",
"abunds",
"[",
"where",
"(",
"self",
".",
"el_iso_to_plot",
"==",
"el",
")",
"[",
"0",
"]",
".",
"tolist",
"(",
")",
"]",
".",
"sum",
"(",
")",
"# take all iso abunds for one Z and sum",
"a_el",
".",
"append",
"(",
"self",
".",
"a_iso_to_plot",
"[",
"where",
"(",
"self",
".",
"z_iso_to_plot",
"==",
"z",
")",
"[",
"0",
"]",
".",
"tolist",
"(",
")",
"[",
"0",
"]",
"]",
")",
"el_abu",
".",
"append",
"(",
"X_el",
")",
"el_name",
".",
"append",
"(",
"el",
")",
"el_abu_hash",
"[",
"el",
"]",
"=",
"X_el",
"fe_abund",
"=",
"self",
".",
"abunds",
"[",
"where",
"(",
"self",
".",
"el_iso_to_plot",
"==",
"'Fe'",
")",
"[",
"0",
"]",
".",
"tolist",
"(",
")",
"]",
".",
"sum",
"(",
")",
"# Fe abund is always needed to find [X/Fe]",
"self",
".",
"el_abu_hash",
"=",
"el_abu_hash",
"self",
".",
"el_name",
"=",
"el_name",
"# if we have provided a solar abundance file",
"if",
"ref",
"==",
"-",
"2",
":",
"from",
".",
"import",
"utils",
"utils",
".",
"solar",
"(",
"ref_filename",
",",
"1",
")",
"el_abu_sun",
"=",
"np",
".",
"array",
"(",
"utils",
".",
"solar_elem_abund",
")",
"el_abu_plot",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"el_abu",
")",
")",
"for",
"zs",
"in",
"z_el",
"[",
"zmin_ind",
":",
"zmax_ind",
"]",
":",
"zelidx",
"=",
"where",
"(",
"z_el",
"[",
"zmin_ind",
":",
"zmax_ind",
"]",
"==",
"zs",
")",
"[",
"0",
"]",
"zsolidx",
"=",
"int",
"(",
"zs",
"-",
"1",
")",
"if",
"el_abu_sun",
"[",
"zsolidx",
"]",
">",
"0.",
":",
"el_abu_plot",
"[",
"zelidx",
"]",
"=",
"el_abu",
"[",
"zelidx",
"[",
"0",
"]",
"]",
"/",
"el_abu_sun",
"[",
"zsolidx",
"]",
"else",
":",
"el_abu_plot",
"[",
"zelidx",
"]",
"=",
"-",
"1",
"# if we have provided a reference cycle number",
"elif",
"ref",
">",
"-",
"1",
":",
"self",
".",
"get",
"(",
"ref",
",",
"decayed",
"=",
"True",
")",
"z_el_ref",
"=",
"unique",
"(",
"self",
".",
"z_iso_to_plot",
")",
"zmin_ind",
"=",
"min",
"(",
"where",
"(",
"z_el_ref",
">=",
"zrange",
"[",
"0",
"]",
")",
"[",
"0",
"]",
")",
"zmax_ind",
"=",
"max",
"(",
"where",
"(",
"z_el_ref",
"<=",
"zrange",
"[",
"1",
"]",
")",
"[",
"0",
"]",
")",
"# extract some elemental quantities:",
"a_el_ref",
"=",
"[",
"]",
"el_name_ref",
"=",
"[",
"]",
"el_abu_ref",
"=",
"[",
"]",
"el_abu_hash_ref",
"=",
"{",
"}",
"el_abu_plot",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"el_abu",
")",
")",
"for",
"z_ref",
"in",
"z_el",
"[",
"zmin_ind",
":",
"zmax_ind",
"]",
":",
"el_ref",
"=",
"self",
".",
"el_iso_to_plot",
"[",
"where",
"(",
"self",
".",
"z_iso_to_plot",
"==",
"z_ref",
")",
"[",
"0",
"]",
".",
"tolist",
"(",
")",
"[",
"0",
"]",
"]",
"X_el_ref",
"=",
"self",
".",
"abunds",
"[",
"where",
"(",
"self",
".",
"el_iso_to_plot",
"==",
"el_ref",
")",
"[",
"0",
"]",
".",
"tolist",
"(",
")",
"]",
".",
"sum",
"(",
")",
"a_el_ref",
".",
"append",
"(",
"self",
".",
"a_iso_to_plot",
"[",
"where",
"(",
"self",
".",
"z_iso_to_plot",
"==",
"z_ref",
")",
"[",
"0",
"]",
".",
"tolist",
"(",
")",
"[",
"0",
"]",
"]",
")",
"el_abu_ref",
".",
"append",
"(",
"X_el_ref",
")",
"el_name_ref",
".",
"append",
"(",
"el_ref",
")",
"el_abu_hash_ref",
"[",
"el_ref",
"]",
"=",
"X_el",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"el_abu",
")",
")",
":",
"el_abu_plot",
"[",
"i",
"-",
"1",
"]",
"=",
"el_abu",
"[",
"i",
"-",
"1",
"]",
"/",
"el_abu_ref",
"[",
"i",
"-",
"1",
"]",
"# if we want to include observation data",
"if",
"pin_filename",
"!=",
"None",
":",
"print",
"(",
"'using the pin filename'",
")",
"obs_file",
"=",
"asci",
".",
"readTable",
"(",
"pin_filename",
",",
"header_char",
"=",
"'#'",
")",
"xfe_sigma",
"=",
"[",
"]",
"el_abu_obs_log",
"=",
"[",
"]",
"z_ul",
"=",
"[",
"]",
"for",
"z_i",
"in",
"z_el",
"[",
"zmin_ind",
":",
"zmax_ind",
"]",
":",
"try",
":",
"obs_file",
".",
"data",
"[",
"'[X/H]'",
"]",
"x_over",
"=",
"'[X/H]'",
"sigma",
"=",
"'sig_[X/H]'",
"except",
":",
"x_over",
"=",
"'[X/Fe]'",
"sigma",
"=",
"'sig_[X/Fe]'",
"zelidx",
"=",
"where",
"(",
"z_el",
"[",
"zmin_ind",
":",
"zmax_ind",
"]",
"==",
"z_i",
")",
"[",
"0",
"]",
"zpinidx",
"=",
"where",
"(",
"obs_file",
".",
"data",
"[",
"'Z'",
"]",
"==",
"z_i",
")",
"[",
"0",
"]",
"#str()",
"if",
"len",
"(",
"zpinidx",
")",
"==",
"0",
":",
"el_abu_obs_log",
".",
"append",
"(",
"[",
"None",
"]",
")",
"xfe_sigma",
".",
"append",
"(",
"[",
"None",
"]",
")",
"z_ul",
".",
"append",
"(",
"[",
"None",
"]",
")",
"elif",
"len",
"(",
"zpinidx",
")",
">",
"1",
":",
"'''if any(obs_file.data['ul'][zpinidx].astype(int))==1:\n print('hi')\n tmp=obs_file.data['[X/Fe]'][zpinidx].astype(float)\n z_ul.append(tmp.tolist())\n el_abu_obs_log.append([None]*len(zpinidx))\n xfe_sigma.append([None]*len(zpinidx))\n else:'''",
"tmp",
"=",
"obs_file",
".",
"data",
"[",
"x_over",
"]",
"[",
"zpinidx",
"]",
"#.astype(float) # array stores multiple values for a",
"el_abu_obs_log",
".",
"append",
"(",
"tmp",
".",
"tolist",
"(",
")",
")",
"# single element",
"tmp",
"=",
"obs_file",
".",
"data",
"[",
"sigma",
"]",
"[",
"zpinidx",
"]",
"#.astype(float)",
"xfe_sigma",
".",
"append",
"(",
"tmp",
".",
"tolist",
"(",
")",
")",
"z_ul",
".",
"append",
"(",
"[",
"None",
"]",
")",
"else",
":",
"if",
"obs_file",
".",
"data",
"[",
"'ul'",
"]",
"[",
"zpinidx",
"]",
"==",
"1",
":",
"#.astype(int)",
"tmp",
"=",
"obs_file",
".",
"data",
"[",
"x_over",
"]",
"[",
"zpinidx",
"]",
"#.astype(float)",
"z_ul",
".",
"append",
"(",
"tmp",
".",
"tolist",
"(",
")",
")",
"tmp",
"=",
"obs_file",
".",
"data",
"[",
"x_over",
"]",
"[",
"zpinidx",
"]",
"#.astype(float)",
"el_abu_obs_log",
".",
"append",
"(",
"[",
"None",
"]",
")",
"xfe_sigma",
".",
"append",
"(",
"[",
"None",
"]",
")",
"else",
":",
"tmp",
"=",
"obs_file",
".",
"data",
"[",
"x_over",
"]",
"[",
"zpinidx",
"]",
"[",
"0",
"]",
"#.astype(float)",
"el_abu_obs_log",
".",
"append",
"(",
"[",
"tmp",
"]",
")",
"tmp",
"=",
"obs_file",
".",
"data",
"[",
"sigma",
"]",
"[",
"zpinidx",
"]",
"[",
"0",
"]",
"#.astype(float)",
"xfe_sigma",
".",
"append",
"(",
"[",
"tmp",
"]",
")",
"z_ul",
".",
"append",
"(",
"[",
"None",
"]",
")",
"el_abu_obs",
"=",
"[",
"]",
"# set a pinned element for offset calculation and adjustment",
"if",
"z_pin",
"!=",
"None",
":",
"print",
"(",
"\"Pinned element: \"",
"+",
"str",
"(",
"z_pin",
")",
")",
"if",
"pin_filename",
"!=",
"None",
":",
"# converting obervation data from log to standard form for compatibility",
"# with later code",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"el_abu_obs_log",
")",
")",
":",
"if",
"all",
"(",
"el_abu_obs_log",
"[",
"i",
"]",
")",
"==",
"None",
":",
"el_abu_obs",
".",
"append",
"(",
"None",
")",
"else",
":",
"el_abu_obs",
".",
"append",
"(",
"np",
".",
"power",
"(",
"10",
",",
"el_abu_obs_log",
"[",
"i",
"]",
")",
")",
"el_abu_pin",
"=",
"el_abu_obs",
"elif",
"pin",
"!=",
"None",
":",
"print",
"(",
"'using manual pin'",
")",
"pin",
"=",
"np",
".",
"power",
"(",
"10",
",",
"pin",
")",
"el_abu_pin",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"el_abu",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"el_abu",
")",
")",
":",
"el_abu_pin",
"[",
"i",
"-",
"1",
"]",
"=",
"pin",
"elif",
"logeps",
"==",
"True",
":",
"print",
"(",
"'finding log eps'",
")",
"atomic_mass",
"=",
"[",
"1.008",
",",
"4.003",
",",
"6.94",
",",
"9.012",
",",
"10.81",
",",
"12.011",
",",
"14.007",
",",
"15.999",
",",
"18.998",
",",
"20.18",
",",
"22.99",
",",
"24.305",
",",
"26.982",
",",
"28.085",
",",
"30.74",
",",
"32.06",
",",
"35.45",
",",
"39.948",
",",
"39.098",
",",
"40.078",
",",
"44.956",
",",
"47.867",
",",
"50.942",
",",
"51.996",
",",
"54.938",
",",
"55.845",
",",
"58.933",
",",
"58.693",
",",
"6.46",
",",
"65.38",
",",
"69.723",
",",
"72.63",
",",
"74.922",
",",
"78.971",
",",
"79.904",
",",
"83.798",
",",
"85.468",
",",
"87.62",
",",
"88.906",
",",
"91.224",
",",
"92.906",
",",
"95.95",
",",
"97.",
",",
"01.07",
",",
"102.906",
",",
"106.42",
",",
"107.868",
",",
"112.414",
",",
"114.818",
",",
"118.71",
",",
"121.76",
",",
"127.6",
",",
"126.904",
",",
"131.293",
",",
"132.905",
",",
"137.27",
",",
"138.905",
",",
"140.116",
",",
"140.908",
",",
"144.242",
",",
"145.",
",",
"150.36",
",",
"151.964",
",",
"157.25",
",",
"158.925",
",",
"162.5",
",",
"164.93",
",",
"167.259",
",",
"18.934",
",",
"173.045",
",",
"174.967",
",",
"178.49",
",",
"180.948",
",",
"183.84",
",",
"186.207",
",",
"190.23",
",",
"192.217",
",",
"195.084",
",",
"196.967",
",",
"200.592",
",",
"24.38",
",",
"207.2",
",",
"208.98",
",",
"209.",
",",
"210.",
",",
"222.",
",",
"223.",
",",
"226.",
",",
"227.",
",",
"232.038",
",",
"231.036",
",",
"238.029",
",",
"237.",
",",
"244.",
",",
"243.",
",",
"247.",
",",
"247.",
",",
"251.",
",",
"252.",
",",
"257.",
",",
"258.",
",",
"259.",
",",
"262.",
",",
"267.",
",",
"270.",
",",
"269.",
",",
"270.",
",",
"270.",
",",
"278.",
",",
"281.",
",",
"281.",
",",
"285.",
",",
"286.",
",",
"289.",
",",
"289.",
",",
"293.",
",",
"293.",
",",
"294.",
"]",
"# this belongs in utils! (FH)",
"el_abu_pin",
"=",
"atomic_mass",
"el_abu_plot",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"el_abu",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"el_abu",
")",
")",
":",
"el_abu_plot",
"[",
"i",
"-",
"1",
"]",
"=",
"el_abu",
"[",
"i",
"-",
"1",
"]",
"/",
"el_abu_pin",
"[",
"i",
"-",
"1",
"]",
"elif",
"ref",
"==",
"-",
"2",
":",
"print",
"(",
"'using solar pin'",
")",
"el_abu_pin",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"el_abu",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"el_abu",
")",
")",
":",
"el_abu_pin",
"[",
"i",
"-",
"1",
"]",
"=",
"el_abu",
"[",
"i",
"-",
"1",
"]",
"/",
"el_abu_sun",
"[",
"i",
"-",
"1",
"]",
"elif",
"ref",
">=",
"0",
":",
"print",
"(",
"\"Error: A reference file or manual pin is required - the plot will fail\"",
")",
"'''elif ref>=0:\n print('using ref pin')\n el_abu_pin=np.zeros(len(el_abu))\n for i in range(len(el_abu)):nacon\n el_abu_pin[i-1]=el_abu[i-1]/el_abu_ref[i-1]\n print(el_abu)\n print(el_abu_ref)\n print(el_abu_pin)\n print(el_abu_plot)'''",
"# calculating the offset value",
"zelidx",
"=",
"where",
"(",
"z_el",
"[",
"zmin_ind",
":",
"zmax_ind",
"]",
"==",
"z_pin",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"offset",
"=",
"np",
".",
"log10",
"(",
"el_abu_pin",
"[",
"zelidx",
"]",
")",
"-",
"np",
".",
"log10",
"(",
"el_abu_plot",
"[",
"zelidx",
"]",
")",
"if",
"ref",
"!=",
"-",
"1",
"and",
"dilution",
"==",
"None",
":",
"el_abu",
"=",
"el_abu_plot",
"if",
"dilution",
"!=",
"None",
":",
"self",
".",
"get",
"(",
"0",
",",
"decayed",
"=",
"True",
")",
"z_el_ini",
"=",
"unique",
"(",
"self",
".",
"z_iso_to_plot",
")",
"zmin_ind",
"=",
"min",
"(",
"where",
"(",
"z_el",
">=",
"zrange",
"[",
"0",
"]",
")",
"[",
"0",
"]",
")",
"zmax_ind",
"=",
"max",
"(",
"where",
"(",
"z_el",
"<=",
"zrange",
"[",
"1",
"]",
")",
"[",
"0",
"]",
")",
"# extract some elemental quantities:",
"a_el_ini",
"=",
"[",
"]",
"el_name_ini",
"=",
"[",
"]",
"el_abu_ini",
"=",
"[",
"]",
"el_abu_hash_ini",
"=",
"{",
"}",
"for",
"z",
"in",
"z_el_ini",
":",
"el_ini",
"=",
"self",
".",
"el_iso_to_plot",
"[",
"where",
"(",
"self",
".",
"z_iso_to_plot",
"==",
"z",
")",
"[",
"0",
"]",
".",
"tolist",
"(",
")",
"[",
"0",
"]",
"]",
"X_el_ini",
"=",
"self",
".",
"abunds",
"[",
"where",
"(",
"self",
".",
"el_iso_to_plot",
"==",
"el_ini",
")",
"[",
"0",
"]",
".",
"tolist",
"(",
")",
"]",
".",
"sum",
"(",
")",
"# take all iso abunds for one Z and sum",
"a_el_ini",
".",
"append",
"(",
"self",
".",
"a_iso_to_plot",
"[",
"where",
"(",
"self",
".",
"z_iso_to_plot",
"==",
"z",
")",
"[",
"0",
"]",
".",
"tolist",
"(",
")",
"[",
"0",
"]",
"]",
")",
"el_abu_ini",
".",
"append",
"(",
"X_el_ini",
")",
"el_name_ini",
".",
"append",
"(",
"el_ini",
")",
"el_abu_hash_ini",
"[",
"el",
"]",
"=",
"X_el_ini",
"el_abu_dilution",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"el_abu",
")",
")",
":",
"el_adjusted",
"=",
"(",
"dilution",
"*",
"el_abu",
"[",
"i",
"]",
")",
"+",
"(",
"(",
"1",
"-",
"dilution",
")",
"*",
"el_abu_ini",
"[",
"zmin_ind",
"+",
"i",
"]",
")",
"fe_adjusted",
"=",
"(",
"dilution",
"*",
"fe_abund",
")",
"+",
"(",
"(",
"1",
"-",
"dilution",
")",
"*",
"el_abu_ini",
"[",
"24",
"]",
")",
"num",
"=",
"el_adjusted",
"*",
"el_abu_sun",
"[",
"25",
"]",
"iadd",
"=",
"1",
"if",
"z_el",
"[",
"zmin_ind",
"+",
"i",
"]",
">",
"43",
":",
"iadd",
"=",
"2",
"if",
"z_el",
"[",
"zmin_ind",
"+",
"i",
"]",
">",
"61",
":",
"iadd",
"=",
"3",
"denom",
"=",
"fe_adjusted",
"*",
"el_abu_sun",
"[",
"zmin_ind",
"+",
"iadd",
"+",
"i",
"]",
"el_abu_dilution",
".",
"append",
"(",
"num",
"/",
"denom",
")",
"#print(el_abu_dilution)",
"el_abu",
"=",
"el_abu_dilution",
"# plot an elemental abundance distribution with labels:",
"self",
".",
"el_abu_log",
"=",
"np",
".",
"log10",
"(",
"el_abu",
")",
"chi2",
"=",
"0.",
"if",
"pin_filename",
"!=",
"None",
":",
"# plotting the observation data",
"# using zip() to plot multiple values for a single element",
"# also calculate and return chi squared",
"for",
"xi",
",",
"yi",
",",
"wi",
"in",
"zip",
"(",
"z_el",
"[",
"zmin_ind",
":",
"zmax_ind",
"]",
",",
"el_abu_obs_log",
",",
"xfe_sigma",
")",
":",
"#print(xi)",
"pl",
".",
"scatter",
"(",
"[",
"xi",
"]",
"*",
"len",
"(",
"yi",
")",
",",
"yi",
",",
"marker",
"=",
"'o'",
",",
"s",
"=",
"25",
",",
"color",
"=",
"'black'",
")",
"if",
"all",
"(",
"wi",
")",
"!=",
"None",
":",
"pl",
".",
"errorbar",
"(",
"[",
"xi",
"]",
"*",
"len",
"(",
"yi",
")",
",",
"yi",
",",
"wi",
",",
"color",
"=",
"'black'",
",",
"capsize",
"=",
"5",
")",
"if",
"zchi2",
"!=",
"None",
":",
"#if zchi2[0] <= xi and xi <= zchi2[1]:",
"if",
"xi",
"in",
"zchi2",
":",
"zelidx",
"=",
"where",
"(",
"z_el",
"[",
"zmin_ind",
":",
"zmax_ind",
"]",
"==",
"xi",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"chi2",
"+=",
"(",
"(",
"(",
"sum",
"(",
"yi",
")",
"/",
"len",
"(",
"yi",
")",
")",
"-",
"(",
"np",
".",
"log10",
"(",
"el_abu",
"[",
"zelidx",
"]",
")",
"+",
"offset",
")",
")",
"/",
"(",
"sum",
"(",
"wi",
")",
"/",
"len",
"(",
"wi",
")",
")",
")",
"**",
"2",
"#pl.scatter(z_el[zmin_ind:zmax_ind],z_ul,label='Upper limits',marker='v',color='black')",
"pl",
".",
"scatter",
"(",
"z_el",
"[",
"zmin_ind",
":",
"zmax_ind",
"]",
",",
"z_ul",
",",
"marker",
"=",
"'v'",
",",
"color",
"=",
"'black'",
")",
"# plotting simulation data",
"pl",
".",
"plot",
"(",
"z_el",
"[",
"zmin_ind",
":",
"zmax_ind",
"]",
",",
"np",
".",
"log10",
"(",
"el_abu",
")",
"+",
"offset",
",",
"label",
"=",
"label",
",",
"linestyle",
"=",
"plotlines",
",",
"color",
"=",
"colour",
",",
"marker",
"=",
"mark",
")",
"#,np.log10(el_abu))#,**kwargs)",
"j",
"=",
"0",
"# add labels",
"if",
"plotlabels",
"==",
"True",
":",
"for",
"z",
"in",
"z_el",
"[",
"zmin_ind",
":",
"zmax_ind",
"]",
":",
"pl",
".",
"text",
"(",
"z",
"+",
"0.15",
",",
"log10",
"(",
"el_abu",
"[",
"j",
"]",
")",
"+",
"offset",
"+",
"0.05",
",",
"el_name",
"[",
"j",
"]",
")",
"j",
"+=",
"1",
"if",
"title_items",
"is",
"not",
"None",
":",
"pl",
".",
"title",
"(",
"self",
".",
"_do_title_string",
"(",
"title_items",
",",
"cycle",
")",
")",
"if",
"ylim",
"[",
"0",
"]",
"==",
"0",
"and",
"ylim",
"[",
"1",
"]",
"==",
"0",
":",
"ylim",
"[",
"0",
"]",
"=",
"max",
"(",
"-",
"15.0",
",",
"min",
"(",
"np",
".",
"log10",
"(",
"el_abu",
")",
"+",
"offset",
")",
")",
"ylim",
"[",
"1",
"]",
"=",
"max",
"(",
"ylim",
"[",
"0",
"]",
"+",
"1.0",
",",
"max",
"(",
"np",
".",
"log10",
"(",
"el_abu",
")",
"+",
"offset",
")",
")",
"pl",
".",
"ylim",
"(",
"ylim",
"[",
"0",
"]",
",",
"ylim",
"[",
"1",
"]",
")",
"pl",
".",
"xlabel",
"(",
"'Z'",
")",
"#pl.legend()",
"pl",
".",
"grid",
"(",
"True",
")",
"ylab",
"=",
"[",
"'log X/X$_{'",
"+",
"str",
"(",
"ref",
")",
"+",
"'}$'",
",",
"'log mass fraction'",
",",
"'log X/X$_{ref}$'",
",",
"'log$\\epsilon$'",
"]",
"if",
"ref",
"==",
"-",
"2",
":",
"pl",
".",
"ylabel",
"(",
"ylab",
"[",
"2",
"]",
")",
"elif",
"ref",
">",
"-",
"1",
":",
"if",
"plotlabels",
"==",
"True",
":",
"pl",
".",
"annotate",
"(",
"'Offset: '",
"+",
"str",
"(",
"offset",
")",
",",
"xy",
"=",
"(",
"0.05",
",",
"0.95",
")",
",",
"xycoords",
"=",
"'axes fraction'",
")",
"pl",
".",
"ylabel",
"(",
"ylab",
"[",
"0",
"]",
")",
"elif",
"logeps",
"==",
"True",
":",
"pl",
".",
"ylabel",
"(",
"ylab",
"[",
"3",
"]",
")",
"else",
":",
"pl",
".",
"ylabel",
"(",
"ylab",
"[",
"1",
"]",
")",
"return",
"chi2",
"elif",
"plotType",
"==",
"'se'",
":",
"# get self.***_iso_to_plot by calling iso_abund function, which writes them",
"self",
".",
"iso_abund",
"(",
"cycle",
",",
"elemaburtn",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"z_el",
"=",
"unique",
"(",
"self",
".",
"se",
".",
"Z",
")",
"zmin_ind",
"=",
"min",
"(",
"where",
"(",
"z_el",
">=",
"zrange",
"[",
"0",
"]",
")",
"[",
"0",
"]",
")",
"zmax_ind",
"=",
"max",
"(",
"where",
"(",
"z_el",
"<=",
"zrange",
"[",
"1",
"]",
")",
"[",
"0",
"]",
")",
"# extract some elemental quantities:",
"a_el",
"=",
"[",
"]",
"el_name",
"=",
"[",
"]",
"el_abu",
"=",
"[",
"]",
"el_abu_hash",
"=",
"{",
"}",
"for",
"z",
"in",
"z_el",
"[",
"zmin_ind",
":",
"zmax_ind",
"]",
":",
"el",
"=",
"self",
".",
"el_iso_to_plot",
"[",
"where",
"(",
"self",
".",
"se",
".",
"Z",
"==",
"z",
")",
"[",
"0",
"]",
".",
"tolist",
"(",
")",
"[",
"0",
"]",
"]",
"X_el",
"=",
"self",
".",
"abunds",
"[",
"where",
"(",
"self",
".",
"el_iso_to_plot",
"==",
"el",
")",
"[",
"0",
"]",
".",
"tolist",
"(",
")",
"]",
".",
"sum",
"(",
")",
"a_el",
".",
"append",
"(",
"self",
".",
"a_iso_to_plot",
"[",
"where",
"(",
"self",
".",
"z_iso_to_plot",
"==",
"z",
")",
"[",
"0",
"]",
".",
"tolist",
"(",
")",
"[",
"0",
"]",
"]",
")",
"el_abu",
".",
"append",
"(",
"X_el",
")",
"el_name",
".",
"append",
"(",
"el",
")",
"el_abu_hash",
"[",
"el",
"]",
"=",
"X_el",
"# plot an elemental abundance distribution with labels:",
"if",
"ref",
"==",
"0",
":",
"el_abu_plot",
"=",
"el_abu",
"ylab",
"=",
"'log mass fraction'",
"elif",
"ref",
"==",
"1",
":",
"from",
".",
"import",
"utils",
"if",
"ref_filename",
"==",
"''",
":",
"raise",
"IOError",
"(",
"'You chose to plot relative to the solar abundance dist. However, you did not supply the solar abundance file!'",
")",
"else",
":",
"nuutils",
".",
"solar",
"(",
"ref_filename",
",",
"1",
")",
"menow",
"=",
"where",
"(",
"unique",
"(",
"nuutils",
".",
"z_sol",
")",
"==",
"44.",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"print",
"(",
"1",
",",
"menow",
",",
"nuutils",
".",
"solar_elem_abund",
"[",
"menow",
"]",
")",
"el_abu_sun",
"=",
"np",
".",
"array",
"(",
"nuutils",
".",
"solar_elem_abund",
")",
"print",
"(",
"2",
",",
"el_abu_sun",
")",
"print",
"(",
"3",
",",
"el_abu_sun",
"[",
"42",
"]",
")",
"el_abu_plot",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"el_abu",
")",
")",
"for",
"zs",
"in",
"z_el",
"[",
"zmin_ind",
":",
"zmax_ind",
"]",
":",
"zelidx",
"=",
"where",
"(",
"z_el",
"[",
"zmin_ind",
":",
"zmax_ind",
"]",
"==",
"zs",
")",
"[",
"0",
"]",
"zsolidx",
"=",
"zs",
"-",
"1",
"if",
"el_abu_sun",
"[",
"zsolidx",
"]",
">",
"0.",
":",
"el_abu_plot",
"[",
"zelidx",
"]",
"=",
"old_div",
"(",
"el_abu",
"[",
"zelidx",
"]",
",",
"el_abu_sun",
"[",
"zsolidx",
"]",
")",
"else",
":",
"el_abu_plot",
"[",
"zelidx",
"]",
"=",
"-",
"1",
"ylab",
"=",
"'",
"o",
"dot",
"$",
"'",
"else",
":",
"raise",
"IOError",
"(",
"'Your choice of ref is not available yet. Please use another.'",
")",
"if",
"label",
"!=",
"''",
":",
"if",
"colour",
"!=",
"''",
":",
"print",
"(",
"\"Plotting without color and label:\"",
")",
"pl",
".",
"plot",
"(",
"z_el",
"[",
"zmin_ind",
":",
"zmax_ind",
"]",
",",
"np",
".",
"log10",
"(",
"el_abu_plot",
")",
",",
"'o-'",
",",
"label",
"=",
"label",
",",
"color",
"=",
"colour",
",",
"markeredgecolor",
"=",
"'None'",
")",
"else",
":",
"pl",
".",
"plot",
"(",
"z_el",
"[",
"zmin_ind",
":",
"zmax_ind",
"]",
",",
"np",
".",
"log10",
"(",
"el_abu_plot",
")",
",",
"'o-'",
",",
"label",
"=",
"label",
",",
"markeredgecolor",
"=",
"'None'",
")",
"else",
":",
"if",
"colour",
"!=",
"''",
":",
"pl",
".",
"plot",
"(",
"z_el",
"[",
"zmin_ind",
":",
"zmax_ind",
"]",
",",
"np",
".",
"log10",
"(",
"el_abu_plot",
")",
",",
"'o-'",
",",
"color",
"=",
"colour",
",",
"markeredgecolor",
"=",
"'None'",
")",
"else",
":",
"pl",
".",
"plot",
"(",
"z_el",
"[",
"zmin_ind",
":",
"zmax_ind",
"]",
",",
"np",
".",
"log10",
"(",
"el_abu_plot",
")",
",",
"'o-'",
",",
"markeredgecolor",
"=",
"'None'",
")",
"if",
"show_names",
":",
"j",
"=",
"0",
"# add labels",
"for",
"z",
"in",
"z_el",
"[",
"zmin_ind",
":",
"zmax_ind",
"]",
":",
"# pl.text(z+0.15,log10(el_abu_plot[j])+0.05,el_name[j])",
"if",
"el_abu_plot",
"[",
"j",
"]",
">",
"0.",
":",
"pl",
".",
"text",
"(",
"z",
",",
"log10",
"(",
"el_abu_plot",
"[",
"j",
"]",
")",
"+",
"0.5",
",",
"el_name",
"[",
"j",
"]",
",",
"horizontalalignment",
"=",
"'center'",
")",
"j",
"+=",
"1",
"if",
"title_items",
"is",
"not",
"None",
":",
"pl",
".",
"title",
"(",
"self",
".",
"_do_title_string",
"(",
"title_items",
",",
"cycle",
")",
")",
"if",
"ylim",
"[",
"0",
"]",
"==",
"0",
"and",
"ylim",
"[",
"1",
"]",
"==",
"0",
":",
"ylim",
"[",
"0",
"]",
"=",
"max",
"(",
"-",
"15.0",
",",
"min",
"(",
"np",
".",
"log10",
"(",
"el_abu_plot",
")",
")",
")",
"ylim",
"[",
"1",
"]",
"=",
"max",
"(",
"ylim",
"[",
"0",
"]",
"+",
"1.0",
",",
"max",
"(",
"np",
".",
"log10",
"(",
"el_abu_plot",
")",
")",
")",
"pl",
".",
"ylim",
"(",
"ylim",
"[",
"0",
"]",
",",
"ylim",
"[",
"1",
"]",
")",
"pl",
".",
"xlabel",
"(",
"'Z'",
")",
"pl",
".",
"ylabel",
"(",
"ylab",
")",
"if",
"label",
"!=",
"''",
":",
"pl",
".",
"legend",
"(",
"loc",
"=",
"'best'",
")",
".",
"draw_frame",
"(",
"False",
")",
"return",
"z_el",
"[",
"zmin_ind",
":",
"zmax_ind",
"]",
",",
"el_abu_plot",
"else",
":",
"print",
"(",
"'This method is not supported for '",
"+",
"plotType",
")",
"return",
"self",
".",
"el_abu_hash",
"=",
"el_abu_hash",
"self",
".",
"el_name",
"=",
"el_name",
"self",
".",
"el_abu_log",
"=",
"np",
".",
"log10",
"(",
"el_abu",
")"
] | 51.540476
| 22.459524
|
def transform_standard_normal(df):
    """Transform a series or the rows of a dataframe to the values of a standard
    normal based on rank.

    Parameters
    ----------
    df : pandas.DataFrame or pandas.Series
        For a DataFrame, each row is transformed independently; for a Series,
        the whole series is transformed.

    Returns
    -------
    pandas.DataFrame or pandas.Series
        Object with the same index (and columns, for a DataFrame) as the
        input, holding the standard-normal quantiles that correspond to the
        ranks of the input values.

    Raises
    ------
    TypeError
        If ``df`` is neither a pandas DataFrame nor a pandas Series.
    """
    import pandas as pd
    import scipy.stats as stats
    # isinstance (rather than type equality) also accepts subclasses.
    if isinstance(df, pd.DataFrame):
        # Rank within each row, rescale ranks to the open interval (0, 1),
        # then map through the normal inverse CDF.
        gc_ranks = df.rank(axis=1)
        gc_ranks = gc_ranks / (gc_ranks.shape[1] + 1)
        std_norm = stats.norm.ppf(gc_ranks)
        std_norm = pd.DataFrame(std_norm, index=gc_ranks.index,
                                columns=gc_ranks.columns)
    elif isinstance(df, pd.Series):
        gc_ranks = df.rank()
        gc_ranks = gc_ranks / (gc_ranks.shape[0] + 1)
        std_norm = stats.norm.ppf(gc_ranks)
        std_norm = pd.Series(std_norm, index=df.index)
    else:
        # Previously an unsupported input fell through to `return std_norm`
        # and raised an opaque NameError; fail with a clear message instead.
        raise TypeError('df must be a pandas DataFrame or Series, '
                        'got {}'.format(type(df).__name__))
    return std_norm
|
[
"def",
"transform_standard_normal",
"(",
"df",
")",
":",
"import",
"pandas",
"as",
"pd",
"import",
"scipy",
".",
"stats",
"as",
"stats",
"if",
"type",
"(",
"df",
")",
"==",
"pd",
".",
"core",
".",
"frame",
".",
"DataFrame",
":",
"gc_ranks",
"=",
"df",
".",
"rank",
"(",
"axis",
"=",
"1",
")",
"gc_ranks",
"=",
"gc_ranks",
"/",
"(",
"gc_ranks",
".",
"shape",
"[",
"1",
"]",
"+",
"1",
")",
"std_norm",
"=",
"stats",
".",
"norm",
".",
"ppf",
"(",
"gc_ranks",
")",
"std_norm",
"=",
"pd",
".",
"DataFrame",
"(",
"std_norm",
",",
"index",
"=",
"gc_ranks",
".",
"index",
",",
"columns",
"=",
"gc_ranks",
".",
"columns",
")",
"elif",
"type",
"(",
"df",
")",
"==",
"pd",
".",
"core",
".",
"series",
".",
"Series",
":",
"gc_ranks",
"=",
"df",
".",
"rank",
"(",
")",
"gc_ranks",
"=",
"gc_ranks",
"/",
"(",
"gc_ranks",
".",
"shape",
"[",
"0",
"]",
"+",
"1",
")",
"std_norm",
"=",
"stats",
".",
"norm",
".",
"ppf",
"(",
"gc_ranks",
")",
"std_norm",
"=",
"pd",
".",
"Series",
"(",
"std_norm",
",",
"index",
"=",
"df",
".",
"index",
")",
"return",
"std_norm"
] | 42.941176
| 9.647059
|
def BitVecSym(
    name: str, size: int, annotations: Annotations = None
) -> z3.BitVecRef:
    """Creates a new bit vector with a symbolic value."""
    # NOTE(review): `annotations` is accepted for interface symmetry but is
    # not attached to the created symbol here -- confirm this is intended.
    symbol = z3.BitVec(name, size)
    return symbol
|
[
"def",
"BitVecSym",
"(",
"name",
":",
"str",
",",
"size",
":",
"int",
",",
"annotations",
":",
"Annotations",
"=",
"None",
")",
"->",
"z3",
".",
"BitVecRef",
":",
"return",
"z3",
".",
"BitVec",
"(",
"name",
",",
"size",
")"
] | 38.8
| 13.8
|
def get_capabilities_properties(d_info,
                                capa_keys,
                                gpu_ids,
                                fpga_ids=None,
                                **kwargs):
    """get capabilities properties

    This function returns a dictionary which contains keys
    and their values from the report.

    :param d_info: the dictionary of ipmitool parameters for accessing a node.
    :param capa_keys: a list of keys for additional capabilities properties.
    :param gpu_ids: the list of string contains <vendorID>/<deviceID>
        for GPU.
    :param fpga_ids: the list of string contains <vendorID>/<deviceID>
        for CPU FPGA.
    :param kwargs: additional arguments passed to scciclient.
    :returns: a dictionary which contains keys and their values.
    """
    client = snmp.SNMPClient(d_info['irmc_address'],
                             d_info['irmc_snmp_port'],
                             d_info['irmc_snmp_version'],
                             d_info['irmc_snmp_community'],
                             d_info['irmc_snmp_security'])
    try:
        props = {}
        # SNMP-backed properties first, in the same order as before.
        if 'rom_firmware_version' in capa_keys:
            props['rom_firmware_version'] = (
                snmp.get_bios_firmware_version(client))
        if 'irmc_firmware_version' in capa_keys:
            props['irmc_firmware_version'] = (
                snmp.get_irmc_firmware_version(client))
        if 'server_model' in capa_keys:
            props['server_model'] = snmp.get_server_model(client)
        # Sometime the server started but PCI device list building is
        # still in progress so system will response error. We have to wait
        # for some more seconds.
        needs_pci = any(k in capa_keys
                        for k in ('pci_gpu_devices', 'cpu_fpga'))
        if kwargs.get('sleep_flag', False) and needs_pci:
            time.sleep(5)
        if 'pci_gpu_devices' in capa_keys:
            props['pci_gpu_devices'] = ipmi.get_pci_device(d_info, gpu_ids)
        if fpga_ids is not None and 'cpu_fpga' in capa_keys:
            props['cpu_fpga'] = ipmi.get_pci_device(d_info, fpga_ids)
        if 'trusted_boot' in capa_keys:
            props['trusted_boot'] = ipmi.get_tpm_status(d_info)
        return props
    except (snmp.SNMPFailure, ipmi.IPMIFailure) as err:
        raise SCCIClientError('Capabilities inspection failed: %s' % err)
|
[
"def",
"get_capabilities_properties",
"(",
"d_info",
",",
"capa_keys",
",",
"gpu_ids",
",",
"fpga_ids",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"snmp_client",
"=",
"snmp",
".",
"SNMPClient",
"(",
"d_info",
"[",
"'irmc_address'",
"]",
",",
"d_info",
"[",
"'irmc_snmp_port'",
"]",
",",
"d_info",
"[",
"'irmc_snmp_version'",
"]",
",",
"d_info",
"[",
"'irmc_snmp_community'",
"]",
",",
"d_info",
"[",
"'irmc_snmp_security'",
"]",
")",
"try",
":",
"v",
"=",
"{",
"}",
"if",
"'rom_firmware_version'",
"in",
"capa_keys",
":",
"v",
"[",
"'rom_firmware_version'",
"]",
"=",
"snmp",
".",
"get_bios_firmware_version",
"(",
"snmp_client",
")",
"if",
"'irmc_firmware_version'",
"in",
"capa_keys",
":",
"v",
"[",
"'irmc_firmware_version'",
"]",
"=",
"snmp",
".",
"get_irmc_firmware_version",
"(",
"snmp_client",
")",
"if",
"'server_model'",
"in",
"capa_keys",
":",
"v",
"[",
"'server_model'",
"]",
"=",
"snmp",
".",
"get_server_model",
"(",
"snmp_client",
")",
"# Sometime the server started but PCI device list building is",
"# still in progress so system will response error. We have to wait",
"# for some more seconds.",
"if",
"kwargs",
".",
"get",
"(",
"'sleep_flag'",
",",
"False",
")",
"and",
"any",
"(",
"k",
"in",
"capa_keys",
"for",
"k",
"in",
"(",
"'pci_gpu_devices'",
",",
"'cpu_fpga'",
")",
")",
":",
"time",
".",
"sleep",
"(",
"5",
")",
"if",
"'pci_gpu_devices'",
"in",
"capa_keys",
":",
"v",
"[",
"'pci_gpu_devices'",
"]",
"=",
"ipmi",
".",
"get_pci_device",
"(",
"d_info",
",",
"gpu_ids",
")",
"if",
"fpga_ids",
"is",
"not",
"None",
"and",
"'cpu_fpga'",
"in",
"capa_keys",
":",
"v",
"[",
"'cpu_fpga'",
"]",
"=",
"ipmi",
".",
"get_pci_device",
"(",
"d_info",
",",
"fpga_ids",
")",
"if",
"'trusted_boot'",
"in",
"capa_keys",
":",
"v",
"[",
"'trusted_boot'",
"]",
"=",
"ipmi",
".",
"get_tpm_status",
"(",
"d_info",
")",
"return",
"v",
"except",
"(",
"snmp",
".",
"SNMPFailure",
",",
"ipmi",
".",
"IPMIFailure",
")",
"as",
"err",
":",
"raise",
"SCCIClientError",
"(",
"'Capabilities inspection failed: %s'",
"%",
"err",
")"
] | 40.086207
| 21.517241
|
def config():
    '''
    Shows the current configuration.
    '''
    cfg = get_config()
    # Helper so every value is emphasised the same way.
    bold = lambda text: click.style(text, bold=True)
    print('Client version: {0}'.format(bold(__version__)))
    print('API endpoint: {0}'.format(bold(str(cfg.endpoint))))
    print('API version: {0}'.format(bold(cfg.version)))
    print('Access key: "{0}"'.format(bold(cfg.access_key)))
    # Show only the first 6 and last 10 characters of the secret.
    masked_skey = cfg.secret_key[:6] + ('*' * 24) + cfg.secret_key[-10:]
    print('Secret key: "{0}"'.format(bold(masked_skey)))
    print('Signature hash type: {0}'.format(bold(cfg.hash_type)))
    print('Skip SSL certificate validation? {0}'.format(
        bold(str(cfg.skip_sslcert_validation))))
|
[
"def",
"config",
"(",
")",
":",
"config",
"=",
"get_config",
"(",
")",
"print",
"(",
"'Client version: {0}'",
".",
"format",
"(",
"click",
".",
"style",
"(",
"__version__",
",",
"bold",
"=",
"True",
")",
")",
")",
"print",
"(",
"'API endpoint: {0}'",
".",
"format",
"(",
"click",
".",
"style",
"(",
"str",
"(",
"config",
".",
"endpoint",
")",
",",
"bold",
"=",
"True",
")",
")",
")",
"print",
"(",
"'API version: {0}'",
".",
"format",
"(",
"click",
".",
"style",
"(",
"config",
".",
"version",
",",
"bold",
"=",
"True",
")",
")",
")",
"print",
"(",
"'Access key: \"{0}\"'",
".",
"format",
"(",
"click",
".",
"style",
"(",
"config",
".",
"access_key",
",",
"bold",
"=",
"True",
")",
")",
")",
"masked_skey",
"=",
"config",
".",
"secret_key",
"[",
":",
"6",
"]",
"+",
"(",
"'*'",
"*",
"24",
")",
"+",
"config",
".",
"secret_key",
"[",
"-",
"10",
":",
"]",
"print",
"(",
"'Secret key: \"{0}\"'",
".",
"format",
"(",
"click",
".",
"style",
"(",
"masked_skey",
",",
"bold",
"=",
"True",
")",
")",
")",
"print",
"(",
"'Signature hash type: {0}'",
".",
"format",
"(",
"click",
".",
"style",
"(",
"config",
".",
"hash_type",
",",
"bold",
"=",
"True",
")",
")",
")",
"print",
"(",
"'Skip SSL certificate validation? {0}'",
".",
"format",
"(",
"click",
".",
"style",
"(",
"str",
"(",
"config",
".",
"skip_sslcert_validation",
")",
",",
"bold",
"=",
"True",
")",
")",
")"
] | 51.6
| 26.533333
|
def polls_slug_get(self, slug, **kwargs):
    """
    Fetch a single Poll by its unique slug.

    A Poll on Pollster is a collection of questions and responses published
    by a reputable survey house, plus Pollster-provided metadata about each
    question.  The call is synchronous by default; pass a `callback`
    function to run it asynchronously, in which case the request thread is
    returned instead of the data.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.polls_slug_get(slug, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str slug: Unique Poll identifier. For example: `gallup-26892`. (required)
    :return: Poll, or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    if not kwargs.get('callback'):
        data = self.polls_slug_get_with_http_info(slug, **kwargs)
        return data
    return self.polls_slug_get_with_http_info(slug, **kwargs)
|
[
"def",
"polls_slug_get",
"(",
"self",
",",
"slug",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'callback'",
")",
":",
"return",
"self",
".",
"polls_slug_get_with_http_info",
"(",
"slug",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"polls_slug_get_with_http_info",
"(",
"slug",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | 55.769231
| 34.461538
|
def parse_datetime(value):
    """
    Convert a datetime string to a timezone-aware datetime object.

    Helper to turn datetime strings found in json responses into datetime
    objects with timezone information.  The server stores all datetime
    strings as UTC (ZULU time); timezone offsets are supported, producing a
    timezone with a fixed offset from UTC.

    Inspired by the Django project (`django.utils.dateparse.parse_datetime`),
    which is copyrighted by the Django Software Foundation and individual
    contributors and MIT licensed.

    ..versionadded 2.5:

    :param value: datetime string
    :type value: str or None
    :return: datetime if the value is well formatted, otherwise None
        (including when value is None)
    :rtype: datetime or None
    :raises ValueError: if the value is well formatted but not a valid datetime
    """
    if value is None:
        # Nothing to parse.
        return None

    def _get_fixed_timezone(offset):
        """Return a tzinfo instance with a fixed offset from UTC."""
        if isinstance(offset, timedelta):
            offset = offset.seconds // 60
        sign = '-' if offset < 0 else '+'
        hhmm = '%02d%02d' % divmod(abs(offset), 60)
        return pytz.FixedOffset(offset, sign + hhmm)

    pattern = re.compile(
        r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
        r'[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
        r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
        r'(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$'
    )
    m = pattern.match(value)
    if not m:
        # Malformed input: mirror the "no match" path by returning None.
        return None
    parts = m.groupdict()
    if parts['microsecond']:
        # Right-pad to full microsecond precision, e.g. ".5" -> 500000 us.
        parts['microsecond'] = parts['microsecond'].ljust(6, '0')
    tz = parts.pop('tzinfo')
    if tz == 'Z':
        tz = pytz.UTC
    elif tz is not None:
        # Offset strings look like "+HH", "+HHMM" or "+HH:MM".
        offset = 60 * int(tz[1:3])
        if len(tz) > 3:
            offset += int(tz[-2:])
        if tz[0] == '-':
            offset = -offset
        tz = _get_fixed_timezone(offset)
    kw = {key: int(val) for key, val in six.iteritems(parts) if val is not None}
    kw['tzinfo'] = tz
    return datetime(**kw)
|
[
"def",
"parse_datetime",
"(",
"value",
")",
":",
"if",
"value",
"is",
"None",
":",
"# do not process the value",
"return",
"None",
"def",
"_get_fixed_timezone",
"(",
"offset",
")",
":",
"\"\"\"Return a tzinfo instance with a fixed offset from UTC.\"\"\"",
"if",
"isinstance",
"(",
"offset",
",",
"timedelta",
")",
":",
"offset",
"=",
"offset",
".",
"seconds",
"//",
"60",
"sign",
"=",
"'-'",
"if",
"offset",
"<",
"0",
"else",
"'+'",
"hhmm",
"=",
"'%02d%02d'",
"%",
"divmod",
"(",
"abs",
"(",
"offset",
")",
",",
"60",
")",
"name",
"=",
"sign",
"+",
"hhmm",
"return",
"pytz",
".",
"FixedOffset",
"(",
"offset",
",",
"name",
")",
"DATETIME_RE",
"=",
"re",
".",
"compile",
"(",
"r'(?P<year>\\d{4})-(?P<month>\\d{1,2})-(?P<day>\\d{1,2})'",
"r'[T ](?P<hour>\\d{1,2}):(?P<minute>\\d{1,2})'",
"r'(?::(?P<second>\\d{1,2})(?:\\.(?P<microsecond>\\d{1,6})\\d{0,6})?)?'",
"r'(?P<tzinfo>Z|[+-]\\d{2}(?::?\\d{2})?)?$'",
")",
"match",
"=",
"DATETIME_RE",
".",
"match",
"(",
"value",
")",
"if",
"match",
":",
"kw",
"=",
"match",
".",
"groupdict",
"(",
")",
"if",
"kw",
"[",
"'microsecond'",
"]",
":",
"kw",
"[",
"'microsecond'",
"]",
"=",
"kw",
"[",
"'microsecond'",
"]",
".",
"ljust",
"(",
"6",
",",
"'0'",
")",
"tzinfo",
"=",
"kw",
".",
"pop",
"(",
"'tzinfo'",
")",
"if",
"tzinfo",
"==",
"'Z'",
":",
"tzinfo",
"=",
"pytz",
".",
"UTC",
"elif",
"tzinfo",
"is",
"not",
"None",
":",
"offset_mins",
"=",
"int",
"(",
"tzinfo",
"[",
"-",
"2",
":",
"]",
")",
"if",
"len",
"(",
"tzinfo",
")",
">",
"3",
"else",
"0",
"offset",
"=",
"60",
"*",
"int",
"(",
"tzinfo",
"[",
"1",
":",
"3",
"]",
")",
"+",
"offset_mins",
"if",
"tzinfo",
"[",
"0",
"]",
"==",
"'-'",
":",
"offset",
"=",
"-",
"offset",
"tzinfo",
"=",
"_get_fixed_timezone",
"(",
"offset",
")",
"kw",
"=",
"{",
"k",
":",
"int",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"kw",
")",
"if",
"v",
"is",
"not",
"None",
"}",
"kw",
"[",
"'tzinfo'",
"]",
"=",
"tzinfo",
"return",
"datetime",
"(",
"*",
"*",
"kw",
")"
] | 39.338983
| 21.508475
|
def set_mac_addr_adv_interval(self, name, vrid, value=None, disable=False,
default=False, run=True):
"""Set the mac_addr_adv_interval property of the vrrp
Args:
name (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be managed.
value (integer): mac-address advertisement-interval value to
assign to the vrrp.
disable (boolean): Unset mac-address advertisement-interval
if True.
default (boolean): Set mac-address advertisement-interval to
default if True.
run (boolean): Set to True to execute the command, False to
return a string with the formatted command.
Returns:
If run is True, returns True if the command executed successfully,
error if failure.
If run is False, returns the formatted command string which can
be passed to the node
"""
if not default and not disable:
if not int(value) or int(value) < 1 or int(value) > 3600:
raise ValueError("vrrp property 'mac_addr_adv_interval' must "
"be in the range 1-3600")
cmd = self.command_builder('vrrp %d mac-address advertisement-interval'
% vrid, value=value, default=default,
disable=disable)
# Run the command if requested
if run:
result = self.configure_interface(name, cmd)
# And verify the command succeeded
if result is False:
return self.error
return result
# Otherwise return the formatted command
return cmd
|
[
"def",
"set_mac_addr_adv_interval",
"(",
"self",
",",
"name",
",",
"vrid",
",",
"value",
"=",
"None",
",",
"disable",
"=",
"False",
",",
"default",
"=",
"False",
",",
"run",
"=",
"True",
")",
":",
"if",
"not",
"default",
"and",
"not",
"disable",
":",
"if",
"not",
"int",
"(",
"value",
")",
"or",
"int",
"(",
"value",
")",
"<",
"1",
"or",
"int",
"(",
"value",
")",
">",
"3600",
":",
"raise",
"ValueError",
"(",
"\"vrrp property 'mac_addr_adv_interval' must \"",
"\"be in the range 1-3600\"",
")",
"cmd",
"=",
"self",
".",
"command_builder",
"(",
"'vrrp %d mac-address advertisement-interval'",
"%",
"vrid",
",",
"value",
"=",
"value",
",",
"default",
"=",
"default",
",",
"disable",
"=",
"disable",
")",
"# Run the command if requested",
"if",
"run",
":",
"result",
"=",
"self",
".",
"configure_interface",
"(",
"name",
",",
"cmd",
")",
"# And verify the command succeeded",
"if",
"result",
"is",
"False",
":",
"return",
"self",
".",
"error",
"return",
"result",
"# Otherwise return the formatted command",
"return",
"cmd"
] | 40.136364
| 23
|
def from_numpy_arrays(freq_data, noise_data, length, delta_f, low_freq_cutoff):
    """Interpolate a PSD (as two 1-dimensional arrays of frequency and data)
    to the desired length, delta_f and low frequency cutoff.

    Interpolation is done in log-log space, and frequencies below the
    cutoff are left at zero in the output.

    Parameters
    ----------
    freq_data : array
        Array of frequencies.
    noise_data : array
        PSD values corresponding to frequencies in freq_arr.
    length : int
        Length of the frequency series in samples.
    delta_f : float
        Frequency resolution of the frequency series in Herz.
    low_freq_cutoff : float
        Frequencies below this value are set to zero.

    Returns
    -------
    psd : FrequencySeries
        The generated frequency series.

    Raises
    ------
    ValueError
        If the lowest input frequency lies above the requested cutoff.
    """
    # Only include points above the low frequency cutoff
    if freq_data[0] > low_freq_cutoff:
        raise ValueError('Lowest frequency in input data '
                         ' is higher than requested low-frequency cutoff ' + str(low_freq_cutoff))

    kmin = int(low_freq_cutoff / delta_f)
    flow = kmin * delta_f

    data_start = (0 if freq_data[0]==low_freq_cutoff else numpy.searchsorted(freq_data, flow) - 1)

    # If the cutoff is exactly in the file, start there
    if freq_data[data_start+1] == low_freq_cutoff:
        data_start += 1

    freq_data = freq_data[data_start:]
    noise_data = noise_data[data_start:]

    # Interpolate in log-log space for smoother behaviour across decades.
    flog = numpy.log(freq_data)
    slog = numpy.log(noise_data)

    psd_interp = scipy.interpolate.interp1d(flog, slog)

    # NOTE: kmin was previously recomputed here with the identical
    # expression; the value from above is reused instead.
    psd = numpy.zeros(length, dtype=numpy.float64)

    vals = numpy.log(numpy.arange(kmin, length) * delta_f)
    psd[kmin:] = numpy.exp(psd_interp(vals))

    return FrequencySeries(psd, delta_f=delta_f)
|
[
"def",
"from_numpy_arrays",
"(",
"freq_data",
",",
"noise_data",
",",
"length",
",",
"delta_f",
",",
"low_freq_cutoff",
")",
":",
"# Only include points above the low frequency cutoff",
"if",
"freq_data",
"[",
"0",
"]",
">",
"low_freq_cutoff",
":",
"raise",
"ValueError",
"(",
"'Lowest frequency in input data '",
"' is higher than requested low-frequency cutoff '",
"+",
"str",
"(",
"low_freq_cutoff",
")",
")",
"kmin",
"=",
"int",
"(",
"low_freq_cutoff",
"/",
"delta_f",
")",
"flow",
"=",
"kmin",
"*",
"delta_f",
"data_start",
"=",
"(",
"0",
"if",
"freq_data",
"[",
"0",
"]",
"==",
"low_freq_cutoff",
"else",
"numpy",
".",
"searchsorted",
"(",
"freq_data",
",",
"flow",
")",
"-",
"1",
")",
"# If the cutoff is exactly in the file, start there",
"if",
"freq_data",
"[",
"data_start",
"+",
"1",
"]",
"==",
"low_freq_cutoff",
":",
"data_start",
"+=",
"1",
"freq_data",
"=",
"freq_data",
"[",
"data_start",
":",
"]",
"noise_data",
"=",
"noise_data",
"[",
"data_start",
":",
"]",
"flog",
"=",
"numpy",
".",
"log",
"(",
"freq_data",
")",
"slog",
"=",
"numpy",
".",
"log",
"(",
"noise_data",
")",
"psd_interp",
"=",
"scipy",
".",
"interpolate",
".",
"interp1d",
"(",
"flog",
",",
"slog",
")",
"kmin",
"=",
"int",
"(",
"low_freq_cutoff",
"/",
"delta_f",
")",
"psd",
"=",
"numpy",
".",
"zeros",
"(",
"length",
",",
"dtype",
"=",
"numpy",
".",
"float64",
")",
"vals",
"=",
"numpy",
".",
"log",
"(",
"numpy",
".",
"arange",
"(",
"kmin",
",",
"length",
")",
"*",
"delta_f",
")",
"psd",
"[",
"kmin",
":",
"]",
"=",
"numpy",
".",
"exp",
"(",
"psd_interp",
"(",
"vals",
")",
")",
"return",
"FrequencySeries",
"(",
"psd",
",",
"delta_f",
"=",
"delta_f",
")"
] | 32.529412
| 20.901961
|
def load_msgpack(blob, **kwargs):
    """
    Load a dict packed with msgpack into kwargs for
    a Trimesh constructor

    Parameters
    ----------
    blob : bytes
        msgpack packed dict containing
        keys 'vertices' and 'faces'

    Returns
    ----------
    loaded : dict
        Keyword args for Trimesh constructor, aka
        mesh=trimesh.Trimesh(**loaded)
    """
    import msgpack
    # File-like objects are streamed; raw bytes are decoded directly.
    unpack = msgpack.load if hasattr(blob, 'read') else msgpack.loads
    return load_dict(unpack(blob))
|
[
"def",
"load_msgpack",
"(",
"blob",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"msgpack",
"if",
"hasattr",
"(",
"blob",
",",
"'read'",
")",
":",
"data",
"=",
"msgpack",
".",
"load",
"(",
"blob",
")",
"else",
":",
"data",
"=",
"msgpack",
".",
"loads",
"(",
"blob",
")",
"loaded",
"=",
"load_dict",
"(",
"data",
")",
"return",
"loaded"
] | 21.08
| 17.64
|
def _validate(url):
"""Validate a url.
:param str url: Polling URL extracted from response header.
:raises: ValueError if URL has no scheme or host.
"""
if url is None:
return
parsed = urlparse(url)
if not parsed.scheme or not parsed.netloc:
raise ValueError("Invalid URL header")
|
[
"def",
"_validate",
"(",
"url",
")",
":",
"if",
"url",
"is",
"None",
":",
"return",
"parsed",
"=",
"urlparse",
"(",
"url",
")",
"if",
"not",
"parsed",
".",
"scheme",
"or",
"not",
"parsed",
".",
"netloc",
":",
"raise",
"ValueError",
"(",
"\"Invalid URL header\"",
")"
] | 28.636364
| 15.454545
|
def add_data_file(self, from_fp, timestamp=None, content_type=None):
    # type: (IO, Optional[datetime.datetime], Optional[str]) -> Text
    """Copy inputs to data/ folder.

    The stream is first written to a temporary file (computing its
    checksum on the way), then renamed into a content-addressed path
    ``data/<xx>/<checksum>`` where ``<xx>`` is the first two characters
    of the checksum.  The file is registered in the bagit manifest and,
    when provided, its ``timestamp`` and ``content_type`` are recorded
    as provenance metadata.

    Returns the file's path relative to the bag folder, in POSIX form.
    """
    self.self_check()
    tmp_dir, tmp_prefix = os.path.split(self.temp_prefix)
    # delete=False: the temp file must survive the `with` block so it
    # can be renamed into place below.
    with tempfile.NamedTemporaryFile(
            prefix=tmp_prefix, dir=tmp_dir, delete=False) as tmp:
        checksum = checksum_copy(from_fp, tmp)

    # Calculate hash-based file path
    folder = os.path.join(self.folder, DATA, checksum[0:2])
    path = os.path.join(folder, checksum)

    # os.rename assumed safe, as our temp file should
    # be in same file system as our temp folder
    if not os.path.isdir(folder):
        os.makedirs(folder)
    os.rename(tmp.name, path)

    # Relative posix path
    # (to avoid \ on Windows)
    rel_path = _posix_path(os.path.relpath(path, self.folder))

    # Register in bagit checksum
    if Hasher == hashlib.sha1:
        # Fast path: reuse the checksum computed during the copy above.
        self._add_to_bagit(rel_path, sha1=checksum)
    else:
        _logger.warning(
            u"[provenance] Unknown hash method %s for bagit manifest",
            Hasher)
        # Inefficient, bagit support need to checksum again
        self._add_to_bagit(rel_path)
    _logger.debug(u"[provenance] Added data file %s", path)
    if timestamp is not None:
        self._file_provenance[rel_path] = self._self_made(timestamp)
    _logger.debug(u"[provenance] Relative path for data file %s", rel_path)

    if content_type is not None:
        self._content_types[rel_path] = content_type
    return rel_path
|
[
"def",
"add_data_file",
"(",
"self",
",",
"from_fp",
",",
"timestamp",
"=",
"None",
",",
"content_type",
"=",
"None",
")",
":",
"# type: (IO, Optional[datetime.datetime], Optional[str]) -> Text",
"self",
".",
"self_check",
"(",
")",
"tmp_dir",
",",
"tmp_prefix",
"=",
"os",
".",
"path",
".",
"split",
"(",
"self",
".",
"temp_prefix",
")",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"prefix",
"=",
"tmp_prefix",
",",
"dir",
"=",
"tmp_dir",
",",
"delete",
"=",
"False",
")",
"as",
"tmp",
":",
"checksum",
"=",
"checksum_copy",
"(",
"from_fp",
",",
"tmp",
")",
"# Calculate hash-based file path",
"folder",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"folder",
",",
"DATA",
",",
"checksum",
"[",
"0",
":",
"2",
"]",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"checksum",
")",
"# os.rename assumed safe, as our temp file should",
"# be in same file system as our temp folder",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"folder",
")",
":",
"os",
".",
"makedirs",
"(",
"folder",
")",
"os",
".",
"rename",
"(",
"tmp",
".",
"name",
",",
"path",
")",
"# Relative posix path",
"# (to avoid \\ on Windows)",
"rel_path",
"=",
"_posix_path",
"(",
"os",
".",
"path",
".",
"relpath",
"(",
"path",
",",
"self",
".",
"folder",
")",
")",
"# Register in bagit checksum",
"if",
"Hasher",
"==",
"hashlib",
".",
"sha1",
":",
"self",
".",
"_add_to_bagit",
"(",
"rel_path",
",",
"sha1",
"=",
"checksum",
")",
"else",
":",
"_logger",
".",
"warning",
"(",
"u\"[provenance] Unknown hash method %s for bagit manifest\"",
",",
"Hasher",
")",
"# Inefficient, bagit support need to checksum again",
"self",
".",
"_add_to_bagit",
"(",
"rel_path",
")",
"_logger",
".",
"debug",
"(",
"u\"[provenance] Added data file %s\"",
",",
"path",
")",
"if",
"timestamp",
"is",
"not",
"None",
":",
"self",
".",
"_file_provenance",
"[",
"rel_path",
"]",
"=",
"self",
".",
"_self_made",
"(",
"timestamp",
")",
"_logger",
".",
"debug",
"(",
"u\"[provenance] Relative path for data file %s\"",
",",
"rel_path",
")",
"if",
"content_type",
"is",
"not",
"None",
":",
"self",
".",
"_content_types",
"[",
"rel_path",
"]",
"=",
"content_type",
"return",
"rel_path"
] | 42.076923
| 17.717949
|
def is_compatible(self, model, version):
    """Check if this flag is compatible with a YubiKey of version 'ver'.

    :param model: YubiKey model identifier; must be in ``self.models``.
    :param version: Firmware version, compared against ``self.min_ykver``
        and, when set, ``self.max_ykver``.
    :return: True if the flag applies to this model/version, else False.
    """
    # Idiomatic membership test (`not in` instead of `not ... in`).
    if model not in self.models:
        return False
    if self.max_ykver:
        # Bounded range: chained comparison is equivalent to the
        # original `version >= min and version <= max`.
        return self.min_ykver <= version <= self.max_ykver
    return version >= self.min_ykver
|
[
"def",
"is_compatible",
"(",
"self",
",",
"model",
",",
"version",
")",
":",
"if",
"not",
"model",
"in",
"self",
".",
"models",
":",
"return",
"False",
"if",
"self",
".",
"max_ykver",
":",
"return",
"(",
"version",
">=",
"self",
".",
"min_ykver",
"and",
"version",
"<=",
"self",
".",
"max_ykver",
")",
"else",
":",
"return",
"version",
">=",
"self",
".",
"min_ykver"
] | 39.888889
| 8.888889
|
def put(self, request, id, format=None):
    """
    Update an existing bot
    ---
    serializer: BotUpdateSerializer
    responseMessages:
        - code: 401
          message: Not authenticated
        - code: 400
          message: Not valid request
    """
    bot = self.get_bot(id, request.user)
    serializer = BotUpdateSerializer(bot, data=request.data)
    if serializer.is_valid():
        try:
            bot = serializer.save()
        # Was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; only genuine save failures should map
        # to a 400 response.
        except Exception:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        else:
            return Response(BotSerializer(bot).data)
    return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
[
"def",
"put",
"(",
"self",
",",
"request",
",",
"id",
",",
"format",
"=",
"None",
")",
":",
"bot",
"=",
"self",
".",
"get_bot",
"(",
"id",
",",
"request",
".",
"user",
")",
"serializer",
"=",
"BotUpdateSerializer",
"(",
"bot",
",",
"data",
"=",
"request",
".",
"data",
")",
"if",
"serializer",
".",
"is_valid",
"(",
")",
":",
"try",
":",
"bot",
"=",
"serializer",
".",
"save",
"(",
")",
"except",
":",
"return",
"Response",
"(",
"status",
"=",
"status",
".",
"HTTP_400_BAD_REQUEST",
")",
"else",
":",
"return",
"Response",
"(",
"BotSerializer",
"(",
"bot",
")",
".",
"data",
")",
"return",
"Response",
"(",
"serializer",
".",
"errors",
",",
"status",
"=",
"status",
".",
"HTTP_400_BAD_REQUEST",
")"
] | 34.571429
| 13.047619
|
def forest(self, data: ['SASdata', str] = None,
           autotune: str = None,
           code: str = None,
           crossvalidation: str = None,
           grow: str = None,
           id: str = None,
           input: [str, list, dict] = None,
           output: [str, bool, 'SASdata'] = None,
           partition: str = None,
           savestate: str = None,
           target: [str, list, dict] = None,
           viicode: str = None,
           weight: str = None,
           procopts: str = None,
           stmtpassthrough: str = None,
           **kwargs: dict) -> 'SASresults':
    """
    Python method to call the FOREST procedure

    Documentation link:
    https://go.documentation.sas.com/?docsetId=casml&docsetTarget=casml_forest_toc.htm&docsetVersion=8.3&locale=en

    :param data: SASdata object or string. This parameter is required.
    :param autotune: The autotune variable can only be a string type.
    :param code: The code variable can only be a string type.
    :param crossvalidation: The crossvalidation variable can only be a string type.
    :param grow: The grow variable can only be a string type.
    :param id: The id variable can only be a string type.
    :param input: The input variable can be a string, list or dict type. It refers to the dependent, y, or label variable.
    :param output: The output variable can be a string, boolean or SASdata type. The member name for a boolean is "_output".
    :param partition: The partition variable can only be a string type.
    :param savestate: The savestate variable can only be a string type.
    :param target: The target variable can be a string, list or dict type. It refers to the dependent, y, or label variable.
    :param viicode: The viicode variable can only be a string type.
    :param weight: The weight variable can only be a string type.
    :param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
    :param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
    :return: SAS Result Object
    """
|
[
"def",
"forest",
"(",
"self",
",",
"data",
":",
"[",
"'SASdata'",
",",
"str",
"]",
"=",
"None",
",",
"autotune",
":",
"str",
"=",
"None",
",",
"code",
":",
"str",
"=",
"None",
",",
"crossvalidation",
":",
"str",
"=",
"None",
",",
"grow",
":",
"str",
"=",
"None",
",",
"id",
":",
"str",
"=",
"None",
",",
"input",
":",
"[",
"str",
",",
"list",
",",
"dict",
"]",
"=",
"None",
",",
"output",
":",
"[",
"str",
",",
"bool",
",",
"'SASdata'",
"]",
"=",
"None",
",",
"partition",
":",
"str",
"=",
"None",
",",
"savestate",
":",
"str",
"=",
"None",
",",
"target",
":",
"[",
"str",
",",
"list",
",",
"dict",
"]",
"=",
"None",
",",
"viicode",
":",
"str",
"=",
"None",
",",
"weight",
":",
"str",
"=",
"None",
",",
"procopts",
":",
"str",
"=",
"None",
",",
"stmtpassthrough",
":",
"str",
"=",
"None",
",",
"*",
"*",
"kwargs",
":",
"dict",
")",
"->",
"'SASresults'",
":"
] | 57.25641
| 26.435897
|
def alter_function(self, dbName, funcName, newFunc):
    """Replace function ``funcName`` in database ``dbName`` with ``newFunc``.

    Parameters:
     - dbName
     - funcName
     - newFunc
    """
    # Send the RPC request, then block on its acknowledgement.
    request_args = (dbName, funcName, newFunc)
    self.send_alter_function(*request_args)
    self.recv_alter_function()
|
[
"def",
"alter_function",
"(",
"self",
",",
"dbName",
",",
"funcName",
",",
"newFunc",
")",
":",
"self",
".",
"send_alter_function",
"(",
"dbName",
",",
"funcName",
",",
"newFunc",
")",
"self",
".",
"recv_alter_function",
"(",
")"
] | 23.111111
| 15.555556
|
def add_target(self, name=None):
    """
    Add an SCons target to this nest.

    The decorated function is wrapped so that, for each output directory
    and control dictionary, it is called with the destination directory
    and the control dict; its result is stored in the respective control
    dictionary for later nests to access.

    :param name: Name for the target in the nest (default: function name).
    """
    def decorator(func):
        # Empty/None names fall back to the function's own name.
        target_key = name or func.__name__

        def nestfunc(control):
            destdir = os.path.join(self.dest_dir, control['OUTDIR'])
            return [func(destdir, control)]

        self.nest.add(target_key, nestfunc, create_dir=False)
        self._register_alias(target_key)
        return func

    return decorator
|
[
"def",
"add_target",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"def",
"deco",
"(",
"func",
")",
":",
"def",
"nestfunc",
"(",
"control",
")",
":",
"destdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"dest_dir",
",",
"control",
"[",
"'OUTDIR'",
"]",
")",
"return",
"[",
"func",
"(",
"destdir",
",",
"control",
")",
"]",
"key",
"=",
"name",
"or",
"func",
".",
"__name__",
"self",
".",
"nest",
".",
"add",
"(",
"key",
",",
"nestfunc",
",",
"create_dir",
"=",
"False",
")",
"self",
".",
"_register_alias",
"(",
"key",
")",
"return",
"func",
"return",
"deco"
] | 38.15
| 18.85
|
def map(self, **kwargs):
    ''' Change a name on the fly.  Compat with kr/env. '''
    renamed = {}
    for new_name, env_name in kwargs.items():
        renamed[new_name] = str(self._envars[env_name])  # str strips Entry
    return renamed
|
[
"def",
"map",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"{",
"key",
":",
"str",
"(",
"self",
".",
"_envars",
"[",
"kwargs",
"[",
"key",
"]",
"]",
")",
"# str strips Entry",
"for",
"key",
"in",
"kwargs",
"}"
] | 48.5
| 18.5
|
def get_sections(need_info):
    """Return the hierarchy of section titles for a need as a list,
    starting at the section of the current need and then walking up
    through its parent sections."""
    titles = []
    node = need_info['target_node']
    while node:
        if isinstance(node, nodes.section):
            heading = node.children[0].astext()
            # If using auto-section numbering, then Sphinx inserts
            # multiple non-breaking space unicode characters into the title
            # we'll replace those with a simple space to make them easier to
            # use in filters
            heading = NON_BREAKING_SPACE.sub(' ', heading)
            titles.append(heading)
        node = getattr(node, 'parent', None)
    return titles
|
[
"def",
"get_sections",
"(",
"need_info",
")",
":",
"sections",
"=",
"[",
"]",
"current_node",
"=",
"need_info",
"[",
"'target_node'",
"]",
"while",
"current_node",
":",
"if",
"isinstance",
"(",
"current_node",
",",
"nodes",
".",
"section",
")",
":",
"title",
"=",
"current_node",
".",
"children",
"[",
"0",
"]",
".",
"astext",
"(",
")",
"# If using auto-section numbering, then Sphinx inserts",
"# multiple non-breaking space unicode characters into the title",
"# we'll replace those with a simple space to make them easier to",
"# use in filters",
"title",
"=",
"NON_BREAKING_SPACE",
".",
"sub",
"(",
"' '",
",",
"title",
")",
"sections",
".",
"append",
"(",
"title",
")",
"current_node",
"=",
"getattr",
"(",
"current_node",
",",
"'parent'",
",",
"None",
")",
"return",
"sections"
] | 47.625
| 15.5625
|
def aggregate_rate(rate_key, count_key):
    """
    Build an aggregator computing the `count_key`-weighted average of
    `rate_key` over a sequence of documents.

    Returns a ``(rate_key, aggregator)`` pair, where the aggregator takes
    an iterable of dicts and returns the weighted rate.
    """
    def inner(docs):
        weight_sum = 0
        weighted_sum = 0
        for doc in docs:
            weight_sum += doc[count_key]
            weighted_sum += doc[rate_key] * doc[count_key]
        return weighted_sum / weight_sum
    return rate_key, inner
|
[
"def",
"aggregate_rate",
"(",
"rate_key",
",",
"count_key",
")",
":",
"def",
"inner",
"(",
"docs",
")",
":",
"total",
"=",
"sum",
"(",
"doc",
"[",
"count_key",
"]",
"for",
"doc",
"in",
"docs",
")",
"weighted_total",
"=",
"sum",
"(",
"doc",
"[",
"rate_key",
"]",
"*",
"doc",
"[",
"count_key",
"]",
"for",
"doc",
"in",
"docs",
")",
"total_rate",
"=",
"weighted_total",
"/",
"total",
"return",
"total_rate",
"return",
"rate_key",
",",
"inner"
] | 31.5
| 15.666667
|
def check_messages(filename, report_empty=False):
    """
    Checks messages in `filename` (a .po file) in various ways:

    * Translations must have the same slots (format tags) as the English.
    * Messages can't have astral (non-BMP) characters in them.

    If `report_empty` is True, will also report empty translation strings.

    Returns the problems, a list of tuples. Each is a description, a msgid, and
    then zero or more translations.
    """
    problems = []
    pomsgs = polib.pofile(filename)
    for msg in pomsgs:
        # Check for characters Javascript can't support.
        # https://code.djangoproject.com/ticket/21725
        if astral(msg.msgstr):
            problems.append(("Non-BMP char", msg.msgid, msg.msgstr))
        if is_format_message(msg):
            # LONG_DATE_FORMAT, etc, have %s etc in them, and that's ok.
            continue
        if msg.msgid_plural:
            # Plurals: two strings in, N strings out.
            # Join the plural forms (sorted by index) so they can be
            # tag-checked as one combined string below.
            source = msg.msgid + " | " + msg.msgid_plural
            translation = " | ".join(v for k, v in sorted(msg.msgstr_plural.items()))
            empty = any(not t.strip() for t in msg.msgstr_plural.values())
        else:
            # Singular: just one string in and one string out.
            source = msg.msgid
            translation = msg.msgstr
            empty = not msg.msgstr.strip()
        if empty:
            if report_empty:
                problems.append(("Empty translation", source))
        else:
            id_tags = tags_in_string(source)
            tx_tags = tags_in_string(translation)

            # Check if tags don't match
            if id_tags != tx_tags:
                # Describe the symmetric difference: tags only in the
                # source vs. tags only in the translation.
                id_has = u", ".join(sorted(u'"{}"'.format(t) for t in id_tags - tx_tags))
                tx_has = u", ".join(sorted(u'"{}"'.format(t) for t in tx_tags - id_tags))
                if id_has and tx_has:
                    diff = u"{} vs {}".format(id_has, tx_has)
                elif id_has:
                    diff = u"{} missing".format(id_has)
                else:
                    diff = u"{} added".format(tx_has)
                problems.append((
                    "Different tags in source and translation",
                    source,
                    translation,
                    diff
                ))
    return problems
|
[
"def",
"check_messages",
"(",
"filename",
",",
"report_empty",
"=",
"False",
")",
":",
"problems",
"=",
"[",
"]",
"pomsgs",
"=",
"polib",
".",
"pofile",
"(",
"filename",
")",
"for",
"msg",
"in",
"pomsgs",
":",
"# Check for characters Javascript can't support.",
"# https://code.djangoproject.com/ticket/21725",
"if",
"astral",
"(",
"msg",
".",
"msgstr",
")",
":",
"problems",
".",
"append",
"(",
"(",
"\"Non-BMP char\"",
",",
"msg",
".",
"msgid",
",",
"msg",
".",
"msgstr",
")",
")",
"if",
"is_format_message",
"(",
"msg",
")",
":",
"# LONG_DATE_FORMAT, etc, have %s etc in them, and that's ok.",
"continue",
"if",
"msg",
".",
"msgid_plural",
":",
"# Plurals: two strings in, N strings out.",
"source",
"=",
"msg",
".",
"msgid",
"+",
"\" | \"",
"+",
"msg",
".",
"msgid_plural",
"translation",
"=",
"\" | \"",
".",
"join",
"(",
"v",
"for",
"k",
",",
"v",
"in",
"sorted",
"(",
"msg",
".",
"msgstr_plural",
".",
"items",
"(",
")",
")",
")",
"empty",
"=",
"any",
"(",
"not",
"t",
".",
"strip",
"(",
")",
"for",
"t",
"in",
"msg",
".",
"msgstr_plural",
".",
"values",
"(",
")",
")",
"else",
":",
"# Singular: just one string in and one string out.",
"source",
"=",
"msg",
".",
"msgid",
"translation",
"=",
"msg",
".",
"msgstr",
"empty",
"=",
"not",
"msg",
".",
"msgstr",
".",
"strip",
"(",
")",
"if",
"empty",
":",
"if",
"report_empty",
":",
"problems",
".",
"append",
"(",
"(",
"\"Empty translation\"",
",",
"source",
")",
")",
"else",
":",
"id_tags",
"=",
"tags_in_string",
"(",
"source",
")",
"tx_tags",
"=",
"tags_in_string",
"(",
"translation",
")",
"# Check if tags don't match",
"if",
"id_tags",
"!=",
"tx_tags",
":",
"id_has",
"=",
"u\", \"",
".",
"join",
"(",
"sorted",
"(",
"u'\"{}\"'",
".",
"format",
"(",
"t",
")",
"for",
"t",
"in",
"id_tags",
"-",
"tx_tags",
")",
")",
"tx_has",
"=",
"u\", \"",
".",
"join",
"(",
"sorted",
"(",
"u'\"{}\"'",
".",
"format",
"(",
"t",
")",
"for",
"t",
"in",
"tx_tags",
"-",
"id_tags",
")",
")",
"if",
"id_has",
"and",
"tx_has",
":",
"diff",
"=",
"u\"{} vs {}\"",
".",
"format",
"(",
"id_has",
",",
"tx_has",
")",
"elif",
"id_has",
":",
"diff",
"=",
"u\"{} missing\"",
".",
"format",
"(",
"id_has",
")",
"else",
":",
"diff",
"=",
"u\"{} added\"",
".",
"format",
"(",
"tx_has",
")",
"problems",
".",
"append",
"(",
"(",
"\"Different tags in source and translation\"",
",",
"source",
",",
"translation",
",",
"diff",
")",
")",
"return",
"problems"
] | 36.032258
| 20.645161
|
def init_with_context(self, context):
"""
Please refer to
:meth:`~admin_tools.menu.items.MenuItem.init_with_context`
documentation from :class:`~admin_tools.menu.items.MenuItem` class.
"""
items = self._visible_models(context['request'])
apps = {}
for model, perms in items:
if not (perms['change'] or perms.get('view', False)):
continue
app_label = model._meta.app_label
if app_label not in apps:
apps[app_label] = {
'title':
django_apps.get_app_config(app_label).verbose_name,
'url': self._get_admin_app_list_url(model, context),
'models': []
}
apps[app_label]['models'].append({
'title': model._meta.verbose_name_plural,
'url': self._get_admin_change_url(model, context)
})
for app in sorted(apps.keys()):
app_dict = apps[app]
item = MenuItem(title=app_dict['title'], url=app_dict['url'])
# sort model list alphabetically
apps[app]['models'].sort(key=lambda x: x['title'])
for model_dict in apps[app]['models']:
item.children.append(MenuItem(**model_dict))
self.children.append(item)
|
[
"def",
"init_with_context",
"(",
"self",
",",
"context",
")",
":",
"items",
"=",
"self",
".",
"_visible_models",
"(",
"context",
"[",
"'request'",
"]",
")",
"apps",
"=",
"{",
"}",
"for",
"model",
",",
"perms",
"in",
"items",
":",
"if",
"not",
"(",
"perms",
"[",
"'change'",
"]",
"or",
"perms",
".",
"get",
"(",
"'view'",
",",
"False",
")",
")",
":",
"continue",
"app_label",
"=",
"model",
".",
"_meta",
".",
"app_label",
"if",
"app_label",
"not",
"in",
"apps",
":",
"apps",
"[",
"app_label",
"]",
"=",
"{",
"'title'",
":",
"django_apps",
".",
"get_app_config",
"(",
"app_label",
")",
".",
"verbose_name",
",",
"'url'",
":",
"self",
".",
"_get_admin_app_list_url",
"(",
"model",
",",
"context",
")",
",",
"'models'",
":",
"[",
"]",
"}",
"apps",
"[",
"app_label",
"]",
"[",
"'models'",
"]",
".",
"append",
"(",
"{",
"'title'",
":",
"model",
".",
"_meta",
".",
"verbose_name_plural",
",",
"'url'",
":",
"self",
".",
"_get_admin_change_url",
"(",
"model",
",",
"context",
")",
"}",
")",
"for",
"app",
"in",
"sorted",
"(",
"apps",
".",
"keys",
"(",
")",
")",
":",
"app_dict",
"=",
"apps",
"[",
"app",
"]",
"item",
"=",
"MenuItem",
"(",
"title",
"=",
"app_dict",
"[",
"'title'",
"]",
",",
"url",
"=",
"app_dict",
"[",
"'url'",
"]",
")",
"# sort model list alphabetically",
"apps",
"[",
"app",
"]",
"[",
"'models'",
"]",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"'title'",
"]",
")",
"for",
"model_dict",
"in",
"apps",
"[",
"app",
"]",
"[",
"'models'",
"]",
":",
"item",
".",
"children",
".",
"append",
"(",
"MenuItem",
"(",
"*",
"*",
"model_dict",
")",
")",
"self",
".",
"children",
".",
"append",
"(",
"item",
")"
] | 41.875
| 15.75
|
def check_event(self, event):
# type: (ServiceEvent) -> bool
"""
Tests if the given service event must be handled or ignored, based
on the state of the iPOPO service and on the content of the event.
:param event: A service event
:return: True if the event can be handled, False if it must be ignored
"""
with self._lock:
if self.state == StoredInstance.KILLED:
# This call may have been blocked by the internal state lock,
# ignore it
return False
return self.__safe_handlers_callback("check_event", event)
|
[
"def",
"check_event",
"(",
"self",
",",
"event",
")",
":",
"# type: (ServiceEvent) -> bool",
"with",
"self",
".",
"_lock",
":",
"if",
"self",
".",
"state",
"==",
"StoredInstance",
".",
"KILLED",
":",
"# This call may have been blocked by the internal state lock,",
"# ignore it",
"return",
"False",
"return",
"self",
".",
"__safe_handlers_callback",
"(",
"\"check_event\"",
",",
"event",
")"
] | 39.3125
| 20.0625
|
def is_icao_assigned(icao):
    """ Check whether the ICAO address is assigned (Annex 10, Vol 3)"""
    if icao is None or not isinstance(icao, str) or len(icao) != 6:
        return False

    addr = hex2int(icao)

    # Address blocks that are not assigned, by region (exclusive bounds,
    # matching the original implementation).
    unassigned_blocks = (
        (0x200000, 0x27FFFF),  # AFI
        (0x280000, 0x28FFFF),  # SAM
        (0x500000, 0x5FFFFF),  # EUR, NAT
        (0x600000, 0x67FFFF),  # MID
        (0x680000, 0x6F0000),  # ASIA
        (0x900000, 0x9FFFFF),  # NAM, PAC
        (0xB00000, 0xBFFFFF),  # CAR
        (0xD00000, 0xDFFFFF),  # future
        (0xF00000, 0xFFFFFF),  # future
    )
    return not any(lo < addr < hi for lo, hi in unassigned_blocks)
|
[
"def",
"is_icao_assigned",
"(",
"icao",
")",
":",
"if",
"(",
"icao",
"is",
"None",
")",
"or",
"(",
"not",
"isinstance",
"(",
"icao",
",",
"str",
")",
")",
"or",
"(",
"len",
"(",
"icao",
")",
"!=",
"6",
")",
":",
"return",
"False",
"icaoint",
"=",
"hex2int",
"(",
"icao",
")",
"if",
"0x200000",
"<",
"icaoint",
"<",
"0x27FFFF",
":",
"return",
"False",
"# AFI",
"if",
"0x280000",
"<",
"icaoint",
"<",
"0x28FFFF",
":",
"return",
"False",
"# SAM",
"if",
"0x500000",
"<",
"icaoint",
"<",
"0x5FFFFF",
":",
"return",
"False",
"# EUR, NAT",
"if",
"0x600000",
"<",
"icaoint",
"<",
"0x67FFFF",
":",
"return",
"False",
"# MID",
"if",
"0x680000",
"<",
"icaoint",
"<",
"0x6F0000",
":",
"return",
"False",
"# ASIA",
"if",
"0x900000",
"<",
"icaoint",
"<",
"0x9FFFFF",
":",
"return",
"False",
"# NAM, PAC",
"if",
"0xB00000",
"<",
"icaoint",
"<",
"0xBFFFFF",
":",
"return",
"False",
"# CAR",
"if",
"0xD00000",
"<",
"icaoint",
"<",
"0xDFFFFF",
":",
"return",
"False",
"# future",
"if",
"0xF00000",
"<",
"icaoint",
"<",
"0xFFFFFF",
":",
"return",
"False",
"# future",
"return",
"True"
] | 41.947368
| 24.631579
|
def info(self, **kwargs):
"""
Get the detailed information about a particular credit record. This is
currently only supported with the new credit model found in TV. These
ids can be found from any TV credit response as well as the tv_credits
and combined_credits methods for people.
The episodes object returns a list of episodes and are generally going
to be guest stars. The season array will return a list of season
numbers. Season credits are credits that were marked with the
"add to every season" option in the editing interface and are
assumed to be "season regulars".
Args:
language: (optional) ISO 639-1 code.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_credit_id_path('info')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
|
[
"def",
"info",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"path",
"=",
"self",
".",
"_get_credit_id_path",
"(",
"'info'",
")",
"response",
"=",
"self",
".",
"_GET",
"(",
"path",
",",
"kwargs",
")",
"self",
".",
"_set_attrs_to_values",
"(",
"response",
")",
"return",
"response"
] | 40.208333
| 22.875
|
def add_widget_to_content(self, widget):
"""Subclasses should call this to add content in the section's top level column."""
self.__section_content_column.add_spacing(4)
self.__section_content_column.add(widget)
|
[
"def",
"add_widget_to_content",
"(",
"self",
",",
"widget",
")",
":",
"self",
".",
"__section_content_column",
".",
"add_spacing",
"(",
"4",
")",
"self",
".",
"__section_content_column",
".",
"add",
"(",
"widget",
")"
] | 58
| 5.25
|
def main(arguments=None):
    """Main command line entry point."""
    args = arguments if arguments else sys.argv[1:]
    wordlist, sowpods, by_length, start, end = argument_parser(args)
    for word in wordlist:
        found = anagrams_in_word(word, sowpods, start, end)
        pretty_print(word, found, by_length)
|
[
"def",
"main",
"(",
"arguments",
"=",
"None",
")",
":",
"if",
"not",
"arguments",
":",
"arguments",
"=",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
"wordlist",
",",
"sowpods",
",",
"by_length",
",",
"start",
",",
"end",
"=",
"argument_parser",
"(",
"arguments",
")",
"for",
"word",
"in",
"wordlist",
":",
"pretty_print",
"(",
"word",
",",
"anagrams_in_word",
"(",
"word",
",",
"sowpods",
",",
"start",
",",
"end",
")",
",",
"by_length",
",",
")"
] | 26.230769
| 21.307692
|
def create_dump():
    """ Create the grammar for the 'dump' statement """
    action = upkey("dump").setResultsName("action")
    tables = Group(delimitedList(table)).setResultsName("tables")
    return action + upkey("schema") + Optional(tables)
|
[
"def",
"create_dump",
"(",
")",
":",
"dump",
"=",
"upkey",
"(",
"\"dump\"",
")",
".",
"setResultsName",
"(",
"\"action\"",
")",
"return",
"(",
"dump",
"+",
"upkey",
"(",
"\"schema\"",
")",
"+",
"Optional",
"(",
"Group",
"(",
"delimitedList",
"(",
"table",
")",
")",
".",
"setResultsName",
"(",
"\"tables\"",
")",
")",
")"
] | 31
| 21.125
|
def get_scale(self, gg):
"""
Create a scale
"""
# This method does some introspection to save users from
# scale mismatch error. This could happen when the
# aesthetic is mapped to a categorical but the limits
# are not provided in categorical form. We only handle
# the case where the mapping uses an expression to
# conver to categorical e.g `aes(color='factor(cyl)')`.
# However if `'cyl'` column is a categorical and the
# mapping is `aes(color='cyl')`, that will result in
# an error. If later case proves common enough then we
# could inspect the data and be clever based on that too!!
ae = self.aesthetic
series = self.limits_series
ae_values = []
# Look through all the mappings for this aesthetic,
# if we detect any factor stuff then we convert the
# limits data to categorical so that the right scale
# can be choosen. This should take care of the most
# common use cases.
for layer in gg.layers:
with suppress(KeyError):
value = layer.mapping[ae]
if isinstance(value, str):
ae_values.append(value)
for value in ae_values:
if ('factor(' in value or
'Categorical(' in value):
series = pd.Categorical(self.limits_series)
break
return make_scale(self.aesthetic,
series,
limits=self.limits,
trans=self.trans)
|
[
"def",
"get_scale",
"(",
"self",
",",
"gg",
")",
":",
"# This method does some introspection to save users from",
"# scale mismatch error. This could happen when the",
"# aesthetic is mapped to a categorical but the limits",
"# are not provided in categorical form. We only handle",
"# the case where the mapping uses an expression to",
"# conver to categorical e.g `aes(color='factor(cyl)')`.",
"# However if `'cyl'` column is a categorical and the",
"# mapping is `aes(color='cyl')`, that will result in",
"# an error. If later case proves common enough then we",
"# could inspect the data and be clever based on that too!!",
"ae",
"=",
"self",
".",
"aesthetic",
"series",
"=",
"self",
".",
"limits_series",
"ae_values",
"=",
"[",
"]",
"# Look through all the mappings for this aesthetic,",
"# if we detect any factor stuff then we convert the",
"# limits data to categorical so that the right scale",
"# can be choosen. This should take care of the most",
"# common use cases.",
"for",
"layer",
"in",
"gg",
".",
"layers",
":",
"with",
"suppress",
"(",
"KeyError",
")",
":",
"value",
"=",
"layer",
".",
"mapping",
"[",
"ae",
"]",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"ae_values",
".",
"append",
"(",
"value",
")",
"for",
"value",
"in",
"ae_values",
":",
"if",
"(",
"'factor('",
"in",
"value",
"or",
"'Categorical('",
"in",
"value",
")",
":",
"series",
"=",
"pd",
".",
"Categorical",
"(",
"self",
".",
"limits_series",
")",
"break",
"return",
"make_scale",
"(",
"self",
".",
"aesthetic",
",",
"series",
",",
"limits",
"=",
"self",
".",
"limits",
",",
"trans",
"=",
"self",
".",
"trans",
")"
] | 41.526316
| 14.315789
|
def read_setup_py_source(self): # type: () -> None
"""
Read setup.py to string
:return:
"""
if not self.setup_file_name:
self.setup_source = ""
if not self.setup_source:
self.setup_source = self._read_file(self.setup_file_name)
|
[
"def",
"read_setup_py_source",
"(",
"self",
")",
":",
"# type: () -> None",
"if",
"not",
"self",
".",
"setup_file_name",
":",
"self",
".",
"setup_source",
"=",
"\"\"",
"if",
"not",
"self",
".",
"setup_source",
":",
"self",
".",
"setup_source",
"=",
"self",
".",
"_read_file",
"(",
"self",
".",
"setup_file_name",
")"
] | 32.444444
| 10
|
def get(self, block=1, delay=None):
"""Get a request from a queue, optionally block until a request
is available."""
if _debug: IOQueue._debug("get block=%r delay=%r", block, delay)
# if the queue is empty and we do not block return None
if not block and not self.notempty.isSet():
if _debug: IOQueue._debug(" - not blocking and empty")
return None
# wait for something to be in the queue
if delay:
self.notempty.wait(delay)
if not self.notempty.isSet():
return None
else:
self.notempty.wait()
# extract the first element
priority, iocb = self.queue[0]
del self.queue[0]
iocb.ioQueue = None
# if the queue is empty, clear the event
qlen = len(self.queue)
if not qlen:
self.notempty.clear()
# return the request
return iocb
|
[
"def",
"get",
"(",
"self",
",",
"block",
"=",
"1",
",",
"delay",
"=",
"None",
")",
":",
"if",
"_debug",
":",
"IOQueue",
".",
"_debug",
"(",
"\"get block=%r delay=%r\"",
",",
"block",
",",
"delay",
")",
"# if the queue is empty and we do not block return None",
"if",
"not",
"block",
"and",
"not",
"self",
".",
"notempty",
".",
"isSet",
"(",
")",
":",
"if",
"_debug",
":",
"IOQueue",
".",
"_debug",
"(",
"\" - not blocking and empty\"",
")",
"return",
"None",
"# wait for something to be in the queue",
"if",
"delay",
":",
"self",
".",
"notempty",
".",
"wait",
"(",
"delay",
")",
"if",
"not",
"self",
".",
"notempty",
".",
"isSet",
"(",
")",
":",
"return",
"None",
"else",
":",
"self",
".",
"notempty",
".",
"wait",
"(",
")",
"# extract the first element",
"priority",
",",
"iocb",
"=",
"self",
".",
"queue",
"[",
"0",
"]",
"del",
"self",
".",
"queue",
"[",
"0",
"]",
"iocb",
".",
"ioQueue",
"=",
"None",
"# if the queue is empty, clear the event",
"qlen",
"=",
"len",
"(",
"self",
".",
"queue",
")",
"if",
"not",
"qlen",
":",
"self",
".",
"notempty",
".",
"clear",
"(",
")",
"# return the request",
"return",
"iocb"
] | 30.833333
| 17.066667
|
def _pki_minions(self):
'''
Retreive complete minion list from PKI dir.
Respects cache if configured
'''
minions = []
pki_cache_fn = os.path.join(self.opts['pki_dir'], self.acc, '.key_cache')
try:
os.makedirs(os.path.dirname(pki_cache_fn))
except OSError:
pass
try:
if self.opts['key_cache'] and os.path.exists(pki_cache_fn):
log.debug('Returning cached minion list')
if six.PY2:
with salt.utils.files.fopen(pki_cache_fn) as fn_:
return self.serial.load(fn_)
else:
with salt.utils.files.fopen(pki_cache_fn, mode='rb') as fn_:
return self.serial.load(fn_)
else:
for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(os.path.join(self.opts['pki_dir'], self.acc))):
if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], self.acc, fn_)):
minions.append(fn_)
return minions
except OSError as exc:
log.error(
'Encountered OSError while evaluating minions in PKI dir: %s',
exc
)
return minions
|
[
"def",
"_pki_minions",
"(",
"self",
")",
":",
"minions",
"=",
"[",
"]",
"pki_cache_fn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"opts",
"[",
"'pki_dir'",
"]",
",",
"self",
".",
"acc",
",",
"'.key_cache'",
")",
"try",
":",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"pki_cache_fn",
")",
")",
"except",
"OSError",
":",
"pass",
"try",
":",
"if",
"self",
".",
"opts",
"[",
"'key_cache'",
"]",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"pki_cache_fn",
")",
":",
"log",
".",
"debug",
"(",
"'Returning cached minion list'",
")",
"if",
"six",
".",
"PY2",
":",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"pki_cache_fn",
")",
"as",
"fn_",
":",
"return",
"self",
".",
"serial",
".",
"load",
"(",
"fn_",
")",
"else",
":",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"pki_cache_fn",
",",
"mode",
"=",
"'rb'",
")",
"as",
"fn_",
":",
"return",
"self",
".",
"serial",
".",
"load",
"(",
"fn_",
")",
"else",
":",
"for",
"fn_",
"in",
"salt",
".",
"utils",
".",
"data",
".",
"sorted_ignorecase",
"(",
"os",
".",
"listdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"opts",
"[",
"'pki_dir'",
"]",
",",
"self",
".",
"acc",
")",
")",
")",
":",
"if",
"not",
"fn_",
".",
"startswith",
"(",
"'.'",
")",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"opts",
"[",
"'pki_dir'",
"]",
",",
"self",
".",
"acc",
",",
"fn_",
")",
")",
":",
"minions",
".",
"append",
"(",
"fn_",
")",
"return",
"minions",
"except",
"OSError",
"as",
"exc",
":",
"log",
".",
"error",
"(",
"'Encountered OSError while evaluating minions in PKI dir: %s'",
",",
"exc",
")",
"return",
"minions"
] | 41.580645
| 24.483871
|
def wait_command(self, start_func, turns=1, end_func=None):
"""Call ``start_func``, and wait to call ``end_func`` after simulating ``turns`` (default 1)
:param start_func: function to call before waiting
:param turns: number of turns to wait
:param end_func: function to call after waiting
:return: ``None``
"""
start_func()
self.wait_turns(turns, cb=end_func)
|
[
"def",
"wait_command",
"(",
"self",
",",
"start_func",
",",
"turns",
"=",
"1",
",",
"end_func",
"=",
"None",
")",
":",
"start_func",
"(",
")",
"self",
".",
"wait_turns",
"(",
"turns",
",",
"cb",
"=",
"end_func",
")"
] | 37.818182
| 15.909091
|
def std(self, axis=None, keepdims=False):
"""
Return the standard deviation of the array over the given axis.
Parameters
----------
axis : tuple or int, optional, default=None
Axis to compute statistic over, if None
will compute over all axes
keepdims : boolean, optional, default=False
Keep axis remaining after operation with size 1.
"""
return self._stat(axis, name='stdev', keepdims=keepdims)
|
[
"def",
"std",
"(",
"self",
",",
"axis",
"=",
"None",
",",
"keepdims",
"=",
"False",
")",
":",
"return",
"self",
".",
"_stat",
"(",
"axis",
",",
"name",
"=",
"'stdev'",
",",
"keepdims",
"=",
"keepdims",
")"
] | 34.642857
| 16.785714
|
def TeArraySizeCheck(self):
"""
Checks that Te and q0 array sizes are compatible
For finite difference solution.
"""
# Only if they are both defined and are arrays
# Both being arrays is a possible bug in this check routine that I have
# intentionally introduced
if type(self.Te) == np.ndarray and type(self.qs) == np.ndarray:
# Doesn't touch non-arrays or 1D arrays
if type(self.Te) is np.ndarray:
if (np.array(self.Te.shape) != np.array(self.qs.shape)).any():
sys.exit("q0 and Te arrays have incompatible shapes. Exiting.")
else:
if self.Debug: print("Te and qs array sizes pass consistency check")
|
[
"def",
"TeArraySizeCheck",
"(",
"self",
")",
":",
"# Only if they are both defined and are arrays",
"# Both being arrays is a possible bug in this check routine that I have ",
"# intentionally introduced",
"if",
"type",
"(",
"self",
".",
"Te",
")",
"==",
"np",
".",
"ndarray",
"and",
"type",
"(",
"self",
".",
"qs",
")",
"==",
"np",
".",
"ndarray",
":",
"# Doesn't touch non-arrays or 1D arrays",
"if",
"type",
"(",
"self",
".",
"Te",
")",
"is",
"np",
".",
"ndarray",
":",
"if",
"(",
"np",
".",
"array",
"(",
"self",
".",
"Te",
".",
"shape",
")",
"!=",
"np",
".",
"array",
"(",
"self",
".",
"qs",
".",
"shape",
")",
")",
".",
"any",
"(",
")",
":",
"sys",
".",
"exit",
"(",
"\"q0 and Te arrays have incompatible shapes. Exiting.\"",
")",
"else",
":",
"if",
"self",
".",
"Debug",
":",
"print",
"(",
"\"Te and qs array sizes pass consistency check\"",
")"
] | 44.2
| 16.6
|
def json_data(self, data=None):
"""Adds the default_data to data and dumps it to a json."""
if data is None:
data = {}
data.update(self.default_data)
return json.dumps(data)
|
[
"def",
"json_data",
"(",
"self",
",",
"data",
"=",
"None",
")",
":",
"if",
"data",
"is",
"None",
":",
"data",
"=",
"{",
"}",
"data",
".",
"update",
"(",
"self",
".",
"default_data",
")",
"return",
"json",
".",
"dumps",
"(",
"data",
")"
] | 35.333333
| 9.166667
|
def mean_rate(self):
"""
Returns the mean rate of the events since the start of the process.
"""
if self.counter.value == 0:
return 0.0
else:
elapsed = time() - self.start_time
return self.counter.value / elapsed
|
[
"def",
"mean_rate",
"(",
"self",
")",
":",
"if",
"self",
".",
"counter",
".",
"value",
"==",
"0",
":",
"return",
"0.0",
"else",
":",
"elapsed",
"=",
"time",
"(",
")",
"-",
"self",
".",
"start_time",
"return",
"self",
".",
"counter",
".",
"value",
"/",
"elapsed"
] | 31.111111
| 13.111111
|
def load(self):
"""Loads configuration file"""
# Config files prior to 0.2.4 dor not have config version keys
old_config = not self.cfg_file.Exists("config_version")
# Reset data
self.data.__dict__.update(self.defaults.__dict__)
for key in self.defaults.__dict__:
if self.cfg_file.Exists(key):
setattr(self.data, key, self.cfg_file.Read(key))
# Reset keys that should be reset on version upgrades
if old_config or self.version != self.data.config_version:
for key in self.reset_on_version_change:
setattr(self.data, key, getattr(DefaultConfig(), key))
self.data.config_version = self.version
# Delete gpg_key_uid and insert fingerprint key
if hasattr(self.data, "gpg_key_uid"):
oldkey = "gpg_key_uid"
delattr(self.data, oldkey)
newkey = "gpg_key_fingerprint"
setattr(self.data, newkey, getattr(DefaultConfig(), newkey))
|
[
"def",
"load",
"(",
"self",
")",
":",
"# Config files prior to 0.2.4 dor not have config version keys",
"old_config",
"=",
"not",
"self",
".",
"cfg_file",
".",
"Exists",
"(",
"\"config_version\"",
")",
"# Reset data",
"self",
".",
"data",
".",
"__dict__",
".",
"update",
"(",
"self",
".",
"defaults",
".",
"__dict__",
")",
"for",
"key",
"in",
"self",
".",
"defaults",
".",
"__dict__",
":",
"if",
"self",
".",
"cfg_file",
".",
"Exists",
"(",
"key",
")",
":",
"setattr",
"(",
"self",
".",
"data",
",",
"key",
",",
"self",
".",
"cfg_file",
".",
"Read",
"(",
"key",
")",
")",
"# Reset keys that should be reset on version upgrades",
"if",
"old_config",
"or",
"self",
".",
"version",
"!=",
"self",
".",
"data",
".",
"config_version",
":",
"for",
"key",
"in",
"self",
".",
"reset_on_version_change",
":",
"setattr",
"(",
"self",
".",
"data",
",",
"key",
",",
"getattr",
"(",
"DefaultConfig",
"(",
")",
",",
"key",
")",
")",
"self",
".",
"data",
".",
"config_version",
"=",
"self",
".",
"version",
"# Delete gpg_key_uid and insert fingerprint key",
"if",
"hasattr",
"(",
"self",
".",
"data",
",",
"\"gpg_key_uid\"",
")",
":",
"oldkey",
"=",
"\"gpg_key_uid\"",
"delattr",
"(",
"self",
".",
"data",
",",
"oldkey",
")",
"newkey",
"=",
"\"gpg_key_fingerprint\"",
"setattr",
"(",
"self",
".",
"data",
",",
"newkey",
",",
"getattr",
"(",
"DefaultConfig",
"(",
")",
",",
"newkey",
")",
")"
] | 38.307692
| 20.923077
|
async def update(self) -> None:
"""Get the latest data from api.entur.org."""
headers = {'ET-Client-Name': self._client_name}
request = {
'query': self.get_gql_query(),
'variables': {
'stops': self.stops,
'quays': self.quays,
'whitelist': {
'lines': self.line_whitelist
},
'numberOfDepartures': self.number_of_departures,
'omitNonBoarding': self.omit_non_boarding
}
}
with async_timeout.timeout(10):
resp = await self.web_session.post(RESOURCE,
json=request,
headers=headers)
if resp.status != 200:
_LOGGER.error(
"Error connecting to Entur, response http status code: %s",
resp.status)
return None
result = await resp.json()
if 'errors' in result:
_LOGGER.warning("Entur API responded with error message: {error}",
result['errors'])
return
self._data = result['data']
if 'stopPlaces' in self._data:
for stop in self._data['stopPlaces']:
self._process_place(stop, False)
if 'quays' in self._data:
for quay in self._data['quays']:
self._process_place(quay, True)
|
[
"async",
"def",
"update",
"(",
"self",
")",
"->",
"None",
":",
"headers",
"=",
"{",
"'ET-Client-Name'",
":",
"self",
".",
"_client_name",
"}",
"request",
"=",
"{",
"'query'",
":",
"self",
".",
"get_gql_query",
"(",
")",
",",
"'variables'",
":",
"{",
"'stops'",
":",
"self",
".",
"stops",
",",
"'quays'",
":",
"self",
".",
"quays",
",",
"'whitelist'",
":",
"{",
"'lines'",
":",
"self",
".",
"line_whitelist",
"}",
",",
"'numberOfDepartures'",
":",
"self",
".",
"number_of_departures",
",",
"'omitNonBoarding'",
":",
"self",
".",
"omit_non_boarding",
"}",
"}",
"with",
"async_timeout",
".",
"timeout",
"(",
"10",
")",
":",
"resp",
"=",
"await",
"self",
".",
"web_session",
".",
"post",
"(",
"RESOURCE",
",",
"json",
"=",
"request",
",",
"headers",
"=",
"headers",
")",
"if",
"resp",
".",
"status",
"!=",
"200",
":",
"_LOGGER",
".",
"error",
"(",
"\"Error connecting to Entur, response http status code: %s\"",
",",
"resp",
".",
"status",
")",
"return",
"None",
"result",
"=",
"await",
"resp",
".",
"json",
"(",
")",
"if",
"'errors'",
"in",
"result",
":",
"_LOGGER",
".",
"warning",
"(",
"\"Entur API responded with error message: {error}\"",
",",
"result",
"[",
"'errors'",
"]",
")",
"return",
"self",
".",
"_data",
"=",
"result",
"[",
"'data'",
"]",
"if",
"'stopPlaces'",
"in",
"self",
".",
"_data",
":",
"for",
"stop",
"in",
"self",
".",
"_data",
"[",
"'stopPlaces'",
"]",
":",
"self",
".",
"_process_place",
"(",
"stop",
",",
"False",
")",
"if",
"'quays'",
"in",
"self",
".",
"_data",
":",
"for",
"quay",
"in",
"self",
".",
"_data",
"[",
"'quays'",
"]",
":",
"self",
".",
"_process_place",
"(",
"quay",
",",
"True",
")"
] | 33.395349
| 17.651163
|
def _truncate_to_field(model, field_name, value):
"""
Shorten data to fit in the specified model field.
If the data were too big for the field, it would cause a failure to
insert, so we shorten it, truncating in the middle (because
valuable information often shows up at the end.
"""
field = model._meta.get_field(field_name) # pylint: disable=protected-access
if len(value) > field.max_length:
midpoint = field.max_length // 2
len_after_midpoint = field.max_length - midpoint
first = value[:midpoint]
sep = '...'
last = value[len(value) - len_after_midpoint + len(sep):]
value = sep.join([first, last])
return value
|
[
"def",
"_truncate_to_field",
"(",
"model",
",",
"field_name",
",",
"value",
")",
":",
"field",
"=",
"model",
".",
"_meta",
".",
"get_field",
"(",
"field_name",
")",
"# pylint: disable=protected-access",
"if",
"len",
"(",
"value",
")",
">",
"field",
".",
"max_length",
":",
"midpoint",
"=",
"field",
".",
"max_length",
"//",
"2",
"len_after_midpoint",
"=",
"field",
".",
"max_length",
"-",
"midpoint",
"first",
"=",
"value",
"[",
":",
"midpoint",
"]",
"sep",
"=",
"'...'",
"last",
"=",
"value",
"[",
"len",
"(",
"value",
")",
"-",
"len_after_midpoint",
"+",
"len",
"(",
"sep",
")",
":",
"]",
"value",
"=",
"sep",
".",
"join",
"(",
"[",
"first",
",",
"last",
"]",
")",
"return",
"value"
] | 40.352941
| 15.647059
|
def _post_analysis(self):
"""
Post-CFG-construction.
:return: None
"""
self._make_completed_functions()
new_changes = self._iteratively_analyze_function_features()
functions_do_not_return = new_changes['functions_do_not_return']
self._update_function_callsites(functions_do_not_return)
# Create all pending edges
for _, edges in self._pending_edges.items():
for src_node, dst_node, data in edges:
self._graph_add_edge(src_node, dst_node, **data)
# Remove those edges that will never be taken!
self._remove_non_return_edges()
CFGBase._post_analysis(self)
|
[
"def",
"_post_analysis",
"(",
"self",
")",
":",
"self",
".",
"_make_completed_functions",
"(",
")",
"new_changes",
"=",
"self",
".",
"_iteratively_analyze_function_features",
"(",
")",
"functions_do_not_return",
"=",
"new_changes",
"[",
"'functions_do_not_return'",
"]",
"self",
".",
"_update_function_callsites",
"(",
"functions_do_not_return",
")",
"# Create all pending edges",
"for",
"_",
",",
"edges",
"in",
"self",
".",
"_pending_edges",
".",
"items",
"(",
")",
":",
"for",
"src_node",
",",
"dst_node",
",",
"data",
"in",
"edges",
":",
"self",
".",
"_graph_add_edge",
"(",
"src_node",
",",
"dst_node",
",",
"*",
"*",
"data",
")",
"# Remove those edges that will never be taken!",
"self",
".",
"_remove_non_return_edges",
"(",
")",
"CFGBase",
".",
"_post_analysis",
"(",
"self",
")"
] | 31.904762
| 18.952381
|
def update_source(self, id, **kwargs): # noqa: E501
"""Update metadata (description or tags) for a specific source. # noqa: E501
The \"hidden\" property is stored as a tag. To set the value, add \"hidden\": <value> to the list of tags. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_source(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param Source body: Example Body: <pre>{ \"sourceName\": \"source.name\", \"tags\": {\"sourceTag1\": true}, \"description\": \"Source Description\" }</pre>
:return: ResponseContainerSource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_source_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.update_source_with_http_info(id, **kwargs) # noqa: E501
return data
|
[
"def",
"update_source",
"(",
"self",
",",
"id",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"update_source_with_http_info",
"(",
"id",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"update_source_with_http_info",
"(",
"id",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | 53.045455
| 26.318182
|
def IsFile(path):
    '''
    :param unicode path:
        Path to a file (local or ftp)

    :raises NotImplementedProtocol:
        If checking for a non-local, non-ftp file

    :rtype: bool
    :returns:
        True if the file exists

    .. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
    '''
    from six.moves.urllib.parse import urlparse
    url = urlparse(path)

    if not _UrlIsLocal(url):
        # Every remote protocol (including ftp) raises here; the ftp and
        # fallback branches in the previous version were identical.
        from ._exceptions import NotImplementedProtocol
        raise NotImplementedProtocol(url.scheme)

    # Resolve symlinks to their target before testing
    if IsLink(path):
        return IsFile(ReadLink(path))
    return os.path.isfile(path)
|
[
"def",
"IsFile",
"(",
"path",
")",
":",
"from",
"six",
".",
"moves",
".",
"urllib",
".",
"parse",
"import",
"urlparse",
"url",
"=",
"urlparse",
"(",
"path",
")",
"if",
"_UrlIsLocal",
"(",
"url",
")",
":",
"if",
"IsLink",
"(",
"path",
")",
":",
"return",
"IsFile",
"(",
"ReadLink",
"(",
"path",
")",
")",
"return",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
"elif",
"url",
".",
"scheme",
"==",
"'ftp'",
":",
"from",
".",
"_exceptions",
"import",
"NotImplementedProtocol",
"raise",
"NotImplementedProtocol",
"(",
"url",
".",
"scheme",
")",
"else",
":",
"from",
".",
"_exceptions",
"import",
"NotImplementedProtocol",
"raise",
"NotImplementedProtocol",
"(",
"url",
".",
"scheme",
")"
] | 27.25
| 20.678571
|
def import_jwks_as_json(self, jwks, issuer):
"""
Imports all the keys that are represented in a JWKS expressed as a
JSON object
:param jwks: JSON representation of a JWKS
:param issuer: Who 'owns' the JWKS
"""
return self.import_jwks(json.loads(jwks), issuer)
|
[
"def",
"import_jwks_as_json",
"(",
"self",
",",
"jwks",
",",
"issuer",
")",
":",
"return",
"self",
".",
"import_jwks",
"(",
"json",
".",
"loads",
"(",
"jwks",
")",
",",
"issuer",
")"
] | 34.222222
| 14.222222
|
def kafka_kip(enrich):
    """ Kafka Improvement Proposals process study """
    # Three-pass study over an already-enriched mailing-list index:
    #   1) add_kip_fields             - tag each email with its KIP number,
    #      thread type (discuss/vote) and vote score
    #   2) add_kip_time_status_fields - derive per-KIP timings and status
    #   3) add_kip_final_status_field - propagate each KIP's final status
    # Each pass streams eitems from enrich.fetch() and the modified
    # documents are bulk-uploaded back into the same index (see the end of
    # this function).

    def extract_vote_and_binding(body):
        """ Extracts the vote and binding for a KIP process included in message body """
        # Returns a (vote, binding) tuple: vote is 1 (+1), -1 (veto) or 0
        # (no vote found); binding is 1 or 0. Only the first
        # MAX_LINES_FOR_VOTE lines are scanned (MAX_LINES_FOR_VOTE is a
        # module-level constant, not defined in this function).

        vote = 0
        binding = 0  # by default the votes are binding for +1
        nlines = 0
        for line in body.split("\n"):
            if nlines > MAX_LINES_FOR_VOTE:
                # The vote must be in the first MAX_LINES_VOTE
                break
            if line.startswith(">"):
                # This line is from a previous email
                continue
            elif "+1" in line and "-1" in line:
                # Report summary probably
                continue
            elif "to -1" in line or "is -1" in line or "= -1" in line or "-1 or" in line:
                # Phrases mentioning -1 that are not actual vetoes
                continue
            elif line.startswith("+1") or " +1 " in line or line.endswith("+1") \
                    or " +1." in line or " +1," in line:
                vote = 1
                binding = 1  # by default the votes are binding for +1
                if 'non-binding' in line.lower():
                    binding = 0
                elif 'binding' in line.lower():
                    binding = 1
                break
            elif line.startswith("-1") or line.endswith(" -1") or " -1 " in line \
                    or " -1." in line or " -1," in line:
                vote = -1
                if 'non-binding' in line.lower():
                    binding = 0
                elif 'binding' in line.lower():
                    binding = 1
                break
            nlines += 1

        return (vote, binding)

    def extract_kip(subject):
        """ Extracts a KIP number from an email subject """
        # Tries the separators observed after the "KIP" marker in real
        # subjects ('-', ' ' and ':') and returns the KIP number as an int,
        # or None if no number can be extracted. When several "KIP" tokens
        # appear, it recurses on each fragment and keeps the first match.

        kip = None

        if not subject:
            return kip

        if 'KIP' not in subject:
            return kip

        kip_tokens = subject.split('KIP')
        if len(kip_tokens) > 2:
            # [KIP-DISCUSSION] KIP-7 Security
            for token in kip_tokens:
                kip = extract_kip("KIP" + token)
                if kip:
                    break
            # logger.debug("Several KIPs in %s. Found: %i", subject, kip)
            return kip

        str_with_kip = kip_tokens[1]

        if not str_with_kip:
            # Sample use case subject: Create a space template for KIP
            return kip

        if str_with_kip[0] == '-':
            try:
                # KIP-120: Control
                str_kip = str_with_kip[1:].split(":")[0]
                kip = int(str_kip)
                return kip
            except ValueError:
                pass
            try:
                # KIP-8 Add
                str_kip = str_with_kip[1:].split(" ")[0]
                kip = int(str_kip)
                return kip
            except ValueError:
                pass
            try:
                # KIP-11- Authorization
                str_kip = str_with_kip[1:].split("-")[0]
                kip = int(str_kip)
                return kip
            except ValueError:
                pass
            try:
                # Bound fetch response size (KIP-74)
                str_kip = str_with_kip[1:].split(")")[0]
                kip = int(str_kip)
                return kip
            except ValueError:
                pass
            try:
                # KIP-31&
                str_kip = str_with_kip[1:].split("&")[0]
                kip = int(str_kip)
                return kip
            except ValueError:
                pass
            try:
                # KIP-31/
                str_kip = str_with_kip[1:].split("/")[0]
                kip = int(str_kip)
                return kip
            except ValueError:
                pass
            try:
                # Re: Copycat (KIP-26. PR-99) - plan on moving forward
                str_kip = str_with_kip[1:].split(".")[0]
                kip = int(str_kip)
                return kip
            except ValueError:
                pass
        elif str_with_kip[0] == ' ':
            try:
                # KIP 20 Enable
                str_kip = str_with_kip[1:].split(" ")[0]
                kip = int(str_kip)
                return kip
            except ValueError:
                pass
            try:
                # Re: [DISCUSS] KIP 88: DescribeGroups Protocol Update
                str_kip = str_with_kip[1:].split(":")[0]
                kip = int(str_kip)
                return kip
            except ValueError:
                pass
            try:
                # [jira] [Updated] (KAFKA-5092) KIP 141- ProducerRecordBuilder
                str_kip = str_with_kip[1:].split("-")[0]
                kip = int(str_kip)
                return kip
            except ValueError:
                pass
        elif str_with_kip[0] == ':':
            try:
                # Re: [VOTE] KIP:71 Enable log compaction and deletion to co-exist
                str_kip = str_with_kip[1:].split(" ")[0]
                kip = int(str_kip)
                return kip
            except ValueError:
                pass

        if not kip:
            # logger.debug("Can not extract KIP from %s", subject)
            pass

        return kip

    def lazy_result(votes):
        """ Compute the result of a votation using lazy consensus
        which requires 3 binding +1 votes and no binding vetoes.
        """
        # votes is a list of (vote, binding) tuples as returned by
        # extract_vote_and_binding(). Returns 1 (adopted) or -1 (not
        # adopted). Non-binding counts are tallied but do not affect the
        # outcome.
        yes = 0
        yes_binding = 0
        veto = 0
        veto_binding = 0

        result = -1
        for (vote, binding) in votes:
            if vote == 1:
                if binding:
                    yes_binding += 1
                else:
                    yes += 1
            if vote == -1:
                if binding:
                    veto_binding += 1
                else:
                    veto += 1

        if veto_binding == 0 and yes_binding >= 3:
            result = 1
        return result

    def add_kip_final_status_field(enrich):
        """ Add kip final status field """
        # Third pass: copies the per-KIP final status collected during
        # add_kip_time_status_fields() (enrich.kips_final_status) into
        # every KIP message.

        total = 0

        for eitem in enrich.fetch():
            if "kip" not in eitem:
                # It is not a KIP message
                continue
            if eitem['kip'] in enrich.kips_final_status:
                eitem.update({"kip_final_status":
                              enrich.kips_final_status[eitem['kip']]})
            else:
                logger.warning("No final status for kip: %i", eitem['kip'])
                eitem.update({"kip_final_status": None})
            yield eitem
            total += 1

        logger.info("Total eitems with kafka final status kip field %i", total)

    def add_kip_time_status_fields(enrich):
        """ Add kip fields with final status and times """
        # Second pass: uses the per-KIP dates gathered by add_kip_fields()
        # (enrich.kips_dates, enrich.kips_scores) to compute durations,
        # first/last message markers and the current status of each KIP.
        # It also records each KIP's status as seen in its last message
        # in enrich.kips_final_status, consumed by the third pass.

        total = 0
        max_inactive_days = 90  # days
        enrich.kips_final_status = {}  # final status for each kip

        for eitem in enrich.fetch():
            # kip_status: adopted (closed), discussion (open), voting (open),
            #             inactive (open), discarded (closed)
            # kip_start_end: discuss_start, discuss_end, voting_start, voting_end
            kip_fields = {
                "kip_status": None,
                "kip_discuss_time_days": None,
                "kip_discuss_inactive_days": None,
                "kip_voting_time_days": None,
                "kip_voting_inactive_days": None,
                "kip_is_first_discuss": 0,
                "kip_is_first_vote": 0,
                "kip_is_last_discuss": 0,
                "kip_is_last_vote": 0,
                "kip_result": None,
                "kip_start_end": None
            }

            if "kip" not in eitem:
                # It is not a KIP message
                continue

            kip = eitem["kip"]
            kip_date = parser.parse(eitem["email_date"])

            if eitem['kip_is_discuss']:
                kip_fields["kip_discuss_time_days"] = \
                    get_time_diff_days(enrich.kips_dates[kip]['kip_min_discuss'],
                                       enrich.kips_dates[kip]['kip_max_discuss'])

                # Detect first and last discuss messages
                if kip_date == enrich.kips_dates[kip]['kip_min_discuss']:
                    kip_fields['kip_is_first_discuss'] = 1
                    kip_fields['kip_start_end'] = 'discuss_start'
                elif kip_date == enrich.kips_dates[kip]['kip_max_discuss']:
                    kip_fields['kip_is_last_discuss'] = 1
                    kip_fields['kip_start_end'] = 'discuss_end'

                # Detect discussion status
                if "kip_min_vote" not in enrich.kips_dates[kip]:
                    # No voting thread seen yet: still under discussion
                    kip_fields['kip_status'] = 'discussion'

                max_discuss_date = enrich.kips_dates[kip]['kip_max_discuss']
                kip_fields['kip_discuss_inactive_days'] = \
                    get_time_diff_days(max_discuss_date.replace(tzinfo=None),
                                       datetime.utcnow())

            if eitem['kip_is_vote']:
                kip_fields["kip_voting_time_days"] = \
                    get_time_diff_days(enrich.kips_dates[kip]['kip_min_vote'],
                                       enrich.kips_dates[kip]['kip_max_vote'])

                # Detect first and last discuss messages
                if kip_date == enrich.kips_dates[kip]['kip_min_vote']:
                    kip_fields['kip_is_first_vote'] = 1
                    kip_fields['kip_start_end'] = 'voting_start'
                elif kip_date == enrich.kips_dates[kip]['kip_max_vote']:
                    kip_fields['kip_is_last_vote'] = 1
                    kip_fields['kip_start_end'] = 'voting_end'

                # Detect discussion status
                kip_fields['kip_status'] = 'voting'

                max_vote_date = enrich.kips_dates[kip]['kip_max_vote']
                kip_fields['kip_voting_inactive_days'] = \
                    get_time_diff_days(max_vote_date.replace(tzinfo=None),
                                       datetime.utcnow())

                # Now check if there is a result from enrich.kips_scores
                kip_fields['kip_result'] = lazy_result(enrich.kips_scores[kip])
                if kip_fields['kip_result'] == 1:
                    kip_fields['kip_status'] = 'adopted'
                elif kip_fields['kip_result'] == -1:
                    kip_fields['kip_status'] = 'discarded'

            # And now change the status inactive
            if kip_fields['kip_status'] not in ['adopted', 'discarded']:
                inactive_days = kip_fields['kip_discuss_inactive_days']
                if inactive_days and inactive_days > max_inactive_days:
                    kip_fields['kip_status'] = 'inactive'
                inactive_days = kip_fields['kip_voting_inactive_days']
                if inactive_days and inactive_days > max_inactive_days:
                    kip_fields['kip_status'] = 'inactive'

            # The final status is in the kip_is_last_discuss or kip_is_last_vote
            # It will be filled in the next enrichment round
            eitem.update(kip_fields)

            if eitem['kip'] not in enrich.kips_final_status:
                enrich.kips_final_status[kip] = None
            # A last vote message overrides the status from a last discuss one
            if eitem['kip_is_last_discuss'] and not enrich.kips_final_status[kip]:
                enrich.kips_final_status[kip] = kip_fields['kip_status']
            if eitem['kip_is_last_vote']:
                enrich.kips_final_status[kip] = kip_fields['kip_status']

            yield eitem
            total += 1

        logger.info("Total eitems with kafka extra kip fields %i", total)

    def add_kip_fields(enrich):
        """ Add extra fields needed for kip analysis"""
        # First pass: parses each message subject/body to detect the KIP
        # number, whether the message belongs to a discuss or a vote thread
        # and, for votes, the (vote, binding) score. As a side effect it
        # fills enrich.kips_dates (min/max discuss and vote dates per KIP)
        # and enrich.kips_scores (list of votes per KIP) for later passes.

        total = 0

        enrich.kips_dates = {
            0: {
                "kip_min_discuss": None,
                "kip_max_discuss": None,
                "kip_min_vote": None,
                "kip_max_vote": None,
            }
        }
        enrich.kips_scores = {}

        # First iteration
        for eitem in enrich.fetch():
            kip_fields = {
                "kip_is_vote": 0,
                "kip_is_discuss": 0,
                "kip_vote": 0,
                "kip_binding": 0,
                "kip": 0,
                "kip_type": "general"
            }

            kip = extract_kip(eitem['Subject'])
            if not kip:
                # It is not a KIP message
                continue

            if kip not in enrich.kips_dates:
                enrich.kips_dates[kip] = {}

            if kip not in enrich.kips_scores:
                enrich.kips_scores[kip] = []

            kip_date = parser.parse(eitem["email_date"])

            # Analyze the subject to fill the kip fields
            if '[discuss]' in eitem['Subject'].lower() or \
                    '[kip-discussion]'in eitem['Subject'].lower() or \
                    '[discussion]'in eitem['Subject'].lower():
                kip_fields['kip_is_discuss'] = 1
                kip_fields['kip_type'] = "discuss"
                kip_fields['kip'] = kip

                # Update kip discuss dates
                if "kip_min_discuss" not in enrich.kips_dates[kip]:
                    enrich.kips_dates[kip].update({
                        "kip_min_discuss": kip_date,
                        "kip_max_discuss": kip_date
                    })
                else:
                    if enrich.kips_dates[kip]["kip_min_discuss"] >= kip_date:
                        enrich.kips_dates[kip]["kip_min_discuss"] = kip_date
                    if enrich.kips_dates[kip]["kip_max_discuss"] <= kip_date:
                        enrich.kips_dates[kip]["kip_max_discuss"] = kip_date

            if '[vote]' in eitem['Subject'].lower():
                kip_fields['kip_is_vote'] = 1
                kip_fields['kip_type'] = "vote"
                kip_fields['kip'] = kip
                if 'body_extract' in eitem:
                    (vote, binding) = extract_vote_and_binding(eitem['body_extract'])
                    enrich.kips_scores[kip] += [(vote, binding)]
                    kip_fields['kip_vote'] = vote
                    kip_fields['kip_binding'] = binding
                else:
                    logger.debug("Message %s without body", eitem['Subject'])

                # Update kip discuss dates
                if "kip_min_vote" not in enrich.kips_dates[kip]:
                    enrich.kips_dates[kip].update({
                        "kip_min_vote": kip_date,
                        "kip_max_vote": kip_date
                    })
                else:
                    if enrich.kips_dates[kip]["kip_min_vote"] >= kip_date:
                        enrich.kips_dates[kip]["kip_min_vote"] = kip_date
                    if enrich.kips_dates[kip]["kip_max_vote"] <= kip_date:
                        enrich.kips_dates[kip]["kip_max_vote"] = kip_date

            eitem.update(kip_fields)
            yield eitem
            total += 1

        logger.info("Total eitems with kafka kip fields %i", total)

    logger.debug("Doing kafka_kip study from %s", enrich.elastic.anonymize_url(enrich.elastic.index_url))

    # First iteration with the basic fields
    eitems = add_kip_fields(enrich)
    enrich.elastic.bulk_upload(eitems, enrich.get_field_unique_id())

    # Second iteration with the final time and status fields
    eitems = add_kip_time_status_fields(enrich)
    enrich.elastic.bulk_upload(eitems, enrich.get_field_unique_id())

    # Third iteration to compute the end status field for all KIPs
    eitems = add_kip_final_status_field(enrich)
    enrich.elastic.bulk_upload(eitems, enrich.get_field_unique_id())
|
[
"def",
"kafka_kip",
"(",
"enrich",
")",
":",
"def",
"extract_vote_and_binding",
"(",
"body",
")",
":",
"\"\"\" Extracts the vote and binding for a KIP process included in message body \"\"\"",
"vote",
"=",
"0",
"binding",
"=",
"0",
"# by default the votes are binding for +1",
"nlines",
"=",
"0",
"for",
"line",
"in",
"body",
".",
"split",
"(",
"\"\\n\"",
")",
":",
"if",
"nlines",
">",
"MAX_LINES_FOR_VOTE",
":",
"# The vote must be in the first MAX_LINES_VOTE",
"break",
"if",
"line",
".",
"startswith",
"(",
"\">\"",
")",
":",
"# This line is from a previous email",
"continue",
"elif",
"\"+1\"",
"in",
"line",
"and",
"\"-1\"",
"in",
"line",
":",
"# Report summary probably",
"continue",
"elif",
"\"to -1\"",
"in",
"line",
"or",
"\"is -1\"",
"in",
"line",
"or",
"\"= -1\"",
"in",
"line",
"or",
"\"-1 or\"",
"in",
"line",
":",
"continue",
"elif",
"line",
".",
"startswith",
"(",
"\"+1\"",
")",
"or",
"\" +1 \"",
"in",
"line",
"or",
"line",
".",
"endswith",
"(",
"\"+1\"",
")",
"or",
"\" +1.\"",
"in",
"line",
"or",
"\" +1,\"",
"in",
"line",
":",
"vote",
"=",
"1",
"binding",
"=",
"1",
"# by default the votes are binding for +1",
"if",
"'non-binding'",
"in",
"line",
".",
"lower",
"(",
")",
":",
"binding",
"=",
"0",
"elif",
"'binding'",
"in",
"line",
".",
"lower",
"(",
")",
":",
"binding",
"=",
"1",
"break",
"elif",
"line",
".",
"startswith",
"(",
"\"-1\"",
")",
"or",
"line",
".",
"endswith",
"(",
"\" -1\"",
")",
"or",
"\" -1 \"",
"in",
"line",
"or",
"\" -1.\"",
"in",
"line",
"or",
"\" -1,\"",
"in",
"line",
":",
"vote",
"=",
"-",
"1",
"if",
"'non-binding'",
"in",
"line",
".",
"lower",
"(",
")",
":",
"binding",
"=",
"0",
"elif",
"'binding'",
"in",
"line",
".",
"lower",
"(",
")",
":",
"binding",
"=",
"1",
"break",
"nlines",
"+=",
"1",
"return",
"(",
"vote",
",",
"binding",
")",
"def",
"extract_kip",
"(",
"subject",
")",
":",
"\"\"\" Extracts a KIP number from an email subject \"\"\"",
"kip",
"=",
"None",
"if",
"not",
"subject",
":",
"return",
"kip",
"if",
"'KIP'",
"not",
"in",
"subject",
":",
"return",
"kip",
"kip_tokens",
"=",
"subject",
".",
"split",
"(",
"'KIP'",
")",
"if",
"len",
"(",
"kip_tokens",
")",
">",
"2",
":",
"# [KIP-DISCUSSION] KIP-7 Security",
"for",
"token",
"in",
"kip_tokens",
":",
"kip",
"=",
"extract_kip",
"(",
"\"KIP\"",
"+",
"token",
")",
"if",
"kip",
":",
"break",
"# logger.debug(\"Several KIPs in %s. Found: %i\", subject, kip)",
"return",
"kip",
"str_with_kip",
"=",
"kip_tokens",
"[",
"1",
"]",
"if",
"not",
"str_with_kip",
":",
"# Sample use case subject: Create a space template for KIP",
"return",
"kip",
"if",
"str_with_kip",
"[",
"0",
"]",
"==",
"'-'",
":",
"try",
":",
"# KIP-120: Control",
"str_kip",
"=",
"str_with_kip",
"[",
"1",
":",
"]",
".",
"split",
"(",
"\":\"",
")",
"[",
"0",
"]",
"kip",
"=",
"int",
"(",
"str_kip",
")",
"return",
"kip",
"except",
"ValueError",
":",
"pass",
"try",
":",
"# KIP-8 Add",
"str_kip",
"=",
"str_with_kip",
"[",
"1",
":",
"]",
".",
"split",
"(",
"\" \"",
")",
"[",
"0",
"]",
"kip",
"=",
"int",
"(",
"str_kip",
")",
"return",
"kip",
"except",
"ValueError",
":",
"pass",
"try",
":",
"# KIP-11- Authorization",
"str_kip",
"=",
"str_with_kip",
"[",
"1",
":",
"]",
".",
"split",
"(",
"\"-\"",
")",
"[",
"0",
"]",
"kip",
"=",
"int",
"(",
"str_kip",
")",
"return",
"kip",
"except",
"ValueError",
":",
"pass",
"try",
":",
"# Bound fetch response size (KIP-74)",
"str_kip",
"=",
"str_with_kip",
"[",
"1",
":",
"]",
".",
"split",
"(",
"\")\"",
")",
"[",
"0",
"]",
"kip",
"=",
"int",
"(",
"str_kip",
")",
"return",
"kip",
"except",
"ValueError",
":",
"pass",
"try",
":",
"# KIP-31&",
"str_kip",
"=",
"str_with_kip",
"[",
"1",
":",
"]",
".",
"split",
"(",
"\"&\"",
")",
"[",
"0",
"]",
"kip",
"=",
"int",
"(",
"str_kip",
")",
"return",
"kip",
"except",
"ValueError",
":",
"pass",
"try",
":",
"# KIP-31/",
"str_kip",
"=",
"str_with_kip",
"[",
"1",
":",
"]",
".",
"split",
"(",
"\"/\"",
")",
"[",
"0",
"]",
"kip",
"=",
"int",
"(",
"str_kip",
")",
"return",
"kip",
"except",
"ValueError",
":",
"pass",
"try",
":",
"# Re: Copycat (KIP-26. PR-99) - plan on moving forward",
"str_kip",
"=",
"str_with_kip",
"[",
"1",
":",
"]",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
"kip",
"=",
"int",
"(",
"str_kip",
")",
"return",
"kip",
"except",
"ValueError",
":",
"pass",
"elif",
"str_with_kip",
"[",
"0",
"]",
"==",
"' '",
":",
"try",
":",
"# KIP 20 Enable",
"str_kip",
"=",
"str_with_kip",
"[",
"1",
":",
"]",
".",
"split",
"(",
"\" \"",
")",
"[",
"0",
"]",
"kip",
"=",
"int",
"(",
"str_kip",
")",
"return",
"kip",
"except",
"ValueError",
":",
"pass",
"try",
":",
"# Re: [DISCUSS] KIP 88: DescribeGroups Protocol Update",
"str_kip",
"=",
"str_with_kip",
"[",
"1",
":",
"]",
".",
"split",
"(",
"\":\"",
")",
"[",
"0",
"]",
"kip",
"=",
"int",
"(",
"str_kip",
")",
"return",
"kip",
"except",
"ValueError",
":",
"pass",
"try",
":",
"# [jira] [Updated] (KAFKA-5092) KIP 141- ProducerRecordBuilder",
"str_kip",
"=",
"str_with_kip",
"[",
"1",
":",
"]",
".",
"split",
"(",
"\"-\"",
")",
"[",
"0",
"]",
"kip",
"=",
"int",
"(",
"str_kip",
")",
"return",
"kip",
"except",
"ValueError",
":",
"pass",
"elif",
"str_with_kip",
"[",
"0",
"]",
"==",
"':'",
":",
"try",
":",
"# Re: [VOTE] KIP:71 Enable log compaction and deletion to co-exist",
"str_kip",
"=",
"str_with_kip",
"[",
"1",
":",
"]",
".",
"split",
"(",
"\" \"",
")",
"[",
"0",
"]",
"kip",
"=",
"int",
"(",
"str_kip",
")",
"return",
"kip",
"except",
"ValueError",
":",
"pass",
"if",
"not",
"kip",
":",
"# logger.debug(\"Can not extract KIP from %s\", subject)",
"pass",
"return",
"kip",
"def",
"lazy_result",
"(",
"votes",
")",
":",
"\"\"\" Compute the result of a votation using lazy consensus\n which requires 3 binding +1 votes and no binding vetoes.\n \"\"\"",
"yes",
"=",
"0",
"yes_binding",
"=",
"0",
"veto",
"=",
"0",
"veto_binding",
"=",
"0",
"result",
"=",
"-",
"1",
"for",
"(",
"vote",
",",
"binding",
")",
"in",
"votes",
":",
"if",
"vote",
"==",
"1",
":",
"if",
"binding",
":",
"yes_binding",
"+=",
"1",
"else",
":",
"yes",
"+=",
"1",
"if",
"vote",
"==",
"-",
"1",
":",
"if",
"binding",
":",
"veto_binding",
"+=",
"1",
"else",
":",
"veto",
"+=",
"1",
"if",
"veto_binding",
"==",
"0",
"and",
"yes_binding",
">=",
"3",
":",
"result",
"=",
"1",
"return",
"result",
"def",
"add_kip_final_status_field",
"(",
"enrich",
")",
":",
"\"\"\" Add kip final status field \"\"\"",
"total",
"=",
"0",
"for",
"eitem",
"in",
"enrich",
".",
"fetch",
"(",
")",
":",
"if",
"\"kip\"",
"not",
"in",
"eitem",
":",
"# It is not a KIP message",
"continue",
"if",
"eitem",
"[",
"'kip'",
"]",
"in",
"enrich",
".",
"kips_final_status",
":",
"eitem",
".",
"update",
"(",
"{",
"\"kip_final_status\"",
":",
"enrich",
".",
"kips_final_status",
"[",
"eitem",
"[",
"'kip'",
"]",
"]",
"}",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"\"No final status for kip: %i\"",
",",
"eitem",
"[",
"'kip'",
"]",
")",
"eitem",
".",
"update",
"(",
"{",
"\"kip_final_status\"",
":",
"None",
"}",
")",
"yield",
"eitem",
"total",
"+=",
"1",
"logger",
".",
"info",
"(",
"\"Total eitems with kafka final status kip field %i\"",
",",
"total",
")",
"def",
"add_kip_time_status_fields",
"(",
"enrich",
")",
":",
"\"\"\" Add kip fields with final status and times \"\"\"",
"total",
"=",
"0",
"max_inactive_days",
"=",
"90",
"# days",
"enrich",
".",
"kips_final_status",
"=",
"{",
"}",
"# final status for each kip",
"for",
"eitem",
"in",
"enrich",
".",
"fetch",
"(",
")",
":",
"# kip_status: adopted (closed), discussion (open), voting (open),",
"# inactive (open), discarded (closed)",
"# kip_start_end: discuss_start, discuss_end, voting_start, voting_end",
"kip_fields",
"=",
"{",
"\"kip_status\"",
":",
"None",
",",
"\"kip_discuss_time_days\"",
":",
"None",
",",
"\"kip_discuss_inactive_days\"",
":",
"None",
",",
"\"kip_voting_time_days\"",
":",
"None",
",",
"\"kip_voting_inactive_days\"",
":",
"None",
",",
"\"kip_is_first_discuss\"",
":",
"0",
",",
"\"kip_is_first_vote\"",
":",
"0",
",",
"\"kip_is_last_discuss\"",
":",
"0",
",",
"\"kip_is_last_vote\"",
":",
"0",
",",
"\"kip_result\"",
":",
"None",
",",
"\"kip_start_end\"",
":",
"None",
"}",
"if",
"\"kip\"",
"not",
"in",
"eitem",
":",
"# It is not a KIP message",
"continue",
"kip",
"=",
"eitem",
"[",
"\"kip\"",
"]",
"kip_date",
"=",
"parser",
".",
"parse",
"(",
"eitem",
"[",
"\"email_date\"",
"]",
")",
"if",
"eitem",
"[",
"'kip_is_discuss'",
"]",
":",
"kip_fields",
"[",
"\"kip_discuss_time_days\"",
"]",
"=",
"get_time_diff_days",
"(",
"enrich",
".",
"kips_dates",
"[",
"kip",
"]",
"[",
"'kip_min_discuss'",
"]",
",",
"enrich",
".",
"kips_dates",
"[",
"kip",
"]",
"[",
"'kip_max_discuss'",
"]",
")",
"# Detect first and last discuss messages",
"if",
"kip_date",
"==",
"enrich",
".",
"kips_dates",
"[",
"kip",
"]",
"[",
"'kip_min_discuss'",
"]",
":",
"kip_fields",
"[",
"'kip_is_first_discuss'",
"]",
"=",
"1",
"kip_fields",
"[",
"'kip_start_end'",
"]",
"=",
"'discuss_start'",
"elif",
"kip_date",
"==",
"enrich",
".",
"kips_dates",
"[",
"kip",
"]",
"[",
"'kip_max_discuss'",
"]",
":",
"kip_fields",
"[",
"'kip_is_last_discuss'",
"]",
"=",
"1",
"kip_fields",
"[",
"'kip_start_end'",
"]",
"=",
"'discuss_end'",
"# Detect discussion status",
"if",
"\"kip_min_vote\"",
"not",
"in",
"enrich",
".",
"kips_dates",
"[",
"kip",
"]",
":",
"kip_fields",
"[",
"'kip_status'",
"]",
"=",
"'discussion'",
"max_discuss_date",
"=",
"enrich",
".",
"kips_dates",
"[",
"kip",
"]",
"[",
"'kip_max_discuss'",
"]",
"kip_fields",
"[",
"'kip_discuss_inactive_days'",
"]",
"=",
"get_time_diff_days",
"(",
"max_discuss_date",
".",
"replace",
"(",
"tzinfo",
"=",
"None",
")",
",",
"datetime",
".",
"utcnow",
"(",
")",
")",
"if",
"eitem",
"[",
"'kip_is_vote'",
"]",
":",
"kip_fields",
"[",
"\"kip_voting_time_days\"",
"]",
"=",
"get_time_diff_days",
"(",
"enrich",
".",
"kips_dates",
"[",
"kip",
"]",
"[",
"'kip_min_vote'",
"]",
",",
"enrich",
".",
"kips_dates",
"[",
"kip",
"]",
"[",
"'kip_max_vote'",
"]",
")",
"# Detect first and last discuss messages",
"if",
"kip_date",
"==",
"enrich",
".",
"kips_dates",
"[",
"kip",
"]",
"[",
"'kip_min_vote'",
"]",
":",
"kip_fields",
"[",
"'kip_is_first_vote'",
"]",
"=",
"1",
"kip_fields",
"[",
"'kip_start_end'",
"]",
"=",
"'voting_start'",
"elif",
"kip_date",
"==",
"enrich",
".",
"kips_dates",
"[",
"kip",
"]",
"[",
"'kip_max_vote'",
"]",
":",
"kip_fields",
"[",
"'kip_is_last_vote'",
"]",
"=",
"1",
"kip_fields",
"[",
"'kip_start_end'",
"]",
"=",
"'voting_end'",
"# Detect discussion status",
"kip_fields",
"[",
"'kip_status'",
"]",
"=",
"'voting'",
"max_vote_date",
"=",
"enrich",
".",
"kips_dates",
"[",
"kip",
"]",
"[",
"'kip_max_vote'",
"]",
"kip_fields",
"[",
"'kip_voting_inactive_days'",
"]",
"=",
"get_time_diff_days",
"(",
"max_vote_date",
".",
"replace",
"(",
"tzinfo",
"=",
"None",
")",
",",
"datetime",
".",
"utcnow",
"(",
")",
")",
"# Now check if there is a result from enrich.kips_scores",
"kip_fields",
"[",
"'kip_result'",
"]",
"=",
"lazy_result",
"(",
"enrich",
".",
"kips_scores",
"[",
"kip",
"]",
")",
"if",
"kip_fields",
"[",
"'kip_result'",
"]",
"==",
"1",
":",
"kip_fields",
"[",
"'kip_status'",
"]",
"=",
"'adopted'",
"elif",
"kip_fields",
"[",
"'kip_result'",
"]",
"==",
"-",
"1",
":",
"kip_fields",
"[",
"'kip_status'",
"]",
"=",
"'discarded'",
"# And now change the status inactive",
"if",
"kip_fields",
"[",
"'kip_status'",
"]",
"not",
"in",
"[",
"'adopted'",
",",
"'discarded'",
"]",
":",
"inactive_days",
"=",
"kip_fields",
"[",
"'kip_discuss_inactive_days'",
"]",
"if",
"inactive_days",
"and",
"inactive_days",
">",
"max_inactive_days",
":",
"kip_fields",
"[",
"'kip_status'",
"]",
"=",
"'inactive'",
"inactive_days",
"=",
"kip_fields",
"[",
"'kip_voting_inactive_days'",
"]",
"if",
"inactive_days",
"and",
"inactive_days",
">",
"max_inactive_days",
":",
"kip_fields",
"[",
"'kip_status'",
"]",
"=",
"'inactive'",
"# The final status is in the kip_is_last_discuss or kip_is_last_vote",
"# It will be filled in the next enrichment round",
"eitem",
".",
"update",
"(",
"kip_fields",
")",
"if",
"eitem",
"[",
"'kip'",
"]",
"not",
"in",
"enrich",
".",
"kips_final_status",
":",
"enrich",
".",
"kips_final_status",
"[",
"kip",
"]",
"=",
"None",
"if",
"eitem",
"[",
"'kip_is_last_discuss'",
"]",
"and",
"not",
"enrich",
".",
"kips_final_status",
"[",
"kip",
"]",
":",
"enrich",
".",
"kips_final_status",
"[",
"kip",
"]",
"=",
"kip_fields",
"[",
"'kip_status'",
"]",
"if",
"eitem",
"[",
"'kip_is_last_vote'",
"]",
":",
"enrich",
".",
"kips_final_status",
"[",
"kip",
"]",
"=",
"kip_fields",
"[",
"'kip_status'",
"]",
"yield",
"eitem",
"total",
"+=",
"1",
"logger",
".",
"info",
"(",
"\"Total eitems with kafka extra kip fields %i\"",
",",
"total",
")",
"def",
"add_kip_fields",
"(",
"enrich",
")",
":",
"\"\"\" Add extra fields needed for kip analysis\"\"\"",
"total",
"=",
"0",
"enrich",
".",
"kips_dates",
"=",
"{",
"0",
":",
"{",
"\"kip_min_discuss\"",
":",
"None",
",",
"\"kip_max_discuss\"",
":",
"None",
",",
"\"kip_min_vote\"",
":",
"None",
",",
"\"kip_max_vote\"",
":",
"None",
",",
"}",
"}",
"enrich",
".",
"kips_scores",
"=",
"{",
"}",
"# First iteration",
"for",
"eitem",
"in",
"enrich",
".",
"fetch",
"(",
")",
":",
"kip_fields",
"=",
"{",
"\"kip_is_vote\"",
":",
"0",
",",
"\"kip_is_discuss\"",
":",
"0",
",",
"\"kip_vote\"",
":",
"0",
",",
"\"kip_binding\"",
":",
"0",
",",
"\"kip\"",
":",
"0",
",",
"\"kip_type\"",
":",
"\"general\"",
"}",
"kip",
"=",
"extract_kip",
"(",
"eitem",
"[",
"'Subject'",
"]",
")",
"if",
"not",
"kip",
":",
"# It is not a KIP message",
"continue",
"if",
"kip",
"not",
"in",
"enrich",
".",
"kips_dates",
":",
"enrich",
".",
"kips_dates",
"[",
"kip",
"]",
"=",
"{",
"}",
"if",
"kip",
"not",
"in",
"enrich",
".",
"kips_scores",
":",
"enrich",
".",
"kips_scores",
"[",
"kip",
"]",
"=",
"[",
"]",
"kip_date",
"=",
"parser",
".",
"parse",
"(",
"eitem",
"[",
"\"email_date\"",
"]",
")",
"# Analyze the subject to fill the kip fields",
"if",
"'[discuss]'",
"in",
"eitem",
"[",
"'Subject'",
"]",
".",
"lower",
"(",
")",
"or",
"'[kip-discussion]'",
"in",
"eitem",
"[",
"'Subject'",
"]",
".",
"lower",
"(",
")",
"or",
"'[discussion]'",
"in",
"eitem",
"[",
"'Subject'",
"]",
".",
"lower",
"(",
")",
":",
"kip_fields",
"[",
"'kip_is_discuss'",
"]",
"=",
"1",
"kip_fields",
"[",
"'kip_type'",
"]",
"=",
"\"discuss\"",
"kip_fields",
"[",
"'kip'",
"]",
"=",
"kip",
"# Update kip discuss dates",
"if",
"\"kip_min_discuss\"",
"not",
"in",
"enrich",
".",
"kips_dates",
"[",
"kip",
"]",
":",
"enrich",
".",
"kips_dates",
"[",
"kip",
"]",
".",
"update",
"(",
"{",
"\"kip_min_discuss\"",
":",
"kip_date",
",",
"\"kip_max_discuss\"",
":",
"kip_date",
"}",
")",
"else",
":",
"if",
"enrich",
".",
"kips_dates",
"[",
"kip",
"]",
"[",
"\"kip_min_discuss\"",
"]",
">=",
"kip_date",
":",
"enrich",
".",
"kips_dates",
"[",
"kip",
"]",
"[",
"\"kip_min_discuss\"",
"]",
"=",
"kip_date",
"if",
"enrich",
".",
"kips_dates",
"[",
"kip",
"]",
"[",
"\"kip_max_discuss\"",
"]",
"<=",
"kip_date",
":",
"enrich",
".",
"kips_dates",
"[",
"kip",
"]",
"[",
"\"kip_max_discuss\"",
"]",
"=",
"kip_date",
"if",
"'[vote]'",
"in",
"eitem",
"[",
"'Subject'",
"]",
".",
"lower",
"(",
")",
":",
"kip_fields",
"[",
"'kip_is_vote'",
"]",
"=",
"1",
"kip_fields",
"[",
"'kip_type'",
"]",
"=",
"\"vote\"",
"kip_fields",
"[",
"'kip'",
"]",
"=",
"kip",
"if",
"'body_extract'",
"in",
"eitem",
":",
"(",
"vote",
",",
"binding",
")",
"=",
"extract_vote_and_binding",
"(",
"eitem",
"[",
"'body_extract'",
"]",
")",
"enrich",
".",
"kips_scores",
"[",
"kip",
"]",
"+=",
"[",
"(",
"vote",
",",
"binding",
")",
"]",
"kip_fields",
"[",
"'kip_vote'",
"]",
"=",
"vote",
"kip_fields",
"[",
"'kip_binding'",
"]",
"=",
"binding",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Message %s without body\"",
",",
"eitem",
"[",
"'Subject'",
"]",
")",
"# Update kip discuss dates",
"if",
"\"kip_min_vote\"",
"not",
"in",
"enrich",
".",
"kips_dates",
"[",
"kip",
"]",
":",
"enrich",
".",
"kips_dates",
"[",
"kip",
"]",
".",
"update",
"(",
"{",
"\"kip_min_vote\"",
":",
"kip_date",
",",
"\"kip_max_vote\"",
":",
"kip_date",
"}",
")",
"else",
":",
"if",
"enrich",
".",
"kips_dates",
"[",
"kip",
"]",
"[",
"\"kip_min_vote\"",
"]",
">=",
"kip_date",
":",
"enrich",
".",
"kips_dates",
"[",
"kip",
"]",
"[",
"\"kip_min_vote\"",
"]",
"=",
"kip_date",
"if",
"enrich",
".",
"kips_dates",
"[",
"kip",
"]",
"[",
"\"kip_max_vote\"",
"]",
"<=",
"kip_date",
":",
"enrich",
".",
"kips_dates",
"[",
"kip",
"]",
"[",
"\"kip_max_vote\"",
"]",
"=",
"kip_date",
"eitem",
".",
"update",
"(",
"kip_fields",
")",
"yield",
"eitem",
"total",
"+=",
"1",
"logger",
".",
"info",
"(",
"\"Total eitems with kafka kip fields %i\"",
",",
"total",
")",
"logger",
".",
"debug",
"(",
"\"Doing kafka_kip study from %s\"",
",",
"enrich",
".",
"elastic",
".",
"anonymize_url",
"(",
"enrich",
".",
"elastic",
".",
"index_url",
")",
")",
"# First iteration with the basic fields",
"eitems",
"=",
"add_kip_fields",
"(",
"enrich",
")",
"enrich",
".",
"elastic",
".",
"bulk_upload",
"(",
"eitems",
",",
"enrich",
".",
"get_field_unique_id",
"(",
")",
")",
"# Second iteration with the final time and status fields",
"eitems",
"=",
"add_kip_time_status_fields",
"(",
"enrich",
")",
"enrich",
".",
"elastic",
".",
"bulk_upload",
"(",
"eitems",
",",
"enrich",
".",
"get_field_unique_id",
"(",
")",
")",
"# Third iteration to compute the end status field for all KIPs",
"eitems",
"=",
"add_kip_final_status_field",
"(",
"enrich",
")",
"enrich",
".",
"elastic",
".",
"bulk_upload",
"(",
"eitems",
",",
"enrich",
".",
"get_field_unique_id",
"(",
")",
")"
] | 36.886199
| 19.581114
|
def get_tasks():
    '''Get a list of known tasks with their routing queue'''
    tasks = {}
    for name, cls in celery.tasks.items():
        # Skip celery internal tasks and udata test tasks
        if name.startswith(('celery.', 'test-')):
            continue
        tasks[name] = get_task_queue(name, cls)
    return tasks
|
[
"def",
"get_tasks",
"(",
")",
":",
"return",
"{",
"name",
":",
"get_task_queue",
"(",
"name",
",",
"cls",
")",
"for",
"name",
",",
"cls",
"in",
"celery",
".",
"tasks",
".",
"items",
"(",
")",
"# Exclude celery internal tasks",
"if",
"not",
"name",
".",
"startswith",
"(",
"'celery.'",
")",
"# Exclude udata test tasks",
"and",
"not",
"name",
".",
"startswith",
"(",
"'test-'",
")",
"}"
] | 33.1
| 12.1
|
def _extract_zip_if_possible(descriptor):
    """If descriptor is a path to zip file extract and return (tempdir, descriptor)

    The descriptor may be a URL (fetched with requests), a local path,
    raw bytes with the zip contents, or a file-like object. When a zip
    is detected, it is extracted into a fresh temporary directory and
    the path of the contained datapackage.json is returned; otherwise
    the original descriptor is returned unchanged. tempdir is None when
    nothing was extracted (NOTE(review): the caller presumably removes
    tempdir afterwards — confirm at the call sites).
    """
    tempdir = None
    result = descriptor
    try:
        # Best effort: if descriptor is a string, try it as a URL first;
        # any failure (e.g. it is a local path) falls through silently.
        if isinstance(descriptor, six.string_types):
            res = requests.get(descriptor)
            res.raise_for_status()
            result = res.content
    except (IOError,
            ValueError,
            requests.exceptions.RequestException):
        pass
    try:
        the_zip = result
        if isinstance(the_zip, bytes):
            try:
                # Probe whether the bytes are a filesystem path; isfile
                # raises for byte strings that cannot be a path.
                os.path.isfile(the_zip)
            except (TypeError, ValueError):
                # the_zip contains the zip file contents
                the_zip = io.BytesIO(the_zip)
        if zipfile.is_zipfile(the_zip):
            with zipfile.ZipFile(the_zip, 'r') as z:
                _validate_zip(z)

                # IndexError here would mean no datapackage.json despite
                # _validate_zip passing; handled by the outer except? —
                # NOTE(review): confirm _validate_zip guarantees presence.
                descriptor_path = [
                    f for f in z.namelist() if f.endswith('datapackage.json')][0]

                tempdir = tempfile.mkdtemp('-datapackage')
                z.extractall(tempdir)
                result = os.path.join(tempdir, descriptor_path)
        else:
            result = descriptor
    except (TypeError,
            zipfile.BadZipfile):
        pass
    if hasattr(descriptor, 'seek'):
        # Rewind descriptor if it's a file, as we read it for testing if it's
        # a zip file
        descriptor.seek(0)
    return (tempdir, result)
|
[
"def",
"_extract_zip_if_possible",
"(",
"descriptor",
")",
":",
"tempdir",
"=",
"None",
"result",
"=",
"descriptor",
"try",
":",
"if",
"isinstance",
"(",
"descriptor",
",",
"six",
".",
"string_types",
")",
":",
"res",
"=",
"requests",
".",
"get",
"(",
"descriptor",
")",
"res",
".",
"raise_for_status",
"(",
")",
"result",
"=",
"res",
".",
"content",
"except",
"(",
"IOError",
",",
"ValueError",
",",
"requests",
".",
"exceptions",
".",
"RequestException",
")",
":",
"pass",
"try",
":",
"the_zip",
"=",
"result",
"if",
"isinstance",
"(",
"the_zip",
",",
"bytes",
")",
":",
"try",
":",
"os",
".",
"path",
".",
"isfile",
"(",
"the_zip",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"# the_zip contains the zip file contents",
"the_zip",
"=",
"io",
".",
"BytesIO",
"(",
"the_zip",
")",
"if",
"zipfile",
".",
"is_zipfile",
"(",
"the_zip",
")",
":",
"with",
"zipfile",
".",
"ZipFile",
"(",
"the_zip",
",",
"'r'",
")",
"as",
"z",
":",
"_validate_zip",
"(",
"z",
")",
"descriptor_path",
"=",
"[",
"f",
"for",
"f",
"in",
"z",
".",
"namelist",
"(",
")",
"if",
"f",
".",
"endswith",
"(",
"'datapackage.json'",
")",
"]",
"[",
"0",
"]",
"tempdir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
"'-datapackage'",
")",
"z",
".",
"extractall",
"(",
"tempdir",
")",
"result",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tempdir",
",",
"descriptor_path",
")",
"else",
":",
"result",
"=",
"descriptor",
"except",
"(",
"TypeError",
",",
"zipfile",
".",
"BadZipfile",
")",
":",
"pass",
"if",
"hasattr",
"(",
"descriptor",
",",
"'seek'",
")",
":",
"# Rewind descriptor if it's a file, as we read it for testing if it's",
"# a zip file",
"descriptor",
".",
"seek",
"(",
"0",
")",
"return",
"(",
"tempdir",
",",
"result",
")"
] | 35.175
| 14.075
|
def Run(self):
    """Run the iteration.

    Fans every input item out to the thread pool (each task writes its
    result to self.out_queue) and then drains the queue, yielding each
    non-empty result. `count` ends as the last enumerate index, so the
    drain loop below consumes count + 1 results in total.
    """
    count = 0
    for count, input_data in enumerate(self.GetInput()):
        if count % 2000 == 0:
            logging.debug("%d processed.", count)
        args = (input_data, self.out_queue, self.token)
        self.thread_pool.AddTask(
            target=self.IterFunction, args=args, name=self.THREAD_POOL_NAME)

    while count >= 0:
        try:
            # We only use the timeout to wait if we got to the end of the Queue but
            # didn't process everything yet.
            out = self.out_queue.get(timeout=self.QUEUE_TIMEOUT, block=True)
            if out:
                yield out
            count -= 1
        except queue.Empty:
            # Workers did not deliver in time; give up on the remainder.
            break

    # Join and stop to clean up the threadpool.
    self.thread_pool.Stop(join_timeout=THREADPOOL_JOIN_TIMEOUT)
|
[
"def",
"Run",
"(",
"self",
")",
":",
"count",
"=",
"0",
"for",
"count",
",",
"input_data",
"in",
"enumerate",
"(",
"self",
".",
"GetInput",
"(",
")",
")",
":",
"if",
"count",
"%",
"2000",
"==",
"0",
":",
"logging",
".",
"debug",
"(",
"\"%d processed.\"",
",",
"count",
")",
"args",
"=",
"(",
"input_data",
",",
"self",
".",
"out_queue",
",",
"self",
".",
"token",
")",
"self",
".",
"thread_pool",
".",
"AddTask",
"(",
"target",
"=",
"self",
".",
"IterFunction",
",",
"args",
"=",
"args",
",",
"name",
"=",
"self",
".",
"THREAD_POOL_NAME",
")",
"while",
"count",
">=",
"0",
":",
"try",
":",
"# We only use the timeout to wait if we got to the end of the Queue but",
"# didn't process everything yet.",
"out",
"=",
"self",
".",
"out_queue",
".",
"get",
"(",
"timeout",
"=",
"self",
".",
"QUEUE_TIMEOUT",
",",
"block",
"=",
"True",
")",
"if",
"out",
":",
"yield",
"out",
"count",
"-=",
"1",
"except",
"queue",
".",
"Empty",
":",
"break",
"# Join and stop to clean up the threadpool.",
"self",
".",
"thread_pool",
".",
"Stop",
"(",
"join_timeout",
"=",
"THREADPOOL_JOIN_TIMEOUT",
")"
] | 33.26087
| 20.913043
|
def _add_raster_layer(self, raster_layer, layer_name, save_style=False):
"""Add a raster layer to the folder.
:param raster_layer: The layer to add.
:type raster_layer: QgsRasterLayer
:param layer_name: The name of the layer in the datastore.
:type layer_name: str
:param save_style: If we have to save a QML too. Default to False.
Not implemented in geopackage !
:type save_style: bool
:returns: A two-tuple. The first element will be True if we could add
the layer to the datastore. The second element will be the layer
name which has been used or the error message.
:rtype: (bool, str)
.. versionadded:: 4.0
"""
source = gdal.Open(raster_layer.source())
array = source.GetRasterBand(1).ReadAsArray()
x_size = source.RasterXSize
y_size = source.RasterYSize
output = self.raster_driver.Create(
self.uri.absoluteFilePath(),
x_size,
y_size,
1,
gdal.GDT_Byte,
['APPEND_SUBDATASET=YES', 'RASTER_TABLE=%s' % layer_name]
)
output.SetGeoTransform(source.GetGeoTransform())
output.SetProjection(source.GetProjection())
output.GetRasterBand(1).WriteArray(array)
# Once we're done, close properly the dataset
output = None
source = None
return True, layer_name
|
[
"def",
"_add_raster_layer",
"(",
"self",
",",
"raster_layer",
",",
"layer_name",
",",
"save_style",
"=",
"False",
")",
":",
"source",
"=",
"gdal",
".",
"Open",
"(",
"raster_layer",
".",
"source",
"(",
")",
")",
"array",
"=",
"source",
".",
"GetRasterBand",
"(",
"1",
")",
".",
"ReadAsArray",
"(",
")",
"x_size",
"=",
"source",
".",
"RasterXSize",
"y_size",
"=",
"source",
".",
"RasterYSize",
"output",
"=",
"self",
".",
"raster_driver",
".",
"Create",
"(",
"self",
".",
"uri",
".",
"absoluteFilePath",
"(",
")",
",",
"x_size",
",",
"y_size",
",",
"1",
",",
"gdal",
".",
"GDT_Byte",
",",
"[",
"'APPEND_SUBDATASET=YES'",
",",
"'RASTER_TABLE=%s'",
"%",
"layer_name",
"]",
")",
"output",
".",
"SetGeoTransform",
"(",
"source",
".",
"GetGeoTransform",
"(",
")",
")",
"output",
".",
"SetProjection",
"(",
"source",
".",
"GetProjection",
"(",
")",
")",
"output",
".",
"GetRasterBand",
"(",
"1",
")",
".",
"WriteArray",
"(",
"array",
")",
"# Once we're done, close properly the dataset",
"output",
"=",
"None",
"source",
"=",
"None",
"return",
"True",
",",
"layer_name"
] | 32.136364
| 20.704545
|
def removeDuplicates(inFileName, outFileName) :
"""removes duplicated lines from a 'inFileName' CSV file, the results are witten in 'outFileName'"""
f = open(inFileName)
legend = f.readline()
data = ''
h = {}
h[legend] = 0
lines = f.readlines()
for l in lines :
if not h.has_key(l) :
h[l] = 0
data += l
f.flush()
f.close()
f = open(outFileName, 'w')
f.write(legend+data)
f.flush()
f.close()
|
[
"def",
"removeDuplicates",
"(",
"inFileName",
",",
"outFileName",
")",
":",
"f",
"=",
"open",
"(",
"inFileName",
")",
"legend",
"=",
"f",
".",
"readline",
"(",
")",
"data",
"=",
"''",
"h",
"=",
"{",
"}",
"h",
"[",
"legend",
"]",
"=",
"0",
"lines",
"=",
"f",
".",
"readlines",
"(",
")",
"for",
"l",
"in",
"lines",
":",
"if",
"not",
"h",
".",
"has_key",
"(",
"l",
")",
":",
"h",
"[",
"l",
"]",
"=",
"0",
"data",
"+=",
"l",
"f",
".",
"flush",
"(",
")",
"f",
".",
"close",
"(",
")",
"f",
"=",
"open",
"(",
"outFileName",
",",
"'w'",
")",
"f",
".",
"write",
"(",
"legend",
"+",
"data",
")",
"f",
".",
"flush",
"(",
")",
"f",
".",
"close",
"(",
")"
] | 19.047619
| 24.52381
|
def runSearchRnaQuantificationSets(self, request):
"""
Returns a SearchRnaQuantificationSetsResponse for the specified
SearchRnaQuantificationSetsRequest object.
"""
return self.runSearchRequest(
request, protocol.SearchRnaQuantificationSetsRequest,
protocol.SearchRnaQuantificationSetsResponse,
self.rnaQuantificationSetsGenerator)
|
[
"def",
"runSearchRnaQuantificationSets",
"(",
"self",
",",
"request",
")",
":",
"return",
"self",
".",
"runSearchRequest",
"(",
"request",
",",
"protocol",
".",
"SearchRnaQuantificationSetsRequest",
",",
"protocol",
".",
"SearchRnaQuantificationSetsResponse",
",",
"self",
".",
"rnaQuantificationSetsGenerator",
")"
] | 44.444444
| 11.555556
|
def accel_increase_height(self, *args):
"""Callback to increase height.
"""
height = self.settings.general.get_int('window-height')
self.settings.general.set_int('window-height', min(height + 2, 100))
return True
|
[
"def",
"accel_increase_height",
"(",
"self",
",",
"*",
"args",
")",
":",
"height",
"=",
"self",
".",
"settings",
".",
"general",
".",
"get_int",
"(",
"'window-height'",
")",
"self",
".",
"settings",
".",
"general",
".",
"set_int",
"(",
"'window-height'",
",",
"min",
"(",
"height",
"+",
"2",
",",
"100",
")",
")",
"return",
"True"
] | 41.166667
| 13.5
|
def get_foreign_keys(cls):
"""Get foreign keys and models they refer to, so we can pre-process
the data for load_bulk
"""
foreign_keys = {}
for field in cls._meta.fields:
if (
field.get_internal_type() == 'ForeignKey' and
field.name != 'parent'
):
if django.VERSION >= (1, 9):
foreign_keys[field.name] = field.remote_field.model
else:
foreign_keys[field.name] = field.rel.to
return foreign_keys
|
[
"def",
"get_foreign_keys",
"(",
"cls",
")",
":",
"foreign_keys",
"=",
"{",
"}",
"for",
"field",
"in",
"cls",
".",
"_meta",
".",
"fields",
":",
"if",
"(",
"field",
".",
"get_internal_type",
"(",
")",
"==",
"'ForeignKey'",
"and",
"field",
".",
"name",
"!=",
"'parent'",
")",
":",
"if",
"django",
".",
"VERSION",
">=",
"(",
"1",
",",
"9",
")",
":",
"foreign_keys",
"[",
"field",
".",
"name",
"]",
"=",
"field",
".",
"remote_field",
".",
"model",
"else",
":",
"foreign_keys",
"[",
"field",
".",
"name",
"]",
"=",
"field",
".",
"rel",
".",
"to",
"return",
"foreign_keys"
] | 37.066667
| 13.333333
|
def _spoken_representation_L2(lst_lst_char):
"""
>>> lst = [['M', 'O', 'R', 'S', 'E'], ['C', 'O', 'D', 'E']]
>>> _spoken_representation_L2(lst)
'-- --- .-. ... . (space) -.-. --- -.. .'
"""
s = ''
inter_char = ' '
inter_word = ' (space) '
for i, word in enumerate(lst_lst_char):
if i >= 1:
s += inter_word
for j, c in enumerate(word):
if j != 0:
s += inter_char
s += mtalk.encoding.morsetab[c]
return s
|
[
"def",
"_spoken_representation_L2",
"(",
"lst_lst_char",
")",
":",
"s",
"=",
"''",
"inter_char",
"=",
"' '",
"inter_word",
"=",
"' (space) '",
"for",
"i",
",",
"word",
"in",
"enumerate",
"(",
"lst_lst_char",
")",
":",
"if",
"i",
">=",
"1",
":",
"s",
"+=",
"inter_word",
"for",
"j",
",",
"c",
"in",
"enumerate",
"(",
"word",
")",
":",
"if",
"j",
"!=",
"0",
":",
"s",
"+=",
"inter_char",
"s",
"+=",
"mtalk",
".",
"encoding",
".",
"morsetab",
"[",
"c",
"]",
"return",
"s"
] | 29.058824
| 11.529412
|
def _put_resource_dict(self):
""" Creates PDF reference to resource objects.
"""
self.session._add_object(2)
self.session._out('<<')
self.session._out('/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]')
self.session._out('/Font <<')
for font in self.document.fonts:
self.session._out('/F%s %s 0 R' % (font.index, font.number))
self.session._out('>>')
if self.document.images:
self.session._out('/XObject <<')
for image in self.document.images:
self.session._out('/I%s %s 0 R' % (image.index, image.number))
self.session._out('>>')
self.session._out('>>')
self.session._out('endobj')
|
[
"def",
"_put_resource_dict",
"(",
"self",
")",
":",
"self",
".",
"session",
".",
"_add_object",
"(",
"2",
")",
"self",
".",
"session",
".",
"_out",
"(",
"'<<'",
")",
"self",
".",
"session",
".",
"_out",
"(",
"'/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]'",
")",
"self",
".",
"session",
".",
"_out",
"(",
"'/Font <<'",
")",
"for",
"font",
"in",
"self",
".",
"document",
".",
"fonts",
":",
"self",
".",
"session",
".",
"_out",
"(",
"'/F%s %s 0 R'",
"%",
"(",
"font",
".",
"index",
",",
"font",
".",
"number",
")",
")",
"self",
".",
"session",
".",
"_out",
"(",
"'>>'",
")",
"if",
"self",
".",
"document",
".",
"images",
":",
"self",
".",
"session",
".",
"_out",
"(",
"'/XObject <<'",
")",
"for",
"image",
"in",
"self",
".",
"document",
".",
"images",
":",
"self",
".",
"session",
".",
"_out",
"(",
"'/I%s %s 0 R'",
"%",
"(",
"image",
".",
"index",
",",
"image",
".",
"number",
")",
")",
"self",
".",
"session",
".",
"_out",
"(",
"'>>'",
")",
"self",
".",
"session",
".",
"_out",
"(",
"'>>'",
")",
"self",
".",
"session",
".",
"_out",
"(",
"'endobj'",
")"
] | 40.666667
| 11.944444
|
def DisplayTree(node, children, level=0):
"""Recursively display a node and each of its children.
Args:
node: The node we're displaying the children of.
children: Children of the parent node.
level: How deep in the tree we are.
"""
value = ''
node_type = ''
if 'caseValue' in node:
case_value = node['caseValue']
node_type = case_value['ProductDimension.Type']
if node_type == 'ProductCanonicalCondition':
value = (case_value['condition'] if 'condition' in case_value
else 'OTHER')
elif node_type == 'ProductBiddingCategory':
value = '%s(%s)' % (case_value['type'], case_value['value']
if 'value' in case_value else 'OTHER')
else:
value = (case_value['value'] if 'value' in case_value else 'OTHER')
print ('%sid: %s, node_type: %s, value: %s\n'
% (' ' * level, node['id'], node_type, value))
for child_node in children[node['id']]:
DisplayTree(child_node, children, level + 1)
|
[
"def",
"DisplayTree",
"(",
"node",
",",
"children",
",",
"level",
"=",
"0",
")",
":",
"value",
"=",
"''",
"node_type",
"=",
"''",
"if",
"'caseValue'",
"in",
"node",
":",
"case_value",
"=",
"node",
"[",
"'caseValue'",
"]",
"node_type",
"=",
"case_value",
"[",
"'ProductDimension.Type'",
"]",
"if",
"node_type",
"==",
"'ProductCanonicalCondition'",
":",
"value",
"=",
"(",
"case_value",
"[",
"'condition'",
"]",
"if",
"'condition'",
"in",
"case_value",
"else",
"'OTHER'",
")",
"elif",
"node_type",
"==",
"'ProductBiddingCategory'",
":",
"value",
"=",
"'%s(%s)'",
"%",
"(",
"case_value",
"[",
"'type'",
"]",
",",
"case_value",
"[",
"'value'",
"]",
"if",
"'value'",
"in",
"case_value",
"else",
"'OTHER'",
")",
"else",
":",
"value",
"=",
"(",
"case_value",
"[",
"'value'",
"]",
"if",
"'value'",
"in",
"case_value",
"else",
"'OTHER'",
")",
"print",
"(",
"'%sid: %s, node_type: %s, value: %s\\n'",
"%",
"(",
"' '",
"*",
"level",
",",
"node",
"[",
"'id'",
"]",
",",
"node_type",
",",
"value",
")",
")",
"for",
"child_node",
"in",
"children",
"[",
"node",
"[",
"'id'",
"]",
"]",
":",
"DisplayTree",
"(",
"child_node",
",",
"children",
",",
"level",
"+",
"1",
")"
] | 33.551724
| 18.310345
|
def mkrelease(finish='yes', version=''):
"""Allocates the next version number and marks current develop branch
state as a new release with the allocated version number.
Syncs new state with origin repository.
"""
if not version:
version = _version_format(_version_guess_next())
if _git_get_current_branch() != 'release/' + version:
_tool_run('git checkout develop',
'git flow release start ' + version)
_project_patch_version(version)
_project_patch_changelog()
patched_files = ' '.join([VERSION_FILE, CHANGES_FILE])
run('git diff ' + patched_files, pty=True)
_tool_run(('git commit -m "Bump Version to {0!s}" {1!s}'
.format(version, patched_files)))
if finish not in ('no', 'n', ):
_tool_run("git flow release finish -m '{0}' {0}".format(version),
env={b'GIT_MERGE_AUTOEDIT': b'no', })
_tool_run('git push origin --tags develop master')
|
[
"def",
"mkrelease",
"(",
"finish",
"=",
"'yes'",
",",
"version",
"=",
"''",
")",
":",
"if",
"not",
"version",
":",
"version",
"=",
"_version_format",
"(",
"_version_guess_next",
"(",
")",
")",
"if",
"_git_get_current_branch",
"(",
")",
"!=",
"'release/'",
"+",
"version",
":",
"_tool_run",
"(",
"'git checkout develop'",
",",
"'git flow release start '",
"+",
"version",
")",
"_project_patch_version",
"(",
"version",
")",
"_project_patch_changelog",
"(",
")",
"patched_files",
"=",
"' '",
".",
"join",
"(",
"[",
"VERSION_FILE",
",",
"CHANGES_FILE",
"]",
")",
"run",
"(",
"'git diff '",
"+",
"patched_files",
",",
"pty",
"=",
"True",
")",
"_tool_run",
"(",
"(",
"'git commit -m \"Bump Version to {0!s}\" {1!s}'",
".",
"format",
"(",
"version",
",",
"patched_files",
")",
")",
")",
"if",
"finish",
"not",
"in",
"(",
"'no'",
",",
"'n'",
",",
")",
":",
"_tool_run",
"(",
"\"git flow release finish -m '{0}' {0}\"",
".",
"format",
"(",
"version",
")",
",",
"env",
"=",
"{",
"b'GIT_MERGE_AUTOEDIT'",
":",
"b'no'",
",",
"}",
")",
"_tool_run",
"(",
"'git push origin --tags develop master'",
")"
] | 39.541667
| 16.291667
|
def aitoffImageToSphere(x, y):
"""
Inverse Hammer-Aitoff projection (deg).
"""
x = x - 360.*(x>180)
x = np.asarray(np.radians(x))
y = np.asarray(np.radians(y))
z = np.sqrt(1. - (x / 4.)**2 - (y / 2.)**2) # rad
lon = 2. * np.arctan2((2. * z**2) - 1, (z / 2.) * x)
lat = np.arcsin( y * z)
return ((180. - np.degrees(lon)) % 360.), np.degrees(lat)
|
[
"def",
"aitoffImageToSphere",
"(",
"x",
",",
"y",
")",
":",
"x",
"=",
"x",
"-",
"360.",
"*",
"(",
"x",
">",
"180",
")",
"x",
"=",
"np",
".",
"asarray",
"(",
"np",
".",
"radians",
"(",
"x",
")",
")",
"y",
"=",
"np",
".",
"asarray",
"(",
"np",
".",
"radians",
"(",
"y",
")",
")",
"z",
"=",
"np",
".",
"sqrt",
"(",
"1.",
"-",
"(",
"x",
"/",
"4.",
")",
"**",
"2",
"-",
"(",
"y",
"/",
"2.",
")",
"**",
"2",
")",
"# rad",
"lon",
"=",
"2.",
"*",
"np",
".",
"arctan2",
"(",
"(",
"2.",
"*",
"z",
"**",
"2",
")",
"-",
"1",
",",
"(",
"z",
"/",
"2.",
")",
"*",
"x",
")",
"lat",
"=",
"np",
".",
"arcsin",
"(",
"y",
"*",
"z",
")",
"return",
"(",
"(",
"180.",
"-",
"np",
".",
"degrees",
"(",
"lon",
")",
")",
"%",
"360.",
")",
",",
"np",
".",
"degrees",
"(",
"lat",
")"
] | 34
| 9.636364
|
def encode(cls, value):
"""
convert a boolean value into something we can persist to redis.
An empty string is the representation for False.
:param value: bool
:return: bytes
"""
if value not in [True, False]:
raise InvalidValue('not a boolean')
return b'1' if value else b''
|
[
"def",
"encode",
"(",
"cls",
",",
"value",
")",
":",
"if",
"value",
"not",
"in",
"[",
"True",
",",
"False",
"]",
":",
"raise",
"InvalidValue",
"(",
"'not a boolean'",
")",
"return",
"b'1'",
"if",
"value",
"else",
"b''"
] | 28.5
| 15.666667
|
def _metrics_options(p):
""" Add options specific to metrics subcommand. """
_default_options(p, blacklist=['log-group', 'output-dir', 'cache', 'quiet'])
p.add_argument(
'--start', type=date_parse,
help='Start date (requires --end, overrides --days)')
p.add_argument(
'--end', type=date_parse, help='End date')
p.add_argument(
'--days', type=int, default=14,
help='Number of days of history to consider (default: %(default)i)')
p.add_argument('--period', type=int, default=60 * 24 * 24)
|
[
"def",
"_metrics_options",
"(",
"p",
")",
":",
"_default_options",
"(",
"p",
",",
"blacklist",
"=",
"[",
"'log-group'",
",",
"'output-dir'",
",",
"'cache'",
",",
"'quiet'",
"]",
")",
"p",
".",
"add_argument",
"(",
"'--start'",
",",
"type",
"=",
"date_parse",
",",
"help",
"=",
"'Start date (requires --end, overrides --days)'",
")",
"p",
".",
"add_argument",
"(",
"'--end'",
",",
"type",
"=",
"date_parse",
",",
"help",
"=",
"'End date'",
")",
"p",
".",
"add_argument",
"(",
"'--days'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"14",
",",
"help",
"=",
"'Number of days of history to consider (default: %(default)i)'",
")",
"p",
".",
"add_argument",
"(",
"'--period'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"60",
"*",
"24",
"*",
"24",
")"
] | 41.461538
| 19.538462
|
def set_key(self, key, modifiers: typing.List[Key]=None):
"""This is called when the user successfully finishes recording a key combination."""
if modifiers is None:
modifiers = [] # type: typing.List[Key]
if key in self.KEY_MAP:
key = self.KEY_MAP[key]
self._setKeyLabel(key)
self.key = key
self.controlButton.setChecked(Key.CONTROL in modifiers)
self.altButton.setChecked(Key.ALT in modifiers)
self.shiftButton.setChecked(Key.SHIFT in modifiers)
self.superButton.setChecked(Key.SUPER in modifiers)
self.hyperButton.setChecked(Key.HYPER in modifiers)
self.metaButton.setChecked(Key.META in modifiers)
self.recording_finished.emit(True)
|
[
"def",
"set_key",
"(",
"self",
",",
"key",
",",
"modifiers",
":",
"typing",
".",
"List",
"[",
"Key",
"]",
"=",
"None",
")",
":",
"if",
"modifiers",
"is",
"None",
":",
"modifiers",
"=",
"[",
"]",
"# type: typing.List[Key]",
"if",
"key",
"in",
"self",
".",
"KEY_MAP",
":",
"key",
"=",
"self",
".",
"KEY_MAP",
"[",
"key",
"]",
"self",
".",
"_setKeyLabel",
"(",
"key",
")",
"self",
".",
"key",
"=",
"key",
"self",
".",
"controlButton",
".",
"setChecked",
"(",
"Key",
".",
"CONTROL",
"in",
"modifiers",
")",
"self",
".",
"altButton",
".",
"setChecked",
"(",
"Key",
".",
"ALT",
"in",
"modifiers",
")",
"self",
".",
"shiftButton",
".",
"setChecked",
"(",
"Key",
".",
"SHIFT",
"in",
"modifiers",
")",
"self",
".",
"superButton",
".",
"setChecked",
"(",
"Key",
".",
"SUPER",
"in",
"modifiers",
")",
"self",
".",
"hyperButton",
".",
"setChecked",
"(",
"Key",
".",
"HYPER",
"in",
"modifiers",
")",
"self",
".",
"metaButton",
".",
"setChecked",
"(",
"Key",
".",
"META",
"in",
"modifiers",
")",
"self",
".",
"recording_finished",
".",
"emit",
"(",
"True",
")"
] | 49.533333
| 13.066667
|
def compare_balance(self, operator, or_equals, amount):
"""Additional step using regex matcher to compare the current balance with some number"""
amount = int(amount)
if operator == 'less':
if or_equals:
self.assertLessEqual(self.balance, amount)
else:
self.assertLess(self.balance, amount)
elif or_equals:
self.assertGreaterEqual(self.balance, amount)
else:
self.assertGreater(self.balance, amount)
|
[
"def",
"compare_balance",
"(",
"self",
",",
"operator",
",",
"or_equals",
",",
"amount",
")",
":",
"amount",
"=",
"int",
"(",
"amount",
")",
"if",
"operator",
"==",
"'less'",
":",
"if",
"or_equals",
":",
"self",
".",
"assertLessEqual",
"(",
"self",
".",
"balance",
",",
"amount",
")",
"else",
":",
"self",
".",
"assertLess",
"(",
"self",
".",
"balance",
",",
"amount",
")",
"elif",
"or_equals",
":",
"self",
".",
"assertGreaterEqual",
"(",
"self",
".",
"balance",
",",
"amount",
")",
"else",
":",
"self",
".",
"assertGreater",
"(",
"self",
".",
"balance",
",",
"amount",
")"
] | 42.333333
| 14.916667
|
def QA_util_format_date2str(cursor_date):
"""
对输入日期进行格式化处理,返回格式为 "%Y-%m-%d" 格式字符串
支持格式包括:
1. str: "%Y%m%d" "%Y%m%d%H%M%S", "%Y%m%d %H:%M:%S",
"%Y-%m-%d", "%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H%M%S"
2. datetime.datetime
3. pd.Timestamp
4. int -> 自动在右边加 0 然后转换,譬如 '20190302093' --> "2019-03-02"
:param cursor_date: str/datetime.datetime/int 日期或时间
:return: str 返回字符串格式日期
"""
if isinstance(cursor_date, datetime.datetime):
cursor_date = str(cursor_date)[:10]
elif isinstance(cursor_date, str):
try:
cursor_date = str(pd.Timestamp(cursor_date))[:10]
except:
raise ValueError('请输入正确的日期格式, 建议 "%Y-%m-%d"')
elif isinstance(cursor_date, int):
cursor_date = str(pd.Timestamp("{:<014d}".format(cursor_date)))[:10]
else:
raise ValueError('请输入正确的日期格式,建议 "%Y-%m-%d"')
return cursor_date
|
[
"def",
"QA_util_format_date2str",
"(",
"cursor_date",
")",
":",
"if",
"isinstance",
"(",
"cursor_date",
",",
"datetime",
".",
"datetime",
")",
":",
"cursor_date",
"=",
"str",
"(",
"cursor_date",
")",
"[",
":",
"10",
"]",
"elif",
"isinstance",
"(",
"cursor_date",
",",
"str",
")",
":",
"try",
":",
"cursor_date",
"=",
"str",
"(",
"pd",
".",
"Timestamp",
"(",
"cursor_date",
")",
")",
"[",
":",
"10",
"]",
"except",
":",
"raise",
"ValueError",
"(",
"'请输入正确的日期格式, 建议 \"%Y-%m-%d\"')",
"",
"elif",
"isinstance",
"(",
"cursor_date",
",",
"int",
")",
":",
"cursor_date",
"=",
"str",
"(",
"pd",
".",
"Timestamp",
"(",
"\"{:<014d}\"",
".",
"format",
"(",
"cursor_date",
")",
")",
")",
"[",
":",
"10",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'请输入正确的日期格式,建议 \"%Y-%m-%d\"')",
"",
"return",
"cursor_date"
] | 35.2
| 16
|
def _resizeColumnToContents(self, header, data, col, limit_ms):
"""Resize a column by its contents."""
hdr_width = self._sizeHintForColumn(header, col, limit_ms)
data_width = self._sizeHintForColumn(data, col, limit_ms)
if data_width > hdr_width:
width = min(self.max_width, data_width)
elif hdr_width > data_width * 2:
width = max(min(hdr_width, self.min_trunc), min(self.max_width,
data_width))
else:
width = max(min(self.max_width, hdr_width), self.min_trunc)
header.setColumnWidth(col, width)
|
[
"def",
"_resizeColumnToContents",
"(",
"self",
",",
"header",
",",
"data",
",",
"col",
",",
"limit_ms",
")",
":",
"hdr_width",
"=",
"self",
".",
"_sizeHintForColumn",
"(",
"header",
",",
"col",
",",
"limit_ms",
")",
"data_width",
"=",
"self",
".",
"_sizeHintForColumn",
"(",
"data",
",",
"col",
",",
"limit_ms",
")",
"if",
"data_width",
">",
"hdr_width",
":",
"width",
"=",
"min",
"(",
"self",
".",
"max_width",
",",
"data_width",
")",
"elif",
"hdr_width",
">",
"data_width",
"*",
"2",
":",
"width",
"=",
"max",
"(",
"min",
"(",
"hdr_width",
",",
"self",
".",
"min_trunc",
")",
",",
"min",
"(",
"self",
".",
"max_width",
",",
"data_width",
")",
")",
"else",
":",
"width",
"=",
"max",
"(",
"min",
"(",
"self",
".",
"max_width",
",",
"hdr_width",
")",
",",
"self",
".",
"min_trunc",
")",
"header",
".",
"setColumnWidth",
"(",
"col",
",",
"width",
")"
] | 51
| 16.083333
|
def connect(self):
"""
Establish a connection to APNs. If already connected, the function does nothing. If the
connection fails, the function retries up to MAX_CONNECTION_RETRIES times.
"""
retries = 0
while retries < MAX_CONNECTION_RETRIES:
try:
self._connection.connect()
logger.info('Connected to APNs')
return
except Exception: # pylint: disable=broad-except
# close the connnection, otherwise next connect() call would do nothing
self._connection.close()
retries += 1
logger.exception('Failed connecting to APNs (attempt %s of %s)', retries, MAX_CONNECTION_RETRIES)
raise ConnectionFailed()
|
[
"def",
"connect",
"(",
"self",
")",
":",
"retries",
"=",
"0",
"while",
"retries",
"<",
"MAX_CONNECTION_RETRIES",
":",
"try",
":",
"self",
".",
"_connection",
".",
"connect",
"(",
")",
"logger",
".",
"info",
"(",
"'Connected to APNs'",
")",
"return",
"except",
"Exception",
":",
"# pylint: disable=broad-except",
"# close the connnection, otherwise next connect() call would do nothing",
"self",
".",
"_connection",
".",
"close",
"(",
")",
"retries",
"+=",
"1",
"logger",
".",
"exception",
"(",
"'Failed connecting to APNs (attempt %s of %s)'",
",",
"retries",
",",
"MAX_CONNECTION_RETRIES",
")",
"raise",
"ConnectionFailed",
"(",
")"
] | 42.888889
| 22.222222
|
def login_token(api, username, password):
"""Login using pre routeros 6.43 authorization method."""
sentence = api('/login')
token = tuple(sentence)[0]['ret']
encoded = encode_password(token, password)
tuple(api('/login', **{'name': username, 'response': encoded}))
|
[
"def",
"login_token",
"(",
"api",
",",
"username",
",",
"password",
")",
":",
"sentence",
"=",
"api",
"(",
"'/login'",
")",
"token",
"=",
"tuple",
"(",
"sentence",
")",
"[",
"0",
"]",
"[",
"'ret'",
"]",
"encoded",
"=",
"encode_password",
"(",
"token",
",",
"password",
")",
"tuple",
"(",
"api",
"(",
"'/login'",
",",
"*",
"*",
"{",
"'name'",
":",
"username",
",",
"'response'",
":",
"encoded",
"}",
")",
")"
] | 46.666667
| 8.166667
|
def add_message(self, text, type=None):
"""Add a message with an optional type."""
key = self._msg_key
self.setdefault(key, [])
self[key].append(message(type, text))
self.save()
|
[
"def",
"add_message",
"(",
"self",
",",
"text",
",",
"type",
"=",
"None",
")",
":",
"key",
"=",
"self",
".",
"_msg_key",
"self",
".",
"setdefault",
"(",
"key",
",",
"[",
"]",
")",
"self",
"[",
"key",
"]",
".",
"append",
"(",
"message",
"(",
"type",
",",
"text",
")",
")",
"self",
".",
"save",
"(",
")"
] | 35.333333
| 8
|
def _dlinear_seaborn_(self, label=None, style=None, opts=None):
"""
Returns a Seaborn linear regression plot with marginal distribution
"""
color, size = self._get_color_size(style)
try:
fig = sns.jointplot(self.x, self.y, color=color,
size=size, data=self.df, kind="reg")
fig = self._set_with_height(fig, opts)
return fig
except Exception as e:
self.err(e, self.dlinear_,
"Can not draw linear regression chart with distribution")
|
[
"def",
"_dlinear_seaborn_",
"(",
"self",
",",
"label",
"=",
"None",
",",
"style",
"=",
"None",
",",
"opts",
"=",
"None",
")",
":",
"color",
",",
"size",
"=",
"self",
".",
"_get_color_size",
"(",
"style",
")",
"try",
":",
"fig",
"=",
"sns",
".",
"jointplot",
"(",
"self",
".",
"x",
",",
"self",
".",
"y",
",",
"color",
"=",
"color",
",",
"size",
"=",
"size",
",",
"data",
"=",
"self",
".",
"df",
",",
"kind",
"=",
"\"reg\"",
")",
"fig",
"=",
"self",
".",
"_set_with_height",
"(",
"fig",
",",
"opts",
")",
"return",
"fig",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"err",
"(",
"e",
",",
"self",
".",
"dlinear_",
",",
"\"Can not draw linear regression chart with distribution\"",
")"
] | 43.615385
| 17
|
def matrix_to_blockmatrix(self, blocksize):
"""
turns an n*m Matrix into a (n/blocksize)*(m/blocksize matrix).
Each element is another blocksize*blocksize matrix.
"""
if self.get_width() % blocksize or self.get_height() % blocksize:
raise ValueError("Number of rows and columns have to be evenly dividable by blocksize")
selfBlocks = []
for columnIndex in range(0, self.get_width() - 1, blocksize):
for rowIndex in range(0, self.get_height() - 1, blocksize):
currentBlock = []
for blockRows in self.get_array(False)[columnIndex:columnIndex + blocksize]:
currentBlock += blockRows[rowIndex:rowIndex + blocksize]
selfBlocks.append(Matrix(blocksize, blocksize, currentBlock, rowBased=False))
return Matrix(self.get_width() / blocksize, self.get_height() / blocksize, selfBlocks, rowBased=False)
|
[
"def",
"matrix_to_blockmatrix",
"(",
"self",
",",
"blocksize",
")",
":",
"if",
"self",
".",
"get_width",
"(",
")",
"%",
"blocksize",
"or",
"self",
".",
"get_height",
"(",
")",
"%",
"blocksize",
":",
"raise",
"ValueError",
"(",
"\"Number of rows and columns have to be evenly dividable by blocksize\"",
")",
"selfBlocks",
"=",
"[",
"]",
"for",
"columnIndex",
"in",
"range",
"(",
"0",
",",
"self",
".",
"get_width",
"(",
")",
"-",
"1",
",",
"blocksize",
")",
":",
"for",
"rowIndex",
"in",
"range",
"(",
"0",
",",
"self",
".",
"get_height",
"(",
")",
"-",
"1",
",",
"blocksize",
")",
":",
"currentBlock",
"=",
"[",
"]",
"for",
"blockRows",
"in",
"self",
".",
"get_array",
"(",
"False",
")",
"[",
"columnIndex",
":",
"columnIndex",
"+",
"blocksize",
"]",
":",
"currentBlock",
"+=",
"blockRows",
"[",
"rowIndex",
":",
"rowIndex",
"+",
"blocksize",
"]",
"selfBlocks",
".",
"append",
"(",
"Matrix",
"(",
"blocksize",
",",
"blocksize",
",",
"currentBlock",
",",
"rowBased",
"=",
"False",
")",
")",
"return",
"Matrix",
"(",
"self",
".",
"get_width",
"(",
")",
"/",
"blocksize",
",",
"self",
".",
"get_height",
"(",
")",
"/",
"blocksize",
",",
"selfBlocks",
",",
"rowBased",
"=",
"False",
")"
] | 62.2
| 29.266667
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.