text
stringlengths 89
104k
| code_tokens
list | avg_line_len
float64 7.91
980
| score
float64 0
630
|
|---|---|---|---|
def render_html_attributes(**kwargs):
"""Returns a string representation of attributes for html entities
:param kwargs: attributes and values
:return: a well-formed string representation of attributes"""
attr = list()
if kwargs:
attr = ['{}="{}"'.format(key, val) for key, val in kwargs.items()]
return " ".join(attr).replace("css_class", "class")
|
[
"def",
"render_html_attributes",
"(",
"*",
"*",
"kwargs",
")",
":",
"attr",
"=",
"list",
"(",
")",
"if",
"kwargs",
":",
"attr",
"=",
"[",
"'{}=\"{}\"'",
".",
"format",
"(",
"key",
",",
"val",
")",
"for",
"key",
",",
"val",
"in",
"kwargs",
".",
"items",
"(",
")",
"]",
"return",
"\" \"",
".",
"join",
"(",
"attr",
")",
".",
"replace",
"(",
"\"css_class\"",
",",
"\"class\"",
")"
] | 46.5
| 12.625
|
def validate(self, obj, pointer=None):
"""
Validate object against validator
:param obj: the object to validate
"""
pointer = pointer or '#'
validator = deepcopy(self)
validator.errors = []
validator.fail_fast = False
obj = deepcopy(obj)
obj = validator.validate_enum(obj, pointer)
obj = validator.validate_type(obj, pointer)
obj = validator.validate_not(obj, pointer)
obj = validator.validate_all_of(obj, pointer)
obj = validator.validate_any_of(obj, pointer)
obj = validator.validate_one_of(obj, pointer)
if self.is_array(obj):
obj = validator.validate_items(obj, pointer)
obj = validator.validate_max_items(obj, pointer)
obj = validator.validate_min_items(obj, pointer)
obj = validator.validate_unique_items(obj, pointer)
elif self.is_number(obj):
obj = validator.validate_maximum(obj, pointer)
obj = validator.validate_minimum(obj, pointer)
obj = validator.validate_multiple_of(obj, pointer)
elif self.is_object(obj):
obj = validator.validate_required(obj, pointer)
obj = validator.validate_max_properties(obj, pointer)
obj = validator.validate_min_properties(obj, pointer)
obj = validator.validate_dependencies(obj, pointer)
obj = validator.validate_properties(obj, pointer)
obj = validator.validate_default_properties(obj, pointer)
elif self.is_string(obj):
obj = validator.validate_max_length(obj, pointer)
obj = validator.validate_min_length(obj, pointer)
obj = validator.validate_pattern(obj, pointer)
obj = validator.validate_format(obj, pointer)
if validator.errors:
raise ValidationError('multiple errors',
obj,
errors=validator.errors)
return obj
|
[
"def",
"validate",
"(",
"self",
",",
"obj",
",",
"pointer",
"=",
"None",
")",
":",
"pointer",
"=",
"pointer",
"or",
"'#'",
"validator",
"=",
"deepcopy",
"(",
"self",
")",
"validator",
".",
"errors",
"=",
"[",
"]",
"validator",
".",
"fail_fast",
"=",
"False",
"obj",
"=",
"deepcopy",
"(",
"obj",
")",
"obj",
"=",
"validator",
".",
"validate_enum",
"(",
"obj",
",",
"pointer",
")",
"obj",
"=",
"validator",
".",
"validate_type",
"(",
"obj",
",",
"pointer",
")",
"obj",
"=",
"validator",
".",
"validate_not",
"(",
"obj",
",",
"pointer",
")",
"obj",
"=",
"validator",
".",
"validate_all_of",
"(",
"obj",
",",
"pointer",
")",
"obj",
"=",
"validator",
".",
"validate_any_of",
"(",
"obj",
",",
"pointer",
")",
"obj",
"=",
"validator",
".",
"validate_one_of",
"(",
"obj",
",",
"pointer",
")",
"if",
"self",
".",
"is_array",
"(",
"obj",
")",
":",
"obj",
"=",
"validator",
".",
"validate_items",
"(",
"obj",
",",
"pointer",
")",
"obj",
"=",
"validator",
".",
"validate_max_items",
"(",
"obj",
",",
"pointer",
")",
"obj",
"=",
"validator",
".",
"validate_min_items",
"(",
"obj",
",",
"pointer",
")",
"obj",
"=",
"validator",
".",
"validate_unique_items",
"(",
"obj",
",",
"pointer",
")",
"elif",
"self",
".",
"is_number",
"(",
"obj",
")",
":",
"obj",
"=",
"validator",
".",
"validate_maximum",
"(",
"obj",
",",
"pointer",
")",
"obj",
"=",
"validator",
".",
"validate_minimum",
"(",
"obj",
",",
"pointer",
")",
"obj",
"=",
"validator",
".",
"validate_multiple_of",
"(",
"obj",
",",
"pointer",
")",
"elif",
"self",
".",
"is_object",
"(",
"obj",
")",
":",
"obj",
"=",
"validator",
".",
"validate_required",
"(",
"obj",
",",
"pointer",
")",
"obj",
"=",
"validator",
".",
"validate_max_properties",
"(",
"obj",
",",
"pointer",
")",
"obj",
"=",
"validator",
".",
"validate_min_properties",
"(",
"obj",
",",
"pointer",
")",
"obj",
"=",
"validator",
".",
"validate_dependencies",
"(",
"obj",
",",
"pointer",
")",
"obj",
"=",
"validator",
".",
"validate_properties",
"(",
"obj",
",",
"pointer",
")",
"obj",
"=",
"validator",
".",
"validate_default_properties",
"(",
"obj",
",",
"pointer",
")",
"elif",
"self",
".",
"is_string",
"(",
"obj",
")",
":",
"obj",
"=",
"validator",
".",
"validate_max_length",
"(",
"obj",
",",
"pointer",
")",
"obj",
"=",
"validator",
".",
"validate_min_length",
"(",
"obj",
",",
"pointer",
")",
"obj",
"=",
"validator",
".",
"validate_pattern",
"(",
"obj",
",",
"pointer",
")",
"obj",
"=",
"validator",
".",
"validate_format",
"(",
"obj",
",",
"pointer",
")",
"if",
"validator",
".",
"errors",
":",
"raise",
"ValidationError",
"(",
"'multiple errors'",
",",
"obj",
",",
"errors",
"=",
"validator",
".",
"errors",
")",
"return",
"obj"
] | 40.204082
| 17.387755
|
def rmdir(self, target_directory, allow_symlink=False):
"""Remove a leaf Fake directory.
Args:
target_directory: (str) Name of directory to remove.
allow_symlink: (bool) if `target_directory` is a symlink,
the function just returns, otherwise it raises (Posix only)
Raises:
OSError: if target_directory does not exist.
OSError: if target_directory does not point to a directory.
OSError: if removal failed per FakeFilesystem.RemoveObject.
Cannot remove '.'.
"""
if target_directory in (b'.', u'.'):
error_nr = errno.EACCES if self.is_windows_fs else errno.EINVAL
self.raise_os_error(error_nr, target_directory)
ends_with_sep = self.ends_with_path_separator(target_directory)
target_directory = self.absnormpath(target_directory)
if self.confirmdir(target_directory):
if not self.is_windows_fs and self.islink(target_directory):
if allow_symlink:
return
if not ends_with_sep or not self.is_macos:
self.raise_os_error(errno.ENOTDIR, target_directory)
dir_object = self.resolve(target_directory)
if dir_object.contents:
self.raise_os_error(errno.ENOTEMPTY, target_directory)
try:
self.remove_object(target_directory)
except IOError as exc:
self.raise_os_error(exc.errno, exc.filename)
|
[
"def",
"rmdir",
"(",
"self",
",",
"target_directory",
",",
"allow_symlink",
"=",
"False",
")",
":",
"if",
"target_directory",
"in",
"(",
"b'.'",
",",
"u'.'",
")",
":",
"error_nr",
"=",
"errno",
".",
"EACCES",
"if",
"self",
".",
"is_windows_fs",
"else",
"errno",
".",
"EINVAL",
"self",
".",
"raise_os_error",
"(",
"error_nr",
",",
"target_directory",
")",
"ends_with_sep",
"=",
"self",
".",
"ends_with_path_separator",
"(",
"target_directory",
")",
"target_directory",
"=",
"self",
".",
"absnormpath",
"(",
"target_directory",
")",
"if",
"self",
".",
"confirmdir",
"(",
"target_directory",
")",
":",
"if",
"not",
"self",
".",
"is_windows_fs",
"and",
"self",
".",
"islink",
"(",
"target_directory",
")",
":",
"if",
"allow_symlink",
":",
"return",
"if",
"not",
"ends_with_sep",
"or",
"not",
"self",
".",
"is_macos",
":",
"self",
".",
"raise_os_error",
"(",
"errno",
".",
"ENOTDIR",
",",
"target_directory",
")",
"dir_object",
"=",
"self",
".",
"resolve",
"(",
"target_directory",
")",
"if",
"dir_object",
".",
"contents",
":",
"self",
".",
"raise_os_error",
"(",
"errno",
".",
"ENOTEMPTY",
",",
"target_directory",
")",
"try",
":",
"self",
".",
"remove_object",
"(",
"target_directory",
")",
"except",
"IOError",
"as",
"exc",
":",
"self",
".",
"raise_os_error",
"(",
"exc",
".",
"errno",
",",
"exc",
".",
"filename",
")"
] | 45.818182
| 20.878788
|
def fill_row(self, forward, items, idx, row, ro, ri, overlap,lengths):
"Fill the row with tokens from the ragged array. --OBS-- overlap != 1 has not been implemented"
ibuf = n = 0
ro -= 1
while ibuf < row.size:
ro += 1
ix = idx[ro]
rag = items[ix]
if forward:
ri = 0 if ibuf else ri
n = min(lengths[ix] - ri, row.size - ibuf)
row[ibuf:ibuf+n] = rag[ri:ri+n]
else:
ri = lengths[ix] if ibuf else ri
n = min(ri, row.size - ibuf)
row[ibuf:ibuf+n] = rag[ri-n:ri][::-1]
ibuf += n
return ro, ri + ((n-overlap) if forward else -(n-overlap))
|
[
"def",
"fill_row",
"(",
"self",
",",
"forward",
",",
"items",
",",
"idx",
",",
"row",
",",
"ro",
",",
"ri",
",",
"overlap",
",",
"lengths",
")",
":",
"ibuf",
"=",
"n",
"=",
"0",
"ro",
"-=",
"1",
"while",
"ibuf",
"<",
"row",
".",
"size",
":",
"ro",
"+=",
"1",
"ix",
"=",
"idx",
"[",
"ro",
"]",
"rag",
"=",
"items",
"[",
"ix",
"]",
"if",
"forward",
":",
"ri",
"=",
"0",
"if",
"ibuf",
"else",
"ri",
"n",
"=",
"min",
"(",
"lengths",
"[",
"ix",
"]",
"-",
"ri",
",",
"row",
".",
"size",
"-",
"ibuf",
")",
"row",
"[",
"ibuf",
":",
"ibuf",
"+",
"n",
"]",
"=",
"rag",
"[",
"ri",
":",
"ri",
"+",
"n",
"]",
"else",
":",
"ri",
"=",
"lengths",
"[",
"ix",
"]",
"if",
"ibuf",
"else",
"ri",
"n",
"=",
"min",
"(",
"ri",
",",
"row",
".",
"size",
"-",
"ibuf",
")",
"row",
"[",
"ibuf",
":",
"ibuf",
"+",
"n",
"]",
"=",
"rag",
"[",
"ri",
"-",
"n",
":",
"ri",
"]",
"[",
":",
":",
"-",
"1",
"]",
"ibuf",
"+=",
"n",
"return",
"ro",
",",
"ri",
"+",
"(",
"(",
"n",
"-",
"overlap",
")",
"if",
"forward",
"else",
"-",
"(",
"n",
"-",
"overlap",
")",
")"
] | 40.722222
| 18.277778
|
def multipart_parse_json(api_url, data):
"""
Send a post request and parse the JSON response (potentially containing
non-ascii characters).
@param api_url: the url endpoint to post to.
@param data: a dictionary that will be passed to requests.post
"""
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
response_text = requests.post(api_url, data=data, headers=headers)\
.text.encode('ascii', errors='replace')
return json.loads(response_text.decode())
|
[
"def",
"multipart_parse_json",
"(",
"api_url",
",",
"data",
")",
":",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'application/x-www-form-urlencoded'",
"}",
"response_text",
"=",
"requests",
".",
"post",
"(",
"api_url",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"headers",
")",
".",
"text",
".",
"encode",
"(",
"'ascii'",
",",
"errors",
"=",
"'replace'",
")",
"return",
"json",
".",
"loads",
"(",
"response_text",
".",
"decode",
"(",
")",
")"
] | 41.583333
| 16.083333
|
def timeInfo(self):
"""Return the time info for this Map Service"""
time_info = self._json_struct.get('timeInfo', {})
if not time_info:
return None
time_info = time_info.copy()
if 'timeExtent' in time_info:
time_info['timeExtent'] = utils.timetopythonvalue(
time_info['timeExtent'])
return time_info
|
[
"def",
"timeInfo",
"(",
"self",
")",
":",
"time_info",
"=",
"self",
".",
"_json_struct",
".",
"get",
"(",
"'timeInfo'",
",",
"{",
"}",
")",
"if",
"not",
"time_info",
":",
"return",
"None",
"time_info",
"=",
"time_info",
".",
"copy",
"(",
")",
"if",
"'timeExtent'",
"in",
"time_info",
":",
"time_info",
"[",
"'timeExtent'",
"]",
"=",
"utils",
".",
"timetopythonvalue",
"(",
"time_info",
"[",
"'timeExtent'",
"]",
")",
"return",
"time_info"
] | 41.4
| 15.1
|
def verify_schema(self, dataset_id, table_id, schema):
"""Indicate whether schemas match exactly
Compare the BigQuery table identified in the parameters with
the schema passed in and indicate whether all fields in the former
are present in the latter. Order is not considered.
Parameters
----------
dataset_id :str
Name of the BigQuery dataset for the table
table_id : str
Name of the BigQuery table
schema : list(dict)
Schema for comparison. Each item should have
a 'name' and a 'type'
Returns
-------
bool
Whether the schemas match
"""
fields_remote = self._clean_schema_fields(
self.schema(dataset_id, table_id)
)
fields_local = self._clean_schema_fields(schema["fields"])
return fields_remote == fields_local
|
[
"def",
"verify_schema",
"(",
"self",
",",
"dataset_id",
",",
"table_id",
",",
"schema",
")",
":",
"fields_remote",
"=",
"self",
".",
"_clean_schema_fields",
"(",
"self",
".",
"schema",
"(",
"dataset_id",
",",
"table_id",
")",
")",
"fields_local",
"=",
"self",
".",
"_clean_schema_fields",
"(",
"schema",
"[",
"\"fields\"",
"]",
")",
"return",
"fields_remote",
"==",
"fields_local"
] | 30.931034
| 20.103448
|
def get_localized_property(context, field=None, language=None):
'''
When accessing to the name of the field itself, the value
in the current language will be returned. Unless it's set,
the value in the default language will be returned.
'''
if language:
return getattr(context, get_real_fieldname(field, language))
if hasattr(settings, 'FALLBACK_LANGUAGES'):
attrs = [translation.get_language()]
attrs += get_fallback_languages()
else:
attrs = [
translation.get_language(),
translation.get_language()[:2],
settings.LANGUAGE_CODE,
]
def predicate(x):
value = getattr(context, get_real_fieldname(field, x), None)
return value if valid_for_gettext(value) else None
return first_match(predicate, attrs)
|
[
"def",
"get_localized_property",
"(",
"context",
",",
"field",
"=",
"None",
",",
"language",
"=",
"None",
")",
":",
"if",
"language",
":",
"return",
"getattr",
"(",
"context",
",",
"get_real_fieldname",
"(",
"field",
",",
"language",
")",
")",
"if",
"hasattr",
"(",
"settings",
",",
"'FALLBACK_LANGUAGES'",
")",
":",
"attrs",
"=",
"[",
"translation",
".",
"get_language",
"(",
")",
"]",
"attrs",
"+=",
"get_fallback_languages",
"(",
")",
"else",
":",
"attrs",
"=",
"[",
"translation",
".",
"get_language",
"(",
")",
",",
"translation",
".",
"get_language",
"(",
")",
"[",
":",
"2",
"]",
",",
"settings",
".",
"LANGUAGE_CODE",
",",
"]",
"def",
"predicate",
"(",
"x",
")",
":",
"value",
"=",
"getattr",
"(",
"context",
",",
"get_real_fieldname",
"(",
"field",
",",
"x",
")",
",",
"None",
")",
"return",
"value",
"if",
"valid_for_gettext",
"(",
"value",
")",
"else",
"None",
"return",
"first_match",
"(",
"predicate",
",",
"attrs",
")"
] | 34.125
| 20.041667
|
def append_result(self, results, num_matches):
"""Real-time update of search results"""
filename, lineno, colno, match_end, line = results
if filename not in self.files:
file_item = FileMatchItem(self, filename, self.sorting,
self.text_color)
file_item.setExpanded(True)
self.files[filename] = file_item
self.num_files += 1
search_text = self.search_text
title = "'%s' - " % search_text
nb_files = self.num_files
if nb_files == 0:
text = _('String not found')
else:
text_matches = _('matches in')
text_files = _('file')
if nb_files > 1:
text_files += 's'
text = "%d %s %d %s" % (num_matches, text_matches,
nb_files, text_files)
self.set_title(title + text)
file_item = self.files[filename]
line = self.truncate_result(line, colno, match_end)
item = LineMatchItem(file_item, lineno, colno, line, self.text_color)
self.data[id(item)] = (filename, lineno, colno)
|
[
"def",
"append_result",
"(",
"self",
",",
"results",
",",
"num_matches",
")",
":",
"filename",
",",
"lineno",
",",
"colno",
",",
"match_end",
",",
"line",
"=",
"results",
"if",
"filename",
"not",
"in",
"self",
".",
"files",
":",
"file_item",
"=",
"FileMatchItem",
"(",
"self",
",",
"filename",
",",
"self",
".",
"sorting",
",",
"self",
".",
"text_color",
")",
"file_item",
".",
"setExpanded",
"(",
"True",
")",
"self",
".",
"files",
"[",
"filename",
"]",
"=",
"file_item",
"self",
".",
"num_files",
"+=",
"1",
"search_text",
"=",
"self",
".",
"search_text",
"title",
"=",
"\"'%s' - \"",
"%",
"search_text",
"nb_files",
"=",
"self",
".",
"num_files",
"if",
"nb_files",
"==",
"0",
":",
"text",
"=",
"_",
"(",
"'String not found'",
")",
"else",
":",
"text_matches",
"=",
"_",
"(",
"'matches in'",
")",
"text_files",
"=",
"_",
"(",
"'file'",
")",
"if",
"nb_files",
">",
"1",
":",
"text_files",
"+=",
"'s'",
"text",
"=",
"\"%d %s %d %s\"",
"%",
"(",
"num_matches",
",",
"text_matches",
",",
"nb_files",
",",
"text_files",
")",
"self",
".",
"set_title",
"(",
"title",
"+",
"text",
")",
"file_item",
"=",
"self",
".",
"files",
"[",
"filename",
"]",
"line",
"=",
"self",
".",
"truncate_result",
"(",
"line",
",",
"colno",
",",
"match_end",
")",
"item",
"=",
"LineMatchItem",
"(",
"file_item",
",",
"lineno",
",",
"colno",
",",
"line",
",",
"self",
".",
"text_color",
")",
"self",
".",
"data",
"[",
"id",
"(",
"item",
")",
"]",
"=",
"(",
"filename",
",",
"lineno",
",",
"colno",
")"
] | 40.137931
| 13.482759
|
def _read_eeprom(self, address, size):
'''Read EEPROM
'''
self._intf.write(self._base_addr + self.CAL_EEPROM_ADD, array('B', pack('>H', address & 0x3FFF))) # 14-bit address, 16384 bytes
n_pages, n_bytes = divmod(size, self.CAL_EEPROM_PAGE_SIZE)
data = array('B')
for _ in range(n_pages):
data.extend(self._intf.read(self._base_addr + self.CAL_EEPROM_ADD | 1, size=self.CAL_EEPROM_PAGE_SIZE))
if n_bytes > 0:
data.extend(self._intf.read(self._base_addr + self.CAL_EEPROM_ADD | 1, size=n_bytes))
return data
|
[
"def",
"_read_eeprom",
"(",
"self",
",",
"address",
",",
"size",
")",
":",
"self",
".",
"_intf",
".",
"write",
"(",
"self",
".",
"_base_addr",
"+",
"self",
".",
"CAL_EEPROM_ADD",
",",
"array",
"(",
"'B'",
",",
"pack",
"(",
"'>H'",
",",
"address",
"&",
"0x3FFF",
")",
")",
")",
"# 14-bit address, 16384 bytes",
"n_pages",
",",
"n_bytes",
"=",
"divmod",
"(",
"size",
",",
"self",
".",
"CAL_EEPROM_PAGE_SIZE",
")",
"data",
"=",
"array",
"(",
"'B'",
")",
"for",
"_",
"in",
"range",
"(",
"n_pages",
")",
":",
"data",
".",
"extend",
"(",
"self",
".",
"_intf",
".",
"read",
"(",
"self",
".",
"_base_addr",
"+",
"self",
".",
"CAL_EEPROM_ADD",
"|",
"1",
",",
"size",
"=",
"self",
".",
"CAL_EEPROM_PAGE_SIZE",
")",
")",
"if",
"n_bytes",
">",
"0",
":",
"data",
".",
"extend",
"(",
"self",
".",
"_intf",
".",
"read",
"(",
"self",
".",
"_base_addr",
"+",
"self",
".",
"CAL_EEPROM_ADD",
"|",
"1",
",",
"size",
"=",
"n_bytes",
")",
")",
"return",
"data"
] | 41.714286
| 34.571429
|
def _extend_breaks(self, major):
"""
Append 2 extra breaks at either end of major
If breaks of transform space are non-equidistant,
:func:`minor_breaks` add minor breaks beyond the first
and last major breaks. The solutions is to extend those
breaks (in transformed space) before the minor break call
is made. How the breaks depends on the type of transform.
"""
trans = self.trans
trans = trans if isinstance(trans, type) else trans.__class__
# so far we are only certain about this extending stuff
# making sense for log transform
is_log = trans.__name__.startswith('log')
diff = np.diff(major)
step = diff[0]
if is_log and all(diff == step):
major = np.hstack([major[0]-step, major, major[-1]+step])
return major
|
[
"def",
"_extend_breaks",
"(",
"self",
",",
"major",
")",
":",
"trans",
"=",
"self",
".",
"trans",
"trans",
"=",
"trans",
"if",
"isinstance",
"(",
"trans",
",",
"type",
")",
"else",
"trans",
".",
"__class__",
"# so far we are only certain about this extending stuff",
"# making sense for log transform",
"is_log",
"=",
"trans",
".",
"__name__",
".",
"startswith",
"(",
"'log'",
")",
"diff",
"=",
"np",
".",
"diff",
"(",
"major",
")",
"step",
"=",
"diff",
"[",
"0",
"]",
"if",
"is_log",
"and",
"all",
"(",
"diff",
"==",
"step",
")",
":",
"major",
"=",
"np",
".",
"hstack",
"(",
"[",
"major",
"[",
"0",
"]",
"-",
"step",
",",
"major",
",",
"major",
"[",
"-",
"1",
"]",
"+",
"step",
"]",
")",
"return",
"major"
] | 42.25
| 16.25
|
def keys(self, prefix=None, delimiter=None):
"""
:param prefix: NOT A STRING PREFIX, RATHER PATH ID PREFIX (MUST MATCH TO NEXT "." OR ":")
:param delimiter: TO GET Prefix OBJECTS, RATHER THAN WHOLE KEYS
:return: SET OF KEYS IN BUCKET, OR
"""
if delimiter:
# WE REALLY DO NOT GET KEYS, BUT RATHER Prefix OBJECTS
# AT LEAST THEY ARE UNIQUE
candidates = [k.name.rstrip(delimiter) for k in self.bucket.list(prefix=prefix, delimiter=delimiter)]
else:
candidates = [strip_extension(k.key) for k in self.bucket.list(prefix=prefix)]
if prefix == None:
return set(c for c in candidates if c != "0.json")
else:
return set(k for k in candidates if k == prefix or k.startswith(prefix + ".") or k.startswith(prefix + ":"))
|
[
"def",
"keys",
"(",
"self",
",",
"prefix",
"=",
"None",
",",
"delimiter",
"=",
"None",
")",
":",
"if",
"delimiter",
":",
"# WE REALLY DO NOT GET KEYS, BUT RATHER Prefix OBJECTS",
"# AT LEAST THEY ARE UNIQUE",
"candidates",
"=",
"[",
"k",
".",
"name",
".",
"rstrip",
"(",
"delimiter",
")",
"for",
"k",
"in",
"self",
".",
"bucket",
".",
"list",
"(",
"prefix",
"=",
"prefix",
",",
"delimiter",
"=",
"delimiter",
")",
"]",
"else",
":",
"candidates",
"=",
"[",
"strip_extension",
"(",
"k",
".",
"key",
")",
"for",
"k",
"in",
"self",
".",
"bucket",
".",
"list",
"(",
"prefix",
"=",
"prefix",
")",
"]",
"if",
"prefix",
"==",
"None",
":",
"return",
"set",
"(",
"c",
"for",
"c",
"in",
"candidates",
"if",
"c",
"!=",
"\"0.json\"",
")",
"else",
":",
"return",
"set",
"(",
"k",
"for",
"k",
"in",
"candidates",
"if",
"k",
"==",
"prefix",
"or",
"k",
".",
"startswith",
"(",
"prefix",
"+",
"\".\"",
")",
"or",
"k",
".",
"startswith",
"(",
"prefix",
"+",
"\":\"",
")",
")"
] | 49.411765
| 28
|
def _update_data(self, data):
# type: (Any) -> Dict[str, List]
"""Set our data and notify any subscribers of children what has changed
Args:
data (object): The new data
Returns:
dict: {child_name: [path_list, optional child_data]} of the change
that needs to be passed to a child as a result of this
"""
self.data = data
child_change_dict = {}
# Reflect change of data to children
for name in self.children:
child_data = getattr(data, name, None)
if child_data is None:
# Deletion
child_change_dict[name] = [[]]
else:
# Change
child_change_dict[name] = [[], child_data]
return child_change_dict
|
[
"def",
"_update_data",
"(",
"self",
",",
"data",
")",
":",
"# type: (Any) -> Dict[str, List]",
"self",
".",
"data",
"=",
"data",
"child_change_dict",
"=",
"{",
"}",
"# Reflect change of data to children",
"for",
"name",
"in",
"self",
".",
"children",
":",
"child_data",
"=",
"getattr",
"(",
"data",
",",
"name",
",",
"None",
")",
"if",
"child_data",
"is",
"None",
":",
"# Deletion",
"child_change_dict",
"[",
"name",
"]",
"=",
"[",
"[",
"]",
"]",
"else",
":",
"# Change",
"child_change_dict",
"[",
"name",
"]",
"=",
"[",
"[",
"]",
",",
"child_data",
"]",
"return",
"child_change_dict"
] | 34.521739
| 15.130435
|
def allocate(n, dtype=numpy.float32):
""" allocate context-portable pinned host memory """
return drv.pagelocked_empty(int(n), dtype, order='C', mem_flags=drv.host_alloc_flags.PORTABLE)
|
[
"def",
"allocate",
"(",
"n",
",",
"dtype",
"=",
"numpy",
".",
"float32",
")",
":",
"return",
"drv",
".",
"pagelocked_empty",
"(",
"int",
"(",
"n",
")",
",",
"dtype",
",",
"order",
"=",
"'C'",
",",
"mem_flags",
"=",
"drv",
".",
"host_alloc_flags",
".",
"PORTABLE",
")"
] | 63.666667
| 20.333333
|
def maelstrom(args):
"""Run the maelstrom method."""
infile = args.inputfile
genome = args.genome
outdir = args.outdir
pwmfile = args.pwmfile
methods = args.methods
ncpus = args.ncpus
if not os.path.exists(infile):
raise ValueError("file {} does not exist".format(infile))
if methods:
methods = [x.strip() for x in methods.split(",")]
run_maelstrom(infile, genome, outdir, pwmfile, methods=methods, ncpus=ncpus)
|
[
"def",
"maelstrom",
"(",
"args",
")",
":",
"infile",
"=",
"args",
".",
"inputfile",
"genome",
"=",
"args",
".",
"genome",
"outdir",
"=",
"args",
".",
"outdir",
"pwmfile",
"=",
"args",
".",
"pwmfile",
"methods",
"=",
"args",
".",
"methods",
"ncpus",
"=",
"args",
".",
"ncpus",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"infile",
")",
":",
"raise",
"ValueError",
"(",
"\"file {} does not exist\"",
".",
"format",
"(",
"infile",
")",
")",
"if",
"methods",
":",
"methods",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"methods",
".",
"split",
"(",
"\",\"",
")",
"]",
"run_maelstrom",
"(",
"infile",
",",
"genome",
",",
"outdir",
",",
"pwmfile",
",",
"methods",
"=",
"methods",
",",
"ncpus",
"=",
"ncpus",
")"
] | 28.6875
| 21.25
|
def oauth_scope(*scope_names):
""" Return a decorator that restricts requests to those authorized with
a certain scope or scopes.
For example, to restrict access to a given endpoint like this:
.. code-block:: python
@require_login
def secret_attribute_endpoint(request, *args, **kwargs):
user = request.user
return HttpResponse(json.dumps({
'super_secret_attribute' : user.super_secret_attribute
})
...just add the decorator and an additional argument to the function's
signature:
.. code-block:: python
@oauth_scope('foo', 'bar')
def secret_attribute_endpoint(access_token, request, *args, **kwargs):
# Because of the decorator, the function is guaranteed to only be run
# if the request includes proper access to the 'foo' and 'bar'
# scopes.
user = access_token.user
return HttpResponse(json.dumps({
'super_secret_attribute' : user.super_secret_attribute
})
The first argument to the wrapped endpoint will now be an
:py:class:`djoauth2.models.AccessToken` object. The second argument will be
the original Django ``HttpRequest``, and all other parameters included in the
requests (due to URL-matching or any other method) will follow.
We **strongly recommend** that you use this decorator to protect your API
endpoints instead of manually instantiating a
djoauth2.access_token.AccessTokenAuthenticator object.
"""
authenticator = AccessTokenAuthenticator(required_scope_names=scope_names)
def scope_decorator(view_func):
@wraps(view_func)
def wrapper(request, *args, **kwargs):
access_token, error_response_arguments = authenticator.validate(request)
if not access_token:
return authenticator.make_error_response(*error_response_arguments)
return view_func(access_token, request, *args, **kwargs)
return wrapper
return scope_decorator
|
[
"def",
"oauth_scope",
"(",
"*",
"scope_names",
")",
":",
"authenticator",
"=",
"AccessTokenAuthenticator",
"(",
"required_scope_names",
"=",
"scope_names",
")",
"def",
"scope_decorator",
"(",
"view_func",
")",
":",
"@",
"wraps",
"(",
"view_func",
")",
"def",
"wrapper",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"access_token",
",",
"error_response_arguments",
"=",
"authenticator",
".",
"validate",
"(",
"request",
")",
"if",
"not",
"access_token",
":",
"return",
"authenticator",
".",
"make_error_response",
"(",
"*",
"error_response_arguments",
")",
"return",
"view_func",
"(",
"access_token",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper",
"return",
"scope_decorator"
] | 34.436364
| 25.309091
|
def gen_weights(self, f_target):
"""Generate a set of weights over the basis functions such
that the target forcing term trajectory is matched.
f_target np.array: the desired forcing term trajectory
"""
# calculate x and psi
x_track = self.cs.rollout()
psi_track = self.gen_psi(x_track)
#efficiently calculate weights for BFs using weighted linear regression
self.w = np.zeros((self.dmps, self.bfs))
for d in range(self.dmps):
# spatial scaling term
k = 1.#(self.goal[d] - self.y0[d])
for b in range(self.bfs):
numer = np.sum(x_track * psi_track[:,b] * f_target[:,d])
denom = np.sum(x_track**2 * psi_track[:,b])
self.w[d,b] = numer / (k * denom)
|
[
"def",
"gen_weights",
"(",
"self",
",",
"f_target",
")",
":",
"# calculate x and psi ",
"x_track",
"=",
"self",
".",
"cs",
".",
"rollout",
"(",
")",
"psi_track",
"=",
"self",
".",
"gen_psi",
"(",
"x_track",
")",
"#efficiently calculate weights for BFs using weighted linear regression",
"self",
".",
"w",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"dmps",
",",
"self",
".",
"bfs",
")",
")",
"for",
"d",
"in",
"range",
"(",
"self",
".",
"dmps",
")",
":",
"# spatial scaling term",
"k",
"=",
"1.",
"#(self.goal[d] - self.y0[d])",
"for",
"b",
"in",
"range",
"(",
"self",
".",
"bfs",
")",
":",
"numer",
"=",
"np",
".",
"sum",
"(",
"x_track",
"*",
"psi_track",
"[",
":",
",",
"b",
"]",
"*",
"f_target",
"[",
":",
",",
"d",
"]",
")",
"denom",
"=",
"np",
".",
"sum",
"(",
"x_track",
"**",
"2",
"*",
"psi_track",
"[",
":",
",",
"b",
"]",
")",
"self",
".",
"w",
"[",
"d",
",",
"b",
"]",
"=",
"numer",
"/",
"(",
"k",
"*",
"denom",
")"
] | 40.25
| 15.15
|
def plot(self, format='segments', bits=None, **kwargs):
"""Plot the data for this `StateVector`
Parameters
----------
format : `str`, optional, default: ``'segments'``
The type of plot to make, either 'segments' to plot the
SegmentList for each bit, or 'timeseries' to plot the raw
data for this `StateVector`
bits : `list`, optional
A list of bit indices or bit names, defaults to
`~StateVector.bits`. This argument is ignored if ``format`` is
not ``'segments'``
**kwargs
Other keyword arguments to be passed to either
`~gwpy.plot.SegmentAxes.plot` or
`~gwpy.plot.Axes.plot`, depending
on ``format``.
Returns
-------
plot : `~gwpy.plot.Plot`
output plot object
See Also
--------
matplotlib.pyplot.figure
for documentation of keyword arguments used to create the
figure
matplotlib.figure.Figure.add_subplot
for documentation of keyword arguments used to create the
axes
gwpy.plot.SegmentAxes.plot_flag
for documentation of keyword arguments used in rendering each
statevector flag.
"""
if format == 'timeseries':
return super(StateVector, self).plot(**kwargs)
if format == 'segments':
from ..plot import Plot
kwargs.setdefault('xscale', 'auto-gps')
return Plot(*self.to_dqflags(bits=bits).values(),
projection='segments', **kwargs)
raise ValueError("'format' argument must be one of: 'timeseries' or "
"'segments'")
|
[
"def",
"plot",
"(",
"self",
",",
"format",
"=",
"'segments'",
",",
"bits",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"format",
"==",
"'timeseries'",
":",
"return",
"super",
"(",
"StateVector",
",",
"self",
")",
".",
"plot",
"(",
"*",
"*",
"kwargs",
")",
"if",
"format",
"==",
"'segments'",
":",
"from",
".",
".",
"plot",
"import",
"Plot",
"kwargs",
".",
"setdefault",
"(",
"'xscale'",
",",
"'auto-gps'",
")",
"return",
"Plot",
"(",
"*",
"self",
".",
"to_dqflags",
"(",
"bits",
"=",
"bits",
")",
".",
"values",
"(",
")",
",",
"projection",
"=",
"'segments'",
",",
"*",
"*",
"kwargs",
")",
"raise",
"ValueError",
"(",
"\"'format' argument must be one of: 'timeseries' or \"",
"\"'segments'\"",
")"
] | 36.574468
| 18.531915
|
def encode(self, raw_string, add_eos=False):
"""Encodes a string into a list of int subtoken ids."""
ret = []
tokens = _split_string_to_tokens(_native_to_unicode(raw_string))
for token in tokens:
ret.extend(self._token_to_subtoken_ids(token))
if add_eos:
ret.append(EOS_ID)
return ret
|
[
"def",
"encode",
"(",
"self",
",",
"raw_string",
",",
"add_eos",
"=",
"False",
")",
":",
"ret",
"=",
"[",
"]",
"tokens",
"=",
"_split_string_to_tokens",
"(",
"_native_to_unicode",
"(",
"raw_string",
")",
")",
"for",
"token",
"in",
"tokens",
":",
"ret",
".",
"extend",
"(",
"self",
".",
"_token_to_subtoken_ids",
"(",
"token",
")",
")",
"if",
"add_eos",
":",
"ret",
".",
"append",
"(",
"EOS_ID",
")",
"return",
"ret"
] | 34.666667
| 17.222222
|
def block_create(
self,
type,
account,
wallet=None,
representative=None,
key=None,
destination=None,
amount=None,
balance=None,
previous=None,
source=None,
work=None,
):
"""
Creates a json representations of new block based on input data &
signed with private key or account in **wallet** for offline signing
.. enable_control required
.. version 8.1 required
:param type: Type of block to create one of **open**, **receive**,
**change**, **send**
:type type: str
:param account: Account for the signed block
:type account: str
:param wallet: Wallet to use
:type wallet: str
:param representative: Representative account for **open** and
**change** blocks
:type representative: str
:param key: Private key to use to open account for **open** blocks
:type key: str
:param destination: Destination account for **send** blocks
:type destination: str
:param amount: Amount in raw for **send** blocks
:type amount: int
:param balance: Balance in raw of account for **send** blocks
:type balance: int
:param previous: Previous block hash for **receive**, **send**
and **change** blocks
:type previous: str
:param source: Source block for **open** and **receive** blocks
:type source: str
:param work: Work value to use for block from external source
:type work: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.block_create(
... type="open",
... account="xrb_3kdbxitaj7f6mrir6miiwtw4muhcc58e6tn5st6rfaxsdnb7gr4roudwn951",
... source="19D3D919475DEED4696B5D13018151D1AF88B2BD3BCFF048B45031C1F36D1858",
... representative="xrb_1hza3f7wiiqa7ig3jczyxj5yo86yegcmqk3criaz838j91sxcckpfhbhhra1",
... key="0000000000000000000000000000000000000000000000000000000000000001"
... )
{
"block": {
"account": "xrb_3kdbxitaj7f6mrir6miiwtw4muhcc58e6tn5st6rfaxsdnb7gr4roudwn951",
"representative": "xrb_1hza3f7wiiqa7ig3jczyxj5yo86yegcmqk3criaz838j91sxcckpfhbhhra1",
"signature": "5974324F8CC42DA56F62FC212A17886BDCB18DE363D04DA84EEDC99CB4A33919D14A2CF9DE9D534FAA6D0B91D01F0622205D898293525E692586C84F2DCF9208",
"source": "19D3D919475DEED4696B5D13018151D1AF88B2BD3BCFF048B45031C1F36D1858",
"type": "open",
"work": "4ec76c9bda2325ed"
},
"hash": "F47B23107E5F34B2CE06F562B5C435DF72A533251CB414C51B2B62A8F63A00E4"
}
>>> rpc.block_create(
... type="receive",
... account="xrb_3kdbxitaj7f6mrir6miiwtw4muhcc58e6tn5st6rfaxsdnb7gr4roudwn951",
... previous="F47B23107E5F34B2CE06F562B5C435DF72A533251CB414C51B2B62A8F63A00E4",
... source="19D3D919475DEED4696B5D13018151D1AF88B2BD3BCFF048B45031C1F36D1858",
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
... )
{
"block": {
"previous": "F47B23107E5F34B2CE06F562B5C435DF72A533251CB414C51B2B62A8F63A00E4",
"signature": "A13FD22527771667D5DFF33D69787D734836A3561D8A490C1F4917A05D77EA09860461D5FBFC99246A4EAB5627F119AD477598E22EE021C4711FACF4F3C80D0E",
"source": "19D3D919475DEED4696B5D13018151D1AF88B2BD3BCFF048B45031C1F36D1858",
"type": "receive",
"work": "6acb5dd43a38d76a"
},
"hash": "314BA8D9057678C1F53371C2DB3026C1FAC01EC8E7802FD9A2E8130FC523429E"
}
>>> rpc.block_create(
... type="send",
... account="xrb_3kdbxitaj7f6mrir6miiwtw4muhcc58e6tn5st6rfaxsdnb7gr4roudwn951",
... amount=10000000000000000000000000000000,
... balance=20000000000000000000000000000000,
... destination="xrb_18gmu6engqhgtjnppqam181o5nfhj4sdtgyhy36dan3jr9spt84rzwmktafc",
... previous="314BA8D9057678C1F53371C2DB3026C1FAC01EC8E7802FD9A2E8130FC523429E",
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
... work="478563b2d9facfd4",
... )
{
"block": {
"balance": "0000007E37BE2022C0914B2680000000",
"destination": "xrb_18gmu6engqhgtjnppqam181o5nfhj4sdtgyhy36dan3jr9spt84rzwmktafc",
"previous": "314BA8D9057678C1F53371C2DB3026C1FAC01EC8E7802FD9A2E8130FC523429E",
"signature": "F19CA177EFA8692C8CBF7478CE3213F56E4A85DF760DA7A9E69141849831F8FD79BA9ED89CEC807B690FB4AA42D5008F9DBA7115E63C935401F1F0EFA547BC00",
"type": "send",
"work": "478563b2d9facfd4"
},
"hash": "F958305C0FF0551421D4ABEDCCF302079D020A0A3833E33F185E2B0415D4567A"
}
>>> rpc.block_create(
... type="change",
... account="xrb_3kdbxitaj7f6mrir6miiwtw4muhcc58e6tn5st6rfaxsdnb7gr4roudwn951",
... representative="xrb_18gmu6engqhgtjnppqam181o5nfhj4sdtgyhy36dan3jr9spt84rzwmktafc",
... previous="F958305C0FF0551421D4ABEDCCF302079D020A0A3833E33F185E2B0415D4567A",
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
... )
{
"block": {
"previous": "F958305C0FF0551421D4ABEDCCF302079D020A0A3833E33F185E2B0415D4567A",
"representative": "xrb_18gmu6engqhgtjnppqam181o5nfhj4sdtgyhy36dan3jr9spt84rzwmktafc",
"signature": "98B4D56881D9A88B170A6B2976AE21900C26A27F0E2C338D93FDED56183B73D19AA5BEB48E43FCBB8FF8293FDD368CEF50600FECEFD490A0855ED702ED209E04",
"type": "change",
"work": "55e5b7a83edc3f4f"
},
"hash": "654FA425CEBFC9E7726089E4EDE7A105462D93DBC915FFB70B50909920A7D286"
}
"""
payload = {
"type": self._process_value(type, 'blocktype'),
"account": self._process_value(account, 'account'),
}
if representative is not None:
payload['representative'] = self._process_value(representative, 'account')
if key is not None:
payload['key'] = self._process_value(key, 'privatekey')
if source is not None:
payload['source'] = self._process_value(source, 'block')
if destination is not None:
payload['destination'] = self._process_value(destination, 'account')
if amount is not None:
payload['amount'] = self._process_value(amount, 'int')
if balance is not None:
payload['balance'] = self._process_value(balance, 'int')
if previous is not None:
payload['previous'] = self._process_value(previous, 'block')
if wallet is not None:
payload['wallet'] = self._process_value(wallet, 'wallet')
if work is not None:
payload['work'] = self._process_value(work, 'work')
resp = self.call('block_create', payload)
resp['block'] = json.loads(resp['block'])
return resp
|
[
"def",
"block_create",
"(",
"self",
",",
"type",
",",
"account",
",",
"wallet",
"=",
"None",
",",
"representative",
"=",
"None",
",",
"key",
"=",
"None",
",",
"destination",
"=",
"None",
",",
"amount",
"=",
"None",
",",
"balance",
"=",
"None",
",",
"previous",
"=",
"None",
",",
"source",
"=",
"None",
",",
"work",
"=",
"None",
",",
")",
":",
"payload",
"=",
"{",
"\"type\"",
":",
"self",
".",
"_process_value",
"(",
"type",
",",
"'blocktype'",
")",
",",
"\"account\"",
":",
"self",
".",
"_process_value",
"(",
"account",
",",
"'account'",
")",
",",
"}",
"if",
"representative",
"is",
"not",
"None",
":",
"payload",
"[",
"'representative'",
"]",
"=",
"self",
".",
"_process_value",
"(",
"representative",
",",
"'account'",
")",
"if",
"key",
"is",
"not",
"None",
":",
"payload",
"[",
"'key'",
"]",
"=",
"self",
".",
"_process_value",
"(",
"key",
",",
"'privatekey'",
")",
"if",
"source",
"is",
"not",
"None",
":",
"payload",
"[",
"'source'",
"]",
"=",
"self",
".",
"_process_value",
"(",
"source",
",",
"'block'",
")",
"if",
"destination",
"is",
"not",
"None",
":",
"payload",
"[",
"'destination'",
"]",
"=",
"self",
".",
"_process_value",
"(",
"destination",
",",
"'account'",
")",
"if",
"amount",
"is",
"not",
"None",
":",
"payload",
"[",
"'amount'",
"]",
"=",
"self",
".",
"_process_value",
"(",
"amount",
",",
"'int'",
")",
"if",
"balance",
"is",
"not",
"None",
":",
"payload",
"[",
"'balance'",
"]",
"=",
"self",
".",
"_process_value",
"(",
"balance",
",",
"'int'",
")",
"if",
"previous",
"is",
"not",
"None",
":",
"payload",
"[",
"'previous'",
"]",
"=",
"self",
".",
"_process_value",
"(",
"previous",
",",
"'block'",
")",
"if",
"wallet",
"is",
"not",
"None",
":",
"payload",
"[",
"'wallet'",
"]",
"=",
"self",
".",
"_process_value",
"(",
"wallet",
",",
"'wallet'",
")",
"if",
"work",
"is",
"not",
"None",
":",
"payload",
"[",
"'work'",
"]",
"=",
"self",
".",
"_process_value",
"(",
"work",
",",
"'work'",
")",
"resp",
"=",
"self",
".",
"call",
"(",
"'block_create'",
",",
"payload",
")",
"resp",
"[",
"'block'",
"]",
"=",
"json",
".",
"loads",
"(",
"resp",
"[",
"'block'",
"]",
")",
"return",
"resp"
] | 41.514451
| 30.057803
|
def _labeledInput(activeInputs, cellsPerCol=32):
"""Print the list of [column, cellIdx] indices for each of the active
cells in activeInputs.
"""
if cellsPerCol == 0:
cellsPerCol = 1
cols = activeInputs.size / cellsPerCol
activeInputs = activeInputs.reshape(cols, cellsPerCol)
(cols, cellIdxs) = activeInputs.nonzero()
if len(cols) == 0:
return "NONE"
items = ["(%d): " % (len(cols))]
prevCol = -1
for (col,cellIdx) in zip(cols, cellIdxs):
if col != prevCol:
if prevCol != -1:
items.append("] ")
items.append("Col %d: [" % col)
prevCol = col
items.append("%d," % cellIdx)
items.append("]")
return " ".join(items)
|
[
"def",
"_labeledInput",
"(",
"activeInputs",
",",
"cellsPerCol",
"=",
"32",
")",
":",
"if",
"cellsPerCol",
"==",
"0",
":",
"cellsPerCol",
"=",
"1",
"cols",
"=",
"activeInputs",
".",
"size",
"/",
"cellsPerCol",
"activeInputs",
"=",
"activeInputs",
".",
"reshape",
"(",
"cols",
",",
"cellsPerCol",
")",
"(",
"cols",
",",
"cellIdxs",
")",
"=",
"activeInputs",
".",
"nonzero",
"(",
")",
"if",
"len",
"(",
"cols",
")",
"==",
"0",
":",
"return",
"\"NONE\"",
"items",
"=",
"[",
"\"(%d): \"",
"%",
"(",
"len",
"(",
"cols",
")",
")",
"]",
"prevCol",
"=",
"-",
"1",
"for",
"(",
"col",
",",
"cellIdx",
")",
"in",
"zip",
"(",
"cols",
",",
"cellIdxs",
")",
":",
"if",
"col",
"!=",
"prevCol",
":",
"if",
"prevCol",
"!=",
"-",
"1",
":",
"items",
".",
"append",
"(",
"\"] \"",
")",
"items",
".",
"append",
"(",
"\"Col %d: [\"",
"%",
"col",
")",
"prevCol",
"=",
"col",
"items",
".",
"append",
"(",
"\"%d,\"",
"%",
"cellIdx",
")",
"items",
".",
"append",
"(",
"\"]\"",
")",
"return",
"\" \"",
".",
"join",
"(",
"items",
")"
] | 25.346154
| 16.807692
|
def IsCppString(line):
"""Does line terminate so, that the next symbol is in string constant.
This function does not consider single-line nor multi-line comments.
Args:
line: is a partial line of code starting from the 0..n.
Returns:
True, if next character appended to 'line' is inside a
string constant.
"""
line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
|
[
"def",
"IsCppString",
"(",
"line",
")",
":",
"line",
"=",
"line",
".",
"replace",
"(",
"r'\\\\'",
",",
"'XX'",
")",
"# after this, \\\\\" does not match to \\\"",
"return",
"(",
"(",
"line",
".",
"count",
"(",
"'\"'",
")",
"-",
"line",
".",
"count",
"(",
"r'\\\"'",
")",
"-",
"line",
".",
"count",
"(",
"\"'\\\"'\"",
")",
")",
"&",
"1",
")",
"==",
"1"
] | 31.666667
| 26.666667
|
def combine(path1, path2):
# type: (Text, Text) -> Text
"""Join two paths together.
This is faster than :func:`~fs.path.join`, but only works when the
second path is relative, and there are no back references in either
path.
Arguments:
path1 (str): A PyFilesytem path.
path2 (str): A PyFilesytem path.
Returns:
str: The joint path.
Example:
>>> combine("foo/bar", "baz")
'foo/bar/baz'
"""
if not path1:
return path2.lstrip()
return "{}/{}".format(path1.rstrip("/"), path2.lstrip("/"))
|
[
"def",
"combine",
"(",
"path1",
",",
"path2",
")",
":",
"# type: (Text, Text) -> Text",
"if",
"not",
"path1",
":",
"return",
"path2",
".",
"lstrip",
"(",
")",
"return",
"\"{}/{}\"",
".",
"format",
"(",
"path1",
".",
"rstrip",
"(",
"\"/\"",
")",
",",
"path2",
".",
"lstrip",
"(",
"\"/\"",
")",
")"
] | 24.304348
| 21.173913
|
def cancel_all_builds_in_group(self, id, **kwargs):
"""
Cancel all builds running in the build group
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.cancel_all_builds_in_group(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build Configuration Set id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.cancel_all_builds_in_group_with_http_info(id, **kwargs)
else:
(data) = self.cancel_all_builds_in_group_with_http_info(id, **kwargs)
return data
|
[
"def",
"cancel_all_builds_in_group",
"(",
"self",
",",
"id",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'callback'",
")",
":",
"return",
"self",
".",
"cancel_all_builds_in_group_with_http_info",
"(",
"id",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"cancel_all_builds_in_group_with_http_info",
"(",
"id",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | 42.36
| 18.92
|
def generate_run_info():
"""
获取当前运行状态
"""
uptime = datetime.datetime.now() - datetime.datetime.fromtimestamp(glb.run_info.create_time())
memory_usage = glb.run_info.memory_info().rss
msg = '[当前时间] {now:%H:%M:%S}\n[运行时间] {uptime}\n[内存占用] {memory}\n[发送消息] {messages}'.format(
now=datetime.datetime.now(),
uptime=str(uptime).split('.')[0],
memory='{:.2f} MB'.format(memory_usage / 1024 ** 2),
messages=len(glb.wxbot.bot.messages)
)
return msg
|
[
"def",
"generate_run_info",
"(",
")",
":",
"uptime",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"-",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"glb",
".",
"run_info",
".",
"create_time",
"(",
")",
")",
"memory_usage",
"=",
"glb",
".",
"run_info",
".",
"memory_info",
"(",
")",
".",
"rss",
"msg",
"=",
"'[当前时间] {now:%H:%M:%S}\\n[运行时间] {uptime}\\n[内存占用] {memory}\\n[发送消息] {messages}'.format(",
"",
"",
"",
"now",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
",",
"uptime",
"=",
"str",
"(",
"uptime",
")",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
",",
"memory",
"=",
"'{:.2f} MB'",
".",
"format",
"(",
"memory_usage",
"/",
"1024",
"**",
"2",
")",
",",
"messages",
"=",
"len",
"(",
"glb",
".",
"wxbot",
".",
"bot",
".",
"messages",
")",
")",
"return",
"msg"
] | 37.769231
| 19.615385
|
def _DeepCopy(self, obj):
"""Creates an object copy by serializing/deserializing it.
RDFStruct.Copy() doesn't deep-copy repeated fields which may lead to
hard to catch bugs.
Args:
obj: RDFValue to be copied.
Returns:
A deep copy of the passed RDFValue.
"""
precondition.AssertType(obj, rdfvalue.RDFValue)
return obj.__class__.FromSerializedString(obj.SerializeToString())
|
[
"def",
"_DeepCopy",
"(",
"self",
",",
"obj",
")",
":",
"precondition",
".",
"AssertType",
"(",
"obj",
",",
"rdfvalue",
".",
"RDFValue",
")",
"return",
"obj",
".",
"__class__",
".",
"FromSerializedString",
"(",
"obj",
".",
"SerializeToString",
"(",
")",
")"
] | 27
| 22.133333
|
def robust_init(stochclass, tries, *args, **kwds):
"""Robust initialization of a Stochastic.
If the evaluation of the log-probability returns a ZeroProbability
error, due for example to a parent being outside of the support for
this Stochastic, the values of parents are randomly sampled until
a valid log-probability is obtained.
If the log-probability is still not valid after `tries` attempts, the
original ZeroProbability error is raised.
:Parameters:
stochclass : Stochastic, eg. Normal, Uniform, ...
The Stochastic distribution to instantiate.
tries : int
Maximum number of times parents will be sampled.
*args, **kwds
Positional and keyword arguments to declare the Stochastic variable.
:Example:
>>> lower = pymc.Uniform('lower', 0., 2., value=1.5, rseed=True)
>>> pymc.robust_init(pymc.Uniform, 100, 'data', lower=lower, upper=5, value=[1,2,3,4], observed=True)
"""
# Find the direct parents
stochs = [arg for arg in (list(args) + list(kwds.values()))
if isinstance(arg.__class__, StochasticMeta)]
# Find the extended parents
parents = stochs
for s in stochs:
parents.extend(s.extended_parents)
extended_parents = set(parents)
# Select the parents with a random method.
random_parents = [
p for p in extended_parents if p.rseed is True and hasattr(
p,
'random')]
for i in range(tries):
try:
return stochclass(*args, **kwds)
except ZeroProbability:
exc = sys.exc_info()
for parent in random_parents:
try:
parent.random()
except:
six.reraise(*exc)
six.reraise(*exc)
|
[
"def",
"robust_init",
"(",
"stochclass",
",",
"tries",
",",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"# Find the direct parents",
"stochs",
"=",
"[",
"arg",
"for",
"arg",
"in",
"(",
"list",
"(",
"args",
")",
"+",
"list",
"(",
"kwds",
".",
"values",
"(",
")",
")",
")",
"if",
"isinstance",
"(",
"arg",
".",
"__class__",
",",
"StochasticMeta",
")",
"]",
"# Find the extended parents",
"parents",
"=",
"stochs",
"for",
"s",
"in",
"stochs",
":",
"parents",
".",
"extend",
"(",
"s",
".",
"extended_parents",
")",
"extended_parents",
"=",
"set",
"(",
"parents",
")",
"# Select the parents with a random method.",
"random_parents",
"=",
"[",
"p",
"for",
"p",
"in",
"extended_parents",
"if",
"p",
".",
"rseed",
"is",
"True",
"and",
"hasattr",
"(",
"p",
",",
"'random'",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"tries",
")",
":",
"try",
":",
"return",
"stochclass",
"(",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
"except",
"ZeroProbability",
":",
"exc",
"=",
"sys",
".",
"exc_info",
"(",
")",
"for",
"parent",
"in",
"random_parents",
":",
"try",
":",
"parent",
".",
"random",
"(",
")",
"except",
":",
"six",
".",
"reraise",
"(",
"*",
"exc",
")",
"six",
".",
"reraise",
"(",
"*",
"exc",
")"
] | 33.192308
| 21
|
def to_json(self):
"""
:return: str
"""
json_dict = self.to_json_basic()
json_dict['wday'] = self._wday
json_dict['hour'] = self._hour
json_dict['min'] = self._min
return json.dumps(json_dict)
|
[
"def",
"to_json",
"(",
"self",
")",
":",
"json_dict",
"=",
"self",
".",
"to_json_basic",
"(",
")",
"json_dict",
"[",
"'wday'",
"]",
"=",
"self",
".",
"_wday",
"json_dict",
"[",
"'hour'",
"]",
"=",
"self",
".",
"_hour",
"json_dict",
"[",
"'min'",
"]",
"=",
"self",
".",
"_min",
"return",
"json",
".",
"dumps",
"(",
"json_dict",
")"
] | 27.666667
| 5.888889
|
def get_pixel(framebuf, x, y):
"""Get the color of a given pixel"""
index = (y >> 3) * framebuf.stride + x
offset = y & 0x07
return (framebuf.buf[index] >> offset) & 0x01
|
[
"def",
"get_pixel",
"(",
"framebuf",
",",
"x",
",",
"y",
")",
":",
"index",
"=",
"(",
"y",
">>",
"3",
")",
"*",
"framebuf",
".",
"stride",
"+",
"x",
"offset",
"=",
"y",
"&",
"0x07",
"return",
"(",
"framebuf",
".",
"buf",
"[",
"index",
"]",
">>",
"offset",
")",
"&",
"0x01"
] | 39.6
| 8.8
|
def name(self):
"""
The name for the window as displayed in the title bar and status bar.
"""
# Name, explicitely set for the pane.
if self.chosen_name:
return self.chosen_name
else:
# Name from the process running inside the pane.
name = self.process.get_name()
if name:
return os.path.basename(name)
return ''
|
[
"def",
"name",
"(",
"self",
")",
":",
"# Name, explicitely set for the pane.",
"if",
"self",
".",
"chosen_name",
":",
"return",
"self",
".",
"chosen_name",
"else",
":",
"# Name from the process running inside the pane.",
"name",
"=",
"self",
".",
"process",
".",
"get_name",
"(",
")",
"if",
"name",
":",
"return",
"os",
".",
"path",
".",
"basename",
"(",
"name",
")",
"return",
"''"
] | 29.928571
| 15.785714
|
def schedule(self):
"""Schedule the test items on the nodes
If the node's pending list is empty it is a new node which
needs to run all the tests. If the pending list is already
populated (by ``.add_node_collection()``) then it replaces a
dead node and we only need to run those tests.
"""
assert self.collection_is_completed
for node, pending in self.node2pending.items():
if node in self._started:
continue
if not pending:
pending[:] = range(len(self.node2collection[node]))
node.send_runtest_all()
node.shutdown()
else:
node.send_runtest_some(pending)
self._started.append(node)
|
[
"def",
"schedule",
"(",
"self",
")",
":",
"assert",
"self",
".",
"collection_is_completed",
"for",
"node",
",",
"pending",
"in",
"self",
".",
"node2pending",
".",
"items",
"(",
")",
":",
"if",
"node",
"in",
"self",
".",
"_started",
":",
"continue",
"if",
"not",
"pending",
":",
"pending",
"[",
":",
"]",
"=",
"range",
"(",
"len",
"(",
"self",
".",
"node2collection",
"[",
"node",
"]",
")",
")",
"node",
".",
"send_runtest_all",
"(",
")",
"node",
".",
"shutdown",
"(",
")",
"else",
":",
"node",
".",
"send_runtest_some",
"(",
"pending",
")",
"self",
".",
"_started",
".",
"append",
"(",
"node",
")"
] | 39.842105
| 14.473684
|
def populateFromFile(self, dataUrls, indexFiles):
"""
Populates this variant set using the specified lists of data
files and indexes. These must be in the same order, such that
the jth index file corresponds to the jth data file.
"""
assert len(dataUrls) == len(indexFiles)
for dataUrl, indexFile in zip(dataUrls, indexFiles):
varFile = pysam.VariantFile(dataUrl, index_filename=indexFile)
try:
self._populateFromVariantFile(varFile, dataUrl, indexFile)
finally:
varFile.close()
|
[
"def",
"populateFromFile",
"(",
"self",
",",
"dataUrls",
",",
"indexFiles",
")",
":",
"assert",
"len",
"(",
"dataUrls",
")",
"==",
"len",
"(",
"indexFiles",
")",
"for",
"dataUrl",
",",
"indexFile",
"in",
"zip",
"(",
"dataUrls",
",",
"indexFiles",
")",
":",
"varFile",
"=",
"pysam",
".",
"VariantFile",
"(",
"dataUrl",
",",
"index_filename",
"=",
"indexFile",
")",
"try",
":",
"self",
".",
"_populateFromVariantFile",
"(",
"varFile",
",",
"dataUrl",
",",
"indexFile",
")",
"finally",
":",
"varFile",
".",
"close",
"(",
")"
] | 45.384615
| 18
|
def boundless_vrt_doc(
src_dataset, nodata=None, background=None, hidenodata=False,
width=None, height=None, transform=None, bands=None):
"""Make a VRT XML document.
Parameters
----------
src_dataset : Dataset
The dataset to wrap.
background : Dataset, optional
A dataset that provides the optional VRT background. NB: this dataset
must have the same number of bands as the src_dataset.
Returns
-------
bytes
An ascii-encoded string (an ElementTree detail)
"""
nodata = nodata or src_dataset.nodata
width = width or src_dataset.width
height = height or src_dataset.height
transform = transform or src_dataset.transform
vrt = BaseVRT(width, height, src_dataset.crs, transform)
bidxs = src_dataset.indexes if bands is None else bands
for bidx in bidxs:
ci = src_dataset.colorinterp[bidx - 1]
block_shape = src_dataset.block_shapes[bidx - 1]
dtype = src_dataset.dtypes[bidx - 1]
band_element = vrt.add_band(dtype, bidx, ci.name, nodata=nodata, hidenodata=True)
if background is not None:
src_window = Window(0, 0, background.width, background.height)
dst_window = Window(0, 0, width, height)
vrt.add_band_simplesource(band_element, bidx, dtype, False, background.name,
width, height, block_shape[1], block_shape[0],
src_window, dst_window)
src_window = Window(0, 0, src_dataset.width, src_dataset.height)
xoff = (src_dataset.transform.xoff - transform.xoff) / transform.a
yoff = (src_dataset.transform.yoff - transform.yoff) / transform.e
xsize = src_dataset.width * src_dataset.transform.a / transform.a
ysize = src_dataset.height * src_dataset.transform.e / transform.e
dst_window = Window(xoff, yoff, xsize, ysize)
vrt.add_band_simplesource(band_element, bidx, dtype, False, src_dataset.name,
width, height, block_shape[1], block_shape[0],
src_window, dst_window, nodata=src_dataset.nodata)
if all(MaskFlags.per_dataset in flags for flags in src_dataset.mask_flag_enums):
mask_band = vrt.add_mask_band('Byte')
src_window = Window(0, 0, src_dataset.width, src_dataset.height)
xoff = (src_dataset.transform.xoff - transform.xoff) / transform.a
yoff = (src_dataset.transform.yoff - transform.yoff) / transform.e
xsize = src_dataset.width
ysize = src_dataset.height
dst_window = Window(xoff, yoff, xsize, ysize)
vrt.add_band_simplesource(mask_band, 'mask,1', 'Byte', False, src_dataset.name,
width, height, block_shape[1], block_shape[0], src_window, dst_window)
return vrt
|
[
"def",
"boundless_vrt_doc",
"(",
"src_dataset",
",",
"nodata",
"=",
"None",
",",
"background",
"=",
"None",
",",
"hidenodata",
"=",
"False",
",",
"width",
"=",
"None",
",",
"height",
"=",
"None",
",",
"transform",
"=",
"None",
",",
"bands",
"=",
"None",
")",
":",
"nodata",
"=",
"nodata",
"or",
"src_dataset",
".",
"nodata",
"width",
"=",
"width",
"or",
"src_dataset",
".",
"width",
"height",
"=",
"height",
"or",
"src_dataset",
".",
"height",
"transform",
"=",
"transform",
"or",
"src_dataset",
".",
"transform",
"vrt",
"=",
"BaseVRT",
"(",
"width",
",",
"height",
",",
"src_dataset",
".",
"crs",
",",
"transform",
")",
"bidxs",
"=",
"src_dataset",
".",
"indexes",
"if",
"bands",
"is",
"None",
"else",
"bands",
"for",
"bidx",
"in",
"bidxs",
":",
"ci",
"=",
"src_dataset",
".",
"colorinterp",
"[",
"bidx",
"-",
"1",
"]",
"block_shape",
"=",
"src_dataset",
".",
"block_shapes",
"[",
"bidx",
"-",
"1",
"]",
"dtype",
"=",
"src_dataset",
".",
"dtypes",
"[",
"bidx",
"-",
"1",
"]",
"band_element",
"=",
"vrt",
".",
"add_band",
"(",
"dtype",
",",
"bidx",
",",
"ci",
".",
"name",
",",
"nodata",
"=",
"nodata",
",",
"hidenodata",
"=",
"True",
")",
"if",
"background",
"is",
"not",
"None",
":",
"src_window",
"=",
"Window",
"(",
"0",
",",
"0",
",",
"background",
".",
"width",
",",
"background",
".",
"height",
")",
"dst_window",
"=",
"Window",
"(",
"0",
",",
"0",
",",
"width",
",",
"height",
")",
"vrt",
".",
"add_band_simplesource",
"(",
"band_element",
",",
"bidx",
",",
"dtype",
",",
"False",
",",
"background",
".",
"name",
",",
"width",
",",
"height",
",",
"block_shape",
"[",
"1",
"]",
",",
"block_shape",
"[",
"0",
"]",
",",
"src_window",
",",
"dst_window",
")",
"src_window",
"=",
"Window",
"(",
"0",
",",
"0",
",",
"src_dataset",
".",
"width",
",",
"src_dataset",
".",
"height",
")",
"xoff",
"=",
"(",
"src_dataset",
".",
"transform",
".",
"xoff",
"-",
"transform",
".",
"xoff",
")",
"/",
"transform",
".",
"a",
"yoff",
"=",
"(",
"src_dataset",
".",
"transform",
".",
"yoff",
"-",
"transform",
".",
"yoff",
")",
"/",
"transform",
".",
"e",
"xsize",
"=",
"src_dataset",
".",
"width",
"*",
"src_dataset",
".",
"transform",
".",
"a",
"/",
"transform",
".",
"a",
"ysize",
"=",
"src_dataset",
".",
"height",
"*",
"src_dataset",
".",
"transform",
".",
"e",
"/",
"transform",
".",
"e",
"dst_window",
"=",
"Window",
"(",
"xoff",
",",
"yoff",
",",
"xsize",
",",
"ysize",
")",
"vrt",
".",
"add_band_simplesource",
"(",
"band_element",
",",
"bidx",
",",
"dtype",
",",
"False",
",",
"src_dataset",
".",
"name",
",",
"width",
",",
"height",
",",
"block_shape",
"[",
"1",
"]",
",",
"block_shape",
"[",
"0",
"]",
",",
"src_window",
",",
"dst_window",
",",
"nodata",
"=",
"src_dataset",
".",
"nodata",
")",
"if",
"all",
"(",
"MaskFlags",
".",
"per_dataset",
"in",
"flags",
"for",
"flags",
"in",
"src_dataset",
".",
"mask_flag_enums",
")",
":",
"mask_band",
"=",
"vrt",
".",
"add_mask_band",
"(",
"'Byte'",
")",
"src_window",
"=",
"Window",
"(",
"0",
",",
"0",
",",
"src_dataset",
".",
"width",
",",
"src_dataset",
".",
"height",
")",
"xoff",
"=",
"(",
"src_dataset",
".",
"transform",
".",
"xoff",
"-",
"transform",
".",
"xoff",
")",
"/",
"transform",
".",
"a",
"yoff",
"=",
"(",
"src_dataset",
".",
"transform",
".",
"yoff",
"-",
"transform",
".",
"yoff",
")",
"/",
"transform",
".",
"e",
"xsize",
"=",
"src_dataset",
".",
"width",
"ysize",
"=",
"src_dataset",
".",
"height",
"dst_window",
"=",
"Window",
"(",
"xoff",
",",
"yoff",
",",
"xsize",
",",
"ysize",
")",
"vrt",
".",
"add_band_simplesource",
"(",
"mask_band",
",",
"'mask,1'",
",",
"'Byte'",
",",
"False",
",",
"src_dataset",
".",
"name",
",",
"width",
",",
"height",
",",
"block_shape",
"[",
"1",
"]",
",",
"block_shape",
"[",
"0",
"]",
",",
"src_window",
",",
"dst_window",
")",
"return",
"vrt"
] | 46.85
| 25.45
|
def _enable_read_access(self):
"""! @brief Ensure flash is accessible by initing the algo for verify.
Not all flash memories are always accessible. For instance, external QSPI. Initing the
flash algo for the VERIFY operation is the canonical way to ensure that the flash is
memory mapped and accessible.
"""
if not self.algo_inited_for_read:
self.flash.init(self.flash.Operation.VERIFY)
self.algo_inited_for_read = True
|
[
"def",
"_enable_read_access",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"algo_inited_for_read",
":",
"self",
".",
"flash",
".",
"init",
"(",
"self",
".",
"flash",
".",
"Operation",
".",
"VERIFY",
")",
"self",
".",
"algo_inited_for_read",
"=",
"True"
] | 49.1
| 17.2
|
def heartbeat(self):
'''Record the current worker state in the registry.
This records the worker's current mode, plus the contents of
:meth:`environment`, in the data store for inspection by others.
:returns mode: Current mode, as :meth:`TaskMaster.get_mode`
'''
mode = self.task_master.get_mode()
self.task_master.worker_heartbeat(self.worker_id, mode,
self.lifetime, self.environment(),
parent=self.parent)
return mode
|
[
"def",
"heartbeat",
"(",
"self",
")",
":",
"mode",
"=",
"self",
".",
"task_master",
".",
"get_mode",
"(",
")",
"self",
".",
"task_master",
".",
"worker_heartbeat",
"(",
"self",
".",
"worker_id",
",",
"mode",
",",
"self",
".",
"lifetime",
",",
"self",
".",
"environment",
"(",
")",
",",
"parent",
"=",
"self",
".",
"parent",
")",
"return",
"mode"
] | 39.857143
| 27
|
def read_info(self):
'''
:rtype: :class:`~kitty.data.data_manager.SessionInfo`
:return: current session info
'''
self.select('*')
row = self._cursor.fetchone()
if not row:
return None
info_d = self.row_to_dict(row)
return SessionInfo.from_dict(info_d)
|
[
"def",
"read_info",
"(",
"self",
")",
":",
"self",
".",
"select",
"(",
"'*'",
")",
"row",
"=",
"self",
".",
"_cursor",
".",
"fetchone",
"(",
")",
"if",
"not",
"row",
":",
"return",
"None",
"info_d",
"=",
"self",
".",
"row_to_dict",
"(",
"row",
")",
"return",
"SessionInfo",
".",
"from_dict",
"(",
"info_d",
")"
] | 29.545455
| 15
|
def apply(self, function: "function", *cols, axis=1, **kwargs):
"""
Apply a function on columns values
:param function: a function to apply to the columns
:type function: function
:param cols: columns names
:type cols: name of columns
:param axis: index (0) or column (1), default is 1
:param kwargs: arguments for ``df.apply``
:type kwargs: optional
:example:
.. code-block:: python
def f(row):
# add a new column with a value
row["newcol"] = row["Col 1] + 1
return row
ds.apply(f)
"""
try:
if len(cols) == 0:
self.df = self.df.apply(function, axis=axis, **kwargs)
else:
cols = list(cols)
self.df[cols] = self.df[cols].apply(function, **kwargs)
except Exception as e:
self.err(e, "Can not apply function")
|
[
"def",
"apply",
"(",
"self",
",",
"function",
":",
"\"function\"",
",",
"*",
"cols",
",",
"axis",
"=",
"1",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"if",
"len",
"(",
"cols",
")",
"==",
"0",
":",
"self",
".",
"df",
"=",
"self",
".",
"df",
".",
"apply",
"(",
"function",
",",
"axis",
"=",
"axis",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"cols",
"=",
"list",
"(",
"cols",
")",
"self",
".",
"df",
"[",
"cols",
"]",
"=",
"self",
".",
"df",
"[",
"cols",
"]",
".",
"apply",
"(",
"function",
",",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"err",
"(",
"e",
",",
"\"Can not apply function\"",
")"
] | 34.741935
| 17.903226
|
def _get_cu_and_fu_status(self):
"""Submit GET request to update information."""
# adjust headers
headers = HEADERS.copy()
headers['Accept'] = '*/*'
headers['X-Requested-With'] = 'XMLHttpRequest'
headers['X-CSRFToken'] = self._parent.csrftoken
args = '?controller_serial=' + self.serial \
+ '&faucet_serial=' + self.faucet.serial
req = self._parent.client.get(STATUS_ENDPOINT + args,
headers=headers)
# token probably expired, then try again
if req.status_code == 403:
self._parent.login()
self.update()
elif req.status_code == 200:
self.attributes = req.json()
else:
req.raise_for_status()
|
[
"def",
"_get_cu_and_fu_status",
"(",
"self",
")",
":",
"# adjust headers",
"headers",
"=",
"HEADERS",
".",
"copy",
"(",
")",
"headers",
"[",
"'Accept'",
"]",
"=",
"'*/*'",
"headers",
"[",
"'X-Requested-With'",
"]",
"=",
"'XMLHttpRequest'",
"headers",
"[",
"'X-CSRFToken'",
"]",
"=",
"self",
".",
"_parent",
".",
"csrftoken",
"args",
"=",
"'?controller_serial='",
"+",
"self",
".",
"serial",
"+",
"'&faucet_serial='",
"+",
"self",
".",
"faucet",
".",
"serial",
"req",
"=",
"self",
".",
"_parent",
".",
"client",
".",
"get",
"(",
"STATUS_ENDPOINT",
"+",
"args",
",",
"headers",
"=",
"headers",
")",
"# token probably expired, then try again",
"if",
"req",
".",
"status_code",
"==",
"403",
":",
"self",
".",
"_parent",
".",
"login",
"(",
")",
"self",
".",
"update",
"(",
")",
"elif",
"req",
".",
"status_code",
"==",
"200",
":",
"self",
".",
"attributes",
"=",
"req",
".",
"json",
"(",
")",
"else",
":",
"req",
".",
"raise_for_status",
"(",
")"
] | 34.954545
| 14.727273
|
def only(self, *fields):
"""
Essentially, the opposite of defer. Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
clone = self._clone()
clone._fields=fields
return clone
|
[
"def",
"only",
"(",
"self",
",",
"*",
"fields",
")",
":",
"clone",
"=",
"self",
".",
"_clone",
"(",
")",
"clone",
".",
"_fields",
"=",
"fields",
"return",
"clone"
] | 35.777778
| 15.333333
|
def ansi_len(string):
"""Extra length due to any ANSI sequences in the string."""
return len(string) - wcswidth(re.compile(r'\x1b[^m]*m').sub('', string))
|
[
"def",
"ansi_len",
"(",
"string",
")",
":",
"return",
"len",
"(",
"string",
")",
"-",
"wcswidth",
"(",
"re",
".",
"compile",
"(",
"r'\\x1b[^m]*m'",
")",
".",
"sub",
"(",
"''",
",",
"string",
")",
")"
] | 53.333333
| 18.333333
|
def simulate(protocol_file,
propagate_logs=False,
log_level='warning') -> List[Mapping[str, Any]]:
"""
Simulate the protocol itself.
This is a one-stop function to simulate a protocol, whether python or json,
no matter the api version, from external (i.e. not bound up in other
internal server infrastructure) sources.
To simulate an opentrons protocol from other places, pass in a file like
object as protocol_file; this function either returns (if the simulation
has no problems) or raises an exception.
To call from the command line use either the autogenerated entrypoint
``opentrons_simulate`` (``opentrons_simulate.exe``, on windows) or
``python -m opentrons.simulate``.
The return value is the run log, a list of dicts that represent the
commands executed by the robot. Each dict has the following keys:
- ``level``: The depth at which this command is nested - if this an
aspirate inside a mix inside a transfer, for instance,
it would be 3.
- ``payload``: The command, its arguments, and how to format its text.
For more specific details see
:py:mod:`opentrons.commands`. To format a message from
a payload do ``payload['text'].format(**payload)``.
- ``logs``: Any log messages that occurred during execution of this
command, as a logging.LogRecord
:param file-like protocol_file: The protocol file to simulate.
:param propagate_logs: Whether this function should allow logs from the
Opentrons stack to propagate up to the root handler.
This can be useful if you're integrating this
function in a larger application, but most logs that
occur during protocol simulation are best associated
with the actions in the protocol that cause them.
:type propagate_logs: bool
:param log_level: The level of logs to capture in the runlog
:type log_level: 'debug', 'info', 'warning', or 'error'
:returns List[Dict[str, Dict[str, Any]]]: A run log for user output.
"""
stack_logger = logging.getLogger('opentrons')
stack_logger.propagate = propagate_logs
contents = protocol_file.read()
if opentrons.config.feature_flags.use_protocol_api_v2():
try:
execute_args = {'protocol_json': json.loads(contents)}
except json.JSONDecodeError:
execute_args = {'protocol_code': contents}
context = opentrons.protocol_api.contexts.ProtocolContext()
context.home()
scraper = CommandScraper(stack_logger, log_level, context.broker)
execute_args.update({'simulate': True,
'context': context})
opentrons.protocol_api.execute.run_protocol(**execute_args)
else:
try:
proto = json.loads(contents)
except json.JSONDecodeError:
proto = contents
opentrons.robot.disconnect()
scraper = CommandScraper(stack_logger, log_level,
opentrons.robot.broker)
if isinstance(proto, dict):
opentrons.protocols.execute_protocol(proto)
else:
exec(proto, {})
return scraper.commands
|
[
"def",
"simulate",
"(",
"protocol_file",
",",
"propagate_logs",
"=",
"False",
",",
"log_level",
"=",
"'warning'",
")",
"->",
"List",
"[",
"Mapping",
"[",
"str",
",",
"Any",
"]",
"]",
":",
"stack_logger",
"=",
"logging",
".",
"getLogger",
"(",
"'opentrons'",
")",
"stack_logger",
".",
"propagate",
"=",
"propagate_logs",
"contents",
"=",
"protocol_file",
".",
"read",
"(",
")",
"if",
"opentrons",
".",
"config",
".",
"feature_flags",
".",
"use_protocol_api_v2",
"(",
")",
":",
"try",
":",
"execute_args",
"=",
"{",
"'protocol_json'",
":",
"json",
".",
"loads",
"(",
"contents",
")",
"}",
"except",
"json",
".",
"JSONDecodeError",
":",
"execute_args",
"=",
"{",
"'protocol_code'",
":",
"contents",
"}",
"context",
"=",
"opentrons",
".",
"protocol_api",
".",
"contexts",
".",
"ProtocolContext",
"(",
")",
"context",
".",
"home",
"(",
")",
"scraper",
"=",
"CommandScraper",
"(",
"stack_logger",
",",
"log_level",
",",
"context",
".",
"broker",
")",
"execute_args",
".",
"update",
"(",
"{",
"'simulate'",
":",
"True",
",",
"'context'",
":",
"context",
"}",
")",
"opentrons",
".",
"protocol_api",
".",
"execute",
".",
"run_protocol",
"(",
"*",
"*",
"execute_args",
")",
"else",
":",
"try",
":",
"proto",
"=",
"json",
".",
"loads",
"(",
"contents",
")",
"except",
"json",
".",
"JSONDecodeError",
":",
"proto",
"=",
"contents",
"opentrons",
".",
"robot",
".",
"disconnect",
"(",
")",
"scraper",
"=",
"CommandScraper",
"(",
"stack_logger",
",",
"log_level",
",",
"opentrons",
".",
"robot",
".",
"broker",
")",
"if",
"isinstance",
"(",
"proto",
",",
"dict",
")",
":",
"opentrons",
".",
"protocols",
".",
"execute_protocol",
"(",
"proto",
")",
"else",
":",
"exec",
"(",
"proto",
",",
"{",
"}",
")",
"return",
"scraper",
".",
"commands"
] | 46.291667
| 22.652778
|
def _compute_intensity(ccube, bexpcube):
""" Compute the intensity map
"""
bexp_data = np.sqrt(bexpcube.data[0:-1, 0:] * bexpcube.data[1:, 0:])
intensity_data = ccube.data / bexp_data
intensity_map = HpxMap(intensity_data, ccube.hpx)
return intensity_map
|
[
"def",
"_compute_intensity",
"(",
"ccube",
",",
"bexpcube",
")",
":",
"bexp_data",
"=",
"np",
".",
"sqrt",
"(",
"bexpcube",
".",
"data",
"[",
"0",
":",
"-",
"1",
",",
"0",
":",
"]",
"*",
"bexpcube",
".",
"data",
"[",
"1",
":",
",",
"0",
":",
"]",
")",
"intensity_data",
"=",
"ccube",
".",
"data",
"/",
"bexp_data",
"intensity_map",
"=",
"HpxMap",
"(",
"intensity_data",
",",
"ccube",
".",
"hpx",
")",
"return",
"intensity_map"
] | 42.285714
| 10.285714
|
def from_ofxparse(data, institution):
"""Instantiate :py:class:`ofxclient.Account` subclass from ofxparse
module
:param data: an ofxparse account
:type data: An :py:class:`ofxparse.Account` object
:param institution: The parent institution of the account
:type institution: :py:class:`ofxclient.Institution` object
"""
description = data.desc if hasattr(data, 'desc') else None
if data.type == AccountType.Bank:
return BankAccount(
institution=institution,
number=data.account_id,
routing_number=data.routing_number,
account_type=data.account_type,
description=description)
elif data.type == AccountType.CreditCard:
return CreditCardAccount(
institution=institution,
number=data.account_id,
description=description)
elif data.type == AccountType.Investment:
return BrokerageAccount(
institution=institution,
number=data.account_id,
broker_id=data.brokerid,
description=description)
raise ValueError("unknown account type: %s" % data.type)
|
[
"def",
"from_ofxparse",
"(",
"data",
",",
"institution",
")",
":",
"description",
"=",
"data",
".",
"desc",
"if",
"hasattr",
"(",
"data",
",",
"'desc'",
")",
"else",
"None",
"if",
"data",
".",
"type",
"==",
"AccountType",
".",
"Bank",
":",
"return",
"BankAccount",
"(",
"institution",
"=",
"institution",
",",
"number",
"=",
"data",
".",
"account_id",
",",
"routing_number",
"=",
"data",
".",
"routing_number",
",",
"account_type",
"=",
"data",
".",
"account_type",
",",
"description",
"=",
"description",
")",
"elif",
"data",
".",
"type",
"==",
"AccountType",
".",
"CreditCard",
":",
"return",
"CreditCardAccount",
"(",
"institution",
"=",
"institution",
",",
"number",
"=",
"data",
".",
"account_id",
",",
"description",
"=",
"description",
")",
"elif",
"data",
".",
"type",
"==",
"AccountType",
".",
"Investment",
":",
"return",
"BrokerageAccount",
"(",
"institution",
"=",
"institution",
",",
"number",
"=",
"data",
".",
"account_id",
",",
"broker_id",
"=",
"data",
".",
"brokerid",
",",
"description",
"=",
"description",
")",
"raise",
"ValueError",
"(",
"\"unknown account type: %s\"",
"%",
"data",
".",
"type",
")"
] | 41.166667
| 9.5
|
def get_yml_content(file_path):
'''Load yaml file content'''
try:
with open(file_path, 'r') as file:
return yaml.load(file, Loader=yaml.Loader)
except yaml.scanner.ScannerError as err:
print_error('yaml file format error!')
exit(1)
except Exception as exception:
print_error(exception)
exit(1)
|
[
"def",
"get_yml_content",
"(",
"file_path",
")",
":",
"try",
":",
"with",
"open",
"(",
"file_path",
",",
"'r'",
")",
"as",
"file",
":",
"return",
"yaml",
".",
"load",
"(",
"file",
",",
"Loader",
"=",
"yaml",
".",
"Loader",
")",
"except",
"yaml",
".",
"scanner",
".",
"ScannerError",
"as",
"err",
":",
"print_error",
"(",
"'yaml file format error!'",
")",
"exit",
"(",
"1",
")",
"except",
"Exception",
"as",
"exception",
":",
"print_error",
"(",
"exception",
")",
"exit",
"(",
"1",
")"
] | 31.909091
| 12.818182
|
def ensure_ndarray(ndarray_or_adjusted_array):
"""
Return the input as a numpy ndarray.
This is a no-op if the input is already an ndarray. If the input is an
adjusted_array, this extracts a read-only view of its internal data buffer.
Parameters
----------
ndarray_or_adjusted_array : numpy.ndarray | zipline.data.adjusted_array
Returns
-------
out : The input, converted to an ndarray.
"""
if isinstance(ndarray_or_adjusted_array, ndarray):
return ndarray_or_adjusted_array
elif isinstance(ndarray_or_adjusted_array, AdjustedArray):
return ndarray_or_adjusted_array.data
else:
raise TypeError(
"Can't convert %s to ndarray" %
type(ndarray_or_adjusted_array).__name__
)
|
[
"def",
"ensure_ndarray",
"(",
"ndarray_or_adjusted_array",
")",
":",
"if",
"isinstance",
"(",
"ndarray_or_adjusted_array",
",",
"ndarray",
")",
":",
"return",
"ndarray_or_adjusted_array",
"elif",
"isinstance",
"(",
"ndarray_or_adjusted_array",
",",
"AdjustedArray",
")",
":",
"return",
"ndarray_or_adjusted_array",
".",
"data",
"else",
":",
"raise",
"TypeError",
"(",
"\"Can't convert %s to ndarray\"",
"%",
"type",
"(",
"ndarray_or_adjusted_array",
")",
".",
"__name__",
")"
] | 31.75
| 20.166667
|
def join_room(self, room_id_or_alias):
""" Join a room.
Args:
room_id_or_alias (str): Room ID or an alias.
Returns:
Room
Raises:
MatrixRequestError
"""
response = self.api.join_room(room_id_or_alias)
room_id = (
response["room_id"] if "room_id" in response else room_id_or_alias
)
return self._mkroom(room_id)
|
[
"def",
"join_room",
"(",
"self",
",",
"room_id_or_alias",
")",
":",
"response",
"=",
"self",
".",
"api",
".",
"join_room",
"(",
"room_id_or_alias",
")",
"room_id",
"=",
"(",
"response",
"[",
"\"room_id\"",
"]",
"if",
"\"room_id\"",
"in",
"response",
"else",
"room_id_or_alias",
")",
"return",
"self",
".",
"_mkroom",
"(",
"room_id",
")"
] | 24.470588
| 21
|
async def _connect(self) -> "Connection":
"""Connect to the actual sqlite database."""
if self._connection is None:
self._connection = await self._execute(self._connector)
return self
|
[
"async",
"def",
"_connect",
"(",
"self",
")",
"->",
"\"Connection\"",
":",
"if",
"self",
".",
"_connection",
"is",
"None",
":",
"self",
".",
"_connection",
"=",
"await",
"self",
".",
"_execute",
"(",
"self",
".",
"_connector",
")",
"return",
"self"
] | 43
| 10.6
|
def from_url(cls, url):
"""
Construct a PostgresConfig from a URL.
"""
parsed = urlparse(url)
return cls(
username=parsed.username,
password=parsed.password,
hostname=parsed.hostname,
port=parsed.port,
database=parsed.path.lstrip('/'),
# Like parse_qs, but produces a scalar per key, instead of a list:
query_params=dict(param.split('=')
for param in parsed.query.split('&'))
if parsed.query else {},
)
|
[
"def",
"from_url",
"(",
"cls",
",",
"url",
")",
":",
"parsed",
"=",
"urlparse",
"(",
"url",
")",
"return",
"cls",
"(",
"username",
"=",
"parsed",
".",
"username",
",",
"password",
"=",
"parsed",
".",
"password",
",",
"hostname",
"=",
"parsed",
".",
"hostname",
",",
"port",
"=",
"parsed",
".",
"port",
",",
"database",
"=",
"parsed",
".",
"path",
".",
"lstrip",
"(",
"'/'",
")",
",",
"# Like parse_qs, but produces a scalar per key, instead of a list:",
"query_params",
"=",
"dict",
"(",
"param",
".",
"split",
"(",
"'='",
")",
"for",
"param",
"in",
"parsed",
".",
"query",
".",
"split",
"(",
"'&'",
")",
")",
"if",
"parsed",
".",
"query",
"else",
"{",
"}",
",",
")"
] | 35.0625
| 11.5625
|
def handle(self):
"""
Executes the actual Stratum program.
"""
self.output = PyStratumStyle(self.input, self.output)
command = self.get_application().find('constants')
ret = command.execute(self.input, self.output)
if ret:
return ret
command = self.get_application().find('loader')
ret = command.execute(self.input, self.output)
if ret:
return ret
command = self.get_application().find('wrapper')
ret = command.execute(self.input, self.output)
self.output.writeln('')
return ret
|
[
"def",
"handle",
"(",
"self",
")",
":",
"self",
".",
"output",
"=",
"PyStratumStyle",
"(",
"self",
".",
"input",
",",
"self",
".",
"output",
")",
"command",
"=",
"self",
".",
"get_application",
"(",
")",
".",
"find",
"(",
"'constants'",
")",
"ret",
"=",
"command",
".",
"execute",
"(",
"self",
".",
"input",
",",
"self",
".",
"output",
")",
"if",
"ret",
":",
"return",
"ret",
"command",
"=",
"self",
".",
"get_application",
"(",
")",
".",
"find",
"(",
"'loader'",
")",
"ret",
"=",
"command",
".",
"execute",
"(",
"self",
".",
"input",
",",
"self",
".",
"output",
")",
"if",
"ret",
":",
"return",
"ret",
"command",
"=",
"self",
".",
"get_application",
"(",
")",
".",
"find",
"(",
"'wrapper'",
")",
"ret",
"=",
"command",
".",
"execute",
"(",
"self",
".",
"input",
",",
"self",
".",
"output",
")",
"self",
".",
"output",
".",
"writeln",
"(",
"''",
")",
"return",
"ret"
] | 27.181818
| 20.727273
|
def align_cell(fmt, elem, width):
"""Returns an aligned element."""
if fmt == "<":
return elem + ' ' * (width - len(elem))
if fmt == ">":
return ' ' * (width - len(elem)) + elem
return elem
|
[
"def",
"align_cell",
"(",
"fmt",
",",
"elem",
",",
"width",
")",
":",
"if",
"fmt",
"==",
"\"<\"",
":",
"return",
"elem",
"+",
"' '",
"*",
"(",
"width",
"-",
"len",
"(",
"elem",
")",
")",
"if",
"fmt",
"==",
"\">\"",
":",
"return",
"' '",
"*",
"(",
"width",
"-",
"len",
"(",
"elem",
")",
")",
"+",
"elem",
"return",
"elem"
] | 30.714286
| 12.857143
|
def version_tuple(self):
"""tuple[int]: version tuple or None if version is not set or invalid."""
try:
return tuple([int(digit, 10) for digit in self.version.split('.')])
except (AttributeError, TypeError, ValueError):
return None
|
[
"def",
"version_tuple",
"(",
"self",
")",
":",
"try",
":",
"return",
"tuple",
"(",
"[",
"int",
"(",
"digit",
",",
"10",
")",
"for",
"digit",
"in",
"self",
".",
"version",
".",
"split",
"(",
"'.'",
")",
"]",
")",
"except",
"(",
"AttributeError",
",",
"TypeError",
",",
"ValueError",
")",
":",
"return",
"None"
] | 41.666667
| 19.166667
|
def draw_variable(loc, scale, shape, skewness, nsims):
""" Draws random variables from Skew t distribution
Parameters
----------
loc : float
location parameter for the distribution
scale : float
scale parameter for the distribution
shape : float
tail thickness parameter for the distribution
skewness : float
skewness parameter for the distribution
nsims : int or list
number of draws to take from the distribution
Returns
----------
- Random draws from the distribution
"""
return loc + scale*Skewt.rvs(shape, skewness, nsims)
|
[
"def",
"draw_variable",
"(",
"loc",
",",
"scale",
",",
"shape",
",",
"skewness",
",",
"nsims",
")",
":",
"return",
"loc",
"+",
"scale",
"*",
"Skewt",
".",
"rvs",
"(",
"shape",
",",
"skewness",
",",
"nsims",
")"
] | 26.92
| 20.84
|
def _set_mpls_interface(self, v, load=False):
"""
Setter method for mpls_interface, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_mpls_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mpls_interface() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("interface_type interface_name",mpls_interface.mpls_interface, yang_name="mpls-interface", rest_name="mpls-interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-type interface-name', extensions={u'tailf-common': {u'info': u'Define MPLS Interface', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'callpoint': u'MplsInterface', u'cli-mode-name': u'config-router-mpls-if-$(interface-type)-$(interface-name)'}}), is_container='list', yang_name="mpls-interface", rest_name="mpls-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Define MPLS Interface', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'callpoint': u'MplsInterface', u'cli-mode-name': u'config-router-mpls-if-$(interface-type)-$(interface-name)'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mpls_interface must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("interface_type interface_name",mpls_interface.mpls_interface, yang_name="mpls-interface", rest_name="mpls-interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-type interface-name', extensions={u'tailf-common': {u'info': u'Define MPLS Interface', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'callpoint': u'MplsInterface', u'cli-mode-name': u'config-router-mpls-if-$(interface-type)-$(interface-name)'}}), is_container='list', yang_name="mpls-interface", rest_name="mpls-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Define MPLS Interface', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'callpoint': u'MplsInterface', u'cli-mode-name': u'config-router-mpls-if-$(interface-type)-$(interface-name)'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)""",
})
self.__mpls_interface = t
if hasattr(self, '_set'):
self._set()
|
[
"def",
"_set_mpls_interface",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"YANGListType",
"(",
"\"interface_type interface_name\"",
",",
"mpls_interface",
".",
"mpls_interface",
",",
"yang_name",
"=",
"\"mpls-interface\"",
",",
"rest_name",
"=",
"\"mpls-interface\"",
",",
"parent",
"=",
"self",
",",
"is_container",
"=",
"'list'",
",",
"user_ordered",
"=",
"False",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"yang_keys",
"=",
"'interface-type interface-name'",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Define MPLS Interface'",
",",
"u'cli-suppress-list-no'",
":",
"None",
",",
"u'cli-no-key-completion'",
":",
"None",
",",
"u'callpoint'",
":",
"u'MplsInterface'",
",",
"u'cli-mode-name'",
":",
"u'config-router-mpls-if-$(interface-type)-$(interface-name)'",
"}",
"}",
")",
",",
"is_container",
"=",
"'list'",
",",
"yang_name",
"=",
"\"mpls-interface\"",
",",
"rest_name",
"=",
"\"mpls-interface\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Define MPLS Interface'",
",",
"u'cli-suppress-list-no'",
":",
"None",
",",
"u'cli-no-key-completion'",
":",
"None",
",",
"u'callpoint'",
":",
"u'MplsInterface'",
",",
"u'cli-mode-name'",
":",
"u'config-router-mpls-if-$(interface-type)-$(interface-name)'",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-mpls'",
",",
"defining_module",
"=",
"'brocade-mpls'",
",",
"yang_type",
"=",
"'list'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"mpls_interface must be of a type compatible with list\"\"\"",
",",
"'defined-type'",
":",
"\"list\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=YANGListType(\"interface_type interface_name\",mpls_interface.mpls_interface, yang_name=\"mpls-interface\", rest_name=\"mpls-interface\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-type interface-name', extensions={u'tailf-common': {u'info': u'Define MPLS Interface', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'callpoint': u'MplsInterface', u'cli-mode-name': u'config-router-mpls-if-$(interface-type)-$(interface-name)'}}), is_container='list', yang_name=\"mpls-interface\", rest_name=\"mpls-interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Define MPLS Interface', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'callpoint': u'MplsInterface', u'cli-mode-name': u'config-router-mpls-if-$(interface-type)-$(interface-name)'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__mpls_interface",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | 131.636364
| 63.954545
|
def forced_insert(self):
"""
Insert tokens if self.insert_till hasn't been reached yet
Will respect self.inserted_line and make sure token is inserted before it
Returns True if it appends anything or if it reached the insert_till token
"""
# If we have any tokens we are waiting for
if self.insert_till:
# Determine where to append this token
append_at = -1
if self.inserted_line:
append_at = -self.inserted_line+1
# Reset insert_till if we found it
if self.current.tokenum == self.insert_till[0] and self.current.value == self.insert_till[1]:
self.insert_till = None
else:
# Adjust self.adjust_indent_at to take into account the new token
for index, value in enumerate(self.adjust_indent_at):
if value < len(self.result) - append_at:
self.adjust_indent_at[index] = value + 1
# Insert the new token
self.result.insert(append_at, (self.current.tokenum, self.current.value))
# We appended the token
return True
|
[
"def",
"forced_insert",
"(",
"self",
")",
":",
"# If we have any tokens we are waiting for",
"if",
"self",
".",
"insert_till",
":",
"# Determine where to append this token",
"append_at",
"=",
"-",
"1",
"if",
"self",
".",
"inserted_line",
":",
"append_at",
"=",
"-",
"self",
".",
"inserted_line",
"+",
"1",
"# Reset insert_till if we found it",
"if",
"self",
".",
"current",
".",
"tokenum",
"==",
"self",
".",
"insert_till",
"[",
"0",
"]",
"and",
"self",
".",
"current",
".",
"value",
"==",
"self",
".",
"insert_till",
"[",
"1",
"]",
":",
"self",
".",
"insert_till",
"=",
"None",
"else",
":",
"# Adjust self.adjust_indent_at to take into account the new token",
"for",
"index",
",",
"value",
"in",
"enumerate",
"(",
"self",
".",
"adjust_indent_at",
")",
":",
"if",
"value",
"<",
"len",
"(",
"self",
".",
"result",
")",
"-",
"append_at",
":",
"self",
".",
"adjust_indent_at",
"[",
"index",
"]",
"=",
"value",
"+",
"1",
"# Insert the new token",
"self",
".",
"result",
".",
"insert",
"(",
"append_at",
",",
"(",
"self",
".",
"current",
".",
"tokenum",
",",
"self",
".",
"current",
".",
"value",
")",
")",
"# We appended the token",
"return",
"True"
] | 44.037037
| 22.185185
|
def getOrCreateForeignKey(self, model_class, field_name):
"""
Return related random object to set as ForeignKey.
"""
# Getting related object type
# Eg: <django.db.models.fields.related.ForeignKey: test_ForeignKey>
instance = getattr(model_class, field_name).field
# Getting the model name by instance to find/create first id/pk.
# Eg: <class 'django.contrib.auth.models.User'>
related_model = instance.related_model().__class__
# Trying to get random id from queryset.
objects = related_model.objects.all()
if objects.exists():
return self.randomize(objects)
# Returning first object from tuple `(<User: user_name>, False)`
return related_model.objects.get_or_create(pk=1)[0]
|
[
"def",
"getOrCreateForeignKey",
"(",
"self",
",",
"model_class",
",",
"field_name",
")",
":",
"# Getting related object type",
"# Eg: <django.db.models.fields.related.ForeignKey: test_ForeignKey>",
"instance",
"=",
"getattr",
"(",
"model_class",
",",
"field_name",
")",
".",
"field",
"# Getting the model name by instance to find/create first id/pk.",
"# Eg: <class 'django.contrib.auth.models.User'>",
"related_model",
"=",
"instance",
".",
"related_model",
"(",
")",
".",
"__class__",
"# Trying to get random id from queryset.",
"objects",
"=",
"related_model",
".",
"objects",
".",
"all",
"(",
")",
"if",
"objects",
".",
"exists",
"(",
")",
":",
"return",
"self",
".",
"randomize",
"(",
"objects",
")",
"# Returning first object from tuple `(<User: user_name>, False)`",
"return",
"related_model",
".",
"objects",
".",
"get_or_create",
"(",
"pk",
"=",
"1",
")",
"[",
"0",
"]"
] | 41.315789
| 18.578947
|
def nscolor_from_hex(hex_string):
"""
Convert given hex color to NSColor.
:hex_string: Hex code of the color as #RGB or #RRGGBB
"""
hex_string = hex_string[1:] # Remove leading hash
if len(hex_string) == 3:
hex_string = ''.join([c*2 for c in hex_string]) # 3-digit to 6-digit
hex_int = int(hex_string, 16)
rgb = (
(hex_int >> 16) & 0xff, # Red byte
(hex_int >> 8) & 0xff, # Blue byte
(hex_int) & 0xff # Green byte
)
rgb = [i / 255.0 for i in rgb] # Normalize to range(0.0, 1.0)
return AppKit.NSColor.colorWithSRGBRed_green_blue_alpha_(rgb[0], rgb[1], rgb[2], 1.0)
|
[
"def",
"nscolor_from_hex",
"(",
"hex_string",
")",
":",
"hex_string",
"=",
"hex_string",
"[",
"1",
":",
"]",
"# Remove leading hash",
"if",
"len",
"(",
"hex_string",
")",
"==",
"3",
":",
"hex_string",
"=",
"''",
".",
"join",
"(",
"[",
"c",
"*",
"2",
"for",
"c",
"in",
"hex_string",
"]",
")",
"# 3-digit to 6-digit",
"hex_int",
"=",
"int",
"(",
"hex_string",
",",
"16",
")",
"rgb",
"=",
"(",
"(",
"hex_int",
">>",
"16",
")",
"&",
"0xff",
",",
"# Red byte",
"(",
"hex_int",
">>",
"8",
")",
"&",
"0xff",
",",
"# Blue byte",
"(",
"hex_int",
")",
"&",
"0xff",
"# Green byte",
")",
"rgb",
"=",
"[",
"i",
"/",
"255.0",
"for",
"i",
"in",
"rgb",
"]",
"# Normalize to range(0.0, 1.0)",
"return",
"AppKit",
".",
"NSColor",
".",
"colorWithSRGBRed_green_blue_alpha_",
"(",
"rgb",
"[",
"0",
"]",
",",
"rgb",
"[",
"1",
"]",
",",
"rgb",
"[",
"2",
"]",
",",
"1.0",
")"
] | 35.65
| 21.95
|
def p_function(self, p):
'function : FUNCTION width ID SEMICOLON function_statement ENDFUNCTION'
p[0] = Function(p[3], p[2], p[5], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1))
|
[
"def",
"p_function",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"Function",
"(",
"p",
"[",
"3",
"]",
",",
"p",
"[",
"2",
"]",
",",
"p",
"[",
"5",
"]",
",",
"lineno",
"=",
"p",
".",
"lineno",
"(",
"1",
")",
")",
"p",
".",
"set_lineno",
"(",
"0",
",",
"p",
".",
"lineno",
"(",
"1",
")",
")"
] | 50
| 20
|
def explode_contact_groups_into_contacts(item, contactgroups):
"""
Get all contacts of contact_groups and put them in contacts container
:param item: item where have contact_groups property
:type item: object
:param contactgroups: all contactgroups object
:type contactgroups: alignak.objects.contactgroup.Contactgroups
:return: None
"""
if not hasattr(item, 'contact_groups'):
return
# TODO : See if we can remove this if
cgnames = ''
if item.contact_groups:
if isinstance(item.contact_groups, list):
cgnames = item.contact_groups
else:
cgnames = item.contact_groups.split(',')
cgnames = strip_and_uniq(cgnames)
for cgname in cgnames:
contactgroup = contactgroups.find_by_name(cgname)
if not contactgroup:
item.add_error("The contact group '%s' defined on the %s '%s' do not exist"
% (cgname, item.__class__.my_type, item.get_name()))
continue
cnames = contactgroups.get_members_of_group(cgname)
# We add contacts into our contacts
if cnames:
if hasattr(item, 'contacts'):
# Fix #1054 - bad contact explosion
# item.contacts.extend(cnames)
item.contacts = item.contacts + cnames
else:
item.contacts = cnames
|
[
"def",
"explode_contact_groups_into_contacts",
"(",
"item",
",",
"contactgroups",
")",
":",
"if",
"not",
"hasattr",
"(",
"item",
",",
"'contact_groups'",
")",
":",
"return",
"# TODO : See if we can remove this if",
"cgnames",
"=",
"''",
"if",
"item",
".",
"contact_groups",
":",
"if",
"isinstance",
"(",
"item",
".",
"contact_groups",
",",
"list",
")",
":",
"cgnames",
"=",
"item",
".",
"contact_groups",
"else",
":",
"cgnames",
"=",
"item",
".",
"contact_groups",
".",
"split",
"(",
"','",
")",
"cgnames",
"=",
"strip_and_uniq",
"(",
"cgnames",
")",
"for",
"cgname",
"in",
"cgnames",
":",
"contactgroup",
"=",
"contactgroups",
".",
"find_by_name",
"(",
"cgname",
")",
"if",
"not",
"contactgroup",
":",
"item",
".",
"add_error",
"(",
"\"The contact group '%s' defined on the %s '%s' do not exist\"",
"%",
"(",
"cgname",
",",
"item",
".",
"__class__",
".",
"my_type",
",",
"item",
".",
"get_name",
"(",
")",
")",
")",
"continue",
"cnames",
"=",
"contactgroups",
".",
"get_members_of_group",
"(",
"cgname",
")",
"# We add contacts into our contacts",
"if",
"cnames",
":",
"if",
"hasattr",
"(",
"item",
",",
"'contacts'",
")",
":",
"# Fix #1054 - bad contact explosion",
"# item.contacts.extend(cnames)",
"item",
".",
"contacts",
"=",
"item",
".",
"contacts",
"+",
"cnames",
"else",
":",
"item",
".",
"contacts",
"=",
"cnames"
] | 41.388889
| 17.333333
|
def combine_files(self, f1, f2, f3):
"""
Combines the files 1 and 2 into 3.
"""
with open(os.path.join(self.datadir, f3), 'wb') as new_file:
with open(os.path.join(self.datadir, f1), 'rb') as file_1:
new_file.write(file_1.read())
with open(os.path.join(self.datadir, f2), 'rb') as file_2:
new_file.write(file_2.read())
|
[
"def",
"combine_files",
"(",
"self",
",",
"f1",
",",
"f2",
",",
"f3",
")",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"datadir",
",",
"f3",
")",
",",
"'wb'",
")",
"as",
"new_file",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"datadir",
",",
"f1",
")",
",",
"'rb'",
")",
"as",
"file_1",
":",
"new_file",
".",
"write",
"(",
"file_1",
".",
"read",
"(",
")",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"datadir",
",",
"f2",
")",
",",
"'rb'",
")",
"as",
"file_2",
":",
"new_file",
".",
"write",
"(",
"file_2",
".",
"read",
"(",
")",
")"
] | 44.666667
| 12
|
def rfft2d_freqs(h, w):
"""Computes 2D spectrum frequencies."""
fy = np.fft.fftfreq(h)[:, None]
# when we have an odd input dimension we need to keep one additional
# frequency and later cut off 1 pixel
if w % 2 == 1:
fx = np.fft.fftfreq(w)[: w // 2 + 2]
else:
fx = np.fft.fftfreq(w)[: w // 2 + 1]
return np.sqrt(fx * fx + fy * fy)
|
[
"def",
"rfft2d_freqs",
"(",
"h",
",",
"w",
")",
":",
"fy",
"=",
"np",
".",
"fft",
".",
"fftfreq",
"(",
"h",
")",
"[",
":",
",",
"None",
"]",
"# when we have an odd input dimension we need to keep one additional",
"# frequency and later cut off 1 pixel",
"if",
"w",
"%",
"2",
"==",
"1",
":",
"fx",
"=",
"np",
".",
"fft",
".",
"fftfreq",
"(",
"w",
")",
"[",
":",
"w",
"//",
"2",
"+",
"2",
"]",
"else",
":",
"fx",
"=",
"np",
".",
"fft",
".",
"fftfreq",
"(",
"w",
")",
"[",
":",
"w",
"//",
"2",
"+",
"1",
"]",
"return",
"np",
".",
"sqrt",
"(",
"fx",
"*",
"fx",
"+",
"fy",
"*",
"fy",
")"
] | 33.272727
| 14.454545
|
def _calculate(self, startingPercentage, endPercentage, startDate, endDate):
"""This is the error calculation function that gets called by :py:meth:`BaseErrorMeasure.get_error`.
Both parameters will be correct at this time.
:param float startingPercentage: Defines the start of the interval. This has to be a value in [0.0, 100.0].
It represents the value, where the error calculation should be started.
25.0 for example means that the first 25% of all calculated errors will be ignored.
:param float endPercentage: Defines the end of the interval. This has to be a value in [0.0, 100.0].
It represents the value, after which all error values will be ignored. 90.0 for example means that
the last 10% of all local errors will be ignored.
:param float startDate: Epoch representing the start date used for error calculation.
:param float endDate: Epoch representing the end date used in the error calculation.
:return: Returns a float representing the error.
:rtype: float
"""
# get the defined subset of error values
errorValues = self._get_error_values(startingPercentage, endPercentage, startDate, endDate)
errorValues = filter(lambda item: item is not None, errorValues)
return float(sum(errorValues)) / float(len(errorValues))
|
[
"def",
"_calculate",
"(",
"self",
",",
"startingPercentage",
",",
"endPercentage",
",",
"startDate",
",",
"endDate",
")",
":",
"# get the defined subset of error values",
"errorValues",
"=",
"self",
".",
"_get_error_values",
"(",
"startingPercentage",
",",
"endPercentage",
",",
"startDate",
",",
"endDate",
")",
"errorValues",
"=",
"filter",
"(",
"lambda",
"item",
":",
"item",
"is",
"not",
"None",
",",
"errorValues",
")",
"return",
"float",
"(",
"sum",
"(",
"errorValues",
")",
")",
"/",
"float",
"(",
"len",
"(",
"errorValues",
")",
")"
] | 62.318182
| 36.818182
|
def connect(self):
"""Connect to the chatroom's server, sets up handlers, invites members as needed."""
for m in self.params['MEMBERS']:
m['ONLINE'] = 0
m.setdefault('STATUS', 'INVITED')
self.client = xmpp.Client(self.jid.getDomain(), debug=[])
conn = self.client.connect(server=self.params['SERVER'])
if not conn:
raise Exception("could not connect to server")
auth = self.client.auth(self.jid.getNode(), self.params['PASSWORD'])
if not auth:
raise Exception("could not authenticate as chat server")
#self.client.RegisterDisconnectHandler(self.on_disconnect)
self.client.RegisterHandler('message', self.on_message)
self.client.RegisterHandler('presence',self.on_presence)
self.client.sendInitPresence(requestRoster=0)
roster = self.client.getRoster()
for m in self.params['MEMBERS']:
self.invite_user(m, roster=roster)
|
[
"def",
"connect",
"(",
"self",
")",
":",
"for",
"m",
"in",
"self",
".",
"params",
"[",
"'MEMBERS'",
"]",
":",
"m",
"[",
"'ONLINE'",
"]",
"=",
"0",
"m",
".",
"setdefault",
"(",
"'STATUS'",
",",
"'INVITED'",
")",
"self",
".",
"client",
"=",
"xmpp",
".",
"Client",
"(",
"self",
".",
"jid",
".",
"getDomain",
"(",
")",
",",
"debug",
"=",
"[",
"]",
")",
"conn",
"=",
"self",
".",
"client",
".",
"connect",
"(",
"server",
"=",
"self",
".",
"params",
"[",
"'SERVER'",
"]",
")",
"if",
"not",
"conn",
":",
"raise",
"Exception",
"(",
"\"could not connect to server\"",
")",
"auth",
"=",
"self",
".",
"client",
".",
"auth",
"(",
"self",
".",
"jid",
".",
"getNode",
"(",
")",
",",
"self",
".",
"params",
"[",
"'PASSWORD'",
"]",
")",
"if",
"not",
"auth",
":",
"raise",
"Exception",
"(",
"\"could not authenticate as chat server\"",
")",
"#self.client.RegisterDisconnectHandler(self.on_disconnect)",
"self",
".",
"client",
".",
"RegisterHandler",
"(",
"'message'",
",",
"self",
".",
"on_message",
")",
"self",
".",
"client",
".",
"RegisterHandler",
"(",
"'presence'",
",",
"self",
".",
"on_presence",
")",
"self",
".",
"client",
".",
"sendInitPresence",
"(",
"requestRoster",
"=",
"0",
")",
"roster",
"=",
"self",
".",
"client",
".",
"getRoster",
"(",
")",
"for",
"m",
"in",
"self",
".",
"params",
"[",
"'MEMBERS'",
"]",
":",
"self",
".",
"invite_user",
"(",
"m",
",",
"roster",
"=",
"roster",
")"
] | 42
| 20.173913
|
def dispatch():
"""
This methods runs the wheel. It is used to connect signal with their handlers, based on the aliases.
:return:
"""
aliases = SignalDispatcher.signals.keys()
for alias in aliases:
handlers = SignalDispatcher.handlers.get(alias)
signal = SignalDispatcher.signals.get(alias)
if signal is None or handlers.__len__() == 0:
continue
for handler in handlers:
signal.connect(handler)
|
[
"def",
"dispatch",
"(",
")",
":",
"aliases",
"=",
"SignalDispatcher",
".",
"signals",
".",
"keys",
"(",
")",
"for",
"alias",
"in",
"aliases",
":",
"handlers",
"=",
"SignalDispatcher",
".",
"handlers",
".",
"get",
"(",
"alias",
")",
"signal",
"=",
"SignalDispatcher",
".",
"signals",
".",
"get",
"(",
"alias",
")",
"if",
"signal",
"is",
"None",
"or",
"handlers",
".",
"__len__",
"(",
")",
"==",
"0",
":",
"continue",
"for",
"handler",
"in",
"handlers",
":",
"signal",
".",
"connect",
"(",
"handler",
")"
] | 30
| 21.764706
|
def in_unit_of(self, unit, as_quantity=False):
"""
Return the current value transformed to the new units
:param unit: either an astropy.Unit instance, or a string which can be converted to an astropy.Unit
instance, like "1 / (erg cm**2 s)"
:param as_quantity: if True, the method return an astropy.Quantity, if False just a floating point number.
Default is False
:return: either a floating point or a astropy.Quantity depending on the value of "as_quantity"
"""
new_unit = u.Unit(unit)
new_quantity = self.as_quantity.to(new_unit)
if as_quantity:
return new_quantity
else:
return new_quantity.value
|
[
"def",
"in_unit_of",
"(",
"self",
",",
"unit",
",",
"as_quantity",
"=",
"False",
")",
":",
"new_unit",
"=",
"u",
".",
"Unit",
"(",
"unit",
")",
"new_quantity",
"=",
"self",
".",
"as_quantity",
".",
"to",
"(",
"new_unit",
")",
"if",
"as_quantity",
":",
"return",
"new_quantity",
"else",
":",
"return",
"new_quantity",
".",
"value"
] | 32.409091
| 27.5
|
def get_missing_required_annotations(self) -> List[str]:
"""Return missing required annotations."""
return [
required_annotation
for required_annotation in self.required_annotations
if required_annotation not in self.annotations
]
|
[
"def",
"get_missing_required_annotations",
"(",
"self",
")",
"->",
"List",
"[",
"str",
"]",
":",
"return",
"[",
"required_annotation",
"for",
"required_annotation",
"in",
"self",
".",
"required_annotations",
"if",
"required_annotation",
"not",
"in",
"self",
".",
"annotations",
"]"
] | 40.571429
| 17.428571
|
def p_fromitem_list(self,t):
"""fromitem_list : fromitem_list ',' fromitem
| fromitem
"""
if len(t)==2: t[0] = [t[1]]
elif len(t)==4: t[0] = t[1] + [t[3]]
else: raise NotImplementedError('unk_len', len(t)) # pragma: no cover
|
[
"def",
"p_fromitem_list",
"(",
"self",
",",
"t",
")",
":",
"if",
"len",
"(",
"t",
")",
"==",
"2",
":",
"t",
"[",
"0",
"]",
"=",
"[",
"t",
"[",
"1",
"]",
"]",
"elif",
"len",
"(",
"t",
")",
"==",
"4",
":",
"t",
"[",
"0",
"]",
"=",
"t",
"[",
"1",
"]",
"+",
"[",
"t",
"[",
"3",
"]",
"]",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'unk_len'",
",",
"len",
"(",
"t",
")",
")",
"# pragma: no cover"
] | 37
| 9
|
def gnu_getopt(args, shortopts, longopts=[]):
"""getopt(args, options[, long_options]) -> opts, args
This function works like getopt(), except that GNU style scanning
mode is used by default. This means that option and non-option
arguments may be intermixed. The getopt() function stops
processing options as soon as a non-option argument is
encountered.
If the first character of the option string is `+', or if the
environment variable POSIXLY_CORRECT is set, then option
processing stops as soon as a non-option argument is encountered.
"""
opts = []
prog_args = []
if type('') == type(longopts):
longopts = [longopts]
else:
longopts = list(longopts)
# Allow options after non-option arguments?
all_options_first = False
if shortopts.startswith('+'):
shortopts = shortopts[1:]
all_options_first = True
while args:
if args[0] == '--':
prog_args += args[1:]
break
if args[0][:2] == '--':
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
elif args[0][:1] == '-':
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
else:
if all_options_first:
prog_args += args
break
else:
prog_args.append(args[0])
args = args[1:]
return opts, prog_args
|
[
"def",
"gnu_getopt",
"(",
"args",
",",
"shortopts",
",",
"longopts",
"=",
"[",
"]",
")",
":",
"opts",
"=",
"[",
"]",
"prog_args",
"=",
"[",
"]",
"if",
"type",
"(",
"''",
")",
"==",
"type",
"(",
"longopts",
")",
":",
"longopts",
"=",
"[",
"longopts",
"]",
"else",
":",
"longopts",
"=",
"list",
"(",
"longopts",
")",
"# Allow options after non-option arguments?",
"all_options_first",
"=",
"False",
"if",
"shortopts",
".",
"startswith",
"(",
"'+'",
")",
":",
"shortopts",
"=",
"shortopts",
"[",
"1",
":",
"]",
"all_options_first",
"=",
"True",
"while",
"args",
":",
"if",
"args",
"[",
"0",
"]",
"==",
"'--'",
":",
"prog_args",
"+=",
"args",
"[",
"1",
":",
"]",
"break",
"if",
"args",
"[",
"0",
"]",
"[",
":",
"2",
"]",
"==",
"'--'",
":",
"opts",
",",
"args",
"=",
"do_longs",
"(",
"opts",
",",
"args",
"[",
"0",
"]",
"[",
"2",
":",
"]",
",",
"longopts",
",",
"args",
"[",
"1",
":",
"]",
")",
"elif",
"args",
"[",
"0",
"]",
"[",
":",
"1",
"]",
"==",
"'-'",
":",
"opts",
",",
"args",
"=",
"do_shorts",
"(",
"opts",
",",
"args",
"[",
"0",
"]",
"[",
"1",
":",
"]",
",",
"shortopts",
",",
"args",
"[",
"1",
":",
"]",
")",
"else",
":",
"if",
"all_options_first",
":",
"prog_args",
"+=",
"args",
"break",
"else",
":",
"prog_args",
".",
"append",
"(",
"args",
"[",
"0",
"]",
")",
"args",
"=",
"args",
"[",
"1",
":",
"]",
"return",
"opts",
",",
"prog_args"
] | 31.088889
| 19.511111
|
def EnablePlugins(self, plugin_includes):
"""Enables parser plugins.
Args:
plugin_includes (list[str]): names of the plugins to enable, where None
or an empty list represents all plugins. Note that the default plugin
is handled separately.
"""
super(SyslogParser, self).EnablePlugins(plugin_includes)
self._plugin_by_reporter = {}
for plugin in self._plugins:
self._plugin_by_reporter[plugin.REPORTER] = plugin
|
[
"def",
"EnablePlugins",
"(",
"self",
",",
"plugin_includes",
")",
":",
"super",
"(",
"SyslogParser",
",",
"self",
")",
".",
"EnablePlugins",
"(",
"plugin_includes",
")",
"self",
".",
"_plugin_by_reporter",
"=",
"{",
"}",
"for",
"plugin",
"in",
"self",
".",
"_plugins",
":",
"self",
".",
"_plugin_by_reporter",
"[",
"plugin",
".",
"REPORTER",
"]",
"=",
"plugin"
] | 35.076923
| 19
|
def features(self, other_start, other_end):
"""
return e.g. "intron;exon" if the other_start, end overlap introns and
exons
"""
# completely encases gene.
if other_start <= self.start and other_end >= self.end:
return ['gene' if self.cdsStart != self.cdsEnd else 'nc_gene']
other = Interval(other_start, other_end)
ovls = []
tx = 'txEnd' if self.strand == "-" else 'txStart'
if hasattr(self, tx) and other_start <= getattr(self, tx) <= other_end \
and self.cdsStart != self.cdsEnd:
ovls = ["TSS"]
for ftype in ('introns', 'exons', 'utr5', 'utr3', 'cdss'):
feats = getattr(self, ftype)
if not isinstance(feats, list): feats = [feats]
if any(Interval(f[0], f[1]).overlaps(other) for f in feats):
ovls.append(ftype[:-1] if ftype[-1] == 's' else ftype)
if 'cds' in ovls:
ovls = [ft for ft in ovls if ft != 'exon']
if self.cdsStart == self.cdsEnd:
ovls = ['nc_' + ft for ft in ovls]
return ovls
|
[
"def",
"features",
"(",
"self",
",",
"other_start",
",",
"other_end",
")",
":",
"# completely encases gene.",
"if",
"other_start",
"<=",
"self",
".",
"start",
"and",
"other_end",
">=",
"self",
".",
"end",
":",
"return",
"[",
"'gene'",
"if",
"self",
".",
"cdsStart",
"!=",
"self",
".",
"cdsEnd",
"else",
"'nc_gene'",
"]",
"other",
"=",
"Interval",
"(",
"other_start",
",",
"other_end",
")",
"ovls",
"=",
"[",
"]",
"tx",
"=",
"'txEnd'",
"if",
"self",
".",
"strand",
"==",
"\"-\"",
"else",
"'txStart'",
"if",
"hasattr",
"(",
"self",
",",
"tx",
")",
"and",
"other_start",
"<=",
"getattr",
"(",
"self",
",",
"tx",
")",
"<=",
"other_end",
"and",
"self",
".",
"cdsStart",
"!=",
"self",
".",
"cdsEnd",
":",
"ovls",
"=",
"[",
"\"TSS\"",
"]",
"for",
"ftype",
"in",
"(",
"'introns'",
",",
"'exons'",
",",
"'utr5'",
",",
"'utr3'",
",",
"'cdss'",
")",
":",
"feats",
"=",
"getattr",
"(",
"self",
",",
"ftype",
")",
"if",
"not",
"isinstance",
"(",
"feats",
",",
"list",
")",
":",
"feats",
"=",
"[",
"feats",
"]",
"if",
"any",
"(",
"Interval",
"(",
"f",
"[",
"0",
"]",
",",
"f",
"[",
"1",
"]",
")",
".",
"overlaps",
"(",
"other",
")",
"for",
"f",
"in",
"feats",
")",
":",
"ovls",
".",
"append",
"(",
"ftype",
"[",
":",
"-",
"1",
"]",
"if",
"ftype",
"[",
"-",
"1",
"]",
"==",
"'s'",
"else",
"ftype",
")",
"if",
"'cds'",
"in",
"ovls",
":",
"ovls",
"=",
"[",
"ft",
"for",
"ft",
"in",
"ovls",
"if",
"ft",
"!=",
"'exon'",
"]",
"if",
"self",
".",
"cdsStart",
"==",
"self",
".",
"cdsEnd",
":",
"ovls",
"=",
"[",
"'nc_'",
"+",
"ft",
"for",
"ft",
"in",
"ovls",
"]",
"return",
"ovls"
] | 45.583333
| 16.5
|
def is_velar(c,lang):
"""
Is the character a velar
"""
o=get_offset(c,lang)
return (o>=VELAR_RANGE[0] and o<=VELAR_RANGE[1])
|
[
"def",
"is_velar",
"(",
"c",
",",
"lang",
")",
":",
"o",
"=",
"get_offset",
"(",
"c",
",",
"lang",
")",
"return",
"(",
"o",
">=",
"VELAR_RANGE",
"[",
"0",
"]",
"and",
"o",
"<=",
"VELAR_RANGE",
"[",
"1",
"]",
")"
] | 23.333333
| 9.666667
|
def absent(name,
vhost='/',
runas=None):
'''
Ensure the named policy is absent
Reference: http://www.rabbitmq.com/ha.html
name
The name of the policy to remove
runas
Name of the user to run the command as
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
policy_exists = __salt__['rabbitmq.policy_exists'](
vhost, name, runas=runas)
if not policy_exists:
ret['comment'] = 'Policy \'{0} {1}\' is not present.'.format(vhost, name)
return ret
if not __opts__['test']:
result = __salt__['rabbitmq.delete_policy'](vhost, name, runas=runas)
if 'Error' in result:
ret['result'] = False
ret['comment'] = result['Error']
return ret
elif 'Deleted' in result:
ret['comment'] = 'Deleted'
# If we've reached this far before returning, we have changes.
ret['changes'] = {'new': '', 'old': name}
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Policy \'{0} {1}\' will be removed.'.format(vhost, name)
return ret
|
[
"def",
"absent",
"(",
"name",
",",
"vhost",
"=",
"'/'",
",",
"runas",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'result'",
":",
"True",
",",
"'comment'",
":",
"''",
",",
"'changes'",
":",
"{",
"}",
"}",
"policy_exists",
"=",
"__salt__",
"[",
"'rabbitmq.policy_exists'",
"]",
"(",
"vhost",
",",
"name",
",",
"runas",
"=",
"runas",
")",
"if",
"not",
"policy_exists",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Policy \\'{0} {1}\\' is not present.'",
".",
"format",
"(",
"vhost",
",",
"name",
")",
"return",
"ret",
"if",
"not",
"__opts__",
"[",
"'test'",
"]",
":",
"result",
"=",
"__salt__",
"[",
"'rabbitmq.delete_policy'",
"]",
"(",
"vhost",
",",
"name",
",",
"runas",
"=",
"runas",
")",
"if",
"'Error'",
"in",
"result",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"result",
"[",
"'Error'",
"]",
"return",
"ret",
"elif",
"'Deleted'",
"in",
"result",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Deleted'",
"# If we've reached this far before returning, we have changes.",
"ret",
"[",
"'changes'",
"]",
"=",
"{",
"'new'",
":",
"''",
",",
"'old'",
":",
"name",
"}",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"ret",
"[",
"'comment'",
"]",
"=",
"'Policy \\'{0} {1}\\' will be removed.'",
".",
"format",
"(",
"vhost",
",",
"name",
")",
"return",
"ret"
] | 28.333333
| 22.538462
|
def ref2names2commdct(ref2names, commdct):
"""embed ref2names into commdct"""
for comm in commdct:
for cdct in comm:
try:
refs = cdct['object-list'][0]
validobjects = ref2names[refs]
cdct.update({'validobjects':validobjects})
except KeyError as e:
continue
return commdct
|
[
"def",
"ref2names2commdct",
"(",
"ref2names",
",",
"commdct",
")",
":",
"for",
"comm",
"in",
"commdct",
":",
"for",
"cdct",
"in",
"comm",
":",
"try",
":",
"refs",
"=",
"cdct",
"[",
"'object-list'",
"]",
"[",
"0",
"]",
"validobjects",
"=",
"ref2names",
"[",
"refs",
"]",
"cdct",
".",
"update",
"(",
"{",
"'validobjects'",
":",
"validobjects",
"}",
")",
"except",
"KeyError",
"as",
"e",
":",
"continue",
"return",
"commdct"
] | 33.545455
| 11.909091
|
def execute_ccm_remotely(remote_options, ccm_args):
"""
Execute CCM operation(s) remotely
:return A tuple defining the execution of the command
* output - The output of the execution if the output was not displayed
* exit_status - The exit status of remotely executed script
:raises Exception if invalid options are passed for `--dse-credentials`, `--ssl`, or
`--node-ssl` when initiating a remote execution; also if
error occured during ssh connection
"""
if not PARAMIKO_IS_AVAILABLE:
logging.warn("Paramiko is not Availble: Skipping remote execution of CCM command")
return None, None
# Create the SSH client
ssh_client = SSHClient(remote_options.ssh_host, remote_options.ssh_port,
remote_options.ssh_username, remote_options.ssh_password,
remote_options.ssh_private_key)
# Handle CCM arguments that require SFTP
for index, argument in enumerate(ccm_args):
# Determine if DSE credentials argument is being used
if "--dse-credentials" in argument:
# Get the filename being used for the DSE credentials
tokens = argument.split("=")
credentials_path = os.path.join(os.path.expanduser("~"), ".ccm", ".dse.ini")
if len(tokens) == 2:
credentials_path = tokens[1]
# Ensure the credential file exists locally and copy to remote host
if not os.path.isfile(credentials_path):
raise Exception("DSE Credentials File Does not Exist: %s"
% credentials_path)
ssh_client.put(credentials_path, ssh_client.ccm_config_dir)
# Update the DSE credentials argument
ccm_args[index] = "--dse-credentials"
# Determine if SSL or node SSL path argument is being used
if "--ssl" in argument or "--node-ssl" in argument:
# Get the directory being used for the path
tokens = argument.split("=")
if len(tokens) != 2:
raise Exception("Path is not Specified: %s" % argument)
ssl_path = tokens[1]
# Ensure the path exists locally and copy to remote host
if not os.path.isdir(ssl_path):
raise Exception("Path Does not Exist: %s" % ssl_path)
remote_ssl_path = ssh_client.temp + os.path.basename(ssl_path)
ssh_client.put(ssl_path, remote_ssl_path)
# Update the argument
ccm_args[index] = tokens[0] + "=" + remote_ssl_path
# Execute the CCM request, return output and exit status
return ssh_client.execute_ccm_command(ccm_args)
|
[
"def",
"execute_ccm_remotely",
"(",
"remote_options",
",",
"ccm_args",
")",
":",
"if",
"not",
"PARAMIKO_IS_AVAILABLE",
":",
"logging",
".",
"warn",
"(",
"\"Paramiko is not Availble: Skipping remote execution of CCM command\"",
")",
"return",
"None",
",",
"None",
"# Create the SSH client",
"ssh_client",
"=",
"SSHClient",
"(",
"remote_options",
".",
"ssh_host",
",",
"remote_options",
".",
"ssh_port",
",",
"remote_options",
".",
"ssh_username",
",",
"remote_options",
".",
"ssh_password",
",",
"remote_options",
".",
"ssh_private_key",
")",
"# Handle CCM arguments that require SFTP",
"for",
"index",
",",
"argument",
"in",
"enumerate",
"(",
"ccm_args",
")",
":",
"# Determine if DSE credentials argument is being used",
"if",
"\"--dse-credentials\"",
"in",
"argument",
":",
"# Get the filename being used for the DSE credentials",
"tokens",
"=",
"argument",
".",
"split",
"(",
"\"=\"",
")",
"credentials_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~\"",
")",
",",
"\".ccm\"",
",",
"\".dse.ini\"",
")",
"if",
"len",
"(",
"tokens",
")",
"==",
"2",
":",
"credentials_path",
"=",
"tokens",
"[",
"1",
"]",
"# Ensure the credential file exists locally and copy to remote host",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"credentials_path",
")",
":",
"raise",
"Exception",
"(",
"\"DSE Credentials File Does not Exist: %s\"",
"%",
"credentials_path",
")",
"ssh_client",
".",
"put",
"(",
"credentials_path",
",",
"ssh_client",
".",
"ccm_config_dir",
")",
"# Update the DSE credentials argument",
"ccm_args",
"[",
"index",
"]",
"=",
"\"--dse-credentials\"",
"# Determine if SSL or node SSL path argument is being used",
"if",
"\"--ssl\"",
"in",
"argument",
"or",
"\"--node-ssl\"",
"in",
"argument",
":",
"# Get the directory being used for the path",
"tokens",
"=",
"argument",
".",
"split",
"(",
"\"=\"",
")",
"if",
"len",
"(",
"tokens",
")",
"!=",
"2",
":",
"raise",
"Exception",
"(",
"\"Path is not Specified: %s\"",
"%",
"argument",
")",
"ssl_path",
"=",
"tokens",
"[",
"1",
"]",
"# Ensure the path exists locally and copy to remote host",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"ssl_path",
")",
":",
"raise",
"Exception",
"(",
"\"Path Does not Exist: %s\"",
"%",
"ssl_path",
")",
"remote_ssl_path",
"=",
"ssh_client",
".",
"temp",
"+",
"os",
".",
"path",
".",
"basename",
"(",
"ssl_path",
")",
"ssh_client",
".",
"put",
"(",
"ssl_path",
",",
"remote_ssl_path",
")",
"# Update the argument",
"ccm_args",
"[",
"index",
"]",
"=",
"tokens",
"[",
"0",
"]",
"+",
"\"=\"",
"+",
"remote_ssl_path",
"# Execute the CCM request, return output and exit status",
"return",
"ssh_client",
".",
"execute_ccm_command",
"(",
"ccm_args",
")"
] | 46.448276
| 22.37931
|
def is_geographic(element, kdims=None):
"""
Utility to determine whether the supplied element optionally
a subset of its key dimensions represent a geographic coordinate
system.
"""
if isinstance(element, (Overlay, NdOverlay)):
return any(element.traverse(is_geographic, [_Element]))
if kdims:
kdims = [element.get_dimension(d) for d in kdims]
else:
kdims = element.kdims
if len(kdims) != 2 and not isinstance(element, (Graph, Nodes)):
return False
if isinstance(element.data, geographic_types) or isinstance(element, (WMTS, Feature)):
return True
elif isinstance(element, _Element):
return kdims == element.kdims and element.crs
else:
return False
|
[
"def",
"is_geographic",
"(",
"element",
",",
"kdims",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"element",
",",
"(",
"Overlay",
",",
"NdOverlay",
")",
")",
":",
"return",
"any",
"(",
"element",
".",
"traverse",
"(",
"is_geographic",
",",
"[",
"_Element",
"]",
")",
")",
"if",
"kdims",
":",
"kdims",
"=",
"[",
"element",
".",
"get_dimension",
"(",
"d",
")",
"for",
"d",
"in",
"kdims",
"]",
"else",
":",
"kdims",
"=",
"element",
".",
"kdims",
"if",
"len",
"(",
"kdims",
")",
"!=",
"2",
"and",
"not",
"isinstance",
"(",
"element",
",",
"(",
"Graph",
",",
"Nodes",
")",
")",
":",
"return",
"False",
"if",
"isinstance",
"(",
"element",
".",
"data",
",",
"geographic_types",
")",
"or",
"isinstance",
"(",
"element",
",",
"(",
"WMTS",
",",
"Feature",
")",
")",
":",
"return",
"True",
"elif",
"isinstance",
"(",
"element",
",",
"_Element",
")",
":",
"return",
"kdims",
"==",
"element",
".",
"kdims",
"and",
"element",
".",
"crs",
"else",
":",
"return",
"False"
] | 33.318182
| 21.045455
|
def extract_file_name(content_dispo):
"""Extract file name from the input request body"""
# print type(content_dispo)
# print repr(content_dispo)
# convertion of escape string (str type) from server
# to unicode object
content_dispo = content_dispo.decode('unicode-escape').strip('"')
file_name = ""
for key_val in content_dispo.split(';'):
param = key_val.strip().split('=')
if param[0] == "filename":
file_name = param[1].strip('"')
break
return file_name
|
[
"def",
"extract_file_name",
"(",
"content_dispo",
")",
":",
"# print type(content_dispo)",
"# print repr(content_dispo)",
"# convertion of escape string (str type) from server",
"# to unicode object",
"content_dispo",
"=",
"content_dispo",
".",
"decode",
"(",
"'unicode-escape'",
")",
".",
"strip",
"(",
"'\"'",
")",
"file_name",
"=",
"\"\"",
"for",
"key_val",
"in",
"content_dispo",
".",
"split",
"(",
"';'",
")",
":",
"param",
"=",
"key_val",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'='",
")",
"if",
"param",
"[",
"0",
"]",
"==",
"\"filename\"",
":",
"file_name",
"=",
"param",
"[",
"1",
"]",
".",
"strip",
"(",
"'\"'",
")",
"break",
"return",
"file_name"
] | 37.142857
| 11.642857
|
def opacity(self, value):
"""
Setter for **self.__opacity** attribute.
:param value: Attribute value.
:type value: float
"""
if value is not None:
assert type(value) in (int, float), "'{0}' attribute: '{1}' type is not 'int' or 'float'!".format("opacity",
value)
if value > 1:
value = 1
elif value < 0:
value = 0
self.__opacity = float(value)
self.__set_style_sheet()
|
[
"def",
"opacity",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"assert",
"type",
"(",
"value",
")",
"in",
"(",
"int",
",",
"float",
")",
",",
"\"'{0}' attribute: '{1}' type is not 'int' or 'float'!\"",
".",
"format",
"(",
"\"opacity\"",
",",
"value",
")",
"if",
"value",
">",
"1",
":",
"value",
"=",
"1",
"elif",
"value",
"<",
"0",
":",
"value",
"=",
"0",
"self",
".",
"__opacity",
"=",
"float",
"(",
"value",
")",
"self",
".",
"__set_style_sheet",
"(",
")"
] | 32.166667
| 22.833333
|
def _get_single_page(self, url_suffix, data, page_num):
"""
Send GET request to API at url_suffix with post_data adding page and per_page parameters to
retrieve a single page. Page size is determined by config.page_size.
:param url_suffix: str URL path we are sending a GET to
:param data: object data we are sending
:param page_num: int: page number to fetch
:return: requests.Response containing the result
"""
data_with_per_page = dict(data)
data_with_per_page['page'] = page_num
data_with_per_page['per_page'] = self._get_page_size()
(url, data_str, headers) = self._url_parts(url_suffix, data_with_per_page,
content_type=ContentType.form)
resp = self.http.get(url, headers=headers, params=data_str)
return self._check_err(resp, url_suffix, data, allow_pagination=True)
|
[
"def",
"_get_single_page",
"(",
"self",
",",
"url_suffix",
",",
"data",
",",
"page_num",
")",
":",
"data_with_per_page",
"=",
"dict",
"(",
"data",
")",
"data_with_per_page",
"[",
"'page'",
"]",
"=",
"page_num",
"data_with_per_page",
"[",
"'per_page'",
"]",
"=",
"self",
".",
"_get_page_size",
"(",
")",
"(",
"url",
",",
"data_str",
",",
"headers",
")",
"=",
"self",
".",
"_url_parts",
"(",
"url_suffix",
",",
"data_with_per_page",
",",
"content_type",
"=",
"ContentType",
".",
"form",
")",
"resp",
"=",
"self",
".",
"http",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"headers",
",",
"params",
"=",
"data_str",
")",
"return",
"self",
".",
"_check_err",
"(",
"resp",
",",
"url_suffix",
",",
"data",
",",
"allow_pagination",
"=",
"True",
")"
] | 57.5625
| 21.3125
|
def convert2wavenumber(self):
"""
Convert from wavelengths to wavenumber.
Units:
Wavelength: micro meters (1e-6 m)
Wavenumber: cm-1
"""
self.wavenumber = 1. / (1e-4 * self.wavelength[::-1])
self.irradiance = (self.irradiance[::-1] *
self.wavelength[::-1] * self.wavelength[::-1] * 0.1)
self.wavelength = None
|
[
"def",
"convert2wavenumber",
"(",
"self",
")",
":",
"self",
".",
"wavenumber",
"=",
"1.",
"/",
"(",
"1e-4",
"*",
"self",
".",
"wavelength",
"[",
":",
":",
"-",
"1",
"]",
")",
"self",
".",
"irradiance",
"=",
"(",
"self",
".",
"irradiance",
"[",
":",
":",
"-",
"1",
"]",
"*",
"self",
".",
"wavelength",
"[",
":",
":",
"-",
"1",
"]",
"*",
"self",
".",
"wavelength",
"[",
":",
":",
"-",
"1",
"]",
"*",
"0.1",
")",
"self",
".",
"wavelength",
"=",
"None"
] | 33.416667
| 15.083333
|
def set_segs_names(self):
"""Return a single array that stores integer segment labels."""
segs_names = np.zeros(self._adata.shape[0], dtype=np.int8)
self.segs_names_unique = []
for iseg, seg in enumerate(self.segs):
segs_names[seg] = iseg
self.segs_names_unique.append(iseg)
self.segs_names = segs_names
|
[
"def",
"set_segs_names",
"(",
"self",
")",
":",
"segs_names",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"_adata",
".",
"shape",
"[",
"0",
"]",
",",
"dtype",
"=",
"np",
".",
"int8",
")",
"self",
".",
"segs_names_unique",
"=",
"[",
"]",
"for",
"iseg",
",",
"seg",
"in",
"enumerate",
"(",
"self",
".",
"segs",
")",
":",
"segs_names",
"[",
"seg",
"]",
"=",
"iseg",
"self",
".",
"segs_names_unique",
".",
"append",
"(",
"iseg",
")",
"self",
".",
"segs_names",
"=",
"segs_names"
] | 45
| 8.625
|
def _check_version():
"""Check renku version."""
from ._config import APP_NAME
if VersionCache.load(APP_NAME).is_fresh:
return
from pkg_resources import parse_version
from renku.version import __version__
version = parse_version(__version__)
allow_prereleases = version.is_prerelease
latest_version = find_latest_version(
'renku', allow_prereleases=allow_prereleases
)
if version < latest_version:
click.secho(
'You are using renku version {version}, however version '
'{latest_version} is available.\n'
'You should consider upgrading ...'.format(
version=__version__,
latest_version=latest_version,
),
fg='yellow',
bold=True,
)
VersionCache(pypi_version=str(latest_version)).dump(APP_NAME)
|
[
"def",
"_check_version",
"(",
")",
":",
"from",
".",
"_config",
"import",
"APP_NAME",
"if",
"VersionCache",
".",
"load",
"(",
"APP_NAME",
")",
".",
"is_fresh",
":",
"return",
"from",
"pkg_resources",
"import",
"parse_version",
"from",
"renku",
".",
"version",
"import",
"__version__",
"version",
"=",
"parse_version",
"(",
"__version__",
")",
"allow_prereleases",
"=",
"version",
".",
"is_prerelease",
"latest_version",
"=",
"find_latest_version",
"(",
"'renku'",
",",
"allow_prereleases",
"=",
"allow_prereleases",
")",
"if",
"version",
"<",
"latest_version",
":",
"click",
".",
"secho",
"(",
"'You are using renku version {version}, however version '",
"'{latest_version} is available.\\n'",
"'You should consider upgrading ...'",
".",
"format",
"(",
"version",
"=",
"__version__",
",",
"latest_version",
"=",
"latest_version",
",",
")",
",",
"fg",
"=",
"'yellow'",
",",
"bold",
"=",
"True",
",",
")",
"VersionCache",
"(",
"pypi_version",
"=",
"str",
"(",
"latest_version",
")",
")",
".",
"dump",
"(",
"APP_NAME",
")"
] | 28.233333
| 18.566667
|
def recover_model_from_data(model_class, original_data, modified_data, deleted_data):
"""
Function to reconstruct a model from DirtyModel basic information: original data, the modified and deleted
fields.
Necessary for pickle an object
"""
model = model_class()
return set_model_internal_data(model, original_data, modified_data, deleted_data)
|
[
"def",
"recover_model_from_data",
"(",
"model_class",
",",
"original_data",
",",
"modified_data",
",",
"deleted_data",
")",
":",
"model",
"=",
"model_class",
"(",
")",
"return",
"set_model_internal_data",
"(",
"model",
",",
"original_data",
",",
"modified_data",
",",
"deleted_data",
")"
] | 45.5
| 26.25
|
def getFieldsColumnLengths(self):
"""
Gets the maximum length of each column in the field table
"""
nameLen = 0
descLen = 0
for f in self.fields:
nameLen = max(nameLen, len(f['title']))
descLen = max(descLen, len(f['description']))
return (nameLen, descLen)
|
[
"def",
"getFieldsColumnLengths",
"(",
"self",
")",
":",
"nameLen",
"=",
"0",
"descLen",
"=",
"0",
"for",
"f",
"in",
"self",
".",
"fields",
":",
"nameLen",
"=",
"max",
"(",
"nameLen",
",",
"len",
"(",
"f",
"[",
"'title'",
"]",
")",
")",
"descLen",
"=",
"max",
"(",
"descLen",
",",
"len",
"(",
"f",
"[",
"'description'",
"]",
")",
")",
"return",
"(",
"nameLen",
",",
"descLen",
")"
] | 32.8
| 12
|
def editUsageReportSettings(self, samplingInterval,
enabled=True, maxHistory=0):
"""
The usage reports settings are applied to the entire site. A POST
request updates the usage reports settings.
Inputs:
samplingInterval - Defines the duration (in minutes) for which
the usage statistics are aggregated or sampled, in-memory,
before being written out to the statistics database.
enabled - default True - Can be true or false. When usage
reports are enabled, service usage statistics are collected
and persisted to a statistics database. When usage reports are
disabled, the statistics are not collected.
maxHistory - default 0 - Represents the number of days after
which usage statistics are deleted after the statistics
database. If the maxHistory parameter is set to 0, the
statistics are persisted forever.
"""
params = {
"f" : "json",
"maxHistory" : maxHistory,
"enabled" : enabled,
"samplingInterval" : samplingInterval
}
url = self._url + "/settings/edit"
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
|
[
"def",
"editUsageReportSettings",
"(",
"self",
",",
"samplingInterval",
",",
"enabled",
"=",
"True",
",",
"maxHistory",
"=",
"0",
")",
":",
"params",
"=",
"{",
"\"f\"",
":",
"\"json\"",
",",
"\"maxHistory\"",
":",
"maxHistory",
",",
"\"enabled\"",
":",
"enabled",
",",
"\"samplingInterval\"",
":",
"samplingInterval",
"}",
"url",
"=",
"self",
".",
"_url",
"+",
"\"/settings/edit\"",
"return",
"self",
".",
"_post",
"(",
"url",
"=",
"url",
",",
"param_dict",
"=",
"params",
",",
"securityHandler",
"=",
"self",
".",
"_securityHandler",
",",
"proxy_url",
"=",
"self",
".",
"_proxy_url",
",",
"proxy_port",
"=",
"self",
".",
"_proxy_port",
")"
] | 47.677419
| 19.16129
|
def store_vector(self, hash_name, bucket_key, v, data):
"""
Stores vector and JSON-serializable data in bucket with specified key.
"""
self._add_vector(hash_name, bucket_key, v, data, self.redis_object)
|
[
"def",
"store_vector",
"(",
"self",
",",
"hash_name",
",",
"bucket_key",
",",
"v",
",",
"data",
")",
":",
"self",
".",
"_add_vector",
"(",
"hash_name",
",",
"bucket_key",
",",
"v",
",",
"data",
",",
"self",
".",
"redis_object",
")"
] | 46
| 17.6
|
def signmessage(self, address, message):
"""Sign a message with the private key of an address.
Cryptographically signs a message using ECDSA. Since this requires
an address's private key, the wallet must be unlocked first.
Args:
address (str): address used to sign the message
message (str): plaintext message to which apply the signature
Returns:
str: ECDSA signature over the message
"""
signature = self.rpc.call("signmessage", address, message)
self.logger.debug("Signature: %s" % signature)
return signature
|
[
"def",
"signmessage",
"(",
"self",
",",
"address",
",",
"message",
")",
":",
"signature",
"=",
"self",
".",
"rpc",
".",
"call",
"(",
"\"signmessage\"",
",",
"address",
",",
"message",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Signature: %s\"",
"%",
"signature",
")",
"return",
"signature"
] | 35.470588
| 22.647059
|
def asyncStarMap(asyncCallable, iterable):
"""itertools.starmap for deferred callables
"""
deferreds = starmap(asyncCallable, iterable)
return gatherResults(deferreds, consumeErrors=True)
|
[
"def",
"asyncStarMap",
"(",
"asyncCallable",
",",
"iterable",
")",
":",
"deferreds",
"=",
"starmap",
"(",
"asyncCallable",
",",
"iterable",
")",
"return",
"gatherResults",
"(",
"deferreds",
",",
"consumeErrors",
"=",
"True",
")"
] | 39.8
| 5
|
def normalize(expr):
"""Normalize both sides, but don't eliminate the expression."""
lhs = normalize(expr.lhs)
rhs = normalize(expr.rhs)
return type(expr)(lhs, rhs, start=lhs.start, end=rhs.end)
|
[
"def",
"normalize",
"(",
"expr",
")",
":",
"lhs",
"=",
"normalize",
"(",
"expr",
".",
"lhs",
")",
"rhs",
"=",
"normalize",
"(",
"expr",
".",
"rhs",
")",
"return",
"type",
"(",
"expr",
")",
"(",
"lhs",
",",
"rhs",
",",
"start",
"=",
"lhs",
".",
"start",
",",
"end",
"=",
"rhs",
".",
"end",
")"
] | 41.2
| 12.6
|
def get_contract_by_hash(self, contract_hash):
"""get mapped contract_address by its hash, if not found try
indexing."""
contract_address = self.db.reader._get_address_by_hash(contract_hash)
if contract_address is not None:
return contract_address
else:
raise AddressNotFoundError
|
[
"def",
"get_contract_by_hash",
"(",
"self",
",",
"contract_hash",
")",
":",
"contract_address",
"=",
"self",
".",
"db",
".",
"reader",
".",
"_get_address_by_hash",
"(",
"contract_hash",
")",
"if",
"contract_address",
"is",
"not",
"None",
":",
"return",
"contract_address",
"else",
":",
"raise",
"AddressNotFoundError"
] | 37.444444
| 13
|
def handle_json_GET_triprows(self, params):
"""Return a list of rows from the feed file that are related to this
trip."""
schedule = self.server.schedule
try:
trip = schedule.GetTrip(params.get('trip', None))
except KeyError:
# if a non-existent trip is searched for, the return nothing
return
route = schedule.GetRoute(trip.route_id)
trip_row = dict(trip.iteritems())
route_row = dict(route.iteritems())
return [['trips.txt', trip_row], ['routes.txt', route_row]]
|
[
"def",
"handle_json_GET_triprows",
"(",
"self",
",",
"params",
")",
":",
"schedule",
"=",
"self",
".",
"server",
".",
"schedule",
"try",
":",
"trip",
"=",
"schedule",
".",
"GetTrip",
"(",
"params",
".",
"get",
"(",
"'trip'",
",",
"None",
")",
")",
"except",
"KeyError",
":",
"# if a non-existent trip is searched for, the return nothing",
"return",
"route",
"=",
"schedule",
".",
"GetRoute",
"(",
"trip",
".",
"route_id",
")",
"trip_row",
"=",
"dict",
"(",
"trip",
".",
"iteritems",
"(",
")",
")",
"route_row",
"=",
"dict",
"(",
"route",
".",
"iteritems",
"(",
")",
")",
"return",
"[",
"[",
"'trips.txt'",
",",
"trip_row",
"]",
",",
"[",
"'routes.txt'",
",",
"route_row",
"]",
"]"
] | 38.923077
| 12.307692
|
def _additions_remove_use_cd(**kwargs):
'''
Remove VirtualBox Guest Additions.
It uses the CD, connected by VirtualBox.
'''
with _additions_mounted() as mount_point:
kernel = __grains__.get('kernel', '')
if kernel == 'Linux':
return _additions_remove_linux_use_cd(mount_point, **kwargs)
|
[
"def",
"_additions_remove_use_cd",
"(",
"*",
"*",
"kwargs",
")",
":",
"with",
"_additions_mounted",
"(",
")",
"as",
"mount_point",
":",
"kernel",
"=",
"__grains__",
".",
"get",
"(",
"'kernel'",
",",
"''",
")",
"if",
"kernel",
"==",
"'Linux'",
":",
"return",
"_additions_remove_linux_use_cd",
"(",
"mount_point",
",",
"*",
"*",
"kwargs",
")"
] | 29.636364
| 18.727273
|
def get_best_answer(self, query):
"""Get best answer to a question.
:param query: A question to get an answer
:type query: :class:`str`
:returns: An answer to a question
:rtype: :class:`str`
:raises: :class:`NoAnswerError` when can not found answer to a question
"""
query = to_unicode(query)
session = self.Session()
grams = self._get_grams(session, query)
if not grams:
raise NoAnswerError('Can not found answer')
documents = set([doc for gram in grams for doc in gram.documents])
self._recalc_idfs(session, grams)
idfs = dict((gram.gram, gram.idf) for gram in grams)
docs = dict(
(doc.answer, _cosine_measure(idfs, self._get_tf_idfs(doc)))
for doc in documents)
docs = dict((key, val) for (key, val) in docs.items() if val)
session.commit()
try:
max_ratio = max(docs.values())
answers = [answer for answer in docs.keys()
if docs.get(answer) == max_ratio]
answer = random.choice(answers)
logger.debug('{0} -> {1} ({2})'.format(query, answer, max_ratio))
return (answer, max_ratio)
except ValueError:
raise NoAnswerError('Can not found answer')
finally:
session.commit()
|
[
"def",
"get_best_answer",
"(",
"self",
",",
"query",
")",
":",
"query",
"=",
"to_unicode",
"(",
"query",
")",
"session",
"=",
"self",
".",
"Session",
"(",
")",
"grams",
"=",
"self",
".",
"_get_grams",
"(",
"session",
",",
"query",
")",
"if",
"not",
"grams",
":",
"raise",
"NoAnswerError",
"(",
"'Can not found answer'",
")",
"documents",
"=",
"set",
"(",
"[",
"doc",
"for",
"gram",
"in",
"grams",
"for",
"doc",
"in",
"gram",
".",
"documents",
"]",
")",
"self",
".",
"_recalc_idfs",
"(",
"session",
",",
"grams",
")",
"idfs",
"=",
"dict",
"(",
"(",
"gram",
".",
"gram",
",",
"gram",
".",
"idf",
")",
"for",
"gram",
"in",
"grams",
")",
"docs",
"=",
"dict",
"(",
"(",
"doc",
".",
"answer",
",",
"_cosine_measure",
"(",
"idfs",
",",
"self",
".",
"_get_tf_idfs",
"(",
"doc",
")",
")",
")",
"for",
"doc",
"in",
"documents",
")",
"docs",
"=",
"dict",
"(",
"(",
"key",
",",
"val",
")",
"for",
"(",
"key",
",",
"val",
")",
"in",
"docs",
".",
"items",
"(",
")",
"if",
"val",
")",
"session",
".",
"commit",
"(",
")",
"try",
":",
"max_ratio",
"=",
"max",
"(",
"docs",
".",
"values",
"(",
")",
")",
"answers",
"=",
"[",
"answer",
"for",
"answer",
"in",
"docs",
".",
"keys",
"(",
")",
"if",
"docs",
".",
"get",
"(",
"answer",
")",
"==",
"max_ratio",
"]",
"answer",
"=",
"random",
".",
"choice",
"(",
"answers",
")",
"logger",
".",
"debug",
"(",
"'{0} -> {1} ({2})'",
".",
"format",
"(",
"query",
",",
"answer",
",",
"max_ratio",
")",
")",
"return",
"(",
"answer",
",",
"max_ratio",
")",
"except",
"ValueError",
":",
"raise",
"NoAnswerError",
"(",
"'Can not found answer'",
")",
"finally",
":",
"session",
".",
"commit",
"(",
")"
] | 30.522727
| 21.295455
|
def create_file(self, path, fp, force=False, update=False):
"""Store a new file at `path` in this storage.
The contents of the file descriptor `fp` (opened in 'rb' mode)
will be uploaded to `path` which is the full path at
which to store the file.
To force overwrite of an existing file, set `force=True`.
To overwrite an existing file only if the files differ, set `update=True`
"""
if 'b' not in fp.mode:
raise ValueError("File has to be opened in binary mode.")
# all paths are assumed to be absolute
path = norm_remote_path(path)
directory, fname = os.path.split(path)
directories = directory.split(os.path.sep)
# navigate to the right parent object for our file
parent = self
for directory in directories:
# skip empty directory names
if directory:
parent = parent.create_folder(directory, exist_ok=True)
url = parent._new_file_url
# When uploading a large file (>a few MB) that already exists
# we sometimes get a ConnectionError instead of a status == 409.
connection_error = False
# peek at the file to check if it is an empty file which needs special
# handling in requests. If we pass a file like object to data that
# turns out to be of length zero then no file is created on the OSF.
# See: https://github.com/osfclient/osfclient/pull/135
if file_empty(fp):
response = self._put(url, params={'name': fname}, data=b'')
else:
try:
response = self._put(url, params={'name': fname}, data=fp)
except ConnectionError:
connection_error = True
if connection_error or response.status_code == 409:
if not force and not update:
# one-liner to get file size from file pointer from
# https://stackoverflow.com/a/283719/2680824
file_size_bytes = get_local_file_size(fp)
large_file_cutoff = 2**20 # 1 MB in bytes
if connection_error and file_size_bytes < large_file_cutoff:
msg = (
"There was a connection error which might mean {} " +
"already exists. Try again with the `--force` flag " +
"specified."
).format(path)
raise RuntimeError(msg)
else:
# note in case of connection error, we are making an inference here
raise FileExistsError(path)
else:
# find the upload URL for the file we are trying to update
for file_ in self.files:
if norm_remote_path(file_.path) == path:
if not force:
if checksum(path) == file_.hashes.get('md5'):
# If the hashes are equal and force is False,
# we're done here
break
# in the process of attempting to upload the file we
# moved through it -> reset read position to beginning
# of the file
fp.seek(0)
file_.update(fp)
break
else:
raise RuntimeError("Could not create a new file at "
"({}) nor update it.".format(path))
|
[
"def",
"create_file",
"(",
"self",
",",
"path",
",",
"fp",
",",
"force",
"=",
"False",
",",
"update",
"=",
"False",
")",
":",
"if",
"'b'",
"not",
"in",
"fp",
".",
"mode",
":",
"raise",
"ValueError",
"(",
"\"File has to be opened in binary mode.\"",
")",
"# all paths are assumed to be absolute",
"path",
"=",
"norm_remote_path",
"(",
"path",
")",
"directory",
",",
"fname",
"=",
"os",
".",
"path",
".",
"split",
"(",
"path",
")",
"directories",
"=",
"directory",
".",
"split",
"(",
"os",
".",
"path",
".",
"sep",
")",
"# navigate to the right parent object for our file",
"parent",
"=",
"self",
"for",
"directory",
"in",
"directories",
":",
"# skip empty directory names",
"if",
"directory",
":",
"parent",
"=",
"parent",
".",
"create_folder",
"(",
"directory",
",",
"exist_ok",
"=",
"True",
")",
"url",
"=",
"parent",
".",
"_new_file_url",
"# When uploading a large file (>a few MB) that already exists",
"# we sometimes get a ConnectionError instead of a status == 409.",
"connection_error",
"=",
"False",
"# peek at the file to check if it is an empty file which needs special",
"# handling in requests. If we pass a file like object to data that",
"# turns out to be of length zero then no file is created on the OSF.",
"# See: https://github.com/osfclient/osfclient/pull/135",
"if",
"file_empty",
"(",
"fp",
")",
":",
"response",
"=",
"self",
".",
"_put",
"(",
"url",
",",
"params",
"=",
"{",
"'name'",
":",
"fname",
"}",
",",
"data",
"=",
"b''",
")",
"else",
":",
"try",
":",
"response",
"=",
"self",
".",
"_put",
"(",
"url",
",",
"params",
"=",
"{",
"'name'",
":",
"fname",
"}",
",",
"data",
"=",
"fp",
")",
"except",
"ConnectionError",
":",
"connection_error",
"=",
"True",
"if",
"connection_error",
"or",
"response",
".",
"status_code",
"==",
"409",
":",
"if",
"not",
"force",
"and",
"not",
"update",
":",
"# one-liner to get file size from file pointer from",
"# https://stackoverflow.com/a/283719/2680824",
"file_size_bytes",
"=",
"get_local_file_size",
"(",
"fp",
")",
"large_file_cutoff",
"=",
"2",
"**",
"20",
"# 1 MB in bytes",
"if",
"connection_error",
"and",
"file_size_bytes",
"<",
"large_file_cutoff",
":",
"msg",
"=",
"(",
"\"There was a connection error which might mean {} \"",
"+",
"\"already exists. Try again with the `--force` flag \"",
"+",
"\"specified.\"",
")",
".",
"format",
"(",
"path",
")",
"raise",
"RuntimeError",
"(",
"msg",
")",
"else",
":",
"# note in case of connection error, we are making an inference here",
"raise",
"FileExistsError",
"(",
"path",
")",
"else",
":",
"# find the upload URL for the file we are trying to update",
"for",
"file_",
"in",
"self",
".",
"files",
":",
"if",
"norm_remote_path",
"(",
"file_",
".",
"path",
")",
"==",
"path",
":",
"if",
"not",
"force",
":",
"if",
"checksum",
"(",
"path",
")",
"==",
"file_",
".",
"hashes",
".",
"get",
"(",
"'md5'",
")",
":",
"# If the hashes are equal and force is False,",
"# we're done here",
"break",
"# in the process of attempting to upload the file we",
"# moved through it -> reset read position to beginning",
"# of the file",
"fp",
".",
"seek",
"(",
"0",
")",
"file_",
".",
"update",
"(",
"fp",
")",
"break",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Could not create a new file at \"",
"\"({}) nor update it.\"",
".",
"format",
"(",
"path",
")",
")"
] | 45.282051
| 20.987179
|
def _hcn_func(self):
"""Eq. 56 from Barack and Cutler 2004
"""
self.hc = 1./(np.pi*self.dist)*np.sqrt(2.*self._dEndfr())
return
|
[
"def",
"_hcn_func",
"(",
"self",
")",
":",
"self",
".",
"hc",
"=",
"1.",
"/",
"(",
"np",
".",
"pi",
"*",
"self",
".",
"dist",
")",
"*",
"np",
".",
"sqrt",
"(",
"2.",
"*",
"self",
".",
"_dEndfr",
"(",
")",
")",
"return"
] | 25.833333
| 18.5
|
def minimal_residual(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None,
callback=None, residuals=None):
"""Minimal residual (MR) algorithm.
Solves the linear system Ax = b. Left preconditioning is supported.
Parameters
----------
A : array, matrix, sparse matrix, LinearOperator
n x n, linear system to solve
b : array, matrix
right hand side, shape is (n,) or (n,1)
x0 : array, matrix
initial guess, default is a vector of zeros
tol : float
relative convergence tolerance, i.e. tol is scaled by the
preconditioner norm of r_0, or ||r_0||_M.
maxiter : int
maximum number of allowed iterations
xtype : type
dtype for the solution, default is automatic type detection
M : array, matrix, sparse matrix, LinearOperator
n x n, inverted preconditioner, i.e. solve M A x = M b.
callback : function
User-supplied function is called after each iteration as
callback(xk), where xk is the current solution vector
residuals : list
residuals contains the residual norm history,
including the initial residual. The preconditioner norm
is used, instead of the Euclidean norm.
Returns
-------
(xNew, info)
xNew : an updated guess to the solution of Ax = b
info : halting status of cg
== =======================================
0 successful exit
>0 convergence to tolerance not achieved,
return iteration count instead.
<0 numerical breakdown, or illegal input
== =======================================
Notes
-----
The LinearOperator class is in scipy.sparse.linalg.interface.
Use this class if you prefer to define A or M as a mat-vec routine
as opposed to explicitly constructing the matrix. A.psolve(..) is
still supported as a legacy.
The residual in the preconditioner norm is both used for halting and
returned in the residuals list.
Examples
--------
>>> from pyamg.krylov import minimal_residual
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> from pyamg.gallery import poisson
>>> A = poisson((10,10))
>>> b = np.ones((A.shape[0],))
>>> (x,flag) = minimal_residual(A,b, maxiter=2, tol=1e-8)
>>> print norm(b - A*x)
7.26369350856
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 137--142, 2003
http://www-users.cs.umn.edu/~saad/books.html
"""
A, M, x, b, postprocess = make_system(A, M, x0, b)
# Ensure that warnings are always reissued from this function
import warnings
warnings.filterwarnings('always',
module='pyamg\.krylov\._minimal_residual')
# determine maxiter
if maxiter is None:
maxiter = int(len(b))
elif maxiter < 1:
raise ValueError('Number of iterations must be positive')
# setup method
r = M*(b - A*x)
normr = norm(r)
# store initial residual
if residuals is not None:
residuals[:] = [normr]
# Check initial guess ( scaling by b, if b != 0,
# must account for case when norm(b) is very small)
normb = norm(b)
if normb == 0.0:
normb = 1.0
if normr < tol*normb:
return (postprocess(x), 0)
# Scale tol by ||r_0||_M
if normr != 0.0:
tol = tol*normr
# How often should r be recomputed
recompute_r = 50
iter = 0
while True:
iter = iter+1
p = M*(A*r)
rMAr = np.inner(p.conjugate(), r) # check curvature of M^-1 A
if rMAr < 0.0:
warn("\nIndefinite matrix detected in minimal residual,\
aborting\n")
return (postprocess(x), -1)
alpha = rMAr / np.inner(p.conjugate(), p)
x = x + alpha*r
if np.mod(iter, recompute_r) and iter > 0:
r = M*(b - A*x)
else:
r = r - alpha*p
normr = norm(r)
if residuals is not None:
residuals.append(normr)
if callback is not None:
callback(x)
if normr < tol:
return (postprocess(x), 0)
if iter == maxiter:
return (postprocess(x), iter)
|
[
"def",
"minimal_residual",
"(",
"A",
",",
"b",
",",
"x0",
"=",
"None",
",",
"tol",
"=",
"1e-5",
",",
"maxiter",
"=",
"None",
",",
"xtype",
"=",
"None",
",",
"M",
"=",
"None",
",",
"callback",
"=",
"None",
",",
"residuals",
"=",
"None",
")",
":",
"A",
",",
"M",
",",
"x",
",",
"b",
",",
"postprocess",
"=",
"make_system",
"(",
"A",
",",
"M",
",",
"x0",
",",
"b",
")",
"# Ensure that warnings are always reissued from this function",
"import",
"warnings",
"warnings",
".",
"filterwarnings",
"(",
"'always'",
",",
"module",
"=",
"'pyamg\\.krylov\\._minimal_residual'",
")",
"# determine maxiter",
"if",
"maxiter",
"is",
"None",
":",
"maxiter",
"=",
"int",
"(",
"len",
"(",
"b",
")",
")",
"elif",
"maxiter",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"'Number of iterations must be positive'",
")",
"# setup method",
"r",
"=",
"M",
"*",
"(",
"b",
"-",
"A",
"*",
"x",
")",
"normr",
"=",
"norm",
"(",
"r",
")",
"# store initial residual",
"if",
"residuals",
"is",
"not",
"None",
":",
"residuals",
"[",
":",
"]",
"=",
"[",
"normr",
"]",
"# Check initial guess ( scaling by b, if b != 0,",
"# must account for case when norm(b) is very small)",
"normb",
"=",
"norm",
"(",
"b",
")",
"if",
"normb",
"==",
"0.0",
":",
"normb",
"=",
"1.0",
"if",
"normr",
"<",
"tol",
"*",
"normb",
":",
"return",
"(",
"postprocess",
"(",
"x",
")",
",",
"0",
")",
"# Scale tol by ||r_0||_M",
"if",
"normr",
"!=",
"0.0",
":",
"tol",
"=",
"tol",
"*",
"normr",
"# How often should r be recomputed",
"recompute_r",
"=",
"50",
"iter",
"=",
"0",
"while",
"True",
":",
"iter",
"=",
"iter",
"+",
"1",
"p",
"=",
"M",
"*",
"(",
"A",
"*",
"r",
")",
"rMAr",
"=",
"np",
".",
"inner",
"(",
"p",
".",
"conjugate",
"(",
")",
",",
"r",
")",
"# check curvature of M^-1 A",
"if",
"rMAr",
"<",
"0.0",
":",
"warn",
"(",
"\"\\nIndefinite matrix detected in minimal residual,\\\n aborting\\n\"",
")",
"return",
"(",
"postprocess",
"(",
"x",
")",
",",
"-",
"1",
")",
"alpha",
"=",
"rMAr",
"/",
"np",
".",
"inner",
"(",
"p",
".",
"conjugate",
"(",
")",
",",
"p",
")",
"x",
"=",
"x",
"+",
"alpha",
"*",
"r",
"if",
"np",
".",
"mod",
"(",
"iter",
",",
"recompute_r",
")",
"and",
"iter",
">",
"0",
":",
"r",
"=",
"M",
"*",
"(",
"b",
"-",
"A",
"*",
"x",
")",
"else",
":",
"r",
"=",
"r",
"-",
"alpha",
"*",
"p",
"normr",
"=",
"norm",
"(",
"r",
")",
"if",
"residuals",
"is",
"not",
"None",
":",
"residuals",
".",
"append",
"(",
"normr",
")",
"if",
"callback",
"is",
"not",
"None",
":",
"callback",
"(",
"x",
")",
"if",
"normr",
"<",
"tol",
":",
"return",
"(",
"postprocess",
"(",
"x",
")",
",",
"0",
")",
"if",
"iter",
"==",
"maxiter",
":",
"return",
"(",
"postprocess",
"(",
"x",
")",
",",
"iter",
")"
] | 29.676056
| 21.028169
|
def has_children(self):
"""
Checks if there are children tab widgets.
:return: True if there is at least one tab in the children tab widget.
"""
for splitter in self.child_splitters:
if splitter.has_children():
return splitter
return self.main_tab_widget.count() != 0
|
[
"def",
"has_children",
"(",
"self",
")",
":",
"for",
"splitter",
"in",
"self",
".",
"child_splitters",
":",
"if",
"splitter",
".",
"has_children",
"(",
")",
":",
"return",
"splitter",
"return",
"self",
".",
"main_tab_widget",
".",
"count",
"(",
")",
"!=",
"0"
] | 37.222222
| 9.666667
|
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: AssignedAddOnContext for this AssignedAddOnInstance
:rtype: twilio.rest.api.v2010.account.incoming_phone_number.assigned_add_on.AssignedAddOnContext
"""
if self._context is None:
self._context = AssignedAddOnContext(
self._version,
account_sid=self._solution['account_sid'],
resource_sid=self._solution['resource_sid'],
sid=self._solution['sid'],
)
return self._context
|
[
"def",
"_proxy",
"(",
"self",
")",
":",
"if",
"self",
".",
"_context",
"is",
"None",
":",
"self",
".",
"_context",
"=",
"AssignedAddOnContext",
"(",
"self",
".",
"_version",
",",
"account_sid",
"=",
"self",
".",
"_solution",
"[",
"'account_sid'",
"]",
",",
"resource_sid",
"=",
"self",
".",
"_solution",
"[",
"'resource_sid'",
"]",
",",
"sid",
"=",
"self",
".",
"_solution",
"[",
"'sid'",
"]",
",",
")",
"return",
"self",
".",
"_context"
] | 43.0625
| 21.5625
|
def _set_l2traceroute(self, v, load=False):
"""
Setter method for l2traceroute, mapped from YANG variable /brocade_trilloam_rpc/l2traceroute (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_l2traceroute is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_l2traceroute() directly.
YANG Description: Trace a TRILL route from the provided host-source-mac to host-dest-mac
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=l2traceroute.l2traceroute, is_leaf=True, yang_name="l2traceroute", rest_name="l2traceroute", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'l2traceroute-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-trilloam', defining_module='brocade-trilloam', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """l2traceroute must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=l2traceroute.l2traceroute, is_leaf=True, yang_name="l2traceroute", rest_name="l2traceroute", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'l2traceroute-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-trilloam', defining_module='brocade-trilloam', yang_type='rpc', is_config=True)""",
})
self.__l2traceroute = t
if hasattr(self, '_set'):
self._set()
|
[
"def",
"_set_l2traceroute",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"l2traceroute",
".",
"l2traceroute",
",",
"is_leaf",
"=",
"True",
",",
"yang_name",
"=",
"\"l2traceroute\"",
",",
"rest_name",
"=",
"\"l2traceroute\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"False",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'hidden'",
":",
"u'rpccmd'",
",",
"u'actionpoint'",
":",
"u'l2traceroute-action-point'",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-trilloam'",
",",
"defining_module",
"=",
"'brocade-trilloam'",
",",
"yang_type",
"=",
"'rpc'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"l2traceroute must be of a type compatible with rpc\"\"\"",
",",
"'defined-type'",
":",
"\"rpc\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=l2traceroute.l2traceroute, is_leaf=True, yang_name=\"l2traceroute\", rest_name=\"l2traceroute\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'l2traceroute-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-trilloam', defining_module='brocade-trilloam', yang_type='rpc', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__l2traceroute",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | 71.208333
| 35.416667
|
def requeue(rq, ctx, all, job_ids):
"Requeue failed jobs."
return ctx.invoke(
rq_cli.requeue,
all=all,
job_ids=job_ids,
**shared_options(rq)
)
|
[
"def",
"requeue",
"(",
"rq",
",",
"ctx",
",",
"all",
",",
"job_ids",
")",
":",
"return",
"ctx",
".",
"invoke",
"(",
"rq_cli",
".",
"requeue",
",",
"all",
"=",
"all",
",",
"job_ids",
"=",
"job_ids",
",",
"*",
"*",
"shared_options",
"(",
"rq",
")",
")"
] | 22.375
| 17.625
|
def view(self, request, **kwargs):
'''
allow a file to be viewed as opposed to download. This is particularly needed when a video file is stored
in the fileservice and user wants to be able to use a view the video as opposed to having to download it
first. It passes the serving of the file to nginx/apache which will return all the proper headers allowing,
say, html5's video viewer's 'seek' indicator/knob to work. Otherwise the video is only played sequentially
Note that nginx/apache need to be configured accordingly. nginx for example:
location /var/lib/geoserver_data/file-service-store/ {
# forces requests to be authorized
internal;
alias /var/lib/geoserver_data/file-service-store/;
}
for apache, need to install xsendfile module, enable it, set the path and then
XSendFile on
XSendFilePath /var/lib/geoserver_data/file-service-store
example use:
/fileservice/view/med.mp4
or
/fileservice/med.mp4/view
Note that media players tend to require the route to end with the filename like /fileservice/view/med.mp4
'''
# method check to avoid bad requests
self.method_check(request, allowed=['get'])
# Must be done otherwise endpoint will be wide open
self.is_authenticated(request)
response = None
file_item_name = kwargs.get('name', None)
if file_item_name:
mime = MimeTypes()
url = urllib.pathname2url(file_item_name)
mime_type = mime.guess_type(url)
response = HttpResponse(content_type=mime_type[0])
file_with_route = smart_str('{}{}'.format(helpers.get_fileservice_dir(), file_item_name))
# apache header
response['X-Sendfile'] = file_with_route
# nginx header
response['X-Accel-Redirect'] = file_with_route
if not response:
response = self.create_response(request, {'status': 'filename not specified'})
return response
|
[
"def",
"view",
"(",
"self",
",",
"request",
",",
"*",
"*",
"kwargs",
")",
":",
"# method check to avoid bad requests",
"self",
".",
"method_check",
"(",
"request",
",",
"allowed",
"=",
"[",
"'get'",
"]",
")",
"# Must be done otherwise endpoint will be wide open",
"self",
".",
"is_authenticated",
"(",
"request",
")",
"response",
"=",
"None",
"file_item_name",
"=",
"kwargs",
".",
"get",
"(",
"'name'",
",",
"None",
")",
"if",
"file_item_name",
":",
"mime",
"=",
"MimeTypes",
"(",
")",
"url",
"=",
"urllib",
".",
"pathname2url",
"(",
"file_item_name",
")",
"mime_type",
"=",
"mime",
".",
"guess_type",
"(",
"url",
")",
"response",
"=",
"HttpResponse",
"(",
"content_type",
"=",
"mime_type",
"[",
"0",
"]",
")",
"file_with_route",
"=",
"smart_str",
"(",
"'{}{}'",
".",
"format",
"(",
"helpers",
".",
"get_fileservice_dir",
"(",
")",
",",
"file_item_name",
")",
")",
"# apache header",
"response",
"[",
"'X-Sendfile'",
"]",
"=",
"file_with_route",
"# nginx header",
"response",
"[",
"'X-Accel-Redirect'",
"]",
"=",
"file_with_route",
"if",
"not",
"response",
":",
"response",
"=",
"self",
".",
"create_response",
"(",
"request",
",",
"{",
"'status'",
":",
"'filename not specified'",
"}",
")",
"return",
"response"
] | 43.659574
| 28.425532
|
def sanitize_dict(input_dict):
r"""
Given a nested dictionary, ensures that all nested dicts are normal
Python dicts. This is necessary for pickling, or just converting
an 'auto-vivifying' dict to something that acts normal.
"""
plain_dict = dict()
for key in input_dict.keys():
value = input_dict[key]
if hasattr(value, 'keys'):
plain_dict[key] = sanitize_dict(value)
else:
plain_dict[key] = value
return plain_dict
|
[
"def",
"sanitize_dict",
"(",
"input_dict",
")",
":",
"plain_dict",
"=",
"dict",
"(",
")",
"for",
"key",
"in",
"input_dict",
".",
"keys",
"(",
")",
":",
"value",
"=",
"input_dict",
"[",
"key",
"]",
"if",
"hasattr",
"(",
"value",
",",
"'keys'",
")",
":",
"plain_dict",
"[",
"key",
"]",
"=",
"sanitize_dict",
"(",
"value",
")",
"else",
":",
"plain_dict",
"[",
"key",
"]",
"=",
"value",
"return",
"plain_dict"
] | 34.571429
| 13.5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.