text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def set_product(self, product_name, product_version):
    """Record the end-user product name and version for identification.

    Per 'FinTS Financial Transaction Services, Schnittstellenspezifikation,
    Formals', version 3.0, section C.3.1.3, these fields should describe
    the end-user product, *NOT* the FinTS library.
    """
    self.product_name, self.product_version = product_name, product_version
"def",
"set_product",
"(",
"self",
",",
"product_name",
",",
"product_version",
")",
":",
"self",
".",
"product_name",
"=",
"product_name",
"self",
".",
"product_version",
"=",
"product_version"
] | 53.666667 | 22.333333 |
def __msg_curse_header(self, ret, process_sort_key, args=None):
    """Build the process-list header and append its columns to ret."""
    sort_style = 'SORT'
    # NOTE(review): args=None would raise AttributeError just below —
    # callers appear to always pass a populated args namespace; confirm.
    # The CPU column label depends on Irix mode and the logical core count.
    if args.disable_irix and 0 < self.nb_log_core < 10:
        cpu_label = 'CPU%/' + str(self.nb_log_core)
    elif args.disable_irix and self.nb_log_core != 0:
        cpu_label = 'CPU%/C'
    else:
        cpu_label = 'CPU%'

    def style_for(sort_key):
        # Highlight the column that is currently used as the sort key.
        return sort_style if process_sort_key == sort_key else 'DEFAULT'

    ret.append(self.curse_add_line(
        self.layout_header['cpu'].format(cpu_label), style_for('cpu_percent')))
    ret.append(self.curse_add_line(
        self.layout_header['mem'].format('MEM%'), style_for('memory_percent')))
    ret.append(self.curse_add_line(
        self.layout_header['virt'].format('VIRT'), optional=True))
    ret.append(self.curse_add_line(
        self.layout_header['res'].format('RES'), optional=True))
    ret.append(self.curse_add_line(
        self.layout_header['pid'].format('PID', width=self.__max_pid_size())))
    ret.append(self.curse_add_line(
        self.layout_header['user'].format('USER'), style_for('username')))
    ret.append(self.curse_add_line(
        self.layout_header['time'].format('TIME+'), style_for('cpu_times'),
        optional=True))
    ret.append(self.curse_add_line(self.layout_header['thread'].format('THR')))
    ret.append(self.curse_add_line(self.layout_header['nice'].format('NI')))
    ret.append(self.curse_add_line(self.layout_header['status'].format('S')))
    ret.append(self.curse_add_line(
        self.layout_header['ior'].format('R/s'), style_for('io_counters'),
        optional=True, additional=True))
    ret.append(self.curse_add_line(
        self.layout_header['iow'].format('W/s'), style_for('io_counters'),
        optional=True, additional=True))
    ret.append(self.curse_add_line(
        self.layout_header['command'].format('Command'), style_for('name')))
"def",
"__msg_curse_header",
"(",
"self",
",",
"ret",
",",
"process_sort_key",
",",
"args",
"=",
"None",
")",
":",
"sort_style",
"=",
"'SORT'",
"if",
"args",
".",
"disable_irix",
"and",
"0",
"<",
"self",
".",
"nb_log_core",
"<",
"10",
":",
"msg",
"=",
... | 65.228571 | 29.285714 |
def vocab_convert(vocab, standard, key=''):
    """
    Convert a MagIC database term (method code, geologic_type, place, etc.)
    to the equivalent term in another standard.

    May not be comprehensive for each standard. Terms are added to standards
    as people need them and may not be up-to-date.

    Parameters
    ----------
    vocab : str
        The MagIC vocabulary term to convert.
    standard : str
        Name of the target standard, case-insensitive (e.g. 'GEOMAGIA').
    key : str, optional
        Can be used to distinguish vocab terms that exist in two different
        lists. (Currently unused; kept for backward compatibility.)

    Returns
    -------
    str
        Value of the MagIC vocab in the standard requested; a "Fail:..."
        sentinel for null/whitespace input; or the original ``vocab``
        unchanged (with a message printed) if no mapping is found.

    Example
    -------
    vocab_convert('Egypt', 'GEOMAGIA') will return '1'
    """
    places_to_geomagia = {
        'Egypt': "1",
        'Japan': "2",
        'France': "3",
        'Ukraine': "5",
        'India': "6",
        'China': "7",
        'Finland': "8",
        'Greece': "9",
        'Italy': "11",
        'Switzerland': "12",
        'Bulgaria': "13",
        'Syria': "14",
        'Hungary': "15",
        'East Pacific Ridge': "17",
        'Hawaii': "18",
        'Morocco': "19",
        'Australia': "20",
        'Georgia': "21",
        'Azerbaijan': "22",
        'Spain': "24",
        'England': "25",
        'Czech Republic': "26",
        'Mexico': "27",
        'Iraq': "28",
        'Israel': "29",
        'Iran': "30",
        'Uzbekistan': "31",
        'Turkmenistan': "32",
        'Mongolia': "33",
        'Iceland': "34",
        'New Zealand': "35",
        'Amsterdam Island': "36",
        'Guadeloupe': "37",
        'Mid Atlantic Ridge': "38",
        'Austria': "39",
        'Belgium': "40",
        'Romania': "41",
        'Guatemala': "42",
        'El Salvador': "43",
        'Canary Islands': "45",
        'Moldova': "46",
        'Latvia': "47",
        'Lithuania': "48",
        'Russia': "49",
        'Germany': "51",
        'Martinique': "52",
        'Netherlands': "53",
        'Turkey': "54",
        'Denmark': "55",
        'Cameroon': "56",
        'Honduras': "57",
        'Jordan': "58",
        'Brazil': "59",
        'Estonia': "61",
        'Sweden': "62",
        'Peru': "63",
        'Bolivia': "64",
        'Ecuador': "65",
        'Ontario': "66",
        'New Mexico': "67",
        'Arizona': "68",
        'California': "69",
        'Colorado': "70",
        'Utah': "71",
        'Washington': "72",
        'Oregon': "73",
        'British Columbia': "74",
        'Idaho': "75",
        'Arkansas': "76",
        'Tennessee': "78",
        'Serbia': "79",
        'Kosovo': "80",
        'Portugal': "81",
        'Thailand': "82",
        'South Korea': "83",
        'Kazakhstan': "84",
        'Nebraska': "85",
        'La Reunion': "86",
        'Cyprus': "87",
        'Papua New Guinea': "88",
        'Vanuatu': "89",
        'Fiji': "90",
        'Argentina': "91",
        'Tunisia': "92",
        'Mali': "93",
        'Senegal': "95",
        'Alaska': "96",
        'North Atlantic': "97",
        'South Atlantic': "98",
        'Beaufort Sea': "99",
        'Chukchi Sea': "100",
        'Kyrgyzstan': "101",
        'Indonesia': "102",
        'Azores': "103",
        'Quebec': "104",
        'Norway': "105",
        'Northern Ireland': "106",
        'Wales': "107",
        'Scotland': "108",
        'Virginia': "109",
        'North West Pacific': "110",
        'Mediterranean': "111",
        'Slovakia': "121",
        'Poland': "124"
    }
    geologic_types_to_geomagia = {
        "Baked Clay": "2",
        "Tile": "3",
        "Lava": "4",
        "Pottery": "5",
        "Sun Dried Object": "6",
        "Porcelain": "7",
        "Ceramic": "8",
        "Kiln": "9",
        "Oven or Hearth (GEOMAGIA Only)": "10",
        "Mixed Archeological Objects": "11",
        "Slag": "12",
        "Baked Rock": "13",
        "Fresco": "14",
        "Mosaic": "15",
        "Wall": "16",
        "Bath": "17",
        "Burnt Floor": "18",
        "Funeral Pyre": "19",
        "Hypocaust": "20",
        "Burnt Pit": "21",
        "Bell Mould": "22",
        "Smoking Chamber": "23",
        "Baked Mud": "24",
        "Volcanic Ash": "25",
        "Burnt Structure": "26",
        "Burnt Castle Wall": "27",
        "Charcoal Pile": "28",
        "Burnt Earth": "29",
        "Vitrified Object": "30",
        "Unbaked Sediment": "31",
        "Tuyere": "32",
        "Sauna": "33",
        "Pit Structure": "35",
        "Room": "36",
        "Pit House": "37",
        "Salt Kiln": "38",
        "Burnt Sediment": "39",
        "Archeological Ashes": "40",
        "Volcanic Other or Undefined (GEOMAGIA Only)": "41",
        "Mural": "42",
        "Vitrified Stone": "43",
        "Soil": "44",
        "Kamadogu": "45",
        "Foundry": "46",
        "Obsidian": "47",
        "Chert": "48",
        "Burnt daub": "49",
        "Amphora": "50",
        "Granite": "51",
        "Volcanic Glass": "52",
        "Furnace": "53",
        "Roasting Pit": "54"
    }
    # Some of the simple method code mappings are done here
    method_codes_to_geomagia = {
        "GM-NO": "0",
        "GM-CC-ARCH": "101",
        "GM-C14-CAL": "102",
        "GM-C14-UNCAL": "103",
        "GM-LUM-TH": "104",
        "GM-HIST": "105",
        "GM-PMAG-ARCH": "106",
        "GM-ARAR": "107",
        "GM-CC-TEPH": "108",
        "GM-CC-STRAT": "109",
        "GM-CC-REL": "110",
        "GM-DENDRO": "111",
        "GM-RATH": "112",
        "GM-KAR": "113",
        "GM-UTH": "114",
        "GM-FT": "115",
        "GM-C14-AMS": "116",
        "GM-LUM-OS": "117",
        "GM-HE3": "118",
        "GM-VARVE": "119",
        "GM-CS137": "120",
        "GM-USD-PB210": "121",
        "GM-C14-BETA": "122",
        "GM-O18": "123",
        "GM-PA": "124"
    }
    standard = standard.lower()
    standard_value = ""
    if standard == "geomagia":
        # The three mappings have disjoint key sets, so at most one hits.
        for mapping in (places_to_geomagia, geologic_types_to_geomagia,
                        method_codes_to_geomagia):
            if vocab in mapping:
                standard_value = mapping[vocab]
    if standard_value == "":
        if vocab == '':
            standard_value = "Fail:vocab_to_convert_is_null"
        elif vocab.isspace():
            # BUG FIX: this used to read `vocab.isspace() or vocab != ''`,
            # which made every unmatched non-empty vocab report
            # "all whitespace" and left the fallback branch unreachable.
            standard_value = "Fail:vocab_to_convert_is_all_whitespace"
        else:
            print("pmag.vocab_convert:Magic vocab '", vocab, "' not found for standard ", standard, sep='')
            return vocab
    return standard_value
"def",
"vocab_convert",
"(",
"vocab",
",",
"standard",
",",
"key",
"=",
"''",
")",
":",
"places_to_geomagia",
"=",
"{",
"'Egypt'",
":",
"\"1\"",
",",
"'Japan'",
":",
"\"2\"",
",",
"'France'",
":",
"\"3\"",
",",
"'Ukraine'",
":",
"\"5\"",
",",
"'India'",
... | 40.973451 | 11.345133 |
def findFirst(self, tableClass, comparison=None,
              offset=None, sort=None, default=None):
    """
    Search for an item with columns in the database that match the passed
    comparison, offset and sort, returning the first match if one is found,
    or the passed default (None if none is passed) if one is not found.

    Usage::

        s.findFirst(tableClass [, query arguments except 'limit'])

    Example::

        class YourItemType(Item):
            a = integer()
            b = text()
            c = integer()
        ...
        it = s.findFirst(YourItemType,
                         AND(YourItemType.a == 1,
                             YourItemType.b == u'2'),
                         sort=YourItemType.c.descending)
    """
    # Query with limit=1 and hand back the first hit, if any.
    matches = iter(self.query(tableClass, comparison, 1, offset, sort))
    return next(matches, default)
"def",
"findFirst",
"(",
"self",
",",
"tableClass",
",",
"comparison",
"=",
"None",
",",
"offset",
"=",
"None",
",",
"sort",
"=",
"None",
",",
"default",
"=",
"None",
")",
":",
"limit",
"=",
"1",
"for",
"item",
"in",
"self",
".",
"query",
"(",
"tab... | 33.892857 | 22.392857 |
def read_cstring(self) -> bool:
    """
    Read a double quoted string.

    Read following BNF rule else return False::

        '"' -> ['\\' #char | ~'\\'] '"'
    """
    self._stream.save_context()
    begin = self._stream.index
    if self.read_char("\"") and self.read_until("\"", "\\"):
        # The slice is read for parity with the original implementation;
        # its value is not used here (only the cursor movement matters).
        _ = self._stream[begin:self._stream.index]
        return self._stream.validate_context()
    return self._stream.restore_context()
"def",
"read_cstring",
"(",
"self",
")",
"->",
"bool",
":",
"self",
".",
"_stream",
".",
"save_context",
"(",
")",
"idx",
"=",
"self",
".",
"_stream",
".",
"index",
"if",
"self",
".",
"read_char",
"(",
"\"\\\"\"",
")",
"and",
"self",
".",
"read_until",... | 29.857143 | 11.714286 |
def search_dashboard_deleted_for_facets(self, **kwargs):  # noqa: E501
    """Lists the values of one or more facets over the customer's deleted dashboards  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.search_dashboard_deleted_for_facets(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param FacetsSearchRequestContainer body:
    :return: ResponseContainerFacetsResponseContainer
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths return the delegate's result directly:
    # in async mode that is the request thread, in sync mode the data.
    delegate = self.search_dashboard_deleted_for_facets_with_http_info
    return delegate(**kwargs)  # noqa: E501
"def",
"search_dashboard_deleted_for_facets",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"search_... | 47.666667 | 21.809524 |
def currentFolder(self, value):
    """gets/sets the current folder (folder id)"""
    # Already on this folder? Nothing to do.
    # NOTE(review): the stored title is not lowercased before comparing —
    # this assumes titles are stored lowercase; confirm before changing.
    if value is not None and value.lower() == self._currentFolder['title']:
        return
    root_folder = {
        'title': 'root',
        'id': None,
        'created' : None,
        'username' : None
    }
    if value is None:
        self._location = self.root
        self._currentFolder = root_folder
        self.__init()
    elif value == "/" or value.lower() == 'root':
        # NOTE(review): this branch assigns self.location (no underscore)
        # while the branch above assigns self._location — preserved as-is
        # to keep behavior identical; verify which one is intended.
        self.location = self.root
        self._currentFolder = root_folder
        self.__init()
    else:
        # Linear scan for a folder whose title matches case-insensitively.
        for folder in self.folders:
            if 'title' in folder and folder['title'].lower() == value.lower():
                self._location = "%s/%s" % (self.root, folder['id'])
                self._currentFolder = folder
                self.__init(folder['title'])
                break
"def",
"currentFolder",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"not",
"None",
"and",
"value",
".",
"lower",
"(",
")",
"==",
"self",
".",
"_currentFolder",
"[",
"'title'",
"]",
":",
"return",
"if",
"value",
"is",
"None",
":",
"self"... | 37.766667 | 11.766667 |
def file_md5(self, file_name):
    """Compute the MD5 hash of a file and return it as a hex string.

    :param file_name: path of the file to hash
    :return: hexadecimal MD5 digest of the file's contents
    """
    digest = hashlib.md5()
    with open(file_name, "rb") as f:
        # Hash in fixed-size chunks so large files are not read fully
        # into memory (the previous version slurped the whole file).
        for chunk in iter(lambda: f.read(65536), b""):
            digest.update(chunk)
    return digest.hexdigest()
"def",
"file_md5",
"(",
"self",
",",
"file_name",
")",
":",
"with",
"open",
"(",
"file_name",
",",
"\"rb\"",
")",
"as",
"f",
":",
"file_contents",
"=",
"f",
".",
"read",
"(",
")",
"file_hash",
"=",
"hashlib",
".",
"md5",
"(",
"file_contents",
")",
".... | 38.5 | 8.666667 |
def command(self, rule, **options):
    """\
    direct=False, override=True, inject=False, flags=0
    """
    # Fill in any options the caller did not supply.
    defaults = {"direct": False, "override": True, "inject": False, "flags": 0}
    for name, default in defaults.items():
        options.setdefault(name, default)
    # Unless 'direct' is requested, run the rule through the regex helper.
    pattern = rule if options["direct"] else self.regexy(rule)
    regex = re.compile(pattern, flags=options["flags"])
    self.handlers.setdefault(regex, [])

    def handler(f):
        # noop gets empty options; real handlers carry the full option dict.
        f.options = {} if f == noop else options
        if options["override"]:
            self.handlers[regex] = [f]
        else:
            self.handlers[regex].append(f)
        f.no_args = self.no_args(f)
        return f
    return handler
"def",
"command",
"(",
"self",
",",
"rule",
",",
"*",
"*",
"options",
")",
":",
"options",
".",
"setdefault",
"(",
"\"direct\"",
",",
"False",
")",
"options",
".",
"setdefault",
"(",
"\"override\"",
",",
"True",
")",
"options",
".",
"setdefault",
"(",
... | 33.458333 | 8.75 |
def clear_system_configuration(self):
    """Clear the BIOS/UEFI configuration.

    Looks up the Redfish '#Bios.ResetBios' action on the BIOS resource
    and invokes its target URL.

    :raises Exception: if the BIOS resource exposes no reset action.
    """
    biosinfo = self._do_web_request(self._biosurl)
    reset_target = (biosinfo.get('Actions', {})
                    .get('#Bios.ResetBios', {})
                    .get('target', ''))
    if not reset_target:
        raise Exception('BIOS reset not detected on this system')
    self._do_web_request(reset_target, {'Action': 'Bios.ResetBios'})
"def",
"clear_system_configuration",
"(",
"self",
")",
":",
"biosinfo",
"=",
"self",
".",
"_do_web_request",
"(",
"self",
".",
"_biosurl",
")",
"rb",
"=",
"biosinfo",
".",
"get",
"(",
"'Actions'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'#Bios.ResetBios'",
... | 39.5 | 16.4 |
def associate_failure_node(self, parent, child=None, **kwargs):
    """Add a node to run on failure.

    =====API DOCS=====
    Add a node to run on failure.

    :param parent: Primary key of parent node to associate failure node to.
    :type parent: int
    :param child: Primary key of child node to be associated.
    :type child: int
    :param `**kwargs`: Fields used to create child node if ``child`` is not provided.
    :returns: Dictionary of only one key "changed", which indicates whether the association succeeded.
    :rtype: dict

    =====API DOCS=====
    """
    # Delegate to the shared assoc-or-create helper with the 'failure' relation.
    return self._assoc_or_create('failure', parent, child, **kwargs)
"def",
"associate_failure_node",
"(",
"self",
",",
"parent",
",",
"child",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_assoc_or_create",
"(",
"'failure'",
",",
"parent",
",",
"child",
",",
"*",
"*",
"kwargs",
")"
] | 40.176471 | 25.647059 |
def _translate_sdiv(self, oprnd1, oprnd2, oprnd3):
    """Return a formula representation of a signed DIV instruction.
    """
    assert oprnd1.size and oprnd2.size and oprnd3.size
    assert oprnd1.size == oprnd2.size

    dividend = self._translate_src_oprnd(oprnd1)
    divisor = self._translate_src_oprnd(oprnd2)
    dst_var, dst_constrs = self._translate_dst_oprnd(oprnd3)

    src_size, dst_size = oprnd1.size, oprnd3.size
    if dst_size > src_size:
        # Widen both operands first so the quotient fits the destination.
        result = (smtfunction.sign_extend(dividend, dst_size) //
                  smtfunction.sign_extend(divisor, dst_size))
    elif dst_size < src_size:
        # Divide at source width, then truncate to the destination width.
        result = smtfunction.extract(dividend // divisor, 0, dst_size)
    else:
        result = dividend // divisor

    return [dst_var == result] + dst_constrs
"def",
"_translate_sdiv",
"(",
"self",
",",
"oprnd1",
",",
"oprnd2",
",",
"oprnd3",
")",
":",
"assert",
"oprnd1",
".",
"size",
"and",
"oprnd2",
".",
"size",
"and",
"oprnd3",
".",
"size",
"assert",
"oprnd1",
".",
"size",
"==",
"oprnd2",
".",
"size",
"op... | 39.809524 | 18.285714 |
def load_model(location):
    """
    Load any Turi Create model that was previously saved.

    This function assumes the model (can be any model) was previously saved in
    Turi Create model format with model.save(filename).

    Parameters
    ----------
    location : string
        Location of the model to load. Can be a local path or a remote URL.
        Because models are saved as directories, there is no file extension.

    Returns
    ----------
    The loaded model object; its concrete type matches whatever was saved.

    Examples
    ----------
    >>> model.save('my_model_file')
    >>> loaded_model = tc.load_model('my_model_file')
    """
    # Check if the location is a dir_archive, if not, use glunpickler to load
    # as pure python model
    # If the location is a http location, skip the check, and directly proceed
    # to load model as dir_archive. This is because
    # 1) exists() does not work with http protocol, and
    # 2) GLUnpickler does not support http
    protocol = file_util.get_protocol(location)
    dir_archive_exists = False
    if protocol == '':
        model_path = file_util.expand_full_path(location)
        dir_archive_exists = file_util.exists(os.path.join(model_path, 'dir_archive.ini'))
    else:
        model_path = location
        if protocol in ['http', 'https']:
            dir_archive_exists = True
        else:
            import posixpath
            dir_archive_exists = file_util.exists(posixpath.join(model_path, 'dir_archive.ini'))
    if not dir_archive_exists:
        raise IOError("Directory %s does not exist" % location)
    # Load the raw saved state through the unity backend.
    _internal_url = _make_internal_url(location)
    saved_state = glconnect.get_unity().load_model(_internal_url)
    saved_state = _wrap_function_return(saved_state)
    # The archive version could be both bytes/unicode
    key = u'archive_version'
    archive_version = saved_state[key] if key in saved_state else saved_state[key.encode()]
    # Dispatch on the archive format version recorded in the saved state.
    if archive_version < 0:
        raise ToolkitError("File does not appear to be a Turi Create model.")
    elif archive_version > 1:
        raise ToolkitError("Unable to load model.\n\n"
                           "This model looks to have been saved with a future version of Turi Create.\n"
                           "Please upgrade Turi Create before attempting to load this model file.")
    elif archive_version == 1:
        # Current format: dispatch on the registered model name.
        name = saved_state['model_name'];
        if name in MODEL_NAME_MAP:
            cls = MODEL_NAME_MAP[name]
            if 'model' in saved_state:
                # this is a native model
                return cls(saved_state['model'])
            else:
                # this is a CustomModel
                model_data = saved_state['side_data']
                model_version = model_data['model_version']
                del model_data['model_version']
                return cls._load_version(model_data, model_version)
        elif hasattr(_extensions, name):
            return saved_state["model"]
        else:
            raise ToolkitError("Unable to load model of name '%s'; model name not registered." % name)
    else:
        # very legacy model format. Attempt pickle loading
        import sys
        sys.stderr.write("This model was saved in a legacy model format. Compatibility cannot be guaranteed in future versions.\n")
        if _six.PY3:
            raise ToolkitError("Unable to load legacy model in Python 3.\n\n"
                               "To migrate a model, try loading it using Turi Create 4.0 or\n"
                               "later in Python 2 and then re-save it. The re-saved model should\n"
                               "work in Python 3.")
        if 'graphlab' not in sys.modules:
            sys.modules['graphlab'] = sys.modules['turicreate']
            # backward compatibility. Otherwise old pickles will not load
        sys.modules["turicreate_util"] = sys.modules['turicreate.util']
        sys.modules["graphlab_util"] = sys.modules['turicreate.util']
        # More backwards compatibility with the turicreate namespace code.
        for k, v in list(sys.modules.items()):
            if 'turicreate' in k:
                sys.modules[k.replace('turicreate', 'graphlab')] = v
        #legacy loader
        import pickle
        model_wrapper = pickle.loads(saved_state[b'model_wrapper'])
return model_wrapper(saved_state[b'model_base']) | [
"def",
"load_model",
"(",
"location",
")",
":",
"# Check if the location is a dir_archive, if not, use glunpickler to load",
"# as pure python model",
"# If the location is a http location, skip the check, and directly proceed",
"# to load model as dir_archive. This is because",
"# 1) exists() d... | 44.442105 | 23.263158 |
def apply(self, doc):
    """
    Generate MentionCells from a Document by parsing all of its Cells.

    :param doc: The ``Document`` to parse.
    :type doc: ``Document``
    :raises TypeError: If the input doc is not of type ``Document``.
    """
    # Guard: this mention extractor only understands Document contexts.
    if not isinstance(doc, Document):
        raise TypeError(
            "Input Contexts to MentionCells.apply() must be of type Document"
        )
    # Lazily wrap every cell of the document as a temporary mention.
    yield from (TemporaryCellMention(cell) for cell in doc.cells)
"def",
"apply",
"(",
"self",
",",
"doc",
")",
":",
"if",
"not",
"isinstance",
"(",
"doc",
",",
"Document",
")",
":",
"raise",
"TypeError",
"(",
"\"Input Contexts to MentionCells.apply() must be of type Document\"",
")",
"for",
"cell",
"in",
"doc",
".",
"cells",
... | 33.533333 | 18.333333 |
def connect_combo_data(instance, prop, widget):
    """
    Connect a callback property with a QComboBox widget based on the userData.

    Parameters
    ----------
    instance : object
        The class instance that the callback property is attached to
    prop : str
        The name of the callback property
    widget : QComboBox
        The combo box to connect.

    See Also
    --------
    connect_combo_text: connect a callback property with a QComboBox widget based on the text.
    """
    def push_to_widget(value):
        # Map the property value to a combo index; None means "no selection".
        try:
            index = _find_combo_data(widget, value)
        except ValueError:
            if value is not None:
                raise
            index = -1
        widget.setCurrentIndex(index)

    def pull_from_widget(index):
        # Index -1 (no selection) maps back to None on the property.
        new_value = None if index == -1 else widget.itemData(index)
        setattr(instance, prop, new_value)

    add_callback(instance, prop, push_to_widget)
    widget.currentIndexChanged.connect(pull_from_widget)
    # Initialize the widget from the property's current value.
    push_to_widget(getattr(instance, prop))
"def",
"connect_combo_data",
"(",
"instance",
",",
"prop",
",",
"widget",
")",
":",
"def",
"update_widget",
"(",
"value",
")",
":",
"try",
":",
"idx",
"=",
"_find_combo_data",
"(",
"widget",
",",
"value",
")",
"except",
"ValueError",
":",
"if",
"value",
... | 26.921053 | 20.552632 |
def is_correct(self):
    """Check if this object configuration is correct ::

    * Check if dateranges of timeperiod are valid
    * Call our parent class is_correct checker

    :return: True if the configuration is correct, otherwise False if at least one daterange
    is not correct
    :rtype: bool
    """
    state = True
    # Validate every daterange; keep going so all problems get reported.
    for daterange in self.dateranges:
        daterange_ok = daterange.is_correct()
        if not daterange_ok:
            self.add_error("[timeperiod::%s] invalid daterange '%s'"
                           % (self.get_name(), daterange))
        state &= daterange_ok
    # Warn about entries that could not be parsed into dateranges.
    for entry in self.invalid_entries:
        self.add_error("[timeperiod::%s] invalid entry '%s'" % (self.get_name(), entry))
    return super(Timeperiod, self).is_correct() and state
"def",
"is_correct",
"(",
"self",
")",
":",
"state",
"=",
"True",
"for",
"daterange",
"in",
"self",
".",
"dateranges",
":",
"good",
"=",
"daterange",
".",
"is_correct",
"(",
")",
"if",
"not",
"good",
":",
"self",
".",
"add_error",
"(",
"\"[timeperiod::%s... | 37.043478 | 20.782609 |
def _connect_pipeline(self, pipeline, required_outputs, workflow,
                          subject_inds, visit_inds, filter_array, force=False):
        """
        Connects a pipeline to a overarching workflow that sets up iterators
        over subjects|visits present in the repository (if required) and
        repository source and sink nodes

        Parameters
        ----------
        pipeline : Pipeline
            The pipeline to connect
        required_outputs : set[str] | None
            The outputs required to be produced by this pipeline. If None all
            are deemed to be required
        workflow : nipype.pipeline.engine.Workflow
            The overarching workflow to connect the pipeline to
        subject_inds : dct[str, int]
            A mapping of subject ID to row index in the filter array
        visit_inds : dct[str, int]
            A mapping of visit ID to column index in the filter array
        filter_array : 2-D numpy.array[bool]
            A two-dimensional boolean array, where rows correspond to
            subjects and columns correspond to visits in the repository. True
            values represent a combination of subject & visit ID to include
            in the current round of processing. Note that if the 'force'
            flag is not set, sessions won't be reprocessed unless the
            save provenance doesn't match that of the given pipeline.
        force : bool | 'all'
            A flag to force the processing of all sessions in the filter
            array, regardless of whether the parameters|pipeline used
            to generate existing data matches the given pipeline

        Raises
        ------
        ArcanaNoRunRequiredException
            If no subject/visit sessions require processing for this
            pipeline.
        """
        if self.reprocess == 'force':
            force = True
        # Close-off construction of the pipeline and created, input and output
        # nodes and provenance dictionary
        pipeline.cap()
        # Prepend prerequisite pipelines to complete workflow if they need
        # to be (re)processed
        final_nodes = []
        # The array that represents the subject/visit pairs for which any
        # prerequisite pipeline will be (re)processed, and which therefore
        # needs to be included in the processing of the current pipeline. Row
        # indices correspond to subjects and column indices visits
        prqs_to_process_array = np.zeros((len(subject_inds), len(visit_inds)),
                                         dtype=bool)
        # The array that represents the subject/visit pairs for which any
        # prerequisite pipeline will be skipped due to missing inputs. Row
        # indices correspond to subjects and column indices visits
        prqs_to_skip_array = np.zeros((len(subject_inds), len(visit_inds)),
                                      dtype=bool)
        # Merge each prerequisite's process/skip masks into this pipeline's.
        for getter_name in pipeline.prerequisites:
            prereq = pipeline.study.pipeline(getter_name)
            if prereq.to_process_array.any():
                final_nodes.append(prereq.node('final'))
                prqs_to_process_array |= prereq.to_process_array
            prqs_to_skip_array |= prereq.to_skip_array
        # Get list of sessions that need to be processed (i.e. if
        # they don't contain the outputs of this pipeline)
        to_process_array, to_protect_array, to_skip_array = self._to_process(
            pipeline, required_outputs, prqs_to_process_array,
            prqs_to_skip_array, filter_array, subject_inds, visit_inds, force)
        # Store the arrays signifying which nodes to process, protect or skip
        # so they can be passed to downstream pipelines
        pipeline.to_process_array = to_process_array
        pipeline.to_protect_array = to_protect_array
        pipeline.to_skip_array = to_skip_array
        # Check to see if there are any sessions to process
        if not to_process_array.any():
            raise ArcanaNoRunRequiredException(
                "No sessions to process for '{}' pipeline"
                .format(pipeline.name))
        # Set up workflow to run the pipeline, loading and saving from the
        # repository
        workflow.add_nodes([pipeline._workflow])
        # If prerequisite pipelines need to be processed, connect their
        # "final" nodes to the initial node of this pipeline to ensure that
        # they are all processed before this pipeline is run.
        if final_nodes:
            prereqs = pipeline.add('prereqs', Merge(len(final_nodes)))
            for i, final_node in enumerate(final_nodes, start=1):
                workflow.connect(final_node, 'out', prereqs, 'in{}'.format(i))
        else:
            prereqs = None
        # Construct iterator structure over subjects and sessions to be
        # processed
        iter_nodes = self._iterate(pipeline, to_process_array, subject_inds,
                                   visit_inds)
        sources = {}
        # Loop through each frequency present in the pipeline inputs and
        # create a corresponding source node
        for freq in pipeline.input_frequencies:
            try:
                inputs = list(pipeline.frequency_inputs(freq))
            except ArcanaMissingDataException as e:
                raise ArcanaMissingDataException(
                    str(e) + ", which is required for pipeline '{}'".format(
                        pipeline.name))
            inputnode = pipeline.inputnode(freq)
            sources[freq] = source = pipeline.add(
                '{}_source'.format(freq),
                RepositorySource(
                    i.collection for i in inputs),
                inputs=({'prereqs': (prereqs, 'out')} if prereqs is not None
                        else {}))
            # Connect iter_nodes to source and input nodes
            for iterator in pipeline.iterators(freq):
                pipeline.connect(iter_nodes[iterator], iterator, source,
                                 iterator)
                pipeline.connect(source, iterator, inputnode,
                                 iterator)
            for input in inputs: # @ReservedAssignment
                pipeline.connect(source, input.suffixed_name,
                                 inputnode, input.name)
        deiter_nodes = {}
        def deiter_node_sort_key(it):
            """
            If there are two iter_nodes (i.e. both subject and visit ID) and
            one depends on the other (i.e. if the visit IDs per subject
            vary and vice-versa) we need to ensure that the dependent
            iterator is deiterated (joined) first.
            """
            return iter_nodes[it].itersource is None
        # Connect all outputs to the repository sink, creating a new sink for
        # each frequency level (i.e 'per_session', 'per_subject', 'per_visit',
        # or 'per_study')
        for freq in pipeline.output_frequencies:
            outputs = list(pipeline.frequency_outputs(freq))
            if pipeline.iterators(freq) - pipeline.iterators():
                raise ArcanaDesignError(
                    "Doesn't make sense to output '{}', which are of '{}' "
                    "frequency, when the pipeline only iterates over '{}'"
                    .format("', '".join(o.name for o in outputs), freq,
                            "', '".join(pipeline.iterators())))
            outputnode = pipeline.outputnode(freq)
            # Connect filesets/fields to sink to sink node, skipping outputs
            # that are study inputs
            to_connect = {o.suffixed_name: (outputnode, o.name)
                          for o in outputs if o.is_spec}
            # Connect iterators to sink node
            to_connect.update(
                {i: (iter_nodes[i], i) for i in pipeline.iterators()})
            # Connect checksums/values from sources to sink node in order to
            # save in provenance, joining where necessary
            for input_freq in pipeline.input_frequencies:
                checksums_to_connect = [
                    i.checksum_suffixed_name
                    for i in pipeline.frequency_inputs(input_freq)]
                if not checksums_to_connect:
                    # Rare case of a pipeline with no inputs only iter_nodes
                    # that will only occur in unittests in all likelihood
                    continue
                # Loop over iterators that need to be joined, i.e. that are
                # present in the input frequency but not the output frequency
                # and create join nodes
                source = sources[input_freq]
                for iterator in (pipeline.iterators(input_freq) -
                                 pipeline.iterators(freq)):
                    join = pipeline.add(
                        '{}_to_{}_{}_checksum_join'.format(
                            input_freq, freq, iterator),
                        IdentityInterface(
                            checksums_to_connect),
                        inputs={
                            tc: (source, tc) for tc in checksums_to_connect},
                        joinsource=iterator,
                        joinfield=checksums_to_connect)
                    source = join
                to_connect.update(
                    {tc: (source, tc) for tc in checksums_to_connect})
            # Add sink node
            sink = pipeline.add(
                '{}_sink'.format(freq),
                RepositorySink(
                    (o.collection for o in outputs), pipeline),
                inputs=to_connect)
            # "De-iterate" (join) over iterators to get back to single child
            # node by the time we connect to the final node of the pipeline Set
            # the sink and subject_id as the default deiterator if there are no
            # deiterates (i.e. per_study) or to use as the upstream node to
            # connect the first deiterator for every frequency
            deiter_nodes[freq] = sink # for per_study the "deiterator" == sink
            for iterator in sorted(pipeline.iterators(freq),
                                   key=deiter_node_sort_key):
                # Connect to previous deiterator or sink
                # NB: we only need to keep a reference to the last one in the
                # chain in order to connect with the "final" node, so we can
                # overwrite the entry in the 'deiter_nodes' dict
                deiter_nodes[freq] = pipeline.add(
                    '{}_{}_deiter'.format(freq, iterator),
                    IdentityInterface(
                        ['checksums']),
                    inputs={
                        'checksums': (deiter_nodes[freq], 'checksums')},
                    joinsource=iterator,
                    joinfield='checksums')
        # Create a final node, which is used to connect with downstream
        # pipelines
        pipeline.add(
            'final',
            Merge(
                len(deiter_nodes)),
            inputs={
                'in{}'.format(i): (di, 'checksums')
for i, di in enumerate(deiter_nodes.values(), start=1)}) | [
"def",
"_connect_pipeline",
"(",
"self",
",",
"pipeline",
",",
"required_outputs",
",",
"workflow",
",",
"subject_inds",
",",
"visit_inds",
",",
"filter_array",
",",
"force",
"=",
"False",
")",
":",
"if",
"self",
".",
"reprocess",
"==",
"'force'",
":",
"forc... | 52.371981 | 19.386473 |
def get_git_status(self):
        """
        Gets git and init versions and commits since the init version

        Collects the current git branch, the release tag recorded in the
        package's __init__ file, and the commits logged since that tag,
        then prints a short status summary.
        """
        ## get git branch
        self._get_git_branch()
        ## get tag in the init file
        self._get_init_release_tag()
        ## get log commits since <tag>
        # Fails (and re-raises with guidance) when no git tag exists for
        # the version recorded in __init__.py.
        try:
            self._get_log_commits()
        except Exception as inst:
            raise Exception(
                """
    Error: the version in __init__.py is {}, so 'git log' is
    looking for commits that have happened since that version, but
    it appears there is not existing tag for that version. You may
    need to roll back the version in __init__.py to what is actually
    commited. Check with `git tag`.
    --------
    {}
    """.format(self.init_version, inst))
        ## where are we at?
        print("__init__.__version__ == '{}':".format(self.init_version))
        print("'{}' is {} commits ahead of origin/{}"
.format(self.tag, len(self.commits), self.init_version)) | [
"def",
"get_git_status",
"(",
"self",
")",
":",
"## get git branch",
"self",
".",
"_get_git_branch",
"(",
")",
"## get tag in the init file",
"self",
".",
"_get_init_release_tag",
"(",
")",
"## get log commits since <tag>",
"try",
":",
"self",
".",
"_get_log_commits",
... | 34.724138 | 17.655172 |
def export(self, template_file_name, output_file_name,
           sort="public", data=None, limit=0):
    """Export the ranking to a file rendered from a moustache template.

    Args:
        template_file_name (str): path of the moustache template to render
        output_file_name (str): path of the file to create with the ranking
        sort (str): field used to sort the users
        data: extra data exposed to the template as ``extraData``
        limit (int): forwarded to the user-export helper to cap the number
            of exported users (0 by default)
    """
    context = {
        "users": self.__exportUsers(sort, limit),
        "extraData": data,
    }
    with open(template_file_name) as template_file:
        template = parse(template_file.read())
    rendered = Renderer().render(template, context)
    with open(output_file_name, "w") as text_file:
        text_file.write(rendered)
"def",
"export",
"(",
"self",
",",
"template_file_name",
",",
"output_file_name",
",",
"sort",
"=",
"\"public\"",
",",
"data",
"=",
"None",
",",
"limit",
"=",
"0",
")",
":",
"exportedData",
"=",
"{",
"}",
"exportedUsers",
"=",
"self",
".",
"__exportUsers",... | 33.038462 | 17.692308 |
def storage(self, *, resource=None):
""" Get an instance to handle file storage (OneDrive / Sharepoint)
for the specified account resource
:param str resource: Custom resource to be used in this drive object
(Defaults to parent main_resource)
:return: a representation of OneDrive File Storage
:rtype: Storage
:raises RuntimeError: if protocol doesn't support the feature
"""
if not isinstance(self.protocol, MSGraphProtocol):
# TODO: Custom protocol accessing OneDrive/Sharepoint Api fails here
raise RuntimeError(
'Drive options only works on Microsoft Graph API')
return Storage(parent=self, main_resource=resource) | [
"def",
"storage",
"(",
"self",
",",
"*",
",",
"resource",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"protocol",
",",
"MSGraphProtocol",
")",
":",
"# TODO: Custom protocol accessing OneDrive/Sharepoint Api fails here",
"raise",
"RuntimeError... | 45.375 | 18.8125 |
def on_menu_criteria_file(self, event):
"""
read pmag_criteria.txt file
and open change criteria dialog
"""
if self.data_model == 3:
dlg = wx.FileDialog(
self, message="choose a file in MagIC Data Model 3.0 format",
defaultDir=self.WD,
defaultFile="criteria.txt",
style=wx.FD_OPEN | wx.FD_CHANGE_DIR
)
else:
dlg = wx.FileDialog(
self, message="choose a file in a MagIC Data Model 2.5 pmagpy format",
defaultDir=self.WD,
defaultFile="pmag_criteria.txt",
# wildcard=wildcard,
style=wx.FD_OPEN | wx.FD_CHANGE_DIR
)
if self.show_dlg(dlg) == wx.ID_OK:
criteria_file = dlg.GetPath()
self.GUI_log.write(
"-I- Read new criteria file: %s\n" % criteria_file)
dlg.Destroy()
replace_acceptance_criteria = pmag.initialize_acceptance_criteria(
data_model=self.data_model)
try:
if self.data_model == 3:
self.read_criteria_file(criteria_file)
replace_acceptance_criteria = self.acceptance_criteria
# replace_acceptance_criteria=pmag.read_criteria_from_file(criteria_file,replace_acceptance_criteria,data_model=self.data_model)
# # just to see if file exists
print(replace_acceptance_criteria)
else:
replace_acceptance_criteria = pmag.read_criteria_from_file(
criteria_file, replace_acceptance_criteria, data_model=self.data_model) # just to see if file exists
except Exception as ex:
print('-W-', ex)
dlg1 = wx.MessageDialog(
self, caption="Error:", message="error in reading file", style=wx.OK)
result = self.show_dlg(dlg1)
if result == wx.ID_OK:
dlg1.Destroy()
return
self.add_thellier_gui_criteria()
self.read_criteria_file(criteria_file)
# check if some statistics are in the new criteria but not in old. If
# yes, add to self.preferences['show_statistics_on_gui']
crit_list_not_in_pref = []
for crit in list(self.acceptance_criteria.keys()):
if self.acceptance_criteria[crit]['category'] == "IE-SPEC":
if self.acceptance_criteria[crit]['value'] != -999:
short_crit = crit.split('specimen_')[-1]
if short_crit not in self.preferences['show_statistics_on_gui']:
print("-I- statistic %s is not in your preferences" % crit)
self.preferences['show_statistics_on_gui'].append(
short_crit)
crit_list_not_in_pref.append(crit)
if len(crit_list_not_in_pref) > 0:
stat_list = ":".join(crit_list_not_in_pref)
dlg1 = wx.MessageDialog(self, caption="WARNING:",
message="statistics '%s' is in the imported criteria file but not in your appearence preferences.\nThis statistic will not appear on the gui panel.\n The program will exit after saving new acceptance criteria, and it will be added automatically the next time you open it " % stat_list,
style=wx.OK | wx.ICON_INFORMATION)
self.show_dlg(dlg1)
dlg1.Destroy()
dia = thellier_gui_dialogs.Criteria_Dialog(
None, self.acceptance_criteria, self.preferences, title='Acceptance Criteria')
dia.Center()
result = self.show_dlg(dia)
if result == wx.ID_OK: # Until the user clicks OK, show the message
self.On_close_criteria_box(dia)
if len(crit_list_not_in_pref) > 0:
dlg1 = wx.MessageDialog(self, caption="WARNING:",
message="Exiting now! When you restart the gui all the new statistics will be added.",
style=wx.OK | wx.ICON_INFORMATION)
self.show_dlg(dlg1)
dlg1.Destroy()
self.on_menu_exit(None)
# self.Destroy()
# sys.exit()
if result == wx.ID_CANCEL: # Until the user clicks OK, show the message
for crit in crit_list_not_in_pref:
short_crit = crit.split('specimen_')[-1]
self.preferences['show_statistics_on_gui'].remove(short_crit) | [
"def",
"on_menu_criteria_file",
"(",
"self",
",",
"event",
")",
":",
"if",
"self",
".",
"data_model",
"==",
"3",
":",
"dlg",
"=",
"wx",
".",
"FileDialog",
"(",
"self",
",",
"message",
"=",
"\"choose a file in MagIC Data Model 3.0 format\"",
",",
"defaultDir",
... | 51.022727 | 22.590909 |
def monitor_session_span_command_source(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
monitor = ET.SubElement(config, "monitor", xmlns="urn:brocade.com:mgmt:brocade-span")
session = ET.SubElement(monitor, "session")
session_number_key = ET.SubElement(session, "session-number")
session_number_key.text = kwargs.pop('session_number')
span_command = ET.SubElement(session, "span-command")
source = ET.SubElement(span_command, "source")
source.text = kwargs.pop('source')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"monitor_session_span_command_source",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"monitor",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"monitor\"",
",",
"xmlns",
"=",
"\"urn... | 46.714286 | 16.928571 |
def _ResponseToClientsFullInfo(self, response):
"""Creates a ClientFullInfo object from a database response."""
c_full_info = None
prev_cid = None
for row in response:
(cid, fs, crt, ping, clk, ip, foreman, first, last_client_ts,
last_crash_ts, last_startup_ts, client_obj, client_startup_obj,
last_startup_obj, label_owner, label_name) = row
if cid != prev_cid:
if c_full_info:
yield db_utils.IntToClientID(prev_cid), c_full_info
metadata = rdf_objects.ClientMetadata(
certificate=crt,
fleetspeak_enabled=fs,
first_seen=mysql_utils.TimestampToRDFDatetime(first),
ping=mysql_utils.TimestampToRDFDatetime(ping),
clock=mysql_utils.TimestampToRDFDatetime(clk),
ip=mysql_utils.StringToRDFProto(rdf_client_network.NetworkAddress,
ip),
last_foreman_time=mysql_utils.TimestampToRDFDatetime(foreman),
startup_info_timestamp=mysql_utils.TimestampToRDFDatetime(
last_startup_ts),
last_crash_timestamp=mysql_utils.TimestampToRDFDatetime(
last_crash_ts))
if client_obj is not None:
l_snapshot = rdf_objects.ClientSnapshot.FromSerializedString(
client_obj)
l_snapshot.timestamp = mysql_utils.TimestampToRDFDatetime(
last_client_ts)
l_snapshot.startup_info = rdf_client.StartupInfo.FromSerializedString(
client_startup_obj)
l_snapshot.startup_info.timestamp = l_snapshot.timestamp
else:
l_snapshot = rdf_objects.ClientSnapshot(
client_id=db_utils.IntToClientID(cid))
if last_startup_obj is not None:
startup_info = rdf_client.StartupInfo.FromSerializedString(
last_startup_obj)
startup_info.timestamp = mysql_utils.TimestampToRDFDatetime(
last_startup_ts)
else:
startup_info = None
prev_cid = cid
c_full_info = rdf_objects.ClientFullInfo(
metadata=metadata,
labels=[],
last_snapshot=l_snapshot,
last_startup_info=startup_info)
if label_owner and label_name:
c_full_info.labels.append(
rdf_objects.ClientLabel(name=label_name, owner=label_owner))
if c_full_info:
yield db_utils.IntToClientID(prev_cid), c_full_info | [
"def",
"_ResponseToClientsFullInfo",
"(",
"self",
",",
"response",
")",
":",
"c_full_info",
"=",
"None",
"prev_cid",
"=",
"None",
"for",
"row",
"in",
"response",
":",
"(",
"cid",
",",
"fs",
",",
"crt",
",",
"ping",
",",
"clk",
",",
"ip",
",",
"foreman"... | 39.916667 | 19.6 |
def get_albums(self, path):
"""Return the list of all sub-directories of path."""
for name in self.albums[path].subdirs:
subdir = os.path.normpath(join(path, name))
yield subdir, self.albums[subdir]
for subname, album in self.get_albums(subdir):
yield subname, self.albums[subdir] | [
"def",
"get_albums",
"(",
"self",
",",
"path",
")",
":",
"for",
"name",
"in",
"self",
".",
"albums",
"[",
"path",
"]",
".",
"subdirs",
":",
"subdir",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"join",
"(",
"path",
",",
"name",
")",
")",
"yield... | 42.75 | 13.375 |
def isiterable(element, exclude=None):
"""Check whatever or not if input element is an iterable.
:param element: element to check among iterable types.
:param type/tuple exclude: not allowed types in the test.
:Example:
>>> isiterable({})
True
>>> isiterable({}, exclude=dict)
False
>>> isiterable({}, exclude=(dict,))
False
"""
# check for allowed type
allowed = exclude is None or not isinstance(element, exclude)
result = allowed and isinstance(element, Iterable)
return result | [
"def",
"isiterable",
"(",
"element",
",",
"exclude",
"=",
"None",
")",
":",
"# check for allowed type",
"allowed",
"=",
"exclude",
"is",
"None",
"or",
"not",
"isinstance",
"(",
"element",
",",
"exclude",
")",
"result",
"=",
"allowed",
"and",
"isinstance",
"(... | 25 | 21.857143 |
def supports_version_type(self, version_type=None):
"""Tests if the given version type is supported.
arg: version_type (osid.type.Type): a version Type
return: (boolean) - ``true`` if the type is supported, ``false``
otherwise
raise: IllegalState - syntax is not a ``VERSION``
raise: NullArgument - ``version_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.Metadata.supports_coordinate_type
from .osid_errors import IllegalState, NullArgument
if not version_type:
raise NullArgument('no input Type provided')
if self._kwargs['syntax'] not in ['``VERSION``']:
raise IllegalState('put more meaninful message here')
return version_type in self.get_version_types | [
"def",
"supports_version_type",
"(",
"self",
",",
"version_type",
"=",
"None",
")",
":",
"# Implemented from template for osid.Metadata.supports_coordinate_type",
"from",
".",
"osid_errors",
"import",
"IllegalState",
",",
"NullArgument",
"if",
"not",
"version_type",
":",
... | 47.555556 | 20.166667 |
def delete_objects(Bucket, Delete, MFA=None, RequestPayer=None,
region=None, key=None, keyid=None, profile=None):
'''
Delete objects in a given S3 bucket.
Returns {deleted: true} if all objects were deleted
and {deleted: false, failed: [key, ...]} otherwise
CLI Example:
.. code-block:: bash
salt myminion boto_s3_bucket.delete_objects mybucket '{Objects: [Key: myobject]}'
'''
if isinstance(Delete, six.string_types):
Delete = salt.utils.json.loads(Delete)
if not isinstance(Delete, dict):
raise SaltInvocationError("Malformed Delete request.")
if 'Objects' not in Delete:
raise SaltInvocationError("Malformed Delete request.")
failed = []
objs = Delete['Objects']
for i in range(0, len(objs), 1000):
chunk = objs[i:i+1000]
subset = {'Objects': chunk, 'Quiet': True}
try:
args = {'Bucket': Bucket}
args.update({'MFA': MFA}) if MFA else None
args.update({'RequestPayer': RequestPayer}) if RequestPayer else None
args.update({'Delete': subset})
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
ret = conn.delete_objects(**args)
failed += ret.get('Errors', [])
except ClientError as e:
return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
if failed:
return {'deleted': False, 'failed': failed}
else:
return {'deleted': True} | [
"def",
"delete_objects",
"(",
"Bucket",
",",
"Delete",
",",
"MFA",
"=",
"None",
",",
"RequestPayer",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"if",
"isinstance... | 34.418605 | 21.906977 |
def in_degree_iter(self, nbunch=None, t=None):
"""Return an iterator for (node, in_degree) at time t.
The node degree is the number of edges incoming to the node in a given timeframe.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None will be returned an iterator over the degree of nodes on the flattened graph.
Returns
-------
nd_iter : an iterator
The iterator returns two-tuples of (node, degree).
See Also
--------
degree
Examples
--------
>>> G = dn.DynDiGraph()
>>> G.add_interaction(0, 1, t=0)
>>> list(G.in_degree_iter(0, t=0))
[(0, 0)]
>>> list(G.in_degree_iter([0,1], t=0))
[(0, 0), (1, 1)]
"""
if nbunch is None:
nodes_nbrs = self._pred.items()
else:
nodes_nbrs = ((n, self._pred[n]) for n in self.nbunch_iter(nbunch))
if t is None:
for n, nbrs in nodes_nbrs:
deg = len(self._pred[n])
yield (n, deg)
else:
for n, nbrs in nodes_nbrs:
edges_t = len([v for v in nbrs.keys() if self.__presence_test(v, n, t)])
if edges_t > 0:
yield (n, edges_t)
else:
yield (n, 0) | [
"def",
"in_degree_iter",
"(",
"self",
",",
"nbunch",
"=",
"None",
",",
"t",
"=",
"None",
")",
":",
"if",
"nbunch",
"is",
"None",
":",
"nodes_nbrs",
"=",
"self",
".",
"_pred",
".",
"items",
"(",
")",
"else",
":",
"nodes_nbrs",
"=",
"(",
"(",
"n",
... | 30.265306 | 21.102041 |
def oauth_link_external_id(user, external_id=None):
"""Link a user to an external id.
:param user: A :class:`invenio_accounts.models.User` instance.
:param external_id: The external id associated with the user.
(Default: ``None``)
:raises invenio_oauthclient.errors.AlreadyLinkedError: Raised if already
exists a link.
"""
try:
with db.session.begin_nested():
db.session.add(UserIdentity(
id=external_id['id'],
method=external_id['method'],
id_user=user.id
))
except IntegrityError:
raise AlreadyLinkedError(user, external_id) | [
"def",
"oauth_link_external_id",
"(",
"user",
",",
"external_id",
"=",
"None",
")",
":",
"try",
":",
"with",
"db",
".",
"session",
".",
"begin_nested",
"(",
")",
":",
"db",
".",
"session",
".",
"add",
"(",
"UserIdentity",
"(",
"id",
"=",
"external_id",
... | 35.666667 | 15 |
def connect(self, url):
"""Connect to the server.
:param str url: server URL.
"""
headers = httputil.HTTPHeaders({'Content-Type': APPLICATION_JSON})
request = httpclient.HTTPRequest(url=url,
connect_timeout=self.connect_timeout,
request_timeout=self.request_timeout,
headers=headers)
ws = websocket_connect(url)
ws.add_done_callback(self._connect_callback) | [
"def",
"connect",
"(",
"self",
",",
"url",
")",
":",
"headers",
"=",
"httputil",
".",
"HTTPHeaders",
"(",
"{",
"'Content-Type'",
":",
"APPLICATION_JSON",
"}",
")",
"request",
"=",
"httpclient",
".",
"HTTPRequest",
"(",
"url",
"=",
"url",
",",
"connect_time... | 43.75 | 17.916667 |
def index(self, refresh=False):
"""Indexes this object, using a document from `to_dict()`"""
es = connections.get_connection("default")
index = self.__class__.search_objects.mapping.index
doc_type = self.__class__.search_objects.mapping.doc_type
es.index(index, doc_type,
id=self.pk,
body=self.to_dict(),
refresh=refresh) | [
"def",
"index",
"(",
"self",
",",
"refresh",
"=",
"False",
")",
":",
"es",
"=",
"connections",
".",
"get_connection",
"(",
"\"default\"",
")",
"index",
"=",
"self",
".",
"__class__",
".",
"search_objects",
".",
"mapping",
".",
"index",
"doc_type",
"=",
"... | 44.888889 | 10.222222 |
def default(self, value):
"""Convert rogue and mysterious data types.
Conversion notes:
- ``datetime.date`` and ``datetime.datetime`` objects are
converted into datetime strings.
"""
if isinstance(value, datetime):
return value.isoformat()
elif isinstance(value, date):
dt = datetime(value.year, value.month, value.day, 0, 0, 0)
return dt.isoformat()
elif isinstance(value, Decimal):
return float(str(value))
elif isinstance(value, set):
return list(value)
# raise TypeError
return super(ESJsonEncoder, self).default(value) | [
"def",
"default",
"(",
"self",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"datetime",
")",
":",
"return",
"value",
".",
"isoformat",
"(",
")",
"elif",
"isinstance",
"(",
"value",
",",
"date",
")",
":",
"dt",
"=",
"datetime",
"(",
... | 34.473684 | 12.052632 |
def install_kernel_spec(self, app, dir_name, display_name,
settings_module, ipython_arguments):
"""install an IPython >= 3.0 kernelspec that loads corral env
Thanks: django extensions
"""
ksm = app.kernel_spec_manager
try_spec_names = ['python3' if six.PY3 else 'python2', 'python']
if isinstance(try_spec_names, six.string_types):
try_spec_names = [try_spec_names]
ks = None
for spec_name in try_spec_names:
try:
ks = ksm.get_kernel_spec(spec_name)
break
except Exception:
continue
if not ks:
self.parser.error("No notebook (Python) kernel specs found")
ks.display_name = display_name
ks.env["CORRAL_SETTINGS_MODULE"] = settings_module
ks.argv.extend(ipython_arguments)
in_corral_dir, in_corral = os.path.split(os.path.realpath(sys.argv[0]))
pythonpath = ks.env.get(
'PYTHONPATH', os.environ.get('PYTHONPATH', ''))
pythonpath = pythonpath.split(':')
if in_corral_dir not in pythonpath:
pythonpath.append(in_corral_dir)
ks.env['PYTHONPATH'] = ':'.join(filter(None, pythonpath))
kernel_dir = os.path.join(ksm.user_kernel_dir, conf.PACKAGE)
if not os.path.exists(kernel_dir):
os.makedirs(kernel_dir)
shutil.copy(res.fullpath("logo-64x64.png"), kernel_dir)
with open(os.path.join(kernel_dir, 'kernel.json'), 'w') as f:
f.write(ks.to_json()) | [
"def",
"install_kernel_spec",
"(",
"self",
",",
"app",
",",
"dir_name",
",",
"display_name",
",",
"settings_module",
",",
"ipython_arguments",
")",
":",
"ksm",
"=",
"app",
".",
"kernel_spec_manager",
"try_spec_names",
"=",
"[",
"'python3'",
"if",
"six",
".",
"... | 38.7 | 18.05 |
def remove_root(self, model, setter=None):
''' Remove a model as root model from this Document.
Changes to this model may still trigger ``on_change`` callbacks
on this document, if the model is still referred to by other
root models.
Args:
model (Model) :
The model to add as a root of this document.
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
'''
if model not in self._roots:
return # TODO (bev) ValueError?
self._push_all_models_freeze()
try:
self._roots.remove(model)
finally:
self._pop_all_models_freeze()
self._trigger_on_change(RootRemovedEvent(self, model, setter)) | [
"def",
"remove_root",
"(",
"self",
",",
"model",
",",
"setter",
"=",
"None",
")",
":",
"if",
"model",
"not",
"in",
"self",
".",
"_roots",
":",
"return",
"# TODO (bev) ValueError?",
"self",
".",
"_push_all_models_freeze",
"(",
")",
"try",
":",
"self",
".",
... | 41 | 24.16129 |
def all(types, unit, precision, as_json): # pylint: disable=redefined-builtin
"""Get temperatures of all available sensors"""
sensors = W1ThermSensor.get_available_sensors(types)
temperatures = []
for sensor in sensors:
if precision:
sensor.set_precision(precision, persist=False)
temperatures.append(sensor.get_temperature(unit))
if as_json:
data = [
{"id": i, "hwid": s.id, "type": s.type_name, "temperature": t, "unit": unit}
for i, s, t in zip(count(start=1), sensors, temperatures)
]
click.echo(json.dumps(data, indent=4, sort_keys=True))
else:
click.echo(
"Got temperatures of {0} sensors:".format(
click.style(str(len(sensors)), bold=True)
)
)
for i, sensor, temperature in zip(count(start=1), sensors, temperatures):
click.echo(
" Sensor {0} ({1}) measured temperature: {2} {3}".format(
click.style(str(i), bold=True),
click.style(sensor.id, bold=True),
click.style(str(round(temperature, 2)), bold=True),
click.style(unit, bold=True),
)
) | [
"def",
"all",
"(",
"types",
",",
"unit",
",",
"precision",
",",
"as_json",
")",
":",
"# pylint: disable=redefined-builtin",
"sensors",
"=",
"W1ThermSensor",
".",
"get_available_sensors",
"(",
"types",
")",
"temperatures",
"=",
"[",
"]",
"for",
"sensor",
"in",
... | 39.387097 | 24.129032 |
def publish_processed_network_packets(
name="not-set",
task_queue=None,
result_queue=None,
need_response=False,
shutdown_msg="SHUTDOWN"):
"""
# Redis/RabbitMQ/SQS messaging endpoints for pub-sub
routing_key = ev("PUBLISH_EXCHANGE",
"reporting.accounts")
queue_name = ev("PUBLISH_QUEUE",
"reporting.accounts")
auth_url = ev("PUB_BROKER_URL",
"redis://localhost:6379/15")
serializer = "json"
"""
# these keys need to be cycled to prevent
# exploiting static keys
filter_key = ev("IGNORE_KEY",
INCLUDED_IGNORE_KEY)
forward_host = ev("FORWARD_HOST", "127.0.0.1")
forward_port = int(ev("FORWARD_PORT", "80"))
include_filter_key = ev("FILTER_KEY", "")
if not include_filter_key and filter_key:
include_filter_key = filter_key
filter_keys = [filter_key]
log.info(("START consumer={} "
"forward={}:{} with "
"key={} filters={}")
.format(name,
forward_host,
forward_port,
include_filter_key,
filter_key))
forward_skt = None
not_done = True
while not_done:
if not forward_skt:
forward_skt = connect_forwarder(
forward_host=forward_host,
forward_port=forward_port)
next_task = task_queue.get()
if next_task:
if str(next_task) == shutdown_msg:
# Poison pill for shutting down
log.info(("{}: DONE CALLBACK "
"Exiting msg={}")
.format(name,
next_task))
task_queue.task_done()
break
# end of handling shutdown case
try:
log.debug(("{} parsing")
.format(name))
source = next_task.source
packet = next_task.payload
if not packet:
log.error(("{} invalid task found "
"{} missing payload")
.format(name,
next_task))
break
log.debug(("{} found msg from src={}")
.format(name,
source))
network_data = parse_network_data(
data_packet=packet,
include_filter_key=include_filter_key,
filter_keys=filter_keys)
if network_data["status"] == VALID:
if network_data["data_type"] == TCP \
or network_data["data_type"] == UDP \
or network_data["data_type"] == ARP \
or network_data["data_type"] == ICMP:
log.info(("{} valid={} packet={} "
"data={}")
.format(name,
network_data["id"],
network_data["data_type"],
network_data["target_data"]))
if not forward_skt:
forward_skt = connect_forwarder(
forward_host=forward_host,
forward_port=forward_port)
if forward_skt:
if network_data["stream"]:
sent = False
while not sent:
try:
log.info("sending={}".format(
network_data["stream"]))
send_msg(
forward_skt,
network_data["stream"]
.encode("utf-8"))
sent = True
except Exception as e:
sent = False
time.sleep(0.5)
try:
forward_skt.close()
forward_skt = None
except Exception as w:
forward_skt = None
forward_skt = connect_forwarder(
forward_host=forward_host,
forward_port=forward_port)
# end of reconnecting
log.info("sent={}".format(
network_data["stream"]))
if need_response:
log.info("receiving")
cdr_res = forward_skt.recv(1024)
log.info(("cdr - res{}")
.format(cdr_res))
else:
log.info(("{} EMPTY stream={} "
"error={} status={}")
.format(
name,
network_data["stream"],
network_data["err"],
network_data["status"]))
else:
log.info(("{} not_supported valid={} "
"packet data_type={} status={}")
.format(name,
network_data["id"],
network_data["data_type"],
network_data["status"]))
elif network_data["status"] == FILTERED:
log.info(("{} filtered={} status={}")
.format(name,
network_data["filtered"],
network_data["status"]))
else:
if network_data["status"] == INVALID:
log.info(("{} invalid={} packet={} "
"error={} status={}")
.format(name,
network_data["id"],
network_data["data_type"],
network_data["error"],
network_data["status"]))
else:
log.info(("{} unknown={} packet={} "
"error={} status={}")
.format(name,
network_data["id"],
network_data["data_type"],
network_data["error"],
network_data["status"]))
# end of if valid or not data
except KeyboardInterrupt as k:
log.info(("{} stopping")
.format(name))
break
except Exception as e:
log.error(("{} failed packaging packet to forward "
"with ex={}")
.format(name,
e))
break
# end of try/ex during payload processing
# end of if found a next_task
log.info(("Consumer: {} {}")
.format(name, next_task))
task_queue.task_done()
if need_response:
answer = "processed: {}".format(next_task())
result_queue.put(answer)
# end of while
if forward_skt:
try:
forward_skt.close()
log.info("CLOSED connection")
forward_skt = None
except Exception:
log.info("CLOSED connection")
# end of cleaning up forwarding socket
log.info("{} Done".format(name))
return | [
"def",
"publish_processed_network_packets",
"(",
"name",
"=",
"\"not-set\"",
",",
"task_queue",
"=",
"None",
",",
"result_queue",
"=",
"None",
",",
"need_response",
"=",
"False",
",",
"shutdown_msg",
"=",
"\"SHUTDOWN\"",
")",
":",
"# these keys need to be cycled to pr... | 40.066667 | 16.609524 |
def edit_customer(self, id_, **kwargs):
"""
Edits a customer by ID. All fields available at creation can be updated
as well. If you want to update hourly rates retroactively, set the
argument `update_hourly_rate_on_time_entries` to True.
"""
data = self._wrap_dict("customer", kwargs)
return self.patch("/customers/{}.json".format(id_), data=data) | [
"def",
"edit_customer",
"(",
"self",
",",
"id_",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"self",
".",
"_wrap_dict",
"(",
"\"customer\"",
",",
"kwargs",
")",
"return",
"self",
".",
"patch",
"(",
"\"/customers/{}.json\"",
".",
"format",
"(",
"id_",... | 49.5 | 17 |
def build_search_otu_table(self, search_results_list, base_list, output_path):
'''
Build an OTU from SequenceSearchResult objects
Parameters
----------
search_results_list: list
Iterable if SequenceSearchResult objects. e.g.
[SequenceSearchResult_1, SequenceSearchResult_2, ...]
base_list: list
Iterable of the basenames for each sequence file provided to graftM
e.g.
[sample_1, sample_2, ...]
output_path: str
Path to output file to which the resultant output file will be
written to.
'''
db_count = self._interpret_hits(search_results_list,
base_list)
self._write_results(db_count, output_path) | [
"def",
"build_search_otu_table",
"(",
"self",
",",
"search_results_list",
",",
"base_list",
",",
"output_path",
")",
":",
"db_count",
"=",
"self",
".",
"_interpret_hits",
"(",
"search_results_list",
",",
"base_list",
")",
"self",
".",
"_write_results",
"(",
"db_co... | 37.045455 | 22.409091 |
def move(self, point: Point) -> 'Location':
"""
Alter the point stored in the location while preserving the labware.
This returns a new Location and does not alter the current one. It
should be used like
.. code-block:: python
>>> loc = Location(Point(1, 1, 1), 'Hi')
>>> new_loc = loc.move(Point(1, 1, 1))
>>> assert loc_2.point == Point(2, 2, 2) # True
>>> assert loc.point == Point(1, 1, 1) # True
"""
return self._replace(point=self.point + point) | [
"def",
"move",
"(",
"self",
",",
"point",
":",
"Point",
")",
"->",
"'Location'",
":",
"return",
"self",
".",
"_replace",
"(",
"point",
"=",
"self",
".",
"point",
"+",
"point",
")"
] | 34.125 | 20.625 |
def connect(self, func=None, event=None, set_method=False):
"""Register a callback function to a given event.
To register a callback function to the `spam` event, where `obj` is
an instance of a class deriving from `EventEmitter`:
```python
@obj.connect
def on_spam(arg1, arg2):
pass
```
This is called when `obj.emit('spam', arg1, arg2)` is called.
Several callback functions can be registered for a given event.
The registration order is conserved and may matter in applications.
"""
if func is None:
return partial(self.connect, set_method=set_method)
# Get the event name from the function.
if event is None:
event = self._get_on_name(func)
# We register the callback function.
self._callbacks[event].append(func)
# A new method self.event() emitting the event is created.
if set_method:
self._create_emitter(event)
return func | [
"def",
"connect",
"(",
"self",
",",
"func",
"=",
"None",
",",
"event",
"=",
"None",
",",
"set_method",
"=",
"False",
")",
":",
"if",
"func",
"is",
"None",
":",
"return",
"partial",
"(",
"self",
".",
"connect",
",",
"set_method",
"=",
"set_method",
")... | 29.647059 | 23.823529 |
def _insert(self, namespace, stream, events, configuration):
"""
`stream` is the name of a stream and `events` is a list of
(TimeUUID, event) to insert. Make room for the events to insert if
necessary by deleting the oldest events. Then insert each event in time
sorted order.
"""
max_items = configuration['max_items']
for _id, event in events:
while len(self.db[namespace][stream]) >= max_items:
self.db[namespace][stream].pop(0)
bisect.insort(self.db[namespace][stream], Event(_id, event)) | [
"def",
"_insert",
"(",
"self",
",",
"namespace",
",",
"stream",
",",
"events",
",",
"configuration",
")",
":",
"max_items",
"=",
"configuration",
"[",
"'max_items'",
"]",
"for",
"_id",
",",
"event",
"in",
"events",
":",
"while",
"len",
"(",
"self",
".",
... | 44.416667 | 15.583333 |
def fw_romaji_lt(full, regular):
'''
Generates a lookup table with the fullwidth rōmaji characters
on the left side, and the regular rōmaji characters as the values.
'''
lt = {}
for n in range(len(full)):
fw = full[n]
reg = regular[n]
lt[fw] = reg
return lt | [
"def",
"fw_romaji_lt",
"(",
"full",
",",
"regular",
")",
":",
"lt",
"=",
"{",
"}",
"for",
"n",
"in",
"range",
"(",
"len",
"(",
"full",
")",
")",
":",
"fw",
"=",
"full",
"[",
"n",
"]",
"reg",
"=",
"regular",
"[",
"n",
"]",
"lt",
"[",
"fw",
"... | 24.916667 | 24.25 |
def _handle_tag_master_disconnected_failback(self, tag, data):
'''
Handle a master_disconnected_failback event
'''
# if the master disconnect event is for a different master, raise an exception
if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']:
# not mine master, ignore
return
if tag.startswith(master_event(type='failback')):
# if the master failback event is not for the top master, raise an exception
if data['master'] != self.opts['master_list'][0]:
raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format(
data['master'], self.opts['master']))
# if the master failback event is for the current master, raise an exception
elif data['master'] == self.opts['master'][0]:
raise SaltException('Already connected to \'{0}\''.format(data['master']))
if self.connected:
# we are not connected anymore
self.connected = False
log.info('Connection to master %s lost', self.opts['master'])
# we can't use the config default here because the default '0' value is overloaded
# to mean 'if 0 disable the job', but when salt detects a timeout it also sets up
# these jobs
master_alive_interval = self.opts['master_alive_interval'] or 60
if self.opts['master_type'] != 'failover':
# modify the scheduled job to fire on reconnect
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': master_alive_interval,
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': False}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
else:
# delete the scheduled job to don't interfere with the failover process
if self.opts['transport'] != 'tcp':
self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']),
persist=True)
log.info('Trying to tune in to next master from master-list')
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'auth'):
self.pub_channel.auth.invalidate()
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
try:
master, self.pub_channel = yield self.eval_master(
opts=self.opts,
failed=True,
failback=tag.startswith(master_event(type='failback')))
except SaltClientError:
pass
if self.connected:
self.opts['master'] = master
# re-init the subsystems to work with the new master
log.info(
'Re-initialising subsystems for new master %s',
self.opts['master']
)
# put the current schedule into the new loaders
self.opts['schedule'] = self.schedule.option('schedule')
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
# make the schedule to use the new 'functions' loader
self.schedule.functions = self.functions
self.pub_channel.on_recv(self._handle_payload)
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# update scheduled job to run with the new master addr
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': master_alive_interval,
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
if self.opts['master_failback'] and 'master_list' in self.opts:
if self.opts['master'] != self.opts['master_list'][0]:
schedule = {
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
self.schedule.modify_job(name=master_event(type='failback'),
schedule=schedule)
else:
self.schedule.delete_job(name=master_event(type='failback'), persist=True)
else:
self.restart = True
self.io_loop.stop() | [
"def",
"_handle_tag_master_disconnected_failback",
"(",
"self",
",",
"tag",
",",
"data",
")",
":",
"# if the master disconnect event is for a different master, raise an exception",
"if",
"tag",
".",
"startswith",
"(",
"master_event",
"(",
"type",
"=",
"'disconnected'",
")",... | 53.547826 | 25.669565 |
def render_form_errors(form, error_types="non_field_errors", **kwargs):
"""
Render form errors to a Bootstrap layout
"""
renderer_cls = get_form_renderer(**kwargs)
return renderer_cls(form, **kwargs).render_errors(error_types) | [
"def",
"render_form_errors",
"(",
"form",
",",
"error_types",
"=",
"\"non_field_errors\"",
",",
"*",
"*",
"kwargs",
")",
":",
"renderer_cls",
"=",
"get_form_renderer",
"(",
"*",
"*",
"kwargs",
")",
"return",
"renderer_cls",
"(",
"form",
",",
"*",
"*",
"kwarg... | 40.166667 | 11.166667 |
def build_from_dict(self, tax_benefit_system, input_dict):
"""
Build a simulation from ``input_dict``
This method uses :any:`build_from_entities` if entities are fully specified, or :any:`build_from_variables` if not.
:param dict input_dict: A dict represeting the input of the simulation
:return: A :any:`Simulation`
"""
input_dict = self.explicit_singular_entities(tax_benefit_system, input_dict)
if any(key in tax_benefit_system.entities_plural() for key in input_dict.keys()):
return self.build_from_entities(tax_benefit_system, input_dict)
else:
return self.build_from_variables(tax_benefit_system, input_dict) | [
"def",
"build_from_dict",
"(",
"self",
",",
"tax_benefit_system",
",",
"input_dict",
")",
":",
"input_dict",
"=",
"self",
".",
"explicit_singular_entities",
"(",
"tax_benefit_system",
",",
"input_dict",
")",
"if",
"any",
"(",
"key",
"in",
"tax_benefit_system",
"."... | 47.733333 | 31.2 |
def get(self, label_sn):
"""
Get tags by a label's sn key
:param label_sn: A corresponding label's ``sn`` key.
:type label_sn: str or int
:return: A list of matching tags. An empty list is returned if there are
not any matches
:rtype: list of dict
:raises: This will raise a
:class:`ServerException<logentries_api.exceptions.ServerException>`
if there is an error from Logentries
"""
tags = self.list()
return [
tag
for tag
in tags
if str(label_sn) in tag.get('args', {}).values()
] | [
"def",
"get",
"(",
"self",
",",
"label_sn",
")",
":",
"tags",
"=",
"self",
".",
"list",
"(",
")",
"return",
"[",
"tag",
"for",
"tag",
"in",
"tags",
"if",
"str",
"(",
"label_sn",
")",
"in",
"tag",
".",
"get",
"(",
"'args'",
",",
"{",
"}",
")",
... | 28.909091 | 20 |
def from_text_list(name, ttl, rdclass, rdtype, text_rdatas):
"""Create an RRset with the specified name, TTL, class, and type, and with
the specified list of rdatas in text format.
@rtype: dns.rrset.RRset object
"""
if isinstance(name, (str, unicode)):
name = dns.name.from_text(name, None)
if isinstance(rdclass, (str, unicode)):
rdclass = dns.rdataclass.from_text(rdclass)
if isinstance(rdtype, (str, unicode)):
rdtype = dns.rdatatype.from_text(rdtype)
r = RRset(name, rdclass, rdtype)
r.update_ttl(ttl)
for t in text_rdatas:
rd = dns.rdata.from_text(r.rdclass, r.rdtype, t)
r.add(rd)
return r | [
"def",
"from_text_list",
"(",
"name",
",",
"ttl",
",",
"rdclass",
",",
"rdtype",
",",
"text_rdatas",
")",
":",
"if",
"isinstance",
"(",
"name",
",",
"(",
"str",
",",
"unicode",
")",
")",
":",
"name",
"=",
"dns",
".",
"name",
".",
"from_text",
"(",
... | 34.894737 | 13.052632 |
def write(
self,
mi_cmd_to_write,
timeout_sec=DEFAULT_GDB_TIMEOUT_SEC,
raise_error_on_timeout=True,
read_response=True,
):
"""Write to gdb process. Block while parsing responses from gdb for a maximum of timeout_sec.
Args:
mi_cmd_to_write (str or list): String to write to gdb. If list, it is joined by newlines.
timeout_sec (float): Maximum number of seconds to wait for response before exiting. Must be >= 0.
raise_error_on_timeout (bool): If read_response is True, raise error if no response is received
read_response (bool): Block and read response. If there is a separate thread running,
this can be false, and the reading thread read the output.
Returns:
List of parsed gdb responses if read_response is True, otherwise []
Raises:
NoGdbProcessError if there is no gdb subprocess running
TypeError if mi_cmd_to_write is not valid
"""
self.verify_valid_gdb_subprocess()
if timeout_sec < 0:
self.logger.warning("timeout_sec was negative, replacing with 0")
timeout_sec = 0
# Ensure proper type of the mi command
if type(mi_cmd_to_write) in [str, unicode]:
pass
elif type(mi_cmd_to_write) == list:
mi_cmd_to_write = "\n".join(mi_cmd_to_write)
else:
raise TypeError(
"The gdb mi command must a be str or list. Got "
+ str(type(mi_cmd_to_write))
)
self.logger.debug("writing: %s", mi_cmd_to_write)
if not mi_cmd_to_write.endswith("\n"):
mi_cmd_to_write_nl = mi_cmd_to_write + "\n"
else:
mi_cmd_to_write_nl = mi_cmd_to_write
if USING_WINDOWS:
# select not implemented in windows for pipes
# assume it's always ready
outputready = [self.stdin_fileno]
else:
_, outputready, _ = select.select([], self.write_list, [], timeout_sec)
for fileno in outputready:
if fileno == self.stdin_fileno:
# ready to write
self.gdb_process.stdin.write(mi_cmd_to_write_nl.encode())
# don't forget to flush for Python3, otherwise gdb won't realize there is data
# to evaluate, and we won't get a response
self.gdb_process.stdin.flush()
else:
self.logger.error("got unexpected fileno %d" % fileno)
if read_response is True:
return self.get_gdb_response(
timeout_sec=timeout_sec, raise_error_on_timeout=raise_error_on_timeout
)
else:
return [] | [
"def",
"write",
"(",
"self",
",",
"mi_cmd_to_write",
",",
"timeout_sec",
"=",
"DEFAULT_GDB_TIMEOUT_SEC",
",",
"raise_error_on_timeout",
"=",
"True",
",",
"read_response",
"=",
"True",
",",
")",
":",
"self",
".",
"verify_valid_gdb_subprocess",
"(",
")",
"if",
"ti... | 40.298507 | 23.223881 |
def get_access_token(self,
method='POST',
decoder=parse_utf8_qsl,
key='access_token',
**kwargs):
'''
Returns an access token.
:param method: A string representation of the HTTP method to be used,
defaults to `POST`.
:type method: str
:param decoder: A function used to parse the Response content. Should
return a dictionary.
:type decoder: func
:param key: The key the access token will be decoded by, defaults to
'access_token'.
:type string:
:param \*\*kwargs: Optional arguments. Same as Requests.
:type \*\*kwargs: dict
'''
r = self.get_raw_access_token(method, **kwargs)
access_token, = process_token_request(r, decoder, key)
return access_token | [
"def",
"get_access_token",
"(",
"self",
",",
"method",
"=",
"'POST'",
",",
"decoder",
"=",
"parse_utf8_qsl",
",",
"key",
"=",
"'access_token'",
",",
"*",
"*",
"kwargs",
")",
":",
"r",
"=",
"self",
".",
"get_raw_access_token",
"(",
"method",
",",
"*",
"*"... | 38.130435 | 17.782609 |
def autocorrelation(ts, normalized=False, unbiased=False):
"""
Returns the discrete, linear convolution of a time series with itself,
optionally using unbiased normalization.
N.B. Autocorrelation estimates are necessarily inaccurate for longer lags,
as there are less pairs of points to convolve separated by that lag.
Therefore best to throw out the results except for shorter lags, e.g.
keep lags from tau=0 up to one quarter of the total time series length.
Args:
normalized (boolean): If True, the time series will first be normalized
to a mean of 0 and variance of 1. This gives autocorrelation 1 at
zero lag.
unbiased (boolean): If True, the result at each lag m will be scaled by
1/(N-m). This gives an unbiased estimation of the autocorrelation of a
stationary process from a finite length sample.
Ref: S. J. Orfanidis (1996) "Optimum Signal Processing", 2nd Ed.
"""
ts = np.squeeze(ts)
if ts.ndim <= 1:
if normalized:
ts = (ts - ts.mean())/ts.std()
N = ts.shape[0]
ar = np.asarray(ts)
acf = np.correlate(ar, ar, mode='full')
outlen = (acf.shape[0] + 1) / 2
acf = acf[(outlen - 1):]
if unbiased:
factor = np.array([1.0/(N - m) for m in range(0, outlen)])
acf = acf * factor
dt = (ts.tspan[-1] - ts.tspan[0]) / (len(ts) - 1.0)
lags = np.arange(outlen)*dt
return Timeseries(acf, tspan=lags, labels=ts.labels)
else:
# recursively handle arrays of dimension > 1
lastaxis = ts.ndim - 1
m = ts.shape[lastaxis]
acfs = [ts[...,i].autocorrelation(normalized, unbiased)[...,np.newaxis]
for i in range(m)]
res = distob.concatenate(acfs, axis=lastaxis)
res.labels[lastaxis] = ts.labels[lastaxis]
return res | [
"def",
"autocorrelation",
"(",
"ts",
",",
"normalized",
"=",
"False",
",",
"unbiased",
"=",
"False",
")",
":",
"ts",
"=",
"np",
".",
"squeeze",
"(",
"ts",
")",
"if",
"ts",
".",
"ndim",
"<=",
"1",
":",
"if",
"normalized",
":",
"ts",
"=",
"(",
"ts"... | 41.088889 | 21.311111 |
def greedy_trails(subg, odds, verbose):
'''Greedily select trails by making the longest you can until the end'''
if verbose:
print('\tCreating edge map')
edges = defaultdict(list)
for x,y in subg.edges():
edges[x].append(y)
edges[y].append(x)
if verbose:
print('\tSelecting trails')
trails = []
for x in subg.nodes():
if verbose > 2:
print('\t\tNode {0}'.format(x))
while len(edges[x]) > 0:
y = edges[x][0]
trail = [(x,y)]
edges[x].remove(y)
edges[y].remove(x)
while len(edges[y]) > 0:
x = y
y = edges[y][0]
trail.append((x,y))
edges[x].remove(y)
edges[y].remove(x)
trails.append(trail)
return trails | [
"def",
"greedy_trails",
"(",
"subg",
",",
"odds",
",",
"verbose",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"'\\tCreating edge map'",
")",
"edges",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"x",
",",
"y",
"in",
"subg",
".",
"edges",
"(",
")",
... | 25.5625 | 16.875 |
def stratified_kfold(df, n_folds):
"""
Create stratified k-folds from an indexed dataframe
"""
sessions = pd.DataFrame.from_records(list(df.index.unique())).groupby(0).apply(lambda x: x[1].unique())
sessions.apply(lambda x: np.random.shuffle(x))
folds = []
for i in range(n_folds):
idx = sessions.apply(lambda x: pd.Series(x[i * (len(x) / n_folds):(i + 1) * (len(x) / n_folds)]))
idx = pd.DataFrame(idx.stack().reset_index(level=1, drop=True)).set_index(0, append=True).index.values
folds.append(df.loc[idx])
return folds | [
"def",
"stratified_kfold",
"(",
"df",
",",
"n_folds",
")",
":",
"sessions",
"=",
"pd",
".",
"DataFrame",
".",
"from_records",
"(",
"list",
"(",
"df",
".",
"index",
".",
"unique",
"(",
")",
")",
")",
".",
"groupby",
"(",
"0",
")",
".",
"apply",
"(",... | 47.166667 | 25.166667 |
def unauthorize_guest(self, guest_mac):
"""
Unauthorize a guest based on his MAC address.
Arguments:
guest_mac -- the guest MAC address : aa:bb:cc:dd:ee:ff
"""
cmd = 'unauthorize-guest'
js = {'mac': guest_mac}
return self._run_command(cmd, params=js) | [
"def",
"unauthorize_guest",
"(",
"self",
",",
"guest_mac",
")",
":",
"cmd",
"=",
"'unauthorize-guest'",
"js",
"=",
"{",
"'mac'",
":",
"guest_mac",
"}",
"return",
"self",
".",
"_run_command",
"(",
"cmd",
",",
"params",
"=",
"js",
")"
] | 28.181818 | 15.090909 |
def perform_create(self, serializer):
"""Create a resource."""
with transaction.atomic():
instance = serializer.save()
# Assign all permissions to the object contributor.
assign_contributor_permissions(instance) | [
"def",
"perform_create",
"(",
"self",
",",
"serializer",
")",
":",
"with",
"transaction",
".",
"atomic",
"(",
")",
":",
"instance",
"=",
"serializer",
".",
"save",
"(",
")",
"# Assign all permissions to the object contributor.",
"assign_contributor_permissions",
"(",
... | 36.857143 | 12 |
def is_instance(state, inst, not_instance_msg=None):
"""Check whether an object is an instance of a certain class.
``is_instance()`` can currently only be used when chained from ``check_object()``, the function that is
used to 'zoom in' on the object of interest.
Args:
inst (class): The class that the object should have.
not_instance_msg (str): When specified, this overrides the automatically generated message in case
the object does not have the expected class.
state (State): The state that is passed in through the SCT chain (don't specify this).
:Example:
Student code and solution code::
import numpy as np
arr = np.array([1, 2, 3, 4, 5])
SCT::
# Verify the class of arr
import numpy
Ex().check_object('arr').is_instance(numpy.ndarray)
"""
state.assert_is(["object_assignments"], "is_instance", ["check_object"])
sol_name = state.solution_parts.get("name")
stu_name = state.student_parts.get("name")
if not_instance_msg is None:
not_instance_msg = "Is it a {{inst.__name__}}?"
if not isInstanceInProcess(sol_name, inst, state.solution_process):
raise InstructorError(
"`is_instance()` noticed that `%s` is not a `%s` in the solution process."
% (sol_name, inst.__name__)
)
_msg = state.build_message(not_instance_msg, {"inst": inst})
feedback = Feedback(_msg, state)
state.do_test(InstanceProcessTest(stu_name, inst, state.student_process, feedback))
return state | [
"def",
"is_instance",
"(",
"state",
",",
"inst",
",",
"not_instance_msg",
"=",
"None",
")",
":",
"state",
".",
"assert_is",
"(",
"[",
"\"object_assignments\"",
"]",
",",
"\"is_instance\"",
",",
"[",
"\"check_object\"",
"]",
")",
"sol_name",
"=",
"state",
"."... | 34.688889 | 26.511111 |
def get_chain(self, use_login=False, use_fork=False):
"""
Return the :class:`mitogen.parent.CallChain` to use for executing
function calls.
:param bool use_login:
If :data:`True`, always return the chain for the login account
rather than any active become user.
:param bool use_fork:
If :data:`True`, return the chain for the fork parent.
:returns mitogen.parent.CallChain:
"""
self._connect()
if use_login:
return self.login_context.default_call_chain
# See FORK_SUPPORTED comments in target.py.
if use_fork and self.init_child_result['fork_context'] is not None:
return self.init_child_result['fork_context'].default_call_chain
return self.chain | [
"def",
"get_chain",
"(",
"self",
",",
"use_login",
"=",
"False",
",",
"use_fork",
"=",
"False",
")",
":",
"self",
".",
"_connect",
"(",
")",
"if",
"use_login",
":",
"return",
"self",
".",
"login_context",
".",
"default_call_chain",
"# See FORK_SUPPORTED commen... | 41.368421 | 18 |
def insert(self, index = 0, *widgets):
"""insert widget in the sprites list at the given index.
by default will prepend."""
for widget in widgets:
self._add(widget, index)
index +=1 # as we are moving forwards
self._sort() | [
"def",
"insert",
"(",
"self",
",",
"index",
"=",
"0",
",",
"*",
"widgets",
")",
":",
"for",
"widget",
"in",
"widgets",
":",
"self",
".",
"_add",
"(",
"widget",
",",
"index",
")",
"index",
"+=",
"1",
"# as we are moving forwards",
"self",
".",
"_sort",
... | 38.857143 | 6.428571 |
def centroid(self):
"""
The point in space which is the average of the triangle centroids
weighted by the area of each triangle.
This will be valid even for non- watertight meshes,
unlike self.center_mass
Returns
----------
centroid : (3,) float
The average vertex weighted by face area
"""
# use the centroid of each triangle weighted by
# the area of the triangle to find the overall centroid
centroid = np.average(self.triangles_center,
axis=0,
weights=self.area_faces)
centroid.flags.writeable = False
return centroid | [
"def",
"centroid",
"(",
"self",
")",
":",
"# use the centroid of each triangle weighted by",
"# the area of the triangle to find the overall centroid",
"centroid",
"=",
"np",
".",
"average",
"(",
"self",
".",
"triangles_center",
",",
"axis",
"=",
"0",
",",
"weights",
"=... | 32.666667 | 17.142857 |
def loop(self):
"""
this function will be called every self.dt_set seconds
request data
tm_wday 0=Monday
tm_yday
"""
today = date.today()
# only start new jobs after change the day changed
if self._current_day != gmtime().tm_yday:
self._current_day = gmtime().tm_yday
for job in ScheduledExportTask.objects.filter(active=1): # get all active jobs
add_task = False
if job.export_period == 1: # daily
start_time = '%s %02d:00:00' % ((today - timedelta(1)).strftime('%d-%b-%Y'),
job.day_time) # "%d-%b-%Y %H:%M:%S"
start_time = mktime(datetime.strptime(start_time, "%d-%b-%Y %H:%M:%S").timetuple())
filename_suffix = 'daily_export_%d_%s' % (job.pk, job.label)
add_task = True
elif job.export_period == 2 and gmtime().tm_yday % 2 == 0: # on even days (2,4,...)
start_time = '%s %02d:00:00' % ((today - timedelta(2)).strftime('%d-%b-%Y'),
job.day_time) # "%d-%b-%Y %H:%M:%S"
start_time = mktime(datetime.strptime(start_time, "%d-%b-%Y %H:%M:%S").timetuple())
filename_suffix = 'two_day_export_%d_%s' % (job.pk, job.label)
add_task = True
elif job.export_period == 7 and gmtime().tm_wday == 0: # on every monday
start_time = '%s %02d:00:00' % ((today - timedelta(7)).strftime('%d-%b-%Y'),
job.day_time) # "%d-%b-%Y %H:%M:%S"
start_time = mktime(datetime.strptime(start_time, "%d-%b-%Y %H:%M:%S").timetuple())
filename_suffix = 'weekly_export_%d_%s' % (job.pk, job.label)
add_task = True
elif job.export_period == 14 and gmtime().tm_yday % 14 == 0: # on every second monday
start_time = '%s %02d:00:00' % ((today - timedelta(14)).strftime('%d-%b-%Y'),
job.day_time) # "%d-%b-%Y %H:%M:%S"
start_time = mktime(datetime.strptime(start_time, "%d-%b-%Y %H:%M:%S").timetuple())
filename_suffix = 'two_week_export_%d_%s' % (job.pk, job.label)
add_task = True
elif job.export_period == 30 and gmtime().tm_yday % 30 == 0: # on every 30 days
start_time = '%s %02d:00:00' % ((today - timedelta(30)).strftime('%d-%b-%Y'),
job.day_time) # "%d-%b-%Y %H:%M:%S"
start_time = mktime(datetime.strptime(start_time, "%d-%b-%Y %H:%M:%S").timetuple())
filename_suffix = '30_day_export_%d_%s' % (job.pk, job.label)
add_task = True
if job.day_time == 0:
end_time = '%s %02d:59:59' % ((today - timedelta(1)).strftime('%d-%b-%Y'),
23) # "%d-%b-%Y %H:%M:%S"
else:
end_time = '%s %02d:59:59' % (today.strftime('%d-%b-%Y'), job.day_time - 1) # "%d-%b-%Y %H:%M:%S"
end_time = mktime(datetime.strptime(end_time, "%d-%b-%Y %H:%M:%S").timetuple())
# create ExportTask
if add_task:
et = ExportTask(
label=filename_suffix,
datetime_max=datetime.fromtimestamp(end_time, UTC),
datetime_min=datetime.fromtimestamp(start_time, UTC),
filename_suffix=filename_suffix,
mean_value_period=job.mean_value_period,
file_format=job.file_format,
datetime_start=datetime.fromtimestamp(end_time + 60, UTC)
)
et.save()
et.variables.add(*job.variables.all())
# check running tasks and start the next Export Task
running_jobs = ExportTask.objects.filter(busy=True, failed=False)
if running_jobs:
for job in running_jobs:
if time() - job.start() < 30:
# only check Task when it is running longer then 30s
continue
if job.backgroundprocess is None:
# if the job has no backgroundprocess assosiated mark as failed
job.failed = True
job.save()
continue
if now() - timedelta(hours=1) > job.backgroundprocess.last_update:
# if the Background Process has been updated in the past 60s wait
continue
if job.backgroundprocess.pid == 0:
# if the job has no valid pid mark as failed
job.failed = True
job.save()
continue
else:
# start the next Export Task
job = ExportTask.objects.filter(
done=False,
busy=False,
failed=False,
datetime_start__lte=datetime.now(UTC)).first() # get all jobs
if job:
bp = BackgroundProcess(label='pyscada.export.ExportProcess-%d' % job.pk,
message='waiting..',
enabled=True,
parent_process_id=self.parent_process_id,
process_class='pyscada.export.worker.ExportProcess',
process_class_kwargs=json.dumps({"job_id": job.pk}))
bp.save()
if job.datetime_start is None:
job.datetime_start = datetime.now(UTC)
job.busy = True
job.save()
# delete all done jobs older the 60 days
for job in ExportTask.objects.filter(done=True, busy=False,
datetime_start__gte=datetime.fromtimestamp(time() + 60 * 24 * 60 * 60,
UTC)):
job.delete()
# delete all failed jobs older the 60 days
for job in ExportTask.objects.filter(failed=True,
datetime_start__gte=datetime.fromtimestamp(time() + 60 * 24 * 60 * 60,
UTC)):
job.delete()
return 1, None | [
"def",
"loop",
"(",
"self",
")",
":",
"today",
"=",
"date",
".",
"today",
"(",
")",
"# only start new jobs after change the day changed",
"if",
"self",
".",
"_current_day",
"!=",
"gmtime",
"(",
")",
".",
"tm_yday",
":",
"self",
".",
"_current_day",
"=",
"gmt... | 53.669355 | 28.959677 |
def _read(self):
"""
Reads a single event from the joystick, blocking until one is
available. Returns `None` if a non-key event was read, or an
`InputEvent` tuple describing the event otherwise.
"""
event = self._stick_file.read(self.EVENT_SIZE)
(tv_sec, tv_usec, type, code, value) = struct.unpack(self.EVENT_FORMAT, event)
if type == self.EV_KEY:
return InputEvent(
timestamp=tv_sec + (tv_usec / 1000000),
direction={
self.KEY_UP: DIRECTION_UP,
self.KEY_DOWN: DIRECTION_DOWN,
self.KEY_LEFT: DIRECTION_LEFT,
self.KEY_RIGHT: DIRECTION_RIGHT,
self.KEY_ENTER: DIRECTION_MIDDLE,
}[code],
action={
self.STATE_PRESS: ACTION_PRESSED,
self.STATE_RELEASE: ACTION_RELEASED,
self.STATE_HOLD: ACTION_HELD,
}[value])
else:
return None | [
"def",
"_read",
"(",
"self",
")",
":",
"event",
"=",
"self",
".",
"_stick_file",
".",
"read",
"(",
"self",
".",
"EVENT_SIZE",
")",
"(",
"tv_sec",
",",
"tv_usec",
",",
"type",
",",
"code",
",",
"value",
")",
"=",
"struct",
".",
"unpack",
"(",
"self"... | 42.08 | 15.52 |
def socket_recvall(socket, length, bufsize=4096):
"""A helper method to read of bytes from a socket to a maximum length"""
data = b""
while len(data) < length:
data += socket.recv(bufsize)
return data | [
"def",
"socket_recvall",
"(",
"socket",
",",
"length",
",",
"bufsize",
"=",
"4096",
")",
":",
"data",
"=",
"b\"\"",
"while",
"len",
"(",
"data",
")",
"<",
"length",
":",
"data",
"+=",
"socket",
".",
"recv",
"(",
"bufsize",
")",
"return",
"data"
] | 36.5 | 12.5 |
def get_default_value_by_type(type_, state=None):
"""
Java specify defaults values for primitive and reference types. This
method returns the default value for a given type.
:param str type_: Name of type.
:return: Default value for this type.
"""
if type_ in ['byte', 'char', 'short', 'int', 'boolean']:
return BVS('default_value_{}'.format(type_), 32)
elif type_ == "long":
return BVS('default_value_{}'.format(type_), 64)
elif type_ == 'float':
return FPS('default_value_{}'.format(type_), FSORT_FLOAT)
elif type_ == 'double':
return FPS('default_value_{}'.format(type_), FSORT_DOUBLE)
elif state is not None:
if type_ == 'java.lang.String':
return SimSootValue_StringRef.new_string(state, StringS('default_value_{}'.format(type_), 1000))
if type_.endswith('[][]'):
raise NotImplementedError
# multiarray = SimSootExpr_NewMultiArray.new_array(self.state, element_type, size)
# multiarray.add_default_value_generator(lambda s: SimSootExpr_NewMultiArray._generate_inner_array(s, element_type, sizes))
# return multiarray
elif type_.endswith('[]'):
array = SimSootExpr_NewArray.new_array(state, type_[:-2], BVV(2, 32))
return array
else:
return SimSootValue_ThisRef.new_object(state, type_, symbolic=True, init_object=False)
else:
# not a primitive type
# => treat it as a reference
return SootNullConstant() | [
"def",
"get_default_value_by_type",
"(",
"type_",
",",
"state",
"=",
"None",
")",
":",
"if",
"type_",
"in",
"[",
"'byte'",
",",
"'char'",
",",
"'short'",
",",
"'int'",
",",
"'boolean'",
"]",
":",
"return",
"BVS",
"(",
"'default_value_{}'",
".",
"format",
... | 49.909091 | 21.242424 |
def open(filename, frame='unspecified'):
"""Create a PointCloud from data saved in a file.
Parameters
----------
filename : :obj:`str`
The file to load data from.
frame : :obj:`str`
The frame to apply to the created PointCloud.
Returns
-------
:obj:`PointCloud`
A PointCloud created from the data in the file.
"""
data = BagOfPoints.load_data(filename)
return PointCloud(data, frame) | [
"def",
"open",
"(",
"filename",
",",
"frame",
"=",
"'unspecified'",
")",
":",
"data",
"=",
"BagOfPoints",
".",
"load_data",
"(",
"filename",
")",
"return",
"PointCloud",
"(",
"data",
",",
"frame",
")"
] | 27.388889 | 16.611111 |
def setupitems(self):
"""Lookup available setup items
:returns: catalog brains
"""
query = {
"path": {
"query": api.get_path(self.setup),
"depth": 1,
},
}
items = api.search(query, "portal_catalog")
# filter out items
items = filter(lambda item: not item.exclude_from_nav, items)
# sort by (translated) title
def cmp_by_translated_title(brain1, brain2):
title1 = t(api.get_title(brain1))
title2 = t(api.get_title(brain2))
return cmp(title1, title2)
return sorted(items, cmp=cmp_by_translated_title) | [
"def",
"setupitems",
"(",
"self",
")",
":",
"query",
"=",
"{",
"\"path\"",
":",
"{",
"\"query\"",
":",
"api",
".",
"get_path",
"(",
"self",
".",
"setup",
")",
",",
"\"depth\"",
":",
"1",
",",
"}",
",",
"}",
"items",
"=",
"api",
".",
"search",
"("... | 30 | 16.727273 |
def encode(self, s):
"""
Encode special characters found in string I{s}.
@param s: A string to encode.
@type s: str
@return: The encoded string.
@rtype: str
"""
if isinstance(s, str) and self.needsEncoding(s):
for x in self.encodings:
s = re.sub(x[0], x[1], s)
return s | [
"def",
"encode",
"(",
"self",
",",
"s",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"str",
")",
"and",
"self",
".",
"needsEncoding",
"(",
"s",
")",
":",
"for",
"x",
"in",
"self",
".",
"encodings",
":",
"s",
"=",
"re",
".",
"sub",
"(",
"x",
"... | 29.833333 | 10.666667 |
def __map_entity(self, entity: dal.AssetClass) -> AssetClass:
""" maps the entity onto the model object """
mapper = self.__get_mapper()
ac = mapper.map_entity(entity)
return ac | [
"def",
"__map_entity",
"(",
"self",
",",
"entity",
":",
"dal",
".",
"AssetClass",
")",
"->",
"AssetClass",
":",
"mapper",
"=",
"self",
".",
"__get_mapper",
"(",
")",
"ac",
"=",
"mapper",
".",
"map_entity",
"(",
"entity",
")",
"return",
"ac"
] | 41 | 10 |
def fcoe_get_interface_input_fcoe_intf_include_stats(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcoe_get_interface = ET.Element("fcoe_get_interface")
config = fcoe_get_interface
input = ET.SubElement(fcoe_get_interface, "input")
fcoe_intf_include_stats = ET.SubElement(input, "fcoe-intf-include-stats")
fcoe_intf_include_stats.text = kwargs.pop('fcoe_intf_include_stats')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"fcoe_get_interface_input_fcoe_intf_include_stats",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"fcoe_get_interface",
"=",
"ET",
".",
"Element",
"(",
"\"fcoe_get_interface\"",
")",
"config",
... | 45.5 | 18.25 |
def rename(self, old_label_name, new_label_name):
"""
Take into account that a label has been renamed
"""
assert(old_label_name != new_label_name)
self._bayes.pop(old_label_name)
old_baye_dir = self._get_baye_dir(old_label_name)
new_baye_dir = self._get_baye_dir(new_label_name)
logger.info("Renaming label training {} -> {} : {} -> {}".format(
old_label_name, new_label_name, old_baye_dir, new_baye_dir
))
os.rename(old_baye_dir, new_baye_dir) | [
"def",
"rename",
"(",
"self",
",",
"old_label_name",
",",
"new_label_name",
")",
":",
"assert",
"(",
"old_label_name",
"!=",
"new_label_name",
")",
"self",
".",
"_bayes",
".",
"pop",
"(",
"old_label_name",
")",
"old_baye_dir",
"=",
"self",
".",
"_get_baye_dir"... | 43.75 | 13.75 |
def _makedos(self, ax, dos_plotter, dos_options, dos_label=None):
"""This is basically the same as the SDOSPlotter get_plot function."""
# don't use first 4 colours; these are the band structure line colours
cycle = cycler(
'color', rcParams['axes.prop_cycle'].by_key()['color'][4:])
with context({'axes.prop_cycle': cycle}):
plot_data = dos_plotter.dos_plot_data(**dos_options)
mask = plot_data['mask']
energies = plot_data['energies'][mask]
lines = plot_data['lines']
spins = [Spin.up] if len(lines[0][0]['dens']) == 1 else \
[Spin.up, Spin.down]
for line_set in plot_data['lines']:
for line, spin in it.product(line_set, spins):
if spin == Spin.up:
label = line['label']
densities = line['dens'][spin][mask]
else:
label = ""
densities = -line['dens'][spin][mask]
ax.fill_betweenx(energies, densities, 0, lw=0,
facecolor=line['colour'],
alpha=line['alpha'])
ax.plot(densities, energies, label=label,
color=line['colour'])
# x and y axis reversed versus normal dos plotting
ax.set_ylim(dos_options['xmin'], dos_options['xmax'])
ax.set_xlim(plot_data['ymin'], plot_data['ymax'])
if dos_label is not None:
ax.set_xlabel(dos_label)
ax.set_xticklabels([])
ax.legend(loc=2, frameon=False, ncol=1, bbox_to_anchor=(1., 1.)) | [
"def",
"_makedos",
"(",
"self",
",",
"ax",
",",
"dos_plotter",
",",
"dos_options",
",",
"dos_label",
"=",
"None",
")",
":",
"# don't use first 4 colours; these are the band structure line colours",
"cycle",
"=",
"cycler",
"(",
"'color'",
",",
"rcParams",
"[",
"'axes... | 42.631579 | 18.789474 |
def num(value):
"""Convert a value from one of several bases to an int."""
if re_hex_num.match(value):
return int(value, base=16)
else:
return int(value) | [
"def",
"num",
"(",
"value",
")",
":",
"if",
"re_hex_num",
".",
"match",
"(",
"value",
")",
":",
"return",
"int",
"(",
"value",
",",
"base",
"=",
"16",
")",
"else",
":",
"return",
"int",
"(",
"value",
")"
] | 29.333333 | 14.333333 |
def xor_key(first, second, trafaret):
"""
xor_key - takes `first` and `second` key names and `trafaret`.
Checks if we have only `first` or only `second` in data, not both,
and at least one.
Then checks key value against trafaret.
"""
trafaret = t.Trafaret._trafaret(trafaret)
def check_(value):
if (first in value) ^ (second in value):
key = first if first in value else second
yield first, t.catch_error(trafaret, value[key]), (key,)
elif first in value and second in value:
yield first, t.DataError(error='correct only if {} is not defined'.format(second)), (first,)
yield second, t.DataError(error='correct only if {} is not defined'.format(first)), (second,)
else:
yield first, t.DataError(error='is required if {} is not defined'.format('second')), (first,)
yield second, t.DataError(error='is required if {} is not defined'.format('first')), (second,)
return check_ | [
"def",
"xor_key",
"(",
"first",
",",
"second",
",",
"trafaret",
")",
":",
"trafaret",
"=",
"t",
".",
"Trafaret",
".",
"_trafaret",
"(",
"trafaret",
")",
"def",
"check_",
"(",
"value",
")",
":",
"if",
"(",
"first",
"in",
"value",
")",
"^",
"(",
"sec... | 42.826087 | 27.434783 |
def to_pb(self):
"""Converts the column family to a protobuf.
:rtype: :class:`.table_v2_pb2.ColumnFamily`
:returns: The converted current object.
"""
if self.gc_rule is None:
return table_v2_pb2.ColumnFamily()
else:
return table_v2_pb2.ColumnFamily(gc_rule=self.gc_rule.to_pb()) | [
"def",
"to_pb",
"(",
"self",
")",
":",
"if",
"self",
".",
"gc_rule",
"is",
"None",
":",
"return",
"table_v2_pb2",
".",
"ColumnFamily",
"(",
")",
"else",
":",
"return",
"table_v2_pb2",
".",
"ColumnFamily",
"(",
"gc_rule",
"=",
"self",
".",
"gc_rule",
".",... | 34.2 | 15.7 |
def remove_device(self, path):
"Remove a device from the daemon's internal search list."
if self.__get_control_socket():
self.sock.sendall("-%s\r\n\x00" % path)
self.sock.recv(12)
self.sock.close() | [
"def",
"remove_device",
"(",
"self",
",",
"path",
")",
":",
"if",
"self",
".",
"__get_control_socket",
"(",
")",
":",
"self",
".",
"sock",
".",
"sendall",
"(",
"\"-%s\\r\\n\\x00\"",
"%",
"path",
")",
"self",
".",
"sock",
".",
"recv",
"(",
"12",
")",
... | 40.666667 | 11.333333 |
def _elapsed_time(begin_time, end_time):
"""Assuming format YYYY-MM-DD hh:mm:ss
Returns the elapsed time in seconds
"""
bt = _str2datetime(begin_time)
et = _str2datetime(end_time)
return float((et - bt).seconds) | [
"def",
"_elapsed_time",
"(",
"begin_time",
",",
"end_time",
")",
":",
"bt",
"=",
"_str2datetime",
"(",
"begin_time",
")",
"et",
"=",
"_str2datetime",
"(",
"end_time",
")",
"return",
"float",
"(",
"(",
"et",
"-",
"bt",
")",
".",
"seconds",
")"
] | 22.9 | 14 |
def parse_exposure(self, node):
"""
Parses <Exposure>
@param node: Node containing the <Exposure> element
@type node: xml.etree.Element
@raise ParseError: Raised when the exposure name is not
being defined in the context of a component type.
"""
if self.current_component_type == None:
self.raise_error('Exposures must be defined in a component type')
try:
name = node.lattrib['name']
except:
self.raise_error('<Exposure> must specify a name')
try:
dimension = node.lattrib['dimension']
except:
self.raise_error("Exposure '{0}' must specify a dimension",
name)
description = node.lattrib.get('description', '')
self.current_component_type.add_exposure(Exposure(name, dimension, description)) | [
"def",
"parse_exposure",
"(",
"self",
",",
"node",
")",
":",
"if",
"self",
".",
"current_component_type",
"==",
"None",
":",
"self",
".",
"raise_error",
"(",
"'Exposures must be defined in a component type'",
")",
"try",
":",
"name",
"=",
"node",
".",
"lattrib",... | 31.142857 | 23.214286 |
def index(self, value):
"""
Return the smallest index of the row(s) with this column
equal to value.
"""
for i in xrange(len(self.parentNode)):
if getattr(self.parentNode[i], self.Name) == value:
return i
raise ValueError(value) | [
"def",
"index",
"(",
"self",
",",
"value",
")",
":",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"self",
".",
"parentNode",
")",
")",
":",
"if",
"getattr",
"(",
"self",
".",
"parentNode",
"[",
"i",
"]",
",",
"self",
".",
"Name",
")",
"==",
"va... | 26.555556 | 12.777778 |
def _assign_to_field(obj, name, val):
'Helper to assign an arbitrary value to a protobuf field'
target = getattr(obj, name)
if isinstance(target, containers.RepeatedScalarFieldContainer):
target.append(val)
elif isinstance(target, containers.RepeatedCompositeFieldContainer):
target = target.add()
target.CopyFrom(val)
elif isinstance(target, (int, float, bool, str, bytes)):
setattr(obj, name, val)
elif isinstance(target, message.Message):
target.CopyFrom(val)
else:
raise RuntimeError("Unsupported type: {}".format(type(target))) | [
"def",
"_assign_to_field",
"(",
"obj",
",",
"name",
",",
"val",
")",
":",
"target",
"=",
"getattr",
"(",
"obj",
",",
"name",
")",
"if",
"isinstance",
"(",
"target",
",",
"containers",
".",
"RepeatedScalarFieldContainer",
")",
":",
"target",
".",
"append",
... | 39.666667 | 18.466667 |
def add_value(self, name, value):
"""
Add a new value to the list.
:param str name: name of the value that is being parsed
:param str value: value that is being parsed
:raises ietfparse.errors.MalformedLinkValue:
if *strict mode* is enabled and a validation error
is detected
This method implements most of the validation mentioned in
sections 5.3 and 5.4 of :rfc:`5988`. The ``_rfc_values``
dictionary contains the appropriate values for the attributes
that get special handling. If *strict mode* is enabled, then
only values that are acceptable will be added to ``_values``.
"""
try:
if self._rfc_values[name] is None:
self._rfc_values[name] = value
elif self.strict:
if name in ('media', 'type'):
raise errors.MalformedLinkValue(
'More than one {} parameter present'.format(name))
return
except KeyError:
pass
if self.strict and name in ('title', 'title*'):
return
self._values.append((name, value)) | [
"def",
"add_value",
"(",
"self",
",",
"name",
",",
"value",
")",
":",
"try",
":",
"if",
"self",
".",
"_rfc_values",
"[",
"name",
"]",
"is",
"None",
":",
"self",
".",
"_rfc_values",
"[",
"name",
"]",
"=",
"value",
"elif",
"self",
".",
"strict",
":",... | 36.3125 | 19.8125 |
def bind_port(requested_port):
"""Bind sockets to an available port, returning sockets and the bound port."""
sockets = tornado.netutil.bind_sockets(requested_port)
if requested_port != 0:
return sockets, requested_port
# Get the actual port number.
for s in sockets:
host, port = s.getsockname()[:2]
if host == '0.0.0.0':
return sockets, port
raise RuntimeError('Could not determine the bound port.') | [
"def",
"bind_port",
"(",
"requested_port",
")",
":",
"sockets",
"=",
"tornado",
".",
"netutil",
".",
"bind_sockets",
"(",
"requested_port",
")",
"if",
"requested_port",
"!=",
"0",
":",
"return",
"sockets",
",",
"requested_port",
"# Get the actual port number.",
"f... | 30.071429 | 17.785714 |
def select(self, domain_or_name, query='', next_token=None,
consistent_read=False):
"""
Returns a set of Attributes for item names within domain_name that
match the query. The query must be expressed in using the SELECT
style syntax rather than the original SimpleDB query language.
Even though the select request does not require a domain object,
a domain object must be passed into this method so the Item objects
returned can point to the appropriate domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object
:param domain_or_name: Either the name of a domain or a Domain object
:type query: string
:param query: The SimpleDB query to be performed.
:type consistent_read: bool
:param consistent_read: When set to true, ensures that the most recent
data is returned.
:rtype: ResultSet
:return: An iterator containing the results.
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'SelectExpression' : query}
if consistent_read:
params['ConsistentRead'] = 'true'
if next_token:
params['NextToken'] = next_token
try:
return self.get_list('Select', params, [('Item', self.item_cls)],
parent=domain)
except SDBResponseError, e:
e.body = "Query: %s\n%s" % (query, e.body)
raise e | [
"def",
"select",
"(",
"self",
",",
"domain_or_name",
",",
"query",
"=",
"''",
",",
"next_token",
"=",
"None",
",",
"consistent_read",
"=",
"False",
")",
":",
"domain",
",",
"domain_name",
"=",
"self",
".",
"get_domain_and_name",
"(",
"domain_or_name",
")",
... | 43.285714 | 20.485714 |
def MapByteStream(self, byte_stream, byte_offset=0, **unused_kwargs):
"""Maps the data type on a byte stream.
Args:
byte_stream (bytes): byte stream.
byte_offset (Optional[int]): offset into the byte stream where to start.
Returns:
object: mapped value.
Raises:
MappingError: if the data type definition cannot be mapped on
the byte stream.
"""
return byte_stream[byte_offset:byte_offset + self.byte_size] | [
"def",
"MapByteStream",
"(",
"self",
",",
"byte_stream",
",",
"byte_offset",
"=",
"0",
",",
"*",
"*",
"unused_kwargs",
")",
":",
"return",
"byte_stream",
"[",
"byte_offset",
":",
"byte_offset",
"+",
"self",
".",
"byte_size",
"]"
] | 30.133333 | 23.6 |
def _connect_signals():
"""Connect up post_save, post_delete signals for models."""
for index in settings.get_index_names():
for model in settings.get_index_models(index):
_connect_model_signals(model) | [
"def",
"_connect_signals",
"(",
")",
":",
"for",
"index",
"in",
"settings",
".",
"get_index_names",
"(",
")",
":",
"for",
"model",
"in",
"settings",
".",
"get_index_models",
"(",
"index",
")",
":",
"_connect_model_signals",
"(",
"model",
")"
] | 45 | 7.2 |
def submit_task(self,
function_descriptor,
args,
actor_id=None,
actor_handle_id=None,
actor_counter=0,
actor_creation_id=None,
actor_creation_dummy_object_id=None,
max_actor_reconstructions=0,
execution_dependencies=None,
new_actor_handles=None,
num_return_vals=None,
resources=None,
placement_resources=None,
driver_id=None):
"""Submit a remote task to the scheduler.
Tell the scheduler to schedule the execution of the function with
function_descriptor with arguments args. Retrieve object IDs for the
outputs of the function from the scheduler and immediately return them.
Args:
function_descriptor: The function descriptor to execute.
args: The arguments to pass into the function. Arguments can be
object IDs or they can be values. If they are values, they must
be serializable objects.
actor_id: The ID of the actor that this task is for.
actor_counter: The counter of the actor task.
actor_creation_id: The ID of the actor to create, if this is an
actor creation task.
actor_creation_dummy_object_id: If this task is an actor method,
then this argument is the dummy object ID associated with the
actor creation task for the corresponding actor.
execution_dependencies: The execution dependencies for this task.
num_return_vals: The number of return values this function should
have.
resources: The resource requirements for this task.
placement_resources: The resources required for placing the task.
If this is not provided or if it is an empty dictionary, then
the placement resources will be equal to resources.
driver_id: The ID of the relevant driver. This is almost always the
driver ID of the driver that is currently running. However, in
the exceptional case that an actor task is being dispatched to
an actor created by a different driver, this should be the
driver ID of the driver that created the actor.
Returns:
The return object IDs for this task.
"""
with profiling.profile("submit_task"):
if actor_id is None:
assert actor_handle_id is None
actor_id = ActorID.nil()
actor_handle_id = ActorHandleID.nil()
else:
assert actor_handle_id is not None
if actor_creation_id is None:
actor_creation_id = ActorID.nil()
if actor_creation_dummy_object_id is None:
actor_creation_dummy_object_id = ObjectID.nil()
# Put large or complex arguments that are passed by value in the
# object store first.
args_for_raylet = []
for arg in args:
if isinstance(arg, ObjectID):
args_for_raylet.append(arg)
elif ray._raylet.check_simple_value(arg):
args_for_raylet.append(arg)
else:
args_for_raylet.append(put(arg))
# By default, there are no execution dependencies.
if execution_dependencies is None:
execution_dependencies = []
if new_actor_handles is None:
new_actor_handles = []
if driver_id is None:
driver_id = self.task_driver_id
if resources is None:
raise ValueError("The resources dictionary is required.")
for value in resources.values():
assert (isinstance(value, int) or isinstance(value, float))
if value < 0:
raise ValueError(
"Resource quantities must be nonnegative.")
if (value >= 1 and isinstance(value, float)
and not value.is_integer()):
raise ValueError(
"Resource quantities must all be whole numbers.")
# Remove any resources with zero quantity requirements
resources = {
resource_label: resource_quantity
for resource_label, resource_quantity in resources.items()
if resource_quantity > 0
}
if placement_resources is None:
placement_resources = {}
# Increment the worker's task index to track how many tasks
# have been submitted by the current task so far.
self.task_context.task_index += 1
# The parent task must be set for the submitted task.
assert not self.current_task_id.is_nil()
# Current driver id must not be nil when submitting a task.
# Because every task must belong to a driver.
assert not self.task_driver_id.is_nil()
# Submit the task to raylet.
function_descriptor_list = (
function_descriptor.get_function_descriptor_list())
assert isinstance(driver_id, DriverID)
task = ray._raylet.Task(
driver_id,
function_descriptor_list,
args_for_raylet,
num_return_vals,
self.current_task_id,
self.task_context.task_index,
actor_creation_id,
actor_creation_dummy_object_id,
max_actor_reconstructions,
actor_id,
actor_handle_id,
actor_counter,
new_actor_handles,
execution_dependencies,
resources,
placement_resources,
)
self.raylet_client.submit_task(task)
return task.returns() | [
"def",
"submit_task",
"(",
"self",
",",
"function_descriptor",
",",
"args",
",",
"actor_id",
"=",
"None",
",",
"actor_handle_id",
"=",
"None",
",",
"actor_counter",
"=",
"0",
",",
"actor_creation_id",
"=",
"None",
",",
"actor_creation_dummy_object_id",
"=",
"Non... | 43.345324 | 17.503597 |
def _utc_datetime_to_epoch(self, activity_datetime):
"""
Convert the specified datetime value to a unix epoch timestamp (seconds since epoch).
:param activity_datetime: A string which may contain tzinfo (offset) or a datetime object (naive datetime will
be considered to be UTC).
:return: Epoch timestamp.
:rtype: int
"""
if isinstance(activity_datetime, str):
activity_datetime = arrow.get(activity_datetime).datetime
assert isinstance(activity_datetime, datetime)
if activity_datetime.tzinfo:
activity_datetime = activity_datetime.astimezone(pytz.utc)
return calendar.timegm(activity_datetime.timetuple()) | [
"def",
"_utc_datetime_to_epoch",
"(",
"self",
",",
"activity_datetime",
")",
":",
"if",
"isinstance",
"(",
"activity_datetime",
",",
"str",
")",
":",
"activity_datetime",
"=",
"arrow",
".",
"get",
"(",
"activity_datetime",
")",
".",
"datetime",
"assert",
"isinst... | 45.875 | 23.5 |
def select(self, limit=0):
"""Match all tags under the targeted tag."""
if limit < 1:
limit = None
for child in self.get_descendants(self.tag):
if self.match(child):
yield child
if limit is not None:
limit -= 1
if limit < 1:
break | [
"def",
"select",
"(",
"self",
",",
"limit",
"=",
"0",
")",
":",
"if",
"limit",
"<",
"1",
":",
"limit",
"=",
"None",
"for",
"child",
"in",
"self",
".",
"get_descendants",
"(",
"self",
".",
"tag",
")",
":",
"if",
"self",
".",
"match",
"(",
"child",... | 28 | 14.769231 |
def update_or_create(self, model, **kwargs):
'''Update or create a new instance of ``model``.
This method can raise an exception if the ``kwargs`` dictionary
contains field data that does not validate.
:param model: a :class:`StdModel`
:param kwargs: dictionary of parameters.
:returns: A two elements tuple containing the instance and a boolean
indicating if the instance was created or not.
'''
backend = self.model(model).backend
return backend.execute(self._update_or_create(model, **kwargs)) | [
"def",
"update_or_create",
"(",
"self",
",",
"model",
",",
"*",
"*",
"kwargs",
")",
":",
"backend",
"=",
"self",
".",
"model",
"(",
"model",
")",
".",
"backend",
"return",
"backend",
".",
"execute",
"(",
"self",
".",
"_update_or_create",
"(",
"model",
... | 44.769231 | 21.076923 |
def get_scoreboard(year, month, day):
"""Return the game file for a certain day matching certain criteria."""
try:
data = urlopen(BASE_URL.format(year, month, day) + 'scoreboard.xml')
except HTTPError:
data = os.path.join(PWD, 'default.xml')
return data | [
"def",
"get_scoreboard",
"(",
"year",
",",
"month",
",",
"day",
")",
":",
"try",
":",
"data",
"=",
"urlopen",
"(",
"BASE_URL",
".",
"format",
"(",
"year",
",",
"month",
",",
"day",
")",
"+",
"'scoreboard.xml'",
")",
"except",
"HTTPError",
":",
"data",
... | 39.857143 | 17.428571 |
def mount(self, mountpoint, app, into_worker=False):
"""Load application under mountpoint.
Example:
* .mount('', 'app0.py') -- Root URL part
* .mount('/app1', 'app1.py') -- URL part
* .mount('/pinax/here', '/var/www/pinax/deploy/pinax.wsgi')
* .mount('the_app3', 'app3.py') -- Variable value: application alias (can be set by ``UWSGI_APPID``)
* .mount('example.com', 'app2.py') -- Variable value: Hostname (variable set in nginx)
* http://uwsgi-docs.readthedocs.io/en/latest/Nginx.html#hosting-multiple-apps-in-the-same-process-aka-managing-script-name-and-path-info
:param str|unicode mountpoint: URL part, or variable value.
.. note:: In case of URL part you may also want to set ``manage_script_name`` basic param to ``True``.
.. warning:: In case of URL part a trailing slash may case problems in some cases
(e.g. with Django based projects).
:param str|unicode app: App module/file.
:param bool into_worker: Load application under mountpoint
in the specified worker or after workers spawn.
"""
# todo check worker mount -- uwsgi_init_worker_mount_app() expects worker://
self._set('worker-mount' if into_worker else 'mount', '%s=%s' % (mountpoint, app), multi=True)
return self._section | [
"def",
"mount",
"(",
"self",
",",
"mountpoint",
",",
"app",
",",
"into_worker",
"=",
"False",
")",
":",
"# todo check worker mount -- uwsgi_init_worker_mount_app() expects worker://",
"self",
".",
"_set",
"(",
"'worker-mount'",
"if",
"into_worker",
"else",
"'mount'",
... | 47.103448 | 35.241379 |
def reduce_by(fn: Callable[[T1, T1], T1]) -> Callable[[ActualIterable[T1]], T1]:
"""
>>> from Redy.Collections import Traversal, Flow
>>> def mul(a: int, b: int): return a * b
>>> lst: Iterable[int] = [1, 2, 3]
>>> x = Flow(lst)[Traversal.reduce_by(mul)].unbox
>>> assert x is 6
"""
return lambda collection: functools.reduce(fn, collection) | [
"def",
"reduce_by",
"(",
"fn",
":",
"Callable",
"[",
"[",
"T1",
",",
"T1",
"]",
",",
"T1",
"]",
")",
"->",
"Callable",
"[",
"[",
"ActualIterable",
"[",
"T1",
"]",
"]",
",",
"T1",
"]",
":",
"return",
"lambda",
"collection",
":",
"functools",
".",
... | 40.555556 | 12.555556 |
def generate(basename, xml_list):
'''generate complete MAVLink Java implemenation'''
for xml in xml_list:
generate_one(basename, xml)
generate_enums(basename, xml)
generate_MAVLinkMessage(basename, xml_list)
copy_fixed_headers(basename, xml_list[0]) | [
"def",
"generate",
"(",
"basename",
",",
"xml_list",
")",
":",
"for",
"xml",
"in",
"xml_list",
":",
"generate_one",
"(",
"basename",
",",
"xml",
")",
"generate_enums",
"(",
"basename",
",",
"xml",
")",
"generate_MAVLinkMessage",
"(",
"basename",
",",
"xml_li... | 39.857143 | 8.714286 |
def update_folder_name(self, name, update_folder_data=True):
""" Change this folder name
:param str name: new name to change to
:param bool update_folder_data: whether or not to re-fetch the data
:return: Updated or Not
:rtype: bool
"""
if self.root:
return False
if not name:
return False
url = self.build_url(
self._endpoints.get('get_folder').format(id=self.folder_id))
response = self.con.patch(url, data={self._cc('displayName'): name})
if not response:
return False
self.name = name
if not update_folder_data:
return True
folder = response.json()
self.name = folder.get(self._cc('displayName'), '')
self.parent_id = folder.get(self._cc('parentFolderId'), None)
self.child_folders_count = folder.get(self._cc('childFolderCount'), 0)
self.unread_items_count = folder.get(self._cc('unreadItemCount'), 0)
self.total_items_count = folder.get(self._cc('totalItemCount'), 0)
self.updated_at = dt.datetime.now()
return True | [
"def",
"update_folder_name",
"(",
"self",
",",
"name",
",",
"update_folder_data",
"=",
"True",
")",
":",
"if",
"self",
".",
"root",
":",
"return",
"False",
"if",
"not",
"name",
":",
"return",
"False",
"url",
"=",
"self",
".",
"build_url",
"(",
"self",
... | 33.029412 | 22.911765 |
def listener(self, sock, *args):
'''Asynchronous connection listener. Starts a handler for each connection.'''
conn, addr = sock.accept()
f = conn.makefile(conn)
self.shell = ShoebotCmd(self.bot, stdin=f, stdout=f, intro=INTRO)
print(_("Connected"))
GObject.io_add_watch(conn, GObject.IO_IN, self.handler)
if self.shell.intro:
self.shell.stdout.write(str(self.shell.intro)+"\n")
self.shell.stdout.flush()
return True | [
"def",
"listener",
"(",
"self",
",",
"sock",
",",
"*",
"args",
")",
":",
"conn",
",",
"addr",
"=",
"sock",
".",
"accept",
"(",
")",
"f",
"=",
"conn",
".",
"makefile",
"(",
"conn",
")",
"self",
".",
"shell",
"=",
"ShoebotCmd",
"(",
"self",
".",
... | 41.166667 | 19.5 |
def update_book(self, id, body, doc_type='book'):
''' Update a book
The "body" is merged with the current one.
Yes, it is NOT overwritten.
In case of concurrency conflict
this function could raise `elasticsearch.ConflictError`
'''
# note that we are NOT overwriting all the _source, just merging
book = self.get_book_by_id(id)
book['_source'].update(body)
validated = validate_book(book['_source'])
ret = self.es.index(index=self.index_name, id=id,
doc_type=doc_type, body=validated, version=book['_version'])
return ret | [
"def",
"update_book",
"(",
"self",
",",
"id",
",",
"body",
",",
"doc_type",
"=",
"'book'",
")",
":",
"# note that we are NOT overwriting all the _source, just merging",
"book",
"=",
"self",
".",
"get_book_by_id",
"(",
"id",
")",
"book",
"[",
"'_source'",
"]",
".... | 40.4375 | 19.5625 |
def query_model(self, model, condition=None, offset=None, limit=None,
group_by=None, having=None, order_by=None, fields=None):
"""
Query all records with limit and offset, it's used for pagination query.
"""
if self._query is not None:
query = self._query
else:
query = self.get_select()
if condition is not None:
if isinstance(query, Result):
query = query.filter(condition)
else:
query = query.where(condition)
if self.pagination:
if offset is not None:
query = query.offset(int(offset))
if limit is not None:
query = query.limit(int(limit))
if order_by is not None:
if isinstance(order_by, (tuple, list)):
for order in order_by:
query = query.order_by(order)
else:
query = query.order_by(order_by)
if group_by is not None:
if isinstance(group_by, (tuple, list)):
query = query.group_by(*group_by)
else:
query = query.group_by(group_by)
if having is not None:
query = query.having(having)
return query | [
"def",
"query_model",
"(",
"self",
",",
"model",
",",
"condition",
"=",
"None",
",",
"offset",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"group_by",
"=",
"None",
",",
"having",
"=",
"None",
",",
"order_by",
"=",
"None",
",",
"fields",
"=",
"None",... | 38.588235 | 12.117647 |
def get_unspent(cls, address):
"""Gets all unspent transaction outputs belonging to an address.
:param address: The address in question.
:type address: ``str``
:raises ConnectionError: If all API services fail.
:rtype: ``list`` of :class:`~bit.network.meta.Unspent`
"""
for api_call in cls.GET_UNSPENT_MAIN:
try:
return api_call(address)
except cls.IGNORED_ERRORS:
pass
raise ConnectionError('All APIs are unreachable.') | [
"def",
"get_unspent",
"(",
"cls",
",",
"address",
")",
":",
"for",
"api_call",
"in",
"cls",
".",
"GET_UNSPENT_MAIN",
":",
"try",
":",
"return",
"api_call",
"(",
"address",
")",
"except",
"cls",
".",
"IGNORED_ERRORS",
":",
"pass",
"raise",
"ConnectionError",
... | 33 | 16.0625 |
def energy_based_strength_of_connection(A, theta=0.0, k=2):
"""Energy Strength Measure.
Compute a strength of connection matrix using an energy-based measure.
Parameters
----------
A : sparse-matrix
matrix from which to generate strength of connection information
theta : float
Threshold parameter in [0,1]
k : int
Number of relaxation steps used to generate strength information
Returns
-------
S : csr_matrix
Matrix graph defining strong connections. The sparsity pattern
of S matches that of A. For BSR matrices, S is a reduced strength
of connection matrix that describes connections between supernodes.
Notes
-----
This method relaxes with weighted-Jacobi in order to approximate the
matrix inverse. A normalized change of energy is then used to define
point-wise strength of connection values. Specifically, let v be the
approximation to the i-th column of the inverse, then
(S_ij)^2 = <v_j, v_j>_A / <v, v>_A,
where v_j = v, such that entry j in v has been zeroed out. As is common,
larger values imply a stronger connection.
Current implementation is a very slow pure-python implementation for
experimental purposes, only.
See [2006BrBrMaMaMc]_ for more details.
References
----------
.. [2006BrBrMaMaMc] Brannick, Brezina, MacLachlan, Manteuffel, McCormick.
"An Energy-Based AMG Coarsening Strategy",
Numerical Linear Algebra with Applications,
vol. 13, pp. 133-148, 2006.
Examples
--------
>>> import numpy as np
>>> from pyamg.gallery import stencil_grid
>>> from pyamg.strength import energy_based_strength_of_connection
>>> n=3
>>> stencil = np.array([[-1.0,-1.0,-1.0],
... [-1.0, 8.0,-1.0],
... [-1.0,-1.0,-1.0]])
>>> A = stencil_grid(stencil, (n,n), format='csr')
>>> S = energy_based_strength_of_connection(A, 0.0)
"""
if (theta < 0):
raise ValueError('expected a positive theta')
if not sparse.isspmatrix(A):
raise ValueError('expected sparse matrix')
if (k < 0):
raise ValueError('expected positive number of steps')
if not isinstance(k, int):
raise ValueError('expected integer')
if sparse.isspmatrix_bsr(A):
bsr_flag = True
numPDEs = A.blocksize[0]
if A.blocksize[0] != A.blocksize[1]:
raise ValueError('expected square blocks in BSR matrix A')
else:
bsr_flag = False
# Convert A to csc and Atilde to csr
if sparse.isspmatrix_csr(A):
Atilde = A.copy()
A = A.tocsc()
else:
A = A.tocsc()
Atilde = A.copy()
Atilde = Atilde.tocsr()
# Calculate the weighted-Jacobi parameter
from pyamg.util.linalg import approximate_spectral_radius
D = A.diagonal()
Dinv = 1.0 / D
Dinv[D == 0] = 0.0
Dinv = sparse.csc_matrix((Dinv, (np.arange(A.shape[0]),
np.arange(A.shape[1]))), shape=A.shape)
DinvA = Dinv * A
omega = 1.0 / approximate_spectral_radius(DinvA)
del DinvA
# Approximate A-inverse with k steps of w-Jacobi and a zero initial guess
S = sparse.csc_matrix(A.shape, dtype=A.dtype) # empty matrix
Id = sparse.eye(A.shape[0], A.shape[1], format='csc')
for i in range(k + 1):
S = S + omega * (Dinv * (Id - A * S))
# Calculate the strength entries in S column-wise, but only strength
# values at the sparsity pattern of A
for i in range(Atilde.shape[0]):
v = np.mat(S[:, i].todense())
Av = np.mat(A * v)
denom = np.sqrt(np.conjugate(v).T * Av)
# replace entries in row i with strength values
for j in range(Atilde.indptr[i], Atilde.indptr[i + 1]):
col = Atilde.indices[j]
vj = v[col].copy()
v[col] = 0.0
# = (||v_j||_A - ||v||_A) / ||v||_A
val = np.sqrt(np.conjugate(v).T * A * v) / denom - 1.0
# Negative values generally imply a weak connection
if val > -0.01:
Atilde.data[j] = abs(val)
else:
Atilde.data[j] = 0.0
v[col] = vj
# Apply drop tolerance
Atilde = classical_strength_of_connection(Atilde, theta=theta)
Atilde.eliminate_zeros()
# Put ones on the diagonal
Atilde = Atilde + Id.tocsr()
Atilde.sort_indices()
# Amalgamate Atilde for the BSR case, using ones for all strong connections
if bsr_flag:
Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))
nblocks = Atilde.indices.shape[0]
uone = np.ones((nblocks,))
Atilde = sparse.csr_matrix((uone, Atilde.indices, Atilde.indptr),
shape=(
int(Atilde.shape[0] / numPDEs),
int(Atilde.shape[1] / numPDEs)))
# Scale C by the largest magnitude entry in each row
Atilde = scale_rows_by_largest_entry(Atilde)
return Atilde | [
"def",
"energy_based_strength_of_connection",
"(",
"A",
",",
"theta",
"=",
"0.0",
",",
"k",
"=",
"2",
")",
":",
"if",
"(",
"theta",
"<",
"0",
")",
":",
"raise",
"ValueError",
"(",
"'expected a positive theta'",
")",
"if",
"not",
"sparse",
".",
"isspmatrix"... | 34.186207 | 21.468966 |
def mergeActors(actors, tol=0):
"""
Build a new actor formed by the fusion of the polydatas of input objects.
Similar to Assembly, but in this case the input objects become a single mesh.
.. hint:: |thinplate_grid| |thinplate_grid.py|_
"""
polylns = vtk.vtkAppendPolyData()
for a in actors:
polylns.AddInputData(a.polydata())
polylns.Update()
pd = polylns.GetOutput()
return Actor(pd) | [
"def",
"mergeActors",
"(",
"actors",
",",
"tol",
"=",
"0",
")",
":",
"polylns",
"=",
"vtk",
".",
"vtkAppendPolyData",
"(",
")",
"for",
"a",
"in",
"actors",
":",
"polylns",
".",
"AddInputData",
"(",
"a",
".",
"polydata",
"(",
")",
")",
"polylns",
".",... | 32.384615 | 16.538462 |
def add(self, name, filt, info='', params=(), setn=None):
"""
Add filter.
Parameters
----------
name : str
filter name
filt : array_like
boolean filter array
info : str
informative description of the filter
params : tuple
parameters used to make the filter
Returns
-------
None
"""
iname = '{:.0f}_'.format(self.n) + name
self.index[self.n] = iname
if setn is None:
setn = self.maxset + 1
self.maxset = setn
if setn not in self.sets.keys():
self.sets[setn] = [iname]
else:
self.sets[setn].append(iname)
# self.keys is not added to?
self.components[iname] = filt
self.info[iname] = info
self.params[iname] = params
for a in self.analytes:
self.switches[a][iname] = False
self.n += 1
return | [
"def",
"add",
"(",
"self",
",",
"name",
",",
"filt",
",",
"info",
"=",
"''",
",",
"params",
"=",
"(",
")",
",",
"setn",
"=",
"None",
")",
":",
"iname",
"=",
"'{:.0f}_'",
".",
"format",
"(",
"self",
".",
"n",
")",
"+",
"name",
"self",
".",
"in... | 23.775 | 16.925 |
def _write_symlink(self, zf, link_target, link_path):
"""Package symlinks with appropriate zipfile metadata."""
info = zipfile.ZipInfo()
info.filename = link_path
info.create_system = 3
# Magic code for symlinks / py2/3 compat
# 27166663808 = (stat.S_IFLNK | 0755) << 16
info.external_attr = 2716663808
zf.writestr(info, link_target) | [
"def",
"_write_symlink",
"(",
"self",
",",
"zf",
",",
"link_target",
",",
"link_path",
")",
":",
"info",
"=",
"zipfile",
".",
"ZipInfo",
"(",
")",
"info",
".",
"filename",
"=",
"link_path",
"info",
".",
"create_system",
"=",
"3",
"# Magic code for symlinks /... | 43.222222 | 6.666667 |
def dashfn(handle, lenout=_default_len_out):
"""
Return the name of the DAS file associated with a handle.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dashfn_c.html
:param handle: Handle of a DAS file.
:type handle: int
:param lenout: Length of output file name string.
:type lenout: int
:return: Corresponding file name.
:rtype: str
"""
handle = ctypes.c_int(handle)
namlen = ctypes.c_int(lenout)
fname = stypes.stringToCharP(lenout)
libspice.dashfn_c(handle, namlen, fname)
return stypes.toPythonString(fname) | [
"def",
"dashfn",
"(",
"handle",
",",
"lenout",
"=",
"_default_len_out",
")",
":",
"handle",
"=",
"ctypes",
".",
"c_int",
"(",
"handle",
")",
"namlen",
"=",
"ctypes",
".",
"c_int",
"(",
"lenout",
")",
"fname",
"=",
"stypes",
".",
"stringToCharP",
"(",
"... | 31.888889 | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.