text
stringlengths 89
104k
| code_tokens
list | avg_line_len
float64 7.91
980
| score
float64 0
630
|
|---|---|---|---|
def i2repr(self, pkt, x):
    # type: (Optional[packet.Packet], int) -> str
    """Render ``x`` for display; unlike the base field, ``x`` may not be None.

    @param packet.Packet|None pkt: the packet instance containing this field instance; probably unused.  # noqa: E501
    @param int x: the value to render.
    @return str: the human-readable representation.
    """
    return super(UVarIntField, self).i2repr(pkt, x)
|
[
"def",
"i2repr",
"(",
"self",
",",
"pkt",
",",
"x",
")",
":",
"# type: (Optional[packet.Packet], int) -> str",
"return",
"super",
"(",
"UVarIntField",
",",
"self",
")",
".",
"i2repr",
"(",
"pkt",
",",
"x",
")"
] | 47.555556
| 18.666667
|
def _select_ontology(self, line):
"""try to select an ontology NP: the actual load from FS is in <_load_ontology> """
try:
var = int(line) # it's a string
if var in range(1, len(self.all_ontologies)+1):
self._load_ontology(self.all_ontologies[var-1])
except ValueError:
out = []
for each in self.all_ontologies:
if line in each:
out += [each]
choice = self._selectFromList(out, line, "ontology")
if choice:
self._load_ontology(choice)
|
[
"def",
"_select_ontology",
"(",
"self",
",",
"line",
")",
":",
"try",
":",
"var",
"=",
"int",
"(",
"line",
")",
"# it's a string",
"if",
"var",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"self",
".",
"all_ontologies",
")",
"+",
"1",
")",
":",
"self",
".",
"_load_ontology",
"(",
"self",
".",
"all_ontologies",
"[",
"var",
"-",
"1",
"]",
")",
"except",
"ValueError",
":",
"out",
"=",
"[",
"]",
"for",
"each",
"in",
"self",
".",
"all_ontologies",
":",
"if",
"line",
"in",
"each",
":",
"out",
"+=",
"[",
"each",
"]",
"choice",
"=",
"self",
".",
"_selectFromList",
"(",
"out",
",",
"line",
",",
"\"ontology\"",
")",
"if",
"choice",
":",
"self",
".",
"_load_ontology",
"(",
"choice",
")"
] | 41.857143
| 12.785714
|
def find_mappable(*axes):
    """Return the most recently added mappable layer among the given axes.

    Parameters
    ----------
    *axes : `~matplotlib.axes.Axes`
        one or more axes to search for a mappable

    Raises
    ------
    ValueError
        if no axes holds a collection or an image
    """
    for axis in axes:
        # collections take precedence over images on the same axes
        for attr in ('collections', 'images'):
            try:
                layers = getattr(axis, attr)
                return layers[-1]
            except (AttributeError, IndexError):
                continue
    raise ValueError("Cannot determine mappable layer on any axes "
                     "for this colorbar")
|
[
"def",
"find_mappable",
"(",
"*",
"axes",
")",
":",
"for",
"ax",
"in",
"axes",
":",
"for",
"aset",
"in",
"(",
"'collections'",
",",
"'images'",
")",
":",
"try",
":",
"return",
"getattr",
"(",
"ax",
",",
"aset",
")",
"[",
"-",
"1",
"]",
"except",
"(",
"AttributeError",
",",
"IndexError",
")",
":",
"continue",
"raise",
"ValueError",
"(",
"\"Cannot determine mappable layer on any axes \"",
"\"for this colorbar\"",
")"
] | 32.3125
| 14.25
|
def configure(self, config):
    """Configure the component by passing configuration parameters.

    :param config: configuration parameters to be set.
    """
    timeout = config.get_as_long_with_default(
        "options.timeout", self._default_timeout)
    max_size = config.get_as_long_with_default(
        "options.max_size", self._default_max_size)
    self._timeout = timeout
    self._max_size = max_size
|
[
"def",
"configure",
"(",
"self",
",",
"config",
")",
":",
"self",
".",
"_timeout",
"=",
"config",
".",
"get_as_long_with_default",
"(",
"\"options.timeout\"",
",",
"self",
".",
"_default_timeout",
")",
"self",
".",
"_max_size",
"=",
"config",
".",
"get_as_long_with_default",
"(",
"\"options.max_size\"",
",",
"self",
".",
"_default_max_size",
")"
] | 46.25
| 26.5
|
def save_to_file(self, data, stamp):
    """Saves data to current dataset.

    :param data: data to save to file
    :type data: numpy.ndarray
    :param stamp: time stamp of when the data was acquired
    :type stamp: str
    """
    self.datafile.append(self.current_dataset_name, data)
    # save stimulus info; merge component and test docs into one dict.
    # The original `dict(a.items() + b.items())` only works on Python 2
    # (dict views cannot be concatenated with `+` on Python 3); update()
    # keeps the same later-key-wins semantics.
    info = dict(self._stimulus.componentDoc())
    info.update(self._stimulus.testDoc())
    print('saving doc', info)  # print statement made Python 3 compatible
    info['time_stamps'] = [stamp]
    info['samplerate_ad'] = self.player.aifs
    self.datafile.append_trace_info(self.current_dataset_name, info)
|
[
"def",
"save_to_file",
"(",
"self",
",",
"data",
",",
"stamp",
")",
":",
"self",
".",
"datafile",
".",
"append",
"(",
"self",
".",
"current_dataset_name",
",",
"data",
")",
"# save stimulu info",
"info",
"=",
"dict",
"(",
"self",
".",
"_stimulus",
".",
"componentDoc",
"(",
")",
".",
"items",
"(",
")",
"+",
"self",
".",
"_stimulus",
".",
"testDoc",
"(",
")",
".",
"items",
"(",
")",
")",
"print",
"'saving doc'",
",",
"info",
"info",
"[",
"'time_stamps'",
"]",
"=",
"[",
"stamp",
"]",
"info",
"[",
"'samplerate_ad'",
"]",
"=",
"self",
".",
"player",
".",
"aifs",
"self",
".",
"datafile",
".",
"append_trace_info",
"(",
"self",
".",
"current_dataset_name",
",",
"info",
")"
] | 41.2
| 15.2
|
def extender(self, edge):
    """Try to extend existing chart edges with this completed edge."""
    j, k, B, _, _ = edge
    for candidate in self.chart[j]:
        i, _mid, A, alpha, remainder = candidate
        # extend only if B is the next symbol the candidate is waiting for
        if remainder and remainder[0] == B:
            self.add_edge([i, k, A, alpha + [edge], remainder[1:]])
|
[
"def",
"extender",
"(",
"self",
",",
"edge",
")",
":",
"(",
"j",
",",
"k",
",",
"B",
",",
"_",
",",
"_",
")",
"=",
"edge",
"for",
"(",
"i",
",",
"j",
",",
"A",
",",
"alpha",
",",
"B1b",
")",
"in",
"self",
".",
"chart",
"[",
"j",
"]",
":",
"if",
"B1b",
"and",
"B",
"==",
"B1b",
"[",
"0",
"]",
":",
"self",
".",
"add_edge",
"(",
"[",
"i",
",",
"k",
",",
"A",
",",
"alpha",
"+",
"[",
"edge",
"]",
",",
"B1b",
"[",
"1",
":",
"]",
"]",
")"
] | 43.333333
| 13.333333
|
def datasets(self) -> tuple:
    """Datasets.

    Returns a tuple of the values in this mapping that are
    :class:`h5py.Dataset` instances.
    """
    found = []
    for _name, value in self.items():
        if isinstance(value, h5py.Dataset):
            found.append(value)
    return tuple(found)
|
[
"def",
"datasets",
"(",
"self",
")",
"->",
"tuple",
":",
"return",
"tuple",
"(",
"v",
"for",
"_",
",",
"v",
"in",
"self",
".",
"items",
"(",
")",
"if",
"isinstance",
"(",
"v",
",",
"h5py",
".",
"Dataset",
")",
")"
] | 43.333333
| 17
|
def get_verb_function(data, verb):
    """
    Return function that implements the verb for given data type

    Parameters
    ----------
    data : object
        Data source whose type selects the implementing module.
    verb : str
        Name of the verb function to look up on that module.

    Raises
    ------
    TypeError
        If no module implementing *verb* is known for ``type(data)``.
    """
    # Explicit sentinel: previously `module` could remain an unbound local,
    # and the code relied on UnboundLocalError (a NameError subclass) being
    # caught below -- fragile and implicit.
    module = None
    try:
        module = type_lookup[type(data)]
    except KeyError:
        # Some guess work for subclasses
        for type_, mod in type_lookup.items():
            if isinstance(data, type_):
                module = mod
                break
    try:
        # getattr(None, verb) raises AttributeError, funneling the
        # "no module found" case into the TypeError below.
        return getattr(module, verb)
    except (NameError, AttributeError):
        msg = "Data source of type '{}' is not supported."
        raise TypeError(msg.format(type(data)))
|
[
"def",
"get_verb_function",
"(",
"data",
",",
"verb",
")",
":",
"try",
":",
"module",
"=",
"type_lookup",
"[",
"type",
"(",
"data",
")",
"]",
"except",
"KeyError",
":",
"# Some guess work for subclasses",
"for",
"type_",
",",
"mod",
"in",
"type_lookup",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"type_",
")",
":",
"module",
"=",
"mod",
"break",
"try",
":",
"return",
"getattr",
"(",
"module",
",",
"verb",
")",
"except",
"(",
"NameError",
",",
"AttributeError",
")",
":",
"msg",
"=",
"\"Data source of type '{}' is not supported.\"",
"raise",
"TypeError",
"(",
"msg",
".",
"format",
"(",
"type",
"(",
"data",
")",
")",
")"
] | 31.882353
| 10.705882
|
def calc_networkmeasure(self, networkmeasure, **measureparams):
    """
    Calculate network measure.

    Parameters
    -----------
    networkmeasure : str
        Function to call. Functions available are in teneto.networkmeasures
    measureparams : kwargs
        kwargs for teneto.networkmeasure.[networkmeasure]
    """
    available = [name for name in dir(teneto.networkmeasures)
                 if not name.startswith('__')]
    if networkmeasure not in available:
        raise ValueError(
            'Unknown network measure. Available network measures are: ' + ', '.join(available))
    # resolve the measure name to its callable and invoke it on this object
    members = inspect.getmembers(teneto.networkmeasures)
    funs = {name: fun for name, fun in members if not name.startswith('__')}
    return funs[networkmeasure](self, **measureparams)
|
[
"def",
"calc_networkmeasure",
"(",
"self",
",",
"networkmeasure",
",",
"*",
"*",
"measureparams",
")",
":",
"availablemeasures",
"=",
"[",
"f",
"for",
"f",
"in",
"dir",
"(",
"teneto",
".",
"networkmeasures",
")",
"if",
"not",
"f",
".",
"startswith",
"(",
"'__'",
")",
"]",
"if",
"networkmeasure",
"not",
"in",
"availablemeasures",
":",
"raise",
"ValueError",
"(",
"'Unknown network measure. Available network measures are: '",
"+",
"', '",
".",
"join",
"(",
"availablemeasures",
")",
")",
"funs",
"=",
"inspect",
".",
"getmembers",
"(",
"teneto",
".",
"networkmeasures",
")",
"funs",
"=",
"{",
"m",
"[",
"0",
"]",
":",
"m",
"[",
"1",
"]",
"for",
"m",
"in",
"funs",
"if",
"not",
"m",
"[",
"0",
"]",
".",
"startswith",
"(",
"'__'",
")",
"}",
"measure",
"=",
"funs",
"[",
"networkmeasure",
"]",
"(",
"self",
",",
"*",
"*",
"measureparams",
")",
"return",
"measure"
] | 40.809524
| 20.714286
|
def small_image_url(self):
    """Optional[:class:`str`]: Returns a URL pointing to the small image asset of this activity if applicable."""
    if self.application_id is None:
        return None
    if 'small_image' not in self.assets:
        return None
    small_image = self.assets['small_image']
    return 'https://cdn.discordapp.com/app-assets/{0}/{1}.png'.format(self.application_id, small_image)
|
[
"def",
"small_image_url",
"(",
"self",
")",
":",
"if",
"self",
".",
"application_id",
"is",
"None",
":",
"return",
"None",
"try",
":",
"small_image",
"=",
"self",
".",
"assets",
"[",
"'small_image'",
"]",
"except",
"KeyError",
":",
"return",
"None",
"else",
":",
"return",
"'https://cdn.discordapp.com/app-assets/{0}/{1}.png'",
".",
"format",
"(",
"self",
".",
"application_id",
",",
"small_image",
")"
] | 40
| 22.090909
|
def register_eventclass(event_id):
    """Decorator for registering event classes for parsing."""
    def _decorator(cls):
        # only Event subclasses can be parsed back out of the registry
        if not issubclass(cls, Event):
            raise MessageException(('Cannot register a class that'
                                    ' is not a subclass of Event'))
        EVENT_REGISTRY[event_id] = cls
        logger.debug('######### Event registry is now: {0}'.format(
            EVENT_REGISTRY))
        return cls
    return _decorator
|
[
"def",
"register_eventclass",
"(",
"event_id",
")",
":",
"def",
"register",
"(",
"cls",
")",
":",
"if",
"not",
"issubclass",
"(",
"cls",
",",
"Event",
")",
":",
"raise",
"MessageException",
"(",
"(",
"'Cannot register a class that'",
"' is not a subclass of Event'",
")",
")",
"EVENT_REGISTRY",
"[",
"event_id",
"]",
"=",
"cls",
"logger",
".",
"debug",
"(",
"'######### Event registry is now: {0}'",
".",
"format",
"(",
"EVENT_REGISTRY",
")",
")",
"return",
"cls",
"return",
"register"
] | 38.5
| 13.583333
|
def handle_output(self, workunit, label, stream):
    """Implementation of Reporter callback."""
    root_id = str(workunit.root().id)
    current_workunit = self._root_id_to_workunit_stack[root_id][-1]
    current_workunit['outputs'][label] += stream
|
[
"def",
"handle_output",
"(",
"self",
",",
"workunit",
",",
"label",
",",
"stream",
")",
":",
"self",
".",
"_root_id_to_workunit_stack",
"[",
"str",
"(",
"workunit",
".",
"root",
"(",
")",
".",
"id",
")",
"]",
"[",
"-",
"1",
"]",
"[",
"'outputs'",
"]",
"[",
"label",
"]",
"+=",
"stream"
] | 46.75
| 25.25
|
def state_full(self):
    """unicode, the full name of the object's state.

    >>> address = Address(country='US', state='CO')
    >>> address.state
    u'CO'
    >>> address.state_full
    u'Colorado'
    """
    if not self.is_valid_state:
        return None
    country_states = STATES[self.country.upper()]
    return country_states.get(self.state.upper())
|
[
"def",
"state_full",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_valid_state",
":",
"return",
"STATES",
"[",
"self",
".",
"country",
".",
"upper",
"(",
")",
"]",
".",
"get",
"(",
"self",
".",
"state",
".",
"upper",
"(",
")",
")"
] | 28
| 18.384615
|
def ac_encode(text, probs):
    """Encode a text using arithmetic coding with the provided probabilities.

    This is a wrapper for :py:meth:`Arithmetic.encode`.

    Parameters
    ----------
    text : str
        A string to encode
    probs : dict
        A probability statistics dictionary generated by
        :py:meth:`Arithmetic.train`

    Returns
    -------
    tuple
        The arithmetically coded text

    Example
    -------
    >>> pr = ac_train('the quick brown fox jumped over the lazy dog')
    >>> ac_encode('align', pr)
    (16720586181, 34)
    """
    encoder = Arithmetic()
    encoder.set_probs(probs)
    return encoder.encode(text)
|
[
"def",
"ac_encode",
"(",
"text",
",",
"probs",
")",
":",
"coder",
"=",
"Arithmetic",
"(",
")",
"coder",
".",
"set_probs",
"(",
"probs",
")",
"return",
"coder",
".",
"encode",
"(",
"text",
")"
] | 22.5
| 21.928571
|
def _get_joined_path(ctx):
"""
@type ctx: L{_URLContext}
@param ctx: A URL context.
@return: The path component, un-urlencoded, but joined by slashes.
@rtype: L{bytes}
"""
return b'/' + b'/'.join(seg.encode('utf-8') for seg in ctx.path)
|
[
"def",
"_get_joined_path",
"(",
"ctx",
")",
":",
"return",
"b'/'",
"+",
"b'/'",
".",
"join",
"(",
"seg",
".",
"encode",
"(",
"'utf-8'",
")",
"for",
"seg",
"in",
"ctx",
".",
"path",
")"
] | 28.555556
| 17
|
def check_file_encoding(self, input_file_path):
    """
    Check whether the given file is UTF-8 encoded.

    :param string input_file_path: the path of the file to be checked
    :rtype: :class:`~aeneas.validator.ValidatorResult`
    """
    self.log([u"Checking encoding of file '%s'", input_file_path])
    self.result = ValidatorResult()
    if self._are_safety_checks_disabled(u"check_file_encoding"):
        return self.result
    if gf.file_can_be_read(input_file_path):
        with io.open(input_file_path, "rb") as file_object:
            contents = file_object.read()
        self._check_utf8_encoding(contents)
    else:
        self._failed(u"File '%s' cannot be read." % (input_file_path))
    return self.result
|
[
"def",
"check_file_encoding",
"(",
"self",
",",
"input_file_path",
")",
":",
"self",
".",
"log",
"(",
"[",
"u\"Checking encoding of file '%s'\"",
",",
"input_file_path",
"]",
")",
"self",
".",
"result",
"=",
"ValidatorResult",
"(",
")",
"if",
"self",
".",
"_are_safety_checks_disabled",
"(",
"u\"check_file_encoding\"",
")",
":",
"return",
"self",
".",
"result",
"if",
"not",
"gf",
".",
"file_can_be_read",
"(",
"input_file_path",
")",
":",
"self",
".",
"_failed",
"(",
"u\"File '%s' cannot be read.\"",
"%",
"(",
"input_file_path",
")",
")",
"return",
"self",
".",
"result",
"with",
"io",
".",
"open",
"(",
"input_file_path",
",",
"\"rb\"",
")",
"as",
"file_object",
":",
"bstring",
"=",
"file_object",
".",
"read",
"(",
")",
"self",
".",
"_check_utf8_encoding",
"(",
"bstring",
")",
"return",
"self",
".",
"result"
] | 43.777778
| 15.333333
|
def resolve_implicit_levels(storage, debug):
    """Resolve implicit levels (rules I1 and I2).

    See: http://unicode.org/reports/tr9/#Resolving_Implicit_Levels
    """
    for run in storage['runs']:
        begin = run['start']
        end = begin + run['length']
        for _ch in storage['chars'][begin:end]:
            # only those types are allowed at this stage
            assert _ch['type'] in ('L', 'R', 'EN', 'AN'),\
                '%s not allowed here' % _ch['type']
            if _embedding_direction(_ch['level']) == 'L':
                # I1. For all characters with an even (left-to-right) embedding
                # direction, those of type R go up one level and those of type
                # AN or EN go up two levels.
                if _ch['type'] == 'R':
                    _ch['level'] += 1
                elif _ch['type'] != 'L':
                    _ch['level'] += 2
            else:
                # I2. For all characters with an odd (right-to-left) embedding
                # direction, those of type L, EN or AN go up one level.
                if _ch['type'] != 'R':
                    _ch['level'] += 1
    if debug:
        debug_storage(storage, runs=True)
|
[
"def",
"resolve_implicit_levels",
"(",
"storage",
",",
"debug",
")",
":",
"for",
"run",
"in",
"storage",
"[",
"'runs'",
"]",
":",
"start",
",",
"length",
"=",
"run",
"[",
"'start'",
"]",
",",
"run",
"[",
"'length'",
"]",
"chars",
"=",
"storage",
"[",
"'chars'",
"]",
"[",
"start",
":",
"start",
"+",
"length",
"]",
"for",
"_ch",
"in",
"chars",
":",
"# only those types are allowed at this stage",
"assert",
"_ch",
"[",
"'type'",
"]",
"in",
"(",
"'L'",
",",
"'R'",
",",
"'EN'",
",",
"'AN'",
")",
",",
"'%s not allowed here'",
"%",
"_ch",
"[",
"'type'",
"]",
"if",
"_embedding_direction",
"(",
"_ch",
"[",
"'level'",
"]",
")",
"==",
"'L'",
":",
"# I1. For all characters with an even (left-to-right) embedding",
"# direction, those of type R go up one level and those of type",
"# AN or EN go up two levels.",
"if",
"_ch",
"[",
"'type'",
"]",
"==",
"'R'",
":",
"_ch",
"[",
"'level'",
"]",
"+=",
"1",
"elif",
"_ch",
"[",
"'type'",
"]",
"!=",
"'L'",
":",
"_ch",
"[",
"'level'",
"]",
"+=",
"2",
"else",
":",
"# I2. For all characters with an odd (right-to-left) embedding",
"# direction, those of type L, EN or AN go up one level.",
"if",
"_ch",
"[",
"'type'",
"]",
"!=",
"'R'",
":",
"_ch",
"[",
"'level'",
"]",
"+=",
"1",
"if",
"debug",
":",
"debug_storage",
"(",
"storage",
",",
"runs",
"=",
"True",
")"
] | 38.451613
| 18
|
def decompose_select(selectx):
    """Build a NameIndexer for the select's tables and collect its filter
    expressions (JOIN ON conditions plus the WHERE clause, when present).
    Helper for run_select. Returns (NameIndexer, list_of_where_exprs).
    """
    nix = NameIndexer.ctor_fromlist(selectx.tables)
    where = []
    for table_expr in selectx.tables:
        if isinstance(table_expr, sqparse2.JoinX) and table_expr.on_stmt is not None:
            # todo: what happens if on_stmt columns are non-ambiguous in the context of the join tables but ambiguous overall? yuck.
            where.append(table_expr.on_stmt)
    if selectx.where:
        where.append(selectx.where)
    return nix, where
|
[
"def",
"decompose_select",
"(",
"selectx",
")",
":",
"nix",
"=",
"NameIndexer",
".",
"ctor_fromlist",
"(",
"selectx",
".",
"tables",
")",
"where",
"=",
"[",
"]",
"for",
"fromx",
"in",
"selectx",
".",
"tables",
":",
"if",
"isinstance",
"(",
"fromx",
",",
"sqparse2",
".",
"JoinX",
")",
"and",
"fromx",
".",
"on_stmt",
"is",
"not",
"None",
":",
"# todo: what happens if on_stmt columns are non-ambiguous in the context of the join tables but ambiguous overall? yuck.",
"where",
".",
"append",
"(",
"fromx",
".",
"on_stmt",
")",
"if",
"selectx",
".",
"where",
":",
"where",
".",
"append",
"(",
"selectx",
".",
"where",
")",
"return",
"nix",
",",
"where"
] | 52.1
| 27.3
|
def marginals(self, X):
    """
    Compute the marginals for the given candidates X.

    Note: split into batches to avoid OOM errors.

    :param X: The input data which is a (list of Candidate objects, a sparse
        matrix of corresponding features) pair or a list of
        (Candidate, features) pairs.
    :type X: pair or list
    """
    # switch the network to evaluation mode
    nn.Module.train(self, False)

    if self._check_input(X):
        X = self._preprocess_data(X)

    loader = DataLoader(
        MultiModalDataset(X),
        batch_size=self.settings["batch_size"],
        collate_fn=self._collate_fn(),
        shuffle=False,
    )

    marginals = torch.Tensor([])
    for batch in loader:
        logits = self._non_cuda(self._calc_logits(batch))
        marginals = torch.cat((marginals, logits), 0)

    return F.softmax(marginals, dim=-1).detach().numpy()
|
[
"def",
"marginals",
"(",
"self",
",",
"X",
")",
":",
"nn",
".",
"Module",
".",
"train",
"(",
"self",
",",
"False",
")",
"if",
"self",
".",
"_check_input",
"(",
"X",
")",
":",
"X",
"=",
"self",
".",
"_preprocess_data",
"(",
"X",
")",
"dataloader",
"=",
"DataLoader",
"(",
"MultiModalDataset",
"(",
"X",
")",
",",
"batch_size",
"=",
"self",
".",
"settings",
"[",
"\"batch_size\"",
"]",
",",
"collate_fn",
"=",
"self",
".",
"_collate_fn",
"(",
")",
",",
"shuffle",
"=",
"False",
",",
")",
"marginals",
"=",
"torch",
".",
"Tensor",
"(",
"[",
"]",
")",
"for",
"X_batch",
"in",
"dataloader",
":",
"marginal",
"=",
"self",
".",
"_non_cuda",
"(",
"self",
".",
"_calc_logits",
"(",
"X_batch",
")",
")",
"marginals",
"=",
"torch",
".",
"cat",
"(",
"(",
"marginals",
",",
"marginal",
")",
",",
"0",
")",
"return",
"F",
".",
"softmax",
"(",
"marginals",
",",
"dim",
"=",
"-",
"1",
")",
".",
"detach",
"(",
")",
".",
"numpy",
"(",
")"
] | 30.733333
| 18.666667
|
def has_local_job_refs(io_hash):
    '''
    :param io_hash: input/output hash
    :type io_hash: dict
    :returns: boolean indicating whether any job-based object references are found in *io_hash*
    '''
    # Iterative traversal over the nested list/dict structure. The original
    # code triplicated the same jbor check for the top level, for list items,
    # and for dict values; this unifies all three into one loop.
    q = [io_hash]
    while q:
        container = q.pop()
        if isinstance(container, list):
            values = container
        else:
            values = [container[field] for field in container]
        for value in values:
            if is_job_ref(value):
                # only references to local jobs count
                if get_job_from_jbor(value).startswith('localjob'):
                    return True
            elif isinstance(value, (list, dict)):
                q.append(value)
    return False
|
[
"def",
"has_local_job_refs",
"(",
"io_hash",
")",
":",
"q",
"=",
"[",
"]",
"for",
"field",
"in",
"io_hash",
":",
"if",
"is_job_ref",
"(",
"io_hash",
"[",
"field",
"]",
")",
":",
"if",
"get_job_from_jbor",
"(",
"io_hash",
"[",
"field",
"]",
")",
".",
"startswith",
"(",
"'localjob'",
")",
":",
"return",
"True",
"elif",
"isinstance",
"(",
"io_hash",
"[",
"field",
"]",
",",
"list",
")",
"or",
"isinstance",
"(",
"io_hash",
"[",
"field",
"]",
",",
"dict",
")",
":",
"q",
".",
"append",
"(",
"io_hash",
"[",
"field",
"]",
")",
"while",
"len",
"(",
"q",
")",
">",
"0",
":",
"thing",
"=",
"q",
".",
"pop",
"(",
")",
"if",
"isinstance",
"(",
"thing",
",",
"list",
")",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"thing",
")",
")",
":",
"if",
"is_job_ref",
"(",
"thing",
"[",
"i",
"]",
")",
":",
"if",
"get_job_from_jbor",
"(",
"thing",
"[",
"i",
"]",
")",
".",
"startswith",
"(",
"'localjob'",
")",
":",
"return",
"True",
"elif",
"isinstance",
"(",
"thing",
"[",
"i",
"]",
",",
"list",
")",
"or",
"isinstance",
"(",
"thing",
"[",
"i",
"]",
",",
"dict",
")",
":",
"q",
".",
"append",
"(",
"thing",
"[",
"i",
"]",
")",
"else",
":",
"for",
"field",
"in",
"thing",
":",
"if",
"is_job_ref",
"(",
"thing",
"[",
"field",
"]",
")",
":",
"if",
"get_job_from_jbor",
"(",
"thing",
"[",
"field",
"]",
")",
".",
"startswith",
"(",
"'localjob'",
")",
":",
"return",
"True",
"elif",
"isinstance",
"(",
"thing",
"[",
"field",
"]",
",",
"list",
")",
"or",
"isinstance",
"(",
"thing",
"[",
"field",
"]",
",",
"dict",
")",
":",
"q",
".",
"append",
"(",
"thing",
"[",
"field",
"]",
")",
"return",
"False"
] | 36.939394
| 20.69697
|
def link(obj_files, out_file=None, shared=False, CompilerRunner_=None,
         cwd=None, cplus=False, fort=False, **kwargs):
    """
    Link object files.

    Parameters
    ----------
    obj_files: iterable of path strings
    out_file: path string (optional)
        path to executable/shared library, if missing
        it will be deduced from the last item in obj_files.
    shared: bool
        Generate a shared library? default: False
    CompilerRunner_: pycompilation.CompilerRunner subclass (optional)
        If not given the `cplus` and `fort` flags will be inspected
        (fallback is the C compiler)
    cwd: path string
        root of relative paths and working directory for compiler
    cplus: bool
        C++ objects? default: False
    fort: bool
        Fortran objects? default: False
    **kwargs: dict
        keyword arguments passed onto CompilerRunner_
        NOTE: this dict is mutated below ('flags' and 'run_linker' are
        popped; Fortran linking merges extra kwargs into it).

    Returns
    -------
    The absolute to the generated shared object / executable
    """
    if out_file is None:
        # Derive the output name from the last object file; the original
        # extension is intentionally discarded (`ext` is unused).
        out_file, ext = os.path.splitext(os.path.basename(obj_files[-1]))
        if shared:
            out_file += sharedext
    if not CompilerRunner_:
        if fort:
            # Mixed Fortran/C(++) linking: pick a vendor-specific linker;
            # `extra_kwargs` may carry additional options that must be merged
            # into the caller-supplied kwargs (collections are expanded, not
            # overwritten).
            CompilerRunner_, extra_kwargs, vendor = \
                get_mixed_fort_c_linker(
                    vendor=kwargs.get('vendor', None),
                    metadir=kwargs.get('metadir', None),
                    cplus=cplus,
                    cwd=cwd,
                )
            for k, v in extra_kwargs.items():
                expand_collection_in_dict(kwargs, k, v)
        else:
            if cplus:
                CompilerRunner_ = CppCompilerRunner
            else:
                # fallback: plain C linker
                CompilerRunner_ = CCompilerRunner
    flags = kwargs.pop('flags', [])
    if shared:
        if '-shared' not in flags:
            flags.append('-shared')
        # mimic GNU linker behavior on OS X when using -shared
        # (otherwise likely Undefined symbol errors)
        dl_flag = '-undefined dynamic_lookup'
        if sys.platform == 'darwin' and dl_flag not in flags:
            flags.append(dl_flag)
    # 'run_linker' is accepted only for interface symmetry with the compile
    # path; linking without running the linker makes no sense, so reject it.
    run_linker = kwargs.pop('run_linker', True)
    if not run_linker:
        raise ValueError("link(..., run_linker=False)!?")

    out_file = get_abspath(out_file, cwd=cwd)
    runner = CompilerRunner_(
        obj_files, out_file, flags,
        cwd=cwd,
        **kwargs)
    runner.run()
    return out_file
|
[
"def",
"link",
"(",
"obj_files",
",",
"out_file",
"=",
"None",
",",
"shared",
"=",
"False",
",",
"CompilerRunner_",
"=",
"None",
",",
"cwd",
"=",
"None",
",",
"cplus",
"=",
"False",
",",
"fort",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"out_file",
"is",
"None",
":",
"out_file",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"obj_files",
"[",
"-",
"1",
"]",
")",
")",
"if",
"shared",
":",
"out_file",
"+=",
"sharedext",
"if",
"not",
"CompilerRunner_",
":",
"if",
"fort",
":",
"CompilerRunner_",
",",
"extra_kwargs",
",",
"vendor",
"=",
"get_mixed_fort_c_linker",
"(",
"vendor",
"=",
"kwargs",
".",
"get",
"(",
"'vendor'",
",",
"None",
")",
",",
"metadir",
"=",
"kwargs",
".",
"get",
"(",
"'metadir'",
",",
"None",
")",
",",
"cplus",
"=",
"cplus",
",",
"cwd",
"=",
"cwd",
",",
")",
"for",
"k",
",",
"v",
"in",
"extra_kwargs",
".",
"items",
"(",
")",
":",
"expand_collection_in_dict",
"(",
"kwargs",
",",
"k",
",",
"v",
")",
"else",
":",
"if",
"cplus",
":",
"CompilerRunner_",
"=",
"CppCompilerRunner",
"else",
":",
"CompilerRunner_",
"=",
"CCompilerRunner",
"flags",
"=",
"kwargs",
".",
"pop",
"(",
"'flags'",
",",
"[",
"]",
")",
"if",
"shared",
":",
"if",
"'-shared'",
"not",
"in",
"flags",
":",
"flags",
".",
"append",
"(",
"'-shared'",
")",
"# mimic GNU linker behavior on OS X when using -shared",
"# (otherwise likely Undefined symbol errors)",
"dl_flag",
"=",
"'-undefined dynamic_lookup'",
"if",
"sys",
".",
"platform",
"==",
"'darwin'",
"and",
"dl_flag",
"not",
"in",
"flags",
":",
"flags",
".",
"append",
"(",
"dl_flag",
")",
"run_linker",
"=",
"kwargs",
".",
"pop",
"(",
"'run_linker'",
",",
"True",
")",
"if",
"not",
"run_linker",
":",
"raise",
"ValueError",
"(",
"\"link(..., run_linker=False)!?\"",
")",
"out_file",
"=",
"get_abspath",
"(",
"out_file",
",",
"cwd",
"=",
"cwd",
")",
"runner",
"=",
"CompilerRunner_",
"(",
"obj_files",
",",
"out_file",
",",
"flags",
",",
"cwd",
"=",
"cwd",
",",
"*",
"*",
"kwargs",
")",
"runner",
".",
"run",
"(",
")",
"return",
"out_file"
] | 32.178082
| 17.986301
|
def compute_transitions(self, density_normalize=True):
    """Compute transition matrix.

    Parameters
    ----------
    density_normalize : `bool`
        The density rescaling of Coifman and Lafon (2006): Then only the
        geometry of the data matters, not the sampled density.

    Returns
    -------
    Makes attributes `.transitions_sym` and `.transitions` available.
    """
    W = self._connectivities
    if density_normalize:
        # density normalization as of Coifman et al. (2005) ensures that
        # the kernel matrix is independent of sampling density;
        # q[i] estimates the sampling density at point i (it is also the
        # degree of the underlying graph)
        q = np.asarray(W.sum(axis=0))
        if issparse(W):
            Q = scipy.sparse.spdiags(1.0/q, 0, W.shape[0], W.shape[0])
        else:
            Q = np.diag(1.0/q)
        K = Q.dot(W).dot(Q)
    else:
        K = W
    # z[i] is the square root of the row sum of K
    z = np.sqrt(np.asarray(K.sum(axis=0)))
    if issparse(K):
        self.Z = scipy.sparse.spdiags(1.0/z, 0, K.shape[0], K.shape[0])
    else:
        self.Z = np.diag(1.0/z)
    self._transitions_sym = self.Z.dot(K).dot(self.Z)
    logg.msg('computed transitions', v=4, time=True)
|
[
"def",
"compute_transitions",
"(",
"self",
",",
"density_normalize",
"=",
"True",
")",
":",
"W",
"=",
"self",
".",
"_connectivities",
"# density normalization as of Coifman et al. (2005)",
"# ensures that kernel matrix is independent of sampling density",
"if",
"density_normalize",
":",
"# q[i] is an estimate for the sampling density at point i",
"# it's also the degree of the underlying graph",
"q",
"=",
"np",
".",
"asarray",
"(",
"W",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
")",
"if",
"not",
"issparse",
"(",
"W",
")",
":",
"Q",
"=",
"np",
".",
"diag",
"(",
"1.0",
"/",
"q",
")",
"else",
":",
"Q",
"=",
"scipy",
".",
"sparse",
".",
"spdiags",
"(",
"1.0",
"/",
"q",
",",
"0",
",",
"W",
".",
"shape",
"[",
"0",
"]",
",",
"W",
".",
"shape",
"[",
"0",
"]",
")",
"K",
"=",
"Q",
".",
"dot",
"(",
"W",
")",
".",
"dot",
"(",
"Q",
")",
"else",
":",
"K",
"=",
"W",
"# z[i] is the square root of the row sum of K",
"z",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"asarray",
"(",
"K",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
")",
")",
"if",
"not",
"issparse",
"(",
"K",
")",
":",
"self",
".",
"Z",
"=",
"np",
".",
"diag",
"(",
"1.0",
"/",
"z",
")",
"else",
":",
"self",
".",
"Z",
"=",
"scipy",
".",
"sparse",
".",
"spdiags",
"(",
"1.0",
"/",
"z",
",",
"0",
",",
"K",
".",
"shape",
"[",
"0",
"]",
",",
"K",
".",
"shape",
"[",
"0",
"]",
")",
"self",
".",
"_transitions_sym",
"=",
"self",
".",
"Z",
".",
"dot",
"(",
"K",
")",
".",
"dot",
"(",
"self",
".",
"Z",
")",
"logg",
".",
"msg",
"(",
"'computed transitions'",
",",
"v",
"=",
"4",
",",
"time",
"=",
"True",
")"
] | 37.638889
| 19.694444
|
def _call_fan(branch, calls, executable):
    """Appends a list of callees to the branch for each parent
    in the call list that calls this executable.

    :arg branch: list of executables visited so far; mutated in place.
    :arg calls: mapping from executable name to the executables calling it.
    :arg executable: the executable whose callers are fanned out.
    """
    #Since we don't keep track of the specific logic in the executables
    #it is possible that we could get a infinite recursion of executables
    #that keep calling each other.
    if executable in branch:
        return
    branch.append(executable)
    if executable.name in calls:
        for caller in calls[executable.name]:
            twig = []
            _call_fan(twig, calls, caller)
            # NOTE(review): `twig` is populated by the recursive call but is
            # never attached to `branch` -- presumably this was meant to be
            # `branch.append(twig)` (per the docstring "appends a list of
            # callees"); verify intent before fixing.
            branch
            # NOTE(review): the bare `branch` expression above is a no-op
            # statement present in the original code.
|
[
"def",
"_call_fan",
"(",
"branch",
",",
"calls",
",",
"executable",
")",
":",
"#Since we don't keep track of the specific logic in the executables",
"#it is possible that we could get a infinite recursion of executables",
"#that keep calling each other.",
"if",
"executable",
"in",
"branch",
":",
"return",
"branch",
".",
"append",
"(",
"executable",
")",
"if",
"executable",
".",
"name",
"in",
"calls",
":",
"for",
"caller",
"in",
"calls",
"[",
"executable",
".",
"name",
"]",
":",
"twig",
"=",
"[",
"]",
"_call_fan",
"(",
"twig",
",",
"calls",
",",
"caller",
")",
"branch"
] | 35.5625
| 13.75
|
def _serialiseFirstJob(self, jobStore):
    """
    Serialises the root job. Returns the wrapping job.

    :param toil.jobStores.abstractJobStore.AbstractJobStore jobStore:
    """
    # Check if the workflow root is a checkpoint but not a leaf vertex.
    # All other job vertices in the graph are checked by checkNewCheckpointsAreLeafVertices
    if self.checkpoint and not Job._isLeafVertex(self):
        raise JobGraphDeadlockException(
            'New checkpoint job %s is not a leaf in the job graph' % self)
    # Create the first (wrapping) jobGraph and write the graph of jobs to disk
    root_graph = self._createEmptyJobGraphForJob(jobStore=jobStore, predecessorNumber=0)
    self._serialiseJobGraph(root_graph, jobStore, None, True)
    jobStore.update(root_graph)
    # Store the name of the first job in a file in case of restart. Up to this
    # point the root job is not recoverable.
    # FIXME: "root job" or "first job", which one is it?
    jobStore.setRootJob(root_graph.jobStoreID)
    return root_graph
|
[
"def",
"_serialiseFirstJob",
"(",
"self",
",",
"jobStore",
")",
":",
"# Check if the workflow root is a checkpoint but not a leaf vertex.",
"# All other job vertices in the graph are checked by checkNewCheckpointsAreLeafVertices",
"if",
"self",
".",
"checkpoint",
"and",
"not",
"Job",
".",
"_isLeafVertex",
"(",
"self",
")",
":",
"raise",
"JobGraphDeadlockException",
"(",
"'New checkpoint job %s is not a leaf in the job graph'",
"%",
"self",
")",
"# Create first jobGraph",
"jobGraph",
"=",
"self",
".",
"_createEmptyJobGraphForJob",
"(",
"jobStore",
"=",
"jobStore",
",",
"predecessorNumber",
"=",
"0",
")",
"# Write the graph of jobs to disk",
"self",
".",
"_serialiseJobGraph",
"(",
"jobGraph",
",",
"jobStore",
",",
"None",
",",
"True",
")",
"jobStore",
".",
"update",
"(",
"jobGraph",
")",
"# Store the name of the first job in a file in case of restart. Up to this point the",
"# root job is not recoverable. FIXME: \"root job\" or \"first job\", which one is it?",
"jobStore",
".",
"setRootJob",
"(",
"jobGraph",
".",
"jobStoreID",
")",
"return",
"jobGraph"
] | 47.863636
| 24.5
|
def matrix_decomp(self, cache=None):
    """Compute a Hermitian eigenbasis decomposition of the matrix.

    Parameters
    ----------
    cache : bool or None, optional
        If ``True``, store the decomposition internally. For None,
        the ``cache_mat_decomp`` from class initialization is used.

    Returns
    -------
    eigval : `numpy.ndarray`
        One-dimensional array of eigenvalues. Its length is equal
        to the number of matrix rows.
    eigvec : `numpy.ndarray`
        Two-dimensional array of eigenvectors. It has the same shape
        as the decomposed matrix.

    See Also
    --------
    scipy.linalg.decomp.eigh :
        Implementation of the decomposition. Standard parameters
        are used here.

    Raises
    ------
    NotImplementedError
        if the matrix is sparse (not supported by scipy 0.17)
    """
    # Lazy import to improve `import odl` time
    import scipy.linalg
    import scipy.sparse
    # TODO: fix dead link `scipy.linalg.decomp.eigh`

    if scipy.sparse.isspmatrix(self.matrix):
        raise NotImplementedError('sparse matrix not supported')

    if cache is None:
        cache = self._cache_mat_decomp

    if self._eigval is not None and self._eigvec is not None:
        # Reuse the previously stored decomposition.
        return self._eigval, self._eigvec

    eigval, eigvec = scipy.linalg.eigh(self.matrix)
    if cache:
        self._eigval = eigval
        self._eigvec = eigvec
    return eigval, eigvec
|
[
"def",
"matrix_decomp",
"(",
"self",
",",
"cache",
"=",
"None",
")",
":",
"# Lazy import to improve `import odl` time",
"import",
"scipy",
".",
"linalg",
"import",
"scipy",
".",
"sparse",
"# TODO: fix dead link `scipy.linalg.decomp.eigh`",
"if",
"scipy",
".",
"sparse",
".",
"isspmatrix",
"(",
"self",
".",
"matrix",
")",
":",
"raise",
"NotImplementedError",
"(",
"'sparse matrix not supported'",
")",
"if",
"cache",
"is",
"None",
":",
"cache",
"=",
"self",
".",
"_cache_mat_decomp",
"if",
"self",
".",
"_eigval",
"is",
"None",
"or",
"self",
".",
"_eigvec",
"is",
"None",
":",
"eigval",
",",
"eigvec",
"=",
"scipy",
".",
"linalg",
".",
"eigh",
"(",
"self",
".",
"matrix",
")",
"if",
"cache",
":",
"self",
".",
"_eigval",
"=",
"eigval",
"self",
".",
"_eigvec",
"=",
"eigvec",
"else",
":",
"eigval",
",",
"eigvec",
"=",
"self",
".",
"_eigval",
",",
"self",
".",
"_eigvec",
"return",
"eigval",
",",
"eigvec"
] | 32.142857
| 19.714286
|
def requirement_args(argv, want_paths=False, want_other=False):
    """Yield a filtered subset of command-line arguments.

    :arg argv: Arguments, starting after the subcommand
    :arg want_paths: If True, the yielded values include the paths to any
        requirements files following a ``-r`` or ``--requirement`` option.
    :arg want_other: If True, the yielded values include the args that are
        not a requirement-file path or a ``-r`` or ``--requirement`` flag.
    """
    expecting_path = False
    for token in argv:
        # A requirements file may itself be named "-r", and a trailing "-r"
        # must not blow up, so track state with a flag rather than peeking.
        if expecting_path:
            expecting_path = False
            if want_paths:
                yield token
        elif token in ('-r', '--requirement'):
            expecting_path = True
        elif want_other:
            yield token
|
[
"def",
"requirement_args",
"(",
"argv",
",",
"want_paths",
"=",
"False",
",",
"want_other",
"=",
"False",
")",
":",
"was_r",
"=",
"False",
"for",
"arg",
"in",
"argv",
":",
"# Allow for requirements files named \"-r\", don't freak out if there's a",
"# trailing \"-r\", etc.",
"if",
"was_r",
":",
"if",
"want_paths",
":",
"yield",
"arg",
"was_r",
"=",
"False",
"elif",
"arg",
"in",
"[",
"'-r'",
",",
"'--requirement'",
"]",
":",
"was_r",
"=",
"True",
"else",
":",
"if",
"want_other",
":",
"yield",
"arg"
] | 36.782609
| 21.608696
|
def tenant_create(name, description=None, enabled=True, profile=None,
                  **connection_args):
    '''
    Create a keystone tenant

    CLI Examples:

    .. code-block:: bash

        salt '*' keystone.tenant_create nova description='nova tenant'
        salt '*' keystone.tenant_create test enabled=False
    '''
    client = auth(profile, **connection_args)
    # Create via whichever tenants/projects manager this keystone client exposes.
    manager = getattr(client, _TENANTS, None)
    created = manager.create(name, description, enabled)
    # Return the new tenant in the same dict shape tenant_get produces.
    return tenant_get(created.id, profile=profile, **connection_args)
|
[
"def",
"tenant_create",
"(",
"name",
",",
"description",
"=",
"None",
",",
"enabled",
"=",
"True",
",",
"profile",
"=",
"None",
",",
"*",
"*",
"connection_args",
")",
":",
"kstone",
"=",
"auth",
"(",
"profile",
",",
"*",
"*",
"connection_args",
")",
"new",
"=",
"getattr",
"(",
"kstone",
",",
"_TENANTS",
",",
"None",
")",
".",
"create",
"(",
"name",
",",
"description",
",",
"enabled",
")",
"return",
"tenant_get",
"(",
"new",
".",
"id",
",",
"profile",
"=",
"profile",
",",
"*",
"*",
"connection_args",
")"
] | 33.533333
| 25.533333
|
def __prepare_body(self, search_value, search_type='url'):
    """
    Prepares the http body for querying safebrowsing api. Maybe the list need to get adjusted.

    :param search_value: value to search for
    :type search_value: str
    :param search_type: 'url' or 'ip'
    :type search_type: str
    :returns: http body as dict
    :rtype: dict
    :raises SearchTypeNotSupportedError: for any search_type other than 'url' or 'ip'
    """
    # Build the threatInfo section first, then assemble the full body.
    if search_type == 'url':
        threat_info = {
            'threatTypes': [
                'MALWARE', 'SOCIAL_ENGINEERING', 'UNWANTED_SOFTWARE', 'POTENTIALLY_HARMFUL_APPLICATION'
            ],
            'platformTypes': ['ANY_PLATFORM', 'ALL_PLATFORMS', 'WINDOWS', 'LINUX', 'OSX', 'ANDROID', 'IOS'],
            'threatEntryTypes': ['URL']
        }
    elif search_type == 'ip':
        threat_info = {
            'threatTypes': ['MALWARE'],
            'platformTypes': ['WINDOWS', 'LINUX', 'OSX'],
            'threatEntryTypes': ['IP_RANGE']
        }
    else:
        raise SearchTypeNotSupportedError('Currently supported search types are \'url\' and \'ip\'.')
    # TODO: Only found threatEntry 'url' in the docs. What to use for ip_range?
    threat_info['threatEntries'] = [{'url': search_value}]
    return {
        'client': {
            'clientId': self.client_id,
            'clientVersion': self.client_version
        },
        'threatInfo': threat_info
    }
|
[
"def",
"__prepare_body",
"(",
"self",
",",
"search_value",
",",
"search_type",
"=",
"'url'",
")",
":",
"body",
"=",
"{",
"'client'",
":",
"{",
"'clientId'",
":",
"self",
".",
"client_id",
",",
"'clientVersion'",
":",
"self",
".",
"client_version",
"}",
"}",
"if",
"search_type",
"==",
"'url'",
":",
"data",
"=",
"{",
"'threatTypes'",
":",
"[",
"'MALWARE'",
",",
"'SOCIAL_ENGINEERING'",
",",
"'UNWANTED_SOFTWARE'",
",",
"'POTENTIALLY_HARMFUL_APPLICATION'",
"]",
",",
"'platformTypes'",
":",
"[",
"'ANY_PLATFORM'",
",",
"'ALL_PLATFORMS'",
",",
"'WINDOWS'",
",",
"'LINUX'",
",",
"'OSX'",
",",
"'ANDROID'",
",",
"'IOS'",
"]",
",",
"'threatEntryTypes'",
":",
"[",
"'URL'",
"]",
"}",
"elif",
"search_type",
"==",
"'ip'",
":",
"data",
"=",
"{",
"'threatTypes'",
":",
"[",
"'MALWARE'",
"]",
",",
"'platformTypes'",
":",
"[",
"'WINDOWS'",
",",
"'LINUX'",
",",
"'OSX'",
"]",
",",
"'threatEntryTypes'",
":",
"[",
"'IP_RANGE'",
"]",
"}",
"else",
":",
"raise",
"SearchTypeNotSupportedError",
"(",
"'Currently supported search types are \\'url\\' and \\'ip\\'.'",
")",
"# TODO: Only found threatEntry 'url' in the docs. What to use for ip_range?",
"data",
"[",
"'threatEntries'",
"]",
"=",
"[",
"{",
"'url'",
":",
"search_value",
"}",
"]",
"body",
"[",
"'threatInfo'",
"]",
"=",
"data",
"return",
"body"
] | 37.947368
| 21.421053
|
def json(
    body,
    status=200,
    headers=None,
    content_type="application/json",
    dumps=json_dumps,
    **kwargs
):
    """
    Returns response object with body in json format.

    :param body: Response data to be serialized.
    :param status: Response code.
    :param headers: Custom Headers.
    :param kwargs: Remaining arguments that are passed to the json encoder.
    """
    # Serialize first so encoder errors surface before a response is built.
    serialized = dumps(body, **kwargs)
    return HTTPResponse(
        serialized,
        headers=headers,
        status=status,
        content_type=content_type,
    )
|
[
"def",
"json",
"(",
"body",
",",
"status",
"=",
"200",
",",
"headers",
"=",
"None",
",",
"content_type",
"=",
"\"application/json\"",
",",
"dumps",
"=",
"json_dumps",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"HTTPResponse",
"(",
"dumps",
"(",
"body",
",",
"*",
"*",
"kwargs",
")",
",",
"headers",
"=",
"headers",
",",
"status",
"=",
"status",
",",
"content_type",
"=",
"content_type",
",",
")"
] | 23.545455
| 18.545455
|
def validate_config(self, config):
    """
    Validate that configuration dict keys are supported.

    :type config: dict
    :param config: configuration dictionary

    :raises InvalidConfigurationError: if the ``hosts`` section is missing
        or not a list, any section contains an unknown key, or an S3
        ``bucket`` is combined with per-host ``filename`` settings.
    """
    # required configuration: the hosts section must exist
    if 'hosts' not in config:
        raise InvalidConfigurationError('hosts', "",
                                        reason=('hosts configuration '
                                                'section is required'))
    for key in config.keys():
        if key not in default_allowed_keys:
            raise InvalidConfigurationError(key, config[key])
    bucket = False
    # optional configuration: aws section; also note whether a bucket is set
    try:
        for key in config['aws'].keys():
            if key == 'bucket' and config['aws'][key] is not None:
                bucket = True
            if key not in aws_allowed_keys:
                raise InvalidConfigurationError(key, config['aws'][key])
    except KeyError:
        pass
    # optional configuration: remaining sections share the same key check
    for section, allowed in (('logging', logging_allowed_keys),
                             ('repository', repository_allowed_keys)):
        try:
            for key in config[section].keys():
                if key not in allowed:
                    raise InvalidConfigurationError(key, config[section][key])
        except KeyError:
            pass
    # required configuration: hosts must be a list (isinstance also accepts
    # list subclasses, unlike the stricter `type(...) is list` check)
    if not isinstance(config['hosts'], list):
        raise InvalidConfigurationError('hosts', config['hosts'],
                                        reason="hosts must be a list")
    filename = False
    for host in config['hosts']:
        for key in host.keys():
            if key == 'filename' and host['filename'] is not None:
                filename = True
            if key == 'jump_host' and host['jump_host'] is not None:
                for jump_key in host['jump_host'].keys():
                    if jump_key not in jump_host_allowed_keys:
                        # NOTE(review): reports the outer 'key' ('jump_host'),
                        # not the offending 'jump_key'; preserved for
                        # compatibility -- confirm whether jump_key was meant.
                        raise InvalidConfigurationError(key, host['jump_host'])
            if key not in host_allowed_keys:
                raise InvalidConfigurationError(key, host[key])
    # bucket upload and per-host filenames are mutually exclusive
    if bucket and filename:
        raise InvalidConfigurationError('bucket', config['aws']['bucket'],
                                        reason=('bucket configuration is '
                                                'incompatible with filename '
                                                'configuration in hosts'))
|
[
"def",
"validate_config",
"(",
"self",
",",
"config",
")",
":",
"try",
":",
"hosts",
"=",
"config",
"[",
"'hosts'",
"]",
"except",
"KeyError",
":",
"raise",
"InvalidConfigurationError",
"(",
"'hosts'",
",",
"\"\"",
",",
"reason",
"=",
"(",
"'hosts configuration '",
"'section is required'",
")",
")",
"for",
"key",
"in",
"config",
".",
"keys",
"(",
")",
":",
"if",
"key",
"not",
"in",
"default_allowed_keys",
":",
"raise",
"InvalidConfigurationError",
"(",
"key",
",",
"config",
"[",
"key",
"]",
")",
"bucket",
"=",
"False",
"# optional configuration",
"try",
":",
"for",
"key",
"in",
"config",
"[",
"'aws'",
"]",
".",
"keys",
"(",
")",
":",
"if",
"key",
"==",
"'bucket'",
"and",
"config",
"[",
"'aws'",
"]",
"[",
"key",
"]",
"is",
"not",
"None",
":",
"bucket",
"=",
"True",
"if",
"key",
"not",
"in",
"aws_allowed_keys",
":",
"raise",
"InvalidConfigurationError",
"(",
"key",
",",
"config",
"[",
"'aws'",
"]",
"[",
"key",
"]",
")",
"except",
"KeyError",
":",
"pass",
"# optional configuration",
"try",
":",
"for",
"key",
"in",
"config",
"[",
"'logging'",
"]",
".",
"keys",
"(",
")",
":",
"if",
"key",
"not",
"in",
"logging_allowed_keys",
":",
"raise",
"InvalidConfigurationError",
"(",
"key",
",",
"config",
"[",
"'logging'",
"]",
"[",
"key",
"]",
")",
"except",
"KeyError",
":",
"pass",
"# optional configuration",
"try",
":",
"for",
"key",
"in",
"config",
"[",
"'repository'",
"]",
".",
"keys",
"(",
")",
":",
"if",
"key",
"not",
"in",
"repository_allowed_keys",
":",
"raise",
"InvalidConfigurationError",
"(",
"key",
",",
"config",
"[",
"'repository'",
"]",
"[",
"key",
"]",
")",
"except",
"KeyError",
":",
"pass",
"# required configuration",
"if",
"type",
"(",
"config",
"[",
"'hosts'",
"]",
")",
"is",
"not",
"list",
":",
"raise",
"InvalidConfigurationError",
"(",
"'hosts'",
",",
"config",
"[",
"'hosts'",
"]",
",",
"reason",
"=",
"\"hosts must be a list\"",
")",
"filename",
"=",
"False",
"for",
"host",
"in",
"config",
"[",
"'hosts'",
"]",
":",
"for",
"key",
"in",
"host",
".",
"keys",
"(",
")",
":",
"if",
"key",
"==",
"'filename'",
"and",
"host",
"[",
"'filename'",
"]",
"is",
"not",
"None",
":",
"filename",
"=",
"True",
"if",
"key",
"==",
"'jump_host'",
"and",
"host",
"[",
"'jump_host'",
"]",
"is",
"not",
"None",
":",
"for",
"jump_key",
"in",
"host",
"[",
"'jump_host'",
"]",
".",
"keys",
"(",
")",
":",
"if",
"jump_key",
"not",
"in",
"jump_host_allowed_keys",
":",
"raise",
"InvalidConfigurationError",
"(",
"key",
",",
"host",
"[",
"'jump_host'",
"]",
")",
"if",
"key",
"not",
"in",
"host_allowed_keys",
":",
"raise",
"InvalidConfigurationError",
"(",
"key",
",",
"host",
"[",
"key",
"]",
")",
"if",
"bucket",
"and",
"filename",
":",
"raise",
"InvalidConfigurationError",
"(",
"'bucket'",
",",
"config",
"[",
"'aws'",
"]",
"[",
"'bucket'",
"]",
",",
"reason",
"=",
"(",
"'bucket configuration is'",
"'incompatible with filename'",
"'configuration in hosts'",
")",
")"
] | 39.940299
| 21.58209
|
def to_table(self, filter_function=None):
    """Return string with data in tabular form."""
    # One row per pseudopotential; filter_function, if given, excludes rows
    # for which it returns a truthy value.
    column_names = ["basename", "symbol", "Z_val", "l_max", "l_local", "XC", "type"]
    rows = [[p.basename, p.symbol, p.Z_val, p.l_max, p.l_local, p.xc, p.type]
            for p in self
            if filter_function is None or not filter_function(p)]
    return tabulate(rows, headers=column_names, tablefmt="grid")
|
[
"def",
"to_table",
"(",
"self",
",",
"filter_function",
"=",
"None",
")",
":",
"table",
"=",
"[",
"]",
"for",
"p",
"in",
"self",
":",
"if",
"filter_function",
"is",
"not",
"None",
"and",
"filter_function",
"(",
"p",
")",
":",
"continue",
"table",
".",
"append",
"(",
"[",
"p",
".",
"basename",
",",
"p",
".",
"symbol",
",",
"p",
".",
"Z_val",
",",
"p",
".",
"l_max",
",",
"p",
".",
"l_local",
",",
"p",
".",
"xc",
",",
"p",
".",
"type",
"]",
")",
"return",
"tabulate",
"(",
"table",
",",
"headers",
"=",
"[",
"\"basename\"",
",",
"\"symbol\"",
",",
"\"Z_val\"",
",",
"\"l_max\"",
",",
"\"l_local\"",
",",
"\"XC\"",
",",
"\"type\"",
"]",
",",
"tablefmt",
"=",
"\"grid\"",
")"
] | 55.875
| 24.125
|
def get_by(self, field, value):
    """
    Gets all drive enclosures that match the filter.

    The search is case-insensitive.

    Args:
        Field: field name to filter.
        Value: value to filter.

    Returns:
        list: A list of drive enclosures.
    """
    # Delegate the filtered lookup to the underlying resource client.
    client = self._client
    return client.get_by(field=field, value=value)
|
[
"def",
"get_by",
"(",
"self",
",",
"field",
",",
"value",
")",
":",
"return",
"self",
".",
"_client",
".",
"get_by",
"(",
"field",
"=",
"field",
",",
"value",
"=",
"value",
")"
] | 25.5
| 16.214286
|
def to_filter(self, query, arg):
    """Json-server filter using the _or_ operator."""
    # Combine the parsed URL-argument conditions with OR instead of AND.
    model = self.model_cls
    return filter_from_url_arg(model, query, arg, query_operator=or_)
|
[
"def",
"to_filter",
"(",
"self",
",",
"query",
",",
"arg",
")",
":",
"return",
"filter_from_url_arg",
"(",
"self",
".",
"model_cls",
",",
"query",
",",
"arg",
",",
"query_operator",
"=",
"or_",
")"
] | 57
| 16.666667
|
def parse(md, model, encoding='utf-8', config=None):
    """
    Translate the Versa Markdown syntax into Versa model relationships

    md -- markdown source text
    model -- Versa model to take the output relationship
    encoding -- character encoding (defaults to UTF-8)
    config -- optional dict of parse options; recognized keys include
        'autotype-h1'/'autotype-h2'/'autotype-h3' (resource type to assign
        by header level) and 'interpretations' (property -> interpretation IRI)

    Returns: The overall base URI (`@base`) specified in the Markdown file, or None

    >>> from versa.driver import memory
    >>> from versa.reader.md import from_markdown
    >>> m = memory.connection()
    >>> from_markdown(open('test/resource/poetry.md').read(), m)
    'http://uche.ogbuji.net/poems/'
    >>> m.size()
    40
    >>> next(m.match(None, 'http://uche.ogbuji.net/poems/updated', '2013-10-15'))
    (I(http://uche.ogbuji.net/poems/1), I(http://uche.ogbuji.net/poems/updated), '2013-10-15', {})
    """
    #Set up configuration to interpret the conventions for the Markdown
    config = config or {}
    #This mapping takes syntactical elements such as the various header levels in Markdown and associates a resource type with the specified resources
    syntaxtypemap = {}
    if config.get('autotype-h1'): syntaxtypemap['h1'] = config.get('autotype-h1')
    if config.get('autotype-h2'): syntaxtypemap['h2'] = config.get('autotype-h2')
    if config.get('autotype-h3'): syntaxtypemap['h3'] = config.get('autotype-h3')
    interp_stanza = config.get('interpretations', {})
    interpretations = {}
    def setup_interpretations(interp):
        #Map the interpretation IRIs to functions to do the data prep
        for prop, interp_key in interp.items():
            if interp_key.startswith('@'):
                interp_key = iri.absolutize(interp_key[1:], VERSA_BASEIRI)
            if interp_key in PREP_METHODS:
                interpretations[prop] = PREP_METHODS[interp_key]
            else:
                #just use the identity, i.e. no-op
                interpretations[prop] = lambda x, **kwargs: x
    setup_interpretations(interp_stanza)
    #Prep ID generator, in case needed
    idg = idgen(None)
    #Parse the Markdown
    #Alternately:
    #from xml.sax.saxutils import escape, unescape
    #h = markdown.markdown(escape(md.decode(encoding)), output_format='html5')
    #Note: even using safe_mode this should not be presumed safe from tainted input
    #h = markdown.markdown(md.decode(encoding), safe_mode='escape', output_format='html5')
    comments = mkdcomments.CommentsExtension()
    h = markdown.markdown(md, safe_mode='escape', output_format='html5', extensions=[comments])
    #doc = html.markup_fragment(inputsource.text(h.encode('utf-8')))
    tb = treebuilder()
    h = '<html>' + h + '</html>'
    root = tb.parse(h)
    #Each section contains one resource description, but the special one named @docheader contains info to help interpret the rest
    first_h1 = next(select_name(descendants(root), 'h1'))
    #top_section_fields = itertools.takewhile(lambda x: x.xml_name != 'h1', select_name(following_siblings(first_h1), 'h2'))
    #Extract header elements. Notice I use an empty element with an empty parent as the default result
    docheader = next(select_value(select_name(descendants(root), 'h1'), '@docheader'), element('empty', parent=root)) # //h1[.="@docheader"]
    sections = filter(lambda x: x.xml_value != '@docheader', select_name_pattern(descendants(root), HEADER_PAT)) # //h1[not(.="@docheader")]|h2[not(.="@docheader")]|h3[not(.="@docheader")]
    def fields(sect):
        '''
        Each section represents a resource and contains a list with its properties
        This generator parses the list and yields the key value pairs representing the properties
        Some properties have attributes, expressed in markdown as a nested list. If present these attributes
        Are yielded as well, else None is yielded
        '''
        #import logging; logging.debug(repr(sect))
        #Pull all the list elements until the next header. This accommodates multiple lists in a section
        sect_body_items = itertools.takewhile(lambda x: HEADER_PAT.match(x.xml_name) is None, select_elements(following_siblings(sect)))
        #results_until(sect.xml_select('following-sibling::*'), 'self::h1|self::h2|self::h3')
        #field_list = [ U(li) for ul in sect.xml_select('following-sibling::ul') for li in ul.xml_select('./li') ]
        field_list = [ li for elem in select_name(sect_body_items, 'ul') for li in select_name(elem, 'li') ]
        def parse_li(pair):
            '''
            Parse each list item into a property pair
            '''
            if pair.strip():
                matched = REL_PAT.match(pair)
                if not matched:
                    raise ValueError(_('Syntax error in relationship expression: {0}'.format(pair)))
                #print matched.groups()
                if matched.group(3): prop = matched.group(3).strip()
                if matched.group(4): prop = matched.group(4).strip()
                if matched.group(7):
                    val = matched.group(7).strip()
                    typeindic = RES_VAL
                elif matched.group(9):
                    val = matched.group(9).strip()
                    typeindic = TEXT_VAL
                elif matched.group(11):
                    val = matched.group(11).strip()
                    typeindic = TEXT_VAL
                elif matched.group(12):
                    val = matched.group(12).strip()
                    typeindic = UNKNOWN_VAL
                else:
                    val = ''
                    typeindic = UNKNOWN_VAL
                #prop, val = [ part.strip() for part in U(li.xml_select('string(.)')).split(':', 1) ]
                #import logging; logging.debug(repr((prop, val)))
                return prop, val, typeindic
            return None, None, None
        #Go through each list item
        for li in field_list:
            #Is there a nested list, which expresses attributes on a property
            if list(select_name(li, 'ul')):
                #main = ''.join([ node.xml_value
                #        for node in itertools.takewhile(
                #            lambda x: x.xml_name != 'ul', select_elements(li)
                #            )
                #    ])
                main = ''.join(itertools.takewhile(
                    lambda x: isinstance(x, text), li.xml_children
                ))
                #main = li.xml_select('string(ul/preceding-sibling::node())')
                prop, val, typeindic = parse_li(main)
                subfield_list = [ parse_li(sli.xml_value) for e in select_name(li, 'ul') for sli in (
                    select_name(e, 'li')
                    ) ]
                subfield_list = [ (p, v, t) for (p, v, t) in subfield_list if p is not None ]
                #Support a special case for syntax such as in the @iri and @interpretations: stanza of @docheader
                if val is None: val = ''
                yield prop, val, typeindic, subfield_list
            #Just a regular, unadorned property
            else:
                prop, val, typeindic = parse_li(li.xml_value)
                if prop: yield prop, val, typeindic, None
    iris = {}
    #Gather the document-level metadata from the @docheader section
    base = propbase = rtbase = document_iri = default_lang = None
    for prop, val, typeindic, subfield_list in fields(docheader):
        #The @iri section is where key IRI prefixes can be set
        if prop == '@iri':
            for (k, uri, typeindic) in subfield_list:
                if k == '@base':
                    base = propbase = rtbase = uri
                elif k == '@property':
                    propbase = uri
                elif k == '@resource-type':
                    rtbase = uri
                else:
                    iris[k] = uri
        #The @interpretations section is where defaults can be set as to the primitive types of values from the Markdown, based on the relevant property/relationship
        elif prop == '@interpretations':
            #Iterate over items from the @docheader/@interpretations section to set up for further parsing
            interp = {}
            for k, v, x in subfield_list:
                interp[I(iri.absolutize(k, propbase))] = v
            setup_interpretations(interp)
        #Setting an IRI for this very document being parsed
        elif prop == '@document':
            document_iri = val
        elif prop == '@language':
            default_lang = val
        #If we have a resource to which to attach them, just attach all other properties
        elif document_iri or base:
            rid = document_iri or base
            fullprop = I(iri.absolutize(prop, propbase or base))
            if fullprop in interpretations:
                val = interpretations[fullprop](val, rid=rid, fullprop=fullprop, base=base, model=model)
                if val is not None: model.add(rid, fullprop, val)
            else:
                model.add(rid, fullprop, val)
    #Default IRI prefixes if @iri/@base is set
    if not propbase: propbase = base
    if not rtbase: rtbase = base
    if not document_iri: document_iri = base
    #Go through the resources expressed in remaining sections
    for sect in sections:
        #if U(sect) == '@docheader': continue #Not needed because excluded by ss
        #The header can take one of 4 forms: "ResourceID" "ResourceID [ResourceType]" "[ResourceType]" or "[]"
        #The 3rd form is for an anonymous resource with specified type and the 4th an anonymous resource with unspecified type
        matched = RESOURCE_PAT.match(sect.xml_value)
        if not matched:
            raise ValueError(_('Syntax error in resource header: {0}'.format(sect.xml_value)))
        rid = matched.group(1)
        rtype = matched.group(3)
        if rtype:
            rtype = I(iri.absolutize(rtype, base))
        if rid:
            rid = I(iri.absolutize(rid, base))
        if not rid:
            rid = next(idg)
        #Resource type might be set by syntax config
        if not rtype:
            rtype = syntaxtypemap.get(sect.xml_name)
        if rtype:
            model.add(rid, TYPE_REL, rtype)
        #Add the property
        for prop, val, typeindic, subfield_list in fields(sect):
            attrs = {}
            for (aprop, aval, atype) in subfield_list or ():
                if atype == RES_VAL:
                    valmatch = URI_ABBR_PAT.match(aval)
                    if valmatch:
                        uri = iris[valmatch.group(1)]
                        attrs[aprop] = URI_ABBR_PAT.sub(uri + '\\2\\3', aval)
                    else:
                        attrs[aprop] = I(iri.absolutize(aval, rtbase))
                elif atype == TEXT_VAL:
                    attrs[aprop] = aval
                elif atype == UNKNOWN_VAL:
                    attrs[aprop] = aval
                    if aprop in interpretations:
                        aval = interpretations[aprop](aval, rid=rid, fullprop=aprop, base=base, model=model)
                        if aval is not None: attrs[aprop] = aval
                    else:
                        attrs[aprop] = aval
            propmatch = URI_ABBR_PAT.match(prop)
            if propmatch:
                uri = iris[propmatch.group(1)]
                fullprop = URI_ABBR_PAT.sub(uri + '\\2\\3', prop)
            else:
                fullprop = I(iri.absolutize(prop, propbase))
            if typeindic == RES_VAL:
                #NOTE(review): matching 'aval' here looks like a bug -- the
                #top-level value being processed is 'val'; 'aval' is leftover
                #from the attribute loop above and may even be unbound when
                #subfield_list is empty. TODO confirm and change to 'val'.
                valmatch = URI_ABBR_PAT.match(aval)
                if valmatch:
                    uri = iris[valmatch.group(1)]
                    val = URI_ABBR_PAT.sub(uri + '\\2\\3', val)
                else:
                    val = I(iri.absolutize(val, rtbase))
                model.add(rid, fullprop, val, attrs)
            elif typeindic == TEXT_VAL:
                if '@lang' not in attrs: attrs['@lang'] = default_lang
                model.add(rid, fullprop, val, attrs)
            elif typeindic == UNKNOWN_VAL:
                if fullprop in interpretations:
                    val = interpretations[fullprop](val, rid=rid, fullprop=fullprop, base=base, model=model)
                    if val is not None: model.add(rid, fullprop, val)
                else:
                    model.add(rid, fullprop, val, attrs)
            #resinfo = AB_RESOURCE_PAT.match(val)
            #if resinfo:
            #    val = resinfo.group(1)
            #    valtype = resinfo.group(3)
            #    if not val: val = model.generate_resource()
            #    if valtype: attrs[TYPE_REL] = valtype
    return document_iri
|
[
"def",
"parse",
"(",
"md",
",",
"model",
",",
"encoding",
"=",
"'utf-8'",
",",
"config",
"=",
"None",
")",
":",
"#Set up configuration to interpret the conventions for the Markdown",
"config",
"=",
"config",
"or",
"{",
"}",
"#This mapping takes syntactical elements such as the various header levels in Markdown and associates a resource type with the specified resources",
"syntaxtypemap",
"=",
"{",
"}",
"if",
"config",
".",
"get",
"(",
"'autotype-h1'",
")",
":",
"syntaxtypemap",
"[",
"'h1'",
"]",
"=",
"config",
".",
"get",
"(",
"'autotype-h1'",
")",
"if",
"config",
".",
"get",
"(",
"'autotype-h2'",
")",
":",
"syntaxtypemap",
"[",
"'h2'",
"]",
"=",
"config",
".",
"get",
"(",
"'autotype-h2'",
")",
"if",
"config",
".",
"get",
"(",
"'autotype-h3'",
")",
":",
"syntaxtypemap",
"[",
"'h3'",
"]",
"=",
"config",
".",
"get",
"(",
"'autotype-h3'",
")",
"interp_stanza",
"=",
"config",
".",
"get",
"(",
"'interpretations'",
",",
"{",
"}",
")",
"interpretations",
"=",
"{",
"}",
"def",
"setup_interpretations",
"(",
"interp",
")",
":",
"#Map the interpretation IRIs to functions to do the data prep",
"for",
"prop",
",",
"interp_key",
"in",
"interp",
".",
"items",
"(",
")",
":",
"if",
"interp_key",
".",
"startswith",
"(",
"'@'",
")",
":",
"interp_key",
"=",
"iri",
".",
"absolutize",
"(",
"interp_key",
"[",
"1",
":",
"]",
",",
"VERSA_BASEIRI",
")",
"if",
"interp_key",
"in",
"PREP_METHODS",
":",
"interpretations",
"[",
"prop",
"]",
"=",
"PREP_METHODS",
"[",
"interp_key",
"]",
"else",
":",
"#just use the identity, i.e. no-op",
"interpretations",
"[",
"prop",
"]",
"=",
"lambda",
"x",
",",
"*",
"*",
"kwargs",
":",
"x",
"setup_interpretations",
"(",
"interp_stanza",
")",
"#Prep ID generator, in case needed",
"idg",
"=",
"idgen",
"(",
"None",
")",
"#Parse the Markdown",
"#Alternately:",
"#from xml.sax.saxutils import escape, unescape",
"#h = markdown.markdown(escape(md.decode(encoding)), output_format='html5')",
"#Note: even using safe_mode this should not be presumed safe from tainted input",
"#h = markdown.markdown(md.decode(encoding), safe_mode='escape', output_format='html5')",
"comments",
"=",
"mkdcomments",
".",
"CommentsExtension",
"(",
")",
"h",
"=",
"markdown",
".",
"markdown",
"(",
"md",
",",
"safe_mode",
"=",
"'escape'",
",",
"output_format",
"=",
"'html5'",
",",
"extensions",
"=",
"[",
"comments",
"]",
")",
"#doc = html.markup_fragment(inputsource.text(h.encode('utf-8')))",
"tb",
"=",
"treebuilder",
"(",
")",
"h",
"=",
"'<html>'",
"+",
"h",
"+",
"'</html>'",
"root",
"=",
"tb",
".",
"parse",
"(",
"h",
")",
"#Each section contains one resource description, but the special one named @docheader contains info to help interpret the rest",
"first_h1",
"=",
"next",
"(",
"select_name",
"(",
"descendants",
"(",
"root",
")",
",",
"'h1'",
")",
")",
"#top_section_fields = itertools.takewhile(lambda x: x.xml_name != 'h1', select_name(following_siblings(first_h1), 'h2'))",
"#Extract header elements. Notice I use an empty element with an empty parent as the default result",
"docheader",
"=",
"next",
"(",
"select_value",
"(",
"select_name",
"(",
"descendants",
"(",
"root",
")",
",",
"'h1'",
")",
",",
"'@docheader'",
")",
",",
"element",
"(",
"'empty'",
",",
"parent",
"=",
"root",
")",
")",
"# //h1[.=\"@docheader\"]",
"sections",
"=",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"xml_value",
"!=",
"'@docheader'",
",",
"select_name_pattern",
"(",
"descendants",
"(",
"root",
")",
",",
"HEADER_PAT",
")",
")",
"# //h1[not(.=\"@docheader\")]|h2[not(.=\"@docheader\")]|h3[not(.=\"@docheader\")]",
"def",
"fields",
"(",
"sect",
")",
":",
"'''\n Each section represents a resource and contains a list with its properties\n This generator parses the list and yields the key value pairs representing the properties\n Some properties have attributes, expressed in markdown as a nested list. If present these attributes\n Are yielded as well, else None is yielded\n '''",
"#import logging; logging.debug(repr(sect))",
"#Pull all the list elements until the next header. This accommodates multiple lists in a section",
"sect_body_items",
"=",
"itertools",
".",
"takewhile",
"(",
"lambda",
"x",
":",
"HEADER_PAT",
".",
"match",
"(",
"x",
".",
"xml_name",
")",
"is",
"None",
",",
"select_elements",
"(",
"following_siblings",
"(",
"sect",
")",
")",
")",
"#results_until(sect.xml_select('following-sibling::*'), 'self::h1|self::h2|self::h3')",
"#field_list = [ U(li) for ul in sect.xml_select('following-sibling::ul') for li in ul.xml_select('./li') ]",
"field_list",
"=",
"[",
"li",
"for",
"elem",
"in",
"select_name",
"(",
"sect_body_items",
",",
"'ul'",
")",
"for",
"li",
"in",
"select_name",
"(",
"elem",
",",
"'li'",
")",
"]",
"def",
"parse_li",
"(",
"pair",
")",
":",
"'''\n Parse each list item into a property pair\n '''",
"if",
"pair",
".",
"strip",
"(",
")",
":",
"matched",
"=",
"REL_PAT",
".",
"match",
"(",
"pair",
")",
"if",
"not",
"matched",
":",
"raise",
"ValueError",
"(",
"_",
"(",
"'Syntax error in relationship expression: {0}'",
".",
"format",
"(",
"pair",
")",
")",
")",
"#print matched.groups()",
"if",
"matched",
".",
"group",
"(",
"3",
")",
":",
"prop",
"=",
"matched",
".",
"group",
"(",
"3",
")",
".",
"strip",
"(",
")",
"if",
"matched",
".",
"group",
"(",
"4",
")",
":",
"prop",
"=",
"matched",
".",
"group",
"(",
"4",
")",
".",
"strip",
"(",
")",
"if",
"matched",
".",
"group",
"(",
"7",
")",
":",
"val",
"=",
"matched",
".",
"group",
"(",
"7",
")",
".",
"strip",
"(",
")",
"typeindic",
"=",
"RES_VAL",
"elif",
"matched",
".",
"group",
"(",
"9",
")",
":",
"val",
"=",
"matched",
".",
"group",
"(",
"9",
")",
".",
"strip",
"(",
")",
"typeindic",
"=",
"TEXT_VAL",
"elif",
"matched",
".",
"group",
"(",
"11",
")",
":",
"val",
"=",
"matched",
".",
"group",
"(",
"11",
")",
".",
"strip",
"(",
")",
"typeindic",
"=",
"TEXT_VAL",
"elif",
"matched",
".",
"group",
"(",
"12",
")",
":",
"val",
"=",
"matched",
".",
"group",
"(",
"12",
")",
".",
"strip",
"(",
")",
"typeindic",
"=",
"UNKNOWN_VAL",
"else",
":",
"val",
"=",
"''",
"typeindic",
"=",
"UNKNOWN_VAL",
"#prop, val = [ part.strip() for part in U(li.xml_select('string(.)')).split(':', 1) ]",
"#import logging; logging.debug(repr((prop, val)))",
"return",
"prop",
",",
"val",
",",
"typeindic",
"return",
"None",
",",
"None",
",",
"None",
"#Go through each list item",
"for",
"li",
"in",
"field_list",
":",
"#Is there a nested list, which expresses attributes on a property",
"if",
"list",
"(",
"select_name",
"(",
"li",
",",
"'ul'",
")",
")",
":",
"#main = ''.join([ node.xml_value",
"# for node in itertools.takewhile(",
"# lambda x: x.xml_name != 'ul', select_elements(li)",
"# )",
"# ])",
"main",
"=",
"''",
".",
"join",
"(",
"itertools",
".",
"takewhile",
"(",
"lambda",
"x",
":",
"isinstance",
"(",
"x",
",",
"text",
")",
",",
"li",
".",
"xml_children",
")",
")",
"#main = li.xml_select('string(ul/preceding-sibling::node())')",
"prop",
",",
"val",
",",
"typeindic",
"=",
"parse_li",
"(",
"main",
")",
"subfield_list",
"=",
"[",
"parse_li",
"(",
"sli",
".",
"xml_value",
")",
"for",
"e",
"in",
"select_name",
"(",
"li",
",",
"'ul'",
")",
"for",
"sli",
"in",
"(",
"select_name",
"(",
"e",
",",
"'li'",
")",
")",
"]",
"subfield_list",
"=",
"[",
"(",
"p",
",",
"v",
",",
"t",
")",
"for",
"(",
"p",
",",
"v",
",",
"t",
")",
"in",
"subfield_list",
"if",
"p",
"is",
"not",
"None",
"]",
"#Support a special case for syntax such as in the @iri and @interpretations: stanza of @docheader",
"if",
"val",
"is",
"None",
":",
"val",
"=",
"''",
"yield",
"prop",
",",
"val",
",",
"typeindic",
",",
"subfield_list",
"#Just a regular, unadorned property",
"else",
":",
"prop",
",",
"val",
",",
"typeindic",
"=",
"parse_li",
"(",
"li",
".",
"xml_value",
")",
"if",
"prop",
":",
"yield",
"prop",
",",
"val",
",",
"typeindic",
",",
"None",
"iris",
"=",
"{",
"}",
"#Gather the document-level metadata from the @docheader section",
"base",
"=",
"propbase",
"=",
"rtbase",
"=",
"document_iri",
"=",
"default_lang",
"=",
"None",
"for",
"prop",
",",
"val",
",",
"typeindic",
",",
"subfield_list",
"in",
"fields",
"(",
"docheader",
")",
":",
"#The @iri section is where key IRI prefixes can be set",
"if",
"prop",
"==",
"'@iri'",
":",
"for",
"(",
"k",
",",
"uri",
",",
"typeindic",
")",
"in",
"subfield_list",
":",
"if",
"k",
"==",
"'@base'",
":",
"base",
"=",
"propbase",
"=",
"rtbase",
"=",
"uri",
"elif",
"k",
"==",
"'@property'",
":",
"propbase",
"=",
"uri",
"elif",
"k",
"==",
"'@resource-type'",
":",
"rtbase",
"=",
"uri",
"else",
":",
"iris",
"[",
"k",
"]",
"=",
"uri",
"#The @interpretations section is where defaults can be set as to the primitive types of values from the Markdown, based on the relevant property/relationship",
"elif",
"prop",
"==",
"'@interpretations'",
":",
"#Iterate over items from the @docheader/@interpretations section to set up for further parsing",
"interp",
"=",
"{",
"}",
"for",
"k",
",",
"v",
",",
"x",
"in",
"subfield_list",
":",
"interp",
"[",
"I",
"(",
"iri",
".",
"absolutize",
"(",
"k",
",",
"propbase",
")",
")",
"]",
"=",
"v",
"setup_interpretations",
"(",
"interp",
")",
"#Setting an IRI for this very document being parsed",
"elif",
"prop",
"==",
"'@document'",
":",
"document_iri",
"=",
"val",
"elif",
"prop",
"==",
"'@language'",
":",
"default_lang",
"=",
"val",
"#If we have a resource to which to attach them, just attach all other properties",
"elif",
"document_iri",
"or",
"base",
":",
"rid",
"=",
"document_iri",
"or",
"base",
"fullprop",
"=",
"I",
"(",
"iri",
".",
"absolutize",
"(",
"prop",
",",
"propbase",
"or",
"base",
")",
")",
"if",
"fullprop",
"in",
"interpretations",
":",
"val",
"=",
"interpretations",
"[",
"fullprop",
"]",
"(",
"val",
",",
"rid",
"=",
"rid",
",",
"fullprop",
"=",
"fullprop",
",",
"base",
"=",
"base",
",",
"model",
"=",
"model",
")",
"if",
"val",
"is",
"not",
"None",
":",
"model",
".",
"add",
"(",
"rid",
",",
"fullprop",
",",
"val",
")",
"else",
":",
"model",
".",
"add",
"(",
"rid",
",",
"fullprop",
",",
"val",
")",
"#Default IRI prefixes if @iri/@base is set",
"if",
"not",
"propbase",
":",
"propbase",
"=",
"base",
"if",
"not",
"rtbase",
":",
"rtbase",
"=",
"base",
"if",
"not",
"document_iri",
":",
"document_iri",
"=",
"base",
"#Go through the resources expressed in remaining sections",
"for",
"sect",
"in",
"sections",
":",
"#if U(sect) == '@docheader': continue #Not needed because excluded by ss",
"#The header can take one of 4 forms: \"ResourceID\" \"ResourceID [ResourceType]\" \"[ResourceType]\" or \"[]\"",
"#The 3rd form is for an anonymous resource with specified type and the 4th an anonymous resource with unspecified type",
"matched",
"=",
"RESOURCE_PAT",
".",
"match",
"(",
"sect",
".",
"xml_value",
")",
"if",
"not",
"matched",
":",
"raise",
"ValueError",
"(",
"_",
"(",
"'Syntax error in resource header: {0}'",
".",
"format",
"(",
"sect",
".",
"xml_value",
")",
")",
")",
"rid",
"=",
"matched",
".",
"group",
"(",
"1",
")",
"rtype",
"=",
"matched",
".",
"group",
"(",
"3",
")",
"if",
"rtype",
":",
"rtype",
"=",
"I",
"(",
"iri",
".",
"absolutize",
"(",
"rtype",
",",
"base",
")",
")",
"if",
"rid",
":",
"rid",
"=",
"I",
"(",
"iri",
".",
"absolutize",
"(",
"rid",
",",
"base",
")",
")",
"if",
"not",
"rid",
":",
"rid",
"=",
"next",
"(",
"idg",
")",
"#Resource type might be set by syntax config",
"if",
"not",
"rtype",
":",
"rtype",
"=",
"syntaxtypemap",
".",
"get",
"(",
"sect",
".",
"xml_name",
")",
"if",
"rtype",
":",
"model",
".",
"add",
"(",
"rid",
",",
"TYPE_REL",
",",
"rtype",
")",
"#Add the property",
"for",
"prop",
",",
"val",
",",
"typeindic",
",",
"subfield_list",
"in",
"fields",
"(",
"sect",
")",
":",
"attrs",
"=",
"{",
"}",
"for",
"(",
"aprop",
",",
"aval",
",",
"atype",
")",
"in",
"subfield_list",
"or",
"(",
")",
":",
"if",
"atype",
"==",
"RES_VAL",
":",
"valmatch",
"=",
"URI_ABBR_PAT",
".",
"match",
"(",
"aval",
")",
"if",
"valmatch",
":",
"uri",
"=",
"iris",
"[",
"valmatch",
".",
"group",
"(",
"1",
")",
"]",
"attrs",
"[",
"aprop",
"]",
"=",
"URI_ABBR_PAT",
".",
"sub",
"(",
"uri",
"+",
"'\\\\2\\\\3'",
",",
"aval",
")",
"else",
":",
"attrs",
"[",
"aprop",
"]",
"=",
"I",
"(",
"iri",
".",
"absolutize",
"(",
"aval",
",",
"rtbase",
")",
")",
"elif",
"atype",
"==",
"TEXT_VAL",
":",
"attrs",
"[",
"aprop",
"]",
"=",
"aval",
"elif",
"atype",
"==",
"UNKNOWN_VAL",
":",
"attrs",
"[",
"aprop",
"]",
"=",
"aval",
"if",
"aprop",
"in",
"interpretations",
":",
"aval",
"=",
"interpretations",
"[",
"aprop",
"]",
"(",
"aval",
",",
"rid",
"=",
"rid",
",",
"fullprop",
"=",
"aprop",
",",
"base",
"=",
"base",
",",
"model",
"=",
"model",
")",
"if",
"aval",
"is",
"not",
"None",
":",
"attrs",
"[",
"aprop",
"]",
"=",
"aval",
"else",
":",
"attrs",
"[",
"aprop",
"]",
"=",
"aval",
"propmatch",
"=",
"URI_ABBR_PAT",
".",
"match",
"(",
"prop",
")",
"if",
"propmatch",
":",
"uri",
"=",
"iris",
"[",
"propmatch",
".",
"group",
"(",
"1",
")",
"]",
"fullprop",
"=",
"URI_ABBR_PAT",
".",
"sub",
"(",
"uri",
"+",
"'\\\\2\\\\3'",
",",
"prop",
")",
"else",
":",
"fullprop",
"=",
"I",
"(",
"iri",
".",
"absolutize",
"(",
"prop",
",",
"propbase",
")",
")",
"if",
"typeindic",
"==",
"RES_VAL",
":",
"valmatch",
"=",
"URI_ABBR_PAT",
".",
"match",
"(",
"aval",
")",
"if",
"valmatch",
":",
"uri",
"=",
"iris",
"[",
"valmatch",
".",
"group",
"(",
"1",
")",
"]",
"val",
"=",
"URI_ABBR_PAT",
".",
"sub",
"(",
"uri",
"+",
"'\\\\2\\\\3'",
",",
"val",
")",
"else",
":",
"val",
"=",
"I",
"(",
"iri",
".",
"absolutize",
"(",
"val",
",",
"rtbase",
")",
")",
"model",
".",
"add",
"(",
"rid",
",",
"fullprop",
",",
"val",
",",
"attrs",
")",
"elif",
"typeindic",
"==",
"TEXT_VAL",
":",
"if",
"'@lang'",
"not",
"in",
"attrs",
":",
"attrs",
"[",
"'@lang'",
"]",
"=",
"default_lang",
"model",
".",
"add",
"(",
"rid",
",",
"fullprop",
",",
"val",
",",
"attrs",
")",
"elif",
"typeindic",
"==",
"UNKNOWN_VAL",
":",
"if",
"fullprop",
"in",
"interpretations",
":",
"val",
"=",
"interpretations",
"[",
"fullprop",
"]",
"(",
"val",
",",
"rid",
"=",
"rid",
",",
"fullprop",
"=",
"fullprop",
",",
"base",
"=",
"base",
",",
"model",
"=",
"model",
")",
"if",
"val",
"is",
"not",
"None",
":",
"model",
".",
"add",
"(",
"rid",
",",
"fullprop",
",",
"val",
")",
"else",
":",
"model",
".",
"add",
"(",
"rid",
",",
"fullprop",
",",
"val",
",",
"attrs",
")",
"#resinfo = AB_RESOURCE_PAT.match(val)",
"#if resinfo:",
"# val = resinfo.group(1)",
"# valtype = resinfo.group(3)",
"# if not val: val = model.generate_resource()",
"# if valtype: attrs[TYPE_REL] = valtype",
"return",
"document_iri"
] | 48.273438
| 23.492188
|
def log(cls, x: 'TensorFluent') -> 'TensorFluent':
    '''Applies the natural logarithm elementwise to the given fluent.

    Args:
        x: The input fluent.

    Returns:
        A TensorFluent wrapping the log function.
    '''
    unary_fn = tf.log
    return cls._unary_op(x, unary_fn, tf.float32)
|
[
"def",
"log",
"(",
"cls",
",",
"x",
":",
"'TensorFluent'",
")",
"->",
"'TensorFluent'",
":",
"return",
"cls",
".",
"_unary_op",
"(",
"x",
",",
"tf",
".",
"log",
",",
"tf",
".",
"float32",
")"
] | 28.1
| 21.7
|
def get_package_versions(package):
    """Look up the installed and latest available versions of a package.

    note: we use the pip list_command implementation for this, so both
    versions are comparable (=SetuptoolsVersion).

    :param package: name of the package
    :return: tuple ``(installed_version, latest_version)``; ``(None, None)``
        when no version information could be retrieved
    """
    lister = ListCommand()
    options, _ = lister.parse_args([])
    latest_infos = lister.iter_packages_latest_infos([get_dist(package)], options)
    # The iterator yields at most one entry for our single package; an empty
    # iterator means the lookup failed.
    for dist in latest_infos:
        return dist.parsed_version, dist.latest_version
    return None, None
|
[
"def",
"get_package_versions",
"(",
"package",
")",
":",
"list_command",
"=",
"ListCommand",
"(",
")",
"options",
",",
"args",
"=",
"list_command",
".",
"parse_args",
"(",
"[",
"]",
")",
"packages",
"=",
"[",
"get_dist",
"(",
"package",
")",
"]",
"dists",
"=",
"list_command",
".",
"iter_packages_latest_infos",
"(",
"packages",
",",
"options",
")",
"try",
":",
"dist",
"=",
"next",
"(",
"dists",
")",
"return",
"dist",
".",
"parsed_version",
",",
"dist",
".",
"latest_version",
"except",
"StopIteration",
":",
"return",
"None",
",",
"None"
] | 35.647059
| 14.764706
|
def extract(self, item, list_article_candidate):
    """Compares the extracted authors.

    :param item: The corresponding NewscrawlerItem
    :param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
    :return: A string, the most likely authors
    """
    # Keep only non-empty author results, paired with their extractor name.
    author_entries = [
        (candidate.author, candidate.extractor)
        for candidate in list_article_candidate
        if candidate.author is not None and candidate.author != '[]'
    ]
    # If there is no value in the list, return None.
    if not author_entries:
        return None
    # Prefer the result produced by the "newspaper" extractor when present;
    # otherwise fall back to the first extracted author.
    newspaper_entries = [entry for entry in author_entries if entry[1] == "newspaper"]
    if newspaper_entries:
        return newspaper_entries[0][0]
    return author_entries[0][0]
|
[
"def",
"extract",
"(",
"self",
",",
"item",
",",
"list_article_candidate",
")",
":",
"list_author",
"=",
"[",
"]",
"# The authors of the ArticleCandidates and the respective extractors are saved in a tuple in list_author.",
"for",
"article_candidate",
"in",
"list_article_candidate",
":",
"if",
"(",
"article_candidate",
".",
"author",
"is",
"not",
"None",
")",
"and",
"(",
"article_candidate",
".",
"author",
"!=",
"'[]'",
")",
":",
"list_author",
".",
"append",
"(",
"(",
"article_candidate",
".",
"author",
",",
"article_candidate",
".",
"extractor",
")",
")",
"# If there is no value in the list, return None.",
"if",
"len",
"(",
"list_author",
")",
"==",
"0",
":",
"return",
"None",
"# If there are more options than one, return the result from newspaper.",
"list_newspaper",
"=",
"[",
"x",
"for",
"x",
"in",
"list_author",
"if",
"x",
"[",
"1",
"]",
"==",
"\"newspaper\"",
"]",
"if",
"len",
"(",
"list_newspaper",
")",
"==",
"0",
":",
"# If there is no author extracted by newspaper, return the first result of list_author.",
"return",
"list_author",
"[",
"0",
"]",
"[",
"0",
"]",
"else",
":",
"return",
"list_newspaper",
"[",
"0",
"]",
"[",
"0",
"]"
] | 45.269231
| 27.538462
|
def ssh(lancet, print_cmd, environment):
    """
    SSH into the given environment, based on the dploi configuration.
    """
    # Execute the deployment spec file to obtain its `settings` mapping.
    scope = {}
    spec_path = lancet.config.get('dploi', 'deployment_spec')
    with open(spec_path) as spec_file:
        compiled = compile(spec_file.read(), 'deployment.py', 'exec')
        exec(compiled, {}, scope)

    config = scope['settings'][environment]
    host = '{}@{}'.format(config['user'], config['hosts'][0])
    port = str(config.get('port', 22))
    cmd = ['ssh', '-p', port, host]

    if print_cmd:
        click.echo(' '.join(quote(part) for part in cmd))
    else:
        lancet.defer_to_shell(*cmd)
|
[
"def",
"ssh",
"(",
"lancet",
",",
"print_cmd",
",",
"environment",
")",
":",
"namespace",
"=",
"{",
"}",
"with",
"open",
"(",
"lancet",
".",
"config",
".",
"get",
"(",
"'dploi'",
",",
"'deployment_spec'",
")",
")",
"as",
"fh",
":",
"code",
"=",
"compile",
"(",
"fh",
".",
"read",
"(",
")",
",",
"'deployment.py'",
",",
"'exec'",
")",
"exec",
"(",
"code",
",",
"{",
"}",
",",
"namespace",
")",
"config",
"=",
"namespace",
"[",
"'settings'",
"]",
"[",
"environment",
"]",
"host",
"=",
"'{}@{}'",
".",
"format",
"(",
"config",
"[",
"'user'",
"]",
",",
"config",
"[",
"'hosts'",
"]",
"[",
"0",
"]",
")",
"cmd",
"=",
"[",
"'ssh'",
",",
"'-p'",
",",
"str",
"(",
"config",
".",
"get",
"(",
"'port'",
",",
"22",
")",
")",
",",
"host",
"]",
"if",
"print_cmd",
":",
"click",
".",
"echo",
"(",
"' '",
".",
"join",
"(",
"quote",
"(",
"s",
")",
"for",
"s",
"in",
"cmd",
")",
")",
"else",
":",
"lancet",
".",
"defer_to_shell",
"(",
"*",
"cmd",
")"
] | 32.055556
| 18.833333
|
def get_status(self, channel=Channel.CHANNEL_CH0):
    """
    Returns the error status of a specific CAN channel.

    :param int channel: CAN channel, to be used (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`).
    :return: Tuple with CAN and USB status (see structure :class:`Status`).
    :rtype: tuple(int, int)
    """
    # The driver call fills the Status structure in place.
    current = Status()
    UcanGetStatusEx(self._handle, channel, byref(current))
    return (current.can_status, current.usb_status)
|
[
"def",
"get_status",
"(",
"self",
",",
"channel",
"=",
"Channel",
".",
"CHANNEL_CH0",
")",
":",
"status",
"=",
"Status",
"(",
")",
"UcanGetStatusEx",
"(",
"self",
".",
"_handle",
",",
"channel",
",",
"byref",
"(",
"status",
")",
")",
"return",
"status",
".",
"can_status",
",",
"status",
".",
"usb_status"
] | 44.636364
| 21.545455
|
def spaced_coordinate(name, keys, ordered=True):
    """
    Create a subclass of Coordinate, instances of which must have exactly the given keys.

    Parameters
    ----------
    name : str
        Name of the new class
    keys : sequence
        Keys which instances must exclusively have
    ordered : bool
        Whether to set the class' ``default_order`` based on the order of ``keys``

    Returns
    -------
    type
    """
    def validate(self):
        """Raise a ValueError if the instance's keys are incorrect"""
        if set(keys) != set(self):
            message = '{} needs keys {} and got {}'.format(type(self).__name__, keys, tuple(self))
            raise ValueError(message)

    class_body = {
        'default_order': keys if ordered else None,
        '_validate': validate,
    }
    return type(name, (Coordinate,), class_body)
|
[
"def",
"spaced_coordinate",
"(",
"name",
",",
"keys",
",",
"ordered",
"=",
"True",
")",
":",
"def",
"validate",
"(",
"self",
")",
":",
"\"\"\"Raise a ValueError if the instance's keys are incorrect\"\"\"",
"if",
"set",
"(",
"keys",
")",
"!=",
"set",
"(",
"self",
")",
":",
"raise",
"ValueError",
"(",
"'{} needs keys {} and got {}'",
".",
"format",
"(",
"type",
"(",
"self",
")",
".",
"__name__",
",",
"keys",
",",
"tuple",
"(",
"self",
")",
")",
")",
"new_class",
"=",
"type",
"(",
"name",
",",
"(",
"Coordinate",
",",
")",
",",
"{",
"'default_order'",
":",
"keys",
"if",
"ordered",
"else",
"None",
",",
"'_validate'",
":",
"validate",
"}",
")",
"return",
"new_class"
] | 32.666667
| 26.291667
|
def _run_init_queries(self):
    '''
    Initialization queries
    '''
    # Ensure a database table exists for every persisted entity type.
    schema_classes = (Package, PackageCfgFile, PayloadFile, IgnoredDir, AllowedDir)
    for schema_cls in schema_classes:
        self._db.create_table_from_object(schema_cls())
|
[
"def",
"_run_init_queries",
"(",
"self",
")",
":",
"for",
"obj",
"in",
"(",
"Package",
",",
"PackageCfgFile",
",",
"PayloadFile",
",",
"IgnoredDir",
",",
"AllowedDir",
")",
":",
"self",
".",
"_db",
".",
"create_table_from_object",
"(",
"obj",
"(",
")",
")"
] | 35.666667
| 22.333333
|
def _set_igmpPIM(self, v, load=False):
  """
  Setter method for igmpPIM, mapped from YANG variable /interface_vlan/vlan/ip/igmpPIM (container)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_igmpPIM is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_igmpPIM() directly.
  """
  # Values that carry a `_utype` hook convert themselves to their native
  # type before validation.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Wrap `v` in the generated igmpPIM container binding; every keyword
    # here is fixed YANG/CLI metadata emitted by the code generator.
    t = YANGDynClass(v,base=igmpPIM.igmpPIM, is_container='container', presence=False, yang_name="igmpPIM", rest_name="pim", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IP PIM Snooping', u'alt-name': u'pim', u'cli-incomplete-no': None, u'sort-priority': u'128', u'hidden': u'full', u'callpoint': u'IgmpsVlan'}}, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='container', is_config=True)
  except (TypeError, ValueError):
    # Re-raise with a structured payload describing the expected type.
    raise ValueError({
      'error-string': """igmpPIM must be of a type compatible with container""",
      'defined-type': "container",
      'generated-type': """YANGDynClass(base=igmpPIM.igmpPIM, is_container='container', presence=False, yang_name="igmpPIM", rest_name="pim", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IP PIM Snooping', u'alt-name': u'pim', u'cli-incomplete-no': None, u'sort-priority': u'128', u'hidden': u'full', u'callpoint': u'IgmpsVlan'}}, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='container', is_config=True)""",
    })
  # Store on the private backing attribute and, when a change hook is
  # present, notify the enclosing binding that a child value changed.
  self.__igmpPIM = t
  if hasattr(self, '_set'):
    self._set()
|
[
"def",
"_set_igmpPIM",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"igmpPIM",
".",
"igmpPIM",
",",
"is_container",
"=",
"'container'",
",",
"presence",
"=",
"False",
",",
"yang_name",
"=",
"\"igmpPIM\"",
",",
"rest_name",
"=",
"\"pim\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'IP PIM Snooping'",
",",
"u'alt-name'",
":",
"u'pim'",
",",
"u'cli-incomplete-no'",
":",
"None",
",",
"u'sort-priority'",
":",
"u'128'",
",",
"u'hidden'",
":",
"u'full'",
",",
"u'callpoint'",
":",
"u'IgmpsVlan'",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-igmp-snooping'",
",",
"defining_module",
"=",
"'brocade-igmp-snooping'",
",",
"yang_type",
"=",
"'container'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"igmpPIM must be of a type compatible with container\"\"\"",
",",
"'defined-type'",
":",
"\"container\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=igmpPIM.igmpPIM, is_container='container', presence=False, yang_name=\"igmpPIM\", rest_name=\"pim\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IP PIM Snooping', u'alt-name': u'pim', u'cli-incomplete-no': None, u'sort-priority': u'128', u'hidden': u'full', u'callpoint': u'IgmpsVlan'}}, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='container', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__igmpPIM",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | 82.181818
| 38.5
|
def enforce_relationship_refs(instance):
    """Ensures that all SDOs being referenced by the SRO are contained
    within the same bundle.

    Yields a JSONError for every relationship whose ``source_ref`` or
    ``target_ref`` does not match the id of a non-relationship object in
    the bundle. Non-bundle input (or a bundle with no objects) yields
    nothing.
    """
    if instance['type'] != 'bundle' or 'objects' not in instance:
        return

    # Find and store all ids of non-relationship objects.
    # (The original code used bare string statements as comments here; they
    # were no-op expression statements, replaced with real comments.)
    rel_references = set()
    for obj in instance['objects']:
        if obj['type'] != 'relationship':
            rel_references.add(obj['id'])

    # Check that every relationship's refs were encountered above.
    for obj in instance['objects']:
        if obj['type'] != 'relationship':
            continue
        if obj['source_ref'] not in rel_references:
            yield JSONError("Relationship object %s makes reference to %s "
                            "Which is not found in current bundle "
                            % (obj['id'], obj['source_ref']), 'enforce-relationship-refs')
        if obj['target_ref'] not in rel_references:
            yield JSONError("Relationship object %s makes reference to %s "
                            "Which is not found in current bundle "
                            % (obj['id'], obj['target_ref']), 'enforce-relationship-refs')
|
[
"def",
"enforce_relationship_refs",
"(",
"instance",
")",
":",
"if",
"instance",
"[",
"'type'",
"]",
"!=",
"'bundle'",
"or",
"'objects'",
"not",
"in",
"instance",
":",
"return",
"rel_references",
"=",
"set",
"(",
")",
"\"\"\"Find and store all ids\"\"\"",
"for",
"obj",
"in",
"instance",
"[",
"'objects'",
"]",
":",
"if",
"obj",
"[",
"'type'",
"]",
"!=",
"'relationship'",
":",
"rel_references",
".",
"add",
"(",
"obj",
"[",
"'id'",
"]",
")",
"\"\"\"Check if id has been encountered\"\"\"",
"for",
"obj",
"in",
"instance",
"[",
"'objects'",
"]",
":",
"if",
"obj",
"[",
"'type'",
"]",
"==",
"'relationship'",
":",
"if",
"obj",
"[",
"'source_ref'",
"]",
"not",
"in",
"rel_references",
":",
"yield",
"JSONError",
"(",
"\"Relationship object %s makes reference to %s \"",
"\"Which is not found in current bundle \"",
"%",
"(",
"obj",
"[",
"'id'",
"]",
",",
"obj",
"[",
"'source_ref'",
"]",
")",
",",
"'enforce-relationship-refs'",
")",
"if",
"obj",
"[",
"'target_ref'",
"]",
"not",
"in",
"rel_references",
":",
"yield",
"JSONError",
"(",
"\"Relationship object %s makes reference to %s \"",
"\"Which is not found in current bundle \"",
"%",
"(",
"obj",
"[",
"'id'",
"]",
",",
"obj",
"[",
"'target_ref'",
"]",
")",
",",
"'enforce-relationship-refs'",
")"
] | 44.36
| 20.64
|
def _make_actor_method_executor(self, method_name, method, actor_imported):
    """Make an executor that wraps a user-defined actor method.
    The wrapped method updates the worker's internal state and performs any
    necessary checkpointing operations.
    Args:
        method_name (str): The name of the actor method.
        method (instancemethod): The actor method to wrap. This should be a
            method defined on the actor class and should therefore take an
            instance of the actor as the first argument.
        actor_imported (bool): Whether the actor has been imported.
            Checkpointing operations will not be run if this is set to
            False.
    Returns:
        A function that executes the given actor method on the worker's
        stored instance of the actor. The function also updates the
        worker's internal state to record the executed method.
    """
    def actor_method_executor(dummy_return_id, actor, *args):
        # NOTE(review): a task counter of 1 is treated below as "this is the
        # actor creation task" — confirm the counter starts at 0.
        # Update the actor's task counter to reflect the task we're about
        # to execute.
        self._worker.actor_task_counter += 1
        # Execute the assigned method and save a checkpoint if necessary.
        try:
            if is_class_method(method):
                # Class/static methods do not receive the actor instance.
                method_returns = method(*args)
            else:
                method_returns = method(actor, *args)
        except Exception as e:
            # Save the checkpoint before allowing the method exception
            # to be thrown, but don't save the checkpoint for actor
            # creation task.
            if (isinstance(actor, ray.actor.Checkpointable)
                    and self._worker.actor_task_counter != 1):
                self._save_and_log_checkpoint(actor)
            raise e
        else:
            # Handle any checkpointing operations before storing the
            # method's return values.
            # NOTE(swang): If method_returns is a pointer to the actor's
            # state and the checkpointing operations can modify the return
            # values if they mutate the actor's state. Is this okay?
            if isinstance(actor, ray.actor.Checkpointable):
                # If this is the first task to execute on the actor, try to
                # resume from a checkpoint.
                if self._worker.actor_task_counter == 1:
                    if actor_imported:
                        self._restore_and_log_checkpoint(actor)
                else:
                    # Save the checkpoint before returning the method's
                    # return values.
                    self._save_and_log_checkpoint(actor)
            return method_returns
    return actor_method_executor
|
[
"def",
"_make_actor_method_executor",
"(",
"self",
",",
"method_name",
",",
"method",
",",
"actor_imported",
")",
":",
"def",
"actor_method_executor",
"(",
"dummy_return_id",
",",
"actor",
",",
"*",
"args",
")",
":",
"# Update the actor's task counter to reflect the task we're about",
"# to execute.",
"self",
".",
"_worker",
".",
"actor_task_counter",
"+=",
"1",
"# Execute the assigned method and save a checkpoint if necessary.",
"try",
":",
"if",
"is_class_method",
"(",
"method",
")",
":",
"method_returns",
"=",
"method",
"(",
"*",
"args",
")",
"else",
":",
"method_returns",
"=",
"method",
"(",
"actor",
",",
"*",
"args",
")",
"except",
"Exception",
"as",
"e",
":",
"# Save the checkpoint before allowing the method exception",
"# to be thrown, but don't save the checkpoint for actor",
"# creation task.",
"if",
"(",
"isinstance",
"(",
"actor",
",",
"ray",
".",
"actor",
".",
"Checkpointable",
")",
"and",
"self",
".",
"_worker",
".",
"actor_task_counter",
"!=",
"1",
")",
":",
"self",
".",
"_save_and_log_checkpoint",
"(",
"actor",
")",
"raise",
"e",
"else",
":",
"# Handle any checkpointing operations before storing the",
"# method's return values.",
"# NOTE(swang): If method_returns is a pointer to the actor's",
"# state and the checkpointing operations can modify the return",
"# values if they mutate the actor's state. Is this okay?",
"if",
"isinstance",
"(",
"actor",
",",
"ray",
".",
"actor",
".",
"Checkpointable",
")",
":",
"# If this is the first task to execute on the actor, try to",
"# resume from a checkpoint.",
"if",
"self",
".",
"_worker",
".",
"actor_task_counter",
"==",
"1",
":",
"if",
"actor_imported",
":",
"self",
".",
"_restore_and_log_checkpoint",
"(",
"actor",
")",
"else",
":",
"# Save the checkpoint before returning the method's",
"# return values.",
"self",
".",
"_save_and_log_checkpoint",
"(",
"actor",
")",
"return",
"method_returns",
"return",
"actor_method_executor"
] | 48.355932
| 23.40678
|
def get_format_spec(self):
    '''
    The format specification according to the values of `align` and `width`
    '''
    spec_template = u"{{:{align}{width}}}"
    return spec_template.format(align=self.align, width=self.width)
|
[
"def",
"get_format_spec",
"(",
"self",
")",
":",
"return",
"u\"{{:{align}{width}}}\"",
".",
"format",
"(",
"align",
"=",
"self",
".",
"align",
",",
"width",
"=",
"self",
".",
"width",
")"
] | 41.4
| 30.2
|
def run(cls, return_results=False):
    """Apply every attached Rule of every associated Field.

    Depending on ``return_results`` this either returns True (all rules
    successful), False (some rules failed), or the collated per-field
    results as a list of dictionaries.

    Keyword arguments:
    return_results bool -- Return the collated results instead of True/False.
    """
    cls.result = []
    passed = True
    for field in cls.fields:
        outcome, errors = field.run()
        if errors:
            passed = False
        cls.result.append({
            'field': field.name,
            'value': field.value,
            'passed': outcome,
            'errors': errors if errors else None,
        })
    return cls.result if return_results else passed
|
[
"def",
"run",
"(",
"cls",
",",
"return_results",
"=",
"False",
")",
":",
"cls",
".",
"result",
"=",
"[",
"]",
"passed",
"=",
"True",
"for",
"field",
"in",
"cls",
".",
"fields",
":",
"result",
",",
"errors",
"=",
"field",
".",
"run",
"(",
")",
"results",
"=",
"{",
"'field'",
":",
"field",
".",
"name",
",",
"'value'",
":",
"field",
".",
"value",
",",
"'passed'",
":",
"result",
",",
"'errors'",
":",
"None",
"}",
"if",
"errors",
":",
"passed",
"=",
"False",
"results",
"[",
"'errors'",
"]",
"=",
"errors",
"cls",
".",
"result",
".",
"append",
"(",
"results",
")",
"if",
"return_results",
":",
"return",
"cls",
".",
"result",
"return",
"passed"
] | 31.21875
| 20.625
|
def datetime_to_str(self,format="%Y-%m-%dT%H:%M:%S%ZP"):
    """
    Create a new SArray with all the values cast to str. The string format is
    specified by the 'format' parameter.

    Parameters
    ----------
    format : str
        The format to output the string. Default format is "%Y-%m-%dT%H:%M:%S%ZP".

    Returns
    -------
    out : SArray[str]
        The SArray converted to the type 'str'.

    Raises
    ------
    TypeError
        If this SArray does not hold datetime values.

    Examples
    --------
    >>> dt = datetime.datetime(2011, 10, 20, 9, 30, 10, tzinfo=GMT(-5))
    >>> sa = turicreate.SArray([dt])
    >>> sa.datetime_to_str("%e %b %Y %T %ZP")
    dtype: str
    Rows: 1
    [20 Oct 2011 09:30:10 GMT-05:00]

    See Also
    ----------
    str_to_datetime

    References
    ----------
    [1] Boost date time from string conversion guide (http://www.boost.org/doc/libs/1_48_0/doc/html/date_time/date_time_io.html)
    """
    # Guard: the conversion is only defined for datetime-typed SArrays.
    if self.dtype != datetime.datetime:
        raise TypeError("datetime_to_str expects SArray of datetime as input SArray")
    with cython_context():
        return SArray(_proxy=self.__proxy__.datetime_to_str(format))
|
[
"def",
"datetime_to_str",
"(",
"self",
",",
"format",
"=",
"\"%Y-%m-%dT%H:%M:%S%ZP\"",
")",
":",
"if",
"(",
"self",
".",
"dtype",
"!=",
"datetime",
".",
"datetime",
")",
":",
"raise",
"TypeError",
"(",
"\"datetime_to_str expects SArray of datetime as input SArray\"",
")",
"with",
"cython_context",
"(",
")",
":",
"return",
"SArray",
"(",
"_proxy",
"=",
"self",
".",
"__proxy__",
".",
"datetime_to_str",
"(",
"format",
")",
")"
] | 31.052632
| 25.263158
|
def error_log(self, msg='', level=20, traceback=False):
    """Write error message to log.

    Args:
        msg (str): error message
        level (int): logging level
        traceback (bool): add traceback to output or not
    """
    # Override this in subclasses as desired
    stream = sys.stderr
    stream.write(msg + '\n')
    stream.flush()
    if traceback:
        details = traceback_.format_exc()
        stream.write(details)
        stream.flush()
|
[
"def",
"error_log",
"(",
"self",
",",
"msg",
"=",
"''",
",",
"level",
"=",
"20",
",",
"traceback",
"=",
"False",
")",
":",
"# Override this in subclasses as desired",
"sys",
".",
"stderr",
".",
"write",
"(",
"msg",
"+",
"'\\n'",
")",
"sys",
".",
"stderr",
".",
"flush",
"(",
")",
"if",
"traceback",
":",
"tblines",
"=",
"traceback_",
".",
"format_exc",
"(",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"tblines",
")",
"sys",
".",
"stderr",
".",
"flush",
"(",
")"
] | 32.933333
| 11.4
|
def main(args=None):
    # type: (Optional[List[str]]) -> int
    """ Main logic.

    Parses command line options and takes screen shots via ``mss``.
    Returns 0 on success, 1 on a capture error, and 2 when the
    ``--coordinates`` value cannot be split into four fields.
    """
    # CLI definition: region, PNG level, monitor index, output name, quiet.
    cli_args = ArgumentParser()
    cli_args.add_argument(
        "-c",
        "--coordinates",
        default="",
        type=str,
        help="the part of the screen to capture: top, left, width, height",
    )
    cli_args.add_argument(
        "-l",
        "--level",
        default=6,
        type=int,
        choices=list(range(10)),
        help="the PNG compression level",
    )
    cli_args.add_argument(
        "-m", "--monitor", default=0, type=int, help="the monitor to screen shot"
    )
    cli_args.add_argument(
        "-o", "--output", default="monitor-{mon}.png", help="the output file name"
    )
    cli_args.add_argument(
        "-q",
        "--quiet",
        default=False,
        action="store_true",
        help="do not print created files",
    )
    cli_args.add_argument("-v", "--version", action="version", version=__version__)
    options = cli_args.parse_args(args)
    kwargs = {"mon": options.monitor, "output": options.output}
    if options.coordinates:
        # A region was requested: turn "top,left,width,height" into the
        # bounding-box dict passed to sct.grab() below.
        try:
            top, left, width, height = options.coordinates.split(",")
        except ValueError:
            print("Coordinates syntax: top, left, width, height")
            return 2
        kwargs["mon"] = {
            "top": int(top),
            "left": int(left),
            "width": int(width),
            "height": int(height),
        }
        if options.output == "monitor-{mon}.png":
            # Default name still in effect: switch to one that encodes the
            # captured region instead of the monitor number.
            kwargs["output"] = "sct-{top}x{left}_{width}x{height}.png"
    try:
        with mss() as sct:
            if options.coordinates:
                # Region capture: grab one bounding box and encode it.
                output = kwargs["output"].format(**kwargs["mon"])
                sct_img = sct.grab(kwargs["mon"])
                to_png(sct_img.rgb, sct_img.size, level=options.level, output=output)
                if not options.quiet:
                    print(os.path.realpath(output))
            else:
                # Monitor capture: sct.save() names the files and may yield
                # several of them.
                for file_name in sct.save(**kwargs):
                    if not options.quiet:
                        print(os.path.realpath(file_name))
            return 0
    except ScreenShotError:
        return 1
|
[
"def",
"main",
"(",
"args",
"=",
"None",
")",
":",
"# type: (Optional[List[str]]) -> int",
"cli_args",
"=",
"ArgumentParser",
"(",
")",
"cli_args",
".",
"add_argument",
"(",
"\"-c\"",
",",
"\"--coordinates\"",
",",
"default",
"=",
"\"\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"the part of the screen to capture: top, left, width, height\"",
",",
")",
"cli_args",
".",
"add_argument",
"(",
"\"-l\"",
",",
"\"--level\"",
",",
"default",
"=",
"6",
",",
"type",
"=",
"int",
",",
"choices",
"=",
"list",
"(",
"range",
"(",
"10",
")",
")",
",",
"help",
"=",
"\"the PNG compression level\"",
",",
")",
"cli_args",
".",
"add_argument",
"(",
"\"-m\"",
",",
"\"--monitor\"",
",",
"default",
"=",
"0",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"the monitor to screen shot\"",
")",
"cli_args",
".",
"add_argument",
"(",
"\"-o\"",
",",
"\"--output\"",
",",
"default",
"=",
"\"monitor-{mon}.png\"",
",",
"help",
"=",
"\"the output file name\"",
")",
"cli_args",
".",
"add_argument",
"(",
"\"-q\"",
",",
"\"--quiet\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"do not print created files\"",
",",
")",
"cli_args",
".",
"add_argument",
"(",
"\"-v\"",
",",
"\"--version\"",
",",
"action",
"=",
"\"version\"",
",",
"version",
"=",
"__version__",
")",
"options",
"=",
"cli_args",
".",
"parse_args",
"(",
"args",
")",
"kwargs",
"=",
"{",
"\"mon\"",
":",
"options",
".",
"monitor",
",",
"\"output\"",
":",
"options",
".",
"output",
"}",
"if",
"options",
".",
"coordinates",
":",
"try",
":",
"top",
",",
"left",
",",
"width",
",",
"height",
"=",
"options",
".",
"coordinates",
".",
"split",
"(",
"\",\"",
")",
"except",
"ValueError",
":",
"print",
"(",
"\"Coordinates syntax: top, left, width, height\"",
")",
"return",
"2",
"kwargs",
"[",
"\"mon\"",
"]",
"=",
"{",
"\"top\"",
":",
"int",
"(",
"top",
")",
",",
"\"left\"",
":",
"int",
"(",
"left",
")",
",",
"\"width\"",
":",
"int",
"(",
"width",
")",
",",
"\"height\"",
":",
"int",
"(",
"height",
")",
",",
"}",
"if",
"options",
".",
"output",
"==",
"\"monitor-{mon}.png\"",
":",
"kwargs",
"[",
"\"output\"",
"]",
"=",
"\"sct-{top}x{left}_{width}x{height}.png\"",
"try",
":",
"with",
"mss",
"(",
")",
"as",
"sct",
":",
"if",
"options",
".",
"coordinates",
":",
"output",
"=",
"kwargs",
"[",
"\"output\"",
"]",
".",
"format",
"(",
"*",
"*",
"kwargs",
"[",
"\"mon\"",
"]",
")",
"sct_img",
"=",
"sct",
".",
"grab",
"(",
"kwargs",
"[",
"\"mon\"",
"]",
")",
"to_png",
"(",
"sct_img",
".",
"rgb",
",",
"sct_img",
".",
"size",
",",
"level",
"=",
"options",
".",
"level",
",",
"output",
"=",
"output",
")",
"if",
"not",
"options",
".",
"quiet",
":",
"print",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"output",
")",
")",
"else",
":",
"for",
"file_name",
"in",
"sct",
".",
"save",
"(",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"options",
".",
"quiet",
":",
"print",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"file_name",
")",
")",
"return",
"0",
"except",
"ScreenShotError",
":",
"return",
"1"
] | 31.029412
| 20.514706
|
def build_bsub_command(command_template, lsf_args):
    """Build and return an LSF batch (``bsub``) command template.

    The result has the shape ``bsub -o {logfile} -<key> <value> ... <command_template>``,
    where each (key, value) pair comes from *lsf_args*; a pair whose value is
    None contributes a bare ``-<key>`` flag with no argument.

    Returns the empty string when *command_template* is None.
    """
    if command_template is None:
        return ""
    pieces = ['bsub -o {logfile}']
    for flag, argument in lsf_args.items():
        pieces.append(' -%s' % flag)
        if argument is not None:
            pieces.append(' %s' % argument)
    pieces.append(' %s' % command_template)
    return ''.join(pieces)
|
[
"def",
"build_bsub_command",
"(",
"command_template",
",",
"lsf_args",
")",
":",
"if",
"command_template",
"is",
"None",
":",
"return",
"\"\"",
"full_command",
"=",
"'bsub -o {logfile}'",
"for",
"key",
",",
"value",
"in",
"lsf_args",
".",
"items",
"(",
")",
":",
"full_command",
"+=",
"' -%s'",
"%",
"key",
"if",
"value",
"is",
"not",
"None",
":",
"full_command",
"+=",
"' %s'",
"%",
"value",
"full_command",
"+=",
"' %s'",
"%",
"command_template",
"return",
"full_command"
] | 35.666667
| 11.2
|
def _strip_feature_version(featureid):
"""
some feature versions are encoded as featureid.version, this strips those off, if they exist
"""
version_detector = re.compile(r"(?P<featureid>.*)(?P<version>\.\d+)")
match = version_detector.match(featureid)
if match:
return match.groupdict()["featureid"]
else:
return featureid
|
[
"def",
"_strip_feature_version",
"(",
"featureid",
")",
":",
"version_detector",
"=",
"re",
".",
"compile",
"(",
"r\"(?P<featureid>.*)(?P<version>\\.\\d+)\"",
")",
"match",
"=",
"version_detector",
".",
"match",
"(",
"featureid",
")",
"if",
"match",
":",
"return",
"match",
".",
"groupdict",
"(",
")",
"[",
"\"featureid\"",
"]",
"else",
":",
"return",
"featureid"
] | 35.7
| 17.5
|
def start(self):
    """ Start the Manager process.

    Runs the main dispatch loop until ``self._kill_event`` is set: drains
    pending MPI messages (results and task requests), then pairs ready
    workers with pending tasks and sends the tasks over MPI.

    The worker loops on this:
    1. If the last message sent was older than heartbeat period we send a heartbeat
    2.

    TODO: Move task receiving to a thread
    """
    # Rendezvous with all workers before entering the dispatch loop.
    self.comm.Barrier()
    logger.debug("Manager synced with workers")

    self._kill_event = threading.Event()
    # Background threads feed pending_task_queue and drain pending_result_queue.
    self._task_puller_thread = threading.Thread(target=self.pull_tasks,
                                                args=(self._kill_event,))
    self._result_pusher_thread = threading.Thread(target=self.push_results,
                                                  args=(self._kill_event,))
    self._task_puller_thread.start()
    self._result_pusher_thread.start()

    # `start` is set on the first iteration that dispatches work; used only
    # for the final runtime report.
    start = None
    result_counter = 0
    # NOTE(review): task_recv_counter is never incremented below, so the
    # per-iteration summary always logs "Tasks recvd:0" — confirm intent.
    task_recv_counter = 0
    task_sent_counter = 0

    logger.info("Loop start")
    while not self._kill_event.is_set():
        time.sleep(LOOP_SLOWDOWN)

        # In this block we attempt to probe MPI for a set amount of time,
        # and if we have exhausted all available MPI events, we move on
        # to the next block. The timer and counter trigger balance
        # fairness and responsiveness.
        timer = time.time() + 0.05
        counter = min(10, comm.size)
        while time.time() < timer:
            info = MPI.Status()

            if counter > 10:
                logger.debug("Hit max mpi events per round")
                break

            if not self.comm.Iprobe(status=info):
                logger.debug("Timer expired, processed {} mpi events".format(counter))
                break
            else:
                tag = info.Get_tag()
                logger.info("Message with tag {} received".format(tag))

                counter += 1
                if tag == RESULT_TAG:
                    # A worker finished a task; hand the result to the pusher thread.
                    result = self.recv_result_from_workers()
                    self.pending_result_queue.put(result)
                    result_counter += 1

                elif tag == TASK_REQUEST_TAG:
                    # A worker is idle and asking for work.
                    worker_rank = self.recv_task_request_from_workers()
                    self.ready_worker_queue.put(worker_rank)

                else:
                    logger.error("Unknown tag {} - ignoring this message and continuing".format(tag))

        # Match as many ready workers with pending tasks as possible this round.
        available_worker_cnt = self.ready_worker_queue.qsize()
        available_task_cnt = self.pending_task_queue.qsize()
        logger.debug("[MAIN] Ready workers: {} Ready tasks: {}".format(available_worker_cnt,
                                                                       available_task_cnt))
        this_round = min(available_worker_cnt, available_task_cnt)
        for i in range(this_round):
            worker_rank = self.ready_worker_queue.get()
            task = self.pending_task_queue.get()
            # NOTE(review): uses module-level `comm` here but `self.comm`
            # elsewhere — verify they refer to the same communicator.
            comm.send(task, dest=worker_rank, tag=worker_rank)
            task_sent_counter += 1
            logger.debug("Assigning worker:{} task:{}".format(worker_rank, task['task_id']))

        if not start:
            start = time.time()

        logger.debug("Tasks recvd:{} Tasks dispatched:{} Results recvd:{}".format(
            task_recv_counter, task_sent_counter, result_counter))
        # print("[{}] Received: {}".format(self.identity, msg))
        # time.sleep(random.randint(4,10)/10)

    # Kill event set: wait for the communication threads, then tear down ZMQ.
    self._task_puller_thread.join()
    self._result_pusher_thread.join()

    self.task_incoming.close()
    self.result_outgoing.close()
    self.context.term()

    # NOTE(review): if the loop never dispatched anything, `start` is still
    # None and this subtraction raises TypeError — confirm shutdown path.
    delta = time.time() - start
    logger.info("mpi_worker_pool ran for {} seconds".format(delta))
|
[
"def",
"start",
"(",
"self",
")",
":",
"self",
".",
"comm",
".",
"Barrier",
"(",
")",
"logger",
".",
"debug",
"(",
"\"Manager synced with workers\"",
")",
"self",
".",
"_kill_event",
"=",
"threading",
".",
"Event",
"(",
")",
"self",
".",
"_task_puller_thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"pull_tasks",
",",
"args",
"=",
"(",
"self",
".",
"_kill_event",
",",
")",
")",
"self",
".",
"_result_pusher_thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"push_results",
",",
"args",
"=",
"(",
"self",
".",
"_kill_event",
",",
")",
")",
"self",
".",
"_task_puller_thread",
".",
"start",
"(",
")",
"self",
".",
"_result_pusher_thread",
".",
"start",
"(",
")",
"start",
"=",
"None",
"result_counter",
"=",
"0",
"task_recv_counter",
"=",
"0",
"task_sent_counter",
"=",
"0",
"logger",
".",
"info",
"(",
"\"Loop start\"",
")",
"while",
"not",
"self",
".",
"_kill_event",
".",
"is_set",
"(",
")",
":",
"time",
".",
"sleep",
"(",
"LOOP_SLOWDOWN",
")",
"# In this block we attempt to probe MPI for a set amount of time,",
"# and if we have exhausted all available MPI events, we move on",
"# to the next block. The timer and counter trigger balance",
"# fairness and responsiveness.",
"timer",
"=",
"time",
".",
"time",
"(",
")",
"+",
"0.05",
"counter",
"=",
"min",
"(",
"10",
",",
"comm",
".",
"size",
")",
"while",
"time",
".",
"time",
"(",
")",
"<",
"timer",
":",
"info",
"=",
"MPI",
".",
"Status",
"(",
")",
"if",
"counter",
">",
"10",
":",
"logger",
".",
"debug",
"(",
"\"Hit max mpi events per round\"",
")",
"break",
"if",
"not",
"self",
".",
"comm",
".",
"Iprobe",
"(",
"status",
"=",
"info",
")",
":",
"logger",
".",
"debug",
"(",
"\"Timer expired, processed {} mpi events\"",
".",
"format",
"(",
"counter",
")",
")",
"break",
"else",
":",
"tag",
"=",
"info",
".",
"Get_tag",
"(",
")",
"logger",
".",
"info",
"(",
"\"Message with tag {} received\"",
".",
"format",
"(",
"tag",
")",
")",
"counter",
"+=",
"1",
"if",
"tag",
"==",
"RESULT_TAG",
":",
"result",
"=",
"self",
".",
"recv_result_from_workers",
"(",
")",
"self",
".",
"pending_result_queue",
".",
"put",
"(",
"result",
")",
"result_counter",
"+=",
"1",
"elif",
"tag",
"==",
"TASK_REQUEST_TAG",
":",
"worker_rank",
"=",
"self",
".",
"recv_task_request_from_workers",
"(",
")",
"self",
".",
"ready_worker_queue",
".",
"put",
"(",
"worker_rank",
")",
"else",
":",
"logger",
".",
"error",
"(",
"\"Unknown tag {} - ignoring this message and continuing\"",
".",
"format",
"(",
"tag",
")",
")",
"available_worker_cnt",
"=",
"self",
".",
"ready_worker_queue",
".",
"qsize",
"(",
")",
"available_task_cnt",
"=",
"self",
".",
"pending_task_queue",
".",
"qsize",
"(",
")",
"logger",
".",
"debug",
"(",
"\"[MAIN] Ready workers: {} Ready tasks: {}\"",
".",
"format",
"(",
"available_worker_cnt",
",",
"available_task_cnt",
")",
")",
"this_round",
"=",
"min",
"(",
"available_worker_cnt",
",",
"available_task_cnt",
")",
"for",
"i",
"in",
"range",
"(",
"this_round",
")",
":",
"worker_rank",
"=",
"self",
".",
"ready_worker_queue",
".",
"get",
"(",
")",
"task",
"=",
"self",
".",
"pending_task_queue",
".",
"get",
"(",
")",
"comm",
".",
"send",
"(",
"task",
",",
"dest",
"=",
"worker_rank",
",",
"tag",
"=",
"worker_rank",
")",
"task_sent_counter",
"+=",
"1",
"logger",
".",
"debug",
"(",
"\"Assigning worker:{} task:{}\"",
".",
"format",
"(",
"worker_rank",
",",
"task",
"[",
"'task_id'",
"]",
")",
")",
"if",
"not",
"start",
":",
"start",
"=",
"time",
".",
"time",
"(",
")",
"logger",
".",
"debug",
"(",
"\"Tasks recvd:{} Tasks dispatched:{} Results recvd:{}\"",
".",
"format",
"(",
"task_recv_counter",
",",
"task_sent_counter",
",",
"result_counter",
")",
")",
"# print(\"[{}] Received: {}\".format(self.identity, msg))",
"# time.sleep(random.randint(4,10)/10)",
"self",
".",
"_task_puller_thread",
".",
"join",
"(",
")",
"self",
".",
"_result_pusher_thread",
".",
"join",
"(",
")",
"self",
".",
"task_incoming",
".",
"close",
"(",
")",
"self",
".",
"result_outgoing",
".",
"close",
"(",
")",
"self",
".",
"context",
".",
"term",
"(",
")",
"delta",
"=",
"time",
".",
"time",
"(",
")",
"-",
"start",
"logger",
".",
"info",
"(",
"\"mpi_worker_pool ran for {} seconds\"",
".",
"format",
"(",
"delta",
")",
")"
] | 39.231579
| 23.157895
|
def check(self, line_info):
    """Decide whether the automagic handler should process this line.

    Normal '%'-prefixed magics are caught earlier via check_esc_chars;
    this only fires when automagic is on, the name is a known magic, and
    nothing in the user namespace shadows it. Returns the 'magic'
    handler, or None to let other checkers run.
    """
    shell = self.shell
    if not (shell.automagic and shell.find_magic(line_info.ifun)):
        return None
    # Inside a continued (multi-line) block, only trigger if the
    # multi-line-specials feature is enabled.
    if line_info.continue_prompt and not self.prefilter_manager.multi_line_specials:
        return None
    # A user variable named like the magic's first dotted component wins.
    head = line_info.ifun.split('.', 1)[0]
    if is_shadowed(head, shell):
        return None
    return self.prefilter_manager.get_handler_by_name('magic')
|
[
"def",
"check",
"(",
"self",
",",
"line_info",
")",
":",
"if",
"not",
"self",
".",
"shell",
".",
"automagic",
"or",
"not",
"self",
".",
"shell",
".",
"find_magic",
"(",
"line_info",
".",
"ifun",
")",
":",
"return",
"None",
"# We have a likely magic method. Make sure we should actually call it.",
"if",
"line_info",
".",
"continue_prompt",
"and",
"not",
"self",
".",
"prefilter_manager",
".",
"multi_line_specials",
":",
"return",
"None",
"head",
"=",
"line_info",
".",
"ifun",
".",
"split",
"(",
"'.'",
",",
"1",
")",
"[",
"0",
"]",
"if",
"is_shadowed",
"(",
"head",
",",
"self",
".",
"shell",
")",
":",
"return",
"None",
"return",
"self",
".",
"prefilter_manager",
".",
"get_handler_by_name",
"(",
"'magic'",
")"
] | 46.277778
| 24.277778
|
def filter_by_attrs(self, **kwargs):
    """Return a Dataset keeping only the variables whose attrs pass every filter.

    Each ``key=value`` keyword is a test against the variable's
    ``attrs[key]``: when *value* is callable it is invoked with
    ``attrs.get(key)`` (None when the attribute is absent) and must return
    a truthy result; otherwise the attribute value must equal *value*.
    A data variable is selected only if all tests pass.

    Parameters
    ----------
    **kwargs : key=value
        Attribute name mapped to either a literal value or a one-argument
        predicate.

    Returns
    -------
    Dataset
        New dataset restricted to the matching data variables.
    """
    def _keeps(variable):
        # A variable survives only if every keyword test passes.
        for attr_name, pattern in kwargs.items():
            attr_value = variable.attrs.get(attr_name)
            if not ((callable(pattern) and pattern(attr_value)) or
                    attr_value == pattern):
                return False
        return True

    # With no filters the original loop selects nothing, so guard against
    # the vacuous all-pass of an empty test set.
    if not kwargs:
        return self[[]]
    selection = [name for name, variable in self.data_vars.items()
                 if _keeps(variable)]
    return self[selection]
|
[
"def",
"filter_by_attrs",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa",
"selection",
"=",
"[",
"]",
"for",
"var_name",
",",
"variable",
"in",
"self",
".",
"data_vars",
".",
"items",
"(",
")",
":",
"has_value_flag",
"=",
"False",
"for",
"attr_name",
",",
"pattern",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"attr_value",
"=",
"variable",
".",
"attrs",
".",
"get",
"(",
"attr_name",
")",
"if",
"(",
"(",
"callable",
"(",
"pattern",
")",
"and",
"pattern",
"(",
"attr_value",
")",
")",
"or",
"attr_value",
"==",
"pattern",
")",
":",
"has_value_flag",
"=",
"True",
"else",
":",
"has_value_flag",
"=",
"False",
"break",
"if",
"has_value_flag",
"is",
"True",
":",
"selection",
".",
"append",
"(",
"var_name",
")",
"return",
"self",
"[",
"selection",
"]"
] | 45.684783
| 19.25
|
def pad_hex(long_int):
    """
    Converts a Long integer (or hex string) to hex format padded with zeroes for hashing.

    :param {Long integer|String} long_int Number or string to pad.
    :return {String} Padded hex string.
    """
    if isinstance(long_int, six.string_types):
        hash_str = long_int
    else:
        hash_str = long_to_hex(long_int)
    if len(hash_str) % 2 == 1:
        # Odd number of nibbles: prepend one zero so the bytes line up.
        hash_str = '0' + hash_str
    elif hash_str[0] in '89ABCDEFabcdef':
        # Leading nibble has the high bit set: prepend a zero byte so the
        # value is not interpreted as negative.
        hash_str = '00' + hash_str
    return hash_str
|
[
"def",
"pad_hex",
"(",
"long_int",
")",
":",
"if",
"not",
"isinstance",
"(",
"long_int",
",",
"six",
".",
"string_types",
")",
":",
"hash_str",
"=",
"long_to_hex",
"(",
"long_int",
")",
"else",
":",
"hash_str",
"=",
"long_int",
"if",
"len",
"(",
"hash_str",
")",
"%",
"2",
"==",
"1",
":",
"hash_str",
"=",
"'0%s'",
"%",
"hash_str",
"elif",
"hash_str",
"[",
"0",
"]",
"in",
"'89ABCDEFabcdef'",
":",
"hash_str",
"=",
"'00%s'",
"%",
"hash_str",
"return",
"hash_str"
] | 34.4
| 12.533333
|
def super_glob(pattern):
    'glob that understands **/ for all sub-directories recursively.'
    pieces = pattern.split('/')
    if '**' not in pieces:
        expansions = ['/'.join(pieces)]
    else:
        # Split the pattern around the first '**' segment, then expand it
        # into one concrete pattern per directory under the prefix.
        star_at = pieces.index('**')
        prefix = '/'.join(pieces[:star_at])
        postfix = '/'.join(pieces[star_at + 1:])
        expansions = ['%s/%s' % (walk_root, postfix)
                      for walk_root, _dirs, _files in os.walk(prefix)]
    return chain.from_iterable(glob(expansion) for expansion in expansions)
|
[
"def",
"super_glob",
"(",
"pattern",
")",
":",
"pieces",
"=",
"pattern",
".",
"split",
"(",
"'/'",
")",
"if",
"'**'",
"in",
"pieces",
":",
"prefix",
"=",
"'/'",
".",
"join",
"(",
"pieces",
"[",
":",
"pieces",
".",
"index",
"(",
"'**'",
")",
"]",
")",
"postfix",
"=",
"'/'",
".",
"join",
"(",
"pieces",
"[",
"pieces",
".",
"index",
"(",
"'**'",
")",
"+",
"1",
":",
"]",
")",
"roots",
"=",
"[",
"dirname",
"for",
"dirname",
",",
"dirnames",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"prefix",
")",
"]",
"patterns",
"=",
"[",
"root",
"+",
"'/'",
"+",
"postfix",
"for",
"root",
"in",
"roots",
"]",
"else",
":",
"patterns",
"=",
"[",
"'/'",
".",
"join",
"(",
"pieces",
")",
"]",
"return",
"chain",
".",
"from_iterable",
"(",
"glob",
"(",
"pattern",
")",
"for",
"pattern",
"in",
"patterns",
")"
] | 43.75
| 19.25
|
def iswhat(o):
    """Apply every ``inspect.is*`` predicate to *o*.

    Returns:
        dict: keys are `inspect.is*` function names; values are `bool` results
        returned by each of the predicates when called on *o*.
    """
    import inspect
    return {name: predicate(o)
            for name, predicate in inspect.getmembers(inspect)
            if name.startswith("is")}
|
[
"def",
"iswhat",
"(",
"o",
")",
":",
"import",
"inspect",
"isfs",
"=",
"{",
"n",
":",
"f",
"for",
"n",
",",
"f",
"in",
"inspect",
".",
"getmembers",
"(",
"inspect",
")",
"if",
"n",
"[",
"0",
":",
"2",
"]",
"==",
"\"is\"",
"}",
"return",
"{",
"n",
":",
"f",
"(",
"o",
")",
"for",
"n",
",",
"f",
"in",
"isfs",
".",
"items",
"(",
")",
"}"
] | 36.090909
| 18.363636
|
def sql_reset(app, style, connection):
    "Returns a list of the DROP TABLE SQL, then the CREATE TABLE SQL, for the given module."
    drop_statements = sql_delete(app, style, connection)
    create_statements = sql_all(app, style, connection)
    return drop_statements + create_statements
|
[
"def",
"sql_reset",
"(",
"app",
",",
"style",
",",
"connection",
")",
":",
"return",
"sql_delete",
"(",
"app",
",",
"style",
",",
"connection",
")",
"+",
"sql_all",
"(",
"app",
",",
"style",
",",
"connection",
")"
] | 69.666667
| 31
|
def model_to_dict(model, exclude=None):
    """
    Extract a SQLAlchemy model instance to a dictionary.

    :param model: the model instance to be extracted
    :param exclude: optional iterable of attribute names to omit
    :return: new dictionary of the instance's attributes, minus the
        excluded names and SQLAlchemy's internal ``_sa_instance_state``
    """
    # Copy into a set: the original appended '_sa_instance_state' directly to
    # the caller's list, mutating it and leaking state across calls.
    excluded = set(exclude or ())
    excluded.add('_sa_instance_state')
    return {k: v for k, v in model.__dict__.items() if k not in excluded}
|
[
"def",
"model_to_dict",
"(",
"model",
",",
"exclude",
"=",
"None",
")",
":",
"exclude",
"=",
"exclude",
"or",
"[",
"]",
"exclude",
".",
"append",
"(",
"'_sa_instance_state'",
")",
"return",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"model",
".",
"__dict__",
".",
"items",
"(",
")",
"if",
"k",
"not",
"in",
"exclude",
"}"
] | 39
| 8.4
|
def connect(self):
    """
    Create internal connection to AMQP service.

    Uses the instance's host/username/password/heartbeat settings and
    stores the resulting blocking connection on ``self.connection``.
    """
    logging.info("Connecting to {} with user {}.".format(self.host, self.username))
    creds = pika.PlainCredentials(self.username, self.password)
    params = pika.ConnectionParameters(
        host=self.host,
        credentials=creds,
        heartbeat_interval=self.heartbeat_interval,
    )
    self.connection = pika.BlockingConnection(params)
|
[
"def",
"connect",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"\"Connecting to {} with user {}.\"",
".",
"format",
"(",
"self",
".",
"host",
",",
"self",
".",
"username",
")",
")",
"credentials",
"=",
"pika",
".",
"PlainCredentials",
"(",
"self",
".",
"username",
",",
"self",
".",
"password",
")",
"connection_params",
"=",
"pika",
".",
"ConnectionParameters",
"(",
"host",
"=",
"self",
".",
"host",
",",
"credentials",
"=",
"credentials",
",",
"heartbeat_interval",
"=",
"self",
".",
"heartbeat_interval",
")",
"self",
".",
"connection",
"=",
"pika",
".",
"BlockingConnection",
"(",
"connection_params",
")"
] | 56.3
| 26.5
|
def pause(self):
    """ Pauses playback.

    Toggles the decoder: a PAUSED decoder resumes (paused flag cleared),
    a PLAYING decoder pauses (paused flag set); any other state only
    prints a notice.
    """
    status = self.decoder.status
    if status == mediadecoder.PAUSED:
        self.decoder.pause()
        self.paused = False
    elif status == mediadecoder.PLAYING:
        self.decoder.pause()
        self.paused = True
    else:
        print("Player not in pausable state")
|
[
"def",
"pause",
"(",
"self",
")",
":",
"if",
"self",
".",
"decoder",
".",
"status",
"==",
"mediadecoder",
".",
"PAUSED",
":",
"self",
".",
"decoder",
".",
"pause",
"(",
")",
"self",
".",
"paused",
"=",
"False",
"elif",
"self",
".",
"decoder",
".",
"status",
"==",
"mediadecoder",
".",
"PLAYING",
":",
"self",
".",
"decoder",
".",
"pause",
"(",
")",
"self",
".",
"paused",
"=",
"True",
"else",
":",
"print",
"(",
"\"Player not in pausable state\"",
")"
] | 27.7
| 14.7
|
def GetRootFileEntry(self):
    """Retrieves the root file entry.

    Returns:
      GzipFileEntry: a file entry or None if not available.
    """
    # The root of a gzip "file system" is addressed by a path spec whose
    # parent is this file system's own parent path spec.
    root_spec = gzip_path_spec.GzipPathSpec(parent=self._path_spec.parent)
    return self.GetFileEntryByPathSpec(root_spec)
|
[
"def",
"GetRootFileEntry",
"(",
"self",
")",
":",
"path_spec",
"=",
"gzip_path_spec",
".",
"GzipPathSpec",
"(",
"parent",
"=",
"self",
".",
"_path_spec",
".",
"parent",
")",
"return",
"self",
".",
"GetFileEntryByPathSpec",
"(",
"path_spec",
")"
] | 33.125
| 17.875
|
def get_syslog_config(host, username, password, protocol=None, port=None, esxi_hosts=None, credstore=None):
    '''
    Retrieve the syslog configuration.

    host
        The location of the host.

    username
        The username used to login to the host, such as ``root``.

    password
        The password used to login to the host.

    protocol
        Optionally set to alternate protocol if the host is not using the default
        protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the host is not using the default
        port. Default port is ``443``.

    esxi_hosts
        If ``host`` is a vCenter host, then use esxi_hosts to execute this function
        on a list of one or more ESXi machines.

    credstore
        Optionally set to path to the credential store file.

    :return: Dictionary with keys and values corresponding to the
             syslog configuration, per host.

    CLI Example:

    .. code-block:: bash

        # Used for ESXi host connection information
        salt '*' vsphere.get_syslog_config my.esxi.host root bad-password

        # Used for connecting to a vCenter Server
        salt '*' vsphere.get_syslog_config my.vcenter.location root bad-password \
        esxi_hosts='[esxi-1.host.com, esxi-2.host.com]'
    '''
    cmd = 'system syslog config get'

    ret = {}
    if esxi_hosts:
        if not isinstance(esxi_hosts, list):
            raise CommandExecutionError('\'esxi_hosts\' must be a list.')

        # vCenter connection: run esxcli once per target ESXi machine.
        for target_host in esxi_hosts:
            response = salt.utils.vmware.esxcli(host, username, password, cmd,
                                                protocol=protocol, port=port,
                                                esxi_host=target_host, credstore=credstore)
            # Reshape the raw esxcli stdout into a usable dictionary.
            ret[target_host] = _format_syslog_config(response)
    else:
        # Single host, or a vCenter connection when no esxi_hosts are given.
        response = salt.utils.vmware.esxcli(host, username, password, cmd,
                                            protocol=protocol, port=port,
                                            credstore=credstore)
        ret[host] = _format_syslog_config(response)

    return ret
|
[
"def",
"get_syslog_config",
"(",
"host",
",",
"username",
",",
"password",
",",
"protocol",
"=",
"None",
",",
"port",
"=",
"None",
",",
"esxi_hosts",
"=",
"None",
",",
"credstore",
"=",
"None",
")",
":",
"cmd",
"=",
"'system syslog config get'",
"ret",
"=",
"{",
"}",
"if",
"esxi_hosts",
":",
"if",
"not",
"isinstance",
"(",
"esxi_hosts",
",",
"list",
")",
":",
"raise",
"CommandExecutionError",
"(",
"'\\'esxi_hosts\\' must be a list.'",
")",
"for",
"esxi_host",
"in",
"esxi_hosts",
":",
"response",
"=",
"salt",
".",
"utils",
".",
"vmware",
".",
"esxcli",
"(",
"host",
",",
"username",
",",
"password",
",",
"cmd",
",",
"protocol",
"=",
"protocol",
",",
"port",
"=",
"port",
",",
"esxi_host",
"=",
"esxi_host",
",",
"credstore",
"=",
"credstore",
")",
"# format the response stdout into something useful",
"ret",
".",
"update",
"(",
"{",
"esxi_host",
":",
"_format_syslog_config",
"(",
"response",
")",
"}",
")",
"else",
":",
"# Handles a single host or a vCenter connection when no esxi_hosts are provided.",
"response",
"=",
"salt",
".",
"utils",
".",
"vmware",
".",
"esxcli",
"(",
"host",
",",
"username",
",",
"password",
",",
"cmd",
",",
"protocol",
"=",
"protocol",
",",
"port",
"=",
"port",
",",
"credstore",
"=",
"credstore",
")",
"# format the response stdout into something useful",
"ret",
".",
"update",
"(",
"{",
"host",
":",
"_format_syslog_config",
"(",
"response",
")",
"}",
")",
"return",
"ret"
] | 36.125
| 28.40625
|
def prune(manager: Manager):
    """Prune nodes not belonging to any edges.

    Scans every node (with a progress bar) and deletes those attached to
    no network, committing once at the end.

    :param manager: provides the SQLAlchemy session and the node count.
    """
    nodes_to_delete = [
        node
        for node in tqdm(manager.session.query(Node), total=manager.count_nodes())
        if not node.networks
    ]
    # Session.delete() accepts a single mapped instance; passing the whole
    # list raises UnmappedInstanceError, so delete the nodes one by one.
    for node in nodes_to_delete:
        manager.session.delete(node)
    manager.session.commit()
|
[
"def",
"prune",
"(",
"manager",
":",
"Manager",
")",
":",
"nodes_to_delete",
"=",
"[",
"node",
"for",
"node",
"in",
"tqdm",
"(",
"manager",
".",
"session",
".",
"query",
"(",
"Node",
")",
",",
"total",
"=",
"manager",
".",
"count_nodes",
"(",
")",
")",
"if",
"not",
"node",
".",
"networks",
"]",
"manager",
".",
"session",
".",
"delete",
"(",
"nodes_to_delete",
")",
"manager",
".",
"session",
".",
"commit",
"(",
")"
] | 33.111111
| 17.888889
|
def parse_name(cls, name: str, default: T = None) -> T:
    """Parse specified name for IntEnum; return default if not found.

    Matching is case-insensitive; an empty or falsy name yields *default*.
    """
    if not name:
        return default
    wanted = name.lower()
    for member in cls:
        if member.name.lower() == wanted:
            return member
    return default
|
[
"def",
"parse_name",
"(",
"cls",
",",
"name",
":",
"str",
",",
"default",
":",
"T",
"=",
"None",
")",
"->",
"T",
":",
"if",
"not",
"name",
":",
"return",
"default",
"name",
"=",
"name",
".",
"lower",
"(",
")",
"return",
"next",
"(",
"(",
"item",
"for",
"item",
"in",
"cls",
"if",
"name",
"==",
"item",
".",
"name",
".",
"lower",
"(",
")",
")",
",",
"default",
")"
] | 47.5
| 17.166667
|
def deserialize(cls, target_class, array):
    """
    Build a SessionServer from the wrapped-array payload returned by bunq.

    The payload is a list of single-key dicts at fixed indices (id, token,
    user); the user entry is keyed by user kind (company, person, api key).

    :type target_class: core.SessionServer|type
    :type array: list
    :rtype: core.SessionServer
    :raises BunqException: when the user entry matches none of the known
        user-kind fields.
    """
    # Bypass __init__ and populate the attribute dict straight from the payload.
    session_server = target_class.__new__(target_class)
    session_server.__dict__ = {
        cls._ATTRIBUTE_ID: converter.deserialize(
            core.Id,
            array[cls._INDEX_ID][cls._FIELD_ID]
        ),
        cls._ATTRIBUTE_TOKEN: converter.deserialize(
            core.SessionToken,
            array[cls._INDEX_TOKEN][cls._FIELD_TOKEN]
        ),
        # NOTE(review): _ATTRIBUTE_USER_API_KEY is not pre-initialized to None
        # here, unlike the company/person attributes — confirm whether readers
        # tolerate its absence when the user is a company or person.
        cls._ATTRIBUTE_USER_COMPANY: None,
        cls._ATTRIBUTE_USER_PERSON: None,
    }
    # Exactly one of the user-kind fields is expected in the user entry.
    user_dict_wrapped = array[cls._INDEX_USER]

    if cls._FIELD_USER_COMPANY in user_dict_wrapped:
        session_server.__dict__[cls._ATTRIBUTE_USER_COMPANY] = \
            converter.deserialize(
                endpoint.UserCompany,
                user_dict_wrapped[cls._FIELD_USER_COMPANY]
            )
    elif cls._FIELD_USER_PERSON in user_dict_wrapped:
        session_server.__dict__[cls._ATTRIBUTE_USER_PERSON] = \
            converter.deserialize(
                endpoint.UserPerson,
                user_dict_wrapped[cls._FIELD_USER_PERSON]
            )
    elif cls._FIELD_USER_API_KEY in user_dict_wrapped:
        session_server.__dict__[cls._ATTRIBUTE_USER_API_KEY] = \
            converter.deserialize(
                endpoint.UserApiKey,
                user_dict_wrapped[cls._FIELD_USER_API_KEY]
            )
    else:
        raise BunqException(cls._ERROR_COULD_NOT_DETERMINE_USER)

    return session_server
|
[
"def",
"deserialize",
"(",
"cls",
",",
"target_class",
",",
"array",
")",
":",
"session_server",
"=",
"target_class",
".",
"__new__",
"(",
"target_class",
")",
"session_server",
".",
"__dict__",
"=",
"{",
"cls",
".",
"_ATTRIBUTE_ID",
":",
"converter",
".",
"deserialize",
"(",
"core",
".",
"Id",
",",
"array",
"[",
"cls",
".",
"_INDEX_ID",
"]",
"[",
"cls",
".",
"_FIELD_ID",
"]",
")",
",",
"cls",
".",
"_ATTRIBUTE_TOKEN",
":",
"converter",
".",
"deserialize",
"(",
"core",
".",
"SessionToken",
",",
"array",
"[",
"cls",
".",
"_INDEX_TOKEN",
"]",
"[",
"cls",
".",
"_FIELD_TOKEN",
"]",
")",
",",
"cls",
".",
"_ATTRIBUTE_USER_COMPANY",
":",
"None",
",",
"cls",
".",
"_ATTRIBUTE_USER_PERSON",
":",
"None",
",",
"}",
"user_dict_wrapped",
"=",
"array",
"[",
"cls",
".",
"_INDEX_USER",
"]",
"if",
"cls",
".",
"_FIELD_USER_COMPANY",
"in",
"user_dict_wrapped",
":",
"session_server",
".",
"__dict__",
"[",
"cls",
".",
"_ATTRIBUTE_USER_COMPANY",
"]",
"=",
"converter",
".",
"deserialize",
"(",
"endpoint",
".",
"UserCompany",
",",
"user_dict_wrapped",
"[",
"cls",
".",
"_FIELD_USER_COMPANY",
"]",
")",
"elif",
"cls",
".",
"_FIELD_USER_PERSON",
"in",
"user_dict_wrapped",
":",
"session_server",
".",
"__dict__",
"[",
"cls",
".",
"_ATTRIBUTE_USER_PERSON",
"]",
"=",
"converter",
".",
"deserialize",
"(",
"endpoint",
".",
"UserPerson",
",",
"user_dict_wrapped",
"[",
"cls",
".",
"_FIELD_USER_PERSON",
"]",
")",
"elif",
"cls",
".",
"_FIELD_USER_API_KEY",
"in",
"user_dict_wrapped",
":",
"session_server",
".",
"__dict__",
"[",
"cls",
".",
"_ATTRIBUTE_USER_API_KEY",
"]",
"=",
"converter",
".",
"deserialize",
"(",
"endpoint",
".",
"UserApiKey",
",",
"user_dict_wrapped",
"[",
"cls",
".",
"_FIELD_USER_API_KEY",
"]",
")",
"else",
":",
"raise",
"BunqException",
"(",
"cls",
".",
"_ERROR_COULD_NOT_DETERMINE_USER",
")",
"return",
"session_server"
] | 36.086957
| 17.173913
|
def delimiter_groups(line, begin_delim=begin_delim,
                     end_delim=end_delim):
    """Split a line into alternating groups.

    The first group cannot have a line feed inserted,
    the next one can, etc. Groups alternate: an "unsplittable" run ending
    at an opening delimiter, then a "splittable" run ending when its
    matching closing delimiter is reached.
    """
    text = []
    # Consume from a single iterator so both phases share position.
    line = iter(line)
    while True:
        # First build and yield an unsplittable group: everything up to and
        # including the next opening delimiter.
        for item in line:
            text.append(item)
            if item in begin_delim:
                break
        if not text:
            # Iterator exhausted with nothing collected: all done.
            break
        yield text

        # Now build and yield a splittable group: track nesting depth and
        # stop when the delimiter that closes the group appears (depth < 0).
        level = 0
        text = []
        for item in line:
            if item in begin_delim:
                level += 1
            elif item in end_delim:
                level -= 1
                if level < 0:
                    # Closing delimiter of the group: yield what we have and
                    # seed the next unsplittable group with the delimiter.
                    yield text
                    text = [item]
                    break
            text.append(item)
        else:
            # Iterator exhausted without closing the group; any leftover
            # tokens would mean an unbalanced line.
            assert not text, text
            break
|
[
"def",
"delimiter_groups",
"(",
"line",
",",
"begin_delim",
"=",
"begin_delim",
",",
"end_delim",
"=",
"end_delim",
")",
":",
"text",
"=",
"[",
"]",
"line",
"=",
"iter",
"(",
"line",
")",
"while",
"True",
":",
"# First build and yield an unsplittable group",
"for",
"item",
"in",
"line",
":",
"text",
".",
"append",
"(",
"item",
")",
"if",
"item",
"in",
"begin_delim",
":",
"break",
"if",
"not",
"text",
":",
"break",
"yield",
"text",
"# Now build and yield a splittable group",
"level",
"=",
"0",
"text",
"=",
"[",
"]",
"for",
"item",
"in",
"line",
":",
"if",
"item",
"in",
"begin_delim",
":",
"level",
"+=",
"1",
"elif",
"item",
"in",
"end_delim",
":",
"level",
"-=",
"1",
"if",
"level",
"<",
"0",
":",
"yield",
"text",
"text",
"=",
"[",
"item",
"]",
"break",
"text",
".",
"append",
"(",
"item",
")",
"else",
":",
"assert",
"not",
"text",
",",
"text",
"break"
] | 27.470588
| 14.617647
|
def genusspecific(self, analysistype='genesippr'):
    """
    Creates simplified genus-specific reports. Instead of the % ID and the fold coverage, a simple +/- scheme is
    used for presence/absence
    :param analysistype: The variable to use when accessing attributes in the metadata object
    """
    # Dictionary to store all the output strings
    results = dict()
    for genus, genelist in self.genedict.items():
        # Initialise the dictionary with the appropriate genus
        results[genus] = str()
        for sample in self.runmetadata.samples:
            try:
                # Find the samples that match the current genus - note that samples with multiple hits will be
                # represented in multiple outputs
                if genus in sample[analysistype].targetgenera:
                    # Populate the results string with the sample name
                    results[genus] += '{},'.format(sample.name)
                    # Iterate through all the genes associated with this genus. If the gene is in the current
                    # sample, add a + to the string, otherwise, add a -
                    # NOTE(review): hit names appear to be 'gene_variant', so only the portion before
                    # the first underscore is compared (case-insensitively) - confirm against the
                    # format of sample[analysistype].results keys.
                    for gene in genelist:
                        if gene.lower() in [target[0].lower().split('_')[0] for target in
                                            sample[analysistype].results.items()]:
                            results[genus] += '+,'
                        else:
                            results[genus] += '-,'
                    results[genus] += '\n'
            # If the sample is missing the targetgenera attribute, then it is ignored for these reports
            except AttributeError:
                pass
    # Create and populate the genus-specific reports
    for genus, resultstring in results.items():
        # Only create the report if there are results for the current genus
        if resultstring:
            with open(os.path.join(self.reportpath, '{}_genesippr.csv'.format(genus)), 'w') as genusreport:
                # Write the header to the report - Strain plus add the genes associated with the genus
                genusreport.write('Strain,{}\n'.format(','.join(self.genedict[genus])))
                # Write the results to the report
                genusreport.write(resultstring)
|
[
"def",
"genusspecific",
"(",
"self",
",",
"analysistype",
"=",
"'genesippr'",
")",
":",
"# Dictionary to store all the output strings",
"results",
"=",
"dict",
"(",
")",
"for",
"genus",
",",
"genelist",
"in",
"self",
".",
"genedict",
".",
"items",
"(",
")",
":",
"# Initialise the dictionary with the appropriate genus",
"results",
"[",
"genus",
"]",
"=",
"str",
"(",
")",
"for",
"sample",
"in",
"self",
".",
"runmetadata",
".",
"samples",
":",
"try",
":",
"# Find the samples that match the current genus - note that samples with multiple hits will be",
"# represented in multiple outputs",
"if",
"genus",
"in",
"sample",
"[",
"analysistype",
"]",
".",
"targetgenera",
":",
"# Populate the results string with the sample name",
"results",
"[",
"genus",
"]",
"+=",
"'{},'",
".",
"format",
"(",
"sample",
".",
"name",
")",
"# Iterate through all the genes associated with this genus. If the gene is in the current",
"# sample, add a + to the string, otherwise, add a -",
"for",
"gene",
"in",
"genelist",
":",
"if",
"gene",
".",
"lower",
"(",
")",
"in",
"[",
"target",
"[",
"0",
"]",
".",
"lower",
"(",
")",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
"for",
"target",
"in",
"sample",
"[",
"analysistype",
"]",
".",
"results",
".",
"items",
"(",
")",
"]",
":",
"results",
"[",
"genus",
"]",
"+=",
"'+,'",
"else",
":",
"results",
"[",
"genus",
"]",
"+=",
"'-,'",
"results",
"[",
"genus",
"]",
"+=",
"'\\n'",
"# If the sample is missing the targetgenera attribute, then it is ignored for these reports",
"except",
"AttributeError",
":",
"pass",
"# Create and populate the genus-specific reports",
"for",
"genus",
",",
"resultstring",
"in",
"results",
".",
"items",
"(",
")",
":",
"# Only create the report if there are results for the current genus",
"if",
"resultstring",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"reportpath",
",",
"'{}_genesippr.csv'",
".",
"format",
"(",
"genus",
")",
")",
",",
"'w'",
")",
"as",
"genusreport",
":",
"# Write the header to the report - Strain plus add the genes associated with the genus",
"genusreport",
".",
"write",
"(",
"'Strain,{}\\n'",
".",
"format",
"(",
"','",
".",
"join",
"(",
"self",
".",
"genedict",
"[",
"genus",
"]",
")",
")",
")",
"# Write the results to the report",
"genusreport",
".",
"write",
"(",
"resultstring",
")"
] | 61.179487
| 27.076923
|
def getObjectProfile(self, pid, asOfDateTime=None):
    """Get top-level information about a single Fedora object; optionally,
    retrieve information as of a particular date-time.

    :param pid: object pid
    :param asOfDateTime: optional datetime; ``must`` be a non-naive datetime
        so it can be converted to a date-time format Fedora can understand
    :rtype: :class:`requests.models.Response`
    """
    # REST endpoint: /objects/{pid} ? [format] [asOfDateTime]
    query = {}
    if asOfDateTime:
        query['asOfDateTime'] = datetime_to_fedoratime(asOfDateTime)
    query.update(self.format_xml)
    object_url = 'objects/%(pid)s' % {'pid': pid}
    return self.get(object_url, params=query)
|
[
"def",
"getObjectProfile",
"(",
"self",
",",
"pid",
",",
"asOfDateTime",
"=",
"None",
")",
":",
"# /objects/{pid} ? [format] [asOfDateTime]",
"http_args",
"=",
"{",
"}",
"if",
"asOfDateTime",
":",
"http_args",
"[",
"'asOfDateTime'",
"]",
"=",
"datetime_to_fedoratime",
"(",
"asOfDateTime",
")",
"http_args",
".",
"update",
"(",
"self",
".",
"format_xml",
")",
"url",
"=",
"'objects/%(pid)s'",
"%",
"{",
"'pid'",
":",
"pid",
"}",
"return",
"self",
".",
"get",
"(",
"url",
",",
"params",
"=",
"http_args",
")"
] | 46.25
| 16.1875
|
def _convert_number_to_subscript(num):
"""
Converts number into subscript
input = ["a", "a1", "a2", "a3", "be2", "be3", "bad2", "bad3"]
output = ["a", "a₁", "a₂", "a₃", "be₂", "be₃", "bad₂", "bad₃"]
:param num: number called after sign
:return: number in subscript
"""
subscript = ''
for character in str(num):
subscript += chr(0x2080 + int(character))
return subscript
|
[
"def",
"_convert_number_to_subscript",
"(",
"num",
")",
":",
"subscript",
"=",
"''",
"for",
"character",
"in",
"str",
"(",
"num",
")",
":",
"subscript",
"+=",
"chr",
"(",
"0x2080",
"+",
"int",
"(",
"character",
")",
")",
"return",
"subscript"
] | 32.142857
| 14.571429
|
def get_constraints(self, cursor, table_name):
    """
    Retrieve any constraints or keys (unique, pk, fk, check, index) across
    one or more columns. Also retrieve the definition of expression-based
    indexes.

    :param cursor: open database cursor used to run the catalog queries.
    :param table_name: table to inspect, looked up within
        ``self.connection.schema_name``.
    :return: dict mapping constraint/index name to a description dict.
    """
    constraints = {}
    # Loop over the key table, collecting things as constraints. The column
    # array must return column names in the same order in which they were
    # created.
    # The subquery containing generate_series can be replaced with
    # "WITH ORDINALITY" when support for PostgreSQL 9.3 is dropped.
    cursor.execute("""
        SELECT
            c.conname,
            array(
                SELECT attname
                FROM (
                    SELECT unnest(c.conkey) AS colid,
                        generate_series(1, array_length(c.conkey, 1)) AS arridx
                ) AS cols
                JOIN pg_attribute AS ca ON cols.colid = ca.attnum
                WHERE ca.attrelid = c.conrelid
                ORDER BY cols.arridx
            ),
            c.contype,
            (SELECT fkc.relname || '.' || fka.attname
            FROM pg_attribute AS fka
            JOIN pg_class AS fkc ON fka.attrelid = fkc.oid
            WHERE fka.attrelid = c.confrelid AND fka.attnum = c.confkey[1]),
            cl.reloptions
        FROM pg_constraint AS c
        JOIN pg_class AS cl ON c.conrelid = cl.oid
        JOIN pg_namespace AS ns ON cl.relnamespace = ns.oid
        WHERE ns.nspname = %s AND cl.relname = %s
    """, [self.connection.schema_name, table_name])
    # pg_constraint.contype codes: 'p' = primary key, 'u' = unique,
    # 'f' = foreign key, 'c' = check.
    for constraint, columns, kind, used_cols, options in cursor.fetchall():
        constraints[constraint] = {
            "columns": columns,
            "primary_key": kind == "p",
            "unique": kind in ["p", "u"],
            "foreign_key": tuple(used_cols.split(".", 1)) if kind == "f" else None,
            "check": kind == "c",
            "index": False,
            "definition": None,
            "options": options,
        }
    # Now get indexes
    # The row_number() function for ordering the index fields can be
    # replaced by WITH ORDINALITY in the unnest() functions when support
    # for PostgreSQL 9.3 is dropped.
    cursor.execute("""
        SELECT
            indexname, array_agg(attname ORDER BY rnum), indisunique, indisprimary,
            array_agg(ordering ORDER BY rnum), amname, exprdef, s2.attoptions
        FROM (
            SELECT
                row_number() OVER () as rnum, c2.relname as indexname,
                idx.*, attr.attname, am.amname,
                CASE
                    WHEN idx.indexprs IS NOT NULL THEN
                        pg_get_indexdef(idx.indexrelid)
                END AS exprdef,
                CASE am.amname
                    WHEN 'btree' THEN
                        CASE (option & 1)
                            WHEN 1 THEN 'DESC' ELSE 'ASC'
                        END
                END as ordering,
                c2.reloptions as attoptions
            FROM (
                SELECT
                    *, unnest(i.indkey) as key, unnest(i.indoption) as option
                FROM pg_index i
            ) idx
            LEFT JOIN pg_class c ON idx.indrelid = c.oid
            LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
            LEFT JOIN pg_class c2 ON idx.indexrelid = c2.oid
            LEFT JOIN pg_am am ON c2.relam = am.oid
            LEFT JOIN pg_attribute attr ON attr.attrelid = c.oid AND attr.attnum = idx.key
            WHERE c.relname = %s and n.nspname = %s
        ) s2
        GROUP BY indexname, indisunique, indisprimary, amname, exprdef, attoptions;
    """, [table_name, self.connection.schema_name])
    # Indexes already recorded as constraints above are not overwritten;
    # only additional standalone indexes are added.
    for index, columns, unique, primary, orders, type_, definition, options in cursor.fetchall():
        if index not in constraints:
            constraints[index] = {
                "columns": columns if columns != [None] else [],
                "orders": orders if orders != [None] else [],
                "primary_key": primary,
                "unique": unique,
                "foreign_key": None,
                "check": False,
                "index": True,
                "type": Index.suffix if type_ == 'btree' else type_,
                "definition": definition,
                "options": options,
            }
    return constraints
|
[
"def",
"get_constraints",
"(",
"self",
",",
"cursor",
",",
"table_name",
")",
":",
"constraints",
"=",
"{",
"}",
"# Loop over the key table, collecting things as constraints. The column",
"# array must return column names in the same order in which they were",
"# created.",
"# The subquery containing generate_series can be replaced with",
"# \"WITH ORDINALITY\" when support for PostgreSQL 9.3 is dropped.",
"cursor",
".",
"execute",
"(",
"\"\"\"\n SELECT\n c.conname,\n array(\n SELECT attname\n FROM (\n SELECT unnest(c.conkey) AS colid,\n generate_series(1, array_length(c.conkey, 1)) AS arridx\n ) AS cols\n JOIN pg_attribute AS ca ON cols.colid = ca.attnum\n WHERE ca.attrelid = c.conrelid\n ORDER BY cols.arridx\n ),\n c.contype,\n (SELECT fkc.relname || '.' || fka.attname\n FROM pg_attribute AS fka\n JOIN pg_class AS fkc ON fka.attrelid = fkc.oid\n WHERE fka.attrelid = c.confrelid AND fka.attnum = c.confkey[1]),\n cl.reloptions\n FROM pg_constraint AS c\n JOIN pg_class AS cl ON c.conrelid = cl.oid\n JOIN pg_namespace AS ns ON cl.relnamespace = ns.oid\n WHERE ns.nspname = %s AND cl.relname = %s\n \"\"\"",
",",
"[",
"self",
".",
"connection",
".",
"schema_name",
",",
"table_name",
"]",
")",
"for",
"constraint",
",",
"columns",
",",
"kind",
",",
"used_cols",
",",
"options",
"in",
"cursor",
".",
"fetchall",
"(",
")",
":",
"constraints",
"[",
"constraint",
"]",
"=",
"{",
"\"columns\"",
":",
"columns",
",",
"\"primary_key\"",
":",
"kind",
"==",
"\"p\"",
",",
"\"unique\"",
":",
"kind",
"in",
"[",
"\"p\"",
",",
"\"u\"",
"]",
",",
"\"foreign_key\"",
":",
"tuple",
"(",
"used_cols",
".",
"split",
"(",
"\".\"",
",",
"1",
")",
")",
"if",
"kind",
"==",
"\"f\"",
"else",
"None",
",",
"\"check\"",
":",
"kind",
"==",
"\"c\"",
",",
"\"index\"",
":",
"False",
",",
"\"definition\"",
":",
"None",
",",
"\"options\"",
":",
"options",
",",
"}",
"# Now get indexes",
"# The row_number() function for ordering the index fields can be",
"# replaced by WITH ORDINALITY in the unnest() functions when support",
"# for PostgreSQL 9.3 is dropped.",
"cursor",
".",
"execute",
"(",
"\"\"\"\n SELECT\n indexname, array_agg(attname ORDER BY rnum), indisunique, indisprimary,\n array_agg(ordering ORDER BY rnum), amname, exprdef, s2.attoptions\n FROM (\n SELECT\n row_number() OVER () as rnum, c2.relname as indexname,\n idx.*, attr.attname, am.amname,\n CASE\n WHEN idx.indexprs IS NOT NULL THEN\n pg_get_indexdef(idx.indexrelid)\n END AS exprdef,\n CASE am.amname\n WHEN 'btree' THEN\n CASE (option & 1)\n WHEN 1 THEN 'DESC' ELSE 'ASC'\n END\n END as ordering,\n c2.reloptions as attoptions\n FROM (\n SELECT\n *, unnest(i.indkey) as key, unnest(i.indoption) as option\n FROM pg_index i\n ) idx\n LEFT JOIN pg_class c ON idx.indrelid = c.oid\n LEFT JOIN pg_namespace n ON n.oid = c.relnamespace\n LEFT JOIN pg_class c2 ON idx.indexrelid = c2.oid\n LEFT JOIN pg_am am ON c2.relam = am.oid\n LEFT JOIN pg_attribute attr ON attr.attrelid = c.oid AND attr.attnum = idx.key\n WHERE c.relname = %s and n.nspname = %s\n ) s2\n GROUP BY indexname, indisunique, indisprimary, amname, exprdef, attoptions;\n \"\"\"",
",",
"[",
"table_name",
",",
"self",
".",
"connection",
".",
"schema_name",
"]",
")",
"for",
"index",
",",
"columns",
",",
"unique",
",",
"primary",
",",
"orders",
",",
"type_",
",",
"definition",
",",
"options",
"in",
"cursor",
".",
"fetchall",
"(",
")",
":",
"if",
"index",
"not",
"in",
"constraints",
":",
"constraints",
"[",
"index",
"]",
"=",
"{",
"\"columns\"",
":",
"columns",
"if",
"columns",
"!=",
"[",
"None",
"]",
"else",
"[",
"]",
",",
"\"orders\"",
":",
"orders",
"if",
"orders",
"!=",
"[",
"None",
"]",
"else",
"[",
"]",
",",
"\"primary_key\"",
":",
"primary",
",",
"\"unique\"",
":",
"unique",
",",
"\"foreign_key\"",
":",
"None",
",",
"\"check\"",
":",
"False",
",",
"\"index\"",
":",
"True",
",",
"\"type\"",
":",
"Index",
".",
"suffix",
"if",
"type_",
"==",
"'btree'",
"else",
"type_",
",",
"\"definition\"",
":",
"definition",
",",
"\"options\"",
":",
"options",
",",
"}",
"return",
"constraints"
] | 42.606061
| 17.434343
|
def restart(gandi, resource, background, force):
    """Restart a PaaS instance.
    Resource can be a vhost, a hostname, or an ID
    """
    display_keys = ['id', 'type', 'step']
    known_resources = gandi.paas.resource_list()
    # Reject the whole command as soon as any requested instance is unknown.
    for entry in resource:
        if entry in known_resources:
            continue
        gandi.echo('Sorry PaaS instance %s does not exist' % entry)
        gandi.echo('Please use one of the following: %s' %
                   known_resources)
        return
    if not force:
        names = "'%s'" % ', '.join(resource)
        confirmed = click.confirm("Are you sure to restart PaaS instance %s?" %
                                  names)
        if not confirmed:
            return
    opers = gandi.paas.restart(resource, background)
    if background:
        for oper in opers:
            output_generic(gandi, oper, display_keys)
    return opers
|
[
"def",
"restart",
"(",
"gandi",
",",
"resource",
",",
"background",
",",
"force",
")",
":",
"output_keys",
"=",
"[",
"'id'",
",",
"'type'",
",",
"'step'",
"]",
"possible_resources",
"=",
"gandi",
".",
"paas",
".",
"resource_list",
"(",
")",
"for",
"item",
"in",
"resource",
":",
"if",
"item",
"not",
"in",
"possible_resources",
":",
"gandi",
".",
"echo",
"(",
"'Sorry PaaS instance %s does not exist'",
"%",
"item",
")",
"gandi",
".",
"echo",
"(",
"'Please use one of the following: %s'",
"%",
"possible_resources",
")",
"return",
"if",
"not",
"force",
":",
"instance_info",
"=",
"\"'%s'\"",
"%",
"', '",
".",
"join",
"(",
"resource",
")",
"proceed",
"=",
"click",
".",
"confirm",
"(",
"\"Are you sure to restart PaaS instance %s?\"",
"%",
"instance_info",
")",
"if",
"not",
"proceed",
":",
"return",
"opers",
"=",
"gandi",
".",
"paas",
".",
"restart",
"(",
"resource",
",",
"background",
")",
"if",
"background",
":",
"for",
"oper",
"in",
"opers",
":",
"output_generic",
"(",
"gandi",
",",
"oper",
",",
"output_keys",
")",
"return",
"opers"
] | 30.413793
| 19.37931
|
def from_helices(cls, assembly, cutoff=7.0, min_helix_length=8):
    """ Generate KnobGroup from the helices in the assembly - classic socket functionality.

    Notes
    -----
    Socket identifies knobs-into-holes (KIHs) packing motifs in protein structures.
    The following resources can provide more information:
    The socket webserver: http://coiledcoils.chm.bris.ac.uk/socket/server.html
    The help page: http://coiledcoils.chm.bris.ac.uk/socket/help.html
    The original publication reference: Walshaw, J. & Woolfson, D.N. (2001) J. Mol. Biol., 307 (5), 1427-1450.

    Parameters
    ----------
    assembly : Assembly
    cutoff : float
        Socket cutoff in Angstroms
    min_helix_length : int
        Minimum number of Residues in a helix considered for KIH packing.

    Returns
    -------
    instance : KnobGroup
        None if no helices or no kihs.
    """
    cutoff = float(cutoff)
    long_enough = [h for h in assembly.helices if len(h) >= min_helix_length]
    helices = Assembly(long_enough)
    if len(helices) <= 1:
        return None
    # Re-label polymers after their chain of origin, then re-parent and
    # renumber each helix and tag every residue with its helix.
    helices.relabel_polymers([h.ampal_parent.id for h in helices])
    for index, helix in enumerate(helices):
        helix.number = index
        helix.ampal_parent = helix[0].ampal_parent
        for residue in helix.get_monomers():
            residue.tags['helix'] = helix
    # Search for KIHs only within clusters of spatially close helices.
    all_kihs = []
    cluster_dict = cluster_helices(helices, cluster_distance=(cutoff + 10))
    for members in cluster_dict.values():
        if len(members) > 1:
            all_kihs.extend(find_kihs(members, cutoff=cutoff, hole_size=4))
    instance = cls(ampal_parent=helices, cutoff=cutoff)
    for kih in all_kihs:
        kih.ampal_parent = instance
    instance._monomers = all_kihs
    instance.relabel_monomers()
    return instance
|
[
"def",
"from_helices",
"(",
"cls",
",",
"assembly",
",",
"cutoff",
"=",
"7.0",
",",
"min_helix_length",
"=",
"8",
")",
":",
"cutoff",
"=",
"float",
"(",
"cutoff",
")",
"helices",
"=",
"Assembly",
"(",
"[",
"x",
"for",
"x",
"in",
"assembly",
".",
"helices",
"if",
"len",
"(",
"x",
")",
">=",
"min_helix_length",
"]",
")",
"if",
"len",
"(",
"helices",
")",
"<=",
"1",
":",
"return",
"None",
"# reassign ampal_parents",
"helices",
".",
"relabel_polymers",
"(",
"[",
"x",
".",
"ampal_parent",
".",
"id",
"for",
"x",
"in",
"helices",
"]",
")",
"for",
"i",
",",
"h",
"in",
"enumerate",
"(",
"helices",
")",
":",
"h",
".",
"number",
"=",
"i",
"h",
".",
"ampal_parent",
"=",
"h",
"[",
"0",
"]",
".",
"ampal_parent",
"for",
"r",
"in",
"h",
".",
"get_monomers",
"(",
")",
":",
"r",
".",
"tags",
"[",
"'helix'",
"]",
"=",
"h",
"all_kihs",
"=",
"[",
"]",
"cluster_dict",
"=",
"cluster_helices",
"(",
"helices",
",",
"cluster_distance",
"=",
"(",
"cutoff",
"+",
"10",
")",
")",
"for",
"k",
",",
"v",
"in",
"cluster_dict",
".",
"items",
"(",
")",
":",
"if",
"len",
"(",
"v",
")",
">",
"1",
":",
"kihs",
"=",
"find_kihs",
"(",
"v",
",",
"cutoff",
"=",
"cutoff",
",",
"hole_size",
"=",
"4",
")",
"if",
"len",
"(",
"kihs",
")",
"==",
"0",
":",
"continue",
"for",
"x",
"in",
"kihs",
":",
"all_kihs",
".",
"append",
"(",
"x",
")",
"instance",
"=",
"cls",
"(",
"ampal_parent",
"=",
"helices",
",",
"cutoff",
"=",
"cutoff",
")",
"for",
"x",
"in",
"all_kihs",
":",
"x",
".",
"ampal_parent",
"=",
"instance",
"instance",
".",
"_monomers",
"=",
"all_kihs",
"instance",
".",
"relabel_monomers",
"(",
")",
"return",
"instance"
] | 39.38
| 18.86
|
def get_all_terms(self):
    """
    Return all of the terms in the account.
    https://canvas.instructure.com/doc/api/enrollment_terms.html#method.terms_api.index
    """
    if not self._canvas_account_id:
        raise MissingAccountID()
    url = ACCOUNTS_API.format(self._canvas_account_id) + "/terms"
    query = {"workflow_state": 'all', 'per_page': 500}
    data_key = 'enrollment_terms'
    response = self._get_paged_resource(url, query, data_key)
    # Wrap each raw term dict in a CanvasTerm model object.
    return [CanvasTerm(data=entry) for entry in response[data_key]]
|
[
"def",
"get_all_terms",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_canvas_account_id",
":",
"raise",
"MissingAccountID",
"(",
")",
"params",
"=",
"{",
"\"workflow_state\"",
":",
"'all'",
",",
"'per_page'",
":",
"500",
"}",
"url",
"=",
"ACCOUNTS_API",
".",
"format",
"(",
"self",
".",
"_canvas_account_id",
")",
"+",
"\"/terms\"",
"data_key",
"=",
"'enrollment_terms'",
"terms",
"=",
"[",
"]",
"response",
"=",
"self",
".",
"_get_paged_resource",
"(",
"url",
",",
"params",
",",
"data_key",
")",
"for",
"data",
"in",
"response",
"[",
"data_key",
"]",
":",
"terms",
".",
"append",
"(",
"CanvasTerm",
"(",
"data",
"=",
"data",
")",
")",
"return",
"terms"
] | 36.117647
| 16.823529
|
def _GetAPFSVolumeIdentifiers(self, scan_node):
    """Determines the APFS volume identifiers.

    Args:
      scan_node (dfvfs.SourceScanNode): scan node.

    Returns:
      list[str]: APFS volume identifiers.

    Raises:
      SourceScannerError: if the format of or within the source is not
          supported or the scan node is invalid.
      UserAbort: if the user requested to abort.
    """
    if not scan_node or not scan_node.path_spec:
      raise errors.SourceScannerError('Invalid scan node.')
    volume_system = apfs_volume_system.APFSVolumeSystem()
    volume_system.Open(scan_node.path_spec)
    volume_identifiers = self._source_scanner.GetVolumeIdentifiers(
        volume_system)
    if not volume_identifiers:
      return []
    # TODO: refactor self._volumes to use scan options.
    if self._volumes:
      if self._volumes == 'all':
        # Volume numbers are 1-based.
        volumes = range(1, volume_system.number_of_volumes + 1)
      else:
        volumes = self._volumes
      selected_volume_identifiers = self._NormalizedVolumeIdentifiers(
          volume_system, volumes, prefix='apfs')
      # Honor the preselected volumes only when every one of them exists in
      # the volume system; otherwise fall through to prompting the user.
      if not set(selected_volume_identifiers).difference(volume_identifiers):
        return selected_volume_identifiers
    # Only prompt when there is an actual choice to make.
    if len(volume_identifiers) > 1:
      try:
        volume_identifiers = self._PromptUserForAPFSVolumeIdentifiers(
            volume_system, volume_identifiers)
      except KeyboardInterrupt:
        raise errors.UserAbort('File system scan aborted.')
    return self._NormalizedVolumeIdentifiers(
        volume_system, volume_identifiers, prefix='apfs')
|
[
"def",
"_GetAPFSVolumeIdentifiers",
"(",
"self",
",",
"scan_node",
")",
":",
"if",
"not",
"scan_node",
"or",
"not",
"scan_node",
".",
"path_spec",
":",
"raise",
"errors",
".",
"SourceScannerError",
"(",
"'Invalid scan node.'",
")",
"volume_system",
"=",
"apfs_volume_system",
".",
"APFSVolumeSystem",
"(",
")",
"volume_system",
".",
"Open",
"(",
"scan_node",
".",
"path_spec",
")",
"volume_identifiers",
"=",
"self",
".",
"_source_scanner",
".",
"GetVolumeIdentifiers",
"(",
"volume_system",
")",
"if",
"not",
"volume_identifiers",
":",
"return",
"[",
"]",
"# TODO: refactor self._volumes to use scan options.",
"if",
"self",
".",
"_volumes",
":",
"if",
"self",
".",
"_volumes",
"==",
"'all'",
":",
"volumes",
"=",
"range",
"(",
"1",
",",
"volume_system",
".",
"number_of_volumes",
"+",
"1",
")",
"else",
":",
"volumes",
"=",
"self",
".",
"_volumes",
"selected_volume_identifiers",
"=",
"self",
".",
"_NormalizedVolumeIdentifiers",
"(",
"volume_system",
",",
"volumes",
",",
"prefix",
"=",
"'apfs'",
")",
"if",
"not",
"set",
"(",
"selected_volume_identifiers",
")",
".",
"difference",
"(",
"volume_identifiers",
")",
":",
"return",
"selected_volume_identifiers",
"if",
"len",
"(",
"volume_identifiers",
")",
">",
"1",
":",
"try",
":",
"volume_identifiers",
"=",
"self",
".",
"_PromptUserForAPFSVolumeIdentifiers",
"(",
"volume_system",
",",
"volume_identifiers",
")",
"except",
"KeyboardInterrupt",
":",
"raise",
"errors",
".",
"UserAbort",
"(",
"'File system scan aborted.'",
")",
"return",
"self",
".",
"_NormalizedVolumeIdentifiers",
"(",
"volume_system",
",",
"volume_identifiers",
",",
"prefix",
"=",
"'apfs'",
")"
] | 32.702128
| 20.93617
|
def format(self, title=None, subtitle=None, prologue_text=None, epilogue_text=None, items=None):
    """
    Format the menu and return as a string.

    Any argument that is not None overrides the corresponding stored
    section content before rendering.

    :return: a string representation of the formatted menu.
    """
    self.clear_data()
    # Header is always rendered; apply any overrides first.
    if title is not None:
        self.__header.title = title
    if subtitle is not None:
        self.__header.subtitle = subtitle
    sections = [self.__header]
    # Optional sections are only rendered when content was supplied.
    if prologue_text is not None:
        self.__prologue.text = prologue_text
        sections.append(self.__prologue)
    if items is not None:
        self.__items_section.items = items
        sections.append(self.__items_section)
    if epilogue_text is not None:
        self.__epilogue.text = epilogue_text
        sections.append(self.__epilogue)
    sections.append(self.__footer)
    sections.append(self.__prompt)
    rendered = []
    for section in sections:
        rendered.append("\n".join(section.generate()))
        # Don't add newline to prompt so input is on same line as prompt
        if not isinstance(section, MenuPrompt):
            rendered.append("\n")
    return "".join(rendered)
|
[
"def",
"format",
"(",
"self",
",",
"title",
"=",
"None",
",",
"subtitle",
"=",
"None",
",",
"prologue_text",
"=",
"None",
",",
"epilogue_text",
"=",
"None",
",",
"items",
"=",
"None",
")",
":",
"self",
".",
"clear_data",
"(",
")",
"content",
"=",
"''",
"# Header Section",
"if",
"title",
"is",
"not",
"None",
":",
"self",
".",
"__header",
".",
"title",
"=",
"title",
"if",
"subtitle",
"is",
"not",
"None",
":",
"self",
".",
"__header",
".",
"subtitle",
"=",
"subtitle",
"sections",
"=",
"[",
"self",
".",
"__header",
"]",
"# Prologue Section",
"if",
"prologue_text",
"is",
"not",
"None",
":",
"self",
".",
"__prologue",
".",
"text",
"=",
"prologue_text",
"sections",
".",
"append",
"(",
"self",
".",
"__prologue",
")",
"# Items Section",
"if",
"items",
"is",
"not",
"None",
":",
"self",
".",
"__items_section",
".",
"items",
"=",
"items",
"sections",
".",
"append",
"(",
"self",
".",
"__items_section",
")",
"# Epilogue Section",
"if",
"epilogue_text",
"is",
"not",
"None",
":",
"self",
".",
"__epilogue",
".",
"text",
"=",
"epilogue_text",
"sections",
".",
"append",
"(",
"self",
".",
"__epilogue",
")",
"sections",
".",
"append",
"(",
"self",
".",
"__footer",
")",
"sections",
".",
"append",
"(",
"self",
".",
"__prompt",
")",
"for",
"sect",
"in",
"sections",
":",
"content",
"+=",
"\"\\n\"",
".",
"join",
"(",
"sect",
".",
"generate",
"(",
")",
")",
"# Don't add newline to prompt so input is on same line as prompt",
"if",
"not",
"isinstance",
"(",
"sect",
",",
"MenuPrompt",
")",
":",
"content",
"+=",
"\"\\n\"",
"return",
"content"
] | 38.333333
| 11.060606
|
def _parse_table(self):
    """Parse a wikicode table by starting with the first line."""
    reset = self._head
    self._head += 2
    try:
        self._push(contexts.TABLE_OPEN)
        padding = self._handle_table_style("\n")
    except BadRoute:
        # Not actually a table opening: rewind and emit the brace literally.
        self._head = reset
        self._emit_text("{")
        return
    style = self._pop()
    self._head += 1
    restore_point = self._stack_ident
    try:
        table = self._parse(contexts.TABLE_OPEN)
    except BadRoute:
        # Parsing failed inside the table body: unwind every stack pushed
        # since the restore point (memoizing the bad route so it is not
        # retried), then rewind and emit the brace literally.
        while self._stack_ident != restore_point:
            self._memoize_bad_route()
            self._pop()
        self._head = reset
        self._emit_text("{")
        return
    self._emit_table_tag("{|", "table", style, padding, None, table, "|}")
    # Offset displacement done by _parse():
    self._head -= 1
|
[
"def",
"_parse_table",
"(",
"self",
")",
":",
"reset",
"=",
"self",
".",
"_head",
"self",
".",
"_head",
"+=",
"2",
"try",
":",
"self",
".",
"_push",
"(",
"contexts",
".",
"TABLE_OPEN",
")",
"padding",
"=",
"self",
".",
"_handle_table_style",
"(",
"\"\\n\"",
")",
"except",
"BadRoute",
":",
"self",
".",
"_head",
"=",
"reset",
"self",
".",
"_emit_text",
"(",
"\"{\"",
")",
"return",
"style",
"=",
"self",
".",
"_pop",
"(",
")",
"self",
".",
"_head",
"+=",
"1",
"restore_point",
"=",
"self",
".",
"_stack_ident",
"try",
":",
"table",
"=",
"self",
".",
"_parse",
"(",
"contexts",
".",
"TABLE_OPEN",
")",
"except",
"BadRoute",
":",
"while",
"self",
".",
"_stack_ident",
"!=",
"restore_point",
":",
"self",
".",
"_memoize_bad_route",
"(",
")",
"self",
".",
"_pop",
"(",
")",
"self",
".",
"_head",
"=",
"reset",
"self",
".",
"_emit_text",
"(",
"\"{\"",
")",
"return",
"self",
".",
"_emit_table_tag",
"(",
"\"{|\"",
",",
"\"table\"",
",",
"style",
",",
"padding",
",",
"None",
",",
"table",
",",
"\"|}\"",
")",
"# Offset displacement done by _parse():",
"self",
".",
"_head",
"-=",
"1"
] | 31.428571
| 15.821429
|
def revokeRegistIssue(self, CorpNum, mgtKey, orgConfirmNum, orgTradeDate, smssendYN=False, memo=None, UserID=None,
                      isPartCancel=False, cancelType=None, supplyCost=None, tax=None, serviceFee=None,
                      totalAmount=None):
    """ Immediately register and issue a cancellation (revoke) cash receipt.
    args
        CorpNum : Popbill member's business registration number
        mgtKey : document management key of the cash receipt
        orgConfirmNum : approval number of the original cash receipt
        orgTradeDate : trade date of the original cash receipt
        smssendYN : whether to send an issuance notification SMS
        memo : memo
        UserID : Popbill member user ID
        isPartCancel : whether this is a partial cancellation
        cancelType : cancellation reason
        supplyCost : [cancellation] supply cost
        tax : [cancellation] tax amount
        serviceFee : [cancellation] service fee
        totalAmount : [cancellation] total amount
    return
        processing result. consist of code and message
    raise
        PopbillException
    """
    # Build the request payload (serialized by the project's _stringtify helper).
    postData = self._stringtify({
        "mgtKey": mgtKey,
        "orgConfirmNum": orgConfirmNum,
        "orgTradeDate": orgTradeDate,
        "smssendYN": smssendYN,
        "memo": memo,
        "isPartCancel": isPartCancel,
        "cancelType": cancelType,
        "supplyCost": supplyCost,
        "tax": tax,
        "serviceFee": serviceFee,
        "totalAmount": totalAmount,
    })
    # The "REVOKEISSUE" action requests immediate issuance of the cancellation receipt.
    return self._httppost('/Cashbill', postData, CorpNum, UserID, "REVOKEISSUE")
|
[
"def",
"revokeRegistIssue",
"(",
"self",
",",
"CorpNum",
",",
"mgtKey",
",",
"orgConfirmNum",
",",
"orgTradeDate",
",",
"smssendYN",
"=",
"False",
",",
"memo",
"=",
"None",
",",
"UserID",
"=",
"None",
",",
"isPartCancel",
"=",
"False",
",",
"cancelType",
"=",
"None",
",",
"supplyCost",
"=",
"None",
",",
"tax",
"=",
"None",
",",
"serviceFee",
"=",
"None",
",",
"totalAmount",
"=",
"None",
")",
":",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"{",
"\"mgtKey\"",
":",
"mgtKey",
",",
"\"orgConfirmNum\"",
":",
"orgConfirmNum",
",",
"\"orgTradeDate\"",
":",
"orgTradeDate",
",",
"\"smssendYN\"",
":",
"smssendYN",
",",
"\"memo\"",
":",
"memo",
",",
"\"isPartCancel\"",
":",
"isPartCancel",
",",
"\"cancelType\"",
":",
"cancelType",
",",
"\"supplyCost\"",
":",
"supplyCost",
",",
"\"tax\"",
":",
"tax",
",",
"\"serviceFee\"",
":",
"serviceFee",
",",
"\"totalAmount\"",
":",
"totalAmount",
",",
"}",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/Cashbill'",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
",",
"\"REVOKEISSUE\"",
")"
] | 37.358974
| 12.717949
|
def to_jsonf(self, fpath: str, encoding: str='utf8', indent: int=None, ignore_none: bool=True, ignore_empty: bool=False) -> str:
    """From instance to json file

    :param fpath: Json file path
    :param encoding: Json file encoding
    :param indent: Number of indentation
    :param ignore_none: Properties which is None are excluded if True
    :param ignore_empty: Properties which is empty are excluded if True
    :return: Json file path
    """
    # Convert the instance into plain serializable data, then write it out.
    payload = traverse(self, ignore_none, force_value=True,
                       ignore_empty=ignore_empty)
    return util.save_jsonf(payload, fpath, encoding, indent)
|
[
"def",
"to_jsonf",
"(",
"self",
",",
"fpath",
":",
"str",
",",
"encoding",
":",
"str",
"=",
"'utf8'",
",",
"indent",
":",
"int",
"=",
"None",
",",
"ignore_none",
":",
"bool",
"=",
"True",
",",
"ignore_empty",
":",
"bool",
"=",
"False",
")",
"->",
"str",
":",
"return",
"util",
".",
"save_jsonf",
"(",
"traverse",
"(",
"self",
",",
"ignore_none",
",",
"force_value",
"=",
"True",
",",
"ignore_empty",
"=",
"ignore_empty",
")",
",",
"fpath",
",",
"encoding",
",",
"indent",
")"
] | 55.181818
| 27.727273
|
def reset_passwd(self, data):
    """ Reset the user password.

    :param data: dict containing the new password ("passwd", "passwd2")
        and the reset token ("reset_hash") from the reset e-mail link.
    :return: (msg, error) tuple; msg is a localized feedback message and
        error is True when the reset could not be performed.
    """
    error = False
    msg = ""
    # Check input format
    if len(data["passwd"]) < 6:
        error = True
        msg = _("Password too short.")
    elif data["passwd"] != data["passwd2"]:
        error = True
        msg = _("Passwords don't match !")
    if not error:
        # NOTE(review): unsalted SHA-512 is a weak password hash; a salted
        # adaptive scheme (bcrypt/scrypt/argon2) would be preferable, but
        # switching requires migrating every stored hash.
        passwd_hash = hashlib.sha512(data["passwd"].encode("utf-8")).hexdigest()
        # Atomically consume the reset token: set the new password and drop
        # the "reset"/"activate" markers in one find-and-update.
        user = self.database.users.find_one_and_update({"reset": data["reset_hash"]},
                                                       {"$set": {"password": passwd_hash},
                                                        "$unset": {"reset": True, "activate": True}})
        if user is None:
            error = True
            msg = _("Invalid reset hash.")
        else:
            msg = _("Your password has been successfully changed.")
    return msg, error
|
[
"def",
"reset_passwd",
"(",
"self",
",",
"data",
")",
":",
"error",
"=",
"False",
"msg",
"=",
"\"\"",
"# Check input format",
"if",
"len",
"(",
"data",
"[",
"\"passwd\"",
"]",
")",
"<",
"6",
":",
"error",
"=",
"True",
"msg",
"=",
"_",
"(",
"\"Password too short.\"",
")",
"elif",
"data",
"[",
"\"passwd\"",
"]",
"!=",
"data",
"[",
"\"passwd2\"",
"]",
":",
"error",
"=",
"True",
"msg",
"=",
"_",
"(",
"\"Passwords don't match !\"",
")",
"if",
"not",
"error",
":",
"passwd_hash",
"=",
"hashlib",
".",
"sha512",
"(",
"data",
"[",
"\"passwd\"",
"]",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
".",
"hexdigest",
"(",
")",
"user",
"=",
"self",
".",
"database",
".",
"users",
".",
"find_one_and_update",
"(",
"{",
"\"reset\"",
":",
"data",
"[",
"\"reset_hash\"",
"]",
"}",
",",
"{",
"\"$set\"",
":",
"{",
"\"password\"",
":",
"passwd_hash",
"}",
",",
"\"$unset\"",
":",
"{",
"\"reset\"",
":",
"True",
",",
"\"activate\"",
":",
"True",
"}",
"}",
")",
"if",
"user",
"is",
"None",
":",
"error",
"=",
"True",
"msg",
"=",
"_",
"(",
"\"Invalid reset hash.\"",
")",
"else",
":",
"msg",
"=",
"_",
"(",
"\"Your password has been successfully changed.\"",
")",
"return",
"msg",
",",
"error"
] | 38.36
| 22.72
|
def _set_system_utilization(self, v, load=False):
    """
    Setter method for system_utilization, mapped from YANG variable /telemetry/profile/system_utilization (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_system_utilization is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_system_utilization() directly.
    """
    # NOTE: `load` is part of the generated setter API but is unused here.
    # Let wrapped/typed values normalize themselves before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value in the generated YANG list type; a TypeError or
        # ValueError here means the supplied value is not list-compatible.
        t = YANGDynClass(v,base=YANGListType("name",system_utilization.system_utilization, yang_name="system-utilization", rest_name="system-utilization", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'SystemProfile', u'info': u'System Utilization profile'}}), is_container='list', yang_name="system-utilization", rest_name="system-utilization", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'SystemProfile', u'info': u'System Utilization profile'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """system_utilization must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("name",system_utilization.system_utilization, yang_name="system-utilization", rest_name="system-utilization", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'SystemProfile', u'info': u'System Utilization profile'}}), is_container='list', yang_name="system-utilization", rest_name="system-utilization", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'SystemProfile', u'info': u'System Utilization profile'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)""",
        })
    # Store the validated value and notify the framework, if supported.
    self.__system_utilization = t
    if hasattr(self, '_set'):
        self._set()
|
[
"def",
"_set_system_utilization",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"YANGListType",
"(",
"\"name\"",
",",
"system_utilization",
".",
"system_utilization",
",",
"yang_name",
"=",
"\"system-utilization\"",
",",
"rest_name",
"=",
"\"system-utilization\"",
",",
"parent",
"=",
"self",
",",
"is_container",
"=",
"'list'",
",",
"user_ordered",
"=",
"False",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"yang_keys",
"=",
"'name'",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'cli-full-command'",
":",
"None",
",",
"u'cli-suppress-list-no'",
":",
"None",
",",
"u'callpoint'",
":",
"u'SystemProfile'",
",",
"u'info'",
":",
"u'System Utilization profile'",
"}",
"}",
")",
",",
"is_container",
"=",
"'list'",
",",
"yang_name",
"=",
"\"system-utilization\"",
",",
"rest_name",
"=",
"\"system-utilization\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'cli-full-command'",
":",
"None",
",",
"u'cli-suppress-list-no'",
":",
"None",
",",
"u'callpoint'",
":",
"u'SystemProfile'",
",",
"u'info'",
":",
"u'System Utilization profile'",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-telemetry'",
",",
"defining_module",
"=",
"'brocade-telemetry'",
",",
"yang_type",
"=",
"'list'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"system_utilization must be of a type compatible with list\"\"\"",
",",
"'defined-type'",
":",
"\"list\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=YANGListType(\"name\",system_utilization.system_utilization, yang_name=\"system-utilization\", rest_name=\"system-utilization\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'SystemProfile', u'info': u'System Utilization profile'}}), is_container='list', yang_name=\"system-utilization\", rest_name=\"system-utilization\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'SystemProfile', u'info': u'System Utilization profile'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__system_utilization",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | 115.863636
| 55.636364
|
def json_get_default(json: JsonValue, path: str,
                     default: Any, expected_type: Any = ANY) -> Any:
    """Get a JSON value by path, falling back to *default*.

    Works exactly like json_get(), except that a missing path part
    (ValueError or IndexError) yields the provided default value:

    >>> json_get_default({}, "/foo", "I am a default value")
    'I am a default value'

    TypeErrors are still raised, as in json_get(), when an
    expected_type is provided and does not match:

    >>> json_get_default({"foo": "bar"}, "/foo", 123, int)
    Traceback (most recent call last):
    ...
    TypeError: wrong JSON type int != str
    """
    try:
        result = json_get(json, path, expected_type)
    except (ValueError, IndexError):
        return default
    return result
|
[
"def",
"json_get_default",
"(",
"json",
":",
"JsonValue",
",",
"path",
":",
"str",
",",
"default",
":",
"Any",
",",
"expected_type",
":",
"Any",
"=",
"ANY",
")",
"->",
"Any",
":",
"try",
":",
"return",
"json_get",
"(",
"json",
",",
"path",
",",
"expected_type",
")",
"except",
"(",
"ValueError",
",",
"IndexError",
")",
":",
"return",
"default"
] | 33.73913
| 19.608696
|
def sub(self):
    '''
    :param fields:
        Set fields to substitute
    :returns:
        The template with any previously set fields substituted in.
        When no fields were set up beforehand, :func:`raw` is returned
        unchanged.
    '''
    if not self.__fields:
        return self.raw
    return _Template(self.raw).substitute(self.__fields)
|
[
"def",
"sub",
"(",
"self",
")",
":",
"if",
"self",
".",
"__fields",
":",
"return",
"_Template",
"(",
"self",
".",
"raw",
")",
".",
"substitute",
"(",
"self",
".",
"__fields",
")",
"return",
"self",
".",
"raw"
] | 28.583333
| 22.083333
|
def process(self):
    """Run the registered receiver on this event.

    On failure, a 500 status is recorded on the event. The event
    itself is always returned so calls can be chained.
    """
    try:
        self.receiver(self)
    # TODO RESTException
    except Exception as exc:
        current_app.logger.exception('Could not process event.')
        self.response_code = 500
        self.response = {'status': 500, 'message': str(exc)}
    return self
|
[
"def",
"process",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"receiver",
"(",
"self",
")",
"# TODO RESTException",
"except",
"Exception",
"as",
"e",
":",
"current_app",
".",
"logger",
".",
"exception",
"(",
"'Could not process event.'",
")",
"self",
".",
"response_code",
"=",
"500",
"self",
".",
"response",
"=",
"dict",
"(",
"status",
"=",
"500",
",",
"message",
"=",
"str",
"(",
"e",
")",
")",
"return",
"self"
] | 33.8
| 15.4
|
def _sorted_nicely(self, l):
"""Return list sorted in the way that humans expect.
:param l: iterable to be sorted
:returns: sorted list
"""
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key = alphanum_key)
|
[
"def",
"_sorted_nicely",
"(",
"self",
",",
"l",
")",
":",
"convert",
"=",
"lambda",
"text",
":",
"int",
"(",
"text",
")",
"if",
"text",
".",
"isdigit",
"(",
")",
"else",
"text",
"alphanum_key",
"=",
"lambda",
"key",
":",
"[",
"convert",
"(",
"c",
")",
"for",
"c",
"in",
"re",
".",
"split",
"(",
"'([0-9]+)'",
",",
"key",
")",
"]",
"return",
"sorted",
"(",
"l",
",",
"key",
"=",
"alphanum_key",
")"
] | 41.222222
| 14.666667
|
def disable_servicegroup_host_checks(self, servicegroup):
    """Disable host checks for a servicegroup

    Format of the line that triggers function call::

        DISABLE_SERVICEGROUP_HOST_CHECKS;<servicegroup_name>

    :param servicegroup: servicegroup to disable
    :type servicegroup: alignak.objects.servicegroup.Servicegroup
    :return: None
    """
    known_services = self.daemon.services
    for service_id in servicegroup.get_services():
        # Ignore service ids the daemon does not know about.
        if service_id not in known_services:
            continue
        host_id = known_services[service_id].host
        self.disable_host_check(self.daemon.hosts[host_id])
|
[
"def",
"disable_servicegroup_host_checks",
"(",
"self",
",",
"servicegroup",
")",
":",
"for",
"service_id",
"in",
"servicegroup",
".",
"get_services",
"(",
")",
":",
"if",
"service_id",
"in",
"self",
".",
"daemon",
".",
"services",
":",
"host_id",
"=",
"self",
".",
"daemon",
".",
"services",
"[",
"service_id",
"]",
".",
"host",
"self",
".",
"disable_host_check",
"(",
"self",
".",
"daemon",
".",
"hosts",
"[",
"host_id",
"]",
")"
] | 43.5
| 19.071429
|
def check_spam(self, ip=None, email=None, name=None, login=None, realname=None,
               subject=None, body=None, subject_type='plain', body_type='plain'):
    """ http://api.yandex.ru/cleanweb/doc/dg/concepts/check-spam.xml
    subject_type = plain|html|bbcode
    body_type = plain|html|bbcode
    """
    # The body/subject keys carry their markup type in the field name.
    payload = {
        'ip': ip,
        'email': email,
        'name': name,
        'login': login,
        'realname': realname,
        'body-%s' % body_type: body,
        'subject-%s' % subject_type: subject,
    }
    response = self.request('post', 'http://cleanweb-api.yandex.ru/1.0/check-spam', data=payload)
    root = ET.fromstring(response.content)
    links = [(link.attrib['href'], yesnobool(link.attrib['spam-flag']))
             for link in root.findall('./links/link')]
    return {
        'id': root.findtext('id'),
        'spam_flag': yesnobool(root.find('text').attrib['spam-flag']),
        'links': links,
    }
|
[
"def",
"check_spam",
"(",
"self",
",",
"ip",
"=",
"None",
",",
"email",
"=",
"None",
",",
"name",
"=",
"None",
",",
"login",
"=",
"None",
",",
"realname",
"=",
"None",
",",
"subject",
"=",
"None",
",",
"body",
"=",
"None",
",",
"subject_type",
"=",
"'plain'",
",",
"body_type",
"=",
"'plain'",
")",
":",
"data",
"=",
"{",
"'ip'",
":",
"ip",
",",
"'email'",
":",
"email",
",",
"'name'",
":",
"name",
",",
"'login'",
":",
"login",
",",
"'realname'",
":",
"realname",
",",
"'body-%s'",
"%",
"body_type",
":",
"body",
",",
"'subject-%s'",
"%",
"subject_type",
":",
"subject",
"}",
"r",
"=",
"self",
".",
"request",
"(",
"'post'",
",",
"'http://cleanweb-api.yandex.ru/1.0/check-spam'",
",",
"data",
"=",
"data",
")",
"root",
"=",
"ET",
".",
"fromstring",
"(",
"r",
".",
"content",
")",
"return",
"{",
"'id'",
":",
"root",
".",
"findtext",
"(",
"'id'",
")",
",",
"'spam_flag'",
":",
"yesnobool",
"(",
"root",
".",
"find",
"(",
"'text'",
")",
".",
"attrib",
"[",
"'spam-flag'",
"]",
")",
",",
"'links'",
":",
"[",
"(",
"link",
".",
"attrib",
"[",
"'href'",
"]",
",",
"yesnobool",
"(",
"link",
".",
"attrib",
"[",
"'spam-flag'",
"]",
")",
")",
"for",
"link",
"in",
"root",
".",
"findall",
"(",
"'./links/link'",
")",
"]",
"}"
] | 59.733333
| 27.266667
|
def txt(self, txt, h=None, at_x=None, to_x=None, change_style=None, change_size=None):
    """print string to defined (at_x) position
    to_x can apply only if at_x is None and if used then forces align='R'
    """
    # Falsy h (None or 0) falls back to the default cell height.
    cell_height = h or self.height
    self._change_props(change_style, change_size)
    align = 'L'
    width = None
    if at_x is not None:
        self.oPdf.set_x(at_x)
    elif to_x is not None:
        # Right-align from the page origin up to to_x.
        align = 'R'
        self.oPdf.set_x(0)
        width = to_x
    if width is None:
        width = self.oPdf.get_string_width(txt)
    self.oPdf.cell(width, h=cell_height, txt=txt, align=align)
|
[
"def",
"txt",
"(",
"self",
",",
"txt",
",",
"h",
"=",
"None",
",",
"at_x",
"=",
"None",
",",
"to_x",
"=",
"None",
",",
"change_style",
"=",
"None",
",",
"change_size",
"=",
"None",
")",
":",
"h",
"=",
"h",
"or",
"self",
".",
"height",
"self",
".",
"_change_props",
"(",
"change_style",
",",
"change_size",
")",
"align",
"=",
"'L'",
"w",
"=",
"None",
"if",
"at_x",
"is",
"None",
":",
"if",
"to_x",
"is",
"not",
"None",
":",
"align",
"=",
"'R'",
"self",
".",
"oPdf",
".",
"set_x",
"(",
"0",
")",
"w",
"=",
"to_x",
"else",
":",
"self",
".",
"oPdf",
".",
"set_x",
"(",
"at_x",
")",
"if",
"w",
"is",
"None",
":",
"w",
"=",
"self",
".",
"oPdf",
".",
"get_string_width",
"(",
"txt",
")",
"self",
".",
"oPdf",
".",
"cell",
"(",
"w",
",",
"h",
"=",
"h",
",",
"txt",
"=",
"txt",
",",
"align",
"=",
"align",
")"
] | 35.944444
| 15.777778
|
def jsonify_parameters(params):
    """
    When sent in an authorized REST request, only strings and integers can be
    transmitted accurately. Other types of data need to be encoded into JSON.

    :param params: mapping of parameter names to values.
    :returns: new dict where every non-str/int value is JSON-encoded.
    """
    # ints (including bools) and strings pass through untouched; everything
    # else (lists, dicts, floats, None, ...) is serialized to a JSON string.
    return {
        param: value if isinstance(value, (int, str)) else json.dumps(value)
        for param, value in params.items()
    }
|
[
"def",
"jsonify_parameters",
"(",
"params",
")",
":",
"result",
"=",
"{",
"}",
"for",
"param",
",",
"value",
"in",
"params",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"(",
"int",
",",
"str",
")",
")",
":",
"result",
"[",
"param",
"]",
"=",
"value",
"else",
":",
"result",
"[",
"param",
"]",
"=",
"json",
".",
"dumps",
"(",
"value",
")",
"return",
"result"
] | 33.5
| 14.333333
|
def label_from_re(self, pat:str, full_path:bool=False, label_cls:Callable=None, **kwargs)->'LabelList':
    "Apply the re in `pat` to determine the label of every filename. If `full_path`, search in the full name."
    pat = re.compile(pat)
    def _extract(fname):
        # Search either the bare name or the full path, as requested.
        target = str((os.path.join(self.path, fname) if full_path else fname).as_posix())
        match = pat.search(target)
        assert match, f'Failed to find "{pat}" in "{target}"'
        return match.group(1)
    return self.label_from_func(_extract, label_cls=label_cls, **kwargs)
|
[
"def",
"label_from_re",
"(",
"self",
",",
"pat",
":",
"str",
",",
"full_path",
":",
"bool",
"=",
"False",
",",
"label_cls",
":",
"Callable",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
"->",
"'LabelList'",
":",
"pat",
"=",
"re",
".",
"compile",
"(",
"pat",
")",
"def",
"_inner",
"(",
"o",
")",
":",
"s",
"=",
"str",
"(",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
",",
"o",
")",
"if",
"full_path",
"else",
"o",
")",
".",
"as_posix",
"(",
")",
")",
"res",
"=",
"pat",
".",
"search",
"(",
"s",
")",
"assert",
"res",
",",
"f'Failed to find \"{pat}\" in \"{s}\"'",
"return",
"res",
".",
"group",
"(",
"1",
")",
"return",
"self",
".",
"label_from_func",
"(",
"_inner",
",",
"label_cls",
"=",
"label_cls",
",",
"*",
"*",
"kwargs",
")"
] | 60.111111
| 30.555556
|
def urlopen(url, session, referrer=None, max_content_bytes=None,
            timeout=ConnectionTimeoutSecs, raise_for_status=True,
            stream=False, data=None, useragent=UserAgent):
    """Open an URL and return the response object."""
    out.debug(u'Open URL %s' % url)
    request_headers = {'User-Agent': useragent}
    if referrer:
        request_headers['Referer'] = referrer
    out.debug(u'Sending headers %s' % request_headers, level=3)
    out.debug(u'Sending cookies %s' % session.cookies)
    request_args = {
        "headers": request_headers,
        "timeout": timeout,
    }
    # The streaming flag moved between requests releases.
    if not hasattr(requests, 'adapters'):
        # requests << 1.0
        request_args["prefetch"] = not stream
        request_args["config"] = {"max_retries": MaxRetries}
    else:
        # requests >= 1.0
        request_args["stream"] = stream
    if data is not None:
        request_args['data'] = data
        send = session.post
        out.debug(u'Sending POST data %s' % data, level=3)
    else:
        send = session.get
    try:
        response = send(url, **request_args)
        out.debug(u'Response cookies: %s' % response.cookies)
        check_content_size(url, response.headers, max_content_bytes)
        if raise_for_status:
            response.raise_for_status()
        return response
    except requests.exceptions.RequestException as err:
        raise IOError('URL retrieval of %s failed: %s' % (url, err))
|
[
"def",
"urlopen",
"(",
"url",
",",
"session",
",",
"referrer",
"=",
"None",
",",
"max_content_bytes",
"=",
"None",
",",
"timeout",
"=",
"ConnectionTimeoutSecs",
",",
"raise_for_status",
"=",
"True",
",",
"stream",
"=",
"False",
",",
"data",
"=",
"None",
",",
"useragent",
"=",
"UserAgent",
")",
":",
"out",
".",
"debug",
"(",
"u'Open URL %s'",
"%",
"url",
")",
"headers",
"=",
"{",
"'User-Agent'",
":",
"useragent",
"}",
"if",
"referrer",
":",
"headers",
"[",
"'Referer'",
"]",
"=",
"referrer",
"out",
".",
"debug",
"(",
"u'Sending headers %s'",
"%",
"headers",
",",
"level",
"=",
"3",
")",
"out",
".",
"debug",
"(",
"u'Sending cookies %s'",
"%",
"session",
".",
"cookies",
")",
"kwargs",
"=",
"{",
"\"headers\"",
":",
"headers",
",",
"\"timeout\"",
":",
"timeout",
",",
"}",
"if",
"hasattr",
"(",
"requests",
",",
"'adapters'",
")",
":",
"# requests >= 1.0",
"kwargs",
"[",
"\"stream\"",
"]",
"=",
"stream",
"else",
":",
"# requests << 1.0",
"kwargs",
"[",
"\"prefetch\"",
"]",
"=",
"not",
"stream",
"kwargs",
"[",
"\"config\"",
"]",
"=",
"{",
"\"max_retries\"",
":",
"MaxRetries",
"}",
"if",
"data",
"is",
"None",
":",
"func",
"=",
"session",
".",
"get",
"else",
":",
"kwargs",
"[",
"'data'",
"]",
"=",
"data",
"func",
"=",
"session",
".",
"post",
"out",
".",
"debug",
"(",
"u'Sending POST data %s'",
"%",
"data",
",",
"level",
"=",
"3",
")",
"try",
":",
"req",
"=",
"func",
"(",
"url",
",",
"*",
"*",
"kwargs",
")",
"out",
".",
"debug",
"(",
"u'Response cookies: %s'",
"%",
"req",
".",
"cookies",
")",
"check_content_size",
"(",
"url",
",",
"req",
".",
"headers",
",",
"max_content_bytes",
")",
"if",
"raise_for_status",
":",
"req",
".",
"raise_for_status",
"(",
")",
"return",
"req",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
"as",
"err",
":",
"msg",
"=",
"'URL retrieval of %s failed: %s'",
"%",
"(",
"url",
",",
"err",
")",
"raise",
"IOError",
"(",
"msg",
")"
] | 35.675676
| 15.540541
|
def merge_result(res):
    """
    Merge all items in `res` into a list.

    This command is used when sending a command to multiple nodes
    and the result from each node should be merged into a single list.

    :param res: mapping from node to an iterable of values.
    :raises ValueError: if ``res`` is not a dict.
    :returns: list of the distinct values across all nodes (unordered).
    """
    if not isinstance(res, dict):
        raise ValueError('Value should be of dict type')
    # Only the values are needed (keys are node names); a set de-duplicates.
    merged = set()
    for values in res.values():
        merged.update(values)
    return list(merged)
|
[
"def",
"merge_result",
"(",
"res",
")",
":",
"if",
"not",
"isinstance",
"(",
"res",
",",
"dict",
")",
":",
"raise",
"ValueError",
"(",
"'Value should be of dict type'",
")",
"result",
"=",
"set",
"(",
"[",
"]",
")",
"for",
"_",
",",
"v",
"in",
"res",
".",
"items",
"(",
")",
":",
"for",
"value",
"in",
"v",
":",
"result",
".",
"add",
"(",
"value",
")",
"return",
"list",
"(",
"result",
")"
] | 25
| 19.705882
|
def print_help(self, classes=False):
    """Print the help for each Configurable class in self.classes.

    If classes=False (the default), only flags and aliases are printed.
    """
    # NOTE: this module uses Python 2 print statements.
    self.print_subcommands()
    self.print_options()
    if classes:
        if self.classes:
            print "Class parameters"
            print "----------------"
            print
            # Wrap the long key/value help text into readable paragraphs.
            for p in wrap_paragraphs(self.keyvalue_description):
                print p
                print
            # Each configurable class prints its own parameter help.
            for cls in self.classes:
                cls.class_print_help()
                print
        else:
            # Hint how to reach the full per-class help.
            print "To see all available configurables, use `--help-all`"
            print
|
[
"def",
"print_help",
"(",
"self",
",",
"classes",
"=",
"False",
")",
":",
"self",
".",
"print_subcommands",
"(",
")",
"self",
".",
"print_options",
"(",
")",
"if",
"classes",
":",
"if",
"self",
".",
"classes",
":",
"print",
"\"Class parameters\"",
"print",
"\"----------------\"",
"print",
"for",
"p",
"in",
"wrap_paragraphs",
"(",
"self",
".",
"keyvalue_description",
")",
":",
"print",
"p",
"print",
"for",
"cls",
"in",
"self",
".",
"classes",
":",
"cls",
".",
"class_print_help",
"(",
")",
"print",
"else",
":",
"print",
"\"To see all available configurables, use `--help-all`\"",
"print"
] | 31.173913
| 17.130435
|
def sphere_constrained_cubic(dr, a, alpha):
    """
    Sphere generated by a cubic interpolant constrained to be (1,0) on
    (r0-sqrt(3)/2, r0+sqrt(3)/2), the size of the cube in the (111) direction.

    Parameters
    ----------
    dr : float or ndarray
        Signed distance(s) from the sphere surface; clipped to
        [-sqrt(3)/2, +sqrt(3)/2] before evaluation.
    a : float
        Interpolant shape parameter (enters the quadratic coefficient).
    alpha : float
        Cubic coefficient of the interpolant.

    Returns
    -------
    float or ndarray
        Interpolant values: 1 at/inside dr = -sqrt(3)/2, 0 at/outside
        dr = +sqrt(3)/2.
    """
    sqrt3 = np.sqrt(3)
    # Quadratic coefficient chosen from a and alpha; uses the *parameter* a.
    b_coeff = a * 0.5 / sqrt3 * (1 - 0.6 * sqrt3 * alpha) / (0.15 + a * a)
    rscl = np.clip(dr, -0.5 * sqrt3, 0.5 * sqrt3)
    # Offsets to the two clip boundaries. The original code reassigned the
    # parameter name `a` here, shadowing the shape parameter after its last
    # use -- renamed for clarity, the arithmetic is unchanged.
    lo = rscl + 0.5 * sqrt3
    hi = rscl - 0.5 * sqrt3
    return alpha * hi * lo * rscl + b_coeff * hi * lo - hi / sqrt3
|
[
"def",
"sphere_constrained_cubic",
"(",
"dr",
",",
"a",
",",
"alpha",
")",
":",
"sqrt3",
"=",
"np",
".",
"sqrt",
"(",
"3",
")",
"b_coeff",
"=",
"a",
"*",
"0.5",
"/",
"sqrt3",
"*",
"(",
"1",
"-",
"0.6",
"*",
"sqrt3",
"*",
"alpha",
")",
"/",
"(",
"0.15",
"+",
"a",
"*",
"a",
")",
"rscl",
"=",
"np",
".",
"clip",
"(",
"dr",
",",
"-",
"0.5",
"*",
"sqrt3",
",",
"0.5",
"*",
"sqrt3",
")",
"a",
",",
"d",
"=",
"rscl",
"+",
"0.5",
"*",
"sqrt3",
",",
"rscl",
"-",
"0.5",
"*",
"sqrt3",
"return",
"alpha",
"*",
"d",
"*",
"a",
"*",
"rscl",
"+",
"b_coeff",
"*",
"d",
"*",
"a",
"-",
"d",
"/",
"sqrt3"
] | 35.5
| 17.333333
|
def main():
    """ Main program. """
    args = command.parse_args()
    with btrfs.FileSystem(args.dir) as mount:
        # mount.rescanSizes()
        fs_info = mount.FS_INFO()
        pprint.pprint(fs_info)
        # for dev in mount.devices:
        #     pprint.pprint(dev)
        subvols = mount.subvolumes
        for subvol in subvols:
            print(subvol)
    return 0
|
[
"def",
"main",
"(",
")",
":",
"args",
"=",
"command",
".",
"parse_args",
"(",
")",
"with",
"btrfs",
".",
"FileSystem",
"(",
"args",
".",
"dir",
")",
"as",
"mount",
":",
"# mount.rescanSizes()",
"fInfo",
"=",
"mount",
".",
"FS_INFO",
"(",
")",
"pprint",
".",
"pprint",
"(",
"fInfo",
")",
"vols",
"=",
"mount",
".",
"subvolumes",
"# for dev in mount.devices:",
"# pprint.pprint(dev)",
"for",
"vol",
"in",
"vols",
":",
"print",
"(",
"vol",
")",
"return",
"0"
] | 18.736842
| 21
|
def pave_event_space(fn=pair):
    """
    :return:
        a pair producer that ensures the seeder and delegator share the same event space.
    """
    global _event_space
    # Draw a fresh event space id; each call gets its own.
    space = next(_event_space)

    @_ensure_seeders_list
    def producer(seeders, delegator_factory, *args, **kwargs):
        # Route the seeders and the delegator through the shared event space.
        return fn(seeders + [per_event_source_id(space)],
                  delegator_factory, *args, event_space=space, **kwargs)

    return producer
|
[
"def",
"pave_event_space",
"(",
"fn",
"=",
"pair",
")",
":",
"global",
"_event_space",
"event_space",
"=",
"next",
"(",
"_event_space",
")",
"@",
"_ensure_seeders_list",
"def",
"p",
"(",
"seeders",
",",
"delegator_factory",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"fn",
"(",
"seeders",
"+",
"[",
"per_event_source_id",
"(",
"event_space",
")",
"]",
",",
"delegator_factory",
",",
"*",
"args",
",",
"event_space",
"=",
"event_space",
",",
"*",
"*",
"kwargs",
")",
"return",
"p"
] | 33.615385
| 20.538462
|
def parse(celf, s) :
    "generates an Introspection tree from the given XML string description."

    def from_string_elts(celf, attrs, tree) :
        # Recursively convert an XML element's children into constructor
        # arguments for the corresponding Introspection element class.
        elts = dict((k, attrs[k]) for k in attrs)
        # Map tag name -> element class; <annotation> is allowed everywhere.
        child_tags = dict \
          (
            (childclass.tag_name, childclass)
            for childclass in tuple(celf.tag_elts.values()) + (Introspection.Annotation,)
          )
        children = []
        for child in tree :
            if child.tag not in child_tags :
                raise KeyError("unrecognized tag %s" % child.tag)
            #end if
            childclass = child_tags[child.tag]
            # Collect this child's attributes; optional ones default to None,
            # required ones raise if absent.
            childattrs = {}
            for attrname in childclass.tag_attrs :
                if hasattr(childclass, "tag_attrs_optional") and attrname in childclass.tag_attrs_optional :
                    childattrs[attrname] = child.attrib.get(attrname, None)
                else :
                    if attrname not in child.attrib :
                        raise ValueError("missing %s attribute for %s tag" % (attrname, child.tag))
                    #end if
                    childattrs[attrname] = child.attrib[attrname]
                #end if
            #end for
            if hasattr(childclass, "attr_convert") :
                # Apply per-attribute conversion functions where defined.
                for attr in childclass.attr_convert :
                    if attr in childattrs :
                        childattrs[attr] = childclass.attr_convert[attr](childattrs[attr])
                    #end if
                #end for
            #end if
            children.append(from_string_elts(childclass, childattrs, child))
        #end for
        # Group the constructed children under the constructor-keyword name of
        # their class. The tuple-indexing trick appends an extra
        # ("annotations", Annotation) pair except when the current tag is
        # itself <annotation> (annotations do not nest).
        for child_tag, childclass in tuple(celf.tag_elts.items()) + ((), (("annotations", Introspection.Annotation),))[tree.tag != "annotation"] :
            for child in children :
                if isinstance(child, childclass) :
                    if child_tag not in elts :
                        elts[child_tag] = []
                    #end if
                    elts[child_tag].append(child)
                #end if
            #end for
        #end for
        return \
            celf(**elts)
    #end from_string_elts

#begin parse
    tree = XMLElementTree.fromstring(s)
    assert tree.tag == "node", "root of introspection tree must be <node> tag"
    return \
        from_string_elts(Introspection, {}, tree)
|
[
"def",
"parse",
"(",
"celf",
",",
"s",
")",
":",
"def",
"from_string_elts",
"(",
"celf",
",",
"attrs",
",",
"tree",
")",
":",
"elts",
"=",
"dict",
"(",
"(",
"k",
",",
"attrs",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"attrs",
")",
"child_tags",
"=",
"dict",
"(",
"(",
"childclass",
".",
"tag_name",
",",
"childclass",
")",
"for",
"childclass",
"in",
"tuple",
"(",
"celf",
".",
"tag_elts",
".",
"values",
"(",
")",
")",
"+",
"(",
"Introspection",
".",
"Annotation",
",",
")",
")",
"children",
"=",
"[",
"]",
"for",
"child",
"in",
"tree",
":",
"if",
"child",
".",
"tag",
"not",
"in",
"child_tags",
":",
"raise",
"KeyError",
"(",
"\"unrecognized tag %s\"",
"%",
"child",
".",
"tag",
")",
"#end if",
"childclass",
"=",
"child_tags",
"[",
"child",
".",
"tag",
"]",
"childattrs",
"=",
"{",
"}",
"for",
"attrname",
"in",
"childclass",
".",
"tag_attrs",
":",
"if",
"hasattr",
"(",
"childclass",
",",
"\"tag_attrs_optional\"",
")",
"and",
"attrname",
"in",
"childclass",
".",
"tag_attrs_optional",
":",
"childattrs",
"[",
"attrname",
"]",
"=",
"child",
".",
"attrib",
".",
"get",
"(",
"attrname",
",",
"None",
")",
"else",
":",
"if",
"attrname",
"not",
"in",
"child",
".",
"attrib",
":",
"raise",
"ValueError",
"(",
"\"missing %s attribute for %s tag\"",
"%",
"(",
"attrname",
",",
"child",
".",
"tag",
")",
")",
"#end if",
"childattrs",
"[",
"attrname",
"]",
"=",
"child",
".",
"attrib",
"[",
"attrname",
"]",
"#end if",
"#end for",
"if",
"hasattr",
"(",
"childclass",
",",
"\"attr_convert\"",
")",
":",
"for",
"attr",
"in",
"childclass",
".",
"attr_convert",
":",
"if",
"attr",
"in",
"childattrs",
":",
"childattrs",
"[",
"attr",
"]",
"=",
"childclass",
".",
"attr_convert",
"[",
"attr",
"]",
"(",
"childattrs",
"[",
"attr",
"]",
")",
"#end if",
"#end for",
"#end if",
"children",
".",
"append",
"(",
"from_string_elts",
"(",
"childclass",
",",
"childattrs",
",",
"child",
")",
")",
"#end for",
"for",
"child_tag",
",",
"childclass",
"in",
"tuple",
"(",
"celf",
".",
"tag_elts",
".",
"items",
"(",
")",
")",
"+",
"(",
"(",
")",
",",
"(",
"(",
"\"annotations\"",
",",
"Introspection",
".",
"Annotation",
")",
",",
")",
")",
"[",
"tree",
".",
"tag",
"!=",
"\"annotation\"",
"]",
":",
"for",
"child",
"in",
"children",
":",
"if",
"isinstance",
"(",
"child",
",",
"childclass",
")",
":",
"if",
"child_tag",
"not",
"in",
"elts",
":",
"elts",
"[",
"child_tag",
"]",
"=",
"[",
"]",
"#end if",
"elts",
"[",
"child_tag",
"]",
".",
"append",
"(",
"child",
")",
"#end if",
"#end for",
"#end for",
"return",
"celf",
"(",
"*",
"*",
"elts",
")",
"#end from_string_elts",
"#begin parse",
"tree",
"=",
"XMLElementTree",
".",
"fromstring",
"(",
"s",
")",
"assert",
"tree",
".",
"tag",
"==",
"\"node\"",
",",
"\"root of introspection tree must be <node> tag\"",
"return",
"from_string_elts",
"(",
"Introspection",
",",
"{",
"}",
",",
"tree",
")"
] | 45.145455
| 22.2
|
def _proc_builtin(self, tarfile):
    """Process a builtin type or an unknown type which
    will be treated as a regular file.
    """
    data_start = tarfile.fileobj.tell()
    self.offset_data = data_start
    next_offset = data_start
    if self.isreg() or self.type not in SUPPORTED_TYPES:
        # Skip the following data blocks.
        next_offset += self._block(self.size)
    tarfile.offset = next_offset

    # Patch the TarInfo object with saved global
    # header information.
    self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
    return self
|
[
"def",
"_proc_builtin",
"(",
"self",
",",
"tarfile",
")",
":",
"self",
".",
"offset_data",
"=",
"tarfile",
".",
"fileobj",
".",
"tell",
"(",
")",
"offset",
"=",
"self",
".",
"offset_data",
"if",
"self",
".",
"isreg",
"(",
")",
"or",
"self",
".",
"type",
"not",
"in",
"SUPPORTED_TYPES",
":",
"# Skip the following data blocks.",
"offset",
"+=",
"self",
".",
"_block",
"(",
"self",
".",
"size",
")",
"tarfile",
".",
"offset",
"=",
"offset",
"# Patch the TarInfo object with saved global",
"# header information.",
"self",
".",
"_apply_pax_info",
"(",
"tarfile",
".",
"pax_headers",
",",
"tarfile",
".",
"encoding",
",",
"tarfile",
".",
"errors",
")",
"return",
"self"
] | 37
| 14.5625
|
def dic(self):
    r""" Returns the corrected Deviance Information Criterion (DIC) for all chains loaded into ChainConsumer.

    If a chain does not have a posterior, this method will return `None` for that chain. **Note that
    the DIC metric is only valid on posterior surfaces which closely resemble multivariate normals!**

    Formally, we follow Liddle (2007) and first define *Bayesian complexity* as

    .. math::
        p_D = \bar{D}(\theta) - D(\bar{\theta}),

    where :math:`D(\theta) = -2\ln(P(\theta)) + C` is the deviance, where :math:`P` is the posterior
    and :math:`C` a constant. From here the DIC is defined as

    .. math::
        DIC \equiv D(\bar{\theta}) + 2p_D = \bar{D}(\theta) + p_D.

    Returns
    -------
    list[float]
        A list of all the DIC values - one per chain, in the order in which the chains were added.

    References
    ----------
    [1] Andrew R. Liddle, "Information criteria for astrophysical model selection", MNRAS (2007)
    """
    raw_dics = []
    has_posterior = []
    for chain in self.parent.chains:
        posterior = chain.posterior
        if posterior is None:
            has_posterior.append(False)
            self._logger.warn("You need to set the posterior for chain %s to get the DIC" % chain.name)
            continue
        has_posterior.append(True)
        num_params = chain.chain.shape[1]
        # Weighted mean of each parameter gives \bar{\theta}.
        means = np.array([np.average(chain.chain[:, ii], weights=chain.weights) for ii in range(num_params)])
        deviance = -2 * posterior
        # D(\bar{\theta}): deviance at the mean, via nearest-sample lookup.
        d_of_mean = griddata(chain.chain, deviance, means, method='nearest')[0]
        mean_d = np.average(deviance, weights=chain.weights)
        p_d = mean_d - d_of_mean
        raw_dics.append(mean_d + p_d)
    if len(raw_dics) > 0:
        # Report DICs relative to the smallest one (list is coerced to ndarray).
        raw_dics -= np.min(raw_dics)
    results = []
    idx = 0
    for ok in has_posterior:
        if ok:
            results.append(raw_dics[idx])
            idx += 1
        else:
            results.append(None)
    return results
|
[
"def",
"dic",
"(",
"self",
")",
":",
"dics",
"=",
"[",
"]",
"dics_bool",
"=",
"[",
"]",
"for",
"i",
",",
"chain",
"in",
"enumerate",
"(",
"self",
".",
"parent",
".",
"chains",
")",
":",
"p",
"=",
"chain",
".",
"posterior",
"if",
"p",
"is",
"None",
":",
"dics_bool",
".",
"append",
"(",
"False",
")",
"self",
".",
"_logger",
".",
"warn",
"(",
"\"You need to set the posterior for chain %s to get the DIC\"",
"%",
"chain",
".",
"name",
")",
"else",
":",
"dics_bool",
".",
"append",
"(",
"True",
")",
"num_params",
"=",
"chain",
".",
"chain",
".",
"shape",
"[",
"1",
"]",
"means",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"average",
"(",
"chain",
".",
"chain",
"[",
":",
",",
"ii",
"]",
",",
"weights",
"=",
"chain",
".",
"weights",
")",
"for",
"ii",
"in",
"range",
"(",
"num_params",
")",
"]",
")",
"d",
"=",
"-",
"2",
"*",
"p",
"d_of_mean",
"=",
"griddata",
"(",
"chain",
".",
"chain",
",",
"d",
",",
"means",
",",
"method",
"=",
"'nearest'",
")",
"[",
"0",
"]",
"mean_d",
"=",
"np",
".",
"average",
"(",
"d",
",",
"weights",
"=",
"chain",
".",
"weights",
")",
"p_d",
"=",
"mean_d",
"-",
"d_of_mean",
"dic",
"=",
"mean_d",
"+",
"p_d",
"dics",
".",
"append",
"(",
"dic",
")",
"if",
"len",
"(",
"dics",
")",
">",
"0",
":",
"dics",
"-=",
"np",
".",
"min",
"(",
"dics",
")",
"dics_fin",
"=",
"[",
"]",
"i",
"=",
"0",
"for",
"b",
"in",
"dics_bool",
":",
"if",
"not",
"b",
":",
"dics_fin",
".",
"append",
"(",
"None",
")",
"else",
":",
"dics_fin",
".",
"append",
"(",
"dics",
"[",
"i",
"]",
")",
"i",
"+=",
"1",
"return",
"dics_fin"
] | 39.792453
| 25.679245
|
def ReadResponsesForRequestId(self, session_id, request_id, timestamp=None):
    """Reads responses for one request.

    Args:
      session_id: The session id to use.
      request_id: The id of the request.
      timestamp: A timestamp as used in the data store.

    Yields:
      fetched responses for the request
    """
    state = rdf_flow_runner.RequestState(id=request_id, session_id=session_id)
    # Only one request is passed in, so hand back the first result set.
    for _, fetched in self.ReadResponses([state], timestamp=timestamp):
        return fetched
|
[
"def",
"ReadResponsesForRequestId",
"(",
"self",
",",
"session_id",
",",
"request_id",
",",
"timestamp",
"=",
"None",
")",
":",
"request",
"=",
"rdf_flow_runner",
".",
"RequestState",
"(",
"id",
"=",
"request_id",
",",
"session_id",
"=",
"session_id",
")",
"for",
"_",
",",
"responses",
"in",
"self",
".",
"ReadResponses",
"(",
"[",
"request",
"]",
",",
"timestamp",
"=",
"timestamp",
")",
":",
"return",
"responses"
] | 35.214286
| 20.357143
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.