text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def filter(self, value=None, model=None, context=None):
"""
Sequentially applies all the filters to provided value
:param value: a value to filter
:param model: parent entity
:param context: filtering context, usually parent entity
:return: filtered value
"""
if value is None:
return value
for filter_obj in self.filters:
value = filter_obj.filter(
value=value,
model=model,
context=context if self.use_context else None
)
return value | [
"def",
"filter",
"(",
"self",
",",
"value",
"=",
"None",
",",
"model",
"=",
"None",
",",
"context",
"=",
"None",
")",
":",
"if",
"value",
"is",
"None",
":",
"return",
"value",
"for",
"filter_obj",
"in",
"self",
".",
"filters",
":",
"value",
"=",
"filter_obj",
".",
"filter",
"(",
"value",
"=",
"value",
",",
"model",
"=",
"model",
",",
"context",
"=",
"context",
"if",
"self",
".",
"use_context",
"else",
"None",
")",
"return",
"value"
] | 32.444444 | 13.444444 |
def search_for_devices_by_serial_number(self, sn):
"""
Returns a list of device objects that match the serial number
in param 'sn'.
This will match partial serial numbers.
"""
import re
sn_search = re.compile(sn)
matches = []
for dev_o in self.get_all_devices_in_portal():
# print("Checking {0}".format(dev_o['sn']))
try:
if sn_search.match(dev_o['sn']):
matches.append(dev_o)
except TypeError as err:
print("Problem checking device {!r}: {!r}".format(
dev_o['info']['description']['name'],
str(err)))
return matches | [
"def",
"search_for_devices_by_serial_number",
"(",
"self",
",",
"sn",
")",
":",
"import",
"re",
"sn_search",
"=",
"re",
".",
"compile",
"(",
"sn",
")",
"matches",
"=",
"[",
"]",
"for",
"dev_o",
"in",
"self",
".",
"get_all_devices_in_portal",
"(",
")",
":",
"# print(\"Checking {0}\".format(dev_o['sn']))",
"try",
":",
"if",
"sn_search",
".",
"match",
"(",
"dev_o",
"[",
"'sn'",
"]",
")",
":",
"matches",
".",
"append",
"(",
"dev_o",
")",
"except",
"TypeError",
"as",
"err",
":",
"print",
"(",
"\"Problem checking device {!r}: {!r}\"",
".",
"format",
"(",
"dev_o",
"[",
"'info'",
"]",
"[",
"'description'",
"]",
"[",
"'name'",
"]",
",",
"str",
"(",
"err",
")",
")",
")",
"return",
"matches"
] | 32.608696 | 18.521739 |
def GetMTime(path):
'''
:param unicode path:
Path to file or directory
:rtype: float
:returns:
Modification time for path.
If this is a directory, the highest mtime from files inside it will be returned.
@note:
In some Linux distros (such as CentOs, or anything with ext3), mtime will not return a value
with resolutions higher than a second.
http://stackoverflow.com/questions/2428556/os-path-getmtime-doesnt-return-fraction-of-a-second
'''
_AssertIsLocal(path)
if os.path.isdir(path):
files = FindFiles(path)
if len(files) > 0:
return max(map(os.path.getmtime, files))
return os.path.getmtime(path) | [
"def",
"GetMTime",
"(",
"path",
")",
":",
"_AssertIsLocal",
"(",
"path",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"files",
"=",
"FindFiles",
"(",
"path",
")",
"if",
"len",
"(",
"files",
")",
">",
"0",
":",
"return",
"max",
"(",
"map",
"(",
"os",
".",
"path",
".",
"getmtime",
",",
"files",
")",
")",
"return",
"os",
".",
"path",
".",
"getmtime",
"(",
"path",
")"
] | 26.692308 | 27.769231 |
def create_edges(self):
"""Set up edge-node and edge-cell relations.
"""
# Reshape into individual edges.
# Sort the columns to make it possible for `unique()` to identify
# individual edges.
s = self.idx_hierarchy.shape
a = numpy.sort(self.idx_hierarchy.reshape(s[0], -1).T)
a_unique, inv, cts = unique_rows(a)
assert numpy.all(
cts < 3
), "No edge has more than 2 cells. Are cells listed twice?"
self.is_boundary_edge = (cts[inv] == 1).reshape(s[1:])
self.is_boundary_edge_individual = cts == 1
self.edges = {"nodes": a_unique}
# cell->edges relationship
self.cells["edges"] = inv.reshape(3, -1).T
self._edges_cells = None
self._edge_gid_to_edge_list = None
# Store an index {boundary,interior}_edge -> edge_gid
self._edge_to_edge_gid = [
[],
numpy.where(self.is_boundary_edge_individual)[0],
numpy.where(~self.is_boundary_edge_individual)[0],
]
return | [
"def",
"create_edges",
"(",
"self",
")",
":",
"# Reshape into individual edges.",
"# Sort the columns to make it possible for `unique()` to identify",
"# individual edges.",
"s",
"=",
"self",
".",
"idx_hierarchy",
".",
"shape",
"a",
"=",
"numpy",
".",
"sort",
"(",
"self",
".",
"idx_hierarchy",
".",
"reshape",
"(",
"s",
"[",
"0",
"]",
",",
"-",
"1",
")",
".",
"T",
")",
"a_unique",
",",
"inv",
",",
"cts",
"=",
"unique_rows",
"(",
"a",
")",
"assert",
"numpy",
".",
"all",
"(",
"cts",
"<",
"3",
")",
",",
"\"No edge has more than 2 cells. Are cells listed twice?\"",
"self",
".",
"is_boundary_edge",
"=",
"(",
"cts",
"[",
"inv",
"]",
"==",
"1",
")",
".",
"reshape",
"(",
"s",
"[",
"1",
":",
"]",
")",
"self",
".",
"is_boundary_edge_individual",
"=",
"cts",
"==",
"1",
"self",
".",
"edges",
"=",
"{",
"\"nodes\"",
":",
"a_unique",
"}",
"# cell->edges relationship",
"self",
".",
"cells",
"[",
"\"edges\"",
"]",
"=",
"inv",
".",
"reshape",
"(",
"3",
",",
"-",
"1",
")",
".",
"T",
"self",
".",
"_edges_cells",
"=",
"None",
"self",
".",
"_edge_gid_to_edge_list",
"=",
"None",
"# Store an index {boundary,interior}_edge -> edge_gid",
"self",
".",
"_edge_to_edge_gid",
"=",
"[",
"[",
"]",
",",
"numpy",
".",
"where",
"(",
"self",
".",
"is_boundary_edge_individual",
")",
"[",
"0",
"]",
",",
"numpy",
".",
"where",
"(",
"~",
"self",
".",
"is_boundary_edge_individual",
")",
"[",
"0",
"]",
",",
"]",
"return"
] | 31.666667 | 19.575758 |
def callprop(self, prop, *args):
'''Call a property prop as a method (this will be self).
NOTE: dont pass this and arguments here, these will be added
automatically!'''
if not isinstance(prop, basestring):
prop = prop.to_string().value
cand = self.get(prop)
if not cand.is_callable():
raise MakeError('TypeError',
'%s is not a function' % cand.typeof())
return cand.call(self, args) | [
"def",
"callprop",
"(",
"self",
",",
"prop",
",",
"*",
"args",
")",
":",
"if",
"not",
"isinstance",
"(",
"prop",
",",
"basestring",
")",
":",
"prop",
"=",
"prop",
".",
"to_string",
"(",
")",
".",
"value",
"cand",
"=",
"self",
".",
"get",
"(",
"prop",
")",
"if",
"not",
"cand",
".",
"is_callable",
"(",
")",
":",
"raise",
"MakeError",
"(",
"'TypeError'",
",",
"'%s is not a function'",
"%",
"cand",
".",
"typeof",
"(",
")",
")",
"return",
"cand",
".",
"call",
"(",
"self",
",",
"args",
")"
] | 40 | 14 |
def shift_up_left(self, times=1):
"""
Finds Location shifted up left by 1
:rtype: Location
"""
try:
return Location(self._rank + times, self._file - times)
except IndexError as e:
raise IndexError(e) | [
"def",
"shift_up_left",
"(",
"self",
",",
"times",
"=",
"1",
")",
":",
"try",
":",
"return",
"Location",
"(",
"self",
".",
"_rank",
"+",
"times",
",",
"self",
".",
"_file",
"-",
"times",
")",
"except",
"IndexError",
"as",
"e",
":",
"raise",
"IndexError",
"(",
"e",
")"
] | 26.3 | 13.9 |
def check_vprint(s, vprinter):
'''checked verbose printing'''
if vprinter is True:
print(s);
elif callable(vprinter):
vprinter(s); | [
"def",
"check_vprint",
"(",
"s",
",",
"vprinter",
")",
":",
"if",
"vprinter",
"is",
"True",
":",
"print",
"(",
"s",
")",
"elif",
"callable",
"(",
"vprinter",
")",
":",
"vprinter",
"(",
"s",
")"
] | 25.5 | 14.5 |
def serialize(self):
"""
Convert the data to one that can be saved in h5 structures
Returns:
pandas.DataFrame: like a cell data frame but serialized. columns
"""
df = self.copy()
df['scored_calls'] = df['scored_calls'].apply(lambda x: json.dumps(x))
df['channel_values'] = df['channel_values'].apply(lambda x: json.dumps(x))
df['regions'] = df['regions'].apply(lambda x: json.dumps(x))
df['phenotype_calls'] = df['phenotype_calls'].apply(lambda x: json.dumps(x))
df['neighbors'] = df['neighbors'].apply(lambda x: json.dumps(x))
df['frame_shape'] = df['frame_shape'].apply(lambda x: json.dumps(x))
return df | [
"def",
"serialize",
"(",
"self",
")",
":",
"df",
"=",
"self",
".",
"copy",
"(",
")",
"df",
"[",
"'scored_calls'",
"]",
"=",
"df",
"[",
"'scored_calls'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"json",
".",
"dumps",
"(",
"x",
")",
")",
"df",
"[",
"'channel_values'",
"]",
"=",
"df",
"[",
"'channel_values'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"json",
".",
"dumps",
"(",
"x",
")",
")",
"df",
"[",
"'regions'",
"]",
"=",
"df",
"[",
"'regions'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"json",
".",
"dumps",
"(",
"x",
")",
")",
"df",
"[",
"'phenotype_calls'",
"]",
"=",
"df",
"[",
"'phenotype_calls'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"json",
".",
"dumps",
"(",
"x",
")",
")",
"df",
"[",
"'neighbors'",
"]",
"=",
"df",
"[",
"'neighbors'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"json",
".",
"dumps",
"(",
"x",
")",
")",
"df",
"[",
"'frame_shape'",
"]",
"=",
"df",
"[",
"'frame_shape'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"json",
".",
"dumps",
"(",
"x",
")",
")",
"return",
"df"
] | 46.733333 | 27 |
def parse_object_type_extension(lexer: Lexer) -> ObjectTypeExtensionNode:
"""ObjectTypeExtension"""
start = lexer.token
expect_keyword(lexer, "extend")
expect_keyword(lexer, "type")
name = parse_name(lexer)
interfaces = parse_implements_interfaces(lexer)
directives = parse_directives(lexer, True)
fields = parse_fields_definition(lexer)
if not (interfaces or directives or fields):
raise unexpected(lexer)
return ObjectTypeExtensionNode(
name=name,
interfaces=interfaces,
directives=directives,
fields=fields,
loc=loc(lexer, start),
) | [
"def",
"parse_object_type_extension",
"(",
"lexer",
":",
"Lexer",
")",
"->",
"ObjectTypeExtensionNode",
":",
"start",
"=",
"lexer",
".",
"token",
"expect_keyword",
"(",
"lexer",
",",
"\"extend\"",
")",
"expect_keyword",
"(",
"lexer",
",",
"\"type\"",
")",
"name",
"=",
"parse_name",
"(",
"lexer",
")",
"interfaces",
"=",
"parse_implements_interfaces",
"(",
"lexer",
")",
"directives",
"=",
"parse_directives",
"(",
"lexer",
",",
"True",
")",
"fields",
"=",
"parse_fields_definition",
"(",
"lexer",
")",
"if",
"not",
"(",
"interfaces",
"or",
"directives",
"or",
"fields",
")",
":",
"raise",
"unexpected",
"(",
"lexer",
")",
"return",
"ObjectTypeExtensionNode",
"(",
"name",
"=",
"name",
",",
"interfaces",
"=",
"interfaces",
",",
"directives",
"=",
"directives",
",",
"fields",
"=",
"fields",
",",
"loc",
"=",
"loc",
"(",
"lexer",
",",
"start",
")",
",",
")"
] | 33.888889 | 12.277778 |
def handle_presence(self, old_present):
'''
Fire presence events if enabled
'''
# On the first run it may need more time for the EventPublisher
# to come up and be ready. Set the timeout to account for this.
if self.presence_events and self.event.connect_pull(timeout=3):
present = self.ckminions.connected_ids()
new = present.difference(old_present)
lost = old_present.difference(present)
if new or lost:
# Fire new minions present event
data = {'new': list(new),
'lost': list(lost)}
self.event.fire_event(data, tagify('change', 'presence'))
data = {'present': list(present)}
self.event.fire_event(data, tagify('present', 'presence'))
old_present.clear()
old_present.update(present) | [
"def",
"handle_presence",
"(",
"self",
",",
"old_present",
")",
":",
"# On the first run it may need more time for the EventPublisher",
"# to come up and be ready. Set the timeout to account for this.",
"if",
"self",
".",
"presence_events",
"and",
"self",
".",
"event",
".",
"connect_pull",
"(",
"timeout",
"=",
"3",
")",
":",
"present",
"=",
"self",
".",
"ckminions",
".",
"connected_ids",
"(",
")",
"new",
"=",
"present",
".",
"difference",
"(",
"old_present",
")",
"lost",
"=",
"old_present",
".",
"difference",
"(",
"present",
")",
"if",
"new",
"or",
"lost",
":",
"# Fire new minions present event",
"data",
"=",
"{",
"'new'",
":",
"list",
"(",
"new",
")",
",",
"'lost'",
":",
"list",
"(",
"lost",
")",
"}",
"self",
".",
"event",
".",
"fire_event",
"(",
"data",
",",
"tagify",
"(",
"'change'",
",",
"'presence'",
")",
")",
"data",
"=",
"{",
"'present'",
":",
"list",
"(",
"present",
")",
"}",
"self",
".",
"event",
".",
"fire_event",
"(",
"data",
",",
"tagify",
"(",
"'present'",
",",
"'presence'",
")",
")",
"old_present",
".",
"clear",
"(",
")",
"old_present",
".",
"update",
"(",
"present",
")"
] | 46.368421 | 15.105263 |
def early_warning(iterable, name='this generator'):
''' This function logs an early warning that the generator is empty.
This is handy for times when you're manually playing with generators and
would appreciate the console warning you ahead of time that your generator
is now empty, instead of being surprised with a StopIteration or
GeneratorExit exception when youre trying to test something. '''
nxt = None
prev = next(iterable)
while 1:
try:
nxt = next(iterable)
except:
warning(' {} is now empty'.format(name))
yield prev
break
else:
yield prev
prev = nxt | [
"def",
"early_warning",
"(",
"iterable",
",",
"name",
"=",
"'this generator'",
")",
":",
"nxt",
"=",
"None",
"prev",
"=",
"next",
"(",
"iterable",
")",
"while",
"1",
":",
"try",
":",
"nxt",
"=",
"next",
"(",
"iterable",
")",
"except",
":",
"warning",
"(",
"' {} is now empty'",
".",
"format",
"(",
"name",
")",
")",
"yield",
"prev",
"break",
"else",
":",
"yield",
"prev",
"prev",
"=",
"nxt"
] | 33.55 | 24.95 |
def dataframe(self):
"""
Returns a ``pandas DataFrame`` containing all other relevant class
properties and values for the specified game.
"""
fields_to_include = {
'assist_percentage': self.assist_percentage,
'assists': self.assists,
'block_percentage': self.block_percentage,
'blocks': self.blocks,
'box_plus_minus': self.box_plus_minus,
'defensive_rating': self.defensive_rating,
'defensive_rebound_percentage': self.defensive_rebound_percentage,
'defensive_rebounds': self.defensive_rebounds,
'effective_field_goal_percentage':
self.effective_field_goal_percentage,
'field_goal_attempts': self.field_goal_attempts,
'field_goal_percentage': self.field_goal_percentage,
'field_goals': self.field_goals,
'free_throw_attempt_rate': self.free_throw_attempt_rate,
'free_throw_attempts': self.free_throw_attempts,
'free_throw_percentage': self.free_throw_percentage,
'free_throws': self.free_throws,
'minutes_played': self.minutes_played,
'offensive_rating': self.offensive_rating,
'offensive_rebound_percentage': self.offensive_rebound_percentage,
'offensive_rebounds': self.offensive_rebounds,
'personal_fouls': self.personal_fouls,
'points': self.points,
'steal_percentage': self.steal_percentage,
'steals': self.steals,
'three_point_attempt_rate': self.three_point_attempt_rate,
'three_point_attempts': self.three_point_attempts,
'three_point_percentage': self.three_point_percentage,
'three_pointers': self.three_pointers,
'total_rebound_percentage': self.total_rebound_percentage,
'total_rebounds': self.total_rebounds,
'true_shooting_percentage': self.true_shooting_percentage,
'turnover_percentage': self.turnover_percentage,
'turnovers': self.turnovers,
'two_point_attempts': self.two_point_attempts,
'two_point_percentage': self.two_point_percentage,
'two_pointers': self.two_pointers,
'usage_percentage': self.usage_percentage
}
return pd.DataFrame([fields_to_include], index=[self._player_id]) | [
"def",
"dataframe",
"(",
"self",
")",
":",
"fields_to_include",
"=",
"{",
"'assist_percentage'",
":",
"self",
".",
"assist_percentage",
",",
"'assists'",
":",
"self",
".",
"assists",
",",
"'block_percentage'",
":",
"self",
".",
"block_percentage",
",",
"'blocks'",
":",
"self",
".",
"blocks",
",",
"'box_plus_minus'",
":",
"self",
".",
"box_plus_minus",
",",
"'defensive_rating'",
":",
"self",
".",
"defensive_rating",
",",
"'defensive_rebound_percentage'",
":",
"self",
".",
"defensive_rebound_percentage",
",",
"'defensive_rebounds'",
":",
"self",
".",
"defensive_rebounds",
",",
"'effective_field_goal_percentage'",
":",
"self",
".",
"effective_field_goal_percentage",
",",
"'field_goal_attempts'",
":",
"self",
".",
"field_goal_attempts",
",",
"'field_goal_percentage'",
":",
"self",
".",
"field_goal_percentage",
",",
"'field_goals'",
":",
"self",
".",
"field_goals",
",",
"'free_throw_attempt_rate'",
":",
"self",
".",
"free_throw_attempt_rate",
",",
"'free_throw_attempts'",
":",
"self",
".",
"free_throw_attempts",
",",
"'free_throw_percentage'",
":",
"self",
".",
"free_throw_percentage",
",",
"'free_throws'",
":",
"self",
".",
"free_throws",
",",
"'minutes_played'",
":",
"self",
".",
"minutes_played",
",",
"'offensive_rating'",
":",
"self",
".",
"offensive_rating",
",",
"'offensive_rebound_percentage'",
":",
"self",
".",
"offensive_rebound_percentage",
",",
"'offensive_rebounds'",
":",
"self",
".",
"offensive_rebounds",
",",
"'personal_fouls'",
":",
"self",
".",
"personal_fouls",
",",
"'points'",
":",
"self",
".",
"points",
",",
"'steal_percentage'",
":",
"self",
".",
"steal_percentage",
",",
"'steals'",
":",
"self",
".",
"steals",
",",
"'three_point_attempt_rate'",
":",
"self",
".",
"three_point_attempt_rate",
",",
"'three_point_attempts'",
":",
"self",
".",
"three_point_attempts",
",",
"'three_point_percentage'",
":",
"self",
".",
"three_point_percentage",
",",
"'three_pointers'",
":",
"self",
".",
"three_pointers",
",",
"'total_rebound_percentage'",
":",
"self",
".",
"total_rebound_percentage",
",",
"'total_rebounds'",
":",
"self",
".",
"total_rebounds",
",",
"'true_shooting_percentage'",
":",
"self",
".",
"true_shooting_percentage",
",",
"'turnover_percentage'",
":",
"self",
".",
"turnover_percentage",
",",
"'turnovers'",
":",
"self",
".",
"turnovers",
",",
"'two_point_attempts'",
":",
"self",
".",
"two_point_attempts",
",",
"'two_point_percentage'",
":",
"self",
".",
"two_point_percentage",
",",
"'two_pointers'",
":",
"self",
".",
"two_pointers",
",",
"'usage_percentage'",
":",
"self",
".",
"usage_percentage",
"}",
"return",
"pd",
".",
"DataFrame",
"(",
"[",
"fields_to_include",
"]",
",",
"index",
"=",
"[",
"self",
".",
"_player_id",
"]",
")"
] | 51.478261 | 16.391304 |
def value_for_keypath(obj, path):
"""Get value from walking key path with start object obj.
"""
val = obj
for part in path.split('.'):
match = re.match(list_index_re, part)
if match is not None:
val = _extract(val, match.group(1))
if not isinstance(val, list) and not isinstance(val, tuple):
raise TypeError('expected list/tuple')
index = int(match.group(2))
val = val[index]
else:
val = _extract(val, part)
if val is None:
return None
return val | [
"def",
"value_for_keypath",
"(",
"obj",
",",
"path",
")",
":",
"val",
"=",
"obj",
"for",
"part",
"in",
"path",
".",
"split",
"(",
"'.'",
")",
":",
"match",
"=",
"re",
".",
"match",
"(",
"list_index_re",
",",
"part",
")",
"if",
"match",
"is",
"not",
"None",
":",
"val",
"=",
"_extract",
"(",
"val",
",",
"match",
".",
"group",
"(",
"1",
")",
")",
"if",
"not",
"isinstance",
"(",
"val",
",",
"list",
")",
"and",
"not",
"isinstance",
"(",
"val",
",",
"tuple",
")",
":",
"raise",
"TypeError",
"(",
"'expected list/tuple'",
")",
"index",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"2",
")",
")",
"val",
"=",
"val",
"[",
"index",
"]",
"else",
":",
"val",
"=",
"_extract",
"(",
"val",
",",
"part",
")",
"if",
"val",
"is",
"None",
":",
"return",
"None",
"return",
"val"
] | 33.529412 | 12.588235 |
def rigid_linear_interpolate(x_axis, y_axis, x_new_axis):
"""Interpolate a y = f(x) function using linear interpolation.
Rigid means the x_new_axis has to be in x_axis's range.
"""
f = interp1d(x_axis, y_axis)
return f(x_new_axis) | [
"def",
"rigid_linear_interpolate",
"(",
"x_axis",
",",
"y_axis",
",",
"x_new_axis",
")",
":",
"f",
"=",
"interp1d",
"(",
"x_axis",
",",
"y_axis",
")",
"return",
"f",
"(",
"x_new_axis",
")"
] | 35 | 14.285714 |
def get_plugin_meta(plugins):
"""
Returns meta data about plugins.
:param plugins: A list of plugins.
:type plugins: list
:returns: A list of dicts containing plugin meta data.
:rtype: list
"""
return [
{
"name": p.name,
"version": p.version,
"homepage": p.homepage,
"enabled": p.enabled,
}
for p in plugins
if is_plugin(p)
] | [
"def",
"get_plugin_meta",
"(",
"plugins",
")",
":",
"return",
"[",
"{",
"\"name\"",
":",
"p",
".",
"name",
",",
"\"version\"",
":",
"p",
".",
"version",
",",
"\"homepage\"",
":",
"p",
".",
"homepage",
",",
"\"enabled\"",
":",
"p",
".",
"enabled",
",",
"}",
"for",
"p",
"in",
"plugins",
"if",
"is_plugin",
"(",
"p",
")",
"]"
] | 22.315789 | 16.105263 |
def CompleteBreakpoint(self, breakpoint_id):
"""Marks the specified breaking as completed.
Appends the ID to set of completed breakpoints and clears it.
Args:
breakpoint_id: breakpoint ID to complete.
"""
with self._lock:
self._completed.add(breakpoint_id)
if breakpoint_id in self._active:
self._active.pop(breakpoint_id).Clear() | [
"def",
"CompleteBreakpoint",
"(",
"self",
",",
"breakpoint_id",
")",
":",
"with",
"self",
".",
"_lock",
":",
"self",
".",
"_completed",
".",
"add",
"(",
"breakpoint_id",
")",
"if",
"breakpoint_id",
"in",
"self",
".",
"_active",
":",
"self",
".",
"_active",
".",
"pop",
"(",
"breakpoint_id",
")",
".",
"Clear",
"(",
")"
] | 30.583333 | 14.583333 |
def facet_raw(self, **kw):
"""
Return a new S instance with raw facet args combined with
existing set.
"""
items = kw.items()
if six.PY3:
items = list(items)
return self._clone(next_step=('facet_raw', items)) | [
"def",
"facet_raw",
"(",
"self",
",",
"*",
"*",
"kw",
")",
":",
"items",
"=",
"kw",
".",
"items",
"(",
")",
"if",
"six",
".",
"PY3",
":",
"items",
"=",
"list",
"(",
"items",
")",
"return",
"self",
".",
"_clone",
"(",
"next_step",
"=",
"(",
"'facet_raw'",
",",
"items",
")",
")"
] | 29.777778 | 13.333333 |
def radio_buttons_clicked(self):
"""Handler when selected radio button changed."""
# Disable all spin boxes
for spin_box in list(self.spin_boxes.values()):
spin_box.setEnabled(False)
# Disable list widget
self.list_widget.setEnabled(False)
# Get selected radio button
radio_button_checked_id = self.input_button_group.checkedId()
if radio_button_checked_id > -1:
selected_value = list(self._parameter.options.values())[
radio_button_checked_id]
if selected_value.get('type') == MULTIPLE_DYNAMIC:
# Enable list widget
self.list_widget.setEnabled(True)
elif selected_value.get('type') == SINGLE_DYNAMIC:
selected_key = list(self._parameter.options.keys())[
radio_button_checked_id]
self.spin_boxes[selected_key].setEnabled(True) | [
"def",
"radio_buttons_clicked",
"(",
"self",
")",
":",
"# Disable all spin boxes",
"for",
"spin_box",
"in",
"list",
"(",
"self",
".",
"spin_boxes",
".",
"values",
"(",
")",
")",
":",
"spin_box",
".",
"setEnabled",
"(",
"False",
")",
"# Disable list widget",
"self",
".",
"list_widget",
".",
"setEnabled",
"(",
"False",
")",
"# Get selected radio button",
"radio_button_checked_id",
"=",
"self",
".",
"input_button_group",
".",
"checkedId",
"(",
")",
"if",
"radio_button_checked_id",
">",
"-",
"1",
":",
"selected_value",
"=",
"list",
"(",
"self",
".",
"_parameter",
".",
"options",
".",
"values",
"(",
")",
")",
"[",
"radio_button_checked_id",
"]",
"if",
"selected_value",
".",
"get",
"(",
"'type'",
")",
"==",
"MULTIPLE_DYNAMIC",
":",
"# Enable list widget",
"self",
".",
"list_widget",
".",
"setEnabled",
"(",
"True",
")",
"elif",
"selected_value",
".",
"get",
"(",
"'type'",
")",
"==",
"SINGLE_DYNAMIC",
":",
"selected_key",
"=",
"list",
"(",
"self",
".",
"_parameter",
".",
"options",
".",
"keys",
"(",
")",
")",
"[",
"radio_button_checked_id",
"]",
"self",
".",
"spin_boxes",
"[",
"selected_key",
"]",
".",
"setEnabled",
"(",
"True",
")"
] | 43.809524 | 14.238095 |
def get_interface_switch(self, nexus_host,
intf_type, interface):
"""Get the interface data from host.
:param nexus_host: IP address of Nexus switch
:param intf_type: String which specifies interface type.
example: ethernet
:param interface: String indicating which interface.
example: 1/19
:returns response: Returns interface data
"""
if intf_type == "ethernet":
path_interface = "phys-[eth" + interface + "]"
else:
path_interface = "aggr-[po" + interface + "]"
action = snipp.PATH_IF % path_interface
starttime = time.time()
response = self.client.rest_get(action, nexus_host)
self.capture_and_print_timeshot(starttime, "getif",
switch=nexus_host)
LOG.debug("GET call returned interface %(if_type)s %(interface)s "
"config", {'if_type': intf_type, 'interface': interface})
return response | [
"def",
"get_interface_switch",
"(",
"self",
",",
"nexus_host",
",",
"intf_type",
",",
"interface",
")",
":",
"if",
"intf_type",
"==",
"\"ethernet\"",
":",
"path_interface",
"=",
"\"phys-[eth\"",
"+",
"interface",
"+",
"\"]\"",
"else",
":",
"path_interface",
"=",
"\"aggr-[po\"",
"+",
"interface",
"+",
"\"]\"",
"action",
"=",
"snipp",
".",
"PATH_IF",
"%",
"path_interface",
"starttime",
"=",
"time",
".",
"time",
"(",
")",
"response",
"=",
"self",
".",
"client",
".",
"rest_get",
"(",
"action",
",",
"nexus_host",
")",
"self",
".",
"capture_and_print_timeshot",
"(",
"starttime",
",",
"\"getif\"",
",",
"switch",
"=",
"nexus_host",
")",
"LOG",
".",
"debug",
"(",
"\"GET call returned interface %(if_type)s %(interface)s \"",
"\"config\"",
",",
"{",
"'if_type'",
":",
"intf_type",
",",
"'interface'",
":",
"interface",
"}",
")",
"return",
"response"
] | 39.038462 | 17.538462 |
def expand_relative_uri(self, context, uri):
"""If uri is relative then expand in context.
Prints warning if expansion happens.
"""
full_uri = urljoin(context, uri)
if (full_uri != uri):
print(" WARNING - expanded relative URI to %s" % (full_uri))
uri = full_uri
return(uri) | [
"def",
"expand_relative_uri",
"(",
"self",
",",
"context",
",",
"uri",
")",
":",
"full_uri",
"=",
"urljoin",
"(",
"context",
",",
"uri",
")",
"if",
"(",
"full_uri",
"!=",
"uri",
")",
":",
"print",
"(",
"\" WARNING - expanded relative URI to %s\"",
"%",
"(",
"full_uri",
")",
")",
"uri",
"=",
"full_uri",
"return",
"(",
"uri",
")"
] | 33.9 | 12.7 |
def complain(self, id, is_spam):
""" http://api.yandex.ru/cleanweb/doc/dg/concepts/complain.xml"""
r = self.request('post', 'http://cleanweb-api.yandex.ru/1.0/complain',
data={'id': id, 'spamtype': 'spam' if is_spam else 'ham'})
return True | [
"def",
"complain",
"(",
"self",
",",
"id",
",",
"is_spam",
")",
":",
"r",
"=",
"self",
".",
"request",
"(",
"'post'",
",",
"'http://cleanweb-api.yandex.ru/1.0/complain'",
",",
"data",
"=",
"{",
"'id'",
":",
"id",
",",
"'spamtype'",
":",
"'spam'",
"if",
"is_spam",
"else",
"'ham'",
"}",
")",
"return",
"True"
] | 57 | 22 |
def list_backends():
"""Return installed backends.
Backends are installed python packages named pyvisa-<something> where <something>
is the name of the backend.
:rtype: list
"""
return ['ni'] + [name for (loader, name, ispkg) in pkgutil.iter_modules()
if name.startswith('pyvisa-') and not name.endswith('-script')] | [
"def",
"list_backends",
"(",
")",
":",
"return",
"[",
"'ni'",
"]",
"+",
"[",
"name",
"for",
"(",
"loader",
",",
"name",
",",
"ispkg",
")",
"in",
"pkgutil",
".",
"iter_modules",
"(",
")",
"if",
"name",
".",
"startswith",
"(",
"'pyvisa-'",
")",
"and",
"not",
"name",
".",
"endswith",
"(",
"'-script'",
")",
"]"
] | 35.3 | 25.9 |
def searchQueryAll(self, *args, **kwargs):
"""
Experimental Method
Execute a Search query, retrieving all rows.
This method returns a :class:`Deferred` object which is executed
with a :class:`~.SearchRequest` object. The object may be iterated
over to yield the rows in the result set.
This method is similar to :meth:`~couchbase.bucket.Bucket.search`
in its arguments.
Example::
def handler(req):
for row in req:
# ... handle row
d = cb.search('name', ft.MatchQuery('nosql'), limit=10)
d.addCallback(handler)
:return: A :class:`Deferred`
.. seealso:: :meth:`~couchbase.bucket.Bucket.search`
"""
if not self.connected:
cb = lambda x: self.searchQueryAll(*args, **kwargs)
return self.connect().addCallback(cb)
kwargs['itercls'] = BatchedSearchRequest
o = super(AsyncBucket, self).search(*args, **kwargs)
o.start()
return o._getDeferred() | [
"def",
"searchQueryAll",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"connected",
":",
"cb",
"=",
"lambda",
"x",
":",
"self",
".",
"searchQueryAll",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"connect",
"(",
")",
".",
"addCallback",
"(",
"cb",
")",
"kwargs",
"[",
"'itercls'",
"]",
"=",
"BatchedSearchRequest",
"o",
"=",
"super",
"(",
"AsyncBucket",
",",
"self",
")",
".",
"search",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"o",
".",
"start",
"(",
")",
"return",
"o",
".",
"_getDeferred",
"(",
")"
] | 29.828571 | 21.6 |
def get_extr_license_ident(self, extr_lic):
"""
Return an a license identifier from an ExtractedLicense or None.
"""
identifier_tripples = list(self.graph.triples((extr_lic, self.spdx_namespace['licenseId'], None)))
if not identifier_tripples:
self.error = True
msg = 'Extracted license must have licenseId property.'
self.logger.log(msg)
return
if len(identifier_tripples) > 1:
self.more_than_one_error('extracted license identifier_tripples')
return
identifier_tripple = identifier_tripples[0]
_s, _p, identifier = identifier_tripple
return identifier | [
"def",
"get_extr_license_ident",
"(",
"self",
",",
"extr_lic",
")",
":",
"identifier_tripples",
"=",
"list",
"(",
"self",
".",
"graph",
".",
"triples",
"(",
"(",
"extr_lic",
",",
"self",
".",
"spdx_namespace",
"[",
"'licenseId'",
"]",
",",
"None",
")",
")",
")",
"if",
"not",
"identifier_tripples",
":",
"self",
".",
"error",
"=",
"True",
"msg",
"=",
"'Extracted license must have licenseId property.'",
"self",
".",
"logger",
".",
"log",
"(",
"msg",
")",
"return",
"if",
"len",
"(",
"identifier_tripples",
")",
">",
"1",
":",
"self",
".",
"more_than_one_error",
"(",
"'extracted license identifier_tripples'",
")",
"return",
"identifier_tripple",
"=",
"identifier_tripples",
"[",
"0",
"]",
"_s",
",",
"_p",
",",
"identifier",
"=",
"identifier_tripple",
"return",
"identifier"
] | 35.894737 | 20.315789 |
def function(
name,
tgt,
ssh=False,
tgt_type='glob',
ret='',
ret_config=None,
ret_kwargs=None,
expect_minions=False,
fail_minions=None,
fail_function=None,
arg=None,
kwarg=None,
timeout=None,
batch=None,
subset=None,
**kwargs): # pylint: disable=unused-argument
'''
Execute a single module function on a remote minion via salt or salt-ssh
name
The name of the function to run, aka cmd.run or pkg.install
tgt
The target specification, aka '*' for all minions
tgt_type
The target type, defaults to ``glob``
arg
The list of arguments to pass into the function
kwarg
The dict (not a list) of keyword arguments to pass into the function
ret
Optionally set a single or a list of returners to use
ret_config
Use an alternative returner configuration
ret_kwargs
Override individual returner configuration items
expect_minions
An optional boolean for failing if some minions do not respond
fail_minions
An optional list of targeted minions where failure is an option
fail_function
An optional string that points to a salt module that returns True or False
based on the returned data dict for individual minions
ssh
Set to `True` to use the ssh client instead of the standard salt client
batch
Execute the command :ref:`in batches <targeting-batch>`. E.g.: ``10%``.
subset
Number of minions from the targeted set to randomly use
.. versionadded:: 2017.7.0
asynchronous
Run the salt command but don't wait for a reply.
.. versionadded:: neon
'''
func_ret = {'name': name,
'changes': {},
'comment': '',
'result': True}
if kwarg is None:
kwarg = {}
if isinstance(arg, six.string_types):
func_ret['warnings'] = [
'Please specify \'arg\' as a list of arguments.'
]
arg = arg.split()
cmd_kw = {'arg': arg or [], 'kwarg': kwarg, 'ret': ret, 'timeout': timeout}
if batch is not None:
cmd_kw['batch'] = six.text_type(batch)
if subset is not None:
cmd_kw['subset'] = subset
cmd_kw['tgt_type'] = tgt_type
cmd_kw['ssh'] = ssh
cmd_kw['expect_minions'] = expect_minions
cmd_kw['_cmd_meta'] = True
cmd_kw['asynchronous'] = kwargs.pop('asynchronous', False)
if ret_config:
cmd_kw['ret_config'] = ret_config
if ret_kwargs:
cmd_kw['ret_kwargs'] = ret_kwargs
fun = name
if __opts__['test'] is True:
func_ret['comment'] = \
'Function {0} would be executed on target {1}'.format(fun, tgt)
func_ret['result'] = None
return func_ret
try:
_fire_args({'type': 'function', 'tgt': tgt, 'name': name, 'args': cmd_kw})
cmd_ret = __salt__['saltutil.cmd'](tgt, fun, **cmd_kw)
except Exception as exc:
func_ret['result'] = False
func_ret['comment'] = six.text_type(exc)
return func_ret
if cmd_kw['asynchronous']:
func_ret['__jid__'] = cmd_ret.get('jid')
func_ret['changes'] = cmd_ret
if int(cmd_ret.get('jid', 0)) > 0:
func_ret['result'] = True
func_ret['comment'] = 'Function submitted successfully.'
else:
func_ret['result'] = False
func_ret['comment'] = 'Function failed to run.'
return func_ret
try:
func_ret['__jid__'] = cmd_ret[next(iter(cmd_ret))]['jid']
except (StopIteration, KeyError):
pass
changes = {}
fail = set()
if fail_minions is None:
fail_minions = ()
elif isinstance(fail_minions, six.string_types):
fail_minions = [minion.strip() for minion in fail_minions.split(',')]
elif not isinstance(fail_minions, list):
func_ret.setdefault('warnings', []).append(
'\'fail_minions\' needs to be a list or a comma separated '
'string. Ignored.'
)
fail_minions = ()
for minion, mdata in six.iteritems(cmd_ret):
m_ret = False
if mdata.get('retcode'):
func_ret['result'] = False
fail.add(minion)
if mdata.get('failed', False):
m_func = False
else:
if 'return' in mdata and 'ret' not in mdata:
mdata['ret'] = mdata.pop('return')
m_ret = mdata['ret']
m_func = (not fail_function and True) or __salt__[fail_function](m_ret)
if m_ret is False:
m_func = False
if not m_func:
if minion not in fail_minions:
fail.add(minion)
changes[minion] = m_ret
if not cmd_ret:
func_ret['result'] = False
func_ret['command'] = 'No minions responded'
else:
if changes:
func_ret['changes'] = {'out': 'highstate', 'ret': changes}
if fail:
func_ret['result'] = False
func_ret['comment'] = 'Running function {0} failed on minions: {1}'.format(name, ', '.join(fail))
else:
func_ret['comment'] = 'Function ran successfully.'
if changes:
func_ret['comment'] += ' Function {0} ran on {1}.'.format(name, ', '.join(changes))
return func_ret | [
"def",
"function",
"(",
"name",
",",
"tgt",
",",
"ssh",
"=",
"False",
",",
"tgt_type",
"=",
"'glob'",
",",
"ret",
"=",
"''",
",",
"ret_config",
"=",
"None",
",",
"ret_kwargs",
"=",
"None",
",",
"expect_minions",
"=",
"False",
",",
"fail_minions",
"=",
"None",
",",
"fail_function",
"=",
"None",
",",
"arg",
"=",
"None",
",",
"kwarg",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"batch",
"=",
"None",
",",
"subset",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=unused-argument",
"func_ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'comment'",
":",
"''",
",",
"'result'",
":",
"True",
"}",
"if",
"kwarg",
"is",
"None",
":",
"kwarg",
"=",
"{",
"}",
"if",
"isinstance",
"(",
"arg",
",",
"six",
".",
"string_types",
")",
":",
"func_ret",
"[",
"'warnings'",
"]",
"=",
"[",
"'Please specify \\'arg\\' as a list of arguments.'",
"]",
"arg",
"=",
"arg",
".",
"split",
"(",
")",
"cmd_kw",
"=",
"{",
"'arg'",
":",
"arg",
"or",
"[",
"]",
",",
"'kwarg'",
":",
"kwarg",
",",
"'ret'",
":",
"ret",
",",
"'timeout'",
":",
"timeout",
"}",
"if",
"batch",
"is",
"not",
"None",
":",
"cmd_kw",
"[",
"'batch'",
"]",
"=",
"six",
".",
"text_type",
"(",
"batch",
")",
"if",
"subset",
"is",
"not",
"None",
":",
"cmd_kw",
"[",
"'subset'",
"]",
"=",
"subset",
"cmd_kw",
"[",
"'tgt_type'",
"]",
"=",
"tgt_type",
"cmd_kw",
"[",
"'ssh'",
"]",
"=",
"ssh",
"cmd_kw",
"[",
"'expect_minions'",
"]",
"=",
"expect_minions",
"cmd_kw",
"[",
"'_cmd_meta'",
"]",
"=",
"True",
"cmd_kw",
"[",
"'asynchronous'",
"]",
"=",
"kwargs",
".",
"pop",
"(",
"'asynchronous'",
",",
"False",
")",
"if",
"ret_config",
":",
"cmd_kw",
"[",
"'ret_config'",
"]",
"=",
"ret_config",
"if",
"ret_kwargs",
":",
"cmd_kw",
"[",
"'ret_kwargs'",
"]",
"=",
"ret_kwargs",
"fun",
"=",
"name",
"if",
"__opts__",
"[",
"'test'",
"]",
"is",
"True",
":",
"func_ret",
"[",
"'comment'",
"]",
"=",
"'Function {0} would be executed on target {1}'",
".",
"format",
"(",
"fun",
",",
"tgt",
")",
"func_ret",
"[",
"'result'",
"]",
"=",
"None",
"return",
"func_ret",
"try",
":",
"_fire_args",
"(",
"{",
"'type'",
":",
"'function'",
",",
"'tgt'",
":",
"tgt",
",",
"'name'",
":",
"name",
",",
"'args'",
":",
"cmd_kw",
"}",
")",
"cmd_ret",
"=",
"__salt__",
"[",
"'saltutil.cmd'",
"]",
"(",
"tgt",
",",
"fun",
",",
"*",
"*",
"cmd_kw",
")",
"except",
"Exception",
"as",
"exc",
":",
"func_ret",
"[",
"'result'",
"]",
"=",
"False",
"func_ret",
"[",
"'comment'",
"]",
"=",
"six",
".",
"text_type",
"(",
"exc",
")",
"return",
"func_ret",
"if",
"cmd_kw",
"[",
"'asynchronous'",
"]",
":",
"func_ret",
"[",
"'__jid__'",
"]",
"=",
"cmd_ret",
".",
"get",
"(",
"'jid'",
")",
"func_ret",
"[",
"'changes'",
"]",
"=",
"cmd_ret",
"if",
"int",
"(",
"cmd_ret",
".",
"get",
"(",
"'jid'",
",",
"0",
")",
")",
">",
"0",
":",
"func_ret",
"[",
"'result'",
"]",
"=",
"True",
"func_ret",
"[",
"'comment'",
"]",
"=",
"'Function submitted successfully.'",
"else",
":",
"func_ret",
"[",
"'result'",
"]",
"=",
"False",
"func_ret",
"[",
"'comment'",
"]",
"=",
"'Function failed to run.'",
"return",
"func_ret",
"try",
":",
"func_ret",
"[",
"'__jid__'",
"]",
"=",
"cmd_ret",
"[",
"next",
"(",
"iter",
"(",
"cmd_ret",
")",
")",
"]",
"[",
"'jid'",
"]",
"except",
"(",
"StopIteration",
",",
"KeyError",
")",
":",
"pass",
"changes",
"=",
"{",
"}",
"fail",
"=",
"set",
"(",
")",
"if",
"fail_minions",
"is",
"None",
":",
"fail_minions",
"=",
"(",
")",
"elif",
"isinstance",
"(",
"fail_minions",
",",
"six",
".",
"string_types",
")",
":",
"fail_minions",
"=",
"[",
"minion",
".",
"strip",
"(",
")",
"for",
"minion",
"in",
"fail_minions",
".",
"split",
"(",
"','",
")",
"]",
"elif",
"not",
"isinstance",
"(",
"fail_minions",
",",
"list",
")",
":",
"func_ret",
".",
"setdefault",
"(",
"'warnings'",
",",
"[",
"]",
")",
".",
"append",
"(",
"'\\'fail_minions\\' needs to be a list or a comma separated '",
"'string. Ignored.'",
")",
"fail_minions",
"=",
"(",
")",
"for",
"minion",
",",
"mdata",
"in",
"six",
".",
"iteritems",
"(",
"cmd_ret",
")",
":",
"m_ret",
"=",
"False",
"if",
"mdata",
".",
"get",
"(",
"'retcode'",
")",
":",
"func_ret",
"[",
"'result'",
"]",
"=",
"False",
"fail",
".",
"add",
"(",
"minion",
")",
"if",
"mdata",
".",
"get",
"(",
"'failed'",
",",
"False",
")",
":",
"m_func",
"=",
"False",
"else",
":",
"if",
"'return'",
"in",
"mdata",
"and",
"'ret'",
"not",
"in",
"mdata",
":",
"mdata",
"[",
"'ret'",
"]",
"=",
"mdata",
".",
"pop",
"(",
"'return'",
")",
"m_ret",
"=",
"mdata",
"[",
"'ret'",
"]",
"m_func",
"=",
"(",
"not",
"fail_function",
"and",
"True",
")",
"or",
"__salt__",
"[",
"fail_function",
"]",
"(",
"m_ret",
")",
"if",
"m_ret",
"is",
"False",
":",
"m_func",
"=",
"False",
"if",
"not",
"m_func",
":",
"if",
"minion",
"not",
"in",
"fail_minions",
":",
"fail",
".",
"add",
"(",
"minion",
")",
"changes",
"[",
"minion",
"]",
"=",
"m_ret",
"if",
"not",
"cmd_ret",
":",
"func_ret",
"[",
"'result'",
"]",
"=",
"False",
"func_ret",
"[",
"'command'",
"]",
"=",
"'No minions responded'",
"else",
":",
"if",
"changes",
":",
"func_ret",
"[",
"'changes'",
"]",
"=",
"{",
"'out'",
":",
"'highstate'",
",",
"'ret'",
":",
"changes",
"}",
"if",
"fail",
":",
"func_ret",
"[",
"'result'",
"]",
"=",
"False",
"func_ret",
"[",
"'comment'",
"]",
"=",
"'Running function {0} failed on minions: {1}'",
".",
"format",
"(",
"name",
",",
"', '",
".",
"join",
"(",
"fail",
")",
")",
"else",
":",
"func_ret",
"[",
"'comment'",
"]",
"=",
"'Function ran successfully.'",
"if",
"changes",
":",
"func_ret",
"[",
"'comment'",
"]",
"+=",
"' Function {0} ran on {1}.'",
".",
"format",
"(",
"name",
",",
"', '",
".",
"join",
"(",
"changes",
")",
")",
"return",
"func_ret"
] | 29.346369 | 22.75419 |
def deprecated(reason, replacement, gone_in, issue=None):
# type: (str, Optional[str], Optional[str], Optional[int]) -> None
"""Helper to deprecate existing functionality.
reason:
Textual reason shown to the user about why this functionality has
been deprecated.
replacement:
Textual suggestion shown to the user about what alternative
functionality they can use.
gone_in:
The version of pip does this functionality should get removed in.
Raises errors if pip's current version is greater than or equal to
this.
issue:
Issue number on the tracker that would serve as a useful place for
users to find related discussion and provide feedback.
Always pass replacement, gone_in and issue as keyword arguments for clarity
at the call site.
"""
# Construct a nice message.
# This is purposely eagerly formatted as we want it to appear as if someone
# typed this entire message out.
message = "DEPRECATION: " + reason
if replacement is not None:
message += " A possible replacement is {}.".format(replacement)
if issue is not None:
url = "https://github.com/pypa/pip/issues/" + str(issue)
message += " You can find discussion regarding this at {}.".format(url)
# Raise as an error if it has to be removed.
if gone_in is not None and parse(current_version) >= parse(gone_in):
raise PipDeprecationWarning(message)
warnings.warn(message, category=PipDeprecationWarning, stacklevel=2) | [
"def",
"deprecated",
"(",
"reason",
",",
"replacement",
",",
"gone_in",
",",
"issue",
"=",
"None",
")",
":",
"# type: (str, Optional[str], Optional[str], Optional[int]) -> None",
"# Construct a nice message.",
"# This is purposely eagerly formatted as we want it to appear as if someone",
"# typed this entire message out.",
"message",
"=",
"\"DEPRECATION: \"",
"+",
"reason",
"if",
"replacement",
"is",
"not",
"None",
":",
"message",
"+=",
"\" A possible replacement is {}.\"",
".",
"format",
"(",
"replacement",
")",
"if",
"issue",
"is",
"not",
"None",
":",
"url",
"=",
"\"https://github.com/pypa/pip/issues/\"",
"+",
"str",
"(",
"issue",
")",
"message",
"+=",
"\" You can find discussion regarding this at {}.\"",
".",
"format",
"(",
"url",
")",
"# Raise as an error if it has to be removed.",
"if",
"gone_in",
"is",
"not",
"None",
"and",
"parse",
"(",
"current_version",
")",
">=",
"parse",
"(",
"gone_in",
")",
":",
"raise",
"PipDeprecationWarning",
"(",
"message",
")",
"warnings",
".",
"warn",
"(",
"message",
",",
"category",
"=",
"PipDeprecationWarning",
",",
"stacklevel",
"=",
"2",
")"
] | 42.166667 | 23.75 |
def _divf16(ins):
""" Divides 2 32bit (16.16) fixed point numbers. The result is pushed onto the stack.
Optimizations:
* If 2nd operand is 1, do nothing
* If 2nd operand is -1, do NEG32
"""
op1, op2 = tuple(ins.quad[2:])
if is_float(op2):
if float(op2) == 1:
output = _f16_oper(op1)
output.append('push de')
output.append('push hl')
return output
if float(op2) == -1:
return _negf(ins)
rev = not is_float(op1) and op1[0] != 't' and op2[0] == 't'
output = _f16_oper(op1, op2, reversed=rev)
output.append('call __DIVF16')
output.append('push de')
output.append('push hl')
REQUIRES.add('divf16.asm')
return output | [
"def",
"_divf16",
"(",
"ins",
")",
":",
"op1",
",",
"op2",
"=",
"tuple",
"(",
"ins",
".",
"quad",
"[",
"2",
":",
"]",
")",
"if",
"is_float",
"(",
"op2",
")",
":",
"if",
"float",
"(",
"op2",
")",
"==",
"1",
":",
"output",
"=",
"_f16_oper",
"(",
"op1",
")",
"output",
".",
"append",
"(",
"'push de'",
")",
"output",
".",
"append",
"(",
"'push hl'",
")",
"return",
"output",
"if",
"float",
"(",
"op2",
")",
"==",
"-",
"1",
":",
"return",
"_negf",
"(",
"ins",
")",
"rev",
"=",
"not",
"is_float",
"(",
"op1",
")",
"and",
"op1",
"[",
"0",
"]",
"!=",
"'t'",
"and",
"op2",
"[",
"0",
"]",
"==",
"'t'",
"output",
"=",
"_f16_oper",
"(",
"op1",
",",
"op2",
",",
"reversed",
"=",
"rev",
")",
"output",
".",
"append",
"(",
"'call __DIVF16'",
")",
"output",
".",
"append",
"(",
"'push de'",
")",
"output",
".",
"append",
"(",
"'push hl'",
")",
"REQUIRES",
".",
"add",
"(",
"'divf16.asm'",
")",
"return",
"output"
] | 26.25 | 16.607143 |
def _read_register(self, reg):
"""Read 16 bit register value."""
self.buf[0] = reg
with self.i2c_device as i2c:
i2c.write(self.buf, end=1, stop=False)
i2c.readinto(self.buf, end=2)
return self.buf[0] << 8 | self.buf[1] | [
"def",
"_read_register",
"(",
"self",
",",
"reg",
")",
":",
"self",
".",
"buf",
"[",
"0",
"]",
"=",
"reg",
"with",
"self",
".",
"i2c_device",
"as",
"i2c",
":",
"i2c",
".",
"write",
"(",
"self",
".",
"buf",
",",
"end",
"=",
"1",
",",
"stop",
"=",
"False",
")",
"i2c",
".",
"readinto",
"(",
"self",
".",
"buf",
",",
"end",
"=",
"2",
")",
"return",
"self",
".",
"buf",
"[",
"0",
"]",
"<<",
"8",
"|",
"self",
".",
"buf",
"[",
"1",
"]"
] | 38.285714 | 6.428571 |
def get_data_statistics(interpreted_files):
'''Quick and dirty function to give as redmine compatible iverview table
'''
print '| *File Name* | *File Size* | *Times Stamp* | *Events* | *Bad Events* | *Measurement time* | *# SR* | *Hits* |' # Mean Tot | Mean rel. BCID'
for interpreted_file in interpreted_files:
with tb.open_file(interpreted_file, mode="r") as in_file_h5: # open the actual hit file
n_hits = np.sum(in_file_h5.root.HistOcc[:])
measurement_time = int(in_file_h5.root.meta_data[-1]['timestamp_stop'] - in_file_h5.root.meta_data[0]['timestamp_start'])
# mean_tot = np.average(in_file_h5.root.HistTot[:], weights=range(0,16) * np.sum(range(0,16)))# / in_file_h5.root.HistTot[:].shape[0]
# mean_bcid = np.average(in_file_h5.root.HistRelBcid[:], weights=range(0,16))
n_sr = np.sum(in_file_h5.root.HistServiceRecord[:])
n_bad_events = int(np.sum(in_file_h5.root.HistErrorCounter[2:]))
try:
n_events = str(in_file_h5.root.Hits[-1]['event_number'] + 1)
except tb.NoSuchNodeError:
n_events = '~' + str(in_file_h5.root.meta_data[-1]['event_number'] + (in_file_h5.root.meta_data[-1]['event_number'] - in_file_h5.root.meta_data[-2]['event_number']))
else:
print '|', os.path.basename(interpreted_file), '|', int(os.path.getsize(interpreted_file) / (1024.0 * 1024.0)), 'Mb |', time.ctime(os.path.getctime(interpreted_file)), '|', n_events, '|', n_bad_events, '|', measurement_time, 's |', n_sr, '|', n_hits, '|' | [
"def",
"get_data_statistics",
"(",
"interpreted_files",
")",
":",
"print",
"'| *File Name* | *File Size* | *Times Stamp* | *Events* | *Bad Events* | *Measurement time* | *# SR* | *Hits* |'",
"# Mean Tot | Mean rel. BCID'",
"for",
"interpreted_file",
"in",
"interpreted_files",
":",
"with",
"tb",
".",
"open_file",
"(",
"interpreted_file",
",",
"mode",
"=",
"\"r\"",
")",
"as",
"in_file_h5",
":",
"# open the actual hit file",
"n_hits",
"=",
"np",
".",
"sum",
"(",
"in_file_h5",
".",
"root",
".",
"HistOcc",
"[",
":",
"]",
")",
"measurement_time",
"=",
"int",
"(",
"in_file_h5",
".",
"root",
".",
"meta_data",
"[",
"-",
"1",
"]",
"[",
"'timestamp_stop'",
"]",
"-",
"in_file_h5",
".",
"root",
".",
"meta_data",
"[",
"0",
"]",
"[",
"'timestamp_start'",
"]",
")",
"# mean_tot = np.average(in_file_h5.root.HistTot[:], weights=range(0,16) * np.sum(range(0,16)))# / in_file_h5.root.HistTot[:].shape[0]",
"# mean_bcid = np.average(in_file_h5.root.HistRelBcid[:], weights=range(0,16))",
"n_sr",
"=",
"np",
".",
"sum",
"(",
"in_file_h5",
".",
"root",
".",
"HistServiceRecord",
"[",
":",
"]",
")",
"n_bad_events",
"=",
"int",
"(",
"np",
".",
"sum",
"(",
"in_file_h5",
".",
"root",
".",
"HistErrorCounter",
"[",
"2",
":",
"]",
")",
")",
"try",
":",
"n_events",
"=",
"str",
"(",
"in_file_h5",
".",
"root",
".",
"Hits",
"[",
"-",
"1",
"]",
"[",
"'event_number'",
"]",
"+",
"1",
")",
"except",
"tb",
".",
"NoSuchNodeError",
":",
"n_events",
"=",
"'~'",
"+",
"str",
"(",
"in_file_h5",
".",
"root",
".",
"meta_data",
"[",
"-",
"1",
"]",
"[",
"'event_number'",
"]",
"+",
"(",
"in_file_h5",
".",
"root",
".",
"meta_data",
"[",
"-",
"1",
"]",
"[",
"'event_number'",
"]",
"-",
"in_file_h5",
".",
"root",
".",
"meta_data",
"[",
"-",
"2",
"]",
"[",
"'event_number'",
"]",
")",
")",
"else",
":",
"print",
"'|'",
",",
"os",
".",
"path",
".",
"basename",
"(",
"interpreted_file",
")",
",",
"'|'",
",",
"int",
"(",
"os",
".",
"path",
".",
"getsize",
"(",
"interpreted_file",
")",
"/",
"(",
"1024.0",
"*",
"1024.0",
")",
")",
",",
"'Mb |'",
",",
"time",
".",
"ctime",
"(",
"os",
".",
"path",
".",
"getctime",
"(",
"interpreted_file",
")",
")",
",",
"'|'",
",",
"n_events",
",",
"'|'",
",",
"n_bad_events",
",",
"'|'",
",",
"measurement_time",
",",
"'s |'",
",",
"n_sr",
",",
"'|'",
",",
"n_hits",
",",
"'|'"
] | 87.722222 | 56.833333 |
def GetClientsForHashes(cls, hashes, token=None, age=aff4.NEWEST_TIME):
"""Yields (hash, client_files) pairs for all the specified hashes.
Args:
hashes: List of RDFURN's.
token: Security token.
age: AFF4 age specification. Only get hits corresponding to the given age
spec. Should be aff4.NEWEST_TIME or a time range given as a tuple
(start, end) in microseconds since Jan 1st, 1970. If just a microseconds
value is given it's treated as the higher end of the range, i.e. (0,
age). See aff4.FACTORY.ParseAgeSpecification for details.
Yields:
(hash, client_files) tuples, where hash is a FileStoreHash instance and
client_files is a list of RDFURN's corresponding to client files that
have the hash.
Raises:
ValueError: if age was set to aff4.ALL_TIMES.
"""
if age == aff4.ALL_TIMES:
raise ValueError("age==aff4.ALL_TIMES is not supported.")
timestamp = aff4.FACTORY.ParseAgeSpecification(age)
index_objects = list(aff4.FACTORY.MultiOpen(hashes, token=token))
index_locations = {}
for o in index_objects:
index_locations.setdefault(o.urn, []).append(o.symlink_urn)
for hash_obj, client_files in data_store.DB.FileHashIndexQueryMultiple(
index_locations, timestamp=timestamp):
symlinks = index_locations[hash_obj]
for original_hash in symlinks:
hash_obj = original_hash or hash_obj
yield (FileStoreHash(hash_obj), client_files) | [
"def",
"GetClientsForHashes",
"(",
"cls",
",",
"hashes",
",",
"token",
"=",
"None",
",",
"age",
"=",
"aff4",
".",
"NEWEST_TIME",
")",
":",
"if",
"age",
"==",
"aff4",
".",
"ALL_TIMES",
":",
"raise",
"ValueError",
"(",
"\"age==aff4.ALL_TIMES is not supported.\"",
")",
"timestamp",
"=",
"aff4",
".",
"FACTORY",
".",
"ParseAgeSpecification",
"(",
"age",
")",
"index_objects",
"=",
"list",
"(",
"aff4",
".",
"FACTORY",
".",
"MultiOpen",
"(",
"hashes",
",",
"token",
"=",
"token",
")",
")",
"index_locations",
"=",
"{",
"}",
"for",
"o",
"in",
"index_objects",
":",
"index_locations",
".",
"setdefault",
"(",
"o",
".",
"urn",
",",
"[",
"]",
")",
".",
"append",
"(",
"o",
".",
"symlink_urn",
")",
"for",
"hash_obj",
",",
"client_files",
"in",
"data_store",
".",
"DB",
".",
"FileHashIndexQueryMultiple",
"(",
"index_locations",
",",
"timestamp",
"=",
"timestamp",
")",
":",
"symlinks",
"=",
"index_locations",
"[",
"hash_obj",
"]",
"for",
"original_hash",
"in",
"symlinks",
":",
"hash_obj",
"=",
"original_hash",
"or",
"hash_obj",
"yield",
"(",
"FileStoreHash",
"(",
"hash_obj",
")",
",",
"client_files",
")"
] | 43 | 22.735294 |
def _process_tasks(self, task_queue, rmgr, logger, mq_hostname, port, local_prof, sid):
'''
**Purpose**: The new thread that gets spawned by the main tmgr process invokes this function. This
function receives tasks from 'task_queue' and submits them to the RADICAL Pilot RTS.
'''
placeholder_dict = dict()
def load_placeholder(task, rts_uid):
parent_pipeline = str(task.parent_pipeline['name'])
parent_stage = str(task.parent_stage['name'])
if parent_pipeline not in placeholder_dict:
placeholder_dict[parent_pipeline] = dict()
if parent_stage not in placeholder_dict[parent_pipeline]:
placeholder_dict[parent_pipeline][parent_stage] = dict()
if None not in [parent_pipeline, parent_stage, task.name]:
placeholder_dict[parent_pipeline][parent_stage][str(task.name)] = {'path': str(task.path),
'rts_uid': rts_uid}
def unit_state_cb(unit, state):
try:
logger.debug('Unit %s in state %s' % (unit.uid, unit.state))
if unit.state in rp.FINAL:
# Acquire a connection+channel to the rmq server
mq_connection = pika.BlockingConnection(
pika.ConnectionParameters(host=mq_hostname, port=port))
mq_channel = mq_connection.channel()
task = None
task = create_task_from_cu(unit, local_prof)
transition(obj=task,
obj_type='Task',
new_state=states.COMPLETED,
channel=mq_channel,
queue='%s-cb-to-sync' % sid,
profiler=local_prof,
logger=logger)
load_placeholder(task, unit.uid)
task_as_dict = json.dumps(task.to_dict())
mq_channel.basic_publish(exchange='',
routing_key='%s-completedq-1' % sid,
body=task_as_dict
# properties=pika.BasicProperties(
# make message persistent
# delivery_mode = 2,
# )
)
logger.info('Pushed task %s with state %s to completed queue %s-completedq-1' % (task.uid, task.state,
sid))
mq_connection.close()
except KeyboardInterrupt:
logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' +
'trying to exit callback thread gracefully...')
raise KeyboardInterrupt
except Exception, ex:
logger.exception('Error in RP callback thread: %s' % ex)
umgr = rp.UnitManager(session=rmgr._session)
umgr.add_pilots(rmgr.pilot)
umgr.register_callback(unit_state_cb)
try:
while not self._tmgr_terminate.is_set():
body = None
try:
body = task_queue.get(block=True, timeout=10)
except Queue.Empty:
# Ignore empty exception, we don't always have new tasks to run
pass
if body:
task_queue.task_done()
bulk_tasks = list()
bulk_cuds = list()
for task in body:
t = Task()
t.from_dict(task)
bulk_tasks.append(t)
bulk_cuds.append(create_cud_from_task(
t, placeholder_dict, local_prof))
mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=mq_hostname, port=port))
mq_channel = mq_connection.channel()
transition(obj=t,
obj_type='Task',
new_state=states.SUBMITTING,
channel=mq_channel,
queue='%s-tmgr-to-sync' % sid,
profiler=local_prof,
logger=logger)
mq_connection.close()
umgr.submit_units(bulk_cuds)
for task in bulk_tasks:
mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=mq_hostname, port=port))
mq_channel = mq_connection.channel()
transition(obj=task,
obj_type='Task',
new_state=states.SUBMITTED,
channel=mq_channel,
queue='%s-tmgr-to-sync' % sid,
profiler=local_prof,
logger=logger)
mq_connection.close()
except KeyboardInterrupt as ex:
logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' +
'trying to cancel task processor gracefully...')
except Exception as ex:
logger.exception('%s failed with %s'%(self._uid, ex))
raise EnTKError(ex) | [
"def",
"_process_tasks",
"(",
"self",
",",
"task_queue",
",",
"rmgr",
",",
"logger",
",",
"mq_hostname",
",",
"port",
",",
"local_prof",
",",
"sid",
")",
":",
"placeholder_dict",
"=",
"dict",
"(",
")",
"def",
"load_placeholder",
"(",
"task",
",",
"rts_uid",
")",
":",
"parent_pipeline",
"=",
"str",
"(",
"task",
".",
"parent_pipeline",
"[",
"'name'",
"]",
")",
"parent_stage",
"=",
"str",
"(",
"task",
".",
"parent_stage",
"[",
"'name'",
"]",
")",
"if",
"parent_pipeline",
"not",
"in",
"placeholder_dict",
":",
"placeholder_dict",
"[",
"parent_pipeline",
"]",
"=",
"dict",
"(",
")",
"if",
"parent_stage",
"not",
"in",
"placeholder_dict",
"[",
"parent_pipeline",
"]",
":",
"placeholder_dict",
"[",
"parent_pipeline",
"]",
"[",
"parent_stage",
"]",
"=",
"dict",
"(",
")",
"if",
"None",
"not",
"in",
"[",
"parent_pipeline",
",",
"parent_stage",
",",
"task",
".",
"name",
"]",
":",
"placeholder_dict",
"[",
"parent_pipeline",
"]",
"[",
"parent_stage",
"]",
"[",
"str",
"(",
"task",
".",
"name",
")",
"]",
"=",
"{",
"'path'",
":",
"str",
"(",
"task",
".",
"path",
")",
",",
"'rts_uid'",
":",
"rts_uid",
"}",
"def",
"unit_state_cb",
"(",
"unit",
",",
"state",
")",
":",
"try",
":",
"logger",
".",
"debug",
"(",
"'Unit %s in state %s'",
"%",
"(",
"unit",
".",
"uid",
",",
"unit",
".",
"state",
")",
")",
"if",
"unit",
".",
"state",
"in",
"rp",
".",
"FINAL",
":",
"# Acquire a connection+channel to the rmq server",
"mq_connection",
"=",
"pika",
".",
"BlockingConnection",
"(",
"pika",
".",
"ConnectionParameters",
"(",
"host",
"=",
"mq_hostname",
",",
"port",
"=",
"port",
")",
")",
"mq_channel",
"=",
"mq_connection",
".",
"channel",
"(",
")",
"task",
"=",
"None",
"task",
"=",
"create_task_from_cu",
"(",
"unit",
",",
"local_prof",
")",
"transition",
"(",
"obj",
"=",
"task",
",",
"obj_type",
"=",
"'Task'",
",",
"new_state",
"=",
"states",
".",
"COMPLETED",
",",
"channel",
"=",
"mq_channel",
",",
"queue",
"=",
"'%s-cb-to-sync'",
"%",
"sid",
",",
"profiler",
"=",
"local_prof",
",",
"logger",
"=",
"logger",
")",
"load_placeholder",
"(",
"task",
",",
"unit",
".",
"uid",
")",
"task_as_dict",
"=",
"json",
".",
"dumps",
"(",
"task",
".",
"to_dict",
"(",
")",
")",
"mq_channel",
".",
"basic_publish",
"(",
"exchange",
"=",
"''",
",",
"routing_key",
"=",
"'%s-completedq-1'",
"%",
"sid",
",",
"body",
"=",
"task_as_dict",
"# properties=pika.BasicProperties(",
"# make message persistent",
"# delivery_mode = 2,",
"# )",
")",
"logger",
".",
"info",
"(",
"'Pushed task %s with state %s to completed queue %s-completedq-1'",
"%",
"(",
"task",
".",
"uid",
",",
"task",
".",
"state",
",",
"sid",
")",
")",
"mq_connection",
".",
"close",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"logger",
".",
"exception",
"(",
"'Execution interrupted by user (you probably hit Ctrl+C), '",
"+",
"'trying to exit callback thread gracefully...'",
")",
"raise",
"KeyboardInterrupt",
"except",
"Exception",
",",
"ex",
":",
"logger",
".",
"exception",
"(",
"'Error in RP callback thread: %s'",
"%",
"ex",
")",
"umgr",
"=",
"rp",
".",
"UnitManager",
"(",
"session",
"=",
"rmgr",
".",
"_session",
")",
"umgr",
".",
"add_pilots",
"(",
"rmgr",
".",
"pilot",
")",
"umgr",
".",
"register_callback",
"(",
"unit_state_cb",
")",
"try",
":",
"while",
"not",
"self",
".",
"_tmgr_terminate",
".",
"is_set",
"(",
")",
":",
"body",
"=",
"None",
"try",
":",
"body",
"=",
"task_queue",
".",
"get",
"(",
"block",
"=",
"True",
",",
"timeout",
"=",
"10",
")",
"except",
"Queue",
".",
"Empty",
":",
"# Ignore empty exception, we don't always have new tasks to run",
"pass",
"if",
"body",
":",
"task_queue",
".",
"task_done",
"(",
")",
"bulk_tasks",
"=",
"list",
"(",
")",
"bulk_cuds",
"=",
"list",
"(",
")",
"for",
"task",
"in",
"body",
":",
"t",
"=",
"Task",
"(",
")",
"t",
".",
"from_dict",
"(",
"task",
")",
"bulk_tasks",
".",
"append",
"(",
"t",
")",
"bulk_cuds",
".",
"append",
"(",
"create_cud_from_task",
"(",
"t",
",",
"placeholder_dict",
",",
"local_prof",
")",
")",
"mq_connection",
"=",
"pika",
".",
"BlockingConnection",
"(",
"pika",
".",
"ConnectionParameters",
"(",
"host",
"=",
"mq_hostname",
",",
"port",
"=",
"port",
")",
")",
"mq_channel",
"=",
"mq_connection",
".",
"channel",
"(",
")",
"transition",
"(",
"obj",
"=",
"t",
",",
"obj_type",
"=",
"'Task'",
",",
"new_state",
"=",
"states",
".",
"SUBMITTING",
",",
"channel",
"=",
"mq_channel",
",",
"queue",
"=",
"'%s-tmgr-to-sync'",
"%",
"sid",
",",
"profiler",
"=",
"local_prof",
",",
"logger",
"=",
"logger",
")",
"mq_connection",
".",
"close",
"(",
")",
"umgr",
".",
"submit_units",
"(",
"bulk_cuds",
")",
"for",
"task",
"in",
"bulk_tasks",
":",
"mq_connection",
"=",
"pika",
".",
"BlockingConnection",
"(",
"pika",
".",
"ConnectionParameters",
"(",
"host",
"=",
"mq_hostname",
",",
"port",
"=",
"port",
")",
")",
"mq_channel",
"=",
"mq_connection",
".",
"channel",
"(",
")",
"transition",
"(",
"obj",
"=",
"task",
",",
"obj_type",
"=",
"'Task'",
",",
"new_state",
"=",
"states",
".",
"SUBMITTED",
",",
"channel",
"=",
"mq_channel",
",",
"queue",
"=",
"'%s-tmgr-to-sync'",
"%",
"sid",
",",
"profiler",
"=",
"local_prof",
",",
"logger",
"=",
"logger",
")",
"mq_connection",
".",
"close",
"(",
")",
"except",
"KeyboardInterrupt",
"as",
"ex",
":",
"logger",
".",
"exception",
"(",
"'Execution interrupted by user (you probably hit Ctrl+C), '",
"+",
"'trying to cancel task processor gracefully...'",
")",
"except",
"Exception",
"as",
"ex",
":",
"logger",
".",
"exception",
"(",
"'%s failed with %s'",
"%",
"(",
"self",
".",
"_uid",
",",
"ex",
")",
")",
"raise",
"EnTKError",
"(",
"ex",
")"
] | 39.347222 | 26.625 |
def memory_pour(buffer_, *args, **kwargs):
"""Yield data from entries."""
def opener(archive_res):
_LOGGER.debug("Opening from (%d) bytes (memory_pour).", len(buffer_))
_archive_read_open_memory(archive_res, buffer_)
return _pour(opener, *args, flags=0, **kwargs) | [
"def",
"memory_pour",
"(",
"buffer_",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"opener",
"(",
"archive_res",
")",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Opening from (%d) bytes (memory_pour).\"",
",",
"len",
"(",
"buffer_",
")",
")",
"_archive_read_open_memory",
"(",
"archive_res",
",",
"buffer_",
")",
"return",
"_pour",
"(",
"opener",
",",
"*",
"args",
",",
"flags",
"=",
"0",
",",
"*",
"*",
"kwargs",
")"
] | 35.75 | 19.5 |
def create_jar(jar_file, entries):
"""
Create JAR from given entries.
:param jar_file: filename of the created JAR
:type jar_file: str
:param entries: files to put into the JAR
:type entries: list[str]
:return: None
"""
# 'jar' adds separate entries for directories, also for empty ones.
with ZipFile(jar_file, "w") as jar:
jar.writestr("META-INF/", "")
jar.writestr("META-INF/MANIFEST.MF", Manifest().get_data())
for entry in entries:
jar.write(entry)
if os.path.isdir(entry):
for root, dirs, files in os.walk(entry):
for filename in dirs + files:
jar.write(os.path.join(root, filename)) | [
"def",
"create_jar",
"(",
"jar_file",
",",
"entries",
")",
":",
"# 'jar' adds separate entries for directories, also for empty ones.",
"with",
"ZipFile",
"(",
"jar_file",
",",
"\"w\"",
")",
"as",
"jar",
":",
"jar",
".",
"writestr",
"(",
"\"META-INF/\"",
",",
"\"\"",
")",
"jar",
".",
"writestr",
"(",
"\"META-INF/MANIFEST.MF\"",
",",
"Manifest",
"(",
")",
".",
"get_data",
"(",
")",
")",
"for",
"entry",
"in",
"entries",
":",
"jar",
".",
"write",
"(",
"entry",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"entry",
")",
":",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"entry",
")",
":",
"for",
"filename",
"in",
"dirs",
"+",
"files",
":",
"jar",
".",
"write",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"filename",
")",
")"
] | 35.9 | 12.7 |
def _ParseExample(self, example_features, example_feature_lists, entries,
                  index):
  """Parses data from an example, populating a dictionary of feature values.

  Args:
    example_features: A map of strings to tf.Features from the example.
    example_feature_lists: A map of strings to tf.FeatureLists from the
        example.
    entries: A dictionary of all features parsed thus far and arrays of their
        values. This is mutated by the function.
    index: The index of the example to parse from a list of examples.

  Raises:
    TypeError: Raises an exception when a feature has inconsistent types
        across examples.
  """
  features_seen = set()
  # Ordered (proto field name, statistics type) pairs.  The order mirrors the
  # original elif chain: float first, then bytes, then int64.  Deduplicates
  # the previously triplicated per-type branches.
  type_fields = (('float_list', self.fs_proto.FLOAT),
                 ('bytes_list', self.fs_proto.STRING),
                 ('int64_list', self.fs_proto.INT))
  for feature_list, is_feature in zip(
      [example_features, example_feature_lists], [True, False]):
    sequence_length = None
    for feature_name in feature_list:
      # If this feature has not been seen in previous examples, then
      # initialize its entry into the entries dictionary.  'missing' starts
      # at the current index: the feature was absent from all earlier
      # examples.
      if feature_name not in entries:
        entries[feature_name] = {
            'vals': [],
            'counts': [],
            'feat_lens': [],
            'missing': index
        }
      feature_entry = entries[feature_name]
      feature = feature_list[feature_name]
      value_type = None
      value_list = []
      if is_feature:
        # If parsing a tf.Feature, extract the type and values simply.
        for field_name, stats_type in type_fields:
          if feature.HasField(field_name):
            value_list = getattr(feature, field_name).value
            value_type = stats_type
            break
      else:
        # If parsing a tf.FeatureList, get the type from the first inner
        # Feature and concatenate the values of every inner Feature.
        sequence_length = len(feature.feature)
        if sequence_length != 0:
          for field_name, stats_type in type_fields:
            if feature.feature[0].HasField(field_name):
              for feat in feature.feature:
                value_list.extend(getattr(feat, field_name).value)
              value_type = stats_type
              break
      if value_type is not None:
        if 'type' not in feature_entry:
          feature_entry['type'] = value_type
        elif feature_entry['type'] != value_type:
          raise TypeError('type mismatch for feature ' + feature_name)
      feature_entry['counts'].append(len(value_list))
      feature_entry['vals'].extend(value_list)
      if sequence_length is not None:
        feature_entry['feat_lens'].append(sequence_length)
      if value_list:
        features_seen.add(feature_name)
  # For all previously-seen features not found in this example, update the
  # feature's missing value.
  for f in entries:
    fv = entries[f]
    if f not in features_seen:
      fv['missing'] += 1
"def",
"_ParseExample",
"(",
"self",
",",
"example_features",
",",
"example_feature_lists",
",",
"entries",
",",
"index",
")",
":",
"features_seen",
"=",
"set",
"(",
")",
"for",
"feature_list",
",",
"is_feature",
"in",
"zip",
"(",
"[",
"example_features",
",",
"example_feature_lists",
"]",
",",
"[",
"True",
",",
"False",
"]",
")",
":",
"sequence_length",
"=",
"None",
"for",
"feature_name",
"in",
"feature_list",
":",
"# If this feature has not been seen in previous examples, then",
"# initialize its entry into the entries dictionary.",
"if",
"feature_name",
"not",
"in",
"entries",
":",
"entries",
"[",
"feature_name",
"]",
"=",
"{",
"'vals'",
":",
"[",
"]",
",",
"'counts'",
":",
"[",
"]",
",",
"'feat_lens'",
":",
"[",
"]",
",",
"'missing'",
":",
"index",
"}",
"feature_entry",
"=",
"entries",
"[",
"feature_name",
"]",
"feature",
"=",
"feature_list",
"[",
"feature_name",
"]",
"value_type",
"=",
"None",
"value_list",
"=",
"[",
"]",
"if",
"is_feature",
":",
"# If parsing a tf.Feature, extract the type and values simply.",
"if",
"feature",
".",
"HasField",
"(",
"'float_list'",
")",
":",
"value_list",
"=",
"feature",
".",
"float_list",
".",
"value",
"value_type",
"=",
"self",
".",
"fs_proto",
".",
"FLOAT",
"elif",
"feature",
".",
"HasField",
"(",
"'bytes_list'",
")",
":",
"value_list",
"=",
"feature",
".",
"bytes_list",
".",
"value",
"value_type",
"=",
"self",
".",
"fs_proto",
".",
"STRING",
"elif",
"feature",
".",
"HasField",
"(",
"'int64_list'",
")",
":",
"value_list",
"=",
"feature",
".",
"int64_list",
".",
"value",
"value_type",
"=",
"self",
".",
"fs_proto",
".",
"INT",
"else",
":",
"# If parsing a tf.FeatureList, get the type and values by iterating",
"# over all Features in the FeatureList.",
"sequence_length",
"=",
"len",
"(",
"feature",
".",
"feature",
")",
"if",
"sequence_length",
"!=",
"0",
"and",
"feature",
".",
"feature",
"[",
"0",
"]",
".",
"HasField",
"(",
"'float_list'",
")",
":",
"for",
"feat",
"in",
"feature",
".",
"feature",
":",
"for",
"value",
"in",
"feat",
".",
"float_list",
".",
"value",
":",
"value_list",
".",
"append",
"(",
"value",
")",
"value_type",
"=",
"self",
".",
"fs_proto",
".",
"FLOAT",
"elif",
"sequence_length",
"!=",
"0",
"and",
"feature",
".",
"feature",
"[",
"0",
"]",
".",
"HasField",
"(",
"'bytes_list'",
")",
":",
"for",
"feat",
"in",
"feature",
".",
"feature",
":",
"for",
"value",
"in",
"feat",
".",
"bytes_list",
".",
"value",
":",
"value_list",
".",
"append",
"(",
"value",
")",
"value_type",
"=",
"self",
".",
"fs_proto",
".",
"STRING",
"elif",
"sequence_length",
"!=",
"0",
"and",
"feature",
".",
"feature",
"[",
"0",
"]",
".",
"HasField",
"(",
"'int64_list'",
")",
":",
"for",
"feat",
"in",
"feature",
".",
"feature",
":",
"for",
"value",
"in",
"feat",
".",
"int64_list",
".",
"value",
":",
"value_list",
".",
"append",
"(",
"value",
")",
"value_type",
"=",
"self",
".",
"fs_proto",
".",
"INT",
"if",
"value_type",
"is",
"not",
"None",
":",
"if",
"'type'",
"not",
"in",
"feature_entry",
":",
"feature_entry",
"[",
"'type'",
"]",
"=",
"value_type",
"elif",
"feature_entry",
"[",
"'type'",
"]",
"!=",
"value_type",
":",
"raise",
"TypeError",
"(",
"'type mismatch for feature '",
"+",
"feature_name",
")",
"feature_entry",
"[",
"'counts'",
"]",
".",
"append",
"(",
"len",
"(",
"value_list",
")",
")",
"feature_entry",
"[",
"'vals'",
"]",
".",
"extend",
"(",
"value_list",
")",
"if",
"sequence_length",
"is",
"not",
"None",
":",
"feature_entry",
"[",
"'feat_lens'",
"]",
".",
"append",
"(",
"sequence_length",
")",
"if",
"value_list",
":",
"features_seen",
".",
"add",
"(",
"feature_name",
")",
"# For all previously-seen features not found in this example, update the",
"# feature's missing value.",
"for",
"f",
"in",
"entries",
":",
"fv",
"=",
"entries",
"[",
"f",
"]",
"if",
"f",
"not",
"in",
"features_seen",
":",
"fv",
"[",
"'missing'",
"]",
"+=",
"1"
] | 40.609195 | 15.563218 |
def convert_to_sequences(dataset, vocab):
    """Convert every sample in *dataset* into a sequence.

    The work is fanned out over a multiprocessing pool; each worker gets a
    ``(sample, vocab)`` pair.  Elapsed time and the number of processed
    sentences are logged when done.
    """
    start = time.time()
    jobs = [(sample, vocab) for sample in dataset]
    with mp.Pool() as pool:
        # Each sample is processed in an asynchronous manner.
        output = pool.map(get_sequence, jobs)
    end = time.time()
    logging.info('Done! Sequence conversion Time={:.2f}s, #Sentences={}'
                 .format(end - start, len(dataset)))
    return output
"def",
"convert_to_sequences",
"(",
"dataset",
",",
"vocab",
")",
":",
"start",
"=",
"time",
".",
"time",
"(",
")",
"dataset_vocab",
"=",
"map",
"(",
"lambda",
"x",
":",
"(",
"x",
",",
"vocab",
")",
",",
"dataset",
")",
"with",
"mp",
".",
"Pool",
"(",
")",
"as",
"pool",
":",
"# Each sample is processed in an asynchronous manner.",
"output",
"=",
"pool",
".",
"map",
"(",
"get_sequence",
",",
"dataset_vocab",
")",
"end",
"=",
"time",
".",
"time",
"(",
")",
"logging",
".",
"info",
"(",
"'Done! Sequence conversion Time={:.2f}s, #Sentences={}'",
".",
"format",
"(",
"end",
"-",
"start",
",",
"len",
"(",
"dataset",
")",
")",
")",
"return",
"output"
] | 39.923077 | 12.846154 |
def cache_data_model(self, raw):
        """
        Cache the data model json.

        Take data returned by a requests.get call to Earthref and write it,
        re-serialized, to the cached data-model file location.

        Parameters
        ----------
        raw: requests.models.Response
        """
        output_json = json.loads(raw.content)
        output_file = self.find_cached_dm()
        # BUG FIX: the file object passed to json.dump was never closed,
        # leaking the descriptor and risking unflushed data.  A context
        # manager guarantees the handle is flushed and released.
        with open(output_file, 'w+') as fobj:
            json.dump(output_json, fobj)
"def",
"cache_data_model",
"(",
"self",
",",
"raw",
")",
":",
"output_json",
"=",
"json",
".",
"loads",
"(",
"raw",
".",
"content",
")",
"output_file",
"=",
"self",
".",
"find_cached_dm",
"(",
")",
"json",
".",
"dump",
"(",
"output_json",
",",
"open",
"(",
"output_file",
",",
"'w+'",
")",
")"
] | 28.153846 | 14.307692 |
def get(self, filter=None, order_by=None, group_by=None, page=None, page_size=None, query_parameters=None, commit=True, async_=False, callback=None):
        """ Fetch objects and directly return them

        Note:
            `get` won't put the fetched objects in the parent's children list.
            You cannot override this behavior. If you want to commit them in the parent
            you can use :method:vsdk.NURESTFetcher.fetch or manually add the list with
            :method:vsdk.NURESTObject.add_child

        Args:
            filter (string): string that represents a predicate filter
            order_by (string): string that represents an order by clause
            group_by (string): list of names for grouping
            page (int): number of the page to load
            page_size (int): number of results per page
            commit (bool): boolean to update current object
            async_ (bool): accepted for API symmetry; not used by this method
            callback (function): Callback that should be called in case of a async request

        Returns:
            list: list of vsdk.NURESTObject if any

        Example:
            >>> print entity.children.get()
            [<NUChildren at xxx>, <NUChildren at yyyy>, <NUChildren at zzz>]
        """
        # BUG FIX: `async` is a reserved keyword since Python 3.7, so the old
        # parameter spelling made this definition a SyntaxError; it is renamed
        # to `async_`.  `group_by` also used a shared mutable default ([]),
        # replaced by the None-sentinel idiom.
        if group_by is None:
            group_by = []
        return self.fetch(filter=filter, order_by=order_by, group_by=group_by, page=page, page_size=page_size, query_parameters=query_parameters, commit=commit)[2]
"def",
"get",
"(",
"self",
",",
"filter",
"=",
"None",
",",
"order_by",
"=",
"None",
",",
"group_by",
"=",
"[",
"]",
",",
"page",
"=",
"None",
",",
"page_size",
"=",
"None",
",",
"query_parameters",
"=",
"None",
",",
"commit",
"=",
"True",
",",
"async",
"=",
"False",
",",
"callback",
"=",
"None",
")",
":",
"return",
"self",
".",
"fetch",
"(",
"filter",
"=",
"filter",
",",
"order_by",
"=",
"order_by",
",",
"group_by",
"=",
"group_by",
",",
"page",
"=",
"page",
",",
"page_size",
"=",
"page_size",
",",
"query_parameters",
"=",
"query_parameters",
",",
"commit",
"=",
"commit",
")",
"[",
"2",
"]"
] | 54.576923 | 34.269231 |
def getBackgroundRange(fitParams):
    '''
    Return (minimum, average, maximum) of the background peak.

    The upper bound is placed as far above the average as possible
    (up to 4 standard deviations) while staying below the minimum of
    the signal peak.
    '''
    smn, _, _ = getSignalParameters(fitParams)
    bg = fitParams[0]
    _, avg, std = bg
    bgmn = max(0, avg - 3 * std)
    # BUG FIX: the original used three independent `if` statements, so the
    # first two assignments were always overwritten by the last `if/else`
    # (for std >= 0 the 2*std condition is implied by the 4*std one).  An
    # `elif` chain makes the widest margin that still fits actually win.
    if avg + 4 * std < smn:
        bgmx = avg + 4 * std
    elif avg + 3 * std < smn:
        bgmx = avg + 3 * std
    elif avg + 2 * std < smn:
        bgmx = avg + 2 * std
    else:
        bgmx = avg + std
    return bgmn, avg, bgmx
"def",
"getBackgroundRange",
"(",
"fitParams",
")",
":",
"smn",
",",
"_",
",",
"_",
"=",
"getSignalParameters",
"(",
"fitParams",
")",
"bg",
"=",
"fitParams",
"[",
"0",
"]",
"_",
",",
"avg",
",",
"std",
"=",
"bg",
"bgmn",
"=",
"max",
"(",
"0",
",",
"avg",
"-",
"3",
"*",
"std",
")",
"if",
"avg",
"+",
"4",
"*",
"std",
"<",
"smn",
":",
"bgmx",
"=",
"avg",
"+",
"4",
"*",
"std",
"if",
"avg",
"+",
"3",
"*",
"std",
"<",
"smn",
":",
"bgmx",
"=",
"avg",
"+",
"3",
"*",
"std",
"if",
"avg",
"+",
"2",
"*",
"std",
"<",
"smn",
":",
"bgmx",
"=",
"avg",
"+",
"2",
"*",
"std",
"else",
":",
"bgmx",
"=",
"avg",
"+",
"std",
"return",
"bgmn",
",",
"avg",
",",
"bgmx"
] | 24.631579 | 18.210526 |
def temp_dir(apply_chown=None, apply_chmod=None, remove_using_sudo=None, remove_force=False):
    """
    Creates a temporary directory on the remote machine. The directory is removed when no longer needed. Failure to do
    so will be ignored.

    :param apply_chown: Optional; change the owner of the directory.
    :type apply_chown: unicode
    :param apply_chmod: Optional; change the permissions of the directory.
    :type apply_chmod: unicode
    :param remove_using_sudo: Use sudo for removing the directory. ``None`` (default) means it is used depending on
      whether ``apply_chown`` has been set.
    :type remove_using_sudo: bool | NoneType
    :param remove_force: Force the removal.
    :type remove_force: bool
    :return: Path to the temporary directory.
    :rtype: unicode
    """
    path = get_remote_temp()
    try:
        if apply_chmod:
            run(chmod(apply_chmod, path))
        if apply_chown:
            # Ownership changed via sudo, so default to sudo for removal too.
            remove_using_sudo = True if remove_using_sudo is None else remove_using_sudo
            sudo(chown(apply_chown, path))
        yield path
    finally:
        # Best-effort cleanup; failures are swallowed by remove_ignore.
        remove_ignore(path, use_sudo=remove_using_sudo, force=remove_force)
"def",
"temp_dir",
"(",
"apply_chown",
"=",
"None",
",",
"apply_chmod",
"=",
"None",
",",
"remove_using_sudo",
"=",
"None",
",",
"remove_force",
"=",
"False",
")",
":",
"path",
"=",
"get_remote_temp",
"(",
")",
"try",
":",
"if",
"apply_chmod",
":",
"run",
"(",
"chmod",
"(",
"apply_chmod",
",",
"path",
")",
")",
"if",
"apply_chown",
":",
"if",
"remove_using_sudo",
"is",
"None",
":",
"remove_using_sudo",
"=",
"True",
"sudo",
"(",
"chown",
"(",
"apply_chown",
",",
"path",
")",
")",
"yield",
"path",
"finally",
":",
"remove_ignore",
"(",
"path",
",",
"use_sudo",
"=",
"remove_using_sudo",
",",
"force",
"=",
"remove_force",
")"
] | 40.607143 | 19.964286 |
def add_membership(self, user, role):
        """Make *user* a member of the group identified by *role*.

        Returns False when no matching group exists for this client;
        otherwise ensures a membership document exists, appends the group
        to it (if not already present) and returns True.
        """
        targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
        if not targetGroup:
            return False
        target = AuthMembership.objects(user=user, creator=self.client).first()
        if not target:
            target = AuthMembership(user=user, creator=self.client)
        # Idiom fix: `role not in ...` instead of `not role in ...`; a
        # generator avoids building a throwaway list of roles.
        if role not in (g.role for g in target.groups):
            target.groups.append(targetGroup)
            target.save()
        return True
"def",
"add_membership",
"(",
"self",
",",
"user",
",",
"role",
")",
":",
"targetGroup",
"=",
"AuthGroup",
".",
"objects",
"(",
"role",
"=",
"role",
",",
"creator",
"=",
"self",
".",
"client",
")",
".",
"first",
"(",
")",
"if",
"not",
"targetGroup",
":",
"return",
"False",
"target",
"=",
"AuthMembership",
".",
"objects",
"(",
"user",
"=",
"user",
",",
"creator",
"=",
"self",
".",
"client",
")",
".",
"first",
"(",
")",
"if",
"not",
"target",
":",
"target",
"=",
"AuthMembership",
"(",
"user",
"=",
"user",
",",
"creator",
"=",
"self",
".",
"client",
")",
"if",
"not",
"role",
"in",
"[",
"i",
".",
"role",
"for",
"i",
"in",
"target",
".",
"groups",
"]",
":",
"target",
".",
"groups",
".",
"append",
"(",
"targetGroup",
")",
"target",
".",
"save",
"(",
")",
"return",
"True"
] | 37.428571 | 20.785714 |
def _highlight(self, content, ngrams, highlight):
        """Returns `content` with its n-grams from `ngrams` highlighted (if
        `highlight` is True) or unhighlighted.

        :param content: text to be modified
        :type content: `str`
        :param ngrams: n-grams to modify
        :type ngrams: `list` of `str`
        :param highlight: whether to highlight or unhighlight `ngrams`
        :type highlight: `bool`
        :rtype: `str`

        """
        # _annotate_tokens reads this flag to decide add vs. remove.
        self._add_highlight = highlight
        for ngram in ngrams:
            content = re.sub(self._get_regexp_pattern(ngram),
                             self._annotate_tokens, content)
            self._ngrams_count += 1
        return content
"def",
"_highlight",
"(",
"self",
",",
"content",
",",
"ngrams",
",",
"highlight",
")",
":",
"self",
".",
"_add_highlight",
"=",
"highlight",
"for",
"ngram",
"in",
"ngrams",
":",
"pattern",
"=",
"self",
".",
"_get_regexp_pattern",
"(",
"ngram",
")",
"content",
"=",
"re",
".",
"sub",
"(",
"pattern",
",",
"self",
".",
"_annotate_tokens",
",",
"content",
")",
"self",
".",
"_ngrams_count",
"+=",
"1",
"return",
"content"
] | 36.473684 | 13.315789 |
def allow(self):
        """Allow the add-on to be installed.

        Clicks the primary button of the notification, which lives in the
        browser chrome rather than in page content.
        """
        with self.selenium.context(self.selenium.CONTEXT_CHROME):
            button = self.find_primary_button()
            button.click()
"def",
"allow",
"(",
"self",
")",
":",
"with",
"self",
".",
"selenium",
".",
"context",
"(",
"self",
".",
"selenium",
".",
"CONTEXT_CHROME",
")",
":",
"self",
".",
"find_primary_button",
"(",
")",
".",
"click",
"(",
")"
] | 43.5 | 13.75 |
def _read(self, fh, fname):
        """Parse the TIMER section of file *fname* opened as *fh*.

        Populates ``self._timers[fname]`` with a ``{mpi_rank: AbinitTimer}``
        mapping, one timer per MPI rank found in the file.

        Raises:
            self.Error: if *fname* was already parsed, if no timer section is
                found, or if a line outside a timer section unexpectedly
                parses as a timer entry.
        """
        if fname in self._timers:
            raise self.Error("Cannot overwrite timer associated to: %s " % fname)

        def parse_line(line):
            """Parse single line."""
            name, vals = line[:25], line[25:].split()
            try:
                ctime, cfract, wtime, wfract, ncalls, gflops = vals
            except ValueError:
                # v8.3 Added two columns at the end [Speedup, Efficacity]
                ctime, cfract, wtime, wfract, ncalls, gflops, speedup, eff = vals
            return AbinitTimerSection(name, ctime, cfract, wtime, wfract, ncalls, gflops)

        data = {}
        inside, has_timer = 0, False
        for line in fh:
            if line.startswith(self.BEGIN_TAG):
                has_timer = True
                sections = []
                info = {}
                inside = 1
                # Strip the opening tag plus the trailing character, then read
                # the comma-separated key=value attributes of the header.
                line = line[len(self.BEGIN_TAG):].strip()[:-1]
                info["fname"] = fname
                for tok in line.split(","):
                    key, val = [s.strip() for s in tok.split("=")]
                    info[key] = val
            elif line.startswith(self.END_TAG):
                inside = 0
                timer = AbinitTimer(sections, info, cpu_time, wall_time)
                mpi_rank = info["mpi_rank"]
                data[mpi_rank] = timer
            elif inside:
                inside += 1
                line = line[1:].strip()
                if inside == 2:
                    # Second line of a section carries cpu_time and wall_time.
                    d = dict()
                    for tok in line.split(","):
                        key, val = [s.strip() for s in tok.split("=")]
                        d[key] = float(val)
                    cpu_time, wall_time = d["cpu_time"], d["wall_time"]
                elif inside > 5:
                    # Lines 3-5 are headers; data rows start at line 6.
                    sections.append(parse_line(line))
            else:
                # Outside any timer section, lines must NOT parse as timer
                # entries; if one does, the file layout is unexpected.
                # BUG FIX: the original tested an uninitialized
                # 'parser_failed' flag here, raising NameError instead of the
                # intended error whenever a stray line parsed successfully.
                try:
                    parse_line(line)
                except Exception:
                    pass  # expected: non-timer lines fail to parse
                else:
                    raise self.Error("line should be empty: " + str(inside) + line)

        if not has_timer:
            raise self.Error("%s: No timer section found" % fname)

        # Add it to the dict
        self._timers[fname] = data
"def",
"_read",
"(",
"self",
",",
"fh",
",",
"fname",
")",
":",
"if",
"fname",
"in",
"self",
".",
"_timers",
":",
"raise",
"self",
".",
"Error",
"(",
"\"Cannot overwrite timer associated to: %s \"",
"%",
"fname",
")",
"def",
"parse_line",
"(",
"line",
")",
":",
"\"\"\"Parse single line.\"\"\"",
"name",
",",
"vals",
"=",
"line",
"[",
":",
"25",
"]",
",",
"line",
"[",
"25",
":",
"]",
".",
"split",
"(",
")",
"try",
":",
"ctime",
",",
"cfract",
",",
"wtime",
",",
"wfract",
",",
"ncalls",
",",
"gflops",
"=",
"vals",
"except",
"ValueError",
":",
"# v8.3 Added two columns at the end [Speedup, Efficacity]",
"ctime",
",",
"cfract",
",",
"wtime",
",",
"wfract",
",",
"ncalls",
",",
"gflops",
",",
"speedup",
",",
"eff",
"=",
"vals",
"return",
"AbinitTimerSection",
"(",
"name",
",",
"ctime",
",",
"cfract",
",",
"wtime",
",",
"wfract",
",",
"ncalls",
",",
"gflops",
")",
"data",
"=",
"{",
"}",
"inside",
",",
"has_timer",
"=",
"0",
",",
"False",
"for",
"line",
"in",
"fh",
":",
"#print(line.strip())",
"if",
"line",
".",
"startswith",
"(",
"self",
".",
"BEGIN_TAG",
")",
":",
"has_timer",
"=",
"True",
"sections",
"=",
"[",
"]",
"info",
"=",
"{",
"}",
"inside",
"=",
"1",
"line",
"=",
"line",
"[",
"len",
"(",
"self",
".",
"BEGIN_TAG",
")",
":",
"]",
".",
"strip",
"(",
")",
"[",
":",
"-",
"1",
"]",
"info",
"[",
"\"fname\"",
"]",
"=",
"fname",
"for",
"tok",
"in",
"line",
".",
"split",
"(",
"\",\"",
")",
":",
"key",
",",
"val",
"=",
"[",
"s",
".",
"strip",
"(",
")",
"for",
"s",
"in",
"tok",
".",
"split",
"(",
"\"=\"",
")",
"]",
"info",
"[",
"key",
"]",
"=",
"val",
"elif",
"line",
".",
"startswith",
"(",
"self",
".",
"END_TAG",
")",
":",
"inside",
"=",
"0",
"timer",
"=",
"AbinitTimer",
"(",
"sections",
",",
"info",
",",
"cpu_time",
",",
"wall_time",
")",
"mpi_rank",
"=",
"info",
"[",
"\"mpi_rank\"",
"]",
"data",
"[",
"mpi_rank",
"]",
"=",
"timer",
"elif",
"inside",
":",
"inside",
"+=",
"1",
"line",
"=",
"line",
"[",
"1",
":",
"]",
".",
"strip",
"(",
")",
"if",
"inside",
"==",
"2",
":",
"d",
"=",
"dict",
"(",
")",
"for",
"tok",
"in",
"line",
".",
"split",
"(",
"\",\"",
")",
":",
"key",
",",
"val",
"=",
"[",
"s",
".",
"strip",
"(",
")",
"for",
"s",
"in",
"tok",
".",
"split",
"(",
"\"=\"",
")",
"]",
"d",
"[",
"key",
"]",
"=",
"float",
"(",
"val",
")",
"cpu_time",
",",
"wall_time",
"=",
"d",
"[",
"\"cpu_time\"",
"]",
",",
"d",
"[",
"\"wall_time\"",
"]",
"elif",
"inside",
">",
"5",
":",
"sections",
".",
"append",
"(",
"parse_line",
"(",
"line",
")",
")",
"else",
":",
"try",
":",
"parse_line",
"(",
"line",
")",
"except",
":",
"parser_failed",
"=",
"True",
"if",
"not",
"parser_failed",
":",
"raise",
"self",
".",
"Error",
"(",
"\"line should be empty: \"",
"+",
"str",
"(",
"inside",
")",
"+",
"line",
")",
"if",
"not",
"has_timer",
":",
"raise",
"self",
".",
"Error",
"(",
"\"%s: No timer section found\"",
"%",
"fname",
")",
"# Add it to the dict",
"self",
".",
"_timers",
"[",
"fname",
"]",
"=",
"data"
] | 34.727273 | 19.287879 |
def SCISetStyling(self, line: int, col: int,
                  numChar: int, style: bytearray):
        """
        Pythonic wrapper for the SCI_SETSTYLING command.

        For example, the following code applies style #3
        to the first five characters in the second line
        of the widget:

            SCISetStyling((0, 1), 5, 3)

        |Args|

        * ``line`` (**int**): line number where to start styling.
        * ``col`` (**int**): column number where to start styling.
        * ``numChar`` (**int**): number of characters to style.
        * ``style`` (**int**): Scintilla style number.

        |Returns|

        **None**

        |Raises|

        * **QtmacsArgumentError** if at least one argument has an invalid type.
        """
        # Silently ignore out-of-range positions.
        if self.isPositionValid(line, col):
            pos = self.positionFromLineIndex(line, col)
            self.SendScintilla(self.SCI_STARTSTYLING, pos, 0xFF)
            self.SendScintilla(self.SCI_SETSTYLING, numChar, style)
"def",
"SCISetStyling",
"(",
"self",
",",
"line",
":",
"int",
",",
"col",
":",
"int",
",",
"numChar",
":",
"int",
",",
"style",
":",
"bytearray",
")",
":",
"if",
"not",
"self",
".",
"isPositionValid",
"(",
"line",
",",
"col",
")",
":",
"return",
"pos",
"=",
"self",
".",
"positionFromLineIndex",
"(",
"line",
",",
"col",
")",
"self",
".",
"SendScintilla",
"(",
"self",
".",
"SCI_STARTSTYLING",
",",
"pos",
",",
"0xFF",
")",
"self",
".",
"SendScintilla",
"(",
"self",
".",
"SCI_SETSTYLING",
",",
"numChar",
",",
"style",
")"
] | 30.53125 | 23.46875 |
def _set_class_path(cls, module_dict=sys.modules):
    """Sets the absolute path to this class as a string.

    Used by the Pipeline API to reconstruct the Pipeline sub-class object
    at execution time instead of passing around a serialized function.

    Args:
      module_dict: Used for testing.
    """
    # Do not traverse the class hierarchy fetching the class path attribute.
    if cls.__dict__.get('_class_path') is not None:
        return
    # Do not set the _class_path for the base-class, otherwise all children's
    # lookups for _class_path will fall through and return 'Pipeline' above.
    # This situation can happen if users call the generic Pipeline.from_id
    # to get the result of a Pipeline without knowing its specific class.
    if cls is Pipeline:
        return
    class_path = '%s.%s' % (cls.__module__, cls.__name__)
    # When a WSGI handler is invoked as an entry point, any Pipeline class
    # defined in the same file as the handler will get __module__ set to
    # __main__. Thus we need to find out its real fully qualified path.
    if cls.__module__ == '__main__':
        for name, module in module_dict.items():
            if name == '__main__':
                continue
            if getattr(module, cls.__name__, None) is cls:
                class_path = '%s.%s' % (name, cls.__name__)
                break
    cls._class_path = class_path
"def",
"_set_class_path",
"(",
"cls",
",",
"module_dict",
"=",
"sys",
".",
"modules",
")",
":",
"# Do not traverse the class hierarchy fetching the class path attribute.",
"found",
"=",
"cls",
".",
"__dict__",
".",
"get",
"(",
"'_class_path'",
")",
"if",
"found",
"is",
"not",
"None",
":",
"return",
"# Do not set the _class_path for the base-class, otherwise all children's",
"# lookups for _class_path will fall through and return 'Pipeline' above.",
"# This situation can happen if users call the generic Pipeline.from_id",
"# to get the result of a Pipeline without knowing its specific class.",
"if",
"cls",
"is",
"Pipeline",
":",
"return",
"class_path",
"=",
"'%s.%s'",
"%",
"(",
"cls",
".",
"__module__",
",",
"cls",
".",
"__name__",
")",
"# When a WSGI handler is invoked as an entry point, any Pipeline class",
"# defined in the same file as the handler will get __module__ set to",
"# __main__. Thus we need to find out its real fully qualified path.",
"if",
"cls",
".",
"__module__",
"==",
"'__main__'",
":",
"for",
"name",
",",
"module",
"in",
"module_dict",
".",
"items",
"(",
")",
":",
"if",
"name",
"==",
"'__main__'",
":",
"continue",
"found",
"=",
"getattr",
"(",
"module",
",",
"cls",
".",
"__name__",
",",
"None",
")",
"if",
"found",
"is",
"cls",
":",
"class_path",
"=",
"'%s.%s'",
"%",
"(",
"name",
",",
"cls",
".",
"__name__",
")",
"break",
"cls",
".",
"_class_path",
"=",
"class_path"
] | 40.323529 | 22.470588 |
def open(self, fname, mode='rb'):
        """
        (Re-)opens a backup file.

        Any currently open file is closed first; afterwards ``self.fp``
        holds the new file object and ``self.fname`` the given path.
        """
        # Release the current handle (if any) before acquiring a new one.
        self.close()
        new_fp = open(fname, mode)
        self.fp = new_fp
        self.fname = fname
"def",
"open",
"(",
"self",
",",
"fname",
",",
"mode",
"=",
"'rb'",
")",
":",
"self",
".",
"close",
"(",
")",
"self",
".",
"fp",
"=",
"open",
"(",
"fname",
",",
"mode",
")",
"self",
".",
"fname",
"=",
"fname"
] | 24 | 7.714286 |
def setRecordSet( self, recordSet ):
        """
        Sets the record set instance that this widget will use.  A falsy
        value resets every control to its default state.

        :param      recordSet | <orb.RecordSet> || None
        """
        if not recordSet:
            # Reset all controls back to their defaults.
            self.setQuery(Q())
            self.setGroupBy('')
            self.setPageSize(100)
            self.setSortBy('')
            self.uiPagedCHK.setChecked(False)
        else:
            self.setQuery(recordSet.query())
            self.setGroupBy(recordSet.groupBy())
            self.setPageSize(recordSet.pageSize())
            self.setSortBy(recordSet.order())
            self.uiPagedCHK.setChecked(recordSet.isPaged())
"def",
"setRecordSet",
"(",
"self",
",",
"recordSet",
")",
":",
"if",
"(",
"recordSet",
")",
":",
"self",
".",
"setQuery",
"(",
"recordSet",
".",
"query",
"(",
")",
")",
"self",
".",
"setGroupBy",
"(",
"recordSet",
".",
"groupBy",
"(",
")",
")",
"self",
".",
"setPageSize",
"(",
"recordSet",
".",
"pageSize",
"(",
")",
")",
"self",
".",
"setSortBy",
"(",
"recordSet",
".",
"order",
"(",
")",
")",
"self",
".",
"uiPagedCHK",
".",
"setChecked",
"(",
"recordSet",
".",
"isPaged",
"(",
")",
")",
"else",
":",
"self",
".",
"setQuery",
"(",
"Q",
"(",
")",
")",
"self",
".",
"setGroupBy",
"(",
"''",
")",
"self",
".",
"setPageSize",
"(",
"100",
")",
"self",
".",
"setSortBy",
"(",
"''",
")",
"self",
".",
"uiPagedCHK",
".",
"setChecked",
"(",
"False",
")"
] | 34.9 | 13.7 |
def _prepare_disks(self, disks_name):
        """Format every disk listed in *disks_name* (comma-separated device
        names) as XFS, register it in /etc/fstab and mount it under
        /srv/node/<disk>."""
        fstab = '/etc/fstab'
        for disk in tqdm(disks_name.split(',')):
            # Unmount first in case the device is currently in use.
            sudo('umount /dev/{0}'.format(disk), warn_only=True)
            formatted = sudo('mkfs.xfs -f /dev/{0}'.format(disk), warn_only=True)
            if formatted.failed:
                # mkfs.xfs is missing; install xfsprogs and retry once.
                sudo('apt-get update')
                sudo('apt-get -y install xfsprogs')
                sudo('mkfs.xfs -f /dev/{0}'.format(disk))
            sudo('mkdir -p /srv/node/{0}'.format(disk))
            files.append(
                fstab,
                '/dev/{0} /srv/node/{1} xfs noatime,nodiratime,nobarrier,logbufs=8 0 2'.format(disk, disk),
                use_sudo=True)
            sudo('mount /srv/node/{0}'.format(disk))
"def",
"_prepare_disks",
"(",
"self",
",",
"disks_name",
")",
":",
"fstab",
"=",
"'/etc/fstab'",
"for",
"disk",
"in",
"tqdm",
"(",
"disks_name",
".",
"split",
"(",
"','",
")",
")",
":",
"sudo",
"(",
"'umount /dev/{0}'",
".",
"format",
"(",
"disk",
")",
",",
"warn_only",
"=",
"True",
")",
"if",
"sudo",
"(",
"'mkfs.xfs -f /dev/{0}'",
".",
"format",
"(",
"disk",
")",
",",
"warn_only",
"=",
"True",
")",
".",
"failed",
":",
"sudo",
"(",
"'apt-get update'",
")",
"sudo",
"(",
"'apt-get -y install xfsprogs'",
")",
"sudo",
"(",
"'mkfs.xfs -f /dev/{0}'",
".",
"format",
"(",
"disk",
")",
")",
"sudo",
"(",
"'mkdir -p /srv/node/{0}'",
".",
"format",
"(",
"disk",
")",
")",
"files",
".",
"append",
"(",
"fstab",
",",
"'/dev/{0} /srv/node/{1} xfs noatime,nodiratime,nobarrier,logbufs=8 0 2'",
".",
"format",
"(",
"disk",
",",
"disk",
")",
",",
"use_sudo",
"=",
"True",
")",
"sudo",
"(",
"'mount /srv/node/{0}'",
".",
"format",
"(",
"disk",
")",
")"
] | 55.538462 | 19.307692 |
def percentage(value: str) -> float:
    """
    ``argparse`` argument type that checks that its value is a percentage (in
    the sense of a float in the range [0, 100]).

    Raises:
        ArgumentTypeError: if *value* cannot be parsed as a float or falls
            outside the range [0, 100].
    """
    try:
        fvalue = float(value)
    except (TypeError, ValueError):
        raise ArgumentTypeError(
            "{!r} is an invalid percentage value".format(value))
    # BUG FIX: the range check previously used `assert`, which is stripped
    # when Python runs with -O, silently accepting out-of-range values.
    if not 0 <= fvalue <= 100:
        raise ArgumentTypeError(
            "{!r} is an invalid percentage value".format(value))
    return fvalue
"def",
"percentage",
"(",
"value",
":",
"str",
")",
"->",
"float",
":",
"try",
":",
"fvalue",
"=",
"float",
"(",
"value",
")",
"assert",
"0",
"<=",
"fvalue",
"<=",
"100",
"except",
"(",
"AssertionError",
",",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ArgumentTypeError",
"(",
"\"{!r} is an invalid percentage value\"",
".",
"format",
"(",
"value",
")",
")",
"return",
"fvalue"
] | 34.083333 | 13.75 |
def _find_package(c):
"""
Try to find 'the' One True Package for this project.
Mostly for obtaining the ``_version`` file within it.
Uses the ``packaging.package`` config setting if defined. If not defined,
fallback is to look for a single top-level Python package (directory
containing ``__init__.py``). (This search ignores a small blacklist of
directories like ``tests/``, ``vendor/`` etc.)
"""
# TODO: is there a way to get this from the same place setup.py does w/o
# setup.py barfing (since setup() runs at import time and assumes CLI use)?
configured_value = c.get("packaging", {}).get("package", None)
if configured_value:
return configured_value
# TODO: tests covering this stuff here (most logic tests simply supply
# config above)
packages = [
path
for path in os.listdir(".")
if (
os.path.isdir(path)
and os.path.exists(os.path.join(path, "__init__.py"))
and path not in ("tests", "integration", "sites", "vendor")
)
]
if not packages:
sys.exit("Unable to find a local Python package!")
if len(packages) > 1:
sys.exit("Found multiple Python packages: {0!r}".format(packages))
return packages[0] | [
"def",
"_find_package",
"(",
"c",
")",
":",
"# TODO: is there a way to get this from the same place setup.py does w/o",
"# setup.py barfing (since setup() runs at import time and assumes CLI use)?",
"configured_value",
"=",
"c",
".",
"get",
"(",
"\"packaging\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"package\"",
",",
"None",
")",
"if",
"configured_value",
":",
"return",
"configured_value",
"# TODO: tests covering this stuff here (most logic tests simply supply",
"# config above)",
"packages",
"=",
"[",
"path",
"for",
"path",
"in",
"os",
".",
"listdir",
"(",
"\".\"",
")",
"if",
"(",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"\"__init__.py\"",
")",
")",
"and",
"path",
"not",
"in",
"(",
"\"tests\"",
",",
"\"integration\"",
",",
"\"sites\"",
",",
"\"vendor\"",
")",
")",
"]",
"if",
"not",
"packages",
":",
"sys",
".",
"exit",
"(",
"\"Unable to find a local Python package!\"",
")",
"if",
"len",
"(",
"packages",
")",
">",
"1",
":",
"sys",
".",
"exit",
"(",
"\"Found multiple Python packages: {0!r}\"",
".",
"format",
"(",
"packages",
")",
")",
"return",
"packages",
"[",
"0",
"]"
] | 38.90625 | 23.34375 |
def vectorize( self, docs ):
        '''
        Returns the feature vectors for a set of docs. If model is not already
        trained, then self.train() is called.

        Args:
            docs (dict or list of tuples): asset_id, body_text of documents
                you wish to featurize.

        Returns:
            dict: maps each asset_id to its document vector.
        '''
        # Idiom fixes: isinstance() instead of type() comparison, and
        # `is None` instead of `== None`.
        if isinstance(docs, dict):
            docs = docs.items()
        if self.model is None:
            self.train(docs)
        asset_id2vector = {}
        unfound = []
        for item in docs:
            ## iterate through the items in docs and check if any are already in the model.
            asset_id, _ = item
            label = 'DOC_' + str(asset_id)
            if label in self.model:
                asset_id2vector[asset_id] = self.model[label]
            else:
                unfound.append(item)
        if len(unfound) > 0:
            ## for all assets not in the model, update the model and then get their sentence vectors.
            sentences = [self._gen_sentence(item) for item in unfound]
            self.update_model(sentences, train=self.stream_train)
            asset_id2vector.update({item[0]: self.model['DOC_' + str(item[0])] for item in unfound})
        return asset_id2vector
"def",
"vectorize",
"(",
"self",
",",
"docs",
")",
":",
"if",
"type",
"(",
"docs",
")",
"==",
"dict",
":",
"docs",
"=",
"docs",
".",
"items",
"(",
")",
"if",
"self",
".",
"model",
"==",
"None",
":",
"self",
".",
"train",
"(",
"docs",
")",
"asset_id2vector",
"=",
"{",
"}",
"unfound",
"=",
"[",
"]",
"for",
"item",
"in",
"docs",
":",
"## iterate through the items in docs and check if any are already in the model.",
"asset_id",
",",
"_",
"=",
"item",
"label",
"=",
"'DOC_'",
"+",
"str",
"(",
"asset_id",
")",
"if",
"label",
"in",
"self",
".",
"model",
":",
"asset_id2vector",
".",
"update",
"(",
"{",
"asset_id",
":",
"self",
".",
"model",
"[",
"'DOC_'",
"+",
"str",
"(",
"asset_id",
")",
"]",
"}",
")",
"else",
":",
"unfound",
".",
"append",
"(",
"item",
")",
"if",
"len",
"(",
"unfound",
")",
">",
"0",
":",
"## for all assets not in the model, update the model and then get their sentence vectors.",
"sentences",
"=",
"[",
"self",
".",
"_gen_sentence",
"(",
"item",
")",
"for",
"item",
"in",
"unfound",
"]",
"self",
".",
"update_model",
"(",
"sentences",
",",
"train",
"=",
"self",
".",
"stream_train",
")",
"asset_id2vector",
".",
"update",
"(",
"{",
"item",
"[",
"0",
"]",
":",
"self",
".",
"model",
"[",
"'DOC_'",
"+",
"str",
"(",
"item",
"[",
"0",
"]",
")",
"]",
"for",
"item",
"in",
"unfound",
"}",
")",
"return",
"asset_id2vector"
] | 34.914286 | 25.714286 |
def set_courses(self, course_ids):
    """Sets the courses.
    arg: course_ids (osid.id.Id[]): the course ``Ids``
    raise: InvalidArgument - ``course_ids`` is invalid
    raise: NullArgument - ``course_ids`` is ``null``
    raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for osid.learning.ActivityForm.set_assets_template
    if not isinstance(course_ids, list):
        raise errors.InvalidArgument()
    if self.get_courses_metadata().is_read_only():
        raise errors.NoAccess()
    # Validate every id first; only a fully valid list is serialized.
    for course_id in course_ids:
        if not self._is_valid_id(course_id):
            raise errors.InvalidArgument()
    self._my_map['courseIds'] = [str(course_id) for course_id in course_ids]
"def",
"set_courses",
"(",
"self",
",",
"course_ids",
")",
":",
"# Implemented from template for osid.learning.ActivityForm.set_assets_template",
"if",
"not",
"isinstance",
"(",
"course_ids",
",",
"list",
")",
":",
"raise",
"errors",
".",
"InvalidArgument",
"(",
")",
"if",
"self",
".",
"get_courses_metadata",
"(",
")",
".",
"is_read_only",
"(",
")",
":",
"raise",
"errors",
".",
"NoAccess",
"(",
")",
"idstr_list",
"=",
"[",
"]",
"for",
"object_id",
"in",
"course_ids",
":",
"if",
"not",
"self",
".",
"_is_valid_id",
"(",
"object_id",
")",
":",
"raise",
"errors",
".",
"InvalidArgument",
"(",
")",
"idstr_list",
".",
"append",
"(",
"str",
"(",
"object_id",
")",
")",
"self",
".",
"_my_map",
"[",
"'courseIds'",
"]",
"=",
"idstr_list"
] | 42.190476 | 14.809524 |
def setval(self, varname, value):
    """
    Set the value of the variable with the given name.
    A missing variable is created as a ``Variable`` of the container's
    default type; an existing one just has its 'value' entry replaced.
    """
    if varname not in self:
        self[varname] = Variable(self.default_type, value=value)
    else:
        self[varname]['value'] = value
"def",
"setval",
"(",
"self",
",",
"varname",
",",
"value",
")",
":",
"if",
"varname",
"in",
"self",
":",
"self",
"[",
"varname",
"]",
"[",
"'value'",
"]",
"=",
"value",
"else",
":",
"self",
"[",
"varname",
"]",
"=",
"Variable",
"(",
"self",
".",
"default_type",
",",
"value",
"=",
"value",
")"
] | 32.875 | 11.875 |
def servicegroup_add(sg_name, sg_type='HTTP', **connection_args):
    '''
    Add a new service group
    If no service type is specified, HTTP will be used.
    Most common service types: HTTP, SSL, and SSL_BRIDGE
    CLI Example:
    .. code-block:: bash
        salt '*' netscaler.servicegroup_add 'serviceGroupName'
        salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType'
    '''
    # Nothing to do if a group with this name already exists.
    if servicegroup_exists(sg_name):
        return False
    nitro = _connect(**connection_args)
    if nitro is None:
        return False
    group = NSServiceGroup()
    group.set_servicegroupname(sg_name)
    group.set_servicetype(sg_type.upper())
    result = True
    try:
        NSServiceGroup.add(nitro, group)
    except NSNitroError as error:
        log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error)
        result = False
    _disconnect(nitro)
    return result
"def",
"servicegroup_add",
"(",
"sg_name",
",",
"sg_type",
"=",
"'HTTP'",
",",
"*",
"*",
"connection_args",
")",
":",
"ret",
"=",
"True",
"if",
"servicegroup_exists",
"(",
"sg_name",
")",
":",
"return",
"False",
"nitro",
"=",
"_connect",
"(",
"*",
"*",
"connection_args",
")",
"if",
"nitro",
"is",
"None",
":",
"return",
"False",
"sg",
"=",
"NSServiceGroup",
"(",
")",
"sg",
".",
"set_servicegroupname",
"(",
"sg_name",
")",
"sg",
".",
"set_servicetype",
"(",
"sg_type",
".",
"upper",
"(",
")",
")",
"try",
":",
"NSServiceGroup",
".",
"add",
"(",
"nitro",
",",
"sg",
")",
"except",
"NSNitroError",
"as",
"error",
":",
"log",
".",
"debug",
"(",
"'netscaler module error - NSServiceGroup.add() failed: %s'",
",",
"error",
")",
"ret",
"=",
"False",
"_disconnect",
"(",
"nitro",
")",
"return",
"ret"
] | 29.896552 | 21.344828 |
def add_enrichr_parser(subparsers):
    """Add the 'enrichr' sub-command argument parser.

    Registers the 'enrichr' sub-command (Enrichr API based GO analysis)
    on the given argparse sub-parsers object, with its input and output
    option groups.

    Args:
        subparsers: argparse sub-parsers object (as returned by
            ``ArgumentParser.add_subparsers``) to register on.

    Returns:
        None
    """
    argparser_enrichr = subparsers.add_parser("enrichr", help="Using Enrichr API to perform GO analysis.")
    # group for required options.
    enrichr_opt = argparser_enrichr.add_argument_group("Input arguments")
    enrichr_opt.add_argument("-i", "--input-list", action="store", dest="gene_list", type=str, required=True, metavar='IDs',
                             help="Enrichr uses a list of gene names as input.")
    enrichr_opt.add_argument("-g", "--gene-sets", action="store", dest="library", type=str, required=True, metavar='GMT',
                             help="Enrichr library name(s) required. Separate each name by comma.")
    enrichr_opt.add_argument("--org", "--organism", action="store", dest="organism", type=str, default='',
                             help="Enrichr supported organism name. Default: human. See here: https://amp.pharm.mssm.edu/modEnrichr.")
    enrichr_opt.add_argument("--ds", "--description", action="store", dest="descrip", type=str, default='enrichr', metavar='STRING',
                             help="It is recommended to enter a short description for your list so that multiple lists \
                can be differentiated from each other if you choose to save or share your list.")
    enrichr_opt.add_argument("--cut", "--cut-off", action="store", dest="thresh", metavar='float', type=float, default=0.05,
                             help="Adjust-Pval cutoff, used for generating plots. Default: 0.05.")
    # BUGFIX: help text used to claim "Default: None" although the actual
    # default is 'hsapiens_gene_ensembl'.
    enrichr_opt.add_argument("--bg", "--background", action="store", dest="bg", default='hsapiens_gene_ensembl', metavar='BGNUM',
                             help="BioMart Dataset name or Background total genes number. Default: hsapiens_gene_ensembl")
    enrichr_opt.add_argument("-t", "--top-term", dest="term", action="store", type=int, default=10, metavar='int',
                             help="Numbers of top terms shown in the plot. Default: 10")
    # enrichr_opt.add_argument("--scale", dest = "scale", action="store", type=float, default=0.5, metavar='float',
    #                         help="scatter dot scale in the dotplot. Default: 0.5")
    # enrichr_opt.add_argument("--no-plot", action='store_true', dest='no_plot', default=False,
    #                         help="Suppress the plot output.This is useful only if data are interested. Default: False.")
    enrichr_output = argparser_enrichr.add_argument_group("Output figure arguments")
    add_output_option(enrichr_output)
    return
"def",
"add_enrichr_parser",
"(",
"subparsers",
")",
":",
"argparser_enrichr",
"=",
"subparsers",
".",
"add_parser",
"(",
"\"enrichr\"",
",",
"help",
"=",
"\"Using Enrichr API to perform GO analysis.\"",
")",
"# group for required options.",
"enrichr_opt",
"=",
"argparser_enrichr",
".",
"add_argument_group",
"(",
"\"Input arguments\"",
")",
"enrichr_opt",
".",
"add_argument",
"(",
"\"-i\"",
",",
"\"--input-list\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"gene_list\"",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
",",
"metavar",
"=",
"'IDs'",
",",
"help",
"=",
"\"Enrichr uses a list of gene names as input.\"",
")",
"enrichr_opt",
".",
"add_argument",
"(",
"\"-g\"",
",",
"\"--gene-sets\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"library\"",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
",",
"metavar",
"=",
"'GMT'",
",",
"help",
"=",
"\"Enrichr library name(s) required. Separate each name by comma.\"",
")",
"enrichr_opt",
".",
"add_argument",
"(",
"\"--org\"",
",",
"\"--organism\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"organism\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"''",
",",
"help",
"=",
"\"Enrichr supported organism name. Default: human. See here: https://amp.pharm.mssm.edu/modEnrichr.\"",
")",
"enrichr_opt",
".",
"add_argument",
"(",
"\"--ds\"",
",",
"\"--description\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"descrip\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'enrichr'",
",",
"metavar",
"=",
"'STRING'",
",",
"help",
"=",
"\"It is recommended to enter a short description for your list so that multiple lists \\\n can be differentiated from each other if you choose to save or share your list.\"",
")",
"enrichr_opt",
".",
"add_argument",
"(",
"\"--cut\"",
",",
"\"--cut-off\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"thresh\"",
",",
"metavar",
"=",
"'float'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.05",
",",
"help",
"=",
"\"Adjust-Pval cutoff, used for generating plots. Default: 0.05.\"",
")",
"enrichr_opt",
".",
"add_argument",
"(",
"\"--bg\"",
",",
"\"--background\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"bg\"",
",",
"default",
"=",
"'hsapiens_gene_ensembl'",
",",
"metavar",
"=",
"'BGNUM'",
",",
"help",
"=",
"\"BioMart Dataset name or Background total genes number. Default: None\"",
")",
"enrichr_opt",
".",
"add_argument",
"(",
"\"-t\"",
",",
"\"--top-term\"",
",",
"dest",
"=",
"\"term\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"10",
",",
"metavar",
"=",
"'int'",
",",
"help",
"=",
"\"Numbers of top terms shown in the plot. Default: 10\"",
")",
"# enrichr_opt.add_argument(\"--scale\", dest = \"scale\", action=\"store\", type=float, default=0.5, metavar='float',",
"# help=\"scatter dot scale in the dotplot. Default: 0.5\")",
"# enrichr_opt.add_argument(\"--no-plot\", action='store_true', dest='no_plot', default=False,",
"# help=\"Suppress the plot output.This is useful only if data are interested. Default: False.\")",
"enrichr_output",
"=",
"argparser_enrichr",
".",
"add_argument_group",
"(",
"\"Output figure arguments\"",
")",
"add_output_option",
"(",
"enrichr_output",
")",
"return"
] | 84.6 | 55.266667 |
def decrement(self, subname=None, delta=1):
    '''Decrement the gauge with `delta`
    :keyword subname: The subname to report the data to (appended to the
        client name)
    :type subname: str
    :keyword delta: The delta to remove from the gauge
    :type delta: int
    :returns: whatever the underlying send reports (True on success)
    '''
    amount = -int(delta)
    # A negative amount already renders its own '-' through %d; a
    # non-negative one needs an explicit '+' so statsd applies a delta.
    prefix = "" if amount < 0 else "+"
    return self._send(subname, "%s%d" % (prefix, amount))
"def",
"decrement",
"(",
"self",
",",
"subname",
"=",
"None",
",",
"delta",
"=",
"1",
")",
":",
"delta",
"=",
"-",
"int",
"(",
"delta",
")",
"sign",
"=",
"\"+\"",
"if",
"delta",
">=",
"0",
"else",
"\"\"",
"return",
"self",
".",
"_send",
"(",
"subname",
",",
"\"%s%d\"",
"%",
"(",
"sign",
",",
"delta",
")",
")"
] | 31.7 | 17.2 |
def iplot(self, places=-1, c_poly='default', c_holes='default',
          c_sop='r', s_sop=25, extra_height=0, ret=False, ax=None):
    """
    Improved plot of selected Places in the Space.

    Plots the Places whose indexes are given in *places* (-1 meaning
    all of them), delegating the drawing of each Place to its own
    ``iplot`` and finally centering the shared axes.

    :param places: Indexes of the Places to visualize, -1 for all.
    :type places: int, list or ndarray
    :param c_poly: Polygons color.
    :type c_poly: matplotlib color, 'default' or 't' (transparent)
    :param c_holes: Holes color.
    :type c_holes: matplotlib color, 'default' or 't' (transparent)
    :param c_sop: Set of points color.
    :type c_sop: matplotlib color or colormap
    :param s_sop: Set of points size.
    :type s_sop: float or ndarray
    :param ret: If True, returns the axes for further drawing.
    :type ret: bool
    :param ax: Existing mplot3d axes to draw on top of, or None.
    :type ax: mplot3d.Axes3D, None
    :returns: None, axes
    :rtype: None, mplot3d.Axes3D
    """
    if places == -1:
        idx = np.arange(len(self.places))
    elif type(places) == int:
        idx = np.array([places])
    else:
        idx = np.array(places)
    # Translate negative indexes to positions from the end, drop duplicates.
    idx = np.unique(np.where(idx < 0, idx + len(self.places), idx))
    subspace = Space([self[i] for i in idx])
    for place in subspace:
        ax = place.iplot(c_poly, c_holes, c_sop, s_sop, extra_height,
                         ret=True, ax=ax)
    subspace.center_plot(ax)
    if ret:
        return ax
"def",
"iplot",
"(",
"self",
",",
"places",
"=",
"-",
"1",
",",
"c_poly",
"=",
"'default'",
",",
"c_holes",
"=",
"'default'",
",",
"c_sop",
"=",
"'r'",
",",
"s_sop",
"=",
"25",
",",
"extra_height",
"=",
"0",
",",
"ret",
"=",
"False",
",",
"ax",
"=",
"None",
")",
":",
"if",
"places",
"==",
"-",
"1",
":",
"places",
"=",
"range",
"(",
"len",
"(",
"self",
".",
"places",
")",
")",
"elif",
"type",
"(",
"places",
")",
"==",
"int",
":",
"places",
"=",
"[",
"places",
"]",
"places",
"=",
"np",
".",
"array",
"(",
"places",
")",
"places",
"[",
"places",
"<",
"0",
"]",
"=",
"len",
"(",
"self",
".",
"places",
")",
"+",
"places",
"[",
"places",
"<",
"0",
"]",
"places",
"=",
"np",
".",
"unique",
"(",
"places",
")",
"aux_space",
"=",
"Space",
"(",
"[",
"self",
"[",
"i",
"]",
"for",
"i",
"in",
"places",
"]",
")",
"for",
"place",
"in",
"aux_space",
":",
"ax",
"=",
"place",
".",
"iplot",
"(",
"c_poly",
",",
"c_holes",
",",
"c_sop",
",",
"s_sop",
",",
"extra_height",
",",
"ret",
"=",
"True",
",",
"ax",
"=",
"ax",
")",
"aux_space",
".",
"center_plot",
"(",
"ax",
")",
"if",
"ret",
":",
"return",
"ax"
] | 43.891304 | 15.717391 |
def image_groups_get(self, resource_url):
    """Get handle for image group resource at given Url.
    Parameters
    ----------
    resource_url : string
        Url for image group resource at SCO-API
    Returns
    -------
    scoserv.ImageGroupHandle
        Handle for local copy of image group resource
    """
    # Resolve the resource: local directory, Json body, active flag, cache id.
    obj_dir, obj_json, is_active, cache_id = self.get_object(resource_url)
    # Building the handle raises if the resource is neither cached nor
    # downloadable.
    handle = ImageGroupHandle(obj_json, obj_dir)
    # Register the resource in the cache on first access.
    if cache_id not in self.cache:
        self.cache_add(resource_url, cache_id)
    return handle
"def",
"image_groups_get",
"(",
"self",
",",
"resource_url",
")",
":",
"# Get resource directory, Json representation, active flag, and cache id",
"obj_dir",
",",
"obj_json",
",",
"is_active",
",",
"cache_id",
"=",
"self",
".",
"get_object",
"(",
"resource_url",
")",
"# Create image group handle. Will raise an exception if resource is not",
"# in cache and cannot be downloaded.",
"image_group",
"=",
"ImageGroupHandle",
"(",
"obj_json",
",",
"obj_dir",
")",
"# Add resource to cache if not exists",
"if",
"not",
"cache_id",
"in",
"self",
".",
"cache",
":",
"self",
".",
"cache_add",
"(",
"resource_url",
",",
"cache_id",
")",
"# Return image group handle",
"return",
"image_group"
] | 38.217391 | 17.217391 |
def add_options(self):
    """ Add program options.

    Registers the base options of the parent class, then all
    rtcontrol-specific groups: basic flags, output formatting and
    sorting, view selection, query optimization, OS command hooks,
    and the torrent state-change actions.
    """
    super(RtorrentControl, self).add_options()
    # basic options
    self.add_bool_option("--help-fields",
        help="show available fields and their description")
    self.add_bool_option("-n", "--dry-run",
        help="don't commit changes, just tell what would happen")
    self.add_bool_option("--detach",
        help="run the process in the background")
    self.prompt.add_options()
    # output control
    self.add_bool_option("-S", "--shell",
        help="escape output following shell rules")
    self.add_bool_option("-0", "--nul", "--print0",
        help="use a NUL character instead of a linebreak after items")
    self.add_bool_option("-c", "--column-headers",
        help="print column headers")
    self.add_bool_option("-+", "--stats",
        help="add sum / avg / median of numerical fields")
    self.add_bool_option("--summary",
        help="print only statistical summary, without the items")
    #self.add_bool_option("-f", "--full",
    #    help="print full torrent details")
    self.add_bool_option("--json",
        help="dump all items as JSON (use '-o f1,f2,...' to specify fields)")
    self.add_value_option("-o", "--output-format", "FORMAT",
        help="specify display format (use '-o-' to disable item display)")
    self.add_value_option("-O", "--output-template", "FILE",
        help="pass control of output formatting to the specified template")
    self.add_value_option("-s", "--sort-fields", "[-]FIELD[,...] [-s...]",
        action='append', default=[],
        help="fields used for sorting, descending if prefixed with a '-'; '-s*' uses output field list")
    self.add_bool_option("-r", "--reverse-sort",
        help="reverse the sort order")
    self.add_value_option("-A", "--anneal", "MODE [-A...]",
        type='choice', action='append', default=[],
        choices=('dupes+', 'dupes-', 'dupes=', 'invert', 'unique'),
        help="modify result set using some pre-defined methods")
    self.add_value_option("-/", "--select", "[N-]M",
        help="select result subset by item position (counting from 1)")
    # view control: where results are shown and which view feeds them
    self.add_bool_option("-V", "--view-only",
        help="show search result only in default ncurses view")
    self.add_value_option("--to-view", "--to", "NAME",
        help="show search result only in named ncurses view")
    self.add_bool_option("--append-view", "--append",
        help="DEPRECATED: use '--alter append' instead")
    self.add_value_option("--alter-view", "--alter", "MODE",
        type='choice', default=None, choices=self.ALTER_MODES,
        help="alter view according to mode: {} (modifies -V and --to behaviour)"
             .format(', '.join(self.ALTER_MODES)))
    self.add_bool_option("--tee-view", "--tee",
        help="ADDITIONALLY show search results in ncurses view (modifies -V and --to behaviour)")
    self.add_value_option("--from-view", "--from", "NAME",
        help="select only items that are on view NAME (NAME can be an info hash to quickly select a single item)")
    self.add_value_option("-M", "--modify-view", "NAME",
        help="get items from given view and write result back to it (short-cut to combine --from-view and --to-view)")
    self.add_value_option("-Q", "--fast-query", "LEVEL",
        type='choice', default='=', choices=('=', '0', '1', '2'),
        help="enable query optimization (=: use config; 0: off; 1: safe; 2: danger seeker)")
    self.add_value_option("--call", "CMD",
        help="call an OS command pattern in the shell")
    self.add_value_option("--spawn", "CMD [--spawn ...]",
        action="append", default=[],
        help="execute OS command pattern(s) directly")
    # TODO: implement -S
    # self.add_bool_option("-S", "--summary",
    #     help="print statistics")
    # torrent state change (actions)
    for action in self.ACTION_MODES:
        # Fill in optional attributes of each action descriptor so the
        # option registration below can rely on them being present.
        action.setdefault("label", action.name.upper())
        action.setdefault("method", action.name)
        action.setdefault("interactive", False)
        action.setdefault("argshelp", "")
        action.setdefault("args", ())
        if action.argshelp:
            self.add_value_option(*action.options + (action.argshelp,),
                **{"help": action.help + (" (implies -i)" if action.interactive else "")})
        else:
            self.add_bool_option(*action.options,
                **{"help": action.help + (" (implies -i)" if action.interactive else "")})
    self.add_value_option("--ignore", "|".join(self.IGNORE_OPTIONS),
        type="choice", choices=self.IGNORE_OPTIONS,
        help="set 'ignore commands' status on torrent")
    self.add_value_option("--prio", "|".join(self.PRIO_OPTIONS),
        type="choice", choices=self.PRIO_OPTIONS,
        help="set priority of torrent")
    self.add_bool_option("-F", "--flush", help="flush changes immediately (save session data)")
"def",
"add_options",
"(",
"self",
")",
":",
"super",
"(",
"RtorrentControl",
",",
"self",
")",
".",
"add_options",
"(",
")",
"# basic options",
"self",
".",
"add_bool_option",
"(",
"\"--help-fields\"",
",",
"help",
"=",
"\"show available fields and their description\"",
")",
"self",
".",
"add_bool_option",
"(",
"\"-n\"",
",",
"\"--dry-run\"",
",",
"help",
"=",
"\"don't commit changes, just tell what would happen\"",
")",
"self",
".",
"add_bool_option",
"(",
"\"--detach\"",
",",
"help",
"=",
"\"run the process in the background\"",
")",
"self",
".",
"prompt",
".",
"add_options",
"(",
")",
"# output control",
"self",
".",
"add_bool_option",
"(",
"\"-S\"",
",",
"\"--shell\"",
",",
"help",
"=",
"\"escape output following shell rules\"",
")",
"self",
".",
"add_bool_option",
"(",
"\"-0\"",
",",
"\"--nul\"",
",",
"\"--print0\"",
",",
"help",
"=",
"\"use a NUL character instead of a linebreak after items\"",
")",
"self",
".",
"add_bool_option",
"(",
"\"-c\"",
",",
"\"--column-headers\"",
",",
"help",
"=",
"\"print column headers\"",
")",
"self",
".",
"add_bool_option",
"(",
"\"-+\"",
",",
"\"--stats\"",
",",
"help",
"=",
"\"add sum / avg / median of numerical fields\"",
")",
"self",
".",
"add_bool_option",
"(",
"\"--summary\"",
",",
"help",
"=",
"\"print only statistical summary, without the items\"",
")",
"#self.add_bool_option(\"-f\", \"--full\",",
"# help=\"print full torrent details\")",
"self",
".",
"add_bool_option",
"(",
"\"--json\"",
",",
"help",
"=",
"\"dump all items as JSON (use '-o f1,f2,...' to specify fields)\"",
")",
"self",
".",
"add_value_option",
"(",
"\"-o\"",
",",
"\"--output-format\"",
",",
"\"FORMAT\"",
",",
"help",
"=",
"\"specify display format (use '-o-' to disable item display)\"",
")",
"self",
".",
"add_value_option",
"(",
"\"-O\"",
",",
"\"--output-template\"",
",",
"\"FILE\"",
",",
"help",
"=",
"\"pass control of output formatting to the specified template\"",
")",
"self",
".",
"add_value_option",
"(",
"\"-s\"",
",",
"\"--sort-fields\"",
",",
"\"[-]FIELD[,...] [-s...]\"",
",",
"action",
"=",
"'append'",
",",
"default",
"=",
"[",
"]",
",",
"help",
"=",
"\"fields used for sorting, descending if prefixed with a '-'; '-s*' uses output field list\"",
")",
"self",
".",
"add_bool_option",
"(",
"\"-r\"",
",",
"\"--reverse-sort\"",
",",
"help",
"=",
"\"reverse the sort order\"",
")",
"self",
".",
"add_value_option",
"(",
"\"-A\"",
",",
"\"--anneal\"",
",",
"\"MODE [-A...]\"",
",",
"type",
"=",
"'choice'",
",",
"action",
"=",
"'append'",
",",
"default",
"=",
"[",
"]",
",",
"choices",
"=",
"(",
"'dupes+'",
",",
"'dupes-'",
",",
"'dupes='",
",",
"'invert'",
",",
"'unique'",
")",
",",
"help",
"=",
"\"modify result set using some pre-defined methods\"",
")",
"self",
".",
"add_value_option",
"(",
"\"-/\"",
",",
"\"--select\"",
",",
"\"[N-]M\"",
",",
"help",
"=",
"\"select result subset by item position (counting from 1)\"",
")",
"self",
".",
"add_bool_option",
"(",
"\"-V\"",
",",
"\"--view-only\"",
",",
"help",
"=",
"\"show search result only in default ncurses view\"",
")",
"self",
".",
"add_value_option",
"(",
"\"--to-view\"",
",",
"\"--to\"",
",",
"\"NAME\"",
",",
"help",
"=",
"\"show search result only in named ncurses view\"",
")",
"self",
".",
"add_bool_option",
"(",
"\"--append-view\"",
",",
"\"--append\"",
",",
"help",
"=",
"\"DEPRECATED: use '--alter append' instead\"",
")",
"self",
".",
"add_value_option",
"(",
"\"--alter-view\"",
",",
"\"--alter\"",
",",
"\"MODE\"",
",",
"type",
"=",
"'choice'",
",",
"default",
"=",
"None",
",",
"choices",
"=",
"self",
".",
"ALTER_MODES",
",",
"help",
"=",
"\"alter view according to mode: {} (modifies -V and --to behaviour)\"",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"self",
".",
"ALTER_MODES",
")",
")",
")",
"self",
".",
"add_bool_option",
"(",
"\"--tee-view\"",
",",
"\"--tee\"",
",",
"help",
"=",
"\"ADDITIONALLY show search results in ncurses view (modifies -V and --to behaviour)\"",
")",
"self",
".",
"add_value_option",
"(",
"\"--from-view\"",
",",
"\"--from\"",
",",
"\"NAME\"",
",",
"help",
"=",
"\"select only items that are on view NAME (NAME can be an info hash to quickly select a single item)\"",
")",
"self",
".",
"add_value_option",
"(",
"\"-M\"",
",",
"\"--modify-view\"",
",",
"\"NAME\"",
",",
"help",
"=",
"\"get items from given view and write result back to it (short-cut to combine --from-view and --to-view)\"",
")",
"self",
".",
"add_value_option",
"(",
"\"-Q\"",
",",
"\"--fast-query\"",
",",
"\"LEVEL\"",
",",
"type",
"=",
"'choice'",
",",
"default",
"=",
"'='",
",",
"choices",
"=",
"(",
"'='",
",",
"'0'",
",",
"'1'",
",",
"'2'",
")",
",",
"help",
"=",
"\"enable query optimization (=: use config; 0: off; 1: safe; 2: danger seeker)\"",
")",
"self",
".",
"add_value_option",
"(",
"\"--call\"",
",",
"\"CMD\"",
",",
"help",
"=",
"\"call an OS command pattern in the shell\"",
")",
"self",
".",
"add_value_option",
"(",
"\"--spawn\"",
",",
"\"CMD [--spawn ...]\"",
",",
"action",
"=",
"\"append\"",
",",
"default",
"=",
"[",
"]",
",",
"help",
"=",
"\"execute OS command pattern(s) directly\"",
")",
"# TODO: implement -S",
"# self.add_bool_option(\"-S\", \"--summary\",",
"# help=\"print statistics\")",
"# torrent state change (actions)",
"for",
"action",
"in",
"self",
".",
"ACTION_MODES",
":",
"action",
".",
"setdefault",
"(",
"\"label\"",
",",
"action",
".",
"name",
".",
"upper",
"(",
")",
")",
"action",
".",
"setdefault",
"(",
"\"method\"",
",",
"action",
".",
"name",
")",
"action",
".",
"setdefault",
"(",
"\"interactive\"",
",",
"False",
")",
"action",
".",
"setdefault",
"(",
"\"argshelp\"",
",",
"\"\"",
")",
"action",
".",
"setdefault",
"(",
"\"args\"",
",",
"(",
")",
")",
"if",
"action",
".",
"argshelp",
":",
"self",
".",
"add_value_option",
"(",
"*",
"action",
".",
"options",
"+",
"(",
"action",
".",
"argshelp",
",",
")",
",",
"*",
"*",
"{",
"\"help\"",
":",
"action",
".",
"help",
"+",
"(",
"\" (implies -i)\"",
"if",
"action",
".",
"interactive",
"else",
"\"\"",
")",
"}",
")",
"else",
":",
"self",
".",
"add_bool_option",
"(",
"*",
"action",
".",
"options",
",",
"*",
"*",
"{",
"\"help\"",
":",
"action",
".",
"help",
"+",
"(",
"\" (implies -i)\"",
"if",
"action",
".",
"interactive",
"else",
"\"\"",
")",
"}",
")",
"self",
".",
"add_value_option",
"(",
"\"--ignore\"",
",",
"\"|\"",
".",
"join",
"(",
"self",
".",
"IGNORE_OPTIONS",
")",
",",
"type",
"=",
"\"choice\"",
",",
"choices",
"=",
"self",
".",
"IGNORE_OPTIONS",
",",
"help",
"=",
"\"set 'ignore commands' status on torrent\"",
")",
"self",
".",
"add_value_option",
"(",
"\"--prio\"",
",",
"\"|\"",
".",
"join",
"(",
"self",
".",
"PRIO_OPTIONS",
")",
",",
"type",
"=",
"\"choice\"",
",",
"choices",
"=",
"self",
".",
"PRIO_OPTIONS",
",",
"help",
"=",
"\"set priority of torrent\"",
")",
"self",
".",
"add_bool_option",
"(",
"\"-F\"",
",",
"\"--flush\"",
",",
"help",
"=",
"\"flush changes immediately (save session data)\"",
")"
] | 55.75 | 21.26087 |
def wait(self, container, timeout=None, condition=None):
    """
    Block until a container stops, then return its exit code. Similar to
    the ``docker wait`` command.
    Args:
        container (str or dict): The container to wait on. If a dict, the
            ``Id`` key is used.
        timeout (int): Request timeout
        condition (str): Wait until a container state reaches the given
            condition, either ``not-running`` (default), ``next-exit``,
            or ``removed``
    Returns:
        (dict): The API's response as a Python dictionary, including
            the container's exit code under the ``StatusCode`` attribute.
    Raises:
        :py:class:`requests.exceptions.ReadTimeout`
            If the timeout is exceeded.
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    params = {}
    if condition is not None:
        # The 'condition' query parameter only exists from API 1.30 on.
        if utils.version_lt(self._version, '1.30'):
            raise errors.InvalidVersion(
                'wait condition is not supported for API version < 1.30'
            )
        params['condition'] = condition
    url = self._url("/containers/{0}/wait", container)
    response = self._post(url, timeout=timeout, params=params)
    return self._result(response, True)
"def",
"wait",
"(",
"self",
",",
"container",
",",
"timeout",
"=",
"None",
",",
"condition",
"=",
"None",
")",
":",
"url",
"=",
"self",
".",
"_url",
"(",
"\"/containers/{0}/wait\"",
",",
"container",
")",
"params",
"=",
"{",
"}",
"if",
"condition",
"is",
"not",
"None",
":",
"if",
"utils",
".",
"version_lt",
"(",
"self",
".",
"_version",
",",
"'1.30'",
")",
":",
"raise",
"errors",
".",
"InvalidVersion",
"(",
"'wait condition is not supported for API version < 1.30'",
")",
"params",
"[",
"'condition'",
"]",
"=",
"condition",
"res",
"=",
"self",
".",
"_post",
"(",
"url",
",",
"timeout",
"=",
"timeout",
",",
"params",
"=",
"params",
")",
"return",
"self",
".",
"_result",
"(",
"res",
",",
"True",
")"
] | 39.764706 | 19.588235 |
def ldirectory(inpath, outpath, args, scope):
    """Compile all *.less files in directory
    Args:
        inpath (str): Path to compile
        outpath (str): Output directory
        args (object): Argparse Object
        scope (Scope): Scope object or None
    """
    # Only write the yacc parser table to a file when debugging.
    yacctab = 'yacctab' if args.debug else None
    if not outpath:
        sys.exit("Compile directory option needs -o ...")
    else:
        if not os.path.isdir(outpath):
            if args.verbose:
                print("Creating '%s'" % outpath, file=sys.stderr)
            if not args.dry_run:
                os.mkdir(outpath)
    less = glob.glob(os.path.join(inpath, '*.less'))
    f = formatter.Formatter(args)
    for lf in less:
        # Derive the output name: same basename, optional '.min', '.css' ext.
        outf = os.path.splitext(os.path.basename(lf))
        minx = '.min' if args.min_ending else ''
        outf = "%s/%s%s.css" % (outpath, outf[0], minx)
        # Skip files whose output is newer than the source, unless forced.
        if not args.force and os.path.exists(outf):
            recompile = os.path.getmtime(outf) < os.path.getmtime(lf)
        else:
            recompile = True
        if recompile:
            print('%s -> %s' % (lf, outf))
            p = parser.LessParser(
                yacc_debug=(args.debug),
                lex_optimize=True,
                yacc_optimize=(not args.debug),
                scope=scope,
                tabfile=yacctab,
                verbose=args.verbose)
            p.parse(filename=lf, debuglevel=0)
            css = f.format(p)
            if not args.dry_run:
                with open(outf, 'w') as outfile:
                    outfile.write(css)
        elif args.verbose:
            print('skipping %s, not modified' % lf, file=sys.stderr)
        sys.stdout.flush()
    if args.recurse:
        # Recurse into subdirectories, mirroring the tree under outpath.
        # NOTE(review): 'name == outpath' compares a bare directory name to
        # a (possibly relative) path — presumably meant to skip the output
        # dir when nested inside inpath; verify against callers.
        [
            ldirectory(
                os.path.join(inpath, name), os.path.join(outpath, name), args,
                scope) for name in os.listdir(inpath)
            if os.path.isdir(os.path.join(inpath, name))
            and not name.startswith('.') and not name == outpath
        ]
"def",
"ldirectory",
"(",
"inpath",
",",
"outpath",
",",
"args",
",",
"scope",
")",
":",
"yacctab",
"=",
"'yacctab'",
"if",
"args",
".",
"debug",
"else",
"None",
"if",
"not",
"outpath",
":",
"sys",
".",
"exit",
"(",
"\"Compile directory option needs -o ...\"",
")",
"else",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"outpath",
")",
":",
"if",
"args",
".",
"verbose",
":",
"print",
"(",
"\"Creating '%s'\"",
"%",
"outpath",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"if",
"not",
"args",
".",
"dry_run",
":",
"os",
".",
"mkdir",
"(",
"outpath",
")",
"less",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"inpath",
",",
"'*.less'",
")",
")",
"f",
"=",
"formatter",
".",
"Formatter",
"(",
"args",
")",
"for",
"lf",
"in",
"less",
":",
"outf",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"lf",
")",
")",
"minx",
"=",
"'.min'",
"if",
"args",
".",
"min_ending",
"else",
"''",
"outf",
"=",
"\"%s/%s%s.css\"",
"%",
"(",
"outpath",
",",
"outf",
"[",
"0",
"]",
",",
"minx",
")",
"if",
"not",
"args",
".",
"force",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"outf",
")",
":",
"recompile",
"=",
"os",
".",
"path",
".",
"getmtime",
"(",
"outf",
")",
"<",
"os",
".",
"path",
".",
"getmtime",
"(",
"lf",
")",
"else",
":",
"recompile",
"=",
"True",
"if",
"recompile",
":",
"print",
"(",
"'%s -> %s'",
"%",
"(",
"lf",
",",
"outf",
")",
")",
"p",
"=",
"parser",
".",
"LessParser",
"(",
"yacc_debug",
"=",
"(",
"args",
".",
"debug",
")",
",",
"lex_optimize",
"=",
"True",
",",
"yacc_optimize",
"=",
"(",
"not",
"args",
".",
"debug",
")",
",",
"scope",
"=",
"scope",
",",
"tabfile",
"=",
"yacctab",
",",
"verbose",
"=",
"args",
".",
"verbose",
")",
"p",
".",
"parse",
"(",
"filename",
"=",
"lf",
",",
"debuglevel",
"=",
"0",
")",
"css",
"=",
"f",
".",
"format",
"(",
"p",
")",
"if",
"not",
"args",
".",
"dry_run",
":",
"with",
"open",
"(",
"outf",
",",
"'w'",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"css",
")",
"elif",
"args",
".",
"verbose",
":",
"print",
"(",
"'skipping %s, not modified'",
"%",
"lf",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"if",
"args",
".",
"recurse",
":",
"[",
"ldirectory",
"(",
"os",
".",
"path",
".",
"join",
"(",
"inpath",
",",
"name",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"outpath",
",",
"name",
")",
",",
"args",
",",
"scope",
")",
"for",
"name",
"in",
"os",
".",
"listdir",
"(",
"inpath",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"inpath",
",",
"name",
")",
")",
"and",
"not",
"name",
".",
"startswith",
"(",
"'.'",
")",
"and",
"not",
"name",
"==",
"outpath",
"]"
] | 37.519231 | 12.961538 |
def classify_clusters(points, n=10):
    """
    Return an array of K-Means cluster classes for an array of
    `shapely.geometry.Point` objects.
    """
    coords = [[pt.x, pt.y] for pt in points.values]
    model = KMeans(n_clusters=n)
    model.fit(coords)
    return model.predict(coords)
"def",
"classify_clusters",
"(",
"points",
",",
"n",
"=",
"10",
")",
":",
"arr",
"=",
"[",
"[",
"p",
".",
"x",
",",
"p",
".",
"y",
"]",
"for",
"p",
"in",
"points",
".",
"values",
"]",
"clf",
"=",
"KMeans",
"(",
"n_clusters",
"=",
"n",
")",
"clf",
".",
"fit",
"(",
"arr",
")",
"classes",
"=",
"clf",
".",
"predict",
"(",
"arr",
")",
"return",
"classes"
] | 31.666667 | 14.555556 |
def _characteristic_changed(self, characteristic):
        """Handle a value-change notification for the given characteristic."""
        # Invoke the user-registered on_changed handler for this
        # characteristic, if any, passing it the raw value bytes.
        callback = self._char_on_changed.get(characteristic)
        if callback is not None:
            callback(characteristic.value().bytes().tobytes())
        # Also notify the wrapper characteristic object (if known) that a
        # fresh value is available, so pending reads can complete.
        wrapper = characteristic_list().get(characteristic)
        if wrapper is not None:
            wrapper._value_read.set()
"def",
"_characteristic_changed",
"(",
"self",
",",
"characteristic",
")",
":",
"# Called when a characteristic is changed. Get the on_changed handler",
"# for this characteristic (if it exists) and call it.",
"on_changed",
"=",
"self",
".",
"_char_on_changed",
".",
"get",
"(",
"characteristic",
",",
"None",
")",
"if",
"on_changed",
"is",
"not",
"None",
":",
"on_changed",
"(",
"characteristic",
".",
"value",
"(",
")",
".",
"bytes",
"(",
")",
".",
"tobytes",
"(",
")",
")",
"# Also tell the characteristic that it has a new value.",
"# First get the service that is associated with this characteristic.",
"char",
"=",
"characteristic_list",
"(",
")",
".",
"get",
"(",
"characteristic",
")",
"if",
"char",
"is",
"not",
"None",
":",
"char",
".",
"_value_read",
".",
"set",
"(",
")"
] | 57.416667 | 18.333333 |
def hash(self, id):
        """Map *id* to a unique filename in the cache: the MD5 hexdigest of
        the id plus this cache's file extension, inside the cache directory.
        """
        digest = md5(id).hexdigest()
        filename = digest + self.type
        return os.path.join(self.path, filename)
"def",
"hash",
"(",
"self",
",",
"id",
")",
":",
"h",
"=",
"md5",
"(",
"id",
")",
".",
"hexdigest",
"(",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
",",
"h",
"+",
"self",
".",
"type",
")"
] | 24.857143 | 17.285714 |
def match(self, context):
        """Drive opcode dispatch for *context*.

        Returns True if the context matched, False if it did not, and None
        when matching is unfinished and must be resumed after child
        contexts have been matched.
        """
        while context.remaining_codes() > 0 and context.has_matched is None:
            code = context.peek_code()
            handled = self.dispatch(code, context)
            if not handled:
                # Dispatcher could not finish here; resume later.
                return None
        if context.has_matched is None:
            context.has_matched = False
        return context.has_matched
"def",
"match",
"(",
"self",
",",
"context",
")",
":",
"while",
"context",
".",
"remaining_codes",
"(",
")",
">",
"0",
"and",
"context",
".",
"has_matched",
"is",
"None",
":",
"opcode",
"=",
"context",
".",
"peek_code",
"(",
")",
"if",
"not",
"self",
".",
"dispatch",
"(",
"opcode",
",",
"context",
")",
":",
"return",
"None",
"if",
"context",
".",
"has_matched",
"is",
"None",
":",
"context",
".",
"has_matched",
"=",
"False",
"return",
"context",
".",
"has_matched"
] | 47.181818 | 10.363636 |
def angToDisc(nside, lon, lat, radius, inclusive=False, fact=4, nest=False):
    """
    Thin wrapper around `query_disc` that accepts lon, lat, and radius in
    degrees instead of a unit vector.
    """
    center = angToVec(lon, lat)
    return query_disc(nside, center, radius, inclusive, fact, nest)
"def",
"angToDisc",
"(",
"nside",
",",
"lon",
",",
"lat",
",",
"radius",
",",
"inclusive",
"=",
"False",
",",
"fact",
"=",
"4",
",",
"nest",
"=",
"False",
")",
":",
"vec",
"=",
"angToVec",
"(",
"lon",
",",
"lat",
")",
"return",
"query_disc",
"(",
"nside",
",",
"vec",
",",
"radius",
",",
"inclusive",
",",
"fact",
",",
"nest",
")"
] | 39.5 | 14.833333 |
def __insert_represented_points(self, cluster):
        """!
        @brief Insert the representation points of a cluster into the k-d tree.
        @param[in] cluster (cure_cluster): Cluster whose representation points should be inserted.
        """
        for rep_point in cluster.rep:
            self.__tree.insert(rep_point, cluster)
"def",
"__insert_represented_points",
"(",
"self",
",",
"cluster",
")",
":",
"for",
"point",
"in",
"cluster",
".",
"rep",
":",
"self",
".",
"__tree",
".",
"insert",
"(",
"point",
",",
"cluster",
")"
] | 34 | 19.3 |
def branch(self, branch_name, start_point='HEAD', force=True,
           checkout=False):
        """Create a branch, as `git branch <branch_name> <start_point>` would.

        When *checkout* is True, the new branch is checked out afterwards.
        """
        return git_branch(self.repo_dir, branch_name, start_point,
                          force=force, checkout=checkout)
"def",
"branch",
"(",
"self",
",",
"branch_name",
",",
"start_point",
"=",
"'HEAD'",
",",
"force",
"=",
"True",
",",
"checkout",
"=",
"False",
")",
":",
"return",
"git_branch",
"(",
"self",
".",
"repo_dir",
",",
"branch_name",
",",
"start_point",
",",
"force",
"=",
"force",
",",
"checkout",
"=",
"checkout",
")"
] | 40.222222 | 16.111111 |
def add_resource(mt_file, ref, cache):
    """Add a 'Resources' entry for each file found at *ref*, downloading and
    intuiting each file, replacing entries with the same reference.

    :param mt_file: a MetapackDoc, or a reference from which one is built;
        it is also where the updated document is written back to.
    :param ref: URL whose listing is scanned for resource files.
    :param cache: download cache handed through to ``add_single_resource``.
    """
    if isinstance(mt_file, MetapackDoc):
        doc = mt_file
    else:
        doc = MetapackDoc(mt_file)
    if not 'Resources' in doc:
        doc.new_section('Resources')
    # Ensure the standard resource columns exist exactly once, dropping blanks.
    doc['Resources'].args = [e for e in set(doc['Resources'].args + ['Name', 'StartLine', 'HeaderLines', 'Encoding']) if
                             e]
    seen_names = set()
    u = parse_app_url(ref)
    # The web and file URLs don't list the same.
    if u.proto == 'file':
        entries = u.list()
    else:
        # Web listings are nested one level deeper, so flatten them.
        entries = [ssu for su in u.list() for ssu in su.list()]
    errors = []
    for e in entries:
        if not add_single_resource(doc, e, cache=cache, seen_names=seen_names):
            errors.append(e)
    if errors:
        prt()
        warn("Found, but failed to add these urls:")
        for e in errors:
            print(' ', e)
    write_doc(doc, mt_file)
"def",
"add_resource",
"(",
"mt_file",
",",
"ref",
",",
"cache",
")",
":",
"if",
"isinstance",
"(",
"mt_file",
",",
"MetapackDoc",
")",
":",
"doc",
"=",
"mt_file",
"else",
":",
"doc",
"=",
"MetapackDoc",
"(",
"mt_file",
")",
"if",
"not",
"'Resources'",
"in",
"doc",
":",
"doc",
".",
"new_section",
"(",
"'Resources'",
")",
"doc",
"[",
"'Resources'",
"]",
".",
"args",
"=",
"[",
"e",
"for",
"e",
"in",
"set",
"(",
"doc",
"[",
"'Resources'",
"]",
".",
"args",
"+",
"[",
"'Name'",
",",
"'StartLine'",
",",
"'HeaderLines'",
",",
"'Encoding'",
"]",
")",
"if",
"e",
"]",
"seen_names",
"=",
"set",
"(",
")",
"u",
"=",
"parse_app_url",
"(",
"ref",
")",
"# The web and file URLs don't list the same.",
"if",
"u",
".",
"proto",
"==",
"'file'",
":",
"entries",
"=",
"u",
".",
"list",
"(",
")",
"else",
":",
"entries",
"=",
"[",
"ssu",
"for",
"su",
"in",
"u",
".",
"list",
"(",
")",
"for",
"ssu",
"in",
"su",
".",
"list",
"(",
")",
"]",
"errors",
"=",
"[",
"]",
"for",
"e",
"in",
"entries",
":",
"if",
"not",
"add_single_resource",
"(",
"doc",
",",
"e",
",",
"cache",
"=",
"cache",
",",
"seen_names",
"=",
"seen_names",
")",
":",
"errors",
".",
"append",
"(",
"e",
")",
"if",
"errors",
":",
"prt",
"(",
")",
"warn",
"(",
"\"Found, but failed to add these urls:\"",
")",
"for",
"e",
"in",
"errors",
":",
"print",
"(",
"' '",
",",
"e",
")",
"write_doc",
"(",
"doc",
",",
"mt_file",
")"
] | 25.435897 | 23.717949 |
def hastext(self, cls='current', strict=True, correctionhandling=CorrectionHandling.CURRENT):
        """See :meth:`AbstractElement.hastext`"""
        # Backward compatibility: the old 'original' text class maps onto
        # ORIGINAL correction handling.
        if cls == 'original':
            correctionhandling = CorrectionHandling.ORIGINAL
        # Check the corrected side (New/Current children) first.
        if correctionhandling in (CorrectionHandling.CURRENT, CorrectionHandling.EITHER):
            for child in self:
                if isinstance(child, (New, Current)):
                    return child.hastext(cls, strict, correctionhandling)
        # Then the original side.
        if correctionhandling in (CorrectionHandling.ORIGINAL, CorrectionHandling.EITHER):
            for child in self:
                if isinstance(child, Original):
                    return child.hastext(cls, strict, correctionhandling)
        return False
"def",
"hastext",
"(",
"self",
",",
"cls",
"=",
"'current'",
",",
"strict",
"=",
"True",
",",
"correctionhandling",
"=",
"CorrectionHandling",
".",
"CURRENT",
")",
":",
"if",
"cls",
"==",
"'original'",
":",
"correctionhandling",
"=",
"CorrectionHandling",
".",
"ORIGINAL",
"#backward compatibility",
"if",
"correctionhandling",
"in",
"(",
"CorrectionHandling",
".",
"CURRENT",
",",
"CorrectionHandling",
".",
"EITHER",
")",
":",
"for",
"e",
"in",
"self",
":",
"if",
"isinstance",
"(",
"e",
",",
"New",
")",
"or",
"isinstance",
"(",
"e",
",",
"Current",
")",
":",
"return",
"e",
".",
"hastext",
"(",
"cls",
",",
"strict",
",",
"correctionhandling",
")",
"if",
"correctionhandling",
"in",
"(",
"CorrectionHandling",
".",
"ORIGINAL",
",",
"CorrectionHandling",
".",
"EITHER",
")",
":",
"for",
"e",
"in",
"self",
":",
"if",
"isinstance",
"(",
"e",
",",
"Original",
")",
":",
"return",
"e",
".",
"hastext",
"(",
"cls",
",",
"strict",
",",
"correctionhandling",
")",
"return",
"False"
] | 61.416667 | 28.666667 |
def _numbers_decades(N):
    """Build a decade ruler for positions 1..N: each complete decade gets its
    index, right-aligned in a 10-character field, concatenated into one string.
    """
    decades = N // 10
    return "".join("%10s" % i for i in range(1, decades + 1))
"def",
"_numbers_decades",
"(",
"N",
")",
":",
"N",
"=",
"N",
"//",
"10",
"lst",
"=",
"range",
"(",
"1",
",",
"N",
"+",
"1",
")",
"return",
"\"\"",
".",
"join",
"(",
"map",
"(",
"lambda",
"i",
":",
"\"%10s\"",
"%",
"i",
",",
"lst",
")",
")"
] | 25.25 | 10.5 |
def register_lookup(cls, lookup, lookup_name=None):
    """Register *lookup* on *cls* under *lookup_name* (defaulting to the
    lookup's own ``lookup_name``), invalidate cached lookups, and return it."""
    name = lookup_name if lookup_name is not None else lookup.lookup_name
    # Give the class its own registry rather than mutating an inherited one.
    if 'class_lookups' not in cls.__dict__:
        cls.class_lookups = {}
    cls.class_lookups[name] = lookup
    cls._clear_cached_lookups()
    return lookup
"def",
"register_lookup",
"(",
"cls",
",",
"lookup",
",",
"lookup_name",
"=",
"None",
")",
":",
"if",
"lookup_name",
"is",
"None",
":",
"lookup_name",
"=",
"lookup",
".",
"lookup_name",
"if",
"'class_lookups'",
"not",
"in",
"cls",
".",
"__dict__",
":",
"cls",
".",
"class_lookups",
"=",
"{",
"}",
"cls",
".",
"class_lookups",
"[",
"lookup_name",
"]",
"=",
"lookup",
"cls",
".",
"_clear_cached_lookups",
"(",
")",
"return",
"lookup"
] | 32 | 13.454545 |
def get(self, key, cache=None):
        """Query the server for an item, parse the JSON, and return an Item.

        Keyword arguments:
        key -- the key of the item to retrieve. Required.
        cache -- the name of the cache holding the item. Defaults to None,
                 which falls back to self.name; if neither is set, a
                 ValueError is raised.
        """
        name = cache if cache is not None else self.name
        if name is None:
            raise ValueError("Cache name must be set")
        url = "caches/%s/items/%s" % (quote_plus(name), quote_plus(key))
        result = self.client.get(url)
        return Item(values=result["body"])
"def",
"get",
"(",
"self",
",",
"key",
",",
"cache",
"=",
"None",
")",
":",
"if",
"cache",
"is",
"None",
":",
"cache",
"=",
"self",
".",
"name",
"if",
"cache",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Cache name must be set\"",
")",
"cache",
"=",
"quote_plus",
"(",
"cache",
")",
"key",
"=",
"quote_plus",
"(",
"key",
")",
"url",
"=",
"\"caches/%s/items/%s\"",
"%",
"(",
"cache",
",",
"key",
")",
"result",
"=",
"self",
".",
"client",
".",
"get",
"(",
"url",
")",
"return",
"Item",
"(",
"values",
"=",
"result",
"[",
"\"body\"",
"]",
")"
] | 39.944444 | 14.611111 |
def set_published_date(self, date=None):
        """
        Set the published date of the IOC.

        :param date: Date value to set, in xsdDate form
            (YYYY-MM-DDTHH:MM:SS).  Defaults to the current date when
            not provided.
        :return: True
        :raises: IOCParseError if the date format is not valid.
        """
        if date:
            # Validate the caller-supplied date before stamping it.
            if not re.match(DATE_REGEX, date):
                raise IOCParseError('Published date is not valid. Must be in the form YYYY-MM-DDTHH:MM:SS')
        ioc_et.set_root_published_date(self.root, date)
        return True
"def",
"set_published_date",
"(",
"self",
",",
"date",
"=",
"None",
")",
":",
"if",
"date",
":",
"match",
"=",
"re",
".",
"match",
"(",
"DATE_REGEX",
",",
"date",
")",
"if",
"not",
"match",
":",
"raise",
"IOCParseError",
"(",
"'Published date is not valid. Must be in the form YYYY-MM-DDTHH:MM:SS'",
")",
"ioc_et",
".",
"set_root_published_date",
"(",
"self",
".",
"root",
",",
"date",
")",
"return",
"True"
] | 43.235294 | 20.647059 |
def _sort_by_region(fnames, regions, ref_file, config):
    """Sort a set of regionally split files by region for ordered output.

    Orders (region, file) pairs by contig order in the reference file, then
    by start/end coordinate, de-duplicating repeated file names.  Returns a
    list of ``(contig, fname)`` tuples.
    """
    # Map each contig name to its position in the reference file.
    contig_order = {}
    for i, sq in enumerate(ref.file_contigs(ref_file, config)):
        contig_order[sq.name] = i
    sitems = []
    assert len(regions) == len(fnames), (regions, fnames)
    added_fnames = set([])
    for region, fname in zip(regions, fnames):
        if fname not in added_fnames:
            # Regions arrive as (contig, start, end) tuples,
            # "contig:start-end" strings, or bare contig names.
            if isinstance(region, (list, tuple)):
                c, s, e = region
            elif isinstance(region, six.string_types) and region.find(":") >= 0:
                c, coords = region.split(":")
                s, e = [int(x) for x in coords.split("-")]
            else:
                c = region
                s, e = 0, 0
            sitems.append(((contig_order[c], s, e), c, fname))
            added_fnames.add(fname)
    sitems.sort()
    return [(x[1], x[2]) for x in sitems]
"def",
"_sort_by_region",
"(",
"fnames",
",",
"regions",
",",
"ref_file",
",",
"config",
")",
":",
"contig_order",
"=",
"{",
"}",
"for",
"i",
",",
"sq",
"in",
"enumerate",
"(",
"ref",
".",
"file_contigs",
"(",
"ref_file",
",",
"config",
")",
")",
":",
"contig_order",
"[",
"sq",
".",
"name",
"]",
"=",
"i",
"sitems",
"=",
"[",
"]",
"assert",
"len",
"(",
"regions",
")",
"==",
"len",
"(",
"fnames",
")",
",",
"(",
"regions",
",",
"fnames",
")",
"added_fnames",
"=",
"set",
"(",
"[",
"]",
")",
"for",
"region",
",",
"fname",
"in",
"zip",
"(",
"regions",
",",
"fnames",
")",
":",
"if",
"fname",
"not",
"in",
"added_fnames",
":",
"if",
"isinstance",
"(",
"region",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"c",
",",
"s",
",",
"e",
"=",
"region",
"elif",
"isinstance",
"(",
"region",
",",
"six",
".",
"string_types",
")",
"and",
"region",
".",
"find",
"(",
"\":\"",
")",
">=",
"0",
":",
"c",
",",
"coords",
"=",
"region",
".",
"split",
"(",
"\":\"",
")",
"s",
",",
"e",
"=",
"[",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"coords",
".",
"split",
"(",
"\"-\"",
")",
"]",
"else",
":",
"c",
"=",
"region",
"s",
",",
"e",
"=",
"0",
",",
"0",
"sitems",
".",
"append",
"(",
"(",
"(",
"contig_order",
"[",
"c",
"]",
",",
"s",
",",
"e",
")",
",",
"c",
",",
"fname",
")",
")",
"added_fnames",
".",
"add",
"(",
"fname",
")",
"sitems",
".",
"sort",
"(",
")",
"return",
"[",
"(",
"x",
"[",
"1",
"]",
",",
"x",
"[",
"2",
"]",
")",
"for",
"x",
"in",
"sitems",
"]"
] | 40.086957 | 13.478261 |
def include(context, bundle_name, version):
    """Include a bundle of files into the internal space.

    Use only the bundle name to include its latest version, or pass an
    explicit *version* id to include that specific version.

    Aborts the CLI context when the version/bundle cannot be found, or
    when a file in the version cannot be included.
    """
    store = Store(context.obj['database'], context.obj['root'])
    if version:
        version_obj = store.Version.get(version)
        if version_obj is None:
            click.echo(click.style('version not found', fg='red'))
            # Bug fix: previously fell through and called include_version
            # with a None version.
            context.abort()
    else:
        bundle_obj = store.bundle(bundle_name)
        if bundle_obj is None:
            click.echo(click.style('bundle not found', fg='red'))
            # Bug fix: previously fell through to `bundle_obj.versions`
            # and raised AttributeError on None.
            context.abort()
        version_obj = bundle_obj.versions[0]
    try:
        include_version(context.obj['root'], version_obj)
    except VersionIncludedError as error:
        click.echo(click.style(error.message, fg='red'))
        context.abort()
    version_obj.included_at = dt.datetime.now()
    store.commit()
    click.echo(click.style('included all files!', fg='green'))
"def",
"include",
"(",
"context",
",",
"bundle_name",
",",
"version",
")",
":",
"store",
"=",
"Store",
"(",
"context",
".",
"obj",
"[",
"'database'",
"]",
",",
"context",
".",
"obj",
"[",
"'root'",
"]",
")",
"if",
"version",
":",
"version_obj",
"=",
"store",
".",
"Version",
".",
"get",
"(",
"version",
")",
"if",
"version_obj",
"is",
"None",
":",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"'version not found'",
",",
"fg",
"=",
"'red'",
")",
")",
"else",
":",
"bundle_obj",
"=",
"store",
".",
"bundle",
"(",
"bundle_name",
")",
"if",
"bundle_obj",
"is",
"None",
":",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"'bundle not found'",
",",
"fg",
"=",
"'red'",
")",
")",
"version_obj",
"=",
"bundle_obj",
".",
"versions",
"[",
"0",
"]",
"try",
":",
"include_version",
"(",
"context",
".",
"obj",
"[",
"'root'",
"]",
",",
"version_obj",
")",
"except",
"VersionIncludedError",
"as",
"error",
":",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"error",
".",
"message",
",",
"fg",
"=",
"'red'",
")",
")",
"context",
".",
"abort",
"(",
")",
"version_obj",
".",
"included_at",
"=",
"dt",
".",
"datetime",
".",
"now",
"(",
")",
"store",
".",
"commit",
"(",
")",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"'included all files!'",
",",
"fg",
"=",
"'green'",
")",
")"
] | 36.2 | 18.12 |
def _after_valid_time_range(self):
        """
        Return True when ``self.time`` lies past ``self.end_time``.

        In case of uncertainty (times not specified or not comparable), we
        assume that we are in a valid range, so False is returned.
        """
        if self.end_time is None:
            return False
        try:
            past_end = self.time > self.end_time
        except TypeError:
            # Non-comparable times: treat the range as still valid.
            return False
        return True if past_end else False
"def",
"_after_valid_time_range",
"(",
"self",
")",
":",
"if",
"self",
".",
"end_time",
"is",
"not",
"None",
":",
"try",
":",
"if",
"self",
".",
"time",
">",
"self",
".",
"end_time",
":",
"return",
"True",
"except",
"TypeError",
":",
"return",
"False",
"return",
"False"
] | 32.272727 | 13 |
def source_present(name, source_type='imgapi'):
    '''
    Ensure an image source is present on the computenode
    name : string
        source url
    source_type : string
        source type (imgapi or docker)
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    if name in __salt__['imgadm.sources']():
        # Nothing to do; the source is already configured.
        ret['result'] = True
        ret['comment'] = 'image source {0} is present'.format(name)
        return ret
    # Source is missing: add it (or merely pretend to under test=True).
    if __opts__['test']:
        res = {}
        ret['result'] = True
    else:
        res = __salt__['imgadm.source_add'](name, source_type)
        ret['result'] = (name in res)
    if ret['result']:
        ret['comment'] = 'image source {0} added'.format(name)
        ret['changes'][name] = 'added'
    else:
        ret['comment'] = 'image source {0} not added'.format(name)
        if 'Error' in res:
            ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])
    return ret
"def",
"source_present",
"(",
"name",
",",
"source_type",
"=",
"'imgapi'",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"None",
",",
"'comment'",
":",
"''",
"}",
"if",
"name",
"in",
"__salt__",
"[",
"'imgadm.sources'",
"]",
"(",
")",
":",
"# source is present",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"'image source {0} is present'",
".",
"format",
"(",
"name",
")",
"else",
":",
"# add new source",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"res",
"=",
"{",
"}",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"else",
":",
"res",
"=",
"__salt__",
"[",
"'imgadm.source_add'",
"]",
"(",
"name",
",",
"source_type",
")",
"ret",
"[",
"'result'",
"]",
"=",
"(",
"name",
"in",
"res",
")",
"if",
"ret",
"[",
"'result'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'image source {0} added'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'changes'",
"]",
"[",
"name",
"]",
"=",
"'added'",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'image source {0} not added'",
".",
"format",
"(",
"name",
")",
"if",
"'Error'",
"in",
"res",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'{0}: {1}'",
".",
"format",
"(",
"ret",
"[",
"'comment'",
"]",
",",
"res",
"[",
"'Error'",
"]",
")",
"return",
"ret"
] | 29.25 | 20.694444 |
def _shift2boolean(self,
                   q_mesh_shift,
                   is_gamma_center=False,
                   tolerance=1e-5):
        """
        Convert a q-point mesh shift vector into per-axis boolean shift flags.

        Tolerance is used to judge zero/half grid shift.
        This value is not necessary to be changed usually.

        Returns a 3-element list of flags, or None when the shift is neither
        a zero nor a half grid shift.
        """
        # NOTE(review): `tolerance` appears unused here; the comparisons
        # below use hard-coded 0.01 / 0.1 thresholds -- confirm intent.
        if q_mesh_shift is None:
            shift = np.zeros(3, dtype='double')
        else:
            shift = np.array(q_mesh_shift, dtype='double')
        # Only zero and half shifts can be expressed as booleans: for those,
        # 2*shift is (close to) an integer on every axis.
        doubled_offset = np.abs(shift * 2 - np.rint(shift * 2))
        if not (doubled_offset < 0.01).all():
            return None
        half_shifted = np.abs(shift - np.rint(shift)) > 0.1
        if is_gamma_center:
            return list(half_shifted)
        # Monkhorst-Pack: combine the half-shift flag with mesh evenness.
        return list(np.logical_xor(half_shifted, self._mesh % 2 == 0) * 1)
"def",
"_shift2boolean",
"(",
"self",
",",
"q_mesh_shift",
",",
"is_gamma_center",
"=",
"False",
",",
"tolerance",
"=",
"1e-5",
")",
":",
"if",
"q_mesh_shift",
"is",
"None",
":",
"shift",
"=",
"np",
".",
"zeros",
"(",
"3",
",",
"dtype",
"=",
"'double'",
")",
"else",
":",
"shift",
"=",
"np",
".",
"array",
"(",
"q_mesh_shift",
",",
"dtype",
"=",
"'double'",
")",
"diffby2",
"=",
"np",
".",
"abs",
"(",
"shift",
"*",
"2",
"-",
"np",
".",
"rint",
"(",
"shift",
"*",
"2",
")",
")",
"if",
"(",
"diffby2",
"<",
"0.01",
")",
".",
"all",
"(",
")",
":",
"# zero or half shift",
"diff",
"=",
"np",
".",
"abs",
"(",
"shift",
"-",
"np",
".",
"rint",
"(",
"shift",
")",
")",
"if",
"is_gamma_center",
":",
"is_shift",
"=",
"list",
"(",
"diff",
">",
"0.1",
")",
"else",
":",
"# Monkhorst-pack",
"is_shift",
"=",
"list",
"(",
"np",
".",
"logical_xor",
"(",
"(",
"diff",
">",
"0.1",
")",
",",
"(",
"self",
".",
"_mesh",
"%",
"2",
"==",
"0",
")",
")",
"*",
"1",
")",
"else",
":",
"is_shift",
"=",
"None",
"return",
"is_shift"
] | 34.5 | 15.730769 |
def require_axis(f):
    """Decorator: check that the node has both ``axis`` and ``sel_axis``
    members before invoking *f*.

    Raises:
        ValueError: if either member is None.
    """
    @wraps(f)
    def _wrapper(self, *args, **kwargs):
        if None in (self.axis, self.sel_axis):
            # Bug fix: the original format string was '%(func_name) requires'
            # -- missing the trailing 's' conversion, so '%' consumed the 'r'
            # of "requires" as a %r conversion and garbled the message.
            raise ValueError('%(func_name)s requires the node %(node)s '
                             'to have an axis and a sel_axis function' %
                             dict(func_name=f.__name__, node=repr(self)))
        return f(self, *args, **kwargs)
    return _wrapper
"def",
"require_axis",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"_wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"None",
"in",
"(",
"self",
".",
"axis",
",",
"self",
".",
"sel_axis",
")",
":",
"raise",
"ValueError",
"(",
"'%(func_name) requires the node %(node)s '",
"'to have an axis and a sel_axis function'",
"%",
"dict",
"(",
"func_name",
"=",
"f",
".",
"__name__",
",",
"node",
"=",
"repr",
"(",
"self",
")",
")",
")",
"return",
"f",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"_wrapper"
] | 34.769231 | 21 |
def classify_harmonic(self, partial_labels, use_CMN=True):
    '''Harmonic function method for semi-supervised classification,
    also known as the Gaussian Mean Fields algorithm.
    partial_labels: (n,) array of integer labels, -1 for unlabeled.
    use_CMN : when True, apply Class Mass Normalization
    Returns a copy of partial_labels with the -1 entries replaced by
    predicted classes.
    From "Semi-Supervised Learning Using Gaussian Fields and Harmonic Functions"
    by Zhu, Ghahramani, and Lafferty in 2003.
    Based on the matlab code at:
    http://pages.cs.wisc.edu/~jerryzhu/pub/harmonic_function.m
    '''
    # prepare labels (work on a copy so the caller's array is untouched)
    labels = np.array(partial_labels, copy=True)
    unlabeled = labels == -1
    # convert known labels to one-hot encoding
    fl, classes = _onehot(labels[~unlabeled])
    # Unnormalized graph Laplacian, restricted to the unlabeled rows.
    L = self.laplacian(normed=False)
    if ss.issparse(L):
        L = L.tocsr()[unlabeled].toarray()
    else:
        L = L[unlabeled]
    Lul = L[:,~unlabeled]
    Luu = L[:,unlabeled]
    # Harmonic solution on the unlabeled nodes: solve Luu @ fu = -Lul @ fl.
    fu = -np.linalg.solve(Luu, Lul.dot(fl))
    if use_CMN:
        # Class Mass Normalization: rescale each class column by its prior mass.
        scale = (1 + fl.sum(axis=0)) / fu.sum(axis=0)
        fu *= scale
    # assign new labels
    labels[unlabeled] = classes[fu.argmax(axis=1)]
    return labels
"def",
"classify_harmonic",
"(",
"self",
",",
"partial_labels",
",",
"use_CMN",
"=",
"True",
")",
":",
"# prepare labels",
"labels",
"=",
"np",
".",
"array",
"(",
"partial_labels",
",",
"copy",
"=",
"True",
")",
"unlabeled",
"=",
"labels",
"==",
"-",
"1",
"# convert known labels to one-hot encoding",
"fl",
",",
"classes",
"=",
"_onehot",
"(",
"labels",
"[",
"~",
"unlabeled",
"]",
")",
"L",
"=",
"self",
".",
"laplacian",
"(",
"normed",
"=",
"False",
")",
"if",
"ss",
".",
"issparse",
"(",
"L",
")",
":",
"L",
"=",
"L",
".",
"tocsr",
"(",
")",
"[",
"unlabeled",
"]",
".",
"toarray",
"(",
")",
"else",
":",
"L",
"=",
"L",
"[",
"unlabeled",
"]",
"Lul",
"=",
"L",
"[",
":",
",",
"~",
"unlabeled",
"]",
"Luu",
"=",
"L",
"[",
":",
",",
"unlabeled",
"]",
"fu",
"=",
"-",
"np",
".",
"linalg",
".",
"solve",
"(",
"Luu",
",",
"Lul",
".",
"dot",
"(",
"fl",
")",
")",
"if",
"use_CMN",
":",
"scale",
"=",
"(",
"1",
"+",
"fl",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
")",
"/",
"fu",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"fu",
"*=",
"scale",
"# assign new labels",
"labels",
"[",
"unlabeled",
"]",
"=",
"classes",
"[",
"fu",
".",
"argmax",
"(",
"axis",
"=",
"1",
")",
"]",
"return",
"labels"
] | 30.027027 | 21.540541 |
def get_pubkey(self):
        """
        Get the public key of the certificate signing request.

        :return: The public key.
        :rtype: :py:class:`PKey`
        """
        # Build a PKey wrapper without running PKey.__init__, then hand it
        # the request's public key.
        result = PKey.__new__(PKey)
        result._pkey = _lib.X509_REQ_get_pubkey(self._req)
        _openssl_assert(result._pkey != _ffi.NULL)
        # Ensure the underlying EVP_PKEY is freed when the wrapper goes away.
        result._pkey = _ffi.gc(result._pkey, _lib.EVP_PKEY_free)
        result._only_public = True
        return result
"def",
"get_pubkey",
"(",
"self",
")",
":",
"pkey",
"=",
"PKey",
".",
"__new__",
"(",
"PKey",
")",
"pkey",
".",
"_pkey",
"=",
"_lib",
".",
"X509_REQ_get_pubkey",
"(",
"self",
".",
"_req",
")",
"_openssl_assert",
"(",
"pkey",
".",
"_pkey",
"!=",
"_ffi",
".",
"NULL",
")",
"pkey",
".",
"_pkey",
"=",
"_ffi",
".",
"gc",
"(",
"pkey",
".",
"_pkey",
",",
"_lib",
".",
"EVP_PKEY_free",
")",
"pkey",
".",
"_only_public",
"=",
"True",
"return",
"pkey"
] | 32.076923 | 13.615385 |
def connectRelay(self):
        """Build the target protocol and attach it to this relay transport."""
        proto = self.connector.buildProtocol(None)
        self.protocol = proto
        self.connected = True
        proto.makeConnection(self)
"def",
"connectRelay",
"(",
"self",
")",
":",
"self",
".",
"protocol",
"=",
"self",
".",
"connector",
".",
"buildProtocol",
"(",
"None",
")",
"self",
".",
"connected",
"=",
"True",
"self",
".",
"protocol",
".",
"makeConnection",
"(",
"self",
")"
] | 40 | 8 |
def rolling_percentileofscore(series, window, min_periods=None):
    """Compute the rolling percentile-of-score over the specified window.

    For each window, the last value is scored against the preceding values
    with ``scipy.stats.percentileofscore``.  NaNs are dropped before rolling
    and restored (as NaN) on reindexing to the original index.

    :param series: input ``pandas.Series``.
    :param window: rolling window length.
    :param min_periods: minimum observations per window; defaults to *window*.
    :return: ``pandas.Series`` aligned to ``series.index``.
    """
    import scipy.stats as stats

    def _percentile(arr):
        score = arr[-1]
        vals = arr[:-1]
        return stats.percentileofscore(vals, score)

    notnull = series.dropna()
    min_periods = min_periods or window
    if notnull.empty:
        return pd.Series(np.nan, index=series.index)
    # pd.rolling_apply was deprecated in pandas 0.18 and later removed;
    # use the Series.rolling(...).apply API instead (raw=True passes
    # ndarrays, matching the old behavior).
    result = notnull.rolling(window, min_periods=min_periods).apply(_percentile, raw=True)
    return result.reindex(series.index)
"def",
"rolling_percentileofscore",
"(",
"series",
",",
"window",
",",
"min_periods",
"=",
"None",
")",
":",
"import",
"scipy",
".",
"stats",
"as",
"stats",
"def",
"_percentile",
"(",
"arr",
")",
":",
"score",
"=",
"arr",
"[",
"-",
"1",
"]",
"vals",
"=",
"arr",
"[",
":",
"-",
"1",
"]",
"return",
"stats",
".",
"percentileofscore",
"(",
"vals",
",",
"score",
")",
"notnull",
"=",
"series",
".",
"dropna",
"(",
")",
"min_periods",
"=",
"min_periods",
"or",
"window",
"if",
"notnull",
".",
"empty",
":",
"return",
"pd",
".",
"Series",
"(",
"np",
".",
"nan",
",",
"index",
"=",
"series",
".",
"index",
")",
"else",
":",
"return",
"pd",
".",
"rolling_apply",
"(",
"notnull",
",",
"window",
",",
"_percentile",
",",
"min_periods",
"=",
"min_periods",
")",
".",
"reindex",
"(",
"series",
".",
"index",
")"
] | 36 | 21 |
def _determine_stream_track(self,nTrackChunks):
        """Determine the track of the stream in real space.

        Parameters
        ----------
        nTrackChunks : int or None
            Number of points along the track at which it is computed exactly;
            None uses floor(self._deltaAngleTrack/0.15)+1 (minimum of 4).

        Notes
        -----
        Stores the results on the instance (``self._ObsTrack``,
        ``self._ObsTrackAA``, ``self._thetasTrack``, Jacobians, ...) and
        returns None.
        """
        #Determine how much orbital time is necessary for the progenitor's orbit to cover the stream
        if nTrackChunks is None:
            #default is floor(self._deltaAngleTrack/0.15)+1
            self._nTrackChunks= int(numpy.floor(self._deltaAngleTrack/0.15))+1
        else:
            self._nTrackChunks= nTrackChunks
        if self._nTrackChunks < 4: self._nTrackChunks= 4
        if not hasattr(self,'nInterpolatedTrackChunks'):
            self.nInterpolatedTrackChunks= 1001
        dt= self._deltaAngleTrack\
            /self._progenitor_Omega_along_dOmega
        self._trackts= numpy.linspace(0.,2*dt,2*self._nTrackChunks-1) #to be sure that we cover it
        if self._useTM:
            return self._determine_stream_track_TM()
        #Instantiate an auxiliaryTrack, which is an Orbit instance at the mean frequency of the stream, and zero angle separation wrt the progenitor; prog_stream_offset is the offset between this track and the progenitor at zero angle
        prog_stream_offset=\
            _determine_stream_track_single(self._aA,
                                           self._progenitor,
                                           0., #time = 0
                                           self._progenitor_angle,
                                           self._sigMeanSign,
                                           self._dsigomeanProgDirection,
                                           lambda x: self.meanOmega(x,use_physical=False),
                                           0.) #angle = 0
        auxiliaryTrack= Orbit(prog_stream_offset[3])
        if dt < 0.:
            self._trackts= numpy.linspace(0.,-2.*dt,2*self._nTrackChunks-1)
            #Flip velocities before integrating
            auxiliaryTrack= auxiliaryTrack.flip()
        auxiliaryTrack.integrate(self._trackts,self._pot)
        if dt < 0.:
            #Flip velocities again
            auxiliaryTrack._orb.orbit[:,1]= -auxiliaryTrack._orb.orbit[:,1]
            auxiliaryTrack._orb.orbit[:,2]= -auxiliaryTrack._orb.orbit[:,2]
            auxiliaryTrack._orb.orbit[:,4]= -auxiliaryTrack._orb.orbit[:,4]
        #Calculate the actions, frequencies, and angle for this auxiliary orbit
        acfs= self._aA.actionsFreqs(auxiliaryTrack(0.),
                                    use_physical=False)
        auxiliary_Omega= numpy.array([acfs[3],acfs[4],acfs[5]]).reshape(3\
)
        auxiliary_Omega_along_dOmega= \
            numpy.dot(auxiliary_Omega,self._dsigomeanProgDirection)
        #Now calculate the actions, frequencies, and angles + Jacobian for each chunk
        allAcfsTrack= numpy.empty((self._nTrackChunks,9))
        alljacsTrack= numpy.empty((self._nTrackChunks,6,6))
        allinvjacsTrack= numpy.empty((self._nTrackChunks,6,6))
        thetasTrack= numpy.linspace(0.,self._deltaAngleTrack,
                                    self._nTrackChunks)
        ObsTrack= numpy.empty((self._nTrackChunks,6))
        ObsTrackAA= numpy.empty((self._nTrackChunks,6))
        detdOdJps= numpy.empty((self._nTrackChunks))
        if self._multi is None:
            #Serial evaluation of each track chunk
            for ii in range(self._nTrackChunks):
                multiOut= _determine_stream_track_single(self._aA,
                                                         auxiliaryTrack,
                                                         self._trackts[ii]*numpy.fabs(self._progenitor_Omega_along_dOmega/auxiliary_Omega_along_dOmega), #this factor accounts for the difference in frequency between the progenitor and the auxiliary track
                                                         self._progenitor_angle,
                                                         self._sigMeanSign,
                                                         self._dsigomeanProgDirection,
                                                         lambda x: self.meanOmega(x,use_physical=False),
                                                         thetasTrack[ii])
                allAcfsTrack[ii,:]= multiOut[0]
                alljacsTrack[ii,:,:]= multiOut[1]
                allinvjacsTrack[ii,:,:]= multiOut[2]
                ObsTrack[ii,:]= multiOut[3]
                ObsTrackAA[ii,:]= multiOut[4]
                detdOdJps[ii]= multiOut[5]
        else:
            #Parallel evaluation over at most self._multi cores
            multiOut= multi.parallel_map(\
                (lambda x: _determine_stream_track_single(self._aA,auxiliaryTrack,
                                                          self._trackts[x]*numpy.fabs(self._progenitor_Omega_along_dOmega/auxiliary_Omega_along_dOmega),
                                                          self._progenitor_angle,
                                                          self._sigMeanSign,
                                                          self._dsigomeanProgDirection,
                                                          lambda x: self.meanOmega(x,use_physical=False),
                                                          thetasTrack[x])),
                range(self._nTrackChunks),
                numcores=numpy.amin([self._nTrackChunks,
                                     multiprocessing.cpu_count(),
                                     self._multi]))
            for ii in range(self._nTrackChunks):
                allAcfsTrack[ii,:]= multiOut[ii][0]
                alljacsTrack[ii,:,:]= multiOut[ii][1]
                allinvjacsTrack[ii,:,:]= multiOut[ii][2]
                ObsTrack[ii,:]= multiOut[ii][3]
                ObsTrackAA[ii,:]= multiOut[ii][4]
                detdOdJps[ii]= multiOut[ii][5]
        #Repeat the track calculation using the previous track, to get closer to it
        for nn in range(self.nTrackIterations):
            if self._multi is None:
                for ii in range(self._nTrackChunks):
                    multiOut= _determine_stream_track_single(self._aA,
                                                             Orbit(ObsTrack[ii,:]),
                                                             0.,
                                                             self._progenitor_angle,
                                                             self._sigMeanSign,
                                                             self._dsigomeanProgDirection,
                                                             lambda x:self.meanOmega(x,use_physical=False),
                                                             thetasTrack[ii])
                    allAcfsTrack[ii,:]= multiOut[0]
                    alljacsTrack[ii,:,:]= multiOut[1]
                    allinvjacsTrack[ii,:,:]= multiOut[2]
                    ObsTrack[ii,:]= multiOut[3]
                    ObsTrackAA[ii,:]= multiOut[4]
                    detdOdJps[ii]= multiOut[5]
            else:
                multiOut= multi.parallel_map(\
                    (lambda x: _determine_stream_track_single(self._aA,Orbit(ObsTrack[x,:]),0.,
                                                              self._progenitor_angle,
                                                              self._sigMeanSign,
                                                              self._dsigomeanProgDirection,
                                                              lambda x: self.meanOmega(x,use_physical=False),
                                                              thetasTrack[x])),
                    range(self._nTrackChunks),
                    numcores=numpy.amin([self._nTrackChunks,
                                         multiprocessing.cpu_count(),
                                         self._multi]))
                for ii in range(self._nTrackChunks):
                    allAcfsTrack[ii,:]= multiOut[ii][0]
                    alljacsTrack[ii,:,:]= multiOut[ii][1]
                    allinvjacsTrack[ii,:,:]= multiOut[ii][2]
                    ObsTrack[ii,:]= multiOut[ii][3]
                    ObsTrackAA[ii,:]= multiOut[ii][4]
                    detdOdJps[ii]= multiOut[ii][5]
        #Store the track
        self._thetasTrack= thetasTrack
        self._ObsTrack= ObsTrack
        self._ObsTrackAA= ObsTrackAA
        self._allAcfsTrack= allAcfsTrack
        self._alljacsTrack= alljacsTrack
        self._allinvjacsTrack= allinvjacsTrack
        self._detdOdJps= detdOdJps
        self._meandetdOdJp= numpy.mean(self._detdOdJps)
        self._logmeandetdOdJp= numpy.log(self._meandetdOdJp)
        self._calc_ObsTrackXY()
        return None
"def",
"_determine_stream_track",
"(",
"self",
",",
"nTrackChunks",
")",
":",
"#Determine how much orbital time is necessary for the progenitor's orbit to cover the stream",
"if",
"nTrackChunks",
"is",
"None",
":",
"#default is floor(self._deltaAngleTrack/0.15)+1",
"self",
".",
"_nTrackChunks",
"=",
"int",
"(",
"numpy",
".",
"floor",
"(",
"self",
".",
"_deltaAngleTrack",
"/",
"0.15",
")",
")",
"+",
"1",
"else",
":",
"self",
".",
"_nTrackChunks",
"=",
"nTrackChunks",
"if",
"self",
".",
"_nTrackChunks",
"<",
"4",
":",
"self",
".",
"_nTrackChunks",
"=",
"4",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'nInterpolatedTrackChunks'",
")",
":",
"self",
".",
"nInterpolatedTrackChunks",
"=",
"1001",
"dt",
"=",
"self",
".",
"_deltaAngleTrack",
"/",
"self",
".",
"_progenitor_Omega_along_dOmega",
"self",
".",
"_trackts",
"=",
"numpy",
".",
"linspace",
"(",
"0.",
",",
"2",
"*",
"dt",
",",
"2",
"*",
"self",
".",
"_nTrackChunks",
"-",
"1",
")",
"#to be sure that we cover it",
"if",
"self",
".",
"_useTM",
":",
"return",
"self",
".",
"_determine_stream_track_TM",
"(",
")",
"#Instantiate an auxiliaryTrack, which is an Orbit instance at the mean frequency of the stream, and zero angle separation wrt the progenitor; prog_stream_offset is the offset between this track and the progenitor at zero angle",
"prog_stream_offset",
"=",
"_determine_stream_track_single",
"(",
"self",
".",
"_aA",
",",
"self",
".",
"_progenitor",
",",
"0.",
",",
"#time = 0",
"self",
".",
"_progenitor_angle",
",",
"self",
".",
"_sigMeanSign",
",",
"self",
".",
"_dsigomeanProgDirection",
",",
"lambda",
"x",
":",
"self",
".",
"meanOmega",
"(",
"x",
",",
"use_physical",
"=",
"False",
")",
",",
"0.",
")",
"#angle = 0",
"auxiliaryTrack",
"=",
"Orbit",
"(",
"prog_stream_offset",
"[",
"3",
"]",
")",
"if",
"dt",
"<",
"0.",
":",
"self",
".",
"_trackts",
"=",
"numpy",
".",
"linspace",
"(",
"0.",
",",
"-",
"2.",
"*",
"dt",
",",
"2",
"*",
"self",
".",
"_nTrackChunks",
"-",
"1",
")",
"#Flip velocities before integrating",
"auxiliaryTrack",
"=",
"auxiliaryTrack",
".",
"flip",
"(",
")",
"auxiliaryTrack",
".",
"integrate",
"(",
"self",
".",
"_trackts",
",",
"self",
".",
"_pot",
")",
"if",
"dt",
"<",
"0.",
":",
"#Flip velocities again",
"auxiliaryTrack",
".",
"_orb",
".",
"orbit",
"[",
":",
",",
"1",
"]",
"=",
"-",
"auxiliaryTrack",
".",
"_orb",
".",
"orbit",
"[",
":",
",",
"1",
"]",
"auxiliaryTrack",
".",
"_orb",
".",
"orbit",
"[",
":",
",",
"2",
"]",
"=",
"-",
"auxiliaryTrack",
".",
"_orb",
".",
"orbit",
"[",
":",
",",
"2",
"]",
"auxiliaryTrack",
".",
"_orb",
".",
"orbit",
"[",
":",
",",
"4",
"]",
"=",
"-",
"auxiliaryTrack",
".",
"_orb",
".",
"orbit",
"[",
":",
",",
"4",
"]",
"#Calculate the actions, frequencies, and angle for this auxiliary orbit",
"acfs",
"=",
"self",
".",
"_aA",
".",
"actionsFreqs",
"(",
"auxiliaryTrack",
"(",
"0.",
")",
",",
"use_physical",
"=",
"False",
")",
"auxiliary_Omega",
"=",
"numpy",
".",
"array",
"(",
"[",
"acfs",
"[",
"3",
"]",
",",
"acfs",
"[",
"4",
"]",
",",
"acfs",
"[",
"5",
"]",
"]",
")",
".",
"reshape",
"(",
"3",
")",
"auxiliary_Omega_along_dOmega",
"=",
"numpy",
".",
"dot",
"(",
"auxiliary_Omega",
",",
"self",
".",
"_dsigomeanProgDirection",
")",
"#Now calculate the actions, frequencies, and angles + Jacobian for each chunk",
"allAcfsTrack",
"=",
"numpy",
".",
"empty",
"(",
"(",
"self",
".",
"_nTrackChunks",
",",
"9",
")",
")",
"alljacsTrack",
"=",
"numpy",
".",
"empty",
"(",
"(",
"self",
".",
"_nTrackChunks",
",",
"6",
",",
"6",
")",
")",
"allinvjacsTrack",
"=",
"numpy",
".",
"empty",
"(",
"(",
"self",
".",
"_nTrackChunks",
",",
"6",
",",
"6",
")",
")",
"thetasTrack",
"=",
"numpy",
".",
"linspace",
"(",
"0.",
",",
"self",
".",
"_deltaAngleTrack",
",",
"self",
".",
"_nTrackChunks",
")",
"ObsTrack",
"=",
"numpy",
".",
"empty",
"(",
"(",
"self",
".",
"_nTrackChunks",
",",
"6",
")",
")",
"ObsTrackAA",
"=",
"numpy",
".",
"empty",
"(",
"(",
"self",
".",
"_nTrackChunks",
",",
"6",
")",
")",
"detdOdJps",
"=",
"numpy",
".",
"empty",
"(",
"(",
"self",
".",
"_nTrackChunks",
")",
")",
"if",
"self",
".",
"_multi",
"is",
"None",
":",
"for",
"ii",
"in",
"range",
"(",
"self",
".",
"_nTrackChunks",
")",
":",
"multiOut",
"=",
"_determine_stream_track_single",
"(",
"self",
".",
"_aA",
",",
"auxiliaryTrack",
",",
"self",
".",
"_trackts",
"[",
"ii",
"]",
"*",
"numpy",
".",
"fabs",
"(",
"self",
".",
"_progenitor_Omega_along_dOmega",
"/",
"auxiliary_Omega_along_dOmega",
")",
",",
"#this factor accounts for the difference in frequency between the progenitor and the auxiliary track",
"self",
".",
"_progenitor_angle",
",",
"self",
".",
"_sigMeanSign",
",",
"self",
".",
"_dsigomeanProgDirection",
",",
"lambda",
"x",
":",
"self",
".",
"meanOmega",
"(",
"x",
",",
"use_physical",
"=",
"False",
")",
",",
"thetasTrack",
"[",
"ii",
"]",
")",
"allAcfsTrack",
"[",
"ii",
",",
":",
"]",
"=",
"multiOut",
"[",
"0",
"]",
"alljacsTrack",
"[",
"ii",
",",
":",
",",
":",
"]",
"=",
"multiOut",
"[",
"1",
"]",
"allinvjacsTrack",
"[",
"ii",
",",
":",
",",
":",
"]",
"=",
"multiOut",
"[",
"2",
"]",
"ObsTrack",
"[",
"ii",
",",
":",
"]",
"=",
"multiOut",
"[",
"3",
"]",
"ObsTrackAA",
"[",
"ii",
",",
":",
"]",
"=",
"multiOut",
"[",
"4",
"]",
"detdOdJps",
"[",
"ii",
"]",
"=",
"multiOut",
"[",
"5",
"]",
"else",
":",
"multiOut",
"=",
"multi",
".",
"parallel_map",
"(",
"(",
"lambda",
"x",
":",
"_determine_stream_track_single",
"(",
"self",
".",
"_aA",
",",
"auxiliaryTrack",
",",
"self",
".",
"_trackts",
"[",
"x",
"]",
"*",
"numpy",
".",
"fabs",
"(",
"self",
".",
"_progenitor_Omega_along_dOmega",
"/",
"auxiliary_Omega_along_dOmega",
")",
",",
"self",
".",
"_progenitor_angle",
",",
"self",
".",
"_sigMeanSign",
",",
"self",
".",
"_dsigomeanProgDirection",
",",
"lambda",
"x",
":",
"self",
".",
"meanOmega",
"(",
"x",
",",
"use_physical",
"=",
"False",
")",
",",
"thetasTrack",
"[",
"x",
"]",
")",
")",
",",
"range",
"(",
"self",
".",
"_nTrackChunks",
")",
",",
"numcores",
"=",
"numpy",
".",
"amin",
"(",
"[",
"self",
".",
"_nTrackChunks",
",",
"multiprocessing",
".",
"cpu_count",
"(",
")",
",",
"self",
".",
"_multi",
"]",
")",
")",
"for",
"ii",
"in",
"range",
"(",
"self",
".",
"_nTrackChunks",
")",
":",
"allAcfsTrack",
"[",
"ii",
",",
":",
"]",
"=",
"multiOut",
"[",
"ii",
"]",
"[",
"0",
"]",
"alljacsTrack",
"[",
"ii",
",",
":",
",",
":",
"]",
"=",
"multiOut",
"[",
"ii",
"]",
"[",
"1",
"]",
"allinvjacsTrack",
"[",
"ii",
",",
":",
",",
":",
"]",
"=",
"multiOut",
"[",
"ii",
"]",
"[",
"2",
"]",
"ObsTrack",
"[",
"ii",
",",
":",
"]",
"=",
"multiOut",
"[",
"ii",
"]",
"[",
"3",
"]",
"ObsTrackAA",
"[",
"ii",
",",
":",
"]",
"=",
"multiOut",
"[",
"ii",
"]",
"[",
"4",
"]",
"detdOdJps",
"[",
"ii",
"]",
"=",
"multiOut",
"[",
"ii",
"]",
"[",
"5",
"]",
"#Repeat the track calculation using the previous track, to get closer to it",
"for",
"nn",
"in",
"range",
"(",
"self",
".",
"nTrackIterations",
")",
":",
"if",
"self",
".",
"_multi",
"is",
"None",
":",
"for",
"ii",
"in",
"range",
"(",
"self",
".",
"_nTrackChunks",
")",
":",
"multiOut",
"=",
"_determine_stream_track_single",
"(",
"self",
".",
"_aA",
",",
"Orbit",
"(",
"ObsTrack",
"[",
"ii",
",",
":",
"]",
")",
",",
"0.",
",",
"self",
".",
"_progenitor_angle",
",",
"self",
".",
"_sigMeanSign",
",",
"self",
".",
"_dsigomeanProgDirection",
",",
"lambda",
"x",
":",
"self",
".",
"meanOmega",
"(",
"x",
",",
"use_physical",
"=",
"False",
")",
",",
"thetasTrack",
"[",
"ii",
"]",
")",
"allAcfsTrack",
"[",
"ii",
",",
":",
"]",
"=",
"multiOut",
"[",
"0",
"]",
"alljacsTrack",
"[",
"ii",
",",
":",
",",
":",
"]",
"=",
"multiOut",
"[",
"1",
"]",
"allinvjacsTrack",
"[",
"ii",
",",
":",
",",
":",
"]",
"=",
"multiOut",
"[",
"2",
"]",
"ObsTrack",
"[",
"ii",
",",
":",
"]",
"=",
"multiOut",
"[",
"3",
"]",
"ObsTrackAA",
"[",
"ii",
",",
":",
"]",
"=",
"multiOut",
"[",
"4",
"]",
"detdOdJps",
"[",
"ii",
"]",
"=",
"multiOut",
"[",
"5",
"]",
"else",
":",
"multiOut",
"=",
"multi",
".",
"parallel_map",
"(",
"(",
"lambda",
"x",
":",
"_determine_stream_track_single",
"(",
"self",
".",
"_aA",
",",
"Orbit",
"(",
"ObsTrack",
"[",
"x",
",",
":",
"]",
")",
",",
"0.",
",",
"self",
".",
"_progenitor_angle",
",",
"self",
".",
"_sigMeanSign",
",",
"self",
".",
"_dsigomeanProgDirection",
",",
"lambda",
"x",
":",
"self",
".",
"meanOmega",
"(",
"x",
",",
"use_physical",
"=",
"False",
")",
",",
"thetasTrack",
"[",
"x",
"]",
")",
")",
",",
"range",
"(",
"self",
".",
"_nTrackChunks",
")",
",",
"numcores",
"=",
"numpy",
".",
"amin",
"(",
"[",
"self",
".",
"_nTrackChunks",
",",
"multiprocessing",
".",
"cpu_count",
"(",
")",
",",
"self",
".",
"_multi",
"]",
")",
")",
"for",
"ii",
"in",
"range",
"(",
"self",
".",
"_nTrackChunks",
")",
":",
"allAcfsTrack",
"[",
"ii",
",",
":",
"]",
"=",
"multiOut",
"[",
"ii",
"]",
"[",
"0",
"]",
"alljacsTrack",
"[",
"ii",
",",
":",
",",
":",
"]",
"=",
"multiOut",
"[",
"ii",
"]",
"[",
"1",
"]",
"allinvjacsTrack",
"[",
"ii",
",",
":",
",",
":",
"]",
"=",
"multiOut",
"[",
"ii",
"]",
"[",
"2",
"]",
"ObsTrack",
"[",
"ii",
",",
":",
"]",
"=",
"multiOut",
"[",
"ii",
"]",
"[",
"3",
"]",
"ObsTrackAA",
"[",
"ii",
",",
":",
"]",
"=",
"multiOut",
"[",
"ii",
"]",
"[",
"4",
"]",
"detdOdJps",
"[",
"ii",
"]",
"=",
"multiOut",
"[",
"ii",
"]",
"[",
"5",
"]",
"#Store the track",
"self",
".",
"_thetasTrack",
"=",
"thetasTrack",
"self",
".",
"_ObsTrack",
"=",
"ObsTrack",
"self",
".",
"_ObsTrackAA",
"=",
"ObsTrackAA",
"self",
".",
"_allAcfsTrack",
"=",
"allAcfsTrack",
"self",
".",
"_alljacsTrack",
"=",
"alljacsTrack",
"self",
".",
"_allinvjacsTrack",
"=",
"allinvjacsTrack",
"self",
".",
"_detdOdJps",
"=",
"detdOdJps",
"self",
".",
"_meandetdOdJp",
"=",
"numpy",
".",
"mean",
"(",
"self",
".",
"_detdOdJps",
")",
"self",
".",
"_logmeandetdOdJp",
"=",
"numpy",
".",
"log",
"(",
"self",
".",
"_meandetdOdJp",
")",
"self",
".",
"_calc_ObsTrackXY",
"(",
")",
"return",
"None"
] | 60.884058 | 24.905797 |
def to_array(self):
"""
Serializes this InlineQueryResultGif to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(InlineQueryResultGif, self).to_array()
# 'type' and 'id' given by superclass
array['gif_url'] = u(self.gif_url) # py2: type unicode, py3: type str
array['thumb_url'] = u(self.thumb_url) # py2: type unicode, py3: type str
if self.gif_width is not None:
array['gif_width'] = int(self.gif_width) # type int
if self.gif_height is not None:
array['gif_height'] = int(self.gif_height) # type int
if self.gif_duration is not None:
array['gif_duration'] = int(self.gif_duration) # type int
if self.title is not None:
array['title'] = u(self.title) # py2: type unicode, py3: type str
if self.caption is not None:
array['caption'] = u(self.caption) # py2: type unicode, py3: type str
if self.parse_mode is not None:
array['parse_mode'] = u(self.parse_mode) # py2: type unicode, py3: type str
if self.reply_markup is not None:
array['reply_markup'] = self.reply_markup.to_array() # type InlineKeyboardMarkup
if self.input_message_content is not None:
array['input_message_content'] = self.input_message_content.to_array() # type InputMessageContent
return array | [
"def",
"to_array",
"(",
"self",
")",
":",
"array",
"=",
"super",
"(",
"InlineQueryResultGif",
",",
"self",
")",
".",
"to_array",
"(",
")",
"# 'type' and 'id' given by superclass",
"array",
"[",
"'gif_url'",
"]",
"=",
"u",
"(",
"self",
".",
"gif_url",
")",
"# py2: type unicode, py3: type str",
"array",
"[",
"'thumb_url'",
"]",
"=",
"u",
"(",
"self",
".",
"thumb_url",
")",
"# py2: type unicode, py3: type str",
"if",
"self",
".",
"gif_width",
"is",
"not",
"None",
":",
"array",
"[",
"'gif_width'",
"]",
"=",
"int",
"(",
"self",
".",
"gif_width",
")",
"# type int",
"if",
"self",
".",
"gif_height",
"is",
"not",
"None",
":",
"array",
"[",
"'gif_height'",
"]",
"=",
"int",
"(",
"self",
".",
"gif_height",
")",
"# type int",
"if",
"self",
".",
"gif_duration",
"is",
"not",
"None",
":",
"array",
"[",
"'gif_duration'",
"]",
"=",
"int",
"(",
"self",
".",
"gif_duration",
")",
"# type int",
"if",
"self",
".",
"title",
"is",
"not",
"None",
":",
"array",
"[",
"'title'",
"]",
"=",
"u",
"(",
"self",
".",
"title",
")",
"# py2: type unicode, py3: type str",
"if",
"self",
".",
"caption",
"is",
"not",
"None",
":",
"array",
"[",
"'caption'",
"]",
"=",
"u",
"(",
"self",
".",
"caption",
")",
"# py2: type unicode, py3: type str",
"if",
"self",
".",
"parse_mode",
"is",
"not",
"None",
":",
"array",
"[",
"'parse_mode'",
"]",
"=",
"u",
"(",
"self",
".",
"parse_mode",
")",
"# py2: type unicode, py3: type str",
"if",
"self",
".",
"reply_markup",
"is",
"not",
"None",
":",
"array",
"[",
"'reply_markup'",
"]",
"=",
"self",
".",
"reply_markup",
".",
"to_array",
"(",
")",
"# type InlineKeyboardMarkup",
"if",
"self",
".",
"input_message_content",
"is",
"not",
"None",
":",
"array",
"[",
"'input_message_content'",
"]",
"=",
"self",
".",
"input_message_content",
".",
"to_array",
"(",
")",
"# type InputMessageContent",
"return",
"array"
] | 51.214286 | 21.5 |
def get_all_build_config_set_records(self, id, **kwargs):
"""
Get all build config set execution records associated with this build config set, returns empty list if none are found
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_build_config_set_records(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build config set id (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:param str q: RSQL Query
:return: BuildConfigurationSetRecordPage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_all_build_config_set_records_with_http_info(id, **kwargs)
else:
(data) = self.get_all_build_config_set_records_with_http_info(id, **kwargs)
return data | [
"def",
"get_all_build_config_set_records",
"(",
"self",
",",
"id",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'callback'",
")",
":",
"return",
"self",
".",
"get_all_build_config_set_records_with_http_info",
"(",
"id",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"get_all_build_config_set_records_with_http_info",
"(",
"id",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | 45.931034 | 19.655172 |
def cmd_ip_internal(verbose):
"""Get the local IP address(es) of the local interfaces.
Example:
\b
$ habu.ip.internal
{
"lo": {
"ipv4": [
{
"addr": "127.0.0.1",
"netmask": "255.0.0.0",
"peer": "127.0.0.1"
}
],
"link_layer": [
{
"addr": "00:00:00:00:00:00",
"peer": "00:00:00:00:00:00"
}
],
"ipv6": [
{
"addr": "::1",
"netmask": "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/128"
}
]
},
...
"""
if verbose:
logging.basicConfig(level=logging.INFO, format='%(message)s')
print("Gathering NIC details...", file=sys.stderr)
result = get_internal_ip()
if result:
print(json.dumps(result, indent=4))
else:
print("[X] Unable to get detail about the interfaces")
return True | [
"def",
"cmd_ip_internal",
"(",
"verbose",
")",
":",
"if",
"verbose",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"INFO",
",",
"format",
"=",
"'%(message)s'",
")",
"print",
"(",
"\"Gathering NIC details...\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"result",
"=",
"get_internal_ip",
"(",
")",
"if",
"result",
":",
"print",
"(",
"json",
".",
"dumps",
"(",
"result",
",",
"indent",
"=",
"4",
")",
")",
"else",
":",
"print",
"(",
"\"[X] Unable to get detail about the interfaces\"",
")",
"return",
"True"
] | 21.093023 | 23.255814 |
def python(self, cmd):
"""Execute a python script using the virtual environment python."""
python_bin = self.cmd_path('python')
cmd = '{0} {1}'.format(python_bin, cmd)
return self._execute(cmd) | [
"def",
"python",
"(",
"self",
",",
"cmd",
")",
":",
"python_bin",
"=",
"self",
".",
"cmd_path",
"(",
"'python'",
")",
"cmd",
"=",
"'{0} {1}'",
".",
"format",
"(",
"python_bin",
",",
"cmd",
")",
"return",
"self",
".",
"_execute",
"(",
"cmd",
")"
] | 44.2 | 7.2 |
def make_cluster_vector(rev_dict, n_src):
""" Converts the cluster membership dictionary to an array
Parameters
----------
rev_dict : dict(int:int)
A single valued dictionary pointing from source index to
cluster key for each source in a cluster.
n_src : int
Number of source in the array
Returns
-------
out_array : `numpy.ndarray'
An array filled with the index of the seed of a cluster if a source belongs to a cluster,
and with -1 if it does not.
"""
out_array = -1 * np.ones((n_src), int)
for k, v in rev_dict.items():
out_array[k] = v
# We need this to make sure the see source points at itself
out_array[v] = v
return out_array | [
"def",
"make_cluster_vector",
"(",
"rev_dict",
",",
"n_src",
")",
":",
"out_array",
"=",
"-",
"1",
"*",
"np",
".",
"ones",
"(",
"(",
"n_src",
")",
",",
"int",
")",
"for",
"k",
",",
"v",
"in",
"rev_dict",
".",
"items",
"(",
")",
":",
"out_array",
"[",
"k",
"]",
"=",
"v",
"# We need this to make sure the see source points at itself",
"out_array",
"[",
"v",
"]",
"=",
"v",
"return",
"out_array"
] | 30.458333 | 19 |
def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):
"""Convert the job description to init params that can be handled by the class constructor
Args:
job_details: the returned job details from a describe_training_job API call.
model_channel_name (str): Name of the channel where pre-trained model data will be downloaded
Returns:
dictionary: The transformed init_params
"""
init_params = super(Framework, cls)._prepare_init_params_from_job_description(job_details, model_channel_name)
init_params['entry_point'] = json.loads(init_params['hyperparameters'].get(SCRIPT_PARAM_NAME))
init_params['source_dir'] = json.loads(init_params['hyperparameters'].get(DIR_PARAM_NAME))
init_params['enable_cloudwatch_metrics'] = json.loads(
init_params['hyperparameters'].get(CLOUDWATCH_METRICS_PARAM_NAME))
init_params['container_log_level'] = json.loads(
init_params['hyperparameters'].get(CONTAINER_LOG_LEVEL_PARAM_NAME))
hyperparameters = {}
for k, v in init_params['hyperparameters'].items():
# Tuning jobs add this special hyperparameter which is not JSON serialized
if k == '_tuning_objective_metric':
if v.startswith('"') and v.endswith('"'):
v = v.strip('"')
hyperparameters[k] = v
else:
hyperparameters[k] = json.loads(v)
init_params['hyperparameters'] = hyperparameters
return init_params | [
"def",
"_prepare_init_params_from_job_description",
"(",
"cls",
",",
"job_details",
",",
"model_channel_name",
"=",
"None",
")",
":",
"init_params",
"=",
"super",
"(",
"Framework",
",",
"cls",
")",
".",
"_prepare_init_params_from_job_description",
"(",
"job_details",
",",
"model_channel_name",
")",
"init_params",
"[",
"'entry_point'",
"]",
"=",
"json",
".",
"loads",
"(",
"init_params",
"[",
"'hyperparameters'",
"]",
".",
"get",
"(",
"SCRIPT_PARAM_NAME",
")",
")",
"init_params",
"[",
"'source_dir'",
"]",
"=",
"json",
".",
"loads",
"(",
"init_params",
"[",
"'hyperparameters'",
"]",
".",
"get",
"(",
"DIR_PARAM_NAME",
")",
")",
"init_params",
"[",
"'enable_cloudwatch_metrics'",
"]",
"=",
"json",
".",
"loads",
"(",
"init_params",
"[",
"'hyperparameters'",
"]",
".",
"get",
"(",
"CLOUDWATCH_METRICS_PARAM_NAME",
")",
")",
"init_params",
"[",
"'container_log_level'",
"]",
"=",
"json",
".",
"loads",
"(",
"init_params",
"[",
"'hyperparameters'",
"]",
".",
"get",
"(",
"CONTAINER_LOG_LEVEL_PARAM_NAME",
")",
")",
"hyperparameters",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"init_params",
"[",
"'hyperparameters'",
"]",
".",
"items",
"(",
")",
":",
"# Tuning jobs add this special hyperparameter which is not JSON serialized",
"if",
"k",
"==",
"'_tuning_objective_metric'",
":",
"if",
"v",
".",
"startswith",
"(",
"'\"'",
")",
"and",
"v",
".",
"endswith",
"(",
"'\"'",
")",
":",
"v",
"=",
"v",
".",
"strip",
"(",
"'\"'",
")",
"hyperparameters",
"[",
"k",
"]",
"=",
"v",
"else",
":",
"hyperparameters",
"[",
"k",
"]",
"=",
"json",
".",
"loads",
"(",
"v",
")",
"init_params",
"[",
"'hyperparameters'",
"]",
"=",
"hyperparameters",
"return",
"init_params"
] | 47.424242 | 29.939394 |
def user(self, user: str) -> "ChildHTTPAPI":
"""
Get a child HTTPAPI instance.
Args:
user: The Matrix ID of the user whose API to get.
Returns:
A HTTPAPI instance that always uses the given Matrix ID.
"""
if self.is_real_user:
raise ValueError("Can't get child of real user")
try:
return self.children[user]
except KeyError:
child = ChildHTTPAPI(user, self)
self.children[user] = child
return child | [
"def",
"user",
"(",
"self",
",",
"user",
":",
"str",
")",
"->",
"\"ChildHTTPAPI\"",
":",
"if",
"self",
".",
"is_real_user",
":",
"raise",
"ValueError",
"(",
"\"Can't get child of real user\"",
")",
"try",
":",
"return",
"self",
".",
"children",
"[",
"user",
"]",
"except",
"KeyError",
":",
"child",
"=",
"ChildHTTPAPI",
"(",
"user",
",",
"self",
")",
"self",
".",
"children",
"[",
"user",
"]",
"=",
"child",
"return",
"child"
] | 27.947368 | 17.105263 |
def get_header_path() -> str:
"""Return local folder path of header files."""
import os
return os.path.abspath(os.path.dirname(os.path.realpath(__file__))) + '/headers/' | [
"def",
"get_header_path",
"(",
")",
"->",
"str",
":",
"import",
"os",
"return",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
")",
"+",
"'/headers/'"
] | 44.5 | 20.75 |
def _set_module_names_for_sphinx(modules: List, new_name: str):
""" Trick sphinx into displaying the desired module in these objects' documentation. """
for obj in modules:
obj.__module__ = new_name | [
"def",
"_set_module_names_for_sphinx",
"(",
"modules",
":",
"List",
",",
"new_name",
":",
"str",
")",
":",
"for",
"obj",
"in",
"modules",
":",
"obj",
".",
"__module__",
"=",
"new_name"
] | 52.75 | 11.75 |
def rotate_slaves(self):
"Round-robin slave balancer"
slaves = self.sentinel_manager.discover_slaves(self.service_name)
if slaves:
if self.slave_rr_counter is None:
self.slave_rr_counter = random.randint(0, len(slaves) - 1)
for _ in xrange(len(slaves)):
self.slave_rr_counter = (
self.slave_rr_counter + 1) % len(slaves)
slave = slaves[self.slave_rr_counter]
yield slave
# Fallback to the master connection
try:
yield self.get_master_address()
except MasterNotFoundError:
pass
raise SlaveNotFoundError('No slave found for %r' % (self.service_name)) | [
"def",
"rotate_slaves",
"(",
"self",
")",
":",
"slaves",
"=",
"self",
".",
"sentinel_manager",
".",
"discover_slaves",
"(",
"self",
".",
"service_name",
")",
"if",
"slaves",
":",
"if",
"self",
".",
"slave_rr_counter",
"is",
"None",
":",
"self",
".",
"slave_rr_counter",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"len",
"(",
"slaves",
")",
"-",
"1",
")",
"for",
"_",
"in",
"xrange",
"(",
"len",
"(",
"slaves",
")",
")",
":",
"self",
".",
"slave_rr_counter",
"=",
"(",
"self",
".",
"slave_rr_counter",
"+",
"1",
")",
"%",
"len",
"(",
"slaves",
")",
"slave",
"=",
"slaves",
"[",
"self",
".",
"slave_rr_counter",
"]",
"yield",
"slave",
"# Fallback to the master connection",
"try",
":",
"yield",
"self",
".",
"get_master_address",
"(",
")",
"except",
"MasterNotFoundError",
":",
"pass",
"raise",
"SlaveNotFoundError",
"(",
"'No slave found for %r'",
"%",
"(",
"self",
".",
"service_name",
")",
")"
] | 42.352941 | 15.529412 |
def ParseJavaFlags(self, start_line=0):
"""Parse Java style flags (com.google.common.flags)."""
# The java flags prints starts with a "Standard flags" "module"
# that doesn't follow the standard module syntax.
modname = 'Standard flags' # name of current module
self.module_list.append(modname)
self.modules.setdefault(modname, [])
modlist = self.modules[modname]
flag = None
for line_num in range(start_line, len(self.output)): # collect flags
line = self.output[line_num].rstrip()
logging.vlog(2, 'Line: "%s"' % line)
if not line: # blank lines terminate module
if flag: # save last flag
modlist.append(flag)
flag = None
continue
mobj = self.module_java_re.match(line)
if mobj: # start of a new module
modname = mobj.group(1)
logging.debug('Module: %s' % line)
if flag:
modlist.append(flag)
self.module_list.append(modname)
self.modules.setdefault(modname, [])
modlist = self.modules[modname]
flag = None
continue
mobj = self.flag_java_re.match(line)
if mobj: # start of a new flag
if flag: # save last flag
modlist.append(flag)
logging.debug('Flag: %s' % line)
flag = Flag(mobj.group(1), mobj.group(2))
continue
# append to flag help. type and default are part of the main text
if flag:
flag.help += ' ' + line.strip()
else:
logging.info('Extra: %s' % line)
if flag:
modlist.append(flag) | [
"def",
"ParseJavaFlags",
"(",
"self",
",",
"start_line",
"=",
"0",
")",
":",
"# The java flags prints starts with a \"Standard flags\" \"module\"",
"# that doesn't follow the standard module syntax.",
"modname",
"=",
"'Standard flags'",
"# name of current module",
"self",
".",
"module_list",
".",
"append",
"(",
"modname",
")",
"self",
".",
"modules",
".",
"setdefault",
"(",
"modname",
",",
"[",
"]",
")",
"modlist",
"=",
"self",
".",
"modules",
"[",
"modname",
"]",
"flag",
"=",
"None",
"for",
"line_num",
"in",
"range",
"(",
"start_line",
",",
"len",
"(",
"self",
".",
"output",
")",
")",
":",
"# collect flags",
"line",
"=",
"self",
".",
"output",
"[",
"line_num",
"]",
".",
"rstrip",
"(",
")",
"logging",
".",
"vlog",
"(",
"2",
",",
"'Line: \"%s\"'",
"%",
"line",
")",
"if",
"not",
"line",
":",
"# blank lines terminate module",
"if",
"flag",
":",
"# save last flag",
"modlist",
".",
"append",
"(",
"flag",
")",
"flag",
"=",
"None",
"continue",
"mobj",
"=",
"self",
".",
"module_java_re",
".",
"match",
"(",
"line",
")",
"if",
"mobj",
":",
"# start of a new module",
"modname",
"=",
"mobj",
".",
"group",
"(",
"1",
")",
"logging",
".",
"debug",
"(",
"'Module: %s'",
"%",
"line",
")",
"if",
"flag",
":",
"modlist",
".",
"append",
"(",
"flag",
")",
"self",
".",
"module_list",
".",
"append",
"(",
"modname",
")",
"self",
".",
"modules",
".",
"setdefault",
"(",
"modname",
",",
"[",
"]",
")",
"modlist",
"=",
"self",
".",
"modules",
"[",
"modname",
"]",
"flag",
"=",
"None",
"continue",
"mobj",
"=",
"self",
".",
"flag_java_re",
".",
"match",
"(",
"line",
")",
"if",
"mobj",
":",
"# start of a new flag",
"if",
"flag",
":",
"# save last flag",
"modlist",
".",
"append",
"(",
"flag",
")",
"logging",
".",
"debug",
"(",
"'Flag: %s'",
"%",
"line",
")",
"flag",
"=",
"Flag",
"(",
"mobj",
".",
"group",
"(",
"1",
")",
",",
"mobj",
".",
"group",
"(",
"2",
")",
")",
"continue",
"# append to flag help. type and default are part of the main text",
"if",
"flag",
":",
"flag",
".",
"help",
"+=",
"' '",
"+",
"line",
".",
"strip",
"(",
")",
"else",
":",
"logging",
".",
"info",
"(",
"'Extra: %s'",
"%",
"line",
")",
"if",
"flag",
":",
"modlist",
".",
"append",
"(",
"flag",
")"
] | 35.891304 | 15.869565 |
def interact_alice(agent: Agent):
"""
Exchange messages between basic pipelines and the Yandex.Dialogs service.
If the pipeline returns multiple values, only the first one is forwarded to Yandex.
"""
data = request.get_json()
text = data['request'].get('command', '').strip()
payload = data['request'].get('payload')
session_id = data['session']['session_id']
user_id = data['session']['user_id']
message_id = data['session']['message_id']
dialog_id = DialogID(user_id, session_id)
response = {
'response': {
'end_session': True,
'text': ''
},
"session": {
'session_id': session_id,
'message_id': message_id,
'user_id': user_id
},
'version': '1.0'
}
agent_response: Union[str, RichMessage] = agent([payload or text], [dialog_id])[0]
if isinstance(agent_response, RichMessage):
response['response']['text'] = '\n'.join([j['content']
for j in agent_response.json()
if j['type'] == 'plain_text'])
else:
response['response']['text'] = str(agent_response)
return jsonify(response), 200 | [
"def",
"interact_alice",
"(",
"agent",
":",
"Agent",
")",
":",
"data",
"=",
"request",
".",
"get_json",
"(",
")",
"text",
"=",
"data",
"[",
"'request'",
"]",
".",
"get",
"(",
"'command'",
",",
"''",
")",
".",
"strip",
"(",
")",
"payload",
"=",
"data",
"[",
"'request'",
"]",
".",
"get",
"(",
"'payload'",
")",
"session_id",
"=",
"data",
"[",
"'session'",
"]",
"[",
"'session_id'",
"]",
"user_id",
"=",
"data",
"[",
"'session'",
"]",
"[",
"'user_id'",
"]",
"message_id",
"=",
"data",
"[",
"'session'",
"]",
"[",
"'message_id'",
"]",
"dialog_id",
"=",
"DialogID",
"(",
"user_id",
",",
"session_id",
")",
"response",
"=",
"{",
"'response'",
":",
"{",
"'end_session'",
":",
"True",
",",
"'text'",
":",
"''",
"}",
",",
"\"session\"",
":",
"{",
"'session_id'",
":",
"session_id",
",",
"'message_id'",
":",
"message_id",
",",
"'user_id'",
":",
"user_id",
"}",
",",
"'version'",
":",
"'1.0'",
"}",
"agent_response",
":",
"Union",
"[",
"str",
",",
"RichMessage",
"]",
"=",
"agent",
"(",
"[",
"payload",
"or",
"text",
"]",
",",
"[",
"dialog_id",
"]",
")",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"agent_response",
",",
"RichMessage",
")",
":",
"response",
"[",
"'response'",
"]",
"[",
"'text'",
"]",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"j",
"[",
"'content'",
"]",
"for",
"j",
"in",
"agent_response",
".",
"json",
"(",
")",
"if",
"j",
"[",
"'type'",
"]",
"==",
"'plain_text'",
"]",
")",
"else",
":",
"response",
"[",
"'response'",
"]",
"[",
"'text'",
"]",
"=",
"str",
"(",
"agent_response",
")",
"return",
"jsonify",
"(",
"response",
")",
",",
"200"
] | 33.324324 | 20.621622 |
def count_alleles(self, max_allele=None, subpop=None):
"""Count the number of calls of each allele per variant.
Parameters
----------
max_allele : int, optional
The highest allele index to count. Alleles greater than this
index will be ignored.
subpop : array_like, int, optional
Indices of haplotypes to include.
Returns
-------
ac : AlleleCountsArray, int, shape (n_variants, n_alleles)
Examples
--------
>>> import allel
>>> h = allel.HaplotypeArray([[0, 0, 0, 1],
... [0, 1, 1, 1],
... [0, 2, -1, -1]], dtype='i1')
>>> ac = h.count_alleles()
>>> ac
<AlleleCountsArray shape=(3, 3) dtype=int32>
3 1 0
1 3 0
1 0 1
"""
# check inputs
subpop = _normalize_subpop_arg(subpop, self.shape[1])
# determine alleles to count
if max_allele is None:
max_allele = self.max()
# use optimisations
values = memoryview_safe(self.values)
if subpop is None:
ac = haplotype_array_count_alleles(values, max_allele)
else:
ac = haplotype_array_count_alleles_subpop(values, max_allele, subpop)
return AlleleCountsArray(ac, copy=False) | [
"def",
"count_alleles",
"(",
"self",
",",
"max_allele",
"=",
"None",
",",
"subpop",
"=",
"None",
")",
":",
"# check inputs",
"subpop",
"=",
"_normalize_subpop_arg",
"(",
"subpop",
",",
"self",
".",
"shape",
"[",
"1",
"]",
")",
"# determine alleles to count",
"if",
"max_allele",
"is",
"None",
":",
"max_allele",
"=",
"self",
".",
"max",
"(",
")",
"# use optimisations",
"values",
"=",
"memoryview_safe",
"(",
"self",
".",
"values",
")",
"if",
"subpop",
"is",
"None",
":",
"ac",
"=",
"haplotype_array_count_alleles",
"(",
"values",
",",
"max_allele",
")",
"else",
":",
"ac",
"=",
"haplotype_array_count_alleles_subpop",
"(",
"values",
",",
"max_allele",
",",
"subpop",
")",
"return",
"AlleleCountsArray",
"(",
"ac",
",",
"copy",
"=",
"False",
")"
] | 28.531915 | 21.574468 |
def _ixs(self, i, axis=0):
"""
Return the i-th value or values in the Series by location.
Parameters
----------
i : int, slice, or sequence of integers
Returns
-------
scalar (int) or Series (slice, sequence)
"""
try:
# dispatch to the values if we need
values = self._values
if isinstance(values, np.ndarray):
return libindex.get_value_at(values, i)
else:
return values[i]
except IndexError:
raise
except Exception:
if isinstance(i, slice):
indexer = self.index._convert_slice_indexer(i, kind='iloc')
return self._get_values(indexer)
else:
label = self.index[i]
if isinstance(label, Index):
return self.take(i, axis=axis, convert=True)
else:
return libindex.get_value_at(self, i) | [
"def",
"_ixs",
"(",
"self",
",",
"i",
",",
"axis",
"=",
"0",
")",
":",
"try",
":",
"# dispatch to the values if we need",
"values",
"=",
"self",
".",
"_values",
"if",
"isinstance",
"(",
"values",
",",
"np",
".",
"ndarray",
")",
":",
"return",
"libindex",
".",
"get_value_at",
"(",
"values",
",",
"i",
")",
"else",
":",
"return",
"values",
"[",
"i",
"]",
"except",
"IndexError",
":",
"raise",
"except",
"Exception",
":",
"if",
"isinstance",
"(",
"i",
",",
"slice",
")",
":",
"indexer",
"=",
"self",
".",
"index",
".",
"_convert_slice_indexer",
"(",
"i",
",",
"kind",
"=",
"'iloc'",
")",
"return",
"self",
".",
"_get_values",
"(",
"indexer",
")",
"else",
":",
"label",
"=",
"self",
".",
"index",
"[",
"i",
"]",
"if",
"isinstance",
"(",
"label",
",",
"Index",
")",
":",
"return",
"self",
".",
"take",
"(",
"i",
",",
"axis",
"=",
"axis",
",",
"convert",
"=",
"True",
")",
"else",
":",
"return",
"libindex",
".",
"get_value_at",
"(",
"self",
",",
"i",
")"
] | 30.75 | 17.25 |
def set_world(self, grd, start_y_x, y_x):
"""
tell the agent to move to location y,x
Why is there another grd object in the agent? Because
this is NOT the main grid, rather a copy for the agent
to overwrite with planning routes, etc.
The real grid is initialised in World.__init__() class
"""
self.grd = grd
self.start_y = start_y_x[0]
self.start_x = start_y_x[1]
self.current_y = start_y_x[0]
self.current_x = start_y_x[1]
self.target_y = y_x[0]
self.target_x = y_x[1]
self.backtrack = [0,0] # set only if blocked and agent needs to go back
self.prefer_x = 0 # set only if backtracked as preferred direction x
self.prefer_y = 0 | [
"def",
"set_world",
"(",
"self",
",",
"grd",
",",
"start_y_x",
",",
"y_x",
")",
":",
"self",
".",
"grd",
"=",
"grd",
"self",
".",
"start_y",
"=",
"start_y_x",
"[",
"0",
"]",
"self",
".",
"start_x",
"=",
"start_y_x",
"[",
"1",
"]",
"self",
".",
"current_y",
"=",
"start_y_x",
"[",
"0",
"]",
"self",
".",
"current_x",
"=",
"start_y_x",
"[",
"1",
"]",
"self",
".",
"target_y",
"=",
"y_x",
"[",
"0",
"]",
"self",
".",
"target_x",
"=",
"y_x",
"[",
"1",
"]",
"self",
".",
"backtrack",
"=",
"[",
"0",
",",
"0",
"]",
"# set only if blocked and agent needs to go back",
"self",
".",
"prefer_x",
"=",
"0",
"# set only if backtracked as preferred direction x",
"self",
".",
"prefer_y",
"=",
"0"
] | 42.111111 | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.