code
stringlengths 75
104k
| code_sememe
stringlengths 47
309k
| token_type
stringlengths 215
214k
| code_dependency
stringlengths 75
155k
|
|---|---|---|---|
def members(self, name=None, status=None, tags=None):
"""
Lists members of a Serf cluster, optionally filtered by one or more
filters:
`name` is a string, supporting regex matching on node names.
`status` is a string, supporting regex matching on node status.
`tags` is a dict of tag names and values, supporting regex matching
on values.
"""
filters = {}
if name is not None:
filters['Name'] = name
if status is not None:
filters['Status'] = status
if tags is not None:
filters['Tags'] = tags
if len(filters) == 0:
return self.connection.call('members')
else:
return self.connection.call('members-filtered', filters)
|
def function[members, parameter[self, name, status, tags]]:
constant[
Lists members of a Serf cluster, optionally filtered by one or more
filters:
`name` is a string, supporting regex matching on node names.
`status` is a string, supporting regex matching on node status.
`tags` is a dict of tag names and values, supporting regex matching
on values.
]
variable[filters] assign[=] dictionary[[], []]
if compare[name[name] is_not constant[None]] begin[:]
call[name[filters]][constant[Name]] assign[=] name[name]
if compare[name[status] is_not constant[None]] begin[:]
call[name[filters]][constant[Status]] assign[=] name[status]
if compare[name[tags] is_not constant[None]] begin[:]
call[name[filters]][constant[Tags]] assign[=] name[tags]
if compare[call[name[len], parameter[name[filters]]] equal[==] constant[0]] begin[:]
return[call[name[self].connection.call, parameter[constant[members]]]]
|
keyword[def] identifier[members] ( identifier[self] , identifier[name] = keyword[None] , identifier[status] = keyword[None] , identifier[tags] = keyword[None] ):
literal[string]
identifier[filters] ={}
keyword[if] identifier[name] keyword[is] keyword[not] keyword[None] :
identifier[filters] [ literal[string] ]= identifier[name]
keyword[if] identifier[status] keyword[is] keyword[not] keyword[None] :
identifier[filters] [ literal[string] ]= identifier[status]
keyword[if] identifier[tags] keyword[is] keyword[not] keyword[None] :
identifier[filters] [ literal[string] ]= identifier[tags]
keyword[if] identifier[len] ( identifier[filters] )== literal[int] :
keyword[return] identifier[self] . identifier[connection] . identifier[call] ( literal[string] )
keyword[else] :
keyword[return] identifier[self] . identifier[connection] . identifier[call] ( literal[string] , identifier[filters] )
|
def members(self, name=None, status=None, tags=None):
"""
Lists members of a Serf cluster, optionally filtered by one or more
filters:
`name` is a string, supporting regex matching on node names.
`status` is a string, supporting regex matching on node status.
`tags` is a dict of tag names and values, supporting regex matching
on values.
"""
filters = {}
if name is not None:
filters['Name'] = name # depends on [control=['if'], data=['name']]
if status is not None:
filters['Status'] = status # depends on [control=['if'], data=['status']]
if tags is not None:
filters['Tags'] = tags # depends on [control=['if'], data=['tags']]
if len(filters) == 0:
return self.connection.call('members') # depends on [control=['if'], data=[]]
else:
return self.connection.call('members-filtered', filters)
|
def _add_labels(ax: Axes, h: Union[Histogram1D, Histogram2D], kwargs: dict):
"""Add axis and plot labels.
TODO: Document kwargs
"""
title = kwargs.pop("title", h.title)
xlabel = kwargs.pop("xlabel", h.axis_names[0])
ylabel = kwargs.pop("ylabel", h.axis_names[1] if len(h.axis_names) == 2 else None)
if title:
ax.set_title(title)
if xlabel:
ax.set_xlabel(xlabel)
if ylabel:
ax.set_ylabel(ylabel)
ax.get_figure().tight_layout()
|
def function[_add_labels, parameter[ax, h, kwargs]]:
constant[Add axis and plot labels.
TODO: Document kwargs
]
variable[title] assign[=] call[name[kwargs].pop, parameter[constant[title], name[h].title]]
variable[xlabel] assign[=] call[name[kwargs].pop, parameter[constant[xlabel], call[name[h].axis_names][constant[0]]]]
variable[ylabel] assign[=] call[name[kwargs].pop, parameter[constant[ylabel], <ast.IfExp object at 0x7da207f00af0>]]
if name[title] begin[:]
call[name[ax].set_title, parameter[name[title]]]
if name[xlabel] begin[:]
call[name[ax].set_xlabel, parameter[name[xlabel]]]
if name[ylabel] begin[:]
call[name[ax].set_ylabel, parameter[name[ylabel]]]
call[call[name[ax].get_figure, parameter[]].tight_layout, parameter[]]
|
keyword[def] identifier[_add_labels] ( identifier[ax] : identifier[Axes] , identifier[h] : identifier[Union] [ identifier[Histogram1D] , identifier[Histogram2D] ], identifier[kwargs] : identifier[dict] ):
literal[string]
identifier[title] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[h] . identifier[title] )
identifier[xlabel] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[h] . identifier[axis_names] [ literal[int] ])
identifier[ylabel] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[h] . identifier[axis_names] [ literal[int] ] keyword[if] identifier[len] ( identifier[h] . identifier[axis_names] )== literal[int] keyword[else] keyword[None] )
keyword[if] identifier[title] :
identifier[ax] . identifier[set_title] ( identifier[title] )
keyword[if] identifier[xlabel] :
identifier[ax] . identifier[set_xlabel] ( identifier[xlabel] )
keyword[if] identifier[ylabel] :
identifier[ax] . identifier[set_ylabel] ( identifier[ylabel] )
identifier[ax] . identifier[get_figure] (). identifier[tight_layout] ()
|
def _add_labels(ax: Axes, h: Union[Histogram1D, Histogram2D], kwargs: dict):
"""Add axis and plot labels.
TODO: Document kwargs
"""
title = kwargs.pop('title', h.title)
xlabel = kwargs.pop('xlabel', h.axis_names[0])
ylabel = kwargs.pop('ylabel', h.axis_names[1] if len(h.axis_names) == 2 else None)
if title:
ax.set_title(title) # depends on [control=['if'], data=[]]
if xlabel:
ax.set_xlabel(xlabel) # depends on [control=['if'], data=[]]
if ylabel:
ax.set_ylabel(ylabel) # depends on [control=['if'], data=[]]
ax.get_figure().tight_layout()
|
def main():
"""
Welcome to the thellier-thellier experiment automatic chart maker.
Please select desired step interval and upper bound for which it is valid.
e.g.,
50
500
10
600
a blank entry signals the end of data entry.
which would generate steps with 50 degree intervals up to 500, followed by 10 degree intervals up to 600.
chart is stored in: chart.txt
"""
print(main.__doc__)
if '-h' in sys.argv:sys.exit()
cont,Int,Top=1,[],[]
while cont==1:
try:
Int.append(int(input(" Enter desired treatment step interval: <return> to quit ")))
Top.append(int(input(" Enter upper bound for this interval: ")))
except:
cont=0
pmag.chart_maker(Int,Top)
|
def function[main, parameter[]]:
constant[
Welcome to the thellier-thellier experiment automatic chart maker.
Please select desired step interval and upper bound for which it is valid.
e.g.,
50
500
10
600
a blank entry signals the end of data entry.
which would generate steps with 50 degree intervals up to 500, followed by 10 degree intervals up to 600.
chart is stored in: chart.txt
]
call[name[print], parameter[name[main].__doc__]]
if compare[constant[-h] in name[sys].argv] begin[:]
call[name[sys].exit, parameter[]]
<ast.Tuple object at 0x7da20e955540> assign[=] tuple[[<ast.Constant object at 0x7da20e956b00>, <ast.List object at 0x7da20e9551e0>, <ast.List object at 0x7da20e956b30>]]
while compare[name[cont] equal[==] constant[1]] begin[:]
<ast.Try object at 0x7da20e9578e0>
call[name[pmag].chart_maker, parameter[name[Int], name[Top]]]
|
keyword[def] identifier[main] ():
literal[string]
identifier[print] ( identifier[main] . identifier[__doc__] )
keyword[if] literal[string] keyword[in] identifier[sys] . identifier[argv] : identifier[sys] . identifier[exit] ()
identifier[cont] , identifier[Int] , identifier[Top] = literal[int] ,[],[]
keyword[while] identifier[cont] == literal[int] :
keyword[try] :
identifier[Int] . identifier[append] ( identifier[int] ( identifier[input] ( literal[string] )))
identifier[Top] . identifier[append] ( identifier[int] ( identifier[input] ( literal[string] )))
keyword[except] :
identifier[cont] = literal[int]
identifier[pmag] . identifier[chart_maker] ( identifier[Int] , identifier[Top] )
|
def main():
"""
Welcome to the thellier-thellier experiment automatic chart maker.
Please select desired step interval and upper bound for which it is valid.
e.g.,
50
500
10
600
a blank entry signals the end of data entry.
which would generate steps with 50 degree intervals up to 500, followed by 10 degree intervals up to 600.
chart is stored in: chart.txt
"""
print(main.__doc__)
if '-h' in sys.argv:
sys.exit() # depends on [control=['if'], data=[]]
(cont, Int, Top) = (1, [], [])
while cont == 1:
try:
Int.append(int(input(' Enter desired treatment step interval: <return> to quit ')))
Top.append(int(input(' Enter upper bound for this interval: '))) # depends on [control=['try'], data=[]]
except:
cont = 0 # depends on [control=['except'], data=[]] # depends on [control=['while'], data=['cont']]
pmag.chart_maker(Int, Top)
|
def waiter(self):
"""Return a :class:`~asyncio.Future` called back once the event
has been fired.
If the event has been fired already return a resolved future.
This method is available only for one-time events
"""
assert self._onetime, 'One time events only can invoke waiter'
if not self._waiter:
self._waiter = get_event_loop().create_future()
if self.fired():
self._waiter.set_result(None)
return self._waiter
|
def function[waiter, parameter[self]]:
constant[Return a :class:`~asyncio.Future` called back once the event
has been fired.
If the event has been fired already return a resolved future.
This method is available only for one-time events
]
assert[name[self]._onetime]
if <ast.UnaryOp object at 0x7da20c9902b0> begin[:]
name[self]._waiter assign[=] call[call[name[get_event_loop], parameter[]].create_future, parameter[]]
if call[name[self].fired, parameter[]] begin[:]
call[name[self]._waiter.set_result, parameter[constant[None]]]
return[name[self]._waiter]
|
keyword[def] identifier[waiter] ( identifier[self] ):
literal[string]
keyword[assert] identifier[self] . identifier[_onetime] , literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_waiter] :
identifier[self] . identifier[_waiter] = identifier[get_event_loop] (). identifier[create_future] ()
keyword[if] identifier[self] . identifier[fired] ():
identifier[self] . identifier[_waiter] . identifier[set_result] ( keyword[None] )
keyword[return] identifier[self] . identifier[_waiter]
|
def waiter(self):
"""Return a :class:`~asyncio.Future` called back once the event
has been fired.
If the event has been fired already return a resolved future.
This method is available only for one-time events
"""
assert self._onetime, 'One time events only can invoke waiter'
if not self._waiter:
self._waiter = get_event_loop().create_future()
if self.fired():
self._waiter.set_result(None) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return self._waiter
|
def _query_filter(search, urlkwargs, definitions):
"""Ingest query filter in query."""
filters, urlkwargs = _create_filter_dsl(urlkwargs, definitions)
for filter_ in filters:
search = search.filter(filter_)
return (search, urlkwargs)
|
def function[_query_filter, parameter[search, urlkwargs, definitions]]:
constant[Ingest query filter in query.]
<ast.Tuple object at 0x7da1b0342380> assign[=] call[name[_create_filter_dsl], parameter[name[urlkwargs], name[definitions]]]
for taget[name[filter_]] in starred[name[filters]] begin[:]
variable[search] assign[=] call[name[search].filter, parameter[name[filter_]]]
return[tuple[[<ast.Name object at 0x7da1b0341c90>, <ast.Name object at 0x7da1b0342110>]]]
|
keyword[def] identifier[_query_filter] ( identifier[search] , identifier[urlkwargs] , identifier[definitions] ):
literal[string]
identifier[filters] , identifier[urlkwargs] = identifier[_create_filter_dsl] ( identifier[urlkwargs] , identifier[definitions] )
keyword[for] identifier[filter_] keyword[in] identifier[filters] :
identifier[search] = identifier[search] . identifier[filter] ( identifier[filter_] )
keyword[return] ( identifier[search] , identifier[urlkwargs] )
|
def _query_filter(search, urlkwargs, definitions):
"""Ingest query filter in query."""
(filters, urlkwargs) = _create_filter_dsl(urlkwargs, definitions)
for filter_ in filters:
search = search.filter(filter_) # depends on [control=['for'], data=['filter_']]
return (search, urlkwargs)
|
def get_status_from_resource(self, response):
"""Process the latest status update retrieved from the same URL as
the previous request.
:param requests.Response response: latest REST call response.
:raises: BadResponse if status not 200 or 204.
"""
self._raise_if_bad_http_status_and_method(response)
if self._is_empty(response):
raise BadResponse('The response from long running operation '
'does not contain a body.')
status = self._get_provisioning_state(response)
self.status = status or 'Succeeded'
self.resource = self._deserialize(response)
|
def function[get_status_from_resource, parameter[self, response]]:
constant[Process the latest status update retrieved from the same URL as
the previous request.
:param requests.Response response: latest REST call response.
:raises: BadResponse if status not 200 or 204.
]
call[name[self]._raise_if_bad_http_status_and_method, parameter[name[response]]]
if call[name[self]._is_empty, parameter[name[response]]] begin[:]
<ast.Raise object at 0x7da2054a4ac0>
variable[status] assign[=] call[name[self]._get_provisioning_state, parameter[name[response]]]
name[self].status assign[=] <ast.BoolOp object at 0x7da2054a7730>
name[self].resource assign[=] call[name[self]._deserialize, parameter[name[response]]]
|
keyword[def] identifier[get_status_from_resource] ( identifier[self] , identifier[response] ):
literal[string]
identifier[self] . identifier[_raise_if_bad_http_status_and_method] ( identifier[response] )
keyword[if] identifier[self] . identifier[_is_empty] ( identifier[response] ):
keyword[raise] identifier[BadResponse] ( literal[string]
literal[string] )
identifier[status] = identifier[self] . identifier[_get_provisioning_state] ( identifier[response] )
identifier[self] . identifier[status] = identifier[status] keyword[or] literal[string]
identifier[self] . identifier[resource] = identifier[self] . identifier[_deserialize] ( identifier[response] )
|
def get_status_from_resource(self, response):
"""Process the latest status update retrieved from the same URL as
the previous request.
:param requests.Response response: latest REST call response.
:raises: BadResponse if status not 200 or 204.
"""
self._raise_if_bad_http_status_and_method(response)
if self._is_empty(response):
raise BadResponse('The response from long running operation does not contain a body.') # depends on [control=['if'], data=[]]
status = self._get_provisioning_state(response)
self.status = status or 'Succeeded'
self.resource = self._deserialize(response)
|
def process_rule(edges: Edges, ast: Function, rule: Mapping[str, Any], spec: BELSpec):
"""Process computed edge rule
Recursively processes BELAst versus a single computed edge rule
Args:
edges (List[Tuple[Union[Function, str], str, Function]]): BEL Edge ASTs
ast (Function): BEL Function AST
rule (Mapping[str, Any]: computed edge rule
"""
ast_type = ast.__class__.__name__
trigger_functions = rule.get("trigger_function", [])
trigger_types = rule.get("trigger_type", [])
rule_subject = rule.get("subject")
rule_relation = rule.get("relation")
rule_object = rule.get("object")
log.debug(f"Running {rule_relation} Type: {ast_type}")
if isinstance(ast, Function):
function_name = ast.name
args = ast.args
parent_function = ast.parent_function
if function_name in trigger_functions:
if rule_subject == "trigger_value":
subject = ast
if rule_object == "args":
for arg in args:
log.debug(f"1: {subject} {arg}")
edge_ast = BELAst(subject, rule_relation, arg, spec)
edges.append(edge_ast)
elif rule_object == "parent_function" and parent_function:
log.debug(f"2: {subject} {parent_function}")
edge_ast = BELAst(subject, rule_relation, parent_function, spec)
edges.append(edge_ast)
elif ast_type in trigger_types:
if rule_subject == "trigger_value":
subject = ast
if rule_object == "args":
for arg in args:
log.debug(f"3: {subject} {arg}")
edge_ast = BELAst(subject, rule_relation, arg, spec)
edges.append(edge_ast)
elif rule_object == "parent_function" and parent_function:
log.debug(f"4: {subject} {parent_function}")
edge_ast = BELAst(subject, rule_relation, parent_function, spec)
edges.append(edge_ast)
if isinstance(ast, NSArg):
term = "{}:{}".format(ast.namespace, ast.value)
parent_function = ast.parent_function
if ast_type in trigger_types:
if rule_subject == "trigger_value":
subject = term
if rule_object == "args":
for arg in args:
log.debug(f"5: {subject} {arg}")
edge_ast = BELAst(subject, rule_relation, arg, spec)
edges.append(edge_ast)
elif rule_object == "parent_function" and parent_function:
log.debug(f"6: {subject} {parent_function}")
edge_ast = BELAst(subject, rule_relation, parent_function, spec)
edges.append(edge_ast)
# Recursively process every element by processing BELAst and Functions
if hasattr(ast, "args"):
for arg in ast.args:
process_rule(edges, arg, rule, spec)
|
def function[process_rule, parameter[edges, ast, rule, spec]]:
constant[Process computed edge rule
Recursively processes BELAst versus a single computed edge rule
Args:
edges (List[Tuple[Union[Function, str], str, Function]]): BEL Edge ASTs
ast (Function): BEL Function AST
rule (Mapping[str, Any]: computed edge rule
]
variable[ast_type] assign[=] name[ast].__class__.__name__
variable[trigger_functions] assign[=] call[name[rule].get, parameter[constant[trigger_function], list[[]]]]
variable[trigger_types] assign[=] call[name[rule].get, parameter[constant[trigger_type], list[[]]]]
variable[rule_subject] assign[=] call[name[rule].get, parameter[constant[subject]]]
variable[rule_relation] assign[=] call[name[rule].get, parameter[constant[relation]]]
variable[rule_object] assign[=] call[name[rule].get, parameter[constant[object]]]
call[name[log].debug, parameter[<ast.JoinedStr object at 0x7da1b1944ca0>]]
if call[name[isinstance], parameter[name[ast], name[Function]]] begin[:]
variable[function_name] assign[=] name[ast].name
variable[args] assign[=] name[ast].args
variable[parent_function] assign[=] name[ast].parent_function
if compare[name[function_name] in name[trigger_functions]] begin[:]
if compare[name[rule_subject] equal[==] constant[trigger_value]] begin[:]
variable[subject] assign[=] name[ast]
if compare[name[rule_object] equal[==] constant[args]] begin[:]
for taget[name[arg]] in starred[name[args]] begin[:]
call[name[log].debug, parameter[<ast.JoinedStr object at 0x7da1b1945060>]]
variable[edge_ast] assign[=] call[name[BELAst], parameter[name[subject], name[rule_relation], name[arg], name[spec]]]
call[name[edges].append, parameter[name[edge_ast]]]
if call[name[isinstance], parameter[name[ast], name[NSArg]]] begin[:]
variable[term] assign[=] call[constant[{}:{}].format, parameter[name[ast].namespace, name[ast].value]]
variable[parent_function] assign[=] name[ast].parent_function
if compare[name[ast_type] in name[trigger_types]] begin[:]
if compare[name[rule_subject] equal[==] constant[trigger_value]] begin[:]
variable[subject] assign[=] name[term]
if compare[name[rule_object] equal[==] constant[args]] begin[:]
for taget[name[arg]] in starred[name[args]] begin[:]
call[name[log].debug, parameter[<ast.JoinedStr object at 0x7da20eb29cc0>]]
variable[edge_ast] assign[=] call[name[BELAst], parameter[name[subject], name[rule_relation], name[arg], name[spec]]]
call[name[edges].append, parameter[name[edge_ast]]]
if call[name[hasattr], parameter[name[ast], constant[args]]] begin[:]
for taget[name[arg]] in starred[name[ast].args] begin[:]
call[name[process_rule], parameter[name[edges], name[arg], name[rule], name[spec]]]
|
keyword[def] identifier[process_rule] ( identifier[edges] : identifier[Edges] , identifier[ast] : identifier[Function] , identifier[rule] : identifier[Mapping] [ identifier[str] , identifier[Any] ], identifier[spec] : identifier[BELSpec] ):
literal[string]
identifier[ast_type] = identifier[ast] . identifier[__class__] . identifier[__name__]
identifier[trigger_functions] = identifier[rule] . identifier[get] ( literal[string] ,[])
identifier[trigger_types] = identifier[rule] . identifier[get] ( literal[string] ,[])
identifier[rule_subject] = identifier[rule] . identifier[get] ( literal[string] )
identifier[rule_relation] = identifier[rule] . identifier[get] ( literal[string] )
identifier[rule_object] = identifier[rule] . identifier[get] ( literal[string] )
identifier[log] . identifier[debug] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[ast] , identifier[Function] ):
identifier[function_name] = identifier[ast] . identifier[name]
identifier[args] = identifier[ast] . identifier[args]
identifier[parent_function] = identifier[ast] . identifier[parent_function]
keyword[if] identifier[function_name] keyword[in] identifier[trigger_functions] :
keyword[if] identifier[rule_subject] == literal[string] :
identifier[subject] = identifier[ast]
keyword[if] identifier[rule_object] == literal[string] :
keyword[for] identifier[arg] keyword[in] identifier[args] :
identifier[log] . identifier[debug] ( literal[string] )
identifier[edge_ast] = identifier[BELAst] ( identifier[subject] , identifier[rule_relation] , identifier[arg] , identifier[spec] )
identifier[edges] . identifier[append] ( identifier[edge_ast] )
keyword[elif] identifier[rule_object] == literal[string] keyword[and] identifier[parent_function] :
identifier[log] . identifier[debug] ( literal[string] )
identifier[edge_ast] = identifier[BELAst] ( identifier[subject] , identifier[rule_relation] , identifier[parent_function] , identifier[spec] )
identifier[edges] . identifier[append] ( identifier[edge_ast] )
keyword[elif] identifier[ast_type] keyword[in] identifier[trigger_types] :
keyword[if] identifier[rule_subject] == literal[string] :
identifier[subject] = identifier[ast]
keyword[if] identifier[rule_object] == literal[string] :
keyword[for] identifier[arg] keyword[in] identifier[args] :
identifier[log] . identifier[debug] ( literal[string] )
identifier[edge_ast] = identifier[BELAst] ( identifier[subject] , identifier[rule_relation] , identifier[arg] , identifier[spec] )
identifier[edges] . identifier[append] ( identifier[edge_ast] )
keyword[elif] identifier[rule_object] == literal[string] keyword[and] identifier[parent_function] :
identifier[log] . identifier[debug] ( literal[string] )
identifier[edge_ast] = identifier[BELAst] ( identifier[subject] , identifier[rule_relation] , identifier[parent_function] , identifier[spec] )
identifier[edges] . identifier[append] ( identifier[edge_ast] )
keyword[if] identifier[isinstance] ( identifier[ast] , identifier[NSArg] ):
identifier[term] = literal[string] . identifier[format] ( identifier[ast] . identifier[namespace] , identifier[ast] . identifier[value] )
identifier[parent_function] = identifier[ast] . identifier[parent_function]
keyword[if] identifier[ast_type] keyword[in] identifier[trigger_types] :
keyword[if] identifier[rule_subject] == literal[string] :
identifier[subject] = identifier[term]
keyword[if] identifier[rule_object] == literal[string] :
keyword[for] identifier[arg] keyword[in] identifier[args] :
identifier[log] . identifier[debug] ( literal[string] )
identifier[edge_ast] = identifier[BELAst] ( identifier[subject] , identifier[rule_relation] , identifier[arg] , identifier[spec] )
identifier[edges] . identifier[append] ( identifier[edge_ast] )
keyword[elif] identifier[rule_object] == literal[string] keyword[and] identifier[parent_function] :
identifier[log] . identifier[debug] ( literal[string] )
identifier[edge_ast] = identifier[BELAst] ( identifier[subject] , identifier[rule_relation] , identifier[parent_function] , identifier[spec] )
identifier[edges] . identifier[append] ( identifier[edge_ast] )
keyword[if] identifier[hasattr] ( identifier[ast] , literal[string] ):
keyword[for] identifier[arg] keyword[in] identifier[ast] . identifier[args] :
identifier[process_rule] ( identifier[edges] , identifier[arg] , identifier[rule] , identifier[spec] )
|
def process_rule(edges: Edges, ast: Function, rule: Mapping[str, Any], spec: BELSpec):
"""Process computed edge rule
Recursively processes BELAst versus a single computed edge rule
Args:
edges (List[Tuple[Union[Function, str], str, Function]]): BEL Edge ASTs
ast (Function): BEL Function AST
rule (Mapping[str, Any]: computed edge rule
"""
ast_type = ast.__class__.__name__
trigger_functions = rule.get('trigger_function', [])
trigger_types = rule.get('trigger_type', [])
rule_subject = rule.get('subject')
rule_relation = rule.get('relation')
rule_object = rule.get('object')
log.debug(f'Running {rule_relation} Type: {ast_type}')
if isinstance(ast, Function):
function_name = ast.name
args = ast.args
parent_function = ast.parent_function
if function_name in trigger_functions:
if rule_subject == 'trigger_value':
subject = ast # depends on [control=['if'], data=[]]
if rule_object == 'args':
for arg in args:
log.debug(f'1: {subject} {arg}')
edge_ast = BELAst(subject, rule_relation, arg, spec)
edges.append(edge_ast) # depends on [control=['for'], data=['arg']] # depends on [control=['if'], data=[]]
elif rule_object == 'parent_function' and parent_function:
log.debug(f'2: {subject} {parent_function}')
edge_ast = BELAst(subject, rule_relation, parent_function, spec)
edges.append(edge_ast) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif ast_type in trigger_types:
if rule_subject == 'trigger_value':
subject = ast # depends on [control=['if'], data=[]]
if rule_object == 'args':
for arg in args:
log.debug(f'3: {subject} {arg}')
edge_ast = BELAst(subject, rule_relation, arg, spec)
edges.append(edge_ast) # depends on [control=['for'], data=['arg']] # depends on [control=['if'], data=[]]
elif rule_object == 'parent_function' and parent_function:
log.debug(f'4: {subject} {parent_function}')
edge_ast = BELAst(subject, rule_relation, parent_function, spec)
edges.append(edge_ast) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if isinstance(ast, NSArg):
term = '{}:{}'.format(ast.namespace, ast.value)
parent_function = ast.parent_function
if ast_type in trigger_types:
if rule_subject == 'trigger_value':
subject = term # depends on [control=['if'], data=[]]
if rule_object == 'args':
for arg in args:
log.debug(f'5: {subject} {arg}')
edge_ast = BELAst(subject, rule_relation, arg, spec)
edges.append(edge_ast) # depends on [control=['for'], data=['arg']] # depends on [control=['if'], data=[]]
elif rule_object == 'parent_function' and parent_function:
log.debug(f'6: {subject} {parent_function}')
edge_ast = BELAst(subject, rule_relation, parent_function, spec)
edges.append(edge_ast) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Recursively process every element by processing BELAst and Functions
if hasattr(ast, 'args'):
for arg in ast.args:
process_rule(edges, arg, rule, spec) # depends on [control=['for'], data=['arg']] # depends on [control=['if'], data=[]]
|
def rinse_rpnexp(self, rpnexp, rpndict):
""" replace valid keyword of rpnexp from rpndict
e.g. rpnexp = 'b a /', rpndict = {'b': 10}
then after rinsing, rpnexp = '10 a /'
return rinsed rpnexp
"""
for wd in rpnexp.split():
if wd in rpndict:
try:
val = float(rpndict[wd])
rpnexp = rpnexp.replace(wd, str(val))
except:
pass
return rpnexp
|
def function[rinse_rpnexp, parameter[self, rpnexp, rpndict]]:
constant[ replace valid keyword of rpnexp from rpndict
e.g. rpnexp = 'b a /', rpndict = {'b': 10}
then after rinsing, rpnexp = '10 a /'
return rinsed rpnexp
]
for taget[name[wd]] in starred[call[name[rpnexp].split, parameter[]]] begin[:]
if compare[name[wd] in name[rpndict]] begin[:]
<ast.Try object at 0x7da1b094a1d0>
return[name[rpnexp]]
|
keyword[def] identifier[rinse_rpnexp] ( identifier[self] , identifier[rpnexp] , identifier[rpndict] ):
literal[string]
keyword[for] identifier[wd] keyword[in] identifier[rpnexp] . identifier[split] ():
keyword[if] identifier[wd] keyword[in] identifier[rpndict] :
keyword[try] :
identifier[val] = identifier[float] ( identifier[rpndict] [ identifier[wd] ])
identifier[rpnexp] = identifier[rpnexp] . identifier[replace] ( identifier[wd] , identifier[str] ( identifier[val] ))
keyword[except] :
keyword[pass]
keyword[return] identifier[rpnexp]
|
def rinse_rpnexp(self, rpnexp, rpndict):
""" replace valid keyword of rpnexp from rpndict
e.g. rpnexp = 'b a /', rpndict = {'b': 10}
then after rinsing, rpnexp = '10 a /'
return rinsed rpnexp
"""
for wd in rpnexp.split():
if wd in rpndict:
try:
val = float(rpndict[wd])
rpnexp = rpnexp.replace(wd, str(val)) # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['wd', 'rpndict']] # depends on [control=['for'], data=['wd']]
return rpnexp
|
def disableEffect(self, name):
"""Disable an effect."""
try:
del self._effects[name]
self.__getattribute__(
'_effectdisable_%s' % name.replace("-", "_")
)()
except KeyError:
pass
except AttributeError:
pass
|
def function[disableEffect, parameter[self, name]]:
constant[Disable an effect.]
<ast.Try object at 0x7da2045669e0>
|
keyword[def] identifier[disableEffect] ( identifier[self] , identifier[name] ):
literal[string]
keyword[try] :
keyword[del] identifier[self] . identifier[_effects] [ identifier[name] ]
identifier[self] . identifier[__getattribute__] (
literal[string] % identifier[name] . identifier[replace] ( literal[string] , literal[string] )
)()
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[except] identifier[AttributeError] :
keyword[pass]
|
def disableEffect(self, name):
"""Disable an effect."""
try:
del self._effects[name]
self.__getattribute__('_effectdisable_%s' % name.replace('-', '_'))() # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
|
def settle(
self,
channel_identifier: ChannelID,
transferred_amount: TokenAmount,
locked_amount: TokenAmount,
locksroot: Locksroot,
partner: Address,
partner_transferred_amount: TokenAmount,
partner_locked_amount: TokenAmount,
partner_locksroot: Locksroot,
given_block_identifier: BlockSpecification,
):
""" Settle the channel. """
log_details = {
'channel_identifier': channel_identifier,
'token_network': pex(self.address),
'node': pex(self.node_address),
'partner': pex(partner),
'transferred_amount': transferred_amount,
'locked_amount': locked_amount,
'locksroot': encode_hex(locksroot),
'partner_transferred_amount': partner_transferred_amount,
'partner_locked_amount': partner_locked_amount,
'partner_locksroot': encode_hex(partner_locksroot),
}
log.debug('settle called', **log_details)
checking_block = self.client.get_checking_block()
# and now find out
our_maximum = transferred_amount + locked_amount
partner_maximum = partner_transferred_amount + partner_locked_amount
# The second participant transferred + locked amount must be higher
our_bp_is_larger = our_maximum > partner_maximum
if our_bp_is_larger:
kwargs = {
'participant1': partner,
'participant1_transferred_amount': partner_transferred_amount,
'participant1_locked_amount': partner_locked_amount,
'participant1_locksroot': partner_locksroot,
'participant2': self.node_address,
'participant2_transferred_amount': transferred_amount,
'participant2_locked_amount': locked_amount,
'participant2_locksroot': locksroot,
}
else:
kwargs = {
'participant1': self.node_address,
'participant1_transferred_amount': transferred_amount,
'participant1_locked_amount': locked_amount,
'participant1_locksroot': locksroot,
'participant2': partner,
'participant2_transferred_amount': partner_transferred_amount,
'participant2_locked_amount': partner_locked_amount,
'participant2_locksroot': partner_locksroot,
}
try:
self._settle_preconditions(
channel_identifier=channel_identifier,
partner=partner,
block_identifier=given_block_identifier,
)
except NoStateForBlockIdentifier:
# If preconditions end up being on pruned state skip them. Estimate
# gas will stop us from sending a transaction that will fail
pass
with self.channel_operations_lock[partner]:
error_prefix = 'Call to settle will fail'
gas_limit = self.proxy.estimate_gas(
checking_block,
'settleChannel',
channel_identifier=channel_identifier,
**kwargs,
)
if gas_limit:
error_prefix = 'settle call failed'
gas_limit = safe_gas_limit(gas_limit, GAS_REQUIRED_FOR_SETTLE_CHANNEL)
transaction_hash = self.proxy.transact(
'settleChannel',
gas_limit,
channel_identifier=channel_identifier,
**kwargs,
)
self.client.poll(transaction_hash)
receipt_or_none = check_transaction_threw(self.client, transaction_hash)
transaction_executed = gas_limit is not None
if not transaction_executed or receipt_or_none:
if transaction_executed:
block = receipt_or_none['blockNumber']
else:
block = checking_block
self.proxy.jsonrpc_client.check_for_insufficient_eth(
transaction_name='settleChannel',
transaction_executed=transaction_executed,
required_gas=GAS_REQUIRED_FOR_SETTLE_CHANNEL,
block_identifier=block,
)
msg = self._check_channel_state_after_settle(
participant1=self.node_address,
participant2=partner,
block_identifier=block,
channel_identifier=channel_identifier,
)
error_msg = f'{error_prefix}. {msg}'
log.critical(error_msg, **log_details)
raise RaidenUnrecoverableError(error_msg)
log.info('settle successful', **log_details)
|
def function[settle, parameter[self, channel_identifier, transferred_amount, locked_amount, locksroot, partner, partner_transferred_amount, partner_locked_amount, partner_locksroot, given_block_identifier]]:
constant[ Settle the channel. ]
variable[log_details] assign[=] dictionary[[<ast.Constant object at 0x7da1b19db880>, <ast.Constant object at 0x7da1b19d8eb0>, <ast.Constant object at 0x7da1b19d9ff0>, <ast.Constant object at 0x7da1b19d9900>, <ast.Constant object at 0x7da1b19d96f0>, <ast.Constant object at 0x7da1b19da3b0>, <ast.Constant object at 0x7da1b19db280>, <ast.Constant object at 0x7da1b19d8100>, <ast.Constant object at 0x7da1b19d8ca0>, <ast.Constant object at 0x7da1b19d9090>], [<ast.Name object at 0x7da1b19d8280>, <ast.Call object at 0x7da1b19dada0>, <ast.Call object at 0x7da1b19da650>, <ast.Call object at 0x7da1b19d8e20>, <ast.Name object at 0x7da1b19db9a0>, <ast.Name object at 0x7da1b19d9ba0>, <ast.Call object at 0x7da1b19d8c10>, <ast.Name object at 0x7da1b19d95d0>, <ast.Name object at 0x7da1b19dbcd0>, <ast.Call object at 0x7da1b19da2c0>]]
call[name[log].debug, parameter[constant[settle called]]]
variable[checking_block] assign[=] call[name[self].client.get_checking_block, parameter[]]
variable[our_maximum] assign[=] binary_operation[name[transferred_amount] + name[locked_amount]]
variable[partner_maximum] assign[=] binary_operation[name[partner_transferred_amount] + name[partner_locked_amount]]
variable[our_bp_is_larger] assign[=] compare[name[our_maximum] greater[>] name[partner_maximum]]
if name[our_bp_is_larger] begin[:]
variable[kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da1b19db430>, <ast.Constant object at 0x7da1b19db250>, <ast.Constant object at 0x7da1b19db190>, <ast.Constant object at 0x7da1b19db550>, <ast.Constant object at 0x7da1b19db610>, <ast.Constant object at 0x7da1b19d91b0>, <ast.Constant object at 0x7da1b19dbfa0>, <ast.Constant object at 0x7da1b19d99c0>], [<ast.Name object at 0x7da1b19daad0>, <ast.Name object at 0x7da1b19dba00>, <ast.Name object at 0x7da1b1907460>, <ast.Name object at 0x7da1b1906950>, <ast.Attribute object at 0x7da1b1907280>, <ast.Name object at 0x7da1b19061d0>, <ast.Name object at 0x7da1b1907640>, <ast.Name object at 0x7da1b1907250>]]
<ast.Try object at 0x7da1b19077f0>
with call[name[self].channel_operations_lock][name[partner]] begin[:]
variable[error_prefix] assign[=] constant[Call to settle will fail]
variable[gas_limit] assign[=] call[name[self].proxy.estimate_gas, parameter[name[checking_block], constant[settleChannel]]]
if name[gas_limit] begin[:]
variable[error_prefix] assign[=] constant[settle call failed]
variable[gas_limit] assign[=] call[name[safe_gas_limit], parameter[name[gas_limit], name[GAS_REQUIRED_FOR_SETTLE_CHANNEL]]]
variable[transaction_hash] assign[=] call[name[self].proxy.transact, parameter[constant[settleChannel], name[gas_limit]]]
call[name[self].client.poll, parameter[name[transaction_hash]]]
variable[receipt_or_none] assign[=] call[name[check_transaction_threw], parameter[name[self].client, name[transaction_hash]]]
variable[transaction_executed] assign[=] compare[name[gas_limit] is_not constant[None]]
if <ast.BoolOp object at 0x7da1b1714640> begin[:]
if name[transaction_executed] begin[:]
variable[block] assign[=] call[name[receipt_or_none]][constant[blockNumber]]
call[name[self].proxy.jsonrpc_client.check_for_insufficient_eth, parameter[]]
variable[msg] assign[=] call[name[self]._check_channel_state_after_settle, parameter[]]
variable[error_msg] assign[=] <ast.JoinedStr object at 0x7da1b1716ef0>
call[name[log].critical, parameter[name[error_msg]]]
<ast.Raise object at 0x7da1b1717bb0>
call[name[log].info, parameter[constant[settle successful]]]
|
keyword[def] identifier[settle] (
identifier[self] ,
identifier[channel_identifier] : identifier[ChannelID] ,
identifier[transferred_amount] : identifier[TokenAmount] ,
identifier[locked_amount] : identifier[TokenAmount] ,
identifier[locksroot] : identifier[Locksroot] ,
identifier[partner] : identifier[Address] ,
identifier[partner_transferred_amount] : identifier[TokenAmount] ,
identifier[partner_locked_amount] : identifier[TokenAmount] ,
identifier[partner_locksroot] : identifier[Locksroot] ,
identifier[given_block_identifier] : identifier[BlockSpecification] ,
):
literal[string]
identifier[log_details] ={
literal[string] : identifier[channel_identifier] ,
literal[string] : identifier[pex] ( identifier[self] . identifier[address] ),
literal[string] : identifier[pex] ( identifier[self] . identifier[node_address] ),
literal[string] : identifier[pex] ( identifier[partner] ),
literal[string] : identifier[transferred_amount] ,
literal[string] : identifier[locked_amount] ,
literal[string] : identifier[encode_hex] ( identifier[locksroot] ),
literal[string] : identifier[partner_transferred_amount] ,
literal[string] : identifier[partner_locked_amount] ,
literal[string] : identifier[encode_hex] ( identifier[partner_locksroot] ),
}
identifier[log] . identifier[debug] ( literal[string] ,** identifier[log_details] )
identifier[checking_block] = identifier[self] . identifier[client] . identifier[get_checking_block] ()
identifier[our_maximum] = identifier[transferred_amount] + identifier[locked_amount]
identifier[partner_maximum] = identifier[partner_transferred_amount] + identifier[partner_locked_amount]
identifier[our_bp_is_larger] = identifier[our_maximum] > identifier[partner_maximum]
keyword[if] identifier[our_bp_is_larger] :
identifier[kwargs] ={
literal[string] : identifier[partner] ,
literal[string] : identifier[partner_transferred_amount] ,
literal[string] : identifier[partner_locked_amount] ,
literal[string] : identifier[partner_locksroot] ,
literal[string] : identifier[self] . identifier[node_address] ,
literal[string] : identifier[transferred_amount] ,
literal[string] : identifier[locked_amount] ,
literal[string] : identifier[locksroot] ,
}
keyword[else] :
identifier[kwargs] ={
literal[string] : identifier[self] . identifier[node_address] ,
literal[string] : identifier[transferred_amount] ,
literal[string] : identifier[locked_amount] ,
literal[string] : identifier[locksroot] ,
literal[string] : identifier[partner] ,
literal[string] : identifier[partner_transferred_amount] ,
literal[string] : identifier[partner_locked_amount] ,
literal[string] : identifier[partner_locksroot] ,
}
keyword[try] :
identifier[self] . identifier[_settle_preconditions] (
identifier[channel_identifier] = identifier[channel_identifier] ,
identifier[partner] = identifier[partner] ,
identifier[block_identifier] = identifier[given_block_identifier] ,
)
keyword[except] identifier[NoStateForBlockIdentifier] :
keyword[pass]
keyword[with] identifier[self] . identifier[channel_operations_lock] [ identifier[partner] ]:
identifier[error_prefix] = literal[string]
identifier[gas_limit] = identifier[self] . identifier[proxy] . identifier[estimate_gas] (
identifier[checking_block] ,
literal[string] ,
identifier[channel_identifier] = identifier[channel_identifier] ,
** identifier[kwargs] ,
)
keyword[if] identifier[gas_limit] :
identifier[error_prefix] = literal[string]
identifier[gas_limit] = identifier[safe_gas_limit] ( identifier[gas_limit] , identifier[GAS_REQUIRED_FOR_SETTLE_CHANNEL] )
identifier[transaction_hash] = identifier[self] . identifier[proxy] . identifier[transact] (
literal[string] ,
identifier[gas_limit] ,
identifier[channel_identifier] = identifier[channel_identifier] ,
** identifier[kwargs] ,
)
identifier[self] . identifier[client] . identifier[poll] ( identifier[transaction_hash] )
identifier[receipt_or_none] = identifier[check_transaction_threw] ( identifier[self] . identifier[client] , identifier[transaction_hash] )
identifier[transaction_executed] = identifier[gas_limit] keyword[is] keyword[not] keyword[None]
keyword[if] keyword[not] identifier[transaction_executed] keyword[or] identifier[receipt_or_none] :
keyword[if] identifier[transaction_executed] :
identifier[block] = identifier[receipt_or_none] [ literal[string] ]
keyword[else] :
identifier[block] = identifier[checking_block]
identifier[self] . identifier[proxy] . identifier[jsonrpc_client] . identifier[check_for_insufficient_eth] (
identifier[transaction_name] = literal[string] ,
identifier[transaction_executed] = identifier[transaction_executed] ,
identifier[required_gas] = identifier[GAS_REQUIRED_FOR_SETTLE_CHANNEL] ,
identifier[block_identifier] = identifier[block] ,
)
identifier[msg] = identifier[self] . identifier[_check_channel_state_after_settle] (
identifier[participant1] = identifier[self] . identifier[node_address] ,
identifier[participant2] = identifier[partner] ,
identifier[block_identifier] = identifier[block] ,
identifier[channel_identifier] = identifier[channel_identifier] ,
)
identifier[error_msg] = literal[string]
identifier[log] . identifier[critical] ( identifier[error_msg] ,** identifier[log_details] )
keyword[raise] identifier[RaidenUnrecoverableError] ( identifier[error_msg] )
identifier[log] . identifier[info] ( literal[string] ,** identifier[log_details] )
|
def settle(self, channel_identifier: ChannelID, transferred_amount: TokenAmount, locked_amount: TokenAmount, locksroot: Locksroot, partner: Address, partner_transferred_amount: TokenAmount, partner_locked_amount: TokenAmount, partner_locksroot: Locksroot, given_block_identifier: BlockSpecification):
""" Settle the channel. """
log_details = {'channel_identifier': channel_identifier, 'token_network': pex(self.address), 'node': pex(self.node_address), 'partner': pex(partner), 'transferred_amount': transferred_amount, 'locked_amount': locked_amount, 'locksroot': encode_hex(locksroot), 'partner_transferred_amount': partner_transferred_amount, 'partner_locked_amount': partner_locked_amount, 'partner_locksroot': encode_hex(partner_locksroot)}
log.debug('settle called', **log_details)
checking_block = self.client.get_checking_block()
# and now find out
our_maximum = transferred_amount + locked_amount
partner_maximum = partner_transferred_amount + partner_locked_amount
# The second participant transferred + locked amount must be higher
our_bp_is_larger = our_maximum > partner_maximum
if our_bp_is_larger:
kwargs = {'participant1': partner, 'participant1_transferred_amount': partner_transferred_amount, 'participant1_locked_amount': partner_locked_amount, 'participant1_locksroot': partner_locksroot, 'participant2': self.node_address, 'participant2_transferred_amount': transferred_amount, 'participant2_locked_amount': locked_amount, 'participant2_locksroot': locksroot} # depends on [control=['if'], data=[]]
else:
kwargs = {'participant1': self.node_address, 'participant1_transferred_amount': transferred_amount, 'participant1_locked_amount': locked_amount, 'participant1_locksroot': locksroot, 'participant2': partner, 'participant2_transferred_amount': partner_transferred_amount, 'participant2_locked_amount': partner_locked_amount, 'participant2_locksroot': partner_locksroot}
try:
self._settle_preconditions(channel_identifier=channel_identifier, partner=partner, block_identifier=given_block_identifier) # depends on [control=['try'], data=[]]
except NoStateForBlockIdentifier:
# If preconditions end up being on pruned state skip them. Estimate
# gas will stop us from sending a transaction that will fail
pass # depends on [control=['except'], data=[]]
with self.channel_operations_lock[partner]:
error_prefix = 'Call to settle will fail'
gas_limit = self.proxy.estimate_gas(checking_block, 'settleChannel', channel_identifier=channel_identifier, **kwargs)
if gas_limit:
error_prefix = 'settle call failed'
gas_limit = safe_gas_limit(gas_limit, GAS_REQUIRED_FOR_SETTLE_CHANNEL)
transaction_hash = self.proxy.transact('settleChannel', gas_limit, channel_identifier=channel_identifier, **kwargs)
self.client.poll(transaction_hash)
receipt_or_none = check_transaction_threw(self.client, transaction_hash) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]]
transaction_executed = gas_limit is not None
if not transaction_executed or receipt_or_none:
if transaction_executed:
block = receipt_or_none['blockNumber'] # depends on [control=['if'], data=[]]
else:
block = checking_block
self.proxy.jsonrpc_client.check_for_insufficient_eth(transaction_name='settleChannel', transaction_executed=transaction_executed, required_gas=GAS_REQUIRED_FOR_SETTLE_CHANNEL, block_identifier=block)
msg = self._check_channel_state_after_settle(participant1=self.node_address, participant2=partner, block_identifier=block, channel_identifier=channel_identifier)
error_msg = f'{error_prefix}. {msg}'
log.critical(error_msg, **log_details)
raise RaidenUnrecoverableError(error_msg) # depends on [control=['if'], data=[]]
log.info('settle successful', **log_details)
|
def stream(self):
'''
Return the current zmqstream, creating one if necessary
'''
if not hasattr(self, '_stream'):
self._stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop)
return self._stream
|
def function[stream, parameter[self]]:
constant[
Return the current zmqstream, creating one if necessary
]
if <ast.UnaryOp object at 0x7da1b1f6f4c0> begin[:]
name[self]._stream assign[=] call[name[zmq].eventloop.zmqstream.ZMQStream, parameter[name[self]._socket]]
return[name[self]._stream]
|
keyword[def] identifier[stream] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_stream] = identifier[zmq] . identifier[eventloop] . identifier[zmqstream] . identifier[ZMQStream] ( identifier[self] . identifier[_socket] , identifier[io_loop] = identifier[self] . identifier[io_loop] )
keyword[return] identifier[self] . identifier[_stream]
|
def stream(self):
"""
Return the current zmqstream, creating one if necessary
"""
if not hasattr(self, '_stream'):
self._stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop) # depends on [control=['if'], data=[]]
return self._stream
|
def ParseRecord(self, parser_mediator, key, structure):
"""Parses a log record structure and produces events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): identifier of the structure of tokens.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
Raises:
ParseError: when the structure type is unknown.
"""
if key not in ('header', 'header_signature', 'logline'):
raise errors.ParseError(
'Unable to parse record, unknown structure: {0:s}'.format(key))
if key == 'logline':
self._ParseLogLine(parser_mediator, structure)
elif key == 'header':
self._ParseHeader(parser_mediator, structure)
elif key == 'header_signature':
# If this key is matched (after others keys failed) we got a different
# localized header and we should stop parsing until a new good header
# is found. Stop parsing is done setting xchat_year to 0.
# Note that the code assumes that LINE_STRUCTURES will be used in the
# exact order as defined!
logger.warning('Unknown locale header.')
self._xchat_year = 0
|
def function[ParseRecord, parameter[self, parser_mediator, key, structure]]:
constant[Parses a log record structure and produces events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): identifier of the structure of tokens.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
Raises:
ParseError: when the structure type is unknown.
]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da18ede5b10>, <ast.Constant object at 0x7da18ede74c0>, <ast.Constant object at 0x7da18ede66e0>]]] begin[:]
<ast.Raise object at 0x7da18ede6380>
if compare[name[key] equal[==] constant[logline]] begin[:]
call[name[self]._ParseLogLine, parameter[name[parser_mediator], name[structure]]]
|
keyword[def] identifier[ParseRecord] ( identifier[self] , identifier[parser_mediator] , identifier[key] , identifier[structure] ):
literal[string]
keyword[if] identifier[key] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[raise] identifier[errors] . identifier[ParseError] (
literal[string] . identifier[format] ( identifier[key] ))
keyword[if] identifier[key] == literal[string] :
identifier[self] . identifier[_ParseLogLine] ( identifier[parser_mediator] , identifier[structure] )
keyword[elif] identifier[key] == literal[string] :
identifier[self] . identifier[_ParseHeader] ( identifier[parser_mediator] , identifier[structure] )
keyword[elif] identifier[key] == literal[string] :
identifier[logger] . identifier[warning] ( literal[string] )
identifier[self] . identifier[_xchat_year] = literal[int]
|
def ParseRecord(self, parser_mediator, key, structure):
"""Parses a log record structure and produces events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): identifier of the structure of tokens.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
Raises:
ParseError: when the structure type is unknown.
"""
if key not in ('header', 'header_signature', 'logline'):
raise errors.ParseError('Unable to parse record, unknown structure: {0:s}'.format(key)) # depends on [control=['if'], data=['key']]
if key == 'logline':
self._ParseLogLine(parser_mediator, structure) # depends on [control=['if'], data=[]]
elif key == 'header':
self._ParseHeader(parser_mediator, structure) # depends on [control=['if'], data=[]]
elif key == 'header_signature':
# If this key is matched (after others keys failed) we got a different
# localized header and we should stop parsing until a new good header
# is found. Stop parsing is done setting xchat_year to 0.
# Note that the code assumes that LINE_STRUCTURES will be used in the
# exact order as defined!
logger.warning('Unknown locale header.')
self._xchat_year = 0 # depends on [control=['if'], data=[]]
|
def language_model(self,verbose=True):
""" builds a Tamil bigram letter model """
# use a generator in corpus
p2 = None
p1 = None
for next_letter in self.corpus.next_tamil_letter():
# update frequency from corpus
if p2:
trig = p2+p1+next_letter
self.letter3[trig] = 1 + self.letter3.get(trig,0)
p2 = p1
p1 = next_letter #update always
return
|
def function[language_model, parameter[self, verbose]]:
constant[ builds a Tamil bigram letter model ]
variable[p2] assign[=] constant[None]
variable[p1] assign[=] constant[None]
for taget[name[next_letter]] in starred[call[name[self].corpus.next_tamil_letter, parameter[]]] begin[:]
if name[p2] begin[:]
variable[trig] assign[=] binary_operation[binary_operation[name[p2] + name[p1]] + name[next_letter]]
call[name[self].letter3][name[trig]] assign[=] binary_operation[constant[1] + call[name[self].letter3.get, parameter[name[trig], constant[0]]]]
variable[p2] assign[=] name[p1]
variable[p1] assign[=] name[next_letter]
return[None]
|
keyword[def] identifier[language_model] ( identifier[self] , identifier[verbose] = keyword[True] ):
literal[string]
identifier[p2] = keyword[None]
identifier[p1] = keyword[None]
keyword[for] identifier[next_letter] keyword[in] identifier[self] . identifier[corpus] . identifier[next_tamil_letter] ():
keyword[if] identifier[p2] :
identifier[trig] = identifier[p2] + identifier[p1] + identifier[next_letter]
identifier[self] . identifier[letter3] [ identifier[trig] ]= literal[int] + identifier[self] . identifier[letter3] . identifier[get] ( identifier[trig] , literal[int] )
identifier[p2] = identifier[p1]
identifier[p1] = identifier[next_letter]
keyword[return]
|
def language_model(self, verbose=True):
""" builds a Tamil bigram letter model """
# use a generator in corpus
p2 = None
p1 = None
for next_letter in self.corpus.next_tamil_letter():
# update frequency from corpus
if p2:
trig = p2 + p1 + next_letter
self.letter3[trig] = 1 + self.letter3.get(trig, 0) # depends on [control=['if'], data=[]]
p2 = p1
p1 = next_letter #update always # depends on [control=['for'], data=['next_letter']]
return
|
def clean_ip(ip):
"""
Cleans the ip address up, useful for removing leading zeros, e.g.::
1234:0:01:02:: -> 1234:0:1:2::
1234:0000:0000:0000:0000:0000:0000:000A -> 1234::a
1234:0000:0000:0000:0001:0000:0000:0000 -> 1234:0:0:0:1::
0000:0000:0000:0000:0001:0000:0000:0000 -> ::1:0:0:0
:type ip: string
:param ip: An IP address.
:rtype: string
:return: The cleaned up IP.
"""
theip = normalize_ip(ip)
segments = ['%x' % int(s, 16) for s in theip.split(':')]
# Find the longest consecutive sequence of zeroes.
seq = {0: 0}
start = None
count = 0
for n, segment in enumerate(segments):
if segment != '0':
start = None
count = 0
continue
if start is None:
start = n
count += 1
seq[count] = start
# Replace those zeroes by a double colon.
count = max(seq)
start = seq[count]
result = []
for n, segment in enumerate(segments):
if n == start and count > 1:
if n == 0:
result.append('')
result.append('')
if n == 7:
result.append('')
continue
elif start < n < start + count:
if n == 7:
result.append('')
continue
result.append(segment)
return ':'.join(result)
|
def function[clean_ip, parameter[ip]]:
constant[
Cleans the ip address up, useful for removing leading zeros, e.g.::
1234:0:01:02:: -> 1234:0:1:2::
1234:0000:0000:0000:0000:0000:0000:000A -> 1234::a
1234:0000:0000:0000:0001:0000:0000:0000 -> 1234:0:0:0:1::
0000:0000:0000:0000:0001:0000:0000:0000 -> ::1:0:0:0
:type ip: string
:param ip: An IP address.
:rtype: string
:return: The cleaned up IP.
]
variable[theip] assign[=] call[name[normalize_ip], parameter[name[ip]]]
variable[segments] assign[=] <ast.ListComp object at 0x7da1b0651210>
variable[seq] assign[=] dictionary[[<ast.Constant object at 0x7da1b0652560>], [<ast.Constant object at 0x7da1b0651b70>]]
variable[start] assign[=] constant[None]
variable[count] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da1b0653190>, <ast.Name object at 0x7da1b06503a0>]]] in starred[call[name[enumerate], parameter[name[segments]]]] begin[:]
if compare[name[segment] not_equal[!=] constant[0]] begin[:]
variable[start] assign[=] constant[None]
variable[count] assign[=] constant[0]
continue
if compare[name[start] is constant[None]] begin[:]
variable[start] assign[=] name[n]
<ast.AugAssign object at 0x7da1b06534f0>
call[name[seq]][name[count]] assign[=] name[start]
variable[count] assign[=] call[name[max], parameter[name[seq]]]
variable[start] assign[=] call[name[seq]][name[count]]
variable[result] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b0651780>, <ast.Name object at 0x7da1b0651720>]]] in starred[call[name[enumerate], parameter[name[segments]]]] begin[:]
if <ast.BoolOp object at 0x7da1b06511e0> begin[:]
if compare[name[n] equal[==] constant[0]] begin[:]
call[name[result].append, parameter[constant[]]]
call[name[result].append, parameter[constant[]]]
if compare[name[n] equal[==] constant[7]] begin[:]
call[name[result].append, parameter[constant[]]]
continue
call[name[result].append, parameter[name[segment]]]
return[call[constant[:].join, parameter[name[result]]]]
|
keyword[def] identifier[clean_ip] ( identifier[ip] ):
literal[string]
identifier[theip] = identifier[normalize_ip] ( identifier[ip] )
identifier[segments] =[ literal[string] % identifier[int] ( identifier[s] , literal[int] ) keyword[for] identifier[s] keyword[in] identifier[theip] . identifier[split] ( literal[string] )]
identifier[seq] ={ literal[int] : literal[int] }
identifier[start] = keyword[None]
identifier[count] = literal[int]
keyword[for] identifier[n] , identifier[segment] keyword[in] identifier[enumerate] ( identifier[segments] ):
keyword[if] identifier[segment] != literal[string] :
identifier[start] = keyword[None]
identifier[count] = literal[int]
keyword[continue]
keyword[if] identifier[start] keyword[is] keyword[None] :
identifier[start] = identifier[n]
identifier[count] += literal[int]
identifier[seq] [ identifier[count] ]= identifier[start]
identifier[count] = identifier[max] ( identifier[seq] )
identifier[start] = identifier[seq] [ identifier[count] ]
identifier[result] =[]
keyword[for] identifier[n] , identifier[segment] keyword[in] identifier[enumerate] ( identifier[segments] ):
keyword[if] identifier[n] == identifier[start] keyword[and] identifier[count] > literal[int] :
keyword[if] identifier[n] == literal[int] :
identifier[result] . identifier[append] ( literal[string] )
identifier[result] . identifier[append] ( literal[string] )
keyword[if] identifier[n] == literal[int] :
identifier[result] . identifier[append] ( literal[string] )
keyword[continue]
keyword[elif] identifier[start] < identifier[n] < identifier[start] + identifier[count] :
keyword[if] identifier[n] == literal[int] :
identifier[result] . identifier[append] ( literal[string] )
keyword[continue]
identifier[result] . identifier[append] ( identifier[segment] )
keyword[return] literal[string] . identifier[join] ( identifier[result] )
|
def clean_ip(ip):
"""
Cleans the ip address up, useful for removing leading zeros, e.g.::
1234:0:01:02:: -> 1234:0:1:2::
1234:0000:0000:0000:0000:0000:0000:000A -> 1234::a
1234:0000:0000:0000:0001:0000:0000:0000 -> 1234:0:0:0:1::
0000:0000:0000:0000:0001:0000:0000:0000 -> ::1:0:0:0
:type ip: string
:param ip: An IP address.
:rtype: string
:return: The cleaned up IP.
"""
theip = normalize_ip(ip)
segments = ['%x' % int(s, 16) for s in theip.split(':')]
# Find the longest consecutive sequence of zeroes.
seq = {0: 0}
start = None
count = 0
for (n, segment) in enumerate(segments):
if segment != '0':
start = None
count = 0
continue # depends on [control=['if'], data=[]]
if start is None:
start = n # depends on [control=['if'], data=['start']]
count += 1
seq[count] = start # depends on [control=['for'], data=[]]
# Replace those zeroes by a double colon.
count = max(seq)
start = seq[count]
result = []
for (n, segment) in enumerate(segments):
if n == start and count > 1:
if n == 0:
result.append('') # depends on [control=['if'], data=[]]
result.append('')
if n == 7:
result.append('') # depends on [control=['if'], data=[]]
continue # depends on [control=['if'], data=[]]
elif start < n < start + count:
if n == 7:
result.append('') # depends on [control=['if'], data=[]]
continue # depends on [control=['if'], data=['n']]
result.append(segment) # depends on [control=['for'], data=[]]
return ':'.join(result)
|
def deletegroupmember(self, group_id, user_id):
"""
Delete a group member
:param group_id: group id to remove the member from
:param user_id: user id
:return: always true
"""
request = requests.delete(
'{0}/{1}/members/{2}'.format(self.groups_url, group_id, user_id),
headers=self.headers, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
if request.status_code == 200:
return True
|
def function[deletegroupmember, parameter[self, group_id, user_id]]:
constant[
Delete a group member
:param group_id: group id to remove the member from
:param user_id: user id
:return: always true
]
variable[request] assign[=] call[name[requests].delete, parameter[call[constant[{0}/{1}/members/{2}].format, parameter[name[self].groups_url, name[group_id], name[user_id]]]]]
if compare[name[request].status_code equal[==] constant[200]] begin[:]
return[constant[True]]
|
keyword[def] identifier[deletegroupmember] ( identifier[self] , identifier[group_id] , identifier[user_id] ):
literal[string]
identifier[request] = identifier[requests] . identifier[delete] (
literal[string] . identifier[format] ( identifier[self] . identifier[groups_url] , identifier[group_id] , identifier[user_id] ),
identifier[headers] = identifier[self] . identifier[headers] , identifier[verify] = identifier[self] . identifier[verify_ssl] , identifier[auth] = identifier[self] . identifier[auth] , identifier[timeout] = identifier[self] . identifier[timeout] )
keyword[if] identifier[request] . identifier[status_code] == literal[int] :
keyword[return] keyword[True]
|
def deletegroupmember(self, group_id, user_id):
"""
Delete a group member
:param group_id: group id to remove the member from
:param user_id: user id
:return: always true
"""
request = requests.delete('{0}/{1}/members/{2}'.format(self.groups_url, group_id, user_id), headers=self.headers, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
if request.status_code == 200:
return True # depends on [control=['if'], data=[]]
|
def _from_java(cls, java_stage):
"""
Given a Java CrossValidator, create and return a Python wrapper of it.
Used for ML persistence.
"""
estimator, epms, evaluator = super(CrossValidator, cls)._from_java_impl(java_stage)
numFolds = java_stage.getNumFolds()
seed = java_stage.getSeed()
parallelism = java_stage.getParallelism()
collectSubModels = java_stage.getCollectSubModels()
# Create a new instance of this stage.
py_stage = cls(estimator=estimator, estimatorParamMaps=epms, evaluator=evaluator,
numFolds=numFolds, seed=seed, parallelism=parallelism,
collectSubModels=collectSubModels)
py_stage._resetUid(java_stage.uid())
return py_stage
|
def function[_from_java, parameter[cls, java_stage]]:
constant[
Given a Java CrossValidator, create and return a Python wrapper of it.
Used for ML persistence.
]
<ast.Tuple object at 0x7da1b20a9a20> assign[=] call[call[name[super], parameter[name[CrossValidator], name[cls]]]._from_java_impl, parameter[name[java_stage]]]
variable[numFolds] assign[=] call[name[java_stage].getNumFolds, parameter[]]
variable[seed] assign[=] call[name[java_stage].getSeed, parameter[]]
variable[parallelism] assign[=] call[name[java_stage].getParallelism, parameter[]]
variable[collectSubModels] assign[=] call[name[java_stage].getCollectSubModels, parameter[]]
variable[py_stage] assign[=] call[name[cls], parameter[]]
call[name[py_stage]._resetUid, parameter[call[name[java_stage].uid, parameter[]]]]
return[name[py_stage]]
|
keyword[def] identifier[_from_java] ( identifier[cls] , identifier[java_stage] ):
literal[string]
identifier[estimator] , identifier[epms] , identifier[evaluator] = identifier[super] ( identifier[CrossValidator] , identifier[cls] ). identifier[_from_java_impl] ( identifier[java_stage] )
identifier[numFolds] = identifier[java_stage] . identifier[getNumFolds] ()
identifier[seed] = identifier[java_stage] . identifier[getSeed] ()
identifier[parallelism] = identifier[java_stage] . identifier[getParallelism] ()
identifier[collectSubModels] = identifier[java_stage] . identifier[getCollectSubModels] ()
identifier[py_stage] = identifier[cls] ( identifier[estimator] = identifier[estimator] , identifier[estimatorParamMaps] = identifier[epms] , identifier[evaluator] = identifier[evaluator] ,
identifier[numFolds] = identifier[numFolds] , identifier[seed] = identifier[seed] , identifier[parallelism] = identifier[parallelism] ,
identifier[collectSubModels] = identifier[collectSubModels] )
identifier[py_stage] . identifier[_resetUid] ( identifier[java_stage] . identifier[uid] ())
keyword[return] identifier[py_stage]
|
def _from_java(cls, java_stage):
"""
Given a Java CrossValidator, create and return a Python wrapper of it.
Used for ML persistence.
"""
(estimator, epms, evaluator) = super(CrossValidator, cls)._from_java_impl(java_stage)
numFolds = java_stage.getNumFolds()
seed = java_stage.getSeed()
parallelism = java_stage.getParallelism()
collectSubModels = java_stage.getCollectSubModels()
# Create a new instance of this stage.
py_stage = cls(estimator=estimator, estimatorParamMaps=epms, evaluator=evaluator, numFolds=numFolds, seed=seed, parallelism=parallelism, collectSubModels=collectSubModels)
py_stage._resetUid(java_stage.uid())
return py_stage
|
def create(self, project, slug, content, **attrs):
"""
create a new :class:`WikiPage`
:param project: :class:`Project` id
:param slug: slug of the wiki page
:param content: content of the wiki page
:param attrs: optional attributes for the :class:`WikiPage`
"""
attrs.update({'project': project, 'slug': slug, 'content': content})
return self._new_resource(payload=attrs)
|
def function[create, parameter[self, project, slug, content]]:
constant[
create a new :class:`WikiPage`
:param project: :class:`Project` id
:param slug: slug of the wiki page
:param content: content of the wiki page
:param attrs: optional attributes for the :class:`WikiPage`
]
call[name[attrs].update, parameter[dictionary[[<ast.Constant object at 0x7da207f9b640>, <ast.Constant object at 0x7da207f99900>, <ast.Constant object at 0x7da207f9b190>], [<ast.Name object at 0x7da207f9b9a0>, <ast.Name object at 0x7da207f9b010>, <ast.Name object at 0x7da207f9ae30>]]]]
return[call[name[self]._new_resource, parameter[]]]
|
keyword[def] identifier[create] ( identifier[self] , identifier[project] , identifier[slug] , identifier[content] ,** identifier[attrs] ):
literal[string]
identifier[attrs] . identifier[update] ({ literal[string] : identifier[project] , literal[string] : identifier[slug] , literal[string] : identifier[content] })
keyword[return] identifier[self] . identifier[_new_resource] ( identifier[payload] = identifier[attrs] )
|
def create(self, project, slug, content, **attrs):
"""
create a new :class:`WikiPage`
:param project: :class:`Project` id
:param slug: slug of the wiki page
:param content: content of the wiki page
:param attrs: optional attributes for the :class:`WikiPage`
"""
attrs.update({'project': project, 'slug': slug, 'content': content})
return self._new_resource(payload=attrs)
|
def register(cls, name):
""" Decorator to register the event identified by `name`.
Return the decorated class.
Raise GerritError if the event is already registered.
"""
def decorate(klazz):
""" Decorator. """
if name in cls._events:
raise GerritError("Duplicate event: %s" % name)
cls._events[name] = [klazz.__module__, klazz.__name__]
klazz.name = name
return klazz
return decorate
|
def function[register, parameter[cls, name]]:
constant[ Decorator to register the event identified by `name`.
Return the decorated class.
Raise GerritError if the event is already registered.
]
def function[decorate, parameter[klazz]]:
constant[ Decorator. ]
if compare[name[name] in name[cls]._events] begin[:]
<ast.Raise object at 0x7da1b11ede10>
call[name[cls]._events][name[name]] assign[=] list[[<ast.Attribute object at 0x7da1b101b9d0>, <ast.Attribute object at 0x7da1b1019e40>]]
name[klazz].name assign[=] name[name]
return[name[klazz]]
return[name[decorate]]
|
keyword[def] identifier[register] ( identifier[cls] , identifier[name] ):
literal[string]
keyword[def] identifier[decorate] ( identifier[klazz] ):
literal[string]
keyword[if] identifier[name] keyword[in] identifier[cls] . identifier[_events] :
keyword[raise] identifier[GerritError] ( literal[string] % identifier[name] )
identifier[cls] . identifier[_events] [ identifier[name] ]=[ identifier[klazz] . identifier[__module__] , identifier[klazz] . identifier[__name__] ]
identifier[klazz] . identifier[name] = identifier[name]
keyword[return] identifier[klazz]
keyword[return] identifier[decorate]
|
def register(cls, name):
""" Decorator to register the event identified by `name`.
Return the decorated class.
Raise GerritError if the event is already registered.
"""
def decorate(klazz):
""" Decorator. """
if name in cls._events:
raise GerritError('Duplicate event: %s' % name) # depends on [control=['if'], data=['name']]
cls._events[name] = [klazz.__module__, klazz.__name__]
klazz.name = name
return klazz
return decorate
|
def field_dict(self, model):
"""
Helper function that returns a dictionary of all DateFields or
DateTimeFields in the given model. If self.field_names is set,
it takes that into account when building the dictionary.
"""
if self.field_names is None:
return dict([(f.name, f) for f in model._meta.fields
if isinstance(f, models.DateField)])
else:
return dict([(f.name, f)
for f in model._meta.fields
if isinstance(f, models.DateField) and
(f.name in self.field_names)])
|
def function[field_dict, parameter[self, model]]:
constant[
Helper function that returns a dictionary of all DateFields or
DateTimeFields in the given model. If self.field_names is set,
it takes that into account when building the dictionary.
]
if compare[name[self].field_names is constant[None]] begin[:]
return[call[name[dict], parameter[<ast.ListComp object at 0x7da2045679a0>]]]
|
keyword[def] identifier[field_dict] ( identifier[self] , identifier[model] ):
literal[string]
keyword[if] identifier[self] . identifier[field_names] keyword[is] keyword[None] :
keyword[return] identifier[dict] ([( identifier[f] . identifier[name] , identifier[f] ) keyword[for] identifier[f] keyword[in] identifier[model] . identifier[_meta] . identifier[fields]
keyword[if] identifier[isinstance] ( identifier[f] , identifier[models] . identifier[DateField] )])
keyword[else] :
keyword[return] identifier[dict] ([( identifier[f] . identifier[name] , identifier[f] )
keyword[for] identifier[f] keyword[in] identifier[model] . identifier[_meta] . identifier[fields]
keyword[if] identifier[isinstance] ( identifier[f] , identifier[models] . identifier[DateField] ) keyword[and]
( identifier[f] . identifier[name] keyword[in] identifier[self] . identifier[field_names] )])
|
def field_dict(self, model):
"""
Helper function that returns a dictionary of all DateFields or
DateTimeFields in the given model. If self.field_names is set,
it takes that into account when building the dictionary.
"""
if self.field_names is None:
return dict([(f.name, f) for f in model._meta.fields if isinstance(f, models.DateField)]) # depends on [control=['if'], data=[]]
else:
return dict([(f.name, f) for f in model._meta.fields if isinstance(f, models.DateField) and f.name in self.field_names])
|
def _decompose_bytes_to_bit_arr(arr):
"""
Unpack bytes to bits
:param arr: list
Byte Stream, as a list of uint8 values
Returns
-------
bit_arr: list
Decomposed bit stream as a list of 0/1s of length (len(arr) * 8)
"""
bit_arr = []
for idx in range(len(arr)):
for i in reversed(range(8)):
bit_arr.append((arr[idx] >> i) & (1 << 0))
return bit_arr
|
def function[_decompose_bytes_to_bit_arr, parameter[arr]]:
constant[
Unpack bytes to bits
:param arr: list
Byte Stream, as a list of uint8 values
Returns
-------
bit_arr: list
Decomposed bit stream as a list of 0/1s of length (len(arr) * 8)
]
variable[bit_arr] assign[=] list[[]]
for taget[name[idx]] in starred[call[name[range], parameter[call[name[len], parameter[name[arr]]]]]] begin[:]
for taget[name[i]] in starred[call[name[reversed], parameter[call[name[range], parameter[constant[8]]]]]] begin[:]
call[name[bit_arr].append, parameter[binary_operation[binary_operation[call[name[arr]][name[idx]] <ast.RShift object at 0x7da2590d6a40> name[i]] <ast.BitAnd object at 0x7da2590d6b60> binary_operation[constant[1] <ast.LShift object at 0x7da2590d69e0> constant[0]]]]]
return[name[bit_arr]]
|
keyword[def] identifier[_decompose_bytes_to_bit_arr] ( identifier[arr] ):
literal[string]
identifier[bit_arr] =[]
keyword[for] identifier[idx] keyword[in] identifier[range] ( identifier[len] ( identifier[arr] )):
keyword[for] identifier[i] keyword[in] identifier[reversed] ( identifier[range] ( literal[int] )):
identifier[bit_arr] . identifier[append] (( identifier[arr] [ identifier[idx] ]>> identifier[i] )&( literal[int] << literal[int] ))
keyword[return] identifier[bit_arr]
|
def _decompose_bytes_to_bit_arr(arr):
"""
Unpack bytes to bits
:param arr: list
Byte Stream, as a list of uint8 values
Returns
-------
bit_arr: list
Decomposed bit stream as a list of 0/1s of length (len(arr) * 8)
"""
bit_arr = []
for idx in range(len(arr)):
for i in reversed(range(8)):
bit_arr.append(arr[idx] >> i & 1 << 0) # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['idx']]
return bit_arr
|
def _extract_image_urls(arg: Message_T) -> List[str]:
"""Extract all image urls from a message-like object."""
arg_as_msg = Message(arg)
return [s.data['url'] for s in arg_as_msg
if s.type == 'image' and 'url' in s.data]
|
def function[_extract_image_urls, parameter[arg]]:
constant[Extract all image urls from a message-like object.]
variable[arg_as_msg] assign[=] call[name[Message], parameter[name[arg]]]
return[<ast.ListComp object at 0x7da20c9928f0>]
|
keyword[def] identifier[_extract_image_urls] ( identifier[arg] : identifier[Message_T] )-> identifier[List] [ identifier[str] ]:
literal[string]
identifier[arg_as_msg] = identifier[Message] ( identifier[arg] )
keyword[return] [ identifier[s] . identifier[data] [ literal[string] ] keyword[for] identifier[s] keyword[in] identifier[arg_as_msg]
keyword[if] identifier[s] . identifier[type] == literal[string] keyword[and] literal[string] keyword[in] identifier[s] . identifier[data] ]
|
def _extract_image_urls(arg: Message_T) -> List[str]:
"""Extract all image urls from a message-like object."""
arg_as_msg = Message(arg)
return [s.data['url'] for s in arg_as_msg if s.type == 'image' and 'url' in s.data]
|
def postfix_to_optree(nodes):
"""Convert a list of nodes in postfix order to an Optree."""
while len(nodes) > 1:
nodes = _reduce(nodes)
if len(nodes) == 0:
raise OperatorError("Empty node list")
node = nodes[0]
if isinstance(node, OperatorNode):
raise OperatorError("Operator without operands")
if isinstance(node, OptreeNode):
return node
return OptreeNode(None, (node, ))
|
def function[postfix_to_optree, parameter[nodes]]:
constant[Convert a list of nodes in postfix order to an Optree.]
while compare[call[name[len], parameter[name[nodes]]] greater[>] constant[1]] begin[:]
variable[nodes] assign[=] call[name[_reduce], parameter[name[nodes]]]
if compare[call[name[len], parameter[name[nodes]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da1b01fe410>
variable[node] assign[=] call[name[nodes]][constant[0]]
if call[name[isinstance], parameter[name[node], name[OperatorNode]]] begin[:]
<ast.Raise object at 0x7da1b01ff550>
if call[name[isinstance], parameter[name[node], name[OptreeNode]]] begin[:]
return[name[node]]
return[call[name[OptreeNode], parameter[constant[None], tuple[[<ast.Name object at 0x7da1b0118460>]]]]]
|
keyword[def] identifier[postfix_to_optree] ( identifier[nodes] ):
literal[string]
keyword[while] identifier[len] ( identifier[nodes] )> literal[int] :
identifier[nodes] = identifier[_reduce] ( identifier[nodes] )
keyword[if] identifier[len] ( identifier[nodes] )== literal[int] :
keyword[raise] identifier[OperatorError] ( literal[string] )
identifier[node] = identifier[nodes] [ literal[int] ]
keyword[if] identifier[isinstance] ( identifier[node] , identifier[OperatorNode] ):
keyword[raise] identifier[OperatorError] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[node] , identifier[OptreeNode] ):
keyword[return] identifier[node]
keyword[return] identifier[OptreeNode] ( keyword[None] ,( identifier[node] ,))
|
def postfix_to_optree(nodes):
"""Convert a list of nodes in postfix order to an Optree."""
while len(nodes) > 1:
nodes = _reduce(nodes) # depends on [control=['while'], data=[]]
if len(nodes) == 0:
raise OperatorError('Empty node list') # depends on [control=['if'], data=[]]
node = nodes[0]
if isinstance(node, OperatorNode):
raise OperatorError('Operator without operands') # depends on [control=['if'], data=[]]
if isinstance(node, OptreeNode):
return node # depends on [control=['if'], data=[]]
return OptreeNode(None, (node,))
|
def show_current_number(parser, token):
"""Show the current page number, or insert it in the context.
This tag can for example be useful to change the page title according to
the current page number.
To just show current page number:
.. code-block:: html+django
{% show_current_number %}
If you use multiple paginations in the same page, you can get the page
number for a specific pagination using the querystring key, e.g.:
.. code-block:: html+django
{% show_current_number using mykey %}
The default page when no querystring is specified is 1. If you changed it
in the `paginate`_ template tag, you have to call ``show_current_number``
according to your choice, e.g.:
.. code-block:: html+django
{% show_current_number starting from page 3 %}
This can be also achieved using a template variable you passed to the
context, e.g.:
.. code-block:: html+django
{% show_current_number starting from page page_number %}
You can of course mix it all (the order of arguments is important):
.. code-block:: html+django
{% show_current_number starting from page 3 using mykey %}
If you want to insert the current page number in the context, without
actually displaying it in the template, use the *as* argument, i.e.:
.. code-block:: html+django
{% show_current_number as page_number %}
{% show_current_number
starting from page 3 using mykey as page_number %}
"""
# Validate args.
try:
tag_name, args = token.contents.split(None, 1)
except ValueError:
key = None
number = None
tag_name = token.contents[0]
var_name = None
else:
# Use a regexp to catch args.
match = SHOW_CURRENT_NUMBER_EXPRESSION.match(args)
if match is None:
msg = 'Invalid arguments for %r tag' % tag_name
raise template.TemplateSyntaxError(msg)
# Retrieve objects.
groupdict = match.groupdict()
key = groupdict['key']
number = groupdict['number']
var_name = groupdict['var_name']
# Call the node.
return ShowCurrentNumberNode(number, key, var_name)
|
def function[show_current_number, parameter[parser, token]]:
constant[Show the current page number, or insert it in the context.
This tag can for example be useful to change the page title according to
the current page number.
To just show current page number:
.. code-block:: html+django
{% show_current_number %}
If you use multiple paginations in the same page, you can get the page
number for a specific pagination using the querystring key, e.g.:
.. code-block:: html+django
{% show_current_number using mykey %}
The default page when no querystring is specified is 1. If you changed it
in the `paginate`_ template tag, you have to call ``show_current_number``
according to your choice, e.g.:
.. code-block:: html+django
{% show_current_number starting from page 3 %}
This can be also achieved using a template variable you passed to the
context, e.g.:
.. code-block:: html+django
{% show_current_number starting from page page_number %}
You can of course mix it all (the order of arguments is important):
.. code-block:: html+django
{% show_current_number starting from page 3 using mykey %}
If you want to insert the current page number in the context, without
actually displaying it in the template, use the *as* argument, i.e.:
.. code-block:: html+django
{% show_current_number as page_number %}
{% show_current_number
starting from page 3 using mykey as page_number %}
]
<ast.Try object at 0x7da1b12f3580>
return[call[name[ShowCurrentNumberNode], parameter[name[number], name[key], name[var_name]]]]
|
keyword[def] identifier[show_current_number] ( identifier[parser] , identifier[token] ):
literal[string]
keyword[try] :
identifier[tag_name] , identifier[args] = identifier[token] . identifier[contents] . identifier[split] ( keyword[None] , literal[int] )
keyword[except] identifier[ValueError] :
identifier[key] = keyword[None]
identifier[number] = keyword[None]
identifier[tag_name] = identifier[token] . identifier[contents] [ literal[int] ]
identifier[var_name] = keyword[None]
keyword[else] :
identifier[match] = identifier[SHOW_CURRENT_NUMBER_EXPRESSION] . identifier[match] ( identifier[args] )
keyword[if] identifier[match] keyword[is] keyword[None] :
identifier[msg] = literal[string] % identifier[tag_name]
keyword[raise] identifier[template] . identifier[TemplateSyntaxError] ( identifier[msg] )
identifier[groupdict] = identifier[match] . identifier[groupdict] ()
identifier[key] = identifier[groupdict] [ literal[string] ]
identifier[number] = identifier[groupdict] [ literal[string] ]
identifier[var_name] = identifier[groupdict] [ literal[string] ]
keyword[return] identifier[ShowCurrentNumberNode] ( identifier[number] , identifier[key] , identifier[var_name] )
|
def show_current_number(parser, token):
"""Show the current page number, or insert it in the context.
This tag can for example be useful to change the page title according to
the current page number.
To just show current page number:
.. code-block:: html+django
{% show_current_number %}
If you use multiple paginations in the same page, you can get the page
number for a specific pagination using the querystring key, e.g.:
.. code-block:: html+django
{% show_current_number using mykey %}
The default page when no querystring is specified is 1. If you changed it
in the `paginate`_ template tag, you have to call ``show_current_number``
according to your choice, e.g.:
.. code-block:: html+django
{% show_current_number starting from page 3 %}
This can be also achieved using a template variable you passed to the
context, e.g.:
.. code-block:: html+django
{% show_current_number starting from page page_number %}
You can of course mix it all (the order of arguments is important):
.. code-block:: html+django
{% show_current_number starting from page 3 using mykey %}
If you want to insert the current page number in the context, without
actually displaying it in the template, use the *as* argument, i.e.:
.. code-block:: html+django
{% show_current_number as page_number %}
{% show_current_number
starting from page 3 using mykey as page_number %}
"""
# Validate args.
try:
(tag_name, args) = token.contents.split(None, 1) # depends on [control=['try'], data=[]]
except ValueError:
key = None
number = None
tag_name = token.contents[0]
var_name = None # depends on [control=['except'], data=[]]
else:
# Use a regexp to catch args.
match = SHOW_CURRENT_NUMBER_EXPRESSION.match(args)
if match is None:
msg = 'Invalid arguments for %r tag' % tag_name
raise template.TemplateSyntaxError(msg) # depends on [control=['if'], data=[]]
# Retrieve objects.
groupdict = match.groupdict()
key = groupdict['key']
number = groupdict['number']
var_name = groupdict['var_name']
# Call the node.
return ShowCurrentNumberNode(number, key, var_name)
|
def start(self):
"""
The main loop, run forever.
"""
while True:
self.thread_debug("Interval starting")
for thr in threading.enumerate():
self.thread_debug(" " + str(thr))
self.feed_monitors()
start = time.time()
# wait fore queue to empty
self.workers_queue.join()
end = time.time()
diff = self.config['interval']['test'] - (end - start)
if diff <= 0:
# alarm
self.stats.procwin = -diff
self.thread_debug("Cannot keep up with tests! {} seconds late"
.format(abs(diff)))
else:
self.thread_debug("waiting {} seconds...".format(diff))
time.sleep(diff)
|
def function[start, parameter[self]]:
constant[
The main loop, run forever.
]
while constant[True] begin[:]
call[name[self].thread_debug, parameter[constant[Interval starting]]]
for taget[name[thr]] in starred[call[name[threading].enumerate, parameter[]]] begin[:]
call[name[self].thread_debug, parameter[binary_operation[constant[ ] + call[name[str], parameter[name[thr]]]]]]
call[name[self].feed_monitors, parameter[]]
variable[start] assign[=] call[name[time].time, parameter[]]
call[name[self].workers_queue.join, parameter[]]
variable[end] assign[=] call[name[time].time, parameter[]]
variable[diff] assign[=] binary_operation[call[call[name[self].config][constant[interval]]][constant[test]] - binary_operation[name[end] - name[start]]]
if compare[name[diff] less_or_equal[<=] constant[0]] begin[:]
name[self].stats.procwin assign[=] <ast.UnaryOp object at 0x7da18f09fd60>
call[name[self].thread_debug, parameter[call[constant[Cannot keep up with tests! {} seconds late].format, parameter[call[name[abs], parameter[name[diff]]]]]]]
|
keyword[def] identifier[start] ( identifier[self] ):
literal[string]
keyword[while] keyword[True] :
identifier[self] . identifier[thread_debug] ( literal[string] )
keyword[for] identifier[thr] keyword[in] identifier[threading] . identifier[enumerate] ():
identifier[self] . identifier[thread_debug] ( literal[string] + identifier[str] ( identifier[thr] ))
identifier[self] . identifier[feed_monitors] ()
identifier[start] = identifier[time] . identifier[time] ()
identifier[self] . identifier[workers_queue] . identifier[join] ()
identifier[end] = identifier[time] . identifier[time] ()
identifier[diff] = identifier[self] . identifier[config] [ literal[string] ][ literal[string] ]-( identifier[end] - identifier[start] )
keyword[if] identifier[diff] <= literal[int] :
identifier[self] . identifier[stats] . identifier[procwin] =- identifier[diff]
identifier[self] . identifier[thread_debug] ( literal[string]
. identifier[format] ( identifier[abs] ( identifier[diff] )))
keyword[else] :
identifier[self] . identifier[thread_debug] ( literal[string] . identifier[format] ( identifier[diff] ))
identifier[time] . identifier[sleep] ( identifier[diff] )
|
def start(self):
"""
The main loop, run forever.
"""
while True:
self.thread_debug('Interval starting')
for thr in threading.enumerate():
self.thread_debug(' ' + str(thr)) # depends on [control=['for'], data=['thr']]
self.feed_monitors()
start = time.time()
# wait fore queue to empty
self.workers_queue.join()
end = time.time()
diff = self.config['interval']['test'] - (end - start)
if diff <= 0:
# alarm
self.stats.procwin = -diff
self.thread_debug('Cannot keep up with tests! {} seconds late'.format(abs(diff))) # depends on [control=['if'], data=['diff']]
else:
self.thread_debug('waiting {} seconds...'.format(diff))
time.sleep(diff) # depends on [control=['while'], data=[]]
|
def policy_assignment_delete(name, scope, **kwargs):
'''
.. versionadded:: 2019.2.0
Delete a policy assignment.
:param name: The name of the policy assignment to delete.
:param scope: The scope of the policy assignment.
CLI Example:
.. code-block:: bash
salt-call azurearm_resource.policy_assignment_delete testassign \
/subscriptions/bc75htn-a0fhsi-349b-56gh-4fghti-f84852
'''
result = False
polconn = __utils__['azurearm.get_client']('policy', **kwargs)
try:
# pylint: disable=unused-variable
policy = polconn.policy_assignments.delete(
policy_assignment_name=name,
scope=scope
)
result = True
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
return result
|
def function[policy_assignment_delete, parameter[name, scope]]:
constant[
.. versionadded:: 2019.2.0
Delete a policy assignment.
:param name: The name of the policy assignment to delete.
:param scope: The scope of the policy assignment.
CLI Example:
.. code-block:: bash
salt-call azurearm_resource.policy_assignment_delete testassign /subscriptions/bc75htn-a0fhsi-349b-56gh-4fghti-f84852
]
variable[result] assign[=] constant[False]
variable[polconn] assign[=] call[call[name[__utils__]][constant[azurearm.get_client]], parameter[constant[policy]]]
<ast.Try object at 0x7da1b1f6e8c0>
return[name[result]]
|
keyword[def] identifier[policy_assignment_delete] ( identifier[name] , identifier[scope] ,** identifier[kwargs] ):
literal[string]
identifier[result] = keyword[False]
identifier[polconn] = identifier[__utils__] [ literal[string] ]( literal[string] ,** identifier[kwargs] )
keyword[try] :
identifier[policy] = identifier[polconn] . identifier[policy_assignments] . identifier[delete] (
identifier[policy_assignment_name] = identifier[name] ,
identifier[scope] = identifier[scope]
)
identifier[result] = keyword[True]
keyword[except] identifier[CloudError] keyword[as] identifier[exc] :
identifier[__utils__] [ literal[string] ]( literal[string] , identifier[str] ( identifier[exc] ),** identifier[kwargs] )
keyword[return] identifier[result]
|
def policy_assignment_delete(name, scope, **kwargs):
"""
.. versionadded:: 2019.2.0
Delete a policy assignment.
:param name: The name of the policy assignment to delete.
:param scope: The scope of the policy assignment.
CLI Example:
.. code-block:: bash
salt-call azurearm_resource.policy_assignment_delete testassign /subscriptions/bc75htn-a0fhsi-349b-56gh-4fghti-f84852
"""
result = False
polconn = __utils__['azurearm.get_client']('policy', **kwargs)
try:
# pylint: disable=unused-variable
policy = polconn.policy_assignments.delete(policy_assignment_name=name, scope=scope)
result = True # depends on [control=['try'], data=[]]
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs) # depends on [control=['except'], data=['exc']]
return result
|
def process_preprocessed(isi_preprocessor, num_processes=1,
output_dir=None, cleanup=True, add_grounding=True):
"""Process a directory of abstracts and/or papers preprocessed using the
specified IsiPreprocessor, to produce a list of extracted INDRA statements.
Parameters
----------
isi_preprocessor : indra.sources.isi.preprocessor.IsiPreprocessor
Preprocessor object that has already preprocessed the documents we
want to read and process with the ISI reader
num_processes : Optional[int]
Number of processes to parallelize over
output_dir : Optional[str]
The directory into which to put reader output; if omitted or None,
uses a temporary directory.
cleanup : Optional[bool]
If True, the temporary folders created for preprocessed reading input
and output are removed. Default: True
add_grounding : Optional[bool]
If True the extracted Statements' grounding is mapped
Returns
-------
ip : indra.sources.isi.processor.IsiProcessor
A processor containing extracted statements
"""
# Create a temporary directory to store the output
if output_dir is None:
output_dir = tempfile.mkdtemp('indra_isi_processor_output')
else:
output_dir = os.path.abspath(output_dir)
tmp_dir = tempfile.mkdtemp('indra_isi_processor_tmp')
# Form the command to invoke the ISI reader via Docker
dir_name = isi_preprocessor.preprocessed_dir
# We call realpath on all these paths so that any symbolic links
# are generated out - this is needed on Mac
input_binding = os.path.realpath(dir_name) + ':/input:ro'
output_binding = os.path.realpath(output_dir) + ':/output:rw'
tmp_binding = os.path.realpath(tmp_dir) + ':/temp:rw'
command = ['docker', 'run', '-it', '--rm',
'-v', input_binding, '-v', output_binding, '-v', tmp_binding,
'sahilgar/bigmechisi', './myprocesspapers.sh',
'-c', str(num_processes)]
# Invoke the ISI reader
logger.info('Running command:')
logger.info(' '.join(command))
ret = subprocess.call(command)
if ret != 0:
logger.error('Docker returned non-zero status code')
ips = []
for basename, pmid in isi_preprocessor.pmids.items():
fname = os.path.join(output_dir, '%s.json' % basename)
ip = process_json_file(fname, pmid=pmid,
extra_annotations=isi_preprocessor.extra_annotations.get(fname, {}),
add_grounding=False)
ips.append(ip)
# Remove the temporary output directory
if output_dir is None:
if cleanup:
shutil.rmtree(output_dir)
else:
logger.info('Not cleaning up %s' % output_dir)
if cleanup:
shutil.rmtree(tmp_dir)
else:
logger.info('Not cleaning up %s' % output_dir)
if len(ips) > 1:
for ip in ips[1:]:
ips[0].statements += ip.statements
if ips:
if add_grounding:
ips[0].add_grounding()
return ips[0]
else:
return None
|
def function[process_preprocessed, parameter[isi_preprocessor, num_processes, output_dir, cleanup, add_grounding]]:
constant[Process a directory of abstracts and/or papers preprocessed using the
specified IsiPreprocessor, to produce a list of extracted INDRA statements.
Parameters
----------
isi_preprocessor : indra.sources.isi.preprocessor.IsiPreprocessor
Preprocessor object that has already preprocessed the documents we
want to read and process with the ISI reader
num_processes : Optional[int]
Number of processes to parallelize over
output_dir : Optional[str]
The directory into which to put reader output; if omitted or None,
uses a temporary directory.
cleanup : Optional[bool]
If True, the temporary folders created for preprocessed reading input
and output are removed. Default: True
add_grounding : Optional[bool]
If True the extracted Statements' grounding is mapped
Returns
-------
ip : indra.sources.isi.processor.IsiProcessor
A processor containing extracted statements
]
if compare[name[output_dir] is constant[None]] begin[:]
variable[output_dir] assign[=] call[name[tempfile].mkdtemp, parameter[constant[indra_isi_processor_output]]]
variable[tmp_dir] assign[=] call[name[tempfile].mkdtemp, parameter[constant[indra_isi_processor_tmp]]]
variable[dir_name] assign[=] name[isi_preprocessor].preprocessed_dir
variable[input_binding] assign[=] binary_operation[call[name[os].path.realpath, parameter[name[dir_name]]] + constant[:/input:ro]]
variable[output_binding] assign[=] binary_operation[call[name[os].path.realpath, parameter[name[output_dir]]] + constant[:/output:rw]]
variable[tmp_binding] assign[=] binary_operation[call[name[os].path.realpath, parameter[name[tmp_dir]]] + constant[:/temp:rw]]
variable[command] assign[=] list[[<ast.Constant object at 0x7da18c4cfe50>, <ast.Constant object at 0x7da18c4cdc60>, <ast.Constant object at 0x7da18c4ccbb0>, <ast.Constant object at 0x7da18c4cefb0>, <ast.Constant object at 0x7da18c4ccd30>, <ast.Name object at 0x7da18c4cc6d0>, <ast.Constant object at 0x7da18c4ce0b0>, <ast.Name object at 0x7da18c4cde10>, <ast.Constant object at 0x7da18c4cf8e0>, <ast.Name object at 0x7da18c4cf1c0>, <ast.Constant object at 0x7da18c4ce770>, <ast.Constant object at 0x7da18c4ce5f0>, <ast.Constant object at 0x7da18c4cead0>, <ast.Call object at 0x7da18c4cc9d0>]]
call[name[logger].info, parameter[constant[Running command:]]]
call[name[logger].info, parameter[call[constant[ ].join, parameter[name[command]]]]]
variable[ret] assign[=] call[name[subprocess].call, parameter[name[command]]]
if compare[name[ret] not_equal[!=] constant[0]] begin[:]
call[name[logger].error, parameter[constant[Docker returned non-zero status code]]]
variable[ips] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18c4cce80>, <ast.Name object at 0x7da18c4ce7d0>]]] in starred[call[name[isi_preprocessor].pmids.items, parameter[]]] begin[:]
variable[fname] assign[=] call[name[os].path.join, parameter[name[output_dir], binary_operation[constant[%s.json] <ast.Mod object at 0x7da2590d6920> name[basename]]]]
variable[ip] assign[=] call[name[process_json_file], parameter[name[fname]]]
call[name[ips].append, parameter[name[ip]]]
if compare[name[output_dir] is constant[None]] begin[:]
if name[cleanup] begin[:]
call[name[shutil].rmtree, parameter[name[output_dir]]]
if name[cleanup] begin[:]
call[name[shutil].rmtree, parameter[name[tmp_dir]]]
if compare[call[name[len], parameter[name[ips]]] greater[>] constant[1]] begin[:]
for taget[name[ip]] in starred[call[name[ips]][<ast.Slice object at 0x7da2041d8280>]] begin[:]
<ast.AugAssign object at 0x7da2041da350>
if name[ips] begin[:]
if name[add_grounding] begin[:]
call[call[name[ips]][constant[0]].add_grounding, parameter[]]
return[call[name[ips]][constant[0]]]
|
keyword[def] identifier[process_preprocessed] ( identifier[isi_preprocessor] , identifier[num_processes] = literal[int] ,
identifier[output_dir] = keyword[None] , identifier[cleanup] = keyword[True] , identifier[add_grounding] = keyword[True] ):
literal[string]
keyword[if] identifier[output_dir] keyword[is] keyword[None] :
identifier[output_dir] = identifier[tempfile] . identifier[mkdtemp] ( literal[string] )
keyword[else] :
identifier[output_dir] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[output_dir] )
identifier[tmp_dir] = identifier[tempfile] . identifier[mkdtemp] ( literal[string] )
identifier[dir_name] = identifier[isi_preprocessor] . identifier[preprocessed_dir]
identifier[input_binding] = identifier[os] . identifier[path] . identifier[realpath] ( identifier[dir_name] )+ literal[string]
identifier[output_binding] = identifier[os] . identifier[path] . identifier[realpath] ( identifier[output_dir] )+ literal[string]
identifier[tmp_binding] = identifier[os] . identifier[path] . identifier[realpath] ( identifier[tmp_dir] )+ literal[string]
identifier[command] =[ literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , identifier[input_binding] , literal[string] , identifier[output_binding] , literal[string] , identifier[tmp_binding] ,
literal[string] , literal[string] ,
literal[string] , identifier[str] ( identifier[num_processes] )]
identifier[logger] . identifier[info] ( literal[string] )
identifier[logger] . identifier[info] ( literal[string] . identifier[join] ( identifier[command] ))
identifier[ret] = identifier[subprocess] . identifier[call] ( identifier[command] )
keyword[if] identifier[ret] != literal[int] :
identifier[logger] . identifier[error] ( literal[string] )
identifier[ips] =[]
keyword[for] identifier[basename] , identifier[pmid] keyword[in] identifier[isi_preprocessor] . identifier[pmids] . identifier[items] ():
identifier[fname] = identifier[os] . identifier[path] . identifier[join] ( identifier[output_dir] , literal[string] % identifier[basename] )
identifier[ip] = identifier[process_json_file] ( identifier[fname] , identifier[pmid] = identifier[pmid] ,
identifier[extra_annotations] = identifier[isi_preprocessor] . identifier[extra_annotations] . identifier[get] ( identifier[fname] ,{}),
identifier[add_grounding] = keyword[False] )
identifier[ips] . identifier[append] ( identifier[ip] )
keyword[if] identifier[output_dir] keyword[is] keyword[None] :
keyword[if] identifier[cleanup] :
identifier[shutil] . identifier[rmtree] ( identifier[output_dir] )
keyword[else] :
identifier[logger] . identifier[info] ( literal[string] % identifier[output_dir] )
keyword[if] identifier[cleanup] :
identifier[shutil] . identifier[rmtree] ( identifier[tmp_dir] )
keyword[else] :
identifier[logger] . identifier[info] ( literal[string] % identifier[output_dir] )
keyword[if] identifier[len] ( identifier[ips] )> literal[int] :
keyword[for] identifier[ip] keyword[in] identifier[ips] [ literal[int] :]:
identifier[ips] [ literal[int] ]. identifier[statements] += identifier[ip] . identifier[statements]
keyword[if] identifier[ips] :
keyword[if] identifier[add_grounding] :
identifier[ips] [ literal[int] ]. identifier[add_grounding] ()
keyword[return] identifier[ips] [ literal[int] ]
keyword[else] :
keyword[return] keyword[None]
|
def process_preprocessed(isi_preprocessor, num_processes=1, output_dir=None, cleanup=True, add_grounding=True):
"""Process a directory of abstracts and/or papers preprocessed using the
specified IsiPreprocessor, to produce a list of extracted INDRA statements.
Parameters
----------
isi_preprocessor : indra.sources.isi.preprocessor.IsiPreprocessor
Preprocessor object that has already preprocessed the documents we
want to read and process with the ISI reader
num_processes : Optional[int]
Number of processes to parallelize over
output_dir : Optional[str]
The directory into which to put reader output; if omitted or None,
uses a temporary directory.
cleanup : Optional[bool]
If True, the temporary folders created for preprocessed reading input
and output are removed. Default: True
add_grounding : Optional[bool]
If True the extracted Statements' grounding is mapped
Returns
-------
ip : indra.sources.isi.processor.IsiProcessor
A processor containing extracted statements
"""
# Create a temporary directory to store the output
if output_dir is None:
output_dir = tempfile.mkdtemp('indra_isi_processor_output') # depends on [control=['if'], data=['output_dir']]
else:
output_dir = os.path.abspath(output_dir)
tmp_dir = tempfile.mkdtemp('indra_isi_processor_tmp')
# Form the command to invoke the ISI reader via Docker
dir_name = isi_preprocessor.preprocessed_dir
# We call realpath on all these paths so that any symbolic links
# are generated out - this is needed on Mac
input_binding = os.path.realpath(dir_name) + ':/input:ro'
output_binding = os.path.realpath(output_dir) + ':/output:rw'
tmp_binding = os.path.realpath(tmp_dir) + ':/temp:rw'
command = ['docker', 'run', '-it', '--rm', '-v', input_binding, '-v', output_binding, '-v', tmp_binding, 'sahilgar/bigmechisi', './myprocesspapers.sh', '-c', str(num_processes)]
# Invoke the ISI reader
logger.info('Running command:')
logger.info(' '.join(command))
ret = subprocess.call(command)
if ret != 0:
logger.error('Docker returned non-zero status code') # depends on [control=['if'], data=[]]
ips = []
for (basename, pmid) in isi_preprocessor.pmids.items():
fname = os.path.join(output_dir, '%s.json' % basename)
ip = process_json_file(fname, pmid=pmid, extra_annotations=isi_preprocessor.extra_annotations.get(fname, {}), add_grounding=False)
ips.append(ip) # depends on [control=['for'], data=[]]
# Remove the temporary output directory
if output_dir is None:
if cleanup:
shutil.rmtree(output_dir) # depends on [control=['if'], data=[]]
else:
logger.info('Not cleaning up %s' % output_dir) # depends on [control=['if'], data=['output_dir']]
if cleanup:
shutil.rmtree(tmp_dir) # depends on [control=['if'], data=[]]
else:
logger.info('Not cleaning up %s' % output_dir)
if len(ips) > 1:
for ip in ips[1:]:
ips[0].statements += ip.statements # depends on [control=['for'], data=['ip']] # depends on [control=['if'], data=[]]
if ips:
if add_grounding:
ips[0].add_grounding() # depends on [control=['if'], data=[]]
return ips[0] # depends on [control=['if'], data=[]]
else:
return None
|
def note_hz_to_midi(annotation):
'''Convert a pitch_hz annotation to pitch_midi'''
annotation.namespace = 'note_midi'
data = annotation.pop_data()
for obs in data:
annotation.append(time=obs.time, duration=obs.duration,
confidence=obs.confidence,
value=12 * (np.log2(obs.value) - np.log2(440.0)) + 69)
return annotation
|
def function[note_hz_to_midi, parameter[annotation]]:
constant[Convert a pitch_hz annotation to pitch_midi]
name[annotation].namespace assign[=] constant[note_midi]
variable[data] assign[=] call[name[annotation].pop_data, parameter[]]
for taget[name[obs]] in starred[name[data]] begin[:]
call[name[annotation].append, parameter[]]
return[name[annotation]]
|
keyword[def] identifier[note_hz_to_midi] ( identifier[annotation] ):
literal[string]
identifier[annotation] . identifier[namespace] = literal[string]
identifier[data] = identifier[annotation] . identifier[pop_data] ()
keyword[for] identifier[obs] keyword[in] identifier[data] :
identifier[annotation] . identifier[append] ( identifier[time] = identifier[obs] . identifier[time] , identifier[duration] = identifier[obs] . identifier[duration] ,
identifier[confidence] = identifier[obs] . identifier[confidence] ,
identifier[value] = literal[int] *( identifier[np] . identifier[log2] ( identifier[obs] . identifier[value] )- identifier[np] . identifier[log2] ( literal[int] ))+ literal[int] )
keyword[return] identifier[annotation]
|
def note_hz_to_midi(annotation):
"""Convert a pitch_hz annotation to pitch_midi"""
annotation.namespace = 'note_midi'
data = annotation.pop_data()
for obs in data:
annotation.append(time=obs.time, duration=obs.duration, confidence=obs.confidence, value=12 * (np.log2(obs.value) - np.log2(440.0)) + 69) # depends on [control=['for'], data=['obs']]
return annotation
|
def best_representative(d1, d2):
"""
Given two objects each coerced to the most specific type possible, return the one
of the least restrictive type.
>>> best_representative(Decimal('-37.5'), Decimal('0.9999'))
Decimal('-99.9999')
>>> best_representative(None, Decimal('6.1'))
Decimal('6.1')
>>> best_representative(311920, '48-49')
'48-490'
>>> best_representative(6, 'foo')
'foo'
>>> best_representative(Decimal('4.95'), Decimal('6.1'))
Decimal('9.99')
>>> best_representative(Decimal('-1.9'), Decimal('6.1'))
Decimal('-9.9')
"""
if hasattr(d2, 'strip') and not d2.strip():
return d1
if d1 is None:
return d2
elif d2 is None:
return d1
preference = (datetime.datetime, bool, int, Decimal, float, str)
worst_pref = 0
worst = ''
for coerced in (d1, d2):
pref = preference.index(type(coerced))
if pref > worst_pref:
worst_pref = pref
worst = set_worst(worst, coerced)
elif pref == worst_pref:
if isinstance(coerced, Decimal):
worst = set_worst(worst, worst_decimal(coerced, worst))
elif isinstance(coerced, float):
worst = set_worst(worst, max(coerced, worst))
else: # int, str
if len(str(coerced)) > len(str(worst)):
worst = set_worst(worst, coerced)
return worst
|
def function[best_representative, parameter[d1, d2]]:
constant[
Given two objects each coerced to the most specific type possible, return the one
of the least restrictive type.
>>> best_representative(Decimal('-37.5'), Decimal('0.9999'))
Decimal('-99.9999')
>>> best_representative(None, Decimal('6.1'))
Decimal('6.1')
>>> best_representative(311920, '48-49')
'48-490'
>>> best_representative(6, 'foo')
'foo'
>>> best_representative(Decimal('4.95'), Decimal('6.1'))
Decimal('9.99')
>>> best_representative(Decimal('-1.9'), Decimal('6.1'))
Decimal('-9.9')
]
if <ast.BoolOp object at 0x7da1b196e860> begin[:]
return[name[d1]]
if compare[name[d1] is constant[None]] begin[:]
return[name[d2]]
variable[preference] assign[=] tuple[[<ast.Attribute object at 0x7da1b196fb20>, <ast.Name object at 0x7da1b196efb0>, <ast.Name object at 0x7da1b196d2d0>, <ast.Name object at 0x7da1b196de70>, <ast.Name object at 0x7da1b196fd90>, <ast.Name object at 0x7da1b196e020>]]
variable[worst_pref] assign[=] constant[0]
variable[worst] assign[=] constant[]
for taget[name[coerced]] in starred[tuple[[<ast.Name object at 0x7da1b196fe50>, <ast.Name object at 0x7da1b196d540>]]] begin[:]
variable[pref] assign[=] call[name[preference].index, parameter[call[name[type], parameter[name[coerced]]]]]
if compare[name[pref] greater[>] name[worst_pref]] begin[:]
variable[worst_pref] assign[=] name[pref]
variable[worst] assign[=] call[name[set_worst], parameter[name[worst], name[coerced]]]
return[name[worst]]
|
keyword[def] identifier[best_representative] ( identifier[d1] , identifier[d2] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[d2] , literal[string] ) keyword[and] keyword[not] identifier[d2] . identifier[strip] ():
keyword[return] identifier[d1]
keyword[if] identifier[d1] keyword[is] keyword[None] :
keyword[return] identifier[d2]
keyword[elif] identifier[d2] keyword[is] keyword[None] :
keyword[return] identifier[d1]
identifier[preference] =( identifier[datetime] . identifier[datetime] , identifier[bool] , identifier[int] , identifier[Decimal] , identifier[float] , identifier[str] )
identifier[worst_pref] = literal[int]
identifier[worst] = literal[string]
keyword[for] identifier[coerced] keyword[in] ( identifier[d1] , identifier[d2] ):
identifier[pref] = identifier[preference] . identifier[index] ( identifier[type] ( identifier[coerced] ))
keyword[if] identifier[pref] > identifier[worst_pref] :
identifier[worst_pref] = identifier[pref]
identifier[worst] = identifier[set_worst] ( identifier[worst] , identifier[coerced] )
keyword[elif] identifier[pref] == identifier[worst_pref] :
keyword[if] identifier[isinstance] ( identifier[coerced] , identifier[Decimal] ):
identifier[worst] = identifier[set_worst] ( identifier[worst] , identifier[worst_decimal] ( identifier[coerced] , identifier[worst] ))
keyword[elif] identifier[isinstance] ( identifier[coerced] , identifier[float] ):
identifier[worst] = identifier[set_worst] ( identifier[worst] , identifier[max] ( identifier[coerced] , identifier[worst] ))
keyword[else] :
keyword[if] identifier[len] ( identifier[str] ( identifier[coerced] ))> identifier[len] ( identifier[str] ( identifier[worst] )):
identifier[worst] = identifier[set_worst] ( identifier[worst] , identifier[coerced] )
keyword[return] identifier[worst]
|
def best_representative(d1, d2):
"""
Given two objects each coerced to the most specific type possible, return the one
of the least restrictive type.
>>> best_representative(Decimal('-37.5'), Decimal('0.9999'))
Decimal('-99.9999')
>>> best_representative(None, Decimal('6.1'))
Decimal('6.1')
>>> best_representative(311920, '48-49')
'48-490'
>>> best_representative(6, 'foo')
'foo'
>>> best_representative(Decimal('4.95'), Decimal('6.1'))
Decimal('9.99')
>>> best_representative(Decimal('-1.9'), Decimal('6.1'))
Decimal('-9.9')
"""
if hasattr(d2, 'strip') and (not d2.strip()):
return d1 # depends on [control=['if'], data=[]]
if d1 is None:
return d2 # depends on [control=['if'], data=[]]
elif d2 is None:
return d1 # depends on [control=['if'], data=[]]
preference = (datetime.datetime, bool, int, Decimal, float, str)
worst_pref = 0
worst = ''
for coerced in (d1, d2):
pref = preference.index(type(coerced))
if pref > worst_pref:
worst_pref = pref
worst = set_worst(worst, coerced) # depends on [control=['if'], data=['pref', 'worst_pref']]
elif pref == worst_pref:
if isinstance(coerced, Decimal):
worst = set_worst(worst, worst_decimal(coerced, worst)) # depends on [control=['if'], data=[]]
elif isinstance(coerced, float):
worst = set_worst(worst, max(coerced, worst)) # depends on [control=['if'], data=[]] # int, str
elif len(str(coerced)) > len(str(worst)):
worst = set_worst(worst, coerced) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['coerced']]
return worst
|
def icosphere(script, radius=1.0, diameter=None, subdivisions=3, color=None):
"""create an icosphere mesh
radius Radius of the sphere
# subdivisions = Subdivision level; Number of the recursive subdivision of the
# surface. Default is 3 (a sphere approximation composed by 1280 faces).
# Admitted values are in the range 0 (an icosahedron) to 8 (a 1.3 MegaTris
# approximation of a sphere). Formula for number of faces: F=20*4^subdiv
# color = specify a color name to apply vertex colors to the newly
# created mesh"""
if diameter is not None:
radius = diameter / 2
filter_xml = ''.join([
' <filter name="Sphere">\n',
' <Param name="radius" ',
'value="%s" ' % radius,
'description="Radius" ',
'type="RichFloat" ',
'/>\n',
' <Param name="subdiv" ',
'value="%d" ' % subdivisions,
'description="Subdiv. Level" ',
'type="RichInt" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
if isinstance(script, FilterScript):
script.add_layer('Sphere', change_layer=True)
if color is not None:
vert_color.function(script, color=color)
return None
|
def function[icosphere, parameter[script, radius, diameter, subdivisions, color]]:
constant[create an icosphere mesh
radius Radius of the sphere
# subdivisions = Subdivision level; Number of the recursive subdivision of the
# surface. Default is 3 (a sphere approximation composed by 1280 faces).
# Admitted values are in the range 0 (an icosahedron) to 8 (a 1.3 MegaTris
# approximation of a sphere). Formula for number of faces: F=20*4^subdiv
# color = specify a color name to apply vertex colors to the newly
# created mesh]
if compare[name[diameter] is_not constant[None]] begin[:]
variable[radius] assign[=] binary_operation[name[diameter] / constant[2]]
variable[filter_xml] assign[=] call[constant[].join, parameter[list[[<ast.Constant object at 0x7da20c993c40>, <ast.Constant object at 0x7da20c991000>, <ast.BinOp object at 0x7da20c991510>, <ast.Constant object at 0x7da20c993160>, <ast.Constant object at 0x7da20c990d30>, <ast.Constant object at 0x7da20c993730>, <ast.Constant object at 0x7da20c992530>, <ast.BinOp object at 0x7da20c993ac0>, <ast.Constant object at 0x7da20c991750>, <ast.Constant object at 0x7da20c991ab0>, <ast.Constant object at 0x7da20c992da0>, <ast.Constant object at 0x7da20c9917b0>]]]]
call[name[util].write_filter, parameter[name[script], name[filter_xml]]]
if call[name[isinstance], parameter[name[script], name[FilterScript]]] begin[:]
call[name[script].add_layer, parameter[constant[Sphere]]]
if compare[name[color] is_not constant[None]] begin[:]
call[name[vert_color].function, parameter[name[script]]]
return[constant[None]]
|
keyword[def] identifier[icosphere] ( identifier[script] , identifier[radius] = literal[int] , identifier[diameter] = keyword[None] , identifier[subdivisions] = literal[int] , identifier[color] = keyword[None] ):
literal[string]
keyword[if] identifier[diameter] keyword[is] keyword[not] keyword[None] :
identifier[radius] = identifier[diameter] / literal[int]
identifier[filter_xml] = literal[string] . identifier[join] ([
literal[string] ,
literal[string] ,
literal[string] % identifier[radius] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] % identifier[subdivisions] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ])
identifier[util] . identifier[write_filter] ( identifier[script] , identifier[filter_xml] )
keyword[if] identifier[isinstance] ( identifier[script] , identifier[FilterScript] ):
identifier[script] . identifier[add_layer] ( literal[string] , identifier[change_layer] = keyword[True] )
keyword[if] identifier[color] keyword[is] keyword[not] keyword[None] :
identifier[vert_color] . identifier[function] ( identifier[script] , identifier[color] = identifier[color] )
keyword[return] keyword[None]
|
def icosphere(script, radius=1.0, diameter=None, subdivisions=3, color=None):
"""create an icosphere mesh
radius Radius of the sphere
# subdivisions = Subdivision level; Number of the recursive subdivision of the
# surface. Default is 3 (a sphere approximation composed by 1280 faces).
# Admitted values are in the range 0 (an icosahedron) to 8 (a 1.3 MegaTris
# approximation of a sphere). Formula for number of faces: F=20*4^subdiv
# color = specify a color name to apply vertex colors to the newly
# created mesh"""
if diameter is not None:
radius = diameter / 2 # depends on [control=['if'], data=['diameter']]
filter_xml = ''.join([' <filter name="Sphere">\n', ' <Param name="radius" ', 'value="%s" ' % radius, 'description="Radius" ', 'type="RichFloat" ', '/>\n', ' <Param name="subdiv" ', 'value="%d" ' % subdivisions, 'description="Subdiv. Level" ', 'type="RichInt" ', '/>\n', ' </filter>\n'])
util.write_filter(script, filter_xml)
if isinstance(script, FilterScript):
script.add_layer('Sphere', change_layer=True) # depends on [control=['if'], data=[]]
if color is not None:
vert_color.function(script, color=color) # depends on [control=['if'], data=['color']]
return None
|
def doi(self):
"""
https://es.wikipedia.org/wiki/Identificador_de_objeto_digital
:return: a random Spanish CIF or NIE or NIF
"""
return random.choice([self.cif, self.nie, self.nif])()
|
def function[doi, parameter[self]]:
constant[
https://es.wikipedia.org/wiki/Identificador_de_objeto_digital
:return: a random Spanish CIF or NIE or NIF
]
return[call[call[name[random].choice, parameter[list[[<ast.Attribute object at 0x7da18ede40d0>, <ast.Attribute object at 0x7da18ede7d60>, <ast.Attribute object at 0x7da18ede7970>]]]], parameter[]]]
|
keyword[def] identifier[doi] ( identifier[self] ):
literal[string]
keyword[return] identifier[random] . identifier[choice] ([ identifier[self] . identifier[cif] , identifier[self] . identifier[nie] , identifier[self] . identifier[nif] ])()
|
def doi(self):
"""
https://es.wikipedia.org/wiki/Identificador_de_objeto_digital
:return: a random Spanish CIF or NIE or NIF
"""
return random.choice([self.cif, self.nie, self.nif])()
|
def autodiscover(cls,
module_paths: List[str],
subclass: 'Container' = None) -> None:
"""
Load all modules automatically and find bases and eggs.
:param module_paths: List of paths that should be discovered
:param subclass: Optional Container subclass that should be used
"""
def find_base(bases: set, implementation: Type):
found = {b for b in bases if issubclass(implementation, b)}
if not found:
raise ConfigurationError(
"No base defined for %r" % implementation)
elif len(found) > 1:
raise ConfigurationError(
"More than one base found for %r" % implementation)
else:
return found.pop()
def walk(pkg: Union[str, ModuleType]) -> Dict[str, ModuleType]:
if isinstance(pkg, str):
pkg: ModuleType = importlib.import_module(pkg)
results = {}
try:
path = pkg.__path__
except AttributeError:
results[pkg.__name__] = importlib.import_module(pkg.__name__)
else:
for loader, name, is_pkg in pkgutil.walk_packages(path):
full_name = pkg.__name__ + '.' + name
results[full_name] = importlib.import_module(full_name)
if is_pkg:
results.update(walk(full_name))
return results
with cls._lock:
for module_path in module_paths:
walk(module_path)
config: List[Egg] = []
for egg_ in egg.factories:
base_ = find_base(base.classes, egg_.type_)
egg_.base_ = base_
config.append(egg_)
cls.configure(config, subclass=subclass)
|
def function[autodiscover, parameter[cls, module_paths, subclass]]:
constant[
Load all modules automatically and find bases and eggs.
:param module_paths: List of paths that should be discovered
:param subclass: Optional Container subclass that should be used
]
def function[find_base, parameter[bases, implementation]]:
variable[found] assign[=] <ast.SetComp object at 0x7da20e9b3fd0>
if <ast.UnaryOp object at 0x7da20e9b1840> begin[:]
<ast.Raise object at 0x7da20e9b07c0>
def function[walk, parameter[pkg]]:
if call[name[isinstance], parameter[name[pkg], name[str]]] begin[:]
<ast.AnnAssign object at 0x7da204620b20>
variable[results] assign[=] dictionary[[], []]
<ast.Try object at 0x7da2046213f0>
return[name[results]]
with name[cls]._lock begin[:]
for taget[name[module_path]] in starred[name[module_paths]] begin[:]
call[name[walk], parameter[name[module_path]]]
<ast.AnnAssign object at 0x7da204622e00>
for taget[name[egg_]] in starred[name[egg].factories] begin[:]
variable[base_] assign[=] call[name[find_base], parameter[name[base].classes, name[egg_].type_]]
name[egg_].base_ assign[=] name[base_]
call[name[config].append, parameter[name[egg_]]]
call[name[cls].configure, parameter[name[config]]]
|
keyword[def] identifier[autodiscover] ( identifier[cls] ,
identifier[module_paths] : identifier[List] [ identifier[str] ],
identifier[subclass] : literal[string] = keyword[None] )-> keyword[None] :
literal[string]
keyword[def] identifier[find_base] ( identifier[bases] : identifier[set] , identifier[implementation] : identifier[Type] ):
identifier[found] ={ identifier[b] keyword[for] identifier[b] keyword[in] identifier[bases] keyword[if] identifier[issubclass] ( identifier[implementation] , identifier[b] )}
keyword[if] keyword[not] identifier[found] :
keyword[raise] identifier[ConfigurationError] (
literal[string] % identifier[implementation] )
keyword[elif] identifier[len] ( identifier[found] )> literal[int] :
keyword[raise] identifier[ConfigurationError] (
literal[string] % identifier[implementation] )
keyword[else] :
keyword[return] identifier[found] . identifier[pop] ()
keyword[def] identifier[walk] ( identifier[pkg] : identifier[Union] [ identifier[str] , identifier[ModuleType] ])-> identifier[Dict] [ identifier[str] , identifier[ModuleType] ]:
keyword[if] identifier[isinstance] ( identifier[pkg] , identifier[str] ):
identifier[pkg] : identifier[ModuleType] = identifier[importlib] . identifier[import_module] ( identifier[pkg] )
identifier[results] ={}
keyword[try] :
identifier[path] = identifier[pkg] . identifier[__path__]
keyword[except] identifier[AttributeError] :
identifier[results] [ identifier[pkg] . identifier[__name__] ]= identifier[importlib] . identifier[import_module] ( identifier[pkg] . identifier[__name__] )
keyword[else] :
keyword[for] identifier[loader] , identifier[name] , identifier[is_pkg] keyword[in] identifier[pkgutil] . identifier[walk_packages] ( identifier[path] ):
identifier[full_name] = identifier[pkg] . identifier[__name__] + literal[string] + identifier[name]
identifier[results] [ identifier[full_name] ]= identifier[importlib] . identifier[import_module] ( identifier[full_name] )
keyword[if] identifier[is_pkg] :
identifier[results] . identifier[update] ( identifier[walk] ( identifier[full_name] ))
keyword[return] identifier[results]
keyword[with] identifier[cls] . identifier[_lock] :
keyword[for] identifier[module_path] keyword[in] identifier[module_paths] :
identifier[walk] ( identifier[module_path] )
identifier[config] : identifier[List] [ identifier[Egg] ]=[]
keyword[for] identifier[egg_] keyword[in] identifier[egg] . identifier[factories] :
identifier[base_] = identifier[find_base] ( identifier[base] . identifier[classes] , identifier[egg_] . identifier[type_] )
identifier[egg_] . identifier[base_] = identifier[base_]
identifier[config] . identifier[append] ( identifier[egg_] )
identifier[cls] . identifier[configure] ( identifier[config] , identifier[subclass] = identifier[subclass] )
|
def autodiscover(cls, module_paths: List[str], subclass: 'Container'=None) -> None:
"""
Load all modules automatically and find bases and eggs.
:param module_paths: List of paths that should be discovered
:param subclass: Optional Container subclass that should be used
"""
def find_base(bases: set, implementation: Type):
found = {b for b in bases if issubclass(implementation, b)}
if not found:
raise ConfigurationError('No base defined for %r' % implementation) # depends on [control=['if'], data=[]]
elif len(found) > 1:
raise ConfigurationError('More than one base found for %r' % implementation) # depends on [control=['if'], data=[]]
else:
return found.pop()
def walk(pkg: Union[str, ModuleType]) -> Dict[str, ModuleType]:
if isinstance(pkg, str):
pkg: ModuleType = importlib.import_module(pkg) # depends on [control=['if'], data=[]]
results = {}
try:
path = pkg.__path__ # depends on [control=['try'], data=[]]
except AttributeError:
results[pkg.__name__] = importlib.import_module(pkg.__name__) # depends on [control=['except'], data=[]]
else:
for (loader, name, is_pkg) in pkgutil.walk_packages(path):
full_name = pkg.__name__ + '.' + name
results[full_name] = importlib.import_module(full_name)
if is_pkg:
results.update(walk(full_name)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return results
with cls._lock:
for module_path in module_paths:
walk(module_path) # depends on [control=['for'], data=['module_path']]
config: List[Egg] = []
for egg_ in egg.factories:
base_ = find_base(base.classes, egg_.type_)
egg_.base_ = base_
config.append(egg_) # depends on [control=['for'], data=['egg_']]
cls.configure(config, subclass=subclass) # depends on [control=['with'], data=[]]
|
def diff_json_files(left_files, right_files):
'''
Compute the difference between two sets of basis set JSON files
The output is a set of files that correspond to each file in
`left_files`. Each resulting dictionary will contain only the elements/shells
that exist in that entry and not in any of the files in `right_files`.
This only works on the shell level, and will only subtract entire shells
that are identical. ECP potentials are not affected.
`left_files` and `right_files` are lists of file paths. The output
is written to files with the same names as those in `left_files`,
but with `.diff` added to the end. If those files exist, they are overwritten.
Parameters
----------
left_files : list of str
Paths to JSON files to use as the base
right_files : list of str
Paths to JSON files to subtract from each file of `left_files`
Returns
----------
None
'''
left_data = [fileio.read_json_basis(x) for x in left_files]
right_data = [fileio.read_json_basis(x) for x in right_files]
d = diff_basis_dict(left_data, right_data)
for idx, diff_bs in enumerate(d):
fpath = left_files[idx]
fileio.write_json_basis(fpath + '.diff', diff_bs)
|
def function[diff_json_files, parameter[left_files, right_files]]:
constant[
Compute the difference between two sets of basis set JSON files
The output is a set of files that correspond to each file in
`left_files`. Each resulting dictionary will contain only the elements/shells
that exist in that entry and not in any of the files in `right_files`.
This only works on the shell level, and will only subtract entire shells
that are identical. ECP potentials are not affected.
`left_files` and `right_files` are lists of file paths. The output
is written to files with the same names as those in `left_files`,
but with `.diff` added to the end. If those files exist, they are overwritten.
Parameters
----------
left_files : list of str
Paths to JSON files to use as the base
right_files : list of str
Paths to JSON files to subtract from each file of `left_files`
Returns
----------
None
]
variable[left_data] assign[=] <ast.ListComp object at 0x7da204567910>
variable[right_data] assign[=] <ast.ListComp object at 0x7da204565690>
variable[d] assign[=] call[name[diff_basis_dict], parameter[name[left_data], name[right_data]]]
for taget[tuple[[<ast.Name object at 0x7da2045663e0>, <ast.Name object at 0x7da204564940>]]] in starred[call[name[enumerate], parameter[name[d]]]] begin[:]
variable[fpath] assign[=] call[name[left_files]][name[idx]]
call[name[fileio].write_json_basis, parameter[binary_operation[name[fpath] + constant[.diff]], name[diff_bs]]]
|
keyword[def] identifier[diff_json_files] ( identifier[left_files] , identifier[right_files] ):
literal[string]
identifier[left_data] =[ identifier[fileio] . identifier[read_json_basis] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[left_files] ]
identifier[right_data] =[ identifier[fileio] . identifier[read_json_basis] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[right_files] ]
identifier[d] = identifier[diff_basis_dict] ( identifier[left_data] , identifier[right_data] )
keyword[for] identifier[idx] , identifier[diff_bs] keyword[in] identifier[enumerate] ( identifier[d] ):
identifier[fpath] = identifier[left_files] [ identifier[idx] ]
identifier[fileio] . identifier[write_json_basis] ( identifier[fpath] + literal[string] , identifier[diff_bs] )
|
def diff_json_files(left_files, right_files):
"""
Compute the difference between two sets of basis set JSON files
The output is a set of files that correspond to each file in
`left_files`. Each resulting dictionary will contain only the elements/shells
that exist in that entry and not in any of the files in `right_files`.
This only works on the shell level, and will only subtract entire shells
that are identical. ECP potentials are not affected.
`left_files` and `right_files` are lists of file paths. The output
is written to files with the same names as those in `left_files`,
but with `.diff` added to the end. If those files exist, they are overwritten.
Parameters
----------
left_files : list of str
Paths to JSON files to use as the base
right_files : list of str
Paths to JSON files to subtract from each file of `left_files`
Returns
----------
None
"""
left_data = [fileio.read_json_basis(x) for x in left_files]
right_data = [fileio.read_json_basis(x) for x in right_files]
d = diff_basis_dict(left_data, right_data)
for (idx, diff_bs) in enumerate(d):
fpath = left_files[idx]
fileio.write_json_basis(fpath + '.diff', diff_bs) # depends on [control=['for'], data=[]]
|
def make_logging_undefined(logger=None, base=None):
"""Given a logger object this returns a new undefined class that will
log certain failures. It will log iterations and printing. If no
logger is given a default logger is created.
Example::
logger = logging.getLogger(__name__)
LoggingUndefined = make_logging_undefined(
logger=logger,
base=Undefined
)
.. versionadded:: 2.8
:param logger: the logger to use. If not provided, a default logger
is created.
:param base: the base class to add logging functionality to. This
defaults to :class:`Undefined`.
"""
if logger is None:
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stderr))
if base is None:
base = Undefined
def _log_message(undef):
if undef._undefined_hint is None:
if undef._undefined_obj is missing:
hint = '%s is undefined' % undef._undefined_name
elif not isinstance(undef._undefined_name, string_types):
hint = '%s has no element %s' % (
object_type_repr(undef._undefined_obj),
undef._undefined_name)
else:
hint = '%s has no attribute %s' % (
object_type_repr(undef._undefined_obj),
undef._undefined_name)
else:
hint = undef._undefined_hint
logger.warning('Template variable warning: %s', hint)
class LoggingUndefined(base):
def _fail_with_undefined_error(self, *args, **kwargs):
try:
return base._fail_with_undefined_error(self, *args, **kwargs)
except self._undefined_exception as e:
logger.error('Template variable error: %s', str(e))
raise e
def __str__(self):
rv = base.__str__(self)
_log_message(self)
return rv
def __iter__(self):
rv = base.__iter__(self)
_log_message(self)
return rv
if PY2:
def __nonzero__(self):
rv = base.__nonzero__(self)
_log_message(self)
return rv
def __unicode__(self):
rv = base.__unicode__(self)
_log_message(self)
return rv
else:
def __bool__(self):
rv = base.__bool__(self)
_log_message(self)
return rv
return LoggingUndefined
|
def function[make_logging_undefined, parameter[logger, base]]:
constant[Given a logger object this returns a new undefined class that will
log certain failures. It will log iterations and printing. If no
logger is given a default logger is created.
Example::
logger = logging.getLogger(__name__)
LoggingUndefined = make_logging_undefined(
logger=logger,
base=Undefined
)
.. versionadded:: 2.8
:param logger: the logger to use. If not provided, a default logger
is created.
:param base: the base class to add logging functionality to. This
defaults to :class:`Undefined`.
]
if compare[name[logger] is constant[None]] begin[:]
import module[logging]
variable[logger] assign[=] call[name[logging].getLogger, parameter[name[__name__]]]
call[name[logger].addHandler, parameter[call[name[logging].StreamHandler, parameter[name[sys].stderr]]]]
if compare[name[base] is constant[None]] begin[:]
variable[base] assign[=] name[Undefined]
def function[_log_message, parameter[undef]]:
if compare[name[undef]._undefined_hint is constant[None]] begin[:]
if compare[name[undef]._undefined_obj is name[missing]] begin[:]
variable[hint] assign[=] binary_operation[constant[%s is undefined] <ast.Mod object at 0x7da2590d6920> name[undef]._undefined_name]
call[name[logger].warning, parameter[constant[Template variable warning: %s], name[hint]]]
class class[LoggingUndefined, parameter[]] begin[:]
def function[_fail_with_undefined_error, parameter[self]]:
<ast.Try object at 0x7da1b1eb8820>
def function[__str__, parameter[self]]:
variable[rv] assign[=] call[name[base].__str__, parameter[name[self]]]
call[name[_log_message], parameter[name[self]]]
return[name[rv]]
def function[__iter__, parameter[self]]:
variable[rv] assign[=] call[name[base].__iter__, parameter[name[self]]]
call[name[_log_message], parameter[name[self]]]
return[name[rv]]
if name[PY2] begin[:]
def function[__nonzero__, parameter[self]]:
variable[rv] assign[=] call[name[base].__nonzero__, parameter[name[self]]]
call[name[_log_message], parameter[name[self]]]
return[name[rv]]
def function[__unicode__, parameter[self]]:
variable[rv] assign[=] call[name[base].__unicode__, parameter[name[self]]]
call[name[_log_message], parameter[name[self]]]
return[name[rv]]
return[name[LoggingUndefined]]
|
keyword[def] identifier[make_logging_undefined] ( identifier[logger] = keyword[None] , identifier[base] = keyword[None] ):
literal[string]
keyword[if] identifier[logger] keyword[is] keyword[None] :
keyword[import] identifier[logging]
identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] )
identifier[logger] . identifier[addHandler] ( identifier[logging] . identifier[StreamHandler] ( identifier[sys] . identifier[stderr] ))
keyword[if] identifier[base] keyword[is] keyword[None] :
identifier[base] = identifier[Undefined]
keyword[def] identifier[_log_message] ( identifier[undef] ):
keyword[if] identifier[undef] . identifier[_undefined_hint] keyword[is] keyword[None] :
keyword[if] identifier[undef] . identifier[_undefined_obj] keyword[is] identifier[missing] :
identifier[hint] = literal[string] % identifier[undef] . identifier[_undefined_name]
keyword[elif] keyword[not] identifier[isinstance] ( identifier[undef] . identifier[_undefined_name] , identifier[string_types] ):
identifier[hint] = literal[string] %(
identifier[object_type_repr] ( identifier[undef] . identifier[_undefined_obj] ),
identifier[undef] . identifier[_undefined_name] )
keyword[else] :
identifier[hint] = literal[string] %(
identifier[object_type_repr] ( identifier[undef] . identifier[_undefined_obj] ),
identifier[undef] . identifier[_undefined_name] )
keyword[else] :
identifier[hint] = identifier[undef] . identifier[_undefined_hint]
identifier[logger] . identifier[warning] ( literal[string] , identifier[hint] )
keyword[class] identifier[LoggingUndefined] ( identifier[base] ):
keyword[def] identifier[_fail_with_undefined_error] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
keyword[try] :
keyword[return] identifier[base] . identifier[_fail_with_undefined_error] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] )
keyword[except] identifier[self] . identifier[_undefined_exception] keyword[as] identifier[e] :
identifier[logger] . identifier[error] ( literal[string] , identifier[str] ( identifier[e] ))
keyword[raise] identifier[e]
keyword[def] identifier[__str__] ( identifier[self] ):
identifier[rv] = identifier[base] . identifier[__str__] ( identifier[self] )
identifier[_log_message] ( identifier[self] )
keyword[return] identifier[rv]
keyword[def] identifier[__iter__] ( identifier[self] ):
identifier[rv] = identifier[base] . identifier[__iter__] ( identifier[self] )
identifier[_log_message] ( identifier[self] )
keyword[return] identifier[rv]
keyword[if] identifier[PY2] :
keyword[def] identifier[__nonzero__] ( identifier[self] ):
identifier[rv] = identifier[base] . identifier[__nonzero__] ( identifier[self] )
identifier[_log_message] ( identifier[self] )
keyword[return] identifier[rv]
keyword[def] identifier[__unicode__] ( identifier[self] ):
identifier[rv] = identifier[base] . identifier[__unicode__] ( identifier[self] )
identifier[_log_message] ( identifier[self] )
keyword[return] identifier[rv]
keyword[else] :
keyword[def] identifier[__bool__] ( identifier[self] ):
identifier[rv] = identifier[base] . identifier[__bool__] ( identifier[self] )
identifier[_log_message] ( identifier[self] )
keyword[return] identifier[rv]
keyword[return] identifier[LoggingUndefined]
|
def make_logging_undefined(logger=None, base=None):
"""Given a logger object this returns a new undefined class that will
log certain failures. It will log iterations and printing. If no
logger is given a default logger is created.
Example::
logger = logging.getLogger(__name__)
LoggingUndefined = make_logging_undefined(
logger=logger,
base=Undefined
)
.. versionadded:: 2.8
:param logger: the logger to use. If not provided, a default logger
is created.
:param base: the base class to add logging functionality to. This
defaults to :class:`Undefined`.
"""
if logger is None:
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stderr)) # depends on [control=['if'], data=['logger']]
if base is None:
base = Undefined # depends on [control=['if'], data=['base']]
def _log_message(undef):
if undef._undefined_hint is None:
if undef._undefined_obj is missing:
hint = '%s is undefined' % undef._undefined_name # depends on [control=['if'], data=[]]
elif not isinstance(undef._undefined_name, string_types):
hint = '%s has no element %s' % (object_type_repr(undef._undefined_obj), undef._undefined_name) # depends on [control=['if'], data=[]]
else:
hint = '%s has no attribute %s' % (object_type_repr(undef._undefined_obj), undef._undefined_name) # depends on [control=['if'], data=[]]
else:
hint = undef._undefined_hint
logger.warning('Template variable warning: %s', hint)
class LoggingUndefined(base):
def _fail_with_undefined_error(self, *args, **kwargs):
try:
return base._fail_with_undefined_error(self, *args, **kwargs) # depends on [control=['try'], data=[]]
except self._undefined_exception as e:
logger.error('Template variable error: %s', str(e))
raise e # depends on [control=['except'], data=['e']]
def __str__(self):
rv = base.__str__(self)
_log_message(self)
return rv
def __iter__(self):
rv = base.__iter__(self)
_log_message(self)
return rv
if PY2:
def __nonzero__(self):
rv = base.__nonzero__(self)
_log_message(self)
return rv
def __unicode__(self):
rv = base.__unicode__(self)
_log_message(self)
return rv # depends on [control=['if'], data=[]]
else:
def __bool__(self):
rv = base.__bool__(self)
_log_message(self)
return rv
return LoggingUndefined
|
def import_app_sitetree_module(app):
"""Imports sitetree module from a given app.
:param str|unicode app: Application name
:return: module|None
"""
module_name = settings.APP_MODULE_NAME
module = import_module(app)
try:
sub_module = import_module('%s.%s' % (app, module_name))
return sub_module
except ImportError:
if module_has_submodule(module, module_name):
raise
return None
|
def function[import_app_sitetree_module, parameter[app]]:
constant[Imports sitetree module from a given app.
:param str|unicode app: Application name
:return: module|None
]
variable[module_name] assign[=] name[settings].APP_MODULE_NAME
variable[module] assign[=] call[name[import_module], parameter[name[app]]]
<ast.Try object at 0x7da2041da7d0>
|
keyword[def] identifier[import_app_sitetree_module] ( identifier[app] ):
literal[string]
identifier[module_name] = identifier[settings] . identifier[APP_MODULE_NAME]
identifier[module] = identifier[import_module] ( identifier[app] )
keyword[try] :
identifier[sub_module] = identifier[import_module] ( literal[string] %( identifier[app] , identifier[module_name] ))
keyword[return] identifier[sub_module]
keyword[except] identifier[ImportError] :
keyword[if] identifier[module_has_submodule] ( identifier[module] , identifier[module_name] ):
keyword[raise]
keyword[return] keyword[None]
|
def import_app_sitetree_module(app):
"""Imports sitetree module from a given app.
:param str|unicode app: Application name
:return: module|None
"""
module_name = settings.APP_MODULE_NAME
module = import_module(app)
try:
sub_module = import_module('%s.%s' % (app, module_name))
return sub_module # depends on [control=['try'], data=[]]
except ImportError:
if module_has_submodule(module, module_name):
raise # depends on [control=['if'], data=[]]
return None # depends on [control=['except'], data=[]]
|
def uncollapse(self):
"""Uncollapse a private message or modmail."""
url = self.reddit_session.config['uncollapse_message']
self.reddit_session.request_json(url, data={'id': self.name})
|
def function[uncollapse, parameter[self]]:
constant[Uncollapse a private message or modmail.]
variable[url] assign[=] call[name[self].reddit_session.config][constant[uncollapse_message]]
call[name[self].reddit_session.request_json, parameter[name[url]]]
|
keyword[def] identifier[uncollapse] ( identifier[self] ):
literal[string]
identifier[url] = identifier[self] . identifier[reddit_session] . identifier[config] [ literal[string] ]
identifier[self] . identifier[reddit_session] . identifier[request_json] ( identifier[url] , identifier[data] ={ literal[string] : identifier[self] . identifier[name] })
|
def uncollapse(self):
"""Uncollapse a private message or modmail."""
url = self.reddit_session.config['uncollapse_message']
self.reddit_session.request_json(url, data={'id': self.name})
|
def affine_transform_keypoints(coords_list, transform_matrix):
"""Transform keypoint coordinates according to a given affine transform matrix.
OpenCV format, x is width.
Note that, for pose estimation task, flipping requires maintaining the left and right body information.
We should not flip the left and right body, so please use ``tl.prepro.keypoint_random_flip``.
Parameters
-----------
coords_list : list of list of tuple/list
The coordinates
e.g., the keypoint coordinates of every person in an image.
transform_matrix : numpy.array
Transform matrix, OpenCV format.
Examples
---------
>>> # 1. get all affine transform matrices
>>> M_rotate = tl.prepro.affine_rotation_matrix(angle=20)
>>> M_flip = tl.prepro.affine_horizontal_flip_matrix(prob=1)
>>> # 2. combine all affine transform matrices to one matrix
>>> M_combined = dot(M_flip).dot(M_rotate)
>>> # 3. transfrom the matrix from Cartesian coordinate (the origin in the middle of image)
>>> # to Image coordinate (the origin on the top-left of image)
>>> transform_matrix = tl.prepro.transform_matrix_offset_center(M_combined, x=w, y=h)
>>> # 4. then we can transfrom the image once for all transformations
>>> result = tl.prepro.affine_transform_cv2(image, transform_matrix) # 76 times faster
>>> # 5. transform keypoint coordinates
>>> coords = [[(50, 100), (100, 100), (100, 50), (200, 200)], [(250, 50), (200, 50), (200, 100)]]
>>> coords_result = tl.prepro.affine_transform_keypoints(coords, transform_matrix)
"""
coords_result_list = []
for coords in coords_list:
coords = np.asarray(coords)
coords = coords.transpose([1, 0])
coords = np.insert(coords, 2, 1, axis=0)
# print(coords)
# print(transform_matrix)
coords_result = np.matmul(transform_matrix, coords)
coords_result = coords_result[0:2, :].transpose([1, 0])
coords_result_list.append(coords_result)
return coords_result_list
|
def function[affine_transform_keypoints, parameter[coords_list, transform_matrix]]:
constant[Transform keypoint coordinates according to a given affine transform matrix.
OpenCV format, x is width.
Note that, for pose estimation task, flipping requires maintaining the left and right body information.
We should not flip the left and right body, so please use ``tl.prepro.keypoint_random_flip``.
Parameters
-----------
coords_list : list of list of tuple/list
The coordinates
e.g., the keypoint coordinates of every person in an image.
transform_matrix : numpy.array
Transform matrix, OpenCV format.
Examples
---------
>>> # 1. get all affine transform matrices
>>> M_rotate = tl.prepro.affine_rotation_matrix(angle=20)
>>> M_flip = tl.prepro.affine_horizontal_flip_matrix(prob=1)
>>> # 2. combine all affine transform matrices to one matrix
>>> M_combined = dot(M_flip).dot(M_rotate)
>>> # 3. transfrom the matrix from Cartesian coordinate (the origin in the middle of image)
>>> # to Image coordinate (the origin on the top-left of image)
>>> transform_matrix = tl.prepro.transform_matrix_offset_center(M_combined, x=w, y=h)
>>> # 4. then we can transfrom the image once for all transformations
>>> result = tl.prepro.affine_transform_cv2(image, transform_matrix) # 76 times faster
>>> # 5. transform keypoint coordinates
>>> coords = [[(50, 100), (100, 100), (100, 50), (200, 200)], [(250, 50), (200, 50), (200, 100)]]
>>> coords_result = tl.prepro.affine_transform_keypoints(coords, transform_matrix)
]
variable[coords_result_list] assign[=] list[[]]
for taget[name[coords]] in starred[name[coords_list]] begin[:]
variable[coords] assign[=] call[name[np].asarray, parameter[name[coords]]]
variable[coords] assign[=] call[name[coords].transpose, parameter[list[[<ast.Constant object at 0x7da20c6a8730>, <ast.Constant object at 0x7da20c6aa4d0>]]]]
variable[coords] assign[=] call[name[np].insert, parameter[name[coords], constant[2], constant[1]]]
variable[coords_result] assign[=] call[name[np].matmul, parameter[name[transform_matrix], name[coords]]]
variable[coords_result] assign[=] call[call[name[coords_result]][tuple[[<ast.Slice object at 0x7da20c6aa440>, <ast.Slice object at 0x7da20c6a8fa0>]]].transpose, parameter[list[[<ast.Constant object at 0x7da20c6aa680>, <ast.Constant object at 0x7da20c6a9ab0>]]]]
call[name[coords_result_list].append, parameter[name[coords_result]]]
return[name[coords_result_list]]
|
keyword[def] identifier[affine_transform_keypoints] ( identifier[coords_list] , identifier[transform_matrix] ):
literal[string]
identifier[coords_result_list] =[]
keyword[for] identifier[coords] keyword[in] identifier[coords_list] :
identifier[coords] = identifier[np] . identifier[asarray] ( identifier[coords] )
identifier[coords] = identifier[coords] . identifier[transpose] ([ literal[int] , literal[int] ])
identifier[coords] = identifier[np] . identifier[insert] ( identifier[coords] , literal[int] , literal[int] , identifier[axis] = literal[int] )
identifier[coords_result] = identifier[np] . identifier[matmul] ( identifier[transform_matrix] , identifier[coords] )
identifier[coords_result] = identifier[coords_result] [ literal[int] : literal[int] ,:]. identifier[transpose] ([ literal[int] , literal[int] ])
identifier[coords_result_list] . identifier[append] ( identifier[coords_result] )
keyword[return] identifier[coords_result_list]
|
def affine_transform_keypoints(coords_list, transform_matrix):
"""Transform keypoint coordinates according to a given affine transform matrix.
OpenCV format, x is width.
Note that, for pose estimation task, flipping requires maintaining the left and right body information.
We should not flip the left and right body, so please use ``tl.prepro.keypoint_random_flip``.
Parameters
-----------
coords_list : list of list of tuple/list
The coordinates
e.g., the keypoint coordinates of every person in an image.
transform_matrix : numpy.array
Transform matrix, OpenCV format.
Examples
---------
>>> # 1. get all affine transform matrices
>>> M_rotate = tl.prepro.affine_rotation_matrix(angle=20)
>>> M_flip = tl.prepro.affine_horizontal_flip_matrix(prob=1)
>>> # 2. combine all affine transform matrices to one matrix
>>> M_combined = dot(M_flip).dot(M_rotate)
>>> # 3. transfrom the matrix from Cartesian coordinate (the origin in the middle of image)
>>> # to Image coordinate (the origin on the top-left of image)
>>> transform_matrix = tl.prepro.transform_matrix_offset_center(M_combined, x=w, y=h)
>>> # 4. then we can transfrom the image once for all transformations
>>> result = tl.prepro.affine_transform_cv2(image, transform_matrix) # 76 times faster
>>> # 5. transform keypoint coordinates
>>> coords = [[(50, 100), (100, 100), (100, 50), (200, 200)], [(250, 50), (200, 50), (200, 100)]]
>>> coords_result = tl.prepro.affine_transform_keypoints(coords, transform_matrix)
"""
coords_result_list = []
for coords in coords_list:
coords = np.asarray(coords)
coords = coords.transpose([1, 0])
coords = np.insert(coords, 2, 1, axis=0)
# print(coords)
# print(transform_matrix)
coords_result = np.matmul(transform_matrix, coords)
coords_result = coords_result[0:2, :].transpose([1, 0])
coords_result_list.append(coords_result) # depends on [control=['for'], data=['coords']]
return coords_result_list
|
def error(self, interface_id, errorcode, msg):
"""When some error occurs the CCU / Homegear will send it's error message here"""
LOG.debug("RPCFunctions.error: interface_id = %s, errorcode = %i, message = %s" % (
interface_id, int(errorcode), str(msg)))
if self.systemcallback:
self.systemcallback('error', interface_id, errorcode, msg)
return True
|
def function[error, parameter[self, interface_id, errorcode, msg]]:
constant[When some error occurs the CCU / Homegear will send it's error message here]
call[name[LOG].debug, parameter[binary_operation[constant[RPCFunctions.error: interface_id = %s, errorcode = %i, message = %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0546dd0>, <ast.Call object at 0x7da1b0544400>, <ast.Call object at 0x7da1b0546cb0>]]]]]
if name[self].systemcallback begin[:]
call[name[self].systemcallback, parameter[constant[error], name[interface_id], name[errorcode], name[msg]]]
return[constant[True]]
|
keyword[def] identifier[error] ( identifier[self] , identifier[interface_id] , identifier[errorcode] , identifier[msg] ):
literal[string]
identifier[LOG] . identifier[debug] ( literal[string] %(
identifier[interface_id] , identifier[int] ( identifier[errorcode] ), identifier[str] ( identifier[msg] )))
keyword[if] identifier[self] . identifier[systemcallback] :
identifier[self] . identifier[systemcallback] ( literal[string] , identifier[interface_id] , identifier[errorcode] , identifier[msg] )
keyword[return] keyword[True]
|
def error(self, interface_id, errorcode, msg):
"""When some error occurs the CCU / Homegear will send it's error message here"""
LOG.debug('RPCFunctions.error: interface_id = %s, errorcode = %i, message = %s' % (interface_id, int(errorcode), str(msg)))
if self.systemcallback:
self.systemcallback('error', interface_id, errorcode, msg) # depends on [control=['if'], data=[]]
return True
|
def cc(self, chan, ctrl, val):
"""Send control change value.
The controls that are recognized are dependent on the
SoundFont. Values are always 0 to 127. Typical controls
include:
1: vibrato
7: volume
10: pan (left to right)
11: expression (soft to loud)
64: sustain
91: reverb
93: chorus
"""
return fluid_synth_cc(self.synth, chan, ctrl, val)
|
def function[cc, parameter[self, chan, ctrl, val]]:
constant[Send control change value.
The controls that are recognized are dependent on the
SoundFont. Values are always 0 to 127. Typical controls
include:
1: vibrato
7: volume
10: pan (left to right)
11: expression (soft to loud)
64: sustain
91: reverb
93: chorus
]
return[call[name[fluid_synth_cc], parameter[name[self].synth, name[chan], name[ctrl], name[val]]]]
|
keyword[def] identifier[cc] ( identifier[self] , identifier[chan] , identifier[ctrl] , identifier[val] ):
literal[string]
keyword[return] identifier[fluid_synth_cc] ( identifier[self] . identifier[synth] , identifier[chan] , identifier[ctrl] , identifier[val] )
|
def cc(self, chan, ctrl, val):
"""Send control change value.
The controls that are recognized are dependent on the
SoundFont. Values are always 0 to 127. Typical controls
include:
1: vibrato
7: volume
10: pan (left to right)
11: expression (soft to loud)
64: sustain
91: reverb
93: chorus
"""
return fluid_synth_cc(self.synth, chan, ctrl, val)
|
def _default_commands(self):
""" Build the list of CLI commands by finding subclasses of the Command
class
Also allows commands to be installed using the "enaml_native_command"
entry point. This entry point should return a Command subclass
"""
commands = [c() for c in find_commands(Command)]
#: Get commands installed via entry points
for ep in pkg_resources.iter_entry_points(
group="enaml_native_command"):
c = ep.load()
if not issubclass(c, Command):
print("Warning: entry point {} did not return a valid enaml "
"cli command! This command will be ignored!".format(
ep.name))
commands.append(c())
return commands
|
def function[_default_commands, parameter[self]]:
constant[ Build the list of CLI commands by finding subclasses of the Command
class
Also allows commands to be installed using the "enaml_native_command"
entry point. This entry point should return a Command subclass
]
variable[commands] assign[=] <ast.ListComp object at 0x7da18bcc8a30>
for taget[name[ep]] in starred[call[name[pkg_resources].iter_entry_points, parameter[]]] begin[:]
variable[c] assign[=] call[name[ep].load, parameter[]]
if <ast.UnaryOp object at 0x7da212d41a80> begin[:]
call[name[print], parameter[call[constant[Warning: entry point {} did not return a valid enaml cli command! This command will be ignored!].format, parameter[name[ep].name]]]]
call[name[commands].append, parameter[call[name[c], parameter[]]]]
return[name[commands]]
|
keyword[def] identifier[_default_commands] ( identifier[self] ):
literal[string]
identifier[commands] =[ identifier[c] () keyword[for] identifier[c] keyword[in] identifier[find_commands] ( identifier[Command] )]
keyword[for] identifier[ep] keyword[in] identifier[pkg_resources] . identifier[iter_entry_points] (
identifier[group] = literal[string] ):
identifier[c] = identifier[ep] . identifier[load] ()
keyword[if] keyword[not] identifier[issubclass] ( identifier[c] , identifier[Command] ):
identifier[print] ( literal[string]
literal[string] . identifier[format] (
identifier[ep] . identifier[name] ))
identifier[commands] . identifier[append] ( identifier[c] ())
keyword[return] identifier[commands]
|
def _default_commands(self):
""" Build the list of CLI commands by finding subclasses of the Command
class
Also allows commands to be installed using the "enaml_native_command"
entry point. This entry point should return a Command subclass
"""
commands = [c() for c in find_commands(Command)]
#: Get commands installed via entry points
for ep in pkg_resources.iter_entry_points(group='enaml_native_command'):
c = ep.load()
if not issubclass(c, Command):
print('Warning: entry point {} did not return a valid enaml cli command! This command will be ignored!'.format(ep.name)) # depends on [control=['if'], data=[]]
commands.append(c()) # depends on [control=['for'], data=['ep']]
return commands
|
def get_neighbors(self, index):
"""!
@brief Finds neighbors of the oscillator with specified index.
@param[in] index (uint): index of oscillator for which neighbors should be found in the network.
@return (list) Indexes of neighbors of the specified oscillator.
"""
if ( (self._ccore_network_pointer is not None) and (self._osc_conn is None) ):
self._osc_conn = wrapper.sync_connectivity_matrix(self._ccore_network_pointer);
return super().get_neighbors(index);
|
def function[get_neighbors, parameter[self, index]]:
constant[!
@brief Finds neighbors of the oscillator with specified index.
@param[in] index (uint): index of oscillator for which neighbors should be found in the network.
@return (list) Indexes of neighbors of the specified oscillator.
]
if <ast.BoolOp object at 0x7da1b013ec50> begin[:]
name[self]._osc_conn assign[=] call[name[wrapper].sync_connectivity_matrix, parameter[name[self]._ccore_network_pointer]]
return[call[call[name[super], parameter[]].get_neighbors, parameter[name[index]]]]
|
keyword[def] identifier[get_neighbors] ( identifier[self] , identifier[index] ):
literal[string]
keyword[if] (( identifier[self] . identifier[_ccore_network_pointer] keyword[is] keyword[not] keyword[None] ) keyword[and] ( identifier[self] . identifier[_osc_conn] keyword[is] keyword[None] )):
identifier[self] . identifier[_osc_conn] = identifier[wrapper] . identifier[sync_connectivity_matrix] ( identifier[self] . identifier[_ccore_network_pointer] );
keyword[return] identifier[super] (). identifier[get_neighbors] ( identifier[index] );
|
def get_neighbors(self, index):
"""!
@brief Finds neighbors of the oscillator with specified index.
@param[in] index (uint): index of oscillator for which neighbors should be found in the network.
@return (list) Indexes of neighbors of the specified oscillator.
"""
if self._ccore_network_pointer is not None and self._osc_conn is None:
self._osc_conn = wrapper.sync_connectivity_matrix(self._ccore_network_pointer) # depends on [control=['if'], data=[]]
return super().get_neighbors(index)
|
def get_currency(self, code):
"""
Returns an iterable of currency elements matching the code:
<CtryNm>UNITED STATES MINOR OUTLYING ISLANDS (THE)</CtryNm>
<CcyNm>US Dollar</CcyNm>
<Ccy>USD</Ccy>
<CcyNbr>840</CcyNbr>
<CcyMnrUnts>2</CcyMnrUnts>
NOTE: the currency is repeated for each country name
"""
missing = True
for currency in self.currencies[0]:
try:
if code != currency.find('Ccy').text:
continue
except AttributeError:
continue
missing = False
yield currency
if missing:
raise RuntimeError("%s: %s not found" % (self.name, code))
|
def function[get_currency, parameter[self, code]]:
constant[
Returns an iterable of currency elements matching the code:
<CtryNm>UNITED STATES MINOR OUTLYING ISLANDS (THE)</CtryNm>
<CcyNm>US Dollar</CcyNm>
<Ccy>USD</Ccy>
<CcyNbr>840</CcyNbr>
<CcyMnrUnts>2</CcyMnrUnts>
NOTE: the currency is repeated for each country name
]
variable[missing] assign[=] constant[True]
for taget[name[currency]] in starred[call[name[self].currencies][constant[0]]] begin[:]
<ast.Try object at 0x7da1b2346a70>
variable[missing] assign[=] constant[False]
<ast.Yield object at 0x7da1b2346980>
if name[missing] begin[:]
<ast.Raise object at 0x7da1b2344b80>
|
keyword[def] identifier[get_currency] ( identifier[self] , identifier[code] ):
literal[string]
identifier[missing] = keyword[True]
keyword[for] identifier[currency] keyword[in] identifier[self] . identifier[currencies] [ literal[int] ]:
keyword[try] :
keyword[if] identifier[code] != identifier[currency] . identifier[find] ( literal[string] ). identifier[text] :
keyword[continue]
keyword[except] identifier[AttributeError] :
keyword[continue]
identifier[missing] = keyword[False]
keyword[yield] identifier[currency]
keyword[if] identifier[missing] :
keyword[raise] identifier[RuntimeError] ( literal[string] %( identifier[self] . identifier[name] , identifier[code] ))
|
def get_currency(self, code):
"""
Returns an iterable of currency elements matching the code:
<CtryNm>UNITED STATES MINOR OUTLYING ISLANDS (THE)</CtryNm>
<CcyNm>US Dollar</CcyNm>
<Ccy>USD</Ccy>
<CcyNbr>840</CcyNbr>
<CcyMnrUnts>2</CcyMnrUnts>
NOTE: the currency is repeated for each country name
"""
missing = True
for currency in self.currencies[0]:
try:
if code != currency.find('Ccy').text:
continue # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except AttributeError:
continue # depends on [control=['except'], data=[]]
missing = False
yield currency # depends on [control=['for'], data=['currency']]
if missing:
raise RuntimeError('%s: %s not found' % (self.name, code)) # depends on [control=['if'], data=[]]
|
def clean_draft_pages_from_space(confluence, space_key, count, date_now):
"""
Remove draft pages from space using datetime.now
:param confluence:
:param space_key:
:param count:
:param date_now:
:return: int counter
"""
pages = confluence.get_all_draft_pages_from_space(space=space_key, start=0, limit=500)
for page in pages:
page_id = page['id']
draft_page = confluence.get_draft_page_by_id(page_id=page_id)
last_date_string = draft_page['version']['when']
last_date = datetime.datetime.strptime(last_date_string.replace(".000", "")[:-6], "%Y-%m-%dT%H:%M:%S")
if (date_now - last_date) > datetime.timedelta(days=DRAFT_DAYS):
count += 1
print("Removing page with page id: " + page_id)
confluence.remove_page_as_draft(page_id=page_id)
print("Removed page with date " + last_date_string)
return count
|
def function[clean_draft_pages_from_space, parameter[confluence, space_key, count, date_now]]:
constant[
Remove draft pages from space using datetime.now
:param confluence:
:param space_key:
:param count:
:param date_now:
:return: int counter
]
variable[pages] assign[=] call[name[confluence].get_all_draft_pages_from_space, parameter[]]
for taget[name[page]] in starred[name[pages]] begin[:]
variable[page_id] assign[=] call[name[page]][constant[id]]
variable[draft_page] assign[=] call[name[confluence].get_draft_page_by_id, parameter[]]
variable[last_date_string] assign[=] call[call[name[draft_page]][constant[version]]][constant[when]]
variable[last_date] assign[=] call[name[datetime].datetime.strptime, parameter[call[call[name[last_date_string].replace, parameter[constant[.000], constant[]]]][<ast.Slice object at 0x7da20e957220>], constant[%Y-%m-%dT%H:%M:%S]]]
if compare[binary_operation[name[date_now] - name[last_date]] greater[>] call[name[datetime].timedelta, parameter[]]] begin[:]
<ast.AugAssign object at 0x7da20e956a40>
call[name[print], parameter[binary_operation[constant[Removing page with page id: ] + name[page_id]]]]
call[name[confluence].remove_page_as_draft, parameter[]]
call[name[print], parameter[binary_operation[constant[Removed page with date ] + name[last_date_string]]]]
return[name[count]]
|
keyword[def] identifier[clean_draft_pages_from_space] ( identifier[confluence] , identifier[space_key] , identifier[count] , identifier[date_now] ):
literal[string]
identifier[pages] = identifier[confluence] . identifier[get_all_draft_pages_from_space] ( identifier[space] = identifier[space_key] , identifier[start] = literal[int] , identifier[limit] = literal[int] )
keyword[for] identifier[page] keyword[in] identifier[pages] :
identifier[page_id] = identifier[page] [ literal[string] ]
identifier[draft_page] = identifier[confluence] . identifier[get_draft_page_by_id] ( identifier[page_id] = identifier[page_id] )
identifier[last_date_string] = identifier[draft_page] [ literal[string] ][ literal[string] ]
identifier[last_date] = identifier[datetime] . identifier[datetime] . identifier[strptime] ( identifier[last_date_string] . identifier[replace] ( literal[string] , literal[string] )[:- literal[int] ], literal[string] )
keyword[if] ( identifier[date_now] - identifier[last_date] )> identifier[datetime] . identifier[timedelta] ( identifier[days] = identifier[DRAFT_DAYS] ):
identifier[count] += literal[int]
identifier[print] ( literal[string] + identifier[page_id] )
identifier[confluence] . identifier[remove_page_as_draft] ( identifier[page_id] = identifier[page_id] )
identifier[print] ( literal[string] + identifier[last_date_string] )
keyword[return] identifier[count]
|
def clean_draft_pages_from_space(confluence, space_key, count, date_now):
"""
Remove draft pages from space using datetime.now
:param confluence:
:param space_key:
:param count:
:param date_now:
:return: int counter
"""
pages = confluence.get_all_draft_pages_from_space(space=space_key, start=0, limit=500)
for page in pages:
page_id = page['id']
draft_page = confluence.get_draft_page_by_id(page_id=page_id)
last_date_string = draft_page['version']['when']
last_date = datetime.datetime.strptime(last_date_string.replace('.000', '')[:-6], '%Y-%m-%dT%H:%M:%S')
if date_now - last_date > datetime.timedelta(days=DRAFT_DAYS):
count += 1
print('Removing page with page id: ' + page_id)
confluence.remove_page_as_draft(page_id=page_id)
print('Removed page with date ' + last_date_string) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['page']]
return count
|
def unsure_pyname(pyname, unbound=True):
"""Return `True` if we don't know what this name references"""
if pyname is None:
return True
if unbound and not isinstance(pyname, pynames.UnboundName):
return False
if pyname.get_object() == pyobjects.get_unknown():
return True
|
def function[unsure_pyname, parameter[pyname, unbound]]:
constant[Return `True` if we don't know what this name references]
if compare[name[pyname] is constant[None]] begin[:]
return[constant[True]]
if <ast.BoolOp object at 0x7da1b065d330> begin[:]
return[constant[False]]
if compare[call[name[pyname].get_object, parameter[]] equal[==] call[name[pyobjects].get_unknown, parameter[]]] begin[:]
return[constant[True]]
|
keyword[def] identifier[unsure_pyname] ( identifier[pyname] , identifier[unbound] = keyword[True] ):
literal[string]
keyword[if] identifier[pyname] keyword[is] keyword[None] :
keyword[return] keyword[True]
keyword[if] identifier[unbound] keyword[and] keyword[not] identifier[isinstance] ( identifier[pyname] , identifier[pynames] . identifier[UnboundName] ):
keyword[return] keyword[False]
keyword[if] identifier[pyname] . identifier[get_object] ()== identifier[pyobjects] . identifier[get_unknown] ():
keyword[return] keyword[True]
|
def unsure_pyname(pyname, unbound=True):
"""Return `True` if we don't know what this name references"""
if pyname is None:
return True # depends on [control=['if'], data=[]]
if unbound and (not isinstance(pyname, pynames.UnboundName)):
return False # depends on [control=['if'], data=[]]
if pyname.get_object() == pyobjects.get_unknown():
return True # depends on [control=['if'], data=[]]
|
def deflate(self, value):
"""
Handles the marshalling from NeomodelPoint to Neo4J POINT
:param value: The point that was assigned as value to a property in the model
:type value: NeomodelPoint
:return: Neo4J POINT
"""
if not isinstance(value, NeomodelPoint):
raise TypeError('Invalid datatype to deflate. Expected NeomodelPoint, received {}'.format(type(value)))
if not value.crs == self._crs:
raise ValueError('Invalid CRS. '
'Expected NeomodelPoint defined over {}, '
'received NeomodelPoint defined over {}'.format(self._crs, value.crs))
if value.crs == 'cartesian-3d':
return neo4j.types.spatial.CartesianPoint((value.x, value.y, value.z))
elif value.crs == 'cartesian':
return neo4j.types.spatial.CartesianPoint((value.x,value.y))
elif value.crs == 'wgs-84':
return neo4j.types.spatial.WGS84Point((value.longitude, value.latitude))
elif value.crs == 'wgs-84-3d':
return neo4j.types.spatial.WGS84Point((value.longitude, value.latitude, value.height))
|
def function[deflate, parameter[self, value]]:
constant[
Handles the marshalling from NeomodelPoint to Neo4J POINT
:param value: The point that was assigned as value to a property in the model
:type value: NeomodelPoint
:return: Neo4J POINT
]
if <ast.UnaryOp object at 0x7da18f09fa30> begin[:]
<ast.Raise object at 0x7da18f09f5e0>
if <ast.UnaryOp object at 0x7da18f09e7a0> begin[:]
<ast.Raise object at 0x7da18f09df30>
if compare[name[value].crs equal[==] constant[cartesian-3d]] begin[:]
return[call[name[neo4j].types.spatial.CartesianPoint, parameter[tuple[[<ast.Attribute object at 0x7da18f09e4d0>, <ast.Attribute object at 0x7da18f09e320>, <ast.Attribute object at 0x7da18f09cee0>]]]]]
|
keyword[def] identifier[deflate] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[NeomodelPoint] ):
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[type] ( identifier[value] )))
keyword[if] keyword[not] identifier[value] . identifier[crs] == identifier[self] . identifier[_crs] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[self] . identifier[_crs] , identifier[value] . identifier[crs] ))
keyword[if] identifier[value] . identifier[crs] == literal[string] :
keyword[return] identifier[neo4j] . identifier[types] . identifier[spatial] . identifier[CartesianPoint] (( identifier[value] . identifier[x] , identifier[value] . identifier[y] , identifier[value] . identifier[z] ))
keyword[elif] identifier[value] . identifier[crs] == literal[string] :
keyword[return] identifier[neo4j] . identifier[types] . identifier[spatial] . identifier[CartesianPoint] (( identifier[value] . identifier[x] , identifier[value] . identifier[y] ))
keyword[elif] identifier[value] . identifier[crs] == literal[string] :
keyword[return] identifier[neo4j] . identifier[types] . identifier[spatial] . identifier[WGS84Point] (( identifier[value] . identifier[longitude] , identifier[value] . identifier[latitude] ))
keyword[elif] identifier[value] . identifier[crs] == literal[string] :
keyword[return] identifier[neo4j] . identifier[types] . identifier[spatial] . identifier[WGS84Point] (( identifier[value] . identifier[longitude] , identifier[value] . identifier[latitude] , identifier[value] . identifier[height] ))
|
def deflate(self, value):
"""
Handles the marshalling from NeomodelPoint to Neo4J POINT
:param value: The point that was assigned as value to a property in the model
:type value: NeomodelPoint
:return: Neo4J POINT
"""
if not isinstance(value, NeomodelPoint):
raise TypeError('Invalid datatype to deflate. Expected NeomodelPoint, received {}'.format(type(value))) # depends on [control=['if'], data=[]]
if not value.crs == self._crs:
raise ValueError('Invalid CRS. Expected NeomodelPoint defined over {}, received NeomodelPoint defined over {}'.format(self._crs, value.crs)) # depends on [control=['if'], data=[]]
if value.crs == 'cartesian-3d':
return neo4j.types.spatial.CartesianPoint((value.x, value.y, value.z)) # depends on [control=['if'], data=[]]
elif value.crs == 'cartesian':
return neo4j.types.spatial.CartesianPoint((value.x, value.y)) # depends on [control=['if'], data=[]]
elif value.crs == 'wgs-84':
return neo4j.types.spatial.WGS84Point((value.longitude, value.latitude)) # depends on [control=['if'], data=[]]
elif value.crs == 'wgs-84-3d':
return neo4j.types.spatial.WGS84Point((value.longitude, value.latitude, value.height)) # depends on [control=['if'], data=[]]
|
def evaluate(self, system_id=1, rouge_args=None):
"""
Run ROUGE to evaluate the system summaries in system_dir against
the model summaries in model_dir. The summaries are assumed to
be in the one-sentence-per-line HTML format ROUGE understands.
system_id: Optional system ID which will be printed in
ROUGE's output.
Returns: Rouge output as string.
"""
self.write_config(system_id=system_id)
options = self.__get_options(rouge_args)
command = [self._bin_path] + options
env = None
if hasattr(self, "_home_dir") and self._home_dir:
env = {'ROUGE_EVAL_HOME': self._home_dir}
self.log.info(
"Running ROUGE with command {}".format(" ".join(command)))
rouge_output = check_output(command, env=env).decode("UTF-8")
return rouge_output
|
def function[evaluate, parameter[self, system_id, rouge_args]]:
constant[
Run ROUGE to evaluate the system summaries in system_dir against
the model summaries in model_dir. The summaries are assumed to
be in the one-sentence-per-line HTML format ROUGE understands.
system_id: Optional system ID which will be printed in
ROUGE's output.
Returns: Rouge output as string.
]
call[name[self].write_config, parameter[]]
variable[options] assign[=] call[name[self].__get_options, parameter[name[rouge_args]]]
variable[command] assign[=] binary_operation[list[[<ast.Attribute object at 0x7da1b08f87c0>]] + name[options]]
variable[env] assign[=] constant[None]
if <ast.BoolOp object at 0x7da1b07906a0> begin[:]
variable[env] assign[=] dictionary[[<ast.Constant object at 0x7da1b07925c0>], [<ast.Attribute object at 0x7da1b0790700>]]
call[name[self].log.info, parameter[call[constant[Running ROUGE with command {}].format, parameter[call[constant[ ].join, parameter[name[command]]]]]]]
variable[rouge_output] assign[=] call[call[name[check_output], parameter[name[command]]].decode, parameter[constant[UTF-8]]]
return[name[rouge_output]]
|
keyword[def] identifier[evaluate] ( identifier[self] , identifier[system_id] = literal[int] , identifier[rouge_args] = keyword[None] ):
literal[string]
identifier[self] . identifier[write_config] ( identifier[system_id] = identifier[system_id] )
identifier[options] = identifier[self] . identifier[__get_options] ( identifier[rouge_args] )
identifier[command] =[ identifier[self] . identifier[_bin_path] ]+ identifier[options]
identifier[env] = keyword[None]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[_home_dir] :
identifier[env] ={ literal[string] : identifier[self] . identifier[_home_dir] }
identifier[self] . identifier[log] . identifier[info] (
literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[command] )))
identifier[rouge_output] = identifier[check_output] ( identifier[command] , identifier[env] = identifier[env] ). identifier[decode] ( literal[string] )
keyword[return] identifier[rouge_output]
|
def evaluate(self, system_id=1, rouge_args=None):
"""
Run ROUGE to evaluate the system summaries in system_dir against
the model summaries in model_dir. The summaries are assumed to
be in the one-sentence-per-line HTML format ROUGE understands.
system_id: Optional system ID which will be printed in
ROUGE's output.
Returns: Rouge output as string.
"""
self.write_config(system_id=system_id)
options = self.__get_options(rouge_args)
command = [self._bin_path] + options
env = None
if hasattr(self, '_home_dir') and self._home_dir:
env = {'ROUGE_EVAL_HOME': self._home_dir} # depends on [control=['if'], data=[]]
self.log.info('Running ROUGE with command {}'.format(' '.join(command)))
rouge_output = check_output(command, env=env).decode('UTF-8')
return rouge_output
|
def debug_process(pid):
"""Interrupt a running process and debug it."""
os.kill(pid, signal.SIGUSR1) # Signal process.
pipe = NamedPipe(pipename(pid), 1)
try:
while pipe.is_open():
txt=raw_input(pipe.get()) + '\n'
pipe.put(txt)
except EOFError:
pass # Exit.
pipe.close()
|
def function[debug_process, parameter[pid]]:
constant[Interrupt a running process and debug it.]
call[name[os].kill, parameter[name[pid], name[signal].SIGUSR1]]
variable[pipe] assign[=] call[name[NamedPipe], parameter[call[name[pipename], parameter[name[pid]]], constant[1]]]
<ast.Try object at 0x7da1b10a4430>
call[name[pipe].close, parameter[]]
|
keyword[def] identifier[debug_process] ( identifier[pid] ):
literal[string]
identifier[os] . identifier[kill] ( identifier[pid] , identifier[signal] . identifier[SIGUSR1] )
identifier[pipe] = identifier[NamedPipe] ( identifier[pipename] ( identifier[pid] ), literal[int] )
keyword[try] :
keyword[while] identifier[pipe] . identifier[is_open] ():
identifier[txt] = identifier[raw_input] ( identifier[pipe] . identifier[get] ())+ literal[string]
identifier[pipe] . identifier[put] ( identifier[txt] )
keyword[except] identifier[EOFError] :
keyword[pass]
identifier[pipe] . identifier[close] ()
|
def debug_process(pid):
"""Interrupt a running process and debug it."""
os.kill(pid, signal.SIGUSR1) # Signal process.
pipe = NamedPipe(pipename(pid), 1)
try:
while pipe.is_open():
txt = raw_input(pipe.get()) + '\n'
pipe.put(txt) # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]]
except EOFError:
pass # Exit. # depends on [control=['except'], data=[]]
pipe.close()
|
def hdd_to_pmf(hypo_depth_dist, use_default=False):
"""
Returns the hypocentral depth distribtuion as an instance of the :class:
openquake.hazardlib.pmf.
"""
if isinstance(hypo_depth_dist, PMF):
# Is already instance of PMF
return hypo_depth_dist
else:
if use_default:
# Default value of 10 km accepted
return PMF([(1.0, 10.0)])
else:
# Out of options - raise error!
raise ValueError('Hypocentral depth distribution not defined!')
|
def function[hdd_to_pmf, parameter[hypo_depth_dist, use_default]]:
constant[
Returns the hypocentral depth distribtuion as an instance of the :class:
openquake.hazardlib.pmf.
]
if call[name[isinstance], parameter[name[hypo_depth_dist], name[PMF]]] begin[:]
return[name[hypo_depth_dist]]
|
keyword[def] identifier[hdd_to_pmf] ( identifier[hypo_depth_dist] , identifier[use_default] = keyword[False] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[hypo_depth_dist] , identifier[PMF] ):
keyword[return] identifier[hypo_depth_dist]
keyword[else] :
keyword[if] identifier[use_default] :
keyword[return] identifier[PMF] ([( literal[int] , literal[int] )])
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
|
def hdd_to_pmf(hypo_depth_dist, use_default=False):
"""
Returns the hypocentral depth distribtuion as an instance of the :class:
openquake.hazardlib.pmf.
"""
if isinstance(hypo_depth_dist, PMF):
# Is already instance of PMF
return hypo_depth_dist # depends on [control=['if'], data=[]]
elif use_default:
# Default value of 10 km accepted
return PMF([(1.0, 10.0)]) # depends on [control=['if'], data=[]]
else:
# Out of options - raise error!
raise ValueError('Hypocentral depth distribution not defined!')
|
def sort_file_tabs_alphabetically(self):
"""Sort open tabs alphabetically."""
while self.sorted() is False:
for i in range(0, self.tabs.tabBar().count()):
if(self.tabs.tabBar().tabText(i) >
self.tabs.tabBar().tabText(i + 1)):
self.tabs.tabBar().moveTab(i, i + 1)
|
def function[sort_file_tabs_alphabetically, parameter[self]]:
constant[Sort open tabs alphabetically.]
while compare[call[name[self].sorted, parameter[]] is constant[False]] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[call[name[self].tabs.tabBar, parameter[]].count, parameter[]]]]] begin[:]
if compare[call[call[name[self].tabs.tabBar, parameter[]].tabText, parameter[name[i]]] greater[>] call[call[name[self].tabs.tabBar, parameter[]].tabText, parameter[binary_operation[name[i] + constant[1]]]]] begin[:]
call[call[name[self].tabs.tabBar, parameter[]].moveTab, parameter[name[i], binary_operation[name[i] + constant[1]]]]
|
keyword[def] identifier[sort_file_tabs_alphabetically] ( identifier[self] ):
literal[string]
keyword[while] identifier[self] . identifier[sorted] () keyword[is] keyword[False] :
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[tabs] . identifier[tabBar] (). identifier[count] ()):
keyword[if] ( identifier[self] . identifier[tabs] . identifier[tabBar] (). identifier[tabText] ( identifier[i] )>
identifier[self] . identifier[tabs] . identifier[tabBar] (). identifier[tabText] ( identifier[i] + literal[int] )):
identifier[self] . identifier[tabs] . identifier[tabBar] (). identifier[moveTab] ( identifier[i] , identifier[i] + literal[int] )
|
def sort_file_tabs_alphabetically(self):
"""Sort open tabs alphabetically."""
while self.sorted() is False:
for i in range(0, self.tabs.tabBar().count()):
if self.tabs.tabBar().tabText(i) > self.tabs.tabBar().tabText(i + 1):
self.tabs.tabBar().moveTab(i, i + 1) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['while'], data=[]]
|
def _initialize_context(self, trace_header):
"""
Create a facade segment based on environment variables
set by AWS Lambda and initialize storage for subsegments.
"""
sampled = None
if not global_sdk_config.sdk_enabled():
# Force subsequent subsegments to be disabled and turned into DummySegments.
sampled = False
elif trace_header.sampled == 0:
sampled = False
elif trace_header.sampled == 1:
sampled = True
segment = FacadeSegment(
name='facade',
traceid=trace_header.root,
entityid=trace_header.parent,
sampled=sampled,
)
setattr(self._local, 'segment', segment)
setattr(self._local, 'entities', [])
|
def function[_initialize_context, parameter[self, trace_header]]:
constant[
Create a facade segment based on environment variables
set by AWS Lambda and initialize storage for subsegments.
]
variable[sampled] assign[=] constant[None]
if <ast.UnaryOp object at 0x7da204960040> begin[:]
variable[sampled] assign[=] constant[False]
variable[segment] assign[=] call[name[FacadeSegment], parameter[]]
call[name[setattr], parameter[name[self]._local, constant[segment], name[segment]]]
call[name[setattr], parameter[name[self]._local, constant[entities], list[[]]]]
|
keyword[def] identifier[_initialize_context] ( identifier[self] , identifier[trace_header] ):
literal[string]
identifier[sampled] = keyword[None]
keyword[if] keyword[not] identifier[global_sdk_config] . identifier[sdk_enabled] ():
identifier[sampled] = keyword[False]
keyword[elif] identifier[trace_header] . identifier[sampled] == literal[int] :
identifier[sampled] = keyword[False]
keyword[elif] identifier[trace_header] . identifier[sampled] == literal[int] :
identifier[sampled] = keyword[True]
identifier[segment] = identifier[FacadeSegment] (
identifier[name] = literal[string] ,
identifier[traceid] = identifier[trace_header] . identifier[root] ,
identifier[entityid] = identifier[trace_header] . identifier[parent] ,
identifier[sampled] = identifier[sampled] ,
)
identifier[setattr] ( identifier[self] . identifier[_local] , literal[string] , identifier[segment] )
identifier[setattr] ( identifier[self] . identifier[_local] , literal[string] ,[])
|
def _initialize_context(self, trace_header):
"""
Create a facade segment based on environment variables
set by AWS Lambda and initialize storage for subsegments.
"""
sampled = None
if not global_sdk_config.sdk_enabled():
# Force subsequent subsegments to be disabled and turned into DummySegments.
sampled = False # depends on [control=['if'], data=[]]
elif trace_header.sampled == 0:
sampled = False # depends on [control=['if'], data=[]]
elif trace_header.sampled == 1:
sampled = True # depends on [control=['if'], data=[]]
segment = FacadeSegment(name='facade', traceid=trace_header.root, entityid=trace_header.parent, sampled=sampled)
setattr(self._local, 'segment', segment)
setattr(self._local, 'entities', [])
|
def traverse_nodes(self, node_set, depth=0):
"""BFS traversal of nodes that returns name traversal as large string.
Args:
node_set: Set of input nodes to begin traversal.
depth: Current traversal depth for child node viewing.
Returns:
type: String containing tabbed traversal view.
"""
tab = " "
result = list()
for n in node_set:
repr = (
n
if self.nodes[n]["type"] == "variable"
else f"{n}{inspect.signature(self.nodes[n]['lambda_fn'])}"
)
result.append(f"{tab * depth}{repr}")
result.extend(
self.traverse_nodes(self.successors(n), depth=depth + 1)
)
return result
|
def function[traverse_nodes, parameter[self, node_set, depth]]:
constant[BFS traversal of nodes that returns name traversal as large string.
Args:
node_set: Set of input nodes to begin traversal.
depth: Current traversal depth for child node viewing.
Returns:
type: String containing tabbed traversal view.
]
variable[tab] assign[=] constant[ ]
variable[result] assign[=] call[name[list], parameter[]]
for taget[name[n]] in starred[name[node_set]] begin[:]
variable[repr] assign[=] <ast.IfExp object at 0x7da20c7943d0>
call[name[result].append, parameter[<ast.JoinedStr object at 0x7da1b049b400>]]
call[name[result].extend, parameter[call[name[self].traverse_nodes, parameter[call[name[self].successors, parameter[name[n]]]]]]]
return[name[result]]
|
keyword[def] identifier[traverse_nodes] ( identifier[self] , identifier[node_set] , identifier[depth] = literal[int] ):
literal[string]
identifier[tab] = literal[string]
identifier[result] = identifier[list] ()
keyword[for] identifier[n] keyword[in] identifier[node_set] :
identifier[repr] =(
identifier[n]
keyword[if] identifier[self] . identifier[nodes] [ identifier[n] ][ literal[string] ]== literal[string]
keyword[else] literal[string]
)
identifier[result] . identifier[append] ( literal[string] )
identifier[result] . identifier[extend] (
identifier[self] . identifier[traverse_nodes] ( identifier[self] . identifier[successors] ( identifier[n] ), identifier[depth] = identifier[depth] + literal[int] )
)
keyword[return] identifier[result]
|
def traverse_nodes(self, node_set, depth=0):
"""BFS traversal of nodes that returns name traversal as large string.
Args:
node_set: Set of input nodes to begin traversal.
depth: Current traversal depth for child node viewing.
Returns:
type: String containing tabbed traversal view.
"""
tab = ' '
result = list()
for n in node_set:
repr = n if self.nodes[n]['type'] == 'variable' else f"{n}{inspect.signature(self.nodes[n]['lambda_fn'])}"
result.append(f'{tab * depth}{repr}')
result.extend(self.traverse_nodes(self.successors(n), depth=depth + 1)) # depends on [control=['for'], data=['n']]
return result
|
def calcfluxscale(d, imstd_med, flagfrac_med):
""" Given state dict and noise properties, estimate flux scale at the VLA
imstd and flagfrac are expected to be median (typical) values from sample in merged noise pkl.
"""
# useful functions and VLA parameters
sensitivity = lambda sefd, dt, bw, eta, nbl, npol: sefd/(eta*np.sqrt(nbl*2 * dt * bw * npol))
nbl = lambda nant: nant*(nant-1)/2
eta = {'L': 0.92, 'S': 0.92, 'C': 0.8, 'X': 0.8} # correlator efficiency
sefd = {'L': 420, 'S': 370, 'C': 310, 'X': 250} # fixed to match exposure calculator int time to 100 microJy.
bw = sum([d['spw_nchan_select'][i]*d['spw_chansize'][i] for i in range(len(d['spw_chansize']))])
dt = d['inttime']
npol = d['npol']
nant = d['nants']
freq = d['freq'][0]
if (freq >= 1 and freq < 2):
band = 'L'
elif (freq >= 2 and freq < 4):
band = 'S'
elif (freq >= 4 and freq < 8):
band = 'C'
elif (freq >= 8 and freq < 12):
band = 'X'
else:
logger.warn('first channel freq ({0}) not in bands L, S, C, or X. Assuming L band.'.format(freq))
band = 'L'
goodfrac = 1 - flagfrac_med # correct for flagged data
slim_theory = sensitivity(sefd[band], dt, bw, eta[band], goodfrac*nbl(nant), npol)
fluxscale = slim_theory/imstd_med
return fluxscale
|
def function[calcfluxscale, parameter[d, imstd_med, flagfrac_med]]:
constant[ Given state dict and noise properties, estimate flux scale at the VLA
imstd and flagfrac are expected to be median (typical) values from sample in merged noise pkl.
]
variable[sensitivity] assign[=] <ast.Lambda object at 0x7da1b26073d0>
variable[nbl] assign[=] <ast.Lambda object at 0x7da1b26074f0>
variable[eta] assign[=] dictionary[[<ast.Constant object at 0x7da1b2607790>, <ast.Constant object at 0x7da1b2607760>, <ast.Constant object at 0x7da1b26077c0>, <ast.Constant object at 0x7da1b2607820>], [<ast.Constant object at 0x7da1b26077f0>, <ast.Constant object at 0x7da1b2607850>, <ast.Constant object at 0x7da1b2607880>, <ast.Constant object at 0x7da1b26078b0>]]
variable[sefd] assign[=] dictionary[[<ast.Constant object at 0x7da1b2606d70>, <ast.Constant object at 0x7da1b2606da0>, <ast.Constant object at 0x7da1b2606620>, <ast.Constant object at 0x7da1b2606680>], [<ast.Constant object at 0x7da1b2606710>, <ast.Constant object at 0x7da1b2606b30>, <ast.Constant object at 0x7da1b2606d10>, <ast.Constant object at 0x7da1b2606c50>]]
variable[bw] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da1b2606110>]]
variable[dt] assign[=] call[name[d]][constant[inttime]]
variable[npol] assign[=] call[name[d]][constant[npol]]
variable[nant] assign[=] call[name[d]][constant[nants]]
variable[freq] assign[=] call[call[name[d]][constant[freq]]][constant[0]]
if <ast.BoolOp object at 0x7da1b2606350> begin[:]
variable[band] assign[=] constant[L]
variable[goodfrac] assign[=] binary_operation[constant[1] - name[flagfrac_med]]
variable[slim_theory] assign[=] call[name[sensitivity], parameter[call[name[sefd]][name[band]], name[dt], name[bw], call[name[eta]][name[band]], binary_operation[name[goodfrac] * call[name[nbl], parameter[name[nant]]]], name[npol]]]
variable[fluxscale] assign[=] binary_operation[name[slim_theory] / name[imstd_med]]
return[name[fluxscale]]
|
keyword[def] identifier[calcfluxscale] ( identifier[d] , identifier[imstd_med] , identifier[flagfrac_med] ):
literal[string]
identifier[sensitivity] = keyword[lambda] identifier[sefd] , identifier[dt] , identifier[bw] , identifier[eta] , identifier[nbl] , identifier[npol] : identifier[sefd] /( identifier[eta] * identifier[np] . identifier[sqrt] ( identifier[nbl] * literal[int] * identifier[dt] * identifier[bw] * identifier[npol] ))
identifier[nbl] = keyword[lambda] identifier[nant] : identifier[nant] *( identifier[nant] - literal[int] )/ literal[int]
identifier[eta] ={ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] }
identifier[sefd] ={ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] }
identifier[bw] = identifier[sum] ([ identifier[d] [ literal[string] ][ identifier[i] ]* identifier[d] [ literal[string] ][ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[d] [ literal[string] ]))])
identifier[dt] = identifier[d] [ literal[string] ]
identifier[npol] = identifier[d] [ literal[string] ]
identifier[nant] = identifier[d] [ literal[string] ]
identifier[freq] = identifier[d] [ literal[string] ][ literal[int] ]
keyword[if] ( identifier[freq] >= literal[int] keyword[and] identifier[freq] < literal[int] ):
identifier[band] = literal[string]
keyword[elif] ( identifier[freq] >= literal[int] keyword[and] identifier[freq] < literal[int] ):
identifier[band] = literal[string]
keyword[elif] ( identifier[freq] >= literal[int] keyword[and] identifier[freq] < literal[int] ):
identifier[band] = literal[string]
keyword[elif] ( identifier[freq] >= literal[int] keyword[and] identifier[freq] < literal[int] ):
identifier[band] = literal[string]
keyword[else] :
identifier[logger] . identifier[warn] ( literal[string] . identifier[format] ( identifier[freq] ))
identifier[band] = literal[string]
identifier[goodfrac] = literal[int] - identifier[flagfrac_med]
identifier[slim_theory] = identifier[sensitivity] ( identifier[sefd] [ identifier[band] ], identifier[dt] , identifier[bw] , identifier[eta] [ identifier[band] ], identifier[goodfrac] * identifier[nbl] ( identifier[nant] ), identifier[npol] )
identifier[fluxscale] = identifier[slim_theory] / identifier[imstd_med]
keyword[return] identifier[fluxscale]
|
def calcfluxscale(d, imstd_med, flagfrac_med):
""" Given state dict and noise properties, estimate flux scale at the VLA
imstd and flagfrac are expected to be median (typical) values from sample in merged noise pkl.
"""
# useful functions and VLA parameters
sensitivity = lambda sefd, dt, bw, eta, nbl, npol: sefd / (eta * np.sqrt(nbl * 2 * dt * bw * npol))
nbl = lambda nant: nant * (nant - 1) / 2
eta = {'L': 0.92, 'S': 0.92, 'C': 0.8, 'X': 0.8} # correlator efficiency
sefd = {'L': 420, 'S': 370, 'C': 310, 'X': 250} # fixed to match exposure calculator int time to 100 microJy.
bw = sum([d['spw_nchan_select'][i] * d['spw_chansize'][i] for i in range(len(d['spw_chansize']))])
dt = d['inttime']
npol = d['npol']
nant = d['nants']
freq = d['freq'][0]
if freq >= 1 and freq < 2:
band = 'L' # depends on [control=['if'], data=[]]
elif freq >= 2 and freq < 4:
band = 'S' # depends on [control=['if'], data=[]]
elif freq >= 4 and freq < 8:
band = 'C' # depends on [control=['if'], data=[]]
elif freq >= 8 and freq < 12:
band = 'X' # depends on [control=['if'], data=[]]
else:
logger.warn('first channel freq ({0}) not in bands L, S, C, or X. Assuming L band.'.format(freq))
band = 'L'
goodfrac = 1 - flagfrac_med # correct for flagged data
slim_theory = sensitivity(sefd[band], dt, bw, eta[band], goodfrac * nbl(nant), npol)
fluxscale = slim_theory / imstd_med
return fluxscale
|
def getPointsForInterpolation(self,EndOfPrdvP,aNrmNow):
'''
Finds interpolation points (c,m) for the consumption function.
Parameters
----------
EndOfPrdvP : np.array
Array of end-of-period marginal values.
aNrmNow : np.array
Array of end-of-period asset values that yield the marginal values
in EndOfPrdvP.
Returns
-------
c_for_interpolation : np.array
Consumption points for interpolation.
m_for_interpolation : np.array
Corresponding market resource points for interpolation.
'''
cNrmNow = self.uPinv(EndOfPrdvP)
mNrmNow = cNrmNow + aNrmNow
# Limiting consumption is zero as m approaches mNrmMin
c_for_interpolation = np.insert(cNrmNow,0,0.,axis=-1)
m_for_interpolation = np.insert(mNrmNow,0,self.BoroCnstNat,axis=-1)
# Store these for calcvFunc
self.cNrmNow = cNrmNow
self.mNrmNow = mNrmNow
return c_for_interpolation,m_for_interpolation
|
def function[getPointsForInterpolation, parameter[self, EndOfPrdvP, aNrmNow]]:
constant[
Finds interpolation points (c,m) for the consumption function.
Parameters
----------
EndOfPrdvP : np.array
Array of end-of-period marginal values.
aNrmNow : np.array
Array of end-of-period asset values that yield the marginal values
in EndOfPrdvP.
Returns
-------
c_for_interpolation : np.array
Consumption points for interpolation.
m_for_interpolation : np.array
Corresponding market resource points for interpolation.
]
variable[cNrmNow] assign[=] call[name[self].uPinv, parameter[name[EndOfPrdvP]]]
variable[mNrmNow] assign[=] binary_operation[name[cNrmNow] + name[aNrmNow]]
variable[c_for_interpolation] assign[=] call[name[np].insert, parameter[name[cNrmNow], constant[0], constant[0.0]]]
variable[m_for_interpolation] assign[=] call[name[np].insert, parameter[name[mNrmNow], constant[0], name[self].BoroCnstNat]]
name[self].cNrmNow assign[=] name[cNrmNow]
name[self].mNrmNow assign[=] name[mNrmNow]
return[tuple[[<ast.Name object at 0x7da204345b70>, <ast.Name object at 0x7da204346ec0>]]]
|
keyword[def] identifier[getPointsForInterpolation] ( identifier[self] , identifier[EndOfPrdvP] , identifier[aNrmNow] ):
literal[string]
identifier[cNrmNow] = identifier[self] . identifier[uPinv] ( identifier[EndOfPrdvP] )
identifier[mNrmNow] = identifier[cNrmNow] + identifier[aNrmNow]
identifier[c_for_interpolation] = identifier[np] . identifier[insert] ( identifier[cNrmNow] , literal[int] , literal[int] , identifier[axis] =- literal[int] )
identifier[m_for_interpolation] = identifier[np] . identifier[insert] ( identifier[mNrmNow] , literal[int] , identifier[self] . identifier[BoroCnstNat] , identifier[axis] =- literal[int] )
identifier[self] . identifier[cNrmNow] = identifier[cNrmNow]
identifier[self] . identifier[mNrmNow] = identifier[mNrmNow]
keyword[return] identifier[c_for_interpolation] , identifier[m_for_interpolation]
|
def getPointsForInterpolation(self, EndOfPrdvP, aNrmNow):
"""
Finds interpolation points (c,m) for the consumption function.
Parameters
----------
EndOfPrdvP : np.array
Array of end-of-period marginal values.
aNrmNow : np.array
Array of end-of-period asset values that yield the marginal values
in EndOfPrdvP.
Returns
-------
c_for_interpolation : np.array
Consumption points for interpolation.
m_for_interpolation : np.array
Corresponding market resource points for interpolation.
"""
cNrmNow = self.uPinv(EndOfPrdvP)
mNrmNow = cNrmNow + aNrmNow
# Limiting consumption is zero as m approaches mNrmMin
c_for_interpolation = np.insert(cNrmNow, 0, 0.0, axis=-1)
m_for_interpolation = np.insert(mNrmNow, 0, self.BoroCnstNat, axis=-1)
# Store these for calcvFunc
self.cNrmNow = cNrmNow
self.mNrmNow = mNrmNow
return (c_for_interpolation, m_for_interpolation)
|
def on_trial_result(self, trial_runner, trial, result):
"""If bracket is finished, all trials will be stopped.
If a given trial finishes and bracket iteration is not done,
the trial will be paused and resources will be given up.
This scheduler will not start trials but will stop trials.
The current running trial will not be handled,
as the trialrunner will be given control to handle it."""
bracket, _ = self._trial_info[trial]
bracket.update_trial_stats(trial, result)
if bracket.continue_trial(trial):
return TrialScheduler.CONTINUE
action = self._process_bracket(trial_runner, bracket, trial)
return action
|
def function[on_trial_result, parameter[self, trial_runner, trial, result]]:
constant[If bracket is finished, all trials will be stopped.
If a given trial finishes and bracket iteration is not done,
the trial will be paused and resources will be given up.
This scheduler will not start trials but will stop trials.
The current running trial will not be handled,
as the trialrunner will be given control to handle it.]
<ast.Tuple object at 0x7da2041db520> assign[=] call[name[self]._trial_info][name[trial]]
call[name[bracket].update_trial_stats, parameter[name[trial], name[result]]]
if call[name[bracket].continue_trial, parameter[name[trial]]] begin[:]
return[name[TrialScheduler].CONTINUE]
variable[action] assign[=] call[name[self]._process_bracket, parameter[name[trial_runner], name[bracket], name[trial]]]
return[name[action]]
|
keyword[def] identifier[on_trial_result] ( identifier[self] , identifier[trial_runner] , identifier[trial] , identifier[result] ):
literal[string]
identifier[bracket] , identifier[_] = identifier[self] . identifier[_trial_info] [ identifier[trial] ]
identifier[bracket] . identifier[update_trial_stats] ( identifier[trial] , identifier[result] )
keyword[if] identifier[bracket] . identifier[continue_trial] ( identifier[trial] ):
keyword[return] identifier[TrialScheduler] . identifier[CONTINUE]
identifier[action] = identifier[self] . identifier[_process_bracket] ( identifier[trial_runner] , identifier[bracket] , identifier[trial] )
keyword[return] identifier[action]
|
def on_trial_result(self, trial_runner, trial, result):
"""If bracket is finished, all trials will be stopped.
If a given trial finishes and bracket iteration is not done,
the trial will be paused and resources will be given up.
This scheduler will not start trials but will stop trials.
The current running trial will not be handled,
as the trialrunner will be given control to handle it."""
(bracket, _) = self._trial_info[trial]
bracket.update_trial_stats(trial, result)
if bracket.continue_trial(trial):
return TrialScheduler.CONTINUE # depends on [control=['if'], data=[]]
action = self._process_bracket(trial_runner, bracket, trial)
return action
|
def multi_buffering(layer, radii, callback=None):
"""Buffer a vector layer using many buffers (for volcanoes or rivers).
This processing algorithm will keep the original attribute table and
will add a new one for the hazard class name according to
safe.definitions.fields.hazard_value_field.
radii = OrderedDict()
radii[500] = 'high'
radii[1000] = 'medium'
radii[2000] = 'low'
Issue https://github.com/inasafe/inasafe/issues/3185
:param layer: The layer to polygonize.
:type layer: QgsVectorLayer
:param radii: A dictionary of radius.
:type radii: OrderedDict
:param callback: A function to all to indicate progress. The function
should accept params 'current' (int), 'maximum' (int) and 'step' (str).
Defaults to None.
:type callback: function
:return: The buffered vector layer.
:rtype: QgsVectorLayer
"""
# Layer output
output_layer_name = buffer_steps['output_layer_name']
processing_step = buffer_steps['step_name']
input_crs = layer.crs()
feature_count = layer.featureCount()
fields = layer.fields()
# Set the new hazard class field.
new_field = create_field_from_definition(hazard_class_field)
fields.append(new_field)
# Set the new buffer distances field.
new_field = create_field_from_definition(buffer_distance_field)
fields.append(new_field)
buffered = create_memory_layer(
output_layer_name, QgsWkbTypes.PolygonGeometry, input_crs, fields)
buffered.startEditing()
# Reproject features if needed into UTM if the layer is in 4326.
if layer.crs().authid() == 'EPSG:4326':
center = layer.extent().center()
utm = QgsCoordinateReferenceSystem(
get_utm_epsg(center.x(), center.y(), input_crs))
transform = QgsCoordinateTransform(
layer.crs(), utm, QgsProject.instance())
reverse_transform = QgsCoordinateTransform(
utm, layer.crs(), QgsProject.instance())
else:
transform = None
reverse_transform = None
for i, feature in enumerate(layer.getFeatures()):
geom = QgsGeometry(feature.geometry())
if transform:
geom.transform(transform)
inner_ring = None
for radius in radii:
attributes = feature.attributes()
# We add the hazard value name to the attribute table.
attributes.append(radii[radius])
# We add the value of buffer distance to the attribute table.
attributes.append(radius)
circle = geom.buffer(radius, 30)
if inner_ring:
circle.addRing(inner_ring)
inner_ring = circle.asPolygon()[0]
new_feature = QgsFeature()
if reverse_transform:
circle.transform(reverse_transform)
new_feature.setGeometry(circle)
new_feature.setAttributes(attributes)
buffered.addFeature(new_feature)
if callback:
callback(current=i, maximum=feature_count, step=processing_step)
buffered.commitChanges()
# We transfer keywords to the output.
buffered.keywords = layer.keywords
buffered.keywords['layer_geometry'] = 'polygon'
buffered.keywords['layer_purpose'] = layer_purpose_hazard['key']
buffered.keywords['inasafe_fields'][hazard_class_field['key']] = (
hazard_class_field['field_name'])
check_layer(buffered)
return buffered
|
def function[multi_buffering, parameter[layer, radii, callback]]:
constant[Buffer a vector layer using many buffers (for volcanoes or rivers).
This processing algorithm will keep the original attribute table and
will add a new one for the hazard class name according to
safe.definitions.fields.hazard_value_field.
radii = OrderedDict()
radii[500] = 'high'
radii[1000] = 'medium'
radii[2000] = 'low'
Issue https://github.com/inasafe/inasafe/issues/3185
:param layer: The layer to polygonize.
:type layer: QgsVectorLayer
:param radii: A dictionary of radius.
:type radii: OrderedDict
:param callback: A function to all to indicate progress. The function
should accept params 'current' (int), 'maximum' (int) and 'step' (str).
Defaults to None.
:type callback: function
:return: The buffered vector layer.
:rtype: QgsVectorLayer
]
variable[output_layer_name] assign[=] call[name[buffer_steps]][constant[output_layer_name]]
variable[processing_step] assign[=] call[name[buffer_steps]][constant[step_name]]
variable[input_crs] assign[=] call[name[layer].crs, parameter[]]
variable[feature_count] assign[=] call[name[layer].featureCount, parameter[]]
variable[fields] assign[=] call[name[layer].fields, parameter[]]
variable[new_field] assign[=] call[name[create_field_from_definition], parameter[name[hazard_class_field]]]
call[name[fields].append, parameter[name[new_field]]]
variable[new_field] assign[=] call[name[create_field_from_definition], parameter[name[buffer_distance_field]]]
call[name[fields].append, parameter[name[new_field]]]
variable[buffered] assign[=] call[name[create_memory_layer], parameter[name[output_layer_name], name[QgsWkbTypes].PolygonGeometry, name[input_crs], name[fields]]]
call[name[buffered].startEditing, parameter[]]
if compare[call[call[name[layer].crs, parameter[]].authid, parameter[]] equal[==] constant[EPSG:4326]] begin[:]
variable[center] assign[=] call[call[name[layer].extent, parameter[]].center, parameter[]]
variable[utm] assign[=] call[name[QgsCoordinateReferenceSystem], parameter[call[name[get_utm_epsg], parameter[call[name[center].x, parameter[]], call[name[center].y, parameter[]], name[input_crs]]]]]
variable[transform] assign[=] call[name[QgsCoordinateTransform], parameter[call[name[layer].crs, parameter[]], name[utm], call[name[QgsProject].instance, parameter[]]]]
variable[reverse_transform] assign[=] call[name[QgsCoordinateTransform], parameter[name[utm], call[name[layer].crs, parameter[]], call[name[QgsProject].instance, parameter[]]]]
for taget[tuple[[<ast.Name object at 0x7da1b0c89300>, <ast.Name object at 0x7da1b0c8a470>]]] in starred[call[name[enumerate], parameter[call[name[layer].getFeatures, parameter[]]]]] begin[:]
variable[geom] assign[=] call[name[QgsGeometry], parameter[call[name[feature].geometry, parameter[]]]]
if name[transform] begin[:]
call[name[geom].transform, parameter[name[transform]]]
variable[inner_ring] assign[=] constant[None]
for taget[name[radius]] in starred[name[radii]] begin[:]
variable[attributes] assign[=] call[name[feature].attributes, parameter[]]
call[name[attributes].append, parameter[call[name[radii]][name[radius]]]]
call[name[attributes].append, parameter[name[radius]]]
variable[circle] assign[=] call[name[geom].buffer, parameter[name[radius], constant[30]]]
if name[inner_ring] begin[:]
call[name[circle].addRing, parameter[name[inner_ring]]]
variable[inner_ring] assign[=] call[call[name[circle].asPolygon, parameter[]]][constant[0]]
variable[new_feature] assign[=] call[name[QgsFeature], parameter[]]
if name[reverse_transform] begin[:]
call[name[circle].transform, parameter[name[reverse_transform]]]
call[name[new_feature].setGeometry, parameter[name[circle]]]
call[name[new_feature].setAttributes, parameter[name[attributes]]]
call[name[buffered].addFeature, parameter[name[new_feature]]]
if name[callback] begin[:]
call[name[callback], parameter[]]
call[name[buffered].commitChanges, parameter[]]
name[buffered].keywords assign[=] name[layer].keywords
call[name[buffered].keywords][constant[layer_geometry]] assign[=] constant[polygon]
call[name[buffered].keywords][constant[layer_purpose]] assign[=] call[name[layer_purpose_hazard]][constant[key]]
call[call[name[buffered].keywords][constant[inasafe_fields]]][call[name[hazard_class_field]][constant[key]]] assign[=] call[name[hazard_class_field]][constant[field_name]]
call[name[check_layer], parameter[name[buffered]]]
return[name[buffered]]
|
keyword[def] identifier[multi_buffering] ( identifier[layer] , identifier[radii] , identifier[callback] = keyword[None] ):
literal[string]
identifier[output_layer_name] = identifier[buffer_steps] [ literal[string] ]
identifier[processing_step] = identifier[buffer_steps] [ literal[string] ]
identifier[input_crs] = identifier[layer] . identifier[crs] ()
identifier[feature_count] = identifier[layer] . identifier[featureCount] ()
identifier[fields] = identifier[layer] . identifier[fields] ()
identifier[new_field] = identifier[create_field_from_definition] ( identifier[hazard_class_field] )
identifier[fields] . identifier[append] ( identifier[new_field] )
identifier[new_field] = identifier[create_field_from_definition] ( identifier[buffer_distance_field] )
identifier[fields] . identifier[append] ( identifier[new_field] )
identifier[buffered] = identifier[create_memory_layer] (
identifier[output_layer_name] , identifier[QgsWkbTypes] . identifier[PolygonGeometry] , identifier[input_crs] , identifier[fields] )
identifier[buffered] . identifier[startEditing] ()
keyword[if] identifier[layer] . identifier[crs] (). identifier[authid] ()== literal[string] :
identifier[center] = identifier[layer] . identifier[extent] (). identifier[center] ()
identifier[utm] = identifier[QgsCoordinateReferenceSystem] (
identifier[get_utm_epsg] ( identifier[center] . identifier[x] (), identifier[center] . identifier[y] (), identifier[input_crs] ))
identifier[transform] = identifier[QgsCoordinateTransform] (
identifier[layer] . identifier[crs] (), identifier[utm] , identifier[QgsProject] . identifier[instance] ())
identifier[reverse_transform] = identifier[QgsCoordinateTransform] (
identifier[utm] , identifier[layer] . identifier[crs] (), identifier[QgsProject] . identifier[instance] ())
keyword[else] :
identifier[transform] = keyword[None]
identifier[reverse_transform] = keyword[None]
keyword[for] identifier[i] , identifier[feature] keyword[in] identifier[enumerate] ( identifier[layer] . identifier[getFeatures] ()):
identifier[geom] = identifier[QgsGeometry] ( identifier[feature] . identifier[geometry] ())
keyword[if] identifier[transform] :
identifier[geom] . identifier[transform] ( identifier[transform] )
identifier[inner_ring] = keyword[None]
keyword[for] identifier[radius] keyword[in] identifier[radii] :
identifier[attributes] = identifier[feature] . identifier[attributes] ()
identifier[attributes] . identifier[append] ( identifier[radii] [ identifier[radius] ])
identifier[attributes] . identifier[append] ( identifier[radius] )
identifier[circle] = identifier[geom] . identifier[buffer] ( identifier[radius] , literal[int] )
keyword[if] identifier[inner_ring] :
identifier[circle] . identifier[addRing] ( identifier[inner_ring] )
identifier[inner_ring] = identifier[circle] . identifier[asPolygon] ()[ literal[int] ]
identifier[new_feature] = identifier[QgsFeature] ()
keyword[if] identifier[reverse_transform] :
identifier[circle] . identifier[transform] ( identifier[reverse_transform] )
identifier[new_feature] . identifier[setGeometry] ( identifier[circle] )
identifier[new_feature] . identifier[setAttributes] ( identifier[attributes] )
identifier[buffered] . identifier[addFeature] ( identifier[new_feature] )
keyword[if] identifier[callback] :
identifier[callback] ( identifier[current] = identifier[i] , identifier[maximum] = identifier[feature_count] , identifier[step] = identifier[processing_step] )
identifier[buffered] . identifier[commitChanges] ()
identifier[buffered] . identifier[keywords] = identifier[layer] . identifier[keywords]
identifier[buffered] . identifier[keywords] [ literal[string] ]= literal[string]
identifier[buffered] . identifier[keywords] [ literal[string] ]= identifier[layer_purpose_hazard] [ literal[string] ]
identifier[buffered] . identifier[keywords] [ literal[string] ][ identifier[hazard_class_field] [ literal[string] ]]=(
identifier[hazard_class_field] [ literal[string] ])
identifier[check_layer] ( identifier[buffered] )
keyword[return] identifier[buffered]
|
def multi_buffering(layer, radii, callback=None):
"""Buffer a vector layer using many buffers (for volcanoes or rivers).
This processing algorithm will keep the original attribute table and
will add a new one for the hazard class name according to
safe.definitions.fields.hazard_value_field.
radii = OrderedDict()
radii[500] = 'high'
radii[1000] = 'medium'
radii[2000] = 'low'
Issue https://github.com/inasafe/inasafe/issues/3185
:param layer: The layer to polygonize.
:type layer: QgsVectorLayer
:param radii: A dictionary of radius.
:type radii: OrderedDict
:param callback: A function to all to indicate progress. The function
should accept params 'current' (int), 'maximum' (int) and 'step' (str).
Defaults to None.
:type callback: function
:return: The buffered vector layer.
:rtype: QgsVectorLayer
"""
# Layer output
output_layer_name = buffer_steps['output_layer_name']
processing_step = buffer_steps['step_name']
input_crs = layer.crs()
feature_count = layer.featureCount()
fields = layer.fields()
# Set the new hazard class field.
new_field = create_field_from_definition(hazard_class_field)
fields.append(new_field)
# Set the new buffer distances field.
new_field = create_field_from_definition(buffer_distance_field)
fields.append(new_field)
buffered = create_memory_layer(output_layer_name, QgsWkbTypes.PolygonGeometry, input_crs, fields)
buffered.startEditing()
# Reproject features if needed into UTM if the layer is in 4326.
if layer.crs().authid() == 'EPSG:4326':
center = layer.extent().center()
utm = QgsCoordinateReferenceSystem(get_utm_epsg(center.x(), center.y(), input_crs))
transform = QgsCoordinateTransform(layer.crs(), utm, QgsProject.instance())
reverse_transform = QgsCoordinateTransform(utm, layer.crs(), QgsProject.instance()) # depends on [control=['if'], data=[]]
else:
transform = None
reverse_transform = None
for (i, feature) in enumerate(layer.getFeatures()):
geom = QgsGeometry(feature.geometry())
if transform:
geom.transform(transform) # depends on [control=['if'], data=[]]
inner_ring = None
for radius in radii:
attributes = feature.attributes()
# We add the hazard value name to the attribute table.
attributes.append(radii[radius])
# We add the value of buffer distance to the attribute table.
attributes.append(radius)
circle = geom.buffer(radius, 30)
if inner_ring:
circle.addRing(inner_ring) # depends on [control=['if'], data=[]]
inner_ring = circle.asPolygon()[0]
new_feature = QgsFeature()
if reverse_transform:
circle.transform(reverse_transform) # depends on [control=['if'], data=[]]
new_feature.setGeometry(circle)
new_feature.setAttributes(attributes)
buffered.addFeature(new_feature) # depends on [control=['for'], data=['radius']]
if callback:
callback(current=i, maximum=feature_count, step=processing_step) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
buffered.commitChanges()
# We transfer keywords to the output.
buffered.keywords = layer.keywords
buffered.keywords['layer_geometry'] = 'polygon'
buffered.keywords['layer_purpose'] = layer_purpose_hazard['key']
buffered.keywords['inasafe_fields'][hazard_class_field['key']] = hazard_class_field['field_name']
check_layer(buffered)
return buffered
|
def main(argv=sys.argv[1:]):
"""Parse argument and start main program."""
args = docopt(__doc__, argv=argv,
version=pkg_resources.require('buienradar')[0].version)
level = logging.ERROR
if args['-v']:
level = logging.INFO
if args['-v'] == 2:
level = logging.DEBUG
logging.basicConfig(level=level)
log = logging.getLogger(__name__)
log.info("Start...")
latitude = float(args['--latitude'])
longitude = float(args['--longitude'])
timeframe = int(args['--timeframe'])
usexml = False
if args['--usexml']:
usexml = True
result = get_data(latitude, longitude, usexml)
if result[SUCCESS]:
log.debug("Retrieved data:\n%s", result)
result = parse_data(result[CONTENT], result[RAINCONTENT],
latitude, longitude, timeframe, usexml)
log.info("result: %s", result)
print(result)
else:
log.error("Retrieving weather data was not successfull (%s)",
result[MESSAGE])
|
def function[main, parameter[argv]]:
constant[Parse argument and start main program.]
variable[args] assign[=] call[name[docopt], parameter[name[__doc__]]]
variable[level] assign[=] name[logging].ERROR
if call[name[args]][constant[-v]] begin[:]
variable[level] assign[=] name[logging].INFO
if compare[call[name[args]][constant[-v]] equal[==] constant[2]] begin[:]
variable[level] assign[=] name[logging].DEBUG
call[name[logging].basicConfig, parameter[]]
variable[log] assign[=] call[name[logging].getLogger, parameter[name[__name__]]]
call[name[log].info, parameter[constant[Start...]]]
variable[latitude] assign[=] call[name[float], parameter[call[name[args]][constant[--latitude]]]]
variable[longitude] assign[=] call[name[float], parameter[call[name[args]][constant[--longitude]]]]
variable[timeframe] assign[=] call[name[int], parameter[call[name[args]][constant[--timeframe]]]]
variable[usexml] assign[=] constant[False]
if call[name[args]][constant[--usexml]] begin[:]
variable[usexml] assign[=] constant[True]
variable[result] assign[=] call[name[get_data], parameter[name[latitude], name[longitude], name[usexml]]]
if call[name[result]][name[SUCCESS]] begin[:]
call[name[log].debug, parameter[constant[Retrieved data:
%s], name[result]]]
variable[result] assign[=] call[name[parse_data], parameter[call[name[result]][name[CONTENT]], call[name[result]][name[RAINCONTENT]], name[latitude], name[longitude], name[timeframe], name[usexml]]]
call[name[log].info, parameter[constant[result: %s], name[result]]]
call[name[print], parameter[name[result]]]
|
keyword[def] identifier[main] ( identifier[argv] = identifier[sys] . identifier[argv] [ literal[int] :]):
literal[string]
identifier[args] = identifier[docopt] ( identifier[__doc__] , identifier[argv] = identifier[argv] ,
identifier[version] = identifier[pkg_resources] . identifier[require] ( literal[string] )[ literal[int] ]. identifier[version] )
identifier[level] = identifier[logging] . identifier[ERROR]
keyword[if] identifier[args] [ literal[string] ]:
identifier[level] = identifier[logging] . identifier[INFO]
keyword[if] identifier[args] [ literal[string] ]== literal[int] :
identifier[level] = identifier[logging] . identifier[DEBUG]
identifier[logging] . identifier[basicConfig] ( identifier[level] = identifier[level] )
identifier[log] = identifier[logging] . identifier[getLogger] ( identifier[__name__] )
identifier[log] . identifier[info] ( literal[string] )
identifier[latitude] = identifier[float] ( identifier[args] [ literal[string] ])
identifier[longitude] = identifier[float] ( identifier[args] [ literal[string] ])
identifier[timeframe] = identifier[int] ( identifier[args] [ literal[string] ])
identifier[usexml] = keyword[False]
keyword[if] identifier[args] [ literal[string] ]:
identifier[usexml] = keyword[True]
identifier[result] = identifier[get_data] ( identifier[latitude] , identifier[longitude] , identifier[usexml] )
keyword[if] identifier[result] [ identifier[SUCCESS] ]:
identifier[log] . identifier[debug] ( literal[string] , identifier[result] )
identifier[result] = identifier[parse_data] ( identifier[result] [ identifier[CONTENT] ], identifier[result] [ identifier[RAINCONTENT] ],
identifier[latitude] , identifier[longitude] , identifier[timeframe] , identifier[usexml] )
identifier[log] . identifier[info] ( literal[string] , identifier[result] )
identifier[print] ( identifier[result] )
keyword[else] :
identifier[log] . identifier[error] ( literal[string] ,
identifier[result] [ identifier[MESSAGE] ])
|
def main(argv=sys.argv[1:]):
"""Parse argument and start main program."""
args = docopt(__doc__, argv=argv, version=pkg_resources.require('buienradar')[0].version)
level = logging.ERROR
if args['-v']:
level = logging.INFO # depends on [control=['if'], data=[]]
if args['-v'] == 2:
level = logging.DEBUG # depends on [control=['if'], data=[]]
logging.basicConfig(level=level)
log = logging.getLogger(__name__)
log.info('Start...')
latitude = float(args['--latitude'])
longitude = float(args['--longitude'])
timeframe = int(args['--timeframe'])
usexml = False
if args['--usexml']:
usexml = True # depends on [control=['if'], data=[]]
result = get_data(latitude, longitude, usexml)
if result[SUCCESS]:
log.debug('Retrieved data:\n%s', result)
result = parse_data(result[CONTENT], result[RAINCONTENT], latitude, longitude, timeframe, usexml)
log.info('result: %s', result)
print(result) # depends on [control=['if'], data=[]]
else:
log.error('Retrieving weather data was not successfull (%s)', result[MESSAGE])
|
def exit_hook(callable, once=True):
r"""A decorator that makes the decorated function to run while ec exits.
Args:
callable (callable): The target callable.
once (bool): Avoids adding a func to the hooks, if it has been added already. Defaults to True.
Note:
Hooks are processedd in a LIFO order.
"""
if once and callable in ExitHooks:
return
ExitHooks.append(callable)
|
def function[exit_hook, parameter[callable, once]]:
constant[A decorator that makes the decorated function to run while ec exits.
Args:
callable (callable): The target callable.
once (bool): Avoids adding a func to the hooks, if it has been added already. Defaults to True.
Note:
Hooks are processedd in a LIFO order.
]
if <ast.BoolOp object at 0x7da1b2345750> begin[:]
return[None]
call[name[ExitHooks].append, parameter[name[callable]]]
|
keyword[def] identifier[exit_hook] ( identifier[callable] , identifier[once] = keyword[True] ):
literal[string]
keyword[if] identifier[once] keyword[and] identifier[callable] keyword[in] identifier[ExitHooks] :
keyword[return]
identifier[ExitHooks] . identifier[append] ( identifier[callable] )
|
def exit_hook(callable, once=True):
"""A decorator that makes the decorated function to run while ec exits.
Args:
callable (callable): The target callable.
once (bool): Avoids adding a func to the hooks, if it has been added already. Defaults to True.
Note:
Hooks are processedd in a LIFO order.
"""
if once and callable in ExitHooks:
return # depends on [control=['if'], data=[]]
ExitHooks.append(callable)
|
def shift(self, count=1):
"""
Shift the view a specified number of times.
:param count: The count of times to shift the view.
"""
if self:
self._index = (self._index + count) % len(self)
else:
self._index = 0
|
def function[shift, parameter[self, count]]:
constant[
Shift the view a specified number of times.
:param count: The count of times to shift the view.
]
if name[self] begin[:]
name[self]._index assign[=] binary_operation[binary_operation[name[self]._index + name[count]] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[self]]]]
|
keyword[def] identifier[shift] ( identifier[self] , identifier[count] = literal[int] ):
literal[string]
keyword[if] identifier[self] :
identifier[self] . identifier[_index] =( identifier[self] . identifier[_index] + identifier[count] )% identifier[len] ( identifier[self] )
keyword[else] :
identifier[self] . identifier[_index] = literal[int]
|
def shift(self, count=1):
"""
Shift the view a specified number of times.
:param count: The count of times to shift the view.
"""
if self:
self._index = (self._index + count) % len(self) # depends on [control=['if'], data=[]]
else:
self._index = 0
|
def listdir(self, name, **kwargs):
"""
Returns a list of the files under the specified path.
This is different from list as it will only give you files under the
current directory, much like ls.
name must be in the form of `s3://bucket/prefix/`
Parameters
----------
keys: optional
if True then this will return the actual boto keys for files
that are encountered
objects: optional
if True then this will return the actual boto objects for
files or prefixes that are encountered
"""
assert self._is_s3(name), "name must be in form s3://bucket/prefix/"
if not name.endswith('/'):
name += "/"
return self.list(name, delimiter='/', **kwargs)
|
def function[listdir, parameter[self, name]]:
constant[
Returns a list of the files under the specified path.
This is different from list as it will only give you files under the
current directory, much like ls.
name must be in the form of `s3://bucket/prefix/`
Parameters
----------
keys: optional
if True then this will return the actual boto keys for files
that are encountered
objects: optional
if True then this will return the actual boto objects for
files or prefixes that are encountered
]
assert[call[name[self]._is_s3, parameter[name[name]]]]
if <ast.UnaryOp object at 0x7da1b1b162c0> begin[:]
<ast.AugAssign object at 0x7da1b1b16140>
return[call[name[self].list, parameter[name[name]]]]
|
keyword[def] identifier[listdir] ( identifier[self] , identifier[name] ,** identifier[kwargs] ):
literal[string]
keyword[assert] identifier[self] . identifier[_is_s3] ( identifier[name] ), literal[string]
keyword[if] keyword[not] identifier[name] . identifier[endswith] ( literal[string] ):
identifier[name] += literal[string]
keyword[return] identifier[self] . identifier[list] ( identifier[name] , identifier[delimiter] = literal[string] ,** identifier[kwargs] )
|
def listdir(self, name, **kwargs):
"""
Returns a list of the files under the specified path.
This is different from list as it will only give you files under the
current directory, much like ls.
name must be in the form of `s3://bucket/prefix/`
Parameters
----------
keys: optional
if True then this will return the actual boto keys for files
that are encountered
objects: optional
if True then this will return the actual boto objects for
files or prefixes that are encountered
"""
assert self._is_s3(name), 'name must be in form s3://bucket/prefix/'
if not name.endswith('/'):
name += '/' # depends on [control=['if'], data=[]]
return self.list(name, delimiter='/', **kwargs)
|
def elem_match(self, value):
''' This method does two things depending on the context:
1. In the context of a query expression it:
Creates a query expression to do an $elemMatch on the selected
field. If the type of this field is a DocumentField the value
can be either a QueryExpression using that Document's fields OR
you can use a dict for raw mongo.
See the mongo documentation for thorough treatment of
elemMatch:
http://docs.mongodb.org/manual/reference/operator/elemMatch/
2. In the context of choosing fields in a query.fields() expr:
Sets the field to use elemMatch, so only the matching elements
of a list are used. See the mongo docs for more details:
http://docs.mongodb.org/manual/reference/projection/elemMatch/
'''
self.__is_elem_match = True
if not self.__type.is_sequence_field:
raise BadQueryException('elem_match called on a non-sequence '
'field: ' + str(self))
if isinstance(value, dict):
self.__fields_expr = { '$elemMatch' : value}
return ElemMatchQueryExpression(self, {self : self.__fields_expr })
elif isinstance(value, QueryExpression):
self.__fields_expr = { '$elemMatch' : value.obj }
e = ElemMatchQueryExpression(self, {
self : self.__fields_expr
})
return e
raise BadQueryException('elem_match requires a QueryExpression '
'(to be typesafe) or a dict (which is '
'not type safe)')
|
def function[elem_match, parameter[self, value]]:
constant[ This method does two things depending on the context:
1. In the context of a query expression it:
Creates a query expression to do an $elemMatch on the selected
field. If the type of this field is a DocumentField the value
can be either a QueryExpression using that Document's fields OR
you can use a dict for raw mongo.
See the mongo documentation for thorough treatment of
elemMatch:
http://docs.mongodb.org/manual/reference/operator/elemMatch/
2. In the context of choosing fields in a query.fields() expr:
Sets the field to use elemMatch, so only the matching elements
of a list are used. See the mongo docs for more details:
http://docs.mongodb.org/manual/reference/projection/elemMatch/
]
name[self].__is_elem_match assign[=] constant[True]
if <ast.UnaryOp object at 0x7da18f723f10> begin[:]
<ast.Raise object at 0x7da204565270>
if call[name[isinstance], parameter[name[value], name[dict]]] begin[:]
name[self].__fields_expr assign[=] dictionary[[<ast.Constant object at 0x7da204567610>], [<ast.Name object at 0x7da2045671c0>]]
return[call[name[ElemMatchQueryExpression], parameter[name[self], dictionary[[<ast.Name object at 0x7da204565ba0>], [<ast.Attribute object at 0x7da204564bb0>]]]]]
<ast.Raise object at 0x7da2047eb7f0>
|
keyword[def] identifier[elem_match] ( identifier[self] , identifier[value] ):
literal[string]
identifier[self] . identifier[__is_elem_match] = keyword[True]
keyword[if] keyword[not] identifier[self] . identifier[__type] . identifier[is_sequence_field] :
keyword[raise] identifier[BadQueryException] ( literal[string]
literal[string] + identifier[str] ( identifier[self] ))
keyword[if] identifier[isinstance] ( identifier[value] , identifier[dict] ):
identifier[self] . identifier[__fields_expr] ={ literal[string] : identifier[value] }
keyword[return] identifier[ElemMatchQueryExpression] ( identifier[self] ,{ identifier[self] : identifier[self] . identifier[__fields_expr] })
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[QueryExpression] ):
identifier[self] . identifier[__fields_expr] ={ literal[string] : identifier[value] . identifier[obj] }
identifier[e] = identifier[ElemMatchQueryExpression] ( identifier[self] ,{
identifier[self] : identifier[self] . identifier[__fields_expr]
})
keyword[return] identifier[e]
keyword[raise] identifier[BadQueryException] ( literal[string]
literal[string]
literal[string] )
|
def elem_match(self, value):
""" This method does two things depending on the context:
1. In the context of a query expression it:
Creates a query expression to do an $elemMatch on the selected
field. If the type of this field is a DocumentField the value
can be either a QueryExpression using that Document's fields OR
you can use a dict for raw mongo.
See the mongo documentation for thorough treatment of
elemMatch:
http://docs.mongodb.org/manual/reference/operator/elemMatch/
2. In the context of choosing fields in a query.fields() expr:
Sets the field to use elemMatch, so only the matching elements
of a list are used. See the mongo docs for more details:
http://docs.mongodb.org/manual/reference/projection/elemMatch/
"""
self.__is_elem_match = True
if not self.__type.is_sequence_field:
raise BadQueryException('elem_match called on a non-sequence field: ' + str(self)) # depends on [control=['if'], data=[]]
if isinstance(value, dict):
self.__fields_expr = {'$elemMatch': value}
return ElemMatchQueryExpression(self, {self: self.__fields_expr}) # depends on [control=['if'], data=[]]
elif isinstance(value, QueryExpression):
self.__fields_expr = {'$elemMatch': value.obj}
e = ElemMatchQueryExpression(self, {self: self.__fields_expr})
return e # depends on [control=['if'], data=[]]
raise BadQueryException('elem_match requires a QueryExpression (to be typesafe) or a dict (which is not type safe)')
|
def calculate_colors(self, values):
"""Return a list (or list of lists) of colors based on input values."""
# set domain if it is not set
_flattenedList = list(flatten(values))
if not self.is_domain_set:
self.set_domain(_flattenedList)
_flattenedColors = range(len(_flattenedList))
for count, value in enumerate(_flattenedList):
_flattenedColors[count] = self.calculate_color(value)
return unflatten(values, iter(_flattenedColors))
|
def function[calculate_colors, parameter[self, values]]:
constant[Return a list (or list of lists) of colors based on input values.]
variable[_flattenedList] assign[=] call[name[list], parameter[call[name[flatten], parameter[name[values]]]]]
if <ast.UnaryOp object at 0x7da1b1224d60> begin[:]
call[name[self].set_domain, parameter[name[_flattenedList]]]
variable[_flattenedColors] assign[=] call[name[range], parameter[call[name[len], parameter[name[_flattenedList]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b1225960>, <ast.Name object at 0x7da1b12258d0>]]] in starred[call[name[enumerate], parameter[name[_flattenedList]]]] begin[:]
call[name[_flattenedColors]][name[count]] assign[=] call[name[self].calculate_color, parameter[name[value]]]
return[call[name[unflatten], parameter[name[values], call[name[iter], parameter[name[_flattenedColors]]]]]]
|
keyword[def] identifier[calculate_colors] ( identifier[self] , identifier[values] ):
literal[string]
identifier[_flattenedList] = identifier[list] ( identifier[flatten] ( identifier[values] ))
keyword[if] keyword[not] identifier[self] . identifier[is_domain_set] :
identifier[self] . identifier[set_domain] ( identifier[_flattenedList] )
identifier[_flattenedColors] = identifier[range] ( identifier[len] ( identifier[_flattenedList] ))
keyword[for] identifier[count] , identifier[value] keyword[in] identifier[enumerate] ( identifier[_flattenedList] ):
identifier[_flattenedColors] [ identifier[count] ]= identifier[self] . identifier[calculate_color] ( identifier[value] )
keyword[return] identifier[unflatten] ( identifier[values] , identifier[iter] ( identifier[_flattenedColors] ))
|
def calculate_colors(self, values):
"""Return a list (or list of lists) of colors based on input values."""
# set domain if it is not set
_flattenedList = list(flatten(values))
if not self.is_domain_set:
self.set_domain(_flattenedList) # depends on [control=['if'], data=[]]
_flattenedColors = range(len(_flattenedList))
for (count, value) in enumerate(_flattenedList):
_flattenedColors[count] = self.calculate_color(value) # depends on [control=['for'], data=[]]
return unflatten(values, iter(_flattenedColors))
|
def _no_mute_on_stop_playback(self):
""" make sure vlc does not stop muted """
if self.ctrl_c_pressed:
return
if self.isPlaying():
if self.actual_volume == -1:
self._get_volume()
while self.actual_volume == -1:
pass
if self.actual_volume == 0:
self.actual_volume = int(self.max_volume*0.25)
self._sendCommand('volume {}\n'.format(self.actual_volume))
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Unmuting VLC on exit: {} (25%)'.format(self.actual_volume))
elif self.muted:
if self.actual_volume > 0:
self._sendCommand('volume {}\n'.format(self.actual_volume))
if logger.isEnabledFor(logging.DEBUG):
logger.debug('VLC volume restored on exit: {0} ({1}%)'.format(self.actual_volume, int(100 * self.actual_volume / self.max_volume)))
self.show_volume = True
|
def function[_no_mute_on_stop_playback, parameter[self]]:
constant[ make sure vlc does not stop muted ]
if name[self].ctrl_c_pressed begin[:]
return[None]
if call[name[self].isPlaying, parameter[]] begin[:]
if compare[name[self].actual_volume equal[==] <ast.UnaryOp object at 0x7da1b100d300>] begin[:]
call[name[self]._get_volume, parameter[]]
while compare[name[self].actual_volume equal[==] <ast.UnaryOp object at 0x7da1b100ed70>] begin[:]
pass
if compare[name[self].actual_volume equal[==] constant[0]] begin[:]
name[self].actual_volume assign[=] call[name[int], parameter[binary_operation[name[self].max_volume * constant[0.25]]]]
call[name[self]._sendCommand, parameter[call[constant[volume {}
].format, parameter[name[self].actual_volume]]]]
if call[name[logger].isEnabledFor, parameter[name[logging].DEBUG]] begin[:]
call[name[logger].debug, parameter[call[constant[Unmuting VLC on exit: {} (25%)].format, parameter[name[self].actual_volume]]]]
name[self].show_volume assign[=] constant[True]
|
keyword[def] identifier[_no_mute_on_stop_playback] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[ctrl_c_pressed] :
keyword[return]
keyword[if] identifier[self] . identifier[isPlaying] ():
keyword[if] identifier[self] . identifier[actual_volume] ==- literal[int] :
identifier[self] . identifier[_get_volume] ()
keyword[while] identifier[self] . identifier[actual_volume] ==- literal[int] :
keyword[pass]
keyword[if] identifier[self] . identifier[actual_volume] == literal[int] :
identifier[self] . identifier[actual_volume] = identifier[int] ( identifier[self] . identifier[max_volume] * literal[int] )
identifier[self] . identifier[_sendCommand] ( literal[string] . identifier[format] ( identifier[self] . identifier[actual_volume] ))
keyword[if] identifier[logger] . identifier[isEnabledFor] ( identifier[logging] . identifier[DEBUG] ):
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[actual_volume] ))
keyword[elif] identifier[self] . identifier[muted] :
keyword[if] identifier[self] . identifier[actual_volume] > literal[int] :
identifier[self] . identifier[_sendCommand] ( literal[string] . identifier[format] ( identifier[self] . identifier[actual_volume] ))
keyword[if] identifier[logger] . identifier[isEnabledFor] ( identifier[logging] . identifier[DEBUG] ):
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[actual_volume] , identifier[int] ( literal[int] * identifier[self] . identifier[actual_volume] / identifier[self] . identifier[max_volume] )))
identifier[self] . identifier[show_volume] = keyword[True]
|
def _no_mute_on_stop_playback(self):
""" make sure vlc does not stop muted """
if self.ctrl_c_pressed:
return # depends on [control=['if'], data=[]]
if self.isPlaying():
if self.actual_volume == -1:
self._get_volume()
while self.actual_volume == -1:
pass # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
if self.actual_volume == 0:
self.actual_volume = int(self.max_volume * 0.25)
self._sendCommand('volume {}\n'.format(self.actual_volume))
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Unmuting VLC on exit: {} (25%)'.format(self.actual_volume)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif self.muted:
if self.actual_volume > 0:
self._sendCommand('volume {}\n'.format(self.actual_volume))
if logger.isEnabledFor(logging.DEBUG):
logger.debug('VLC volume restored on exit: {0} ({1}%)'.format(self.actual_volume, int(100 * self.actual_volume / self.max_volume))) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
self.show_volume = True # depends on [control=['if'], data=[]]
|
def db_list(user=None, host=None, port=None, maintenance_db=None,
password=None, runas=None):
'''
Return dictionary with information about databases of a Postgres server.
CLI Example:
.. code-block:: bash
salt '*' postgres.db_list
'''
ret = {}
query = (
'SELECT datname as "Name", pga.rolname as "Owner", '
'pg_encoding_to_char(encoding) as "Encoding", '
'datcollate as "Collate", datctype as "Ctype", '
'datacl as "Access privileges", spcname as "Tablespace" '
'FROM pg_database pgd, pg_roles pga, pg_tablespace pgts '
'WHERE pga.oid = pgd.datdba AND pgts.oid = pgd.dattablespace'
)
rows = psql_query(query, runas=runas, host=host, user=user,
port=port, maintenance_db=maintenance_db,
password=password)
for row in rows:
ret[row['Name']] = row
ret[row['Name']].pop('Name')
return ret
|
def function[db_list, parameter[user, host, port, maintenance_db, password, runas]]:
constant[
Return dictionary with information about databases of a Postgres server.
CLI Example:
.. code-block:: bash
salt '*' postgres.db_list
]
variable[ret] assign[=] dictionary[[], []]
variable[query] assign[=] constant[SELECT datname as "Name", pga.rolname as "Owner", pg_encoding_to_char(encoding) as "Encoding", datcollate as "Collate", datctype as "Ctype", datacl as "Access privileges", spcname as "Tablespace" FROM pg_database pgd, pg_roles pga, pg_tablespace pgts WHERE pga.oid = pgd.datdba AND pgts.oid = pgd.dattablespace]
variable[rows] assign[=] call[name[psql_query], parameter[name[query]]]
for taget[name[row]] in starred[name[rows]] begin[:]
call[name[ret]][call[name[row]][constant[Name]]] assign[=] name[row]
call[call[name[ret]][call[name[row]][constant[Name]]].pop, parameter[constant[Name]]]
return[name[ret]]
|
keyword[def] identifier[db_list] ( identifier[user] = keyword[None] , identifier[host] = keyword[None] , identifier[port] = keyword[None] , identifier[maintenance_db] = keyword[None] ,
identifier[password] = keyword[None] , identifier[runas] = keyword[None] ):
literal[string]
identifier[ret] ={}
identifier[query] =(
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
)
identifier[rows] = identifier[psql_query] ( identifier[query] , identifier[runas] = identifier[runas] , identifier[host] = identifier[host] , identifier[user] = identifier[user] ,
identifier[port] = identifier[port] , identifier[maintenance_db] = identifier[maintenance_db] ,
identifier[password] = identifier[password] )
keyword[for] identifier[row] keyword[in] identifier[rows] :
identifier[ret] [ identifier[row] [ literal[string] ]]= identifier[row]
identifier[ret] [ identifier[row] [ literal[string] ]]. identifier[pop] ( literal[string] )
keyword[return] identifier[ret]
|
def db_list(user=None, host=None, port=None, maintenance_db=None, password=None, runas=None):
"""
Return dictionary with information about databases of a Postgres server.
CLI Example:
.. code-block:: bash
salt '*' postgres.db_list
"""
ret = {}
query = 'SELECT datname as "Name", pga.rolname as "Owner", pg_encoding_to_char(encoding) as "Encoding", datcollate as "Collate", datctype as "Ctype", datacl as "Access privileges", spcname as "Tablespace" FROM pg_database pgd, pg_roles pga, pg_tablespace pgts WHERE pga.oid = pgd.datdba AND pgts.oid = pgd.dattablespace'
rows = psql_query(query, runas=runas, host=host, user=user, port=port, maintenance_db=maintenance_db, password=password)
for row in rows:
ret[row['Name']] = row
ret[row['Name']].pop('Name') # depends on [control=['for'], data=['row']]
return ret
|
def _request(self, url, **kwargs):
'''Inner :func:`requests.request` wrapper.
:param url: Endpoint url
:param \*\*kwargs: Keyword arguments to pass to
:func:`requests.request`.
'''
if self.method is None:
raise NotImplementedError('method must be defined on a subclass')
response = requests.request(self.method, url, **kwargs)
return self.response_class(response, sender=self)
|
def function[_request, parameter[self, url]]:
constant[Inner :func:`requests.request` wrapper.
:param url: Endpoint url
:param \*\*kwargs: Keyword arguments to pass to
:func:`requests.request`.
]
if compare[name[self].method is constant[None]] begin[:]
<ast.Raise object at 0x7da20c7949d0>
variable[response] assign[=] call[name[requests].request, parameter[name[self].method, name[url]]]
return[call[name[self].response_class, parameter[name[response]]]]
|
keyword[def] identifier[_request] ( identifier[self] , identifier[url] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[self] . identifier[method] keyword[is] keyword[None] :
keyword[raise] identifier[NotImplementedError] ( literal[string] )
identifier[response] = identifier[requests] . identifier[request] ( identifier[self] . identifier[method] , identifier[url] ,** identifier[kwargs] )
keyword[return] identifier[self] . identifier[response_class] ( identifier[response] , identifier[sender] = identifier[self] )
|
def _request(self, url, **kwargs):
"""Inner :func:`requests.request` wrapper.
:param url: Endpoint url
:param \\*\\*kwargs: Keyword arguments to pass to
:func:`requests.request`.
"""
if self.method is None:
raise NotImplementedError('method must be defined on a subclass') # depends on [control=['if'], data=[]]
response = requests.request(self.method, url, **kwargs)
return self.response_class(response, sender=self)
|
def _update_disks(disks_old_new):
'''
Changes the disk size and returns the config spec objects in a list.
The controller property cannot be updated, because controller address
identifies the disk by the unit and bus number properties.
disks_diffs
List of old and new disk properties, the properties are dictionary
objects
'''
disk_changes = []
if disks_old_new:
devs = [disk['old']['address'] for disk in disks_old_new]
log.trace('Updating disks %s', devs)
for item in disks_old_new:
current_disk = item['old']
next_disk = item['new']
difference = recursive_diff(current_disk, next_disk)
difference.ignore_unset_values = False
if difference.changed():
if next_disk['size'] < current_disk['size']:
raise salt.exceptions.VMwareSaltError(
'Disk cannot be downsized size={0} unit={1} '
'controller_key={2} '
'unit_number={3}'.format(
next_disk['size'],
next_disk['unit'],
current_disk['controller_key'],
current_disk['unit_number']))
log.trace('Virtual machine disk will be updated size=%s '
'unit=%s controller_key=%s unit_number=%s',
next_disk['size'],
next_disk['unit'],
current_disk['controller_key'],
current_disk['unit_number'])
device_config_spec = _apply_hard_disk(
current_disk['unit_number'],
current_disk['key'], 'edit',
size=next_disk['size'],
unit=next_disk['unit'],
controller_key=current_disk['controller_key'])
# The backing didn't change and we must supply one for
# reconfigure
device_config_spec.device.backing = \
current_disk['object'].backing
disk_changes.append(device_config_spec)
return disk_changes
|
def function[_update_disks, parameter[disks_old_new]]:
constant[
Changes the disk size and returns the config spec objects in a list.
The controller property cannot be updated, because controller address
identifies the disk by the unit and bus number properties.
disks_diffs
List of old and new disk properties, the properties are dictionary
objects
]
variable[disk_changes] assign[=] list[[]]
if name[disks_old_new] begin[:]
variable[devs] assign[=] <ast.ListComp object at 0x7da2054a7c10>
call[name[log].trace, parameter[constant[Updating disks %s], name[devs]]]
for taget[name[item]] in starred[name[disks_old_new]] begin[:]
variable[current_disk] assign[=] call[name[item]][constant[old]]
variable[next_disk] assign[=] call[name[item]][constant[new]]
variable[difference] assign[=] call[name[recursive_diff], parameter[name[current_disk], name[next_disk]]]
name[difference].ignore_unset_values assign[=] constant[False]
if call[name[difference].changed, parameter[]] begin[:]
if compare[call[name[next_disk]][constant[size]] less[<] call[name[current_disk]][constant[size]]] begin[:]
<ast.Raise object at 0x7da18c4cd030>
call[name[log].trace, parameter[constant[Virtual machine disk will be updated size=%s unit=%s controller_key=%s unit_number=%s], call[name[next_disk]][constant[size]], call[name[next_disk]][constant[unit]], call[name[current_disk]][constant[controller_key]], call[name[current_disk]][constant[unit_number]]]]
variable[device_config_spec] assign[=] call[name[_apply_hard_disk], parameter[call[name[current_disk]][constant[unit_number]], call[name[current_disk]][constant[key]], constant[edit]]]
name[device_config_spec].device.backing assign[=] call[name[current_disk]][constant[object]].backing
call[name[disk_changes].append, parameter[name[device_config_spec]]]
return[name[disk_changes]]
|
keyword[def] identifier[_update_disks] ( identifier[disks_old_new] ):
literal[string]
identifier[disk_changes] =[]
keyword[if] identifier[disks_old_new] :
identifier[devs] =[ identifier[disk] [ literal[string] ][ literal[string] ] keyword[for] identifier[disk] keyword[in] identifier[disks_old_new] ]
identifier[log] . identifier[trace] ( literal[string] , identifier[devs] )
keyword[for] identifier[item] keyword[in] identifier[disks_old_new] :
identifier[current_disk] = identifier[item] [ literal[string] ]
identifier[next_disk] = identifier[item] [ literal[string] ]
identifier[difference] = identifier[recursive_diff] ( identifier[current_disk] , identifier[next_disk] )
identifier[difference] . identifier[ignore_unset_values] = keyword[False]
keyword[if] identifier[difference] . identifier[changed] ():
keyword[if] identifier[next_disk] [ literal[string] ]< identifier[current_disk] [ literal[string] ]:
keyword[raise] identifier[salt] . identifier[exceptions] . identifier[VMwareSaltError] (
literal[string]
literal[string]
literal[string] . identifier[format] (
identifier[next_disk] [ literal[string] ],
identifier[next_disk] [ literal[string] ],
identifier[current_disk] [ literal[string] ],
identifier[current_disk] [ literal[string] ]))
identifier[log] . identifier[trace] ( literal[string]
literal[string] ,
identifier[next_disk] [ literal[string] ],
identifier[next_disk] [ literal[string] ],
identifier[current_disk] [ literal[string] ],
identifier[current_disk] [ literal[string] ])
identifier[device_config_spec] = identifier[_apply_hard_disk] (
identifier[current_disk] [ literal[string] ],
identifier[current_disk] [ literal[string] ], literal[string] ,
identifier[size] = identifier[next_disk] [ literal[string] ],
identifier[unit] = identifier[next_disk] [ literal[string] ],
identifier[controller_key] = identifier[current_disk] [ literal[string] ])
identifier[device_config_spec] . identifier[device] . identifier[backing] = identifier[current_disk] [ literal[string] ]. identifier[backing]
identifier[disk_changes] . identifier[append] ( identifier[device_config_spec] )
keyword[return] identifier[disk_changes]
|
def _update_disks(disks_old_new):
"""
Changes the disk size and returns the config spec objects in a list.
The controller property cannot be updated, because controller address
identifies the disk by the unit and bus number properties.
disks_diffs
List of old and new disk properties, the properties are dictionary
objects
"""
disk_changes = []
if disks_old_new:
devs = [disk['old']['address'] for disk in disks_old_new]
log.trace('Updating disks %s', devs)
for item in disks_old_new:
current_disk = item['old']
next_disk = item['new']
difference = recursive_diff(current_disk, next_disk)
difference.ignore_unset_values = False
if difference.changed():
if next_disk['size'] < current_disk['size']:
raise salt.exceptions.VMwareSaltError('Disk cannot be downsized size={0} unit={1} controller_key={2} unit_number={3}'.format(next_disk['size'], next_disk['unit'], current_disk['controller_key'], current_disk['unit_number'])) # depends on [control=['if'], data=[]]
log.trace('Virtual machine disk will be updated size=%s unit=%s controller_key=%s unit_number=%s', next_disk['size'], next_disk['unit'], current_disk['controller_key'], current_disk['unit_number'])
device_config_spec = _apply_hard_disk(current_disk['unit_number'], current_disk['key'], 'edit', size=next_disk['size'], unit=next_disk['unit'], controller_key=current_disk['controller_key'])
# The backing didn't change and we must supply one for
# reconfigure
device_config_spec.device.backing = current_disk['object'].backing
disk_changes.append(device_config_spec) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=[]]
return disk_changes
|
def print_computation_log(self, aggregate = False):
"""
Print the computation log of a simulation.
If ``aggregate`` is ``False`` (default), print the value of each computed vector.
If ``aggregate`` is ``True``, only print the minimum, maximum, and average value of each computed vector.
This mode is more suited for simulations on a large population.
"""
for line in self.computation_log(aggregate):
print(line)
|
def function[print_computation_log, parameter[self, aggregate]]:
constant[
Print the computation log of a simulation.
If ``aggregate`` is ``False`` (default), print the value of each computed vector.
If ``aggregate`` is ``True``, only print the minimum, maximum, and average value of each computed vector.
This mode is more suited for simulations on a large population.
]
for taget[name[line]] in starred[call[name[self].computation_log, parameter[name[aggregate]]]] begin[:]
call[name[print], parameter[name[line]]]
|
keyword[def] identifier[print_computation_log] ( identifier[self] , identifier[aggregate] = keyword[False] ):
literal[string]
keyword[for] identifier[line] keyword[in] identifier[self] . identifier[computation_log] ( identifier[aggregate] ):
identifier[print] ( identifier[line] )
|
def print_computation_log(self, aggregate=False):
"""
Print the computation log of a simulation.
If ``aggregate`` is ``False`` (default), print the value of each computed vector.
If ``aggregate`` is ``True``, only print the minimum, maximum, and average value of each computed vector.
This mode is more suited for simulations on a large population.
"""
for line in self.computation_log(aggregate):
print(line) # depends on [control=['for'], data=['line']]
|
def __update_membership(self):
"""!
@brief Update membership for each point in line with current cluster centers.
"""
data_difference = numpy.zeros((len(self.__centers), len(self.__data)))
for i in range(len(self.__centers)):
data_difference[i] = numpy.sum(numpy.square(self.__data - self.__centers[i]), axis=1)
for i in range(len(self.__data)):
for j in range(len(self.__centers)):
divider = sum([pow(data_difference[j][i] / data_difference[k][i], self.__degree) for k in range(len(self.__centers)) if data_difference[k][i] != 0.0])
if divider != 0.0:
self.__membership[i][j] = 1.0 / divider
else:
self.__membership[i][j] = 1.0
|
def function[__update_membership, parameter[self]]:
constant[!
@brief Update membership for each point in line with current cluster centers.
]
variable[data_difference] assign[=] call[name[numpy].zeros, parameter[tuple[[<ast.Call object at 0x7da1b01fcac0>, <ast.Call object at 0x7da1b01fece0>]]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[self].__centers]]]]] begin[:]
call[name[data_difference]][name[i]] assign[=] call[name[numpy].sum, parameter[call[name[numpy].square, parameter[binary_operation[name[self].__data - call[name[self].__centers][name[i]]]]]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[self].__data]]]]] begin[:]
for taget[name[j]] in starred[call[name[range], parameter[call[name[len], parameter[name[self].__centers]]]]] begin[:]
variable[divider] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da1b01dbdc0>]]
if compare[name[divider] not_equal[!=] constant[0.0]] begin[:]
call[call[name[self].__membership][name[i]]][name[j]] assign[=] binary_operation[constant[1.0] / name[divider]]
|
keyword[def] identifier[__update_membership] ( identifier[self] ):
literal[string]
identifier[data_difference] = identifier[numpy] . identifier[zeros] (( identifier[len] ( identifier[self] . identifier[__centers] ), identifier[len] ( identifier[self] . identifier[__data] )))
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[__centers] )):
identifier[data_difference] [ identifier[i] ]= identifier[numpy] . identifier[sum] ( identifier[numpy] . identifier[square] ( identifier[self] . identifier[__data] - identifier[self] . identifier[__centers] [ identifier[i] ]), identifier[axis] = literal[int] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[__data] )):
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[__centers] )):
identifier[divider] = identifier[sum] ([ identifier[pow] ( identifier[data_difference] [ identifier[j] ][ identifier[i] ]/ identifier[data_difference] [ identifier[k] ][ identifier[i] ], identifier[self] . identifier[__degree] ) keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[__centers] )) keyword[if] identifier[data_difference] [ identifier[k] ][ identifier[i] ]!= literal[int] ])
keyword[if] identifier[divider] != literal[int] :
identifier[self] . identifier[__membership] [ identifier[i] ][ identifier[j] ]= literal[int] / identifier[divider]
keyword[else] :
identifier[self] . identifier[__membership] [ identifier[i] ][ identifier[j] ]= literal[int]
|
def __update_membership(self):
"""!
@brief Update membership for each point in line with current cluster centers.
"""
data_difference = numpy.zeros((len(self.__centers), len(self.__data)))
for i in range(len(self.__centers)):
data_difference[i] = numpy.sum(numpy.square(self.__data - self.__centers[i]), axis=1) # depends on [control=['for'], data=['i']]
for i in range(len(self.__data)):
for j in range(len(self.__centers)):
divider = sum([pow(data_difference[j][i] / data_difference[k][i], self.__degree) for k in range(len(self.__centers)) if data_difference[k][i] != 0.0])
if divider != 0.0:
self.__membership[i][j] = 1.0 / divider # depends on [control=['if'], data=['divider']]
else:
self.__membership[i][j] = 1.0 # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']]
|
def apply(self, doc):
"""Generate MentionNgrams from a Document by parsing all of its Sentences.
:param doc: The ``Document`` to parse.
:type doc: ``Document``
:raises TypeError: If the input doc is not of type ``Document``.
"""
if not isinstance(doc, Document):
raise TypeError(
"Input Contexts to MentionNgrams.apply() must be of type Document"
)
for sentence in doc.sentences:
for ts in Ngrams.apply(self, sentence):
yield ts
|
def function[apply, parameter[self, doc]]:
constant[Generate MentionNgrams from a Document by parsing all of its Sentences.
:param doc: The ``Document`` to parse.
:type doc: ``Document``
:raises TypeError: If the input doc is not of type ``Document``.
]
if <ast.UnaryOp object at 0x7da1b1e8ed10> begin[:]
<ast.Raise object at 0x7da1b1e8e9b0>
for taget[name[sentence]] in starred[name[doc].sentences] begin[:]
for taget[name[ts]] in starred[call[name[Ngrams].apply, parameter[name[self], name[sentence]]]] begin[:]
<ast.Yield object at 0x7da1b26aed40>
|
keyword[def] identifier[apply] ( identifier[self] , identifier[doc] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[doc] , identifier[Document] ):
keyword[raise] identifier[TypeError] (
literal[string]
)
keyword[for] identifier[sentence] keyword[in] identifier[doc] . identifier[sentences] :
keyword[for] identifier[ts] keyword[in] identifier[Ngrams] . identifier[apply] ( identifier[self] , identifier[sentence] ):
keyword[yield] identifier[ts]
|
def apply(self, doc):
"""Generate MentionNgrams from a Document by parsing all of its Sentences.
:param doc: The ``Document`` to parse.
:type doc: ``Document``
:raises TypeError: If the input doc is not of type ``Document``.
"""
if not isinstance(doc, Document):
raise TypeError('Input Contexts to MentionNgrams.apply() must be of type Document') # depends on [control=['if'], data=[]]
for sentence in doc.sentences:
for ts in Ngrams.apply(self, sentence):
yield ts # depends on [control=['for'], data=['ts']] # depends on [control=['for'], data=['sentence']]
|
def upload_url(self):
"""
This method generates URLs which allow overwriting a file's content with new content. The output is suitable
for use in the ``send_data()`` method below.
Input:
* None
Output:
* A URL (string)
Example::
file = client.get_file("4ddfds", 0)
file.send_data(put_url=file.upload_url, data=open("example.txt", "rb").read())
"""
if self.put_upload_url:
return self.put_upload_url
else:
response = GettRequest().get("/files/%s/%s/upload?accesstoken=%s" % (self.sharename, self.fileid, self.user.access_token()))
if response.http_status == 200:
return response.response['puturl']
|
def function[upload_url, parameter[self]]:
constant[
This method generates URLs which allow overwriting a file's content with new content. The output is suitable
for use in the ``send_data()`` method below.
Input:
* None
Output:
* A URL (string)
Example::
file = client.get_file("4ddfds", 0)
file.send_data(put_url=file.upload_url, data=open("example.txt", "rb").read())
]
if name[self].put_upload_url begin[:]
return[name[self].put_upload_url]
|
keyword[def] identifier[upload_url] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[put_upload_url] :
keyword[return] identifier[self] . identifier[put_upload_url]
keyword[else] :
identifier[response] = identifier[GettRequest] (). identifier[get] ( literal[string] %( identifier[self] . identifier[sharename] , identifier[self] . identifier[fileid] , identifier[self] . identifier[user] . identifier[access_token] ()))
keyword[if] identifier[response] . identifier[http_status] == literal[int] :
keyword[return] identifier[response] . identifier[response] [ literal[string] ]
|
def upload_url(self):
"""
This method generates URLs which allow overwriting a file's content with new content. The output is suitable
for use in the ``send_data()`` method below.
Input:
* None
Output:
* A URL (string)
Example::
file = client.get_file("4ddfds", 0)
file.send_data(put_url=file.upload_url, data=open("example.txt", "rb").read())
"""
if self.put_upload_url:
return self.put_upload_url # depends on [control=['if'], data=[]]
else:
response = GettRequest().get('/files/%s/%s/upload?accesstoken=%s' % (self.sharename, self.fileid, self.user.access_token()))
if response.http_status == 200:
return response.response['puturl'] # depends on [control=['if'], data=[]]
|
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'examples') and self.examples is not None:
_dict['examples'] = [x._to_dict() for x in self.examples]
if hasattr(self, 'pagination') and self.pagination is not None:
_dict['pagination'] = self.pagination._to_dict()
return _dict
|
def function[_to_dict, parameter[self]]:
constant[Return a json dictionary representing this model.]
variable[_dict] assign[=] dictionary[[], []]
if <ast.BoolOp object at 0x7da204962830> begin[:]
call[name[_dict]][constant[examples]] assign[=] <ast.ListComp object at 0x7da2049605b0>
if <ast.BoolOp object at 0x7da204963820> begin[:]
call[name[_dict]][constant[pagination]] assign[=] call[name[self].pagination._to_dict, parameter[]]
return[name[_dict]]
|
keyword[def] identifier[_to_dict] ( identifier[self] ):
literal[string]
identifier[_dict] ={}
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[examples] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]=[ identifier[x] . identifier[_to_dict] () keyword[for] identifier[x] keyword[in] identifier[self] . identifier[examples] ]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[pagination] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[self] . identifier[pagination] . identifier[_to_dict] ()
keyword[return] identifier[_dict]
|
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'examples') and self.examples is not None:
_dict['examples'] = [x._to_dict() for x in self.examples] # depends on [control=['if'], data=[]]
if hasattr(self, 'pagination') and self.pagination is not None:
_dict['pagination'] = self.pagination._to_dict() # depends on [control=['if'], data=[]]
return _dict
|
def page_templates(mapping):
"""Like the *page_template* decorator but manage multiple paginations.
You can map multiple templates to *querystring_keys* using the *mapping*
dict, e.g.::
@page_templates({
'page_contents1.html': None,
'page_contents2.html': 'go_to_page',
})
def myview(request):
...
When the value of the dict is None then the default *querystring_key*
(defined in settings) is used. You can use this decorator instead of
chaining multiple *page_template* calls.
"""
def decorator(view):
@wraps(view)
def decorated(request, *args, **kwargs):
# Trust the developer: he wrote ``context.update(extra_context)``
# in his view.
extra_context = kwargs.setdefault('extra_context', {})
querystring_key = request.GET.get(QS_KEY,
request.POST.get(QS_KEY, PAGE_LABEL))
template = _get_template(querystring_key, mapping)
extra_context['page_template'] = template
# Switch the template when the request is Ajax.
if request.is_ajax() and template:
kwargs[TEMPLATE_VARNAME] = template
return view(request, *args, **kwargs)
return decorated
return decorator
|
def function[page_templates, parameter[mapping]]:
constant[Like the *page_template* decorator but manage multiple paginations.
You can map multiple templates to *querystring_keys* using the *mapping*
dict, e.g.::
@page_templates({
'page_contents1.html': None,
'page_contents2.html': 'go_to_page',
})
def myview(request):
...
When the value of the dict is None then the default *querystring_key*
(defined in settings) is used. You can use this decorator instead of
chaining multiple *page_template* calls.
]
def function[decorator, parameter[view]]:
def function[decorated, parameter[request]]:
variable[extra_context] assign[=] call[name[kwargs].setdefault, parameter[constant[extra_context], dictionary[[], []]]]
variable[querystring_key] assign[=] call[name[request].GET.get, parameter[name[QS_KEY], call[name[request].POST.get, parameter[name[QS_KEY], name[PAGE_LABEL]]]]]
variable[template] assign[=] call[name[_get_template], parameter[name[querystring_key], name[mapping]]]
call[name[extra_context]][constant[page_template]] assign[=] name[template]
if <ast.BoolOp object at 0x7da1b12a9bd0> begin[:]
call[name[kwargs]][name[TEMPLATE_VARNAME]] assign[=] name[template]
return[call[name[view], parameter[name[request], <ast.Starred object at 0x7da1b12a86d0>]]]
return[name[decorated]]
return[name[decorator]]
|
keyword[def] identifier[page_templates] ( identifier[mapping] ):
literal[string]
keyword[def] identifier[decorator] ( identifier[view] ):
@ identifier[wraps] ( identifier[view] )
keyword[def] identifier[decorated] ( identifier[request] ,* identifier[args] ,** identifier[kwargs] ):
identifier[extra_context] = identifier[kwargs] . identifier[setdefault] ( literal[string] ,{})
identifier[querystring_key] = identifier[request] . identifier[GET] . identifier[get] ( identifier[QS_KEY] ,
identifier[request] . identifier[POST] . identifier[get] ( identifier[QS_KEY] , identifier[PAGE_LABEL] ))
identifier[template] = identifier[_get_template] ( identifier[querystring_key] , identifier[mapping] )
identifier[extra_context] [ literal[string] ]= identifier[template]
keyword[if] identifier[request] . identifier[is_ajax] () keyword[and] identifier[template] :
identifier[kwargs] [ identifier[TEMPLATE_VARNAME] ]= identifier[template]
keyword[return] identifier[view] ( identifier[request] ,* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[decorated]
keyword[return] identifier[decorator]
|
def page_templates(mapping):
"""Like the *page_template* decorator but manage multiple paginations.
You can map multiple templates to *querystring_keys* using the *mapping*
dict, e.g.::
@page_templates({
'page_contents1.html': None,
'page_contents2.html': 'go_to_page',
})
def myview(request):
...
When the value of the dict is None then the default *querystring_key*
(defined in settings) is used. You can use this decorator instead of
chaining multiple *page_template* calls.
"""
def decorator(view):
@wraps(view)
def decorated(request, *args, **kwargs):
# Trust the developer: he wrote ``context.update(extra_context)``
# in his view.
extra_context = kwargs.setdefault('extra_context', {})
querystring_key = request.GET.get(QS_KEY, request.POST.get(QS_KEY, PAGE_LABEL))
template = _get_template(querystring_key, mapping)
extra_context['page_template'] = template
# Switch the template when the request is Ajax.
if request.is_ajax() and template:
kwargs[TEMPLATE_VARNAME] = template # depends on [control=['if'], data=[]]
return view(request, *args, **kwargs)
return decorated
return decorator
|
def _expand_path(path):
"""Expand both environment variables and user home in the given path."""
path = os.path.expandvars(path)
path = os.path.expanduser(path)
return path
|
def function[_expand_path, parameter[path]]:
constant[Expand both environment variables and user home in the given path.]
variable[path] assign[=] call[name[os].path.expandvars, parameter[name[path]]]
variable[path] assign[=] call[name[os].path.expanduser, parameter[name[path]]]
return[name[path]]
|
keyword[def] identifier[_expand_path] ( identifier[path] ):
literal[string]
identifier[path] = identifier[os] . identifier[path] . identifier[expandvars] ( identifier[path] )
identifier[path] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[path] )
keyword[return] identifier[path]
|
def _expand_path(path):
"""Expand both environment variables and user home in the given path."""
path = os.path.expandvars(path)
path = os.path.expanduser(path)
return path
|
def compareBIMfiles(beforeFileName, afterFileName, outputFileName):
"""Compare two BIM files for differences.
:param beforeFileName: the name of the file before modification.
:param afterFileName: the name of the file after modification.
:param outputFileName: the name of the output file (containing the
differences between the ``before`` and the ``after``
files.
:type beforeFileName: str
:type afterFileName: str
:type outputFileName: str
:returns: the number of differences between the two files.
The ``bim`` files contain the list of markers in a given dataset. The
``before`` file should have more markers than the ``after`` file. The
``after`` file should be a subset of the markers in the ``before`` file.
"""
# Creating the options
options = Dummy()
options.before = beforeFileName
options.after = afterFileName
options.out = outputFileName
# Checking the options
CompareBIM.checkArgs(options)
# Reading the BIM files
beforeBIM = CompareBIM.readBIM(options.before)
afterBIM = CompareBIM.readBIM(options.after)
# Finding the differences
CompareBIM.compareSNPs(beforeBIM, afterBIM, options.out)
return beforeBIM - afterBIM
|
def function[compareBIMfiles, parameter[beforeFileName, afterFileName, outputFileName]]:
constant[Compare two BIM files for differences.
:param beforeFileName: the name of the file before modification.
:param afterFileName: the name of the file after modification.
:param outputFileName: the name of the output file (containing the
differences between the ``before`` and the ``after``
files.
:type beforeFileName: str
:type afterFileName: str
:type outputFileName: str
:returns: the number of differences between the two files.
The ``bim`` files contain the list of markers in a given dataset. The
``before`` file should have more markers than the ``after`` file. The
``after`` file should be a subset of the markers in the ``before`` file.
]
variable[options] assign[=] call[name[Dummy], parameter[]]
name[options].before assign[=] name[beforeFileName]
name[options].after assign[=] name[afterFileName]
name[options].out assign[=] name[outputFileName]
call[name[CompareBIM].checkArgs, parameter[name[options]]]
variable[beforeBIM] assign[=] call[name[CompareBIM].readBIM, parameter[name[options].before]]
variable[afterBIM] assign[=] call[name[CompareBIM].readBIM, parameter[name[options].after]]
call[name[CompareBIM].compareSNPs, parameter[name[beforeBIM], name[afterBIM], name[options].out]]
return[binary_operation[name[beforeBIM] - name[afterBIM]]]
|
keyword[def] identifier[compareBIMfiles] ( identifier[beforeFileName] , identifier[afterFileName] , identifier[outputFileName] ):
literal[string]
identifier[options] = identifier[Dummy] ()
identifier[options] . identifier[before] = identifier[beforeFileName]
identifier[options] . identifier[after] = identifier[afterFileName]
identifier[options] . identifier[out] = identifier[outputFileName]
identifier[CompareBIM] . identifier[checkArgs] ( identifier[options] )
identifier[beforeBIM] = identifier[CompareBIM] . identifier[readBIM] ( identifier[options] . identifier[before] )
identifier[afterBIM] = identifier[CompareBIM] . identifier[readBIM] ( identifier[options] . identifier[after] )
identifier[CompareBIM] . identifier[compareSNPs] ( identifier[beforeBIM] , identifier[afterBIM] , identifier[options] . identifier[out] )
keyword[return] identifier[beforeBIM] - identifier[afterBIM]
|
def compareBIMfiles(beforeFileName, afterFileName, outputFileName):
"""Compare two BIM files for differences.
:param beforeFileName: the name of the file before modification.
:param afterFileName: the name of the file after modification.
:param outputFileName: the name of the output file (containing the
differences between the ``before`` and the ``after``
files.
:type beforeFileName: str
:type afterFileName: str
:type outputFileName: str
:returns: the number of differences between the two files.
The ``bim`` files contain the list of markers in a given dataset. The
``before`` file should have more markers than the ``after`` file. The
``after`` file should be a subset of the markers in the ``before`` file.
"""
# Creating the options
options = Dummy()
options.before = beforeFileName
options.after = afterFileName
options.out = outputFileName
# Checking the options
CompareBIM.checkArgs(options)
# Reading the BIM files
beforeBIM = CompareBIM.readBIM(options.before)
afterBIM = CompareBIM.readBIM(options.after)
# Finding the differences
CompareBIM.compareSNPs(beforeBIM, afterBIM, options.out)
return beforeBIM - afterBIM
|
def _to_enos_roles(roles):
"""Transform the roles to use enoslib.host.Host hosts.
Args:
roles (dict): roles returned by
:py:func:`enoslib.infra.provider.Provider.init`
"""
def to_host(h):
extra = {}
# create extra_vars for the nics
# network_role = ethX
for nic, roles in h["nics"]:
for role in roles:
extra[role] = nic
return Host(h["host"], user="root", extra=extra)
enos_roles = {}
for role, hosts in roles.items():
enos_roles[role] = [to_host(h) for h in hosts]
logger.debug(enos_roles)
return enos_roles
|
def function[_to_enos_roles, parameter[roles]]:
constant[Transform the roles to use enoslib.host.Host hosts.
Args:
roles (dict): roles returned by
:py:func:`enoslib.infra.provider.Provider.init`
]
def function[to_host, parameter[h]]:
variable[extra] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da20c6e7400>, <ast.Name object at 0x7da20c6e75b0>]]] in starred[call[name[h]][constant[nics]]] begin[:]
for taget[name[role]] in starred[name[roles]] begin[:]
call[name[extra]][name[role]] assign[=] name[nic]
return[call[name[Host], parameter[call[name[h]][constant[host]]]]]
variable[enos_roles] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da20c7cb3a0>, <ast.Name object at 0x7da20c7cb6d0>]]] in starred[call[name[roles].items, parameter[]]] begin[:]
call[name[enos_roles]][name[role]] assign[=] <ast.ListComp object at 0x7da20c7cb790>
call[name[logger].debug, parameter[name[enos_roles]]]
return[name[enos_roles]]
|
keyword[def] identifier[_to_enos_roles] ( identifier[roles] ):
literal[string]
keyword[def] identifier[to_host] ( identifier[h] ):
identifier[extra] ={}
keyword[for] identifier[nic] , identifier[roles] keyword[in] identifier[h] [ literal[string] ]:
keyword[for] identifier[role] keyword[in] identifier[roles] :
identifier[extra] [ identifier[role] ]= identifier[nic]
keyword[return] identifier[Host] ( identifier[h] [ literal[string] ], identifier[user] = literal[string] , identifier[extra] = identifier[extra] )
identifier[enos_roles] ={}
keyword[for] identifier[role] , identifier[hosts] keyword[in] identifier[roles] . identifier[items] ():
identifier[enos_roles] [ identifier[role] ]=[ identifier[to_host] ( identifier[h] ) keyword[for] identifier[h] keyword[in] identifier[hosts] ]
identifier[logger] . identifier[debug] ( identifier[enos_roles] )
keyword[return] identifier[enos_roles]
|
def _to_enos_roles(roles):
"""Transform the roles to use enoslib.host.Host hosts.
Args:
roles (dict): roles returned by
:py:func:`enoslib.infra.provider.Provider.init`
"""
def to_host(h):
extra = {}
# create extra_vars for the nics
# network_role = ethX
for (nic, roles) in h['nics']:
for role in roles:
extra[role] = nic # depends on [control=['for'], data=['role']] # depends on [control=['for'], data=[]]
return Host(h['host'], user='root', extra=extra)
enos_roles = {}
for (role, hosts) in roles.items():
enos_roles[role] = [to_host(h) for h in hosts] # depends on [control=['for'], data=[]]
logger.debug(enos_roles)
return enos_roles
|
def _deprecated_kwargs(kwargs, arg_newarg):
""" arg_newarg is a list of tuples, where each tuple has a pair of strings.
('old_arg', 'new_arg')
A DeprecationWarning is raised for the arguments that need to be
replaced.
"""
warn_for = []
for (arg, new_kw) in arg_newarg:
if arg in kwargs.keys():
val = kwargs.pop(arg)
kwargs[new_kw] = val
warn_for.append((arg, new_kw))
if len(warn_for) > 0:
if len(warn_for) == 1:
warnings.warn("Argument '{}' is deprecated. Use {} instead".
format(warn_for[0][0], warn_for[0][1]),
DeprecationWarning, stacklevel=4)
else:
args = ", ".join([x[0] for x in warn_for])
repl = ", ".join([x[1] for x in warn_for])
warnings.warn(
"Arguments '{}' are deprecated. Use '{}' instead respectively".
format(args, repl),
DeprecationWarning, stacklevel=4)
return kwargs
|
def function[_deprecated_kwargs, parameter[kwargs, arg_newarg]]:
constant[ arg_newarg is a list of tuples, where each tuple has a pair of strings.
('old_arg', 'new_arg')
A DeprecationWarning is raised for the arguments that need to be
replaced.
]
variable[warn_for] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b2838820>, <ast.Name object at 0x7da1b2839b40>]]] in starred[name[arg_newarg]] begin[:]
if compare[name[arg] in call[name[kwargs].keys, parameter[]]] begin[:]
variable[val] assign[=] call[name[kwargs].pop, parameter[name[arg]]]
call[name[kwargs]][name[new_kw]] assign[=] name[val]
call[name[warn_for].append, parameter[tuple[[<ast.Name object at 0x7da1b2838b80>, <ast.Name object at 0x7da1b2838760>]]]]
if compare[call[name[len], parameter[name[warn_for]]] greater[>] constant[0]] begin[:]
if compare[call[name[len], parameter[name[warn_for]]] equal[==] constant[1]] begin[:]
call[name[warnings].warn, parameter[call[constant[Argument '{}' is deprecated. Use {} instead].format, parameter[call[call[name[warn_for]][constant[0]]][constant[0]], call[call[name[warn_for]][constant[0]]][constant[1]]]], name[DeprecationWarning]]]
return[name[kwargs]]
|
keyword[def] identifier[_deprecated_kwargs] ( identifier[kwargs] , identifier[arg_newarg] ):
literal[string]
identifier[warn_for] =[]
keyword[for] ( identifier[arg] , identifier[new_kw] ) keyword[in] identifier[arg_newarg] :
keyword[if] identifier[arg] keyword[in] identifier[kwargs] . identifier[keys] ():
identifier[val] = identifier[kwargs] . identifier[pop] ( identifier[arg] )
identifier[kwargs] [ identifier[new_kw] ]= identifier[val]
identifier[warn_for] . identifier[append] (( identifier[arg] , identifier[new_kw] ))
keyword[if] identifier[len] ( identifier[warn_for] )> literal[int] :
keyword[if] identifier[len] ( identifier[warn_for] )== literal[int] :
identifier[warnings] . identifier[warn] ( literal[string] .
identifier[format] ( identifier[warn_for] [ literal[int] ][ literal[int] ], identifier[warn_for] [ literal[int] ][ literal[int] ]),
identifier[DeprecationWarning] , identifier[stacklevel] = literal[int] )
keyword[else] :
identifier[args] = literal[string] . identifier[join] ([ identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[warn_for] ])
identifier[repl] = literal[string] . identifier[join] ([ identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[warn_for] ])
identifier[warnings] . identifier[warn] (
literal[string] .
identifier[format] ( identifier[args] , identifier[repl] ),
identifier[DeprecationWarning] , identifier[stacklevel] = literal[int] )
keyword[return] identifier[kwargs]
|
def _deprecated_kwargs(kwargs, arg_newarg):
""" arg_newarg is a list of tuples, where each tuple has a pair of strings.
('old_arg', 'new_arg')
A DeprecationWarning is raised for the arguments that need to be
replaced.
"""
warn_for = []
for (arg, new_kw) in arg_newarg:
if arg in kwargs.keys():
val = kwargs.pop(arg)
kwargs[new_kw] = val
warn_for.append((arg, new_kw)) # depends on [control=['if'], data=['arg']] # depends on [control=['for'], data=[]]
if len(warn_for) > 0:
if len(warn_for) == 1:
warnings.warn("Argument '{}' is deprecated. Use {} instead".format(warn_for[0][0], warn_for[0][1]), DeprecationWarning, stacklevel=4) # depends on [control=['if'], data=[]]
else:
args = ', '.join([x[0] for x in warn_for])
repl = ', '.join([x[1] for x in warn_for])
warnings.warn("Arguments '{}' are deprecated. Use '{}' instead respectively".format(args, repl), DeprecationWarning, stacklevel=4) # depends on [control=['if'], data=[]]
return kwargs
|
def op_or(self, *elements):
"""Update the ``Expression`` by joining the specified additional
``elements`` using an "OR" ``Operator``
Args:
*elements (BaseExpression): The ``Expression`` and/or
``Constraint`` elements which the "OR" ``Operator`` applies
to.
Returns:
Expression: ``self`` or related ``Expression``.
"""
expression = self.add_operator(Operator(','))
for element in elements:
expression.add_element(element)
return expression
|
def function[op_or, parameter[self]]:
constant[Update the ``Expression`` by joining the specified additional
``elements`` using an "OR" ``Operator``
Args:
*elements (BaseExpression): The ``Expression`` and/or
``Constraint`` elements which the "OR" ``Operator`` applies
to.
Returns:
Expression: ``self`` or related ``Expression``.
]
variable[expression] assign[=] call[name[self].add_operator, parameter[call[name[Operator], parameter[constant[,]]]]]
for taget[name[element]] in starred[name[elements]] begin[:]
call[name[expression].add_element, parameter[name[element]]]
return[name[expression]]
|
keyword[def] identifier[op_or] ( identifier[self] ,* identifier[elements] ):
literal[string]
identifier[expression] = identifier[self] . identifier[add_operator] ( identifier[Operator] ( literal[string] ))
keyword[for] identifier[element] keyword[in] identifier[elements] :
identifier[expression] . identifier[add_element] ( identifier[element] )
keyword[return] identifier[expression]
|
def op_or(self, *elements):
"""Update the ``Expression`` by joining the specified additional
``elements`` using an "OR" ``Operator``
Args:
*elements (BaseExpression): The ``Expression`` and/or
``Constraint`` elements which the "OR" ``Operator`` applies
to.
Returns:
Expression: ``self`` or related ``Expression``.
"""
expression = self.add_operator(Operator(','))
for element in elements:
expression.add_element(element) # depends on [control=['for'], data=['element']]
return expression
|
def mark_job_as_canceling(self, job_id):
"""
Mark the job as requested for canceling. Does not actually try to cancel a running job.
:param job_id: the job to be marked as canceling.
:return: the job object
"""
job, _ = self._update_job_state(job_id, State.CANCELING)
return job
|
def function[mark_job_as_canceling, parameter[self, job_id]]:
constant[
Mark the job as requested for canceling. Does not actually try to cancel a running job.
:param job_id: the job to be marked as canceling.
:return: the job object
]
<ast.Tuple object at 0x7da1b05e0d60> assign[=] call[name[self]._update_job_state, parameter[name[job_id], name[State].CANCELING]]
return[name[job]]
|
keyword[def] identifier[mark_job_as_canceling] ( identifier[self] , identifier[job_id] ):
literal[string]
identifier[job] , identifier[_] = identifier[self] . identifier[_update_job_state] ( identifier[job_id] , identifier[State] . identifier[CANCELING] )
keyword[return] identifier[job]
|
def mark_job_as_canceling(self, job_id):
"""
Mark the job as requested for canceling. Does not actually try to cancel a running job.
:param job_id: the job to be marked as canceling.
:return: the job object
"""
(job, _) = self._update_job_state(job_id, State.CANCELING)
return job
|
def xception_internal(inputs, hparams):
"""Xception body."""
with tf.variable_scope("xception"):
cur = inputs
if cur.get_shape().as_list()[1] > 200:
# Large image, Xception entry flow
cur = xception_entry(cur, hparams.hidden_size)
else:
# Small image, conv
cur = common_layers.conv_block(
cur,
hparams.hidden_size, [((1, 1), (3, 3))],
first_relu=False,
padding="SAME",
force2d=True,
name="small_image_conv")
for i in range(hparams.num_hidden_layers):
with tf.variable_scope("layer_%d" % i):
cur = residual_block(cur, hparams)
return xception_exit(cur)
|
def function[xception_internal, parameter[inputs, hparams]]:
constant[Xception body.]
with call[name[tf].variable_scope, parameter[constant[xception]]] begin[:]
variable[cur] assign[=] name[inputs]
if compare[call[call[call[name[cur].get_shape, parameter[]].as_list, parameter[]]][constant[1]] greater[>] constant[200]] begin[:]
variable[cur] assign[=] call[name[xception_entry], parameter[name[cur], name[hparams].hidden_size]]
for taget[name[i]] in starred[call[name[range], parameter[name[hparams].num_hidden_layers]]] begin[:]
with call[name[tf].variable_scope, parameter[binary_operation[constant[layer_%d] <ast.Mod object at 0x7da2590d6920> name[i]]]] begin[:]
variable[cur] assign[=] call[name[residual_block], parameter[name[cur], name[hparams]]]
return[call[name[xception_exit], parameter[name[cur]]]]
|
keyword[def] identifier[xception_internal] ( identifier[inputs] , identifier[hparams] ):
literal[string]
keyword[with] identifier[tf] . identifier[variable_scope] ( literal[string] ):
identifier[cur] = identifier[inputs]
keyword[if] identifier[cur] . identifier[get_shape] (). identifier[as_list] ()[ literal[int] ]> literal[int] :
identifier[cur] = identifier[xception_entry] ( identifier[cur] , identifier[hparams] . identifier[hidden_size] )
keyword[else] :
identifier[cur] = identifier[common_layers] . identifier[conv_block] (
identifier[cur] ,
identifier[hparams] . identifier[hidden_size] ,[(( literal[int] , literal[int] ),( literal[int] , literal[int] ))],
identifier[first_relu] = keyword[False] ,
identifier[padding] = literal[string] ,
identifier[force2d] = keyword[True] ,
identifier[name] = literal[string] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[hparams] . identifier[num_hidden_layers] ):
keyword[with] identifier[tf] . identifier[variable_scope] ( literal[string] % identifier[i] ):
identifier[cur] = identifier[residual_block] ( identifier[cur] , identifier[hparams] )
keyword[return] identifier[xception_exit] ( identifier[cur] )
|
def xception_internal(inputs, hparams):
"""Xception body."""
with tf.variable_scope('xception'):
cur = inputs
if cur.get_shape().as_list()[1] > 200:
# Large image, Xception entry flow
cur = xception_entry(cur, hparams.hidden_size) # depends on [control=['if'], data=[]]
else:
# Small image, conv
cur = common_layers.conv_block(cur, hparams.hidden_size, [((1, 1), (3, 3))], first_relu=False, padding='SAME', force2d=True, name='small_image_conv')
for i in range(hparams.num_hidden_layers):
with tf.variable_scope('layer_%d' % i):
cur = residual_block(cur, hparams) # depends on [control=['with'], data=[]] # depends on [control=['for'], data=['i']]
return xception_exit(cur) # depends on [control=['with'], data=[]]
|
def deregisterkbevent(self, keys, modifiers):
"""
Remove callback of registered event
@param keys: key to listen
@type keys: string
@param modifiers: control / alt combination using gtk MODIFIERS
@type modifiers: int
@return: 1 if registration was successful, 0 if not.
@rtype: integer
"""
event_name = "kbevent%s%s" % (keys, modifiers)
if event_name in _pollEvents._callback:
del _pollEvents._callback[event_name]
return self._remote_deregisterkbevent(keys, modifiers)
|
def function[deregisterkbevent, parameter[self, keys, modifiers]]:
constant[
Remove callback of registered event
@param keys: key to listen
@type keys: string
@param modifiers: control / alt combination using gtk MODIFIERS
@type modifiers: int
@return: 1 if registration was successful, 0 if not.
@rtype: integer
]
variable[event_name] assign[=] binary_operation[constant[kbevent%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f09c130>, <ast.Name object at 0x7da18f09da20>]]]
if compare[name[event_name] in name[_pollEvents]._callback] begin[:]
<ast.Delete object at 0x7da18f09ed40>
return[call[name[self]._remote_deregisterkbevent, parameter[name[keys], name[modifiers]]]]
|
keyword[def] identifier[deregisterkbevent] ( identifier[self] , identifier[keys] , identifier[modifiers] ):
literal[string]
identifier[event_name] = literal[string] %( identifier[keys] , identifier[modifiers] )
keyword[if] identifier[event_name] keyword[in] identifier[_pollEvents] . identifier[_callback] :
keyword[del] identifier[_pollEvents] . identifier[_callback] [ identifier[event_name] ]
keyword[return] identifier[self] . identifier[_remote_deregisterkbevent] ( identifier[keys] , identifier[modifiers] )
|
def deregisterkbevent(self, keys, modifiers):
"""
Remove callback of registered event
@param keys: key to listen
@type keys: string
@param modifiers: control / alt combination using gtk MODIFIERS
@type modifiers: int
@return: 1 if registration was successful, 0 if not.
@rtype: integer
"""
event_name = 'kbevent%s%s' % (keys, modifiers)
if event_name in _pollEvents._callback:
del _pollEvents._callback[event_name] # depends on [control=['if'], data=['event_name']]
return self._remote_deregisterkbevent(keys, modifiers)
|
def tagAttributes_while(fdef_master_list,root):
'''Tag each node under root with the appropriate depth. '''
depth = 0
current = root
untagged_nodes = [root]
while untagged_nodes:
current = untagged_nodes.pop()
for x in fdef_master_list:
if jsName(x.path,x.name) == current['name']:
current['path'] = x.path
if children in current:
for child in children:
child["depth"] = depth
untagged_nodes.append(child)
if depth not in current:
current["depth"] = depth
depth += 1
return root
|
def function[tagAttributes_while, parameter[fdef_master_list, root]]:
constant[Tag each node under root with the appropriate depth. ]
variable[depth] assign[=] constant[0]
variable[current] assign[=] name[root]
variable[untagged_nodes] assign[=] list[[<ast.Name object at 0x7da1b27e04f0>]]
while name[untagged_nodes] begin[:]
variable[current] assign[=] call[name[untagged_nodes].pop, parameter[]]
for taget[name[x]] in starred[name[fdef_master_list]] begin[:]
if compare[call[name[jsName], parameter[name[x].path, name[x].name]] equal[==] call[name[current]][constant[name]]] begin[:]
call[name[current]][constant[path]] assign[=] name[x].path
if compare[name[children] in name[current]] begin[:]
for taget[name[child]] in starred[name[children]] begin[:]
call[name[child]][constant[depth]] assign[=] name[depth]
call[name[untagged_nodes].append, parameter[name[child]]]
if compare[name[depth] <ast.NotIn object at 0x7da2590d7190> name[current]] begin[:]
call[name[current]][constant[depth]] assign[=] name[depth]
<ast.AugAssign object at 0x7da1b287f4c0>
return[name[root]]
|
keyword[def] identifier[tagAttributes_while] ( identifier[fdef_master_list] , identifier[root] ):
literal[string]
identifier[depth] = literal[int]
identifier[current] = identifier[root]
identifier[untagged_nodes] =[ identifier[root] ]
keyword[while] identifier[untagged_nodes] :
identifier[current] = identifier[untagged_nodes] . identifier[pop] ()
keyword[for] identifier[x] keyword[in] identifier[fdef_master_list] :
keyword[if] identifier[jsName] ( identifier[x] . identifier[path] , identifier[x] . identifier[name] )== identifier[current] [ literal[string] ]:
identifier[current] [ literal[string] ]= identifier[x] . identifier[path]
keyword[if] identifier[children] keyword[in] identifier[current] :
keyword[for] identifier[child] keyword[in] identifier[children] :
identifier[child] [ literal[string] ]= identifier[depth]
identifier[untagged_nodes] . identifier[append] ( identifier[child] )
keyword[if] identifier[depth] keyword[not] keyword[in] identifier[current] :
identifier[current] [ literal[string] ]= identifier[depth]
identifier[depth] += literal[int]
keyword[return] identifier[root]
|
def tagAttributes_while(fdef_master_list, root):
"""Tag each node under root with the appropriate depth. """
depth = 0
current = root
untagged_nodes = [root]
while untagged_nodes:
current = untagged_nodes.pop()
for x in fdef_master_list:
if jsName(x.path, x.name) == current['name']:
current['path'] = x.path # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['x']]
if children in current:
for child in children:
child['depth'] = depth
untagged_nodes.append(child) # depends on [control=['for'], data=['child']] # depends on [control=['if'], data=['children']]
if depth not in current:
current['depth'] = depth
depth += 1 # depends on [control=['if'], data=['depth', 'current']] # depends on [control=['while'], data=[]]
return root
|
def subsample_reads(self):
"""
Subsampling of reads to 20X coverage of rMLST genes (roughly).
To be called after rMLST extraction and read trimming, in that order.
"""
logging.info('Subsampling {at} reads'.format(at=self.analysistype))
with progressbar(self.runmetadata) as bar:
for sample in bar:
if sample.general.bestassemblyfile != 'NA':
# Create the name of the subsampled read file
sample[self.analysistype].subsampledreads = os.path.join(
sample[self.analysistype].outputdir,
'{at}_targetMatches_subsampled.fastq.gz'.format(at=self.analysistype))
# Set the reformat.sh command. It will be run multiple times, overwrite previous iterations
# each time. Use samplebasestarget to provide an approximate number of bases to include in the
# subsampled reads e.g. for rMLST: 700000 (approx. 35000 bp total length of genes x 20X coverage)
sample[self.analysistype].subsamplecmd = \
'reformat.sh in={bf} out={ssr} overwrite samplebasestarget=700000' \
.format(bf=sample[self.analysistype].baitedfastq,
ssr=sample[self.analysistype].subsampledreads)
if not os.path.isfile(sample[self.analysistype].subsampledreads):
# Run the call
out, err = run_subprocess(sample[self.analysistype].subsamplecmd)
write_to_logfile(sample[self.analysistype].subsamplecmd,
sample[self.analysistype].subsamplecmd,
self.logfile, sample.general.logout, sample.general.logerr,
sample[self.analysistype].logout, sample[self.analysistype].logerr)
write_to_logfile(out,
err,
self.logfile, sample.general.logout, sample.general.logerr,
sample[self.analysistype].logout, sample[self.analysistype].logerr)
# Update the variable to store the baited reads
sample[self.analysistype].baitedfastq = sample[self.analysistype].subsampledreads
|
def function[subsample_reads, parameter[self]]:
constant[
Subsampling of reads to 20X coverage of rMLST genes (roughly).
To be called after rMLST extraction and read trimming, in that order.
]
call[name[logging].info, parameter[call[constant[Subsampling {at} reads].format, parameter[]]]]
with call[name[progressbar], parameter[name[self].runmetadata]] begin[:]
for taget[name[sample]] in starred[name[bar]] begin[:]
if compare[name[sample].general.bestassemblyfile not_equal[!=] constant[NA]] begin[:]
call[name[sample]][name[self].analysistype].subsampledreads assign[=] call[name[os].path.join, parameter[call[name[sample]][name[self].analysistype].outputdir, call[constant[{at}_targetMatches_subsampled.fastq.gz].format, parameter[]]]]
call[name[sample]][name[self].analysistype].subsamplecmd assign[=] call[constant[reformat.sh in={bf} out={ssr} overwrite samplebasestarget=700000].format, parameter[]]
if <ast.UnaryOp object at 0x7da1b1ecb130> begin[:]
<ast.Tuple object at 0x7da1b1ecaf20> assign[=] call[name[run_subprocess], parameter[call[name[sample]][name[self].analysistype].subsamplecmd]]
call[name[write_to_logfile], parameter[call[name[sample]][name[self].analysistype].subsamplecmd, call[name[sample]][name[self].analysistype].subsamplecmd, name[self].logfile, name[sample].general.logout, name[sample].general.logerr, call[name[sample]][name[self].analysistype].logout, call[name[sample]][name[self].analysistype].logerr]]
call[name[write_to_logfile], parameter[name[out], name[err], name[self].logfile, name[sample].general.logout, name[sample].general.logerr, call[name[sample]][name[self].analysistype].logout, call[name[sample]][name[self].analysistype].logerr]]
call[name[sample]][name[self].analysistype].baitedfastq assign[=] call[name[sample]][name[self].analysistype].subsampledreads
|
keyword[def] identifier[subsample_reads] ( identifier[self] ):
literal[string]
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[at] = identifier[self] . identifier[analysistype] ))
keyword[with] identifier[progressbar] ( identifier[self] . identifier[runmetadata] ) keyword[as] identifier[bar] :
keyword[for] identifier[sample] keyword[in] identifier[bar] :
keyword[if] identifier[sample] . identifier[general] . identifier[bestassemblyfile] != literal[string] :
identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[subsampledreads] = identifier[os] . identifier[path] . identifier[join] (
identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[outputdir] ,
literal[string] . identifier[format] ( identifier[at] = identifier[self] . identifier[analysistype] ))
identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[subsamplecmd] = literal[string] . identifier[format] ( identifier[bf] = identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[baitedfastq] ,
identifier[ssr] = identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[subsampledreads] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[subsampledreads] ):
identifier[out] , identifier[err] = identifier[run_subprocess] ( identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[subsamplecmd] )
identifier[write_to_logfile] ( identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[subsamplecmd] ,
identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[subsamplecmd] ,
identifier[self] . identifier[logfile] , identifier[sample] . identifier[general] . identifier[logout] , identifier[sample] . identifier[general] . identifier[logerr] ,
identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[logout] , identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[logerr] )
identifier[write_to_logfile] ( identifier[out] ,
identifier[err] ,
identifier[self] . identifier[logfile] , identifier[sample] . identifier[general] . identifier[logout] , identifier[sample] . identifier[general] . identifier[logerr] ,
identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[logout] , identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[logerr] )
identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[baitedfastq] = identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[subsampledreads]
|
def subsample_reads(self):
"""
Subsampling of reads to 20X coverage of rMLST genes (roughly).
To be called after rMLST extraction and read trimming, in that order.
"""
logging.info('Subsampling {at} reads'.format(at=self.analysistype))
with progressbar(self.runmetadata) as bar:
for sample in bar:
if sample.general.bestassemblyfile != 'NA':
# Create the name of the subsampled read file
sample[self.analysistype].subsampledreads = os.path.join(sample[self.analysistype].outputdir, '{at}_targetMatches_subsampled.fastq.gz'.format(at=self.analysistype))
# Set the reformat.sh command. It will be run multiple times, overwrite previous iterations
# each time. Use samplebasestarget to provide an approximate number of bases to include in the
# subsampled reads e.g. for rMLST: 700000 (approx. 35000 bp total length of genes x 20X coverage)
sample[self.analysistype].subsamplecmd = 'reformat.sh in={bf} out={ssr} overwrite samplebasestarget=700000'.format(bf=sample[self.analysistype].baitedfastq, ssr=sample[self.analysistype].subsampledreads)
if not os.path.isfile(sample[self.analysistype].subsampledreads):
# Run the call
(out, err) = run_subprocess(sample[self.analysistype].subsamplecmd)
write_to_logfile(sample[self.analysistype].subsamplecmd, sample[self.analysistype].subsamplecmd, self.logfile, sample.general.logout, sample.general.logerr, sample[self.analysistype].logout, sample[self.analysistype].logerr)
write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, sample[self.analysistype].logout, sample[self.analysistype].logerr) # depends on [control=['if'], data=[]]
# Update the variable to store the baited reads
sample[self.analysistype].baitedfastq = sample[self.analysistype].subsampledreads # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['sample']] # depends on [control=['with'], data=['bar']]
|
def normalize(self, address, **kwargs):
"""Make the address more compareable."""
# TODO: normalize well-known parts like "Street", "Road", etc.
# TODO: consider using https://github.com/openvenues/pypostal
addresses = super(AddressType, self).normalize(address, **kwargs)
return addresses
|
def function[normalize, parameter[self, address]]:
constant[Make the address more compareable.]
variable[addresses] assign[=] call[call[name[super], parameter[name[AddressType], name[self]]].normalize, parameter[name[address]]]
return[name[addresses]]
|
keyword[def] identifier[normalize] ( identifier[self] , identifier[address] ,** identifier[kwargs] ):
literal[string]
identifier[addresses] = identifier[super] ( identifier[AddressType] , identifier[self] ). identifier[normalize] ( identifier[address] ,** identifier[kwargs] )
keyword[return] identifier[addresses]
|
def normalize(self, address, **kwargs):
"""Make the address more compareable."""
# TODO: normalize well-known parts like "Street", "Road", etc.
# TODO: consider using https://github.com/openvenues/pypostal
addresses = super(AddressType, self).normalize(address, **kwargs)
return addresses
|
def ub_to_str(string):
"""
converts py2 unicode / py3 bytestring into str
Args:
string (unicode, byte_string): string to be converted
Returns:
(str)
"""
if not isinstance(string, str):
if six.PY2:
return str(string)
else:
return string.decode()
return string
|
def function[ub_to_str, parameter[string]]:
constant[
converts py2 unicode / py3 bytestring into str
Args:
string (unicode, byte_string): string to be converted
Returns:
(str)
]
if <ast.UnaryOp object at 0x7da207f032e0> begin[:]
if name[six].PY2 begin[:]
return[call[name[str], parameter[name[string]]]]
return[name[string]]
|
keyword[def] identifier[ub_to_str] ( identifier[string] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[string] , identifier[str] ):
keyword[if] identifier[six] . identifier[PY2] :
keyword[return] identifier[str] ( identifier[string] )
keyword[else] :
keyword[return] identifier[string] . identifier[decode] ()
keyword[return] identifier[string]
|
def ub_to_str(string):
"""
converts py2 unicode / py3 bytestring into str
Args:
string (unicode, byte_string): string to be converted
Returns:
(str)
"""
if not isinstance(string, str):
if six.PY2:
return str(string) # depends on [control=['if'], data=[]]
else:
return string.decode() # depends on [control=['if'], data=[]]
return string
|
def merge_dfs(dfs, resample=None, do_mean=False, do_sum=False, do_min=False, do_max=False):
"""
dfs is a dictionary of key => dataframe
This method resamples each of the dataframes if a period is provided
(http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases)
"""
if len(dfs) == 0:
raise Exception("No dataframes provided")
df = dfs.values()[0]
name = dfs.keys()[0]
df.columns = map(lambda x: name+"_"+x if not x.startswith(name) else x, df.columns)
if resample is not None:
df = df.resample(resample)
if do_mean: df = df.mean()
elif do_sum: df = df.sum()
elif do_min: df = df.min()
elif do_max: df = df.max()
else: df = df.mean()
if len(dfs) > 1:
for name, newdf in dfs.items()[1:]:
if resample is not None:
newdf = newdf.resample(resample)
if do_mean: newdf = newdf.mean()
elif do_sum: newdf = newdf.sum()
elif do_min: newdf = newdf.min()
elif do_max: newdf = newdf.max()
else: newdf = newdf.mean()
newdf.columns = map(lambda x: name+"_"+x if not x.startswith(name) else x, newdf.columns)
df = df.merge(newdf, left_index=True, right_index=True, how='outer')
return df
|
def function[merge_dfs, parameter[dfs, resample, do_mean, do_sum, do_min, do_max]]:
constant[
dfs is a dictionary of key => dataframe
This method resamples each of the dataframes if a period is provided
(http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases)
]
if compare[call[name[len], parameter[name[dfs]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da20e9b1420>
variable[df] assign[=] call[call[name[dfs].values, parameter[]]][constant[0]]
variable[name] assign[=] call[call[name[dfs].keys, parameter[]]][constant[0]]
name[df].columns assign[=] call[name[map], parameter[<ast.Lambda object at 0x7da20e9b39a0>, name[df].columns]]
if compare[name[resample] is_not constant[None]] begin[:]
variable[df] assign[=] call[name[df].resample, parameter[name[resample]]]
if name[do_mean] begin[:]
variable[df] assign[=] call[name[df].mean, parameter[]]
if compare[call[name[len], parameter[name[dfs]]] greater[>] constant[1]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18f00ebc0>, <ast.Name object at 0x7da18f00dab0>]]] in starred[call[call[name[dfs].items, parameter[]]][<ast.Slice object at 0x7da18f00cc10>]] begin[:]
if compare[name[resample] is_not constant[None]] begin[:]
variable[newdf] assign[=] call[name[newdf].resample, parameter[name[resample]]]
if name[do_mean] begin[:]
variable[newdf] assign[=] call[name[newdf].mean, parameter[]]
name[newdf].columns assign[=] call[name[map], parameter[<ast.Lambda object at 0x7da18f00c760>, name[newdf].columns]]
variable[df] assign[=] call[name[df].merge, parameter[name[newdf]]]
return[name[df]]
|
keyword[def] identifier[merge_dfs] ( identifier[dfs] , identifier[resample] = keyword[None] , identifier[do_mean] = keyword[False] , identifier[do_sum] = keyword[False] , identifier[do_min] = keyword[False] , identifier[do_max] = keyword[False] ):
literal[string]
keyword[if] identifier[len] ( identifier[dfs] )== literal[int] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[df] = identifier[dfs] . identifier[values] ()[ literal[int] ]
identifier[name] = identifier[dfs] . identifier[keys] ()[ literal[int] ]
identifier[df] . identifier[columns] = identifier[map] ( keyword[lambda] identifier[x] : identifier[name] + literal[string] + identifier[x] keyword[if] keyword[not] identifier[x] . identifier[startswith] ( identifier[name] ) keyword[else] identifier[x] , identifier[df] . identifier[columns] )
keyword[if] identifier[resample] keyword[is] keyword[not] keyword[None] :
identifier[df] = identifier[df] . identifier[resample] ( identifier[resample] )
keyword[if] identifier[do_mean] : identifier[df] = identifier[df] . identifier[mean] ()
keyword[elif] identifier[do_sum] : identifier[df] = identifier[df] . identifier[sum] ()
keyword[elif] identifier[do_min] : identifier[df] = identifier[df] . identifier[min] ()
keyword[elif] identifier[do_max] : identifier[df] = identifier[df] . identifier[max] ()
keyword[else] : identifier[df] = identifier[df] . identifier[mean] ()
keyword[if] identifier[len] ( identifier[dfs] )> literal[int] :
keyword[for] identifier[name] , identifier[newdf] keyword[in] identifier[dfs] . identifier[items] ()[ literal[int] :]:
keyword[if] identifier[resample] keyword[is] keyword[not] keyword[None] :
identifier[newdf] = identifier[newdf] . identifier[resample] ( identifier[resample] )
keyword[if] identifier[do_mean] : identifier[newdf] = identifier[newdf] . identifier[mean] ()
keyword[elif] identifier[do_sum] : identifier[newdf] = identifier[newdf] . identifier[sum] ()
keyword[elif] identifier[do_min] : identifier[newdf] = identifier[newdf] . identifier[min] ()
keyword[elif] identifier[do_max] : identifier[newdf] = identifier[newdf] . identifier[max] ()
keyword[else] : identifier[newdf] = identifier[newdf] . identifier[mean] ()
identifier[newdf] . identifier[columns] = identifier[map] ( keyword[lambda] identifier[x] : identifier[name] + literal[string] + identifier[x] keyword[if] keyword[not] identifier[x] . identifier[startswith] ( identifier[name] ) keyword[else] identifier[x] , identifier[newdf] . identifier[columns] )
identifier[df] = identifier[df] . identifier[merge] ( identifier[newdf] , identifier[left_index] = keyword[True] , identifier[right_index] = keyword[True] , identifier[how] = literal[string] )
keyword[return] identifier[df]
|
def merge_dfs(dfs, resample=None, do_mean=False, do_sum=False, do_min=False, do_max=False):
"""
dfs is a dictionary of key => dataframe
This method resamples each of the dataframes if a period is provided
(http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases)
"""
if len(dfs) == 0:
raise Exception('No dataframes provided') # depends on [control=['if'], data=[]]
df = dfs.values()[0]
name = dfs.keys()[0]
df.columns = map(lambda x: name + '_' + x if not x.startswith(name) else x, df.columns)
if resample is not None:
df = df.resample(resample)
if do_mean:
df = df.mean() # depends on [control=['if'], data=[]]
elif do_sum:
df = df.sum() # depends on [control=['if'], data=[]]
elif do_min:
df = df.min() # depends on [control=['if'], data=[]]
elif do_max:
df = df.max() # depends on [control=['if'], data=[]]
else:
df = df.mean() # depends on [control=['if'], data=['resample']]
if len(dfs) > 1:
for (name, newdf) in dfs.items()[1:]:
if resample is not None:
newdf = newdf.resample(resample)
if do_mean:
newdf = newdf.mean() # depends on [control=['if'], data=[]]
elif do_sum:
newdf = newdf.sum() # depends on [control=['if'], data=[]]
elif do_min:
newdf = newdf.min() # depends on [control=['if'], data=[]]
elif do_max:
newdf = newdf.max() # depends on [control=['if'], data=[]]
else:
newdf = newdf.mean() # depends on [control=['if'], data=['resample']]
newdf.columns = map(lambda x: name + '_' + x if not x.startswith(name) else x, newdf.columns)
df = df.merge(newdf, left_index=True, right_index=True, how='outer') # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
return df
|
def cmd_karma(host):
"""Use the Karma service https://karma.securetia.com to check an IP
against various Threat Intelligence / Reputation lists.
\b
$ habu.karma www.google.com
www.google.com -> 64.233.190.99
[
"hphosts_fsa",
"hphosts_psh",
"hphosts_emd"
]
Note: You can use the hostname or the IP of the host to query.
"""
URL = 'https://karma.securetia.com/api/ip/'
try:
resolved = socket.gethostbyname(host)
except Exception:
logging.error('Invalid IP address or hostname')
sys.exit(1)
if host != resolved:
print(host, '->', resolved, file=sys.stderr)
r = requests.get(URL + resolved, headers={'Accept': 'application/json'})
if r.status_code != 200:
logging.error('HTTP Error code received: {}'.format(r.status_code))
sys.exit(1)
print(json.dumps(r.json(), indent=4))
|
def function[cmd_karma, parameter[host]]:
constant[Use the Karma service https://karma.securetia.com to check an IP
against various Threat Intelligence / Reputation lists.
$ habu.karma www.google.com
www.google.com -> 64.233.190.99
[
"hphosts_fsa",
"hphosts_psh",
"hphosts_emd"
]
Note: You can use the hostname or the IP of the host to query.
]
variable[URL] assign[=] constant[https://karma.securetia.com/api/ip/]
<ast.Try object at 0x7da1b2263e20>
if compare[name[host] not_equal[!=] name[resolved]] begin[:]
call[name[print], parameter[name[host], constant[->], name[resolved]]]
variable[r] assign[=] call[name[requests].get, parameter[binary_operation[name[URL] + name[resolved]]]]
if compare[name[r].status_code not_equal[!=] constant[200]] begin[:]
call[name[logging].error, parameter[call[constant[HTTP Error code received: {}].format, parameter[name[r].status_code]]]]
call[name[sys].exit, parameter[constant[1]]]
call[name[print], parameter[call[name[json].dumps, parameter[call[name[r].json, parameter[]]]]]]
|
keyword[def] identifier[cmd_karma] ( identifier[host] ):
literal[string]
identifier[URL] = literal[string]
keyword[try] :
identifier[resolved] = identifier[socket] . identifier[gethostbyname] ( identifier[host] )
keyword[except] identifier[Exception] :
identifier[logging] . identifier[error] ( literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[if] identifier[host] != identifier[resolved] :
identifier[print] ( identifier[host] , literal[string] , identifier[resolved] , identifier[file] = identifier[sys] . identifier[stderr] )
identifier[r] = identifier[requests] . identifier[get] ( identifier[URL] + identifier[resolved] , identifier[headers] ={ literal[string] : literal[string] })
keyword[if] identifier[r] . identifier[status_code] != literal[int] :
identifier[logging] . identifier[error] ( literal[string] . identifier[format] ( identifier[r] . identifier[status_code] ))
identifier[sys] . identifier[exit] ( literal[int] )
identifier[print] ( identifier[json] . identifier[dumps] ( identifier[r] . identifier[json] (), identifier[indent] = literal[int] ))
|
def cmd_karma(host):
"""Use the Karma service https://karma.securetia.com to check an IP
against various Threat Intelligence / Reputation lists.
\x08
$ habu.karma www.google.com
www.google.com -> 64.233.190.99
[
"hphosts_fsa",
"hphosts_psh",
"hphosts_emd"
]
Note: You can use the hostname or the IP of the host to query.
"""
URL = 'https://karma.securetia.com/api/ip/'
try:
resolved = socket.gethostbyname(host) # depends on [control=['try'], data=[]]
except Exception:
logging.error('Invalid IP address or hostname')
sys.exit(1) # depends on [control=['except'], data=[]]
if host != resolved:
print(host, '->', resolved, file=sys.stderr) # depends on [control=['if'], data=['host', 'resolved']]
r = requests.get(URL + resolved, headers={'Accept': 'application/json'})
if r.status_code != 200:
logging.error('HTTP Error code received: {}'.format(r.status_code))
sys.exit(1) # depends on [control=['if'], data=[]]
print(json.dumps(r.json(), indent=4))
|
def get_bin(self, *args, **kwargs):
"""Pass through to provider BinLookupSession.get_bin"""
# Implemented from kitosid template for -
# osid.resource.BinLookupSession.get_bin
return Bin(
self._provider_manager,
self._get_provider_session('bin_lookup_session').get_bin(*args, **kwargs),
self._runtime,
self._proxy)
|
def function[get_bin, parameter[self]]:
constant[Pass through to provider BinLookupSession.get_bin]
return[call[name[Bin], parameter[name[self]._provider_manager, call[call[name[self]._get_provider_session, parameter[constant[bin_lookup_session]]].get_bin, parameter[<ast.Starred object at 0x7da1b0a64c40>]], name[self]._runtime, name[self]._proxy]]]
|
keyword[def] identifier[get_bin] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[Bin] (
identifier[self] . identifier[_provider_manager] ,
identifier[self] . identifier[_get_provider_session] ( literal[string] ). identifier[get_bin] (* identifier[args] ,** identifier[kwargs] ),
identifier[self] . identifier[_runtime] ,
identifier[self] . identifier[_proxy] )
|
def get_bin(self, *args, **kwargs):
"""Pass through to provider BinLookupSession.get_bin"""
# Implemented from kitosid template for -
# osid.resource.BinLookupSession.get_bin
return Bin(self._provider_manager, self._get_provider_session('bin_lookup_session').get_bin(*args, **kwargs), self._runtime, self._proxy)
|
def _should_retry(self, context):
# type: (ExponentialRetryWithMaxWait,
# azure.storage.common.models.RetryContext) -> bool
"""Determine if retry should happen or not
:param ExponentialRetryWithMaxWait self: this
:param azure.storage.common.models.RetryContext context: retry context
:rtype: bool
:return: True if retry should happen, False otherwise
"""
# do not retry if max attempts equal or exceeded
if context.count >= self.max_attempts:
return False
# get response status
status = None
if context.response and context.response.status:
status = context.response.status
# if there is no response status, then handle the exception
# appropriately from the lower layer
if status is None:
exc = context.exception
# default to not retry in unknown/unhandled exception case
ret = False
# requests timeout, retry
if isinstance(exc, requests.Timeout):
ret = True
elif isinstance(exc, requests.exceptions.ContentDecodingError):
ret = True
elif (isinstance(exc, requests.exceptions.ConnectionError) or
isinstance(exc, requests.exceptions.ChunkedEncodingError)):
# newer versions of requests do not expose errno on the
# args[0] reason object; manually string parse
if isinstance(exc.args[0], urllib3.exceptions.MaxRetryError):
try:
msg = exc.args[0].reason.args[0]
except (AttributeError, IndexError):
# unexpected/malformed exception hierarchy, don't retry
pass
else:
if any(x in msg for x in _RETRYABLE_ERRNO_MAXRETRY):
ret = True
elif isinstance(exc.args[0], urllib3.exceptions.ProtocolError):
try:
msg = exc.args[0].args[0]
except (AttributeError, IndexError):
# unexpected/malformed exception hierarchy, don't retry
pass
else:
if any(x in msg for x in _RETRYABLE_ERRNO_PROTOCOL):
ret = True
# fallback to string search
if not ret:
msg = str(exc).lower()
if any(x in msg for x in _RETRYABLE_STRING_FALLBACK):
ret = True
return ret
elif 200 <= status < 300:
# failure during respond body download or parsing, so success
# codes should be retried
return True
elif 300 <= status < 500:
# response code 404 should be retried if secondary was used
if (status == 404 and
context.location_mode ==
azure.storage.common.models.LocationMode.SECONDARY):
return True
# response code 408 is a timeout and should be retried
# response code 429 is too many requests (throttle)
# TODO use "Retry-After" header for backoff amount
if status == 408 or status == 429:
return True
return False
elif status >= 500:
# response codes above 500 should be retried except for
# 501 (not implemented) and 505 (version not supported)
if status == 501 or status == 505:
return False
return True
else: # noqa
# this should be unreachable, retry anyway
return True
|
def function[_should_retry, parameter[self, context]]:
constant[Determine if retry should happen or not
:param ExponentialRetryWithMaxWait self: this
:param azure.storage.common.models.RetryContext context: retry context
:rtype: bool
:return: True if retry should happen, False otherwise
]
if compare[name[context].count greater_or_equal[>=] name[self].max_attempts] begin[:]
return[constant[False]]
variable[status] assign[=] constant[None]
if <ast.BoolOp object at 0x7da207f03850> begin[:]
variable[status] assign[=] name[context].response.status
if compare[name[status] is constant[None]] begin[:]
variable[exc] assign[=] name[context].exception
variable[ret] assign[=] constant[False]
if call[name[isinstance], parameter[name[exc], name[requests].Timeout]] begin[:]
variable[ret] assign[=] constant[True]
if <ast.UnaryOp object at 0x7da204623b50> begin[:]
variable[msg] assign[=] call[call[name[str], parameter[name[exc]]].lower, parameter[]]
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da204623a00>]] begin[:]
variable[ret] assign[=] constant[True]
return[name[ret]]
|
keyword[def] identifier[_should_retry] ( identifier[self] , identifier[context] ):
literal[string]
keyword[if] identifier[context] . identifier[count] >= identifier[self] . identifier[max_attempts] :
keyword[return] keyword[False]
identifier[status] = keyword[None]
keyword[if] identifier[context] . identifier[response] keyword[and] identifier[context] . identifier[response] . identifier[status] :
identifier[status] = identifier[context] . identifier[response] . identifier[status]
keyword[if] identifier[status] keyword[is] keyword[None] :
identifier[exc] = identifier[context] . identifier[exception]
identifier[ret] = keyword[False]
keyword[if] identifier[isinstance] ( identifier[exc] , identifier[requests] . identifier[Timeout] ):
identifier[ret] = keyword[True]
keyword[elif] identifier[isinstance] ( identifier[exc] , identifier[requests] . identifier[exceptions] . identifier[ContentDecodingError] ):
identifier[ret] = keyword[True]
keyword[elif] ( identifier[isinstance] ( identifier[exc] , identifier[requests] . identifier[exceptions] . identifier[ConnectionError] ) keyword[or]
identifier[isinstance] ( identifier[exc] , identifier[requests] . identifier[exceptions] . identifier[ChunkedEncodingError] )):
keyword[if] identifier[isinstance] ( identifier[exc] . identifier[args] [ literal[int] ], identifier[urllib3] . identifier[exceptions] . identifier[MaxRetryError] ):
keyword[try] :
identifier[msg] = identifier[exc] . identifier[args] [ literal[int] ]. identifier[reason] . identifier[args] [ literal[int] ]
keyword[except] ( identifier[AttributeError] , identifier[IndexError] ):
keyword[pass]
keyword[else] :
keyword[if] identifier[any] ( identifier[x] keyword[in] identifier[msg] keyword[for] identifier[x] keyword[in] identifier[_RETRYABLE_ERRNO_MAXRETRY] ):
identifier[ret] = keyword[True]
keyword[elif] identifier[isinstance] ( identifier[exc] . identifier[args] [ literal[int] ], identifier[urllib3] . identifier[exceptions] . identifier[ProtocolError] ):
keyword[try] :
identifier[msg] = identifier[exc] . identifier[args] [ literal[int] ]. identifier[args] [ literal[int] ]
keyword[except] ( identifier[AttributeError] , identifier[IndexError] ):
keyword[pass]
keyword[else] :
keyword[if] identifier[any] ( identifier[x] keyword[in] identifier[msg] keyword[for] identifier[x] keyword[in] identifier[_RETRYABLE_ERRNO_PROTOCOL] ):
identifier[ret] = keyword[True]
keyword[if] keyword[not] identifier[ret] :
identifier[msg] = identifier[str] ( identifier[exc] ). identifier[lower] ()
keyword[if] identifier[any] ( identifier[x] keyword[in] identifier[msg] keyword[for] identifier[x] keyword[in] identifier[_RETRYABLE_STRING_FALLBACK] ):
identifier[ret] = keyword[True]
keyword[return] identifier[ret]
keyword[elif] literal[int] <= identifier[status] < literal[int] :
keyword[return] keyword[True]
keyword[elif] literal[int] <= identifier[status] < literal[int] :
keyword[if] ( identifier[status] == literal[int] keyword[and]
identifier[context] . identifier[location_mode] ==
identifier[azure] . identifier[storage] . identifier[common] . identifier[models] . identifier[LocationMode] . identifier[SECONDARY] ):
keyword[return] keyword[True]
keyword[if] identifier[status] == literal[int] keyword[or] identifier[status] == literal[int] :
keyword[return] keyword[True]
keyword[return] keyword[False]
keyword[elif] identifier[status] >= literal[int] :
keyword[if] identifier[status] == literal[int] keyword[or] identifier[status] == literal[int] :
keyword[return] keyword[False]
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[True]
|
def _should_retry(self, context):
# type: (ExponentialRetryWithMaxWait,
# azure.storage.common.models.RetryContext) -> bool
'Determine if retry should happen or not\n :param ExponentialRetryWithMaxWait self: this\n :param azure.storage.common.models.RetryContext context: retry context\n :rtype: bool\n :return: True if retry should happen, False otherwise\n '
# do not retry if max attempts equal or exceeded
if context.count >= self.max_attempts:
return False # depends on [control=['if'], data=[]]
# get response status
status = None
if context.response and context.response.status:
status = context.response.status # depends on [control=['if'], data=[]]
# if there is no response status, then handle the exception
# appropriately from the lower layer
if status is None:
exc = context.exception
# default to not retry in unknown/unhandled exception case
ret = False
# requests timeout, retry
if isinstance(exc, requests.Timeout):
ret = True # depends on [control=['if'], data=[]]
elif isinstance(exc, requests.exceptions.ContentDecodingError):
ret = True # depends on [control=['if'], data=[]]
elif isinstance(exc, requests.exceptions.ConnectionError) or isinstance(exc, requests.exceptions.ChunkedEncodingError):
# newer versions of requests do not expose errno on the
# args[0] reason object; manually string parse
if isinstance(exc.args[0], urllib3.exceptions.MaxRetryError):
try:
msg = exc.args[0].reason.args[0] # depends on [control=['try'], data=[]]
except (AttributeError, IndexError):
# unexpected/malformed exception hierarchy, don't retry
pass # depends on [control=['except'], data=[]]
else:
if any((x in msg for x in _RETRYABLE_ERRNO_MAXRETRY)):
ret = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(exc.args[0], urllib3.exceptions.ProtocolError):
try:
msg = exc.args[0].args[0] # depends on [control=['try'], data=[]]
except (AttributeError, IndexError):
# unexpected/malformed exception hierarchy, don't retry
pass # depends on [control=['except'], data=[]]
else:
if any((x in msg for x in _RETRYABLE_ERRNO_PROTOCOL)):
ret = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# fallback to string search
if not ret:
msg = str(exc).lower()
if any((x in msg for x in _RETRYABLE_STRING_FALLBACK)):
ret = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return ret # depends on [control=['if'], data=[]]
elif 200 <= status < 300:
# failure during respond body download or parsing, so success
# codes should be retried
return True # depends on [control=['if'], data=[]]
elif 300 <= status < 500:
# response code 404 should be retried if secondary was used
if status == 404 and context.location_mode == azure.storage.common.models.LocationMode.SECONDARY:
return True # depends on [control=['if'], data=[]]
# response code 408 is a timeout and should be retried
# response code 429 is too many requests (throttle)
# TODO use "Retry-After" header for backoff amount
if status == 408 or status == 429:
return True # depends on [control=['if'], data=[]]
return False # depends on [control=['if'], data=['status']]
elif status >= 500:
# response codes above 500 should be retried except for
# 501 (not implemented) and 505 (version not supported)
if status == 501 or status == 505:
return False # depends on [control=['if'], data=[]]
return True # depends on [control=['if'], data=['status']]
else: # noqa
# this should be unreachable, retry anyway
return True
|
def gc(args):
"""
%prog gc fastafile
Plot G+C content distribution.
"""
p = OptionParser(gc.__doc__)
p.add_option("--binsize", default=500, type="int",
help="Bin size to use")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
binsize = opts.binsize
allbins = []
for name, seq in parse_fasta(fastafile):
for i in range(len(seq) / binsize):
atcnt = gccnt = 0
for c in seq[i * binsize: (i + 1) * binsize].upper():
if c in "AT":
atcnt += 1
elif c in "GC":
gccnt += 1
totalcnt = atcnt + gccnt
if totalcnt == 0:
continue
gcpct = gccnt * 100 / totalcnt
allbins.append(gcpct)
from jcvi.graphics.base import asciiplot
from collections import Counter
title = "Total number of bins={}".format(len(allbins))
c = Counter(allbins)
x, y = zip(*sorted(c.items()))
asciiplot(x, y, title=title)
|
def function[gc, parameter[args]]:
constant[
%prog gc fastafile
Plot G+C content distribution.
]
variable[p] assign[=] call[name[OptionParser], parameter[name[gc].__doc__]]
call[name[p].add_option, parameter[constant[--binsize]]]
<ast.Tuple object at 0x7da1b08d2410> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[1]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b08d0fd0>]]
<ast.Tuple object at 0x7da1b08d1120> assign[=] name[args]
variable[binsize] assign[=] name[opts].binsize
variable[allbins] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b08d1720>, <ast.Name object at 0x7da1b08d2fe0>]]] in starred[call[name[parse_fasta], parameter[name[fastafile]]]] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[call[name[len], parameter[name[seq]]] / name[binsize]]]]] begin[:]
variable[atcnt] assign[=] constant[0]
for taget[name[c]] in starred[call[call[name[seq]][<ast.Slice object at 0x7da1b08d1e40>].upper, parameter[]]] begin[:]
if compare[name[c] in constant[AT]] begin[:]
<ast.AugAssign object at 0x7da1b08d07f0>
variable[totalcnt] assign[=] binary_operation[name[atcnt] + name[gccnt]]
if compare[name[totalcnt] equal[==] constant[0]] begin[:]
continue
variable[gcpct] assign[=] binary_operation[binary_operation[name[gccnt] * constant[100]] / name[totalcnt]]
call[name[allbins].append, parameter[name[gcpct]]]
from relative_module[jcvi.graphics.base] import module[asciiplot]
from relative_module[collections] import module[Counter]
variable[title] assign[=] call[constant[Total number of bins={}].format, parameter[call[name[len], parameter[name[allbins]]]]]
variable[c] assign[=] call[name[Counter], parameter[name[allbins]]]
<ast.Tuple object at 0x7da1b08d2440> assign[=] call[name[zip], parameter[<ast.Starred object at 0x7da1b08d3f10>]]
call[name[asciiplot], parameter[name[x], name[y]]]
|
keyword[def] identifier[gc] ( identifier[args] ):
literal[string]
identifier[p] = identifier[OptionParser] ( identifier[gc] . identifier[__doc__] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = literal[int] , identifier[type] = literal[string] ,
identifier[help] = literal[string] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[fastafile] ,= identifier[args]
identifier[binsize] = identifier[opts] . identifier[binsize]
identifier[allbins] =[]
keyword[for] identifier[name] , identifier[seq] keyword[in] identifier[parse_fasta] ( identifier[fastafile] ):
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[seq] )/ identifier[binsize] ):
identifier[atcnt] = identifier[gccnt] = literal[int]
keyword[for] identifier[c] keyword[in] identifier[seq] [ identifier[i] * identifier[binsize] :( identifier[i] + literal[int] )* identifier[binsize] ]. identifier[upper] ():
keyword[if] identifier[c] keyword[in] literal[string] :
identifier[atcnt] += literal[int]
keyword[elif] identifier[c] keyword[in] literal[string] :
identifier[gccnt] += literal[int]
identifier[totalcnt] = identifier[atcnt] + identifier[gccnt]
keyword[if] identifier[totalcnt] == literal[int] :
keyword[continue]
identifier[gcpct] = identifier[gccnt] * literal[int] / identifier[totalcnt]
identifier[allbins] . identifier[append] ( identifier[gcpct] )
keyword[from] identifier[jcvi] . identifier[graphics] . identifier[base] keyword[import] identifier[asciiplot]
keyword[from] identifier[collections] keyword[import] identifier[Counter]
identifier[title] = literal[string] . identifier[format] ( identifier[len] ( identifier[allbins] ))
identifier[c] = identifier[Counter] ( identifier[allbins] )
identifier[x] , identifier[y] = identifier[zip] (* identifier[sorted] ( identifier[c] . identifier[items] ()))
identifier[asciiplot] ( identifier[x] , identifier[y] , identifier[title] = identifier[title] )
|
def gc(args):
"""
%prog gc fastafile
Plot G+C content distribution.
"""
p = OptionParser(gc.__doc__)
p.add_option('--binsize', default=500, type='int', help='Bin size to use')
(opts, args) = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(fastafile,) = args
binsize = opts.binsize
allbins = []
for (name, seq) in parse_fasta(fastafile):
for i in range(len(seq) / binsize):
atcnt = gccnt = 0
for c in seq[i * binsize:(i + 1) * binsize].upper():
if c in 'AT':
atcnt += 1 # depends on [control=['if'], data=[]]
elif c in 'GC':
gccnt += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c']]
totalcnt = atcnt + gccnt
if totalcnt == 0:
continue # depends on [control=['if'], data=[]]
gcpct = gccnt * 100 / totalcnt
allbins.append(gcpct) # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=[]]
from jcvi.graphics.base import asciiplot
from collections import Counter
title = 'Total number of bins={}'.format(len(allbins))
c = Counter(allbins)
(x, y) = zip(*sorted(c.items()))
asciiplot(x, y, title=title)
|
def p_substr_assignment(p):
""" statement : LET ID arg_list EQ expr
"""
if p[3] is None or p[5] is None:
return # There were errors
p[0] = None
entry = SYMBOL_TABLE.access_call(p[2], p.lineno(2))
if entry is None:
return
if entry.class_ == CLASS.unknown:
entry.class_ = CLASS.var
assert entry.class_ == CLASS.var and entry.type_ == TYPE.string
if p[5].type_ != TYPE.string:
api.errmsg.syntax_error_expected_string(p.lineno(4), p[5].type_)
if len(p[3]) > 1:
syntax_error(p.lineno(2), "Accessing string with too many indexes. Expected only one.")
return
if len(p[3]) == 1:
substr = (
make_typecast(_TYPE(gl.STR_INDEX_TYPE), p[3][0].value, p.lineno(2)),
make_typecast(_TYPE(gl.STR_INDEX_TYPE), p[3][0].value, p.lineno(2)))
else:
substr = (make_typecast(_TYPE(gl.STR_INDEX_TYPE),
make_number(gl.MIN_STRSLICE_IDX,
lineno=p.lineno(2)),
p.lineno(2)),
make_typecast(_TYPE(gl.STR_INDEX_TYPE),
make_number(gl.MAX_STRSLICE_IDX,
lineno=p.lineno(2)),
p.lineno(2)))
lineno = p.lineno(2)
base = make_number(OPTIONS.string_base.value, lineno, _TYPE(gl.STR_INDEX_TYPE))
p[0] = make_sentence('LETSUBSTR', entry,
make_binary(lineno, 'MINUS', substr[0], base, func=lambda x, y: x - y),
make_binary(lineno, 'MINUS', substr[1], base, func=lambda x, y: x - y),
p[5])
|
def function[p_substr_assignment, parameter[p]]:
constant[ statement : LET ID arg_list EQ expr
]
if <ast.BoolOp object at 0x7da1b0653ee0> begin[:]
return[None]
call[name[p]][constant[0]] assign[=] constant[None]
variable[entry] assign[=] call[name[SYMBOL_TABLE].access_call, parameter[call[name[p]][constant[2]], call[name[p].lineno, parameter[constant[2]]]]]
if compare[name[entry] is constant[None]] begin[:]
return[None]
if compare[name[entry].class_ equal[==] name[CLASS].unknown] begin[:]
name[entry].class_ assign[=] name[CLASS].var
assert[<ast.BoolOp object at 0x7da1b06532e0>]
if compare[call[name[p]][constant[5]].type_ not_equal[!=] name[TYPE].string] begin[:]
call[name[api].errmsg.syntax_error_expected_string, parameter[call[name[p].lineno, parameter[constant[4]]], call[name[p]][constant[5]].type_]]
if compare[call[name[len], parameter[call[name[p]][constant[3]]]] greater[>] constant[1]] begin[:]
call[name[syntax_error], parameter[call[name[p].lineno, parameter[constant[2]]], constant[Accessing string with too many indexes. Expected only one.]]]
return[None]
if compare[call[name[len], parameter[call[name[p]][constant[3]]]] equal[==] constant[1]] begin[:]
variable[substr] assign[=] tuple[[<ast.Call object at 0x7da1b26ac820>, <ast.Call object at 0x7da1b26ae470>]]
variable[lineno] assign[=] call[name[p].lineno, parameter[constant[2]]]
variable[base] assign[=] call[name[make_number], parameter[name[OPTIONS].string_base.value, name[lineno], call[name[_TYPE], parameter[name[gl].STR_INDEX_TYPE]]]]
call[name[p]][constant[0]] assign[=] call[name[make_sentence], parameter[constant[LETSUBSTR], name[entry], call[name[make_binary], parameter[name[lineno], constant[MINUS], call[name[substr]][constant[0]], name[base]]], call[name[make_binary], parameter[name[lineno], constant[MINUS], call[name[substr]][constant[1]], name[base]]], call[name[p]][constant[5]]]]
|
keyword[def] identifier[p_substr_assignment] ( identifier[p] ):
literal[string]
keyword[if] identifier[p] [ literal[int] ] keyword[is] keyword[None] keyword[or] identifier[p] [ literal[int] ] keyword[is] keyword[None] :
keyword[return]
identifier[p] [ literal[int] ]= keyword[None]
identifier[entry] = identifier[SYMBOL_TABLE] . identifier[access_call] ( identifier[p] [ literal[int] ], identifier[p] . identifier[lineno] ( literal[int] ))
keyword[if] identifier[entry] keyword[is] keyword[None] :
keyword[return]
keyword[if] identifier[entry] . identifier[class_] == identifier[CLASS] . identifier[unknown] :
identifier[entry] . identifier[class_] = identifier[CLASS] . identifier[var]
keyword[assert] identifier[entry] . identifier[class_] == identifier[CLASS] . identifier[var] keyword[and] identifier[entry] . identifier[type_] == identifier[TYPE] . identifier[string]
keyword[if] identifier[p] [ literal[int] ]. identifier[type_] != identifier[TYPE] . identifier[string] :
identifier[api] . identifier[errmsg] . identifier[syntax_error_expected_string] ( identifier[p] . identifier[lineno] ( literal[int] ), identifier[p] [ literal[int] ]. identifier[type_] )
keyword[if] identifier[len] ( identifier[p] [ literal[int] ])> literal[int] :
identifier[syntax_error] ( identifier[p] . identifier[lineno] ( literal[int] ), literal[string] )
keyword[return]
keyword[if] identifier[len] ( identifier[p] [ literal[int] ])== literal[int] :
identifier[substr] =(
identifier[make_typecast] ( identifier[_TYPE] ( identifier[gl] . identifier[STR_INDEX_TYPE] ), identifier[p] [ literal[int] ][ literal[int] ]. identifier[value] , identifier[p] . identifier[lineno] ( literal[int] )),
identifier[make_typecast] ( identifier[_TYPE] ( identifier[gl] . identifier[STR_INDEX_TYPE] ), identifier[p] [ literal[int] ][ literal[int] ]. identifier[value] , identifier[p] . identifier[lineno] ( literal[int] )))
keyword[else] :
identifier[substr] =( identifier[make_typecast] ( identifier[_TYPE] ( identifier[gl] . identifier[STR_INDEX_TYPE] ),
identifier[make_number] ( identifier[gl] . identifier[MIN_STRSLICE_IDX] ,
identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] )),
identifier[p] . identifier[lineno] ( literal[int] )),
identifier[make_typecast] ( identifier[_TYPE] ( identifier[gl] . identifier[STR_INDEX_TYPE] ),
identifier[make_number] ( identifier[gl] . identifier[MAX_STRSLICE_IDX] ,
identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] )),
identifier[p] . identifier[lineno] ( literal[int] )))
identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] )
identifier[base] = identifier[make_number] ( identifier[OPTIONS] . identifier[string_base] . identifier[value] , identifier[lineno] , identifier[_TYPE] ( identifier[gl] . identifier[STR_INDEX_TYPE] ))
identifier[p] [ literal[int] ]= identifier[make_sentence] ( literal[string] , identifier[entry] ,
identifier[make_binary] ( identifier[lineno] , literal[string] , identifier[substr] [ literal[int] ], identifier[base] , identifier[func] = keyword[lambda] identifier[x] , identifier[y] : identifier[x] - identifier[y] ),
identifier[make_binary] ( identifier[lineno] , literal[string] , identifier[substr] [ literal[int] ], identifier[base] , identifier[func] = keyword[lambda] identifier[x] , identifier[y] : identifier[x] - identifier[y] ),
identifier[p] [ literal[int] ])
|
def p_substr_assignment(p):
""" statement : LET ID arg_list EQ expr
"""
if p[3] is None or p[5] is None:
return # There were errors # depends on [control=['if'], data=[]]
p[0] = None
entry = SYMBOL_TABLE.access_call(p[2], p.lineno(2))
if entry is None:
return # depends on [control=['if'], data=[]]
if entry.class_ == CLASS.unknown:
entry.class_ = CLASS.var # depends on [control=['if'], data=[]]
assert entry.class_ == CLASS.var and entry.type_ == TYPE.string
if p[5].type_ != TYPE.string:
api.errmsg.syntax_error_expected_string(p.lineno(4), p[5].type_) # depends on [control=['if'], data=[]]
if len(p[3]) > 1:
syntax_error(p.lineno(2), 'Accessing string with too many indexes. Expected only one.')
return # depends on [control=['if'], data=[]]
if len(p[3]) == 1:
substr = (make_typecast(_TYPE(gl.STR_INDEX_TYPE), p[3][0].value, p.lineno(2)), make_typecast(_TYPE(gl.STR_INDEX_TYPE), p[3][0].value, p.lineno(2))) # depends on [control=['if'], data=[]]
else:
substr = (make_typecast(_TYPE(gl.STR_INDEX_TYPE), make_number(gl.MIN_STRSLICE_IDX, lineno=p.lineno(2)), p.lineno(2)), make_typecast(_TYPE(gl.STR_INDEX_TYPE), make_number(gl.MAX_STRSLICE_IDX, lineno=p.lineno(2)), p.lineno(2)))
lineno = p.lineno(2)
base = make_number(OPTIONS.string_base.value, lineno, _TYPE(gl.STR_INDEX_TYPE))
p[0] = make_sentence('LETSUBSTR', entry, make_binary(lineno, 'MINUS', substr[0], base, func=lambda x, y: x - y), make_binary(lineno, 'MINUS', substr[1], base, func=lambda x, y: x - y), p[5])
|
def load_riskmodel(self):
# to be called before read_exposure
# NB: this is called even if there is no risk model
"""
Read the risk model and set the attribute .riskmodel.
The riskmodel can be empty for hazard calculations.
Save the loss ratios (if any) in the datastore.
"""
logging.info('Reading the risk model if present')
self.riskmodel = readinput.get_risk_model(self.oqparam)
if not self.riskmodel:
parent = self.datastore.parent
if 'risk_model' in parent:
self.riskmodel = riskinput.CompositeRiskModel.read(parent)
return
if self.oqparam.ground_motion_fields and not self.oqparam.imtls:
raise InvalidFile('No intensity_measure_types specified in %s' %
self.oqparam.inputs['job_ini'])
self.save_params()
|
def function[load_riskmodel, parameter[self]]:
constant[
Read the risk model and set the attribute .riskmodel.
The riskmodel can be empty for hazard calculations.
Save the loss ratios (if any) in the datastore.
]
call[name[logging].info, parameter[constant[Reading the risk model if present]]]
name[self].riskmodel assign[=] call[name[readinput].get_risk_model, parameter[name[self].oqparam]]
if <ast.UnaryOp object at 0x7da20cabfe50> begin[:]
variable[parent] assign[=] name[self].datastore.parent
if compare[constant[risk_model] in name[parent]] begin[:]
name[self].riskmodel assign[=] call[name[riskinput].CompositeRiskModel.read, parameter[name[parent]]]
return[None]
if <ast.BoolOp object at 0x7da18ede70a0> begin[:]
<ast.Raise object at 0x7da18ede6560>
call[name[self].save_params, parameter[]]
|
keyword[def] identifier[load_riskmodel] ( identifier[self] ):
literal[string]
identifier[logging] . identifier[info] ( literal[string] )
identifier[self] . identifier[riskmodel] = identifier[readinput] . identifier[get_risk_model] ( identifier[self] . identifier[oqparam] )
keyword[if] keyword[not] identifier[self] . identifier[riskmodel] :
identifier[parent] = identifier[self] . identifier[datastore] . identifier[parent]
keyword[if] literal[string] keyword[in] identifier[parent] :
identifier[self] . identifier[riskmodel] = identifier[riskinput] . identifier[CompositeRiskModel] . identifier[read] ( identifier[parent] )
keyword[return]
keyword[if] identifier[self] . identifier[oqparam] . identifier[ground_motion_fields] keyword[and] keyword[not] identifier[self] . identifier[oqparam] . identifier[imtls] :
keyword[raise] identifier[InvalidFile] ( literal[string] %
identifier[self] . identifier[oqparam] . identifier[inputs] [ literal[string] ])
identifier[self] . identifier[save_params] ()
|
def load_riskmodel(self):
# to be called before read_exposure
# NB: this is called even if there is no risk model
'\n Read the risk model and set the attribute .riskmodel.\n The riskmodel can be empty for hazard calculations.\n Save the loss ratios (if any) in the datastore.\n '
logging.info('Reading the risk model if present')
self.riskmodel = readinput.get_risk_model(self.oqparam)
if not self.riskmodel:
parent = self.datastore.parent
if 'risk_model' in parent:
self.riskmodel = riskinput.CompositeRiskModel.read(parent) # depends on [control=['if'], data=['parent']]
return # depends on [control=['if'], data=[]]
if self.oqparam.ground_motion_fields and (not self.oqparam.imtls):
raise InvalidFile('No intensity_measure_types specified in %s' % self.oqparam.inputs['job_ini']) # depends on [control=['if'], data=[]]
self.save_params()
|
def biomaRtTOkegg(df):
"""
Transforms a pandas dataframe with the columns 'ensembl_gene_id','kegg_enzyme'
to dataframe ready for use in ...
:param df: a pandas dataframe with the following columns: 'ensembl_gene_id','kegg_enzyme'
:returns: a pandas dataframe with the following columns: 'ensembl_gene_id','kegg_enzyme'
"""
df=df.dropna()
ECcols=df.columns.tolist()
df.reset_index(inplace=True,drop=True)
# field = ECsb[['kegg_enzyme']]
field = pd.DataFrame(df['kegg_enzyme'].str.split('+',1).tolist())[1]
field = pd.DataFrame(field)
df=pd.concat([df[['ensembl_gene_id']],field],axis=1)
df.columns=ECcols
df.drop_duplicates(inplace=True)
df.reset_index(inplace=True,drop=True)
plus=df['kegg_enzyme'].tolist()
plus=[ s for s in plus if "+" in s ]
noPlus=df[~df['kegg_enzyme'].isin(plus)]
plus=df[df['kegg_enzyme'].isin(plus)]
noPlus.reset_index(inplace=True, drop=True)
plus.reset_index(inplace=True, drop=True)
for p in range(0,len(plus)):
enz=plus.ix[p]['kegg_enzyme']
enz=enz.split("+")
enz=pd.DataFrame(enz)
enz.colums=['kegg_enzyme']
enz['ensembl_gene_id']=plus.ix[p]['kegg_enzyme']
noPlus=pd.concat([noPlus,enz])
noPlus=noPlus.drop_duplicates()
noPlus=noPlus[['ensembl_gene_id','kegg_enzyme']]
noPlus['fake']='ec:'
noPlus['kegg_enzyme']=noPlus['fake']+noPlus['kegg_enzyme']
noPlus=noPlus[['ensembl_gene_id','kegg_enzyme']]
return noPlus
|
def function[biomaRtTOkegg, parameter[df]]:
constant[
Transforms a pandas dataframe with the columns 'ensembl_gene_id','kegg_enzyme'
to dataframe ready for use in ...
:param df: a pandas dataframe with the following columns: 'ensembl_gene_id','kegg_enzyme'
:returns: a pandas dataframe with the following columns: 'ensembl_gene_id','kegg_enzyme'
]
variable[df] assign[=] call[name[df].dropna, parameter[]]
variable[ECcols] assign[=] call[name[df].columns.tolist, parameter[]]
call[name[df].reset_index, parameter[]]
variable[field] assign[=] call[call[name[pd].DataFrame, parameter[call[call[call[name[df]][constant[kegg_enzyme]].str.split, parameter[constant[+], constant[1]]].tolist, parameter[]]]]][constant[1]]
variable[field] assign[=] call[name[pd].DataFrame, parameter[name[field]]]
variable[df] assign[=] call[name[pd].concat, parameter[list[[<ast.Subscript object at 0x7da18f58ea70>, <ast.Name object at 0x7da18f58d300>]]]]
name[df].columns assign[=] name[ECcols]
call[name[df].drop_duplicates, parameter[]]
call[name[df].reset_index, parameter[]]
variable[plus] assign[=] call[call[name[df]][constant[kegg_enzyme]].tolist, parameter[]]
variable[plus] assign[=] <ast.ListComp object at 0x7da18f58d330>
variable[noPlus] assign[=] call[name[df]][<ast.UnaryOp object at 0x7da18f58ce50>]
variable[plus] assign[=] call[name[df]][call[call[name[df]][constant[kegg_enzyme]].isin, parameter[name[plus]]]]
call[name[noPlus].reset_index, parameter[]]
call[name[plus].reset_index, parameter[]]
for taget[name[p]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[plus]]]]]] begin[:]
variable[enz] assign[=] call[call[name[plus].ix][name[p]]][constant[kegg_enzyme]]
variable[enz] assign[=] call[name[enz].split, parameter[constant[+]]]
variable[enz] assign[=] call[name[pd].DataFrame, parameter[name[enz]]]
name[enz].colums assign[=] list[[<ast.Constant object at 0x7da20e9b2080>]]
call[name[enz]][constant[ensembl_gene_id]] assign[=] call[call[name[plus].ix][name[p]]][constant[kegg_enzyme]]
variable[noPlus] assign[=] call[name[pd].concat, parameter[list[[<ast.Name object at 0x7da207f02bc0>, <ast.Name object at 0x7da207f03cd0>]]]]
variable[noPlus] assign[=] call[name[noPlus].drop_duplicates, parameter[]]
variable[noPlus] assign[=] call[name[noPlus]][list[[<ast.Constant object at 0x7da207f01990>, <ast.Constant object at 0x7da207f00790>]]]
call[name[noPlus]][constant[fake]] assign[=] constant[ec:]
call[name[noPlus]][constant[kegg_enzyme]] assign[=] binary_operation[call[name[noPlus]][constant[fake]] + call[name[noPlus]][constant[kegg_enzyme]]]
variable[noPlus] assign[=] call[name[noPlus]][list[[<ast.Constant object at 0x7da18bccb430>, <ast.Constant object at 0x7da18bcc93f0>]]]
return[name[noPlus]]
|
keyword[def] identifier[biomaRtTOkegg] ( identifier[df] ):
literal[string]
identifier[df] = identifier[df] . identifier[dropna] ()
identifier[ECcols] = identifier[df] . identifier[columns] . identifier[tolist] ()
identifier[df] . identifier[reset_index] ( identifier[inplace] = keyword[True] , identifier[drop] = keyword[True] )
identifier[field] = identifier[pd] . identifier[DataFrame] ( identifier[df] [ literal[string] ]. identifier[str] . identifier[split] ( literal[string] , literal[int] ). identifier[tolist] ())[ literal[int] ]
identifier[field] = identifier[pd] . identifier[DataFrame] ( identifier[field] )
identifier[df] = identifier[pd] . identifier[concat] ([ identifier[df] [[ literal[string] ]], identifier[field] ], identifier[axis] = literal[int] )
identifier[df] . identifier[columns] = identifier[ECcols]
identifier[df] . identifier[drop_duplicates] ( identifier[inplace] = keyword[True] )
identifier[df] . identifier[reset_index] ( identifier[inplace] = keyword[True] , identifier[drop] = keyword[True] )
identifier[plus] = identifier[df] [ literal[string] ]. identifier[tolist] ()
identifier[plus] =[ identifier[s] keyword[for] identifier[s] keyword[in] identifier[plus] keyword[if] literal[string] keyword[in] identifier[s] ]
identifier[noPlus] = identifier[df] [~ identifier[df] [ literal[string] ]. identifier[isin] ( identifier[plus] )]
identifier[plus] = identifier[df] [ identifier[df] [ literal[string] ]. identifier[isin] ( identifier[plus] )]
identifier[noPlus] . identifier[reset_index] ( identifier[inplace] = keyword[True] , identifier[drop] = keyword[True] )
identifier[plus] . identifier[reset_index] ( identifier[inplace] = keyword[True] , identifier[drop] = keyword[True] )
keyword[for] identifier[p] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[plus] )):
identifier[enz] = identifier[plus] . identifier[ix] [ identifier[p] ][ literal[string] ]
identifier[enz] = identifier[enz] . identifier[split] ( literal[string] )
identifier[enz] = identifier[pd] . identifier[DataFrame] ( identifier[enz] )
identifier[enz] . identifier[colums] =[ literal[string] ]
identifier[enz] [ literal[string] ]= identifier[plus] . identifier[ix] [ identifier[p] ][ literal[string] ]
identifier[noPlus] = identifier[pd] . identifier[concat] ([ identifier[noPlus] , identifier[enz] ])
identifier[noPlus] = identifier[noPlus] . identifier[drop_duplicates] ()
identifier[noPlus] = identifier[noPlus] [[ literal[string] , literal[string] ]]
identifier[noPlus] [ literal[string] ]= literal[string]
identifier[noPlus] [ literal[string] ]= identifier[noPlus] [ literal[string] ]+ identifier[noPlus] [ literal[string] ]
identifier[noPlus] = identifier[noPlus] [[ literal[string] , literal[string] ]]
keyword[return] identifier[noPlus]
|
def biomaRtTOkegg(df):
"""
Transforms a pandas dataframe with the columns 'ensembl_gene_id','kegg_enzyme'
to dataframe ready for use in ...
:param df: a pandas dataframe with the following columns: 'ensembl_gene_id','kegg_enzyme'
:returns: a pandas dataframe with the following columns: 'ensembl_gene_id','kegg_enzyme'
"""
df = df.dropna()
ECcols = df.columns.tolist()
df.reset_index(inplace=True, drop=True)
# field = ECsb[['kegg_enzyme']]
field = pd.DataFrame(df['kegg_enzyme'].str.split('+', 1).tolist())[1]
field = pd.DataFrame(field)
df = pd.concat([df[['ensembl_gene_id']], field], axis=1)
df.columns = ECcols
df.drop_duplicates(inplace=True)
df.reset_index(inplace=True, drop=True)
plus = df['kegg_enzyme'].tolist()
plus = [s for s in plus if '+' in s]
noPlus = df[~df['kegg_enzyme'].isin(plus)]
plus = df[df['kegg_enzyme'].isin(plus)]
noPlus.reset_index(inplace=True, drop=True)
plus.reset_index(inplace=True, drop=True)
for p in range(0, len(plus)):
enz = plus.ix[p]['kegg_enzyme']
enz = enz.split('+')
enz = pd.DataFrame(enz)
enz.colums = ['kegg_enzyme']
enz['ensembl_gene_id'] = plus.ix[p]['kegg_enzyme']
noPlus = pd.concat([noPlus, enz]) # depends on [control=['for'], data=['p']]
noPlus = noPlus.drop_duplicates()
noPlus = noPlus[['ensembl_gene_id', 'kegg_enzyme']]
noPlus['fake'] = 'ec:'
noPlus['kegg_enzyme'] = noPlus['fake'] + noPlus['kegg_enzyme']
noPlus = noPlus[['ensembl_gene_id', 'kegg_enzyme']]
return noPlus
|
def link_to_dashboard(self, dashboard_id=None, panel_id=None, **kwargs):
r"""
Links the sensor to a dashboard.
:param dashboard_id: Id of the dashboard to link to.
Enter a * if the sensor should be linked to all dashboards.
:type dashboard_id: ``str``
:param panel_id: Id of the panel to link to.
This is the id of a panel you have added your self to a dashboard or one of the
system panels *sys-header*, *header* or *footer*
:type panel_id: ``str``
:Keyword Arguments:
* **link_to_header** (``str``) -- Link this input to header of the panel.
* **label_icon** (``str``) -- Icon that should be displayed together with label. All Font Awesome icons are valid just enter the name of the icon without *fa-*
* **label** (``str``) -- Label text, default value is the name of the sensor.
* **flat** (``bool``) -- Flat look and feel.
* **inline** (``bool``) -- Display value, sparkline and label in its actual size otherwise it occupys the entire with of the panel
* **type** (``str``) -- One of the following values *radial_gauge*, *vertical_gauge*, *horizontal_gauge*, *chart* or *value*.
* **show_sparkline** (``bool``) -- Show a sparkline next to the value.
* **icon** (``bool``) -- Icon to show. All Font Awesome icons are valid just enter the name of the icon without *fa-*.
* **show_value** (``bool``) -- Show the numeric value and unit.
* **label** (``str``) -- Label to show default is the name of the sensor.
"""
if self._dimensions == 1:
self._sensor_value.link_to_dashboard(dashboard_id, panel_id, **kwargs)
else:
for dimension in range(0, self._dimensions):
self._sub_sensors[dimension].link_to_dashboard(dashboard_id, panel_id, **kwargs)
|
def function[link_to_dashboard, parameter[self, dashboard_id, panel_id]]:
constant[
Links the sensor to a dashboard.
:param dashboard_id: Id of the dashboard to link to.
Enter a * if the sensor should be linked to all dashboards.
:type dashboard_id: ``str``
:param panel_id: Id of the panel to link to.
This is the id of a panel you have added your self to a dashboard or one of the
system panels *sys-header*, *header* or *footer*
:type panel_id: ``str``
:Keyword Arguments:
* **link_to_header** (``str``) -- Link this input to header of the panel.
* **label_icon** (``str``) -- Icon that should be displayed together with label. All Font Awesome icons are valid just enter the name of the icon without *fa-*
* **label** (``str``) -- Label text, default value is the name of the sensor.
* **flat** (``bool``) -- Flat look and feel.
* **inline** (``bool``) -- Display value, sparkline and label in its actual size otherwise it occupys the entire with of the panel
* **type** (``str``) -- One of the following values *radial_gauge*, *vertical_gauge*, *horizontal_gauge*, *chart* or *value*.
* **show_sparkline** (``bool``) -- Show a sparkline next to the value.
* **icon** (``bool``) -- Icon to show. All Font Awesome icons are valid just enter the name of the icon without *fa-*.
* **show_value** (``bool``) -- Show the numeric value and unit.
* **label** (``str``) -- Label to show default is the name of the sensor.
]
if compare[name[self]._dimensions equal[==] constant[1]] begin[:]
call[name[self]._sensor_value.link_to_dashboard, parameter[name[dashboard_id], name[panel_id]]]
|
keyword[def] identifier[link_to_dashboard] ( identifier[self] , identifier[dashboard_id] = keyword[None] , identifier[panel_id] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[self] . identifier[_dimensions] == literal[int] :
identifier[self] . identifier[_sensor_value] . identifier[link_to_dashboard] ( identifier[dashboard_id] , identifier[panel_id] ,** identifier[kwargs] )
keyword[else] :
keyword[for] identifier[dimension] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[_dimensions] ):
identifier[self] . identifier[_sub_sensors] [ identifier[dimension] ]. identifier[link_to_dashboard] ( identifier[dashboard_id] , identifier[panel_id] ,** identifier[kwargs] )
|
def link_to_dashboard(self, dashboard_id=None, panel_id=None, **kwargs):
"""
Links the sensor to a dashboard.
:param dashboard_id: Id of the dashboard to link to.
Enter a * if the sensor should be linked to all dashboards.
:type dashboard_id: ``str``
:param panel_id: Id of the panel to link to.
This is the id of a panel you have added your self to a dashboard or one of the
system panels *sys-header*, *header* or *footer*
:type panel_id: ``str``
:Keyword Arguments:
* **link_to_header** (``str``) -- Link this input to header of the panel.
* **label_icon** (``str``) -- Icon that should be displayed together with label. All Font Awesome icons are valid just enter the name of the icon without *fa-*
* **label** (``str``) -- Label text, default value is the name of the sensor.
* **flat** (``bool``) -- Flat look and feel.
* **inline** (``bool``) -- Display value, sparkline and label in its actual size otherwise it occupys the entire with of the panel
* **type** (``str``) -- One of the following values *radial_gauge*, *vertical_gauge*, *horizontal_gauge*, *chart* or *value*.
* **show_sparkline** (``bool``) -- Show a sparkline next to the value.
* **icon** (``bool``) -- Icon to show. All Font Awesome icons are valid just enter the name of the icon without *fa-*.
* **show_value** (``bool``) -- Show the numeric value and unit.
* **label** (``str``) -- Label to show default is the name of the sensor.
"""
if self._dimensions == 1:
self._sensor_value.link_to_dashboard(dashboard_id, panel_id, **kwargs) # depends on [control=['if'], data=[]]
else:
for dimension in range(0, self._dimensions):
self._sub_sensors[dimension].link_to_dashboard(dashboard_id, panel_id, **kwargs) # depends on [control=['for'], data=['dimension']]
|
def initial_validation(request, prefix):
"""
Returns the related model instance and post data to use in the
comment/rating views below.
Both comments and ratings have a ``prefix_ACCOUNT_REQUIRED``
setting. If this is ``True`` and the user is unauthenticated, we
store their post data in their session, and redirect to login with
the view's url (also defined by the prefix arg) as the ``next``
param. We can then check the session data once they log in,
and complete the action authenticated.
On successful post, we pass the related object and post data back,
which may have come from the session, for each of the comments and
ratings view functions to deal with as needed.
"""
post_data = request.POST
login_required_setting_name = prefix.upper() + "S_ACCOUNT_REQUIRED"
posted_session_key = "unauthenticated_" + prefix
redirect_url = ""
if getattr(settings, login_required_setting_name, False):
if not request.user.is_authenticated():
request.session[posted_session_key] = request.POST
error(request, _("You must be logged in. Please log in or "
"sign up to complete this action."))
redirect_url = "%s?next=%s" % (settings.LOGIN_URL, reverse(prefix))
elif posted_session_key in request.session:
post_data = request.session.pop(posted_session_key)
if not redirect_url:
model_data = post_data.get("content_type", "").split(".", 1)
if len(model_data) != 2:
return HttpResponseBadRequest()
try:
model = apps.get_model(*model_data)
obj = model.objects.get(id=post_data.get("object_pk", None))
except (TypeError, ObjectDoesNotExist, LookupError):
redirect_url = "/"
if redirect_url:
if request.is_ajax():
return HttpResponse(dumps({"location": redirect_url}))
else:
return redirect(redirect_url)
return obj, post_data
|
def function[initial_validation, parameter[request, prefix]]:
constant[
Returns the related model instance and post data to use in the
comment/rating views below.
Both comments and ratings have a ``prefix_ACCOUNT_REQUIRED``
setting. If this is ``True`` and the user is unauthenticated, we
store their post data in their session, and redirect to login with
the view's url (also defined by the prefix arg) as the ``next``
param. We can then check the session data once they log in,
and complete the action authenticated.
On successful post, we pass the related object and post data back,
which may have come from the session, for each of the comments and
ratings view functions to deal with as needed.
]
variable[post_data] assign[=] name[request].POST
variable[login_required_setting_name] assign[=] binary_operation[call[name[prefix].upper, parameter[]] + constant[S_ACCOUNT_REQUIRED]]
variable[posted_session_key] assign[=] binary_operation[constant[unauthenticated_] + name[prefix]]
variable[redirect_url] assign[=] constant[]
if call[name[getattr], parameter[name[settings], name[login_required_setting_name], constant[False]]] begin[:]
if <ast.UnaryOp object at 0x7da1b1365ea0> begin[:]
call[name[request].session][name[posted_session_key]] assign[=] name[request].POST
call[name[error], parameter[name[request], call[name[_], parameter[constant[You must be logged in. Please log in or sign up to complete this action.]]]]]
variable[redirect_url] assign[=] binary_operation[constant[%s?next=%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b1366860>, <ast.Call object at 0x7da1b1367850>]]]
if <ast.UnaryOp object at 0x7da1b15ac640> begin[:]
variable[model_data] assign[=] call[call[name[post_data].get, parameter[constant[content_type], constant[]]].split, parameter[constant[.], constant[1]]]
if compare[call[name[len], parameter[name[model_data]]] not_equal[!=] constant[2]] begin[:]
return[call[name[HttpResponseBadRequest], parameter[]]]
<ast.Try object at 0x7da204623d60>
if name[redirect_url] begin[:]
if call[name[request].is_ajax, parameter[]] begin[:]
return[call[name[HttpResponse], parameter[call[name[dumps], parameter[dictionary[[<ast.Constant object at 0x7da204621300>], [<ast.Name object at 0x7da204622350>]]]]]]]
return[tuple[[<ast.Name object at 0x7da2046234f0>, <ast.Name object at 0x7da204623430>]]]
|
keyword[def] identifier[initial_validation] ( identifier[request] , identifier[prefix] ):
literal[string]
identifier[post_data] = identifier[request] . identifier[POST]
identifier[login_required_setting_name] = identifier[prefix] . identifier[upper] ()+ literal[string]
identifier[posted_session_key] = literal[string] + identifier[prefix]
identifier[redirect_url] = literal[string]
keyword[if] identifier[getattr] ( identifier[settings] , identifier[login_required_setting_name] , keyword[False] ):
keyword[if] keyword[not] identifier[request] . identifier[user] . identifier[is_authenticated] ():
identifier[request] . identifier[session] [ identifier[posted_session_key] ]= identifier[request] . identifier[POST]
identifier[error] ( identifier[request] , identifier[_] ( literal[string]
literal[string] ))
identifier[redirect_url] = literal[string] %( identifier[settings] . identifier[LOGIN_URL] , identifier[reverse] ( identifier[prefix] ))
keyword[elif] identifier[posted_session_key] keyword[in] identifier[request] . identifier[session] :
identifier[post_data] = identifier[request] . identifier[session] . identifier[pop] ( identifier[posted_session_key] )
keyword[if] keyword[not] identifier[redirect_url] :
identifier[model_data] = identifier[post_data] . identifier[get] ( literal[string] , literal[string] ). identifier[split] ( literal[string] , literal[int] )
keyword[if] identifier[len] ( identifier[model_data] )!= literal[int] :
keyword[return] identifier[HttpResponseBadRequest] ()
keyword[try] :
identifier[model] = identifier[apps] . identifier[get_model] (* identifier[model_data] )
identifier[obj] = identifier[model] . identifier[objects] . identifier[get] ( identifier[id] = identifier[post_data] . identifier[get] ( literal[string] , keyword[None] ))
keyword[except] ( identifier[TypeError] , identifier[ObjectDoesNotExist] , identifier[LookupError] ):
identifier[redirect_url] = literal[string]
keyword[if] identifier[redirect_url] :
keyword[if] identifier[request] . identifier[is_ajax] ():
keyword[return] identifier[HttpResponse] ( identifier[dumps] ({ literal[string] : identifier[redirect_url] }))
keyword[else] :
keyword[return] identifier[redirect] ( identifier[redirect_url] )
keyword[return] identifier[obj] , identifier[post_data]
|
def initial_validation(request, prefix):
"""
Returns the related model instance and post data to use in the
comment/rating views below.
Both comments and ratings have a ``prefix_ACCOUNT_REQUIRED``
setting. If this is ``True`` and the user is unauthenticated, we
store their post data in their session, and redirect to login with
the view's url (also defined by the prefix arg) as the ``next``
param. We can then check the session data once they log in,
and complete the action authenticated.
On successful post, we pass the related object and post data back,
which may have come from the session, for each of the comments and
ratings view functions to deal with as needed.
"""
post_data = request.POST
login_required_setting_name = prefix.upper() + 'S_ACCOUNT_REQUIRED'
posted_session_key = 'unauthenticated_' + prefix
redirect_url = ''
if getattr(settings, login_required_setting_name, False):
if not request.user.is_authenticated():
request.session[posted_session_key] = request.POST
error(request, _('You must be logged in. Please log in or sign up to complete this action.'))
redirect_url = '%s?next=%s' % (settings.LOGIN_URL, reverse(prefix)) # depends on [control=['if'], data=[]]
elif posted_session_key in request.session:
post_data = request.session.pop(posted_session_key) # depends on [control=['if'], data=['posted_session_key']] # depends on [control=['if'], data=[]]
if not redirect_url:
model_data = post_data.get('content_type', '').split('.', 1)
if len(model_data) != 2:
return HttpResponseBadRequest() # depends on [control=['if'], data=[]]
try:
model = apps.get_model(*model_data)
obj = model.objects.get(id=post_data.get('object_pk', None)) # depends on [control=['try'], data=[]]
except (TypeError, ObjectDoesNotExist, LookupError):
redirect_url = '/' # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if redirect_url:
if request.is_ajax():
return HttpResponse(dumps({'location': redirect_url})) # depends on [control=['if'], data=[]]
else:
return redirect(redirect_url) # depends on [control=['if'], data=[]]
return (obj, post_data)
|
def make_neurites(rdw):
'''Build neurite trees from a raw data wrapper'''
post_action = _NEURITE_ACTION[rdw.fmt]
trunks = rdw.neurite_root_section_ids()
if not trunks:
return [], []
# One pass over sections to build nodes
nodes = tuple(Section(section_id=i,
points=rdw.data_block[sec.ids],
section_type=_TREE_TYPES[sec.ntype])
for i, sec in enumerate(rdw.sections))
# One pass over nodes to connect children to parents
for i, node in enumerate(nodes):
parent_id = rdw.sections[i].pid
parent_type = nodes[parent_id].type
# only connect neurites
if parent_id != ROOT_ID and parent_type != NeuriteType.soma:
nodes[parent_id].add_child(node)
neurites = tuple(Neurite(nodes[i]) for i in trunks)
if post_action is not None:
for n in neurites:
post_action(n.root_node)
return neurites, nodes
|
def function[make_neurites, parameter[rdw]]:
constant[Build neurite trees from a raw data wrapper]
variable[post_action] assign[=] call[name[_NEURITE_ACTION]][name[rdw].fmt]
variable[trunks] assign[=] call[name[rdw].neurite_root_section_ids, parameter[]]
if <ast.UnaryOp object at 0x7da20e9b3730> begin[:]
return[tuple[[<ast.List object at 0x7da20e9b3b20>, <ast.List object at 0x7da20e9b2d40>]]]
variable[nodes] assign[=] call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da20e9b25c0>]]
for taget[tuple[[<ast.Name object at 0x7da20e9b2020>, <ast.Name object at 0x7da20e9b3f10>]]] in starred[call[name[enumerate], parameter[name[nodes]]]] begin[:]
variable[parent_id] assign[=] call[name[rdw].sections][name[i]].pid
variable[parent_type] assign[=] call[name[nodes]][name[parent_id]].type
if <ast.BoolOp object at 0x7da20e962470> begin[:]
call[call[name[nodes]][name[parent_id]].add_child, parameter[name[node]]]
variable[neurites] assign[=] call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da20e960ca0>]]
if compare[name[post_action] is_not constant[None]] begin[:]
for taget[name[n]] in starred[name[neurites]] begin[:]
call[name[post_action], parameter[name[n].root_node]]
return[tuple[[<ast.Name object at 0x7da20e9633a0>, <ast.Name object at 0x7da20e960730>]]]
|
keyword[def] identifier[make_neurites] ( identifier[rdw] ):
literal[string]
identifier[post_action] = identifier[_NEURITE_ACTION] [ identifier[rdw] . identifier[fmt] ]
identifier[trunks] = identifier[rdw] . identifier[neurite_root_section_ids] ()
keyword[if] keyword[not] identifier[trunks] :
keyword[return] [],[]
identifier[nodes] = identifier[tuple] ( identifier[Section] ( identifier[section_id] = identifier[i] ,
identifier[points] = identifier[rdw] . identifier[data_block] [ identifier[sec] . identifier[ids] ],
identifier[section_type] = identifier[_TREE_TYPES] [ identifier[sec] . identifier[ntype] ])
keyword[for] identifier[i] , identifier[sec] keyword[in] identifier[enumerate] ( identifier[rdw] . identifier[sections] ))
keyword[for] identifier[i] , identifier[node] keyword[in] identifier[enumerate] ( identifier[nodes] ):
identifier[parent_id] = identifier[rdw] . identifier[sections] [ identifier[i] ]. identifier[pid]
identifier[parent_type] = identifier[nodes] [ identifier[parent_id] ]. identifier[type]
keyword[if] identifier[parent_id] != identifier[ROOT_ID] keyword[and] identifier[parent_type] != identifier[NeuriteType] . identifier[soma] :
identifier[nodes] [ identifier[parent_id] ]. identifier[add_child] ( identifier[node] )
identifier[neurites] = identifier[tuple] ( identifier[Neurite] ( identifier[nodes] [ identifier[i] ]) keyword[for] identifier[i] keyword[in] identifier[trunks] )
keyword[if] identifier[post_action] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[n] keyword[in] identifier[neurites] :
identifier[post_action] ( identifier[n] . identifier[root_node] )
keyword[return] identifier[neurites] , identifier[nodes]
|
def make_neurites(rdw):
"""Build neurite trees from a raw data wrapper"""
post_action = _NEURITE_ACTION[rdw.fmt]
trunks = rdw.neurite_root_section_ids()
if not trunks:
return ([], []) # depends on [control=['if'], data=[]]
# One pass over sections to build nodes
nodes = tuple((Section(section_id=i, points=rdw.data_block[sec.ids], section_type=_TREE_TYPES[sec.ntype]) for (i, sec) in enumerate(rdw.sections)))
# One pass over nodes to connect children to parents
for (i, node) in enumerate(nodes):
parent_id = rdw.sections[i].pid
parent_type = nodes[parent_id].type
# only connect neurites
if parent_id != ROOT_ID and parent_type != NeuriteType.soma:
nodes[parent_id].add_child(node) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
neurites = tuple((Neurite(nodes[i]) for i in trunks))
if post_action is not None:
for n in neurites:
post_action(n.root_node) # depends on [control=['for'], data=['n']] # depends on [control=['if'], data=['post_action']]
return (neurites, nodes)
|
def sum2diag(A, D, out=None):
r"""Add values ``D`` to the diagonal of matrix ``A``.
Args:
A (array_like): Left-hand side.
D (array_like or float): Values to add.
out (:class:`numpy.ndarray`, optional): copy result to.
Returns:
:class:`numpy.ndarray`: Resulting matrix.
"""
A = asarray(A, float)
D = asarray(D, float)
if out is None:
out = copy(A)
else:
copyto(out, A)
einsum("ii->i", out)[:] += D
return out
|
def function[sum2diag, parameter[A, D, out]]:
constant[Add values ``D`` to the diagonal of matrix ``A``.
Args:
A (array_like): Left-hand side.
D (array_like or float): Values to add.
out (:class:`numpy.ndarray`, optional): copy result to.
Returns:
:class:`numpy.ndarray`: Resulting matrix.
]
variable[A] assign[=] call[name[asarray], parameter[name[A], name[float]]]
variable[D] assign[=] call[name[asarray], parameter[name[D], name[float]]]
if compare[name[out] is constant[None]] begin[:]
variable[out] assign[=] call[name[copy], parameter[name[A]]]
<ast.AugAssign object at 0x7da1b1a766b0>
return[name[out]]
|
keyword[def] identifier[sum2diag] ( identifier[A] , identifier[D] , identifier[out] = keyword[None] ):
literal[string]
identifier[A] = identifier[asarray] ( identifier[A] , identifier[float] )
identifier[D] = identifier[asarray] ( identifier[D] , identifier[float] )
keyword[if] identifier[out] keyword[is] keyword[None] :
identifier[out] = identifier[copy] ( identifier[A] )
keyword[else] :
identifier[copyto] ( identifier[out] , identifier[A] )
identifier[einsum] ( literal[string] , identifier[out] )[:]+= identifier[D]
keyword[return] identifier[out]
|
def sum2diag(A, D, out=None):
"""Add values ``D`` to the diagonal of matrix ``A``.
Args:
A (array_like): Left-hand side.
D (array_like or float): Values to add.
out (:class:`numpy.ndarray`, optional): copy result to.
Returns:
:class:`numpy.ndarray`: Resulting matrix.
"""
A = asarray(A, float)
D = asarray(D, float)
if out is None:
out = copy(A) # depends on [control=['if'], data=['out']]
else:
copyto(out, A)
einsum('ii->i', out)[:] += D
return out
|
def handle_resource_update_success(resource):
"""
Recover resource if its state is ERRED and clear error message.
"""
update_fields = []
if resource.state == resource.States.ERRED:
resource.recover()
update_fields.append('state')
if resource.state in (resource.States.UPDATING, resource.States.CREATING):
resource.set_ok()
update_fields.append('state')
if resource.error_message:
resource.error_message = ''
update_fields.append('error_message')
if update_fields:
resource.save(update_fields=update_fields)
logger.warning('%s %s (PK: %s) was successfully updated.' % (
resource.__class__.__name__, resource, resource.pk))
|
def function[handle_resource_update_success, parameter[resource]]:
constant[
Recover resource if its state is ERRED and clear error message.
]
variable[update_fields] assign[=] list[[]]
if compare[name[resource].state equal[==] name[resource].States.ERRED] begin[:]
call[name[resource].recover, parameter[]]
call[name[update_fields].append, parameter[constant[state]]]
if compare[name[resource].state in tuple[[<ast.Attribute object at 0x7da1b0fe40a0>, <ast.Attribute object at 0x7da1b0fe4760>]]] begin[:]
call[name[resource].set_ok, parameter[]]
call[name[update_fields].append, parameter[constant[state]]]
if name[resource].error_message begin[:]
name[resource].error_message assign[=] constant[]
call[name[update_fields].append, parameter[constant[error_message]]]
if name[update_fields] begin[:]
call[name[resource].save, parameter[]]
call[name[logger].warning, parameter[binary_operation[constant[%s %s (PK: %s) was successfully updated.] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b0fe5900>, <ast.Name object at 0x7da1b0fe74f0>, <ast.Attribute object at 0x7da1b0fe5210>]]]]]
|
keyword[def] identifier[handle_resource_update_success] ( identifier[resource] ):
literal[string]
identifier[update_fields] =[]
keyword[if] identifier[resource] . identifier[state] == identifier[resource] . identifier[States] . identifier[ERRED] :
identifier[resource] . identifier[recover] ()
identifier[update_fields] . identifier[append] ( literal[string] )
keyword[if] identifier[resource] . identifier[state] keyword[in] ( identifier[resource] . identifier[States] . identifier[UPDATING] , identifier[resource] . identifier[States] . identifier[CREATING] ):
identifier[resource] . identifier[set_ok] ()
identifier[update_fields] . identifier[append] ( literal[string] )
keyword[if] identifier[resource] . identifier[error_message] :
identifier[resource] . identifier[error_message] = literal[string]
identifier[update_fields] . identifier[append] ( literal[string] )
keyword[if] identifier[update_fields] :
identifier[resource] . identifier[save] ( identifier[update_fields] = identifier[update_fields] )
identifier[logger] . identifier[warning] ( literal[string] %(
identifier[resource] . identifier[__class__] . identifier[__name__] , identifier[resource] , identifier[resource] . identifier[pk] ))
|
def handle_resource_update_success(resource):
"""
Recover resource if its state is ERRED and clear error message.
"""
update_fields = []
if resource.state == resource.States.ERRED:
resource.recover()
update_fields.append('state') # depends on [control=['if'], data=[]]
if resource.state in (resource.States.UPDATING, resource.States.CREATING):
resource.set_ok()
update_fields.append('state') # depends on [control=['if'], data=[]]
if resource.error_message:
resource.error_message = ''
update_fields.append('error_message') # depends on [control=['if'], data=[]]
if update_fields:
resource.save(update_fields=update_fields) # depends on [control=['if'], data=[]]
logger.warning('%s %s (PK: %s) was successfully updated.' % (resource.__class__.__name__, resource, resource.pk))
|
def FindAnomalies(self):
"""Identify anomalies in the password/shadow and group/gshadow data."""
# Find anomalous group entries.
findings = []
group_entries = {g.gid for g in itervalues(self.groups)}
for gid in set(self.gids) - group_entries:
undefined = ",".join(self.gids.get(gid, []))
findings.append(
"gid %d assigned without /etc/groups entry: %s" % (gid, undefined))
if findings:
yield self._Anomaly("Accounts with invalid gid.", findings)
# Find any shared user IDs.
findings = []
for uid, names in iteritems(self.uids):
if len(names) > 1:
findings.append("uid %d assigned to multiple accounts: %s" %
(uid, ",".join(sorted(names))))
if findings:
yield self._Anomaly("Accounts with shared uid.", findings)
# Find privileged groups with unusual members.
findings = []
root_grp = self.groups.get("root")
if root_grp is not None:
root_members = sorted([m for m in root_grp.members if m != "root"])
if root_members:
findings.append("Accounts in 'root' group: %s" % ",".join(root_members))
if findings:
yield self._Anomaly("Privileged group with unusual members.", findings)
# Find accounts without passwd/shadow entries.
diffs = self.MemberDiff(self.entry, "passwd", self.shadow, "shadow")
if diffs:
yield self._Anomaly("Mismatched passwd and shadow files.", diffs)
|
def function[FindAnomalies, parameter[self]]:
constant[Identify anomalies in the password/shadow and group/gshadow data.]
variable[findings] assign[=] list[[]]
variable[group_entries] assign[=] <ast.SetComp object at 0x7da20cabda80>
for taget[name[gid]] in starred[binary_operation[call[name[set], parameter[name[self].gids]] - name[group_entries]]] begin[:]
variable[undefined] assign[=] call[constant[,].join, parameter[call[name[self].gids.get, parameter[name[gid], list[[]]]]]]
call[name[findings].append, parameter[binary_operation[constant[gid %d assigned without /etc/groups entry: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20cabe0b0>, <ast.Name object at 0x7da20cabe170>]]]]]
if name[findings] begin[:]
<ast.Yield object at 0x7da20cabd120>
variable[findings] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20cabd570>, <ast.Name object at 0x7da20cabf790>]]] in starred[call[name[iteritems], parameter[name[self].uids]]] begin[:]
if compare[call[name[len], parameter[name[names]]] greater[>] constant[1]] begin[:]
call[name[findings].append, parameter[binary_operation[constant[uid %d assigned to multiple accounts: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20cabcb20>, <ast.Call object at 0x7da20cabc3a0>]]]]]
if name[findings] begin[:]
<ast.Yield object at 0x7da20cabf460>
variable[findings] assign[=] list[[]]
variable[root_grp] assign[=] call[name[self].groups.get, parameter[constant[root]]]
if compare[name[root_grp] is_not constant[None]] begin[:]
variable[root_members] assign[=] call[name[sorted], parameter[<ast.ListComp object at 0x7da20cabdf90>]]
if name[root_members] begin[:]
call[name[findings].append, parameter[binary_operation[constant[Accounts in 'root' group: %s] <ast.Mod object at 0x7da2590d6920> call[constant[,].join, parameter[name[root_members]]]]]]
if name[findings] begin[:]
<ast.Yield object at 0x7da1b1b86500>
variable[diffs] assign[=] call[name[self].MemberDiff, parameter[name[self].entry, constant[passwd], name[self].shadow, constant[shadow]]]
if name[diffs] begin[:]
<ast.Yield object at 0x7da1b1b05030>
|
keyword[def] identifier[FindAnomalies] ( identifier[self] ):
literal[string]
identifier[findings] =[]
identifier[group_entries] ={ identifier[g] . identifier[gid] keyword[for] identifier[g] keyword[in] identifier[itervalues] ( identifier[self] . identifier[groups] )}
keyword[for] identifier[gid] keyword[in] identifier[set] ( identifier[self] . identifier[gids] )- identifier[group_entries] :
identifier[undefined] = literal[string] . identifier[join] ( identifier[self] . identifier[gids] . identifier[get] ( identifier[gid] ,[]))
identifier[findings] . identifier[append] (
literal[string] %( identifier[gid] , identifier[undefined] ))
keyword[if] identifier[findings] :
keyword[yield] identifier[self] . identifier[_Anomaly] ( literal[string] , identifier[findings] )
identifier[findings] =[]
keyword[for] identifier[uid] , identifier[names] keyword[in] identifier[iteritems] ( identifier[self] . identifier[uids] ):
keyword[if] identifier[len] ( identifier[names] )> literal[int] :
identifier[findings] . identifier[append] ( literal[string] %
( identifier[uid] , literal[string] . identifier[join] ( identifier[sorted] ( identifier[names] ))))
keyword[if] identifier[findings] :
keyword[yield] identifier[self] . identifier[_Anomaly] ( literal[string] , identifier[findings] )
identifier[findings] =[]
identifier[root_grp] = identifier[self] . identifier[groups] . identifier[get] ( literal[string] )
keyword[if] identifier[root_grp] keyword[is] keyword[not] keyword[None] :
identifier[root_members] = identifier[sorted] ([ identifier[m] keyword[for] identifier[m] keyword[in] identifier[root_grp] . identifier[members] keyword[if] identifier[m] != literal[string] ])
keyword[if] identifier[root_members] :
identifier[findings] . identifier[append] ( literal[string] % literal[string] . identifier[join] ( identifier[root_members] ))
keyword[if] identifier[findings] :
keyword[yield] identifier[self] . identifier[_Anomaly] ( literal[string] , identifier[findings] )
identifier[diffs] = identifier[self] . identifier[MemberDiff] ( identifier[self] . identifier[entry] , literal[string] , identifier[self] . identifier[shadow] , literal[string] )
keyword[if] identifier[diffs] :
keyword[yield] identifier[self] . identifier[_Anomaly] ( literal[string] , identifier[diffs] )
|
def FindAnomalies(self):
"""Identify anomalies in the password/shadow and group/gshadow data."""
# Find anomalous group entries.
findings = []
group_entries = {g.gid for g in itervalues(self.groups)}
for gid in set(self.gids) - group_entries:
undefined = ','.join(self.gids.get(gid, []))
findings.append('gid %d assigned without /etc/groups entry: %s' % (gid, undefined)) # depends on [control=['for'], data=['gid']]
if findings:
yield self._Anomaly('Accounts with invalid gid.', findings) # depends on [control=['if'], data=[]]
# Find any shared user IDs.
findings = []
for (uid, names) in iteritems(self.uids):
if len(names) > 1:
findings.append('uid %d assigned to multiple accounts: %s' % (uid, ','.join(sorted(names)))) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if findings:
yield self._Anomaly('Accounts with shared uid.', findings) # depends on [control=['if'], data=[]]
# Find privileged groups with unusual members.
findings = []
root_grp = self.groups.get('root')
if root_grp is not None:
root_members = sorted([m for m in root_grp.members if m != 'root'])
if root_members:
findings.append("Accounts in 'root' group: %s" % ','.join(root_members)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['root_grp']]
if findings:
yield self._Anomaly('Privileged group with unusual members.', findings) # depends on [control=['if'], data=[]]
# Find accounts without passwd/shadow entries.
diffs = self.MemberDiff(self.entry, 'passwd', self.shadow, 'shadow')
if diffs:
yield self._Anomaly('Mismatched passwd and shadow files.', diffs) # depends on [control=['if'], data=[]]
|
def is_valid_varname(self, name, item):
""" Valid variable name, checked against global context. """
check_valid_varname(name, self._custom_units, self._structs, self._constants, item)
if name in self._globals:
raise VariableDeclarationException(
'Invalid name "%s", previously defined as global.' % name, item
)
return True
|
def function[is_valid_varname, parameter[self, name, item]]:
constant[ Valid variable name, checked against global context. ]
call[name[check_valid_varname], parameter[name[name], name[self]._custom_units, name[self]._structs, name[self]._constants, name[item]]]
if compare[name[name] in name[self]._globals] begin[:]
<ast.Raise object at 0x7da1b1b47160>
return[constant[True]]
|
keyword[def] identifier[is_valid_varname] ( identifier[self] , identifier[name] , identifier[item] ):
literal[string]
identifier[check_valid_varname] ( identifier[name] , identifier[self] . identifier[_custom_units] , identifier[self] . identifier[_structs] , identifier[self] . identifier[_constants] , identifier[item] )
keyword[if] identifier[name] keyword[in] identifier[self] . identifier[_globals] :
keyword[raise] identifier[VariableDeclarationException] (
literal[string] % identifier[name] , identifier[item]
)
keyword[return] keyword[True]
|
def is_valid_varname(self, name, item):
""" Valid variable name, checked against global context. """
check_valid_varname(name, self._custom_units, self._structs, self._constants, item)
if name in self._globals:
raise VariableDeclarationException('Invalid name "%s", previously defined as global.' % name, item) # depends on [control=['if'], data=['name']]
return True
|
def color_replace(image, color):
"""Replace black with other color
:color: custom color (r,g,b,a)
:image: image to replace color
:returns: TODO
"""
pixels = image.load()
size = image.size[0]
for width in range(size):
for height in range(size):
r, g, b, a = pixels[width, height]
if (r, g, b, a) == (0,0,0,255):
pixels[width,height] = color
else:
pixels[width,height] = (r,g,b,color[3])
|
def function[color_replace, parameter[image, color]]:
constant[Replace black with other color
:color: custom color (r,g,b,a)
:image: image to replace color
:returns: TODO
]
variable[pixels] assign[=] call[name[image].load, parameter[]]
variable[size] assign[=] call[name[image].size][constant[0]]
for taget[name[width]] in starred[call[name[range], parameter[name[size]]]] begin[:]
for taget[name[height]] in starred[call[name[range], parameter[name[size]]]] begin[:]
<ast.Tuple object at 0x7da1b0b121d0> assign[=] call[name[pixels]][tuple[[<ast.Name object at 0x7da1b0b13e80>, <ast.Name object at 0x7da1b0b13eb0>]]]
if compare[tuple[[<ast.Name object at 0x7da1b0b10160>, <ast.Name object at 0x7da1b0b12140>, <ast.Name object at 0x7da1b0b12b90>, <ast.Name object at 0x7da1b0b12680>]] equal[==] tuple[[<ast.Constant object at 0x7da1b0b124d0>, <ast.Constant object at 0x7da1b0b10ee0>, <ast.Constant object at 0x7da1b0b12a70>, <ast.Constant object at 0x7da1b0b127d0>]]] begin[:]
call[name[pixels]][tuple[[<ast.Name object at 0x7da1b0b11090>, <ast.Name object at 0x7da1b0b131f0>]]] assign[=] name[color]
|
keyword[def] identifier[color_replace] ( identifier[image] , identifier[color] ):
literal[string]
identifier[pixels] = identifier[image] . identifier[load] ()
identifier[size] = identifier[image] . identifier[size] [ literal[int] ]
keyword[for] identifier[width] keyword[in] identifier[range] ( identifier[size] ):
keyword[for] identifier[height] keyword[in] identifier[range] ( identifier[size] ):
identifier[r] , identifier[g] , identifier[b] , identifier[a] = identifier[pixels] [ identifier[width] , identifier[height] ]
keyword[if] ( identifier[r] , identifier[g] , identifier[b] , identifier[a] )==( literal[int] , literal[int] , literal[int] , literal[int] ):
identifier[pixels] [ identifier[width] , identifier[height] ]= identifier[color]
keyword[else] :
identifier[pixels] [ identifier[width] , identifier[height] ]=( identifier[r] , identifier[g] , identifier[b] , identifier[color] [ literal[int] ])
|
def color_replace(image, color):
"""Replace black with other color
:color: custom color (r,g,b,a)
:image: image to replace color
:returns: TODO
"""
pixels = image.load()
size = image.size[0]
for width in range(size):
for height in range(size):
(r, g, b, a) = pixels[width, height]
if (r, g, b, a) == (0, 0, 0, 255):
pixels[width, height] = color # depends on [control=['if'], data=[]]
else:
pixels[width, height] = (r, g, b, color[3]) # depends on [control=['for'], data=['height']] # depends on [control=['for'], data=['width']]
|
def _parse_redirect(self, element):
"""
Parse a redirect statement
:param element: The XML Element object
:type element: etree._Element
"""
self._log.info('Parsing response as a redirect')
self.redirect = True
return self._parse_template(element)
|
def function[_parse_redirect, parameter[self, element]]:
constant[
Parse a redirect statement
:param element: The XML Element object
:type element: etree._Element
]
call[name[self]._log.info, parameter[constant[Parsing response as a redirect]]]
name[self].redirect assign[=] constant[True]
return[call[name[self]._parse_template, parameter[name[element]]]]
|
keyword[def] identifier[_parse_redirect] ( identifier[self] , identifier[element] ):
literal[string]
identifier[self] . identifier[_log] . identifier[info] ( literal[string] )
identifier[self] . identifier[redirect] = keyword[True]
keyword[return] identifier[self] . identifier[_parse_template] ( identifier[element] )
|
def _parse_redirect(self, element):
"""
Parse a redirect statement
:param element: The XML Element object
:type element: etree._Element
"""
self._log.info('Parsing response as a redirect')
self.redirect = True
return self._parse_template(element)
|
def app_trim_memory(self, pid: int or str, level: str = 'RUNNING_LOW') -> None:
'''Trim memory.
Args:
level: HIDDEN | RUNNING_MODERATE | BACKGROUNDRUNNING_LOW | \
MODERATE | RUNNING_CRITICAL | COMPLETE
'''
_, error = self._execute('-s', self.device_sn, 'shell',
'am', 'send-trim-memory', str(pid), level)
if error and error.startswith('Error'):
raise ApplicationsException(error.split(':', 1)[-1].strip())
|
def function[app_trim_memory, parameter[self, pid, level]]:
constant[Trim memory.
Args:
level: HIDDEN | RUNNING_MODERATE | BACKGROUNDRUNNING_LOW | MODERATE | RUNNING_CRITICAL | COMPLETE
]
<ast.Tuple object at 0x7da207f994e0> assign[=] call[name[self]._execute, parameter[constant[-s], name[self].device_sn, constant[shell], constant[am], constant[send-trim-memory], call[name[str], parameter[name[pid]]], name[level]]]
if <ast.BoolOp object at 0x7da207f995d0> begin[:]
<ast.Raise object at 0x7da207f9ba60>
|
keyword[def] identifier[app_trim_memory] ( identifier[self] , identifier[pid] : identifier[int] keyword[or] identifier[str] , identifier[level] : identifier[str] = literal[string] )-> keyword[None] :
literal[string]
identifier[_] , identifier[error] = identifier[self] . identifier[_execute] ( literal[string] , identifier[self] . identifier[device_sn] , literal[string] ,
literal[string] , literal[string] , identifier[str] ( identifier[pid] ), identifier[level] )
keyword[if] identifier[error] keyword[and] identifier[error] . identifier[startswith] ( literal[string] ):
keyword[raise] identifier[ApplicationsException] ( identifier[error] . identifier[split] ( literal[string] , literal[int] )[- literal[int] ]. identifier[strip] ())
|
def app_trim_memory(self, pid: int or str, level: str='RUNNING_LOW') -> None:
"""Trim memory.
Args:
level: HIDDEN | RUNNING_MODERATE | BACKGROUNDRUNNING_LOW | MODERATE | RUNNING_CRITICAL | COMPLETE
"""
(_, error) = self._execute('-s', self.device_sn, 'shell', 'am', 'send-trim-memory', str(pid), level)
if error and error.startswith('Error'):
raise ApplicationsException(error.split(':', 1)[-1].strip()) # depends on [control=['if'], data=[]]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.