code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def place(vertices_resources, nets, machine, constraints,
vertex_order=None, chip_order=None):
"""Blindly places vertices in sequential order onto chips in the machine.
This algorithm sequentially places vertices onto chips in the order
specified (or in an undefined order if not specified). This algorithm is
essentially the simplest possible valid placement algorithm and is intended
to form the basis of other simple sequential and greedy placers.
The algorithm proceeds by attempting to place each vertex on the a chip. If
the vertex fits we move onto the next vertex (but keep filling the same
vertex). If the vertex does not fit we move onto the next candidate chip
until we find somewhere the vertex fits. The algorithm will raise an
:py:exc:`rig.place_and_route.exceptions.InsufficientResourceError`
if it has failed to fit a vertex on every chip.
Parameters
----------
vertex_order : None or iterable
The order in which the vertices should be attemted to be placed.
If None (the default), the vertices will be placed in the default
iteration order of the ``vertices_resources`` argument. If an iterable,
the iteration sequence should produce each vertex in vertices_resources
*exactly once*.
chip_order : None or iterable
The order in which chips should be tried as a candidate location for a
vertex.
If None (the default), the chips will be used in the default iteration
order of the ``machine`` object (a raster scan). If an iterable, the
iteration sequence should produce (x, y) pairs giving the coordinates
of chips to use. All working chip coordinates must be included in the
iteration sequence *exactly once*. Additional chip coordinates of
non-existant or dead chips are also allowed (and will simply be
skipped).
"""
# If no vertices to place, just stop (from here on we presume that at least
# one vertex will be placed)
if len(vertices_resources) == 0:
return {}
# Within the algorithm we modify the resource availability values in the
# machine to account for the effects of the current placement. As a result,
# an internal copy of the structure must be made.
machine = machine.copy()
# {vertex: (x, y), ...} gives the location of all vertices, updated
# throughout the function.
placements = {}
# Handle constraints
vertices_resources, nets, constraints, substitutions = \
apply_same_chip_constraints(vertices_resources, nets, constraints)
for constraint in constraints:
if isinstance(constraint, LocationConstraint):
# Location constraints are handled by recording the set of fixed
# vertex locations and subtracting their resources from the chips
# they're allocated to.
location = constraint.location
if location not in machine:
raise InvalidConstraintError(
"Chip requested by {} unavailable".format(machine))
vertex = constraint.vertex
# Record the constrained vertex's location
placements[vertex] = location
# Make sure the vertex fits at the requested location (updating the
# resource availability after placement)
resources = vertices_resources[vertex]
machine[location] = subtract_resources(machine[location],
resources)
if overallocated(machine[location]):
raise InsufficientResourceError(
"Cannot meet {}".format(constraint))
elif isinstance(constraint, # pragma: no branch
ReserveResourceConstraint):
apply_reserve_resource_constraint(machine, constraint)
if vertex_order is not None:
# Must modify the vertex_order to substitute the merged vertices
# inserted by apply_reserve_resource_constraint.
vertex_order = list(vertex_order)
for merged_vertex in substitutions:
# Swap the first merged vertex for its MergedVertex object and
# remove all other vertices from the merged set
vertex_order[vertex_order.index(merged_vertex.vertices[0])] \
= merged_vertex
# Remove all other vertices in the MergedVertex
already_removed = set([merged_vertex.vertices[0]])
for vertex in merged_vertex.vertices[1:]:
if vertex not in already_removed:
vertex_order.remove(vertex)
already_removed.add(vertex)
# The set of vertices which have not been constrained, in iteration order
movable_vertices = (v for v in (vertices_resources
if vertex_order is None
else vertex_order)
if v not in placements)
# A cyclic iterator over all available chips
chips = cycle(c for c in (machine if chip_order is None else chip_order)
if c in machine)
chips_iter = iter(chips)
try:
cur_chip = next(chips_iter)
except StopIteration:
raise InsufficientResourceError("No working chips in machine.")
# The last chip that we successfully placed something on. Used to detect
# when we've tried all available chips and not found a suitable candidate
last_successful_chip = cur_chip
# Place each vertex in turn
for vertex in movable_vertices:
while True:
resources_if_placed = subtract_resources(
machine[cur_chip], vertices_resources[vertex])
if not overallocated(resources_if_placed):
# The vertex fits: record the resources consumed and move on to
# the next vertex.
placements[vertex] = cur_chip
machine[cur_chip] = resources_if_placed
last_successful_chip = cur_chip
break
else:
# The vertex won't fit on this chip, move onto the next one
# available.
cur_chip = next(chips_iter)
# If we've looped around all the available chips without
# managing to place the vertex, give up!
if cur_chip == last_successful_chip:
raise InsufficientResourceError(
"Ran out of chips while attempting to place vertex "
"{}".format(vertex))
finalise_same_chip_constraints(substitutions, placements)
return placements | def function[place, parameter[vertices_resources, nets, machine, constraints, vertex_order, chip_order]]:
constant[Blindly places vertices in sequential order onto chips in the machine.
This algorithm sequentially places vertices onto chips in the order
specified (or in an undefined order if not specified). This algorithm is
essentially the simplest possible valid placement algorithm and is intended
to form the basis of other simple sequential and greedy placers.
The algorithm proceeds by attempting to place each vertex on the a chip. If
the vertex fits we move onto the next vertex (but keep filling the same
vertex). If the vertex does not fit we move onto the next candidate chip
until we find somewhere the vertex fits. The algorithm will raise an
:py:exc:`rig.place_and_route.exceptions.InsufficientResourceError`
if it has failed to fit a vertex on every chip.
Parameters
----------
vertex_order : None or iterable
The order in which the vertices should be attemted to be placed.
If None (the default), the vertices will be placed in the default
iteration order of the ``vertices_resources`` argument. If an iterable,
the iteration sequence should produce each vertex in vertices_resources
*exactly once*.
chip_order : None or iterable
The order in which chips should be tried as a candidate location for a
vertex.
If None (the default), the chips will be used in the default iteration
order of the ``machine`` object (a raster scan). If an iterable, the
iteration sequence should produce (x, y) pairs giving the coordinates
of chips to use. All working chip coordinates must be included in the
iteration sequence *exactly once*. Additional chip coordinates of
non-existant or dead chips are also allowed (and will simply be
skipped).
]
if compare[call[name[len], parameter[name[vertices_resources]]] equal[==] constant[0]] begin[:]
return[dictionary[[], []]]
variable[machine] assign[=] call[name[machine].copy, parameter[]]
variable[placements] assign[=] dictionary[[], []]
<ast.Tuple object at 0x7da1b195c670> assign[=] call[name[apply_same_chip_constraints], parameter[name[vertices_resources], name[nets], name[constraints]]]
for taget[name[constraint]] in starred[name[constraints]] begin[:]
if call[name[isinstance], parameter[name[constraint], name[LocationConstraint]]] begin[:]
variable[location] assign[=] name[constraint].location
if compare[name[location] <ast.NotIn object at 0x7da2590d7190> name[machine]] begin[:]
<ast.Raise object at 0x7da1b195f580>
variable[vertex] assign[=] name[constraint].vertex
call[name[placements]][name[vertex]] assign[=] name[location]
variable[resources] assign[=] call[name[vertices_resources]][name[vertex]]
call[name[machine]][name[location]] assign[=] call[name[subtract_resources], parameter[call[name[machine]][name[location]], name[resources]]]
if call[name[overallocated], parameter[call[name[machine]][name[location]]]] begin[:]
<ast.Raise object at 0x7da1b195d330>
if compare[name[vertex_order] is_not constant[None]] begin[:]
variable[vertex_order] assign[=] call[name[list], parameter[name[vertex_order]]]
for taget[name[merged_vertex]] in starred[name[substitutions]] begin[:]
call[name[vertex_order]][call[name[vertex_order].index, parameter[call[name[merged_vertex].vertices][constant[0]]]]] assign[=] name[merged_vertex]
variable[already_removed] assign[=] call[name[set], parameter[list[[<ast.Subscript object at 0x7da1b195d6c0>]]]]
for taget[name[vertex]] in starred[call[name[merged_vertex].vertices][<ast.Slice object at 0x7da1b195f340>]] begin[:]
if compare[name[vertex] <ast.NotIn object at 0x7da2590d7190> name[already_removed]] begin[:]
call[name[vertex_order].remove, parameter[name[vertex]]]
call[name[already_removed].add, parameter[name[vertex]]]
variable[movable_vertices] assign[=] <ast.GeneratorExp object at 0x7da1b195eb00>
variable[chips] assign[=] call[name[cycle], parameter[<ast.GeneratorExp object at 0x7da1b195d570>]]
variable[chips_iter] assign[=] call[name[iter], parameter[name[chips]]]
<ast.Try object at 0x7da1b195c0d0>
variable[last_successful_chip] assign[=] name[cur_chip]
for taget[name[vertex]] in starred[name[movable_vertices]] begin[:]
while constant[True] begin[:]
variable[resources_if_placed] assign[=] call[name[subtract_resources], parameter[call[name[machine]][name[cur_chip]], call[name[vertices_resources]][name[vertex]]]]
if <ast.UnaryOp object at 0x7da1b195f130> begin[:]
call[name[placements]][name[vertex]] assign[=] name[cur_chip]
call[name[machine]][name[cur_chip]] assign[=] name[resources_if_placed]
variable[last_successful_chip] assign[=] name[cur_chip]
break
call[name[finalise_same_chip_constraints], parameter[name[substitutions], name[placements]]]
return[name[placements]] | keyword[def] identifier[place] ( identifier[vertices_resources] , identifier[nets] , identifier[machine] , identifier[constraints] ,
identifier[vertex_order] = keyword[None] , identifier[chip_order] = keyword[None] ):
literal[string]
keyword[if] identifier[len] ( identifier[vertices_resources] )== literal[int] :
keyword[return] {}
identifier[machine] = identifier[machine] . identifier[copy] ()
identifier[placements] ={}
identifier[vertices_resources] , identifier[nets] , identifier[constraints] , identifier[substitutions] = identifier[apply_same_chip_constraints] ( identifier[vertices_resources] , identifier[nets] , identifier[constraints] )
keyword[for] identifier[constraint] keyword[in] identifier[constraints] :
keyword[if] identifier[isinstance] ( identifier[constraint] , identifier[LocationConstraint] ):
identifier[location] = identifier[constraint] . identifier[location]
keyword[if] identifier[location] keyword[not] keyword[in] identifier[machine] :
keyword[raise] identifier[InvalidConstraintError] (
literal[string] . identifier[format] ( identifier[machine] ))
identifier[vertex] = identifier[constraint] . identifier[vertex]
identifier[placements] [ identifier[vertex] ]= identifier[location]
identifier[resources] = identifier[vertices_resources] [ identifier[vertex] ]
identifier[machine] [ identifier[location] ]= identifier[subtract_resources] ( identifier[machine] [ identifier[location] ],
identifier[resources] )
keyword[if] identifier[overallocated] ( identifier[machine] [ identifier[location] ]):
keyword[raise] identifier[InsufficientResourceError] (
literal[string] . identifier[format] ( identifier[constraint] ))
keyword[elif] identifier[isinstance] ( identifier[constraint] ,
identifier[ReserveResourceConstraint] ):
identifier[apply_reserve_resource_constraint] ( identifier[machine] , identifier[constraint] )
keyword[if] identifier[vertex_order] keyword[is] keyword[not] keyword[None] :
identifier[vertex_order] = identifier[list] ( identifier[vertex_order] )
keyword[for] identifier[merged_vertex] keyword[in] identifier[substitutions] :
identifier[vertex_order] [ identifier[vertex_order] . identifier[index] ( identifier[merged_vertex] . identifier[vertices] [ literal[int] ])]= identifier[merged_vertex]
identifier[already_removed] = identifier[set] ([ identifier[merged_vertex] . identifier[vertices] [ literal[int] ]])
keyword[for] identifier[vertex] keyword[in] identifier[merged_vertex] . identifier[vertices] [ literal[int] :]:
keyword[if] identifier[vertex] keyword[not] keyword[in] identifier[already_removed] :
identifier[vertex_order] . identifier[remove] ( identifier[vertex] )
identifier[already_removed] . identifier[add] ( identifier[vertex] )
identifier[movable_vertices] =( identifier[v] keyword[for] identifier[v] keyword[in] ( identifier[vertices_resources]
keyword[if] identifier[vertex_order] keyword[is] keyword[None]
keyword[else] identifier[vertex_order] )
keyword[if] identifier[v] keyword[not] keyword[in] identifier[placements] )
identifier[chips] = identifier[cycle] ( identifier[c] keyword[for] identifier[c] keyword[in] ( identifier[machine] keyword[if] identifier[chip_order] keyword[is] keyword[None] keyword[else] identifier[chip_order] )
keyword[if] identifier[c] keyword[in] identifier[machine] )
identifier[chips_iter] = identifier[iter] ( identifier[chips] )
keyword[try] :
identifier[cur_chip] = identifier[next] ( identifier[chips_iter] )
keyword[except] identifier[StopIteration] :
keyword[raise] identifier[InsufficientResourceError] ( literal[string] )
identifier[last_successful_chip] = identifier[cur_chip]
keyword[for] identifier[vertex] keyword[in] identifier[movable_vertices] :
keyword[while] keyword[True] :
identifier[resources_if_placed] = identifier[subtract_resources] (
identifier[machine] [ identifier[cur_chip] ], identifier[vertices_resources] [ identifier[vertex] ])
keyword[if] keyword[not] identifier[overallocated] ( identifier[resources_if_placed] ):
identifier[placements] [ identifier[vertex] ]= identifier[cur_chip]
identifier[machine] [ identifier[cur_chip] ]= identifier[resources_if_placed]
identifier[last_successful_chip] = identifier[cur_chip]
keyword[break]
keyword[else] :
identifier[cur_chip] = identifier[next] ( identifier[chips_iter] )
keyword[if] identifier[cur_chip] == identifier[last_successful_chip] :
keyword[raise] identifier[InsufficientResourceError] (
literal[string]
literal[string] . identifier[format] ( identifier[vertex] ))
identifier[finalise_same_chip_constraints] ( identifier[substitutions] , identifier[placements] )
keyword[return] identifier[placements] | def place(vertices_resources, nets, machine, constraints, vertex_order=None, chip_order=None):
"""Blindly places vertices in sequential order onto chips in the machine.
This algorithm sequentially places vertices onto chips in the order
specified (or in an undefined order if not specified). This algorithm is
essentially the simplest possible valid placement algorithm and is intended
to form the basis of other simple sequential and greedy placers.
The algorithm proceeds by attempting to place each vertex on the a chip. If
the vertex fits we move onto the next vertex (but keep filling the same
vertex). If the vertex does not fit we move onto the next candidate chip
until we find somewhere the vertex fits. The algorithm will raise an
:py:exc:`rig.place_and_route.exceptions.InsufficientResourceError`
if it has failed to fit a vertex on every chip.
Parameters
----------
vertex_order : None or iterable
The order in which the vertices should be attemted to be placed.
If None (the default), the vertices will be placed in the default
iteration order of the ``vertices_resources`` argument. If an iterable,
the iteration sequence should produce each vertex in vertices_resources
*exactly once*.
chip_order : None or iterable
The order in which chips should be tried as a candidate location for a
vertex.
If None (the default), the chips will be used in the default iteration
order of the ``machine`` object (a raster scan). If an iterable, the
iteration sequence should produce (x, y) pairs giving the coordinates
of chips to use. All working chip coordinates must be included in the
iteration sequence *exactly once*. Additional chip coordinates of
non-existant or dead chips are also allowed (and will simply be
skipped).
"""
# If no vertices to place, just stop (from here on we presume that at least
# one vertex will be placed)
if len(vertices_resources) == 0:
return {} # depends on [control=['if'], data=[]]
# Within the algorithm we modify the resource availability values in the
# machine to account for the effects of the current placement. As a result,
# an internal copy of the structure must be made.
machine = machine.copy()
# {vertex: (x, y), ...} gives the location of all vertices, updated
# throughout the function.
placements = {}
# Handle constraints
(vertices_resources, nets, constraints, substitutions) = apply_same_chip_constraints(vertices_resources, nets, constraints)
for constraint in constraints:
if isinstance(constraint, LocationConstraint):
# Location constraints are handled by recording the set of fixed
# vertex locations and subtracting their resources from the chips
# they're allocated to.
location = constraint.location
if location not in machine:
raise InvalidConstraintError('Chip requested by {} unavailable'.format(machine)) # depends on [control=['if'], data=['machine']]
vertex = constraint.vertex
# Record the constrained vertex's location
placements[vertex] = location
# Make sure the vertex fits at the requested location (updating the
# resource availability after placement)
resources = vertices_resources[vertex]
machine[location] = subtract_resources(machine[location], resources)
if overallocated(machine[location]):
raise InsufficientResourceError('Cannot meet {}'.format(constraint)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(constraint, ReserveResourceConstraint): # pragma: no branch
apply_reserve_resource_constraint(machine, constraint) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['constraint']]
if vertex_order is not None:
# Must modify the vertex_order to substitute the merged vertices
# inserted by apply_reserve_resource_constraint.
vertex_order = list(vertex_order)
for merged_vertex in substitutions:
# Swap the first merged vertex for its MergedVertex object and
# remove all other vertices from the merged set
vertex_order[vertex_order.index(merged_vertex.vertices[0])] = merged_vertex
# Remove all other vertices in the MergedVertex
already_removed = set([merged_vertex.vertices[0]])
for vertex in merged_vertex.vertices[1:]:
if vertex not in already_removed:
vertex_order.remove(vertex)
already_removed.add(vertex) # depends on [control=['if'], data=['vertex', 'already_removed']] # depends on [control=['for'], data=['vertex']] # depends on [control=['for'], data=['merged_vertex']] # depends on [control=['if'], data=['vertex_order']]
# The set of vertices which have not been constrained, in iteration order
movable_vertices = (v for v in (vertices_resources if vertex_order is None else vertex_order) if v not in placements)
# A cyclic iterator over all available chips
chips = cycle((c for c in (machine if chip_order is None else chip_order) if c in machine))
chips_iter = iter(chips)
try:
cur_chip = next(chips_iter) # depends on [control=['try'], data=[]]
except StopIteration:
raise InsufficientResourceError('No working chips in machine.') # depends on [control=['except'], data=[]]
# The last chip that we successfully placed something on. Used to detect
# when we've tried all available chips and not found a suitable candidate
last_successful_chip = cur_chip
# Place each vertex in turn
for vertex in movable_vertices:
while True:
resources_if_placed = subtract_resources(machine[cur_chip], vertices_resources[vertex])
if not overallocated(resources_if_placed):
# The vertex fits: record the resources consumed and move on to
# the next vertex.
placements[vertex] = cur_chip
machine[cur_chip] = resources_if_placed
last_successful_chip = cur_chip
break # depends on [control=['if'], data=[]]
else:
# The vertex won't fit on this chip, move onto the next one
# available.
cur_chip = next(chips_iter)
# If we've looped around all the available chips without
# managing to place the vertex, give up!
if cur_chip == last_successful_chip:
raise InsufficientResourceError('Ran out of chips while attempting to place vertex {}'.format(vertex)) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['for'], data=['vertex']]
finalise_same_chip_constraints(substitutions, placements)
return placements |
def escape_chars(text, chars):
"""Helper function to escape uncomfortable characters."""
text = str(text)
chars = list(set(chars))
if '\\' in chars:
chars.remove('\\')
chars.insert(0, '\\')
for ch in chars:
text = text.replace(ch, '\\' + ch)
return text | def function[escape_chars, parameter[text, chars]]:
constant[Helper function to escape uncomfortable characters.]
variable[text] assign[=] call[name[str], parameter[name[text]]]
variable[chars] assign[=] call[name[list], parameter[call[name[set], parameter[name[chars]]]]]
if compare[constant[\] in name[chars]] begin[:]
call[name[chars].remove, parameter[constant[\]]]
call[name[chars].insert, parameter[constant[0], constant[\]]]
for taget[name[ch]] in starred[name[chars]] begin[:]
variable[text] assign[=] call[name[text].replace, parameter[name[ch], binary_operation[constant[\] + name[ch]]]]
return[name[text]] | keyword[def] identifier[escape_chars] ( identifier[text] , identifier[chars] ):
literal[string]
identifier[text] = identifier[str] ( identifier[text] )
identifier[chars] = identifier[list] ( identifier[set] ( identifier[chars] ))
keyword[if] literal[string] keyword[in] identifier[chars] :
identifier[chars] . identifier[remove] ( literal[string] )
identifier[chars] . identifier[insert] ( literal[int] , literal[string] )
keyword[for] identifier[ch] keyword[in] identifier[chars] :
identifier[text] = identifier[text] . identifier[replace] ( identifier[ch] , literal[string] + identifier[ch] )
keyword[return] identifier[text] | def escape_chars(text, chars):
"""Helper function to escape uncomfortable characters."""
text = str(text)
chars = list(set(chars))
if '\\' in chars:
chars.remove('\\')
chars.insert(0, '\\') # depends on [control=['if'], data=['chars']]
for ch in chars:
text = text.replace(ch, '\\' + ch) # depends on [control=['for'], data=['ch']]
return text |
def script(self):
"""
Get the script :class:`language_tags.Subtag.Subtag` of the tag.
:return: script :class:`language_tags.Subtag.Subtag` that is part of the tag.
The return can be None.
"""
script_item = [subtag for subtag in self.subtags if subtag.type == 'script']
return script_item[0] if len(script_item) > 0 else None | def function[script, parameter[self]]:
constant[
Get the script :class:`language_tags.Subtag.Subtag` of the tag.
:return: script :class:`language_tags.Subtag.Subtag` that is part of the tag.
The return can be None.
]
variable[script_item] assign[=] <ast.ListComp object at 0x7da1b258a8c0>
return[<ast.IfExp object at 0x7da1b2589240>] | keyword[def] identifier[script] ( identifier[self] ):
literal[string]
identifier[script_item] =[ identifier[subtag] keyword[for] identifier[subtag] keyword[in] identifier[self] . identifier[subtags] keyword[if] identifier[subtag] . identifier[type] == literal[string] ]
keyword[return] identifier[script_item] [ literal[int] ] keyword[if] identifier[len] ( identifier[script_item] )> literal[int] keyword[else] keyword[None] | def script(self):
"""
Get the script :class:`language_tags.Subtag.Subtag` of the tag.
:return: script :class:`language_tags.Subtag.Subtag` that is part of the tag.
The return can be None.
"""
script_item = [subtag for subtag in self.subtags if subtag.type == 'script']
return script_item[0] if len(script_item) > 0 else None |
async def _flush(self, request: 'Request', stacks: List[Stack]):
"""
Perform the actual sending to platform. This is separated from
`flush()` since it needs to be inside a middleware call.
"""
for stack in stacks:
await self.platform.send(request, stack) | <ast.AsyncFunctionDef object at 0x7da18dc9a8f0> | keyword[async] keyword[def] identifier[_flush] ( identifier[self] , identifier[request] : literal[string] , identifier[stacks] : identifier[List] [ identifier[Stack] ]):
literal[string]
keyword[for] identifier[stack] keyword[in] identifier[stacks] :
keyword[await] identifier[self] . identifier[platform] . identifier[send] ( identifier[request] , identifier[stack] ) | async def _flush(self, request: 'Request', stacks: List[Stack]):
"""
Perform the actual sending to platform. This is separated from
`flush()` since it needs to be inside a middleware call.
"""
for stack in stacks:
await self.platform.send(request, stack) # depends on [control=['for'], data=['stack']] |
def make_context_aware(func, numargs):
"""
Check if given function has no more arguments than given. If so, wrap it
into another function that takes extra argument and drops it.
Used to support user providing callback functions that are not context aware.
"""
try:
if inspect.ismethod(func):
arg_count = len(inspect.getargspec(func).args) - 1
elif inspect.isfunction(func):
arg_count = len(inspect.getargspec(func).args)
elif inspect.isclass(func):
arg_count = len(inspect.getargspec(func.__init__).args) - 1
else:
arg_count = len(inspect.getargspec(func.__call__).args) - 1
except TypeError:
arg_count = numargs
if arg_count <= numargs:
def normalized(*args):
return func(*args[:-1])
return normalized
return func | def function[make_context_aware, parameter[func, numargs]]:
constant[
Check if given function has no more arguments than given. If so, wrap it
into another function that takes extra argument and drops it.
Used to support user providing callback functions that are not context aware.
]
<ast.Try object at 0x7da18f58ec20>
if compare[name[arg_count] less_or_equal[<=] name[numargs]] begin[:]
def function[normalized, parameter[]]:
return[call[name[func], parameter[<ast.Starred object at 0x7da18f58e9e0>]]]
return[name[normalized]]
return[name[func]] | keyword[def] identifier[make_context_aware] ( identifier[func] , identifier[numargs] ):
literal[string]
keyword[try] :
keyword[if] identifier[inspect] . identifier[ismethod] ( identifier[func] ):
identifier[arg_count] = identifier[len] ( identifier[inspect] . identifier[getargspec] ( identifier[func] ). identifier[args] )- literal[int]
keyword[elif] identifier[inspect] . identifier[isfunction] ( identifier[func] ):
identifier[arg_count] = identifier[len] ( identifier[inspect] . identifier[getargspec] ( identifier[func] ). identifier[args] )
keyword[elif] identifier[inspect] . identifier[isclass] ( identifier[func] ):
identifier[arg_count] = identifier[len] ( identifier[inspect] . identifier[getargspec] ( identifier[func] . identifier[__init__] ). identifier[args] )- literal[int]
keyword[else] :
identifier[arg_count] = identifier[len] ( identifier[inspect] . identifier[getargspec] ( identifier[func] . identifier[__call__] ). identifier[args] )- literal[int]
keyword[except] identifier[TypeError] :
identifier[arg_count] = identifier[numargs]
keyword[if] identifier[arg_count] <= identifier[numargs] :
keyword[def] identifier[normalized] (* identifier[args] ):
keyword[return] identifier[func] (* identifier[args] [:- literal[int] ])
keyword[return] identifier[normalized]
keyword[return] identifier[func] | def make_context_aware(func, numargs):
"""
Check if given function has no more arguments than given. If so, wrap it
into another function that takes extra argument and drops it.
Used to support user providing callback functions that are not context aware.
"""
try:
if inspect.ismethod(func):
arg_count = len(inspect.getargspec(func).args) - 1 # depends on [control=['if'], data=[]]
elif inspect.isfunction(func):
arg_count = len(inspect.getargspec(func).args) # depends on [control=['if'], data=[]]
elif inspect.isclass(func):
arg_count = len(inspect.getargspec(func.__init__).args) - 1 # depends on [control=['if'], data=[]]
else:
arg_count = len(inspect.getargspec(func.__call__).args) - 1 # depends on [control=['try'], data=[]]
except TypeError:
arg_count = numargs # depends on [control=['except'], data=[]]
if arg_count <= numargs:
def normalized(*args):
return func(*args[:-1])
return normalized # depends on [control=['if'], data=[]]
return func |
def split_sequence(seq, n):
"""Generates tokens of length n from a sequence.
The last token may be of smaller length."""
tokens = []
while seq:
tokens.append(seq[:n])
seq = seq[n:]
return tokens | def function[split_sequence, parameter[seq, n]]:
constant[Generates tokens of length n from a sequence.
The last token may be of smaller length.]
variable[tokens] assign[=] list[[]]
while name[seq] begin[:]
call[name[tokens].append, parameter[call[name[seq]][<ast.Slice object at 0x7da20c796410>]]]
variable[seq] assign[=] call[name[seq]][<ast.Slice object at 0x7da20c7956c0>]
return[name[tokens]] | keyword[def] identifier[split_sequence] ( identifier[seq] , identifier[n] ):
literal[string]
identifier[tokens] =[]
keyword[while] identifier[seq] :
identifier[tokens] . identifier[append] ( identifier[seq] [: identifier[n] ])
identifier[seq] = identifier[seq] [ identifier[n] :]
keyword[return] identifier[tokens] | def split_sequence(seq, n):
"""Generates tokens of length n from a sequence.
The last token may be of smaller length."""
tokens = []
while seq:
tokens.append(seq[:n])
seq = seq[n:] # depends on [control=['while'], data=[]]
return tokens |
def get_inventory(self, resources):
"""
Returns a JSON object with the requested resources and their
properties, that are managed by the HMC.
This method performs the 'Get Inventory' HMC operation.
Parameters:
resources (:term:`iterable` of :term:`string`):
Resource classes and/or resource classifiers specifying the types
of resources that should be included in the result. For valid
values, see the 'Get Inventory' operation in the :term:`HMC API`
book.
Element resources of the specified resource types are automatically
included as children (for example, requesting 'partition' includes
all of its 'hba', 'nic' and 'virtual-function' element resources).
Must not be `None`.
Returns:
:term:`JSON object`:
The resources with their properties, for the requested resource
classes and resource classifiers.
Example:
resource_classes = ['partition', 'adapter']
result_dict = client.get_inventory(resource_classes)
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.ConnectionError`
"""
uri = '/api/services/inventory'
body = {'resources': resources}
result = self.session.post(uri, body=body)
return result | def function[get_inventory, parameter[self, resources]]:
constant[
Returns a JSON object with the requested resources and their
properties, that are managed by the HMC.
This method performs the 'Get Inventory' HMC operation.
Parameters:
resources (:term:`iterable` of :term:`string`):
Resource classes and/or resource classifiers specifying the types
of resources that should be included in the result. For valid
values, see the 'Get Inventory' operation in the :term:`HMC API`
book.
Element resources of the specified resource types are automatically
included as children (for example, requesting 'partition' includes
all of its 'hba', 'nic' and 'virtual-function' element resources).
Must not be `None`.
Returns:
:term:`JSON object`:
The resources with their properties, for the requested resource
classes and resource classifiers.
Example:
resource_classes = ['partition', 'adapter']
result_dict = client.get_inventory(resource_classes)
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.ConnectionError`
]
variable[uri] assign[=] constant[/api/services/inventory]
variable[body] assign[=] dictionary[[<ast.Constant object at 0x7da18f722bc0>], [<ast.Name object at 0x7da18f7217b0>]]
variable[result] assign[=] call[name[self].session.post, parameter[name[uri]]]
return[name[result]] | keyword[def] identifier[get_inventory] ( identifier[self] , identifier[resources] ):
literal[string]
identifier[uri] = literal[string]
identifier[body] ={ literal[string] : identifier[resources] }
identifier[result] = identifier[self] . identifier[session] . identifier[post] ( identifier[uri] , identifier[body] = identifier[body] )
keyword[return] identifier[result] | def get_inventory(self, resources):
"""
Returns a JSON object with the requested resources and their
properties, that are managed by the HMC.
This method performs the 'Get Inventory' HMC operation.
Parameters:
resources (:term:`iterable` of :term:`string`):
Resource classes and/or resource classifiers specifying the types
of resources that should be included in the result. For valid
values, see the 'Get Inventory' operation in the :term:`HMC API`
book.
Element resources of the specified resource types are automatically
included as children (for example, requesting 'partition' includes
all of its 'hba', 'nic' and 'virtual-function' element resources).
Must not be `None`.
Returns:
:term:`JSON object`:
The resources with their properties, for the requested resource
classes and resource classifiers.
Example:
resource_classes = ['partition', 'adapter']
result_dict = client.get_inventory(resource_classes)
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.ConnectionError`
"""
uri = '/api/services/inventory'
body = {'resources': resources}
result = self.session.post(uri, body=body)
return result |
def initialize_app(flask_app, args):
"""Initialize the App."""
# Setup gourde with the args.
gourde.setup(args)
# Register a custom health check.
gourde.is_healthy = is_healthy
# Add an optional API
initialize_api(flask_app) | def function[initialize_app, parameter[flask_app, args]]:
constant[Initialize the App.]
call[name[gourde].setup, parameter[name[args]]]
name[gourde].is_healthy assign[=] name[is_healthy]
call[name[initialize_api], parameter[name[flask_app]]] | keyword[def] identifier[initialize_app] ( identifier[flask_app] , identifier[args] ):
literal[string]
identifier[gourde] . identifier[setup] ( identifier[args] )
identifier[gourde] . identifier[is_healthy] = identifier[is_healthy]
identifier[initialize_api] ( identifier[flask_app] ) | def initialize_app(flask_app, args):
"""Initialize the App."""
# Setup gourde with the args.
gourde.setup(args)
# Register a custom health check.
gourde.is_healthy = is_healthy
# Add an optional API
initialize_api(flask_app) |
def _get_thumbnail_filename(filename, append_text="-thumbnail"):
"""
Returns a thumbnail version of the file name.
"""
name, ext = os.path.splitext(filename)
return ''.join([name, append_text, ext]) | def function[_get_thumbnail_filename, parameter[filename, append_text]]:
constant[
Returns a thumbnail version of the file name.
]
<ast.Tuple object at 0x7da20cabf160> assign[=] call[name[os].path.splitext, parameter[name[filename]]]
return[call[constant[].join, parameter[list[[<ast.Name object at 0x7da1b144e980>, <ast.Name object at 0x7da1b144fdc0>, <ast.Name object at 0x7da1b144f0d0>]]]]] | keyword[def] identifier[_get_thumbnail_filename] ( identifier[filename] , identifier[append_text] = literal[string] ):
literal[string]
identifier[name] , identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[filename] )
keyword[return] literal[string] . identifier[join] ([ identifier[name] , identifier[append_text] , identifier[ext] ]) | def _get_thumbnail_filename(filename, append_text='-thumbnail'):
"""
Returns a thumbnail version of the file name.
"""
(name, ext) = os.path.splitext(filename)
return ''.join([name, append_text, ext]) |
def context_lookup(self, vars):
"""
Lookup the variables in the provided dictionary, resolve with entries
in the context
"""
while isinstance(vars, IscmExpr):
vars = vars.resolve(self.context)
#
for (k,v) in vars.items():
if isinstance(v, IscmExpr):
vars[k] = v.resolve(self.context)
return vars | def function[context_lookup, parameter[self, vars]]:
constant[
Lookup the variables in the provided dictionary, resolve with entries
in the context
]
while call[name[isinstance], parameter[name[vars], name[IscmExpr]]] begin[:]
variable[vars] assign[=] call[name[vars].resolve, parameter[name[self].context]]
for taget[tuple[[<ast.Name object at 0x7da2041db370>, <ast.Name object at 0x7da2041d86d0>]]] in starred[call[name[vars].items, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[v], name[IscmExpr]]] begin[:]
call[name[vars]][name[k]] assign[=] call[name[v].resolve, parameter[name[self].context]]
return[name[vars]] | keyword[def] identifier[context_lookup] ( identifier[self] , identifier[vars] ):
literal[string]
keyword[while] identifier[isinstance] ( identifier[vars] , identifier[IscmExpr] ):
identifier[vars] = identifier[vars] . identifier[resolve] ( identifier[self] . identifier[context] )
keyword[for] ( identifier[k] , identifier[v] ) keyword[in] identifier[vars] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[v] , identifier[IscmExpr] ):
identifier[vars] [ identifier[k] ]= identifier[v] . identifier[resolve] ( identifier[self] . identifier[context] )
keyword[return] identifier[vars] | def context_lookup(self, vars):
"""
Lookup the variables in the provided dictionary, resolve with entries
in the context
"""
while isinstance(vars, IscmExpr):
vars = vars.resolve(self.context) # depends on [control=['while'], data=[]]
#
for (k, v) in vars.items():
if isinstance(v, IscmExpr):
vars[k] = v.resolve(self.context) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return vars |
def protected_operation(fn):
"""
Use this decorator to prevent an operation from being executed
when the related uri resource is still in use.
The parent_object must contain:
* a request
* with a registry.queryUtility(IReferencer)
:raises pyramid.httpexceptions.HTTPConflict: Signals that we don't want to
delete a certain URI because it's still in use somewhere else.
:raises pyramid.httpexceptions.HTTPInternalServerError: Raised when we were
unable to check that the URI is no longer being used.
"""
@functools.wraps(fn)
def advice(parent_object, *args, **kw):
response = _advice(parent_object.request)
if response is not None:
return response
else:
return fn(parent_object, *args, **kw)
return advice | def function[protected_operation, parameter[fn]]:
constant[
Use this decorator to prevent an operation from being executed
when the related uri resource is still in use.
The parent_object must contain:
* a request
* with a registry.queryUtility(IReferencer)
:raises pyramid.httpexceptions.HTTPConflict: Signals that we don't want to
delete a certain URI because it's still in use somewhere else.
:raises pyramid.httpexceptions.HTTPInternalServerError: Raised when we were
unable to check that the URI is no longer being used.
]
def function[advice, parameter[parent_object]]:
variable[response] assign[=] call[name[_advice], parameter[name[parent_object].request]]
if compare[name[response] is_not constant[None]] begin[:]
return[name[response]]
return[name[advice]] | keyword[def] identifier[protected_operation] ( identifier[fn] ):
literal[string]
@ identifier[functools] . identifier[wraps] ( identifier[fn] )
keyword[def] identifier[advice] ( identifier[parent_object] ,* identifier[args] ,** identifier[kw] ):
identifier[response] = identifier[_advice] ( identifier[parent_object] . identifier[request] )
keyword[if] identifier[response] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[response]
keyword[else] :
keyword[return] identifier[fn] ( identifier[parent_object] ,* identifier[args] ,** identifier[kw] )
keyword[return] identifier[advice] | def protected_operation(fn):
"""
Use this decorator to prevent an operation from being executed
when the related uri resource is still in use.
The parent_object must contain:
* a request
* with a registry.queryUtility(IReferencer)
:raises pyramid.httpexceptions.HTTPConflict: Signals that we don't want to
delete a certain URI because it's still in use somewhere else.
:raises pyramid.httpexceptions.HTTPInternalServerError: Raised when we were
unable to check that the URI is no longer being used.
"""
@functools.wraps(fn)
def advice(parent_object, *args, **kw):
response = _advice(parent_object.request)
if response is not None:
return response # depends on [control=['if'], data=['response']]
else:
return fn(parent_object, *args, **kw)
return advice |
def _get_properties(self, fldstr):
"""Return optional Annotation Properties (0 or greater)."""
prop2val = {}
props = self._rd_fld_vals("Properties", fldstr, False) # Get set
go_evidence = None
for prop in props:
# There can be more properties than 'go_evidence',
# but currently we see only 'go_evidence'.
# Upon encountering updates, evaluate and update code to support ...
if prop[:12] == 'go_evidence=':
assert go_evidence is None, "MORE THAN ONE EVIDENCE CODE FOUND"
go_evidence = prop[12:]
else:
assert False, "UNPROGRAMMED PROPERTY({P})".format(P=prop)
## TBD: Is 'go_evidence' still used? Replaced by ECO? And eco2group
## assert go_evidence is not None, "go_evidence == None"
## prop2val['go_evidence'] = go_evidence
if go_evidence is not None:
prop2val['go_evidence'] = go_evidence
return prop2val | def function[_get_properties, parameter[self, fldstr]]:
constant[Return optional Annotation Properties (0 or greater).]
variable[prop2val] assign[=] dictionary[[], []]
variable[props] assign[=] call[name[self]._rd_fld_vals, parameter[constant[Properties], name[fldstr], constant[False]]]
variable[go_evidence] assign[=] constant[None]
for taget[name[prop]] in starred[name[props]] begin[:]
if compare[call[name[prop]][<ast.Slice object at 0x7da20c6a9ed0>] equal[==] constant[go_evidence=]] begin[:]
assert[compare[name[go_evidence] is constant[None]]]
variable[go_evidence] assign[=] call[name[prop]][<ast.Slice object at 0x7da20c6a9990>]
if compare[name[go_evidence] is_not constant[None]] begin[:]
call[name[prop2val]][constant[go_evidence]] assign[=] name[go_evidence]
return[name[prop2val]] | keyword[def] identifier[_get_properties] ( identifier[self] , identifier[fldstr] ):
literal[string]
identifier[prop2val] ={}
identifier[props] = identifier[self] . identifier[_rd_fld_vals] ( literal[string] , identifier[fldstr] , keyword[False] )
identifier[go_evidence] = keyword[None]
keyword[for] identifier[prop] keyword[in] identifier[props] :
keyword[if] identifier[prop] [: literal[int] ]== literal[string] :
keyword[assert] identifier[go_evidence] keyword[is] keyword[None] , literal[string]
identifier[go_evidence] = identifier[prop] [ literal[int] :]
keyword[else] :
keyword[assert] keyword[False] , literal[string] . identifier[format] ( identifier[P] = identifier[prop] )
keyword[if] identifier[go_evidence] keyword[is] keyword[not] keyword[None] :
identifier[prop2val] [ literal[string] ]= identifier[go_evidence]
keyword[return] identifier[prop2val] | def _get_properties(self, fldstr):
"""Return optional Annotation Properties (0 or greater)."""
prop2val = {}
props = self._rd_fld_vals('Properties', fldstr, False) # Get set
go_evidence = None
for prop in props:
# There can be more properties than 'go_evidence',
# but currently we see only 'go_evidence'.
# Upon encountering updates, evaluate and update code to support ...
if prop[:12] == 'go_evidence=':
assert go_evidence is None, 'MORE THAN ONE EVIDENCE CODE FOUND'
go_evidence = prop[12:] # depends on [control=['if'], data=[]]
else:
assert False, 'UNPROGRAMMED PROPERTY({P})'.format(P=prop) # depends on [control=['for'], data=['prop']]
## TBD: Is 'go_evidence' still used? Replaced by ECO? And eco2group
## assert go_evidence is not None, "go_evidence == None"
## prop2val['go_evidence'] = go_evidence
if go_evidence is not None:
prop2val['go_evidence'] = go_evidence # depends on [control=['if'], data=['go_evidence']]
return prop2val |
def affiliation(self):
"""A list of namedtuples representing listed affiliations in
the form (id, name, city, country).
Note: Might be empty.
"""
out = []
aff = namedtuple('Affiliation', 'id name city country')
affs = listify(self._json.get('affiliation', []))
for item in affs:
new = aff(id=item.get('@id'), name=item.get('affilname'),
city=item.get('affiliation-city'),
country=item.get('affiliation-country'))
out.append(new)
return out or None | def function[affiliation, parameter[self]]:
constant[A list of namedtuples representing listed affiliations in
the form (id, name, city, country).
Note: Might be empty.
]
variable[out] assign[=] list[[]]
variable[aff] assign[=] call[name[namedtuple], parameter[constant[Affiliation], constant[id name city country]]]
variable[affs] assign[=] call[name[listify], parameter[call[name[self]._json.get, parameter[constant[affiliation], list[[]]]]]]
for taget[name[item]] in starred[name[affs]] begin[:]
variable[new] assign[=] call[name[aff], parameter[]]
call[name[out].append, parameter[name[new]]]
return[<ast.BoolOp object at 0x7da18f00cee0>] | keyword[def] identifier[affiliation] ( identifier[self] ):
literal[string]
identifier[out] =[]
identifier[aff] = identifier[namedtuple] ( literal[string] , literal[string] )
identifier[affs] = identifier[listify] ( identifier[self] . identifier[_json] . identifier[get] ( literal[string] ,[]))
keyword[for] identifier[item] keyword[in] identifier[affs] :
identifier[new] = identifier[aff] ( identifier[id] = identifier[item] . identifier[get] ( literal[string] ), identifier[name] = identifier[item] . identifier[get] ( literal[string] ),
identifier[city] = identifier[item] . identifier[get] ( literal[string] ),
identifier[country] = identifier[item] . identifier[get] ( literal[string] ))
identifier[out] . identifier[append] ( identifier[new] )
keyword[return] identifier[out] keyword[or] keyword[None] | def affiliation(self):
"""A list of namedtuples representing listed affiliations in
the form (id, name, city, country).
Note: Might be empty.
"""
out = []
aff = namedtuple('Affiliation', 'id name city country')
affs = listify(self._json.get('affiliation', []))
for item in affs:
new = aff(id=item.get('@id'), name=item.get('affilname'), city=item.get('affiliation-city'), country=item.get('affiliation-country'))
out.append(new) # depends on [control=['for'], data=['item']]
return out or None |
def run(self):
'''Actually runs the work unit.
This is called by the standard worker system, generally
once per work unit. It requires the work spec to contain
keys ``module``, ``run_function``, and ``terminate_function``.
It looks up ``run_function`` in :attr:`module` and calls that
function with :const:`self` as its only parameter.
'''
try:
logger.info('running work unit {0}'.format(self.key))
run_function = getattr(self.module, self.spec['run_function'])
ret_val = run_function(self)
self.update()
logger.info('completed work unit {0}'.format(self.key))
return ret_val
except LostLease:
logger.warning('work unit {0} timed out'.format(self.key))
raise
except Exception:
logger.error('work unit {0} failed'.format(self.key),
exc_info=True)
raise | def function[run, parameter[self]]:
constant[Actually runs the work unit.
This is called by the standard worker system, generally
once per work unit. It requires the work spec to contain
keys ``module``, ``run_function``, and ``terminate_function``.
It looks up ``run_function`` in :attr:`module` and calls that
function with :const:`self` as its only parameter.
]
<ast.Try object at 0x7da1b14e5510> | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[self] . identifier[key] ))
identifier[run_function] = identifier[getattr] ( identifier[self] . identifier[module] , identifier[self] . identifier[spec] [ literal[string] ])
identifier[ret_val] = identifier[run_function] ( identifier[self] )
identifier[self] . identifier[update] ()
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[self] . identifier[key] ))
keyword[return] identifier[ret_val]
keyword[except] identifier[LostLease] :
identifier[logger] . identifier[warning] ( literal[string] . identifier[format] ( identifier[self] . identifier[key] ))
keyword[raise]
keyword[except] identifier[Exception] :
identifier[logger] . identifier[error] ( literal[string] . identifier[format] ( identifier[self] . identifier[key] ),
identifier[exc_info] = keyword[True] )
keyword[raise] | def run(self):
"""Actually runs the work unit.
This is called by the standard worker system, generally
once per work unit. It requires the work spec to contain
keys ``module``, ``run_function``, and ``terminate_function``.
It looks up ``run_function`` in :attr:`module` and calls that
function with :const:`self` as its only parameter.
"""
try:
logger.info('running work unit {0}'.format(self.key))
run_function = getattr(self.module, self.spec['run_function'])
ret_val = run_function(self)
self.update()
logger.info('completed work unit {0}'.format(self.key))
return ret_val # depends on [control=['try'], data=[]]
except LostLease:
logger.warning('work unit {0} timed out'.format(self.key))
raise # depends on [control=['except'], data=[]]
except Exception:
logger.error('work unit {0} failed'.format(self.key), exc_info=True)
raise # depends on [control=['except'], data=[]] |
def GenerateTaskID(self):
"""Generates a new, unique task_id."""
# Random number can not be zero since next_id_base must increment.
random_number = random.PositiveUInt16()
# 16 bit random numbers
with GrrMessage.lock:
next_id_base = GrrMessage.next_id_base
id_base = (next_id_base + random_number) & 0xFFFFFFFF
if id_base < next_id_base:
time.sleep(0.001)
GrrMessage.next_id_base = id_base
# 32 bit timestamp (in 1/1000 second resolution)
time_base = (int(time.time() * 1000) & 0x1FFFFFFF) << 32
task_id = time_base | id_base
self.Set("task_id", task_id)
return task_id | def function[GenerateTaskID, parameter[self]]:
constant[Generates a new, unique task_id.]
variable[random_number] assign[=] call[name[random].PositiveUInt16, parameter[]]
with name[GrrMessage].lock begin[:]
variable[next_id_base] assign[=] name[GrrMessage].next_id_base
variable[id_base] assign[=] binary_operation[binary_operation[name[next_id_base] + name[random_number]] <ast.BitAnd object at 0x7da2590d6b60> constant[4294967295]]
if compare[name[id_base] less[<] name[next_id_base]] begin[:]
call[name[time].sleep, parameter[constant[0.001]]]
name[GrrMessage].next_id_base assign[=] name[id_base]
variable[time_base] assign[=] binary_operation[binary_operation[call[name[int], parameter[binary_operation[call[name[time].time, parameter[]] * constant[1000]]]] <ast.BitAnd object at 0x7da2590d6b60> constant[536870911]] <ast.LShift object at 0x7da2590d69e0> constant[32]]
variable[task_id] assign[=] binary_operation[name[time_base] <ast.BitOr object at 0x7da2590d6aa0> name[id_base]]
call[name[self].Set, parameter[constant[task_id], name[task_id]]]
return[name[task_id]] | keyword[def] identifier[GenerateTaskID] ( identifier[self] ):
literal[string]
identifier[random_number] = identifier[random] . identifier[PositiveUInt16] ()
keyword[with] identifier[GrrMessage] . identifier[lock] :
identifier[next_id_base] = identifier[GrrMessage] . identifier[next_id_base]
identifier[id_base] =( identifier[next_id_base] + identifier[random_number] )& literal[int]
keyword[if] identifier[id_base] < identifier[next_id_base] :
identifier[time] . identifier[sleep] ( literal[int] )
identifier[GrrMessage] . identifier[next_id_base] = identifier[id_base]
identifier[time_base] =( identifier[int] ( identifier[time] . identifier[time] ()* literal[int] )& literal[int] )<< literal[int]
identifier[task_id] = identifier[time_base] | identifier[id_base]
identifier[self] . identifier[Set] ( literal[string] , identifier[task_id] )
keyword[return] identifier[task_id] | def GenerateTaskID(self):
"""Generates a new, unique task_id."""
# Random number can not be zero since next_id_base must increment.
random_number = random.PositiveUInt16()
# 16 bit random numbers
with GrrMessage.lock:
next_id_base = GrrMessage.next_id_base
id_base = next_id_base + random_number & 4294967295
if id_base < next_id_base:
time.sleep(0.001) # depends on [control=['if'], data=[]]
GrrMessage.next_id_base = id_base # depends on [control=['with'], data=[]]
# 32 bit timestamp (in 1/1000 second resolution)
time_base = (int(time.time() * 1000) & 536870911) << 32
task_id = time_base | id_base
self.Set('task_id', task_id)
return task_id |
def create_machine(self, name=None, package=None, dataset=None,
metadata=None, tags=None, boot_script=None, credentials=False,
image=None, networks=None):
"""
::
POST /:login/machines
Provision a machine in the current
:py:class:`smartdc.datacenter.DataCenter`, returning an instantiated
:py:class:`smartdc.machine.Machine` object. All of the parameter
values are optional, as they are assigned default values by the
datacenter's API itself.
:param name: a human-readable label for the machine
:type name: :py:class:`basestring`
:param package: cluster of resource values identified by name
:type package: :py:class:`basestring` or :py:class:`dict`
:param image: an identifier for the base operating system image
(formerly a ``dataset``)
:type image: :py:class:`basestring` or :py:class:`dict`
:param dataset: base operating system image identified by a globally
unique ID or URN (deprecated)
:type dataset: :py:class:`basestring` or :py:class:`dict`
:param metadata: keys & values with arbitrary supplementary
details for the machine, accessible from the machine itself
:type metadata: :py:class:`dict`
:param tags: keys & values with arbitrary supplementary
identifying information for filtering when querying for machines
:type tags: :py:class:`dict`
:param networks: list of networks where this machine will belong to
:type networks: :py:class:`list`
:param boot_script: path to a file to upload for execution on boot
:type boot_script: :py:class:`basestring` as file path
:rtype: :py:class:`smartdc.machine.Machine`
If `package`, `image`, or `dataset` are passed a :py:class:`dict` containing a
`name` key (in the case of `package`) or an `id` key (in the case of
`image` or `dataset`), it passes the corresponding value. The server API
appears to resolve incomplete or ambiguous dataset URNs with the
highest version number.
"""
params = {}
if name:
assert re.match(r'[a-zA-Z0-9]([a-zA-Z0-9\-\.]*[a-zA-Z0-9])?$',
name), "Illegal name"
params['name'] = name
if package:
if isinstance(package, dict):
package = package['name']
params['package'] = package
if image:
if isinstance(image, dict):
image = image['id']
params['image'] = image
if dataset and not image:
if isinstance(dataset, dict):
dataset = dataset.get('id', dataset['urn'])
params['dataset'] = dataset
if metadata:
for k, v in metadata.items():
params['metadata.' + str(k)] = v
if tags:
for k, v in tags.items():
params['tag.' + str(k)] = v
if boot_script:
with open(boot_script) as f:
params['metadata.user-script'] = f.read()
if networks:
if isinstance(networks, list):
params['networks'] = networks
elif isinstance(networks, basestring):
params['networks'] = [networks]
j, r = self.request('POST', 'machines', data=params)
if r.status_code >= 400:
print(j, file=sys.stderr)
r.raise_for_status()
return Machine(datacenter=self, data=j) | def function[create_machine, parameter[self, name, package, dataset, metadata, tags, boot_script, credentials, image, networks]]:
constant[
::
POST /:login/machines
Provision a machine in the current
:py:class:`smartdc.datacenter.DataCenter`, returning an instantiated
:py:class:`smartdc.machine.Machine` object. All of the parameter
values are optional, as they are assigned default values by the
datacenter's API itself.
:param name: a human-readable label for the machine
:type name: :py:class:`basestring`
:param package: cluster of resource values identified by name
:type package: :py:class:`basestring` or :py:class:`dict`
:param image: an identifier for the base operating system image
(formerly a ``dataset``)
:type image: :py:class:`basestring` or :py:class:`dict`
:param dataset: base operating system image identified by a globally
unique ID or URN (deprecated)
:type dataset: :py:class:`basestring` or :py:class:`dict`
:param metadata: keys & values with arbitrary supplementary
details for the machine, accessible from the machine itself
:type metadata: :py:class:`dict`
:param tags: keys & values with arbitrary supplementary
identifying information for filtering when querying for machines
:type tags: :py:class:`dict`
:param networks: list of networks where this machine will belong to
:type networks: :py:class:`list`
:param boot_script: path to a file to upload for execution on boot
:type boot_script: :py:class:`basestring` as file path
:rtype: :py:class:`smartdc.machine.Machine`
If `package`, `image`, or `dataset` are passed a :py:class:`dict` containing a
`name` key (in the case of `package`) or an `id` key (in the case of
`image` or `dataset`), it passes the corresponding value. The server API
appears to resolve incomplete or ambiguous dataset URNs with the
highest version number.
]
variable[params] assign[=] dictionary[[], []]
if name[name] begin[:]
assert[call[name[re].match, parameter[constant[[a-zA-Z0-9]([a-zA-Z0-9\-\.]*[a-zA-Z0-9])?$], name[name]]]]
call[name[params]][constant[name]] assign[=] name[name]
if name[package] begin[:]
if call[name[isinstance], parameter[name[package], name[dict]]] begin[:]
variable[package] assign[=] call[name[package]][constant[name]]
call[name[params]][constant[package]] assign[=] name[package]
if name[image] begin[:]
if call[name[isinstance], parameter[name[image], name[dict]]] begin[:]
variable[image] assign[=] call[name[image]][constant[id]]
call[name[params]][constant[image]] assign[=] name[image]
if <ast.BoolOp object at 0x7da1b25875b0> begin[:]
if call[name[isinstance], parameter[name[dataset], name[dict]]] begin[:]
variable[dataset] assign[=] call[name[dataset].get, parameter[constant[id], call[name[dataset]][constant[urn]]]]
call[name[params]][constant[dataset]] assign[=] name[dataset]
if name[metadata] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b2370400>, <ast.Name object at 0x7da1b2373f40>]]] in starred[call[name[metadata].items, parameter[]]] begin[:]
call[name[params]][binary_operation[constant[metadata.] + call[name[str], parameter[name[k]]]]] assign[=] name[v]
if name[tags] begin[:]
for taget[tuple[[<ast.Name object at 0x7da204960670>, <ast.Name object at 0x7da204961030>]]] in starred[call[name[tags].items, parameter[]]] begin[:]
call[name[params]][binary_operation[constant[tag.] + call[name[str], parameter[name[k]]]]] assign[=] name[v]
if name[boot_script] begin[:]
with call[name[open], parameter[name[boot_script]]] begin[:]
call[name[params]][constant[metadata.user-script]] assign[=] call[name[f].read, parameter[]]
if name[networks] begin[:]
if call[name[isinstance], parameter[name[networks], name[list]]] begin[:]
call[name[params]][constant[networks]] assign[=] name[networks]
<ast.Tuple object at 0x7da204963be0> assign[=] call[name[self].request, parameter[constant[POST], constant[machines]]]
if compare[name[r].status_code greater_or_equal[>=] constant[400]] begin[:]
call[name[print], parameter[name[j]]]
call[name[r].raise_for_status, parameter[]]
return[call[name[Machine], parameter[]]] | keyword[def] identifier[create_machine] ( identifier[self] , identifier[name] = keyword[None] , identifier[package] = keyword[None] , identifier[dataset] = keyword[None] ,
identifier[metadata] = keyword[None] , identifier[tags] = keyword[None] , identifier[boot_script] = keyword[None] , identifier[credentials] = keyword[False] ,
identifier[image] = keyword[None] , identifier[networks] = keyword[None] ):
literal[string]
identifier[params] ={}
keyword[if] identifier[name] :
keyword[assert] identifier[re] . identifier[match] ( literal[string] ,
identifier[name] ), literal[string]
identifier[params] [ literal[string] ]= identifier[name]
keyword[if] identifier[package] :
keyword[if] identifier[isinstance] ( identifier[package] , identifier[dict] ):
identifier[package] = identifier[package] [ literal[string] ]
identifier[params] [ literal[string] ]= identifier[package]
keyword[if] identifier[image] :
keyword[if] identifier[isinstance] ( identifier[image] , identifier[dict] ):
identifier[image] = identifier[image] [ literal[string] ]
identifier[params] [ literal[string] ]= identifier[image]
keyword[if] identifier[dataset] keyword[and] keyword[not] identifier[image] :
keyword[if] identifier[isinstance] ( identifier[dataset] , identifier[dict] ):
identifier[dataset] = identifier[dataset] . identifier[get] ( literal[string] , identifier[dataset] [ literal[string] ])
identifier[params] [ literal[string] ]= identifier[dataset]
keyword[if] identifier[metadata] :
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[metadata] . identifier[items] ():
identifier[params] [ literal[string] + identifier[str] ( identifier[k] )]= identifier[v]
keyword[if] identifier[tags] :
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[tags] . identifier[items] ():
identifier[params] [ literal[string] + identifier[str] ( identifier[k] )]= identifier[v]
keyword[if] identifier[boot_script] :
keyword[with] identifier[open] ( identifier[boot_script] ) keyword[as] identifier[f] :
identifier[params] [ literal[string] ]= identifier[f] . identifier[read] ()
keyword[if] identifier[networks] :
keyword[if] identifier[isinstance] ( identifier[networks] , identifier[list] ):
identifier[params] [ literal[string] ]= identifier[networks]
keyword[elif] identifier[isinstance] ( identifier[networks] , identifier[basestring] ):
identifier[params] [ literal[string] ]=[ identifier[networks] ]
identifier[j] , identifier[r] = identifier[self] . identifier[request] ( literal[string] , literal[string] , identifier[data] = identifier[params] )
keyword[if] identifier[r] . identifier[status_code] >= literal[int] :
identifier[print] ( identifier[j] , identifier[file] = identifier[sys] . identifier[stderr] )
identifier[r] . identifier[raise_for_status] ()
keyword[return] identifier[Machine] ( identifier[datacenter] = identifier[self] , identifier[data] = identifier[j] ) | def create_machine(self, name=None, package=None, dataset=None, metadata=None, tags=None, boot_script=None, credentials=False, image=None, networks=None):
"""
::
POST /:login/machines
Provision a machine in the current
:py:class:`smartdc.datacenter.DataCenter`, returning an instantiated
:py:class:`smartdc.machine.Machine` object. All of the parameter
values are optional, as they are assigned default values by the
datacenter's API itself.
:param name: a human-readable label for the machine
:type name: :py:class:`basestring`
:param package: cluster of resource values identified by name
:type package: :py:class:`basestring` or :py:class:`dict`
:param image: an identifier for the base operating system image
(formerly a ``dataset``)
:type image: :py:class:`basestring` or :py:class:`dict`
:param dataset: base operating system image identified by a globally
unique ID or URN (deprecated)
:type dataset: :py:class:`basestring` or :py:class:`dict`
:param metadata: keys & values with arbitrary supplementary
details for the machine, accessible from the machine itself
:type metadata: :py:class:`dict`
:param tags: keys & values with arbitrary supplementary
identifying information for filtering when querying for machines
:type tags: :py:class:`dict`
:param networks: list of networks where this machine will belong to
:type networks: :py:class:`list`
:param boot_script: path to a file to upload for execution on boot
:type boot_script: :py:class:`basestring` as file path
:rtype: :py:class:`smartdc.machine.Machine`
If `package`, `image`, or `dataset` are passed a :py:class:`dict` containing a
`name` key (in the case of `package`) or an `id` key (in the case of
`image` or `dataset`), it passes the corresponding value. The server API
appears to resolve incomplete or ambiguous dataset URNs with the
highest version number.
"""
params = {}
if name:
assert re.match('[a-zA-Z0-9]([a-zA-Z0-9\\-\\.]*[a-zA-Z0-9])?$', name), 'Illegal name'
params['name'] = name # depends on [control=['if'], data=[]]
if package:
if isinstance(package, dict):
package = package['name'] # depends on [control=['if'], data=[]]
params['package'] = package # depends on [control=['if'], data=[]]
if image:
if isinstance(image, dict):
image = image['id'] # depends on [control=['if'], data=[]]
params['image'] = image # depends on [control=['if'], data=[]]
if dataset and (not image):
if isinstance(dataset, dict):
dataset = dataset.get('id', dataset['urn']) # depends on [control=['if'], data=[]]
params['dataset'] = dataset # depends on [control=['if'], data=[]]
if metadata:
for (k, v) in metadata.items():
params['metadata.' + str(k)] = v # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
if tags:
for (k, v) in tags.items():
params['tag.' + str(k)] = v # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
if boot_script:
with open(boot_script) as f:
params['metadata.user-script'] = f.read() # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
if networks:
if isinstance(networks, list):
params['networks'] = networks # depends on [control=['if'], data=[]]
elif isinstance(networks, basestring):
params['networks'] = [networks] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
(j, r) = self.request('POST', 'machines', data=params)
if r.status_code >= 400:
print(j, file=sys.stderr)
r.raise_for_status() # depends on [control=['if'], data=[]]
return Machine(datacenter=self, data=j) |
def createJWT(self, payload={}, expiredSeconds=3600):
""" 生成token: https://tools.ietf.org/html/rfc7519
@param payload dict: 自定义公有或私有载荷, 存放有效信息的地方;
@param expiredSeconds int: Token过期时间,单位秒,签发时间是本地当前时间戳,此参数指定签发时间之后多少秒过期;
"""
#1. check params
if isinstance(payload, dict):
for i in self._payloadkey:
if i in payload:
raise KeyError("Standard key exists in payload")
else:
raise TypeError("payload is not a dict")
#2. predefined data
payload.update(self._payload)
payload.update(
exp=self.timestamp_after_timestamp(self.get_current_timestamp(), seconds=expiredSeconds),
iat=self.get_current_timestamp()
)
#3. base64 urlsafe encode
#头部编码
first_part = base64.urlsafe_b64encode(json.dumps(self._header, sort_keys=True, separators=(',', ':')))
#载荷消息体编码
second_part = base64.urlsafe_b64encode(json.dumps(payload, sort_keys=True, separators=(',', ':')))
#签名以上两部分: 把header、playload的base64url编码加密后再次base64编码
third_part = base64.urlsafe_b64encode(self.signatureJWT("{0}.{1}".format(first_part, second_part)))
#4. returns the available token
token = first_part + '.' + second_part + '.' + third_part
logging.info("Generating token ok")
return token | def function[createJWT, parameter[self, payload, expiredSeconds]]:
constant[ 生成token: https://tools.ietf.org/html/rfc7519
@param payload dict: 自定义公有或私有载荷, 存放有效信息的地方;
@param expiredSeconds int: Token过期时间,单位秒,签发时间是本地当前时间戳,此参数指定签发时间之后多少秒过期;
]
if call[name[isinstance], parameter[name[payload], name[dict]]] begin[:]
for taget[name[i]] in starred[name[self]._payloadkey] begin[:]
if compare[name[i] in name[payload]] begin[:]
<ast.Raise object at 0x7da1b26afd30>
call[name[payload].update, parameter[name[self]._payload]]
call[name[payload].update, parameter[]]
variable[first_part] assign[=] call[name[base64].urlsafe_b64encode, parameter[call[name[json].dumps, parameter[name[self]._header]]]]
variable[second_part] assign[=] call[name[base64].urlsafe_b64encode, parameter[call[name[json].dumps, parameter[name[payload]]]]]
variable[third_part] assign[=] call[name[base64].urlsafe_b64encode, parameter[call[name[self].signatureJWT, parameter[call[constant[{0}.{1}].format, parameter[name[first_part], name[second_part]]]]]]]
variable[token] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[first_part] + constant[.]] + name[second_part]] + constant[.]] + name[third_part]]
call[name[logging].info, parameter[constant[Generating token ok]]]
return[name[token]] | keyword[def] identifier[createJWT] ( identifier[self] , identifier[payload] ={}, identifier[expiredSeconds] = literal[int] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[payload] , identifier[dict] ):
keyword[for] identifier[i] keyword[in] identifier[self] . identifier[_payloadkey] :
keyword[if] identifier[i] keyword[in] identifier[payload] :
keyword[raise] identifier[KeyError] ( literal[string] )
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[payload] . identifier[update] ( identifier[self] . identifier[_payload] )
identifier[payload] . identifier[update] (
identifier[exp] = identifier[self] . identifier[timestamp_after_timestamp] ( identifier[self] . identifier[get_current_timestamp] (), identifier[seconds] = identifier[expiredSeconds] ),
identifier[iat] = identifier[self] . identifier[get_current_timestamp] ()
)
identifier[first_part] = identifier[base64] . identifier[urlsafe_b64encode] ( identifier[json] . identifier[dumps] ( identifier[self] . identifier[_header] , identifier[sort_keys] = keyword[True] , identifier[separators] =( literal[string] , literal[string] )))
identifier[second_part] = identifier[base64] . identifier[urlsafe_b64encode] ( identifier[json] . identifier[dumps] ( identifier[payload] , identifier[sort_keys] = keyword[True] , identifier[separators] =( literal[string] , literal[string] )))
identifier[third_part] = identifier[base64] . identifier[urlsafe_b64encode] ( identifier[self] . identifier[signatureJWT] ( literal[string] . identifier[format] ( identifier[first_part] , identifier[second_part] )))
identifier[token] = identifier[first_part] + literal[string] + identifier[second_part] + literal[string] + identifier[third_part]
identifier[logging] . identifier[info] ( literal[string] )
keyword[return] identifier[token] | def createJWT(self, payload={}, expiredSeconds=3600):
""" 生成token: https://tools.ietf.org/html/rfc7519
@param payload dict: 自定义公有或私有载荷, 存放有效信息的地方;
@param expiredSeconds int: Token过期时间,单位秒,签发时间是本地当前时间戳,此参数指定签发时间之后多少秒过期;
"""
#1. check params
if isinstance(payload, dict):
for i in self._payloadkey:
if i in payload:
raise KeyError('Standard key exists in payload') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
else:
raise TypeError('payload is not a dict')
#2. predefined data
payload.update(self._payload)
payload.update(exp=self.timestamp_after_timestamp(self.get_current_timestamp(), seconds=expiredSeconds), iat=self.get_current_timestamp())
#3. base64 urlsafe encode
#头部编码
first_part = base64.urlsafe_b64encode(json.dumps(self._header, sort_keys=True, separators=(',', ':')))
#载荷消息体编码
second_part = base64.urlsafe_b64encode(json.dumps(payload, sort_keys=True, separators=(',', ':')))
#签名以上两部分: 把header、playload的base64url编码加密后再次base64编码
third_part = base64.urlsafe_b64encode(self.signatureJWT('{0}.{1}'.format(first_part, second_part)))
#4. returns the available token
token = first_part + '.' + second_part + '.' + third_part
logging.info('Generating token ok')
return token |
def atlas_rank_peers_by_data_availability( peer_list=None, peer_table=None, local_inv=None, con=None, path=None ):
"""
Get a ranking of peers to contact for a zonefile.
Peers are ranked by the number of zonefiles they have
which we don't have.
This is used to select neighbors.
"""
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_list is None:
peer_list = ptbl.keys()[:]
if local_inv is None:
# what's my inventory?
inv_len = atlasdb_zonefile_inv_length( con=con, path=path )
local_inv = atlas_make_zonefile_inventory( 0, inv_len, con=con, path=path )
peer_availability_ranking = [] # (health score, peer hostport)
for peer_hostport in peer_list:
peer_inv = atlas_peer_get_zonefile_inventory( peer_hostport, peer_table=ptbl )
# ignore peers that we don't have an inventory for
if len(peer_inv) == 0:
continue
availability_score = atlas_inventory_count_missing( local_inv, peer_inv )
peer_availability_ranking.append( (availability_score, peer_hostport) )
# sort on availability
peer_availability_ranking.sort()
peer_availability_ranking.reverse()
return [peer_hp for _, peer_hp in peer_availability_ranking] | def function[atlas_rank_peers_by_data_availability, parameter[peer_list, peer_table, local_inv, con, path]]:
constant[
Get a ranking of peers to contact for a zonefile.
Peers are ranked by the number of zonefiles they have
which we don't have.
This is used to select neighbors.
]
with call[name[AtlasPeerTableLocked], parameter[name[peer_table]]] begin[:]
if compare[name[peer_list] is constant[None]] begin[:]
variable[peer_list] assign[=] call[call[name[ptbl].keys, parameter[]]][<ast.Slice object at 0x7da1b26ac880>]
if compare[name[local_inv] is constant[None]] begin[:]
variable[inv_len] assign[=] call[name[atlasdb_zonefile_inv_length], parameter[]]
variable[local_inv] assign[=] call[name[atlas_make_zonefile_inventory], parameter[constant[0], name[inv_len]]]
variable[peer_availability_ranking] assign[=] list[[]]
for taget[name[peer_hostport]] in starred[name[peer_list]] begin[:]
variable[peer_inv] assign[=] call[name[atlas_peer_get_zonefile_inventory], parameter[name[peer_hostport]]]
if compare[call[name[len], parameter[name[peer_inv]]] equal[==] constant[0]] begin[:]
continue
variable[availability_score] assign[=] call[name[atlas_inventory_count_missing], parameter[name[local_inv], name[peer_inv]]]
call[name[peer_availability_ranking].append, parameter[tuple[[<ast.Name object at 0x7da1b26acca0>, <ast.Name object at 0x7da20e961e40>]]]]
call[name[peer_availability_ranking].sort, parameter[]]
call[name[peer_availability_ranking].reverse, parameter[]]
return[<ast.ListComp object at 0x7da20e960760>] | keyword[def] identifier[atlas_rank_peers_by_data_availability] ( identifier[peer_list] = keyword[None] , identifier[peer_table] = keyword[None] , identifier[local_inv] = keyword[None] , identifier[con] = keyword[None] , identifier[path] = keyword[None] ):
literal[string]
keyword[with] identifier[AtlasPeerTableLocked] ( identifier[peer_table] ) keyword[as] identifier[ptbl] :
keyword[if] identifier[peer_list] keyword[is] keyword[None] :
identifier[peer_list] = identifier[ptbl] . identifier[keys] ()[:]
keyword[if] identifier[local_inv] keyword[is] keyword[None] :
identifier[inv_len] = identifier[atlasdb_zonefile_inv_length] ( identifier[con] = identifier[con] , identifier[path] = identifier[path] )
identifier[local_inv] = identifier[atlas_make_zonefile_inventory] ( literal[int] , identifier[inv_len] , identifier[con] = identifier[con] , identifier[path] = identifier[path] )
identifier[peer_availability_ranking] =[]
keyword[for] identifier[peer_hostport] keyword[in] identifier[peer_list] :
identifier[peer_inv] = identifier[atlas_peer_get_zonefile_inventory] ( identifier[peer_hostport] , identifier[peer_table] = identifier[ptbl] )
keyword[if] identifier[len] ( identifier[peer_inv] )== literal[int] :
keyword[continue]
identifier[availability_score] = identifier[atlas_inventory_count_missing] ( identifier[local_inv] , identifier[peer_inv] )
identifier[peer_availability_ranking] . identifier[append] (( identifier[availability_score] , identifier[peer_hostport] ))
identifier[peer_availability_ranking] . identifier[sort] ()
identifier[peer_availability_ranking] . identifier[reverse] ()
keyword[return] [ identifier[peer_hp] keyword[for] identifier[_] , identifier[peer_hp] keyword[in] identifier[peer_availability_ranking] ] | def atlas_rank_peers_by_data_availability(peer_list=None, peer_table=None, local_inv=None, con=None, path=None):
"""
Get a ranking of peers to contact for a zonefile.
Peers are ranked by the number of zonefiles they have
which we don't have.
This is used to select neighbors.
"""
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_list is None:
peer_list = ptbl.keys()[:] # depends on [control=['if'], data=['peer_list']]
if local_inv is None:
# what's my inventory?
inv_len = atlasdb_zonefile_inv_length(con=con, path=path)
local_inv = atlas_make_zonefile_inventory(0, inv_len, con=con, path=path) # depends on [control=['if'], data=['local_inv']]
peer_availability_ranking = [] # (health score, peer hostport)
for peer_hostport in peer_list:
peer_inv = atlas_peer_get_zonefile_inventory(peer_hostport, peer_table=ptbl)
# ignore peers that we don't have an inventory for
if len(peer_inv) == 0:
continue # depends on [control=['if'], data=[]]
availability_score = atlas_inventory_count_missing(local_inv, peer_inv)
peer_availability_ranking.append((availability_score, peer_hostport)) # depends on [control=['for'], data=['peer_hostport']] # depends on [control=['with'], data=['ptbl']]
# sort on availability
peer_availability_ranking.sort()
peer_availability_ranking.reverse()
return [peer_hp for (_, peer_hp) in peer_availability_ranking] |
def update_hparams_for_universal_transformer(hparams):
"""Adds default hparams for all of the variants of the Universal Transformer.
Args:
hparams: default hparams (usually one of the standard hparams from
transformer model (like "transformer_base")
Returns:
hparams with default values for Universal Transformers hyper-parameters
"""
hparams.daisy_chain_variables = False # Breaks multi-gpu in while loops.
# If not None, mixes vanilla transformer with Universal Transformer.
# Options: None, "before_ut", and "after_ut".
hparams.add_hparam("mix_with_transformer", None)
# Number of vanilla transformer layers used to be mixed with u-transofmer.
hparams.add_hparam("num_mixedin_layers", 2)
# Number of transformer layers within the recurrent block (default is 1).
hparams.add_hparam("num_inrecurrence_layers", 1)
# Type of recurrency:
# basic, highway, skip, dwa, act, rnn, gru, lstm.
hparams.add_hparam("recurrence_type", "basic")
# Number of steps (which is equivalent to num layer in transformer).
hparams.add_hparam("num_rec_steps", hparams.num_hidden_layers)
# Add the positional mebedding at each step(horisontal timing)
hparams.add_hparam("add_position_timing_signal", True)
if hparams.add_position_timing_signal:
hparams.pos = None
# Logic of position shifting when using timing signal:
# None, "random", "step"
hparams.add_hparam("position_start_index", None)
# Add an step embedding at each step (vertical timing)
hparams.add_hparam("add_step_timing_signal", True)
# Either "learned" or "sinusoid"
hparams.add_hparam("step_timing_signal_type", "learned")
# Add or concat the timing signal (applied both on position and step timing).
# Options: "add" and "concat".
hparams.add_hparam("add_or_concat_timing_signal", "add")
# Add SRU at the beginning of each Universal Transformer step.
# This can be considered as a position timing signal
hparams.add_hparam("add_sru", False)
# Default ffn layer is separable convolution.
# Options: "fc" and "sepconv".
hparams.add_hparam("transformer_ffn_type", "fc")
# Transform bias (in models with highway or skip connection).
hparams.add_hparam("transform_bias_init", -1.0)
hparams.add_hparam("couple_carry_transform_gates", True)
# Depth-wise attention (grid-transformer!) hparams:
# Adds depth embedding, if true.
hparams.add_hparam("depth_embedding", True)
# Learns attention weights for elements (instead of positions), if true.
hparams.add_hparam("dwa_elements", True)
# Type of ffn_layer used for gate in skip, highway, etc.
# "dense" or "dense_dropconnect".
# With dense_relu_dense, the bias/kernel initializations will not be applied.
hparams.add_hparam("gate_ffn_layer", "dense")
# LSTM forget bias for lstm style recurrence.
hparams.add_hparam("lstm_forget_bias", 1.0)
# Uses the memory at the last step as the final output, if true.
hparams.add_hparam("use_memory_as_final_state", False)
# if also add a ffn unit to the transition function when using gru/lstm
hparams.add_hparam("add_ffn_unit_to_the_transition_function", False)
# Type of act: basic/accumulated/global (instead of position-wise!)/random.
hparams.add_hparam("act_type", "basic")
# Max number of steps (forces halting at this step).
hparams.add_hparam("act_max_steps", 2 * hparams.num_hidden_layers)
hparams.add_hparam("act_halting_bias_init", 1.0)
hparams.add_hparam("act_epsilon", 0.01)
hparams.add_hparam("act_loss_weight", 0.01)
return hparams | def function[update_hparams_for_universal_transformer, parameter[hparams]]:
constant[Adds default hparams for all of the variants of the Universal Transformer.
Args:
hparams: default hparams (usually one of the standard hparams from
transformer model (like "transformer_base")
Returns:
hparams with default values for Universal Transformers hyper-parameters
]
name[hparams].daisy_chain_variables assign[=] constant[False]
call[name[hparams].add_hparam, parameter[constant[mix_with_transformer], constant[None]]]
call[name[hparams].add_hparam, parameter[constant[num_mixedin_layers], constant[2]]]
call[name[hparams].add_hparam, parameter[constant[num_inrecurrence_layers], constant[1]]]
call[name[hparams].add_hparam, parameter[constant[recurrence_type], constant[basic]]]
call[name[hparams].add_hparam, parameter[constant[num_rec_steps], name[hparams].num_hidden_layers]]
call[name[hparams].add_hparam, parameter[constant[add_position_timing_signal], constant[True]]]
if name[hparams].add_position_timing_signal begin[:]
name[hparams].pos assign[=] constant[None]
call[name[hparams].add_hparam, parameter[constant[position_start_index], constant[None]]]
call[name[hparams].add_hparam, parameter[constant[add_step_timing_signal], constant[True]]]
call[name[hparams].add_hparam, parameter[constant[step_timing_signal_type], constant[learned]]]
call[name[hparams].add_hparam, parameter[constant[add_or_concat_timing_signal], constant[add]]]
call[name[hparams].add_hparam, parameter[constant[add_sru], constant[False]]]
call[name[hparams].add_hparam, parameter[constant[transformer_ffn_type], constant[fc]]]
call[name[hparams].add_hparam, parameter[constant[transform_bias_init], <ast.UnaryOp object at 0x7da18f00f190>]]
call[name[hparams].add_hparam, parameter[constant[couple_carry_transform_gates], constant[True]]]
call[name[hparams].add_hparam, parameter[constant[depth_embedding], constant[True]]]
call[name[hparams].add_hparam, parameter[constant[dwa_elements], constant[True]]]
call[name[hparams].add_hparam, parameter[constant[gate_ffn_layer], constant[dense]]]
call[name[hparams].add_hparam, parameter[constant[lstm_forget_bias], constant[1.0]]]
call[name[hparams].add_hparam, parameter[constant[use_memory_as_final_state], constant[False]]]
call[name[hparams].add_hparam, parameter[constant[add_ffn_unit_to_the_transition_function], constant[False]]]
call[name[hparams].add_hparam, parameter[constant[act_type], constant[basic]]]
call[name[hparams].add_hparam, parameter[constant[act_max_steps], binary_operation[constant[2] * name[hparams].num_hidden_layers]]]
call[name[hparams].add_hparam, parameter[constant[act_halting_bias_init], constant[1.0]]]
call[name[hparams].add_hparam, parameter[constant[act_epsilon], constant[0.01]]]
call[name[hparams].add_hparam, parameter[constant[act_loss_weight], constant[0.01]]]
return[name[hparams]] | keyword[def] identifier[update_hparams_for_universal_transformer] ( identifier[hparams] ):
literal[string]
identifier[hparams] . identifier[daisy_chain_variables] = keyword[False]
identifier[hparams] . identifier[add_hparam] ( literal[string] , keyword[None] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[string] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , identifier[hparams] . identifier[num_hidden_layers] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , keyword[True] )
keyword[if] identifier[hparams] . identifier[add_position_timing_signal] :
identifier[hparams] . identifier[pos] = keyword[None]
identifier[hparams] . identifier[add_hparam] ( literal[string] , keyword[None] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , keyword[True] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[string] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[string] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , keyword[False] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[string] )
identifier[hparams] . identifier[add_hparam] ( literal[string] ,- literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , keyword[True] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , keyword[True] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , keyword[True] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[string] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , keyword[False] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , keyword[False] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[string] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] * identifier[hparams] . identifier[num_hidden_layers] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
keyword[return] identifier[hparams] | def update_hparams_for_universal_transformer(hparams):
"""Adds default hparams for all of the variants of the Universal Transformer.
Args:
hparams: default hparams (usually one of the standard hparams from
transformer model (like "transformer_base")
Returns:
hparams with default values for Universal Transformers hyper-parameters
"""
hparams.daisy_chain_variables = False # Breaks multi-gpu in while loops.
# If not None, mixes vanilla transformer with Universal Transformer.
# Options: None, "before_ut", and "after_ut".
hparams.add_hparam('mix_with_transformer', None)
# Number of vanilla transformer layers used to be mixed with u-transofmer.
hparams.add_hparam('num_mixedin_layers', 2)
# Number of transformer layers within the recurrent block (default is 1).
hparams.add_hparam('num_inrecurrence_layers', 1)
# Type of recurrency:
# basic, highway, skip, dwa, act, rnn, gru, lstm.
hparams.add_hparam('recurrence_type', 'basic')
# Number of steps (which is equivalent to num layer in transformer).
hparams.add_hparam('num_rec_steps', hparams.num_hidden_layers)
# Add the positional mebedding at each step(horisontal timing)
hparams.add_hparam('add_position_timing_signal', True)
if hparams.add_position_timing_signal:
hparams.pos = None # depends on [control=['if'], data=[]]
# Logic of position shifting when using timing signal:
# None, "random", "step"
hparams.add_hparam('position_start_index', None)
# Add an step embedding at each step (vertical timing)
hparams.add_hparam('add_step_timing_signal', True)
# Either "learned" or "sinusoid"
hparams.add_hparam('step_timing_signal_type', 'learned')
# Add or concat the timing signal (applied both on position and step timing).
# Options: "add" and "concat".
hparams.add_hparam('add_or_concat_timing_signal', 'add')
# Add SRU at the beginning of each Universal Transformer step.
# This can be considered as a position timing signal
hparams.add_hparam('add_sru', False)
# Default ffn layer is separable convolution.
# Options: "fc" and "sepconv".
hparams.add_hparam('transformer_ffn_type', 'fc')
# Transform bias (in models with highway or skip connection).
hparams.add_hparam('transform_bias_init', -1.0)
hparams.add_hparam('couple_carry_transform_gates', True)
# Depth-wise attention (grid-transformer!) hparams:
# Adds depth embedding, if true.
hparams.add_hparam('depth_embedding', True)
# Learns attention weights for elements (instead of positions), if true.
hparams.add_hparam('dwa_elements', True)
# Type of ffn_layer used for gate in skip, highway, etc.
# "dense" or "dense_dropconnect".
# With dense_relu_dense, the bias/kernel initializations will not be applied.
hparams.add_hparam('gate_ffn_layer', 'dense')
# LSTM forget bias for lstm style recurrence.
hparams.add_hparam('lstm_forget_bias', 1.0)
# Uses the memory at the last step as the final output, if true.
hparams.add_hparam('use_memory_as_final_state', False)
# if also add a ffn unit to the transition function when using gru/lstm
hparams.add_hparam('add_ffn_unit_to_the_transition_function', False)
# Type of act: basic/accumulated/global (instead of position-wise!)/random.
hparams.add_hparam('act_type', 'basic')
# Max number of steps (forces halting at this step).
hparams.add_hparam('act_max_steps', 2 * hparams.num_hidden_layers)
hparams.add_hparam('act_halting_bias_init', 1.0)
hparams.add_hparam('act_epsilon', 0.01)
hparams.add_hparam('act_loss_weight', 0.01)
return hparams |
def add_to_hash(self, filename, hasher):
"""Contribute `filename`'s data to the Md5Hash `hasher`."""
hasher.update(self.executed_lines(filename))
hasher.update(self.executed_arcs(filename)) | def function[add_to_hash, parameter[self, filename, hasher]]:
constant[Contribute `filename`'s data to the Md5Hash `hasher`.]
call[name[hasher].update, parameter[call[name[self].executed_lines, parameter[name[filename]]]]]
call[name[hasher].update, parameter[call[name[self].executed_arcs, parameter[name[filename]]]]] | keyword[def] identifier[add_to_hash] ( identifier[self] , identifier[filename] , identifier[hasher] ):
literal[string]
identifier[hasher] . identifier[update] ( identifier[self] . identifier[executed_lines] ( identifier[filename] ))
identifier[hasher] . identifier[update] ( identifier[self] . identifier[executed_arcs] ( identifier[filename] )) | def add_to_hash(self, filename, hasher):
"""Contribute `filename`'s data to the Md5Hash `hasher`."""
hasher.update(self.executed_lines(filename))
hasher.update(self.executed_arcs(filename)) |
def delete(ctx, slot, force):
"""
Deletes the configuration of a slot.
"""
controller = ctx.obj['controller']
if not force and not controller.slot_status[slot - 1]:
ctx.fail('Not possible to delete an empty slot.')
force or click.confirm(
'Do you really want to delete'
' the configuration of slot {}?'.format(slot), abort=True, err=True)
click.echo('Deleting the configuration of slot {}...'.format(slot))
try:
controller.zap_slot(slot)
except YkpersError as e:
_failed_to_write_msg(ctx, e) | def function[delete, parameter[ctx, slot, force]]:
constant[
Deletes the configuration of a slot.
]
variable[controller] assign[=] call[name[ctx].obj][constant[controller]]
if <ast.BoolOp object at 0x7da18ede4970> begin[:]
call[name[ctx].fail, parameter[constant[Not possible to delete an empty slot.]]]
<ast.BoolOp object at 0x7da18ede6470>
call[name[click].echo, parameter[call[constant[Deleting the configuration of slot {}...].format, parameter[name[slot]]]]]
<ast.Try object at 0x7da20c796740> | keyword[def] identifier[delete] ( identifier[ctx] , identifier[slot] , identifier[force] ):
literal[string]
identifier[controller] = identifier[ctx] . identifier[obj] [ literal[string] ]
keyword[if] keyword[not] identifier[force] keyword[and] keyword[not] identifier[controller] . identifier[slot_status] [ identifier[slot] - literal[int] ]:
identifier[ctx] . identifier[fail] ( literal[string] )
identifier[force] keyword[or] identifier[click] . identifier[confirm] (
literal[string]
literal[string] . identifier[format] ( identifier[slot] ), identifier[abort] = keyword[True] , identifier[err] = keyword[True] )
identifier[click] . identifier[echo] ( literal[string] . identifier[format] ( identifier[slot] ))
keyword[try] :
identifier[controller] . identifier[zap_slot] ( identifier[slot] )
keyword[except] identifier[YkpersError] keyword[as] identifier[e] :
identifier[_failed_to_write_msg] ( identifier[ctx] , identifier[e] ) | def delete(ctx, slot, force):
"""
Deletes the configuration of a slot.
"""
controller = ctx.obj['controller']
if not force and (not controller.slot_status[slot - 1]):
ctx.fail('Not possible to delete an empty slot.') # depends on [control=['if'], data=[]]
force or click.confirm('Do you really want to delete the configuration of slot {}?'.format(slot), abort=True, err=True)
click.echo('Deleting the configuration of slot {}...'.format(slot))
try:
controller.zap_slot(slot) # depends on [control=['try'], data=[]]
except YkpersError as e:
_failed_to_write_msg(ctx, e) # depends on [control=['except'], data=['e']] |
def raw_escape(pattern, unix=False):
"""Apply raw character transform before applying escape."""
pattern = util.norm_pattern(pattern, False, True)
return escape(pattern, unix) | def function[raw_escape, parameter[pattern, unix]]:
constant[Apply raw character transform before applying escape.]
variable[pattern] assign[=] call[name[util].norm_pattern, parameter[name[pattern], constant[False], constant[True]]]
return[call[name[escape], parameter[name[pattern], name[unix]]]] | keyword[def] identifier[raw_escape] ( identifier[pattern] , identifier[unix] = keyword[False] ):
literal[string]
identifier[pattern] = identifier[util] . identifier[norm_pattern] ( identifier[pattern] , keyword[False] , keyword[True] )
keyword[return] identifier[escape] ( identifier[pattern] , identifier[unix] ) | def raw_escape(pattern, unix=False):
"""Apply raw character transform before applying escape."""
pattern = util.norm_pattern(pattern, False, True)
return escape(pattern, unix) |
def call(self, args=None, kwargs=None, node=None, send_timeout=1000, recv_timeout=5000, zmq_ctx=None):
"""
Calls a service on a node with req as arguments. if node is None, a node is chosen by zmq.
if zmq_ctx is passed, it will use the existing context
Uses a REQ socket. Ref : http://api.zeromq.org/2-1:zmq-socket
:param node : the node name
"""
context = zmq_ctx or zmq.Context()
assert isinstance(context, zmq.Context)
args = args or ()
assert isinstance(args, tuple)
kwargs = kwargs or {}
assert isinstance(kwargs, dict)
socket = context.socket(zmq.REQ)
# connect to all addresses ( optionally matching node name )
for n, a in [(n, a) for (n, a) in self.providers if (not node or n == node)]:
socket.connect(a)
# build message
fullreq = ServiceRequest(service=self.name, args=pickle.dumps(args), kwargs=pickle.dumps(kwargs))
poller = zmq.Poller()
poller.register(socket) # POLLIN for recv, POLLOUT for send
evts = dict(poller.poll(send_timeout))
if socket in evts and evts[socket] == zmq.POLLOUT:
socket.send(fullreq.serialize())
# TODO : find a way to get rid fo these timeouts when debugging
# TODO : when timeout Exception should occur ( not returning None )
evts = dict(poller.poll(recv_timeout)) # blocking until answer
if socket in evts and evts[socket] == zmq.POLLIN:
resp = socket.recv()
fullresp = ServiceResponse_dictparse(resp)
if fullresp.has_field('response'):
return pickle.loads(fullresp.response)
elif fullresp.has_field('exception'):
svcexc = fullresp.exception # This has already been parsed by ServiceResponse_dictparse
tb = pickle.loads(svcexc.traceback)
if Traceback and isinstance(tb, Traceback):
reraise(pickle.loads(svcexc.exc_type), pickle.loads(svcexc.exc_value), tb.as_traceback())
else: # traceback not usable
reraise(pickle.loads(svcexc.exc_type), pickle.loads(svcexc.exc_value), None)
else:
raise UnknownResponseTypeException("Unknown Response Type {0}".format(type(fullresp)))
else:
raise ServiceCallTimeout("Did not receive response through ZMQ socket.")
else:
raise ServiceCallTimeout("Can not send request through ZMQ socket.") | def function[call, parameter[self, args, kwargs, node, send_timeout, recv_timeout, zmq_ctx]]:
constant[
Calls a service on a node with req as arguments. if node is None, a node is chosen by zmq.
if zmq_ctx is passed, it will use the existing context
Uses a REQ socket. Ref : http://api.zeromq.org/2-1:zmq-socket
:param node : the node name
]
variable[context] assign[=] <ast.BoolOp object at 0x7da1b25ecf10>
assert[call[name[isinstance], parameter[name[context], name[zmq].Context]]]
variable[args] assign[=] <ast.BoolOp object at 0x7da1b25ed030>
assert[call[name[isinstance], parameter[name[args], name[tuple]]]]
variable[kwargs] assign[=] <ast.BoolOp object at 0x7da1b23724a0>
assert[call[name[isinstance], parameter[name[kwargs], name[dict]]]]
variable[socket] assign[=] call[name[context].socket, parameter[name[zmq].REQ]]
for taget[tuple[[<ast.Name object at 0x7da1b2372c50>, <ast.Name object at 0x7da1b2373550>]]] in starred[<ast.ListComp object at 0x7da1b2371120>] begin[:]
call[name[socket].connect, parameter[name[a]]]
variable[fullreq] assign[=] call[name[ServiceRequest], parameter[]]
variable[poller] assign[=] call[name[zmq].Poller, parameter[]]
call[name[poller].register, parameter[name[socket]]]
variable[evts] assign[=] call[name[dict], parameter[call[name[poller].poll, parameter[name[send_timeout]]]]]
if <ast.BoolOp object at 0x7da1b2373220> begin[:]
call[name[socket].send, parameter[call[name[fullreq].serialize, parameter[]]]]
variable[evts] assign[=] call[name[dict], parameter[call[name[poller].poll, parameter[name[recv_timeout]]]]]
if <ast.BoolOp object at 0x7da1b2372620> begin[:]
variable[resp] assign[=] call[name[socket].recv, parameter[]]
variable[fullresp] assign[=] call[name[ServiceResponse_dictparse], parameter[name[resp]]]
if call[name[fullresp].has_field, parameter[constant[response]]] begin[:]
return[call[name[pickle].loads, parameter[name[fullresp].response]]] | keyword[def] identifier[call] ( identifier[self] , identifier[args] = keyword[None] , identifier[kwargs] = keyword[None] , identifier[node] = keyword[None] , identifier[send_timeout] = literal[int] , identifier[recv_timeout] = literal[int] , identifier[zmq_ctx] = keyword[None] ):
literal[string]
identifier[context] = identifier[zmq_ctx] keyword[or] identifier[zmq] . identifier[Context] ()
keyword[assert] identifier[isinstance] ( identifier[context] , identifier[zmq] . identifier[Context] )
identifier[args] = identifier[args] keyword[or] ()
keyword[assert] identifier[isinstance] ( identifier[args] , identifier[tuple] )
identifier[kwargs] = identifier[kwargs] keyword[or] {}
keyword[assert] identifier[isinstance] ( identifier[kwargs] , identifier[dict] )
identifier[socket] = identifier[context] . identifier[socket] ( identifier[zmq] . identifier[REQ] )
keyword[for] identifier[n] , identifier[a] keyword[in] [( identifier[n] , identifier[a] ) keyword[for] ( identifier[n] , identifier[a] ) keyword[in] identifier[self] . identifier[providers] keyword[if] ( keyword[not] identifier[node] keyword[or] identifier[n] == identifier[node] )]:
identifier[socket] . identifier[connect] ( identifier[a] )
identifier[fullreq] = identifier[ServiceRequest] ( identifier[service] = identifier[self] . identifier[name] , identifier[args] = identifier[pickle] . identifier[dumps] ( identifier[args] ), identifier[kwargs] = identifier[pickle] . identifier[dumps] ( identifier[kwargs] ))
identifier[poller] = identifier[zmq] . identifier[Poller] ()
identifier[poller] . identifier[register] ( identifier[socket] )
identifier[evts] = identifier[dict] ( identifier[poller] . identifier[poll] ( identifier[send_timeout] ))
keyword[if] identifier[socket] keyword[in] identifier[evts] keyword[and] identifier[evts] [ identifier[socket] ]== identifier[zmq] . identifier[POLLOUT] :
identifier[socket] . identifier[send] ( identifier[fullreq] . identifier[serialize] ())
identifier[evts] = identifier[dict] ( identifier[poller] . identifier[poll] ( identifier[recv_timeout] ))
keyword[if] identifier[socket] keyword[in] identifier[evts] keyword[and] identifier[evts] [ identifier[socket] ]== identifier[zmq] . identifier[POLLIN] :
identifier[resp] = identifier[socket] . identifier[recv] ()
identifier[fullresp] = identifier[ServiceResponse_dictparse] ( identifier[resp] )
keyword[if] identifier[fullresp] . identifier[has_field] ( literal[string] ):
keyword[return] identifier[pickle] . identifier[loads] ( identifier[fullresp] . identifier[response] )
keyword[elif] identifier[fullresp] . identifier[has_field] ( literal[string] ):
identifier[svcexc] = identifier[fullresp] . identifier[exception]
identifier[tb] = identifier[pickle] . identifier[loads] ( identifier[svcexc] . identifier[traceback] )
keyword[if] identifier[Traceback] keyword[and] identifier[isinstance] ( identifier[tb] , identifier[Traceback] ):
identifier[reraise] ( identifier[pickle] . identifier[loads] ( identifier[svcexc] . identifier[exc_type] ), identifier[pickle] . identifier[loads] ( identifier[svcexc] . identifier[exc_value] ), identifier[tb] . identifier[as_traceback] ())
keyword[else] :
identifier[reraise] ( identifier[pickle] . identifier[loads] ( identifier[svcexc] . identifier[exc_type] ), identifier[pickle] . identifier[loads] ( identifier[svcexc] . identifier[exc_value] ), keyword[None] )
keyword[else] :
keyword[raise] identifier[UnknownResponseTypeException] ( literal[string] . identifier[format] ( identifier[type] ( identifier[fullresp] )))
keyword[else] :
keyword[raise] identifier[ServiceCallTimeout] ( literal[string] )
keyword[else] :
keyword[raise] identifier[ServiceCallTimeout] ( literal[string] ) | def call(self, args=None, kwargs=None, node=None, send_timeout=1000, recv_timeout=5000, zmq_ctx=None):
"""
Calls a service on a node with req as arguments. if node is None, a node is chosen by zmq.
if zmq_ctx is passed, it will use the existing context
Uses a REQ socket. Ref : http://api.zeromq.org/2-1:zmq-socket
:param node : the node name
"""
context = zmq_ctx or zmq.Context()
assert isinstance(context, zmq.Context)
args = args or ()
assert isinstance(args, tuple)
kwargs = kwargs or {}
assert isinstance(kwargs, dict)
socket = context.socket(zmq.REQ)
# connect to all addresses ( optionally matching node name )
for (n, a) in [(n, a) for (n, a) in self.providers if not node or n == node]:
socket.connect(a) # depends on [control=['for'], data=[]]
# build message
fullreq = ServiceRequest(service=self.name, args=pickle.dumps(args), kwargs=pickle.dumps(kwargs))
poller = zmq.Poller()
poller.register(socket) # POLLIN for recv, POLLOUT for send
evts = dict(poller.poll(send_timeout))
if socket in evts and evts[socket] == zmq.POLLOUT:
socket.send(fullreq.serialize())
# TODO : find a way to get rid fo these timeouts when debugging
# TODO : when timeout Exception should occur ( not returning None )
evts = dict(poller.poll(recv_timeout)) # blocking until answer
if socket in evts and evts[socket] == zmq.POLLIN:
resp = socket.recv()
fullresp = ServiceResponse_dictparse(resp)
if fullresp.has_field('response'):
return pickle.loads(fullresp.response) # depends on [control=['if'], data=[]]
elif fullresp.has_field('exception'):
svcexc = fullresp.exception # This has already been parsed by ServiceResponse_dictparse
tb = pickle.loads(svcexc.traceback)
if Traceback and isinstance(tb, Traceback):
reraise(pickle.loads(svcexc.exc_type), pickle.loads(svcexc.exc_value), tb.as_traceback()) # depends on [control=['if'], data=[]]
else: # traceback not usable
reraise(pickle.loads(svcexc.exc_type), pickle.loads(svcexc.exc_value), None) # depends on [control=['if'], data=[]]
else:
raise UnknownResponseTypeException('Unknown Response Type {0}'.format(type(fullresp))) # depends on [control=['if'], data=[]]
else:
raise ServiceCallTimeout('Did not receive response through ZMQ socket.') # depends on [control=['if'], data=[]]
else:
raise ServiceCallTimeout('Can not send request through ZMQ socket.') |
def p_exports(self, p):
'''exports :
| PYTHRAN EXPORT export_list opt_craps exports
| PYTHRAN EXPORT CAPSULE export_list opt_craps exports'''
if len(p) > 1:
target = self.exports if len(p) == 6 else self.native_exports
for key, val in p[len(p)-3]:
target[key] += val | def function[p_exports, parameter[self, p]]:
constant[exports :
| PYTHRAN EXPORT export_list opt_craps exports
| PYTHRAN EXPORT CAPSULE export_list opt_craps exports]
if compare[call[name[len], parameter[name[p]]] greater[>] constant[1]] begin[:]
variable[target] assign[=] <ast.IfExp object at 0x7da2054a6dd0>
for taget[tuple[[<ast.Name object at 0x7da2054a6e90>, <ast.Name object at 0x7da2054a6710>]]] in starred[call[name[p]][binary_operation[call[name[len], parameter[name[p]]] - constant[3]]]] begin[:]
<ast.AugAssign object at 0x7da2054a7040> | keyword[def] identifier[p_exports] ( identifier[self] , identifier[p] ):
literal[string]
keyword[if] identifier[len] ( identifier[p] )> literal[int] :
identifier[target] = identifier[self] . identifier[exports] keyword[if] identifier[len] ( identifier[p] )== literal[int] keyword[else] identifier[self] . identifier[native_exports]
keyword[for] identifier[key] , identifier[val] keyword[in] identifier[p] [ identifier[len] ( identifier[p] )- literal[int] ]:
identifier[target] [ identifier[key] ]+= identifier[val] | def p_exports(self, p):
"""exports :
| PYTHRAN EXPORT export_list opt_craps exports
| PYTHRAN EXPORT CAPSULE export_list opt_craps exports"""
if len(p) > 1:
target = self.exports if len(p) == 6 else self.native_exports
for (key, val) in p[len(p) - 3]:
target[key] += val # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] |
def _compute_variables_indices(self):
"""
Computes and saves the index location of each variable (as a list) in the objectives
space and in the model space. If no categorical variables are available, these two are
equivalent.
"""
counter_objective = 0
counter_model = 0
for variable in self.space_expanded:
variable.set_index_in_objective([counter_objective])
counter_objective +=1
if variable.type is not 'categorical':
variable.set_index_in_model([counter_model])
counter_model +=1
else:
num_categories = len(variable.domain)
variable.set_index_in_model(list(range(counter_model,counter_model + num_categories)))
counter_model +=num_categories | def function[_compute_variables_indices, parameter[self]]:
constant[
Computes and saves the index location of each variable (as a list) in the objectives
space and in the model space. If no categorical variables are available, these two are
equivalent.
]
variable[counter_objective] assign[=] constant[0]
variable[counter_model] assign[=] constant[0]
for taget[name[variable]] in starred[name[self].space_expanded] begin[:]
call[name[variable].set_index_in_objective, parameter[list[[<ast.Name object at 0x7da1b2345120>]]]]
<ast.AugAssign object at 0x7da1b2345810>
if compare[name[variable].type is_not constant[categorical]] begin[:]
call[name[variable].set_index_in_model, parameter[list[[<ast.Name object at 0x7da1b2344f10>]]]]
<ast.AugAssign object at 0x7da1b23472b0> | keyword[def] identifier[_compute_variables_indices] ( identifier[self] ):
literal[string]
identifier[counter_objective] = literal[int]
identifier[counter_model] = literal[int]
keyword[for] identifier[variable] keyword[in] identifier[self] . identifier[space_expanded] :
identifier[variable] . identifier[set_index_in_objective] ([ identifier[counter_objective] ])
identifier[counter_objective] += literal[int]
keyword[if] identifier[variable] . identifier[type] keyword[is] keyword[not] literal[string] :
identifier[variable] . identifier[set_index_in_model] ([ identifier[counter_model] ])
identifier[counter_model] += literal[int]
keyword[else] :
identifier[num_categories] = identifier[len] ( identifier[variable] . identifier[domain] )
identifier[variable] . identifier[set_index_in_model] ( identifier[list] ( identifier[range] ( identifier[counter_model] , identifier[counter_model] + identifier[num_categories] )))
identifier[counter_model] += identifier[num_categories] | def _compute_variables_indices(self):
"""
Computes and saves the index location of each variable (as a list) in the objectives
space and in the model space. If no categorical variables are available, these two are
equivalent.
"""
counter_objective = 0
counter_model = 0
for variable in self.space_expanded:
variable.set_index_in_objective([counter_objective])
counter_objective += 1
if variable.type is not 'categorical':
variable.set_index_in_model([counter_model])
counter_model += 1 # depends on [control=['if'], data=[]]
else:
num_categories = len(variable.domain)
variable.set_index_in_model(list(range(counter_model, counter_model + num_categories)))
counter_model += num_categories # depends on [control=['for'], data=['variable']] |
def get_file_by_label(self, label, asset_content_type=None):
"""stub"""
return self._get_asset_content(self.get_asset_id_by_label(label), asset_content_type).get_data() | def function[get_file_by_label, parameter[self, label, asset_content_type]]:
constant[stub]
return[call[call[name[self]._get_asset_content, parameter[call[name[self].get_asset_id_by_label, parameter[name[label]]], name[asset_content_type]]].get_data, parameter[]]] | keyword[def] identifier[get_file_by_label] ( identifier[self] , identifier[label] , identifier[asset_content_type] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_get_asset_content] ( identifier[self] . identifier[get_asset_id_by_label] ( identifier[label] ), identifier[asset_content_type] ). identifier[get_data] () | def get_file_by_label(self, label, asset_content_type=None):
"""stub"""
return self._get_asset_content(self.get_asset_id_by_label(label), asset_content_type).get_data() |
def insert_many(self, doc_or_docs, **kwargs):
"""Insert method
"""
check = kwargs.pop('check', True)
if check is True:
for i in doc_or_docs:
i = self._valid_record(i)
return self.__collect.insert_many(doc_or_docs, **kwargs) | def function[insert_many, parameter[self, doc_or_docs]]:
constant[Insert method
]
variable[check] assign[=] call[name[kwargs].pop, parameter[constant[check], constant[True]]]
if compare[name[check] is constant[True]] begin[:]
for taget[name[i]] in starred[name[doc_or_docs]] begin[:]
variable[i] assign[=] call[name[self]._valid_record, parameter[name[i]]]
return[call[name[self].__collect.insert_many, parameter[name[doc_or_docs]]]] | keyword[def] identifier[insert_many] ( identifier[self] , identifier[doc_or_docs] ,** identifier[kwargs] ):
literal[string]
identifier[check] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[True] )
keyword[if] identifier[check] keyword[is] keyword[True] :
keyword[for] identifier[i] keyword[in] identifier[doc_or_docs] :
identifier[i] = identifier[self] . identifier[_valid_record] ( identifier[i] )
keyword[return] identifier[self] . identifier[__collect] . identifier[insert_many] ( identifier[doc_or_docs] ,** identifier[kwargs] ) | def insert_many(self, doc_or_docs, **kwargs):
"""Insert method
"""
check = kwargs.pop('check', True)
if check is True:
for i in doc_or_docs:
i = self._valid_record(i) # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
return self.__collect.insert_many(doc_or_docs, **kwargs) |
def get_index(self):
''' Convert an interface name to an index value. '''
ifreq = struct.pack('16si', self.name, 0)
res = fcntl.ioctl(sockfd, SIOCGIFINDEX, ifreq)
return struct.unpack("16si", res)[1] | def function[get_index, parameter[self]]:
constant[ Convert an interface name to an index value. ]
variable[ifreq] assign[=] call[name[struct].pack, parameter[constant[16si], name[self].name, constant[0]]]
variable[res] assign[=] call[name[fcntl].ioctl, parameter[name[sockfd], name[SIOCGIFINDEX], name[ifreq]]]
return[call[call[name[struct].unpack, parameter[constant[16si], name[res]]]][constant[1]]] | keyword[def] identifier[get_index] ( identifier[self] ):
literal[string]
identifier[ifreq] = identifier[struct] . identifier[pack] ( literal[string] , identifier[self] . identifier[name] , literal[int] )
identifier[res] = identifier[fcntl] . identifier[ioctl] ( identifier[sockfd] , identifier[SIOCGIFINDEX] , identifier[ifreq] )
keyword[return] identifier[struct] . identifier[unpack] ( literal[string] , identifier[res] )[ literal[int] ] | def get_index(self):
""" Convert an interface name to an index value. """
ifreq = struct.pack('16si', self.name, 0)
res = fcntl.ioctl(sockfd, SIOCGIFINDEX, ifreq)
return struct.unpack('16si', res)[1] |
def has_kingside_castling_rights(self, color: Color) -> bool:
"""
Checks if the given side has kingside (that is h-side in Chess960)
castling rights.
"""
backrank = BB_RANK_1 if color == WHITE else BB_RANK_8
king_mask = self.kings & self.occupied_co[color] & backrank & ~self.promoted
if not king_mask:
return False
castling_rights = self.clean_castling_rights() & backrank
while castling_rights:
rook = castling_rights & -castling_rights
if rook > king_mask:
return True
castling_rights = castling_rights & (castling_rights - 1)
return False | def function[has_kingside_castling_rights, parameter[self, color]]:
constant[
Checks if the given side has kingside (that is h-side in Chess960)
castling rights.
]
variable[backrank] assign[=] <ast.IfExp object at 0x7da1b17e3040>
variable[king_mask] assign[=] binary_operation[binary_operation[binary_operation[name[self].kings <ast.BitAnd object at 0x7da2590d6b60> call[name[self].occupied_co][name[color]]] <ast.BitAnd object at 0x7da2590d6b60> name[backrank]] <ast.BitAnd object at 0x7da2590d6b60> <ast.UnaryOp object at 0x7da1b17e35e0>]
if <ast.UnaryOp object at 0x7da1b17e3700> begin[:]
return[constant[False]]
variable[castling_rights] assign[=] binary_operation[call[name[self].clean_castling_rights, parameter[]] <ast.BitAnd object at 0x7da2590d6b60> name[backrank]]
while name[castling_rights] begin[:]
variable[rook] assign[=] binary_operation[name[castling_rights] <ast.BitAnd object at 0x7da2590d6b60> <ast.UnaryOp object at 0x7da1b17e2200>]
if compare[name[rook] greater[>] name[king_mask]] begin[:]
return[constant[True]]
variable[castling_rights] assign[=] binary_operation[name[castling_rights] <ast.BitAnd object at 0x7da2590d6b60> binary_operation[name[castling_rights] - constant[1]]]
return[constant[False]] | keyword[def] identifier[has_kingside_castling_rights] ( identifier[self] , identifier[color] : identifier[Color] )-> identifier[bool] :
literal[string]
identifier[backrank] = identifier[BB_RANK_1] keyword[if] identifier[color] == identifier[WHITE] keyword[else] identifier[BB_RANK_8]
identifier[king_mask] = identifier[self] . identifier[kings] & identifier[self] . identifier[occupied_co] [ identifier[color] ]& identifier[backrank] &~ identifier[self] . identifier[promoted]
keyword[if] keyword[not] identifier[king_mask] :
keyword[return] keyword[False]
identifier[castling_rights] = identifier[self] . identifier[clean_castling_rights] ()& identifier[backrank]
keyword[while] identifier[castling_rights] :
identifier[rook] = identifier[castling_rights] &- identifier[castling_rights]
keyword[if] identifier[rook] > identifier[king_mask] :
keyword[return] keyword[True]
identifier[castling_rights] = identifier[castling_rights] &( identifier[castling_rights] - literal[int] )
keyword[return] keyword[False] | def has_kingside_castling_rights(self, color: Color) -> bool:
"""
Checks if the given side has kingside (that is h-side in Chess960)
castling rights.
"""
backrank = BB_RANK_1 if color == WHITE else BB_RANK_8
king_mask = self.kings & self.occupied_co[color] & backrank & ~self.promoted
if not king_mask:
return False # depends on [control=['if'], data=[]]
castling_rights = self.clean_castling_rights() & backrank
while castling_rights:
rook = castling_rights & -castling_rights
if rook > king_mask:
return True # depends on [control=['if'], data=[]]
castling_rights = castling_rights & castling_rights - 1 # depends on [control=['while'], data=[]]
return False |
def _connect_nntp (self, nntpserver):
"""
This is done only once per checking task. Also, the newly
introduced error codes 504 and 505 (both inclining "Too busy, retry
later", are caught.
"""
tries = 0
nntp = None
while tries < 2:
tries += 1
try:
nntp = nntplib.NNTP(nntpserver, usenetrc=False)
except nntplib.NNTPTemporaryError:
self.wait()
except nntplib.NNTPPermanentError as msg:
if re.compile("^50[45]").search(str(msg)):
self.wait()
else:
raise
if nntp is None:
raise LinkCheckerError(
_("NNTP server too busy; tried more than %d times.") % tries)
if log.is_debug(LOG_CHECK):
nntp.set_debuglevel(1)
self.add_info(nntp.getwelcome())
return nntp | def function[_connect_nntp, parameter[self, nntpserver]]:
constant[
This is done only once per checking task. Also, the newly
introduced error codes 504 and 505 (both inclining "Too busy, retry
later", are caught.
]
variable[tries] assign[=] constant[0]
variable[nntp] assign[=] constant[None]
while compare[name[tries] less[<] constant[2]] begin[:]
<ast.AugAssign object at 0x7da1b0ab8ac0>
<ast.Try object at 0x7da1b0abb6d0>
if compare[name[nntp] is constant[None]] begin[:]
<ast.Raise object at 0x7da18fe93ac0>
if call[name[log].is_debug, parameter[name[LOG_CHECK]]] begin[:]
call[name[nntp].set_debuglevel, parameter[constant[1]]]
call[name[self].add_info, parameter[call[name[nntp].getwelcome, parameter[]]]]
return[name[nntp]] | keyword[def] identifier[_connect_nntp] ( identifier[self] , identifier[nntpserver] ):
literal[string]
identifier[tries] = literal[int]
identifier[nntp] = keyword[None]
keyword[while] identifier[tries] < literal[int] :
identifier[tries] += literal[int]
keyword[try] :
identifier[nntp] = identifier[nntplib] . identifier[NNTP] ( identifier[nntpserver] , identifier[usenetrc] = keyword[False] )
keyword[except] identifier[nntplib] . identifier[NNTPTemporaryError] :
identifier[self] . identifier[wait] ()
keyword[except] identifier[nntplib] . identifier[NNTPPermanentError] keyword[as] identifier[msg] :
keyword[if] identifier[re] . identifier[compile] ( literal[string] ). identifier[search] ( identifier[str] ( identifier[msg] )):
identifier[self] . identifier[wait] ()
keyword[else] :
keyword[raise]
keyword[if] identifier[nntp] keyword[is] keyword[None] :
keyword[raise] identifier[LinkCheckerError] (
identifier[_] ( literal[string] )% identifier[tries] )
keyword[if] identifier[log] . identifier[is_debug] ( identifier[LOG_CHECK] ):
identifier[nntp] . identifier[set_debuglevel] ( literal[int] )
identifier[self] . identifier[add_info] ( identifier[nntp] . identifier[getwelcome] ())
keyword[return] identifier[nntp] | def _connect_nntp(self, nntpserver):
"""
This is done only once per checking task. Also, the newly
introduced error codes 504 and 505 (both inclining "Too busy, retry
later", are caught.
"""
tries = 0
nntp = None
while tries < 2:
tries += 1
try:
nntp = nntplib.NNTP(nntpserver, usenetrc=False) # depends on [control=['try'], data=[]]
except nntplib.NNTPTemporaryError:
self.wait() # depends on [control=['except'], data=[]]
except nntplib.NNTPPermanentError as msg:
if re.compile('^50[45]').search(str(msg)):
self.wait() # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=['msg']] # depends on [control=['while'], data=['tries']]
if nntp is None:
raise LinkCheckerError(_('NNTP server too busy; tried more than %d times.') % tries) # depends on [control=['if'], data=[]]
if log.is_debug(LOG_CHECK):
nntp.set_debuglevel(1) # depends on [control=['if'], data=[]]
self.add_info(nntp.getwelcome())
return nntp |
def similarity1DdiffShapedArrays(arr1, arr2, normalize=False):
"""
compare two strictly monotonous increasing 1d arrays
of same or different size
return a similarity index-> 0=identical
"""
# assign longer and shorter here, because jit cannot do it
if len(arr1) < len(arr2):
arr1, arr2 = arr2, arr1
if not len(arr2):
out = sum(arr1)
else:
out = _calc(arr1, arr2)
if normalize:
if not len(arr2):
mn = arr1[0]
mx = arr1[-1]
else:
mn = min(arr1[0], arr2[0])
mx = max(arr1[-1], arr2[-1])
out = out/ (mx - mn)
return out | def function[similarity1DdiffShapedArrays, parameter[arr1, arr2, normalize]]:
constant[
compare two strictly monotonous increasing 1d arrays
of same or different size
return a similarity index-> 0=identical
]
if compare[call[name[len], parameter[name[arr1]]] less[<] call[name[len], parameter[name[arr2]]]] begin[:]
<ast.Tuple object at 0x7da1b14d7940> assign[=] tuple[[<ast.Name object at 0x7da1b14d4940>, <ast.Name object at 0x7da1b14d7760>]]
if <ast.UnaryOp object at 0x7da1b14d7d60> begin[:]
variable[out] assign[=] call[name[sum], parameter[name[arr1]]]
if name[normalize] begin[:]
if <ast.UnaryOp object at 0x7da1b14d47c0> begin[:]
variable[mn] assign[=] call[name[arr1]][constant[0]]
variable[mx] assign[=] call[name[arr1]][<ast.UnaryOp object at 0x7da1b14d77c0>]
variable[out] assign[=] binary_operation[name[out] / binary_operation[name[mx] - name[mn]]]
return[name[out]] | keyword[def] identifier[similarity1DdiffShapedArrays] ( identifier[arr1] , identifier[arr2] , identifier[normalize] = keyword[False] ):
literal[string]
keyword[if] identifier[len] ( identifier[arr1] )< identifier[len] ( identifier[arr2] ):
identifier[arr1] , identifier[arr2] = identifier[arr2] , identifier[arr1]
keyword[if] keyword[not] identifier[len] ( identifier[arr2] ):
identifier[out] = identifier[sum] ( identifier[arr1] )
keyword[else] :
identifier[out] = identifier[_calc] ( identifier[arr1] , identifier[arr2] )
keyword[if] identifier[normalize] :
keyword[if] keyword[not] identifier[len] ( identifier[arr2] ):
identifier[mn] = identifier[arr1] [ literal[int] ]
identifier[mx] = identifier[arr1] [- literal[int] ]
keyword[else] :
identifier[mn] = identifier[min] ( identifier[arr1] [ literal[int] ], identifier[arr2] [ literal[int] ])
identifier[mx] = identifier[max] ( identifier[arr1] [- literal[int] ], identifier[arr2] [- literal[int] ])
identifier[out] = identifier[out] /( identifier[mx] - identifier[mn] )
keyword[return] identifier[out] | def similarity1DdiffShapedArrays(arr1, arr2, normalize=False):
"""
compare two strictly monotonous increasing 1d arrays
of same or different size
return a similarity index-> 0=identical
"""
# assign longer and shorter here, because jit cannot do it
if len(arr1) < len(arr2):
(arr1, arr2) = (arr2, arr1) # depends on [control=['if'], data=[]]
if not len(arr2):
out = sum(arr1) # depends on [control=['if'], data=[]]
else:
out = _calc(arr1, arr2)
if normalize:
if not len(arr2):
mn = arr1[0]
mx = arr1[-1] # depends on [control=['if'], data=[]]
else:
mn = min(arr1[0], arr2[0])
mx = max(arr1[-1], arr2[-1])
out = out / (mx - mn) # depends on [control=['if'], data=[]]
return out |
def init(path):
"""Copy a sample config file in the current directory (default to
'sigal.conf.py'), or use the provided 'path'."""
if os.path.isfile(path):
print("Found an existing config file, will abort to keep it safe.")
sys.exit(1)
from pkg_resources import resource_string
conf = resource_string(__name__, 'templates/sigal.conf.py')
with open(path, 'w', encoding='utf-8') as f:
f.write(conf.decode('utf8'))
print("Sample config file created: {}".format(path)) | def function[init, parameter[path]]:
constant[Copy a sample config file in the current directory (default to
'sigal.conf.py'), or use the provided 'path'.]
if call[name[os].path.isfile, parameter[name[path]]] begin[:]
call[name[print], parameter[constant[Found an existing config file, will abort to keep it safe.]]]
call[name[sys].exit, parameter[constant[1]]]
from relative_module[pkg_resources] import module[resource_string]
variable[conf] assign[=] call[name[resource_string], parameter[name[__name__], constant[templates/sigal.conf.py]]]
with call[name[open], parameter[name[path], constant[w]]] begin[:]
call[name[f].write, parameter[call[name[conf].decode, parameter[constant[utf8]]]]]
call[name[print], parameter[call[constant[Sample config file created: {}].format, parameter[name[path]]]]] | keyword[def] identifier[init] ( identifier[path] ):
literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[path] ):
identifier[print] ( literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[from] identifier[pkg_resources] keyword[import] identifier[resource_string]
identifier[conf] = identifier[resource_string] ( identifier[__name__] , literal[string] )
keyword[with] identifier[open] ( identifier[path] , literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[conf] . identifier[decode] ( literal[string] ))
identifier[print] ( literal[string] . identifier[format] ( identifier[path] )) | def init(path):
"""Copy a sample config file in the current directory (default to
'sigal.conf.py'), or use the provided 'path'."""
if os.path.isfile(path):
print('Found an existing config file, will abort to keep it safe.')
sys.exit(1) # depends on [control=['if'], data=[]]
from pkg_resources import resource_string
conf = resource_string(__name__, 'templates/sigal.conf.py')
with open(path, 'w', encoding='utf-8') as f:
f.write(conf.decode('utf8')) # depends on [control=['with'], data=['f']]
print('Sample config file created: {}'.format(path)) |
def load_state(self, fname: str):
"""
Loads the state of the iterator from a file.
:param fname: File name to load the information from.
"""
# restore order
self.data = self.data.permute(self.inverse_data_permutations)
with open(fname, "rb") as fp:
self.batch_indices = pickle.load(fp)
self.curr_batch_index = pickle.load(fp)
inverse_data_permutations = np.load(fp)
data_permutations = np.load(fp)
# Right after loading the iterator state, next() should be called
self.curr_batch_index -= 1
# load previous permutations
self.inverse_data_permutations = []
self.data_permutations = []
for bucket in range(len(self.data)):
inverse_permutation = mx.nd.array(inverse_data_permutations[bucket])
self.inverse_data_permutations.append(inverse_permutation)
permutation = mx.nd.array(data_permutations[bucket])
self.data_permutations.append(permutation)
self.data = self.data.permute(self.data_permutations) | def function[load_state, parameter[self, fname]]:
constant[
Loads the state of the iterator from a file.
:param fname: File name to load the information from.
]
name[self].data assign[=] call[name[self].data.permute, parameter[name[self].inverse_data_permutations]]
with call[name[open], parameter[name[fname], constant[rb]]] begin[:]
name[self].batch_indices assign[=] call[name[pickle].load, parameter[name[fp]]]
name[self].curr_batch_index assign[=] call[name[pickle].load, parameter[name[fp]]]
variable[inverse_data_permutations] assign[=] call[name[np].load, parameter[name[fp]]]
variable[data_permutations] assign[=] call[name[np].load, parameter[name[fp]]]
<ast.AugAssign object at 0x7da1b1df64d0>
name[self].inverse_data_permutations assign[=] list[[]]
name[self].data_permutations assign[=] list[[]]
for taget[name[bucket]] in starred[call[name[range], parameter[call[name[len], parameter[name[self].data]]]]] begin[:]
variable[inverse_permutation] assign[=] call[name[mx].nd.array, parameter[call[name[inverse_data_permutations]][name[bucket]]]]
call[name[self].inverse_data_permutations.append, parameter[name[inverse_permutation]]]
variable[permutation] assign[=] call[name[mx].nd.array, parameter[call[name[data_permutations]][name[bucket]]]]
call[name[self].data_permutations.append, parameter[name[permutation]]]
name[self].data assign[=] call[name[self].data.permute, parameter[name[self].data_permutations]] | keyword[def] identifier[load_state] ( identifier[self] , identifier[fname] : identifier[str] ):
literal[string]
identifier[self] . identifier[data] = identifier[self] . identifier[data] . identifier[permute] ( identifier[self] . identifier[inverse_data_permutations] )
keyword[with] identifier[open] ( identifier[fname] , literal[string] ) keyword[as] identifier[fp] :
identifier[self] . identifier[batch_indices] = identifier[pickle] . identifier[load] ( identifier[fp] )
identifier[self] . identifier[curr_batch_index] = identifier[pickle] . identifier[load] ( identifier[fp] )
identifier[inverse_data_permutations] = identifier[np] . identifier[load] ( identifier[fp] )
identifier[data_permutations] = identifier[np] . identifier[load] ( identifier[fp] )
identifier[self] . identifier[curr_batch_index] -= literal[int]
identifier[self] . identifier[inverse_data_permutations] =[]
identifier[self] . identifier[data_permutations] =[]
keyword[for] identifier[bucket] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[data] )):
identifier[inverse_permutation] = identifier[mx] . identifier[nd] . identifier[array] ( identifier[inverse_data_permutations] [ identifier[bucket] ])
identifier[self] . identifier[inverse_data_permutations] . identifier[append] ( identifier[inverse_permutation] )
identifier[permutation] = identifier[mx] . identifier[nd] . identifier[array] ( identifier[data_permutations] [ identifier[bucket] ])
identifier[self] . identifier[data_permutations] . identifier[append] ( identifier[permutation] )
identifier[self] . identifier[data] = identifier[self] . identifier[data] . identifier[permute] ( identifier[self] . identifier[data_permutations] ) | def load_state(self, fname: str):
"""
Loads the state of the iterator from a file.
:param fname: File name to load the information from.
"""
# restore order
self.data = self.data.permute(self.inverse_data_permutations)
with open(fname, 'rb') as fp:
self.batch_indices = pickle.load(fp)
self.curr_batch_index = pickle.load(fp)
inverse_data_permutations = np.load(fp)
data_permutations = np.load(fp) # depends on [control=['with'], data=['fp']]
# Right after loading the iterator state, next() should be called
self.curr_batch_index -= 1
# load previous permutations
self.inverse_data_permutations = []
self.data_permutations = []
for bucket in range(len(self.data)):
inverse_permutation = mx.nd.array(inverse_data_permutations[bucket])
self.inverse_data_permutations.append(inverse_permutation)
permutation = mx.nd.array(data_permutations[bucket])
self.data_permutations.append(permutation) # depends on [control=['for'], data=['bucket']]
self.data = self.data.permute(self.data_permutations) |
def choices(cls, blank=False):
""" Choices for Enum
:return: List of tuples (<value>, <human-readable value>)
:rtype: list
"""
choices = sorted([(key, value) for key, value in cls.values.items()], key=lambda x: x[0])
if blank:
choices.insert(0, ('', Enum.Value('', None, '', cls)))
return choices | def function[choices, parameter[cls, blank]]:
constant[ Choices for Enum
:return: List of tuples (<value>, <human-readable value>)
:rtype: list
]
variable[choices] assign[=] call[name[sorted], parameter[<ast.ListComp object at 0x7da1b115e890>]]
if name[blank] begin[:]
call[name[choices].insert, parameter[constant[0], tuple[[<ast.Constant object at 0x7da1b115e7d0>, <ast.Call object at 0x7da1b115ece0>]]]]
return[name[choices]] | keyword[def] identifier[choices] ( identifier[cls] , identifier[blank] = keyword[False] ):
literal[string]
identifier[choices] = identifier[sorted] ([( identifier[key] , identifier[value] ) keyword[for] identifier[key] , identifier[value] keyword[in] identifier[cls] . identifier[values] . identifier[items] ()], identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ])
keyword[if] identifier[blank] :
identifier[choices] . identifier[insert] ( literal[int] ,( literal[string] , identifier[Enum] . identifier[Value] ( literal[string] , keyword[None] , literal[string] , identifier[cls] )))
keyword[return] identifier[choices] | def choices(cls, blank=False):
""" Choices for Enum
:return: List of tuples (<value>, <human-readable value>)
:rtype: list
"""
choices = sorted([(key, value) for (key, value) in cls.values.items()], key=lambda x: x[0])
if blank:
choices.insert(0, ('', Enum.Value('', None, '', cls))) # depends on [control=['if'], data=[]]
return choices |
def list_(extra=False, limit=None, path=None):
'''
List containers classified by state
extra
Also get per-container specific info. This will change the return data.
Instead of returning a list of containers, a dictionary of containers
and each container's output from :mod:`lxc.info
<salt.modules.lxc.info>`.
path
path to the container parent directory
default: /var/lib/lxc (system)
.. versionadded:: 2015.8.0
limit
Return output matching a specific state (**frozen**, **running**, or
**stopped**).
.. versionadded:: 2015.5.0
CLI Examples:
.. code-block:: bash
salt '*' lxc.list
salt '*' lxc.list extra=True
salt '*' lxc.list limit=running
'''
ctnrs = ls_(path=path)
if extra:
stopped = {}
frozen = {}
running = {}
else:
stopped = []
frozen = []
running = []
ret = {'running': running,
'stopped': stopped,
'frozen': frozen}
for container in ctnrs:
cmd = 'lxc-info'
if path:
cmd += ' -P {0}'.format(pipes.quote(path))
cmd += ' -n {0}'.format(container)
c_info = __salt__['cmd.run'](
cmd,
python_shell=False,
output_loglevel='debug'
)
c_state = None
for line in c_info.splitlines():
stat = line.split(':')
if stat[0] in ('State', 'state'):
c_state = stat[1].strip()
break
if not c_state or (limit is not None and c_state.lower() != limit):
continue
if extra:
infos = info(container, path=path)
method = 'update'
value = {container: infos}
else:
method = 'append'
value = container
if c_state == 'STOPPED':
getattr(stopped, method)(value)
continue
if c_state == 'FROZEN':
getattr(frozen, method)(value)
continue
if c_state == 'RUNNING':
getattr(running, method)(value)
continue
if limit is not None:
return ret.get(limit, {} if extra else [])
return ret | def function[list_, parameter[extra, limit, path]]:
constant[
List containers classified by state
extra
Also get per-container specific info. This will change the return data.
Instead of returning a list of containers, a dictionary of containers
and each container's output from :mod:`lxc.info
<salt.modules.lxc.info>`.
path
path to the container parent directory
default: /var/lib/lxc (system)
.. versionadded:: 2015.8.0
limit
Return output matching a specific state (**frozen**, **running**, or
**stopped**).
.. versionadded:: 2015.5.0
CLI Examples:
.. code-block:: bash
salt '*' lxc.list
salt '*' lxc.list extra=True
salt '*' lxc.list limit=running
]
variable[ctnrs] assign[=] call[name[ls_], parameter[]]
if name[extra] begin[:]
variable[stopped] assign[=] dictionary[[], []]
variable[frozen] assign[=] dictionary[[], []]
variable[running] assign[=] dictionary[[], []]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b209beb0>, <ast.Constant object at 0x7da1b209bee0>, <ast.Constant object at 0x7da1b209be50>], [<ast.Name object at 0x7da1b209bf70>, <ast.Name object at 0x7da1b209be80>, <ast.Name object at 0x7da1b209bd60>]]
for taget[name[container]] in starred[name[ctnrs]] begin[:]
variable[cmd] assign[=] constant[lxc-info]
if name[path] begin[:]
<ast.AugAssign object at 0x7da1b209bf40>
<ast.AugAssign object at 0x7da1b21bd6c0>
variable[c_info] assign[=] call[call[name[__salt__]][constant[cmd.run]], parameter[name[cmd]]]
variable[c_state] assign[=] constant[None]
for taget[name[line]] in starred[call[name[c_info].splitlines, parameter[]]] begin[:]
variable[stat] assign[=] call[name[line].split, parameter[constant[:]]]
if compare[call[name[stat]][constant[0]] in tuple[[<ast.Constant object at 0x7da1b21bd960>, <ast.Constant object at 0x7da1b21be050>]]] begin[:]
variable[c_state] assign[=] call[call[name[stat]][constant[1]].strip, parameter[]]
break
if <ast.BoolOp object at 0x7da1b21bee00> begin[:]
continue
if name[extra] begin[:]
variable[infos] assign[=] call[name[info], parameter[name[container]]]
variable[method] assign[=] constant[update]
variable[value] assign[=] dictionary[[<ast.Name object at 0x7da1b21bcc70>], [<ast.Name object at 0x7da1b21bdf00>]]
if compare[name[c_state] equal[==] constant[STOPPED]] begin[:]
call[call[name[getattr], parameter[name[stopped], name[method]]], parameter[name[value]]]
continue
if compare[name[c_state] equal[==] constant[FROZEN]] begin[:]
call[call[name[getattr], parameter[name[frozen], name[method]]], parameter[name[value]]]
continue
if compare[name[c_state] equal[==] constant[RUNNING]] begin[:]
call[call[name[getattr], parameter[name[running], name[method]]], parameter[name[value]]]
continue
if compare[name[limit] is_not constant[None]] begin[:]
return[call[name[ret].get, parameter[name[limit], <ast.IfExp object at 0x7da1b21a0520>]]]
return[name[ret]] | keyword[def] identifier[list_] ( identifier[extra] = keyword[False] , identifier[limit] = keyword[None] , identifier[path] = keyword[None] ):
literal[string]
identifier[ctnrs] = identifier[ls_] ( identifier[path] = identifier[path] )
keyword[if] identifier[extra] :
identifier[stopped] ={}
identifier[frozen] ={}
identifier[running] ={}
keyword[else] :
identifier[stopped] =[]
identifier[frozen] =[]
identifier[running] =[]
identifier[ret] ={ literal[string] : identifier[running] ,
literal[string] : identifier[stopped] ,
literal[string] : identifier[frozen] }
keyword[for] identifier[container] keyword[in] identifier[ctnrs] :
identifier[cmd] = literal[string]
keyword[if] identifier[path] :
identifier[cmd] += literal[string] . identifier[format] ( identifier[pipes] . identifier[quote] ( identifier[path] ))
identifier[cmd] += literal[string] . identifier[format] ( identifier[container] )
identifier[c_info] = identifier[__salt__] [ literal[string] ](
identifier[cmd] ,
identifier[python_shell] = keyword[False] ,
identifier[output_loglevel] = literal[string]
)
identifier[c_state] = keyword[None]
keyword[for] identifier[line] keyword[in] identifier[c_info] . identifier[splitlines] ():
identifier[stat] = identifier[line] . identifier[split] ( literal[string] )
keyword[if] identifier[stat] [ literal[int] ] keyword[in] ( literal[string] , literal[string] ):
identifier[c_state] = identifier[stat] [ literal[int] ]. identifier[strip] ()
keyword[break]
keyword[if] keyword[not] identifier[c_state] keyword[or] ( identifier[limit] keyword[is] keyword[not] keyword[None] keyword[and] identifier[c_state] . identifier[lower] ()!= identifier[limit] ):
keyword[continue]
keyword[if] identifier[extra] :
identifier[infos] = identifier[info] ( identifier[container] , identifier[path] = identifier[path] )
identifier[method] = literal[string]
identifier[value] ={ identifier[container] : identifier[infos] }
keyword[else] :
identifier[method] = literal[string]
identifier[value] = identifier[container]
keyword[if] identifier[c_state] == literal[string] :
identifier[getattr] ( identifier[stopped] , identifier[method] )( identifier[value] )
keyword[continue]
keyword[if] identifier[c_state] == literal[string] :
identifier[getattr] ( identifier[frozen] , identifier[method] )( identifier[value] )
keyword[continue]
keyword[if] identifier[c_state] == literal[string] :
identifier[getattr] ( identifier[running] , identifier[method] )( identifier[value] )
keyword[continue]
keyword[if] identifier[limit] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[ret] . identifier[get] ( identifier[limit] ,{} keyword[if] identifier[extra] keyword[else] [])
keyword[return] identifier[ret] | def list_(extra=False, limit=None, path=None):
"""
List containers classified by state
extra
Also get per-container specific info. This will change the return data.
Instead of returning a list of containers, a dictionary of containers
and each container's output from :mod:`lxc.info
<salt.modules.lxc.info>`.
path
path to the container parent directory
default: /var/lib/lxc (system)
.. versionadded:: 2015.8.0
limit
Return output matching a specific state (**frozen**, **running**, or
**stopped**).
.. versionadded:: 2015.5.0
CLI Examples:
.. code-block:: bash
salt '*' lxc.list
salt '*' lxc.list extra=True
salt '*' lxc.list limit=running
"""
ctnrs = ls_(path=path)
if extra:
stopped = {}
frozen = {}
running = {} # depends on [control=['if'], data=[]]
else:
stopped = []
frozen = []
running = []
ret = {'running': running, 'stopped': stopped, 'frozen': frozen}
for container in ctnrs:
cmd = 'lxc-info'
if path:
cmd += ' -P {0}'.format(pipes.quote(path)) # depends on [control=['if'], data=[]]
cmd += ' -n {0}'.format(container)
c_info = __salt__['cmd.run'](cmd, python_shell=False, output_loglevel='debug')
c_state = None
for line in c_info.splitlines():
stat = line.split(':')
if stat[0] in ('State', 'state'):
c_state = stat[1].strip()
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
if not c_state or (limit is not None and c_state.lower() != limit):
continue # depends on [control=['if'], data=[]]
if extra:
infos = info(container, path=path)
method = 'update'
value = {container: infos} # depends on [control=['if'], data=[]]
else:
method = 'append'
value = container
if c_state == 'STOPPED':
getattr(stopped, method)(value)
continue # depends on [control=['if'], data=[]]
if c_state == 'FROZEN':
getattr(frozen, method)(value)
continue # depends on [control=['if'], data=[]]
if c_state == 'RUNNING':
getattr(running, method)(value)
continue # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['container']]
if limit is not None:
return ret.get(limit, {} if extra else []) # depends on [control=['if'], data=['limit']]
return ret |
def open_notification(self):
"""
Open notification
Built in support for Android 4.3 (API level 18)
Using swipe action as a workaround for API level lower than 18
"""
sdk_version = self.device.info['sdkInt']
if sdk_version < 18:
height = self.device.info['displayHeight']
self.device.swipe(1, 1, 1, height - 1, 1)
else:
self.device.open.notification() | def function[open_notification, parameter[self]]:
constant[
Open notification
Built in support for Android 4.3 (API level 18)
Using swipe action as a workaround for API level lower than 18
]
variable[sdk_version] assign[=] call[name[self].device.info][constant[sdkInt]]
if compare[name[sdk_version] less[<] constant[18]] begin[:]
variable[height] assign[=] call[name[self].device.info][constant[displayHeight]]
call[name[self].device.swipe, parameter[constant[1], constant[1], constant[1], binary_operation[name[height] - constant[1]], constant[1]]] | keyword[def] identifier[open_notification] ( identifier[self] ):
literal[string]
identifier[sdk_version] = identifier[self] . identifier[device] . identifier[info] [ literal[string] ]
keyword[if] identifier[sdk_version] < literal[int] :
identifier[height] = identifier[self] . identifier[device] . identifier[info] [ literal[string] ]
identifier[self] . identifier[device] . identifier[swipe] ( literal[int] , literal[int] , literal[int] , identifier[height] - literal[int] , literal[int] )
keyword[else] :
identifier[self] . identifier[device] . identifier[open] . identifier[notification] () | def open_notification(self):
"""
Open notification
Built in support for Android 4.3 (API level 18)
Using swipe action as a workaround for API level lower than 18
"""
sdk_version = self.device.info['sdkInt']
if sdk_version < 18:
height = self.device.info['displayHeight']
self.device.swipe(1, 1, 1, height - 1, 1) # depends on [control=['if'], data=[]]
else:
self.device.open.notification() |
def tostr(self, object, indent=-2):
""" get s string representation of object """
history = []
return self.process(object, history, indent) | def function[tostr, parameter[self, object, indent]]:
constant[ get s string representation of object ]
variable[history] assign[=] list[[]]
return[call[name[self].process, parameter[name[object], name[history], name[indent]]]] | keyword[def] identifier[tostr] ( identifier[self] , identifier[object] , identifier[indent] =- literal[int] ):
literal[string]
identifier[history] =[]
keyword[return] identifier[self] . identifier[process] ( identifier[object] , identifier[history] , identifier[indent] ) | def tostr(self, object, indent=-2):
""" get s string representation of object """
history = []
return self.process(object, history, indent) |
def nac_v(msg):
"""Calculate NACv, Navigation Accuracy Category - Velocity
Args:
msg (string): 28 bytes hexadecimal message string, TC = 19
Returns:
int or string: 95% horizontal accuracy bounds for velocity, Horizontal Figure of Merit
int or string: 95% vertical accuracy bounds for velocity, Vertical Figure of Merit
"""
tc = typecode(msg)
if tc != 19:
raise RuntimeError("%s: Not an airborne velocity message, expecting TC = 19" % msg)
msgbin = common.hex2bin(msg)
NACv = common.bin2int(msgbin[42:45])
try:
HFOMr = uncertainty.NACv[NACv]['HFOMr']
VFOMr = uncertainty.NACv[NACv]['VFOMr']
except KeyError:
HFOMr, VFOMr = uncertainty.NA, uncertainty.NA
return HFOMr, VFOMr | def function[nac_v, parameter[msg]]:
constant[Calculate NACv, Navigation Accuracy Category - Velocity
Args:
msg (string): 28 bytes hexadecimal message string, TC = 19
Returns:
int or string: 95% horizontal accuracy bounds for velocity, Horizontal Figure of Merit
int or string: 95% vertical accuracy bounds for velocity, Vertical Figure of Merit
]
variable[tc] assign[=] call[name[typecode], parameter[name[msg]]]
if compare[name[tc] not_equal[!=] constant[19]] begin[:]
<ast.Raise object at 0x7da2047e8850>
variable[msgbin] assign[=] call[name[common].hex2bin, parameter[name[msg]]]
variable[NACv] assign[=] call[name[common].bin2int, parameter[call[name[msgbin]][<ast.Slice object at 0x7da2047e8100>]]]
<ast.Try object at 0x7da2047e95d0>
return[tuple[[<ast.Name object at 0x7da2047ea3e0>, <ast.Name object at 0x7da2047e9ae0>]]] | keyword[def] identifier[nac_v] ( identifier[msg] ):
literal[string]
identifier[tc] = identifier[typecode] ( identifier[msg] )
keyword[if] identifier[tc] != literal[int] :
keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[msg] )
identifier[msgbin] = identifier[common] . identifier[hex2bin] ( identifier[msg] )
identifier[NACv] = identifier[common] . identifier[bin2int] ( identifier[msgbin] [ literal[int] : literal[int] ])
keyword[try] :
identifier[HFOMr] = identifier[uncertainty] . identifier[NACv] [ identifier[NACv] ][ literal[string] ]
identifier[VFOMr] = identifier[uncertainty] . identifier[NACv] [ identifier[NACv] ][ literal[string] ]
keyword[except] identifier[KeyError] :
identifier[HFOMr] , identifier[VFOMr] = identifier[uncertainty] . identifier[NA] , identifier[uncertainty] . identifier[NA]
keyword[return] identifier[HFOMr] , identifier[VFOMr] | def nac_v(msg):
"""Calculate NACv, Navigation Accuracy Category - Velocity
Args:
msg (string): 28 bytes hexadecimal message string, TC = 19
Returns:
int or string: 95% horizontal accuracy bounds for velocity, Horizontal Figure of Merit
int or string: 95% vertical accuracy bounds for velocity, Vertical Figure of Merit
"""
tc = typecode(msg)
if tc != 19:
raise RuntimeError('%s: Not an airborne velocity message, expecting TC = 19' % msg) # depends on [control=['if'], data=[]]
msgbin = common.hex2bin(msg)
NACv = common.bin2int(msgbin[42:45])
try:
HFOMr = uncertainty.NACv[NACv]['HFOMr']
VFOMr = uncertainty.NACv[NACv]['VFOMr'] # depends on [control=['try'], data=[]]
except KeyError:
(HFOMr, VFOMr) = (uncertainty.NA, uncertainty.NA) # depends on [control=['except'], data=[]]
return (HFOMr, VFOMr) |
def remove_api_keys_from_account_group(self, account_id, group_id, **kwargs): # noqa: E501
"""Remove API keys from a group. # noqa: E501
An endpoint for removing API keys from groups. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/policy-groups/{groupID}/api-keys -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.remove_api_keys_from_account_group(account_id, group_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str group_id: A list of API keys to be removed from the group. (required)
:param SubjectList body:
:return: UpdatedResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.remove_api_keys_from_account_group_with_http_info(account_id, group_id, **kwargs) # noqa: E501
else:
(data) = self.remove_api_keys_from_account_group_with_http_info(account_id, group_id, **kwargs) # noqa: E501
return data | def function[remove_api_keys_from_account_group, parameter[self, account_id, group_id]]:
constant[Remove API keys from a group. # noqa: E501
An endpoint for removing API keys from groups. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/policy-groups/{groupID}/api-keys -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.remove_api_keys_from_account_group(account_id, group_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str group_id: A list of API keys to be removed from the group. (required)
:param SubjectList body:
:return: UpdatedResponse
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[asynchronous]]] begin[:]
return[call[name[self].remove_api_keys_from_account_group_with_http_info, parameter[name[account_id], name[group_id]]]] | keyword[def] identifier[remove_api_keys_from_account_group] ( identifier[self] , identifier[account_id] , identifier[group_id] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[remove_api_keys_from_account_group_with_http_info] ( identifier[account_id] , identifier[group_id] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[remove_api_keys_from_account_group_with_http_info] ( identifier[account_id] , identifier[group_id] ,** identifier[kwargs] )
keyword[return] identifier[data] | def remove_api_keys_from_account_group(self, account_id, group_id, **kwargs): # noqa: E501
"Remove API keys from a group. # noqa: E501\n\n An endpoint for removing API keys from groups. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/policy-groups/{groupID}/api-keys -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass asynchronous=True\n >>> thread = api.remove_api_keys_from_account_group(account_id, group_id, asynchronous=True)\n >>> result = thread.get()\n\n :param asynchronous bool\n :param str account_id: Account ID. (required)\n :param str group_id: A list of API keys to be removed from the group. (required)\n :param SubjectList body:\n :return: UpdatedResponse\n If the method is called asynchronously,\n returns the request thread.\n "
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.remove_api_keys_from_account_group_with_http_info(account_id, group_id, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.remove_api_keys_from_account_group_with_http_info(account_id, group_id, **kwargs) # noqa: E501
return data |
def detachChildren(self):
"""
Detach and return this element's children.
@return: The element's children (detached).
@rtype: [L{Element},...]
"""
detached = self.children
self.children = []
for child in detached:
child.parent = None
return detached | def function[detachChildren, parameter[self]]:
constant[
Detach and return this element's children.
@return: The element's children (detached).
@rtype: [L{Element},...]
]
variable[detached] assign[=] name[self].children
name[self].children assign[=] list[[]]
for taget[name[child]] in starred[name[detached]] begin[:]
name[child].parent assign[=] constant[None]
return[name[detached]] | keyword[def] identifier[detachChildren] ( identifier[self] ):
literal[string]
identifier[detached] = identifier[self] . identifier[children]
identifier[self] . identifier[children] =[]
keyword[for] identifier[child] keyword[in] identifier[detached] :
identifier[child] . identifier[parent] = keyword[None]
keyword[return] identifier[detached] | def detachChildren(self):
"""
Detach and return this element's children.
@return: The element's children (detached).
@rtype: [L{Element},...]
"""
detached = self.children
self.children = []
for child in detached:
child.parent = None # depends on [control=['for'], data=['child']]
return detached |
def word(self, word, url='https://api.shanbay.com/bdc/search/'):
"""查询单词"""
params = {
'word': word
}
return self._request(url, params=params).json() | def function[word, parameter[self, word, url]]:
constant[查询单词]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b2458f10>], [<ast.Name object at 0x7da1b24bb130>]]
return[call[call[name[self]._request, parameter[name[url]]].json, parameter[]]] | keyword[def] identifier[word] ( identifier[self] , identifier[word] , identifier[url] = literal[string] ):
literal[string]
identifier[params] ={
literal[string] : identifier[word]
}
keyword[return] identifier[self] . identifier[_request] ( identifier[url] , identifier[params] = identifier[params] ). identifier[json] () | def word(self, word, url='https://api.shanbay.com/bdc/search/'):
"""查询单词"""
params = {'word': word}
return self._request(url, params=params).json() |
def _make_axes_dict(self, axes):
"""Makes an axes dictionary.
.. note::
In case the input is ``None``, the dictionary :code:`{'empty': None}`
is returned.
**Function-call argument** \n
:param axes: axes input
:type axes: dict or single instance of
:class:`~climlab.domain.axis.Axis` object or ``None``
:raises: :exc:`ValueError` if input is not an instance of Axis class
or a dictionary of Axis objetcs
:returns: dictionary of input axes
:rtype: dict
"""
if type(axes) is dict:
axdict = axes
elif type(axes) is Axis:
ax = axes
axdict = {ax.axis_type: ax}
elif axes is None:
axdict = {'empty': None}
else:
raise ValueError('axes needs to be Axis object or dictionary of Axis object')
return axdict | def function[_make_axes_dict, parameter[self, axes]]:
constant[Makes an axes dictionary.
.. note::
In case the input is ``None``, the dictionary :code:`{'empty': None}`
is returned.
**Function-call argument**
:param axes: axes input
:type axes: dict or single instance of
:class:`~climlab.domain.axis.Axis` object or ``None``
:raises: :exc:`ValueError` if input is not an instance of Axis class
or a dictionary of Axis objetcs
:returns: dictionary of input axes
:rtype: dict
]
if compare[call[name[type], parameter[name[axes]]] is name[dict]] begin[:]
variable[axdict] assign[=] name[axes]
return[name[axdict]] | keyword[def] identifier[_make_axes_dict] ( identifier[self] , identifier[axes] ):
literal[string]
keyword[if] identifier[type] ( identifier[axes] ) keyword[is] identifier[dict] :
identifier[axdict] = identifier[axes]
keyword[elif] identifier[type] ( identifier[axes] ) keyword[is] identifier[Axis] :
identifier[ax] = identifier[axes]
identifier[axdict] ={ identifier[ax] . identifier[axis_type] : identifier[ax] }
keyword[elif] identifier[axes] keyword[is] keyword[None] :
identifier[axdict] ={ literal[string] : keyword[None] }
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[axdict] | def _make_axes_dict(self, axes):
"""Makes an axes dictionary.
.. note::
In case the input is ``None``, the dictionary :code:`{'empty': None}`
is returned.
**Function-call argument**
:param axes: axes input
:type axes: dict or single instance of
:class:`~climlab.domain.axis.Axis` object or ``None``
:raises: :exc:`ValueError` if input is not an instance of Axis class
or a dictionary of Axis objetcs
:returns: dictionary of input axes
:rtype: dict
"""
if type(axes) is dict:
axdict = axes # depends on [control=['if'], data=[]]
elif type(axes) is Axis:
ax = axes
axdict = {ax.axis_type: ax} # depends on [control=['if'], data=[]]
elif axes is None:
axdict = {'empty': None} # depends on [control=['if'], data=[]]
else:
raise ValueError('axes needs to be Axis object or dictionary of Axis object')
return axdict |
def mute_stdio(self) -> None:
"""
Intended to temporarily mute messages by applying a `BlockAll` filter.
Use in combination with `unmute_stdio()`
"""
# The benefit of using a Filter here for disabling messages is that we do not have to restore old logging levels.
for logger in self.loggers.values():
if logger.hasHandlers():
logger.handlers[0].addFilter(self.block_all_filter) | def function[mute_stdio, parameter[self]]:
constant[
Intended to temporarily mute messages by applying a `BlockAll` filter.
Use in combination with `unmute_stdio()`
]
for taget[name[logger]] in starred[call[name[self].loggers.values, parameter[]]] begin[:]
if call[name[logger].hasHandlers, parameter[]] begin[:]
call[call[name[logger].handlers][constant[0]].addFilter, parameter[name[self].block_all_filter]] | keyword[def] identifier[mute_stdio] ( identifier[self] )-> keyword[None] :
literal[string]
keyword[for] identifier[logger] keyword[in] identifier[self] . identifier[loggers] . identifier[values] ():
keyword[if] identifier[logger] . identifier[hasHandlers] ():
identifier[logger] . identifier[handlers] [ literal[int] ]. identifier[addFilter] ( identifier[self] . identifier[block_all_filter] ) | def mute_stdio(self) -> None:
"""
Intended to temporarily mute messages by applying a `BlockAll` filter.
Use in combination with `unmute_stdio()`
"""
# The benefit of using a Filter here for disabling messages is that we do not have to restore old logging levels.
for logger in self.loggers.values():
if logger.hasHandlers():
logger.handlers[0].addFilter(self.block_all_filter) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['logger']] |
def destination_encryption_configuration(self):
"""google.cloud.bigquery.table.EncryptionConfiguration: Custom
encryption configuration for the destination table.
Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`
if using default encryption.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.destinationEncryptionConfiguration
"""
prop = self._get_sub_prop("destinationEncryptionConfiguration")
if prop is not None:
prop = EncryptionConfiguration.from_api_repr(prop)
return prop | def function[destination_encryption_configuration, parameter[self]]:
constant[google.cloud.bigquery.table.EncryptionConfiguration: Custom
encryption configuration for the destination table.
Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`
if using default encryption.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.destinationEncryptionConfiguration
]
variable[prop] assign[=] call[name[self]._get_sub_prop, parameter[constant[destinationEncryptionConfiguration]]]
if compare[name[prop] is_not constant[None]] begin[:]
variable[prop] assign[=] call[name[EncryptionConfiguration].from_api_repr, parameter[name[prop]]]
return[name[prop]] | keyword[def] identifier[destination_encryption_configuration] ( identifier[self] ):
literal[string]
identifier[prop] = identifier[self] . identifier[_get_sub_prop] ( literal[string] )
keyword[if] identifier[prop] keyword[is] keyword[not] keyword[None] :
identifier[prop] = identifier[EncryptionConfiguration] . identifier[from_api_repr] ( identifier[prop] )
keyword[return] identifier[prop] | def destination_encryption_configuration(self):
"""google.cloud.bigquery.table.EncryptionConfiguration: Custom
encryption configuration for the destination table.
Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`
if using default encryption.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.destinationEncryptionConfiguration
"""
prop = self._get_sub_prop('destinationEncryptionConfiguration')
if prop is not None:
prop = EncryptionConfiguration.from_api_repr(prop) # depends on [control=['if'], data=['prop']]
return prop |
def sync(self, ws_name):
"""Synchronise workspace's repositories."""
path = self.config["workspaces"][ws_name]["path"]
repositories = self.config["workspaces"][ws_name]["repositories"]
logger = logging.getLogger(__name__)
color = Color()
for r in os.listdir(path):
try:
repo = Repository(os.path.join(path, r))
except RepositoryError:
continue
else:
repositories[r] = repo.path
for repo_name, path in repositories.items():
logger.info(color.colored(
" - %s" % repo_name, "blue"))
self.config["workspaces"][ws_name]["repositories"]
self.config.write() | def function[sync, parameter[self, ws_name]]:
constant[Synchronise workspace's repositories.]
variable[path] assign[=] call[call[call[name[self].config][constant[workspaces]]][name[ws_name]]][constant[path]]
variable[repositories] assign[=] call[call[call[name[self].config][constant[workspaces]]][name[ws_name]]][constant[repositories]]
variable[logger] assign[=] call[name[logging].getLogger, parameter[name[__name__]]]
variable[color] assign[=] call[name[Color], parameter[]]
for taget[name[r]] in starred[call[name[os].listdir, parameter[name[path]]]] begin[:]
<ast.Try object at 0x7da1b02ae500>
for taget[tuple[[<ast.Name object at 0x7da1b02acca0>, <ast.Name object at 0x7da1b02ae530>]]] in starred[call[name[repositories].items, parameter[]]] begin[:]
call[name[logger].info, parameter[call[name[color].colored, parameter[binary_operation[constant[ - %s] <ast.Mod object at 0x7da2590d6920> name[repo_name]], constant[blue]]]]]
call[call[call[name[self].config][constant[workspaces]]][name[ws_name]]][constant[repositories]]
call[name[self].config.write, parameter[]] | keyword[def] identifier[sync] ( identifier[self] , identifier[ws_name] ):
literal[string]
identifier[path] = identifier[self] . identifier[config] [ literal[string] ][ identifier[ws_name] ][ literal[string] ]
identifier[repositories] = identifier[self] . identifier[config] [ literal[string] ][ identifier[ws_name] ][ literal[string] ]
identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] )
identifier[color] = identifier[Color] ()
keyword[for] identifier[r] keyword[in] identifier[os] . identifier[listdir] ( identifier[path] ):
keyword[try] :
identifier[repo] = identifier[Repository] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[r] ))
keyword[except] identifier[RepositoryError] :
keyword[continue]
keyword[else] :
identifier[repositories] [ identifier[r] ]= identifier[repo] . identifier[path]
keyword[for] identifier[repo_name] , identifier[path] keyword[in] identifier[repositories] . identifier[items] ():
identifier[logger] . identifier[info] ( identifier[color] . identifier[colored] (
literal[string] % identifier[repo_name] , literal[string] ))
identifier[self] . identifier[config] [ literal[string] ][ identifier[ws_name] ][ literal[string] ]
identifier[self] . identifier[config] . identifier[write] () | def sync(self, ws_name):
"""Synchronise workspace's repositories."""
path = self.config['workspaces'][ws_name]['path']
repositories = self.config['workspaces'][ws_name]['repositories']
logger = logging.getLogger(__name__)
color = Color()
for r in os.listdir(path):
try:
repo = Repository(os.path.join(path, r)) # depends on [control=['try'], data=[]]
except RepositoryError:
continue # depends on [control=['except'], data=[]]
else:
repositories[r] = repo.path # depends on [control=['for'], data=['r']]
for (repo_name, path) in repositories.items():
logger.info(color.colored(' - %s' % repo_name, 'blue')) # depends on [control=['for'], data=[]]
self.config['workspaces'][ws_name]['repositories']
self.config.write() |
def _infer_embedded_object(value):
"""
Infer CIMProperty/CIMParameter.embedded_object from the CIM value.
"""
if value is None:
# The default behavior is to assume that a value of None is not
# an embedded object. If the user wants that, they must specify
# the embedded_object parameter.
return False
if isinstance(value, list):
if not value:
# The default behavior is to assume that an empty array value
# is not an embedded object. If the user wants that, they must
# specify the embedded_object parameter.
return False
value = value[0]
if isinstance(value, CIMInstance):
# The default behavior is to produce 'instance', although 'object'
# would also be valid.
return 'instance'
if isinstance(value, CIMClass):
return 'object'
return False | def function[_infer_embedded_object, parameter[value]]:
constant[
Infer CIMProperty/CIMParameter.embedded_object from the CIM value.
]
if compare[name[value] is constant[None]] begin[:]
return[constant[False]]
if call[name[isinstance], parameter[name[value], name[list]]] begin[:]
if <ast.UnaryOp object at 0x7da20e9b3490> begin[:]
return[constant[False]]
variable[value] assign[=] call[name[value]][constant[0]]
if call[name[isinstance], parameter[name[value], name[CIMInstance]]] begin[:]
return[constant[instance]]
if call[name[isinstance], parameter[name[value], name[CIMClass]]] begin[:]
return[constant[object]]
return[constant[False]] | keyword[def] identifier[_infer_embedded_object] ( identifier[value] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[None] :
keyword[return] keyword[False]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[list] ):
keyword[if] keyword[not] identifier[value] :
keyword[return] keyword[False]
identifier[value] = identifier[value] [ literal[int] ]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[CIMInstance] ):
keyword[return] literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[CIMClass] ):
keyword[return] literal[string]
keyword[return] keyword[False] | def _infer_embedded_object(value):
"""
Infer CIMProperty/CIMParameter.embedded_object from the CIM value.
"""
if value is None:
# The default behavior is to assume that a value of None is not
# an embedded object. If the user wants that, they must specify
# the embedded_object parameter.
return False # depends on [control=['if'], data=[]]
if isinstance(value, list):
if not value:
# The default behavior is to assume that an empty array value
# is not an embedded object. If the user wants that, they must
# specify the embedded_object parameter.
return False # depends on [control=['if'], data=[]]
value = value[0] # depends on [control=['if'], data=[]]
if isinstance(value, CIMInstance):
# The default behavior is to produce 'instance', although 'object'
# would also be valid.
return 'instance' # depends on [control=['if'], data=[]]
if isinstance(value, CIMClass):
return 'object' # depends on [control=['if'], data=[]]
return False |
def delete_intf_router(self, name, tenant_id, rout_id, subnet_lst):
"""Delete the openstack router and remove the interfaces attached. """
try:
for subnet_id in subnet_lst:
body = {'subnet_id': subnet_id}
intf = self.neutronclient.remove_interface_router(rout_id,
body=body)
intf.get('id')
except Exception as exc:
LOG.error("Failed to delete router interface %(name)s, "
" Exc %(exc)s", {'name': name, 'exc': str(exc)})
return False
return True | def function[delete_intf_router, parameter[self, name, tenant_id, rout_id, subnet_lst]]:
constant[Delete the openstack router and remove the interfaces attached. ]
<ast.Try object at 0x7da1b1c61690>
return[constant[True]] | keyword[def] identifier[delete_intf_router] ( identifier[self] , identifier[name] , identifier[tenant_id] , identifier[rout_id] , identifier[subnet_lst] ):
literal[string]
keyword[try] :
keyword[for] identifier[subnet_id] keyword[in] identifier[subnet_lst] :
identifier[body] ={ literal[string] : identifier[subnet_id] }
identifier[intf] = identifier[self] . identifier[neutronclient] . identifier[remove_interface_router] ( identifier[rout_id] ,
identifier[body] = identifier[body] )
identifier[intf] . identifier[get] ( literal[string] )
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
identifier[LOG] . identifier[error] ( literal[string]
literal[string] ,{ literal[string] : identifier[name] , literal[string] : identifier[str] ( identifier[exc] )})
keyword[return] keyword[False]
keyword[return] keyword[True] | def delete_intf_router(self, name, tenant_id, rout_id, subnet_lst):
"""Delete the openstack router and remove the interfaces attached. """
try:
for subnet_id in subnet_lst:
body = {'subnet_id': subnet_id}
intf = self.neutronclient.remove_interface_router(rout_id, body=body)
intf.get('id') # depends on [control=['for'], data=['subnet_id']] # depends on [control=['try'], data=[]]
except Exception as exc:
LOG.error('Failed to delete router interface %(name)s, Exc %(exc)s', {'name': name, 'exc': str(exc)})
return False # depends on [control=['except'], data=['exc']]
return True |
def last_of(self, unit, day_of_week=None):
"""
Returns an instance set to the last occurrence
of a given day of the week in the current unit.
If no day_of_week is provided, modify to the last day of the unit.
Use the supplied consts to indicate the desired day_of_week, ex. DateTime.MONDAY.
Supported units are month, quarter and year.
:param unit: The unit to use
:type unit: str
:type day_of_week: int or None
:rtype: DateTime
"""
if unit not in ["month", "quarter", "year"]:
raise ValueError('Invalid unit "{}" for first_of()'.format(unit))
return getattr(self, "_last_of_{}".format(unit))(day_of_week) | def function[last_of, parameter[self, unit, day_of_week]]:
constant[
Returns an instance set to the last occurrence
of a given day of the week in the current unit.
If no day_of_week is provided, modify to the last day of the unit.
Use the supplied consts to indicate the desired day_of_week, ex. DateTime.MONDAY.
Supported units are month, quarter and year.
:param unit: The unit to use
:type unit: str
:type day_of_week: int or None
:rtype: DateTime
]
if compare[name[unit] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da1b222dfc0>, <ast.Constant object at 0x7da1b222e230>, <ast.Constant object at 0x7da1b222c700>]]] begin[:]
<ast.Raise object at 0x7da1b222e920>
return[call[call[name[getattr], parameter[name[self], call[constant[_last_of_{}].format, parameter[name[unit]]]]], parameter[name[day_of_week]]]] | keyword[def] identifier[last_of] ( identifier[self] , identifier[unit] , identifier[day_of_week] = keyword[None] ):
literal[string]
keyword[if] identifier[unit] keyword[not] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[unit] ))
keyword[return] identifier[getattr] ( identifier[self] , literal[string] . identifier[format] ( identifier[unit] ))( identifier[day_of_week] ) | def last_of(self, unit, day_of_week=None):
"""
Returns an instance set to the last occurrence
of a given day of the week in the current unit.
If no day_of_week is provided, modify to the last day of the unit.
Use the supplied consts to indicate the desired day_of_week, ex. DateTime.MONDAY.
Supported units are month, quarter and year.
:param unit: The unit to use
:type unit: str
:type day_of_week: int or None
:rtype: DateTime
"""
if unit not in ['month', 'quarter', 'year']:
raise ValueError('Invalid unit "{}" for first_of()'.format(unit)) # depends on [control=['if'], data=['unit']]
return getattr(self, '_last_of_{}'.format(unit))(day_of_week) |
def set_selection(self, time, freqs, blarr, calname='', radec=(), dist=0, spwind=[], pols=['XX','YY']):
""" Set select parameter that defines spectral window, time, or any other selection.
time (in mjd) defines the time to find solutions near for given calname.
freqs (in Hz) is frequencies in data.
blarr is array of size 2xnbl that gives pairs of antennas in each baseline (a la tpipe.blarr).
calname defines the name of the calibrator to use. if blank, uses only the time selection.
pols is from d['pols'] (e.g., ['RR']). single or dual parallel allowed. not yet implemented.
radec, dist, spwind not used. here for uniformity with casa_sol.
"""
self.freqs = freqs
self.chansize = freqs[1]-freqs[0]
self.select = self.complete # use only complete solution sets (set during parse)
self.blarr = blarr
if spwind:
self.logger.warn('spwind option not used for telcal_sol. Applied based on freqs.')
if radec:
self.logger.warn('radec option not used for telcal_sol. Applied based on calname.')
if dist:
self.logger.warn('dist option not used for telcal_sol. Applied based on calname.')
# define pol index
if 'X' in ''.join(pols) or 'Y' in ''.join(pols):
polord = ['XX', 'YY']
elif 'R' in ''.join(pols) or 'L' in ''.join(pols):
polord = ['RR', 'LL']
self.polind = [polord.index(pol) for pol in pols]
if calname:
nameselect = []
for ss in n.unique(self.source[self.select]):
if calname in ss:
nameselect = n.where(self.source[self.select] == ss) # define selection for name
self.select = self.select[nameselect] # update overall selection
self.logger.debug('Selection down to %d solutions with %s' % (len(self.select), calname))
if not nameselect:
self.logger.warn('Calibrator name %s not found. Ignoring.' % (calname))
# select freq
freqselect = n.where([ff in n.around(self.freqs, -6) for ff in n.around(1e6*self.skyfreq[self.select], -6)]) # takes solution if band center is in (rounded) array of chan freqs
if len(freqselect[0]) == 0:
raise StandardError('No complete set of telcal solutions at that frequency.')
self.select = self.select[freqselect[0]] # update overall selection
self.logger.info('Frequency selection cut down to %d solutions' % (len(self.select)))
# select pol
# ifids = self.ifid[self.select]
# if (polstr == 'RR') or (polstr == 'XX'):
# polselect = n.where(['A' in ifid or 'B' in ifid for ifid in ifids])
# elif (polstr == 'LL') or (polstr == 'YY'):
# polselect = n.where(['C' in ifid or 'D' in ifid for ifid in ifids])
# self.select = self.select[polselect] # update overall selection
self.polarization = n.empty(len(self.ifid))
for i in range(len(self.ifid)):
if ('A' in self.ifid[i]) or ('B' in self.ifid[i]):
self.polarization[i] = 0
elif ('C' in self.ifid[i]) or ('D' in self.ifid[i]):
self.polarization[i] = 1
# select by smallest time distance for source
mjddist = n.abs(time - n.unique(self.mjd[self.select]))
closest = n.where(mjddist == mjddist.min())
if len(closest[0]) > 1:
self.logger.info('Multiple closest solutions in time (%s). Taking first.' % (str(closest[0])))
closest = closest[0][0]
timeselect = n.where(self.mjd[self.select] == n.unique(self.mjd[self.select])[closest]) # define selection for time
self.select = self.select[timeselect[0]] # update overall selection
self.logger.info('Selection down to %d solutions separated from given time by %d minutes' % (len(self.select), mjddist[closest]*24*60))
self.logger.debug('Selected solutions: %s' % str(self.select))
self.logger.info('MJD: %s' % str(n.unique(self.mjd[self.select])))
self.logger.debug('Mid frequency (MHz): %s' % str(n.unique(self.skyfreq[self.select])))
self.logger.debug('IFID: %s' % str(n.unique(self.ifid[self.select])))
self.logger.info('Source: %s' % str(n.unique(self.source[self.select])))
self.logger.debug('Ants: %s' % str(n.unique(self.antname[self.select]))) | def function[set_selection, parameter[self, time, freqs, blarr, calname, radec, dist, spwind, pols]]:
constant[ Set select parameter that defines spectral window, time, or any other selection.
time (in mjd) defines the time to find solutions near for given calname.
freqs (in Hz) is frequencies in data.
blarr is array of size 2xnbl that gives pairs of antennas in each baseline (a la tpipe.blarr).
calname defines the name of the calibrator to use. if blank, uses only the time selection.
pols is from d['pols'] (e.g., ['RR']). single or dual parallel allowed. not yet implemented.
radec, dist, spwind not used. here for uniformity with casa_sol.
]
name[self].freqs assign[=] name[freqs]
name[self].chansize assign[=] binary_operation[call[name[freqs]][constant[1]] - call[name[freqs]][constant[0]]]
name[self].select assign[=] name[self].complete
name[self].blarr assign[=] name[blarr]
if name[spwind] begin[:]
call[name[self].logger.warn, parameter[constant[spwind option not used for telcal_sol. Applied based on freqs.]]]
if name[radec] begin[:]
call[name[self].logger.warn, parameter[constant[radec option not used for telcal_sol. Applied based on calname.]]]
if name[dist] begin[:]
call[name[self].logger.warn, parameter[constant[dist option not used for telcal_sol. Applied based on calname.]]]
if <ast.BoolOp object at 0x7da1b25c8df0> begin[:]
variable[polord] assign[=] list[[<ast.Constant object at 0x7da1b25c82b0>, <ast.Constant object at 0x7da1b25ca8c0>]]
name[self].polind assign[=] <ast.ListComp object at 0x7da1b25c9870>
if name[calname] begin[:]
variable[nameselect] assign[=] list[[]]
for taget[name[ss]] in starred[call[name[n].unique, parameter[call[name[self].source][name[self].select]]]] begin[:]
if compare[name[calname] in name[ss]] begin[:]
variable[nameselect] assign[=] call[name[n].where, parameter[compare[call[name[self].source][name[self].select] equal[==] name[ss]]]]
name[self].select assign[=] call[name[self].select][name[nameselect]]
call[name[self].logger.debug, parameter[binary_operation[constant[Selection down to %d solutions with %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b25c8070>, <ast.Name object at 0x7da1b2529cc0>]]]]]
if <ast.UnaryOp object at 0x7da1b2528820> begin[:]
call[name[self].logger.warn, parameter[binary_operation[constant[Calibrator name %s not found. Ignoring.] <ast.Mod object at 0x7da2590d6920> name[calname]]]]
variable[freqselect] assign[=] call[name[n].where, parameter[<ast.ListComp object at 0x7da1b252a0e0>]]
if compare[call[name[len], parameter[call[name[freqselect]][constant[0]]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da1b252b9a0>
name[self].select assign[=] call[name[self].select][call[name[freqselect]][constant[0]]]
call[name[self].logger.info, parameter[binary_operation[constant[Frequency selection cut down to %d solutions] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[self].select]]]]]
name[self].polarization assign[=] call[name[n].empty, parameter[call[name[len], parameter[name[self].ifid]]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[self].ifid]]]]] begin[:]
if <ast.BoolOp object at 0x7da1b252a440> begin[:]
call[name[self].polarization][name[i]] assign[=] constant[0]
variable[mjddist] assign[=] call[name[n].abs, parameter[binary_operation[name[time] - call[name[n].unique, parameter[call[name[self].mjd][name[self].select]]]]]]
variable[closest] assign[=] call[name[n].where, parameter[compare[name[mjddist] equal[==] call[name[mjddist].min, parameter[]]]]]
if compare[call[name[len], parameter[call[name[closest]][constant[0]]]] greater[>] constant[1]] begin[:]
call[name[self].logger.info, parameter[binary_operation[constant[Multiple closest solutions in time (%s). Taking first.] <ast.Mod object at 0x7da2590d6920> call[name[str], parameter[call[name[closest]][constant[0]]]]]]]
variable[closest] assign[=] call[call[name[closest]][constant[0]]][constant[0]]
variable[timeselect] assign[=] call[name[n].where, parameter[compare[call[name[self].mjd][name[self].select] equal[==] call[call[name[n].unique, parameter[call[name[self].mjd][name[self].select]]]][name[closest]]]]]
name[self].select assign[=] call[name[self].select][call[name[timeselect]][constant[0]]]
call[name[self].logger.info, parameter[binary_operation[constant[Selection down to %d solutions separated from given time by %d minutes] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b25282e0>, <ast.BinOp object at 0x7da1b2528850>]]]]]
call[name[self].logger.debug, parameter[binary_operation[constant[Selected solutions: %s] <ast.Mod object at 0x7da2590d6920> call[name[str], parameter[name[self].select]]]]]
call[name[self].logger.info, parameter[binary_operation[constant[MJD: %s] <ast.Mod object at 0x7da2590d6920> call[name[str], parameter[call[name[n].unique, parameter[call[name[self].mjd][name[self].select]]]]]]]]
call[name[self].logger.debug, parameter[binary_operation[constant[Mid frequency (MHz): %s] <ast.Mod object at 0x7da2590d6920> call[name[str], parameter[call[name[n].unique, parameter[call[name[self].skyfreq][name[self].select]]]]]]]]
call[name[self].logger.debug, parameter[binary_operation[constant[IFID: %s] <ast.Mod object at 0x7da2590d6920> call[name[str], parameter[call[name[n].unique, parameter[call[name[self].ifid][name[self].select]]]]]]]]
call[name[self].logger.info, parameter[binary_operation[constant[Source: %s] <ast.Mod object at 0x7da2590d6920> call[name[str], parameter[call[name[n].unique, parameter[call[name[self].source][name[self].select]]]]]]]]
call[name[self].logger.debug, parameter[binary_operation[constant[Ants: %s] <ast.Mod object at 0x7da2590d6920> call[name[str], parameter[call[name[n].unique, parameter[call[name[self].antname][name[self].select]]]]]]]] | keyword[def] identifier[set_selection] ( identifier[self] , identifier[time] , identifier[freqs] , identifier[blarr] , identifier[calname] = literal[string] , identifier[radec] =(), identifier[dist] = literal[int] , identifier[spwind] =[], identifier[pols] =[ literal[string] , literal[string] ]):
literal[string]
identifier[self] . identifier[freqs] = identifier[freqs]
identifier[self] . identifier[chansize] = identifier[freqs] [ literal[int] ]- identifier[freqs] [ literal[int] ]
identifier[self] . identifier[select] = identifier[self] . identifier[complete]
identifier[self] . identifier[blarr] = identifier[blarr]
keyword[if] identifier[spwind] :
identifier[self] . identifier[logger] . identifier[warn] ( literal[string] )
keyword[if] identifier[radec] :
identifier[self] . identifier[logger] . identifier[warn] ( literal[string] )
keyword[if] identifier[dist] :
identifier[self] . identifier[logger] . identifier[warn] ( literal[string] )
keyword[if] literal[string] keyword[in] literal[string] . identifier[join] ( identifier[pols] ) keyword[or] literal[string] keyword[in] literal[string] . identifier[join] ( identifier[pols] ):
identifier[polord] =[ literal[string] , literal[string] ]
keyword[elif] literal[string] keyword[in] literal[string] . identifier[join] ( identifier[pols] ) keyword[or] literal[string] keyword[in] literal[string] . identifier[join] ( identifier[pols] ):
identifier[polord] =[ literal[string] , literal[string] ]
identifier[self] . identifier[polind] =[ identifier[polord] . identifier[index] ( identifier[pol] ) keyword[for] identifier[pol] keyword[in] identifier[pols] ]
keyword[if] identifier[calname] :
identifier[nameselect] =[]
keyword[for] identifier[ss] keyword[in] identifier[n] . identifier[unique] ( identifier[self] . identifier[source] [ identifier[self] . identifier[select] ]):
keyword[if] identifier[calname] keyword[in] identifier[ss] :
identifier[nameselect] = identifier[n] . identifier[where] ( identifier[self] . identifier[source] [ identifier[self] . identifier[select] ]== identifier[ss] )
identifier[self] . identifier[select] = identifier[self] . identifier[select] [ identifier[nameselect] ]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] %( identifier[len] ( identifier[self] . identifier[select] ), identifier[calname] ))
keyword[if] keyword[not] identifier[nameselect] :
identifier[self] . identifier[logger] . identifier[warn] ( literal[string] %( identifier[calname] ))
identifier[freqselect] = identifier[n] . identifier[where] ([ identifier[ff] keyword[in] identifier[n] . identifier[around] ( identifier[self] . identifier[freqs] ,- literal[int] ) keyword[for] identifier[ff] keyword[in] identifier[n] . identifier[around] ( literal[int] * identifier[self] . identifier[skyfreq] [ identifier[self] . identifier[select] ],- literal[int] )])
keyword[if] identifier[len] ( identifier[freqselect] [ literal[int] ])== literal[int] :
keyword[raise] identifier[StandardError] ( literal[string] )
identifier[self] . identifier[select] = identifier[self] . identifier[select] [ identifier[freqselect] [ literal[int] ]]
identifier[self] . identifier[logger] . identifier[info] ( literal[string] %( identifier[len] ( identifier[self] . identifier[select] )))
identifier[self] . identifier[polarization] = identifier[n] . identifier[empty] ( identifier[len] ( identifier[self] . identifier[ifid] ))
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[ifid] )):
keyword[if] ( literal[string] keyword[in] identifier[self] . identifier[ifid] [ identifier[i] ]) keyword[or] ( literal[string] keyword[in] identifier[self] . identifier[ifid] [ identifier[i] ]):
identifier[self] . identifier[polarization] [ identifier[i] ]= literal[int]
keyword[elif] ( literal[string] keyword[in] identifier[self] . identifier[ifid] [ identifier[i] ]) keyword[or] ( literal[string] keyword[in] identifier[self] . identifier[ifid] [ identifier[i] ]):
identifier[self] . identifier[polarization] [ identifier[i] ]= literal[int]
identifier[mjddist] = identifier[n] . identifier[abs] ( identifier[time] - identifier[n] . identifier[unique] ( identifier[self] . identifier[mjd] [ identifier[self] . identifier[select] ]))
identifier[closest] = identifier[n] . identifier[where] ( identifier[mjddist] == identifier[mjddist] . identifier[min] ())
keyword[if] identifier[len] ( identifier[closest] [ literal[int] ])> literal[int] :
identifier[self] . identifier[logger] . identifier[info] ( literal[string] %( identifier[str] ( identifier[closest] [ literal[int] ])))
identifier[closest] = identifier[closest] [ literal[int] ][ literal[int] ]
identifier[timeselect] = identifier[n] . identifier[where] ( identifier[self] . identifier[mjd] [ identifier[self] . identifier[select] ]== identifier[n] . identifier[unique] ( identifier[self] . identifier[mjd] [ identifier[self] . identifier[select] ])[ identifier[closest] ])
identifier[self] . identifier[select] = identifier[self] . identifier[select] [ identifier[timeselect] [ literal[int] ]]
identifier[self] . identifier[logger] . identifier[info] ( literal[string] %( identifier[len] ( identifier[self] . identifier[select] ), identifier[mjddist] [ identifier[closest] ]* literal[int] * literal[int] ))
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] % identifier[str] ( identifier[self] . identifier[select] ))
identifier[self] . identifier[logger] . identifier[info] ( literal[string] % identifier[str] ( identifier[n] . identifier[unique] ( identifier[self] . identifier[mjd] [ identifier[self] . identifier[select] ])))
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] % identifier[str] ( identifier[n] . identifier[unique] ( identifier[self] . identifier[skyfreq] [ identifier[self] . identifier[select] ])))
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] % identifier[str] ( identifier[n] . identifier[unique] ( identifier[self] . identifier[ifid] [ identifier[self] . identifier[select] ])))
identifier[self] . identifier[logger] . identifier[info] ( literal[string] % identifier[str] ( identifier[n] . identifier[unique] ( identifier[self] . identifier[source] [ identifier[self] . identifier[select] ])))
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] % identifier[str] ( identifier[n] . identifier[unique] ( identifier[self] . identifier[antname] [ identifier[self] . identifier[select] ]))) | def set_selection(self, time, freqs, blarr, calname='', radec=(), dist=0, spwind=[], pols=['XX', 'YY']):
""" Set select parameter that defines spectral window, time, or any other selection.
time (in mjd) defines the time to find solutions near for given calname.
freqs (in Hz) is frequencies in data.
blarr is array of size 2xnbl that gives pairs of antennas in each baseline (a la tpipe.blarr).
calname defines the name of the calibrator to use. if blank, uses only the time selection.
pols is from d['pols'] (e.g., ['RR']). single or dual parallel allowed. not yet implemented.
radec, dist, spwind not used. here for uniformity with casa_sol.
"""
self.freqs = freqs
self.chansize = freqs[1] - freqs[0]
self.select = self.complete # use only complete solution sets (set during parse)
self.blarr = blarr
if spwind:
self.logger.warn('spwind option not used for telcal_sol. Applied based on freqs.') # depends on [control=['if'], data=[]]
if radec:
self.logger.warn('radec option not used for telcal_sol. Applied based on calname.') # depends on [control=['if'], data=[]]
if dist:
self.logger.warn('dist option not used for telcal_sol. Applied based on calname.') # depends on [control=['if'], data=[]]
# define pol index
if 'X' in ''.join(pols) or 'Y' in ''.join(pols):
polord = ['XX', 'YY'] # depends on [control=['if'], data=[]]
elif 'R' in ''.join(pols) or 'L' in ''.join(pols):
polord = ['RR', 'LL'] # depends on [control=['if'], data=[]]
self.polind = [polord.index(pol) for pol in pols]
if calname:
nameselect = []
for ss in n.unique(self.source[self.select]):
if calname in ss:
nameselect = n.where(self.source[self.select] == ss) # define selection for name
self.select = self.select[nameselect] # update overall selection
self.logger.debug('Selection down to %d solutions with %s' % (len(self.select), calname)) # depends on [control=['if'], data=['calname', 'ss']] # depends on [control=['for'], data=['ss']]
if not nameselect:
self.logger.warn('Calibrator name %s not found. Ignoring.' % calname) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# select freq
freqselect = n.where([ff in n.around(self.freqs, -6) for ff in n.around(1000000.0 * self.skyfreq[self.select], -6)]) # takes solution if band center is in (rounded) array of chan freqs
if len(freqselect[0]) == 0:
raise StandardError('No complete set of telcal solutions at that frequency.') # depends on [control=['if'], data=[]]
self.select = self.select[freqselect[0]] # update overall selection
self.logger.info('Frequency selection cut down to %d solutions' % len(self.select))
# select pol
# ifids = self.ifid[self.select]
# if (polstr == 'RR') or (polstr == 'XX'):
# polselect = n.where(['A' in ifid or 'B' in ifid for ifid in ifids])
# elif (polstr == 'LL') or (polstr == 'YY'):
# polselect = n.where(['C' in ifid or 'D' in ifid for ifid in ifids])
# self.select = self.select[polselect] # update overall selection
self.polarization = n.empty(len(self.ifid))
for i in range(len(self.ifid)):
if 'A' in self.ifid[i] or 'B' in self.ifid[i]:
self.polarization[i] = 0 # depends on [control=['if'], data=[]]
elif 'C' in self.ifid[i] or 'D' in self.ifid[i]:
self.polarization[i] = 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
# select by smallest time distance for source
mjddist = n.abs(time - n.unique(self.mjd[self.select]))
closest = n.where(mjddist == mjddist.min())
if len(closest[0]) > 1:
self.logger.info('Multiple closest solutions in time (%s). Taking first.' % str(closest[0]))
closest = closest[0][0] # depends on [control=['if'], data=[]]
timeselect = n.where(self.mjd[self.select] == n.unique(self.mjd[self.select])[closest]) # define selection for time
self.select = self.select[timeselect[0]] # update overall selection
self.logger.info('Selection down to %d solutions separated from given time by %d minutes' % (len(self.select), mjddist[closest] * 24 * 60))
self.logger.debug('Selected solutions: %s' % str(self.select))
self.logger.info('MJD: %s' % str(n.unique(self.mjd[self.select])))
self.logger.debug('Mid frequency (MHz): %s' % str(n.unique(self.skyfreq[self.select])))
self.logger.debug('IFID: %s' % str(n.unique(self.ifid[self.select])))
self.logger.info('Source: %s' % str(n.unique(self.source[self.select])))
self.logger.debug('Ants: %s' % str(n.unique(self.antname[self.select]))) |
def server_session(user, password, salt, A, B, b):
"""
Server session secret
Both: u = H(A, B)
Host: S = (Av^u) ^ b (computes session key)
Host: K = H(S)
"""
N, g, k = get_prime()
u = get_scramble(A, B)
v = get_verifier(user, password, salt)
vu = pow(v, u, N) # v^u
Avu = (A * vu) % N # Av^u
session_secret = pow(Avu, b, N) # (Av^u) ^ b
K = hash_digest(hashlib.sha1, session_secret)
if DEBUG_PRINT:
print('server session_secret=', binascii.b2a_hex(long2bytes(session_secret)), end='\n')
print('server session hash K=', binascii.b2a_hex(K))
return K | def function[server_session, parameter[user, password, salt, A, B, b]]:
constant[
Server session secret
Both: u = H(A, B)
Host: S = (Av^u) ^ b (computes session key)
Host: K = H(S)
]
<ast.Tuple object at 0x7da1b1b9e8c0> assign[=] call[name[get_prime], parameter[]]
variable[u] assign[=] call[name[get_scramble], parameter[name[A], name[B]]]
variable[v] assign[=] call[name[get_verifier], parameter[name[user], name[password], name[salt]]]
variable[vu] assign[=] call[name[pow], parameter[name[v], name[u], name[N]]]
variable[Avu] assign[=] binary_operation[binary_operation[name[A] * name[vu]] <ast.Mod object at 0x7da2590d6920> name[N]]
variable[session_secret] assign[=] call[name[pow], parameter[name[Avu], name[b], name[N]]]
variable[K] assign[=] call[name[hash_digest], parameter[name[hashlib].sha1, name[session_secret]]]
if name[DEBUG_PRINT] begin[:]
call[name[print], parameter[constant[server session_secret=], call[name[binascii].b2a_hex, parameter[call[name[long2bytes], parameter[name[session_secret]]]]]]]
call[name[print], parameter[constant[server session hash K=], call[name[binascii].b2a_hex, parameter[name[K]]]]]
return[name[K]] | keyword[def] identifier[server_session] ( identifier[user] , identifier[password] , identifier[salt] , identifier[A] , identifier[B] , identifier[b] ):
literal[string]
identifier[N] , identifier[g] , identifier[k] = identifier[get_prime] ()
identifier[u] = identifier[get_scramble] ( identifier[A] , identifier[B] )
identifier[v] = identifier[get_verifier] ( identifier[user] , identifier[password] , identifier[salt] )
identifier[vu] = identifier[pow] ( identifier[v] , identifier[u] , identifier[N] )
identifier[Avu] =( identifier[A] * identifier[vu] )% identifier[N]
identifier[session_secret] = identifier[pow] ( identifier[Avu] , identifier[b] , identifier[N] )
identifier[K] = identifier[hash_digest] ( identifier[hashlib] . identifier[sha1] , identifier[session_secret] )
keyword[if] identifier[DEBUG_PRINT] :
identifier[print] ( literal[string] , identifier[binascii] . identifier[b2a_hex] ( identifier[long2bytes] ( identifier[session_secret] )), identifier[end] = literal[string] )
identifier[print] ( literal[string] , identifier[binascii] . identifier[b2a_hex] ( identifier[K] ))
keyword[return] identifier[K] | def server_session(user, password, salt, A, B, b):
"""
Server session secret
Both: u = H(A, B)
Host: S = (Av^u) ^ b (computes session key)
Host: K = H(S)
"""
(N, g, k) = get_prime()
u = get_scramble(A, B)
v = get_verifier(user, password, salt)
vu = pow(v, u, N) # v^u
Avu = A * vu % N # Av^u
session_secret = pow(Avu, b, N) # (Av^u) ^ b
K = hash_digest(hashlib.sha1, session_secret)
if DEBUG_PRINT:
print('server session_secret=', binascii.b2a_hex(long2bytes(session_secret)), end='\n')
print('server session hash K=', binascii.b2a_hex(K)) # depends on [control=['if'], data=[]]
return K |
def binary_search_batch(original_image, perturbed_images, decision_function,
shape, constraint, theta):
""" Binary search to approach the boundary. """
# Compute distance between each of perturbed image and original image.
dists_post_update = np.array([
compute_distance(
original_image,
perturbed_image,
constraint
)
for perturbed_image in perturbed_images])
# Choose upper thresholds in binary searchs based on constraint.
if constraint == 'linf':
highs = dists_post_update
# Stopping criteria.
thresholds = np.minimum(dists_post_update * theta, theta)
else:
highs = np.ones(len(perturbed_images))
thresholds = theta
lows = np.zeros(len(perturbed_images))
while np.max((highs - lows) / thresholds) > 1:
# projection to mids.
mids = (highs + lows) / 2.0
mid_images = project(original_image, perturbed_images,
mids, shape, constraint)
# Update highs and lows based on model decisions.
decisions = decision_function(mid_images)
lows = np.where(decisions == 0, mids, lows)
highs = np.where(decisions == 1, mids, highs)
out_images = project(original_image, perturbed_images,
highs, shape, constraint)
# Compute distance of the output image to select the best choice.
# (only used when stepsize_search is grid_search.)
dists = np.array([
compute_distance(
original_image,
out_image,
constraint
)
for out_image in out_images])
idx = np.argmin(dists)
dist = dists_post_update[idx]
out_image = out_images[idx]
return out_image, dist | def function[binary_search_batch, parameter[original_image, perturbed_images, decision_function, shape, constraint, theta]]:
constant[ Binary search to approach the boundary. ]
variable[dists_post_update] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da207f99f30>]]
if compare[name[constraint] equal[==] constant[linf]] begin[:]
variable[highs] assign[=] name[dists_post_update]
variable[thresholds] assign[=] call[name[np].minimum, parameter[binary_operation[name[dists_post_update] * name[theta]], name[theta]]]
variable[lows] assign[=] call[name[np].zeros, parameter[call[name[len], parameter[name[perturbed_images]]]]]
while compare[call[name[np].max, parameter[binary_operation[binary_operation[name[highs] - name[lows]] / name[thresholds]]]] greater[>] constant[1]] begin[:]
variable[mids] assign[=] binary_operation[binary_operation[name[highs] + name[lows]] / constant[2.0]]
variable[mid_images] assign[=] call[name[project], parameter[name[original_image], name[perturbed_images], name[mids], name[shape], name[constraint]]]
variable[decisions] assign[=] call[name[decision_function], parameter[name[mid_images]]]
variable[lows] assign[=] call[name[np].where, parameter[compare[name[decisions] equal[==] constant[0]], name[mids], name[lows]]]
variable[highs] assign[=] call[name[np].where, parameter[compare[name[decisions] equal[==] constant[1]], name[mids], name[highs]]]
variable[out_images] assign[=] call[name[project], parameter[name[original_image], name[perturbed_images], name[highs], name[shape], name[constraint]]]
variable[dists] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1b1ef15a0>]]
variable[idx] assign[=] call[name[np].argmin, parameter[name[dists]]]
variable[dist] assign[=] call[name[dists_post_update]][name[idx]]
variable[out_image] assign[=] call[name[out_images]][name[idx]]
return[tuple[[<ast.Name object at 0x7da1b1ef0e20>, <ast.Name object at 0x7da1b1ef3b50>]]] | keyword[def] identifier[binary_search_batch] ( identifier[original_image] , identifier[perturbed_images] , identifier[decision_function] ,
identifier[shape] , identifier[constraint] , identifier[theta] ):
literal[string]
identifier[dists_post_update] = identifier[np] . identifier[array] ([
identifier[compute_distance] (
identifier[original_image] ,
identifier[perturbed_image] ,
identifier[constraint]
)
keyword[for] identifier[perturbed_image] keyword[in] identifier[perturbed_images] ])
keyword[if] identifier[constraint] == literal[string] :
identifier[highs] = identifier[dists_post_update]
identifier[thresholds] = identifier[np] . identifier[minimum] ( identifier[dists_post_update] * identifier[theta] , identifier[theta] )
keyword[else] :
identifier[highs] = identifier[np] . identifier[ones] ( identifier[len] ( identifier[perturbed_images] ))
identifier[thresholds] = identifier[theta]
identifier[lows] = identifier[np] . identifier[zeros] ( identifier[len] ( identifier[perturbed_images] ))
keyword[while] identifier[np] . identifier[max] (( identifier[highs] - identifier[lows] )/ identifier[thresholds] )> literal[int] :
identifier[mids] =( identifier[highs] + identifier[lows] )/ literal[int]
identifier[mid_images] = identifier[project] ( identifier[original_image] , identifier[perturbed_images] ,
identifier[mids] , identifier[shape] , identifier[constraint] )
identifier[decisions] = identifier[decision_function] ( identifier[mid_images] )
identifier[lows] = identifier[np] . identifier[where] ( identifier[decisions] == literal[int] , identifier[mids] , identifier[lows] )
identifier[highs] = identifier[np] . identifier[where] ( identifier[decisions] == literal[int] , identifier[mids] , identifier[highs] )
identifier[out_images] = identifier[project] ( identifier[original_image] , identifier[perturbed_images] ,
identifier[highs] , identifier[shape] , identifier[constraint] )
identifier[dists] = identifier[np] . identifier[array] ([
identifier[compute_distance] (
identifier[original_image] ,
identifier[out_image] ,
identifier[constraint]
)
keyword[for] identifier[out_image] keyword[in] identifier[out_images] ])
identifier[idx] = identifier[np] . identifier[argmin] ( identifier[dists] )
identifier[dist] = identifier[dists_post_update] [ identifier[idx] ]
identifier[out_image] = identifier[out_images] [ identifier[idx] ]
keyword[return] identifier[out_image] , identifier[dist] | def binary_search_batch(original_image, perturbed_images, decision_function, shape, constraint, theta):
""" Binary search to approach the boundary. """
# Compute distance between each of perturbed image and original image.
dists_post_update = np.array([compute_distance(original_image, perturbed_image, constraint) for perturbed_image in perturbed_images])
# Choose upper thresholds in binary searchs based on constraint.
if constraint == 'linf':
highs = dists_post_update
# Stopping criteria.
thresholds = np.minimum(dists_post_update * theta, theta) # depends on [control=['if'], data=[]]
else:
highs = np.ones(len(perturbed_images))
thresholds = theta
lows = np.zeros(len(perturbed_images))
while np.max((highs - lows) / thresholds) > 1:
# projection to mids.
mids = (highs + lows) / 2.0
mid_images = project(original_image, perturbed_images, mids, shape, constraint)
# Update highs and lows based on model decisions.
decisions = decision_function(mid_images)
lows = np.where(decisions == 0, mids, lows)
highs = np.where(decisions == 1, mids, highs) # depends on [control=['while'], data=[]]
out_images = project(original_image, perturbed_images, highs, shape, constraint)
# Compute distance of the output image to select the best choice.
# (only used when stepsize_search is grid_search.)
dists = np.array([compute_distance(original_image, out_image, constraint) for out_image in out_images])
idx = np.argmin(dists)
dist = dists_post_update[idx]
out_image = out_images[idx]
return (out_image, dist) |
def register(self, bug: Bug) -> None:
"""
Dynamically registers a given bug with the server. Note that the
registration will not persist beyond the lifetime of the server.
(I.e., when the server is closed, the bug will be deregistered.)
Raises:
BugAlreadyExists: if there is already a bug registered on the
server under the same name as this bug.
"""
path = "bugs/{}".format(bug.name)
payload = bug.to_dict()
r = self.__api.put(path, json=payload)
if r.status_code != 204:
self.__api.handle_erroneous_response(r) | def function[register, parameter[self, bug]]:
constant[
Dynamically registers a given bug with the server. Note that the
registration will not persist beyond the lifetime of the server.
(I.e., when the server is closed, the bug will be deregistered.)
Raises:
BugAlreadyExists: if there is already a bug registered on the
server under the same name as this bug.
]
variable[path] assign[=] call[constant[bugs/{}].format, parameter[name[bug].name]]
variable[payload] assign[=] call[name[bug].to_dict, parameter[]]
variable[r] assign[=] call[name[self].__api.put, parameter[name[path]]]
if compare[name[r].status_code not_equal[!=] constant[204]] begin[:]
call[name[self].__api.handle_erroneous_response, parameter[name[r]]] | keyword[def] identifier[register] ( identifier[self] , identifier[bug] : identifier[Bug] )-> keyword[None] :
literal[string]
identifier[path] = literal[string] . identifier[format] ( identifier[bug] . identifier[name] )
identifier[payload] = identifier[bug] . identifier[to_dict] ()
identifier[r] = identifier[self] . identifier[__api] . identifier[put] ( identifier[path] , identifier[json] = identifier[payload] )
keyword[if] identifier[r] . identifier[status_code] != literal[int] :
identifier[self] . identifier[__api] . identifier[handle_erroneous_response] ( identifier[r] ) | def register(self, bug: Bug) -> None:
"""
Dynamically registers a given bug with the server. Note that the
registration will not persist beyond the lifetime of the server.
(I.e., when the server is closed, the bug will be deregistered.)
Raises:
BugAlreadyExists: if there is already a bug registered on the
server under the same name as this bug.
"""
path = 'bugs/{}'.format(bug.name)
payload = bug.to_dict()
r = self.__api.put(path, json=payload)
if r.status_code != 204:
self.__api.handle_erroneous_response(r) # depends on [control=['if'], data=[]] |
def targets_format(self, value):
"""
Setter for **self.__targets_format** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
"targets_format", value)
assert os.path.exists(value), "'{0}' attribute: '{1}' file doesn't exists!".format("targets_format", value)
self.__targets_format = value | def function[targets_format, parameter[self, value]]:
constant[
Setter for **self.__targets_format** attribute.
:param value: Attribute value.
:type value: unicode
]
if compare[name[value] is_not constant[None]] begin[:]
assert[compare[call[name[type], parameter[name[value]]] is name[unicode]]]
assert[call[name[os].path.exists, parameter[name[value]]]]
name[self].__targets_format assign[=] name[value] | keyword[def] identifier[targets_format] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
keyword[assert] identifier[type] ( identifier[value] ) keyword[is] identifier[unicode] , literal[string] . identifier[format] (
literal[string] , identifier[value] )
keyword[assert] identifier[os] . identifier[path] . identifier[exists] ( identifier[value] ), literal[string] . identifier[format] ( literal[string] , identifier[value] )
identifier[self] . identifier[__targets_format] = identifier[value] | def targets_format(self, value):
"""
Setter for **self.__targets_format** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format('targets_format', value)
assert os.path.exists(value), "'{0}' attribute: '{1}' file doesn't exists!".format('targets_format', value) # depends on [control=['if'], data=['value']]
self.__targets_format = value |
async def transform(self, file):
"""Function called when a file need to be transformed."""
path = get_path(self.dest, file)
await self.loop.run_in_executor(self.exe, write_file, path, file.contents)
return file | <ast.AsyncFunctionDef object at 0x7da1b24fdab0> | keyword[async] keyword[def] identifier[transform] ( identifier[self] , identifier[file] ):
literal[string]
identifier[path] = identifier[get_path] ( identifier[self] . identifier[dest] , identifier[file] )
keyword[await] identifier[self] . identifier[loop] . identifier[run_in_executor] ( identifier[self] . identifier[exe] , identifier[write_file] , identifier[path] , identifier[file] . identifier[contents] )
keyword[return] identifier[file] | async def transform(self, file):
"""Function called when a file need to be transformed."""
path = get_path(self.dest, file)
await self.loop.run_in_executor(self.exe, write_file, path, file.contents)
return file |
def set_user_licenses(self, user, add=None, remove=None):
"""Implements: assignLicense
https://msdn.microsoft.com/library/azure/ad/graph/api/functions-and-actions#assignLicense
"add" is a dictionary of licence sku id's that reference an
array of disabled plan id's
add = { '<license-sku-id>': ['<disabled-plan-id'>, ...]
"remove" is an array of license sku id's
remove = ['<license-sku-id'>, ...]
""" # noqa
url = '/users/%s/assignLicense' % (user)
add_licenses = []
if add:
for l in add:
add_licenses.append({
'skuId': l,
'disabledPlans': add[l]
})
body = {
'addLicenses': add_licenses,
'removeLicenses': remove if remove else []
}
data = self.post_resource(url, json=body)
return data | def function[set_user_licenses, parameter[self, user, add, remove]]:
constant[Implements: assignLicense
https://msdn.microsoft.com/library/azure/ad/graph/api/functions-and-actions#assignLicense
"add" is a dictionary of licence sku id's that reference an
array of disabled plan id's
add = { '<license-sku-id>': ['<disabled-plan-id'>, ...]
"remove" is an array of license sku id's
remove = ['<license-sku-id'>, ...]
]
variable[url] assign[=] binary_operation[constant[/users/%s/assignLicense] <ast.Mod object at 0x7da2590d6920> name[user]]
variable[add_licenses] assign[=] list[[]]
if name[add] begin[:]
for taget[name[l]] in starred[name[add]] begin[:]
call[name[add_licenses].append, parameter[dictionary[[<ast.Constant object at 0x7da204621120>, <ast.Constant object at 0x7da204620130>], [<ast.Name object at 0x7da204623970>, <ast.Subscript object at 0x7da204620700>]]]]
variable[body] assign[=] dictionary[[<ast.Constant object at 0x7da204623a30>, <ast.Constant object at 0x7da204621ea0>], [<ast.Name object at 0x7da204622560>, <ast.IfExp object at 0x7da204621bd0>]]
variable[data] assign[=] call[name[self].post_resource, parameter[name[url]]]
return[name[data]] | keyword[def] identifier[set_user_licenses] ( identifier[self] , identifier[user] , identifier[add] = keyword[None] , identifier[remove] = keyword[None] ):
literal[string]
identifier[url] = literal[string] %( identifier[user] )
identifier[add_licenses] =[]
keyword[if] identifier[add] :
keyword[for] identifier[l] keyword[in] identifier[add] :
identifier[add_licenses] . identifier[append] ({
literal[string] : identifier[l] ,
literal[string] : identifier[add] [ identifier[l] ]
})
identifier[body] ={
literal[string] : identifier[add_licenses] ,
literal[string] : identifier[remove] keyword[if] identifier[remove] keyword[else] []
}
identifier[data] = identifier[self] . identifier[post_resource] ( identifier[url] , identifier[json] = identifier[body] )
keyword[return] identifier[data] | def set_user_licenses(self, user, add=None, remove=None):
"""Implements: assignLicense
https://msdn.microsoft.com/library/azure/ad/graph/api/functions-and-actions#assignLicense
"add" is a dictionary of licence sku id's that reference an
array of disabled plan id's
add = { '<license-sku-id>': ['<disabled-plan-id'>, ...]
"remove" is an array of license sku id's
remove = ['<license-sku-id'>, ...]
""" # noqa
url = '/users/%s/assignLicense' % user
add_licenses = []
if add:
for l in add:
add_licenses.append({'skuId': l, 'disabledPlans': add[l]}) # depends on [control=['for'], data=['l']] # depends on [control=['if'], data=[]]
body = {'addLicenses': add_licenses, 'removeLicenses': remove if remove else []}
data = self.post_resource(url, json=body)
return data |
def get_decimal_precision(number):
"""Return maximum precision of a decimal instance's fractional part.
Precision is extracted from the fractional part only.
"""
# Copied from: https://github.com/mahmoud/boltons/pull/59
assert isinstance(number, decimal.Decimal)
decimal_tuple = number.normalize().as_tuple()
if decimal_tuple.exponent >= 0:
return 0
return abs(decimal_tuple.exponent) | def function[get_decimal_precision, parameter[number]]:
constant[Return maximum precision of a decimal instance's fractional part.
Precision is extracted from the fractional part only.
]
assert[call[name[isinstance], parameter[name[number], name[decimal].Decimal]]]
variable[decimal_tuple] assign[=] call[call[name[number].normalize, parameter[]].as_tuple, parameter[]]
if compare[name[decimal_tuple].exponent greater_or_equal[>=] constant[0]] begin[:]
return[constant[0]]
return[call[name[abs], parameter[name[decimal_tuple].exponent]]] | keyword[def] identifier[get_decimal_precision] ( identifier[number] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[number] , identifier[decimal] . identifier[Decimal] )
identifier[decimal_tuple] = identifier[number] . identifier[normalize] (). identifier[as_tuple] ()
keyword[if] identifier[decimal_tuple] . identifier[exponent] >= literal[int] :
keyword[return] literal[int]
keyword[return] identifier[abs] ( identifier[decimal_tuple] . identifier[exponent] ) | def get_decimal_precision(number):
"""Return maximum precision of a decimal instance's fractional part.
Precision is extracted from the fractional part only.
"""
# Copied from: https://github.com/mahmoud/boltons/pull/59
assert isinstance(number, decimal.Decimal)
decimal_tuple = number.normalize().as_tuple()
if decimal_tuple.exponent >= 0:
return 0 # depends on [control=['if'], data=[]]
return abs(decimal_tuple.exponent) |
def counts_map(self):
"""Return 3-D counts map for this component as a Map object.
Returns
-------
map : `~fermipy.skymap.MapBase`
"""
try:
if isinstance(self.like, gtutils.SummedLikelihood):
cmap = self.like.components[0].logLike.countsMap()
p_method = cmap.projection().method()
else:
cmap = self.like.logLike.countsMap()
p_method = cmap.projection().method()
except Exception:
p_method = 0
if p_method == 0: # WCS
z = cmap.data()
z = np.array(z).reshape(self.enumbins, self.npix, self.npix)
return WcsNDMap(copy.deepcopy(self.geom), z)
elif p_method == 1: # HPX
z = cmap.data()
z = np.array(z).reshape(self.enumbins, np.max(self.geom.npix))
return HpxNDMap(copy.deepcopy(self.geom), z)
else:
self.logger.error('Did not recognize CountsMap type %i' % p_method,
exc_info=True)
return None | def function[counts_map, parameter[self]]:
constant[Return 3-D counts map for this component as a Map object.
Returns
-------
map : `~fermipy.skymap.MapBase`
]
<ast.Try object at 0x7da18f813130>
if compare[name[p_method] equal[==] constant[0]] begin[:]
variable[z] assign[=] call[name[cmap].data, parameter[]]
variable[z] assign[=] call[call[name[np].array, parameter[name[z]]].reshape, parameter[name[self].enumbins, name[self].npix, name[self].npix]]
return[call[name[WcsNDMap], parameter[call[name[copy].deepcopy, parameter[name[self].geom]], name[z]]]]
return[constant[None]] | keyword[def] identifier[counts_map] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[if] identifier[isinstance] ( identifier[self] . identifier[like] , identifier[gtutils] . identifier[SummedLikelihood] ):
identifier[cmap] = identifier[self] . identifier[like] . identifier[components] [ literal[int] ]. identifier[logLike] . identifier[countsMap] ()
identifier[p_method] = identifier[cmap] . identifier[projection] (). identifier[method] ()
keyword[else] :
identifier[cmap] = identifier[self] . identifier[like] . identifier[logLike] . identifier[countsMap] ()
identifier[p_method] = identifier[cmap] . identifier[projection] (). identifier[method] ()
keyword[except] identifier[Exception] :
identifier[p_method] = literal[int]
keyword[if] identifier[p_method] == literal[int] :
identifier[z] = identifier[cmap] . identifier[data] ()
identifier[z] = identifier[np] . identifier[array] ( identifier[z] ). identifier[reshape] ( identifier[self] . identifier[enumbins] , identifier[self] . identifier[npix] , identifier[self] . identifier[npix] )
keyword[return] identifier[WcsNDMap] ( identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[geom] ), identifier[z] )
keyword[elif] identifier[p_method] == literal[int] :
identifier[z] = identifier[cmap] . identifier[data] ()
identifier[z] = identifier[np] . identifier[array] ( identifier[z] ). identifier[reshape] ( identifier[self] . identifier[enumbins] , identifier[np] . identifier[max] ( identifier[self] . identifier[geom] . identifier[npix] ))
keyword[return] identifier[HpxNDMap] ( identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[geom] ), identifier[z] )
keyword[else] :
identifier[self] . identifier[logger] . identifier[error] ( literal[string] % identifier[p_method] ,
identifier[exc_info] = keyword[True] )
keyword[return] keyword[None] | def counts_map(self):
"""Return 3-D counts map for this component as a Map object.
Returns
-------
map : `~fermipy.skymap.MapBase`
"""
try:
if isinstance(self.like, gtutils.SummedLikelihood):
cmap = self.like.components[0].logLike.countsMap()
p_method = cmap.projection().method() # depends on [control=['if'], data=[]]
else:
cmap = self.like.logLike.countsMap()
p_method = cmap.projection().method() # depends on [control=['try'], data=[]]
except Exception:
p_method = 0 # depends on [control=['except'], data=[]]
if p_method == 0: # WCS
z = cmap.data()
z = np.array(z).reshape(self.enumbins, self.npix, self.npix)
return WcsNDMap(copy.deepcopy(self.geom), z) # depends on [control=['if'], data=[]]
elif p_method == 1: # HPX
z = cmap.data()
z = np.array(z).reshape(self.enumbins, np.max(self.geom.npix))
return HpxNDMap(copy.deepcopy(self.geom), z) # depends on [control=['if'], data=[]]
else:
self.logger.error('Did not recognize CountsMap type %i' % p_method, exc_info=True)
return None |
def symmetrize_JMS_dict(C):
"""For a dictionary with JMS Wilson coefficients but keys that might not be
in the non-redundant basis, return a dictionary with keys from the basis
and values conjugated if necessary."""
wc_keys = set(wcxf.Basis['WET', 'JMS'].all_wcs)
Cs = {}
for op, v in C.items():
if '_' not in op or op in wc_keys:
Cs[op] = v
continue
name, ind = op.split('_')
if name in C_symm_keys[5]:
i, j, k, l = ind
indnew = ''.join([j, i, l, k])
Cs['_'.join([name, indnew])] = v.conjugate()
elif name in C_symm_keys[41]:
i, j, k, l = ind
indnew = ''.join([k, l, i, j])
Cs['_'.join([name, indnew])] = v
elif name in C_symm_keys[4]:
i, j, k, l = ind
indnew = ''.join([l, k, j, i])
newname = '_'.join([name, indnew])
if newname in wc_keys:
Cs[newname] = v.conjugate()
else:
indnew = ''.join([j, i, l, k])
newname = '_'.join([name, indnew])
if newname in wc_keys:
Cs[newname] = v.conjugate()
else:
indnew = ''.join([k, l, i, j])
newname = '_'.join([name, indnew])
Cs[newname] = v
elif name in C_symm_keys[9]:
i, j, k, l = ind
indnew = ''.join([j, i, k, l])
Cs['_'.join([name, indnew])] = -v
return Cs | def function[symmetrize_JMS_dict, parameter[C]]:
constant[For a dictionary with JMS Wilson coefficients but keys that might not be
in the non-redundant basis, return a dictionary with keys from the basis
and values conjugated if necessary.]
variable[wc_keys] assign[=] call[name[set], parameter[call[name[wcxf].Basis][tuple[[<ast.Constant object at 0x7da1b1af3d90>, <ast.Constant object at 0x7da1b1af3d60>]]].all_wcs]]
variable[Cs] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b1af1810>, <ast.Name object at 0x7da1b1af1930>]]] in starred[call[name[C].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b1af1840> begin[:]
call[name[Cs]][name[op]] assign[=] name[v]
continue
<ast.Tuple object at 0x7da1b1af2800> assign[=] call[name[op].split, parameter[constant[_]]]
if compare[name[name] in call[name[C_symm_keys]][constant[5]]] begin[:]
<ast.Tuple object at 0x7da1b1af2530> assign[=] name[ind]
variable[indnew] assign[=] call[constant[].join, parameter[list[[<ast.Name object at 0x7da1b1af3ee0>, <ast.Name object at 0x7da1b1af3f10>, <ast.Name object at 0x7da1b1af3f40>, <ast.Name object at 0x7da1b1af3f70>]]]]
call[name[Cs]][call[constant[_].join, parameter[list[[<ast.Name object at 0x7da1b1af0610>, <ast.Name object at 0x7da1b1af1570>]]]]] assign[=] call[name[v].conjugate, parameter[]]
return[name[Cs]] | keyword[def] identifier[symmetrize_JMS_dict] ( identifier[C] ):
literal[string]
identifier[wc_keys] = identifier[set] ( identifier[wcxf] . identifier[Basis] [ literal[string] , literal[string] ]. identifier[all_wcs] )
identifier[Cs] ={}
keyword[for] identifier[op] , identifier[v] keyword[in] identifier[C] . identifier[items] ():
keyword[if] literal[string] keyword[not] keyword[in] identifier[op] keyword[or] identifier[op] keyword[in] identifier[wc_keys] :
identifier[Cs] [ identifier[op] ]= identifier[v]
keyword[continue]
identifier[name] , identifier[ind] = identifier[op] . identifier[split] ( literal[string] )
keyword[if] identifier[name] keyword[in] identifier[C_symm_keys] [ literal[int] ]:
identifier[i] , identifier[j] , identifier[k] , identifier[l] = identifier[ind]
identifier[indnew] = literal[string] . identifier[join] ([ identifier[j] , identifier[i] , identifier[l] , identifier[k] ])
identifier[Cs] [ literal[string] . identifier[join] ([ identifier[name] , identifier[indnew] ])]= identifier[v] . identifier[conjugate] ()
keyword[elif] identifier[name] keyword[in] identifier[C_symm_keys] [ literal[int] ]:
identifier[i] , identifier[j] , identifier[k] , identifier[l] = identifier[ind]
identifier[indnew] = literal[string] . identifier[join] ([ identifier[k] , identifier[l] , identifier[i] , identifier[j] ])
identifier[Cs] [ literal[string] . identifier[join] ([ identifier[name] , identifier[indnew] ])]= identifier[v]
keyword[elif] identifier[name] keyword[in] identifier[C_symm_keys] [ literal[int] ]:
identifier[i] , identifier[j] , identifier[k] , identifier[l] = identifier[ind]
identifier[indnew] = literal[string] . identifier[join] ([ identifier[l] , identifier[k] , identifier[j] , identifier[i] ])
identifier[newname] = literal[string] . identifier[join] ([ identifier[name] , identifier[indnew] ])
keyword[if] identifier[newname] keyword[in] identifier[wc_keys] :
identifier[Cs] [ identifier[newname] ]= identifier[v] . identifier[conjugate] ()
keyword[else] :
identifier[indnew] = literal[string] . identifier[join] ([ identifier[j] , identifier[i] , identifier[l] , identifier[k] ])
identifier[newname] = literal[string] . identifier[join] ([ identifier[name] , identifier[indnew] ])
keyword[if] identifier[newname] keyword[in] identifier[wc_keys] :
identifier[Cs] [ identifier[newname] ]= identifier[v] . identifier[conjugate] ()
keyword[else] :
identifier[indnew] = literal[string] . identifier[join] ([ identifier[k] , identifier[l] , identifier[i] , identifier[j] ])
identifier[newname] = literal[string] . identifier[join] ([ identifier[name] , identifier[indnew] ])
identifier[Cs] [ identifier[newname] ]= identifier[v]
keyword[elif] identifier[name] keyword[in] identifier[C_symm_keys] [ literal[int] ]:
identifier[i] , identifier[j] , identifier[k] , identifier[l] = identifier[ind]
identifier[indnew] = literal[string] . identifier[join] ([ identifier[j] , identifier[i] , identifier[k] , identifier[l] ])
identifier[Cs] [ literal[string] . identifier[join] ([ identifier[name] , identifier[indnew] ])]=- identifier[v]
keyword[return] identifier[Cs] | def symmetrize_JMS_dict(C):
"""For a dictionary with JMS Wilson coefficients but keys that might not be
in the non-redundant basis, return a dictionary with keys from the basis
and values conjugated if necessary."""
wc_keys = set(wcxf.Basis['WET', 'JMS'].all_wcs)
Cs = {}
for (op, v) in C.items():
if '_' not in op or op in wc_keys:
Cs[op] = v
continue # depends on [control=['if'], data=[]]
(name, ind) = op.split('_')
if name in C_symm_keys[5]:
(i, j, k, l) = ind
indnew = ''.join([j, i, l, k])
Cs['_'.join([name, indnew])] = v.conjugate() # depends on [control=['if'], data=['name']]
elif name in C_symm_keys[41]:
(i, j, k, l) = ind
indnew = ''.join([k, l, i, j])
Cs['_'.join([name, indnew])] = v # depends on [control=['if'], data=['name']]
elif name in C_symm_keys[4]:
(i, j, k, l) = ind
indnew = ''.join([l, k, j, i])
newname = '_'.join([name, indnew])
if newname in wc_keys:
Cs[newname] = v.conjugate() # depends on [control=['if'], data=['newname']]
else:
indnew = ''.join([j, i, l, k])
newname = '_'.join([name, indnew])
if newname in wc_keys:
Cs[newname] = v.conjugate() # depends on [control=['if'], data=['newname']]
else:
indnew = ''.join([k, l, i, j])
newname = '_'.join([name, indnew])
Cs[newname] = v # depends on [control=['if'], data=['name']]
elif name in C_symm_keys[9]:
(i, j, k, l) = ind
indnew = ''.join([j, i, k, l])
Cs['_'.join([name, indnew])] = -v # depends on [control=['if'], data=['name']] # depends on [control=['for'], data=[]]
return Cs |
def _expand_coord(self, lat, lon, lmax_calc, degrees):
"""Evaluate the function at the coordinates lat and lon."""
if self.normalization == '4pi':
norm = 1
elif self.normalization == 'schmidt':
norm = 2
elif self.normalization == 'unnorm':
norm = 3
elif self.normalization == 'ortho':
norm = 4
else:
raise ValueError(
"Normalization must be '4pi', 'ortho', 'schmidt', or " +
"'unnorm'. Input value was {:s}"
.format(repr(self.normalization)))
if degrees is True:
latin = lat
lonin = lon
else:
latin = _np.rad2deg(lat)
lonin = _np.rad2deg(lon)
if type(lat) is not type(lon):
raise ValueError('lat and lon must be of the same type. ' +
'Input types are {:s} and {:s}'
.format(repr(type(lat)), repr(type(lon))))
if type(lat) is int or type(lat) is float or type(lat) is _np.float_:
return _shtools.MakeGridPoint(self.coeffs, lat=latin, lon=lonin,
lmax=lmax_calc, norm=norm,
csphase=self.csphase)
elif type(lat) is _np.ndarray:
values = _np.empty_like(lat, dtype=float)
for v, latitude, longitude in _np.nditer([values, latin, lonin],
op_flags=['readwrite']):
v[...] = _shtools.MakeGridPoint(self.coeffs, lat=latitude,
lon=longitude,
lmax=lmax_calc, norm=norm,
csphase=self.csphase)
return values
elif type(lat) is list:
values = []
for latitude, longitude in zip(latin, lonin):
values.append(
_shtools.MakeGridPoint(self.coeffs, lat=latitude,
lon=longitude,
lmax=lmax_calc, norm=norm,
csphase=self.csphase))
return values
else:
raise ValueError('lat and lon must be either an int, float, ' +
'ndarray, or list. ' +
'Input types are {:s} and {:s}'
.format(repr(type(lat)), repr(type(lon)))) | def function[_expand_coord, parameter[self, lat, lon, lmax_calc, degrees]]:
constant[Evaluate the function at the coordinates lat and lon.]
if compare[name[self].normalization equal[==] constant[4pi]] begin[:]
variable[norm] assign[=] constant[1]
if compare[name[degrees] is constant[True]] begin[:]
variable[latin] assign[=] name[lat]
variable[lonin] assign[=] name[lon]
if compare[call[name[type], parameter[name[lat]]] is_not call[name[type], parameter[name[lon]]]] begin[:]
<ast.Raise object at 0x7da20c6c4700>
if <ast.BoolOp object at 0x7da20c6c4a00> begin[:]
return[call[name[_shtools].MakeGridPoint, parameter[name[self].coeffs]]] | keyword[def] identifier[_expand_coord] ( identifier[self] , identifier[lat] , identifier[lon] , identifier[lmax_calc] , identifier[degrees] ):
literal[string]
keyword[if] identifier[self] . identifier[normalization] == literal[string] :
identifier[norm] = literal[int]
keyword[elif] identifier[self] . identifier[normalization] == literal[string] :
identifier[norm] = literal[int]
keyword[elif] identifier[self] . identifier[normalization] == literal[string] :
identifier[norm] = literal[int]
keyword[elif] identifier[self] . identifier[normalization] == literal[string] :
identifier[norm] = literal[int]
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string] +
literal[string]
. identifier[format] ( identifier[repr] ( identifier[self] . identifier[normalization] )))
keyword[if] identifier[degrees] keyword[is] keyword[True] :
identifier[latin] = identifier[lat]
identifier[lonin] = identifier[lon]
keyword[else] :
identifier[latin] = identifier[_np] . identifier[rad2deg] ( identifier[lat] )
identifier[lonin] = identifier[_np] . identifier[rad2deg] ( identifier[lon] )
keyword[if] identifier[type] ( identifier[lat] ) keyword[is] keyword[not] identifier[type] ( identifier[lon] ):
keyword[raise] identifier[ValueError] ( literal[string] +
literal[string]
. identifier[format] ( identifier[repr] ( identifier[type] ( identifier[lat] )), identifier[repr] ( identifier[type] ( identifier[lon] ))))
keyword[if] identifier[type] ( identifier[lat] ) keyword[is] identifier[int] keyword[or] identifier[type] ( identifier[lat] ) keyword[is] identifier[float] keyword[or] identifier[type] ( identifier[lat] ) keyword[is] identifier[_np] . identifier[float_] :
keyword[return] identifier[_shtools] . identifier[MakeGridPoint] ( identifier[self] . identifier[coeffs] , identifier[lat] = identifier[latin] , identifier[lon] = identifier[lonin] ,
identifier[lmax] = identifier[lmax_calc] , identifier[norm] = identifier[norm] ,
identifier[csphase] = identifier[self] . identifier[csphase] )
keyword[elif] identifier[type] ( identifier[lat] ) keyword[is] identifier[_np] . identifier[ndarray] :
identifier[values] = identifier[_np] . identifier[empty_like] ( identifier[lat] , identifier[dtype] = identifier[float] )
keyword[for] identifier[v] , identifier[latitude] , identifier[longitude] keyword[in] identifier[_np] . identifier[nditer] ([ identifier[values] , identifier[latin] , identifier[lonin] ],
identifier[op_flags] =[ literal[string] ]):
identifier[v] [...]= identifier[_shtools] . identifier[MakeGridPoint] ( identifier[self] . identifier[coeffs] , identifier[lat] = identifier[latitude] ,
identifier[lon] = identifier[longitude] ,
identifier[lmax] = identifier[lmax_calc] , identifier[norm] = identifier[norm] ,
identifier[csphase] = identifier[self] . identifier[csphase] )
keyword[return] identifier[values]
keyword[elif] identifier[type] ( identifier[lat] ) keyword[is] identifier[list] :
identifier[values] =[]
keyword[for] identifier[latitude] , identifier[longitude] keyword[in] identifier[zip] ( identifier[latin] , identifier[lonin] ):
identifier[values] . identifier[append] (
identifier[_shtools] . identifier[MakeGridPoint] ( identifier[self] . identifier[coeffs] , identifier[lat] = identifier[latitude] ,
identifier[lon] = identifier[longitude] ,
identifier[lmax] = identifier[lmax_calc] , identifier[norm] = identifier[norm] ,
identifier[csphase] = identifier[self] . identifier[csphase] ))
keyword[return] identifier[values]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] +
literal[string] +
literal[string]
. identifier[format] ( identifier[repr] ( identifier[type] ( identifier[lat] )), identifier[repr] ( identifier[type] ( identifier[lon] )))) | def _expand_coord(self, lat, lon, lmax_calc, degrees):
"""Evaluate the function at the coordinates lat and lon."""
if self.normalization == '4pi':
norm = 1 # depends on [control=['if'], data=[]]
elif self.normalization == 'schmidt':
norm = 2 # depends on [control=['if'], data=[]]
elif self.normalization == 'unnorm':
norm = 3 # depends on [control=['if'], data=[]]
elif self.normalization == 'ortho':
norm = 4 # depends on [control=['if'], data=[]]
else:
raise ValueError("Normalization must be '4pi', 'ortho', 'schmidt', or " + "'unnorm'. Input value was {:s}".format(repr(self.normalization)))
if degrees is True:
latin = lat
lonin = lon # depends on [control=['if'], data=[]]
else:
latin = _np.rad2deg(lat)
lonin = _np.rad2deg(lon)
if type(lat) is not type(lon):
raise ValueError('lat and lon must be of the same type. ' + 'Input types are {:s} and {:s}'.format(repr(type(lat)), repr(type(lon)))) # depends on [control=['if'], data=[]]
if type(lat) is int or type(lat) is float or type(lat) is _np.float_:
return _shtools.MakeGridPoint(self.coeffs, lat=latin, lon=lonin, lmax=lmax_calc, norm=norm, csphase=self.csphase) # depends on [control=['if'], data=[]]
elif type(lat) is _np.ndarray:
values = _np.empty_like(lat, dtype=float)
for (v, latitude, longitude) in _np.nditer([values, latin, lonin], op_flags=['readwrite']):
v[...] = _shtools.MakeGridPoint(self.coeffs, lat=latitude, lon=longitude, lmax=lmax_calc, norm=norm, csphase=self.csphase) # depends on [control=['for'], data=[]]
return values # depends on [control=['if'], data=[]]
elif type(lat) is list:
values = []
for (latitude, longitude) in zip(latin, lonin):
values.append(_shtools.MakeGridPoint(self.coeffs, lat=latitude, lon=longitude, lmax=lmax_calc, norm=norm, csphase=self.csphase)) # depends on [control=['for'], data=[]]
return values # depends on [control=['if'], data=[]]
else:
raise ValueError('lat and lon must be either an int, float, ' + 'ndarray, or list. ' + 'Input types are {:s} and {:s}'.format(repr(type(lat)), repr(type(lon)))) |
def T11(word, rules):
'''If a VVV sequence contains a /u,y/-final diphthong, insert a syllable
boundary between the diphthong and the third vowel.'''
WORD = word
offset = 0
for vvv in precedence_sequences(WORD):
i = vvv.start(1) + (1 if vvv.group(1)[-1] in 'uyUY' else 2) + offset
WORD = WORD[:i] + '.' + WORD[i:]
offset += 1
rules += ' T11' if word != WORD else ''
return WORD, rules | def function[T11, parameter[word, rules]]:
constant[If a VVV sequence contains a /u,y/-final diphthong, insert a syllable
boundary between the diphthong and the third vowel.]
variable[WORD] assign[=] name[word]
variable[offset] assign[=] constant[0]
for taget[name[vvv]] in starred[call[name[precedence_sequences], parameter[name[WORD]]]] begin[:]
variable[i] assign[=] binary_operation[binary_operation[call[name[vvv].start, parameter[constant[1]]] + <ast.IfExp object at 0x7da1b1138f10>] + name[offset]]
variable[WORD] assign[=] binary_operation[binary_operation[call[name[WORD]][<ast.Slice object at 0x7da1b1192650>] + constant[.]] + call[name[WORD]][<ast.Slice object at 0x7da1b1191780>]]
<ast.AugAssign object at 0x7da1b11913c0>
<ast.AugAssign object at 0x7da1b1191ff0>
return[tuple[[<ast.Name object at 0x7da1b1193070>, <ast.Name object at 0x7da1b11934c0>]]] | keyword[def] identifier[T11] ( identifier[word] , identifier[rules] ):
literal[string]
identifier[WORD] = identifier[word]
identifier[offset] = literal[int]
keyword[for] identifier[vvv] keyword[in] identifier[precedence_sequences] ( identifier[WORD] ):
identifier[i] = identifier[vvv] . identifier[start] ( literal[int] )+( literal[int] keyword[if] identifier[vvv] . identifier[group] ( literal[int] )[- literal[int] ] keyword[in] literal[string] keyword[else] literal[int] )+ identifier[offset]
identifier[WORD] = identifier[WORD] [: identifier[i] ]+ literal[string] + identifier[WORD] [ identifier[i] :]
identifier[offset] += literal[int]
identifier[rules] += literal[string] keyword[if] identifier[word] != identifier[WORD] keyword[else] literal[string]
keyword[return] identifier[WORD] , identifier[rules] | def T11(word, rules):
"""If a VVV sequence contains a /u,y/-final diphthong, insert a syllable
boundary between the diphthong and the third vowel."""
WORD = word
offset = 0
for vvv in precedence_sequences(WORD):
i = vvv.start(1) + (1 if vvv.group(1)[-1] in 'uyUY' else 2) + offset
WORD = WORD[:i] + '.' + WORD[i:]
offset += 1 # depends on [control=['for'], data=['vvv']]
rules += ' T11' if word != WORD else ''
return (WORD, rules) |
def _list_key(self, key):
"""
boilerplate
"""
ret = []
for msg_json in self.client.lrange(key, 0, -1):
ret.append(self._fromJSON(msg_json))
return ret | def function[_list_key, parameter[self, key]]:
constant[
boilerplate
]
variable[ret] assign[=] list[[]]
for taget[name[msg_json]] in starred[call[name[self].client.lrange, parameter[name[key], constant[0], <ast.UnaryOp object at 0x7da1afea89a0>]]] begin[:]
call[name[ret].append, parameter[call[name[self]._fromJSON, parameter[name[msg_json]]]]]
return[name[ret]] | keyword[def] identifier[_list_key] ( identifier[self] , identifier[key] ):
literal[string]
identifier[ret] =[]
keyword[for] identifier[msg_json] keyword[in] identifier[self] . identifier[client] . identifier[lrange] ( identifier[key] , literal[int] ,- literal[int] ):
identifier[ret] . identifier[append] ( identifier[self] . identifier[_fromJSON] ( identifier[msg_json] ))
keyword[return] identifier[ret] | def _list_key(self, key):
"""
boilerplate
"""
ret = []
for msg_json in self.client.lrange(key, 0, -1):
ret.append(self._fromJSON(msg_json)) # depends on [control=['for'], data=['msg_json']]
return ret |
def connect(self, service_name):
"""Connect to a SNEP server. This needs only be called to
connect to a server other than the Default SNEP Server at
`urn:nfc:sn:snep` or if the client wants to send multiple
requests with a single connection.
"""
self.close()
self.socket = nfc.llcp.Socket(self.llc, nfc.llcp.DATA_LINK_CONNECTION)
self.socket.connect(service_name)
self.send_miu = self.socket.getsockopt(nfc.llcp.SO_SNDMIU) | def function[connect, parameter[self, service_name]]:
constant[Connect to a SNEP server. This needs only be called to
connect to a server other than the Default SNEP Server at
`urn:nfc:sn:snep` or if the client wants to send multiple
requests with a single connection.
]
call[name[self].close, parameter[]]
name[self].socket assign[=] call[name[nfc].llcp.Socket, parameter[name[self].llc, name[nfc].llcp.DATA_LINK_CONNECTION]]
call[name[self].socket.connect, parameter[name[service_name]]]
name[self].send_miu assign[=] call[name[self].socket.getsockopt, parameter[name[nfc].llcp.SO_SNDMIU]] | keyword[def] identifier[connect] ( identifier[self] , identifier[service_name] ):
literal[string]
identifier[self] . identifier[close] ()
identifier[self] . identifier[socket] = identifier[nfc] . identifier[llcp] . identifier[Socket] ( identifier[self] . identifier[llc] , identifier[nfc] . identifier[llcp] . identifier[DATA_LINK_CONNECTION] )
identifier[self] . identifier[socket] . identifier[connect] ( identifier[service_name] )
identifier[self] . identifier[send_miu] = identifier[self] . identifier[socket] . identifier[getsockopt] ( identifier[nfc] . identifier[llcp] . identifier[SO_SNDMIU] ) | def connect(self, service_name):
"""Connect to a SNEP server. This needs only be called to
connect to a server other than the Default SNEP Server at
`urn:nfc:sn:snep` or if the client wants to send multiple
requests with a single connection.
"""
self.close()
self.socket = nfc.llcp.Socket(self.llc, nfc.llcp.DATA_LINK_CONNECTION)
self.socket.connect(service_name)
self.send_miu = self.socket.getsockopt(nfc.llcp.SO_SNDMIU) |
def download(cls, filename, input_dir, dl_dir=None):
"""Provide potentially streaming download from S3 using gof3r
or the AWS CLI.
"""
file_info = cls.parse_remote(filename)
if not dl_dir:
dl_dir = os.path.join(input_dir, file_info.bucket,
os.path.dirname(file_info.key))
utils.safe_makedir(dl_dir)
out_file = os.path.join(dl_dir, os.path.basename(file_info.key))
if not utils.file_exists(out_file):
with file_transaction({}, out_file) as tx_out_file:
command, prog = cls._download_cl(filename)
if prog == "gof3r":
command.extend(["-p", tx_out_file])
elif prog == "awscli":
command.extend([tx_out_file])
else:
raise NotImplementedError(
"Unexpected download program %s" % prog)
subprocess.check_call(command)
return out_file | def function[download, parameter[cls, filename, input_dir, dl_dir]]:
constant[Provide potentially streaming download from S3 using gof3r
or the AWS CLI.
]
variable[file_info] assign[=] call[name[cls].parse_remote, parameter[name[filename]]]
if <ast.UnaryOp object at 0x7da1b19b9ea0> begin[:]
variable[dl_dir] assign[=] call[name[os].path.join, parameter[name[input_dir], name[file_info].bucket, call[name[os].path.dirname, parameter[name[file_info].key]]]]
call[name[utils].safe_makedir, parameter[name[dl_dir]]]
variable[out_file] assign[=] call[name[os].path.join, parameter[name[dl_dir], call[name[os].path.basename, parameter[name[file_info].key]]]]
if <ast.UnaryOp object at 0x7da20c6aad70> begin[:]
with call[name[file_transaction], parameter[dictionary[[], []], name[out_file]]] begin[:]
<ast.Tuple object at 0x7da20c6a9e40> assign[=] call[name[cls]._download_cl, parameter[name[filename]]]
if compare[name[prog] equal[==] constant[gof3r]] begin[:]
call[name[command].extend, parameter[list[[<ast.Constant object at 0x7da20c6aa170>, <ast.Name object at 0x7da20c6aa1a0>]]]]
call[name[subprocess].check_call, parameter[name[command]]]
return[name[out_file]] | keyword[def] identifier[download] ( identifier[cls] , identifier[filename] , identifier[input_dir] , identifier[dl_dir] = keyword[None] ):
literal[string]
identifier[file_info] = identifier[cls] . identifier[parse_remote] ( identifier[filename] )
keyword[if] keyword[not] identifier[dl_dir] :
identifier[dl_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[input_dir] , identifier[file_info] . identifier[bucket] ,
identifier[os] . identifier[path] . identifier[dirname] ( identifier[file_info] . identifier[key] ))
identifier[utils] . identifier[safe_makedir] ( identifier[dl_dir] )
identifier[out_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[dl_dir] , identifier[os] . identifier[path] . identifier[basename] ( identifier[file_info] . identifier[key] ))
keyword[if] keyword[not] identifier[utils] . identifier[file_exists] ( identifier[out_file] ):
keyword[with] identifier[file_transaction] ({}, identifier[out_file] ) keyword[as] identifier[tx_out_file] :
identifier[command] , identifier[prog] = identifier[cls] . identifier[_download_cl] ( identifier[filename] )
keyword[if] identifier[prog] == literal[string] :
identifier[command] . identifier[extend] ([ literal[string] , identifier[tx_out_file] ])
keyword[elif] identifier[prog] == literal[string] :
identifier[command] . identifier[extend] ([ identifier[tx_out_file] ])
keyword[else] :
keyword[raise] identifier[NotImplementedError] (
literal[string] % identifier[prog] )
identifier[subprocess] . identifier[check_call] ( identifier[command] )
keyword[return] identifier[out_file] | def download(cls, filename, input_dir, dl_dir=None):
"""Provide potentially streaming download from S3 using gof3r
or the AWS CLI.
"""
file_info = cls.parse_remote(filename)
if not dl_dir:
dl_dir = os.path.join(input_dir, file_info.bucket, os.path.dirname(file_info.key))
utils.safe_makedir(dl_dir) # depends on [control=['if'], data=[]]
out_file = os.path.join(dl_dir, os.path.basename(file_info.key))
if not utils.file_exists(out_file):
with file_transaction({}, out_file) as tx_out_file:
(command, prog) = cls._download_cl(filename)
if prog == 'gof3r':
command.extend(['-p', tx_out_file]) # depends on [control=['if'], data=[]]
elif prog == 'awscli':
command.extend([tx_out_file]) # depends on [control=['if'], data=[]]
else:
raise NotImplementedError('Unexpected download program %s' % prog)
subprocess.check_call(command) # depends on [control=['with'], data=['tx_out_file']] # depends on [control=['if'], data=[]]
return out_file |
def to_json(self):
"""Get date time as a dictionary."""
return {'year': self.year,
'month': self.month,
'day': self.day,
'hour': self.hour,
'minute': self.minute} | def function[to_json, parameter[self]]:
constant[Get date time as a dictionary.]
return[dictionary[[<ast.Constant object at 0x7da1b1274c70>, <ast.Constant object at 0x7da1b1277fa0>, <ast.Constant object at 0x7da1b1274ca0>, <ast.Constant object at 0x7da1b1277e20>, <ast.Constant object at 0x7da1b1274310>], [<ast.Attribute object at 0x7da1b1274a00>, <ast.Attribute object at 0x7da1b1274370>, <ast.Attribute object at 0x7da1b1274040>, <ast.Attribute object at 0x7da1b1274ee0>, <ast.Attribute object at 0x7da1b12754b0>]]] | keyword[def] identifier[to_json] ( identifier[self] ):
literal[string]
keyword[return] { literal[string] : identifier[self] . identifier[year] ,
literal[string] : identifier[self] . identifier[month] ,
literal[string] : identifier[self] . identifier[day] ,
literal[string] : identifier[self] . identifier[hour] ,
literal[string] : identifier[self] . identifier[minute] } | def to_json(self):
"""Get date time as a dictionary."""
return {'year': self.year, 'month': self.month, 'day': self.day, 'hour': self.hour, 'minute': self.minute} |
def visualize(self):
"""
Given a Manticore workspace, or trace file, highlight the basic blocks.
"""
if os.path.isfile(self.workspace):
t = threading.Thread(target=self.highlight_from_file,
args=(self.workspace,))
elif os.path.isdir(self.workspace):
t = threading.Thread(target=self.highlight_from_dir,
args=(self.workspace,))
t.start() | def function[visualize, parameter[self]]:
constant[
Given a Manticore workspace, or trace file, highlight the basic blocks.
]
if call[name[os].path.isfile, parameter[name[self].workspace]] begin[:]
variable[t] assign[=] call[name[threading].Thread, parameter[]]
call[name[t].start, parameter[]] | keyword[def] identifier[visualize] ( identifier[self] ):
literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[self] . identifier[workspace] ):
identifier[t] = identifier[threading] . identifier[Thread] ( identifier[target] = identifier[self] . identifier[highlight_from_file] ,
identifier[args] =( identifier[self] . identifier[workspace] ,))
keyword[elif] identifier[os] . identifier[path] . identifier[isdir] ( identifier[self] . identifier[workspace] ):
identifier[t] = identifier[threading] . identifier[Thread] ( identifier[target] = identifier[self] . identifier[highlight_from_dir] ,
identifier[args] =( identifier[self] . identifier[workspace] ,))
identifier[t] . identifier[start] () | def visualize(self):
"""
Given a Manticore workspace, or trace file, highlight the basic blocks.
"""
if os.path.isfile(self.workspace):
t = threading.Thread(target=self.highlight_from_file, args=(self.workspace,)) # depends on [control=['if'], data=[]]
elif os.path.isdir(self.workspace):
t = threading.Thread(target=self.highlight_from_dir, args=(self.workspace,)) # depends on [control=['if'], data=[]]
t.start() |
def set_execution_context(self, execution_context):
"""Set the ExecutionContext this async is executing under."""
if self._execution_context:
raise errors.AlreadyInContextError
self._execution_context = execution_context | def function[set_execution_context, parameter[self, execution_context]]:
constant[Set the ExecutionContext this async is executing under.]
if name[self]._execution_context begin[:]
<ast.Raise object at 0x7da18f721930>
name[self]._execution_context assign[=] name[execution_context] | keyword[def] identifier[set_execution_context] ( identifier[self] , identifier[execution_context] ):
literal[string]
keyword[if] identifier[self] . identifier[_execution_context] :
keyword[raise] identifier[errors] . identifier[AlreadyInContextError]
identifier[self] . identifier[_execution_context] = identifier[execution_context] | def set_execution_context(self, execution_context):
"""Set the ExecutionContext this async is executing under."""
if self._execution_context:
raise errors.AlreadyInContextError # depends on [control=['if'], data=[]]
self._execution_context = execution_context |
def resolve_python_path(path):
"""
Turns a python path like module.name.here:ClassName.SubClass into an object
"""
# Get the module
module_path, local_path = path.split(':', 1)
thing = importlib.import_module(module_path)
# Traverse the local sections
local_bits = local_path.split('.')
for bit in local_bits:
thing = getattr(thing, bit)
return thing | def function[resolve_python_path, parameter[path]]:
constant[
Turns a python path like module.name.here:ClassName.SubClass into an object
]
<ast.Tuple object at 0x7da18f58dcc0> assign[=] call[name[path].split, parameter[constant[:], constant[1]]]
variable[thing] assign[=] call[name[importlib].import_module, parameter[name[module_path]]]
variable[local_bits] assign[=] call[name[local_path].split, parameter[constant[.]]]
for taget[name[bit]] in starred[name[local_bits]] begin[:]
variable[thing] assign[=] call[name[getattr], parameter[name[thing], name[bit]]]
return[name[thing]] | keyword[def] identifier[resolve_python_path] ( identifier[path] ):
literal[string]
identifier[module_path] , identifier[local_path] = identifier[path] . identifier[split] ( literal[string] , literal[int] )
identifier[thing] = identifier[importlib] . identifier[import_module] ( identifier[module_path] )
identifier[local_bits] = identifier[local_path] . identifier[split] ( literal[string] )
keyword[for] identifier[bit] keyword[in] identifier[local_bits] :
identifier[thing] = identifier[getattr] ( identifier[thing] , identifier[bit] )
keyword[return] identifier[thing] | def resolve_python_path(path):
"""
Turns a python path like module.name.here:ClassName.SubClass into an object
"""
# Get the module
(module_path, local_path) = path.split(':', 1)
thing = importlib.import_module(module_path)
# Traverse the local sections
local_bits = local_path.split('.')
for bit in local_bits:
thing = getattr(thing, bit) # depends on [control=['for'], data=['bit']]
return thing |
def load_conditions(self, filename=None):
"""Read the initial conditions from a file and assign them to the
respective |StateSequence| and/or |LogSequence| objects handled by
the actual |Sequences| object.
If no filename or dirname is passed, the ones defined by the
|ConditionManager| stored in module |pub| are used.
"""
if self.hasconditions:
if not filename:
filename = self._conditiondefaultfilename
namespace = locals()
for seq in self.conditionsequences:
namespace[seq.name] = seq
namespace['model'] = self
code = hydpy.pub.conditionmanager.load_file(filename)
try:
# ToDo: raises an escape sequence deprecation sometimes
# ToDo: use runpy instead?
# ToDo: Move functionality to filetools.py?
exec(code)
except BaseException:
objecttools.augment_excmessage(
'While trying to gather initial conditions of element %s'
% objecttools.devicename(self)) | def function[load_conditions, parameter[self, filename]]:
constant[Read the initial conditions from a file and assign them to the
respective |StateSequence| and/or |LogSequence| objects handled by
the actual |Sequences| object.
If no filename or dirname is passed, the ones defined by the
|ConditionManager| stored in module |pub| are used.
]
if name[self].hasconditions begin[:]
if <ast.UnaryOp object at 0x7da1b0fe8f70> begin[:]
variable[filename] assign[=] name[self]._conditiondefaultfilename
variable[namespace] assign[=] call[name[locals], parameter[]]
for taget[name[seq]] in starred[name[self].conditionsequences] begin[:]
call[name[namespace]][name[seq].name] assign[=] name[seq]
call[name[namespace]][constant[model]] assign[=] name[self]
variable[code] assign[=] call[name[hydpy].pub.conditionmanager.load_file, parameter[name[filename]]]
<ast.Try object at 0x7da18bccbeb0> | keyword[def] identifier[load_conditions] ( identifier[self] , identifier[filename] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[hasconditions] :
keyword[if] keyword[not] identifier[filename] :
identifier[filename] = identifier[self] . identifier[_conditiondefaultfilename]
identifier[namespace] = identifier[locals] ()
keyword[for] identifier[seq] keyword[in] identifier[self] . identifier[conditionsequences] :
identifier[namespace] [ identifier[seq] . identifier[name] ]= identifier[seq]
identifier[namespace] [ literal[string] ]= identifier[self]
identifier[code] = identifier[hydpy] . identifier[pub] . identifier[conditionmanager] . identifier[load_file] ( identifier[filename] )
keyword[try] :
identifier[exec] ( identifier[code] )
keyword[except] identifier[BaseException] :
identifier[objecttools] . identifier[augment_excmessage] (
literal[string]
% identifier[objecttools] . identifier[devicename] ( identifier[self] )) | def load_conditions(self, filename=None):
"""Read the initial conditions from a file and assign them to the
respective |StateSequence| and/or |LogSequence| objects handled by
the actual |Sequences| object.
If no filename or dirname is passed, the ones defined by the
|ConditionManager| stored in module |pub| are used.
"""
if self.hasconditions:
if not filename:
filename = self._conditiondefaultfilename # depends on [control=['if'], data=[]]
namespace = locals()
for seq in self.conditionsequences:
namespace[seq.name] = seq # depends on [control=['for'], data=['seq']]
namespace['model'] = self
code = hydpy.pub.conditionmanager.load_file(filename)
try:
# ToDo: raises an escape sequence deprecation sometimes
# ToDo: use runpy instead?
# ToDo: Move functionality to filetools.py?
exec(code) # depends on [control=['try'], data=[]]
except BaseException:
objecttools.augment_excmessage('While trying to gather initial conditions of element %s' % objecttools.devicename(self)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] |
def unrank(n, sequence=string.ascii_lowercase):
"""Unrank n from sequence in colexicographical order.
>>> [''.join(unrank(i)) for i in range(8)]
['', 'a', 'b', 'ab', 'c', 'ac', 'bc', 'abc']
>>> unrank(299009)
['a', 'm', 'p', 's']
"""
return list(map(sequence.__getitem__, indexes(n))) | def function[unrank, parameter[n, sequence]]:
constant[Unrank n from sequence in colexicographical order.
>>> [''.join(unrank(i)) for i in range(8)]
['', 'a', 'b', 'ab', 'c', 'ac', 'bc', 'abc']
>>> unrank(299009)
['a', 'm', 'p', 's']
]
return[call[name[list], parameter[call[name[map], parameter[name[sequence].__getitem__, call[name[indexes], parameter[name[n]]]]]]]] | keyword[def] identifier[unrank] ( identifier[n] , identifier[sequence] = identifier[string] . identifier[ascii_lowercase] ):
literal[string]
keyword[return] identifier[list] ( identifier[map] ( identifier[sequence] . identifier[__getitem__] , identifier[indexes] ( identifier[n] ))) | def unrank(n, sequence=string.ascii_lowercase):
"""Unrank n from sequence in colexicographical order.
>>> [''.join(unrank(i)) for i in range(8)]
['', 'a', 'b', 'ab', 'c', 'ac', 'bc', 'abc']
>>> unrank(299009)
['a', 'm', 'p', 's']
"""
return list(map(sequence.__getitem__, indexes(n))) |
def add(self, synchronous=True, **kwargs):
"""Add provided Content View Component.
:param synchronous: What should happen if the server returns an HTTP
202 (accepted) status code? Wait for the task to complete if
``True``. Immediately return the server's response otherwise.
:param kwargs: Arguments to pass to requests.
:returns: The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` If the server responds with
an HTTP 4XX or 5XX message.
"""
kwargs = kwargs.copy() # shadow the passed-in kwargs
if 'data' not in kwargs:
# data is required
kwargs['data'] = dict()
if 'component_ids' not in kwargs['data']:
kwargs['data']['components'] = [_payload(self.get_fields(), self.get_values())]
kwargs.update(self._server_config.get_client_kwargs())
response = client.put(self.path('add'), **kwargs)
return _handle_response(response, self._server_config, synchronous) | def function[add, parameter[self, synchronous]]:
constant[Add provided Content View Component.
:param synchronous: What should happen if the server returns an HTTP
202 (accepted) status code? Wait for the task to complete if
``True``. Immediately return the server's response otherwise.
:param kwargs: Arguments to pass to requests.
:returns: The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` If the server responds with
an HTTP 4XX or 5XX message.
]
variable[kwargs] assign[=] call[name[kwargs].copy, parameter[]]
if compare[constant[data] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[kwargs]][constant[data]] assign[=] call[name[dict], parameter[]]
if compare[constant[component_ids] <ast.NotIn object at 0x7da2590d7190> call[name[kwargs]][constant[data]]] begin[:]
call[call[name[kwargs]][constant[data]]][constant[components]] assign[=] list[[<ast.Call object at 0x7da1b067ada0>]]
call[name[kwargs].update, parameter[call[name[self]._server_config.get_client_kwargs, parameter[]]]]
variable[response] assign[=] call[name[client].put, parameter[call[name[self].path, parameter[constant[add]]]]]
return[call[name[_handle_response], parameter[name[response], name[self]._server_config, name[synchronous]]]] | keyword[def] identifier[add] ( identifier[self] , identifier[synchronous] = keyword[True] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] = identifier[kwargs] . identifier[copy] ()
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= identifier[dict] ()
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] [ literal[string] ]:
identifier[kwargs] [ literal[string] ][ literal[string] ]=[ identifier[_payload] ( identifier[self] . identifier[get_fields] (), identifier[self] . identifier[get_values] ())]
identifier[kwargs] . identifier[update] ( identifier[self] . identifier[_server_config] . identifier[get_client_kwargs] ())
identifier[response] = identifier[client] . identifier[put] ( identifier[self] . identifier[path] ( literal[string] ),** identifier[kwargs] )
keyword[return] identifier[_handle_response] ( identifier[response] , identifier[self] . identifier[_server_config] , identifier[synchronous] ) | def add(self, synchronous=True, **kwargs):
"""Add provided Content View Component.
:param synchronous: What should happen if the server returns an HTTP
202 (accepted) status code? Wait for the task to complete if
``True``. Immediately return the server's response otherwise.
:param kwargs: Arguments to pass to requests.
:returns: The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` If the server responds with
an HTTP 4XX or 5XX message.
"""
kwargs = kwargs.copy() # shadow the passed-in kwargs
if 'data' not in kwargs:
# data is required
kwargs['data'] = dict() # depends on [control=['if'], data=['kwargs']]
if 'component_ids' not in kwargs['data']:
kwargs['data']['components'] = [_payload(self.get_fields(), self.get_values())] # depends on [control=['if'], data=[]]
kwargs.update(self._server_config.get_client_kwargs())
response = client.put(self.path('add'), **kwargs)
return _handle_response(response, self._server_config, synchronous) |
def background(self):
"""Only a getter on purpose. See the tests."""
if self._background is None:
self._background = GSBackgroundLayer()
self._background._foreground = self
return self._background | def function[background, parameter[self]]:
constant[Only a getter on purpose. See the tests.]
if compare[name[self]._background is constant[None]] begin[:]
name[self]._background assign[=] call[name[GSBackgroundLayer], parameter[]]
name[self]._background._foreground assign[=] name[self]
return[name[self]._background] | keyword[def] identifier[background] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_background] keyword[is] keyword[None] :
identifier[self] . identifier[_background] = identifier[GSBackgroundLayer] ()
identifier[self] . identifier[_background] . identifier[_foreground] = identifier[self]
keyword[return] identifier[self] . identifier[_background] | def background(self):
"""Only a getter on purpose. See the tests."""
if self._background is None:
self._background = GSBackgroundLayer()
self._background._foreground = self # depends on [control=['if'], data=[]]
return self._background |
def _conv_shape_tuple(self, lhs_shape, rhs_shape, strides, pads):
"""Compute the shape of a conv given input shapes in canonical order."""
if isinstance(pads, str):
pads = padtype_to_pads(lhs_shape[2:], rhs_shape[2:], strides, pads)
if len(pads) != len(lhs_shape) - 2:
msg = 'Wrong number of explicit pads for conv: expected {}, got {}.'
raise TypeError(msg.format(len(lhs_shape) - 2, len(pads)))
lhs_padded = onp.add(lhs_shape[2:], onp.add(*zip(*pads)))
out_space = onp.floor_divide(
onp.subtract(lhs_padded, rhs_shape[2:]), strides) + 1
out_space = onp.maximum(0, out_space)
out_shape = (lhs_shape[0], rhs_shape[0]) + tuple(out_space)
return tuple(out_shape) | def function[_conv_shape_tuple, parameter[self, lhs_shape, rhs_shape, strides, pads]]:
constant[Compute the shape of a conv given input shapes in canonical order.]
if call[name[isinstance], parameter[name[pads], name[str]]] begin[:]
variable[pads] assign[=] call[name[padtype_to_pads], parameter[call[name[lhs_shape]][<ast.Slice object at 0x7da1b2005630>], call[name[rhs_shape]][<ast.Slice object at 0x7da1b2004d00>], name[strides], name[pads]]]
if compare[call[name[len], parameter[name[pads]]] not_equal[!=] binary_operation[call[name[len], parameter[name[lhs_shape]]] - constant[2]]] begin[:]
variable[msg] assign[=] constant[Wrong number of explicit pads for conv: expected {}, got {}.]
<ast.Raise object at 0x7da1b20049d0>
variable[lhs_padded] assign[=] call[name[onp].add, parameter[call[name[lhs_shape]][<ast.Slice object at 0x7da1b2005150>], call[name[onp].add, parameter[<ast.Starred object at 0x7da1b1ff25f0>]]]]
variable[out_space] assign[=] binary_operation[call[name[onp].floor_divide, parameter[call[name[onp].subtract, parameter[name[lhs_padded], call[name[rhs_shape]][<ast.Slice object at 0x7da1b20fbb80>]]], name[strides]]] + constant[1]]
variable[out_space] assign[=] call[name[onp].maximum, parameter[constant[0], name[out_space]]]
variable[out_shape] assign[=] binary_operation[tuple[[<ast.Subscript object at 0x7da1b20fa200>, <ast.Subscript object at 0x7da1b20f93c0>]] + call[name[tuple], parameter[name[out_space]]]]
return[call[name[tuple], parameter[name[out_shape]]]] | keyword[def] identifier[_conv_shape_tuple] ( identifier[self] , identifier[lhs_shape] , identifier[rhs_shape] , identifier[strides] , identifier[pads] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[pads] , identifier[str] ):
identifier[pads] = identifier[padtype_to_pads] ( identifier[lhs_shape] [ literal[int] :], identifier[rhs_shape] [ literal[int] :], identifier[strides] , identifier[pads] )
keyword[if] identifier[len] ( identifier[pads] )!= identifier[len] ( identifier[lhs_shape] )- literal[int] :
identifier[msg] = literal[string]
keyword[raise] identifier[TypeError] ( identifier[msg] . identifier[format] ( identifier[len] ( identifier[lhs_shape] )- literal[int] , identifier[len] ( identifier[pads] )))
identifier[lhs_padded] = identifier[onp] . identifier[add] ( identifier[lhs_shape] [ literal[int] :], identifier[onp] . identifier[add] (* identifier[zip] (* identifier[pads] )))
identifier[out_space] = identifier[onp] . identifier[floor_divide] (
identifier[onp] . identifier[subtract] ( identifier[lhs_padded] , identifier[rhs_shape] [ literal[int] :]), identifier[strides] )+ literal[int]
identifier[out_space] = identifier[onp] . identifier[maximum] ( literal[int] , identifier[out_space] )
identifier[out_shape] =( identifier[lhs_shape] [ literal[int] ], identifier[rhs_shape] [ literal[int] ])+ identifier[tuple] ( identifier[out_space] )
keyword[return] identifier[tuple] ( identifier[out_shape] ) | def _conv_shape_tuple(self, lhs_shape, rhs_shape, strides, pads):
"""Compute the shape of a conv given input shapes in canonical order."""
if isinstance(pads, str):
pads = padtype_to_pads(lhs_shape[2:], rhs_shape[2:], strides, pads) # depends on [control=['if'], data=[]]
if len(pads) != len(lhs_shape) - 2:
msg = 'Wrong number of explicit pads for conv: expected {}, got {}.'
raise TypeError(msg.format(len(lhs_shape) - 2, len(pads))) # depends on [control=['if'], data=[]]
lhs_padded = onp.add(lhs_shape[2:], onp.add(*zip(*pads)))
out_space = onp.floor_divide(onp.subtract(lhs_padded, rhs_shape[2:]), strides) + 1
out_space = onp.maximum(0, out_space)
out_shape = (lhs_shape[0], rhs_shape[0]) + tuple(out_space)
return tuple(out_shape) |
def verify_user(self):
"""
start verify finger mode (after capture)
:return: bool
"""
command = const.CMD_STARTVERIFY
cmd_response = self.__send_command(command)
if cmd_response.get('status'):
return True
else:
raise ZKErrorResponse("Cant Verify") | def function[verify_user, parameter[self]]:
constant[
start verify finger mode (after capture)
:return: bool
]
variable[command] assign[=] name[const].CMD_STARTVERIFY
variable[cmd_response] assign[=] call[name[self].__send_command, parameter[name[command]]]
if call[name[cmd_response].get, parameter[constant[status]]] begin[:]
return[constant[True]] | keyword[def] identifier[verify_user] ( identifier[self] ):
literal[string]
identifier[command] = identifier[const] . identifier[CMD_STARTVERIFY]
identifier[cmd_response] = identifier[self] . identifier[__send_command] ( identifier[command] )
keyword[if] identifier[cmd_response] . identifier[get] ( literal[string] ):
keyword[return] keyword[True]
keyword[else] :
keyword[raise] identifier[ZKErrorResponse] ( literal[string] ) | def verify_user(self):
"""
start verify finger mode (after capture)
:return: bool
"""
command = const.CMD_STARTVERIFY
cmd_response = self.__send_command(command)
if cmd_response.get('status'):
return True # depends on [control=['if'], data=[]]
else:
raise ZKErrorResponse('Cant Verify') |
def enrich(self, columns, groupby):
""" This method calculates the maximum and minimum value
of a given set of columns depending on another column.
This is the usual group by clause in SQL.
:param columns: list of columns to apply the max and min values
:param groupby: column use to calculate the max/min values
:type columns: list of strings
"""
for column in columns:
if column not in self.data.columns:
return self.data
for column in columns:
df_grouped = self.data.groupby([groupby]).agg({column: 'max'})
df_grouped = df_grouped.reset_index()
df_grouped.rename(columns={column: 'max_' + column}, inplace=True)
self.data = pandas.merge(self.data, df_grouped, how='left', on=[groupby])
df_grouped = self.data.groupby([groupby]).agg({column: 'min'})
df_grouped = df_grouped.reset_index()
df_grouped.rename(columns={column: 'min_' + column}, inplace=True)
self.data = pandas.merge(self.data, df_grouped, how='left', on=[groupby])
return self.data | def function[enrich, parameter[self, columns, groupby]]:
constant[ This method calculates the maximum and minimum value
of a given set of columns depending on another column.
This is the usual group by clause in SQL.
:param columns: list of columns to apply the max and min values
:param groupby: column use to calculate the max/min values
:type columns: list of strings
]
for taget[name[column]] in starred[name[columns]] begin[:]
if compare[name[column] <ast.NotIn object at 0x7da2590d7190> name[self].data.columns] begin[:]
return[name[self].data]
for taget[name[column]] in starred[name[columns]] begin[:]
variable[df_grouped] assign[=] call[call[name[self].data.groupby, parameter[list[[<ast.Name object at 0x7da1b2586740>]]]].agg, parameter[dictionary[[<ast.Name object at 0x7da18bc718d0>], [<ast.Constant object at 0x7da18bc71c00>]]]]
variable[df_grouped] assign[=] call[name[df_grouped].reset_index, parameter[]]
call[name[df_grouped].rename, parameter[]]
name[self].data assign[=] call[name[pandas].merge, parameter[name[self].data, name[df_grouped]]]
variable[df_grouped] assign[=] call[call[name[self].data.groupby, parameter[list[[<ast.Name object at 0x7da18bc72650>]]]].agg, parameter[dictionary[[<ast.Name object at 0x7da18bc73220>], [<ast.Constant object at 0x7da18bc71f00>]]]]
variable[df_grouped] assign[=] call[name[df_grouped].reset_index, parameter[]]
call[name[df_grouped].rename, parameter[]]
name[self].data assign[=] call[name[pandas].merge, parameter[name[self].data, name[df_grouped]]]
return[name[self].data] | keyword[def] identifier[enrich] ( identifier[self] , identifier[columns] , identifier[groupby] ):
literal[string]
keyword[for] identifier[column] keyword[in] identifier[columns] :
keyword[if] identifier[column] keyword[not] keyword[in] identifier[self] . identifier[data] . identifier[columns] :
keyword[return] identifier[self] . identifier[data]
keyword[for] identifier[column] keyword[in] identifier[columns] :
identifier[df_grouped] = identifier[self] . identifier[data] . identifier[groupby] ([ identifier[groupby] ]). identifier[agg] ({ identifier[column] : literal[string] })
identifier[df_grouped] = identifier[df_grouped] . identifier[reset_index] ()
identifier[df_grouped] . identifier[rename] ( identifier[columns] ={ identifier[column] : literal[string] + identifier[column] }, identifier[inplace] = keyword[True] )
identifier[self] . identifier[data] = identifier[pandas] . identifier[merge] ( identifier[self] . identifier[data] , identifier[df_grouped] , identifier[how] = literal[string] , identifier[on] =[ identifier[groupby] ])
identifier[df_grouped] = identifier[self] . identifier[data] . identifier[groupby] ([ identifier[groupby] ]). identifier[agg] ({ identifier[column] : literal[string] })
identifier[df_grouped] = identifier[df_grouped] . identifier[reset_index] ()
identifier[df_grouped] . identifier[rename] ( identifier[columns] ={ identifier[column] : literal[string] + identifier[column] }, identifier[inplace] = keyword[True] )
identifier[self] . identifier[data] = identifier[pandas] . identifier[merge] ( identifier[self] . identifier[data] , identifier[df_grouped] , identifier[how] = literal[string] , identifier[on] =[ identifier[groupby] ])
keyword[return] identifier[self] . identifier[data] | def enrich(self, columns, groupby):
""" This method calculates the maximum and minimum value
of a given set of columns depending on another column.
This is the usual group by clause in SQL.
:param columns: list of columns to apply the max and min values
:param groupby: column use to calculate the max/min values
:type columns: list of strings
"""
for column in columns:
if column not in self.data.columns:
return self.data # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['column']]
for column in columns:
df_grouped = self.data.groupby([groupby]).agg({column: 'max'})
df_grouped = df_grouped.reset_index()
df_grouped.rename(columns={column: 'max_' + column}, inplace=True)
self.data = pandas.merge(self.data, df_grouped, how='left', on=[groupby])
df_grouped = self.data.groupby([groupby]).agg({column: 'min'})
df_grouped = df_grouped.reset_index()
df_grouped.rename(columns={column: 'min_' + column}, inplace=True)
self.data = pandas.merge(self.data, df_grouped, how='left', on=[groupby]) # depends on [control=['for'], data=['column']]
return self.data |
def any_form_default(form_cls, **kwargs):
"""
Returns tuple with form data and files
"""
form_data = {}
form_files = {}
form_fields, fields_args = split_model_kwargs(kwargs)
for name, field in form_cls.base_fields.iteritems():
if name in form_fields:
form_data[name] = kwargs[name]
else:
form_data[name] = any_form_field(field, **fields_args[name])
return form_data, form_files | def function[any_form_default, parameter[form_cls]]:
constant[
Returns tuple with form data and files
]
variable[form_data] assign[=] dictionary[[], []]
variable[form_files] assign[=] dictionary[[], []]
<ast.Tuple object at 0x7da1b0211b70> assign[=] call[name[split_model_kwargs], parameter[name[kwargs]]]
for taget[tuple[[<ast.Name object at 0x7da1b0212d70>, <ast.Name object at 0x7da1b0213400>]]] in starred[call[name[form_cls].base_fields.iteritems, parameter[]]] begin[:]
if compare[name[name] in name[form_fields]] begin[:]
call[name[form_data]][name[name]] assign[=] call[name[kwargs]][name[name]]
return[tuple[[<ast.Name object at 0x7da1b0210a30>, <ast.Name object at 0x7da1b0212e00>]]] | keyword[def] identifier[any_form_default] ( identifier[form_cls] ,** identifier[kwargs] ):
literal[string]
identifier[form_data] ={}
identifier[form_files] ={}
identifier[form_fields] , identifier[fields_args] = identifier[split_model_kwargs] ( identifier[kwargs] )
keyword[for] identifier[name] , identifier[field] keyword[in] identifier[form_cls] . identifier[base_fields] . identifier[iteritems] ():
keyword[if] identifier[name] keyword[in] identifier[form_fields] :
identifier[form_data] [ identifier[name] ]= identifier[kwargs] [ identifier[name] ]
keyword[else] :
identifier[form_data] [ identifier[name] ]= identifier[any_form_field] ( identifier[field] ,** identifier[fields_args] [ identifier[name] ])
keyword[return] identifier[form_data] , identifier[form_files] | def any_form_default(form_cls, **kwargs):
"""
Returns tuple with form data and files
"""
form_data = {}
form_files = {}
(form_fields, fields_args) = split_model_kwargs(kwargs)
for (name, field) in form_cls.base_fields.iteritems():
if name in form_fields:
form_data[name] = kwargs[name] # depends on [control=['if'], data=['name']]
else:
form_data[name] = any_form_field(field, **fields_args[name]) # depends on [control=['for'], data=[]]
return (form_data, form_files) |
def get_term_and_background_counts(self):
'''
Returns
-------
A pd.DataFrame consisting of unigram term counts of words occurring
in the TermDocumentMatrix and their corresponding background corpus
counts. The dataframe has two columns, corpus and background.
>>> corpus.get_unigram_corpus().get_term_and_background_counts()
corpus background
obama 702.0 565739.0
romney 570.0 695398.0
barack 248.0 227861.0
...
'''
background_df = self._get_background_unigram_frequencies()
corpus_freq_df = self.get_term_count_df()
corpus_unigram_freq = self._get_corpus_unigram_freq(corpus_freq_df)
df = corpus_unigram_freq.join(background_df, how='outer').fillna(0)
del df.index.name
return df | def function[get_term_and_background_counts, parameter[self]]:
constant[
Returns
-------
A pd.DataFrame consisting of unigram term counts of words occurring
in the TermDocumentMatrix and their corresponding background corpus
counts. The dataframe has two columns, corpus and background.
>>> corpus.get_unigram_corpus().get_term_and_background_counts()
corpus background
obama 702.0 565739.0
romney 570.0 695398.0
barack 248.0 227861.0
...
]
variable[background_df] assign[=] call[name[self]._get_background_unigram_frequencies, parameter[]]
variable[corpus_freq_df] assign[=] call[name[self].get_term_count_df, parameter[]]
variable[corpus_unigram_freq] assign[=] call[name[self]._get_corpus_unigram_freq, parameter[name[corpus_freq_df]]]
variable[df] assign[=] call[call[name[corpus_unigram_freq].join, parameter[name[background_df]]].fillna, parameter[constant[0]]]
<ast.Delete object at 0x7da1b1bf8ac0>
return[name[df]] | keyword[def] identifier[get_term_and_background_counts] ( identifier[self] ):
literal[string]
identifier[background_df] = identifier[self] . identifier[_get_background_unigram_frequencies] ()
identifier[corpus_freq_df] = identifier[self] . identifier[get_term_count_df] ()
identifier[corpus_unigram_freq] = identifier[self] . identifier[_get_corpus_unigram_freq] ( identifier[corpus_freq_df] )
identifier[df] = identifier[corpus_unigram_freq] . identifier[join] ( identifier[background_df] , identifier[how] = literal[string] ). identifier[fillna] ( literal[int] )
keyword[del] identifier[df] . identifier[index] . identifier[name]
keyword[return] identifier[df] | def get_term_and_background_counts(self):
"""
Returns
-------
A pd.DataFrame consisting of unigram term counts of words occurring
in the TermDocumentMatrix and their corresponding background corpus
counts. The dataframe has two columns, corpus and background.
>>> corpus.get_unigram_corpus().get_term_and_background_counts()
corpus background
obama 702.0 565739.0
romney 570.0 695398.0
barack 248.0 227861.0
...
"""
background_df = self._get_background_unigram_frequencies()
corpus_freq_df = self.get_term_count_df()
corpus_unigram_freq = self._get_corpus_unigram_freq(corpus_freq_df)
df = corpus_unigram_freq.join(background_df, how='outer').fillna(0)
del df.index.name
return df |
def from_index_amount(cls, matrixpos, amt):
"""
Factory method for constructing a Deformation object
from a matrix position and amount
Args:
matrixpos (tuple): tuple corresponding the matrix position to
have a perturbation added
amt (float): amount to add to the identity matrix at position
matrixpos
"""
f = np.identity(3)
f[matrixpos] += amt
return cls(f) | def function[from_index_amount, parameter[cls, matrixpos, amt]]:
constant[
Factory method for constructing a Deformation object
from a matrix position and amount
Args:
matrixpos (tuple): tuple corresponding the matrix position to
have a perturbation added
amt (float): amount to add to the identity matrix at position
matrixpos
]
variable[f] assign[=] call[name[np].identity, parameter[constant[3]]]
<ast.AugAssign object at 0x7da1b1cd6fe0>
return[call[name[cls], parameter[name[f]]]] | keyword[def] identifier[from_index_amount] ( identifier[cls] , identifier[matrixpos] , identifier[amt] ):
literal[string]
identifier[f] = identifier[np] . identifier[identity] ( literal[int] )
identifier[f] [ identifier[matrixpos] ]+= identifier[amt]
keyword[return] identifier[cls] ( identifier[f] ) | def from_index_amount(cls, matrixpos, amt):
"""
Factory method for constructing a Deformation object
from a matrix position and amount
Args:
matrixpos (tuple): tuple corresponding the matrix position to
have a perturbation added
amt (float): amount to add to the identity matrix at position
matrixpos
"""
f = np.identity(3)
f[matrixpos] += amt
return cls(f) |
def get_words(data):
"""
Extracts the words from given string.
Usage::
>>> get_words("Users are: John Doe, Jane Doe, Z6PO.")
[u'Users', u'are', u'John', u'Doe', u'Jane', u'Doe', u'Z6PO']
:param data: Data to extract words from.
:type data: unicode
:return: Words.
:rtype: list
"""
words = re.findall(r"\w+", data)
LOGGER.debug("> Words: '{0}'".format(", ".join(words)))
return words | def function[get_words, parameter[data]]:
constant[
Extracts the words from given string.
Usage::
>>> get_words("Users are: John Doe, Jane Doe, Z6PO.")
[u'Users', u'are', u'John', u'Doe', u'Jane', u'Doe', u'Z6PO']
:param data: Data to extract words from.
:type data: unicode
:return: Words.
:rtype: list
]
variable[words] assign[=] call[name[re].findall, parameter[constant[\w+], name[data]]]
call[name[LOGGER].debug, parameter[call[constant[> Words: '{0}'].format, parameter[call[constant[, ].join, parameter[name[words]]]]]]]
return[name[words]] | keyword[def] identifier[get_words] ( identifier[data] ):
literal[string]
identifier[words] = identifier[re] . identifier[findall] ( literal[string] , identifier[data] )
identifier[LOGGER] . identifier[debug] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[words] )))
keyword[return] identifier[words] | def get_words(data):
"""
Extracts the words from given string.
Usage::
>>> get_words("Users are: John Doe, Jane Doe, Z6PO.")
[u'Users', u'are', u'John', u'Doe', u'Jane', u'Doe', u'Z6PO']
:param data: Data to extract words from.
:type data: unicode
:return: Words.
:rtype: list
"""
words = re.findall('\\w+', data)
LOGGER.debug("> Words: '{0}'".format(', '.join(words)))
return words |
def _resolve_serializer(self, serializer):
"""
Resolve the given serializer.
:param serializer: The serializer to resolve
:type serializer: str or Serializer
:rtype: Serializer
"""
if isinstance(serializer, Serializer):
return serializer
if serializer in self._serializers:
return self._serializers[serializer]
raise RuntimeError('Unsupported serializer') | def function[_resolve_serializer, parameter[self, serializer]]:
constant[
Resolve the given serializer.
:param serializer: The serializer to resolve
:type serializer: str or Serializer
:rtype: Serializer
]
if call[name[isinstance], parameter[name[serializer], name[Serializer]]] begin[:]
return[name[serializer]]
if compare[name[serializer] in name[self]._serializers] begin[:]
return[call[name[self]._serializers][name[serializer]]]
<ast.Raise object at 0x7da1b1932920> | keyword[def] identifier[_resolve_serializer] ( identifier[self] , identifier[serializer] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[serializer] , identifier[Serializer] ):
keyword[return] identifier[serializer]
keyword[if] identifier[serializer] keyword[in] identifier[self] . identifier[_serializers] :
keyword[return] identifier[self] . identifier[_serializers] [ identifier[serializer] ]
keyword[raise] identifier[RuntimeError] ( literal[string] ) | def _resolve_serializer(self, serializer):
"""
Resolve the given serializer.
:param serializer: The serializer to resolve
:type serializer: str or Serializer
:rtype: Serializer
"""
if isinstance(serializer, Serializer):
return serializer # depends on [control=['if'], data=[]]
if serializer in self._serializers:
return self._serializers[serializer] # depends on [control=['if'], data=['serializer']]
raise RuntimeError('Unsupported serializer') |
def tag_and_push_image(self, image, target_image, insecure=False, force=False,
dockercfg=None):
"""
tag provided image and push it to registry
:param image: str or ImageName, image id or name
:param target_image: ImageName, img
:param insecure: bool, allow connecting to registry over plain http
:param force: bool, force the tag?
:param dockercfg: path to docker config
:return: str, image (reg.com/img:v1)
"""
logger.info("tagging and pushing image '%s' as '%s'", image, target_image)
logger.debug("image = '%s', target_image = '%s'", image, target_image)
self.tag_image(image, target_image, force=force)
if dockercfg:
self.login(registry=target_image.registry, docker_secret_path=dockercfg)
return self.push_image(target_image, insecure=insecure) | def function[tag_and_push_image, parameter[self, image, target_image, insecure, force, dockercfg]]:
constant[
tag provided image and push it to registry
:param image: str or ImageName, image id or name
:param target_image: ImageName, img
:param insecure: bool, allow connecting to registry over plain http
:param force: bool, force the tag?
:param dockercfg: path to docker config
:return: str, image (reg.com/img:v1)
]
call[name[logger].info, parameter[constant[tagging and pushing image '%s' as '%s'], name[image], name[target_image]]]
call[name[logger].debug, parameter[constant[image = '%s', target_image = '%s'], name[image], name[target_image]]]
call[name[self].tag_image, parameter[name[image], name[target_image]]]
if name[dockercfg] begin[:]
call[name[self].login, parameter[]]
return[call[name[self].push_image, parameter[name[target_image]]]] | keyword[def] identifier[tag_and_push_image] ( identifier[self] , identifier[image] , identifier[target_image] , identifier[insecure] = keyword[False] , identifier[force] = keyword[False] ,
identifier[dockercfg] = keyword[None] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] , identifier[image] , identifier[target_image] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[image] , identifier[target_image] )
identifier[self] . identifier[tag_image] ( identifier[image] , identifier[target_image] , identifier[force] = identifier[force] )
keyword[if] identifier[dockercfg] :
identifier[self] . identifier[login] ( identifier[registry] = identifier[target_image] . identifier[registry] , identifier[docker_secret_path] = identifier[dockercfg] )
keyword[return] identifier[self] . identifier[push_image] ( identifier[target_image] , identifier[insecure] = identifier[insecure] ) | def tag_and_push_image(self, image, target_image, insecure=False, force=False, dockercfg=None):
"""
tag provided image and push it to registry
:param image: str or ImageName, image id or name
:param target_image: ImageName, img
:param insecure: bool, allow connecting to registry over plain http
:param force: bool, force the tag?
:param dockercfg: path to docker config
:return: str, image (reg.com/img:v1)
"""
logger.info("tagging and pushing image '%s' as '%s'", image, target_image)
logger.debug("image = '%s', target_image = '%s'", image, target_image)
self.tag_image(image, target_image, force=force)
if dockercfg:
self.login(registry=target_image.registry, docker_secret_path=dockercfg) # depends on [control=['if'], data=[]]
return self.push_image(target_image, insecure=insecure) |
def log_errors(self):
"""
Log errors for all stored EventualResults that have error results.
"""
for result in self._stored.values():
failure = result.original_failure()
if failure is not None:
log.err(failure, "Unhandled error in stashed EventualResult:") | def function[log_errors, parameter[self]]:
constant[
Log errors for all stored EventualResults that have error results.
]
for taget[name[result]] in starred[call[name[self]._stored.values, parameter[]]] begin[:]
variable[failure] assign[=] call[name[result].original_failure, parameter[]]
if compare[name[failure] is_not constant[None]] begin[:]
call[name[log].err, parameter[name[failure], constant[Unhandled error in stashed EventualResult:]]] | keyword[def] identifier[log_errors] ( identifier[self] ):
literal[string]
keyword[for] identifier[result] keyword[in] identifier[self] . identifier[_stored] . identifier[values] ():
identifier[failure] = identifier[result] . identifier[original_failure] ()
keyword[if] identifier[failure] keyword[is] keyword[not] keyword[None] :
identifier[log] . identifier[err] ( identifier[failure] , literal[string] ) | def log_errors(self):
"""
Log errors for all stored EventualResults that have error results.
"""
for result in self._stored.values():
failure = result.original_failure()
if failure is not None:
log.err(failure, 'Unhandled error in stashed EventualResult:') # depends on [control=['if'], data=['failure']] # depends on [control=['for'], data=['result']] |
def scores_to_probs(scores, proba, eps=0.01):
"""Transforms scores to probabilities by applying the logistic function"""
if np.any(~proba):
# Need to convert some of the scores into probabilities
probs = copy.deepcopy(scores)
n_class = len(proba)
for m in range(n_class):
if not proba[m]:
#TODO: incorporate threshold (currently assuming zero)
# find most extreme absolute score
max_extreme_score = max(np.abs(np.min(scores[:,m])),\
np.abs(np.max(scores[:,m])))
k = np.log((1-eps)/eps)/max_extreme_score # scale factor
self._probs[:,m] = expit(k * self.scores[:,m])
return probs
else:
return scores | def function[scores_to_probs, parameter[scores, proba, eps]]:
constant[Transforms scores to probabilities by applying the logistic function]
if call[name[np].any, parameter[<ast.UnaryOp object at 0x7da18bccbbb0>]] begin[:]
variable[probs] assign[=] call[name[copy].deepcopy, parameter[name[scores]]]
variable[n_class] assign[=] call[name[len], parameter[name[proba]]]
for taget[name[m]] in starred[call[name[range], parameter[name[n_class]]]] begin[:]
if <ast.UnaryOp object at 0x7da18bccac50> begin[:]
variable[max_extreme_score] assign[=] call[name[max], parameter[call[name[np].abs, parameter[call[name[np].min, parameter[call[name[scores]][tuple[[<ast.Slice object at 0x7da1b23697e0>, <ast.Name object at 0x7da1b236a230>]]]]]]], call[name[np].abs, parameter[call[name[np].max, parameter[call[name[scores]][tuple[[<ast.Slice object at 0x7da1b236a620>, <ast.Name object at 0x7da1b2368310>]]]]]]]]]
variable[k] assign[=] binary_operation[call[name[np].log, parameter[binary_operation[binary_operation[constant[1] - name[eps]] / name[eps]]]] / name[max_extreme_score]]
call[name[self]._probs][tuple[[<ast.Slice object at 0x7da1b2369780>, <ast.Name object at 0x7da1b2368820>]]] assign[=] call[name[expit], parameter[binary_operation[name[k] * call[name[self].scores][tuple[[<ast.Slice object at 0x7da1b236b040>, <ast.Name object at 0x7da1b236a770>]]]]]]
return[name[probs]] | keyword[def] identifier[scores_to_probs] ( identifier[scores] , identifier[proba] , identifier[eps] = literal[int] ):
literal[string]
keyword[if] identifier[np] . identifier[any] (~ identifier[proba] ):
identifier[probs] = identifier[copy] . identifier[deepcopy] ( identifier[scores] )
identifier[n_class] = identifier[len] ( identifier[proba] )
keyword[for] identifier[m] keyword[in] identifier[range] ( identifier[n_class] ):
keyword[if] keyword[not] identifier[proba] [ identifier[m] ]:
identifier[max_extreme_score] = identifier[max] ( identifier[np] . identifier[abs] ( identifier[np] . identifier[min] ( identifier[scores] [:, identifier[m] ])), identifier[np] . identifier[abs] ( identifier[np] . identifier[max] ( identifier[scores] [:, identifier[m] ])))
identifier[k] = identifier[np] . identifier[log] (( literal[int] - identifier[eps] )/ identifier[eps] )/ identifier[max_extreme_score]
identifier[self] . identifier[_probs] [:, identifier[m] ]= identifier[expit] ( identifier[k] * identifier[self] . identifier[scores] [:, identifier[m] ])
keyword[return] identifier[probs]
keyword[else] :
keyword[return] identifier[scores] | def scores_to_probs(scores, proba, eps=0.01):
"""Transforms scores to probabilities by applying the logistic function"""
if np.any(~proba):
# Need to convert some of the scores into probabilities
probs = copy.deepcopy(scores)
n_class = len(proba)
for m in range(n_class):
if not proba[m]:
#TODO: incorporate threshold (currently assuming zero)
# find most extreme absolute score
max_extreme_score = max(np.abs(np.min(scores[:, m])), np.abs(np.max(scores[:, m])))
k = np.log((1 - eps) / eps) / max_extreme_score # scale factor
self._probs[:, m] = expit(k * self.scores[:, m]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['m']]
return probs # depends on [control=['if'], data=[]]
else:
return scores |
def class_dict_to_specs(mcs, class_dict):
"""Takes a class `__dict__` and returns `HeronComponentSpec` entries"""
specs = {}
for name, spec in class_dict.items():
if isinstance(spec, HeronComponentSpec):
# Use the variable name as the specification name.
if spec.name is None:
spec.name = name
if spec.name in specs:
raise ValueError("Duplicate component name: %s" % spec.name)
else:
specs[spec.name] = spec
return specs | def function[class_dict_to_specs, parameter[mcs, class_dict]]:
constant[Takes a class `__dict__` and returns `HeronComponentSpec` entries]
variable[specs] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da20c76dae0>, <ast.Name object at 0x7da20c76e9e0>]]] in starred[call[name[class_dict].items, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[spec], name[HeronComponentSpec]]] begin[:]
if compare[name[spec].name is constant[None]] begin[:]
name[spec].name assign[=] name[name]
if compare[name[spec].name in name[specs]] begin[:]
<ast.Raise object at 0x7da20c76ffa0>
return[name[specs]] | keyword[def] identifier[class_dict_to_specs] ( identifier[mcs] , identifier[class_dict] ):
literal[string]
identifier[specs] ={}
keyword[for] identifier[name] , identifier[spec] keyword[in] identifier[class_dict] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[spec] , identifier[HeronComponentSpec] ):
keyword[if] identifier[spec] . identifier[name] keyword[is] keyword[None] :
identifier[spec] . identifier[name] = identifier[name]
keyword[if] identifier[spec] . identifier[name] keyword[in] identifier[specs] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[spec] . identifier[name] )
keyword[else] :
identifier[specs] [ identifier[spec] . identifier[name] ]= identifier[spec]
keyword[return] identifier[specs] | def class_dict_to_specs(mcs, class_dict):
"""Takes a class `__dict__` and returns `HeronComponentSpec` entries"""
specs = {}
for (name, spec) in class_dict.items():
if isinstance(spec, HeronComponentSpec):
# Use the variable name as the specification name.
if spec.name is None:
spec.name = name # depends on [control=['if'], data=[]]
if spec.name in specs:
raise ValueError('Duplicate component name: %s' % spec.name) # depends on [control=['if'], data=[]]
else:
specs[spec.name] = spec # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return specs |
def primary_transcript(entrystream, parenttype='gene', logstream=stderr):
"""
Select a single transcript as a representative for each gene.
This function is a generalization of the `primary_mrna` function that
attempts, under certain conditions, to select a single transcript as a
representative for each gene. If a gene encodes multiple transcript types,
one of those types must be **mRNA** or the function will complain loudly
and fail.
For mRNAs, the primary transcript is selected according to translated
length. For all other transcript types, the length of the transcript
feature itself is used. I'd be eager to hear suggestions for alternative
selection criteria.
Like the `primary_mrna` function, this function **does not** return only
transcript features. It **does** modify gene features to ensure that each
has at most one transcript feature.
>>> reader = tag.GFF3Reader(tag.pkgdata('psyllid-mixed-gene.gff3.gz'))
>>> gene_filter = tag.select.features(reader, type='gene')
>>> trans_filter = tag.transcript.primary_transcript(gene_filter)
>>> for gene in trans_filter:
... assert gene.num_children == 1
In cases where the direct children of a gene feature have heterogenous
types, the `primary_mrna` function will only discard mRNA features. This
function, however, will discard all direct children of the gene that are
not the primary transcript, including non-transcript children. This is a
retty subtle distinction, and anecdotal experience suggests that cases in
which the distinction actually matters are extremely rare.
"""
for entry in entrystream:
if not isinstance(entry, tag.Feature):
yield entry
continue
for parent in tag.select.features(entry, parenttype, traverse=True):
if parent.num_children == 0:
continue
transcripts = defaultdict(list)
for child in parent.children:
if child.type in type_terms:
transcripts[child.type].append(child)
if len(transcripts) == 0:
continue
ttypes = list(transcripts.keys())
ttype = _get_primary_type(ttypes, parent)
transcript_list = transcripts[ttype]
if ttype == 'mRNA':
_emplace_pmrna(transcript_list, parent, strict=True)
else:
_emplace_transcript(transcript_list, parent)
yield entry | def function[primary_transcript, parameter[entrystream, parenttype, logstream]]:
constant[
Select a single transcript as a representative for each gene.
This function is a generalization of the `primary_mrna` function that
attempts, under certain conditions, to select a single transcript as a
representative for each gene. If a gene encodes multiple transcript types,
one of those types must be **mRNA** or the function will complain loudly
and fail.
For mRNAs, the primary transcript is selected according to translated
length. For all other transcript types, the length of the transcript
feature itself is used. I'd be eager to hear suggestions for alternative
selection criteria.
Like the `primary_mrna` function, this function **does not** return only
transcript features. It **does** modify gene features to ensure that each
has at most one transcript feature.
>>> reader = tag.GFF3Reader(tag.pkgdata('psyllid-mixed-gene.gff3.gz'))
>>> gene_filter = tag.select.features(reader, type='gene')
>>> trans_filter = tag.transcript.primary_transcript(gene_filter)
>>> for gene in trans_filter:
... assert gene.num_children == 1
In cases where the direct children of a gene feature have heterogenous
types, the `primary_mrna` function will only discard mRNA features. This
function, however, will discard all direct children of the gene that are
not the primary transcript, including non-transcript children. This is a
retty subtle distinction, and anecdotal experience suggests that cases in
which the distinction actually matters are extremely rare.
]
for taget[name[entry]] in starred[name[entrystream]] begin[:]
if <ast.UnaryOp object at 0x7da1b274a590> begin[:]
<ast.Yield object at 0x7da1b27490f0>
continue
for taget[name[parent]] in starred[call[name[tag].select.features, parameter[name[entry], name[parenttype]]]] begin[:]
if compare[name[parent].num_children equal[==] constant[0]] begin[:]
continue
variable[transcripts] assign[=] call[name[defaultdict], parameter[name[list]]]
for taget[name[child]] in starred[name[parent].children] begin[:]
if compare[name[child].type in name[type_terms]] begin[:]
call[call[name[transcripts]][name[child].type].append, parameter[name[child]]]
if compare[call[name[len], parameter[name[transcripts]]] equal[==] constant[0]] begin[:]
continue
variable[ttypes] assign[=] call[name[list], parameter[call[name[transcripts].keys, parameter[]]]]
variable[ttype] assign[=] call[name[_get_primary_type], parameter[name[ttypes], name[parent]]]
variable[transcript_list] assign[=] call[name[transcripts]][name[ttype]]
if compare[name[ttype] equal[==] constant[mRNA]] begin[:]
call[name[_emplace_pmrna], parameter[name[transcript_list], name[parent]]]
<ast.Yield object at 0x7da1b27486d0> | keyword[def] identifier[primary_transcript] ( identifier[entrystream] , identifier[parenttype] = literal[string] , identifier[logstream] = identifier[stderr] ):
literal[string]
keyword[for] identifier[entry] keyword[in] identifier[entrystream] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[entry] , identifier[tag] . identifier[Feature] ):
keyword[yield] identifier[entry]
keyword[continue]
keyword[for] identifier[parent] keyword[in] identifier[tag] . identifier[select] . identifier[features] ( identifier[entry] , identifier[parenttype] , identifier[traverse] = keyword[True] ):
keyword[if] identifier[parent] . identifier[num_children] == literal[int] :
keyword[continue]
identifier[transcripts] = identifier[defaultdict] ( identifier[list] )
keyword[for] identifier[child] keyword[in] identifier[parent] . identifier[children] :
keyword[if] identifier[child] . identifier[type] keyword[in] identifier[type_terms] :
identifier[transcripts] [ identifier[child] . identifier[type] ]. identifier[append] ( identifier[child] )
keyword[if] identifier[len] ( identifier[transcripts] )== literal[int] :
keyword[continue]
identifier[ttypes] = identifier[list] ( identifier[transcripts] . identifier[keys] ())
identifier[ttype] = identifier[_get_primary_type] ( identifier[ttypes] , identifier[parent] )
identifier[transcript_list] = identifier[transcripts] [ identifier[ttype] ]
keyword[if] identifier[ttype] == literal[string] :
identifier[_emplace_pmrna] ( identifier[transcript_list] , identifier[parent] , identifier[strict] = keyword[True] )
keyword[else] :
identifier[_emplace_transcript] ( identifier[transcript_list] , identifier[parent] )
keyword[yield] identifier[entry] | def primary_transcript(entrystream, parenttype='gene', logstream=stderr):
"""
Select a single transcript as a representative for each gene.
This function is a generalization of the `primary_mrna` function that
attempts, under certain conditions, to select a single transcript as a
representative for each gene. If a gene encodes multiple transcript types,
one of those types must be **mRNA** or the function will complain loudly
and fail.
For mRNAs, the primary transcript is selected according to translated
length. For all other transcript types, the length of the transcript
feature itself is used. I'd be eager to hear suggestions for alternative
selection criteria.
Like the `primary_mrna` function, this function **does not** return only
transcript features. It **does** modify gene features to ensure that each
has at most one transcript feature.
>>> reader = tag.GFF3Reader(tag.pkgdata('psyllid-mixed-gene.gff3.gz'))
>>> gene_filter = tag.select.features(reader, type='gene')
>>> trans_filter = tag.transcript.primary_transcript(gene_filter)
>>> for gene in trans_filter:
... assert gene.num_children == 1
In cases where the direct children of a gene feature have heterogenous
types, the `primary_mrna` function will only discard mRNA features. This
function, however, will discard all direct children of the gene that are
not the primary transcript, including non-transcript children. This is a
retty subtle distinction, and anecdotal experience suggests that cases in
which the distinction actually matters are extremely rare.
"""
for entry in entrystream:
if not isinstance(entry, tag.Feature):
yield entry
continue # depends on [control=['if'], data=[]]
for parent in tag.select.features(entry, parenttype, traverse=True):
if parent.num_children == 0:
continue # depends on [control=['if'], data=[]]
transcripts = defaultdict(list)
for child in parent.children:
if child.type in type_terms:
transcripts[child.type].append(child) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['child']]
if len(transcripts) == 0:
continue # depends on [control=['if'], data=[]]
ttypes = list(transcripts.keys())
ttype = _get_primary_type(ttypes, parent)
transcript_list = transcripts[ttype]
if ttype == 'mRNA':
_emplace_pmrna(transcript_list, parent, strict=True) # depends on [control=['if'], data=[]]
else:
_emplace_transcript(transcript_list, parent) # depends on [control=['for'], data=['parent']]
yield entry # depends on [control=['for'], data=['entry']] |
def evaluate_at(self, eval_at, testcases, mode=None):
""" Sets the evaluation interation indices.
:param list eval_at: iteration indices where an evaluation should be performed
:param numpy.array testcases: testcases used for evaluation
"""
self.eval_at = eval_at
self.log.eval_at = eval_at
if mode is None:
if self.context_mode is None or (self.context_mode.has_key('choose_m') and self.context_mode['choose_m']):
mode = 'inverse'
else:
mode = self.context_mode["mode"]
self.evaluation = Evaluation(self.ag, self.env, testcases, mode=mode)
for test in testcases:
self.log.add('testcases', test) | def function[evaluate_at, parameter[self, eval_at, testcases, mode]]:
constant[ Sets the evaluation interation indices.
:param list eval_at: iteration indices where an evaluation should be performed
:param numpy.array testcases: testcases used for evaluation
]
name[self].eval_at assign[=] name[eval_at]
name[self].log.eval_at assign[=] name[eval_at]
if compare[name[mode] is constant[None]] begin[:]
if <ast.BoolOp object at 0x7da1b0c0b730> begin[:]
variable[mode] assign[=] constant[inverse]
name[self].evaluation assign[=] call[name[Evaluation], parameter[name[self].ag, name[self].env, name[testcases]]]
for taget[name[test]] in starred[name[testcases]] begin[:]
call[name[self].log.add, parameter[constant[testcases], name[test]]] | keyword[def] identifier[evaluate_at] ( identifier[self] , identifier[eval_at] , identifier[testcases] , identifier[mode] = keyword[None] ):
literal[string]
identifier[self] . identifier[eval_at] = identifier[eval_at]
identifier[self] . identifier[log] . identifier[eval_at] = identifier[eval_at]
keyword[if] identifier[mode] keyword[is] keyword[None] :
keyword[if] identifier[self] . identifier[context_mode] keyword[is] keyword[None] keyword[or] ( identifier[self] . identifier[context_mode] . identifier[has_key] ( literal[string] ) keyword[and] identifier[self] . identifier[context_mode] [ literal[string] ]):
identifier[mode] = literal[string]
keyword[else] :
identifier[mode] = identifier[self] . identifier[context_mode] [ literal[string] ]
identifier[self] . identifier[evaluation] = identifier[Evaluation] ( identifier[self] . identifier[ag] , identifier[self] . identifier[env] , identifier[testcases] , identifier[mode] = identifier[mode] )
keyword[for] identifier[test] keyword[in] identifier[testcases] :
identifier[self] . identifier[log] . identifier[add] ( literal[string] , identifier[test] ) | def evaluate_at(self, eval_at, testcases, mode=None):
""" Sets the evaluation interation indices.
:param list eval_at: iteration indices where an evaluation should be performed
:param numpy.array testcases: testcases used for evaluation
"""
self.eval_at = eval_at
self.log.eval_at = eval_at
if mode is None:
if self.context_mode is None or (self.context_mode.has_key('choose_m') and self.context_mode['choose_m']):
mode = 'inverse' # depends on [control=['if'], data=[]]
else:
mode = self.context_mode['mode'] # depends on [control=['if'], data=['mode']]
self.evaluation = Evaluation(self.ag, self.env, testcases, mode=mode)
for test in testcases:
self.log.add('testcases', test) # depends on [control=['for'], data=['test']] |
def _summarize(self):
"""Game summary implementation."""
self._achievements_summarized = True
data = None
if self._postgame:
data = self._postgame.action
game_type = 'DM' if self._header.lobby.game_type == 'DM' else 'RM'
self._summary = {
'players': list(self.players(data, game_type)),
'diplomacy': self._diplomacy,
'rec_owner_index': self._header.replay.rec_player,
'rec_owner_number': self._rec_owner_number(),
'settings': {
'type': game_type,
'difficulty': self._header.scenario.game_settings.difficulty,
# data.resource_level
'resource_level': 'standard',
'population_limit': self._header.lobby.population_limit * 25,
'speed': mgz.const.SPEEDS.get(self._header.replay.game_speed),
'reveal_map': self._header.lobby.reveal_map,
# self._get_starting_age(data.starting_age)
'starting_age': 'Dark' if game_type == 'RM' else 'Post Imperial',
'victory_condition': ('conquest' if self._header.scenario.victory.is_conquest
else 'other'),
# not data.team_together
'team_together': True,
# data.all_techs
'all_technologies': False,
'cheats': self._header.replay.cheats_enabled,
'lock_teams': self._header.lobby.lock_teams,
# data.lock_speed
'lock_speed': True,
'record_game': True
},
'map': {
'name': self._map.name(),
'size': self._map.size(),
'x': self._header.map_info.size_x,
'y': self._header.map_info.size_y,
'nomad': self.is_nomad(),
'regicide': self.is_regicide(),
'arena': self.is_arena(),
'hash': self._map_hash()
},
'mod': self._get_mod(),
'restore': {
'restored': self._header.initial.restore_time > 0,
'start_int': self._header.initial.restore_time,
'start_time': mgz.util.convert_to_timestamp(self._header.initial.restore_time /
1000)
},
'voobly': {
'ladder': self._ladder,
'rated': self._ladder != None
},
'number_of_humans': len([p for p in self._header.scenario.game_settings.player_info
if p['type'] == 'human']),
'number_of_ai': len([p for p in self._header.scenario.game_settings.player_info
if p['type'] == 'computer']),
'duration': mgz.util.convert_to_timestamp(self._time / 1000),
'time_int': self._time,
'metadata': {
'hash': self._hash,
'version': mgz.const.VERSIONS[self._header.version],
'sub_version': round(self._header.sub_version, 2),
'filename': os.path.basename(self._path),
'timestamp': self._get_timestamp()
},
'action_histogram': dict(self._actions_without_player),
'queue': self._queue
}
self._summary['finished'] = guess_finished(self._summary, data)
if self._summary['finished']:
self._summary['won_in'] = self._won_in().title()
self._set_winning_team()
if self._show_chat:
self._summary['chat'] = self._chat
if self._show_timeline:
self._summary['timeline'] = self._timeline
if self._show_coords:
self._summary['coords'] = self._coords | def function[_summarize, parameter[self]]:
constant[Game summary implementation.]
name[self]._achievements_summarized assign[=] constant[True]
variable[data] assign[=] constant[None]
if name[self]._postgame begin[:]
variable[data] assign[=] name[self]._postgame.action
variable[game_type] assign[=] <ast.IfExp object at 0x7da18eb55ed0>
name[self]._summary assign[=] dictionary[[<ast.Constant object at 0x7da18eb55330>, <ast.Constant object at 0x7da18eb56320>, <ast.Constant object at 0x7da18eb55810>, <ast.Constant object at 0x7da18eb57c40>, <ast.Constant object at 0x7da18eb57af0>, <ast.Constant object at 0x7da18eb54460>, <ast.Constant object at 0x7da18eb574c0>, <ast.Constant object at 0x7da18eb57670>, <ast.Constant object at 0x7da18eb57f40>, <ast.Constant object at 0x7da18eb54af0>, <ast.Constant object at 0x7da18eb54fa0>, <ast.Constant object at 0x7da18eb56290>, <ast.Constant object at 0x7da18eb541f0>, <ast.Constant object at 0x7da18eb56230>, <ast.Constant object at 0x7da18eb57cd0>, <ast.Constant object at 0x7da18eb551b0>], [<ast.Call object at 0x7da18eb54580>, <ast.Attribute object at 0x7da18eb55e40>, <ast.Attribute object at 0x7da18eb55210>, <ast.Call object at 0x7da18eb56590>, <ast.Dict object at 0x7da18eb56a40>, <ast.Dict object at 0x7da1b2594340>, <ast.Call object at 0x7da1b2597370>, <ast.Dict object at 0x7da1b2597340>, <ast.Dict object at 0x7da1b2594880>, <ast.Call object at 0x7da1b25943a0>, <ast.Call object at 0x7da1b25957e0>, <ast.Call object at 0x7da1b25941c0>, <ast.Attribute object at 0x7da1b25961d0>, <ast.Dict object at 0x7da1b2596290>, <ast.Call object at 0x7da1b25950f0>, <ast.Attribute object at 0x7da1b25948b0>]]
call[name[self]._summary][constant[finished]] assign[=] call[name[guess_finished], parameter[name[self]._summary, name[data]]]
if call[name[self]._summary][constant[finished]] begin[:]
call[name[self]._summary][constant[won_in]] assign[=] call[call[name[self]._won_in, parameter[]].title, parameter[]]
call[name[self]._set_winning_team, parameter[]]
if name[self]._show_chat begin[:]
call[name[self]._summary][constant[chat]] assign[=] name[self]._chat
if name[self]._show_timeline begin[:]
call[name[self]._summary][constant[timeline]] assign[=] name[self]._timeline
if name[self]._show_coords begin[:]
call[name[self]._summary][constant[coords]] assign[=] name[self]._coords | keyword[def] identifier[_summarize] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_achievements_summarized] = keyword[True]
identifier[data] = keyword[None]
keyword[if] identifier[self] . identifier[_postgame] :
identifier[data] = identifier[self] . identifier[_postgame] . identifier[action]
identifier[game_type] = literal[string] keyword[if] identifier[self] . identifier[_header] . identifier[lobby] . identifier[game_type] == literal[string] keyword[else] literal[string]
identifier[self] . identifier[_summary] ={
literal[string] : identifier[list] ( identifier[self] . identifier[players] ( identifier[data] , identifier[game_type] )),
literal[string] : identifier[self] . identifier[_diplomacy] ,
literal[string] : identifier[self] . identifier[_header] . identifier[replay] . identifier[rec_player] ,
literal[string] : identifier[self] . identifier[_rec_owner_number] (),
literal[string] :{
literal[string] : identifier[game_type] ,
literal[string] : identifier[self] . identifier[_header] . identifier[scenario] . identifier[game_settings] . identifier[difficulty] ,
literal[string] : literal[string] ,
literal[string] : identifier[self] . identifier[_header] . identifier[lobby] . identifier[population_limit] * literal[int] ,
literal[string] : identifier[mgz] . identifier[const] . identifier[SPEEDS] . identifier[get] ( identifier[self] . identifier[_header] . identifier[replay] . identifier[game_speed] ),
literal[string] : identifier[self] . identifier[_header] . identifier[lobby] . identifier[reveal_map] ,
literal[string] : literal[string] keyword[if] identifier[game_type] == literal[string] keyword[else] literal[string] ,
literal[string] :( literal[string] keyword[if] identifier[self] . identifier[_header] . identifier[scenario] . identifier[victory] . identifier[is_conquest]
keyword[else] literal[string] ),
literal[string] : keyword[True] ,
literal[string] : keyword[False] ,
literal[string] : identifier[self] . identifier[_header] . identifier[replay] . identifier[cheats_enabled] ,
literal[string] : identifier[self] . identifier[_header] . identifier[lobby] . identifier[lock_teams] ,
literal[string] : keyword[True] ,
literal[string] : keyword[True]
},
literal[string] :{
literal[string] : identifier[self] . identifier[_map] . identifier[name] (),
literal[string] : identifier[self] . identifier[_map] . identifier[size] (),
literal[string] : identifier[self] . identifier[_header] . identifier[map_info] . identifier[size_x] ,
literal[string] : identifier[self] . identifier[_header] . identifier[map_info] . identifier[size_y] ,
literal[string] : identifier[self] . identifier[is_nomad] (),
literal[string] : identifier[self] . identifier[is_regicide] (),
literal[string] : identifier[self] . identifier[is_arena] (),
literal[string] : identifier[self] . identifier[_map_hash] ()
},
literal[string] : identifier[self] . identifier[_get_mod] (),
literal[string] :{
literal[string] : identifier[self] . identifier[_header] . identifier[initial] . identifier[restore_time] > literal[int] ,
literal[string] : identifier[self] . identifier[_header] . identifier[initial] . identifier[restore_time] ,
literal[string] : identifier[mgz] . identifier[util] . identifier[convert_to_timestamp] ( identifier[self] . identifier[_header] . identifier[initial] . identifier[restore_time] /
literal[int] )
},
literal[string] :{
literal[string] : identifier[self] . identifier[_ladder] ,
literal[string] : identifier[self] . identifier[_ladder] != keyword[None]
},
literal[string] : identifier[len] ([ identifier[p] keyword[for] identifier[p] keyword[in] identifier[self] . identifier[_header] . identifier[scenario] . identifier[game_settings] . identifier[player_info]
keyword[if] identifier[p] [ literal[string] ]== literal[string] ]),
literal[string] : identifier[len] ([ identifier[p] keyword[for] identifier[p] keyword[in] identifier[self] . identifier[_header] . identifier[scenario] . identifier[game_settings] . identifier[player_info]
keyword[if] identifier[p] [ literal[string] ]== literal[string] ]),
literal[string] : identifier[mgz] . identifier[util] . identifier[convert_to_timestamp] ( identifier[self] . identifier[_time] / literal[int] ),
literal[string] : identifier[self] . identifier[_time] ,
literal[string] :{
literal[string] : identifier[self] . identifier[_hash] ,
literal[string] : identifier[mgz] . identifier[const] . identifier[VERSIONS] [ identifier[self] . identifier[_header] . identifier[version] ],
literal[string] : identifier[round] ( identifier[self] . identifier[_header] . identifier[sub_version] , literal[int] ),
literal[string] : identifier[os] . identifier[path] . identifier[basename] ( identifier[self] . identifier[_path] ),
literal[string] : identifier[self] . identifier[_get_timestamp] ()
},
literal[string] : identifier[dict] ( identifier[self] . identifier[_actions_without_player] ),
literal[string] : identifier[self] . identifier[_queue]
}
identifier[self] . identifier[_summary] [ literal[string] ]= identifier[guess_finished] ( identifier[self] . identifier[_summary] , identifier[data] )
keyword[if] identifier[self] . identifier[_summary] [ literal[string] ]:
identifier[self] . identifier[_summary] [ literal[string] ]= identifier[self] . identifier[_won_in] (). identifier[title] ()
identifier[self] . identifier[_set_winning_team] ()
keyword[if] identifier[self] . identifier[_show_chat] :
identifier[self] . identifier[_summary] [ literal[string] ]= identifier[self] . identifier[_chat]
keyword[if] identifier[self] . identifier[_show_timeline] :
identifier[self] . identifier[_summary] [ literal[string] ]= identifier[self] . identifier[_timeline]
keyword[if] identifier[self] . identifier[_show_coords] :
identifier[self] . identifier[_summary] [ literal[string] ]= identifier[self] . identifier[_coords] | def _summarize(self):
"""Game summary implementation."""
self._achievements_summarized = True
data = None
if self._postgame:
data = self._postgame.action # depends on [control=['if'], data=[]]
game_type = 'DM' if self._header.lobby.game_type == 'DM' else 'RM'
# data.resource_level
# self._get_starting_age(data.starting_age)
# not data.team_together
# data.all_techs
# data.lock_speed
self._summary = {'players': list(self.players(data, game_type)), 'diplomacy': self._diplomacy, 'rec_owner_index': self._header.replay.rec_player, 'rec_owner_number': self._rec_owner_number(), 'settings': {'type': game_type, 'difficulty': self._header.scenario.game_settings.difficulty, 'resource_level': 'standard', 'population_limit': self._header.lobby.population_limit * 25, 'speed': mgz.const.SPEEDS.get(self._header.replay.game_speed), 'reveal_map': self._header.lobby.reveal_map, 'starting_age': 'Dark' if game_type == 'RM' else 'Post Imperial', 'victory_condition': 'conquest' if self._header.scenario.victory.is_conquest else 'other', 'team_together': True, 'all_technologies': False, 'cheats': self._header.replay.cheats_enabled, 'lock_teams': self._header.lobby.lock_teams, 'lock_speed': True, 'record_game': True}, 'map': {'name': self._map.name(), 'size': self._map.size(), 'x': self._header.map_info.size_x, 'y': self._header.map_info.size_y, 'nomad': self.is_nomad(), 'regicide': self.is_regicide(), 'arena': self.is_arena(), 'hash': self._map_hash()}, 'mod': self._get_mod(), 'restore': {'restored': self._header.initial.restore_time > 0, 'start_int': self._header.initial.restore_time, 'start_time': mgz.util.convert_to_timestamp(self._header.initial.restore_time / 1000)}, 'voobly': {'ladder': self._ladder, 'rated': self._ladder != None}, 'number_of_humans': len([p for p in self._header.scenario.game_settings.player_info if p['type'] == 'human']), 'number_of_ai': len([p for p in self._header.scenario.game_settings.player_info if p['type'] == 'computer']), 'duration': mgz.util.convert_to_timestamp(self._time / 1000), 'time_int': self._time, 'metadata': {'hash': self._hash, 'version': mgz.const.VERSIONS[self._header.version], 'sub_version': round(self._header.sub_version, 2), 'filename': os.path.basename(self._path), 'timestamp': self._get_timestamp()}, 'action_histogram': dict(self._actions_without_player), 'queue': self._queue}
self._summary['finished'] = guess_finished(self._summary, data)
if self._summary['finished']:
self._summary['won_in'] = self._won_in().title()
self._set_winning_team() # depends on [control=['if'], data=[]]
if self._show_chat:
self._summary['chat'] = self._chat # depends on [control=['if'], data=[]]
if self._show_timeline:
self._summary['timeline'] = self._timeline # depends on [control=['if'], data=[]]
if self._show_coords:
self._summary['coords'] = self._coords # depends on [control=['if'], data=[]] |
def parse(self):
"""
Parses a GIT URL and returns an object. Raises an exception on invalid
URL.
:returns: Parsed object
:raise: :class:`.ParserError`
"""
d = {
'pathname': None,
'protocols': self._get_protocols(),
'protocol': 'ssh',
'href': self._url,
'resource': None,
'user': None,
'port': None,
'name': None,
'owner': None,
}
for regex in POSSIBLE_REGEXES:
match = regex.search(self._url)
if match:
d.update(match.groupdict())
break
else:
msg = "Invalid URL '{}'".format(self._url)
raise ParserError(msg)
return Parsed(**d) | def function[parse, parameter[self]]:
constant[
Parses a GIT URL and returns an object. Raises an exception on invalid
URL.
:returns: Parsed object
:raise: :class:`.ParserError`
]
variable[d] assign[=] dictionary[[<ast.Constant object at 0x7da204623eb0>, <ast.Constant object at 0x7da2046227a0>, <ast.Constant object at 0x7da204623df0>, <ast.Constant object at 0x7da204622a40>, <ast.Constant object at 0x7da204622f20>, <ast.Constant object at 0x7da204621c90>, <ast.Constant object at 0x7da204621930>, <ast.Constant object at 0x7da204620f70>, <ast.Constant object at 0x7da204623fd0>], [<ast.Constant object at 0x7da204623820>, <ast.Call object at 0x7da204622020>, <ast.Constant object at 0x7da204620250>, <ast.Attribute object at 0x7da2046238e0>, <ast.Constant object at 0x7da204622680>, <ast.Constant object at 0x7da2046206d0>, <ast.Constant object at 0x7da204623340>, <ast.Constant object at 0x7da204620eb0>, <ast.Constant object at 0x7da204621600>]]
for taget[name[regex]] in starred[name[POSSIBLE_REGEXES]] begin[:]
variable[match] assign[=] call[name[regex].search, parameter[name[self]._url]]
if name[match] begin[:]
call[name[d].update, parameter[call[name[match].groupdict, parameter[]]]]
break
return[call[name[Parsed], parameter[]]] | keyword[def] identifier[parse] ( identifier[self] ):
literal[string]
identifier[d] ={
literal[string] : keyword[None] ,
literal[string] : identifier[self] . identifier[_get_protocols] (),
literal[string] : literal[string] ,
literal[string] : identifier[self] . identifier[_url] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
}
keyword[for] identifier[regex] keyword[in] identifier[POSSIBLE_REGEXES] :
identifier[match] = identifier[regex] . identifier[search] ( identifier[self] . identifier[_url] )
keyword[if] identifier[match] :
identifier[d] . identifier[update] ( identifier[match] . identifier[groupdict] ())
keyword[break]
keyword[else] :
identifier[msg] = literal[string] . identifier[format] ( identifier[self] . identifier[_url] )
keyword[raise] identifier[ParserError] ( identifier[msg] )
keyword[return] identifier[Parsed] (** identifier[d] ) | def parse(self):
"""
Parses a GIT URL and returns an object. Raises an exception on invalid
URL.
:returns: Parsed object
:raise: :class:`.ParserError`
"""
d = {'pathname': None, 'protocols': self._get_protocols(), 'protocol': 'ssh', 'href': self._url, 'resource': None, 'user': None, 'port': None, 'name': None, 'owner': None}
for regex in POSSIBLE_REGEXES:
match = regex.search(self._url)
if match:
d.update(match.groupdict())
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['regex']]
else:
msg = "Invalid URL '{}'".format(self._url)
raise ParserError(msg)
return Parsed(**d) |
def get_archives(self, offset=None, count=None, session_id=None):
"""Returns an ArchiveList, which is an array of archives that are completed and in-progress,
for your API key.
:param int: offset Optional. The index offset of the first archive. 0 is offset
of the most recently started archive. 1 is the offset of the archive that started prior to
the most recent archive. If you do not specify an offset, 0 is used.
:param int: count Optional. The number of archives to be returned. The maximum
number of archives returned is 1000.
:param string: session_id Optional. Used to list archives for a specific session ID.
:rtype: An ArchiveList object, which is an array of Archive objects.
"""
params = {}
if offset is not None:
params['offset'] = offset
if count is not None:
params['count'] = count
if session_id is not None:
params['sessionId'] = session_id
endpoint = self.endpoints.archive_url() + "?" + urlencode(params)
response = requests.get(
endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout
)
if response.status_code < 300:
return ArchiveList(self, response.json())
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 404:
raise NotFoundError("Archive not found")
else:
raise RequestError("An unexpected error occurred", response.status_code) | def function[get_archives, parameter[self, offset, count, session_id]]:
constant[Returns an ArchiveList, which is an array of archives that are completed and in-progress,
for your API key.
:param int: offset Optional. The index offset of the first archive. 0 is offset
of the most recently started archive. 1 is the offset of the archive that started prior to
the most recent archive. If you do not specify an offset, 0 is used.
:param int: count Optional. The number of archives to be returned. The maximum
number of archives returned is 1000.
:param string: session_id Optional. Used to list archives for a specific session ID.
:rtype: An ArchiveList object, which is an array of Archive objects.
]
variable[params] assign[=] dictionary[[], []]
if compare[name[offset] is_not constant[None]] begin[:]
call[name[params]][constant[offset]] assign[=] name[offset]
if compare[name[count] is_not constant[None]] begin[:]
call[name[params]][constant[count]] assign[=] name[count]
if compare[name[session_id] is_not constant[None]] begin[:]
call[name[params]][constant[sessionId]] assign[=] name[session_id]
variable[endpoint] assign[=] binary_operation[binary_operation[call[name[self].endpoints.archive_url, parameter[]] + constant[?]] + call[name[urlencode], parameter[name[params]]]]
variable[response] assign[=] call[name[requests].get, parameter[name[endpoint]]]
if compare[name[response].status_code less[<] constant[300]] begin[:]
return[call[name[ArchiveList], parameter[name[self], call[name[response].json, parameter[]]]]] | keyword[def] identifier[get_archives] ( identifier[self] , identifier[offset] = keyword[None] , identifier[count] = keyword[None] , identifier[session_id] = keyword[None] ):
literal[string]
identifier[params] ={}
keyword[if] identifier[offset] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[offset]
keyword[if] identifier[count] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[count]
keyword[if] identifier[session_id] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[session_id]
identifier[endpoint] = identifier[self] . identifier[endpoints] . identifier[archive_url] ()+ literal[string] + identifier[urlencode] ( identifier[params] )
identifier[response] = identifier[requests] . identifier[get] (
identifier[endpoint] , identifier[headers] = identifier[self] . identifier[json_headers] (), identifier[proxies] = identifier[self] . identifier[proxies] , identifier[timeout] = identifier[self] . identifier[timeout]
)
keyword[if] identifier[response] . identifier[status_code] < literal[int] :
keyword[return] identifier[ArchiveList] ( identifier[self] , identifier[response] . identifier[json] ())
keyword[elif] identifier[response] . identifier[status_code] == literal[int] :
keyword[raise] identifier[AuthError] ()
keyword[elif] identifier[response] . identifier[status_code] == literal[int] :
keyword[raise] identifier[NotFoundError] ( literal[string] )
keyword[else] :
keyword[raise] identifier[RequestError] ( literal[string] , identifier[response] . identifier[status_code] ) | def get_archives(self, offset=None, count=None, session_id=None):
"""Returns an ArchiveList, which is an array of archives that are completed and in-progress,
for your API key.
:param int: offset Optional. The index offset of the first archive. 0 is offset
of the most recently started archive. 1 is the offset of the archive that started prior to
the most recent archive. If you do not specify an offset, 0 is used.
:param int: count Optional. The number of archives to be returned. The maximum
number of archives returned is 1000.
:param string: session_id Optional. Used to list archives for a specific session ID.
:rtype: An ArchiveList object, which is an array of Archive objects.
"""
params = {}
if offset is not None:
params['offset'] = offset # depends on [control=['if'], data=['offset']]
if count is not None:
params['count'] = count # depends on [control=['if'], data=['count']]
if session_id is not None:
params['sessionId'] = session_id # depends on [control=['if'], data=['session_id']]
endpoint = self.endpoints.archive_url() + '?' + urlencode(params)
response = requests.get(endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout)
if response.status_code < 300:
return ArchiveList(self, response.json()) # depends on [control=['if'], data=[]]
elif response.status_code == 403:
raise AuthError() # depends on [control=['if'], data=[]]
elif response.status_code == 404:
raise NotFoundError('Archive not found') # depends on [control=['if'], data=[]]
else:
raise RequestError('An unexpected error occurred', response.status_code) |
def at_reminder(client, channel, nick, args):
"""
Schedule a reminder to occur at a specific time. The given time can optionally
be specified to occur at a specific timezone, but will default to the value
of settings.TIMEZONE if none is specified. Times should be on a 24-hour clock.
These types of reminders are repeatable, should the last two words of the message
be of the form "repeat <days_of_week>" where days_of_week is a single string consisting
of any of the following days: M, Tu, W, Th, F, Sa, Su. For example, 'repeat MWF'
will repeat a reminder at the same time every Monday, Wednesday, and Friday.
A full example of how one would use this:
<sduncan> helga at 13:00 EST standup time repeat MTuWThF
This will create a reminder "standup time" to occur at 1:00PM Eastern every weekday.
Optionally, a specific channel can be specified to receive the reminder message. This
is useful if creating several reminders via a private message. To use this, specify
"on <channel>" between the time amount and the message:
<sduncan> helga at 13:00 EST on #bots standup time repeat MTuWThF
<sduncan> helga at 13:00 EST on bots standup time repeat MTuWThF
Note that the '#' char for specifying the channel is entirely optional.
"""
global _scheduled
now = datetime.datetime.utcnow().replace(tzinfo=pytz.UTC)
# Parse the time it should go off, and the minute offset of the day
hh, mm = map(int, args[0].split(':'))
# Strip time from args
args = args[1:]
# Default timezone
timezone = pytz.timezone(getattr(settings, 'TIMEZONE', 'US/Eastern'))
try:
# If there was a timezone passed in
timezone = pytz.timezone(args[0])
except pytz.UnknownTimeZoneError:
pass
else:
# If so, remove it from args
args = args[1:]
local_now = now.astimezone(timezone)
local_next = local_now.replace(hour=hh, minute=mm)
if local_next <= local_now:
local_next += datetime.timedelta(days=1)
reminder = {
'when': local_next.astimezone(pytz.UTC),
'channel': channel,
'message': ' '.join(args),
'creator': nick,
}
# Check for 'repeat' arg
try:
repeat = args[-2] == 'repeat'
except IndexError:
repeat = False
if repeat:
# If repeating, strip off the last two for the message
sched = args[-1]
reminder['message'] = ' '.join(args[:-2])
repeat_days = sorted([v for k, v in days_of_week.iteritems() if k in sched])
if not repeat_days:
return u"I didn't understand '{0}'. You must use any of M,Tu,W,Th,F,Sa,Su. Ex: MWF".format(sched)
reminder['repeat'] = repeat_days
for attempt in xrange(7):
if reminder['when'].weekday() in repeat_days:
break
reminder['when'] += datetime.timedelta(days=1)
# Handle ability to specify the channel
if reminder['message'].startswith('on'):
parts = reminder['message'].split(' ')
chan = parts[1]
reminder['message'] = ' '.join(parts[2:])
# Make sure channel is formatted correctly
if not chan.startswith('#'):
chan = '#{0}'.format(chan)
reminder['channel'] = chan
id = db.reminders.insert(reminder)
diff = reminder['when'] - now
delay = (diff.days * 24 * 3600) + diff.seconds
_scheduled.add(id)
reactor.callLater(delay, _do_reminder, id, client)
return u'Reminder set for {0} from now'.format(readable_time_delta(delay)) | def function[at_reminder, parameter[client, channel, nick, args]]:
constant[
Schedule a reminder to occur at a specific time. The given time can optionally
be specified to occur at a specific timezone, but will default to the value
of settings.TIMEZONE if none is specified. Times should be on a 24-hour clock.
These types of reminders are repeatable, should the last two words of the message
be of the form "repeat <days_of_week>" where days_of_week is a single string consisting
of any of the following days: M, Tu, W, Th, F, Sa, Su. For example, 'repeat MWF'
will repeat a reminder at the same time every Monday, Wednesday, and Friday.
A full example of how one would use this:
<sduncan> helga at 13:00 EST standup time repeat MTuWThF
This will create a reminder "standup time" to occur at 1:00PM Eastern every weekday.
Optionally, a specific channel can be specified to receive the reminder message. This
is useful if creating several reminders via a private message. To use this, specify
"on <channel>" between the time amount and the message:
<sduncan> helga at 13:00 EST on #bots standup time repeat MTuWThF
<sduncan> helga at 13:00 EST on bots standup time repeat MTuWThF
Note that the '#' char for specifying the channel is entirely optional.
]
<ast.Global object at 0x7da1b138dcc0>
variable[now] assign[=] call[call[name[datetime].datetime.utcnow, parameter[]].replace, parameter[]]
<ast.Tuple object at 0x7da1b138fee0> assign[=] call[name[map], parameter[name[int], call[call[name[args]][constant[0]].split, parameter[constant[:]]]]]
variable[args] assign[=] call[name[args]][<ast.Slice object at 0x7da1b138c160>]
variable[timezone] assign[=] call[name[pytz].timezone, parameter[call[name[getattr], parameter[name[settings], constant[TIMEZONE], constant[US/Eastern]]]]]
<ast.Try object at 0x7da1b138e980>
variable[local_now] assign[=] call[name[now].astimezone, parameter[name[timezone]]]
variable[local_next] assign[=] call[name[local_now].replace, parameter[]]
if compare[name[local_next] less_or_equal[<=] name[local_now]] begin[:]
<ast.AugAssign object at 0x7da1b138ec80>
variable[reminder] assign[=] dictionary[[<ast.Constant object at 0x7da1b138fc10>, <ast.Constant object at 0x7da1b138e3b0>, <ast.Constant object at 0x7da1b138f640>, <ast.Constant object at 0x7da1b138fa90>], [<ast.Call object at 0x7da1b138fa00>, <ast.Name object at 0x7da1b138c4f0>, <ast.Call object at 0x7da1b138fbe0>, <ast.Name object at 0x7da1b138c280>]]
<ast.Try object at 0x7da1b138dea0>
if name[repeat] begin[:]
variable[sched] assign[=] call[name[args]][<ast.UnaryOp object at 0x7da1b138f760>]
call[name[reminder]][constant[message]] assign[=] call[constant[ ].join, parameter[call[name[args]][<ast.Slice object at 0x7da1b138f130>]]]
variable[repeat_days] assign[=] call[name[sorted], parameter[<ast.ListComp object at 0x7da1b138e740>]]
if <ast.UnaryOp object at 0x7da1b138ea40> begin[:]
return[call[constant[I didn't understand '{0}'. You must use any of M,Tu,W,Th,F,Sa,Su. Ex: MWF].format, parameter[name[sched]]]]
call[name[reminder]][constant[repeat]] assign[=] name[repeat_days]
for taget[name[attempt]] in starred[call[name[xrange], parameter[constant[7]]]] begin[:]
if compare[call[call[name[reminder]][constant[when]].weekday, parameter[]] in name[repeat_days]] begin[:]
break
<ast.AugAssign object at 0x7da1b15b33d0>
if call[call[name[reminder]][constant[message]].startswith, parameter[constant[on]]] begin[:]
variable[parts] assign[=] call[call[name[reminder]][constant[message]].split, parameter[constant[ ]]]
variable[chan] assign[=] call[name[parts]][constant[1]]
call[name[reminder]][constant[message]] assign[=] call[constant[ ].join, parameter[call[name[parts]][<ast.Slice object at 0x7da1b1305090>]]]
if <ast.UnaryOp object at 0x7da1b1306a40> begin[:]
variable[chan] assign[=] call[constant[#{0}].format, parameter[name[chan]]]
call[name[reminder]][constant[channel]] assign[=] name[chan]
variable[id] assign[=] call[name[db].reminders.insert, parameter[name[reminder]]]
variable[diff] assign[=] binary_operation[call[name[reminder]][constant[when]] - name[now]]
variable[delay] assign[=] binary_operation[binary_operation[binary_operation[name[diff].days * constant[24]] * constant[3600]] + name[diff].seconds]
call[name[_scheduled].add, parameter[name[id]]]
call[name[reactor].callLater, parameter[name[delay], name[_do_reminder], name[id], name[client]]]
return[call[constant[Reminder set for {0} from now].format, parameter[call[name[readable_time_delta], parameter[name[delay]]]]]] | keyword[def] identifier[at_reminder] ( identifier[client] , identifier[channel] , identifier[nick] , identifier[args] ):
literal[string]
keyword[global] identifier[_scheduled]
identifier[now] = identifier[datetime] . identifier[datetime] . identifier[utcnow] (). identifier[replace] ( identifier[tzinfo] = identifier[pytz] . identifier[UTC] )
identifier[hh] , identifier[mm] = identifier[map] ( identifier[int] , identifier[args] [ literal[int] ]. identifier[split] ( literal[string] ))
identifier[args] = identifier[args] [ literal[int] :]
identifier[timezone] = identifier[pytz] . identifier[timezone] ( identifier[getattr] ( identifier[settings] , literal[string] , literal[string] ))
keyword[try] :
identifier[timezone] = identifier[pytz] . identifier[timezone] ( identifier[args] [ literal[int] ])
keyword[except] identifier[pytz] . identifier[UnknownTimeZoneError] :
keyword[pass]
keyword[else] :
identifier[args] = identifier[args] [ literal[int] :]
identifier[local_now] = identifier[now] . identifier[astimezone] ( identifier[timezone] )
identifier[local_next] = identifier[local_now] . identifier[replace] ( identifier[hour] = identifier[hh] , identifier[minute] = identifier[mm] )
keyword[if] identifier[local_next] <= identifier[local_now] :
identifier[local_next] += identifier[datetime] . identifier[timedelta] ( identifier[days] = literal[int] )
identifier[reminder] ={
literal[string] : identifier[local_next] . identifier[astimezone] ( identifier[pytz] . identifier[UTC] ),
literal[string] : identifier[channel] ,
literal[string] : literal[string] . identifier[join] ( identifier[args] ),
literal[string] : identifier[nick] ,
}
keyword[try] :
identifier[repeat] = identifier[args] [- literal[int] ]== literal[string]
keyword[except] identifier[IndexError] :
identifier[repeat] = keyword[False]
keyword[if] identifier[repeat] :
identifier[sched] = identifier[args] [- literal[int] ]
identifier[reminder] [ literal[string] ]= literal[string] . identifier[join] ( identifier[args] [:- literal[int] ])
identifier[repeat_days] = identifier[sorted] ([ identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[days_of_week] . identifier[iteritems] () keyword[if] identifier[k] keyword[in] identifier[sched] ])
keyword[if] keyword[not] identifier[repeat_days] :
keyword[return] literal[string] . identifier[format] ( identifier[sched] )
identifier[reminder] [ literal[string] ]= identifier[repeat_days]
keyword[for] identifier[attempt] keyword[in] identifier[xrange] ( literal[int] ):
keyword[if] identifier[reminder] [ literal[string] ]. identifier[weekday] () keyword[in] identifier[repeat_days] :
keyword[break]
identifier[reminder] [ literal[string] ]+= identifier[datetime] . identifier[timedelta] ( identifier[days] = literal[int] )
keyword[if] identifier[reminder] [ literal[string] ]. identifier[startswith] ( literal[string] ):
identifier[parts] = identifier[reminder] [ literal[string] ]. identifier[split] ( literal[string] )
identifier[chan] = identifier[parts] [ literal[int] ]
identifier[reminder] [ literal[string] ]= literal[string] . identifier[join] ( identifier[parts] [ literal[int] :])
keyword[if] keyword[not] identifier[chan] . identifier[startswith] ( literal[string] ):
identifier[chan] = literal[string] . identifier[format] ( identifier[chan] )
identifier[reminder] [ literal[string] ]= identifier[chan]
identifier[id] = identifier[db] . identifier[reminders] . identifier[insert] ( identifier[reminder] )
identifier[diff] = identifier[reminder] [ literal[string] ]- identifier[now]
identifier[delay] =( identifier[diff] . identifier[days] * literal[int] * literal[int] )+ identifier[diff] . identifier[seconds]
identifier[_scheduled] . identifier[add] ( identifier[id] )
identifier[reactor] . identifier[callLater] ( identifier[delay] , identifier[_do_reminder] , identifier[id] , identifier[client] )
keyword[return] literal[string] . identifier[format] ( identifier[readable_time_delta] ( identifier[delay] )) | def at_reminder(client, channel, nick, args):
"""
Schedule a reminder to occur at a specific time. The given time can optionally
be specified to occur at a specific timezone, but will default to the value
of settings.TIMEZONE if none is specified. Times should be on a 24-hour clock.
These types of reminders are repeatable, should the last two words of the message
be of the form "repeat <days_of_week>" where days_of_week is a single string consisting
of any of the following days: M, Tu, W, Th, F, Sa, Su. For example, 'repeat MWF'
will repeat a reminder at the same time every Monday, Wednesday, and Friday.
A full example of how one would use this:
<sduncan> helga at 13:00 EST standup time repeat MTuWThF
This will create a reminder "standup time" to occur at 1:00PM Eastern every weekday.
Optionally, a specific channel can be specified to receive the reminder message. This
is useful if creating several reminders via a private message. To use this, specify
"on <channel>" between the time amount and the message:
<sduncan> helga at 13:00 EST on #bots standup time repeat MTuWThF
<sduncan> helga at 13:00 EST on bots standup time repeat MTuWThF
Note that the '#' char for specifying the channel is entirely optional.
"""
global _scheduled
now = datetime.datetime.utcnow().replace(tzinfo=pytz.UTC)
# Parse the time it should go off, and the minute offset of the day
(hh, mm) = map(int, args[0].split(':'))
# Strip time from args
args = args[1:]
# Default timezone
timezone = pytz.timezone(getattr(settings, 'TIMEZONE', 'US/Eastern'))
try:
# If there was a timezone passed in
timezone = pytz.timezone(args[0]) # depends on [control=['try'], data=[]]
except pytz.UnknownTimeZoneError:
pass # depends on [control=['except'], data=[]]
else:
# If so, remove it from args
args = args[1:]
local_now = now.astimezone(timezone)
local_next = local_now.replace(hour=hh, minute=mm)
if local_next <= local_now:
local_next += datetime.timedelta(days=1) # depends on [control=['if'], data=['local_next']]
reminder = {'when': local_next.astimezone(pytz.UTC), 'channel': channel, 'message': ' '.join(args), 'creator': nick}
# Check for 'repeat' arg
try:
repeat = args[-2] == 'repeat' # depends on [control=['try'], data=[]]
except IndexError:
repeat = False # depends on [control=['except'], data=[]]
if repeat:
# If repeating, strip off the last two for the message
sched = args[-1]
reminder['message'] = ' '.join(args[:-2])
repeat_days = sorted([v for (k, v) in days_of_week.iteritems() if k in sched])
if not repeat_days:
return u"I didn't understand '{0}'. You must use any of M,Tu,W,Th,F,Sa,Su. Ex: MWF".format(sched) # depends on [control=['if'], data=[]]
reminder['repeat'] = repeat_days
for attempt in xrange(7):
if reminder['when'].weekday() in repeat_days:
break # depends on [control=['if'], data=[]]
reminder['when'] += datetime.timedelta(days=1) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
# Handle ability to specify the channel
if reminder['message'].startswith('on'):
parts = reminder['message'].split(' ')
chan = parts[1]
reminder['message'] = ' '.join(parts[2:])
# Make sure channel is formatted correctly
if not chan.startswith('#'):
chan = '#{0}'.format(chan) # depends on [control=['if'], data=[]]
reminder['channel'] = chan # depends on [control=['if'], data=[]]
id = db.reminders.insert(reminder)
diff = reminder['when'] - now
delay = diff.days * 24 * 3600 + diff.seconds
_scheduled.add(id)
reactor.callLater(delay, _do_reminder, id, client)
return u'Reminder set for {0} from now'.format(readable_time_delta(delay)) |
def _initial_proposal_distribution(self, parameters, theta, size,
default_std=1e-4):
"""
Generate an initial proposal distribution around the point theta.
"""
missing_parameters = set(parameters).difference(theta)
if missing_parameters:
raise ValueError("cannot create initial proposal distribution "\
"because the following parameters are missing: {}".format(
", ".join(missing_parameters)))
std = np.ones(len(parameters), dtype=float)
initial_proposal_stds \
= self._configuration.get("initial_proposal_stds", {})
p0 = np.array([theta[p] for p in parameters])
std = np.array(map(float, [initial_proposal_stds.get(p, default_std) \
for p in parameters]))
return np.vstack([p0 + std * np.random.normal(size=len(p0)) \
for i in range(size)]) | def function[_initial_proposal_distribution, parameter[self, parameters, theta, size, default_std]]:
constant[
Generate an initial proposal distribution around the point theta.
]
variable[missing_parameters] assign[=] call[call[name[set], parameter[name[parameters]]].difference, parameter[name[theta]]]
if name[missing_parameters] begin[:]
<ast.Raise object at 0x7da18dc04970>
variable[std] assign[=] call[name[np].ones, parameter[call[name[len], parameter[name[parameters]]]]]
variable[initial_proposal_stds] assign[=] call[name[self]._configuration.get, parameter[constant[initial_proposal_stds], dictionary[[], []]]]
variable[p0] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da18dc077c0>]]
variable[std] assign[=] call[name[np].array, parameter[call[name[map], parameter[name[float], <ast.ListComp object at 0x7da18dc07fa0>]]]]
return[call[name[np].vstack, parameter[<ast.ListComp object at 0x7da20e9566b0>]]] | keyword[def] identifier[_initial_proposal_distribution] ( identifier[self] , identifier[parameters] , identifier[theta] , identifier[size] ,
identifier[default_std] = literal[int] ):
literal[string]
identifier[missing_parameters] = identifier[set] ( identifier[parameters] ). identifier[difference] ( identifier[theta] )
keyword[if] identifier[missing_parameters] :
keyword[raise] identifier[ValueError] ( literal[string] literal[string] . identifier[format] (
literal[string] . identifier[join] ( identifier[missing_parameters] )))
identifier[std] = identifier[np] . identifier[ones] ( identifier[len] ( identifier[parameters] ), identifier[dtype] = identifier[float] )
identifier[initial_proposal_stds] = identifier[self] . identifier[_configuration] . identifier[get] ( literal[string] ,{})
identifier[p0] = identifier[np] . identifier[array] ([ identifier[theta] [ identifier[p] ] keyword[for] identifier[p] keyword[in] identifier[parameters] ])
identifier[std] = identifier[np] . identifier[array] ( identifier[map] ( identifier[float] ,[ identifier[initial_proposal_stds] . identifier[get] ( identifier[p] , identifier[default_std] ) keyword[for] identifier[p] keyword[in] identifier[parameters] ]))
keyword[return] identifier[np] . identifier[vstack] ([ identifier[p0] + identifier[std] * identifier[np] . identifier[random] . identifier[normal] ( identifier[size] = identifier[len] ( identifier[p0] )) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[size] )]) | def _initial_proposal_distribution(self, parameters, theta, size, default_std=0.0001):
"""
Generate an initial proposal distribution around the point theta.
"""
missing_parameters = set(parameters).difference(theta)
if missing_parameters:
raise ValueError('cannot create initial proposal distribution because the following parameters are missing: {}'.format(', '.join(missing_parameters))) # depends on [control=['if'], data=[]]
std = np.ones(len(parameters), dtype=float)
initial_proposal_stds = self._configuration.get('initial_proposal_stds', {})
p0 = np.array([theta[p] for p in parameters])
std = np.array(map(float, [initial_proposal_stds.get(p, default_std) for p in parameters]))
return np.vstack([p0 + std * np.random.normal(size=len(p0)) for i in range(size)]) |
def age(self):
"""
Get the age of the PID file.
"""
# Created timestamp
created = self.created()
# Age in seconds / minutes / hours / days
age_secs = time() - created
age_mins = 0 if (age_secs < 60) else (age_secs / 60)
age_hours = 0 if (age_secs < 3600) else (age_mins / 60)
age_days = 0 if (age_secs < 86400) else (age_hours / 24)
# Return the age tuple
return (
int(age_secs),
int(age_mins),
int(age_hours),
int(age_days)
) | def function[age, parameter[self]]:
constant[
Get the age of the PID file.
]
variable[created] assign[=] call[name[self].created, parameter[]]
variable[age_secs] assign[=] binary_operation[call[name[time], parameter[]] - name[created]]
variable[age_mins] assign[=] <ast.IfExp object at 0x7da18f723d30>
variable[age_hours] assign[=] <ast.IfExp object at 0x7da18f721d20>
variable[age_days] assign[=] <ast.IfExp object at 0x7da18f58eda0>
return[tuple[[<ast.Call object at 0x7da18f58f190>, <ast.Call object at 0x7da18f58e170>, <ast.Call object at 0x7da18f58e860>, <ast.Call object at 0x7da18f58d1e0>]]] | keyword[def] identifier[age] ( identifier[self] ):
literal[string]
identifier[created] = identifier[self] . identifier[created] ()
identifier[age_secs] = identifier[time] ()- identifier[created]
identifier[age_mins] = literal[int] keyword[if] ( identifier[age_secs] < literal[int] ) keyword[else] ( identifier[age_secs] / literal[int] )
identifier[age_hours] = literal[int] keyword[if] ( identifier[age_secs] < literal[int] ) keyword[else] ( identifier[age_mins] / literal[int] )
identifier[age_days] = literal[int] keyword[if] ( identifier[age_secs] < literal[int] ) keyword[else] ( identifier[age_hours] / literal[int] )
keyword[return] (
identifier[int] ( identifier[age_secs] ),
identifier[int] ( identifier[age_mins] ),
identifier[int] ( identifier[age_hours] ),
identifier[int] ( identifier[age_days] )
) | def age(self):
"""
Get the age of the PID file.
"""
# Created timestamp
created = self.created()
# Age in seconds / minutes / hours / days
age_secs = time() - created
age_mins = 0 if age_secs < 60 else age_secs / 60
age_hours = 0 if age_secs < 3600 else age_mins / 60
age_days = 0 if age_secs < 86400 else age_hours / 24
# Return the age tuple
return (int(age_secs), int(age_mins), int(age_hours), int(age_days)) |
def sismember(self, name, value):
"""
Return a boolean indicating if ``value`` is a member of set ``name``
send raw (source) values here. Right functioning with other values
not guaranteed (and even worse).
"""
return self.storage.sismember(name, self.dump(value)) | def function[sismember, parameter[self, name, value]]:
constant[
Return a boolean indicating if ``value`` is a member of set ``name``
send raw (source) values here. Right functioning with other values
not guaranteed (and even worse).
]
return[call[name[self].storage.sismember, parameter[name[name], call[name[self].dump, parameter[name[value]]]]]] | keyword[def] identifier[sismember] ( identifier[self] , identifier[name] , identifier[value] ):
literal[string]
keyword[return] identifier[self] . identifier[storage] . identifier[sismember] ( identifier[name] , identifier[self] . identifier[dump] ( identifier[value] )) | def sismember(self, name, value):
"""
Return a boolean indicating if ``value`` is a member of set ``name``
send raw (source) values here. Right functioning with other values
not guaranteed (and even worse).
"""
return self.storage.sismember(name, self.dump(value)) |
def link(url, text='', classes='', target='', get="", **kwargs):
'''
Output a link tag.
'''
if not (url.startswith('http') or url.startswith('/')):
# Handle additional reverse args.
urlargs = {}
for arg, val in kwargs.items():
if arg[:4] == "url_":
urlargs[arg[4:]] = val
url = reverse(url, kwargs=urlargs)
if get:
url += '?' + get
return html.tag('a', text or url, {
'class': classes, 'target': target, 'href': url}) | def function[link, parameter[url, text, classes, target, get]]:
constant[
Output a link tag.
]
if <ast.UnaryOp object at 0x7da204621c90> begin[:]
variable[urlargs] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b021f910>, <ast.Name object at 0x7da1b021ee00>]]] in starred[call[name[kwargs].items, parameter[]]] begin[:]
if compare[call[name[arg]][<ast.Slice object at 0x7da1b021c0d0>] equal[==] constant[url_]] begin[:]
call[name[urlargs]][call[name[arg]][<ast.Slice object at 0x7da1b021fa30>]] assign[=] name[val]
variable[url] assign[=] call[name[reverse], parameter[name[url]]]
if name[get] begin[:]
<ast.AugAssign object at 0x7da1b021e380>
return[call[name[html].tag, parameter[constant[a], <ast.BoolOp object at 0x7da1b021dde0>, dictionary[[<ast.Constant object at 0x7da18f8122f0>, <ast.Constant object at 0x7da18f8118d0>, <ast.Constant object at 0x7da18f812290>], [<ast.Name object at 0x7da18f8134f0>, <ast.Name object at 0x7da18f812650>, <ast.Name object at 0x7da18f812b60>]]]]] | keyword[def] identifier[link] ( identifier[url] , identifier[text] = literal[string] , identifier[classes] = literal[string] , identifier[target] = literal[string] , identifier[get] = literal[string] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] ( identifier[url] . identifier[startswith] ( literal[string] ) keyword[or] identifier[url] . identifier[startswith] ( literal[string] )):
identifier[urlargs] ={}
keyword[for] identifier[arg] , identifier[val] keyword[in] identifier[kwargs] . identifier[items] ():
keyword[if] identifier[arg] [: literal[int] ]== literal[string] :
identifier[urlargs] [ identifier[arg] [ literal[int] :]]= identifier[val]
identifier[url] = identifier[reverse] ( identifier[url] , identifier[kwargs] = identifier[urlargs] )
keyword[if] identifier[get] :
identifier[url] += literal[string] + identifier[get]
keyword[return] identifier[html] . identifier[tag] ( literal[string] , identifier[text] keyword[or] identifier[url] ,{
literal[string] : identifier[classes] , literal[string] : identifier[target] , literal[string] : identifier[url] }) | def link(url, text='', classes='', target='', get='', **kwargs):
"""
Output a link tag.
"""
if not (url.startswith('http') or url.startswith('/')):
# Handle additional reverse args.
urlargs = {}
for (arg, val) in kwargs.items():
if arg[:4] == 'url_':
urlargs[arg[4:]] = val # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
url = reverse(url, kwargs=urlargs)
if get:
url += '?' + get # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return html.tag('a', text or url, {'class': classes, 'target': target, 'href': url}) |
def create(cls, **kwargs):
"""
Create a command object by issuing a POST request to the /command endpoint
Note - this does not wait for the command to complete
Args:
`**kwargs`: keyword arguments specific to command type
Returns:
Command object
"""
conn = Qubole.agent()
if kwargs.get('command_type') is None:
kwargs['command_type'] = cls.__name__
if kwargs.get('tags') is not None:
kwargs['tags'] = kwargs['tags'].split(',')
return cls(conn.post(cls.rest_entity_path, data=kwargs)) | def function[create, parameter[cls]]:
constant[
Create a command object by issuing a POST request to the /command endpoint
Note - this does not wait for the command to complete
Args:
`**kwargs`: keyword arguments specific to command type
Returns:
Command object
]
variable[conn] assign[=] call[name[Qubole].agent, parameter[]]
if compare[call[name[kwargs].get, parameter[constant[command_type]]] is constant[None]] begin[:]
call[name[kwargs]][constant[command_type]] assign[=] name[cls].__name__
if compare[call[name[kwargs].get, parameter[constant[tags]]] is_not constant[None]] begin[:]
call[name[kwargs]][constant[tags]] assign[=] call[call[name[kwargs]][constant[tags]].split, parameter[constant[,]]]
return[call[name[cls], parameter[call[name[conn].post, parameter[name[cls].rest_entity_path]]]]] | keyword[def] identifier[create] ( identifier[cls] ,** identifier[kwargs] ):
literal[string]
identifier[conn] = identifier[Qubole] . identifier[agent] ()
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ) keyword[is] keyword[None] :
identifier[kwargs] [ literal[string] ]= identifier[cls] . identifier[__name__]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] :
identifier[kwargs] [ literal[string] ]= identifier[kwargs] [ literal[string] ]. identifier[split] ( literal[string] )
keyword[return] identifier[cls] ( identifier[conn] . identifier[post] ( identifier[cls] . identifier[rest_entity_path] , identifier[data] = identifier[kwargs] )) | def create(cls, **kwargs):
"""
Create a command object by issuing a POST request to the /command endpoint
Note - this does not wait for the command to complete
Args:
`**kwargs`: keyword arguments specific to command type
Returns:
Command object
"""
conn = Qubole.agent()
if kwargs.get('command_type') is None:
kwargs['command_type'] = cls.__name__ # depends on [control=['if'], data=[]]
if kwargs.get('tags') is not None:
kwargs['tags'] = kwargs['tags'].split(',') # depends on [control=['if'], data=[]]
return cls(conn.post(cls.rest_entity_path, data=kwargs)) |
def _send(self, method, path, data, filename):
"""Send data to a remote server, either with a POST or a PUT request.
Args:
`method`: The method (POST or PUT) to use.
`path`: The path to the resource.
`data`: The data to send.
`filename`: The filename of the file to send (if any).
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
if filename is None:
return self._send_json(method, path, data)
else:
return self._send_file(method, path, data, filename) | def function[_send, parameter[self, method, path, data, filename]]:
constant[Send data to a remote server, either with a POST or a PUT request.
Args:
`method`: The method (POST or PUT) to use.
`path`: The path to the resource.
`data`: The data to send.
`filename`: The filename of the file to send (if any).
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
]
if compare[name[filename] is constant[None]] begin[:]
return[call[name[self]._send_json, parameter[name[method], name[path], name[data]]]] | keyword[def] identifier[_send] ( identifier[self] , identifier[method] , identifier[path] , identifier[data] , identifier[filename] ):
literal[string]
keyword[if] identifier[filename] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[_send_json] ( identifier[method] , identifier[path] , identifier[data] )
keyword[else] :
keyword[return] identifier[self] . identifier[_send_file] ( identifier[method] , identifier[path] , identifier[data] , identifier[filename] ) | def _send(self, method, path, data, filename):
"""Send data to a remote server, either with a POST or a PUT request.
Args:
`method`: The method (POST or PUT) to use.
`path`: The path to the resource.
`data`: The data to send.
`filename`: The filename of the file to send (if any).
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
if filename is None:
return self._send_json(method, path, data) # depends on [control=['if'], data=[]]
else:
return self._send_file(method, path, data, filename) |
def stateDict(self):
"""Saves internal values to be loaded later
:returns: dict -- {'parametername': value, ...}
"""
state = {
'duration' : self._duration,
'intensity' : self._intensity,
'risefall' : self._risefall,
'stim_type' : self.name
}
return state | def function[stateDict, parameter[self]]:
constant[Saves internal values to be loaded later
:returns: dict -- {'parametername': value, ...}
]
variable[state] assign[=] dictionary[[<ast.Constant object at 0x7da18f00db70>, <ast.Constant object at 0x7da18f00e440>, <ast.Constant object at 0x7da18f00ef50>, <ast.Constant object at 0x7da18f00ee60>], [<ast.Attribute object at 0x7da18f00de70>, <ast.Attribute object at 0x7da18f00ed10>, <ast.Attribute object at 0x7da18f00eb00>, <ast.Attribute object at 0x7da18f00da50>]]
return[name[state]] | keyword[def] identifier[stateDict] ( identifier[self] ):
literal[string]
identifier[state] ={
literal[string] : identifier[self] . identifier[_duration] ,
literal[string] : identifier[self] . identifier[_intensity] ,
literal[string] : identifier[self] . identifier[_risefall] ,
literal[string] : identifier[self] . identifier[name]
}
keyword[return] identifier[state] | def stateDict(self):
"""Saves internal values to be loaded later
:returns: dict -- {'parametername': value, ...}
"""
state = {'duration': self._duration, 'intensity': self._intensity, 'risefall': self._risefall, 'stim_type': self.name}
return state |
def _levenshtein_compute(source, target, rd_flag):
"""Computes the Levenshtein
(https://en.wikipedia.org/wiki/Levenshtein_distance)
and restricted Damerau-Levenshtein
(https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance)
distances between two Unicode strings with given lengths using the
Wagner-Fischer algorithm
(https://en.wikipedia.org/wiki/Wagner%E2%80%93Fischer_algorithm).
These distances are defined recursively, since the distance between two
strings is just the cost of adjusting the last one or two characters plus
the distance between the prefixes that exclude these characters (e.g. the
distance between "tester" and "tested" is 1 + the distance between "teste"
and "teste"). The Wagner-Fischer algorithm retains this idea but eliminates
redundant computations by storing the distances between various prefixes in
a matrix that is filled in iteratively.
"""
# Create matrix of correct size (this is s_len + 1 * t_len + 1 so that the
# empty prefixes "" can also be included). The leftmost column represents
# transforming various source prefixes into an empty string, which can
# always be done by deleting all characters in the respective prefix, and
# the top row represents transforming the empty string into various target
# prefixes, which can always be done by inserting every character in the
# respective prefix. The ternary used to build the list should ensure that
# this row and column are now filled correctly
s_range = range(len(source) + 1)
t_range = range(len(target) + 1)
matrix = [[(i if j == 0 else j) for j in t_range] for i in s_range]
# Iterate through rest of matrix, filling it in with Levenshtein
# distances for the remaining prefix combinations
for i in s_range[1:]:
for j in t_range[1:]:
# Applies the recursive logic outlined above using the values
# stored in the matrix so far. The options for the last pair of
# characters are deletion, insertion, and substitution, which
# amount to dropping the source character, the target character,
# or both and then calculating the distance for the resulting
# prefix combo. If the characters at this point are the same, the
# situation can be thought of as a free substitution
del_dist = matrix[i - 1][j] + 1
ins_dist = matrix[i][j - 1] + 1
sub_trans_cost = 0 if source[i - 1] == target[j - 1] else 1
sub_dist = matrix[i - 1][j - 1] + sub_trans_cost
# Choose option that produces smallest distance
matrix[i][j] = min(del_dist, ins_dist, sub_dist)
# If restricted Damerau-Levenshtein was requested via the flag,
# then there may be a fourth option: transposing the current and
# previous characters in the source string. This can be thought of
# as a double substitution and has a similar free case, where the
# current and preceeding character in both strings is the same
if rd_flag and i > 1 and j > 1 and source[i - 1] == target[j - 2] \
and source[i - 2] == target[j - 1]:
trans_dist = matrix[i - 2][j - 2] + sub_trans_cost
matrix[i][j] = min(matrix[i][j], trans_dist)
# At this point, the matrix is full, and the biggest prefixes are just the
# strings themselves, so this is the desired distance
return matrix[len(source)][len(target)] | def function[_levenshtein_compute, parameter[source, target, rd_flag]]:
constant[Computes the Levenshtein
(https://en.wikipedia.org/wiki/Levenshtein_distance)
and restricted Damerau-Levenshtein
(https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance)
distances between two Unicode strings with given lengths using the
Wagner-Fischer algorithm
(https://en.wikipedia.org/wiki/Wagner%E2%80%93Fischer_algorithm).
These distances are defined recursively, since the distance between two
strings is just the cost of adjusting the last one or two characters plus
the distance between the prefixes that exclude these characters (e.g. the
distance between "tester" and "tested" is 1 + the distance between "teste"
and "teste"). The Wagner-Fischer algorithm retains this idea but eliminates
redundant computations by storing the distances between various prefixes in
a matrix that is filled in iteratively.
]
variable[s_range] assign[=] call[name[range], parameter[binary_operation[call[name[len], parameter[name[source]]] + constant[1]]]]
variable[t_range] assign[=] call[name[range], parameter[binary_operation[call[name[len], parameter[name[target]]] + constant[1]]]]
variable[matrix] assign[=] <ast.ListComp object at 0x7da204564130>
for taget[name[i]] in starred[call[name[s_range]][<ast.Slice object at 0x7da204564580>]] begin[:]
for taget[name[j]] in starred[call[name[t_range]][<ast.Slice object at 0x7da204564430>]] begin[:]
variable[del_dist] assign[=] binary_operation[call[call[name[matrix]][binary_operation[name[i] - constant[1]]]][name[j]] + constant[1]]
variable[ins_dist] assign[=] binary_operation[call[call[name[matrix]][name[i]]][binary_operation[name[j] - constant[1]]] + constant[1]]
variable[sub_trans_cost] assign[=] <ast.IfExp object at 0x7da204566e60>
variable[sub_dist] assign[=] binary_operation[call[call[name[matrix]][binary_operation[name[i] - constant[1]]]][binary_operation[name[j] - constant[1]]] + name[sub_trans_cost]]
call[call[name[matrix]][name[i]]][name[j]] assign[=] call[name[min], parameter[name[del_dist], name[ins_dist], name[sub_dist]]]
if <ast.BoolOp object at 0x7da2045660e0> begin[:]
variable[trans_dist] assign[=] binary_operation[call[call[name[matrix]][binary_operation[name[i] - constant[2]]]][binary_operation[name[j] - constant[2]]] + name[sub_trans_cost]]
call[call[name[matrix]][name[i]]][name[j]] assign[=] call[name[min], parameter[call[call[name[matrix]][name[i]]][name[j]], name[trans_dist]]]
return[call[call[name[matrix]][call[name[len], parameter[name[source]]]]][call[name[len], parameter[name[target]]]]] | keyword[def] identifier[_levenshtein_compute] ( identifier[source] , identifier[target] , identifier[rd_flag] ):
literal[string]
identifier[s_range] = identifier[range] ( identifier[len] ( identifier[source] )+ literal[int] )
identifier[t_range] = identifier[range] ( identifier[len] ( identifier[target] )+ literal[int] )
identifier[matrix] =[[( identifier[i] keyword[if] identifier[j] == literal[int] keyword[else] identifier[j] ) keyword[for] identifier[j] keyword[in] identifier[t_range] ] keyword[for] identifier[i] keyword[in] identifier[s_range] ]
keyword[for] identifier[i] keyword[in] identifier[s_range] [ literal[int] :]:
keyword[for] identifier[j] keyword[in] identifier[t_range] [ literal[int] :]:
identifier[del_dist] = identifier[matrix] [ identifier[i] - literal[int] ][ identifier[j] ]+ literal[int]
identifier[ins_dist] = identifier[matrix] [ identifier[i] ][ identifier[j] - literal[int] ]+ literal[int]
identifier[sub_trans_cost] = literal[int] keyword[if] identifier[source] [ identifier[i] - literal[int] ]== identifier[target] [ identifier[j] - literal[int] ] keyword[else] literal[int]
identifier[sub_dist] = identifier[matrix] [ identifier[i] - literal[int] ][ identifier[j] - literal[int] ]+ identifier[sub_trans_cost]
identifier[matrix] [ identifier[i] ][ identifier[j] ]= identifier[min] ( identifier[del_dist] , identifier[ins_dist] , identifier[sub_dist] )
keyword[if] identifier[rd_flag] keyword[and] identifier[i] > literal[int] keyword[and] identifier[j] > literal[int] keyword[and] identifier[source] [ identifier[i] - literal[int] ]== identifier[target] [ identifier[j] - literal[int] ] keyword[and] identifier[source] [ identifier[i] - literal[int] ]== identifier[target] [ identifier[j] - literal[int] ]:
identifier[trans_dist] = identifier[matrix] [ identifier[i] - literal[int] ][ identifier[j] - literal[int] ]+ identifier[sub_trans_cost]
identifier[matrix] [ identifier[i] ][ identifier[j] ]= identifier[min] ( identifier[matrix] [ identifier[i] ][ identifier[j] ], identifier[trans_dist] )
keyword[return] identifier[matrix] [ identifier[len] ( identifier[source] )][ identifier[len] ( identifier[target] )] | def _levenshtein_compute(source, target, rd_flag):
"""Computes the Levenshtein
(https://en.wikipedia.org/wiki/Levenshtein_distance)
and restricted Damerau-Levenshtein
(https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance)
distances between two Unicode strings with given lengths using the
Wagner-Fischer algorithm
(https://en.wikipedia.org/wiki/Wagner%E2%80%93Fischer_algorithm).
These distances are defined recursively, since the distance between two
strings is just the cost of adjusting the last one or two characters plus
the distance between the prefixes that exclude these characters (e.g. the
distance between "tester" and "tested" is 1 + the distance between "teste"
and "teste"). The Wagner-Fischer algorithm retains this idea but eliminates
redundant computations by storing the distances between various prefixes in
a matrix that is filled in iteratively.
"""
# Create matrix of correct size (this is s_len + 1 * t_len + 1 so that the
# empty prefixes "" can also be included). The leftmost column represents
# transforming various source prefixes into an empty string, which can
# always be done by deleting all characters in the respective prefix, and
# the top row represents transforming the empty string into various target
# prefixes, which can always be done by inserting every character in the
# respective prefix. The ternary used to build the list should ensure that
# this row and column are now filled correctly
s_range = range(len(source) + 1)
t_range = range(len(target) + 1)
matrix = [[i if j == 0 else j for j in t_range] for i in s_range]
# Iterate through rest of matrix, filling it in with Levenshtein
# distances for the remaining prefix combinations
for i in s_range[1:]:
for j in t_range[1:]:
# Applies the recursive logic outlined above using the values
# stored in the matrix so far. The options for the last pair of
# characters are deletion, insertion, and substitution, which
# amount to dropping the source character, the target character,
# or both and then calculating the distance for the resulting
# prefix combo. If the characters at this point are the same, the
# situation can be thought of as a free substitution
del_dist = matrix[i - 1][j] + 1
ins_dist = matrix[i][j - 1] + 1
sub_trans_cost = 0 if source[i - 1] == target[j - 1] else 1
sub_dist = matrix[i - 1][j - 1] + sub_trans_cost
# Choose option that produces smallest distance
matrix[i][j] = min(del_dist, ins_dist, sub_dist)
# If restricted Damerau-Levenshtein was requested via the flag,
# then there may be a fourth option: transposing the current and
# previous characters in the source string. This can be thought of
# as a double substitution and has a similar free case, where the
# current and preceeding character in both strings is the same
if rd_flag and i > 1 and (j > 1) and (source[i - 1] == target[j - 2]) and (source[i - 2] == target[j - 1]):
trans_dist = matrix[i - 2][j - 2] + sub_trans_cost
matrix[i][j] = min(matrix[i][j], trans_dist) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']]
# At this point, the matrix is full, and the biggest prefixes are just the
# strings themselves, so this is the desired distance
return matrix[len(source)][len(target)] |
def add_display(cls, attr, title=''):
"""Adds a ``list_display`` property without any extra wrappers,
similar to :func:`add_displays`, but can also change the title.
:param attr:
Name of the attribute to add to the display
:param title:
Title for the column of the django admin table. If not given it
defaults to a capitalized version of ``attr``
"""
global klass_count
klass_count += 1
fn_name = 'dyn_fn_%d' % klass_count
cls.list_display.append(fn_name)
if not title:
title = attr.capitalize()
def _ref(self, obj):
# use the django mechanism for field value lookup
_, _, value = lookup_field(attr, obj, cls)
return value
_ref.short_description = title
_ref.allow_tags = True
_ref.admin_order_field = attr
setattr(cls, fn_name, _ref) | def function[add_display, parameter[cls, attr, title]]:
constant[Adds a ``list_display`` property without any extra wrappers,
similar to :func:`add_displays`, but can also change the title.
:param attr:
Name of the attribute to add to the display
:param title:
Title for the column of the django admin table. If not given it
defaults to a capitalized version of ``attr``
]
<ast.Global object at 0x7da1b0048fa0>
<ast.AugAssign object at 0x7da1b00482e0>
variable[fn_name] assign[=] binary_operation[constant[dyn_fn_%d] <ast.Mod object at 0x7da2590d6920> name[klass_count]]
call[name[cls].list_display.append, parameter[name[fn_name]]]
if <ast.UnaryOp object at 0x7da20e954c70> begin[:]
variable[title] assign[=] call[name[attr].capitalize, parameter[]]
def function[_ref, parameter[self, obj]]:
<ast.Tuple object at 0x7da20e955b70> assign[=] call[name[lookup_field], parameter[name[attr], name[obj], name[cls]]]
return[name[value]]
name[_ref].short_description assign[=] name[title]
name[_ref].allow_tags assign[=] constant[True]
name[_ref].admin_order_field assign[=] name[attr]
call[name[setattr], parameter[name[cls], name[fn_name], name[_ref]]] | keyword[def] identifier[add_display] ( identifier[cls] , identifier[attr] , identifier[title] = literal[string] ):
literal[string]
keyword[global] identifier[klass_count]
identifier[klass_count] += literal[int]
identifier[fn_name] = literal[string] % identifier[klass_count]
identifier[cls] . identifier[list_display] . identifier[append] ( identifier[fn_name] )
keyword[if] keyword[not] identifier[title] :
identifier[title] = identifier[attr] . identifier[capitalize] ()
keyword[def] identifier[_ref] ( identifier[self] , identifier[obj] ):
identifier[_] , identifier[_] , identifier[value] = identifier[lookup_field] ( identifier[attr] , identifier[obj] , identifier[cls] )
keyword[return] identifier[value]
identifier[_ref] . identifier[short_description] = identifier[title]
identifier[_ref] . identifier[allow_tags] = keyword[True]
identifier[_ref] . identifier[admin_order_field] = identifier[attr]
identifier[setattr] ( identifier[cls] , identifier[fn_name] , identifier[_ref] ) | def add_display(cls, attr, title=''):
"""Adds a ``list_display`` property without any extra wrappers,
similar to :func:`add_displays`, but can also change the title.
:param attr:
Name of the attribute to add to the display
:param title:
Title for the column of the django admin table. If not given it
defaults to a capitalized version of ``attr``
"""
global klass_count
klass_count += 1
fn_name = 'dyn_fn_%d' % klass_count
cls.list_display.append(fn_name)
if not title:
title = attr.capitalize() # depends on [control=['if'], data=[]]
def _ref(self, obj):
# use the django mechanism for field value lookup
(_, _, value) = lookup_field(attr, obj, cls)
return value
_ref.short_description = title
_ref.allow_tags = True
_ref.admin_order_field = attr
setattr(cls, fn_name, _ref) |
def files(self, *, bundle: str=None, tags: List[str]=None, version: int=None,
path: str=None) -> models.File:
"""Fetch files from the store."""
query = self.File.query
if bundle:
query = (query.join(self.File.version, self.Version.bundle)
.filter(self.Bundle.name == bundle))
if tags:
# require records to match ALL tags
query = (
query.join(self.File.tags)
.filter(self.Tag.name.in_(tags))
.group_by(models.File.id)
.having(func.count(models.Tag.name) == len(tags))
)
if version:
query = query.join(self.File.version).filter(self.Version.id == version)
if path:
query = query.filter_by(path=path)
return query | def function[files, parameter[self]]:
constant[Fetch files from the store.]
variable[query] assign[=] name[self].File.query
if name[bundle] begin[:]
variable[query] assign[=] call[call[name[query].join, parameter[name[self].File.version, name[self].Version.bundle]].filter, parameter[compare[name[self].Bundle.name equal[==] name[bundle]]]]
if name[tags] begin[:]
variable[query] assign[=] call[call[call[call[name[query].join, parameter[name[self].File.tags]].filter, parameter[call[name[self].Tag.name.in_, parameter[name[tags]]]]].group_by, parameter[name[models].File.id]].having, parameter[compare[call[name[func].count, parameter[name[models].Tag.name]] equal[==] call[name[len], parameter[name[tags]]]]]]
if name[version] begin[:]
variable[query] assign[=] call[call[name[query].join, parameter[name[self].File.version]].filter, parameter[compare[name[self].Version.id equal[==] name[version]]]]
if name[path] begin[:]
variable[query] assign[=] call[name[query].filter_by, parameter[]]
return[name[query]] | keyword[def] identifier[files] ( identifier[self] ,*, identifier[bundle] : identifier[str] = keyword[None] , identifier[tags] : identifier[List] [ identifier[str] ]= keyword[None] , identifier[version] : identifier[int] = keyword[None] ,
identifier[path] : identifier[str] = keyword[None] )-> identifier[models] . identifier[File] :
literal[string]
identifier[query] = identifier[self] . identifier[File] . identifier[query]
keyword[if] identifier[bundle] :
identifier[query] =( identifier[query] . identifier[join] ( identifier[self] . identifier[File] . identifier[version] , identifier[self] . identifier[Version] . identifier[bundle] )
. identifier[filter] ( identifier[self] . identifier[Bundle] . identifier[name] == identifier[bundle] ))
keyword[if] identifier[tags] :
identifier[query] =(
identifier[query] . identifier[join] ( identifier[self] . identifier[File] . identifier[tags] )
. identifier[filter] ( identifier[self] . identifier[Tag] . identifier[name] . identifier[in_] ( identifier[tags] ))
. identifier[group_by] ( identifier[models] . identifier[File] . identifier[id] )
. identifier[having] ( identifier[func] . identifier[count] ( identifier[models] . identifier[Tag] . identifier[name] )== identifier[len] ( identifier[tags] ))
)
keyword[if] identifier[version] :
identifier[query] = identifier[query] . identifier[join] ( identifier[self] . identifier[File] . identifier[version] ). identifier[filter] ( identifier[self] . identifier[Version] . identifier[id] == identifier[version] )
keyword[if] identifier[path] :
identifier[query] = identifier[query] . identifier[filter_by] ( identifier[path] = identifier[path] )
keyword[return] identifier[query] | def files(self, *, bundle: str=None, tags: List[str]=None, version: int=None, path: str=None) -> models.File:
"""Fetch files from the store."""
query = self.File.query
if bundle:
query = query.join(self.File.version, self.Version.bundle).filter(self.Bundle.name == bundle) # depends on [control=['if'], data=[]]
if tags:
# require records to match ALL tags
query = query.join(self.File.tags).filter(self.Tag.name.in_(tags)).group_by(models.File.id).having(func.count(models.Tag.name) == len(tags)) # depends on [control=['if'], data=[]]
if version:
query = query.join(self.File.version).filter(self.Version.id == version) # depends on [control=['if'], data=[]]
if path:
query = query.filter_by(path=path) # depends on [control=['if'], data=[]]
return query |
def get_comment(self, table: str, column: str) -> str:
"""Returns database SQL comment for a column."""
return self.flavour.get_comment(self, table, column) | def function[get_comment, parameter[self, table, column]]:
constant[Returns database SQL comment for a column.]
return[call[name[self].flavour.get_comment, parameter[name[self], name[table], name[column]]]] | keyword[def] identifier[get_comment] ( identifier[self] , identifier[table] : identifier[str] , identifier[column] : identifier[str] )-> identifier[str] :
literal[string]
keyword[return] identifier[self] . identifier[flavour] . identifier[get_comment] ( identifier[self] , identifier[table] , identifier[column] ) | def get_comment(self, table: str, column: str) -> str:
"""Returns database SQL comment for a column."""
return self.flavour.get_comment(self, table, column) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.