code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def open(self):
    """
    Open the connection if not already open.

    Probes the existing connection via ``get_backend_pid``; if the
    connection was lost ("connection already closed"), reconnects and
    probes again.  Any other ``InterfaceError`` is re-raised.

    :return: True when a live connection is available
    """
    try:
        if self.conn.get_backend_pid():
            return True
    except psycopg2.InterfaceError as e:
        if str(e) == "connection already closed":
            # We already lost our connection. Attempt to reforge it.
            self.__reconnect__()
            if self.conn.get_backend_pid() >= 0:
                return True  # Resets it.
        else:
            # Bare raise preserves the original traceback, unlike `raise e`.
            raise
constant[
Open the connection if not already open.
:return: True
]
<ast.Try object at 0x7da18bcca170> | keyword[def] identifier[open] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[if] identifier[self] . identifier[conn] . identifier[get_backend_pid] ():
keyword[return] keyword[True]
keyword[except] identifier[psycopg2] . identifier[InterfaceError] keyword[as] identifier[e] :
keyword[if] identifier[str] ( identifier[e] )== literal[string] :
identifier[self] . identifier[__reconnect__] ()
keyword[if] identifier[self] . identifier[conn] . identifier[get_backend_pid] ()>= literal[int] :
keyword[return] keyword[True]
keyword[else] :
keyword[raise] identifier[e] | def open(self):
"""
Open the connection if not already open.
:return: True
"""
try:
if self.conn.get_backend_pid():
return True # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except psycopg2.InterfaceError as e:
if str(e) == 'connection already closed':
# We already lost our connection. Attempt to reforge it.
self.__reconnect__()
if self.conn.get_backend_pid() >= 0:
return True # Resets it. # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
raise e # depends on [control=['except'], data=['e']] |
def set_fresh_watermark(game_queue, count_from, window_size,
                        fresh_fraction=0.05, minimum_fresh=20000):
    """Set the freshness watermark used to gate training on new games.

    Writes the 'freshness mark' on ``game_queue`` so training blocks until
    enough games have been played past ``count_from``.  The requirement is
    either a fraction of the window or the flat ``minimum_fresh`` floor
    (both scaled by 0.9).

    Args:
        game_queue: A GameQueue object, on whose backing table will be modified.
        count_from: the index of the game to compute the increment from
        window_size: an integer indicating how many past games are considered
        fresh_fraction: a float in (0,1] indicating the fraction of games to wait for
        minimum_fresh: an integer indicating the lower bound on the number of new
            games.
    """
    games_since_mark = game_queue.latest_game_number - count_from
    print("== already_played: ", games_since_mark, flush=True)
    # A window larger than count_from means the window is not yet 'full';
    # fall back to the flat minimum rather than a window-derived target.
    if window_size > count_from:
        game_queue.require_fresh_games(int(minimum_fresh * .9))
        return
    shortfall = math.ceil(window_size * .9 * fresh_fraction) - games_since_mark
    num_to_play = shortfall if shortfall > 0 else 0
    print("== Num to play: ", num_to_play, flush=True)
    game_queue.require_fresh_games(num_to_play)
constant[Sets the metadata cell used to block until some quantity of games have been played.
This sets the 'freshness mark' on the `game_queue`, used to block training
until enough new games have been played. The number of fresh games required
is the larger of:
- The fraction of the total window size
- The `minimum_fresh` parameter
The number of games required can be indexed from the 'count_from' parameter.
Args:
game_queue: A GameQueue object, on whose backing table will be modified.
count_from: the index of the game to compute the increment from
window_size: an integer indicating how many past games are considered
fresh_fraction: a float in (0,1] indicating the fraction of games to wait for
minimum_fresh: an integer indicating the lower bound on the number of new
games.
]
variable[already_played] assign[=] binary_operation[name[game_queue].latest_game_number - name[count_from]]
call[name[print], parameter[constant[== already_played: ], name[already_played]]]
if compare[name[window_size] greater[>] name[count_from]] begin[:]
call[name[game_queue].require_fresh_games, parameter[call[name[int], parameter[binary_operation[name[minimum_fresh] * constant[0.9]]]]]] | keyword[def] identifier[set_fresh_watermark] ( identifier[game_queue] , identifier[count_from] , identifier[window_size] ,
identifier[fresh_fraction] = literal[int] , identifier[minimum_fresh] = literal[int] ):
literal[string]
identifier[already_played] = identifier[game_queue] . identifier[latest_game_number] - identifier[count_from]
identifier[print] ( literal[string] , identifier[already_played] , identifier[flush] = keyword[True] )
keyword[if] identifier[window_size] > identifier[count_from] :
identifier[game_queue] . identifier[require_fresh_games] ( identifier[int] ( identifier[minimum_fresh] * literal[int] ))
keyword[else] :
identifier[num_to_play] = identifier[max] ( literal[int] , identifier[math] . identifier[ceil] ( identifier[window_size] * literal[int] * identifier[fresh_fraction] )- identifier[already_played] )
identifier[print] ( literal[string] , identifier[num_to_play] , identifier[flush] = keyword[True] )
identifier[game_queue] . identifier[require_fresh_games] ( identifier[num_to_play] ) | def set_fresh_watermark(game_queue, count_from, window_size, fresh_fraction=0.05, minimum_fresh=20000):
"""Sets the metadata cell used to block until some quantity of games have been played.
This sets the 'freshness mark' on the `game_queue`, used to block training
until enough new games have been played. The number of fresh games required
is the larger of:
- The fraction of the total window size
- The `minimum_fresh` parameter
The number of games required can be indexed from the 'count_from' parameter.
Args:
game_queue: A GameQueue object, on whose backing table will be modified.
count_from: the index of the game to compute the increment from
window_size: an integer indicating how many past games are considered
fresh_fraction: a float in (0,1] indicating the fraction of games to wait for
minimum_fresh: an integer indicating the lower bound on the number of new
games.
"""
already_played = game_queue.latest_game_number - count_from
print('== already_played: ', already_played, flush=True)
if window_size > count_from: # How to handle the case when the window is not yet 'full'
game_queue.require_fresh_games(int(minimum_fresh * 0.9)) # depends on [control=['if'], data=[]]
else:
num_to_play = max(0, math.ceil(window_size * 0.9 * fresh_fraction) - already_played)
print('== Num to play: ', num_to_play, flush=True)
game_queue.require_fresh_games(num_to_play) |
def select_projects(self, *args):
    """Return a copy of this query filtered to the given monitored projects.

    Meaningful only when the target project is a Stackdriver account that
    contains the specified monitored projects.

    Examples::
        query = query.select_projects('project-1')
        query = query.select_projects('project-1', 'project-2')

    :type args: tuple
    :param args: Project IDs limiting the resources to be included
        in the query.
    :rtype: :class:`Query`
    :returns: The new query object.
    """
    narrowed = copy.deepcopy(self)
    narrowed._filter.projects = args
    return narrowed
constant[Copy the query and add filtering by monitored projects.
This is only useful if the target project represents a Stackdriver
account containing the specified monitored projects.
Examples::
query = query.select_projects('project-1')
query = query.select_projects('project-1', 'project-2')
:type args: tuple
:param args: Project IDs limiting the resources to be included
in the query.
:rtype: :class:`Query`
:returns: The new query object.
]
variable[new_query] assign[=] call[name[copy].deepcopy, parameter[name[self]]]
name[new_query]._filter.projects assign[=] name[args]
return[name[new_query]] | keyword[def] identifier[select_projects] ( identifier[self] ,* identifier[args] ):
literal[string]
identifier[new_query] = identifier[copy] . identifier[deepcopy] ( identifier[self] )
identifier[new_query] . identifier[_filter] . identifier[projects] = identifier[args]
keyword[return] identifier[new_query] | def select_projects(self, *args):
"""Copy the query and add filtering by monitored projects.
This is only useful if the target project represents a Stackdriver
account containing the specified monitored projects.
Examples::
query = query.select_projects('project-1')
query = query.select_projects('project-1', 'project-2')
:type args: tuple
:param args: Project IDs limiting the resources to be included
in the query.
:rtype: :class:`Query`
:returns: The new query object.
"""
new_query = copy.deepcopy(self)
new_query._filter.projects = args
return new_query |
def validate_ip(s):
    """Validate a hexidecimal IPv6 ip address.

    Accepts plain hex addresses (matched by ``_HEX_RE``) and mixed
    hex/dotted-quad addresses (matched by ``_DOTTED_QUAD_RE``), enforcing
    at most one ``::`` zero-run and octets <= 255 in the dotted tail.

    >>> validate_ip('::1')
    True
    >>> validate_ip('2001:db8:85a3::8a2e:370:7334')
    True
    >>> validate_ip('::ffff:192.0.2.128')
    True
    >>> validate_ip('::ff::ff')
    False
    >>> validate_ip('::ffff:192.0.2.300')
    False

    :param s: String to validate as a hexidecimal IPv6 ip address.
    :type s: str
    :returns: ``True`` if a valid hexidecimal IPv6 ip address,
        ``False`` otherwise.
    :raises: TypeError
    """
    if _HEX_RE.match(s):
        # Only a single "::" zero-run is allowed.
        return s.count('::') <= 1
    if not _DOTTED_QUAD_RE.match(s):
        return False
    if len(s.split('::')) > 2:
        return False
    # The dotted quad is always the last colon-separated group.
    tail = s.split(':')[-1]
    return all(int(octet) <= 255 for octet in tail.split('.'))
constant[Validate a hexidecimal IPv6 ip address.
>>> validate_ip('::')
True
>>> validate_ip('::1')
True
>>> validate_ip('2001:db8:85a3::8a2e:370:7334')
True
>>> validate_ip('2001:db8:85a3:0:0:8a2e:370:7334')
True
>>> validate_ip('2001:0db8:85a3:0000:0000:8a2e:0370:7334')
True
>>> validate_ip('2001:db8::1:0:0:1')
True
>>> validate_ip('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
True
>>> validate_ip('::ffff:192.0.2.128')
True
>>> validate_ip('::ff::ff')
False
>>> validate_ip('::fffff')
False
>>> validate_ip('::ffff:192.0.2.300')
False
>>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected string or buffer
>>> validate_ip('1080:0:0:0:8:800:200c:417a')
True
:param s: String to validate as a hexidecimal IPv6 ip address.
:type s: str
:returns: ``True`` if a valid hexidecimal IPv6 ip address,
``False`` otherwise.
:raises: TypeError
]
if call[name[_HEX_RE].match, parameter[name[s]]] begin[:]
return[compare[call[name[len], parameter[call[name[s].split, parameter[constant[::]]]]] less_or_equal[<=] constant[2]]]
if call[name[_DOTTED_QUAD_RE].match, parameter[name[s]]] begin[:]
variable[halves] assign[=] call[name[s].split, parameter[constant[::]]]
if compare[call[name[len], parameter[name[halves]]] greater[>] constant[2]] begin[:]
return[constant[False]]
variable[hextets] assign[=] call[name[s].split, parameter[constant[:]]]
variable[quads] assign[=] call[call[name[hextets]][<ast.UnaryOp object at 0x7da20e956530>].split, parameter[constant[.]]]
for taget[name[q]] in starred[name[quads]] begin[:]
if compare[call[name[int], parameter[name[q]]] greater[>] constant[255]] begin[:]
return[constant[False]]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[validate_ip] ( identifier[s] ):
literal[string]
keyword[if] identifier[_HEX_RE] . identifier[match] ( identifier[s] ):
keyword[return] identifier[len] ( identifier[s] . identifier[split] ( literal[string] ))<= literal[int]
keyword[if] identifier[_DOTTED_QUAD_RE] . identifier[match] ( identifier[s] ):
identifier[halves] = identifier[s] . identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[halves] )> literal[int] :
keyword[return] keyword[False]
identifier[hextets] = identifier[s] . identifier[split] ( literal[string] )
identifier[quads] = identifier[hextets] [- literal[int] ]. identifier[split] ( literal[string] )
keyword[for] identifier[q] keyword[in] identifier[quads] :
keyword[if] identifier[int] ( identifier[q] )> literal[int] :
keyword[return] keyword[False]
keyword[return] keyword[True]
keyword[return] keyword[False] | def validate_ip(s):
"""Validate a hexidecimal IPv6 ip address.
>>> validate_ip('::')
True
>>> validate_ip('::1')
True
>>> validate_ip('2001:db8:85a3::8a2e:370:7334')
True
>>> validate_ip('2001:db8:85a3:0:0:8a2e:370:7334')
True
>>> validate_ip('2001:0db8:85a3:0000:0000:8a2e:0370:7334')
True
>>> validate_ip('2001:db8::1:0:0:1')
True
>>> validate_ip('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
True
>>> validate_ip('::ffff:192.0.2.128')
True
>>> validate_ip('::ff::ff')
False
>>> validate_ip('::fffff')
False
>>> validate_ip('::ffff:192.0.2.300')
False
>>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected string or buffer
>>> validate_ip('1080:0:0:0:8:800:200c:417a')
True
:param s: String to validate as a hexidecimal IPv6 ip address.
:type s: str
:returns: ``True`` if a valid hexidecimal IPv6 ip address,
``False`` otherwise.
:raises: TypeError
"""
if _HEX_RE.match(s):
return len(s.split('::')) <= 2 # depends on [control=['if'], data=[]]
if _DOTTED_QUAD_RE.match(s):
halves = s.split('::')
if len(halves) > 2:
return False # depends on [control=['if'], data=[]]
hextets = s.split(':')
quads = hextets[-1].split('.')
for q in quads:
if int(q) > 255:
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['q']]
return True # depends on [control=['if'], data=[]]
return False |
def as_operation(self, timer=datetime.utcnow):
    """Build an ``Operation`` message describing this instance.

    :param timer: callable returning the current time; defaults to
        ``datetime.utcnow``.
    :returns: an ``Operation`` with start/end set to now, LOW importance,
        and id/name/consumer fields copied from this instance when present.
    """
    now = timer()
    op = sc_messages.Operation(
        endTime=timestamp.to_rfc3339(now),
        startTime=timestamp.to_rfc3339(now),
        importance=sc_messages.Operation.ImportanceValueValuesEnum.LOW)
    if self.operation_id:
        op.operationId = self.operation_id
    if self.operation_name:
        op.operationName = self.operation_name
    # An API key only identifies the consumer once it has been validated;
    # otherwise fall back to the consumer project.
    if self.api_key and self.api_key_valid:
        op.consumerId = u'api_key:' + self.api_key
    elif self.consumer_project_id:
        op.consumerId = u'project:' + self.consumer_project_id
    return op
constant[Makes an ``Operation`` from this instance.
Returns:
an ``Operation``
]
variable[now] assign[=] call[name[timer], parameter[]]
variable[op] assign[=] call[name[sc_messages].Operation, parameter[]]
if name[self].operation_id begin[:]
name[op].operationId assign[=] name[self].operation_id
if name[self].operation_name begin[:]
name[op].operationName assign[=] name[self].operation_name
if <ast.BoolOp object at 0x7da1b0401960> begin[:]
name[op].consumerId assign[=] binary_operation[constant[api_key:] + name[self].api_key]
return[name[op]] | keyword[def] identifier[as_operation] ( identifier[self] , identifier[timer] = identifier[datetime] . identifier[utcnow] ):
literal[string]
identifier[now] = identifier[timer] ()
identifier[op] = identifier[sc_messages] . identifier[Operation] (
identifier[endTime] = identifier[timestamp] . identifier[to_rfc3339] ( identifier[now] ),
identifier[startTime] = identifier[timestamp] . identifier[to_rfc3339] ( identifier[now] ),
identifier[importance] = identifier[sc_messages] . identifier[Operation] . identifier[ImportanceValueValuesEnum] . identifier[LOW] )
keyword[if] identifier[self] . identifier[operation_id] :
identifier[op] . identifier[operationId] = identifier[self] . identifier[operation_id]
keyword[if] identifier[self] . identifier[operation_name] :
identifier[op] . identifier[operationName] = identifier[self] . identifier[operation_name]
keyword[if] identifier[self] . identifier[api_key] keyword[and] identifier[self] . identifier[api_key_valid] :
identifier[op] . identifier[consumerId] = literal[string] + identifier[self] . identifier[api_key]
keyword[elif] identifier[self] . identifier[consumer_project_id] :
identifier[op] . identifier[consumerId] = literal[string] + identifier[self] . identifier[consumer_project_id]
keyword[return] identifier[op] | def as_operation(self, timer=datetime.utcnow):
"""Makes an ``Operation`` from this instance.
Returns:
an ``Operation``
"""
now = timer()
op = sc_messages.Operation(endTime=timestamp.to_rfc3339(now), startTime=timestamp.to_rfc3339(now), importance=sc_messages.Operation.ImportanceValueValuesEnum.LOW)
if self.operation_id:
op.operationId = self.operation_id # depends on [control=['if'], data=[]]
if self.operation_name:
op.operationName = self.operation_name # depends on [control=['if'], data=[]]
if self.api_key and self.api_key_valid:
op.consumerId = u'api_key:' + self.api_key # depends on [control=['if'], data=[]]
elif self.consumer_project_id:
op.consumerId = u'project:' + self.consumer_project_id # depends on [control=['if'], data=[]]
return op |
def disconnect(self, signal=None, slot=None, transform=None, condition=None):
    """Remove connection(s) between this object's signal and connected slot(s).

    The most specific argument provided determines what is removed: a slot
    removes that single slot; otherwise a condition removes every slot under
    it; otherwise a signal removes every connection for it; with no
    arguments, all connection state is dropped.

    signal: the signal this class will emit, to cause the slot method to be called
    slot: the slot method or function to call
    transform: accepted for signature symmetry with connect(); not used here
    condition: only call the slot method if the value emitted matches this condition
    """
    if slot:
        self.connections[signal][condition].pop(slot, None)
        return
    if condition is not None:
        self.connections[signal].pop(condition, None)
        return
    if signal:
        self.connections.pop(signal, None)
        return
    delattr(self, 'connections')
constant[Removes connection(s) between this objects signal and connected slot(s)
signal: the signal this class will emit, to cause the slot method to be called
receiver: the object containing the slot method to be called
slot: the slot method or function to call
transform: an optional value override to pass into the slot method as the first variable
condition: only call the slot method if the value emitted matches this condition
]
if name[slot] begin[:]
call[call[call[name[self].connections][name[signal]]][name[condition]].pop, parameter[name[slot], constant[None]]] | keyword[def] identifier[disconnect] ( identifier[self] , identifier[signal] = keyword[None] , identifier[slot] = keyword[None] , identifier[transform] = keyword[None] , identifier[condition] = keyword[None] ):
literal[string]
keyword[if] identifier[slot] :
identifier[self] . identifier[connections] [ identifier[signal] ][ identifier[condition] ]. identifier[pop] ( identifier[slot] , keyword[None] )
keyword[elif] identifier[condition] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[connections] [ identifier[signal] ]. identifier[pop] ( identifier[condition] , keyword[None] )
keyword[elif] identifier[signal] :
identifier[self] . identifier[connections] . identifier[pop] ( identifier[signal] , keyword[None] )
keyword[else] :
identifier[delattr] ( identifier[self] , literal[string] ) | def disconnect(self, signal=None, slot=None, transform=None, condition=None):
"""Removes connection(s) between this objects signal and connected slot(s)
signal: the signal this class will emit, to cause the slot method to be called
receiver: the object containing the slot method to be called
slot: the slot method or function to call
transform: an optional value override to pass into the slot method as the first variable
condition: only call the slot method if the value emitted matches this condition
"""
if slot:
self.connections[signal][condition].pop(slot, None) # depends on [control=['if'], data=[]]
elif condition is not None:
self.connections[signal].pop(condition, None) # depends on [control=['if'], data=['condition']]
elif signal:
self.connections.pop(signal, None) # depends on [control=['if'], data=[]]
else:
delattr(self, 'connections') |
def density_2d(self, x, y, Rs, rho0, r_core, center_x=0, center_y=0):
    """
    Projected two-dimensional profile (kappa * Sigma_crit).

    :param x: x-coordinate(s) of interest
    :type x: float/numpy array
    :param y: y-coordinate(s) of interest
    :type y: float/numpy array
    :param Rs: scale radius
    :type Rs: float
    :param rho0: density normalization (characteristic density)
    :type rho0: float
    :param r_core: core radius
    :type r_core: float
    :param center_x: profile center, x-coordinate
    :param center_y: profile center, y-coordinate
    :return: projected density at radius R = sqrt((x-cx)^2 + (y-cy)^2)
    """
    dx = x - center_x
    dy = y - center_y
    radius = np.sqrt(dx ** 2 + dy ** 2)
    # Express both the core radius and the evaluation radius in units of Rs.
    core_scaled = r_core * Rs ** -1
    radius_scaled = radius * Rs ** -1
    profile_val = self._F(radius_scaled, core_scaled)
    return 2 * rho0 * Rs * profile_val
constant[
projected two dimenstional NFW profile (kappa*Sigma_crit)
:param R: radius of interest
:type R: float/numpy array
:param Rs: scale radius
:type Rs: float
:param rho0: density normalization (characteristic density)
:type rho0: float
:param r200: radius of (sub)halo
:type r200: float>0
:return: Epsilon(R) projected density at radius R
]
variable[x_] assign[=] binary_operation[name[x] - name[center_x]]
variable[y_] assign[=] binary_operation[name[y] - name[center_y]]
variable[R] assign[=] call[name[np].sqrt, parameter[binary_operation[binary_operation[name[x_] ** constant[2]] + binary_operation[name[y_] ** constant[2]]]]]
variable[b] assign[=] binary_operation[name[r_core] * binary_operation[name[Rs] ** <ast.UnaryOp object at 0x7da1b26acbe0>]]
variable[x] assign[=] binary_operation[name[R] * binary_operation[name[Rs] ** <ast.UnaryOp object at 0x7da1b26ae860>]]
variable[Fx] assign[=] call[name[self]._F, parameter[name[x], name[b]]]
return[binary_operation[binary_operation[binary_operation[constant[2] * name[rho0]] * name[Rs]] * name[Fx]]] | keyword[def] identifier[density_2d] ( identifier[self] , identifier[x] , identifier[y] , identifier[Rs] , identifier[rho0] , identifier[r_core] , identifier[center_x] = literal[int] , identifier[center_y] = literal[int] ):
literal[string]
identifier[x_] = identifier[x] - identifier[center_x]
identifier[y_] = identifier[y] - identifier[center_y]
identifier[R] = identifier[np] . identifier[sqrt] ( identifier[x_] ** literal[int] + identifier[y_] ** literal[int] )
identifier[b] = identifier[r_core] * identifier[Rs] **- literal[int]
identifier[x] = identifier[R] * identifier[Rs] **- literal[int]
identifier[Fx] = identifier[self] . identifier[_F] ( identifier[x] , identifier[b] )
keyword[return] literal[int] * identifier[rho0] * identifier[Rs] * identifier[Fx] | def density_2d(self, x, y, Rs, rho0, r_core, center_x=0, center_y=0):
"""
projected two dimenstional NFW profile (kappa*Sigma_crit)
:param R: radius of interest
:type R: float/numpy array
:param Rs: scale radius
:type Rs: float
:param rho0: density normalization (characteristic density)
:type rho0: float
:param r200: radius of (sub)halo
:type r200: float>0
:return: Epsilon(R) projected density at radius R
"""
x_ = x - center_x
y_ = y - center_y
R = np.sqrt(x_ ** 2 + y_ ** 2)
b = r_core * Rs ** (-1)
x = R * Rs ** (-1)
Fx = self._F(x, b)
return 2 * rho0 * Rs * Fx |
def download_next_song(self, song):
    """Downloads the next song and starts playing it.

    Runs a youtube-dl download for ``song``; livestreams are detected via
    ``DownloadStreamException`` and handed off to a stream player coroutine
    instead.  On unrecoverable errors, ``self.vafter_ts()`` is invoked and
    the method returns early.
    """
    # Copy the module-level options so per-download hooks and the output
    # template do not mutate the shared ydl_opts dict.
    dl_ydl_opts = dict(ydl_opts)
    dl_ydl_opts["progress_hooks"] = [self.ytdl_progress_hook]
    dl_ydl_opts["outtmpl"] = self.output_format
    # Move the songs from the next cache to the current cache
    self.move_next_cache()
    self.state = 'ready'
    self.play_empty()
    # Download the file and create the stream
    with youtube_dl.YoutubeDL(dl_ydl_opts) as ydl:
        try:
            ydl.download([song])
        except DownloadStreamException:
            # This is a livestream, use the appropriate player
            # Schedule on the client's event loop and block until done.
            future = asyncio.run_coroutine_threadsafe(self.create_stream_player(song, dl_ydl_opts), client.loop)
            try:
                future.result()
            except Exception as e:
                # NOTE(review): this branch uses the module-level ``logger``
                # while the handlers below use ``self.logger`` -- confirm
                # which is intended.
                logger.exception(e)
                self.vafter_ts()
                return
        except PermissionError:
            # File is still in use, it'll get cleared next time
            pass
        except youtube_dl.utils.DownloadError as e:
            # Download failed: log, surface to the status log, and bail out.
            self.logger.exception(e)
            self.statuslog.error(e)
            self.vafter_ts()
            return
        except Exception as e:
            # Any other failure: log and bail out without a status message.
            self.logger.exception(e)
            self.vafter_ts()
            return
constant[Downloads the next song and starts playing it]
variable[dl_ydl_opts] assign[=] call[name[dict], parameter[name[ydl_opts]]]
call[name[dl_ydl_opts]][constant[progress_hooks]] assign[=] list[[<ast.Attribute object at 0x7da1b193eb30>]]
call[name[dl_ydl_opts]][constant[outtmpl]] assign[=] name[self].output_format
call[name[self].move_next_cache, parameter[]]
name[self].state assign[=] constant[ready]
call[name[self].play_empty, parameter[]]
with call[name[youtube_dl].YoutubeDL, parameter[name[dl_ydl_opts]]] begin[:]
<ast.Try object at 0x7da1b193e7a0> | keyword[def] identifier[download_next_song] ( identifier[self] , identifier[song] ):
literal[string]
identifier[dl_ydl_opts] = identifier[dict] ( identifier[ydl_opts] )
identifier[dl_ydl_opts] [ literal[string] ]=[ identifier[self] . identifier[ytdl_progress_hook] ]
identifier[dl_ydl_opts] [ literal[string] ]= identifier[self] . identifier[output_format]
identifier[self] . identifier[move_next_cache] ()
identifier[self] . identifier[state] = literal[string]
identifier[self] . identifier[play_empty] ()
keyword[with] identifier[youtube_dl] . identifier[YoutubeDL] ( identifier[dl_ydl_opts] ) keyword[as] identifier[ydl] :
keyword[try] :
identifier[ydl] . identifier[download] ([ identifier[song] ])
keyword[except] identifier[DownloadStreamException] :
identifier[future] = identifier[asyncio] . identifier[run_coroutine_threadsafe] ( identifier[self] . identifier[create_stream_player] ( identifier[song] , identifier[dl_ydl_opts] ), identifier[client] . identifier[loop] )
keyword[try] :
identifier[future] . identifier[result] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[exception] ( identifier[e] )
identifier[self] . identifier[vafter_ts] ()
keyword[return]
keyword[except] identifier[PermissionError] :
keyword[pass]
keyword[except] identifier[youtube_dl] . identifier[utils] . identifier[DownloadError] keyword[as] identifier[e] :
identifier[self] . identifier[logger] . identifier[exception] ( identifier[e] )
identifier[self] . identifier[statuslog] . identifier[error] ( identifier[e] )
identifier[self] . identifier[vafter_ts] ()
keyword[return]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[logger] . identifier[exception] ( identifier[e] )
identifier[self] . identifier[vafter_ts] ()
keyword[return] | def download_next_song(self, song):
"""Downloads the next song and starts playing it"""
dl_ydl_opts = dict(ydl_opts)
dl_ydl_opts['progress_hooks'] = [self.ytdl_progress_hook]
dl_ydl_opts['outtmpl'] = self.output_format
# Move the songs from the next cache to the current cache
self.move_next_cache()
self.state = 'ready'
self.play_empty()
# Download the file and create the stream
with youtube_dl.YoutubeDL(dl_ydl_opts) as ydl:
try:
ydl.download([song]) # depends on [control=['try'], data=[]]
except DownloadStreamException:
# This is a livestream, use the appropriate player
future = asyncio.run_coroutine_threadsafe(self.create_stream_player(song, dl_ydl_opts), client.loop)
try:
future.result() # depends on [control=['try'], data=[]]
except Exception as e:
logger.exception(e)
self.vafter_ts()
return # depends on [control=['except'], data=['e']] # depends on [control=['except'], data=[]]
except PermissionError:
# File is still in use, it'll get cleared next time
pass # depends on [control=['except'], data=[]]
except youtube_dl.utils.DownloadError as e:
self.logger.exception(e)
self.statuslog.error(e)
self.vafter_ts()
return # depends on [control=['except'], data=['e']]
except Exception as e:
self.logger.exception(e)
self.vafter_ts()
return # depends on [control=['except'], data=['e']] # depends on [control=['with'], data=['ydl']] |
def render_button(
    content,
    button_type=None,
    icon=None,
    button_class="btn-default",
    size="",
    href="",
    name=None,
    value=None,
    title=None,
    extra_classes="",
    id="",
):
    """
    Render a Bootstrap button (an ``<a>`` when ``href`` is given, otherwise
    a ``<button>``) wrapping ``content``, with optional icon, size, and
    HTML attributes.
    """
    attrs = {}
    classes = add_css_class("btn", button_class)
    size = text_value(size).lower().strip()
    # Map the accepted size aliases onto their Bootstrap class suffixes.
    size_classes = {
        "xs": "btn-xs",
        "sm": "btn-sm",
        "small": "btn-sm",
        "lg": "btn-lg",
        "large": "btn-lg",
    }
    if size in size_classes:
        classes = add_css_class(classes, size_classes[size])
    elif size and size not in ("md", "medium"):
        raise BootstrapError(
            'Parameter "size" should be "xs", "sm", "lg" or '
            + 'empty ("{}" given).'.format(size)
        )
    if button_type:
        if button_type not in ("submit", "reset", "button", "link"):
            raise BootstrapError(
                'Parameter "button_type" should be "submit", "reset", '
                + '"button", "link" or empty ("{}" given).'.format(button_type)
            )
        attrs["type"] = button_type
    attrs["class"] = add_css_class(classes, extra_classes)
    icon_content = render_icon(icon) if icon else ""
    if href:
        attrs["href"] = href
        tag = "a"
    else:
        tag = "button"
    # Optional attributes, in the same order the original emitted them.
    for attr_name, attr_value in (("id", id), ("name", name),
                                  ("value", value), ("title", title)):
        if attr_value:
            attrs[attr_name] = attr_value
    return render_tag(
        tag,
        attrs=attrs,
        content=mark_safe(text_concat(icon_content, content, separator=" ")),
    )
constant[
Render a button with content
]
variable[attrs] assign[=] dictionary[[], []]
variable[classes] assign[=] call[name[add_css_class], parameter[constant[btn], name[button_class]]]
variable[size] assign[=] call[call[call[name[text_value], parameter[name[size]]].lower, parameter[]].strip, parameter[]]
if compare[name[size] equal[==] constant[xs]] begin[:]
variable[classes] assign[=] call[name[add_css_class], parameter[name[classes], constant[btn-xs]]]
if name[button_type] begin[:]
if compare[name[button_type] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b1da01c0>, <ast.Constant object at 0x7da1b1da0220>, <ast.Constant object at 0x7da1b1da2620>, <ast.Constant object at 0x7da1b1da0b20>]]] begin[:]
<ast.Raise object at 0x7da1b1da3820>
call[name[attrs]][constant[type]] assign[=] name[button_type]
variable[classes] assign[=] call[name[add_css_class], parameter[name[classes], name[extra_classes]]]
call[name[attrs]][constant[class]] assign[=] name[classes]
variable[icon_content] assign[=] <ast.IfExp object at 0x7da1b22e9ff0>
if name[href] begin[:]
call[name[attrs]][constant[href]] assign[=] name[href]
variable[tag] assign[=] constant[a]
if name[id] begin[:]
call[name[attrs]][constant[id]] assign[=] name[id]
if name[name] begin[:]
call[name[attrs]][constant[name]] assign[=] name[name]
if name[value] begin[:]
call[name[attrs]][constant[value]] assign[=] name[value]
if name[title] begin[:]
call[name[attrs]][constant[title]] assign[=] name[title]
return[call[name[render_tag], parameter[name[tag]]]] | keyword[def] identifier[render_button] (
identifier[content] ,
identifier[button_type] = keyword[None] ,
identifier[icon] = keyword[None] ,
identifier[button_class] = literal[string] ,
identifier[size] = literal[string] ,
identifier[href] = literal[string] ,
identifier[name] = keyword[None] ,
identifier[value] = keyword[None] ,
identifier[title] = keyword[None] ,
identifier[extra_classes] = literal[string] ,
identifier[id] = literal[string] ,
):
literal[string]
identifier[attrs] ={}
identifier[classes] = identifier[add_css_class] ( literal[string] , identifier[button_class] )
identifier[size] = identifier[text_value] ( identifier[size] ). identifier[lower] (). identifier[strip] ()
keyword[if] identifier[size] == literal[string] :
identifier[classes] = identifier[add_css_class] ( identifier[classes] , literal[string] )
keyword[elif] identifier[size] == literal[string] keyword[or] identifier[size] == literal[string] :
identifier[classes] = identifier[add_css_class] ( identifier[classes] , literal[string] )
keyword[elif] identifier[size] == literal[string] keyword[or] identifier[size] == literal[string] :
identifier[classes] = identifier[add_css_class] ( identifier[classes] , literal[string] )
keyword[elif] identifier[size] == literal[string] keyword[or] identifier[size] == literal[string] :
keyword[pass]
keyword[elif] identifier[size] :
keyword[raise] identifier[BootstrapError] (
literal[string]
+ literal[string] . identifier[format] ( identifier[size] )
)
keyword[if] identifier[button_type] :
keyword[if] identifier[button_type] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ):
keyword[raise] identifier[BootstrapError] (
literal[string]
+ literal[string] . identifier[format] ( identifier[button_type] )
)
identifier[attrs] [ literal[string] ]= identifier[button_type]
identifier[classes] = identifier[add_css_class] ( identifier[classes] , identifier[extra_classes] )
identifier[attrs] [ literal[string] ]= identifier[classes]
identifier[icon_content] = identifier[render_icon] ( identifier[icon] ) keyword[if] identifier[icon] keyword[else] literal[string]
keyword[if] identifier[href] :
identifier[attrs] [ literal[string] ]= identifier[href]
identifier[tag] = literal[string]
keyword[else] :
identifier[tag] = literal[string]
keyword[if] identifier[id] :
identifier[attrs] [ literal[string] ]= identifier[id]
keyword[if] identifier[name] :
identifier[attrs] [ literal[string] ]= identifier[name]
keyword[if] identifier[value] :
identifier[attrs] [ literal[string] ]= identifier[value]
keyword[if] identifier[title] :
identifier[attrs] [ literal[string] ]= identifier[title]
keyword[return] identifier[render_tag] (
identifier[tag] ,
identifier[attrs] = identifier[attrs] ,
identifier[content] = identifier[mark_safe] ( identifier[text_concat] ( identifier[icon_content] , identifier[content] , identifier[separator] = literal[string] )),
) | def render_button(content, button_type=None, icon=None, button_class='btn-default', size='', href='', name=None, value=None, title=None, extra_classes='', id=''):
"""
Render a button with content
"""
attrs = {}
classes = add_css_class('btn', button_class)
size = text_value(size).lower().strip()
if size == 'xs':
classes = add_css_class(classes, 'btn-xs') # depends on [control=['if'], data=[]]
elif size == 'sm' or size == 'small':
classes = add_css_class(classes, 'btn-sm') # depends on [control=['if'], data=[]]
elif size == 'lg' or size == 'large':
classes = add_css_class(classes, 'btn-lg') # depends on [control=['if'], data=[]]
elif size == 'md' or size == 'medium':
pass # depends on [control=['if'], data=[]]
elif size:
raise BootstrapError('Parameter "size" should be "xs", "sm", "lg" or ' + 'empty ("{}" given).'.format(size)) # depends on [control=['if'], data=[]]
if button_type:
if button_type not in ('submit', 'reset', 'button', 'link'):
raise BootstrapError('Parameter "button_type" should be "submit", "reset", ' + '"button", "link" or empty ("{}" given).'.format(button_type)) # depends on [control=['if'], data=['button_type']]
attrs['type'] = button_type # depends on [control=['if'], data=[]]
classes = add_css_class(classes, extra_classes)
attrs['class'] = classes
icon_content = render_icon(icon) if icon else ''
if href:
attrs['href'] = href
tag = 'a' # depends on [control=['if'], data=[]]
else:
tag = 'button'
if id:
attrs['id'] = id # depends on [control=['if'], data=[]]
if name:
attrs['name'] = name # depends on [control=['if'], data=[]]
if value:
attrs['value'] = value # depends on [control=['if'], data=[]]
if title:
attrs['title'] = title # depends on [control=['if'], data=[]]
return render_tag(tag, attrs=attrs, content=mark_safe(text_concat(icon_content, content, separator=' '))) |
def read_packet(self):
"""Reads a RTMP packet from the server.
Returns a :class:`RTMPPacket`.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> packet = conn.read_packet()
>>> packet.body
b'packet body ...'
"""
packet = ffi.new("RTMPPacket*")
packet_complete = False
while not packet_complete:
res = librtmp.RTMP_ReadPacket(self.rtmp, packet)
if res < 1:
if librtmp.RTMP_IsTimedout(self.rtmp):
raise RTMPTimeoutError("Timed out while reading packet")
else:
raise RTMPError("Failed to read packet")
packet_complete = packet.m_nBytesRead == packet.m_nBodySize
return RTMPPacket._from_pointer(packet) | def function[read_packet, parameter[self]]:
constant[Reads a RTMP packet from the server.
Returns a :class:`RTMPPacket`.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> packet = conn.read_packet()
>>> packet.body
b'packet body ...'
]
variable[packet] assign[=] call[name[ffi].new, parameter[constant[RTMPPacket*]]]
variable[packet_complete] assign[=] constant[False]
while <ast.UnaryOp object at 0x7da20c6e6fb0> begin[:]
variable[res] assign[=] call[name[librtmp].RTMP_ReadPacket, parameter[name[self].rtmp, name[packet]]]
if compare[name[res] less[<] constant[1]] begin[:]
if call[name[librtmp].RTMP_IsTimedout, parameter[name[self].rtmp]] begin[:]
<ast.Raise object at 0x7da20c6e5870>
variable[packet_complete] assign[=] compare[name[packet].m_nBytesRead equal[==] name[packet].m_nBodySize]
return[call[name[RTMPPacket]._from_pointer, parameter[name[packet]]]] | keyword[def] identifier[read_packet] ( identifier[self] ):
literal[string]
identifier[packet] = identifier[ffi] . identifier[new] ( literal[string] )
identifier[packet_complete] = keyword[False]
keyword[while] keyword[not] identifier[packet_complete] :
identifier[res] = identifier[librtmp] . identifier[RTMP_ReadPacket] ( identifier[self] . identifier[rtmp] , identifier[packet] )
keyword[if] identifier[res] < literal[int] :
keyword[if] identifier[librtmp] . identifier[RTMP_IsTimedout] ( identifier[self] . identifier[rtmp] ):
keyword[raise] identifier[RTMPTimeoutError] ( literal[string] )
keyword[else] :
keyword[raise] identifier[RTMPError] ( literal[string] )
identifier[packet_complete] = identifier[packet] . identifier[m_nBytesRead] == identifier[packet] . identifier[m_nBodySize]
keyword[return] identifier[RTMPPacket] . identifier[_from_pointer] ( identifier[packet] ) | def read_packet(self):
"""Reads a RTMP packet from the server.
Returns a :class:`RTMPPacket`.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> packet = conn.read_packet()
>>> packet.body
b'packet body ...'
"""
packet = ffi.new('RTMPPacket*')
packet_complete = False
while not packet_complete:
res = librtmp.RTMP_ReadPacket(self.rtmp, packet)
if res < 1:
if librtmp.RTMP_IsTimedout(self.rtmp):
raise RTMPTimeoutError('Timed out while reading packet') # depends on [control=['if'], data=[]]
else:
raise RTMPError('Failed to read packet') # depends on [control=['if'], data=[]]
packet_complete = packet.m_nBytesRead == packet.m_nBodySize # depends on [control=['while'], data=[]]
return RTMPPacket._from_pointer(packet) |
def custom_modify_user_view(request, targetUsername):
''' The page to modify a user. '''
if targetUsername == ANONYMOUS_USERNAME:
messages.add_message(request, messages.WARNING, MESSAGES['ANONYMOUS_EDIT'])
page_name = "Admin - Modify User"
targetUser = get_object_or_404(User, username=targetUsername)
targetProfile = get_object_or_404(UserProfile, user=targetUser)
update_user_form = UpdateUserForm(
request.POST if "update_user_profile" in request.POST else None,
instance=targetUser,
profile = UserProfile.objects.get(user=request.user),
prefix="user",
)
update_profile_form = FullProfileForm(
request.POST if "update_user_profile" in request.POST else None,
instance=targetProfile,
prefix="profile",
)
change_user_password_form = AdminPasswordChangeForm(
targetUser,
request.POST if "change_user_password" in request.POST else None,
)
delete_user_form = DeleteUserForm(
request.POST if "delete_user" in request.POST else None,
user=targetUser,
request=request,
)
if update_user_form.is_valid() and update_profile_form.is_valid():
update_user_form.save()
update_profile_form.save()
messages.add_message(
request, messages.SUCCESS,
MESSAGES['USER_PROFILE_SAVED'].format(username=targetUser.username),
)
return HttpResponseRedirect(reverse(
'custom_modify_user', kwargs={'targetUsername': targetUsername}
))
if change_user_password_form.is_valid():
change_user_password_form.save()
messages.add_message(
request, messages.SUCCESS,
MESSAGES['USER_PW_CHANGED'].format(username=targetUser.username),
)
return HttpResponseRedirect(reverse(
'custom_modify_user', kwargs={'targetUsername': targetUsername})
)
if delete_user_form.is_valid():
delete_user_form.save()
messages.add_message(
request, messages.SUCCESS,
MESSAGES['USER_DELETED'].format(username=targetUser.username),
)
return HttpResponseRedirect(reverse("custom_manage_users"))
template_dict = {
'targetUser': targetUser,
'targetProfile': targetProfile,
'page_name': page_name,
'update_user_form': update_user_form,
'update_profile_form': update_profile_form,
'change_user_password_form': change_user_password_form,
'delete_user_form': delete_user_form,
}
if "wiki" in settings.INSTALLED_APPS:
from wiki.models import Revision
template_dict["revision_count"] = \
Revision.objects.filter(created_by=targetUser).count()
template_dict['thread_count'] = \
Thread.objects.filter(owner=targetProfile).count()
template_dict['message_count'] = \
Message.objects.filter(owner=targetProfile).count()
template_dict['request_count'] = \
Request.objects.filter(owner=targetProfile).count()
template_dict['response_count'] = \
Response.objects.filter(owner=targetProfile).count()
template_dict['announcement_count'] = \
Announcement.objects.filter(incumbent=targetProfile).count()
template_dict['event_count'] = \
Event.objects.filter(owner=targetProfile).count()
return render_to_response(
'custom_modify_user.html',
template_dict,
context_instance=RequestContext(request),
) | def function[custom_modify_user_view, parameter[request, targetUsername]]:
constant[ The page to modify a user. ]
if compare[name[targetUsername] equal[==] name[ANONYMOUS_USERNAME]] begin[:]
call[name[messages].add_message, parameter[name[request], name[messages].WARNING, call[name[MESSAGES]][constant[ANONYMOUS_EDIT]]]]
variable[page_name] assign[=] constant[Admin - Modify User]
variable[targetUser] assign[=] call[name[get_object_or_404], parameter[name[User]]]
variable[targetProfile] assign[=] call[name[get_object_or_404], parameter[name[UserProfile]]]
variable[update_user_form] assign[=] call[name[UpdateUserForm], parameter[<ast.IfExp object at 0x7da20e74b850>]]
variable[update_profile_form] assign[=] call[name[FullProfileForm], parameter[<ast.IfExp object at 0x7da207f9b160>]]
variable[change_user_password_form] assign[=] call[name[AdminPasswordChangeForm], parameter[name[targetUser], <ast.IfExp object at 0x7da207f9a7d0>]]
variable[delete_user_form] assign[=] call[name[DeleteUserForm], parameter[<ast.IfExp object at 0x7da207f996f0>]]
if <ast.BoolOp object at 0x7da207f99480> begin[:]
call[name[update_user_form].save, parameter[]]
call[name[update_profile_form].save, parameter[]]
call[name[messages].add_message, parameter[name[request], name[messages].SUCCESS, call[call[name[MESSAGES]][constant[USER_PROFILE_SAVED]].format, parameter[]]]]
return[call[name[HttpResponseRedirect], parameter[call[name[reverse], parameter[constant[custom_modify_user]]]]]]
if call[name[change_user_password_form].is_valid, parameter[]] begin[:]
call[name[change_user_password_form].save, parameter[]]
call[name[messages].add_message, parameter[name[request], name[messages].SUCCESS, call[call[name[MESSAGES]][constant[USER_PW_CHANGED]].format, parameter[]]]]
return[call[name[HttpResponseRedirect], parameter[call[name[reverse], parameter[constant[custom_modify_user]]]]]]
if call[name[delete_user_form].is_valid, parameter[]] begin[:]
call[name[delete_user_form].save, parameter[]]
call[name[messages].add_message, parameter[name[request], name[messages].SUCCESS, call[call[name[MESSAGES]][constant[USER_DELETED]].format, parameter[]]]]
return[call[name[HttpResponseRedirect], parameter[call[name[reverse], parameter[constant[custom_manage_users]]]]]]
variable[template_dict] assign[=] dictionary[[<ast.Constant object at 0x7da207f9a650>, <ast.Constant object at 0x7da207f9bfd0>, <ast.Constant object at 0x7da207f9b640>, <ast.Constant object at 0x7da207f987c0>, <ast.Constant object at 0x7da207f99a80>, <ast.Constant object at 0x7da207f98760>, <ast.Constant object at 0x7da207f9a410>], [<ast.Name object at 0x7da207f99ff0>, <ast.Name object at 0x7da207f98f10>, <ast.Name object at 0x7da207f992a0>, <ast.Name object at 0x7da207f98400>, <ast.Name object at 0x7da207f9a9e0>, <ast.Name object at 0x7da207f98ca0>, <ast.Name object at 0x7da207f9ad40>]]
if compare[constant[wiki] in name[settings].INSTALLED_APPS] begin[:]
from relative_module[wiki.models] import module[Revision]
call[name[template_dict]][constant[revision_count]] assign[=] call[call[name[Revision].objects.filter, parameter[]].count, parameter[]]
call[name[template_dict]][constant[thread_count]] assign[=] call[call[name[Thread].objects.filter, parameter[]].count, parameter[]]
call[name[template_dict]][constant[message_count]] assign[=] call[call[name[Message].objects.filter, parameter[]].count, parameter[]]
call[name[template_dict]][constant[request_count]] assign[=] call[call[name[Request].objects.filter, parameter[]].count, parameter[]]
call[name[template_dict]][constant[response_count]] assign[=] call[call[name[Response].objects.filter, parameter[]].count, parameter[]]
call[name[template_dict]][constant[announcement_count]] assign[=] call[call[name[Announcement].objects.filter, parameter[]].count, parameter[]]
call[name[template_dict]][constant[event_count]] assign[=] call[call[name[Event].objects.filter, parameter[]].count, parameter[]]
return[call[name[render_to_response], parameter[constant[custom_modify_user.html], name[template_dict]]]] | keyword[def] identifier[custom_modify_user_view] ( identifier[request] , identifier[targetUsername] ):
literal[string]
keyword[if] identifier[targetUsername] == identifier[ANONYMOUS_USERNAME] :
identifier[messages] . identifier[add_message] ( identifier[request] , identifier[messages] . identifier[WARNING] , identifier[MESSAGES] [ literal[string] ])
identifier[page_name] = literal[string]
identifier[targetUser] = identifier[get_object_or_404] ( identifier[User] , identifier[username] = identifier[targetUsername] )
identifier[targetProfile] = identifier[get_object_or_404] ( identifier[UserProfile] , identifier[user] = identifier[targetUser] )
identifier[update_user_form] = identifier[UpdateUserForm] (
identifier[request] . identifier[POST] keyword[if] literal[string] keyword[in] identifier[request] . identifier[POST] keyword[else] keyword[None] ,
identifier[instance] = identifier[targetUser] ,
identifier[profile] = identifier[UserProfile] . identifier[objects] . identifier[get] ( identifier[user] = identifier[request] . identifier[user] ),
identifier[prefix] = literal[string] ,
)
identifier[update_profile_form] = identifier[FullProfileForm] (
identifier[request] . identifier[POST] keyword[if] literal[string] keyword[in] identifier[request] . identifier[POST] keyword[else] keyword[None] ,
identifier[instance] = identifier[targetProfile] ,
identifier[prefix] = literal[string] ,
)
identifier[change_user_password_form] = identifier[AdminPasswordChangeForm] (
identifier[targetUser] ,
identifier[request] . identifier[POST] keyword[if] literal[string] keyword[in] identifier[request] . identifier[POST] keyword[else] keyword[None] ,
)
identifier[delete_user_form] = identifier[DeleteUserForm] (
identifier[request] . identifier[POST] keyword[if] literal[string] keyword[in] identifier[request] . identifier[POST] keyword[else] keyword[None] ,
identifier[user] = identifier[targetUser] ,
identifier[request] = identifier[request] ,
)
keyword[if] identifier[update_user_form] . identifier[is_valid] () keyword[and] identifier[update_profile_form] . identifier[is_valid] ():
identifier[update_user_form] . identifier[save] ()
identifier[update_profile_form] . identifier[save] ()
identifier[messages] . identifier[add_message] (
identifier[request] , identifier[messages] . identifier[SUCCESS] ,
identifier[MESSAGES] [ literal[string] ]. identifier[format] ( identifier[username] = identifier[targetUser] . identifier[username] ),
)
keyword[return] identifier[HttpResponseRedirect] ( identifier[reverse] (
literal[string] , identifier[kwargs] ={ literal[string] : identifier[targetUsername] }
))
keyword[if] identifier[change_user_password_form] . identifier[is_valid] ():
identifier[change_user_password_form] . identifier[save] ()
identifier[messages] . identifier[add_message] (
identifier[request] , identifier[messages] . identifier[SUCCESS] ,
identifier[MESSAGES] [ literal[string] ]. identifier[format] ( identifier[username] = identifier[targetUser] . identifier[username] ),
)
keyword[return] identifier[HttpResponseRedirect] ( identifier[reverse] (
literal[string] , identifier[kwargs] ={ literal[string] : identifier[targetUsername] })
)
keyword[if] identifier[delete_user_form] . identifier[is_valid] ():
identifier[delete_user_form] . identifier[save] ()
identifier[messages] . identifier[add_message] (
identifier[request] , identifier[messages] . identifier[SUCCESS] ,
identifier[MESSAGES] [ literal[string] ]. identifier[format] ( identifier[username] = identifier[targetUser] . identifier[username] ),
)
keyword[return] identifier[HttpResponseRedirect] ( identifier[reverse] ( literal[string] ))
identifier[template_dict] ={
literal[string] : identifier[targetUser] ,
literal[string] : identifier[targetProfile] ,
literal[string] : identifier[page_name] ,
literal[string] : identifier[update_user_form] ,
literal[string] : identifier[update_profile_form] ,
literal[string] : identifier[change_user_password_form] ,
literal[string] : identifier[delete_user_form] ,
}
keyword[if] literal[string] keyword[in] identifier[settings] . identifier[INSTALLED_APPS] :
keyword[from] identifier[wiki] . identifier[models] keyword[import] identifier[Revision]
identifier[template_dict] [ literal[string] ]= identifier[Revision] . identifier[objects] . identifier[filter] ( identifier[created_by] = identifier[targetUser] ). identifier[count] ()
identifier[template_dict] [ literal[string] ]= identifier[Thread] . identifier[objects] . identifier[filter] ( identifier[owner] = identifier[targetProfile] ). identifier[count] ()
identifier[template_dict] [ literal[string] ]= identifier[Message] . identifier[objects] . identifier[filter] ( identifier[owner] = identifier[targetProfile] ). identifier[count] ()
identifier[template_dict] [ literal[string] ]= identifier[Request] . identifier[objects] . identifier[filter] ( identifier[owner] = identifier[targetProfile] ). identifier[count] ()
identifier[template_dict] [ literal[string] ]= identifier[Response] . identifier[objects] . identifier[filter] ( identifier[owner] = identifier[targetProfile] ). identifier[count] ()
identifier[template_dict] [ literal[string] ]= identifier[Announcement] . identifier[objects] . identifier[filter] ( identifier[incumbent] = identifier[targetProfile] ). identifier[count] ()
identifier[template_dict] [ literal[string] ]= identifier[Event] . identifier[objects] . identifier[filter] ( identifier[owner] = identifier[targetProfile] ). identifier[count] ()
keyword[return] identifier[render_to_response] (
literal[string] ,
identifier[template_dict] ,
identifier[context_instance] = identifier[RequestContext] ( identifier[request] ),
) | def custom_modify_user_view(request, targetUsername):
""" The page to modify a user. """
if targetUsername == ANONYMOUS_USERNAME:
messages.add_message(request, messages.WARNING, MESSAGES['ANONYMOUS_EDIT']) # depends on [control=['if'], data=[]]
page_name = 'Admin - Modify User'
targetUser = get_object_or_404(User, username=targetUsername)
targetProfile = get_object_or_404(UserProfile, user=targetUser)
update_user_form = UpdateUserForm(request.POST if 'update_user_profile' in request.POST else None, instance=targetUser, profile=UserProfile.objects.get(user=request.user), prefix='user')
update_profile_form = FullProfileForm(request.POST if 'update_user_profile' in request.POST else None, instance=targetProfile, prefix='profile')
change_user_password_form = AdminPasswordChangeForm(targetUser, request.POST if 'change_user_password' in request.POST else None)
delete_user_form = DeleteUserForm(request.POST if 'delete_user' in request.POST else None, user=targetUser, request=request)
if update_user_form.is_valid() and update_profile_form.is_valid():
update_user_form.save()
update_profile_form.save()
messages.add_message(request, messages.SUCCESS, MESSAGES['USER_PROFILE_SAVED'].format(username=targetUser.username))
return HttpResponseRedirect(reverse('custom_modify_user', kwargs={'targetUsername': targetUsername})) # depends on [control=['if'], data=[]]
if change_user_password_form.is_valid():
change_user_password_form.save()
messages.add_message(request, messages.SUCCESS, MESSAGES['USER_PW_CHANGED'].format(username=targetUser.username))
return HttpResponseRedirect(reverse('custom_modify_user', kwargs={'targetUsername': targetUsername})) # depends on [control=['if'], data=[]]
if delete_user_form.is_valid():
delete_user_form.save()
messages.add_message(request, messages.SUCCESS, MESSAGES['USER_DELETED'].format(username=targetUser.username))
return HttpResponseRedirect(reverse('custom_manage_users')) # depends on [control=['if'], data=[]]
template_dict = {'targetUser': targetUser, 'targetProfile': targetProfile, 'page_name': page_name, 'update_user_form': update_user_form, 'update_profile_form': update_profile_form, 'change_user_password_form': change_user_password_form, 'delete_user_form': delete_user_form}
if 'wiki' in settings.INSTALLED_APPS:
from wiki.models import Revision
template_dict['revision_count'] = Revision.objects.filter(created_by=targetUser).count() # depends on [control=['if'], data=[]]
template_dict['thread_count'] = Thread.objects.filter(owner=targetProfile).count()
template_dict['message_count'] = Message.objects.filter(owner=targetProfile).count()
template_dict['request_count'] = Request.objects.filter(owner=targetProfile).count()
template_dict['response_count'] = Response.objects.filter(owner=targetProfile).count()
template_dict['announcement_count'] = Announcement.objects.filter(incumbent=targetProfile).count()
template_dict['event_count'] = Event.objects.filter(owner=targetProfile).count()
return render_to_response('custom_modify_user.html', template_dict, context_instance=RequestContext(request)) |
def RGB_to_HSL(cobj, *args, **kwargs):
"""
Converts from RGB to HSL.
H values are in degrees and are 0 to 360.
S values are a percentage, 0.0 to 1.0.
L values are a percentage, 0.0 to 1.0.
"""
var_R = cobj.rgb_r
var_G = cobj.rgb_g
var_B = cobj.rgb_b
var_max = max(var_R, var_G, var_B)
var_min = min(var_R, var_G, var_B)
var_H = __RGB_to_Hue(var_R, var_G, var_B, var_min, var_max)
var_L = 0.5 * (var_max + var_min)
if var_max == var_min:
var_S = 0
elif var_L <= 0.5:
var_S = (var_max - var_min) / (2.0 * var_L)
else:
var_S = (var_max - var_min) / (2.0 - (2.0 * var_L))
return HSLColor(
var_H, var_S, var_L) | def function[RGB_to_HSL, parameter[cobj]]:
constant[
Converts from RGB to HSL.
H values are in degrees and are 0 to 360.
S values are a percentage, 0.0 to 1.0.
L values are a percentage, 0.0 to 1.0.
]
variable[var_R] assign[=] name[cobj].rgb_r
variable[var_G] assign[=] name[cobj].rgb_g
variable[var_B] assign[=] name[cobj].rgb_b
variable[var_max] assign[=] call[name[max], parameter[name[var_R], name[var_G], name[var_B]]]
variable[var_min] assign[=] call[name[min], parameter[name[var_R], name[var_G], name[var_B]]]
variable[var_H] assign[=] call[name[__RGB_to_Hue], parameter[name[var_R], name[var_G], name[var_B], name[var_min], name[var_max]]]
variable[var_L] assign[=] binary_operation[constant[0.5] * binary_operation[name[var_max] + name[var_min]]]
if compare[name[var_max] equal[==] name[var_min]] begin[:]
variable[var_S] assign[=] constant[0]
return[call[name[HSLColor], parameter[name[var_H], name[var_S], name[var_L]]]] | keyword[def] identifier[RGB_to_HSL] ( identifier[cobj] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[var_R] = identifier[cobj] . identifier[rgb_r]
identifier[var_G] = identifier[cobj] . identifier[rgb_g]
identifier[var_B] = identifier[cobj] . identifier[rgb_b]
identifier[var_max] = identifier[max] ( identifier[var_R] , identifier[var_G] , identifier[var_B] )
identifier[var_min] = identifier[min] ( identifier[var_R] , identifier[var_G] , identifier[var_B] )
identifier[var_H] = identifier[__RGB_to_Hue] ( identifier[var_R] , identifier[var_G] , identifier[var_B] , identifier[var_min] , identifier[var_max] )
identifier[var_L] = literal[int] *( identifier[var_max] + identifier[var_min] )
keyword[if] identifier[var_max] == identifier[var_min] :
identifier[var_S] = literal[int]
keyword[elif] identifier[var_L] <= literal[int] :
identifier[var_S] =( identifier[var_max] - identifier[var_min] )/( literal[int] * identifier[var_L] )
keyword[else] :
identifier[var_S] =( identifier[var_max] - identifier[var_min] )/( literal[int] -( literal[int] * identifier[var_L] ))
keyword[return] identifier[HSLColor] (
identifier[var_H] , identifier[var_S] , identifier[var_L] ) | def RGB_to_HSL(cobj, *args, **kwargs):
"""
Converts from RGB to HSL.
H values are in degrees and are 0 to 360.
S values are a percentage, 0.0 to 1.0.
L values are a percentage, 0.0 to 1.0.
"""
var_R = cobj.rgb_r
var_G = cobj.rgb_g
var_B = cobj.rgb_b
var_max = max(var_R, var_G, var_B)
var_min = min(var_R, var_G, var_B)
var_H = __RGB_to_Hue(var_R, var_G, var_B, var_min, var_max)
var_L = 0.5 * (var_max + var_min)
if var_max == var_min:
var_S = 0 # depends on [control=['if'], data=[]]
elif var_L <= 0.5:
var_S = (var_max - var_min) / (2.0 * var_L) # depends on [control=['if'], data=['var_L']]
else:
var_S = (var_max - var_min) / (2.0 - 2.0 * var_L)
return HSLColor(var_H, var_S, var_L) |
def get_inline_func(inline_str, modules=None, **stream_kwargs):
"""returns a function decorated by `cbox.stream` decorator.
:param str inline_str: the inline function to execute,
can use `s` - local variable as the input line/char/raw
(according to `input_type` param).
:param str modules: comma separated list of modules to import before
running the inline function.
:param dict stream_kwargs: optional arguments to `cbox.stream` decorator
:rtype: callable
"""
if not _is_compilable(inline_str):
raise ValueError(
'cannot compile the inline expression - "%s"' % inline_str
)
inline_globals = _import_inline_modules(modules)
func = _inline2func(inline_str, inline_globals, **stream_kwargs)
return func | def function[get_inline_func, parameter[inline_str, modules]]:
constant[returns a function decorated by `cbox.stream` decorator.
:param str inline_str: the inline function to execute,
can use `s` - local variable as the input line/char/raw
(according to `input_type` param).
:param str modules: comma separated list of modules to import before
running the inline function.
:param dict stream_kwargs: optional arguments to `cbox.stream` decorator
:rtype: callable
]
if <ast.UnaryOp object at 0x7da18c4cdb10> begin[:]
<ast.Raise object at 0x7da204344f10>
variable[inline_globals] assign[=] call[name[_import_inline_modules], parameter[name[modules]]]
variable[func] assign[=] call[name[_inline2func], parameter[name[inline_str], name[inline_globals]]]
return[name[func]] | keyword[def] identifier[get_inline_func] ( identifier[inline_str] , identifier[modules] = keyword[None] ,** identifier[stream_kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[_is_compilable] ( identifier[inline_str] ):
keyword[raise] identifier[ValueError] (
literal[string] % identifier[inline_str]
)
identifier[inline_globals] = identifier[_import_inline_modules] ( identifier[modules] )
identifier[func] = identifier[_inline2func] ( identifier[inline_str] , identifier[inline_globals] ,** identifier[stream_kwargs] )
keyword[return] identifier[func] | def get_inline_func(inline_str, modules=None, **stream_kwargs):
"""returns a function decorated by `cbox.stream` decorator.
:param str inline_str: the inline function to execute,
can use `s` - local variable as the input line/char/raw
(according to `input_type` param).
:param str modules: comma separated list of modules to import before
running the inline function.
:param dict stream_kwargs: optional arguments to `cbox.stream` decorator
:rtype: callable
"""
if not _is_compilable(inline_str):
raise ValueError('cannot compile the inline expression - "%s"' % inline_str) # depends on [control=['if'], data=[]]
inline_globals = _import_inline_modules(modules)
func = _inline2func(inline_str, inline_globals, **stream_kwargs)
return func |
def order_key_defs(key_def):
"""
Sort a set of key definitions. A key definition that defines more then
one usage type are splitted into as many definitions as the number of
usage types specified. One key definition per usage type.
:param key_def: A set of key definitions
:return: The set of definitions as a sorted list
"""
_int = []
# First make sure all defs only reference one usage
for kd in key_def:
if len(kd['use']) > 1:
for _use in kd['use']:
_kd = kd.copy()
_kd['use'] = _use
_int.append(_kd)
else:
_int.append(kd)
_int.sort(key=cmp_to_key(sort_func))
return _int | def function[order_key_defs, parameter[key_def]]:
constant[
Sort a set of key definitions. A key definition that defines more then
one usage type are splitted into as many definitions as the number of
usage types specified. One key definition per usage type.
:param key_def: A set of key definitions
:return: The set of definitions as a sorted list
]
variable[_int] assign[=] list[[]]
for taget[name[kd]] in starred[name[key_def]] begin[:]
if compare[call[name[len], parameter[call[name[kd]][constant[use]]]] greater[>] constant[1]] begin[:]
for taget[name[_use]] in starred[call[name[kd]][constant[use]]] begin[:]
variable[_kd] assign[=] call[name[kd].copy, parameter[]]
call[name[_kd]][constant[use]] assign[=] name[_use]
call[name[_int].append, parameter[name[_kd]]]
call[name[_int].sort, parameter[]]
return[name[_int]] | keyword[def] identifier[order_key_defs] ( identifier[key_def] ):
literal[string]
identifier[_int] =[]
keyword[for] identifier[kd] keyword[in] identifier[key_def] :
keyword[if] identifier[len] ( identifier[kd] [ literal[string] ])> literal[int] :
keyword[for] identifier[_use] keyword[in] identifier[kd] [ literal[string] ]:
identifier[_kd] = identifier[kd] . identifier[copy] ()
identifier[_kd] [ literal[string] ]= identifier[_use]
identifier[_int] . identifier[append] ( identifier[_kd] )
keyword[else] :
identifier[_int] . identifier[append] ( identifier[kd] )
identifier[_int] . identifier[sort] ( identifier[key] = identifier[cmp_to_key] ( identifier[sort_func] ))
keyword[return] identifier[_int] | def order_key_defs(key_def):
"""
Sort a set of key definitions. A key definition that defines more then
one usage type are splitted into as many definitions as the number of
usage types specified. One key definition per usage type.
:param key_def: A set of key definitions
:return: The set of definitions as a sorted list
"""
_int = []
# First make sure all defs only reference one usage
for kd in key_def:
if len(kd['use']) > 1:
for _use in kd['use']:
_kd = kd.copy()
_kd['use'] = _use
_int.append(_kd) # depends on [control=['for'], data=['_use']] # depends on [control=['if'], data=[]]
else:
_int.append(kd) # depends on [control=['for'], data=['kd']]
_int.sort(key=cmp_to_key(sort_func))
return _int |
def printableVal(val, type_bit=True, justlength=False):
"""
Very old way of doing pretty printing. Need to update and refactor.
DEPRICATE
"""
from utool import util_dev
# Move to util_dev
# NUMPY ARRAY
import numpy as np
if type(val) is np.ndarray:
info = npArrInfo(val)
if info.dtypestr.startswith('bool'):
_valstr = '{ shape:' + info.shapestr + ' bittotal: ' + info.bittotal + '}'
# + '\n |_____'
elif info.dtypestr.startswith('float'):
_valstr = util_dev.get_stats_str(val)
else:
_valstr = '{ shape:' + info.shapestr + ' mM:' + info.minmaxstr + ' }' # + '\n |_____'
# String
elif isinstance(val, (str, unicode)): # NOQA
_valstr = '\'%s\'' % val
# List
elif isinstance(val, list):
if justlength or len(val) > 30:
_valstr = 'len=' + str(len(val))
else:
_valstr = '[ ' + (', \n '.join([str(v) for v in val])) + ' ]'
# ??? isinstance(val, AbstractPrintable):
elif hasattr(val, 'get_printable') and type(val) != type:
_valstr = val.get_printable(type_bit=type_bit)
elif isinstance(val, dict):
_valstr = '{\n'
for val_key in val.keys():
val_val = val[val_key]
_valstr += ' ' + str(val_key) + ' : ' + str(val_val) + '\n'
_valstr += '}'
else:
_valstr = str(val)
if _valstr.find('\n') > 0: # Indent if necessary
_valstr = _valstr.replace('\n', '\n ')
_valstr = '\n ' + _valstr
_valstr = re.sub('\n *$', '', _valstr) # Replace empty lines
return _valstr | def function[printableVal, parameter[val, type_bit, justlength]]:
constant[
Very old way of doing pretty printing. Need to update and refactor.
DEPRICATE
]
from relative_module[utool] import module[util_dev]
import module[numpy] as alias[np]
if compare[call[name[type], parameter[name[val]]] is name[np].ndarray] begin[:]
variable[info] assign[=] call[name[npArrInfo], parameter[name[val]]]
if call[name[info].dtypestr.startswith, parameter[constant[bool]]] begin[:]
variable[_valstr] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[constant[{ shape:] + name[info].shapestr] + constant[ bittotal: ]] + name[info].bittotal] + constant[}]]
if compare[call[name[_valstr].find, parameter[constant[
]]] greater[>] constant[0]] begin[:]
variable[_valstr] assign[=] call[name[_valstr].replace, parameter[constant[
], constant[
]]]
variable[_valstr] assign[=] binary_operation[constant[
] + name[_valstr]]
variable[_valstr] assign[=] call[name[re].sub, parameter[constant[
*$], constant[], name[_valstr]]]
return[name[_valstr]] | keyword[def] identifier[printableVal] ( identifier[val] , identifier[type_bit] = keyword[True] , identifier[justlength] = keyword[False] ):
literal[string]
keyword[from] identifier[utool] keyword[import] identifier[util_dev]
keyword[import] identifier[numpy] keyword[as] identifier[np]
keyword[if] identifier[type] ( identifier[val] ) keyword[is] identifier[np] . identifier[ndarray] :
identifier[info] = identifier[npArrInfo] ( identifier[val] )
keyword[if] identifier[info] . identifier[dtypestr] . identifier[startswith] ( literal[string] ):
identifier[_valstr] = literal[string] + identifier[info] . identifier[shapestr] + literal[string] + identifier[info] . identifier[bittotal] + literal[string]
keyword[elif] identifier[info] . identifier[dtypestr] . identifier[startswith] ( literal[string] ):
identifier[_valstr] = identifier[util_dev] . identifier[get_stats_str] ( identifier[val] )
keyword[else] :
identifier[_valstr] = literal[string] + identifier[info] . identifier[shapestr] + literal[string] + identifier[info] . identifier[minmaxstr] + literal[string]
keyword[elif] identifier[isinstance] ( identifier[val] ,( identifier[str] , identifier[unicode] )):
identifier[_valstr] = literal[string] % identifier[val]
keyword[elif] identifier[isinstance] ( identifier[val] , identifier[list] ):
keyword[if] identifier[justlength] keyword[or] identifier[len] ( identifier[val] )> literal[int] :
identifier[_valstr] = literal[string] + identifier[str] ( identifier[len] ( identifier[val] ))
keyword[else] :
identifier[_valstr] = literal[string] +( literal[string] . identifier[join] ([ identifier[str] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[val] ]))+ literal[string]
keyword[elif] identifier[hasattr] ( identifier[val] , literal[string] ) keyword[and] identifier[type] ( identifier[val] )!= identifier[type] :
identifier[_valstr] = identifier[val] . identifier[get_printable] ( identifier[type_bit] = identifier[type_bit] )
keyword[elif] identifier[isinstance] ( identifier[val] , identifier[dict] ):
identifier[_valstr] = literal[string]
keyword[for] identifier[val_key] keyword[in] identifier[val] . identifier[keys] ():
identifier[val_val] = identifier[val] [ identifier[val_key] ]
identifier[_valstr] += literal[string] + identifier[str] ( identifier[val_key] )+ literal[string] + identifier[str] ( identifier[val_val] )+ literal[string]
identifier[_valstr] += literal[string]
keyword[else] :
identifier[_valstr] = identifier[str] ( identifier[val] )
keyword[if] identifier[_valstr] . identifier[find] ( literal[string] )> literal[int] :
identifier[_valstr] = identifier[_valstr] . identifier[replace] ( literal[string] , literal[string] )
identifier[_valstr] = literal[string] + identifier[_valstr]
identifier[_valstr] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[_valstr] )
keyword[return] identifier[_valstr] | def printableVal(val, type_bit=True, justlength=False):
"""
Very old way of doing pretty printing. Need to update and refactor.
DEPRICATE
"""
from utool import util_dev
# Move to util_dev
# NUMPY ARRAY
import numpy as np
if type(val) is np.ndarray:
info = npArrInfo(val)
if info.dtypestr.startswith('bool'):
_valstr = '{ shape:' + info.shapestr + ' bittotal: ' + info.bittotal + '}' # depends on [control=['if'], data=[]]
# + '\n |_____'
elif info.dtypestr.startswith('float'):
_valstr = util_dev.get_stats_str(val) # depends on [control=['if'], data=[]]
else:
_valstr = '{ shape:' + info.shapestr + ' mM:' + info.minmaxstr + ' }' # + '\n |_____' # depends on [control=['if'], data=[]]
# String
elif isinstance(val, (str, unicode)): # NOQA
_valstr = "'%s'" % val # depends on [control=['if'], data=[]]
# List
elif isinstance(val, list):
if justlength or len(val) > 30:
_valstr = 'len=' + str(len(val)) # depends on [control=['if'], data=[]]
else:
_valstr = '[ ' + ', \n '.join([str(v) for v in val]) + ' ]' # depends on [control=['if'], data=[]]
# ??? isinstance(val, AbstractPrintable):
elif hasattr(val, 'get_printable') and type(val) != type:
_valstr = val.get_printable(type_bit=type_bit) # depends on [control=['if'], data=[]]
elif isinstance(val, dict):
_valstr = '{\n'
for val_key in val.keys():
val_val = val[val_key]
_valstr += ' ' + str(val_key) + ' : ' + str(val_val) + '\n' # depends on [control=['for'], data=['val_key']]
_valstr += '}' # depends on [control=['if'], data=[]]
else:
_valstr = str(val)
if _valstr.find('\n') > 0: # Indent if necessary
_valstr = _valstr.replace('\n', '\n ')
_valstr = '\n ' + _valstr # depends on [control=['if'], data=[]]
_valstr = re.sub('\n *$', '', _valstr) # Replace empty lines
return _valstr |
def unzip(filename, match_dir=False, destdir=None):
"""
Extract all files from a zip archive
filename: The path to the zip file
match_dir: If True all files in the zip must be contained in a subdirectory
named after the archive file with extension removed
destdir: Extract the zip into this directory, default current directory
return: If match_dir is True then returns the subdirectory (including
destdir), otherwise returns destdir or '.'
"""
if not destdir:
destdir = '.'
z = zipfile.ZipFile(filename)
unzipped = '.'
if match_dir:
if not filename.endswith('.zip'):
raise FileException('Expected .zip file extension', filename)
unzipped = os.path.basename(filename)[:-4]
check_extracted_paths(z.namelist(), unzipped)
else:
check_extracted_paths(z.namelist())
# File permissions, see
# http://stackoverflow.com/a/6297838
# http://stackoverflow.com/a/3015466
for info in z.infolist():
log.debug('Extracting %s to %s', info.filename, destdir)
z.extract(info, destdir)
perms = info.external_attr >> 16 & 4095
if perms > 0:
os.chmod(os.path.join(destdir, info.filename), perms)
return os.path.join(destdir, unzipped) | def function[unzip, parameter[filename, match_dir, destdir]]:
constant[
Extract all files from a zip archive
filename: The path to the zip file
match_dir: If True all files in the zip must be contained in a subdirectory
named after the archive file with extension removed
destdir: Extract the zip into this directory, default current directory
return: If match_dir is True then returns the subdirectory (including
destdir), otherwise returns destdir or '.'
]
if <ast.UnaryOp object at 0x7da1b0fe9000> begin[:]
variable[destdir] assign[=] constant[.]
variable[z] assign[=] call[name[zipfile].ZipFile, parameter[name[filename]]]
variable[unzipped] assign[=] constant[.]
if name[match_dir] begin[:]
if <ast.UnaryOp object at 0x7da1b0fe9570> begin[:]
<ast.Raise object at 0x7da1b0fe9510>
variable[unzipped] assign[=] call[call[name[os].path.basename, parameter[name[filename]]]][<ast.Slice object at 0x7da1b0feb610>]
call[name[check_extracted_paths], parameter[call[name[z].namelist, parameter[]], name[unzipped]]]
for taget[name[info]] in starred[call[name[z].infolist, parameter[]]] begin[:]
call[name[log].debug, parameter[constant[Extracting %s to %s], name[info].filename, name[destdir]]]
call[name[z].extract, parameter[name[info], name[destdir]]]
variable[perms] assign[=] binary_operation[binary_operation[name[info].external_attr <ast.RShift object at 0x7da2590d6a40> constant[16]] <ast.BitAnd object at 0x7da2590d6b60> constant[4095]]
if compare[name[perms] greater[>] constant[0]] begin[:]
call[name[os].chmod, parameter[call[name[os].path.join, parameter[name[destdir], name[info].filename]], name[perms]]]
return[call[name[os].path.join, parameter[name[destdir], name[unzipped]]]] | keyword[def] identifier[unzip] ( identifier[filename] , identifier[match_dir] = keyword[False] , identifier[destdir] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[destdir] :
identifier[destdir] = literal[string]
identifier[z] = identifier[zipfile] . identifier[ZipFile] ( identifier[filename] )
identifier[unzipped] = literal[string]
keyword[if] identifier[match_dir] :
keyword[if] keyword[not] identifier[filename] . identifier[endswith] ( literal[string] ):
keyword[raise] identifier[FileException] ( literal[string] , identifier[filename] )
identifier[unzipped] = identifier[os] . identifier[path] . identifier[basename] ( identifier[filename] )[:- literal[int] ]
identifier[check_extracted_paths] ( identifier[z] . identifier[namelist] (), identifier[unzipped] )
keyword[else] :
identifier[check_extracted_paths] ( identifier[z] . identifier[namelist] ())
keyword[for] identifier[info] keyword[in] identifier[z] . identifier[infolist] ():
identifier[log] . identifier[debug] ( literal[string] , identifier[info] . identifier[filename] , identifier[destdir] )
identifier[z] . identifier[extract] ( identifier[info] , identifier[destdir] )
identifier[perms] = identifier[info] . identifier[external_attr] >> literal[int] & literal[int]
keyword[if] identifier[perms] > literal[int] :
identifier[os] . identifier[chmod] ( identifier[os] . identifier[path] . identifier[join] ( identifier[destdir] , identifier[info] . identifier[filename] ), identifier[perms] )
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[destdir] , identifier[unzipped] ) | def unzip(filename, match_dir=False, destdir=None):
"""
Extract all files from a zip archive
filename: The path to the zip file
match_dir: If True all files in the zip must be contained in a subdirectory
named after the archive file with extension removed
destdir: Extract the zip into this directory, default current directory
return: If match_dir is True then returns the subdirectory (including
destdir), otherwise returns destdir or '.'
"""
if not destdir:
destdir = '.' # depends on [control=['if'], data=[]]
z = zipfile.ZipFile(filename)
unzipped = '.'
if match_dir:
if not filename.endswith('.zip'):
raise FileException('Expected .zip file extension', filename) # depends on [control=['if'], data=[]]
unzipped = os.path.basename(filename)[:-4]
check_extracted_paths(z.namelist(), unzipped) # depends on [control=['if'], data=[]]
else:
check_extracted_paths(z.namelist())
# File permissions, see
# http://stackoverflow.com/a/6297838
# http://stackoverflow.com/a/3015466
for info in z.infolist():
log.debug('Extracting %s to %s', info.filename, destdir)
z.extract(info, destdir)
perms = info.external_attr >> 16 & 4095
if perms > 0:
os.chmod(os.path.join(destdir, info.filename), perms) # depends on [control=['if'], data=['perms']] # depends on [control=['for'], data=['info']]
return os.path.join(destdir, unzipped) |
def register_id(self, cmd_type, obj):
"""Registers an object (through its integration id) to receive update
notifications. This is the core mechanism how Output and Keypad objects get
notified when the controller sends status updates."""
ids = self._ids.setdefault(cmd_type, {})
if obj.id in ids:
raise IntegrationIdExistsError
self._ids[cmd_type][obj.id] = obj | def function[register_id, parameter[self, cmd_type, obj]]:
constant[Registers an object (through its integration id) to receive update
notifications. This is the core mechanism how Output and Keypad objects get
notified when the controller sends status updates.]
variable[ids] assign[=] call[name[self]._ids.setdefault, parameter[name[cmd_type], dictionary[[], []]]]
if compare[name[obj].id in name[ids]] begin[:]
<ast.Raise object at 0x7da1b05bffa0>
call[call[name[self]._ids][name[cmd_type]]][name[obj].id] assign[=] name[obj] | keyword[def] identifier[register_id] ( identifier[self] , identifier[cmd_type] , identifier[obj] ):
literal[string]
identifier[ids] = identifier[self] . identifier[_ids] . identifier[setdefault] ( identifier[cmd_type] ,{})
keyword[if] identifier[obj] . identifier[id] keyword[in] identifier[ids] :
keyword[raise] identifier[IntegrationIdExistsError]
identifier[self] . identifier[_ids] [ identifier[cmd_type] ][ identifier[obj] . identifier[id] ]= identifier[obj] | def register_id(self, cmd_type, obj):
"""Registers an object (through its integration id) to receive update
notifications. This is the core mechanism how Output and Keypad objects get
notified when the controller sends status updates."""
ids = self._ids.setdefault(cmd_type, {})
if obj.id in ids:
raise IntegrationIdExistsError # depends on [control=['if'], data=[]]
self._ids[cmd_type][obj.id] = obj |
def _training(self):
"""Perform multiple training iterations of both policy and value baseline.
Training on the episodes collected in the memory. Reset the memory
afterwards. Always returns a summary string.
Returns:
Summary tensor.
"""
with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
with tf.name_scope('training'):
assert_full = tf.assert_equal(
self._num_finished_episodes, self._config.update_every)
with tf.control_dependencies([assert_full]):
data = self._finished_episodes.data()
(observ, action, old_policy_params, reward), length = data
# We set padding frames of the parameters to ones to prevent Gaussians
# with zero variance. This would result in an infinite KL divergence,
# which, even if masked out, would result in NaN gradients.
old_policy_params = tools.nested.map(
lambda param: self._mask(param, length, 1), old_policy_params)
with tf.control_dependencies([tf.assert_greater(length, 0)]):
length = tf.identity(length)
observ = self._observ_filter.transform(observ)
reward = self._reward_filter.transform(reward)
update_summary = self._perform_update_steps(
observ, action, old_policy_params, reward, length)
with tf.control_dependencies([update_summary]):
penalty_summary = self._adjust_penalty(
observ, old_policy_params, length)
with tf.control_dependencies([penalty_summary]):
clear_memory = tf.group(
self._finished_episodes.clear(),
self._num_finished_episodes.assign(0))
with tf.control_dependencies([clear_memory]):
weight_summary = utility.variable_summaries(
tf.trainable_variables(), self._config.weight_summaries)
return tf.summary.merge([
update_summary, penalty_summary, weight_summary]) | def function[_training, parameter[self]]:
constant[Perform multiple training iterations of both policy and value baseline.
Training on the episodes collected in the memory. Reset the memory
afterwards. Always returns a summary string.
Returns:
Summary tensor.
]
with call[name[tf].device, parameter[<ast.IfExp object at 0x7da20c795390>]] begin[:]
with call[name[tf].name_scope, parameter[constant[training]]] begin[:]
variable[assert_full] assign[=] call[name[tf].assert_equal, parameter[name[self]._num_finished_episodes, name[self]._config.update_every]]
with call[name[tf].control_dependencies, parameter[list[[<ast.Name object at 0x7da20c7962f0>]]]] begin[:]
variable[data] assign[=] call[name[self]._finished_episodes.data, parameter[]]
<ast.Tuple object at 0x7da20c794af0> assign[=] name[data]
variable[old_policy_params] assign[=] call[name[tools].nested.map, parameter[<ast.Lambda object at 0x7da20c794eb0>, name[old_policy_params]]]
with call[name[tf].control_dependencies, parameter[list[[<ast.Call object at 0x7da2054a4970>]]]] begin[:]
variable[length] assign[=] call[name[tf].identity, parameter[name[length]]]
variable[observ] assign[=] call[name[self]._observ_filter.transform, parameter[name[observ]]]
variable[reward] assign[=] call[name[self]._reward_filter.transform, parameter[name[reward]]]
variable[update_summary] assign[=] call[name[self]._perform_update_steps, parameter[name[observ], name[action], name[old_policy_params], name[reward], name[length]]]
with call[name[tf].control_dependencies, parameter[list[[<ast.Name object at 0x7da2054a58a0>]]]] begin[:]
variable[penalty_summary] assign[=] call[name[self]._adjust_penalty, parameter[name[observ], name[old_policy_params], name[length]]]
with call[name[tf].control_dependencies, parameter[list[[<ast.Name object at 0x7da2054a6c20>]]]] begin[:]
variable[clear_memory] assign[=] call[name[tf].group, parameter[call[name[self]._finished_episodes.clear, parameter[]], call[name[self]._num_finished_episodes.assign, parameter[constant[0]]]]]
with call[name[tf].control_dependencies, parameter[list[[<ast.Name object at 0x7da2054a4070>]]]] begin[:]
variable[weight_summary] assign[=] call[name[utility].variable_summaries, parameter[call[name[tf].trainable_variables, parameter[]], name[self]._config.weight_summaries]]
return[call[name[tf].summary.merge, parameter[list[[<ast.Name object at 0x7da2054a6a10>, <ast.Name object at 0x7da2054a46d0>, <ast.Name object at 0x7da2054a7520>]]]]] | keyword[def] identifier[_training] ( identifier[self] ):
literal[string]
keyword[with] identifier[tf] . identifier[device] ( literal[string] keyword[if] identifier[self] . identifier[_use_gpu] keyword[else] literal[string] ):
keyword[with] identifier[tf] . identifier[name_scope] ( literal[string] ):
identifier[assert_full] = identifier[tf] . identifier[assert_equal] (
identifier[self] . identifier[_num_finished_episodes] , identifier[self] . identifier[_config] . identifier[update_every] )
keyword[with] identifier[tf] . identifier[control_dependencies] ([ identifier[assert_full] ]):
identifier[data] = identifier[self] . identifier[_finished_episodes] . identifier[data] ()
( identifier[observ] , identifier[action] , identifier[old_policy_params] , identifier[reward] ), identifier[length] = identifier[data]
identifier[old_policy_params] = identifier[tools] . identifier[nested] . identifier[map] (
keyword[lambda] identifier[param] : identifier[self] . identifier[_mask] ( identifier[param] , identifier[length] , literal[int] ), identifier[old_policy_params] )
keyword[with] identifier[tf] . identifier[control_dependencies] ([ identifier[tf] . identifier[assert_greater] ( identifier[length] , literal[int] )]):
identifier[length] = identifier[tf] . identifier[identity] ( identifier[length] )
identifier[observ] = identifier[self] . identifier[_observ_filter] . identifier[transform] ( identifier[observ] )
identifier[reward] = identifier[self] . identifier[_reward_filter] . identifier[transform] ( identifier[reward] )
identifier[update_summary] = identifier[self] . identifier[_perform_update_steps] (
identifier[observ] , identifier[action] , identifier[old_policy_params] , identifier[reward] , identifier[length] )
keyword[with] identifier[tf] . identifier[control_dependencies] ([ identifier[update_summary] ]):
identifier[penalty_summary] = identifier[self] . identifier[_adjust_penalty] (
identifier[observ] , identifier[old_policy_params] , identifier[length] )
keyword[with] identifier[tf] . identifier[control_dependencies] ([ identifier[penalty_summary] ]):
identifier[clear_memory] = identifier[tf] . identifier[group] (
identifier[self] . identifier[_finished_episodes] . identifier[clear] (),
identifier[self] . identifier[_num_finished_episodes] . identifier[assign] ( literal[int] ))
keyword[with] identifier[tf] . identifier[control_dependencies] ([ identifier[clear_memory] ]):
identifier[weight_summary] = identifier[utility] . identifier[variable_summaries] (
identifier[tf] . identifier[trainable_variables] (), identifier[self] . identifier[_config] . identifier[weight_summaries] )
keyword[return] identifier[tf] . identifier[summary] . identifier[merge] ([
identifier[update_summary] , identifier[penalty_summary] , identifier[weight_summary] ]) | def _training(self):
"""Perform multiple training iterations of both policy and value baseline.
Training on the episodes collected in the memory. Reset the memory
afterwards. Always returns a summary string.
Returns:
Summary tensor.
"""
with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
with tf.name_scope('training'):
assert_full = tf.assert_equal(self._num_finished_episodes, self._config.update_every)
with tf.control_dependencies([assert_full]):
data = self._finished_episodes.data() # depends on [control=['with'], data=[]]
((observ, action, old_policy_params, reward), length) = data
# We set padding frames of the parameters to ones to prevent Gaussians
# with zero variance. This would result in an infinite KL divergence,
# which, even if masked out, would result in NaN gradients.
old_policy_params = tools.nested.map(lambda param: self._mask(param, length, 1), old_policy_params)
with tf.control_dependencies([tf.assert_greater(length, 0)]):
length = tf.identity(length) # depends on [control=['with'], data=[]]
observ = self._observ_filter.transform(observ)
reward = self._reward_filter.transform(reward)
update_summary = self._perform_update_steps(observ, action, old_policy_params, reward, length)
with tf.control_dependencies([update_summary]):
penalty_summary = self._adjust_penalty(observ, old_policy_params, length) # depends on [control=['with'], data=[]]
with tf.control_dependencies([penalty_summary]):
clear_memory = tf.group(self._finished_episodes.clear(), self._num_finished_episodes.assign(0)) # depends on [control=['with'], data=[]]
with tf.control_dependencies([clear_memory]):
weight_summary = utility.variable_summaries(tf.trainable_variables(), self._config.weight_summaries)
return tf.summary.merge([update_summary, penalty_summary, weight_summary]) # depends on [control=['with'], data=[]] # depends on [control=['with'], data=[]] # depends on [control=['with'], data=[]] |
def abstracts(soup):
"""
Find the article abstract and format it
"""
abstracts = []
abstract_tags = raw_parser.abstract(soup)
for tag in abstract_tags:
abstract = {}
abstract["abstract_type"] = tag.get("abstract-type")
title_tag = raw_parser.title(tag)
if title_tag:
abstract["title"] = node_text(title_tag)
abstract["content"] = None
if raw_parser.paragraph(tag):
abstract["content"] = ""
abstract["full_content"] = ""
good_paragraphs = remove_doi_paragraph(raw_parser.paragraph(tag))
# Plain text content
glue = ""
for p_tag in good_paragraphs:
abstract["content"] += glue + node_text(p_tag)
glue = " "
# Content including markup tags
# When more than one paragraph, wrap each in a <p> tag
for p_tag in good_paragraphs:
abstract["full_content"] += '<p>' + node_contents_str(p_tag) + '</p>'
abstracts.append(abstract)
return abstracts | def function[abstracts, parameter[soup]]:
constant[
Find the article abstract and format it
]
variable[abstracts] assign[=] list[[]]
variable[abstract_tags] assign[=] call[name[raw_parser].abstract, parameter[name[soup]]]
for taget[name[tag]] in starred[name[abstract_tags]] begin[:]
variable[abstract] assign[=] dictionary[[], []]
call[name[abstract]][constant[abstract_type]] assign[=] call[name[tag].get, parameter[constant[abstract-type]]]
variable[title_tag] assign[=] call[name[raw_parser].title, parameter[name[tag]]]
if name[title_tag] begin[:]
call[name[abstract]][constant[title]] assign[=] call[name[node_text], parameter[name[title_tag]]]
call[name[abstract]][constant[content]] assign[=] constant[None]
if call[name[raw_parser].paragraph, parameter[name[tag]]] begin[:]
call[name[abstract]][constant[content]] assign[=] constant[]
call[name[abstract]][constant[full_content]] assign[=] constant[]
variable[good_paragraphs] assign[=] call[name[remove_doi_paragraph], parameter[call[name[raw_parser].paragraph, parameter[name[tag]]]]]
variable[glue] assign[=] constant[]
for taget[name[p_tag]] in starred[name[good_paragraphs]] begin[:]
<ast.AugAssign object at 0x7da1b112b760>
variable[glue] assign[=] constant[ ]
for taget[name[p_tag]] in starred[name[good_paragraphs]] begin[:]
<ast.AugAssign object at 0x7da1b112aa70>
call[name[abstracts].append, parameter[name[abstract]]]
return[name[abstracts]] | keyword[def] identifier[abstracts] ( identifier[soup] ):
literal[string]
identifier[abstracts] =[]
identifier[abstract_tags] = identifier[raw_parser] . identifier[abstract] ( identifier[soup] )
keyword[for] identifier[tag] keyword[in] identifier[abstract_tags] :
identifier[abstract] ={}
identifier[abstract] [ literal[string] ]= identifier[tag] . identifier[get] ( literal[string] )
identifier[title_tag] = identifier[raw_parser] . identifier[title] ( identifier[tag] )
keyword[if] identifier[title_tag] :
identifier[abstract] [ literal[string] ]= identifier[node_text] ( identifier[title_tag] )
identifier[abstract] [ literal[string] ]= keyword[None]
keyword[if] identifier[raw_parser] . identifier[paragraph] ( identifier[tag] ):
identifier[abstract] [ literal[string] ]= literal[string]
identifier[abstract] [ literal[string] ]= literal[string]
identifier[good_paragraphs] = identifier[remove_doi_paragraph] ( identifier[raw_parser] . identifier[paragraph] ( identifier[tag] ))
identifier[glue] = literal[string]
keyword[for] identifier[p_tag] keyword[in] identifier[good_paragraphs] :
identifier[abstract] [ literal[string] ]+= identifier[glue] + identifier[node_text] ( identifier[p_tag] )
identifier[glue] = literal[string]
keyword[for] identifier[p_tag] keyword[in] identifier[good_paragraphs] :
identifier[abstract] [ literal[string] ]+= literal[string] + identifier[node_contents_str] ( identifier[p_tag] )+ literal[string]
identifier[abstracts] . identifier[append] ( identifier[abstract] )
keyword[return] identifier[abstracts] | def abstracts(soup):
"""
Find the article abstract and format it
"""
abstracts = []
abstract_tags = raw_parser.abstract(soup)
for tag in abstract_tags:
abstract = {}
abstract['abstract_type'] = tag.get('abstract-type')
title_tag = raw_parser.title(tag)
if title_tag:
abstract['title'] = node_text(title_tag) # depends on [control=['if'], data=[]]
abstract['content'] = None
if raw_parser.paragraph(tag):
abstract['content'] = ''
abstract['full_content'] = ''
good_paragraphs = remove_doi_paragraph(raw_parser.paragraph(tag))
# Plain text content
glue = ''
for p_tag in good_paragraphs:
abstract['content'] += glue + node_text(p_tag)
glue = ' ' # depends on [control=['for'], data=['p_tag']]
# Content including markup tags
# When more than one paragraph, wrap each in a <p> tag
for p_tag in good_paragraphs:
abstract['full_content'] += '<p>' + node_contents_str(p_tag) + '</p>' # depends on [control=['for'], data=['p_tag']] # depends on [control=['if'], data=[]]
abstracts.append(abstract) # depends on [control=['for'], data=['tag']]
return abstracts |
def to_dict(self, short_pred=True, properties=True):
"""
Encode the Mrs as a dictionary suitable for JSON serialization.
"""
def _lnk(obj): return {'from': obj.cfrom, 'to': obj.cto}
def _ep(ep, short_pred=True):
p = ep.pred.short_form() if short_pred else ep.pred.string
d = dict(label=ep.label, predicate=p, arguments=ep.args)
if ep.lnk is not None: d['lnk'] = _lnk(ep)
return d
def _hcons(hc): return {'relation':hc[1], 'high':hc[0], 'low':hc[2]}
def _icons(ic): return {'relation':ic[1], 'left':ic[0], 'right':ic[2]}
def _var(v):
d = {'type': var_sort(v)}
if properties and self.properties(v):
d['properties'] = self.properties(v)
return d
d = dict(
relations=[_ep(ep, short_pred=short_pred) for ep in self.eps()],
constraints=([_hcons(hc) for hc in self.hcons()] +
[_icons(ic) for ic in self.icons()]),
variables={v: _var(v) for v in self.variables()}
)
if self.top is not None: d['top'] = self.top
if self.index is not None: d['index'] = self.index
# if self.xarg is not None: d['xarg'] = self.xarg
# if self.lnk is not None: d['lnk'] = self.lnk
# if self.surface is not None: d['surface'] = self.surface
# if self.identifier is not None: d['identifier'] = self.identifier
return d | def function[to_dict, parameter[self, short_pred, properties]]:
constant[
Encode the Mrs as a dictionary suitable for JSON serialization.
]
def function[_lnk, parameter[obj]]:
return[dictionary[[<ast.Constant object at 0x7da18f58ff10>, <ast.Constant object at 0x7da18f58efb0>], [<ast.Attribute object at 0x7da18f58faf0>, <ast.Attribute object at 0x7da18f58d7b0>]]]
def function[_ep, parameter[ep, short_pred]]:
variable[p] assign[=] <ast.IfExp object at 0x7da18f58fa30>
variable[d] assign[=] call[name[dict], parameter[]]
if compare[name[ep].lnk is_not constant[None]] begin[:]
call[name[d]][constant[lnk]] assign[=] call[name[_lnk], parameter[name[ep]]]
return[name[d]]
def function[_hcons, parameter[hc]]:
return[dictionary[[<ast.Constant object at 0x7da18f58d120>, <ast.Constant object at 0x7da18f58c2b0>, <ast.Constant object at 0x7da18f58de10>], [<ast.Subscript object at 0x7da18f58df30>, <ast.Subscript object at 0x7da1b034a7d0>, <ast.Subscript object at 0x7da18bc73d00>]]]
def function[_icons, parameter[ic]]:
return[dictionary[[<ast.Constant object at 0x7da18bc739d0>, <ast.Constant object at 0x7da18bc73b20>, <ast.Constant object at 0x7da18bc71810>], [<ast.Subscript object at 0x7da18bc71a20>, <ast.Subscript object at 0x7da18bc71db0>, <ast.Subscript object at 0x7da18bc71f00>]]]
def function[_var, parameter[v]]:
variable[d] assign[=] dictionary[[<ast.Constant object at 0x7da18bc70e50>], [<ast.Call object at 0x7da18bc72e60>]]
if <ast.BoolOp object at 0x7da18bc727d0> begin[:]
call[name[d]][constant[properties]] assign[=] call[name[self].properties, parameter[name[v]]]
return[name[d]]
variable[d] assign[=] call[name[dict], parameter[]]
if compare[name[self].top is_not constant[None]] begin[:]
call[name[d]][constant[top]] assign[=] name[self].top
if compare[name[self].index is_not constant[None]] begin[:]
call[name[d]][constant[index]] assign[=] name[self].index
return[name[d]] | keyword[def] identifier[to_dict] ( identifier[self] , identifier[short_pred] = keyword[True] , identifier[properties] = keyword[True] ):
literal[string]
keyword[def] identifier[_lnk] ( identifier[obj] ): keyword[return] { literal[string] : identifier[obj] . identifier[cfrom] , literal[string] : identifier[obj] . identifier[cto] }
keyword[def] identifier[_ep] ( identifier[ep] , identifier[short_pred] = keyword[True] ):
identifier[p] = identifier[ep] . identifier[pred] . identifier[short_form] () keyword[if] identifier[short_pred] keyword[else] identifier[ep] . identifier[pred] . identifier[string]
identifier[d] = identifier[dict] ( identifier[label] = identifier[ep] . identifier[label] , identifier[predicate] = identifier[p] , identifier[arguments] = identifier[ep] . identifier[args] )
keyword[if] identifier[ep] . identifier[lnk] keyword[is] keyword[not] keyword[None] : identifier[d] [ literal[string] ]= identifier[_lnk] ( identifier[ep] )
keyword[return] identifier[d]
keyword[def] identifier[_hcons] ( identifier[hc] ): keyword[return] { literal[string] : identifier[hc] [ literal[int] ], literal[string] : identifier[hc] [ literal[int] ], literal[string] : identifier[hc] [ literal[int] ]}
keyword[def] identifier[_icons] ( identifier[ic] ): keyword[return] { literal[string] : identifier[ic] [ literal[int] ], literal[string] : identifier[ic] [ literal[int] ], literal[string] : identifier[ic] [ literal[int] ]}
keyword[def] identifier[_var] ( identifier[v] ):
identifier[d] ={ literal[string] : identifier[var_sort] ( identifier[v] )}
keyword[if] identifier[properties] keyword[and] identifier[self] . identifier[properties] ( identifier[v] ):
identifier[d] [ literal[string] ]= identifier[self] . identifier[properties] ( identifier[v] )
keyword[return] identifier[d]
identifier[d] = identifier[dict] (
identifier[relations] =[ identifier[_ep] ( identifier[ep] , identifier[short_pred] = identifier[short_pred] ) keyword[for] identifier[ep] keyword[in] identifier[self] . identifier[eps] ()],
identifier[constraints] =([ identifier[_hcons] ( identifier[hc] ) keyword[for] identifier[hc] keyword[in] identifier[self] . identifier[hcons] ()]+
[ identifier[_icons] ( identifier[ic] ) keyword[for] identifier[ic] keyword[in] identifier[self] . identifier[icons] ()]),
identifier[variables] ={ identifier[v] : identifier[_var] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[self] . identifier[variables] ()}
)
keyword[if] identifier[self] . identifier[top] keyword[is] keyword[not] keyword[None] : identifier[d] [ literal[string] ]= identifier[self] . identifier[top]
keyword[if] identifier[self] . identifier[index] keyword[is] keyword[not] keyword[None] : identifier[d] [ literal[string] ]= identifier[self] . identifier[index]
keyword[return] identifier[d] | def to_dict(self, short_pred=True, properties=True):
"""
Encode the Mrs as a dictionary suitable for JSON serialization.
"""
def _lnk(obj):
return {'from': obj.cfrom, 'to': obj.cto}
def _ep(ep, short_pred=True):
p = ep.pred.short_form() if short_pred else ep.pred.string
d = dict(label=ep.label, predicate=p, arguments=ep.args)
if ep.lnk is not None:
d['lnk'] = _lnk(ep) # depends on [control=['if'], data=[]]
return d
def _hcons(hc):
return {'relation': hc[1], 'high': hc[0], 'low': hc[2]}
def _icons(ic):
return {'relation': ic[1], 'left': ic[0], 'right': ic[2]}
def _var(v):
d = {'type': var_sort(v)}
if properties and self.properties(v):
d['properties'] = self.properties(v) # depends on [control=['if'], data=[]]
return d
d = dict(relations=[_ep(ep, short_pred=short_pred) for ep in self.eps()], constraints=[_hcons(hc) for hc in self.hcons()] + [_icons(ic) for ic in self.icons()], variables={v: _var(v) for v in self.variables()})
if self.top is not None:
d['top'] = self.top # depends on [control=['if'], data=[]]
if self.index is not None:
d['index'] = self.index # depends on [control=['if'], data=[]]
# if self.xarg is not None: d['xarg'] = self.xarg
# if self.lnk is not None: d['lnk'] = self.lnk
# if self.surface is not None: d['surface'] = self.surface
# if self.identifier is not None: d['identifier'] = self.identifier
return d |
def append_to_history(self):
    """
    Add the current input text to the end of the history.

    The entry is stored only when the input validates, is non-empty,
    and differs from the most recent history entry.
    """
    # Invalid input never reaches the history; validate() records the
    # validation failure on its own.
    if not self.validate():
        return
    current = self.text
    # Skip empty input.
    if not current:
        return
    # Skip consecutive duplicates: don't store the same entry twice in a row.
    if self.history and self.history[-1] == current:
        return
    self.history.append(current)
constant[
Append the current input to the history.
(Only if valid input.)
]
if <ast.UnaryOp object at 0x7da204565690> begin[:]
return[None]
if <ast.BoolOp object at 0x7da18f00fac0> begin[:]
call[name[self].history.append, parameter[name[self].text]] | keyword[def] identifier[append_to_history] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[validate] ():
keyword[return]
keyword[if] identifier[self] . identifier[text] keyword[and] ( keyword[not] identifier[len] ( identifier[self] . identifier[history] ) keyword[or] identifier[self] . identifier[history] [- literal[int] ]!= identifier[self] . identifier[text] ):
identifier[self] . identifier[history] . identifier[append] ( identifier[self] . identifier[text] ) | def append_to_history(self):
"""
Append the current input to the history.
(Only if valid input.)
"""
# Validate first. If not valid, set validation exception.
if not self.validate():
return # depends on [control=['if'], data=[]]
# Save at the tail of the history. (But don't if the last entry the
# history is already the same.)
if self.text and (not len(self.history) or self.history[-1] != self.text):
self.history.append(self.text) # depends on [control=['if'], data=[]] |
def denoise_z15():
    """Denoising variant that replaces tokens rather than masking them.

    Starts from the xmoe2_dense_0 configuration and switches the decoder
    to denoising mode with a 15% random-Zipfian noising probability.
    """
    hp = xmoe2_dense_0()
    hp.decoder_type = "denoising"
    # 15% of tokens are noised during training, drawn from a Zipfian
    # distribution over the vocabulary.
    hp.noising_spec_train = {"type": "random_zipfian", "prob": 0.15}
    hp.noising_use_eval_during_train = 0.25
    return hp
constant[Replace tokens instead of masking.]
variable[hparams] assign[=] call[name[xmoe2_dense_0], parameter[]]
name[hparams].decoder_type assign[=] constant[denoising]
name[hparams].noising_spec_train assign[=] dictionary[[<ast.Constant object at 0x7da1b20897e0>, <ast.Constant object at 0x7da1b208a0e0>], [<ast.Constant object at 0x7da1b208a680>, <ast.Constant object at 0x7da1b208bf10>]]
name[hparams].noising_use_eval_during_train assign[=] constant[0.25]
return[name[hparams]] | keyword[def] identifier[denoise_z15] ():
literal[string]
identifier[hparams] = identifier[xmoe2_dense_0] ()
identifier[hparams] . identifier[decoder_type] = literal[string]
identifier[hparams] . identifier[noising_spec_train] ={ literal[string] : literal[string] , literal[string] : literal[int] }
identifier[hparams] . identifier[noising_use_eval_during_train] = literal[int]
keyword[return] identifier[hparams] | def denoise_z15():
"""Replace tokens instead of masking."""
hparams = xmoe2_dense_0()
hparams.decoder_type = 'denoising'
hparams.noising_spec_train = {'type': 'random_zipfian', 'prob': 0.15}
hparams.noising_use_eval_during_train = 0.25
return hparams |
def eventFilter(self, object, event):
    """
    Reimplements the **QObject.eventFilter** method.

    :param object: Object.
    :type object: QObject
    :param event: Event.
    :type event: QEvent
    :return: Event filtered.
    :rtype: bool
    """
    # Anything other than a double-click propagates unchanged.
    if event.type() != QEvent.MouseButtonDblClick:
        return False
    view = object.parent()
    # Double-clicks on a read-only view are rejected with a user error.
    if view.read_only:
        self.__raise_user_error(view)
    return True
constant[
Reimplements the **QObject.eventFilter** method.
:param object: Object.
:type object: QObject
:param event: Event.
:type event: QEvent
:return: Event filtered.
:rtype: bool
]
if compare[call[name[event].type, parameter[]] equal[==] name[QEvent].MouseButtonDblClick] begin[:]
variable[view] assign[=] call[name[object].parent, parameter[]]
if name[view].read_only begin[:]
call[name[self].__raise_user_error, parameter[name[view]]]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[eventFilter] ( identifier[self] , identifier[object] , identifier[event] ):
literal[string]
keyword[if] identifier[event] . identifier[type] ()== identifier[QEvent] . identifier[MouseButtonDblClick] :
identifier[view] = identifier[object] . identifier[parent] ()
keyword[if] identifier[view] . identifier[read_only] :
identifier[self] . identifier[__raise_user_error] ( identifier[view] )
keyword[return] keyword[True]
keyword[return] keyword[False] | def eventFilter(self, object, event):
"""
Reimplements the **QObject.eventFilter** method.
:param object: Object.
:type object: QObject
:param event: Event.
:type event: QEvent
:return: Event filtered.
:rtype: bool
"""
if event.type() == QEvent.MouseButtonDblClick:
view = object.parent()
if view.read_only:
self.__raise_user_error(view)
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return False |
def get_im(self, force_update=False):
    """Get the influence map for the model, generating it if necessary.
    Parameters
    ----------
    force_update : bool
        Whether to generate the influence map when the function is called.
        If False, returns the previously generated influence map if
        available. Defaults to False.
    Returns
    -------
    networkx MultiDiGraph object containing the influence map.
        The influence map can be rendered as a pdf using the dot layout
        program as follows::
        im_agraph = nx.nx_agraph.to_agraph(influence_map)
        im_agraph.draw('influence_map.pdf', prog='dot')
    """
    # Serve the cached map unless the caller explicitly asks for a rebuild.
    if self._im and not force_update:
        return self._im
    if not self.model:
        raise Exception("Cannot get influence map if there is no model.")
    # Helper: create one Observable per grounded monomer pattern of `agent`,
    # register each with the model, and return their names. Returns None
    # (not an empty list) when the agent has no grounded pattern.
    def add_obs_for_agent(agent):
        obj_mps = list(pa.grounded_monomer_patterns(self.model, agent))
        if not obj_mps:
            logger.debug('No monomer patterns found in model for agent %s, '
                         'skipping' % agent)
            return
        obs_list = []
        for obj_mp in obj_mps:
            obs_name = _monomer_pattern_label(obj_mp) + '_obs'
            # Add the observable
            obj_obs = Observable(obs_name, obj_mp, _export=False)
            obs_list.append(obs_name)
            try:
                self.model.add_component(obj_obs)
            except ComponentDuplicateNameError as e:
                # An observable with this name already exists in the model;
                # keep the existing component and just reuse its name.
                pass
        return obs_list
    # Create observables for all statements to check, and add to model
    # Remove any existing observables in the model
    self.model.observables = ComponentSet([])
    for stmt in self.statements:
        # Generate observables for Modification statements
        if isinstance(stmt, Modification):
            mod_condition_name = modclass_to_modtype[stmt.__class__]
            # Removal statements (e.g. dephosphorylation) map to the
            # inverse modification condition.
            if isinstance(stmt, RemoveModification):
                mod_condition_name = modtype_to_inverse[mod_condition_name]
            # Add modification to substrate agent
            modified_sub = _add_modification_to_agent(stmt.sub,
                mod_condition_name, stmt.residue,
                stmt.position)
            obs_list = add_obs_for_agent(modified_sub)
            # Associate this statement with this observable
            self.stmt_to_obs[stmt] = obs_list
        # Generate observables for Activation/Inhibition statements
        elif isinstance(stmt, RegulateActivity):
            regulated_obj, polarity = \
                _add_activity_to_agent(stmt.obj, stmt.obj_activity,
                                       stmt.is_activation)
            obs_list = add_obs_for_agent(regulated_obj)
            # Associate this statement with this observable
            self.stmt_to_obs[stmt] = obs_list
        elif isinstance(stmt, RegulateAmount):
            obs_list = add_obs_for_agent(stmt.obj)
            self.stmt_to_obs[stmt] = obs_list
        elif isinstance(stmt, Influence):
            obs_list = add_obs_for_agent(stmt.obj.concept)
            self.stmt_to_obs[stmt] = obs_list
    # Add observables for each agent
    for ag in self.agent_obs:
        obs_list = add_obs_for_agent(ag)
        self.agent_to_obs[ag] = obs_list
    logger.info("Generating influence map")
    self._im = self.generate_im(self.model)
    #self._im.is_multigraph = lambda: False
    # Now, for every rule in the model, check if there are any observables
    # downstream; alternatively, for every observable in the model, get a
    # list of rules.
    # We'll need the dictionary to check if nodes are observables
    node_attributes = nx.get_node_attributes(self._im, 'node_type')
    for rule in self.model.rules:
        obs_list = []
        # Get successors of the rule node
        for neighb in self._im.neighbors(rule.name):
            # Check if the node is an observable
            if node_attributes[neighb] != 'variable':
                continue
            # Get the edge and check the polarity
            edge_sign = _get_edge_sign(self._im, (rule.name, neighb))
            obs_list.append((neighb, edge_sign))
        # Map each rule name to its (observable, sign) successors.
        self.rule_obs_dict[rule.name] = obs_list
    return self._im
constant[Get the influence map for the model, generating it if necessary.
Parameters
----------
force_update : bool
Whether to generate the influence map when the function is called.
If False, returns the previously generated influence map if
available. Defaults to True.
Returns
-------
networkx MultiDiGraph object containing the influence map.
The influence map can be rendered as a pdf using the dot layout
program as follows::
im_agraph = nx.nx_agraph.to_agraph(influence_map)
im_agraph.draw('influence_map.pdf', prog='dot')
]
if <ast.BoolOp object at 0x7da18fe924d0> begin[:]
return[name[self]._im]
if <ast.UnaryOp object at 0x7da18fe90df0> begin[:]
<ast.Raise object at 0x7da18fe92a10>
def function[add_obs_for_agent, parameter[agent]]:
variable[obj_mps] assign[=] call[name[list], parameter[call[name[pa].grounded_monomer_patterns, parameter[name[self].model, name[agent]]]]]
if <ast.UnaryOp object at 0x7da18fe932e0> begin[:]
call[name[logger].debug, parameter[binary_operation[constant[No monomer patterns found in model for agent %s, skipping] <ast.Mod object at 0x7da2590d6920> name[agent]]]]
return[None]
variable[obs_list] assign[=] list[[]]
for taget[name[obj_mp]] in starred[name[obj_mps]] begin[:]
variable[obs_name] assign[=] binary_operation[call[name[_monomer_pattern_label], parameter[name[obj_mp]]] + constant[_obs]]
variable[obj_obs] assign[=] call[name[Observable], parameter[name[obs_name], name[obj_mp]]]
call[name[obs_list].append, parameter[name[obs_name]]]
<ast.Try object at 0x7da18dc04310>
return[name[obs_list]]
name[self].model.observables assign[=] call[name[ComponentSet], parameter[list[[]]]]
for taget[name[stmt]] in starred[name[self].statements] begin[:]
if call[name[isinstance], parameter[name[stmt], name[Modification]]] begin[:]
variable[mod_condition_name] assign[=] call[name[modclass_to_modtype]][name[stmt].__class__]
if call[name[isinstance], parameter[name[stmt], name[RemoveModification]]] begin[:]
variable[mod_condition_name] assign[=] call[name[modtype_to_inverse]][name[mod_condition_name]]
variable[modified_sub] assign[=] call[name[_add_modification_to_agent], parameter[name[stmt].sub, name[mod_condition_name], name[stmt].residue, name[stmt].position]]
variable[obs_list] assign[=] call[name[add_obs_for_agent], parameter[name[modified_sub]]]
call[name[self].stmt_to_obs][name[stmt]] assign[=] name[obs_list]
for taget[name[ag]] in starred[name[self].agent_obs] begin[:]
variable[obs_list] assign[=] call[name[add_obs_for_agent], parameter[name[ag]]]
call[name[self].agent_to_obs][name[ag]] assign[=] name[obs_list]
call[name[logger].info, parameter[constant[Generating influence map]]]
name[self]._im assign[=] call[name[self].generate_im, parameter[name[self].model]]
variable[node_attributes] assign[=] call[name[nx].get_node_attributes, parameter[name[self]._im, constant[node_type]]]
for taget[name[rule]] in starred[name[self].model.rules] begin[:]
variable[obs_list] assign[=] list[[]]
for taget[name[neighb]] in starred[call[name[self]._im.neighbors, parameter[name[rule].name]]] begin[:]
if compare[call[name[node_attributes]][name[neighb]] not_equal[!=] constant[variable]] begin[:]
continue
variable[edge_sign] assign[=] call[name[_get_edge_sign], parameter[name[self]._im, tuple[[<ast.Attribute object at 0x7da18bcca980>, <ast.Name object at 0x7da18bcca800>]]]]
call[name[obs_list].append, parameter[tuple[[<ast.Name object at 0x7da18bccb880>, <ast.Name object at 0x7da18bcc94b0>]]]]
call[name[self].rule_obs_dict][name[rule].name] assign[=] name[obs_list]
return[name[self]._im] | keyword[def] identifier[get_im] ( identifier[self] , identifier[force_update] = keyword[False] ):
literal[string]
keyword[if] identifier[self] . identifier[_im] keyword[and] keyword[not] identifier[force_update] :
keyword[return] identifier[self] . identifier[_im]
keyword[if] keyword[not] identifier[self] . identifier[model] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[def] identifier[add_obs_for_agent] ( identifier[agent] ):
identifier[obj_mps] = identifier[list] ( identifier[pa] . identifier[grounded_monomer_patterns] ( identifier[self] . identifier[model] , identifier[agent] ))
keyword[if] keyword[not] identifier[obj_mps] :
identifier[logger] . identifier[debug] ( literal[string]
literal[string] % identifier[agent] )
keyword[return]
identifier[obs_list] =[]
keyword[for] identifier[obj_mp] keyword[in] identifier[obj_mps] :
identifier[obs_name] = identifier[_monomer_pattern_label] ( identifier[obj_mp] )+ literal[string]
identifier[obj_obs] = identifier[Observable] ( identifier[obs_name] , identifier[obj_mp] , identifier[_export] = keyword[False] )
identifier[obs_list] . identifier[append] ( identifier[obs_name] )
keyword[try] :
identifier[self] . identifier[model] . identifier[add_component] ( identifier[obj_obs] )
keyword[except] identifier[ComponentDuplicateNameError] keyword[as] identifier[e] :
keyword[pass]
keyword[return] identifier[obs_list]
identifier[self] . identifier[model] . identifier[observables] = identifier[ComponentSet] ([])
keyword[for] identifier[stmt] keyword[in] identifier[self] . identifier[statements] :
keyword[if] identifier[isinstance] ( identifier[stmt] , identifier[Modification] ):
identifier[mod_condition_name] = identifier[modclass_to_modtype] [ identifier[stmt] . identifier[__class__] ]
keyword[if] identifier[isinstance] ( identifier[stmt] , identifier[RemoveModification] ):
identifier[mod_condition_name] = identifier[modtype_to_inverse] [ identifier[mod_condition_name] ]
identifier[modified_sub] = identifier[_add_modification_to_agent] ( identifier[stmt] . identifier[sub] ,
identifier[mod_condition_name] , identifier[stmt] . identifier[residue] ,
identifier[stmt] . identifier[position] )
identifier[obs_list] = identifier[add_obs_for_agent] ( identifier[modified_sub] )
identifier[self] . identifier[stmt_to_obs] [ identifier[stmt] ]= identifier[obs_list]
keyword[elif] identifier[isinstance] ( identifier[stmt] , identifier[RegulateActivity] ):
identifier[regulated_obj] , identifier[polarity] = identifier[_add_activity_to_agent] ( identifier[stmt] . identifier[obj] , identifier[stmt] . identifier[obj_activity] ,
identifier[stmt] . identifier[is_activation] )
identifier[obs_list] = identifier[add_obs_for_agent] ( identifier[regulated_obj] )
identifier[self] . identifier[stmt_to_obs] [ identifier[stmt] ]= identifier[obs_list]
keyword[elif] identifier[isinstance] ( identifier[stmt] , identifier[RegulateAmount] ):
identifier[obs_list] = identifier[add_obs_for_agent] ( identifier[stmt] . identifier[obj] )
identifier[self] . identifier[stmt_to_obs] [ identifier[stmt] ]= identifier[obs_list]
keyword[elif] identifier[isinstance] ( identifier[stmt] , identifier[Influence] ):
identifier[obs_list] = identifier[add_obs_for_agent] ( identifier[stmt] . identifier[obj] . identifier[concept] )
identifier[self] . identifier[stmt_to_obs] [ identifier[stmt] ]= identifier[obs_list]
keyword[for] identifier[ag] keyword[in] identifier[self] . identifier[agent_obs] :
identifier[obs_list] = identifier[add_obs_for_agent] ( identifier[ag] )
identifier[self] . identifier[agent_to_obs] [ identifier[ag] ]= identifier[obs_list]
identifier[logger] . identifier[info] ( literal[string] )
identifier[self] . identifier[_im] = identifier[self] . identifier[generate_im] ( identifier[self] . identifier[model] )
identifier[node_attributes] = identifier[nx] . identifier[get_node_attributes] ( identifier[self] . identifier[_im] , literal[string] )
keyword[for] identifier[rule] keyword[in] identifier[self] . identifier[model] . identifier[rules] :
identifier[obs_list] =[]
keyword[for] identifier[neighb] keyword[in] identifier[self] . identifier[_im] . identifier[neighbors] ( identifier[rule] . identifier[name] ):
keyword[if] identifier[node_attributes] [ identifier[neighb] ]!= literal[string] :
keyword[continue]
identifier[edge_sign] = identifier[_get_edge_sign] ( identifier[self] . identifier[_im] ,( identifier[rule] . identifier[name] , identifier[neighb] ))
identifier[obs_list] . identifier[append] (( identifier[neighb] , identifier[edge_sign] ))
identifier[self] . identifier[rule_obs_dict] [ identifier[rule] . identifier[name] ]= identifier[obs_list]
keyword[return] identifier[self] . identifier[_im] | def get_im(self, force_update=False):
"""Get the influence map for the model, generating it if necessary.
Parameters
----------
force_update : bool
Whether to generate the influence map when the function is called.
If False, returns the previously generated influence map if
available. Defaults to True.
Returns
-------
networkx MultiDiGraph object containing the influence map.
The influence map can be rendered as a pdf using the dot layout
program as follows::
im_agraph = nx.nx_agraph.to_agraph(influence_map)
im_agraph.draw('influence_map.pdf', prog='dot')
"""
if self._im and (not force_update):
return self._im # depends on [control=['if'], data=[]]
if not self.model:
raise Exception('Cannot get influence map if there is no model.') # depends on [control=['if'], data=[]]
def add_obs_for_agent(agent):
obj_mps = list(pa.grounded_monomer_patterns(self.model, agent))
if not obj_mps:
logger.debug('No monomer patterns found in model for agent %s, skipping' % agent)
return # depends on [control=['if'], data=[]]
obs_list = []
for obj_mp in obj_mps:
obs_name = _monomer_pattern_label(obj_mp) + '_obs'
# Add the observable
obj_obs = Observable(obs_name, obj_mp, _export=False)
obs_list.append(obs_name)
try:
self.model.add_component(obj_obs) # depends on [control=['try'], data=[]]
except ComponentDuplicateNameError as e:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['obj_mp']]
return obs_list
# Create observables for all statements to check, and add to model
# Remove any existing observables in the model
self.model.observables = ComponentSet([])
for stmt in self.statements:
# Generate observables for Modification statements
if isinstance(stmt, Modification):
mod_condition_name = modclass_to_modtype[stmt.__class__]
if isinstance(stmt, RemoveModification):
mod_condition_name = modtype_to_inverse[mod_condition_name] # depends on [control=['if'], data=[]]
# Add modification to substrate agent
modified_sub = _add_modification_to_agent(stmt.sub, mod_condition_name, stmt.residue, stmt.position)
obs_list = add_obs_for_agent(modified_sub)
# Associate this statement with this observable
self.stmt_to_obs[stmt] = obs_list # depends on [control=['if'], data=[]]
# Generate observables for Activation/Inhibition statements
elif isinstance(stmt, RegulateActivity):
(regulated_obj, polarity) = _add_activity_to_agent(stmt.obj, stmt.obj_activity, stmt.is_activation)
obs_list = add_obs_for_agent(regulated_obj)
# Associate this statement with this observable
self.stmt_to_obs[stmt] = obs_list # depends on [control=['if'], data=[]]
elif isinstance(stmt, RegulateAmount):
obs_list = add_obs_for_agent(stmt.obj)
self.stmt_to_obs[stmt] = obs_list # depends on [control=['if'], data=[]]
elif isinstance(stmt, Influence):
obs_list = add_obs_for_agent(stmt.obj.concept)
self.stmt_to_obs[stmt] = obs_list # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['stmt']]
# Add observables for each agent
for ag in self.agent_obs:
obs_list = add_obs_for_agent(ag)
self.agent_to_obs[ag] = obs_list # depends on [control=['for'], data=['ag']]
logger.info('Generating influence map')
self._im = self.generate_im(self.model)
#self._im.is_multigraph = lambda: False
# Now, for every rule in the model, check if there are any observables
# downstream; alternatively, for every observable in the model, get a
# list of rules.
# We'll need the dictionary to check if nodes are observables
node_attributes = nx.get_node_attributes(self._im, 'node_type')
for rule in self.model.rules:
obs_list = []
# Get successors of the rule node
for neighb in self._im.neighbors(rule.name):
# Check if the node is an observable
if node_attributes[neighb] != 'variable':
continue # depends on [control=['if'], data=[]]
# Get the edge and check the polarity
edge_sign = _get_edge_sign(self._im, (rule.name, neighb))
obs_list.append((neighb, edge_sign)) # depends on [control=['for'], data=['neighb']]
self.rule_obs_dict[rule.name] = obs_list # depends on [control=['for'], data=['rule']]
return self._im |
def run(self):
    """Compute and store inflation-adjusted movie budgets.

    Loads the consumer-price-index table, then rescales each movie's
    budget from its release year to the most recent year the CPI data
    covers, storing the result on the movie record.
    """
    self.mark_incomplete()
    db_session = client.get_client().create_session()
    # Annual-average CPI values, keyed by year.
    cpi_table = ConsumerPriceIndexFile().load()
    latest_year = cpi_table['Year'].max()
    annual_cpi = cpi_table.set_index('Year')['Annual']
    for record in db_session.query(models.Movie).all():
        # Without both a release year and a budget there is nothing to adjust.
        if record.year is None or record.budget is None:
            continue
        if record.year > latest_year:
            # Newer than our CPI coverage: leave the budget unscaled.
            record.budget_inflation_adjusted = record.budget
        else:
            record.budget_inflation_adjusted = record.budget * annual_cpi.loc[latest_year] / annual_cpi.loc[record.year]
    # Persist all updates and finalize the task.
    db_session.commit()
    db_session.close()
    self.mark_complete()
constant[Compute and store inflation-adjusted movie budgets
]
call[name[self].mark_incomplete, parameter[]]
variable[session] assign[=] call[call[name[client].get_client, parameter[]].create_session, parameter[]]
variable[cpi] assign[=] call[call[name[ConsumerPriceIndexFile], parameter[]].load, parameter[]]
variable[max_cpi_year] assign[=] call[call[name[cpi]][constant[Year]].max, parameter[]]
variable[cpi] assign[=] call[call[name[cpi].set_index, parameter[constant[Year]]]][constant[Annual]]
for taget[name[movie]] in starred[call[call[name[session].query, parameter[name[models].Movie]].all, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b24469b0> begin[:]
if compare[name[movie].year greater[>] name[max_cpi_year]] begin[:]
name[movie].budget_inflation_adjusted assign[=] name[movie].budget
call[name[session].commit, parameter[]]
call[name[session].close, parameter[]]
call[name[self].mark_complete, parameter[]] | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
identifier[self] . identifier[mark_incomplete] ()
identifier[session] = identifier[client] . identifier[get_client] (). identifier[create_session] ()
identifier[cpi] = identifier[ConsumerPriceIndexFile] (). identifier[load] ()
identifier[max_cpi_year] = identifier[cpi] [ literal[string] ]. identifier[max] ()
identifier[cpi] = identifier[cpi] . identifier[set_index] ( literal[string] )[ literal[string] ]
keyword[for] identifier[movie] keyword[in] identifier[session] . identifier[query] ( identifier[models] . identifier[Movie] ). identifier[all] ():
keyword[if] identifier[movie] . identifier[year] keyword[is] keyword[not] keyword[None] keyword[and] identifier[movie] . identifier[budget] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[movie] . identifier[year] > identifier[max_cpi_year] :
identifier[movie] . identifier[budget_inflation_adjusted] = identifier[movie] . identifier[budget]
keyword[else] :
identifier[movie] . identifier[budget_inflation_adjusted] = identifier[movie] . identifier[budget] * identifier[cpi] . identifier[loc] [ identifier[max_cpi_year] ]/ identifier[cpi] . identifier[loc] [ identifier[movie] . identifier[year] ]
identifier[session] . identifier[commit] ()
identifier[session] . identifier[close] ()
identifier[self] . identifier[mark_complete] () | def run(self):
"""Compute and store inflation-adjusted movie budgets
"""
self.mark_incomplete()
session = client.get_client().create_session()
# load CPI data
cpi = ConsumerPriceIndexFile().load()
# max year we have CPI data for
max_cpi_year = cpi['Year'].max()
# extract annual average only, index by year
cpi = cpi.set_index('Year')['Annual']
# process all movies
for movie in session.query(models.Movie).all():
# we can only compute an inflation-adjusted budget if we know the year and budget
if movie.year is not None and movie.budget is not None:
if movie.year > max_cpi_year:
# if movie is too new, don't inflation-adjust
movie.budget_inflation_adjusted = movie.budget # depends on [control=['if'], data=[]]
else:
movie.budget_inflation_adjusted = movie.budget * cpi.loc[max_cpi_year] / cpi.loc[movie.year] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['movie']]
# done, save all data, finalize task
session.commit()
session.close()
self.mark_complete() |
def clean_int(x) -> int:
    """
    Return ``x`` converted to an integer.

    :param x: value to convert (e.g. a string or a number)
    :return: the integer value of ``x``
    :raises django.forms.ValidationError: if ``x`` cannot be converted,
        including ``None`` and other non-numeric types
    """
    try:
        return int(x)
    except (TypeError, ValueError):
        # int() raises TypeError for None/unsupported types and ValueError
        # for malformed strings; report both uniformly as validation
        # errors so the documented contract holds for every input.
        raise forms.ValidationError(
            "Cannot convert to integer: {}".format(repr(x)))
constant[
Returns its parameter as an integer, or raises
``django.forms.ValidationError``.
]
<ast.Try object at 0x7da1b17093f0> | keyword[def] identifier[clean_int] ( identifier[x] )-> identifier[int] :
literal[string]
keyword[try] :
keyword[return] identifier[int] ( identifier[x] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[forms] . identifier[ValidationError] (
literal[string] . identifier[format] ( identifier[repr] ( identifier[x] ))) | def clean_int(x) -> int:
"""
Returns its parameter as an integer, or raises
``django.forms.ValidationError``.
"""
try:
return int(x) # depends on [control=['try'], data=[]]
except ValueError:
raise forms.ValidationError('Cannot convert to integer: {}'.format(repr(x))) # depends on [control=['except'], data=[]] |
def p_encaps_var_dollar_curly_array_offset(p):
    # NOTE: this is a PLY grammar rule -- the string below is the
    # production specification that yacc reads from the function's
    # docstring, so it must not be reworded.
    'encaps_var : DOLLAR_OPEN_CURLY_BRACES STRING_VARNAME LBRACKET expr RBRACKET RBRACE'
    # Builds the AST node for a "${name[expr]}" interpolation inside a
    # double-quoted string: an array offset applied to the variable $name.
    p[0] = ast.ArrayOffset(ast.Variable('$' + p[2], lineno=p.lineno(2)), p[4],
                           lineno=p.lineno(3))
constant[encaps_var : DOLLAR_OPEN_CURLY_BRACES STRING_VARNAME LBRACKET expr RBRACKET RBRACE]
call[name[p]][constant[0]] assign[=] call[name[ast].ArrayOffset, parameter[call[name[ast].Variable, parameter[binary_operation[constant[$] + call[name[p]][constant[2]]]]], call[name[p]][constant[4]]]] | keyword[def] identifier[p_encaps_var_dollar_curly_array_offset] ( identifier[p] ):
literal[string]
identifier[p] [ literal[int] ]= identifier[ast] . identifier[ArrayOffset] ( identifier[ast] . identifier[Variable] ( literal[string] + identifier[p] [ literal[int] ], identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] )), identifier[p] [ literal[int] ],
identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] )) | def p_encaps_var_dollar_curly_array_offset(p):
"""encaps_var : DOLLAR_OPEN_CURLY_BRACES STRING_VARNAME LBRACKET expr RBRACKET RBRACE"""
p[0] = ast.ArrayOffset(ast.Variable('$' + p[2], lineno=p.lineno(2)), p[4], lineno=p.lineno(3)) |
def _ReadFileEntry(self, file_object, file_offset):
  """Reads a file entry.

  Args:
    file_object (FileIO): file-like object.
    file_offset (int): offset of the data relative from the start of
        the file-like object.

  Returns:
    CPIOArchiveFileEntry: a file entry.

  Raises:
    FileFormatError: if the file entry cannot be read.
  """
  def _padding_size(offset):
    # Entries are aligned to 2 bytes in the binary formats and to 4 bytes
    # in the new ASCII ('crc'/'newc') formats; 'odc' uses no padding.
    if self.file_format in ('bin-big-endian', 'bin-little-endian'):
      alignment = 2
    elif self.file_format in ('crc', 'newc'):
      alignment = 4
    else:
      return 0
    remainder = offset % alignment
    return alignment - remainder if remainder > 0 else 0

  if self.file_format == 'bin-big-endian':
    data_type_map = self._CPIO_BINARY_BIG_ENDIAN_FILE_ENTRY
    file_entry_data_size = self._CPIO_BINARY_BIG_ENDIAN_FILE_ENTRY_SIZE
  elif self.file_format == 'bin-little-endian':
    data_type_map = self._CPIO_BINARY_LITTLE_ENDIAN_FILE_ENTRY
    file_entry_data_size = self._CPIO_BINARY_LITTLE_ENDIAN_FILE_ENTRY_SIZE
  elif self.file_format == 'odc':
    data_type_map = self._CPIO_PORTABLE_ASCII_FILE_ENTRY
    file_entry_data_size = self._CPIO_PORTABLE_ASCII_FILE_ENTRY_SIZE
  elif self.file_format in ('crc', 'newc'):
    data_type_map = self._CPIO_NEW_ASCII_FILE_ENTRY
    file_entry_data_size = self._CPIO_NEW_ASCII_FILE_ENTRY_SIZE
  else:
    # Previously an unsupported format fell through and raised a confusing
    # NameError on the unbound data_type_map; raise the documented
    # exception instead.
    raise errors.FileFormatError(
        'Unsupported CPIO format: {0!s}'.format(self.file_format))

  file_entry = self._ReadStructure(
      file_object, file_offset, file_entry_data_size, data_type_map,
      'file entry')
  file_offset += file_entry_data_size

  if self.file_format in ('bin-big-endian', 'bin-little-endian'):
    # The binary formats store 32-bit values as two 16-bit halves.
    file_entry.modification_time = (
        (file_entry.modification_time.upper << 16) |
        file_entry.modification_time.lower)
    file_entry.file_size = (
        (file_entry.file_size.upper << 16) | file_entry.file_size.lower)

  if self.file_format == 'odc':
    # Portable ASCII stores the numeric attributes as octal strings.
    for attribute_name in self._CPIO_ATTRIBUTE_NAMES_ODC:
      value = getattr(file_entry, attribute_name, None)
      try:
        value = int(value, 8)
      except ValueError:
        raise errors.FileFormatError(
            'Unable to convert attribute: {0:s} into an integer'.format(
                attribute_name))
      setattr(file_entry, attribute_name, value)

  elif self.file_format in ('crc', 'newc'):
    # New ASCII stores the numeric attributes as hexadecimal strings.
    for attribute_name in self._CPIO_ATTRIBUTE_NAMES_CRC:
      value = getattr(file_entry, attribute_name, None)
      try:
        value = int(value, 16)
      except ValueError:
        raise errors.FileFormatError(
            'Unable to convert attribute: {0:s} into an integer'.format(
                attribute_name))
      setattr(file_entry, attribute_name, value)

  path_data = file_object.read(file_entry.path_size)
  file_offset += file_entry.path_size

  # TODO: should this be ASCII?
  path = path_data.decode('ascii')
  path, _, _ = path.partition('\x00')

  # Skip the alignment padding between the path and the file data.
  path_padding = _padding_size(file_offset)
  file_offset += path_padding

  archive_file_entry = CPIOArchiveFileEntry()

  archive_file_entry.data_offset = file_offset
  archive_file_entry.data_size = file_entry.file_size
  archive_file_entry.group_identifier = file_entry.group_identifier
  archive_file_entry.inode_number = file_entry.inode_number
  archive_file_entry.modification_time = file_entry.modification_time
  archive_file_entry.path = path
  archive_file_entry.mode = file_entry.mode
  archive_file_entry.size = (
      file_entry_data_size + file_entry.path_size + path_padding +
      file_entry.file_size)
  archive_file_entry.user_identifier = file_entry.user_identifier

  file_offset += file_entry.file_size

  # Account for the alignment padding that trails the file data.
  data_padding = _padding_size(file_offset)
  if data_padding > 0:
    archive_file_entry.size += data_padding

  return archive_file_entry
constant[Reads a file entry.
Args:
file_object (FileIO): file-like object.
file_offset (int): offset of the data relative from the start of
the file-like object.
Returns:
CPIOArchiveFileEntry: a file entry.
Raises:
FileFormatError: if the file entry cannot be read.
]
if compare[name[self].file_format equal[==] constant[bin-big-endian]] begin[:]
variable[data_type_map] assign[=] name[self]._CPIO_BINARY_BIG_ENDIAN_FILE_ENTRY
variable[file_entry_data_size] assign[=] name[self]._CPIO_BINARY_BIG_ENDIAN_FILE_ENTRY_SIZE
variable[file_entry] assign[=] call[name[self]._ReadStructure, parameter[name[file_object], name[file_offset], name[file_entry_data_size], name[data_type_map], constant[file entry]]]
<ast.AugAssign object at 0x7da1b07ac6d0>
if compare[name[self].file_format in tuple[[<ast.Constant object at 0x7da1b07aeda0>, <ast.Constant object at 0x7da1b07ac430>]]] begin[:]
name[file_entry].modification_time assign[=] binary_operation[binary_operation[name[file_entry].modification_time.upper <ast.LShift object at 0x7da2590d69e0> constant[16]] <ast.BitOr object at 0x7da2590d6aa0> name[file_entry].modification_time.lower]
name[file_entry].file_size assign[=] binary_operation[binary_operation[name[file_entry].file_size.upper <ast.LShift object at 0x7da2590d69e0> constant[16]] <ast.BitOr object at 0x7da2590d6aa0> name[file_entry].file_size.lower]
if compare[name[self].file_format equal[==] constant[odc]] begin[:]
for taget[name[attribute_name]] in starred[name[self]._CPIO_ATTRIBUTE_NAMES_ODC] begin[:]
variable[value] assign[=] call[name[getattr], parameter[name[file_entry], name[attribute_name], constant[None]]]
<ast.Try object at 0x7da1b07af8e0>
variable[value] assign[=] call[name[setattr], parameter[name[file_entry], name[attribute_name], name[value]]]
variable[path_data] assign[=] call[name[file_object].read, parameter[name[file_entry].path_size]]
<ast.AugAssign object at 0x7da1b07ae5f0>
variable[path] assign[=] call[name[path_data].decode, parameter[constant[ascii]]]
<ast.Tuple object at 0x7da1b07ac310> assign[=] call[name[path].partition, parameter[constant[ ]]]
if compare[name[self].file_format in tuple[[<ast.Constant object at 0x7da1b07ad480>, <ast.Constant object at 0x7da1b07ade40>]]] begin[:]
variable[padding_size] assign[=] binary_operation[name[file_offset] <ast.Mod object at 0x7da2590d6920> constant[2]]
if compare[name[padding_size] greater[>] constant[0]] begin[:]
variable[padding_size] assign[=] binary_operation[constant[2] - name[padding_size]]
<ast.AugAssign object at 0x7da1b07add20>
variable[archive_file_entry] assign[=] call[name[CPIOArchiveFileEntry], parameter[]]
name[archive_file_entry].data_offset assign[=] name[file_offset]
name[archive_file_entry].data_size assign[=] name[file_entry].file_size
name[archive_file_entry].group_identifier assign[=] name[file_entry].group_identifier
name[archive_file_entry].inode_number assign[=] name[file_entry].inode_number
name[archive_file_entry].modification_time assign[=] name[file_entry].modification_time
name[archive_file_entry].path assign[=] name[path]
name[archive_file_entry].mode assign[=] name[file_entry].mode
name[archive_file_entry].size assign[=] binary_operation[binary_operation[binary_operation[name[file_entry_data_size] + name[file_entry].path_size] + name[padding_size]] + name[file_entry].file_size]
name[archive_file_entry].user_identifier assign[=] name[file_entry].user_identifier
<ast.AugAssign object at 0x7da1b07accd0>
if compare[name[self].file_format in tuple[[<ast.Constant object at 0x7da1b07ad000>, <ast.Constant object at 0x7da1b07ac670>]]] begin[:]
variable[padding_size] assign[=] binary_operation[name[file_offset] <ast.Mod object at 0x7da2590d6920> constant[2]]
if compare[name[padding_size] greater[>] constant[0]] begin[:]
variable[padding_size] assign[=] binary_operation[constant[2] - name[padding_size]]
if compare[name[padding_size] greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b07f5060>
return[name[archive_file_entry]] | keyword[def] identifier[_ReadFileEntry] ( identifier[self] , identifier[file_object] , identifier[file_offset] ):
literal[string]
keyword[if] identifier[self] . identifier[file_format] == literal[string] :
identifier[data_type_map] = identifier[self] . identifier[_CPIO_BINARY_BIG_ENDIAN_FILE_ENTRY]
identifier[file_entry_data_size] = identifier[self] . identifier[_CPIO_BINARY_BIG_ENDIAN_FILE_ENTRY_SIZE]
keyword[elif] identifier[self] . identifier[file_format] == literal[string] :
identifier[data_type_map] = identifier[self] . identifier[_CPIO_BINARY_LITTLE_ENDIAN_FILE_ENTRY]
identifier[file_entry_data_size] = identifier[self] . identifier[_CPIO_BINARY_LITTLE_ENDIAN_FILE_ENTRY_SIZE]
keyword[elif] identifier[self] . identifier[file_format] == literal[string] :
identifier[data_type_map] = identifier[self] . identifier[_CPIO_PORTABLE_ASCII_FILE_ENTRY]
identifier[file_entry_data_size] = identifier[self] . identifier[_CPIO_PORTABLE_ASCII_FILE_ENTRY_SIZE]
keyword[elif] identifier[self] . identifier[file_format] keyword[in] ( literal[string] , literal[string] ):
identifier[data_type_map] = identifier[self] . identifier[_CPIO_NEW_ASCII_FILE_ENTRY]
identifier[file_entry_data_size] = identifier[self] . identifier[_CPIO_NEW_ASCII_FILE_ENTRY_SIZE]
identifier[file_entry] = identifier[self] . identifier[_ReadStructure] (
identifier[file_object] , identifier[file_offset] , identifier[file_entry_data_size] , identifier[data_type_map] ,
literal[string] )
identifier[file_offset] += identifier[file_entry_data_size]
keyword[if] identifier[self] . identifier[file_format] keyword[in] ( literal[string] , literal[string] ):
identifier[file_entry] . identifier[modification_time] =(
( identifier[file_entry] . identifier[modification_time] . identifier[upper] << literal[int] )|
identifier[file_entry] . identifier[modification_time] . identifier[lower] )
identifier[file_entry] . identifier[file_size] =(
( identifier[file_entry] . identifier[file_size] . identifier[upper] << literal[int] )| identifier[file_entry] . identifier[file_size] . identifier[lower] )
keyword[if] identifier[self] . identifier[file_format] == literal[string] :
keyword[for] identifier[attribute_name] keyword[in] identifier[self] . identifier[_CPIO_ATTRIBUTE_NAMES_ODC] :
identifier[value] = identifier[getattr] ( identifier[file_entry] , identifier[attribute_name] , keyword[None] )
keyword[try] :
identifier[value] = identifier[int] ( identifier[value] , literal[int] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[errors] . identifier[FileFormatError] (
literal[string] . identifier[format] (
identifier[attribute_name] ))
identifier[value] = identifier[setattr] ( identifier[file_entry] , identifier[attribute_name] , identifier[value] )
keyword[elif] identifier[self] . identifier[file_format] keyword[in] ( literal[string] , literal[string] ):
keyword[for] identifier[attribute_name] keyword[in] identifier[self] . identifier[_CPIO_ATTRIBUTE_NAMES_CRC] :
identifier[value] = identifier[getattr] ( identifier[file_entry] , identifier[attribute_name] , keyword[None] )
keyword[try] :
identifier[value] = identifier[int] ( identifier[value] , literal[int] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[errors] . identifier[FileFormatError] (
literal[string] . identifier[format] (
identifier[attribute_name] ))
identifier[value] = identifier[setattr] ( identifier[file_entry] , identifier[attribute_name] , identifier[value] )
identifier[path_data] = identifier[file_object] . identifier[read] ( identifier[file_entry] . identifier[path_size] )
identifier[file_offset] += identifier[file_entry] . identifier[path_size]
identifier[path] = identifier[path_data] . identifier[decode] ( literal[string] )
identifier[path] , identifier[_] , identifier[_] = identifier[path] . identifier[partition] ( literal[string] )
keyword[if] identifier[self] . identifier[file_format] keyword[in] ( literal[string] , literal[string] ):
identifier[padding_size] = identifier[file_offset] % literal[int]
keyword[if] identifier[padding_size] > literal[int] :
identifier[padding_size] = literal[int] - identifier[padding_size]
keyword[elif] identifier[self] . identifier[file_format] == literal[string] :
identifier[padding_size] = literal[int]
keyword[elif] identifier[self] . identifier[file_format] keyword[in] ( literal[string] , literal[string] ):
identifier[padding_size] = identifier[file_offset] % literal[int]
keyword[if] identifier[padding_size] > literal[int] :
identifier[padding_size] = literal[int] - identifier[padding_size]
identifier[file_offset] += identifier[padding_size]
identifier[archive_file_entry] = identifier[CPIOArchiveFileEntry] ()
identifier[archive_file_entry] . identifier[data_offset] = identifier[file_offset]
identifier[archive_file_entry] . identifier[data_size] = identifier[file_entry] . identifier[file_size]
identifier[archive_file_entry] . identifier[group_identifier] = identifier[file_entry] . identifier[group_identifier]
identifier[archive_file_entry] . identifier[inode_number] = identifier[file_entry] . identifier[inode_number]
identifier[archive_file_entry] . identifier[modification_time] = identifier[file_entry] . identifier[modification_time]
identifier[archive_file_entry] . identifier[path] = identifier[path]
identifier[archive_file_entry] . identifier[mode] = identifier[file_entry] . identifier[mode]
identifier[archive_file_entry] . identifier[size] =(
identifier[file_entry_data_size] + identifier[file_entry] . identifier[path_size] + identifier[padding_size] +
identifier[file_entry] . identifier[file_size] )
identifier[archive_file_entry] . identifier[user_identifier] = identifier[file_entry] . identifier[user_identifier]
identifier[file_offset] += identifier[file_entry] . identifier[file_size]
keyword[if] identifier[self] . identifier[file_format] keyword[in] ( literal[string] , literal[string] ):
identifier[padding_size] = identifier[file_offset] % literal[int]
keyword[if] identifier[padding_size] > literal[int] :
identifier[padding_size] = literal[int] - identifier[padding_size]
keyword[elif] identifier[self] . identifier[file_format] == literal[string] :
identifier[padding_size] = literal[int]
keyword[elif] identifier[self] . identifier[file_format] keyword[in] ( literal[string] , literal[string] ):
identifier[padding_size] = identifier[file_offset] % literal[int]
keyword[if] identifier[padding_size] > literal[int] :
identifier[padding_size] = literal[int] - identifier[padding_size]
keyword[if] identifier[padding_size] > literal[int] :
identifier[archive_file_entry] . identifier[size] += identifier[padding_size]
keyword[return] identifier[archive_file_entry] | def _ReadFileEntry(self, file_object, file_offset):
"""Reads a file entry.
Args:
file_object (FileIO): file-like object.
file_offset (int): offset of the data relative from the start of
the file-like object.
Returns:
CPIOArchiveFileEntry: a file entry.
Raises:
FileFormatError: if the file entry cannot be read.
"""
if self.file_format == 'bin-big-endian':
data_type_map = self._CPIO_BINARY_BIG_ENDIAN_FILE_ENTRY
file_entry_data_size = self._CPIO_BINARY_BIG_ENDIAN_FILE_ENTRY_SIZE # depends on [control=['if'], data=[]]
elif self.file_format == 'bin-little-endian':
data_type_map = self._CPIO_BINARY_LITTLE_ENDIAN_FILE_ENTRY
file_entry_data_size = self._CPIO_BINARY_LITTLE_ENDIAN_FILE_ENTRY_SIZE # depends on [control=['if'], data=[]]
elif self.file_format == 'odc':
data_type_map = self._CPIO_PORTABLE_ASCII_FILE_ENTRY
file_entry_data_size = self._CPIO_PORTABLE_ASCII_FILE_ENTRY_SIZE # depends on [control=['if'], data=[]]
elif self.file_format in ('crc', 'newc'):
data_type_map = self._CPIO_NEW_ASCII_FILE_ENTRY
file_entry_data_size = self._CPIO_NEW_ASCII_FILE_ENTRY_SIZE # depends on [control=['if'], data=[]]
file_entry = self._ReadStructure(file_object, file_offset, file_entry_data_size, data_type_map, 'file entry')
file_offset += file_entry_data_size
if self.file_format in ('bin-big-endian', 'bin-little-endian'):
file_entry.modification_time = file_entry.modification_time.upper << 16 | file_entry.modification_time.lower
file_entry.file_size = file_entry.file_size.upper << 16 | file_entry.file_size.lower # depends on [control=['if'], data=[]]
if self.file_format == 'odc':
for attribute_name in self._CPIO_ATTRIBUTE_NAMES_ODC:
value = getattr(file_entry, attribute_name, None)
try:
value = int(value, 8) # depends on [control=['try'], data=[]]
except ValueError:
raise errors.FileFormatError('Unable to convert attribute: {0:s} into an integer'.format(attribute_name)) # depends on [control=['except'], data=[]]
value = setattr(file_entry, attribute_name, value) # depends on [control=['for'], data=['attribute_name']] # depends on [control=['if'], data=[]]
elif self.file_format in ('crc', 'newc'):
for attribute_name in self._CPIO_ATTRIBUTE_NAMES_CRC:
value = getattr(file_entry, attribute_name, None)
try:
value = int(value, 16) # depends on [control=['try'], data=[]]
except ValueError:
raise errors.FileFormatError('Unable to convert attribute: {0:s} into an integer'.format(attribute_name)) # depends on [control=['except'], data=[]]
value = setattr(file_entry, attribute_name, value) # depends on [control=['for'], data=['attribute_name']] # depends on [control=['if'], data=[]]
path_data = file_object.read(file_entry.path_size)
file_offset += file_entry.path_size
# TODO: should this be ASCII?
path = path_data.decode('ascii')
(path, _, _) = path.partition('\x00')
if self.file_format in ('bin-big-endian', 'bin-little-endian'):
padding_size = file_offset % 2
if padding_size > 0:
padding_size = 2 - padding_size # depends on [control=['if'], data=['padding_size']] # depends on [control=['if'], data=[]]
elif self.file_format == 'odc':
padding_size = 0 # depends on [control=['if'], data=[]]
elif self.file_format in ('crc', 'newc'):
padding_size = file_offset % 4
if padding_size > 0:
padding_size = 4 - padding_size # depends on [control=['if'], data=['padding_size']] # depends on [control=['if'], data=[]]
file_offset += padding_size
archive_file_entry = CPIOArchiveFileEntry()
archive_file_entry.data_offset = file_offset
archive_file_entry.data_size = file_entry.file_size
archive_file_entry.group_identifier = file_entry.group_identifier
archive_file_entry.inode_number = file_entry.inode_number
archive_file_entry.modification_time = file_entry.modification_time
archive_file_entry.path = path
archive_file_entry.mode = file_entry.mode
archive_file_entry.size = file_entry_data_size + file_entry.path_size + padding_size + file_entry.file_size
archive_file_entry.user_identifier = file_entry.user_identifier
file_offset += file_entry.file_size
if self.file_format in ('bin-big-endian', 'bin-little-endian'):
padding_size = file_offset % 2
if padding_size > 0:
padding_size = 2 - padding_size # depends on [control=['if'], data=['padding_size']] # depends on [control=['if'], data=[]]
elif self.file_format == 'odc':
padding_size = 0 # depends on [control=['if'], data=[]]
elif self.file_format in ('crc', 'newc'):
padding_size = file_offset % 4
if padding_size > 0:
padding_size = 4 - padding_size # depends on [control=['if'], data=['padding_size']] # depends on [control=['if'], data=[]]
if padding_size > 0:
archive_file_entry.size += padding_size # depends on [control=['if'], data=['padding_size']]
return archive_file_entry |
def clonetopath(self, dest):
    """
    Clone the repo at <self.pushablepath> into <dest>

    Note that if self.pushablepath is None, then self.path will be used
    instead.

    This base implementation is an abstract hook: concrete repository
    backends must override it.

    :raises NotImplementedError: always, until a subclass overrides it.
    """
    # NotImplementedError is the idiomatic exception for an unimplemented
    # abstract method; it is a subclass of Exception, so existing callers
    # that caught the generic Exception still work.
    raise NotImplementedError(
        "%s.%s needs to implement @classmethod .clonetopath(dest)" % (
            self.__class__.__module__, self.__class__.__name__))
constant[
Clone the repo at <self.pushablepath> into <dest>
Note that if self.pushablepath is None, then self.path will be used
instead.
]
<ast.Raise object at 0x7da1b0cf6710> | keyword[def] identifier[clonetopath] ( identifier[self] , identifier[dest] ):
literal[string]
keyword[raise] identifier[Exception] (
literal[string] %(
identifier[self] . identifier[__class__] . identifier[__module__] , identifier[self] . identifier[__class__] . identifier[__name__] )) | def clonetopath(self, dest):
"""
Clone the repo at <self.pushablepath> into <dest>
Note that if self.pushablepath is None, then self.path will be used
instead.
"""
raise Exception('%s.%s needs to implement @classmethod .clonetopath(dest)' % (self.__class__.__module__, self.__class__.__name__)) |
def more_than_one_index(s, brackets=2):
    """Return True when ``s`` contains at least ``brackets`` '[' ... ']' pairs.

    Each pair is counted by locating a '[' and then the next ']' after it;
    the scan resumes from that ']' for the following pair.

    :param s: string to search
    :param brackets: number of pairs required (default 2)
    """
    scan_from = 0
    pairs_found = 0
    while pairs_found < brackets:
        opening = s.find("[", scan_from)
        if opening == -1:
            return False
        scan_from = s.find("]", opening)
        if scan_from == -1:
            return False
        pairs_found += 1
    return True
constant[
Search for two sets of [] []
@param s: string to search
]
variable[start] assign[=] constant[0]
variable[brackets_num] assign[=] constant[0]
while <ast.BoolOp object at 0x7da2041d8fd0> begin[:]
variable[start] assign[=] call[name[s].find, parameter[constant[[], name[start]]]
if compare[name[start] equal[==] <ast.UnaryOp object at 0x7da2041d9c00>] begin[:]
break
variable[start] assign[=] call[name[s].find, parameter[constant[]], name[start]]]
<ast.AugAssign object at 0x7da1b28dd660>
if compare[name[start] not_equal[!=] <ast.UnaryOp object at 0x7da1b28de620>] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[more_than_one_index] ( identifier[s] , identifier[brackets] = literal[int] ):
literal[string]
identifier[start] = literal[int]
identifier[brackets_num] = literal[int]
keyword[while] identifier[start] !=- literal[int] keyword[and] identifier[brackets_num] < identifier[brackets] :
identifier[start] = identifier[s] . identifier[find] ( literal[string] , identifier[start] )
keyword[if] identifier[start] ==- literal[int] :
keyword[break]
identifier[start] = identifier[s] . identifier[find] ( literal[string] , identifier[start] )
identifier[brackets_num] += literal[int]
keyword[if] identifier[start] !=- literal[int] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def more_than_one_index(s, brackets=2):
"""
Search for two sets of [] []
@param s: string to search
"""
start = 0
brackets_num = 0
while start != -1 and brackets_num < brackets:
start = s.find('[', start)
if start == -1:
break # depends on [control=['if'], data=[]]
start = s.find(']', start)
brackets_num += 1 # depends on [control=['while'], data=[]]
if start != -1:
return True # depends on [control=['if'], data=[]]
return False |
def register_id(self, id_string):
    """Register a manually assigned id as used, to avoid collisions.

    An id of the form ``<prefix>_<number>`` whose prefix matches this
    generator's prefix bumps the internal counter to at least that number.
    Ids that do not match the pattern are ignored.
    """
    prefix, _, suffix = id_string.rpartition("_")
    try:
        numeric_part = int(suffix)
    except ValueError:
        # Not one of ours: no trailing integer, so nothing to track.
        return
    if prefix == self.prefix:
        self.counter = max(numeric_part, self.counter)
constant[Register a manually assigned id as used, to avoid collisions.
]
<ast.Try object at 0x7da1b1b7fa90> | keyword[def] identifier[register_id] ( identifier[self] , identifier[id_string] ):
literal[string]
keyword[try] :
identifier[prefix] , identifier[count] = identifier[id_string] . identifier[rsplit] ( literal[string] , literal[int] )
identifier[count] = identifier[int] ( identifier[count] )
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[else] :
keyword[if] identifier[prefix] == identifier[self] . identifier[prefix] :
identifier[self] . identifier[counter] = identifier[max] ( identifier[count] , identifier[self] . identifier[counter] ) | def register_id(self, id_string):
"""Register a manually assigned id as used, to avoid collisions.
"""
try:
(prefix, count) = id_string.rsplit('_', 1)
count = int(count) # depends on [control=['try'], data=[]]
except ValueError:
# We don't need to worry about ids that don't match our pattern
pass # depends on [control=['except'], data=[]]
else:
if prefix == self.prefix:
self.counter = max(count, self.counter) # depends on [control=['if'], data=[]] |
def _get_argv(index, default=None):
''' get the argv input argument defined by index. Return the default
attribute if that argument does not exist
'''
return _sys.argv[index] if len(_sys.argv) > index else default | def function[_get_argv, parameter[index, default]]:
constant[ get the argv input argument defined by index. Return the default
attribute if that argument does not exist
]
return[<ast.IfExp object at 0x7da1b0ef6c20>] | keyword[def] identifier[_get_argv] ( identifier[index] , identifier[default] = keyword[None] ):
literal[string]
keyword[return] identifier[_sys] . identifier[argv] [ identifier[index] ] keyword[if] identifier[len] ( identifier[_sys] . identifier[argv] )> identifier[index] keyword[else] identifier[default] | def _get_argv(index, default=None):
""" get the argv input argument defined by index. Return the default
attribute if that argument does not exist
"""
return _sys.argv[index] if len(_sys.argv) > index else default |
def uid(self, value):
    """User ID setter.

    Packs ``value`` (falling back to 0 for None/falsy input) as a native
    32-bit signed integer into the backing byte buffer.
    """
    packed = bytearray(c_int32(value if value else 0))
    self.bytearray[self._get_slicers(1)] = packed
constant[User ID setter.]
call[name[self].bytearray][call[name[self]._get_slicers, parameter[constant[1]]]] assign[=] call[name[bytearray], parameter[call[name[c_int32], parameter[<ast.BoolOp object at 0x7da1b2636020>]]]] | keyword[def] identifier[uid] ( identifier[self] , identifier[value] ):
literal[string]
identifier[self] . identifier[bytearray] [ identifier[self] . identifier[_get_slicers] ( literal[int] )]= identifier[bytearray] ( identifier[c_int32] ( identifier[value] keyword[or] literal[int] )) | def uid(self, value):
"""User ID setter."""
self.bytearray[self._get_slicers(1)] = bytearray(c_int32(value or 0)) |
def analyze(self, analysis_set='', analysis_directory=None):
    """Run the analysis, then create the plots and summary file.

    Plot generation is skipped when ``self.generate_plots`` is falsy.
    """
    self.calculate_metrics(analysis_set, analysis_directory=analysis_directory)
    if not self.generate_plots:
        return
    self.plot(analysis_set, analysis_directory=analysis_directory)
constant[This function runs the analysis and creates the plots and summary file.]
call[name[self].calculate_metrics, parameter[name[analysis_set]]]
if name[self].generate_plots begin[:]
call[name[self].plot, parameter[name[analysis_set]]] | keyword[def] identifier[analyze] ( identifier[self] , identifier[analysis_set] = literal[string] , identifier[analysis_directory] = keyword[None] ):
literal[string]
identifier[self] . identifier[calculate_metrics] ( identifier[analysis_set] , identifier[analysis_directory] = identifier[analysis_directory] )
keyword[if] identifier[self] . identifier[generate_plots] :
identifier[self] . identifier[plot] ( identifier[analysis_set] , identifier[analysis_directory] = identifier[analysis_directory] ) | def analyze(self, analysis_set='', analysis_directory=None):
"""This function runs the analysis and creates the plots and summary file."""
self.calculate_metrics(analysis_set, analysis_directory=analysis_directory)
if self.generate_plots:
self.plot(analysis_set, analysis_directory=analysis_directory) # depends on [control=['if'], data=[]] |
def _get_spi_control_byte(self, read_write_cmd):
"""Returns an SPI control byte.
The MCP23S17 is a slave SPI device. The slave address contains
four fixed bits and three user-defined hardware address bits
(if enabled via IOCON.HAEN) (pins A2, A1 and A0) with the
read/write bit filling out the control byte::
+--------------------+
|0|1|0|0|A2|A1|A0|R/W|
+--------------------+
7 6 5 4 3 2 1 0
:param read_write_cmd: Read or write command.
:type read_write_cmd: int
"""
# board_addr_pattern = (self.hardware_addr & 0b111) << 1
board_addr_pattern = (self.hardware_addr << 1) & 0xE
rw_cmd_pattern = read_write_cmd & 1 # make sure it's just 1 bit long
return 0x40 | board_addr_pattern | rw_cmd_pattern | def function[_get_spi_control_byte, parameter[self, read_write_cmd]]:
constant[Returns an SPI control byte.
The MCP23S17 is a slave SPI device. The slave address contains
four fixed bits and three user-defined hardware address bits
(if enabled via IOCON.HAEN) (pins A2, A1 and A0) with the
read/write bit filling out the control byte::
+--------------------+
|0|1|0|0|A2|A1|A0|R/W|
+--------------------+
7 6 5 4 3 2 1 0
:param read_write_cmd: Read or write command.
:type read_write_cmd: int
]
variable[board_addr_pattern] assign[=] binary_operation[binary_operation[name[self].hardware_addr <ast.LShift object at 0x7da2590d69e0> constant[1]] <ast.BitAnd object at 0x7da2590d6b60> constant[14]]
variable[rw_cmd_pattern] assign[=] binary_operation[name[read_write_cmd] <ast.BitAnd object at 0x7da2590d6b60> constant[1]]
return[binary_operation[binary_operation[constant[64] <ast.BitOr object at 0x7da2590d6aa0> name[board_addr_pattern]] <ast.BitOr object at 0x7da2590d6aa0> name[rw_cmd_pattern]]] | keyword[def] identifier[_get_spi_control_byte] ( identifier[self] , identifier[read_write_cmd] ):
literal[string]
identifier[board_addr_pattern] =( identifier[self] . identifier[hardware_addr] << literal[int] )& literal[int]
identifier[rw_cmd_pattern] = identifier[read_write_cmd] & literal[int]
keyword[return] literal[int] | identifier[board_addr_pattern] | identifier[rw_cmd_pattern] | def _get_spi_control_byte(self, read_write_cmd):
"""Returns an SPI control byte.
The MCP23S17 is a slave SPI device. The slave address contains
four fixed bits and three user-defined hardware address bits
(if enabled via IOCON.HAEN) (pins A2, A1 and A0) with the
read/write bit filling out the control byte::
+--------------------+
|0|1|0|0|A2|A1|A0|R/W|
+--------------------+
7 6 5 4 3 2 1 0
:param read_write_cmd: Read or write command.
:type read_write_cmd: int
"""
# board_addr_pattern = (self.hardware_addr & 0b111) << 1
board_addr_pattern = self.hardware_addr << 1 & 14
rw_cmd_pattern = read_write_cmd & 1 # make sure it's just 1 bit long
return 64 | board_addr_pattern | rw_cmd_pattern |
def azlyrics(song):
    """
    Returns the lyrics found in azlyrics for the specified mp3 file or an empty
    string if not found.
    """
    artist = song.artist.lower()
    if artist.startswith('a '):
        # azlyrics drops a leading article "a" from artist slugs —
        # TODO(review): confirm it also strips "the "/"an ".
        artist = artist[2:]
    artist = normalize(artist, URLESCAPES, '')
    title = normalize(song.title.lower(), URLESCAPES, '')
    url = 'https://www.azlyrics.com/lyrics/{}/{}.html'.format(artist, title)
    page = get_url(url)
    # The lyrics live in the last class-less <div> of the page body.
    lyrics_container = page.find_all('div', class_='')[-1]
    return lyrics_container.get_text().strip()
constant[
Returns the lyrics found in azlyrics for the specified mp3 file or an empty
string if not found.
]
variable[artist] assign[=] call[name[song].artist.lower, parameter[]]
if compare[call[name[artist]][<ast.Slice object at 0x7da20c6c7880>] equal[==] constant[a ]] begin[:]
variable[artist] assign[=] call[name[artist]][<ast.Slice object at 0x7da20c6c71c0>]
variable[artist] assign[=] call[name[normalize], parameter[name[artist], name[URLESCAPES], constant[]]]
variable[title] assign[=] call[name[song].title.lower, parameter[]]
variable[title] assign[=] call[name[normalize], parameter[name[title], name[URLESCAPES], constant[]]]
variable[url] assign[=] call[constant[https://www.azlyrics.com/lyrics/{}/{}.html].format, parameter[name[artist], name[title]]]
variable[soup] assign[=] call[name[get_url], parameter[name[url]]]
variable[body] assign[=] call[call[name[soup].find_all, parameter[constant[div]]]][<ast.UnaryOp object at 0x7da1b1801450>]
return[call[call[name[body].get_text, parameter[]].strip, parameter[]]] | keyword[def] identifier[azlyrics] ( identifier[song] ):
literal[string]
identifier[artist] = identifier[song] . identifier[artist] . identifier[lower] ()
keyword[if] identifier[artist] [ literal[int] : literal[int] ]== literal[string] :
identifier[artist] = identifier[artist] [ literal[int] :]
identifier[artist] = identifier[normalize] ( identifier[artist] , identifier[URLESCAPES] , literal[string] )
identifier[title] = identifier[song] . identifier[title] . identifier[lower] ()
identifier[title] = identifier[normalize] ( identifier[title] , identifier[URLESCAPES] , literal[string] )
identifier[url] = literal[string] . identifier[format] ( identifier[artist] , identifier[title] )
identifier[soup] = identifier[get_url] ( identifier[url] )
identifier[body] = identifier[soup] . identifier[find_all] ( literal[string] , identifier[class_] = literal[string] )[- literal[int] ]
keyword[return] identifier[body] . identifier[get_text] (). identifier[strip] () | def azlyrics(song):
"""
Returns the lyrics found in azlyrics for the specified mp3 file or an empty
string if not found.
"""
artist = song.artist.lower()
if artist[0:2] == 'a ':
artist = artist[2:] # depends on [control=['if'], data=[]]
artist = normalize(artist, URLESCAPES, '')
title = song.title.lower()
title = normalize(title, URLESCAPES, '')
url = 'https://www.azlyrics.com/lyrics/{}/{}.html'.format(artist, title)
soup = get_url(url)
body = soup.find_all('div', class_='')[-1]
return body.get_text().strip() |
def get_queryset(self):
    """
    Look up the author matching the requested username and return the
    queryset of entries they have published.

    Raises Http404 (via ``get_object_or_404``) when no such author exists.
    The resolved author is cached on ``self.author`` for later use.
    """
    lookup = {Author.USERNAME_FIELD: self.kwargs['username']}
    self.author = get_object_or_404(Author, **lookup)
    return self.author.entries_published()
constant[
Retrieve the author by his username and
build a queryset of his published entries.
]
name[self].author assign[=] call[name[get_object_or_404], parameter[name[Author]]]
return[call[name[self].author.entries_published, parameter[]]] | keyword[def] identifier[get_queryset] ( identifier[self] ):
literal[string]
identifier[self] . identifier[author] = identifier[get_object_or_404] (
identifier[Author] ,**{ identifier[Author] . identifier[USERNAME_FIELD] : identifier[self] . identifier[kwargs] [ literal[string] ]})
keyword[return] identifier[self] . identifier[author] . identifier[entries_published] () | def get_queryset(self):
"""
Retrieve the author by his username and
build a queryset of his published entries.
"""
self.author = get_object_or_404(Author, **{Author.USERNAME_FIELD: self.kwargs['username']})
return self.author.entries_published() |
def data_filler_customer(self, number_of_rows, pipe):
    '''Write one Redis hash per synthetic customer through *pipe*.

    Each key is named ``customer:<row index>`` and holds faker-generated
    personal data.  All writes are queued on the pipeline and committed
    with a single ``execute()``; any failure is logged rather than raised
    (best-effort fill job).
    '''
    try:
        fake = self.faker
        for row_index in range(number_of_rows):
            record = {
                'id': rnd_id_generator(self),
                'name': fake.first_name(),
                'lastname': fake.last_name(),
                'address': fake.address(),
                'country': fake.country(),
                'city': fake.city(),
                'registry_date': fake.date(pattern="%d-%m-%Y"),
                'birthdate': fake.date(pattern="%d-%m-%Y"),
                'email': fake.safe_email(),
                'phone_number': fake.phone_number(),
                'locale': fake.locale(),
            }
            pipe.hmset('customer:%s' % row_index, record)
        pipe.execute()
        logger.warning('customer Commits are successful after write job!', extra=d)
    except Exception as e:
        logger.error(e, extra=d)
constant[creates keys with customer data
]
<ast.Try object at 0x7da1b08885b0> | keyword[def] identifier[data_filler_customer] ( identifier[self] , identifier[number_of_rows] , identifier[pipe] ):
literal[string]
keyword[try] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[number_of_rows] ):
identifier[pipe] . identifier[hmset] ( literal[string] % identifier[i] ,{
literal[string] : identifier[rnd_id_generator] ( identifier[self] ),
literal[string] : identifier[self] . identifier[faker] . identifier[first_name] (),
literal[string] : identifier[self] . identifier[faker] . identifier[last_name] (),
literal[string] : identifier[self] . identifier[faker] . identifier[address] (),
literal[string] : identifier[self] . identifier[faker] . identifier[country] (),
literal[string] : identifier[self] . identifier[faker] . identifier[city] (),
literal[string] : identifier[self] . identifier[faker] . identifier[date] ( identifier[pattern] = literal[string] ),
literal[string] : identifier[self] . identifier[faker] . identifier[date] ( identifier[pattern] = literal[string] ),
literal[string] : identifier[self] . identifier[faker] . identifier[safe_email] (),
literal[string] : identifier[self] . identifier[faker] . identifier[phone_number] (),
literal[string] : identifier[self] . identifier[faker] . identifier[locale] ()
})
identifier[pipe] . identifier[execute] ()
identifier[logger] . identifier[warning] ( literal[string] , identifier[extra] = identifier[d] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[error] ( identifier[e] , identifier[extra] = identifier[d] ) | def data_filler_customer(self, number_of_rows, pipe):
"""creates keys with customer data
"""
try:
for i in range(number_of_rows):
pipe.hmset('customer:%s' % i, {'id': rnd_id_generator(self), 'name': self.faker.first_name(), 'lastname': self.faker.last_name(), 'address': self.faker.address(), 'country': self.faker.country(), 'city': self.faker.city(), 'registry_date': self.faker.date(pattern='%d-%m-%Y'), 'birthdate': self.faker.date(pattern='%d-%m-%Y'), 'email': self.faker.safe_email(), 'phone_number': self.faker.phone_number(), 'locale': self.faker.locale()}) # depends on [control=['for'], data=['i']]
pipe.execute()
logger.warning('customer Commits are successful after write job!', extra=d) # depends on [control=['try'], data=[]]
except Exception as e:
logger.error(e, extra=d) # depends on [control=['except'], data=['e']] |
def __sub_add_reference(self, key):
    """Used by __sub_make_request to hold a placeholder for a pending sub request.

    Registers *key* in the shared pending-subscription map, yields so the
    caller can build the request, and removes the placeholder again only
    if request creation raises (the reference is kept on success).
    """
    registry = self.__new_subs
    with registry:
        # Refuse to clobber an internal reference for an in-flight request.
        if key in registry:
            raise ValueError('subscription for given args pending: %s' % str(key))
        registry[key] = None
    try:
        yield
    except BaseException:
        # Request creation failed: drop the placeholder before re-raising.
        with registry:
            registry.pop(key, None)
        raise
constant[Used by __sub_make_request to save reference for pending sub request]
variable[new_subs] assign[=] name[self].__new_subs
with name[new_subs] begin[:]
if compare[name[key] in name[new_subs]] begin[:]
<ast.Raise object at 0x7da1b1baa9e0>
call[name[new_subs]][name[key]] assign[=] constant[None]
<ast.Try object at 0x7da1b1bab8b0> | keyword[def] identifier[__sub_add_reference] ( identifier[self] , identifier[key] ):
literal[string]
identifier[new_subs] = identifier[self] . identifier[__new_subs]
keyword[with] identifier[new_subs] :
keyword[if] identifier[key] keyword[in] identifier[new_subs] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[str] ( identifier[key] ))
identifier[new_subs] [ identifier[key] ]= keyword[None]
keyword[try] :
keyword[yield]
keyword[except] :
keyword[with] identifier[new_subs] :
identifier[new_subs] . identifier[pop] ( identifier[key] , keyword[None] )
keyword[raise] | def __sub_add_reference(self, key):
"""Used by __sub_make_request to save reference for pending sub request"""
new_subs = self.__new_subs
with new_subs:
# don't allow multiple subscription requests to overwrite internal reference
if key in new_subs:
raise ValueError('subscription for given args pending: %s' % str(key)) # depends on [control=['if'], data=['key']]
new_subs[key] = None # depends on [control=['with'], data=[]]
try:
yield # depends on [control=['try'], data=[]]
except:
# don't preserve reference if request creation failed
with new_subs:
new_subs.pop(key, None) # depends on [control=['with'], data=[]]
raise # depends on [control=['except'], data=[]] |
def rename(self, old, new):
    """
    Move the cache entry for *old* to *new* and update references.

    When the rename constraints hold (see ``_check_rename_constraints``)
    the cached relation is relocated under the new key; otherwise the new
    relation is inserted only if absent (covers temp tables being renamed).

    :param BaseRelation old: The existing relation name information.
    :param BaseRelation new: The new relation name information.
    :raises InternalError: If the new key is already present.
    """
    src_key = _make_key(old)
    dst_key = _make_key(new)
    logger.debug('Renaming relation {!s} to {!s}'.format(src_key, dst_key))
    logger.debug('before rename: {}'.format(pprint.pformat(self.dump_graph())))
    with self.lock:
        if self._check_rename_constraints(src_key, dst_key):
            self._rename_relation(src_key, _CachedRelation(new))
        else:
            self._setdefault(_CachedRelation(new))
    logger.debug('after rename: {}'.format(pprint.pformat(self.dump_graph())))
constant[Rename the old schema/identifier to the new schema/identifier and
update references.
If the new schema/identifier is already present, that is an error.
If the schema/identifier key is absent, we only debug log and return,
assuming it's a temp table being renamed.
:param BaseRelation old: The existing relation name information.
:param BaseRelation new: The new relation name information.
:raises InternalError: If the new key is already present.
]
variable[old_key] assign[=] call[name[_make_key], parameter[name[old]]]
variable[new_key] assign[=] call[name[_make_key], parameter[name[new]]]
call[name[logger].debug, parameter[call[constant[Renaming relation {!s} to {!s}].format, parameter[name[old_key], name[new_key]]]]]
call[name[logger].debug, parameter[call[constant[before rename: {}].format, parameter[call[name[pprint].pformat, parameter[call[name[self].dump_graph, parameter[]]]]]]]]
with name[self].lock begin[:]
if call[name[self]._check_rename_constraints, parameter[name[old_key], name[new_key]]] begin[:]
call[name[self]._rename_relation, parameter[name[old_key], call[name[_CachedRelation], parameter[name[new]]]]]
call[name[logger].debug, parameter[call[constant[after rename: {}].format, parameter[call[name[pprint].pformat, parameter[call[name[self].dump_graph, parameter[]]]]]]]] | keyword[def] identifier[rename] ( identifier[self] , identifier[old] , identifier[new] ):
literal[string]
identifier[old_key] = identifier[_make_key] ( identifier[old] )
identifier[new_key] = identifier[_make_key] ( identifier[new] )
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] (
identifier[old_key] , identifier[new_key] )
)
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] (
identifier[pprint] . identifier[pformat] ( identifier[self] . identifier[dump_graph] ()))
)
keyword[with] identifier[self] . identifier[lock] :
keyword[if] identifier[self] . identifier[_check_rename_constraints] ( identifier[old_key] , identifier[new_key] ):
identifier[self] . identifier[_rename_relation] ( identifier[old_key] , identifier[_CachedRelation] ( identifier[new] ))
keyword[else] :
identifier[self] . identifier[_setdefault] ( identifier[_CachedRelation] ( identifier[new] ))
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] (
identifier[pprint] . identifier[pformat] ( identifier[self] . identifier[dump_graph] ()))
) | def rename(self, old, new):
"""Rename the old schema/identifier to the new schema/identifier and
update references.
If the new schema/identifier is already present, that is an error.
If the schema/identifier key is absent, we only debug log and return,
assuming it's a temp table being renamed.
:param BaseRelation old: The existing relation name information.
:param BaseRelation new: The new relation name information.
:raises InternalError: If the new key is already present.
"""
old_key = _make_key(old)
new_key = _make_key(new)
logger.debug('Renaming relation {!s} to {!s}'.format(old_key, new_key))
logger.debug('before rename: {}'.format(pprint.pformat(self.dump_graph())))
with self.lock:
if self._check_rename_constraints(old_key, new_key):
self._rename_relation(old_key, _CachedRelation(new)) # depends on [control=['if'], data=[]]
else:
self._setdefault(_CachedRelation(new)) # depends on [control=['with'], data=[]]
logger.debug('after rename: {}'.format(pprint.pformat(self.dump_graph()))) |
def _normalize_action_attr(scripts, name):
    """Ensure ``scripts.<name>`` exists; wrap a lone non-callable action in a tuple.

    Missing attributes default to ``[]`` so runtime hasattr() checks are
    unnecessary.  Callable values are left untouched (they are invoked later).
    """
    actions = getattr(scripts, name, None)
    if actions is None:
        setattr(scripts, name, [])
    elif not callable(actions) and not hasattr(actions, "__iter__"):
        setattr(scripts, name, (actions, ))


def _normalize_event_attr(scripts, name):
    """Ensure ``scripts.<name>`` exists and is an iterable of events."""
    events = getattr(scripts, name, None)
    if events is None:
        setattr(scripts, name, [])
    elif not hasattr(events, "__iter__"):
        setattr(scripts, name, [events])


def _normalize_hand(scripts):
    """Ensure ``scripts.Hand`` exists with iterable ``events`` and ``update``."""
    if not hasattr(scripts, "Hand"):
        scripts.Hand = type("Hand", (), {})
    if not hasattr(scripts.Hand, "events"):
        scripts.Hand.events = []
    if not hasattr(scripts.Hand.events, "__iter__"):
        scripts.Hand.events = [scripts.Hand.events]
    if not hasattr(scripts.Hand, "update"):
        scripts.Hand.update = ()
    if not hasattr(scripts.Hand.update, "__iter__"):
        scripts.Hand.update = (scripts.Hand.update, )


def merge(id, card, cardscript=None):
    """
    Find the xmlcard and the card script definition of *id*, then return the
    card with a merged ``scripts`` class attached.

    :param id: Card identifier string; also becomes the scripts class name.
    :param card: Parsed card XML object, or None to load it via ``cardxml``.
    :param cardscript: Script class for the card, or None to look it up.
    :return: *card*, with normalized ``scripts``, ``choose_cards`` and tags.
    """
    if card is None:
        card = cardxml.CardXML(id)
    if cardscript is None:
        cardscript = get_script_definition(id)
    # Build the scripts class, inheriting from the script definition if any.
    bases = (cardscript, ) if cardscript else ()
    card.scripts = type(id, bases, {})
    scriptnames = (
        "activate", "combo", "deathrattle", "draw", "inspire", "play",
        "enrage", "update", "powered_up"
    )
    for script in scriptnames:
        _normalize_action_attr(card.scripts, script)
    for script in ("events", "secret"):
        _normalize_event_attr(card.scripts, script)
    if not hasattr(card.scripts, "cost_mod"):
        card.scripts.cost_mod = None
    _normalize_hand(card.scripts)
    # Set choose one cards (copy so the script's list is never mutated).
    card.choose_cards = cardscript.choose[:] if hasattr(cardscript, "choose") else []
    if hasattr(cardscript, "tags"):
        for tag, value in cardscript.tags.items():
            card.tags[tag] = value
    # Set some additional events based on the base tags...
    if card.poisonous:
        card.scripts.events.append(POISONOUS)
    return card
constant[
Find the xmlcard and the card definition of id
Then return a merged class of the two
]
if compare[name[card] is constant[None]] begin[:]
variable[card] assign[=] call[name[cardxml].CardXML, parameter[name[id]]]
if compare[name[cardscript] is constant[None]] begin[:]
variable[cardscript] assign[=] call[name[get_script_definition], parameter[name[id]]]
if name[cardscript] begin[:]
name[card].scripts assign[=] call[name[type], parameter[name[id], tuple[[<ast.Name object at 0x7da2045640d0>]], dictionary[[], []]]]
variable[scriptnames] assign[=] tuple[[<ast.Constant object at 0x7da204565480>, <ast.Constant object at 0x7da2045662f0>, <ast.Constant object at 0x7da204564a60>, <ast.Constant object at 0x7da204564370>, <ast.Constant object at 0x7da204566770>, <ast.Constant object at 0x7da204566e00>, <ast.Constant object at 0x7da204564490>, <ast.Constant object at 0x7da204565570>, <ast.Constant object at 0x7da2045677c0>]]
for taget[name[script]] in starred[name[scriptnames]] begin[:]
variable[actions] assign[=] call[name[getattr], parameter[name[card].scripts, name[script], constant[None]]]
if compare[name[actions] is constant[None]] begin[:]
call[name[setattr], parameter[name[card].scripts, name[script], list[[]]]]
for taget[name[script]] in starred[tuple[[<ast.Constant object at 0x7da2045671f0>, <ast.Constant object at 0x7da204566380>]]] begin[:]
variable[events] assign[=] call[name[getattr], parameter[name[card].scripts, name[script], constant[None]]]
if compare[name[events] is constant[None]] begin[:]
call[name[setattr], parameter[name[card].scripts, name[script], list[[]]]]
if <ast.UnaryOp object at 0x7da2045646d0> begin[:]
name[card].scripts.cost_mod assign[=] constant[None]
if <ast.UnaryOp object at 0x7da204567490> begin[:]
name[card].scripts.Hand assign[=] call[name[type], parameter[constant[Hand], tuple[[]], dictionary[[], []]]]
if <ast.UnaryOp object at 0x7da204565270> begin[:]
name[card].scripts.Hand.events assign[=] list[[]]
if <ast.UnaryOp object at 0x7da204565960> begin[:]
name[card].scripts.Hand.events assign[=] list[[<ast.Attribute object at 0x7da204565030>]]
if <ast.UnaryOp object at 0x7da204564250> begin[:]
name[card].scripts.Hand.update assign[=] tuple[[]]
if <ast.UnaryOp object at 0x7da1b26aed70> begin[:]
name[card].scripts.Hand.update assign[=] tuple[[<ast.Attribute object at 0x7da1b26ac310>]]
if call[name[hasattr], parameter[name[cardscript], constant[choose]]] begin[:]
name[card].choose_cards assign[=] call[name[cardscript].choose][<ast.Slice object at 0x7da1b26afc70>]
if call[name[hasattr], parameter[name[cardscript], constant[tags]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b26af4c0>, <ast.Name object at 0x7da1b26ada80>]]] in starred[call[name[cardscript].tags.items, parameter[]]] begin[:]
call[name[card].tags][name[tag]] assign[=] name[value]
if name[card].poisonous begin[:]
call[name[card].scripts.events.append, parameter[name[POISONOUS]]]
return[name[card]] | keyword[def] identifier[merge] ( identifier[id] , identifier[card] , identifier[cardscript] = keyword[None] ):
literal[string]
keyword[if] identifier[card] keyword[is] keyword[None] :
identifier[card] = identifier[cardxml] . identifier[CardXML] ( identifier[id] )
keyword[if] identifier[cardscript] keyword[is] keyword[None] :
identifier[cardscript] = identifier[get_script_definition] ( identifier[id] )
keyword[if] identifier[cardscript] :
identifier[card] . identifier[scripts] = identifier[type] ( identifier[id] ,( identifier[cardscript] ,),{})
keyword[else] :
identifier[card] . identifier[scripts] = identifier[type] ( identifier[id] ,(),{})
identifier[scriptnames] =(
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string]
)
keyword[for] identifier[script] keyword[in] identifier[scriptnames] :
identifier[actions] = identifier[getattr] ( identifier[card] . identifier[scripts] , identifier[script] , keyword[None] )
keyword[if] identifier[actions] keyword[is] keyword[None] :
identifier[setattr] ( identifier[card] . identifier[scripts] , identifier[script] ,[])
keyword[elif] keyword[not] identifier[callable] ( identifier[actions] ):
keyword[if] keyword[not] identifier[hasattr] ( identifier[actions] , literal[string] ):
identifier[setattr] ( identifier[card] . identifier[scripts] , identifier[script] ,( identifier[actions] ,))
keyword[for] identifier[script] keyword[in] ( literal[string] , literal[string] ):
identifier[events] = identifier[getattr] ( identifier[card] . identifier[scripts] , identifier[script] , keyword[None] )
keyword[if] identifier[events] keyword[is] keyword[None] :
identifier[setattr] ( identifier[card] . identifier[scripts] , identifier[script] ,[])
keyword[elif] keyword[not] identifier[hasattr] ( identifier[events] , literal[string] ):
identifier[setattr] ( identifier[card] . identifier[scripts] , identifier[script] ,[ identifier[events] ])
keyword[if] keyword[not] identifier[hasattr] ( identifier[card] . identifier[scripts] , literal[string] ):
identifier[card] . identifier[scripts] . identifier[cost_mod] = keyword[None]
keyword[if] keyword[not] identifier[hasattr] ( identifier[card] . identifier[scripts] , literal[string] ):
identifier[card] . identifier[scripts] . identifier[Hand] = identifier[type] ( literal[string] ,(),{})
keyword[if] keyword[not] identifier[hasattr] ( identifier[card] . identifier[scripts] . identifier[Hand] , literal[string] ):
identifier[card] . identifier[scripts] . identifier[Hand] . identifier[events] =[]
keyword[if] keyword[not] identifier[hasattr] ( identifier[card] . identifier[scripts] . identifier[Hand] . identifier[events] , literal[string] ):
identifier[card] . identifier[scripts] . identifier[Hand] . identifier[events] =[ identifier[card] . identifier[scripts] . identifier[Hand] . identifier[events] ]
keyword[if] keyword[not] identifier[hasattr] ( identifier[card] . identifier[scripts] . identifier[Hand] , literal[string] ):
identifier[card] . identifier[scripts] . identifier[Hand] . identifier[update] =()
keyword[if] keyword[not] identifier[hasattr] ( identifier[card] . identifier[scripts] . identifier[Hand] . identifier[update] , literal[string] ):
identifier[card] . identifier[scripts] . identifier[Hand] . identifier[update] =( identifier[card] . identifier[scripts] . identifier[Hand] . identifier[update] ,)
keyword[if] identifier[hasattr] ( identifier[cardscript] , literal[string] ):
identifier[card] . identifier[choose_cards] = identifier[cardscript] . identifier[choose] [:]
keyword[else] :
identifier[card] . identifier[choose_cards] =[]
keyword[if] identifier[hasattr] ( identifier[cardscript] , literal[string] ):
keyword[for] identifier[tag] , identifier[value] keyword[in] identifier[cardscript] . identifier[tags] . identifier[items] ():
identifier[card] . identifier[tags] [ identifier[tag] ]= identifier[value]
keyword[if] identifier[card] . identifier[poisonous] :
identifier[card] . identifier[scripts] . identifier[events] . identifier[append] ( identifier[POISONOUS] )
keyword[return] identifier[card] | def merge(id, card, cardscript=None):
"""
Find the xmlcard and the card definition of \x07 id
Then return a merged class of the two
"""
if card is None:
card = cardxml.CardXML(id) # depends on [control=['if'], data=['card']]
if cardscript is None:
cardscript = get_script_definition(id) # depends on [control=['if'], data=['cardscript']]
if cardscript:
card.scripts = type(id, (cardscript,), {}) # depends on [control=['if'], data=[]]
else:
card.scripts = type(id, (), {})
scriptnames = ('activate', 'combo', 'deathrattle', 'draw', 'inspire', 'play', 'enrage', 'update', 'powered_up')
for script in scriptnames:
actions = getattr(card.scripts, script, None)
if actions is None: # Set the action by default to avoid runtime hasattr() calls
setattr(card.scripts, script, []) # depends on [control=['if'], data=[]]
elif not callable(actions):
if not hasattr(actions, '__iter__'): # Ensure the actions are always iterable
setattr(card.scripts, script, (actions,)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['script']]
for script in ('events', 'secret'):
events = getattr(card.scripts, script, None)
if events is None:
setattr(card.scripts, script, []) # depends on [control=['if'], data=[]]
elif not hasattr(events, '__iter__'):
setattr(card.scripts, script, [events]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['script']]
if not hasattr(card.scripts, 'cost_mod'):
card.scripts.cost_mod = None # depends on [control=['if'], data=[]]
if not hasattr(card.scripts, 'Hand'):
card.scripts.Hand = type('Hand', (), {}) # depends on [control=['if'], data=[]]
if not hasattr(card.scripts.Hand, 'events'):
card.scripts.Hand.events = [] # depends on [control=['if'], data=[]]
if not hasattr(card.scripts.Hand.events, '__iter__'):
card.scripts.Hand.events = [card.scripts.Hand.events] # depends on [control=['if'], data=[]]
if not hasattr(card.scripts.Hand, 'update'):
card.scripts.Hand.update = () # depends on [control=['if'], data=[]]
if not hasattr(card.scripts.Hand.update, '__iter__'):
card.scripts.Hand.update = (card.scripts.Hand.update,) # depends on [control=['if'], data=[]] # Set choose one cards
if hasattr(cardscript, 'choose'):
card.choose_cards = cardscript.choose[:] # depends on [control=['if'], data=[]]
else:
card.choose_cards = []
if hasattr(cardscript, 'tags'):
for (tag, value) in cardscript.tags.items():
card.tags[tag] = value # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # Set some additional events based on the base tags...
if card.poisonous:
card.scripts.events.append(POISONOUS) # depends on [control=['if'], data=[]]
return card |
def port(self):
    """
    Return the TCP port the application should listen on.

    On the first call, bind a throwaway socket to ``self.requested_port``
    (a request of 0 yields a random free port from the OS) and cache the
    resulting port number in ``self.state``; subsequent calls return the
    cached value.

    NOTE(review): the probe socket is closed before the caller binds the
    port, so another process could in principle grab it in between.
    """
    if 'port' not in self.state:
        # Context manager guarantees the probe socket is closed even if
        # bind() raises (e.g. the requested port is already in use) --
        # the original leaked the socket on that path.
        with socket.socket() as sock:
            sock.bind(('', self.requested_port))
            self.state['port'] = sock.getsockname()[1]
    return self.state['port']
constant[
Allocate either the requested port or a random empty port for use by
application
]
if compare[constant[port] <ast.NotIn object at 0x7da2590d7190> name[self].state] begin[:]
variable[sock] assign[=] call[name[socket].socket, parameter[]]
call[name[sock].bind, parameter[tuple[[<ast.Constant object at 0x7da18ede7910>, <ast.Attribute object at 0x7da18ede6c20>]]]]
call[name[self].state][constant[port]] assign[=] call[call[name[sock].getsockname, parameter[]]][constant[1]]
call[name[sock].close, parameter[]]
return[call[name[self].state][constant[port]]] | keyword[def] identifier[port] ( identifier[self] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[state] :
identifier[sock] = identifier[socket] . identifier[socket] ()
identifier[sock] . identifier[bind] (( literal[string] , identifier[self] . identifier[requested_port] ))
identifier[self] . identifier[state] [ literal[string] ]= identifier[sock] . identifier[getsockname] ()[ literal[int] ]
identifier[sock] . identifier[close] ()
keyword[return] identifier[self] . identifier[state] [ literal[string] ] | def port(self):
"""
Allocate either the requested port or a random empty port for use by
application
"""
if 'port' not in self.state:
sock = socket.socket()
sock.bind(('', self.requested_port))
self.state['port'] = sock.getsockname()[1]
sock.close() # depends on [control=['if'], data=[]]
return self.state['port'] |
def oil(num_inducing=50, max_iters=100, kernel=None, optimize=True, plot=True):
    """
    Demonstrate sparse Gaussian process classification on the three-phase
    oil dataset.

    Builds a ``SparseGPClassification`` model (EP likelihood approximation),
    optionally optimizes it, prints its summary, and reports a confusion
    matrix on the held-out test split.  Returns the fitted model.
    """
    try:
        import pods
    except ImportError:
        raise ImportWarning('Need pods for example datasets. See https://github.com/sods/ods, or pip install pods.')
    data = pods.datasets.oil()
    X = data['X']
    Xtest = data['Xtest']
    Y = data['Y'][:, 0:1]
    Ytest = data['Ytest'][:, 0:1]
    # Recode the -1 class label as 0 for the classification likelihood.
    for labels in (Y, Ytest):
        labels[labels.flatten() == -1] = 0
    model = GPy.models.SparseGPClassification(X, Y, kernel=kernel, num_inducing=num_inducing)
    model.Ytest = Ytest
    model['.*len'] = 10.
    if optimize:
        model.optimize(messages=1)
    print(model)
    # Evaluate on the test split.
    probs = model.predict(Xtest)[0]
    GPy.util.classification.conf_matrix(probs, Ytest)
    return model
constant[
Run a Gaussian process classification on the three phase oil data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood.
]
<ast.Try object at 0x7da2044c31f0>
variable[data] assign[=] call[name[pods].datasets.oil, parameter[]]
variable[X] assign[=] call[name[data]][constant[X]]
variable[Xtest] assign[=] call[name[data]][constant[Xtest]]
variable[Y] assign[=] call[call[name[data]][constant[Y]]][tuple[[<ast.Slice object at 0x7da2044c20b0>, <ast.Slice object at 0x7da2044c1f60>]]]
variable[Ytest] assign[=] call[call[name[data]][constant[Ytest]]][tuple[[<ast.Slice object at 0x7da2044c33d0>, <ast.Slice object at 0x7da2044c1cc0>]]]
call[name[Y]][compare[call[name[Y].flatten, parameter[]] equal[==] <ast.UnaryOp object at 0x7da2044c3460>]] assign[=] constant[0]
call[name[Ytest]][compare[call[name[Ytest].flatten, parameter[]] equal[==] <ast.UnaryOp object at 0x7da2044c1d80>]] assign[=] constant[0]
variable[m] assign[=] call[name[GPy].models.SparseGPClassification, parameter[name[X], name[Y]]]
name[m].Ytest assign[=] name[Ytest]
call[name[m]][constant[.*len]] assign[=] constant[10.0]
if name[optimize] begin[:]
call[name[m].optimize, parameter[]]
call[name[print], parameter[name[m]]]
variable[probs] assign[=] call[call[name[m].predict, parameter[name[Xtest]]]][constant[0]]
call[name[GPy].util.classification.conf_matrix, parameter[name[probs], name[Ytest]]]
return[name[m]] | keyword[def] identifier[oil] ( identifier[num_inducing] = literal[int] , identifier[max_iters] = literal[int] , identifier[kernel] = keyword[None] , identifier[optimize] = keyword[True] , identifier[plot] = keyword[True] ):
literal[string]
keyword[try] : keyword[import] identifier[pods]
keyword[except] identifier[ImportError] : keyword[raise] identifier[ImportWarning] ( literal[string] )
identifier[data] = identifier[pods] . identifier[datasets] . identifier[oil] ()
identifier[X] = identifier[data] [ literal[string] ]
identifier[Xtest] = identifier[data] [ literal[string] ]
identifier[Y] = identifier[data] [ literal[string] ][:, literal[int] : literal[int] ]
identifier[Ytest] = identifier[data] [ literal[string] ][:, literal[int] : literal[int] ]
identifier[Y] [ identifier[Y] . identifier[flatten] ()==- literal[int] ]= literal[int]
identifier[Ytest] [ identifier[Ytest] . identifier[flatten] ()==- literal[int] ]= literal[int]
identifier[m] = identifier[GPy] . identifier[models] . identifier[SparseGPClassification] ( identifier[X] , identifier[Y] , identifier[kernel] = identifier[kernel] , identifier[num_inducing] = identifier[num_inducing] )
identifier[m] . identifier[Ytest] = identifier[Ytest]
identifier[m] [ literal[string] ]= literal[int]
keyword[if] identifier[optimize] :
identifier[m] . identifier[optimize] ( identifier[messages] = literal[int] )
identifier[print] ( identifier[m] )
identifier[probs] = identifier[m] . identifier[predict] ( identifier[Xtest] )[ literal[int] ]
identifier[GPy] . identifier[util] . identifier[classification] . identifier[conf_matrix] ( identifier[probs] , identifier[Ytest] )
keyword[return] identifier[m] | def oil(num_inducing=50, max_iters=100, kernel=None, optimize=True, plot=True):
"""
Run a Gaussian process classification on the three phase oil data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood.
"""
try:
import pods # depends on [control=['try'], data=[]]
except ImportError:
raise ImportWarning('Need pods for example datasets. See https://github.com/sods/ods, or pip install pods.') # depends on [control=['except'], data=[]]
data = pods.datasets.oil()
X = data['X']
Xtest = data['Xtest']
Y = data['Y'][:, 0:1]
Ytest = data['Ytest'][:, 0:1]
Y[Y.flatten() == -1] = 0
Ytest[Ytest.flatten() == -1] = 0
# Create GP model
m = GPy.models.SparseGPClassification(X, Y, kernel=kernel, num_inducing=num_inducing)
m.Ytest = Ytest
# Contrain all parameters to be positive
#m.tie_params('.*len')
m['.*len'] = 10.0
# Optimize
if optimize:
m.optimize(messages=1) # depends on [control=['if'], data=[]]
print(m)
#Test
probs = m.predict(Xtest)[0]
GPy.util.classification.conf_matrix(probs, Ytest)
return m |
def create_virtualenv(self):
    """
    Create the project's virtualenv under ``self._ve_dir`` and install the
    configured modules into it.

    If the target directory already exists it is recreated only when
    ``self._force`` is set; otherwise a warning is logged and nothing is
    done.  Command output is discarded; non-zero exit codes are logged but
    do not raise, preserving the original best-effort behaviour.
    """
    if not check_command('virtualenv'):
        return
    ve_dir = os.path.join(self._ve_dir, self._project_name)
    if os.path.exists(ve_dir):
        if not self._force:
            logging.warning('Found existing virtualenv; not creating (use --force to overwrite)')
            return
        logging.warning('Removing existing virtualenv')
        shutil.rmtree(ve_dir)
    logging.info('Creating virtualenv')
    with open(os.devnull, 'wb') as devnull:
        # List-form argv (no shell=True): paths containing spaces or shell
        # metacharacters can no longer break or inject into the command.
        # subprocess.call also waits portably, unlike the Unix-only
        # os.waitpid(p.pid, 0) it replaces, and exposes the exit status.
        rc = subprocess.call(['virtualenv', '--no-site-packages', ve_dir],
                             stdout=devnull)
        if rc != 0:
            logging.error('virtualenv exited with status {0}'.format(rc))
        pip_path = os.path.join(ve_dir, 'bin', 'pip')
        for module in self._modules:
            self.log.info('Installing module {0}'.format(module))
            rc = subprocess.call([pip_path, 'install', module], stdout=devnull)
            if rc != 0:
                logging.error('pip install {0} exited with status {1}'.format(module, rc))
constant[
Creates the virtualenv for the project
]
if call[name[check_command], parameter[constant[virtualenv]]] begin[:]
variable[ve_dir] assign[=] call[name[os].path.join, parameter[name[self]._ve_dir, name[self]._project_name]]
if call[name[os].path.exists, parameter[name[ve_dir]]] begin[:]
if name[self]._force begin[:]
call[name[logging].warn, parameter[constant[Removing existing virtualenv]]]
call[name[shutil].rmtree, parameter[name[ve_dir]]]
call[name[logging].info, parameter[constant[Creating virtualenv]]]
variable[p] assign[=] call[name[subprocess].Popen, parameter[call[constant[virtualenv --no-site-packages {0} > /dev/null].format, parameter[name[ve_dir]]]]]
call[name[os].waitpid, parameter[name[p].pid, constant[0]]]
for taget[name[m]] in starred[name[self]._modules] begin[:]
call[name[self].log.info, parameter[call[constant[Installing module {0}].format, parameter[name[m]]]]]
variable[p] assign[=] call[name[subprocess].Popen, parameter[call[constant[{0} install {1} > /dev/null].format, parameter[binary_operation[binary_operation[binary_operation[binary_operation[call[name[os].path.join, parameter[name[self]._ve_dir, name[self]._project_name]] + name[os].sep] + constant[bin]] + name[os].sep] + constant[pip]], name[m]]]]]
call[name[os].waitpid, parameter[name[p].pid, constant[0]]] | keyword[def] identifier[create_virtualenv] ( identifier[self] ):
literal[string]
keyword[if] identifier[check_command] ( literal[string] ):
identifier[ve_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[_ve_dir] , identifier[self] . identifier[_project_name] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[ve_dir] ):
keyword[if] identifier[self] . identifier[_force] :
identifier[logging] . identifier[warn] ( literal[string] )
identifier[shutil] . identifier[rmtree] ( identifier[ve_dir] )
keyword[else] :
identifier[logging] . identifier[warn] ( literal[string] )
keyword[return]
identifier[logging] . identifier[info] ( literal[string] )
identifier[p] = identifier[subprocess] . identifier[Popen] ( literal[string] . identifier[format] ( identifier[ve_dir] ), identifier[shell] = keyword[True] )
identifier[os] . identifier[waitpid] ( identifier[p] . identifier[pid] , literal[int] )
keyword[for] identifier[m] keyword[in] identifier[self] . identifier[_modules] :
identifier[self] . identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[m] ))
identifier[p] = identifier[subprocess] . identifier[Popen] ( literal[string] . identifier[format] ( identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[_ve_dir] , identifier[self] . identifier[_project_name] )+ identifier[os] . identifier[sep] + literal[string] + identifier[os] . identifier[sep] + literal[string] , identifier[m] ), identifier[shell] = keyword[True] )
identifier[os] . identifier[waitpid] ( identifier[p] . identifier[pid] , literal[int] ) | def create_virtualenv(self):
"""
Creates the virtualenv for the project
"""
if check_command('virtualenv'):
ve_dir = os.path.join(self._ve_dir, self._project_name)
if os.path.exists(ve_dir):
if self._force:
logging.warn('Removing existing virtualenv')
shutil.rmtree(ve_dir) # depends on [control=['if'], data=[]]
else:
logging.warn('Found existing virtualenv; not creating (use --force to overwrite)')
return # depends on [control=['if'], data=[]]
logging.info('Creating virtualenv')
p = subprocess.Popen('virtualenv --no-site-packages {0} > /dev/null'.format(ve_dir), shell=True)
os.waitpid(p.pid, 0)
# install modules
for m in self._modules:
self.log.info('Installing module {0}'.format(m))
p = subprocess.Popen('{0} install {1} > /dev/null'.format(os.path.join(self._ve_dir, self._project_name) + os.sep + 'bin' + os.sep + 'pip', m), shell=True)
os.waitpid(p.pid, 0) # depends on [control=['for'], data=['m']] # depends on [control=['if'], data=[]] |
def get_category(self, metric):
    """
    Build the dotted category string for ``metric``.

    The category joins this reporter's prefix, the metric's group and a
    comma-separated, sorted ``key=value`` rendering of its tags, skipping
    any piece that is empty or None.

    Examples:
        prefix='foo', group='bar', tags={'a': 1, 'b': 2} -> 'foo.bar.a=1,b=2'
        prefix='foo', group='bar', no tags              -> 'foo.bar'
        no prefix,    group='bar', no tags              -> 'bar'
    """
    name = metric.metric_name
    tag_part = ','.join(
        '{0}={1}'.format(tag, val) for tag, val in sorted(name.tags.items()))
    pieces = [self._prefix, name.group, tag_part]
    return '.'.join(piece for piece in pieces if piece)
constant[
Return a string category for the metric.
The category is made up of this reporter's prefix and the
metric's group and tags.
Examples:
prefix = 'foo', group = 'bar', tags = {'a': 1, 'b': 2}
returns: 'foo.bar.a=1,b=2'
prefix = 'foo', group = 'bar', tags = None
returns: 'foo.bar'
prefix = None, group = 'bar', tags = None
returns: 'bar'
]
variable[tags] assign[=] call[constant[,].join, parameter[<ast.GeneratorExp object at 0x7da1b1c71cc0>]]
return[call[constant[.].join, parameter[<ast.GeneratorExp object at 0x7da1b1c71b70>]]] | keyword[def] identifier[get_category] ( identifier[self] , identifier[metric] ):
literal[string]
identifier[tags] = literal[string] . identifier[join] ( literal[string] %( identifier[k] , identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in]
identifier[sorted] ( identifier[metric] . identifier[metric_name] . identifier[tags] . identifier[items] ()))
keyword[return] literal[string] . identifier[join] ( identifier[x] keyword[for] identifier[x] keyword[in]
[ identifier[self] . identifier[_prefix] , identifier[metric] . identifier[metric_name] . identifier[group] , identifier[tags] ] keyword[if] identifier[x] ) | def get_category(self, metric):
"""
Return a string category for the metric.
The category is made up of this reporter's prefix and the
metric's group and tags.
Examples:
prefix = 'foo', group = 'bar', tags = {'a': 1, 'b': 2}
returns: 'foo.bar.a=1,b=2'
prefix = 'foo', group = 'bar', tags = None
returns: 'foo.bar'
prefix = None, group = 'bar', tags = None
returns: 'bar'
"""
tags = ','.join(('%s=%s' % (k, v) for (k, v) in sorted(metric.metric_name.tags.items())))
return '.'.join((x for x in [self._prefix, metric.metric_name.group, tags] if x)) |
def get_drake_data(steps):
    """
    Returns: a dictionary of outputs mapped to inputs
    Note that an output is either a target or a leaf node in the
    step tree
    """
    if not steps:
        return {}
    mapping = {step: get_inputs(step, target=True) for step in steps}
    # Recurse over the union of every input discovered at this level and
    # fold the results back into the mapping.
    combined = set(itertools.chain.from_iterable(mapping.values()))
    mapping.update(get_drake_data(combined))
    return mapping
constant[
Returns: a dictionary of outputs mapped to inputs
Note that an output is either a target or a leaf node in the
step tree
]
variable[output_inputs] assign[=] dictionary[[], []]
if compare[call[name[len], parameter[name[steps]]] equal[==] constant[0]] begin[:]
return[name[output_inputs]]
for taget[name[step]] in starred[name[steps]] begin[:]
call[name[output_inputs]][name[step]] assign[=] call[name[get_inputs], parameter[name[step]]]
variable[inputs] assign[=] call[name[set], parameter[call[name[itertools].chain, parameter[<ast.Starred object at 0x7da1b24af160>]]]]
variable[o] assign[=] call[name[get_drake_data], parameter[name[inputs]]]
call[name[output_inputs].update, parameter[name[o]]]
return[name[output_inputs]] | keyword[def] identifier[get_drake_data] ( identifier[steps] ):
literal[string]
identifier[output_inputs] ={}
keyword[if] identifier[len] ( identifier[steps] )== literal[int] :
keyword[return] identifier[output_inputs]
keyword[for] identifier[step] keyword[in] identifier[steps] :
identifier[output_inputs] [ identifier[step] ]= identifier[get_inputs] ( identifier[step] , identifier[target] = keyword[True] )
identifier[inputs] = identifier[set] ( identifier[itertools] . identifier[chain] (* identifier[output_inputs] . identifier[values] ()))
identifier[o] = identifier[get_drake_data] ( identifier[inputs] )
identifier[output_inputs] . identifier[update] ( identifier[o] )
keyword[return] identifier[output_inputs] | def get_drake_data(steps):
"""
Returns: a dictionary of outputs mapped to inputs
Note that an output is either a target or a leaf node in the
step tree
"""
output_inputs = {}
if len(steps) == 0:
return output_inputs # depends on [control=['if'], data=[]]
for step in steps:
output_inputs[step] = get_inputs(step, target=True) # depends on [control=['for'], data=['step']]
# recursively do the same for all the inputs
inputs = set(itertools.chain(*output_inputs.values()))
o = get_drake_data(inputs)
output_inputs.update(o)
return output_inputs |
def layers_intersect(layer_a, layer_b):
    """Check whether the extents of two map layers overlap.

    :param layer_a: First of the two layers to test for overlap.
    :type layer_a: QgsMapLayer

    :param layer_b: Second of the two layers to test for overlap.
    :type layer_b: QgsMapLayer

    :returns: True if the layer extents intersect, False when disjoint.
    :rtype: bool
    """
    extent_a = layer_a.extent()
    extent_b = layer_b.extent()
    crs_a = layer_a.crs()
    crs_b = layer_b.crs()
    if crs_a != crs_b:
        # Reproject layer_b's extent into layer_a's CRS before comparing.
        transform = QgsCoordinateTransform(crs_a, crs_b, QgsProject.instance())
        extent_b = transform.transform(
            extent_b, QgsCoordinateTransform.ReverseTransform)
    return extent_a.intersects(extent_b)
constant[Check if extents of two layers intersect.
:param layer_a: One of the two layers to test overlapping
:type layer_a: QgsMapLayer
:param layer_b: The second of the two layers to test overlapping
:type layer_b: QgsMapLayer
:returns: true if the layers intersect, false if they are disjoint
:rtype: boolean
]
variable[extent_a] assign[=] call[name[layer_a].extent, parameter[]]
variable[extent_b] assign[=] call[name[layer_b].extent, parameter[]]
if compare[call[name[layer_a].crs, parameter[]] not_equal[!=] call[name[layer_b].crs, parameter[]]] begin[:]
variable[coord_transform] assign[=] call[name[QgsCoordinateTransform], parameter[call[name[layer_a].crs, parameter[]], call[name[layer_b].crs, parameter[]], call[name[QgsProject].instance, parameter[]]]]
variable[extent_b] assign[=] call[name[coord_transform].transform, parameter[name[extent_b], name[QgsCoordinateTransform].ReverseTransform]]
return[call[name[extent_a].intersects, parameter[name[extent_b]]]] | keyword[def] identifier[layers_intersect] ( identifier[layer_a] , identifier[layer_b] ):
literal[string]
identifier[extent_a] = identifier[layer_a] . identifier[extent] ()
identifier[extent_b] = identifier[layer_b] . identifier[extent] ()
keyword[if] identifier[layer_a] . identifier[crs] ()!= identifier[layer_b] . identifier[crs] ():
identifier[coord_transform] = identifier[QgsCoordinateTransform] (
identifier[layer_a] . identifier[crs] (), identifier[layer_b] . identifier[crs] (), identifier[QgsProject] . identifier[instance] ())
identifier[extent_b] =( identifier[coord_transform] . identifier[transform] (
identifier[extent_b] , identifier[QgsCoordinateTransform] . identifier[ReverseTransform] ))
keyword[return] identifier[extent_a] . identifier[intersects] ( identifier[extent_b] ) | def layers_intersect(layer_a, layer_b):
"""Check if extents of two layers intersect.
:param layer_a: One of the two layers to test overlapping
:type layer_a: QgsMapLayer
:param layer_b: The second of the two layers to test overlapping
:type layer_b: QgsMapLayer
:returns: true if the layers intersect, false if they are disjoint
:rtype: boolean
"""
extent_a = layer_a.extent()
extent_b = layer_b.extent()
if layer_a.crs() != layer_b.crs():
coord_transform = QgsCoordinateTransform(layer_a.crs(), layer_b.crs(), QgsProject.instance())
extent_b = coord_transform.transform(extent_b, QgsCoordinateTransform.ReverseTransform) # depends on [control=['if'], data=[]]
return extent_a.intersects(extent_b) |
def create_dataset(self, ds_name, data, attrs=None, dtype=None):
    """
    Save a Numpy array into a dataset in the HDF file, register it as
    ds_name and return the h5py dataset.

    :param ds_name: string
        Registration name of the dataset to be registered.
    :param data: Numpy ndarray
        Array whose contents are written into the dataset.
    :param attrs: dict, optional
        HDF5 attributes to attach to a newly created dataset.
    :param dtype: dtype, optional
        Datatype of the dataset; defaults to ``data.dtype``.
    :return: h5py dataset
    """
    if ds_name in self._datasets:
        # Reuse the previously registered dataset; just warn on mismatch.
        ds = self._datasets[ds_name]
        if ds.dtype != data.dtype:
            warnings.warn('Dataset and data dtype are different!')
    else:
        if dtype is None:
            dtype = data.dtype
        ds = self._group.create_dataset(ds_name, data.shape,
                                        dtype=dtype)
        if attrs is not None:
            for key in attrs:
                # Bug fix: HDF5 attributes must go through the
                # AttributeManager mapping interface; ``setattr`` on
                # ``ds.attrs`` only set a transient Python attribute and
                # never persisted anything to the file.
                ds.attrs[key] = attrs[key]
    # Bug fix: ``read_direct`` reads *from* the dataset into ``data``,
    # clobbering the caller's array.  ``write_direct`` stores ``data``
    # into the dataset, which is what the docstring promises.
    ds.write_direct(data)
    self._datasets[ds_name] = ds
    return ds
constant[
Saves a Numpy array in a dataset in the HDF file, registers it as
ds_name and returns the h5py dataset.
:param ds_name: string
Registration name of the dataset to be registered.
:param data: Numpy ndarray
:param dtype: dtype
Datatype of the dataset
:return: h5py dataset
]
if compare[name[ds_name] in name[self]._datasets] begin[:]
variable[ds] assign[=] call[name[self]._datasets][name[ds_name]]
if compare[name[ds].dtype not_equal[!=] name[data].dtype] begin[:]
call[name[warnings].warn, parameter[constant[Dataset and data dtype are different!]]]
call[name[ds].read_direct, parameter[name[data]]]
call[name[self]._datasets][name[ds_name]] assign[=] name[ds]
return[name[ds]] | keyword[def] identifier[create_dataset] ( identifier[self] , identifier[ds_name] , identifier[data] , identifier[attrs] = keyword[None] , identifier[dtype] = keyword[None] ):
literal[string]
keyword[if] identifier[ds_name] keyword[in] identifier[self] . identifier[_datasets] :
identifier[ds] = identifier[self] . identifier[_datasets] [ identifier[ds_name] ]
keyword[if] identifier[ds] . identifier[dtype] != identifier[data] . identifier[dtype] :
identifier[warnings] . identifier[warn] ( literal[string] )
keyword[else] :
keyword[if] identifier[dtype] keyword[is] keyword[None] :
identifier[dtype] = identifier[data] . identifier[dtype]
identifier[ds] = identifier[self] . identifier[_group] . identifier[create_dataset] ( identifier[ds_name] , identifier[data] . identifier[shape] ,
identifier[dtype] = identifier[dtype] )
keyword[if] identifier[attrs] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[key] keyword[in] identifier[attrs] :
identifier[setattr] ( identifier[ds] . identifier[attrs] , identifier[key] , identifier[attrs] [ identifier[key] ])
identifier[ds] . identifier[read_direct] ( identifier[data] )
identifier[self] . identifier[_datasets] [ identifier[ds_name] ]= identifier[ds]
keyword[return] identifier[ds] | def create_dataset(self, ds_name, data, attrs=None, dtype=None):
"""
Saves a Numpy array in a dataset in the HDF file, registers it as
ds_name and returns the h5py dataset.
:param ds_name: string
Registration name of the dataset to be registered.
:param data: Numpy ndarray
:param dtype: dtype
Datatype of the dataset
:return: h5py dataset
"""
if ds_name in self._datasets:
ds = self._datasets[ds_name]
if ds.dtype != data.dtype:
warnings.warn('Dataset and data dtype are different!') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['ds_name']]
else:
if dtype is None:
dtype = data.dtype # depends on [control=['if'], data=['dtype']]
ds = self._group.create_dataset(ds_name, data.shape, dtype=dtype)
if attrs is not None:
for key in attrs:
setattr(ds.attrs, key, attrs[key]) # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=['attrs']]
ds.read_direct(data)
self._datasets[ds_name] = ds
return ds |
def _add_unitary_single(self, gate, qubit):
    """Apply an arbitrary 1-qubit unitary matrix.

    Args:
        gate (matrix_like): a single qubit gate matrix
        qubit (int): the qubit to apply gate to
    """
    # Promote the gate to a complex ndarray so einsum sees a rank-2 tensor.
    mat = np.array(gate, dtype=complex)
    # Subscript string contracting the gate with the unitary along the
    # target qubit's axis.
    subscripts = einsum_matmul_index([qubit], self._number_of_qubits)
    # Contract with casting disabled so no implicit precision change occurs.
    self._unitary = np.einsum(subscripts, mat, self._unitary,
                              dtype=complex, casting='no')
constant[Apply an arbitrary 1-qubit unitary matrix.
Args:
gate (matrix_like): a single qubit gate matrix
qubit (int): the qubit to apply gate to
]
variable[gate_tensor] assign[=] call[name[np].array, parameter[name[gate]]]
variable[indexes] assign[=] call[name[einsum_matmul_index], parameter[list[[<ast.Name object at 0x7da1b050a8c0>]], name[self]._number_of_qubits]]
name[self]._unitary assign[=] call[name[np].einsum, parameter[name[indexes], name[gate_tensor], name[self]._unitary]] | keyword[def] identifier[_add_unitary_single] ( identifier[self] , identifier[gate] , identifier[qubit] ):
literal[string]
identifier[gate_tensor] = identifier[np] . identifier[array] ( identifier[gate] , identifier[dtype] = identifier[complex] )
identifier[indexes] = identifier[einsum_matmul_index] ([ identifier[qubit] ], identifier[self] . identifier[_number_of_qubits] )
identifier[self] . identifier[_unitary] = identifier[np] . identifier[einsum] ( identifier[indexes] , identifier[gate_tensor] , identifier[self] . identifier[_unitary] ,
identifier[dtype] = identifier[complex] , identifier[casting] = literal[string] ) | def _add_unitary_single(self, gate, qubit):
"""Apply an arbitrary 1-qubit unitary matrix.
Args:
gate (matrix_like): a single qubit gate matrix
qubit (int): the qubit to apply gate to
"""
# Convert to complex rank-2 tensor
gate_tensor = np.array(gate, dtype=complex)
# Compute einsum index string for 1-qubit matrix multiplication
indexes = einsum_matmul_index([qubit], self._number_of_qubits)
# Apply matrix multiplication
self._unitary = np.einsum(indexes, gate_tensor, self._unitary, dtype=complex, casting='no') |
def add_config(self, key, type_, default=NOT_SET, env_var=None):
    """Register a configuration setting under ``key``.

    Parameters
    ----------
    key : str
        Name of the setting; must be a valid Python attribute name
        (alphanumeric plus underscores).
    type_ : function
        Converter (e.g. ``int``, ``float``, ``str``) applied to the raw
        value.  It must accept both strings (environment variables are
        always strings) and already-parsed YAML values.
    default : object, optional
        Value returned when the setting is unset.  When omitted
        (``NOT_SET``), reading an unset value raises instead.
    env_var : str, optional
        Environment variable that can supply this setting; without it the
        setting can only come from the YAML configuration file.
    """
    spec = {'type': type_}
    self.config[key] = spec
    if env_var is not None:
        spec['env_var'] = env_var
    if default is not NOT_SET:
        spec['default'] = default
constant[Add a configuration setting.
Parameters
----------
key : str
The name of the configuration setting. This must be a valid
Python attribute name i.e. alphanumeric with underscores.
type : function
A function such as ``float``, ``int`` or ``str`` which takes
the configuration value and returns an object of the correct
type. Note that the values retrieved from environment
variables are always strings, while those retrieved from the
YAML file might already be parsed. Hence, the function provided
here must accept both types of input.
default : object, optional
The default configuration to return if not set. By default none
is set and an error is raised instead.
env_var : str, optional
The environment variable name that holds this configuration
value. If not given, this configuration can only be set in the
YAML configuration file.
]
call[name[self].config][name[key]] assign[=] dictionary[[<ast.Constant object at 0x7da18f7233d0>], [<ast.Name object at 0x7da18f720820>]]
if compare[name[env_var] is_not constant[None]] begin[:]
call[call[name[self].config][name[key]]][constant[env_var]] assign[=] name[env_var]
if compare[name[default] is_not name[NOT_SET]] begin[:]
call[call[name[self].config][name[key]]][constant[default]] assign[=] name[default] | keyword[def] identifier[add_config] ( identifier[self] , identifier[key] , identifier[type_] , identifier[default] = identifier[NOT_SET] , identifier[env_var] = keyword[None] ):
literal[string]
identifier[self] . identifier[config] [ identifier[key] ]={ literal[string] : identifier[type_] }
keyword[if] identifier[env_var] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[config] [ identifier[key] ][ literal[string] ]= identifier[env_var]
keyword[if] identifier[default] keyword[is] keyword[not] identifier[NOT_SET] :
identifier[self] . identifier[config] [ identifier[key] ][ literal[string] ]= identifier[default] | def add_config(self, key, type_, default=NOT_SET, env_var=None):
"""Add a configuration setting.
Parameters
----------
key : str
The name of the configuration setting. This must be a valid
Python attribute name i.e. alphanumeric with underscores.
type : function
A function such as ``float``, ``int`` or ``str`` which takes
the configuration value and returns an object of the correct
type. Note that the values retrieved from environment
variables are always strings, while those retrieved from the
YAML file might already be parsed. Hence, the function provided
here must accept both types of input.
default : object, optional
The default configuration to return if not set. By default none
is set and an error is raised instead.
env_var : str, optional
The environment variable name that holds this configuration
value. If not given, this configuration can only be set in the
YAML configuration file.
"""
self.config[key] = {'type': type_}
if env_var is not None:
self.config[key]['env_var'] = env_var # depends on [control=['if'], data=['env_var']]
if default is not NOT_SET:
self.config[key]['default'] = default # depends on [control=['if'], data=['default']] |
def _calculate_normals(rr, tris):
    """Efficiently compute vertex normals for triangulated surface"""
    # Work in float64 so the bincount-based summation below keeps full
    # precision regardless of the input dtype.
    points = rr.astype(np.float64)
    # Per-triangle normals: cross product of two edge vectors.
    origin = points[tris[:, 0], :]
    face_nn = _fast_cross_3d(points[tris[:, 1], :] - origin,
                             points[tris[:, 2], :] - origin)
    norms = np.sqrt(np.sum(face_nn * face_nn, axis=1))
    norms[norms == 0] = 1.0  # guard degenerate triangles against /0
    face_nn /= norms[:, np.newaxis]
    n_points = len(points)
    # Accumulate each face normal onto its three corner vertices.  This is
    # the vectorized form of:
    #     for p, tri in enumerate(tris):
    #         vert_nn[tri, :] += face_nn[p, :]
    vert_nn = np.zeros((n_points, 3))
    for corners in tris.T:  # three iterations: one per triangle corner
        for axis in range(3):  # x, y, z components
            vert_nn[:, axis] += np.bincount(corners.astype(np.int32),
                                            face_nn[:, axis],
                                            minlength=n_points)
    norms = np.sqrt(np.sum(vert_nn * vert_nn, axis=1))
    norms[norms == 0] = 1.0  # guard isolated vertices against /0
    vert_nn /= norms[:, np.newaxis]
    return vert_nn
constant[Efficiently compute vertex normals for triangulated surface]
variable[rr] assign[=] call[name[rr].astype, parameter[name[np].float64]]
variable[r1] assign[=] call[name[rr]][tuple[[<ast.Subscript object at 0x7da1b0f2a710>, <ast.Slice object at 0x7da1b0ff9a50>]]]
variable[r2] assign[=] call[name[rr]][tuple[[<ast.Subscript object at 0x7da18dc981f0>, <ast.Slice object at 0x7da18dc9bac0>]]]
variable[r3] assign[=] call[name[rr]][tuple[[<ast.Subscript object at 0x7da18dc9b0d0>, <ast.Slice object at 0x7da18dc998a0>]]]
variable[tri_nn] assign[=] call[name[_fast_cross_3d], parameter[binary_operation[name[r2] - name[r1]], binary_operation[name[r3] - name[r1]]]]
variable[size] assign[=] call[name[np].sqrt, parameter[call[name[np].sum, parameter[binary_operation[name[tri_nn] * name[tri_nn]]]]]]
call[name[size]][compare[name[size] equal[==] constant[0]]] assign[=] constant[1.0]
<ast.AugAssign object at 0x7da1b0fb0df0>
variable[npts] assign[=] call[name[len], parameter[name[rr]]]
variable[nn] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b0f18190>, <ast.Constant object at 0x7da1b0f19f90>]]]]
for taget[name[verts]] in starred[name[tris].T] begin[:]
for taget[name[idx]] in starred[call[name[range], parameter[constant[3]]]] begin[:]
<ast.AugAssign object at 0x7da1b0fb1270>
variable[size] assign[=] call[name[np].sqrt, parameter[call[name[np].sum, parameter[binary_operation[name[nn] * name[nn]]]]]]
call[name[size]][compare[name[size] equal[==] constant[0]]] assign[=] constant[1.0]
<ast.AugAssign object at 0x7da1b0f19a50>
return[name[nn]] | keyword[def] identifier[_calculate_normals] ( identifier[rr] , identifier[tris] ):
literal[string]
identifier[rr] = identifier[rr] . identifier[astype] ( identifier[np] . identifier[float64] )
identifier[r1] = identifier[rr] [ identifier[tris] [:, literal[int] ],:]
identifier[r2] = identifier[rr] [ identifier[tris] [:, literal[int] ],:]
identifier[r3] = identifier[rr] [ identifier[tris] [:, literal[int] ],:]
identifier[tri_nn] = identifier[_fast_cross_3d] (( identifier[r2] - identifier[r1] ),( identifier[r3] - identifier[r1] ))
identifier[size] = identifier[np] . identifier[sqrt] ( identifier[np] . identifier[sum] ( identifier[tri_nn] * identifier[tri_nn] , identifier[axis] = literal[int] ))
identifier[size] [ identifier[size] == literal[int] ]= literal[int]
identifier[tri_nn] /= identifier[size] [:, identifier[np] . identifier[newaxis] ]
identifier[npts] = identifier[len] ( identifier[rr] )
identifier[nn] = identifier[np] . identifier[zeros] (( identifier[npts] , literal[int] ))
keyword[for] identifier[verts] keyword[in] identifier[tris] . identifier[T] :
keyword[for] identifier[idx] keyword[in] identifier[range] ( literal[int] ):
identifier[nn] [:, identifier[idx] ]+= identifier[np] . identifier[bincount] ( identifier[verts] . identifier[astype] ( identifier[np] . identifier[int32] ),
identifier[tri_nn] [:, identifier[idx] ], identifier[minlength] = identifier[npts] )
identifier[size] = identifier[np] . identifier[sqrt] ( identifier[np] . identifier[sum] ( identifier[nn] * identifier[nn] , identifier[axis] = literal[int] ))
identifier[size] [ identifier[size] == literal[int] ]= literal[int]
identifier[nn] /= identifier[size] [:, identifier[np] . identifier[newaxis] ]
keyword[return] identifier[nn] | def _calculate_normals(rr, tris):
"""Efficiently compute vertex normals for triangulated surface"""
# ensure highest precision for our summation/vectorization "trick"
rr = rr.astype(np.float64)
# first, compute triangle normals
r1 = rr[tris[:, 0], :]
r2 = rr[tris[:, 1], :]
r3 = rr[tris[:, 2], :]
tri_nn = _fast_cross_3d(r2 - r1, r3 - r1)
# Triangle normals and areas
size = np.sqrt(np.sum(tri_nn * tri_nn, axis=1))
size[size == 0] = 1.0 # prevent ugly divide-by-zero
tri_nn /= size[:, np.newaxis]
npts = len(rr)
# the following code replaces this, but is faster (vectorized):
#
# for p, verts in enumerate(tris):
# nn[verts, :] += tri_nn[p, :]
#
nn = np.zeros((npts, 3))
for verts in tris.T: # note this only loops 3x (number of verts per tri)
for idx in range(3): # x, y, z
nn[:, idx] += np.bincount(verts.astype(np.int32), tri_nn[:, idx], minlength=npts) # depends on [control=['for'], data=['idx']] # depends on [control=['for'], data=['verts']]
size = np.sqrt(np.sum(nn * nn, axis=1))
size[size == 0] = 1.0 # prevent ugly divide-by-zero
nn /= size[:, np.newaxis]
return nn |
def _apply_to_data(data, func, unpack_dict=False):
"""Apply a function to data, trying to unpack different data
types.
"""
apply_ = partial(_apply_to_data, func=func, unpack_dict=unpack_dict)
if isinstance(data, dict):
if unpack_dict:
return [apply_(v) for v in data.values()]
return {k: apply_(v) for k, v in data.items()}
if isinstance(data, (list, tuple)):
try:
# e.g.list/tuple of arrays
return [apply_(x) for x in data]
except TypeError:
return func(data)
return func(data) | def function[_apply_to_data, parameter[data, func, unpack_dict]]:
constant[Apply a function to data, trying to unpack different data
types.
]
variable[apply_] assign[=] call[name[partial], parameter[name[_apply_to_data]]]
if call[name[isinstance], parameter[name[data], name[dict]]] begin[:]
if name[unpack_dict] begin[:]
return[<ast.ListComp object at 0x7da18dc06200>]
return[<ast.DictComp object at 0x7da18dc04940>]
if call[name[isinstance], parameter[name[data], tuple[[<ast.Name object at 0x7da18dc07400>, <ast.Name object at 0x7da18dc05930>]]]] begin[:]
<ast.Try object at 0x7da18dc06d70>
return[call[name[func], parameter[name[data]]]] | keyword[def] identifier[_apply_to_data] ( identifier[data] , identifier[func] , identifier[unpack_dict] = keyword[False] ):
literal[string]
identifier[apply_] = identifier[partial] ( identifier[_apply_to_data] , identifier[func] = identifier[func] , identifier[unpack_dict] = identifier[unpack_dict] )
keyword[if] identifier[isinstance] ( identifier[data] , identifier[dict] ):
keyword[if] identifier[unpack_dict] :
keyword[return] [ identifier[apply_] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[data] . identifier[values] ()]
keyword[return] { identifier[k] : identifier[apply_] ( identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[data] . identifier[items] ()}
keyword[if] identifier[isinstance] ( identifier[data] ,( identifier[list] , identifier[tuple] )):
keyword[try] :
keyword[return] [ identifier[apply_] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[data] ]
keyword[except] identifier[TypeError] :
keyword[return] identifier[func] ( identifier[data] )
keyword[return] identifier[func] ( identifier[data] ) | def _apply_to_data(data, func, unpack_dict=False):
"""Apply a function to data, trying to unpack different data
types.
"""
apply_ = partial(_apply_to_data, func=func, unpack_dict=unpack_dict)
if isinstance(data, dict):
if unpack_dict:
return [apply_(v) for v in data.values()] # depends on [control=['if'], data=[]]
return {k: apply_(v) for (k, v) in data.items()} # depends on [control=['if'], data=[]]
if isinstance(data, (list, tuple)):
try:
# e.g.list/tuple of arrays
return [apply_(x) for x in data] # depends on [control=['try'], data=[]]
except TypeError:
return func(data) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return func(data) |
def parent(self):
    """Return a location representing the administrative unit one level
    above the one represented by this location."""
    fields = {}
    if self.city:
        fields = dict(country=self.country, state=self.state,
                      county=self.county)
    elif self.county:
        fields = dict(country=self.country, state=self.state)
    elif self.state:
        fields = dict(country=self.country)
    return Location(**fields)
constant[Return a location representing the administrative unit above
the one represented by this location.]
if name[self].city begin[:]
return[call[name[Location], parameter[]]]
if name[self].county begin[:]
return[call[name[Location], parameter[]]]
if name[self].state begin[:]
return[call[name[Location], parameter[]]]
return[call[name[Location], parameter[]]] | keyword[def] identifier[parent] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[city] :
keyword[return] identifier[Location] (
identifier[country] = identifier[self] . identifier[country] , identifier[state] = identifier[self] . identifier[state] , identifier[county] = identifier[self] . identifier[county] )
keyword[if] identifier[self] . identifier[county] :
keyword[return] identifier[Location] ( identifier[country] = identifier[self] . identifier[country] , identifier[state] = identifier[self] . identifier[state] )
keyword[if] identifier[self] . identifier[state] :
keyword[return] identifier[Location] ( identifier[country] = identifier[self] . identifier[country] )
keyword[return] identifier[Location] () | def parent(self):
"""Return a location representing the administrative unit above
the one represented by this location."""
if self.city:
return Location(country=self.country, state=self.state, county=self.county) # depends on [control=['if'], data=[]]
if self.county:
return Location(country=self.country, state=self.state) # depends on [control=['if'], data=[]]
if self.state:
return Location(country=self.country) # depends on [control=['if'], data=[]]
return Location() |
def milestone(self, column=None, value=None, **kwargs):
    """
    Look up status codes and related dates of certain grants.

    >>> GICS().milestone('milestone_date', '16-MAR-01')
    """
    table = 'GIC_MILESTONE'
    return self._resolve_call(table, column, value, **kwargs)
constant[
Status codes and related dates of certain grants,
>>> GICS().milestone('milestone_date', '16-MAR-01')
]
return[call[name[self]._resolve_call, parameter[constant[GIC_MILESTONE], name[column], name[value]]]] | keyword[def] identifier[milestone] ( identifier[self] , identifier[column] = keyword[None] , identifier[value] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[_resolve_call] ( literal[string] , identifier[column] , identifier[value] ,** identifier[kwargs] ) | def milestone(self, column=None, value=None, **kwargs):
"""
Status codes and related dates of certain grants,
>>> GICS().milestone('milestone_date', '16-MAR-01')
"""
return self._resolve_call('GIC_MILESTONE', column, value, **kwargs) |
def grading_value_text(self):
    '''
    A rendering of the grading that is an answer to the question
    "What is the grade?".
    '''
    if self.assignment.is_graded():
        # Graded assignment: show the grade itself once grading is done.
        return str(self.grading) if self.is_grading_finished() else 'pending'
    # Ungraded assignment: only report whether grading happened at all.
    return 'done' if self.is_grading_finished() else 'not done'
constant[
A rendering of the grading that is an answer to the question
"What is the grade?".
]
if call[name[self].assignment.is_graded, parameter[]] begin[:]
if call[name[self].is_grading_finished, parameter[]] begin[:]
return[call[name[str], parameter[name[self].grading]]] | keyword[def] identifier[grading_value_text] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[assignment] . identifier[is_graded] ():
keyword[if] identifier[self] . identifier[is_grading_finished] ():
keyword[return] identifier[str] ( identifier[self] . identifier[grading] )
keyword[else] :
keyword[return] identifier[str] ( literal[string] )
keyword[else] :
keyword[if] identifier[self] . identifier[is_grading_finished] ():
keyword[return] identifier[str] ( literal[string] )
keyword[else] :
keyword[return] identifier[str] ( literal[string] ) | def grading_value_text(self):
"""
A rendering of the grading that is an answer to the question
"What is the grade?".
"""
if self.assignment.is_graded():
if self.is_grading_finished():
return str(self.grading) # depends on [control=['if'], data=[]]
else:
return str('pending') # depends on [control=['if'], data=[]]
elif self.is_grading_finished():
return str('done') # depends on [control=['if'], data=[]]
else:
return str('not done') |
def clean_year_month(year, month, month_orig):
    """
    Validate and normalise a (year, month) pair taken from the URL.

    If 'month_orig', which is the month given in the url BEFORE any next/prev
    query strings have been applied, is out of range, sets month to the
    current month and returns an error message. Also returns an error
    message if the year given is +/- 50 years from now (via ``_check_year``).
    If 'month', which is the month given in the url AFTER any next/prev
    query strings have been applied, is out of range, adjusts it to be
    in range (by also adjusting the year).

    Returns a ``(year, month, error)`` tuple; ``error`` is ``False`` or an
    error-message string.
    """
    error = False
    error_msg = "The date given was invalid."
    # The original used `month_orig not in xrange(1, 13)`: `xrange` is
    # Python-2 only (NameError on Python 3) and was evaluated even when
    # month_orig was None.  An explicit bounds check is version-safe.
    if month_orig is not None and not 1 <= month_orig <= 12:
        month = now.month
        error = error_msg
    # Fold 'next'/'prev' overflow back into range in one arithmetic step
    # instead of looping: month 13 -> (year+1, 1), month 0 -> (year-1, 12).
    year, month = year + (month - 1) // 12, (month - 1) % 12 + 1
    year, month, error = _check_year(year, month, error, error_msg)
    return year, month, error
constant[
If 'month_orig', which is the month given in the url BEFORE any next/prev
query strings have been applied, is out of range, sets month to the
current month and returns an error message. Also Returns an error
message if the year given is +/- 50 years from now.
If 'month', which is the month given in the url AFTER any next/prev
query strings have been applied, is out of range, adjusts it to be
in range (by also adjusting the year).
]
variable[error] assign[=] constant[False]
variable[error_msg] assign[=] constant[The date given was invalid.]
if <ast.BoolOp object at 0x7da18c4cef80> begin[:]
variable[month] assign[=] name[now].month
variable[error] assign[=] name[error_msg]
while compare[name[month] greater[>] constant[12]] begin[:]
<ast.AugAssign object at 0x7da18c4ccb20>
<ast.AugAssign object at 0x7da18c4cd540>
while compare[name[month] less[<] constant[1]] begin[:]
<ast.AugAssign object at 0x7da18c4cc760>
<ast.AugAssign object at 0x7da18c4ce4d0>
<ast.Tuple object at 0x7da18c4ccdc0> assign[=] call[name[_check_year], parameter[name[year], name[month], name[error], name[error_msg]]]
return[tuple[[<ast.Name object at 0x7da18c4cf2b0>, <ast.Name object at 0x7da18c4cc820>, <ast.Name object at 0x7da18c4cc520>]]] | keyword[def] identifier[clean_year_month] ( identifier[year] , identifier[month] , identifier[month_orig] ):
literal[string]
identifier[error] = keyword[False]
identifier[error_msg] = literal[string]
keyword[if] identifier[month_orig] keyword[not] keyword[in] identifier[xrange] ( literal[int] , literal[int] ) keyword[and] identifier[month_orig] keyword[is] keyword[not] keyword[None] :
identifier[month] = identifier[now] . identifier[month]
identifier[error] = identifier[error_msg]
keyword[while] identifier[month] > literal[int] :
identifier[month] -= literal[int]
identifier[year] += literal[int]
keyword[while] identifier[month] < literal[int] :
identifier[month] += literal[int]
identifier[year] -= literal[int]
identifier[year] , identifier[month] , identifier[error] = identifier[_check_year] ( identifier[year] , identifier[month] , identifier[error] , identifier[error_msg] )
keyword[return] identifier[year] , identifier[month] , identifier[error] | def clean_year_month(year, month, month_orig):
"""
If 'month_orig', which is the month given in the url BEFORE any next/prev
query strings have been applied, is out of range, sets month to the
current month and returns an error message. Also Returns an error
message if the year given is +/- 50 years from now.
If 'month', which is the month given in the url AFTER any next/prev
query strings have been applied, is out of range, adjusts it to be
in range (by also adjusting the year).
"""
error = False
error_msg = 'The date given was invalid.'
if month_orig not in xrange(1, 13) and month_orig is not None:
month = now.month
error = error_msg # depends on [control=['if'], data=[]]
# This takes care of 'next' query strings making month > 12
while month > 12:
month -= 12
year += 1 # depends on [control=['while'], data=['month']]
# This takes care of 'prev' query strings making month < 1
while month < 1:
month += 12
year -= 1 # depends on [control=['while'], data=['month']]
(year, month, error) = _check_year(year, month, error, error_msg)
return (year, month, error) |
def query_file(self, path, fetchall=False, **params):
    """Like Connection.query, but takes a filename to load a query from."""
    # Reject a missing path up front with a friendly error ...
    if not os.path.exists(path):
        raise IOError("File '{}' not found!".format(path))
    # ... and refuse directories explicitly rather than letting open() fail.
    if os.path.isdir(path):
        raise IOError("'{}' is a directory!".format(path))
    # Slurp the SQL text from disk.
    with open(path) as sql_file:
        sql_text = sql_file.read()
    # Delegate execution to the regular query method.
    return self.query(query=sql_text, fetchall=fetchall, **params)
constant[Like Connection.query, but takes a filename to load a query from.]
if <ast.UnaryOp object at 0x7da1b21c6890> begin[:]
<ast.Raise object at 0x7da1b21c4f70>
if call[name[os].path.isdir, parameter[name[path]]] begin[:]
<ast.Raise object at 0x7da1b21c65c0>
with call[name[open], parameter[name[path]]] begin[:]
variable[query] assign[=] call[name[f].read, parameter[]]
return[call[name[self].query, parameter[]]] | keyword[def] identifier[query_file] ( identifier[self] , identifier[path] , identifier[fetchall] = keyword[False] ,** identifier[params] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ):
keyword[raise] identifier[IOError] ( literal[string] . identifier[format] ( identifier[path] ))
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[path] ):
keyword[raise] identifier[IOError] ( literal[string] . identifier[format] ( identifier[path] ))
keyword[with] identifier[open] ( identifier[path] ) keyword[as] identifier[f] :
identifier[query] = identifier[f] . identifier[read] ()
keyword[return] identifier[self] . identifier[query] ( identifier[query] = identifier[query] , identifier[fetchall] = identifier[fetchall] ,** identifier[params] ) | def query_file(self, path, fetchall=False, **params):
"""Like Connection.query, but takes a filename to load a query from."""
# If path doesn't exists
if not os.path.exists(path):
raise IOError("File '{}' not found!".format(path)) # depends on [control=['if'], data=[]]
# If it's a directory
if os.path.isdir(path):
raise IOError("'{}' is a directory!".format(path)) # depends on [control=['if'], data=[]]
# Read the given .sql file into memory.
with open(path) as f:
query = f.read() # depends on [control=['with'], data=['f']]
# Defer processing to self.query method.
return self.query(query=query, fetchall=fetchall, **params) |
def load_output_writer(output_params, readonly=False):
    """
    Return output class of driver.

    Scans the registered driver entry points for one whose METADATA
    matches the format requested in ``output_params`` and instantiates
    its writer.

    Returns
    -------
    output : ``OutputData``
        output writer object
    """
    if not isinstance(output_params, dict):
        raise TypeError("output_params must be a dictionary")
    driver_name = output_params["format"]
    for entry_point in pkg_resources.iter_entry_points(DRIVERS_ENTRY_POINT):
        driver = entry_point.load()
        # A usable driver module exposes both an OutputData class and a
        # METADATA mapping identifying it.
        has_interface = hasattr(driver, "OutputData") and hasattr(driver, "METADATA")
        if has_interface and driver.METADATA["driver_name"] == driver_name:
            return driver.OutputData(output_params, readonly=readonly)
    raise MapcheteDriverError("no loader for driver '%s' could be found." % driver_name)
constant[
Return output class of driver.
Returns
-------
output : ``OutputData``
output writer object
]
if <ast.UnaryOp object at 0x7da1b00b4eb0> begin[:]
<ast.Raise object at 0x7da1b00b4820>
variable[driver_name] assign[=] call[name[output_params]][constant[format]]
for taget[name[v]] in starred[call[name[pkg_resources].iter_entry_points, parameter[name[DRIVERS_ENTRY_POINT]]]] begin[:]
variable[_driver] assign[=] call[name[v].load, parameter[]]
if <ast.BoolOp object at 0x7da1b00b7dc0> begin[:]
return[call[name[_driver].OutputData, parameter[name[output_params]]]]
<ast.Raise object at 0x7da1b00b40d0> | keyword[def] identifier[load_output_writer] ( identifier[output_params] , identifier[readonly] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[output_params] , identifier[dict] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[driver_name] = identifier[output_params] [ literal[string] ]
keyword[for] identifier[v] keyword[in] identifier[pkg_resources] . identifier[iter_entry_points] ( identifier[DRIVERS_ENTRY_POINT] ):
identifier[_driver] = identifier[v] . identifier[load] ()
keyword[if] identifier[all] (
[ identifier[hasattr] ( identifier[_driver] , identifier[attr] ) keyword[for] identifier[attr] keyword[in] [ literal[string] , literal[string] ]]
) keyword[and] (
identifier[_driver] . identifier[METADATA] [ literal[string] ]== identifier[driver_name]
):
keyword[return] identifier[_driver] . identifier[OutputData] ( identifier[output_params] , identifier[readonly] = identifier[readonly] )
keyword[raise] identifier[MapcheteDriverError] ( literal[string] % identifier[driver_name] ) | def load_output_writer(output_params, readonly=False):
"""
Return output class of driver.
Returns
-------
output : ``OutputData``
output writer object
"""
if not isinstance(output_params, dict):
raise TypeError('output_params must be a dictionary') # depends on [control=['if'], data=[]]
driver_name = output_params['format']
for v in pkg_resources.iter_entry_points(DRIVERS_ENTRY_POINT):
_driver = v.load()
if all([hasattr(_driver, attr) for attr in ['OutputData', 'METADATA']]) and _driver.METADATA['driver_name'] == driver_name:
return _driver.OutputData(output_params, readonly=readonly) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['v']]
raise MapcheteDriverError("no loader for driver '%s' could be found." % driver_name) |
def _get_package(self):
    """Get the package related to simple hardware ordering.

    Looks up the 'BARE_METAL_SERVER' product package via the ordering
    manager, requesting items, presets and region data through the
    object mask below.
    """
    # NOTE(review): this mask presumably limits which package properties
    # the SoftLayer API returns (standard object-mask usage) — confirm
    # against the ordering manager's get_package_by_key implementation.
    mask = '''
        items[
            keyName,
            capacity,
            description,
            attributes[id,attributeTypeKeyName],
            itemCategory[id,categoryCode],
            softwareDescription[id,referenceCode,longDescription],
            prices
        ],
        activePresets,
        accountRestrictedActivePresets,
        regions[location[location[priceGroups]]]
    '''
    package_keyname = 'BARE_METAL_SERVER'
    package = self.ordering_manager.get_package_by_key(package_keyname, mask=mask)
    return package
constant[Get the package related to simple hardware ordering.]
variable[mask] assign[=] constant[
items[
keyName,
capacity,
description,
attributes[id,attributeTypeKeyName],
itemCategory[id,categoryCode],
softwareDescription[id,referenceCode,longDescription],
prices
],
activePresets,
accountRestrictedActivePresets,
regions[location[location[priceGroups]]]
]
variable[package_keyname] assign[=] constant[BARE_METAL_SERVER]
variable[package] assign[=] call[name[self].ordering_manager.get_package_by_key, parameter[name[package_keyname]]]
return[name[package]] | keyword[def] identifier[_get_package] ( identifier[self] ):
literal[string]
identifier[mask] = literal[string]
identifier[package_keyname] = literal[string]
identifier[package] = identifier[self] . identifier[ordering_manager] . identifier[get_package_by_key] ( identifier[package_keyname] , identifier[mask] = identifier[mask] )
keyword[return] identifier[package] | def _get_package(self):
"""Get the package related to simple hardware ordering."""
mask = '\n items[\n keyName,\n capacity,\n description,\n attributes[id,attributeTypeKeyName],\n itemCategory[id,categoryCode],\n softwareDescription[id,referenceCode,longDescription],\n prices\n ],\n activePresets,\n accountRestrictedActivePresets,\n regions[location[location[priceGroups]]]\n '
package_keyname = 'BARE_METAL_SERVER'
package = self.ordering_manager.get_package_by_key(package_keyname, mask=mask)
return package |
def get_unique_constraints(self, connection, table_name,
                           schema=None, **kw):
    """
    Return information about unique constraints in `table_name`.

    Overrides interface
    :meth:`~sqlalchemy.engine.interfaces.Dialect.get_unique_constraints`.
    """
    all_constraints = self._get_redshift_constraints(connection,
                                                     table_name, schema, **kw)
    unique_constraints = defaultdict(lambda: defaultdict(dict))
    # Keep only UNIQUE constraints (contype 'u').  Each constraint name
    # collects its ordered attnum list ("key") and an attnum -> column
    # name mapping ("cols"); rows arrive one column at a time.
    for constraint in all_constraints:
        if constraint.contype != 'u':
            continue
        entry = unique_constraints[constraint.conname]
        entry["key"] = constraint.conkey
        entry["cols"][constraint.attnum] = constraint.attname
    # Resolve the attnum order into actual column names per constraint.
    return [
        {'name': None,
         'column_names': [entry["cols"][attnum] for attnum in entry["key"]]}
        for entry in unique_constraints.values()
    ]
constant[
Return information about unique constraints in `table_name`.
Overrides interface
:meth:`~sqlalchemy.engine.interfaces.Dialect.get_unique_constraints`.
]
variable[constraints] assign[=] call[name[self]._get_redshift_constraints, parameter[name[connection], name[table_name], name[schema]]]
variable[constraints] assign[=] <ast.ListComp object at 0x7da18ede4100>
variable[uniques] assign[=] call[name[defaultdict], parameter[<ast.Lambda object at 0x7da18ede6dd0>]]
for taget[name[con]] in starred[name[constraints]] begin[:]
call[call[name[uniques]][name[con].conname]][constant[key]] assign[=] name[con].conkey
call[call[call[name[uniques]][name[con].conname]][constant[cols]]][name[con].attnum] assign[=] name[con].attname
return[<ast.ListComp object at 0x7da18ede5060>] | keyword[def] identifier[get_unique_constraints] ( identifier[self] , identifier[connection] , identifier[table_name] ,
identifier[schema] = keyword[None] ,** identifier[kw] ):
literal[string]
identifier[constraints] = identifier[self] . identifier[_get_redshift_constraints] ( identifier[connection] ,
identifier[table_name] , identifier[schema] ,** identifier[kw] )
identifier[constraints] =[ identifier[c] keyword[for] identifier[c] keyword[in] identifier[constraints] keyword[if] identifier[c] . identifier[contype] == literal[string] ]
identifier[uniques] = identifier[defaultdict] ( keyword[lambda] : identifier[defaultdict] ( identifier[dict] ))
keyword[for] identifier[con] keyword[in] identifier[constraints] :
identifier[uniques] [ identifier[con] . identifier[conname] ][ literal[string] ]= identifier[con] . identifier[conkey]
identifier[uniques] [ identifier[con] . identifier[conname] ][ literal[string] ][ identifier[con] . identifier[attnum] ]= identifier[con] . identifier[attname]
keyword[return] [
{ literal[string] : keyword[None] ,
literal[string] :[ identifier[uc] [ literal[string] ][ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[uc] [ literal[string] ]]}
keyword[for] identifier[name] , identifier[uc] keyword[in] identifier[uniques] . identifier[items] ()
] | def get_unique_constraints(self, connection, table_name, schema=None, **kw):
"""
Return information about unique constraints in `table_name`.
Overrides interface
:meth:`~sqlalchemy.engine.interfaces.Dialect.get_unique_constraints`.
"""
constraints = self._get_redshift_constraints(connection, table_name, schema, **kw)
constraints = [c for c in constraints if c.contype == 'u']
uniques = defaultdict(lambda : defaultdict(dict))
for con in constraints:
uniques[con.conname]['key'] = con.conkey
uniques[con.conname]['cols'][con.attnum] = con.attname # depends on [control=['for'], data=['con']]
return [{'name': None, 'column_names': [uc['cols'][i] for i in uc['key']]} for (name, uc) in uniques.items()] |
def purge_stream(self, stream_id, remove_definition=False, sandbox=None):
    """
    Purge the stream

    :param stream_id: The stream identifier
    :param remove_definition: Whether to remove the stream definition as well
    :param sandbox: The sandbox for this stream
    :return: None
    :raises: NotImplementedError
    """
    # TODO: Add time interval to this
    if sandbox is not None:
        raise NotImplementedError
    if stream_id not in self.streams:
        raise StreamNotFoundError("Stream with id '{}' not found".format(stream_id))
    target = self.streams[stream_id]
    raw_query = stream_id.as_raw()
    # Remove every stored instance belonging to this stream.
    with switch_db(StreamInstanceModel, 'hyperstream'):
        StreamInstanceModel.objects(__raw__=raw_query).delete()
    # With no data left, reset the stream's computed-interval status.
    target.calculated_intervals = TimeIntervals([])
    # Optionally drop the stream's definition document as well.
    if remove_definition:
        with switch_db(StreamDefinitionModel, 'hyperstream'):
            StreamDefinitionModel.objects(__raw__=raw_query).delete()
    logging.info("Purged stream {}".format(stream_id))
constant[
Purge the stream
:param stream_id: The stream identifier
:param remove_definition: Whether to remove the stream definition as well
:param sandbox: The sandbox for this stream
:return: None
:raises: NotImplementedError
]
if compare[name[sandbox] is_not constant[None]] begin[:]
<ast.Raise object at 0x7da207f99ff0>
if compare[name[stream_id] <ast.NotIn object at 0x7da2590d7190> name[self].streams] begin[:]
<ast.Raise object at 0x7da207f99090>
variable[stream] assign[=] call[name[self].streams][name[stream_id]]
variable[query] assign[=] call[name[stream_id].as_raw, parameter[]]
with call[name[switch_db], parameter[name[StreamInstanceModel], constant[hyperstream]]] begin[:]
call[call[name[StreamInstanceModel].objects, parameter[]].delete, parameter[]]
name[stream].calculated_intervals assign[=] call[name[TimeIntervals], parameter[list[[]]]]
if name[remove_definition] begin[:]
with call[name[switch_db], parameter[name[StreamDefinitionModel], constant[hyperstream]]] begin[:]
call[call[name[StreamDefinitionModel].objects, parameter[]].delete, parameter[]]
call[name[logging].info, parameter[call[constant[Purged stream {}].format, parameter[name[stream_id]]]]] | keyword[def] identifier[purge_stream] ( identifier[self] , identifier[stream_id] , identifier[remove_definition] = keyword[False] , identifier[sandbox] = keyword[None] ):
literal[string]
keyword[if] identifier[sandbox] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[NotImplementedError]
keyword[if] identifier[stream_id] keyword[not] keyword[in] identifier[self] . identifier[streams] :
keyword[raise] identifier[StreamNotFoundError] ( literal[string] . identifier[format] ( identifier[stream_id] ))
identifier[stream] = identifier[self] . identifier[streams] [ identifier[stream_id] ]
identifier[query] = identifier[stream_id] . identifier[as_raw] ()
keyword[with] identifier[switch_db] ( identifier[StreamInstanceModel] , literal[string] ):
identifier[StreamInstanceModel] . identifier[objects] ( identifier[__raw__] = identifier[query] ). identifier[delete] ()
identifier[stream] . identifier[calculated_intervals] = identifier[TimeIntervals] ([])
keyword[if] identifier[remove_definition] :
keyword[with] identifier[switch_db] ( identifier[StreamDefinitionModel] , literal[string] ):
identifier[StreamDefinitionModel] . identifier[objects] ( identifier[__raw__] = identifier[query] ). identifier[delete] ()
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[stream_id] )) | def purge_stream(self, stream_id, remove_definition=False, sandbox=None):
"""
Purge the stream
:param stream_id: The stream identifier
:param remove_definition: Whether to remove the stream definition as well
:param sandbox: The sandbox for this stream
:return: None
:raises: NotImplementedError
"""
# TODO: Add time interval to this
if sandbox is not None:
raise NotImplementedError # depends on [control=['if'], data=[]]
if stream_id not in self.streams:
raise StreamNotFoundError("Stream with id '{}' not found".format(stream_id)) # depends on [control=['if'], data=['stream_id']]
stream = self.streams[stream_id]
query = stream_id.as_raw()
with switch_db(StreamInstanceModel, 'hyperstream'):
StreamInstanceModel.objects(__raw__=query).delete() # depends on [control=['with'], data=[]]
# Also update the stream status
stream.calculated_intervals = TimeIntervals([])
if remove_definition:
with switch_db(StreamDefinitionModel, 'hyperstream'):
StreamDefinitionModel.objects(__raw__=query).delete() # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
logging.info('Purged stream {}'.format(stream_id)) |
def target_message(conn, payload):
    """
    Distribute payload (message) to one connection.

    Send failures are swallowed (best-effort delivery) and logged at
    debug level.

    :param conn: connection
    :param payload: payload(json dumpable)
    :return:
    """
    try:
        yield from conn.send(json.dumps(payload))
    except Exception as e:
        # Bug fix: the original `logger.debug('could not send', e)` passed
        # the exception as a bare %-format argument with no placeholder,
        # which the logging module cannot render.  Use lazy %s formatting.
        logger.debug('could not send: %s', e)
constant[
Distibuted payload (message) to one connection
:param conn: connection
:param payload: payload(json dumpable)
:return:
]
<ast.Try object at 0x7da1b17200a0> | keyword[def] identifier[target_message] ( identifier[conn] , identifier[payload] ):
literal[string]
keyword[try] :
keyword[yield] keyword[from] identifier[conn] . identifier[send] ( identifier[json] . identifier[dumps] ( identifier[payload] ))
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[e] ) | def target_message(conn, payload):
"""
Distibuted payload (message) to one connection
:param conn: connection
:param payload: payload(json dumpable)
:return:
"""
try:
yield from conn.send(json.dumps(payload)) # depends on [control=['try'], data=[]]
except Exception as e:
logger.debug('could not send', e) # depends on [control=['except'], data=['e']] |
def toPyModel(model_ptr):
    """
    toPyModel(model_ptr) -> svm_model

    Convert a ctypes POINTER(svm_model) to a Python svm_model.

    :raises ValueError: if ``model_ptr`` is a NULL pointer
    """
    # A NULL ctypes pointer is falsy, so use plain truthiness instead of
    # the unidiomatic `bool(model_ptr) == False` comparison.
    if not model_ptr:
        raise ValueError("Null pointer")
    m = model_ptr.contents
    # Tag the model's origin; NOTE(review): presumably consumed by cleanup
    # code to decide who owns the underlying C struct — confirm.
    m.__createfrom__ = 'C'
    return m
constant[
toPyModel(model_ptr) -> svm_model
Convert a ctypes POINTER(svm_model) to a Python svm_model
]
if compare[call[name[bool], parameter[name[model_ptr]]] equal[==] constant[False]] begin[:]
<ast.Raise object at 0x7da1b1ff5f60>
variable[m] assign[=] name[model_ptr].contents
name[m].__createfrom__ assign[=] constant[C]
return[name[m]] | keyword[def] identifier[toPyModel] ( identifier[model_ptr] ):
literal[string]
keyword[if] identifier[bool] ( identifier[model_ptr] )== keyword[False] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[m] = identifier[model_ptr] . identifier[contents]
identifier[m] . identifier[__createfrom__] = literal[string]
keyword[return] identifier[m] | def toPyModel(model_ptr):
"""
toPyModel(model_ptr) -> svm_model
Convert a ctypes POINTER(svm_model) to a Python svm_model
"""
if bool(model_ptr) == False:
raise ValueError('Null pointer') # depends on [control=['if'], data=[]]
m = model_ptr.contents
m.__createfrom__ = 'C'
return m |
def set_level(logger=None, log_level=None):
    '''Set logging levels using logger names.

    :param logger: Name of the logger (``None`` configures the root logger)
    :type logger: String
    :param log_level: A string or integer corresponding to a Python logging
        level.  When omitted, the level is read from the ``VERBOSITY``
        environment variable, defaulting to ``WARNING``.
    :type log_level: String
    :rtype: None
    '''
    # Bug fix: the original unconditionally overwrote log_level from the
    # environment, making the documented parameter dead.  Only fall back
    # to VERBOSITY when the caller did not supply a level.
    if log_level is None:
        log_level = logging.getLevelName(os.getenv('VERBOSITY', 'WARNING'))
    logging.getLogger(logger).setLevel(log_level)
constant[Set logging levels using logger names.
:param logger: Name of the logger
:type logger: String
:param log_level: A string or integer corresponding to a Python logging level
:type log_level: String
:rtype: None
]
variable[log_level] assign[=] call[name[logging].getLevelName, parameter[call[name[os].getenv, parameter[constant[VERBOSITY], constant[WARNING]]]]]
call[call[name[logging].getLogger, parameter[name[logger]]].setLevel, parameter[name[log_level]]] | keyword[def] identifier[set_level] ( identifier[logger] = keyword[None] , identifier[log_level] = keyword[None] ):
literal[string]
identifier[log_level] = identifier[logging] . identifier[getLevelName] ( identifier[os] . identifier[getenv] ( literal[string] , literal[string] ))
identifier[logging] . identifier[getLogger] ( identifier[logger] ). identifier[setLevel] ( identifier[log_level] ) | def set_level(logger=None, log_level=None):
"""Set logging levels using logger names.
:param logger: Name of the logger
:type logger: String
:param log_level: A string or integer corresponding to a Python logging level
:type log_level: String
:rtype: None
"""
log_level = logging.getLevelName(os.getenv('VERBOSITY', 'WARNING'))
logging.getLogger(logger).setLevel(log_level) |
def get_section_config(self, section):
    """
    Get a specific configuration section.

    Falls back to the default section when the requested one does not
    exist.

    :returns: configparser section
    """
    # EAFP: try the lookup directly and fall back on a missing section.
    try:
        return self._config[section]
    except KeyError:
        return self._config["DEFAULT"]
constant[
Get a specific configuration section.
Returns the default section if none can be found.
:returns: configparser section
]
if compare[name[section] <ast.NotIn object at 0x7da2590d7190> name[self]._config] begin[:]
return[call[name[self]._config][constant[DEFAULT]]]
return[call[name[self]._config][name[section]]] | keyword[def] identifier[get_section_config] ( identifier[self] , identifier[section] ):
literal[string]
keyword[if] identifier[section] keyword[not] keyword[in] identifier[self] . identifier[_config] :
keyword[return] identifier[self] . identifier[_config] [ literal[string] ]
keyword[return] identifier[self] . identifier[_config] [ identifier[section] ] | def get_section_config(self, section):
"""
Get a specific configuration section.
Returns the default section if none can be found.
:returns: configparser section
"""
if section not in self._config:
return self._config['DEFAULT'] # depends on [control=['if'], data=[]]
return self._config[section] |
def vmomentdensity(self,*args,**kwargs):
    """
    NAME:
       vmomentdensity
    PURPOSE:
       calculate the an arbitrary moment of the velocity distribution
       at R times the density
    INPUT:
       R - radius at which to calculate the moment(/ro)
       n - vR^n
       m - vT^m
       o - vz^o
    OPTIONAL INPUT:
       nsigma - number of sigma to integrate the vR and vz velocities over (when doing explicit numerical integral; default: 4)
       vTmax - upper limit for integration over vT (default: 1.5)
       mc= if True, calculate using Monte Carlo integration
       nmc= if mc, use nmc samples
       gl= use Gauss-Legendre
       _returngl= if True, return the evaluated DF
       _return_actions= if True, return the evaluated actions (does not work with _returngl currently)
       _return_freqs= if True, return the evaluated frequencies and rg (does not work with _returngl currently)
    OUTPUT:
       <vR^n vT^m x density> at R,z (no support for units)
    HISTORY:
       2012-08-06 - Written - Bovy (IAS@MPIA)
    """
    # Physical-unit handling is done inline here because the output unit
    # depends on the moment orders, so the usual decorator cannot be used.
    use_physical= kwargs.pop('use_physical',True)
    # Resolve ro: explicit keyword wins, otherwise fall back to the
    # object-level scale if it was set on this instance.
    ro= kwargs.pop('ro',None)
    if ro is None and hasattr(self,'_roSet') and self._roSet:
        ro= self._ro
    if _APY_LOADED and isinstance(ro,units.Quantity):
        # Strip astropy units: plain number in kpc.
        ro= ro.to(units.kpc).value
    # Same resolution logic for the velocity scale vo, in km/s.
    vo= kwargs.pop('vo',None)
    if vo is None and hasattr(self,'_voSet') and self._voSet:
        vo= self._vo
    if _APY_LOADED and isinstance(vo,units.Quantity):
        vo= vo.to(units.km/units.s).value
    if use_physical and not vo is None and not ro is None:
        # Scale factor: a velocity moment of total order n+m+o times a
        # density -> vo^(n+m+o)/ro^3.  args[2], args[3], args[4] appear
        # to be the moment orders n, m, o (the OUTPUT note "at R,z"
        # suggests args[0:2] are R and z, though the INPUT list above
        # omits z — confirm against _vmomentdensity's signature).
        fac= vo**(args[2]+args[3]+args[4])/ro**3
        if _APY_UNITS:
            # Matching astropy unit for the physically-scaled output.
            u= 1/units.kpc**3*(units.km/units.s)**(args[2]+args[3]+args[4])
        out= self._vmomentdensity(*args,**kwargs)
        if _APY_UNITS:
            return units.Quantity(out*fac,unit=u)
        else:
            return out*fac
    else:
        # No physical conversion requested/possible: return the value in
        # internal (natural) units.
        return self._vmomentdensity(*args,**kwargs)
constant[
NAME:
vmomentdensity
PURPOSE:
calculate the an arbitrary moment of the velocity distribution
at R times the density
INPUT:
R - radius at which to calculate the moment(/ro)
n - vR^n
m - vT^m
o - vz^o
OPTIONAL INPUT:
nsigma - number of sigma to integrate the vR and vz velocities over (when doing explicit numerical integral; default: 4)
vTmax - upper limit for integration over vT (default: 1.5)
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
gl= use Gauss-Legendre
_returngl= if True, return the evaluated DF
_return_actions= if True, return the evaluated actions (does not work with _returngl currently)
_return_freqs= if True, return the evaluated frequencies and rg (does not work with _returngl currently)
OUTPUT:
<vR^n vT^m x density> at R,z (no support for units)
HISTORY:
2012-08-06 - Written - Bovy (IAS@MPIA)
]
variable[use_physical] assign[=] call[name[kwargs].pop, parameter[constant[use_physical], constant[True]]]
variable[ro] assign[=] call[name[kwargs].pop, parameter[constant[ro], constant[None]]]
if <ast.BoolOp object at 0x7da1b0c96bf0> begin[:]
variable[ro] assign[=] name[self]._ro
if <ast.BoolOp object at 0x7da1b0c96110> begin[:]
variable[ro] assign[=] call[name[ro].to, parameter[name[units].kpc]].value
variable[vo] assign[=] call[name[kwargs].pop, parameter[constant[vo], constant[None]]]
if <ast.BoolOp object at 0x7da1b0c97160> begin[:]
variable[vo] assign[=] name[self]._vo
if <ast.BoolOp object at 0x7da1b0c97190> begin[:]
variable[vo] assign[=] call[name[vo].to, parameter[binary_operation[name[units].km / name[units].s]]].value
if <ast.BoolOp object at 0x7da1b0c95ea0> begin[:]
variable[fac] assign[=] binary_operation[binary_operation[name[vo] ** binary_operation[binary_operation[call[name[args]][constant[2]] + call[name[args]][constant[3]]] + call[name[args]][constant[4]]]] / binary_operation[name[ro] ** constant[3]]]
if name[_APY_UNITS] begin[:]
variable[u] assign[=] binary_operation[binary_operation[constant[1] / binary_operation[name[units].kpc ** constant[3]]] * binary_operation[binary_operation[name[units].km / name[units].s] ** binary_operation[binary_operation[call[name[args]][constant[2]] + call[name[args]][constant[3]]] + call[name[args]][constant[4]]]]]
variable[out] assign[=] call[name[self]._vmomentdensity, parameter[<ast.Starred object at 0x7da1b0e147f0>]]
if name[_APY_UNITS] begin[:]
return[call[name[units].Quantity, parameter[binary_operation[name[out] * name[fac]]]]] | keyword[def] identifier[vmomentdensity] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[use_physical] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[True] )
identifier[ro] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
keyword[if] identifier[ro] keyword[is] keyword[None] keyword[and] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[_roSet] :
identifier[ro] = identifier[self] . identifier[_ro]
keyword[if] identifier[_APY_LOADED] keyword[and] identifier[isinstance] ( identifier[ro] , identifier[units] . identifier[Quantity] ):
identifier[ro] = identifier[ro] . identifier[to] ( identifier[units] . identifier[kpc] ). identifier[value]
identifier[vo] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
keyword[if] identifier[vo] keyword[is] keyword[None] keyword[and] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[_voSet] :
identifier[vo] = identifier[self] . identifier[_vo]
keyword[if] identifier[_APY_LOADED] keyword[and] identifier[isinstance] ( identifier[vo] , identifier[units] . identifier[Quantity] ):
identifier[vo] = identifier[vo] . identifier[to] ( identifier[units] . identifier[km] / identifier[units] . identifier[s] ). identifier[value]
keyword[if] identifier[use_physical] keyword[and] keyword[not] identifier[vo] keyword[is] keyword[None] keyword[and] keyword[not] identifier[ro] keyword[is] keyword[None] :
identifier[fac] = identifier[vo] **( identifier[args] [ literal[int] ]+ identifier[args] [ literal[int] ]+ identifier[args] [ literal[int] ])/ identifier[ro] ** literal[int]
keyword[if] identifier[_APY_UNITS] :
identifier[u] = literal[int] / identifier[units] . identifier[kpc] ** literal[int] *( identifier[units] . identifier[km] / identifier[units] . identifier[s] )**( identifier[args] [ literal[int] ]+ identifier[args] [ literal[int] ]+ identifier[args] [ literal[int] ])
identifier[out] = identifier[self] . identifier[_vmomentdensity] (* identifier[args] ,** identifier[kwargs] )
keyword[if] identifier[_APY_UNITS] :
keyword[return] identifier[units] . identifier[Quantity] ( identifier[out] * identifier[fac] , identifier[unit] = identifier[u] )
keyword[else] :
keyword[return] identifier[out] * identifier[fac]
keyword[else] :
keyword[return] identifier[self] . identifier[_vmomentdensity] (* identifier[args] ,** identifier[kwargs] ) | def vmomentdensity(self, *args, **kwargs):
"""
NAME:
vmomentdensity
PURPOSE:
calculate the an arbitrary moment of the velocity distribution
at R times the density
INPUT:
R - radius at which to calculate the moment(/ro)
n - vR^n
m - vT^m
o - vz^o
OPTIONAL INPUT:
nsigma - number of sigma to integrate the vR and vz velocities over (when doing explicit numerical integral; default: 4)
vTmax - upper limit for integration over vT (default: 1.5)
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
gl= use Gauss-Legendre
_returngl= if True, return the evaluated DF
_return_actions= if True, return the evaluated actions (does not work with _returngl currently)
_return_freqs= if True, return the evaluated frequencies and rg (does not work with _returngl currently)
OUTPUT:
<vR^n vT^m x density> at R,z (no support for units)
HISTORY:
2012-08-06 - Written - Bovy (IAS@MPIA)
"""
use_physical = kwargs.pop('use_physical', True)
ro = kwargs.pop('ro', None)
if ro is None and hasattr(self, '_roSet') and self._roSet:
ro = self._ro # depends on [control=['if'], data=[]]
if _APY_LOADED and isinstance(ro, units.Quantity):
ro = ro.to(units.kpc).value # depends on [control=['if'], data=[]]
vo = kwargs.pop('vo', None)
if vo is None and hasattr(self, '_voSet') and self._voSet:
vo = self._vo # depends on [control=['if'], data=[]]
if _APY_LOADED and isinstance(vo, units.Quantity):
vo = vo.to(units.km / units.s).value # depends on [control=['if'], data=[]]
if use_physical and (not vo is None) and (not ro is None):
fac = vo ** (args[2] + args[3] + args[4]) / ro ** 3
if _APY_UNITS:
u = 1 / units.kpc ** 3 * (units.km / units.s) ** (args[2] + args[3] + args[4]) # depends on [control=['if'], data=[]]
out = self._vmomentdensity(*args, **kwargs)
if _APY_UNITS:
return units.Quantity(out * fac, unit=u) # depends on [control=['if'], data=[]]
else:
return out * fac # depends on [control=['if'], data=[]]
else:
return self._vmomentdensity(*args, **kwargs) |
def in_fill(self, x, y):
"""Tests whether the given point is inside the area
that would be affected by a :meth:`fill` operation
given the current path and filling parameters.
Surface dimensions and clipping are not taken into account.
See :meth:`fill`, :meth:`set_fill_rule` and :meth:`fill_preserve`.
:param x: X coordinate of the point to test
:param y: Y coordinate of the point to test
:type x: float
:type y: float
:returns: A boolean.
"""
return bool(cairo.cairo_in_fill(self._pointer, x, y)) | def function[in_fill, parameter[self, x, y]]:
constant[Tests whether the given point is inside the area
that would be affected by a :meth:`fill` operation
given the current path and filling parameters.
Surface dimensions and clipping are not taken into account.
See :meth:`fill`, :meth:`set_fill_rule` and :meth:`fill_preserve`.
:param x: X coordinate of the point to test
:param y: Y coordinate of the point to test
:type x: float
:type y: float
:returns: A boolean.
]
return[call[name[bool], parameter[call[name[cairo].cairo_in_fill, parameter[name[self]._pointer, name[x], name[y]]]]]] | keyword[def] identifier[in_fill] ( identifier[self] , identifier[x] , identifier[y] ):
literal[string]
keyword[return] identifier[bool] ( identifier[cairo] . identifier[cairo_in_fill] ( identifier[self] . identifier[_pointer] , identifier[x] , identifier[y] )) | def in_fill(self, x, y):
"""Tests whether the given point is inside the area
that would be affected by a :meth:`fill` operation
given the current path and filling parameters.
Surface dimensions and clipping are not taken into account.
See :meth:`fill`, :meth:`set_fill_rule` and :meth:`fill_preserve`.
:param x: X coordinate of the point to test
:param y: Y coordinate of the point to test
:type x: float
:type y: float
:returns: A boolean.
"""
return bool(cairo.cairo_in_fill(self._pointer, x, y)) |
def get(self, event):
"""Get a specified object"""
try:
data, schema, user, client = self._get_args(event)
except AttributeError:
return
object_filter = self._get_filter(event)
if 'subscribe' in data:
do_subscribe = data['subscribe'] is True
else:
do_subscribe = False
try:
uuid = str(data['uuid'])
except (KeyError, TypeError):
uuid = ""
opts = schemastore[schema].get('options', {})
hidden = opts.get('hidden', [])
if object_filter == {}:
if uuid == "":
self.log('Object with no filter/uuid requested:', schema,
data,
lvl=warn)
return
object_filter = {'uuid': uuid}
storage_object = None
storage_object = objectmodels[schema].find_one(object_filter)
if not storage_object:
self._cancel_by_error(event, uuid + '(' + str(object_filter) + ') of ' + schema +
' unavailable')
return
if storage_object:
self.log("Object found, checking permissions: ", data, lvl=verbose)
if not self._check_permissions(user, 'read',
storage_object):
self._cancel_by_permission(schema, data, event)
return
for field in hidden:
storage_object._fields.pop(field, None)
if do_subscribe and uuid != "":
self._add_subscription(uuid, event)
result = {
'component': 'hfos.events.objectmanager',
'action': 'get',
'data': {
'schema': schema,
'uuid': uuid,
'object': storage_object.serializablefields()
}
}
self._respond(None, result, event) | def function[get, parameter[self, event]]:
constant[Get a specified object]
<ast.Try object at 0x7da1b0ede290>
variable[object_filter] assign[=] call[name[self]._get_filter, parameter[name[event]]]
if compare[constant[subscribe] in name[data]] begin[:]
variable[do_subscribe] assign[=] compare[call[name[data]][constant[subscribe]] is constant[True]]
<ast.Try object at 0x7da1b0edf5b0>
variable[opts] assign[=] call[call[name[schemastore]][name[schema]].get, parameter[constant[options], dictionary[[], []]]]
variable[hidden] assign[=] call[name[opts].get, parameter[constant[hidden], list[[]]]]
if compare[name[object_filter] equal[==] dictionary[[], []]] begin[:]
if compare[name[uuid] equal[==] constant[]] begin[:]
call[name[self].log, parameter[constant[Object with no filter/uuid requested:], name[schema], name[data]]]
return[None]
variable[object_filter] assign[=] dictionary[[<ast.Constant object at 0x7da1b0f05300>], [<ast.Name object at 0x7da1b0f06350>]]
variable[storage_object] assign[=] constant[None]
variable[storage_object] assign[=] call[call[name[objectmodels]][name[schema]].find_one, parameter[name[object_filter]]]
if <ast.UnaryOp object at 0x7da1b0f06560> begin[:]
call[name[self]._cancel_by_error, parameter[name[event], binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[uuid] + constant[(]] + call[name[str], parameter[name[object_filter]]]] + constant[) of ]] + name[schema]] + constant[ unavailable]]]]
return[None]
if name[storage_object] begin[:]
call[name[self].log, parameter[constant[Object found, checking permissions: ], name[data]]]
if <ast.UnaryOp object at 0x7da1b0f05840> begin[:]
call[name[self]._cancel_by_permission, parameter[name[schema], name[data], name[event]]]
return[None]
for taget[name[field]] in starred[name[hidden]] begin[:]
call[name[storage_object]._fields.pop, parameter[name[field], constant[None]]]
if <ast.BoolOp object at 0x7da1b0fe9b40> begin[:]
call[name[self]._add_subscription, parameter[name[uuid], name[event]]]
variable[result] assign[=] dictionary[[<ast.Constant object at 0x7da1b0fe9900>, <ast.Constant object at 0x7da1b0fe9840>, <ast.Constant object at 0x7da1b0fe9c30>], [<ast.Constant object at 0x7da1b0fe8a00>, <ast.Constant object at 0x7da1b0fe98a0>, <ast.Dict object at 0x7da1b0feb430>]]
call[name[self]._respond, parameter[constant[None], name[result], name[event]]] | keyword[def] identifier[get] ( identifier[self] , identifier[event] ):
literal[string]
keyword[try] :
identifier[data] , identifier[schema] , identifier[user] , identifier[client] = identifier[self] . identifier[_get_args] ( identifier[event] )
keyword[except] identifier[AttributeError] :
keyword[return]
identifier[object_filter] = identifier[self] . identifier[_get_filter] ( identifier[event] )
keyword[if] literal[string] keyword[in] identifier[data] :
identifier[do_subscribe] = identifier[data] [ literal[string] ] keyword[is] keyword[True]
keyword[else] :
identifier[do_subscribe] = keyword[False]
keyword[try] :
identifier[uuid] = identifier[str] ( identifier[data] [ literal[string] ])
keyword[except] ( identifier[KeyError] , identifier[TypeError] ):
identifier[uuid] = literal[string]
identifier[opts] = identifier[schemastore] [ identifier[schema] ]. identifier[get] ( literal[string] ,{})
identifier[hidden] = identifier[opts] . identifier[get] ( literal[string] ,[])
keyword[if] identifier[object_filter] =={}:
keyword[if] identifier[uuid] == literal[string] :
identifier[self] . identifier[log] ( literal[string] , identifier[schema] ,
identifier[data] ,
identifier[lvl] = identifier[warn] )
keyword[return]
identifier[object_filter] ={ literal[string] : identifier[uuid] }
identifier[storage_object] = keyword[None]
identifier[storage_object] = identifier[objectmodels] [ identifier[schema] ]. identifier[find_one] ( identifier[object_filter] )
keyword[if] keyword[not] identifier[storage_object] :
identifier[self] . identifier[_cancel_by_error] ( identifier[event] , identifier[uuid] + literal[string] + identifier[str] ( identifier[object_filter] )+ literal[string] + identifier[schema] +
literal[string] )
keyword[return]
keyword[if] identifier[storage_object] :
identifier[self] . identifier[log] ( literal[string] , identifier[data] , identifier[lvl] = identifier[verbose] )
keyword[if] keyword[not] identifier[self] . identifier[_check_permissions] ( identifier[user] , literal[string] ,
identifier[storage_object] ):
identifier[self] . identifier[_cancel_by_permission] ( identifier[schema] , identifier[data] , identifier[event] )
keyword[return]
keyword[for] identifier[field] keyword[in] identifier[hidden] :
identifier[storage_object] . identifier[_fields] . identifier[pop] ( identifier[field] , keyword[None] )
keyword[if] identifier[do_subscribe] keyword[and] identifier[uuid] != literal[string] :
identifier[self] . identifier[_add_subscription] ( identifier[uuid] , identifier[event] )
identifier[result] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] :{
literal[string] : identifier[schema] ,
literal[string] : identifier[uuid] ,
literal[string] : identifier[storage_object] . identifier[serializablefields] ()
}
}
identifier[self] . identifier[_respond] ( keyword[None] , identifier[result] , identifier[event] ) | def get(self, event):
"""Get a specified object"""
try:
(data, schema, user, client) = self._get_args(event) # depends on [control=['try'], data=[]]
except AttributeError:
return # depends on [control=['except'], data=[]]
object_filter = self._get_filter(event)
if 'subscribe' in data:
do_subscribe = data['subscribe'] is True # depends on [control=['if'], data=['data']]
else:
do_subscribe = False
try:
uuid = str(data['uuid']) # depends on [control=['try'], data=[]]
except (KeyError, TypeError):
uuid = '' # depends on [control=['except'], data=[]]
opts = schemastore[schema].get('options', {})
hidden = opts.get('hidden', [])
if object_filter == {}:
if uuid == '':
self.log('Object with no filter/uuid requested:', schema, data, lvl=warn)
return # depends on [control=['if'], data=[]]
object_filter = {'uuid': uuid} # depends on [control=['if'], data=['object_filter']]
storage_object = None
storage_object = objectmodels[schema].find_one(object_filter)
if not storage_object:
self._cancel_by_error(event, uuid + '(' + str(object_filter) + ') of ' + schema + ' unavailable')
return # depends on [control=['if'], data=[]]
if storage_object:
self.log('Object found, checking permissions: ', data, lvl=verbose)
if not self._check_permissions(user, 'read', storage_object):
self._cancel_by_permission(schema, data, event)
return # depends on [control=['if'], data=[]]
for field in hidden:
storage_object._fields.pop(field, None) # depends on [control=['for'], data=['field']]
if do_subscribe and uuid != '':
self._add_subscription(uuid, event) # depends on [control=['if'], data=[]]
result = {'component': 'hfos.events.objectmanager', 'action': 'get', 'data': {'schema': schema, 'uuid': uuid, 'object': storage_object.serializablefields()}}
self._respond(None, result, event) # depends on [control=['if'], data=[]] |
def raises_regex(self, expected_exception, expected_regexp):
"""
Ensures preceding predicates (specifically, :meth:`called_with()`) result in *expected_exception* being raised,
and the string representation of *expected_exception* must match regular expression *expected_regexp*.
"""
return unittest_case.assertRaisesRegexp(expected_exception, expected_regexp, self._orig_subject,
*self._args, **self._kwargs) | def function[raises_regex, parameter[self, expected_exception, expected_regexp]]:
constant[
Ensures preceding predicates (specifically, :meth:`called_with()`) result in *expected_exception* being raised,
and the string representation of *expected_exception* must match regular expression *expected_regexp*.
]
return[call[name[unittest_case].assertRaisesRegexp, parameter[name[expected_exception], name[expected_regexp], name[self]._orig_subject, <ast.Starred object at 0x7da18f00f130>]]] | keyword[def] identifier[raises_regex] ( identifier[self] , identifier[expected_exception] , identifier[expected_regexp] ):
literal[string]
keyword[return] identifier[unittest_case] . identifier[assertRaisesRegexp] ( identifier[expected_exception] , identifier[expected_regexp] , identifier[self] . identifier[_orig_subject] ,
* identifier[self] . identifier[_args] ,** identifier[self] . identifier[_kwargs] ) | def raises_regex(self, expected_exception, expected_regexp):
"""
Ensures preceding predicates (specifically, :meth:`called_with()`) result in *expected_exception* being raised,
and the string representation of *expected_exception* must match regular expression *expected_regexp*.
"""
return unittest_case.assertRaisesRegexp(expected_exception, expected_regexp, self._orig_subject, *self._args, **self._kwargs) |
def _BuildQuery(self,
subject,
attribute=None,
timestamp=None,
limit=None,
is_prefix=False):
"""Build the SELECT query to be executed."""
args = []
subject = utils.SmartUnicode(subject)
criteria = "WHERE aff4.subject_hash=unhex(md5(%s))"
args.append(subject)
sorting = ""
tables = "FROM aff4"
# Set fields, tables, and criteria and append args
if attribute is not None:
if is_prefix:
tables += " JOIN attributes ON aff4.attribute_hash=attributes.hash"
prefix = attribute + "%"
criteria += " AND attributes.attribute like %s"
args.append(prefix)
else:
criteria += " AND aff4.attribute_hash=unhex(md5(%s))"
args.append(attribute)
# Limit to time range if specified
if isinstance(timestamp, (tuple, list)):
criteria += " AND aff4.timestamp >= %s AND aff4.timestamp <= %s"
args.append(int(timestamp[0]))
args.append(int(timestamp[1]))
fields = "aff4.value, aff4.timestamp"
if is_prefix:
fields += ", attributes.attribute"
# Modify fields and sorting for timestamps.
if timestamp is None or timestamp == self.NEWEST_TIMESTAMP:
tables += (" JOIN (SELECT attribute_hash, MAX(timestamp) timestamp "
"%s %s GROUP BY attribute_hash) maxtime ON "
"aff4.attribute_hash=maxtime.attribute_hash AND "
"aff4.timestamp=maxtime.timestamp") % (tables, criteria)
criteria = "WHERE aff4.subject_hash=unhex(md5(%s))"
args.append(subject)
else:
# Always order results.
sorting = "ORDER BY aff4.timestamp DESC"
# Add limit if set.
if limit:
sorting += " LIMIT %s" % int(limit)
query = " ".join(["SELECT", fields, tables, criteria, sorting])
return (query, args) | def function[_BuildQuery, parameter[self, subject, attribute, timestamp, limit, is_prefix]]:
constant[Build the SELECT query to be executed.]
variable[args] assign[=] list[[]]
variable[subject] assign[=] call[name[utils].SmartUnicode, parameter[name[subject]]]
variable[criteria] assign[=] constant[WHERE aff4.subject_hash=unhex(md5(%s))]
call[name[args].append, parameter[name[subject]]]
variable[sorting] assign[=] constant[]
variable[tables] assign[=] constant[FROM aff4]
if compare[name[attribute] is_not constant[None]] begin[:]
if name[is_prefix] begin[:]
<ast.AugAssign object at 0x7da1b1b87460>
variable[prefix] assign[=] binary_operation[name[attribute] + constant[%]]
<ast.AugAssign object at 0x7da1b1b873d0>
call[name[args].append, parameter[name[prefix]]]
if call[name[isinstance], parameter[name[timestamp], tuple[[<ast.Name object at 0x7da1b1b872b0>, <ast.Name object at 0x7da1b1b853c0>]]]] begin[:]
<ast.AugAssign object at 0x7da1b1b85210>
call[name[args].append, parameter[call[name[int], parameter[call[name[timestamp]][constant[0]]]]]]
call[name[args].append, parameter[call[name[int], parameter[call[name[timestamp]][constant[1]]]]]]
variable[fields] assign[=] constant[aff4.value, aff4.timestamp]
if name[is_prefix] begin[:]
<ast.AugAssign object at 0x7da1b1c3f040>
if <ast.BoolOp object at 0x7da1b1c3d2d0> begin[:]
<ast.AugAssign object at 0x7da1b1c3c8b0>
variable[criteria] assign[=] constant[WHERE aff4.subject_hash=unhex(md5(%s))]
call[name[args].append, parameter[name[subject]]]
if name[limit] begin[:]
<ast.AugAssign object at 0x7da1b1c3e380>
variable[query] assign[=] call[constant[ ].join, parameter[list[[<ast.Constant object at 0x7da1b1c3d360>, <ast.Name object at 0x7da1b1c3cd00>, <ast.Name object at 0x7da1b1c3d270>, <ast.Name object at 0x7da1b1c3d5d0>, <ast.Name object at 0x7da1b1c3d120>]]]]
return[tuple[[<ast.Name object at 0x7da1b1c3fd30>, <ast.Name object at 0x7da1b1c3d8a0>]]] | keyword[def] identifier[_BuildQuery] ( identifier[self] ,
identifier[subject] ,
identifier[attribute] = keyword[None] ,
identifier[timestamp] = keyword[None] ,
identifier[limit] = keyword[None] ,
identifier[is_prefix] = keyword[False] ):
literal[string]
identifier[args] =[]
identifier[subject] = identifier[utils] . identifier[SmartUnicode] ( identifier[subject] )
identifier[criteria] = literal[string]
identifier[args] . identifier[append] ( identifier[subject] )
identifier[sorting] = literal[string]
identifier[tables] = literal[string]
keyword[if] identifier[attribute] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[is_prefix] :
identifier[tables] += literal[string]
identifier[prefix] = identifier[attribute] + literal[string]
identifier[criteria] += literal[string]
identifier[args] . identifier[append] ( identifier[prefix] )
keyword[else] :
identifier[criteria] += literal[string]
identifier[args] . identifier[append] ( identifier[attribute] )
keyword[if] identifier[isinstance] ( identifier[timestamp] ,( identifier[tuple] , identifier[list] )):
identifier[criteria] += literal[string]
identifier[args] . identifier[append] ( identifier[int] ( identifier[timestamp] [ literal[int] ]))
identifier[args] . identifier[append] ( identifier[int] ( identifier[timestamp] [ literal[int] ]))
identifier[fields] = literal[string]
keyword[if] identifier[is_prefix] :
identifier[fields] += literal[string]
keyword[if] identifier[timestamp] keyword[is] keyword[None] keyword[or] identifier[timestamp] == identifier[self] . identifier[NEWEST_TIMESTAMP] :
identifier[tables] +=( literal[string]
literal[string]
literal[string]
literal[string] )%( identifier[tables] , identifier[criteria] )
identifier[criteria] = literal[string]
identifier[args] . identifier[append] ( identifier[subject] )
keyword[else] :
identifier[sorting] = literal[string]
keyword[if] identifier[limit] :
identifier[sorting] += literal[string] % identifier[int] ( identifier[limit] )
identifier[query] = literal[string] . identifier[join] ([ literal[string] , identifier[fields] , identifier[tables] , identifier[criteria] , identifier[sorting] ])
keyword[return] ( identifier[query] , identifier[args] ) | def _BuildQuery(self, subject, attribute=None, timestamp=None, limit=None, is_prefix=False):
"""Build the SELECT query to be executed."""
args = []
subject = utils.SmartUnicode(subject)
criteria = 'WHERE aff4.subject_hash=unhex(md5(%s))'
args.append(subject)
sorting = ''
tables = 'FROM aff4'
# Set fields, tables, and criteria and append args
if attribute is not None:
if is_prefix:
tables += ' JOIN attributes ON aff4.attribute_hash=attributes.hash'
prefix = attribute + '%'
criteria += ' AND attributes.attribute like %s'
args.append(prefix) # depends on [control=['if'], data=[]]
else:
criteria += ' AND aff4.attribute_hash=unhex(md5(%s))'
args.append(attribute) # depends on [control=['if'], data=['attribute']]
# Limit to time range if specified
if isinstance(timestamp, (tuple, list)):
criteria += ' AND aff4.timestamp >= %s AND aff4.timestamp <= %s'
args.append(int(timestamp[0]))
args.append(int(timestamp[1])) # depends on [control=['if'], data=[]]
fields = 'aff4.value, aff4.timestamp'
if is_prefix:
fields += ', attributes.attribute' # depends on [control=['if'], data=[]]
# Modify fields and sorting for timestamps.
if timestamp is None or timestamp == self.NEWEST_TIMESTAMP:
tables += ' JOIN (SELECT attribute_hash, MAX(timestamp) timestamp %s %s GROUP BY attribute_hash) maxtime ON aff4.attribute_hash=maxtime.attribute_hash AND aff4.timestamp=maxtime.timestamp' % (tables, criteria)
criteria = 'WHERE aff4.subject_hash=unhex(md5(%s))'
args.append(subject) # depends on [control=['if'], data=[]]
else:
# Always order results.
sorting = 'ORDER BY aff4.timestamp DESC'
# Add limit if set.
if limit:
sorting += ' LIMIT %s' % int(limit) # depends on [control=['if'], data=[]]
query = ' '.join(['SELECT', fields, tables, criteria, sorting])
return (query, args) |
def ToJsonString(self):
"""Converts Timestamp to RFC 3339 date string format.
Returns:
A string converted from timestamp. The string is always Z-normalized
and uses 3, 6 or 9 fractional digits as required to represent the
exact time. Example of the return format: '1972-01-01T10:00:20.021Z'
"""
nanos = self.nanos % _NANOS_PER_SECOND
total_sec = self.seconds + (self.nanos - nanos) // _NANOS_PER_SECOND
seconds = total_sec % _SECONDS_PER_DAY
days = (total_sec - seconds) // _SECONDS_PER_DAY
dt = datetime(1970, 1, 1) + timedelta(days, seconds)
result = dt.isoformat()
if (nanos % 1e9) == 0:
# If there are 0 fractional digits, the fractional
# point '.' should be omitted when serializing.
return result + 'Z'
if (nanos % 1e6) == 0:
# Serialize 3 fractional digits.
return result + '.%03dZ' % (nanos / 1e6)
if (nanos % 1e3) == 0:
# Serialize 6 fractional digits.
return result + '.%06dZ' % (nanos / 1e3)
# Serialize 9 fractional digits.
return result + '.%09dZ' % nanos | def function[ToJsonString, parameter[self]]:
constant[Converts Timestamp to RFC 3339 date string format.
Returns:
A string converted from timestamp. The string is always Z-normalized
and uses 3, 6 or 9 fractional digits as required to represent the
exact time. Example of the return format: '1972-01-01T10:00:20.021Z'
]
variable[nanos] assign[=] binary_operation[name[self].nanos <ast.Mod object at 0x7da2590d6920> name[_NANOS_PER_SECOND]]
variable[total_sec] assign[=] binary_operation[name[self].seconds + binary_operation[binary_operation[name[self].nanos - name[nanos]] <ast.FloorDiv object at 0x7da2590d6bc0> name[_NANOS_PER_SECOND]]]
variable[seconds] assign[=] binary_operation[name[total_sec] <ast.Mod object at 0x7da2590d6920> name[_SECONDS_PER_DAY]]
variable[days] assign[=] binary_operation[binary_operation[name[total_sec] - name[seconds]] <ast.FloorDiv object at 0x7da2590d6bc0> name[_SECONDS_PER_DAY]]
variable[dt] assign[=] binary_operation[call[name[datetime], parameter[constant[1970], constant[1], constant[1]]] + call[name[timedelta], parameter[name[days], name[seconds]]]]
variable[result] assign[=] call[name[dt].isoformat, parameter[]]
if compare[binary_operation[name[nanos] <ast.Mod object at 0x7da2590d6920> constant[1000000000.0]] equal[==] constant[0]] begin[:]
return[binary_operation[name[result] + constant[Z]]]
if compare[binary_operation[name[nanos] <ast.Mod object at 0x7da2590d6920> constant[1000000.0]] equal[==] constant[0]] begin[:]
return[binary_operation[name[result] + binary_operation[constant[.%03dZ] <ast.Mod object at 0x7da2590d6920> binary_operation[name[nanos] / constant[1000000.0]]]]]
if compare[binary_operation[name[nanos] <ast.Mod object at 0x7da2590d6920> constant[1000.0]] equal[==] constant[0]] begin[:]
return[binary_operation[name[result] + binary_operation[constant[.%06dZ] <ast.Mod object at 0x7da2590d6920> binary_operation[name[nanos] / constant[1000.0]]]]]
return[binary_operation[name[result] + binary_operation[constant[.%09dZ] <ast.Mod object at 0x7da2590d6920> name[nanos]]]] | keyword[def] identifier[ToJsonString] ( identifier[self] ):
literal[string]
identifier[nanos] = identifier[self] . identifier[nanos] % identifier[_NANOS_PER_SECOND]
identifier[total_sec] = identifier[self] . identifier[seconds] +( identifier[self] . identifier[nanos] - identifier[nanos] )// identifier[_NANOS_PER_SECOND]
identifier[seconds] = identifier[total_sec] % identifier[_SECONDS_PER_DAY]
identifier[days] =( identifier[total_sec] - identifier[seconds] )// identifier[_SECONDS_PER_DAY]
identifier[dt] = identifier[datetime] ( literal[int] , literal[int] , literal[int] )+ identifier[timedelta] ( identifier[days] , identifier[seconds] )
identifier[result] = identifier[dt] . identifier[isoformat] ()
keyword[if] ( identifier[nanos] % literal[int] )== literal[int] :
keyword[return] identifier[result] + literal[string]
keyword[if] ( identifier[nanos] % literal[int] )== literal[int] :
keyword[return] identifier[result] + literal[string] %( identifier[nanos] / literal[int] )
keyword[if] ( identifier[nanos] % literal[int] )== literal[int] :
keyword[return] identifier[result] + literal[string] %( identifier[nanos] / literal[int] )
keyword[return] identifier[result] + literal[string] % identifier[nanos] | def ToJsonString(self):
"""Converts Timestamp to RFC 3339 date string format.
Returns:
A string converted from timestamp. The string is always Z-normalized
and uses 3, 6 or 9 fractional digits as required to represent the
exact time. Example of the return format: '1972-01-01T10:00:20.021Z'
"""
nanos = self.nanos % _NANOS_PER_SECOND
total_sec = self.seconds + (self.nanos - nanos) // _NANOS_PER_SECOND
seconds = total_sec % _SECONDS_PER_DAY
days = (total_sec - seconds) // _SECONDS_PER_DAY
dt = datetime(1970, 1, 1) + timedelta(days, seconds)
result = dt.isoformat()
if nanos % 1000000000.0 == 0:
# If there are 0 fractional digits, the fractional
# point '.' should be omitted when serializing.
return result + 'Z' # depends on [control=['if'], data=[]]
if nanos % 1000000.0 == 0:
# Serialize 3 fractional digits.
return result + '.%03dZ' % (nanos / 1000000.0) # depends on [control=['if'], data=[]]
if nanos % 1000.0 == 0:
# Serialize 6 fractional digits.
return result + '.%06dZ' % (nanos / 1000.0) # depends on [control=['if'], data=[]]
# Serialize 9 fractional digits.
return result + '.%09dZ' % nanos |
def logistic_regression(X, y, coef_only=False, alpha=0.05,
as_dataframe=True, remove_na=False, **kwargs):
"""(Multiple) Binary logistic regression.
Parameters
----------
X : np.array or list
Predictor(s). Shape = (n_samples, n_features) or (n_samples,).
y : np.array or list
Dependent variable. Shape = (n_samples).
Must be binary.
coef_only : bool
If True, return only the regression coefficients.
alpha : float
Alpha value used for the confidence intervals.
CI = [alpha / 2 ; 1 - alpha / 2]
as_dataframe : bool
If True, returns a pandas DataFrame. If False, returns a dictionnary.
remove_na : bool
If True, apply a listwise deletion of missing values (i.e. the entire
row is removed).
**kwargs : optional
Optional arguments passed to sklearn.linear_model.LogisticRegression.
Returns
-------
stats : dataframe or dict
Logistic regression summary::
'names' : name of variable(s) in the model (e.g. x1, x2...)
'coef' : regression coefficients
'se' : standard error
'z' : z-scores
'pval' : two-tailed p-values
'CI[2.5%]' : lower confidence interval
'CI[97.5%]' : upper confidence interval
Notes
-----
This is a wrapper around the
:py:class:`sklearn.linear_model.LogisticRegression` class.
Results have been compared against statsmodels and JASP.
Note that the first coefficient is always the constant term (intercept) of
the model.
This function will not run if NaN values are either present in the target
or predictors variables. Please remove them before runing the function.
Adapted from a code found at
https://gist.github.com/rspeare/77061e6e317896be29c6de9a85db301d
Examples
--------
1. Simple binary logistic regression
>>> import numpy as np
>>> from pingouin import logistic_regression
>>> np.random.seed(123)
>>> x = np.random.normal(size=30)
>>> y = np.random.randint(0, 2, size=30)
>>> lom = logistic_regression(x, y)
>>> lom.round(2)
names coef se z pval CI[2.5%] CI[97.5%]
0 Intercept -0.27 0.37 -0.73 0.46 -0.99 0.45
1 x1 0.06 0.32 0.19 0.85 -0.56 0.68
2. Multiple binary logistic regression
>>> np.random.seed(42)
>>> z = np.random.normal(size=30)
>>> X = np.column_stack((x, z))
>>> lom = logistic_regression(X, y)
>>> print(lom['coef'].values)
[-0.34933805 -0.0226106 -0.39453532]
3. Using a Pandas DataFrame
>>> import pandas as pd
>>> df = pd.DataFrame({'x': x, 'y': y, 'z': z})
>>> lom = logistic_regression(df[['x', 'z']], df['y'])
>>> print(lom['coef'].values)
[-0.34933805 -0.0226106 -0.39453532]
4. Return only the coefficients
>>> logistic_regression(X, y, coef_only=True)
array([-0.34933805, -0.0226106 , -0.39453532])
4. Passing custom parameters to sklearn
>>> lom = logistic_regression(X, y, solver='sag', max_iter=10000)
>>> print(lom['coef'].values)
[-0.34941889 -0.02261911 -0.39451064]
"""
# Check that sklearn is installed
from pingouin.utils import _is_sklearn_installed
_is_sklearn_installed(raise_error=True)
from sklearn.linear_model import LogisticRegression
# Extract names if X is a Dataframe or Series
if isinstance(X, pd.DataFrame):
names = X.keys().tolist()
elif isinstance(X, pd.Series):
names = [X.name]
else:
names = []
assert 0 < alpha < 1
assert y.ndim == 1, 'y must be one-dimensional.'
# Convert to numpy array
X = np.asarray(X)
y = np.asarray(y)
# Add axis if only one-dimensional array
if X.ndim == 1:
X = X[..., np.newaxis]
# Check for NaN / Inf
if remove_na:
X, y = rm_na(X, y[..., np.newaxis], paired=True, axis='rows')
y = np.squeeze(y)
y_gd = np.isfinite(y).all()
X_gd = np.isfinite(X).all()
assert y_gd, 'Target variable contains NaN or Inf. Please remove them.'
assert X_gd, 'Predictors contains NaN or Inf. Please remove them.'
# Check that X and y have same length
assert y.shape[0] == X.shape[0], 'X and y must have same number of samples'
# Check that y is binary
if np.unique(y).size != 2:
raise ValueError('Dependent variable must be binary.')
if not names:
names = ['x' + str(i + 1) for i in range(X.shape[1])]
# Add intercept in names
names.insert(0, "Intercept")
# Initialize and fit
if 'solver' not in kwargs:
kwargs['solver'] = 'lbfgs'
if 'multi_class' not in kwargs:
kwargs['multi_class'] = 'auto'
lom = LogisticRegression(**kwargs)
lom.fit(X, y)
coef = np.append(lom.intercept_, lom.coef_)
if coef_only:
return coef
# Design matrix -- add intercept
X_design = np.column_stack((np.ones(X.shape[0]), X))
n, p = X_design.shape
# Fisher Information Matrix
denom = (2 * (1 + np.cosh(lom.decision_function(X))))
denom = np.tile(denom, (p, 1)).T
fim = np.dot((X_design / denom).T, X_design)
crao = np.linalg.inv(fim)
# Standard error and Z-scores
se = np.sqrt(np.diag(crao))
z_scores = coef / se
# Two-tailed p-values
pval = np.array([2 * norm.sf(abs(z)) for z in z_scores])
# Confidence intervals
crit = norm.ppf(1 - alpha / 2)
ll = coef - crit * se
ul = coef + crit * se
# Rename CI
ll_name = 'CI[%.1f%%]' % (100 * alpha / 2)
ul_name = 'CI[%.1f%%]' % (100 * (1 - alpha / 2))
# Create dict
stats = {'names': names, 'coef': coef, 'se': se, 'z': z_scores,
'pval': pval, ll_name: ll, ul_name: ul}
if as_dataframe:
return pd.DataFrame.from_dict(stats)
else:
return stats | def function[logistic_regression, parameter[X, y, coef_only, alpha, as_dataframe, remove_na]]:
constant[(Multiple) Binary logistic regression.
Parameters
----------
X : np.array or list
Predictor(s). Shape = (n_samples, n_features) or (n_samples,).
y : np.array or list
Dependent variable. Shape = (n_samples).
Must be binary.
coef_only : bool
If True, return only the regression coefficients.
alpha : float
Alpha value used for the confidence intervals.
CI = [alpha / 2 ; 1 - alpha / 2]
as_dataframe : bool
If True, returns a pandas DataFrame. If False, returns a dictionnary.
remove_na : bool
If True, apply a listwise deletion of missing values (i.e. the entire
row is removed).
**kwargs : optional
Optional arguments passed to sklearn.linear_model.LogisticRegression.
Returns
-------
stats : dataframe or dict
Logistic regression summary::
'names' : name of variable(s) in the model (e.g. x1, x2...)
'coef' : regression coefficients
'se' : standard error
'z' : z-scores
'pval' : two-tailed p-values
'CI[2.5%]' : lower confidence interval
'CI[97.5%]' : upper confidence interval
Notes
-----
This is a wrapper around the
:py:class:`sklearn.linear_model.LogisticRegression` class.
Results have been compared against statsmodels and JASP.
Note that the first coefficient is always the constant term (intercept) of
the model.
This function will not run if NaN values are either present in the target
or predictors variables. Please remove them before runing the function.
Adapted from a code found at
https://gist.github.com/rspeare/77061e6e317896be29c6de9a85db301d
Examples
--------
1. Simple binary logistic regression
>>> import numpy as np
>>> from pingouin import logistic_regression
>>> np.random.seed(123)
>>> x = np.random.normal(size=30)
>>> y = np.random.randint(0, 2, size=30)
>>> lom = logistic_regression(x, y)
>>> lom.round(2)
names coef se z pval CI[2.5%] CI[97.5%]
0 Intercept -0.27 0.37 -0.73 0.46 -0.99 0.45
1 x1 0.06 0.32 0.19 0.85 -0.56 0.68
2. Multiple binary logistic regression
>>> np.random.seed(42)
>>> z = np.random.normal(size=30)
>>> X = np.column_stack((x, z))
>>> lom = logistic_regression(X, y)
>>> print(lom['coef'].values)
[-0.34933805 -0.0226106 -0.39453532]
3. Using a Pandas DataFrame
>>> import pandas as pd
>>> df = pd.DataFrame({'x': x, 'y': y, 'z': z})
>>> lom = logistic_regression(df[['x', 'z']], df['y'])
>>> print(lom['coef'].values)
[-0.34933805 -0.0226106 -0.39453532]
4. Return only the coefficients
>>> logistic_regression(X, y, coef_only=True)
array([-0.34933805, -0.0226106 , -0.39453532])
4. Passing custom parameters to sklearn
>>> lom = logistic_regression(X, y, solver='sag', max_iter=10000)
>>> print(lom['coef'].values)
[-0.34941889 -0.02261911 -0.39451064]
]
from relative_module[pingouin.utils] import module[_is_sklearn_installed]
call[name[_is_sklearn_installed], parameter[]]
from relative_module[sklearn.linear_model] import module[LogisticRegression]
if call[name[isinstance], parameter[name[X], name[pd].DataFrame]] begin[:]
variable[names] assign[=] call[call[name[X].keys, parameter[]].tolist, parameter[]]
assert[compare[constant[0] less[<] name[alpha]]]
assert[compare[name[y].ndim equal[==] constant[1]]]
variable[X] assign[=] call[name[np].asarray, parameter[name[X]]]
variable[y] assign[=] call[name[np].asarray, parameter[name[y]]]
if compare[name[X].ndim equal[==] constant[1]] begin[:]
variable[X] assign[=] call[name[X]][tuple[[<ast.Constant object at 0x7da2054a7880>, <ast.Attribute object at 0x7da2054a57b0>]]]
if name[remove_na] begin[:]
<ast.Tuple object at 0x7da2054a6c20> assign[=] call[name[rm_na], parameter[name[X], call[name[y]][tuple[[<ast.Constant object at 0x7da2054a4580>, <ast.Attribute object at 0x7da2054a6bf0>]]]]]
variable[y] assign[=] call[name[np].squeeze, parameter[name[y]]]
variable[y_gd] assign[=] call[call[name[np].isfinite, parameter[name[y]]].all, parameter[]]
variable[X_gd] assign[=] call[call[name[np].isfinite, parameter[name[X]]].all, parameter[]]
assert[name[y_gd]]
assert[name[X_gd]]
assert[compare[call[name[y].shape][constant[0]] equal[==] call[name[X].shape][constant[0]]]]
if compare[call[name[np].unique, parameter[name[y]]].size not_equal[!=] constant[2]] begin[:]
<ast.Raise object at 0x7da2054a5390>
if <ast.UnaryOp object at 0x7da2054a5150> begin[:]
variable[names] assign[=] <ast.ListComp object at 0x7da2054a6e60>
call[name[names].insert, parameter[constant[0], constant[Intercept]]]
if compare[constant[solver] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[kwargs]][constant[solver]] assign[=] constant[lbfgs]
if compare[constant[multi_class] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[kwargs]][constant[multi_class]] assign[=] constant[auto]
variable[lom] assign[=] call[name[LogisticRegression], parameter[]]
call[name[lom].fit, parameter[name[X], name[y]]]
variable[coef] assign[=] call[name[np].append, parameter[name[lom].intercept_, name[lom].coef_]]
if name[coef_only] begin[:]
return[name[coef]]
variable[X_design] assign[=] call[name[np].column_stack, parameter[tuple[[<ast.Call object at 0x7da2054a5c90>, <ast.Name object at 0x7da2054a7d60>]]]]
<ast.Tuple object at 0x7da2054a4100> assign[=] name[X_design].shape
variable[denom] assign[=] binary_operation[constant[2] * binary_operation[constant[1] + call[name[np].cosh, parameter[call[name[lom].decision_function, parameter[name[X]]]]]]]
variable[denom] assign[=] call[name[np].tile, parameter[name[denom], tuple[[<ast.Name object at 0x7da2054a6050>, <ast.Constant object at 0x7da2054a7ac0>]]]].T
variable[fim] assign[=] call[name[np].dot, parameter[binary_operation[name[X_design] / name[denom]].T, name[X_design]]]
variable[crao] assign[=] call[name[np].linalg.inv, parameter[name[fim]]]
variable[se] assign[=] call[name[np].sqrt, parameter[call[name[np].diag, parameter[name[crao]]]]]
variable[z_scores] assign[=] binary_operation[name[coef] / name[se]]
variable[pval] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da18ede4cd0>]]
variable[crit] assign[=] call[name[norm].ppf, parameter[binary_operation[constant[1] - binary_operation[name[alpha] / constant[2]]]]]
variable[ll] assign[=] binary_operation[name[coef] - binary_operation[name[crit] * name[se]]]
variable[ul] assign[=] binary_operation[name[coef] + binary_operation[name[crit] * name[se]]]
variable[ll_name] assign[=] binary_operation[constant[CI[%.1f%%]] <ast.Mod object at 0x7da2590d6920> binary_operation[binary_operation[constant[100] * name[alpha]] / constant[2]]]
variable[ul_name] assign[=] binary_operation[constant[CI[%.1f%%]] <ast.Mod object at 0x7da2590d6920> binary_operation[constant[100] * binary_operation[constant[1] - binary_operation[name[alpha] / constant[2]]]]]
variable[stats] assign[=] dictionary[[<ast.Constant object at 0x7da18ede6500>, <ast.Constant object at 0x7da18ede7400>, <ast.Constant object at 0x7da18ede5b40>, <ast.Constant object at 0x7da18ede7970>, <ast.Constant object at 0x7da18ede4550>, <ast.Name object at 0x7da18ede61d0>, <ast.Name object at 0x7da18ede5f90>], [<ast.Name object at 0x7da18ede55d0>, <ast.Name object at 0x7da18ede5e10>, <ast.Name object at 0x7da18ede5540>, <ast.Name object at 0x7da18ede5630>, <ast.Name object at 0x7da18ede77f0>, <ast.Name object at 0x7da18ede65f0>, <ast.Name object at 0x7da18ede7fd0>]]
if name[as_dataframe] begin[:]
return[call[name[pd].DataFrame.from_dict, parameter[name[stats]]]] | keyword[def] identifier[logistic_regression] ( identifier[X] , identifier[y] , identifier[coef_only] = keyword[False] , identifier[alpha] = literal[int] ,
identifier[as_dataframe] = keyword[True] , identifier[remove_na] = keyword[False] ,** identifier[kwargs] ):
literal[string]
keyword[from] identifier[pingouin] . identifier[utils] keyword[import] identifier[_is_sklearn_installed]
identifier[_is_sklearn_installed] ( identifier[raise_error] = keyword[True] )
keyword[from] identifier[sklearn] . identifier[linear_model] keyword[import] identifier[LogisticRegression]
keyword[if] identifier[isinstance] ( identifier[X] , identifier[pd] . identifier[DataFrame] ):
identifier[names] = identifier[X] . identifier[keys] (). identifier[tolist] ()
keyword[elif] identifier[isinstance] ( identifier[X] , identifier[pd] . identifier[Series] ):
identifier[names] =[ identifier[X] . identifier[name] ]
keyword[else] :
identifier[names] =[]
keyword[assert] literal[int] < identifier[alpha] < literal[int]
keyword[assert] identifier[y] . identifier[ndim] == literal[int] , literal[string]
identifier[X] = identifier[np] . identifier[asarray] ( identifier[X] )
identifier[y] = identifier[np] . identifier[asarray] ( identifier[y] )
keyword[if] identifier[X] . identifier[ndim] == literal[int] :
identifier[X] = identifier[X] [..., identifier[np] . identifier[newaxis] ]
keyword[if] identifier[remove_na] :
identifier[X] , identifier[y] = identifier[rm_na] ( identifier[X] , identifier[y] [..., identifier[np] . identifier[newaxis] ], identifier[paired] = keyword[True] , identifier[axis] = literal[string] )
identifier[y] = identifier[np] . identifier[squeeze] ( identifier[y] )
identifier[y_gd] = identifier[np] . identifier[isfinite] ( identifier[y] ). identifier[all] ()
identifier[X_gd] = identifier[np] . identifier[isfinite] ( identifier[X] ). identifier[all] ()
keyword[assert] identifier[y_gd] , literal[string]
keyword[assert] identifier[X_gd] , literal[string]
keyword[assert] identifier[y] . identifier[shape] [ literal[int] ]== identifier[X] . identifier[shape] [ literal[int] ], literal[string]
keyword[if] identifier[np] . identifier[unique] ( identifier[y] ). identifier[size] != literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[names] :
identifier[names] =[ literal[string] + identifier[str] ( identifier[i] + literal[int] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[X] . identifier[shape] [ literal[int] ])]
identifier[names] . identifier[insert] ( literal[int] , literal[string] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= literal[string]
identifier[lom] = identifier[LogisticRegression] (** identifier[kwargs] )
identifier[lom] . identifier[fit] ( identifier[X] , identifier[y] )
identifier[coef] = identifier[np] . identifier[append] ( identifier[lom] . identifier[intercept_] , identifier[lom] . identifier[coef_] )
keyword[if] identifier[coef_only] :
keyword[return] identifier[coef]
identifier[X_design] = identifier[np] . identifier[column_stack] (( identifier[np] . identifier[ones] ( identifier[X] . identifier[shape] [ literal[int] ]), identifier[X] ))
identifier[n] , identifier[p] = identifier[X_design] . identifier[shape]
identifier[denom] =( literal[int] *( literal[int] + identifier[np] . identifier[cosh] ( identifier[lom] . identifier[decision_function] ( identifier[X] ))))
identifier[denom] = identifier[np] . identifier[tile] ( identifier[denom] ,( identifier[p] , literal[int] )). identifier[T]
identifier[fim] = identifier[np] . identifier[dot] (( identifier[X_design] / identifier[denom] ). identifier[T] , identifier[X_design] )
identifier[crao] = identifier[np] . identifier[linalg] . identifier[inv] ( identifier[fim] )
identifier[se] = identifier[np] . identifier[sqrt] ( identifier[np] . identifier[diag] ( identifier[crao] ))
identifier[z_scores] = identifier[coef] / identifier[se]
identifier[pval] = identifier[np] . identifier[array] ([ literal[int] * identifier[norm] . identifier[sf] ( identifier[abs] ( identifier[z] )) keyword[for] identifier[z] keyword[in] identifier[z_scores] ])
identifier[crit] = identifier[norm] . identifier[ppf] ( literal[int] - identifier[alpha] / literal[int] )
identifier[ll] = identifier[coef] - identifier[crit] * identifier[se]
identifier[ul] = identifier[coef] + identifier[crit] * identifier[se]
identifier[ll_name] = literal[string] %( literal[int] * identifier[alpha] / literal[int] )
identifier[ul_name] = literal[string] %( literal[int] *( literal[int] - identifier[alpha] / literal[int] ))
identifier[stats] ={ literal[string] : identifier[names] , literal[string] : identifier[coef] , literal[string] : identifier[se] , literal[string] : identifier[z_scores] ,
literal[string] : identifier[pval] , identifier[ll_name] : identifier[ll] , identifier[ul_name] : identifier[ul] }
keyword[if] identifier[as_dataframe] :
keyword[return] identifier[pd] . identifier[DataFrame] . identifier[from_dict] ( identifier[stats] )
keyword[else] :
keyword[return] identifier[stats] | def logistic_regression(X, y, coef_only=False, alpha=0.05, as_dataframe=True, remove_na=False, **kwargs):
"""(Multiple) Binary logistic regression.
Parameters
----------
X : np.array or list
Predictor(s). Shape = (n_samples, n_features) or (n_samples,).
y : np.array or list
Dependent variable. Shape = (n_samples).
Must be binary.
coef_only : bool
If True, return only the regression coefficients.
alpha : float
Alpha value used for the confidence intervals.
CI = [alpha / 2 ; 1 - alpha / 2]
as_dataframe : bool
If True, returns a pandas DataFrame. If False, returns a dictionnary.
remove_na : bool
If True, apply a listwise deletion of missing values (i.e. the entire
row is removed).
**kwargs : optional
Optional arguments passed to sklearn.linear_model.LogisticRegression.
Returns
-------
stats : dataframe or dict
Logistic regression summary::
'names' : name of variable(s) in the model (e.g. x1, x2...)
'coef' : regression coefficients
'se' : standard error
'z' : z-scores
'pval' : two-tailed p-values
'CI[2.5%]' : lower confidence interval
'CI[97.5%]' : upper confidence interval
Notes
-----
This is a wrapper around the
:py:class:`sklearn.linear_model.LogisticRegression` class.
Results have been compared against statsmodels and JASP.
Note that the first coefficient is always the constant term (intercept) of
the model.
This function will not run if NaN values are either present in the target
or predictors variables. Please remove them before runing the function.
Adapted from a code found at
https://gist.github.com/rspeare/77061e6e317896be29c6de9a85db301d
Examples
--------
1. Simple binary logistic regression
>>> import numpy as np
>>> from pingouin import logistic_regression
>>> np.random.seed(123)
>>> x = np.random.normal(size=30)
>>> y = np.random.randint(0, 2, size=30)
>>> lom = logistic_regression(x, y)
>>> lom.round(2)
names coef se z pval CI[2.5%] CI[97.5%]
0 Intercept -0.27 0.37 -0.73 0.46 -0.99 0.45
1 x1 0.06 0.32 0.19 0.85 -0.56 0.68
2. Multiple binary logistic regression
>>> np.random.seed(42)
>>> z = np.random.normal(size=30)
>>> X = np.column_stack((x, z))
>>> lom = logistic_regression(X, y)
>>> print(lom['coef'].values)
[-0.34933805 -0.0226106 -0.39453532]
3. Using a Pandas DataFrame
>>> import pandas as pd
>>> df = pd.DataFrame({'x': x, 'y': y, 'z': z})
>>> lom = logistic_regression(df[['x', 'z']], df['y'])
>>> print(lom['coef'].values)
[-0.34933805 -0.0226106 -0.39453532]
4. Return only the coefficients
>>> logistic_regression(X, y, coef_only=True)
array([-0.34933805, -0.0226106 , -0.39453532])
4. Passing custom parameters to sklearn
>>> lom = logistic_regression(X, y, solver='sag', max_iter=10000)
>>> print(lom['coef'].values)
[-0.34941889 -0.02261911 -0.39451064]
"""
# Check that sklearn is installed
from pingouin.utils import _is_sklearn_installed
_is_sklearn_installed(raise_error=True)
from sklearn.linear_model import LogisticRegression
# Extract names if X is a Dataframe or Series
if isinstance(X, pd.DataFrame):
names = X.keys().tolist() # depends on [control=['if'], data=[]]
elif isinstance(X, pd.Series):
names = [X.name] # depends on [control=['if'], data=[]]
else:
names = []
assert 0 < alpha < 1
assert y.ndim == 1, 'y must be one-dimensional.'
# Convert to numpy array
X = np.asarray(X)
y = np.asarray(y)
# Add axis if only one-dimensional array
if X.ndim == 1:
X = X[..., np.newaxis] # depends on [control=['if'], data=[]]
# Check for NaN / Inf
if remove_na:
(X, y) = rm_na(X, y[..., np.newaxis], paired=True, axis='rows')
y = np.squeeze(y) # depends on [control=['if'], data=[]]
y_gd = np.isfinite(y).all()
X_gd = np.isfinite(X).all()
assert y_gd, 'Target variable contains NaN or Inf. Please remove them.'
assert X_gd, 'Predictors contains NaN or Inf. Please remove them.'
# Check that X and y have same length
assert y.shape[0] == X.shape[0], 'X and y must have same number of samples'
# Check that y is binary
if np.unique(y).size != 2:
raise ValueError('Dependent variable must be binary.') # depends on [control=['if'], data=[]]
if not names:
names = ['x' + str(i + 1) for i in range(X.shape[1])] # depends on [control=['if'], data=[]]
# Add intercept in names
names.insert(0, 'Intercept')
# Initialize and fit
if 'solver' not in kwargs:
kwargs['solver'] = 'lbfgs' # depends on [control=['if'], data=['kwargs']]
if 'multi_class' not in kwargs:
kwargs['multi_class'] = 'auto' # depends on [control=['if'], data=['kwargs']]
lom = LogisticRegression(**kwargs)
lom.fit(X, y)
coef = np.append(lom.intercept_, lom.coef_)
if coef_only:
return coef # depends on [control=['if'], data=[]]
# Design matrix -- add intercept
X_design = np.column_stack((np.ones(X.shape[0]), X))
(n, p) = X_design.shape
# Fisher Information Matrix
denom = 2 * (1 + np.cosh(lom.decision_function(X)))
denom = np.tile(denom, (p, 1)).T
fim = np.dot((X_design / denom).T, X_design)
crao = np.linalg.inv(fim)
# Standard error and Z-scores
se = np.sqrt(np.diag(crao))
z_scores = coef / se
# Two-tailed p-values
pval = np.array([2 * norm.sf(abs(z)) for z in z_scores])
# Confidence intervals
crit = norm.ppf(1 - alpha / 2)
ll = coef - crit * se
ul = coef + crit * se
# Rename CI
ll_name = 'CI[%.1f%%]' % (100 * alpha / 2)
ul_name = 'CI[%.1f%%]' % (100 * (1 - alpha / 2))
# Create dict
stats = {'names': names, 'coef': coef, 'se': se, 'z': z_scores, 'pval': pval, ll_name: ll, ul_name: ul}
if as_dataframe:
return pd.DataFrame.from_dict(stats) # depends on [control=['if'], data=[]]
else:
return stats |
def filterItems(self,
                terms,
                caseSensitive=False):
    """
    Filters the items in this tree based on the inputed text.
    
    :param terms | <str> || {<str> datatype: [<str> opt, ..]}
           caseSensitive | <bool>
    """
    # normalize a plain string into the wildcard ("match any datatype") form
    if type(terms) != dict:
        terms = {'*': nativestring(terms)}

    # expand a string-valued wildcard entry into per-datatype keyword lists
    if '*' in terms and type(terms['*']) != list:
        all_text = nativestring(terms['*'])
        if not all_text.strip():
            terms.pop('*')
        else:
            # pull out explicit "datatype: a,b" clauses first
            for match, dtype, values in DATATYPE_FILTER_EXPR.findall(all_text):
                all_text = all_text.replace(match, '')
                terms.setdefault(dtype, [])
                terms[dtype] += values.split(',')

            # whatever text remains becomes plain wildcard keywords
            words = all_text.replace(',', '').split()
            while '' in words:
                words.remove('')
            terms['*'] = words

    # keep only the data types this tree is currently filtering on
    active_types = self.filteredDataTypes()
    filter_terms = {}
    for dtype, keywords in terms.items():
        if dtype != '*' and not dtype in active_types:
            continue

        if caseSensitive:
            keywords = map(nativestring, keywords)
        else:
            keywords = [nativestring(word).lower() for word in keywords]

        filter_terms[dtype] = keywords

    self.__filterItems(filter_terms, caseSensitive)
constant[
Filters the items in this tree based on the inputed text.
:param terms | <str> || {<str> datatype: [<str> opt, ..]}
caseSensitive | <bool>
]
if compare[call[name[type], parameter[name[terms]]] not_equal[!=] name[dict]] begin[:]
variable[terms] assign[=] dictionary[[<ast.Constant object at 0x7da1b24e1300>], [<ast.Call object at 0x7da1b24e1600>]]
if <ast.BoolOp object at 0x7da1b24e0fd0> begin[:]
variable[sterms] assign[=] call[name[nativestring], parameter[call[name[terms]][constant[*]]]]
if <ast.UnaryOp object at 0x7da1b24ae680> begin[:]
call[name[terms].pop, parameter[constant[*]]]
variable[filtered_dtypes] assign[=] call[name[self].filteredDataTypes, parameter[]]
variable[filter_terms] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b24473a0>, <ast.Name object at 0x7da1b2447340>]]] in starred[call[name[terms].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b2444520> begin[:]
continue
if <ast.UnaryOp object at 0x7da1b2446110> begin[:]
variable[keywords] assign[=] <ast.ListComp object at 0x7da1b24462c0>
call[name[filter_terms]][name[dtype]] assign[=] name[keywords]
call[name[self].__filterItems, parameter[name[filter_terms], name[caseSensitive]]] | keyword[def] identifier[filterItems] ( identifier[self] ,
identifier[terms] ,
identifier[caseSensitive] = keyword[False] ):
literal[string]
keyword[if] identifier[type] ( identifier[terms] )!= identifier[dict] :
identifier[terms] ={ literal[string] : identifier[nativestring] ( identifier[terms] )}
keyword[if] literal[string] keyword[in] identifier[terms] keyword[and] identifier[type] ( identifier[terms] [ literal[string] ])!= identifier[list] :
identifier[sterms] = identifier[nativestring] ( identifier[terms] [ literal[string] ])
keyword[if] keyword[not] identifier[sterms] . identifier[strip] ():
identifier[terms] . identifier[pop] ( literal[string] )
keyword[else] :
identifier[dtype_matches] = identifier[DATATYPE_FILTER_EXPR] . identifier[findall] ( identifier[sterms] )
keyword[for] identifier[match] , identifier[dtype] , identifier[values] keyword[in] identifier[dtype_matches] :
identifier[sterms] = identifier[sterms] . identifier[replace] ( identifier[match] , literal[string] )
identifier[terms] . identifier[setdefault] ( identifier[dtype] ,[])
identifier[terms] [ identifier[dtype] ]+= identifier[values] . identifier[split] ( literal[string] )
identifier[keywords] = identifier[sterms] . identifier[replace] ( literal[string] , literal[string] ). identifier[split] ()
keyword[while] literal[string] keyword[in] identifier[keywords] :
identifier[keywords] . identifier[remove] ( literal[string] )
identifier[terms] [ literal[string] ]= identifier[keywords]
identifier[filtered_dtypes] = identifier[self] . identifier[filteredDataTypes] ()
identifier[filter_terms] ={}
keyword[for] identifier[dtype] , identifier[keywords] keyword[in] identifier[terms] . identifier[items] ():
keyword[if] identifier[dtype] != literal[string] keyword[and] keyword[not] identifier[dtype] keyword[in] identifier[filtered_dtypes] :
keyword[continue]
keyword[if] keyword[not] identifier[caseSensitive] :
identifier[keywords] =[ identifier[nativestring] ( identifier[keyword] ). identifier[lower] () keyword[for] identifier[keyword] keyword[in] identifier[keywords] ]
keyword[else] :
identifier[keywords] = identifier[map] ( identifier[nativestring] , identifier[keywords] )
identifier[filter_terms] [ identifier[dtype] ]= identifier[keywords]
identifier[self] . identifier[__filterItems] ( identifier[filter_terms] , identifier[caseSensitive] ) | def filterItems(self, terms, caseSensitive=False):
"""
Filters the items in this tree based on the inputed text.
:param terms | <str> || {<str> datatype: [<str> opt, ..]}
caseSensitive | <bool>
"""
# create a dictionary of options
if type(terms) != dict:
terms = {'*': nativestring(terms)} # depends on [control=['if'], data=[]]
# validate the "all search"
if '*' in terms and type(terms['*']) != list:
sterms = nativestring(terms['*'])
if not sterms.strip():
terms.pop('*') # depends on [control=['if'], data=[]]
else:
dtype_matches = DATATYPE_FILTER_EXPR.findall(sterms)
# generate the filter for each data type
for (match, dtype, values) in dtype_matches:
sterms = sterms.replace(match, '')
terms.setdefault(dtype, [])
terms[dtype] += values.split(',') # depends on [control=['for'], data=[]]
keywords = sterms.replace(',', '').split()
while '' in keywords:
keywords.remove('') # depends on [control=['while'], data=['keywords']]
terms['*'] = keywords # depends on [control=['if'], data=[]]
# filter out any data types that are not being searched
filtered_dtypes = self.filteredDataTypes()
filter_terms = {}
for (dtype, keywords) in terms.items():
if dtype != '*' and (not dtype in filtered_dtypes):
continue # depends on [control=['if'], data=[]]
if not caseSensitive:
keywords = [nativestring(keyword).lower() for keyword in keywords] # depends on [control=['if'], data=[]]
else:
keywords = map(nativestring, keywords)
filter_terms[dtype] = keywords # depends on [control=['for'], data=[]]
self.__filterItems(filter_terms, caseSensitive) |
def _update_all_devices(self):
"""Update the all_devices list."""
self.all_devices = []
self.all_devices.extend(self.keyboards)
self.all_devices.extend(self.mice)
self.all_devices.extend(self.gamepads)
self.all_devices.extend(self.other_devices) | def function[_update_all_devices, parameter[self]]:
constant[Update the all_devices list.]
name[self].all_devices assign[=] list[[]]
call[name[self].all_devices.extend, parameter[name[self].keyboards]]
call[name[self].all_devices.extend, parameter[name[self].mice]]
call[name[self].all_devices.extend, parameter[name[self].gamepads]]
call[name[self].all_devices.extend, parameter[name[self].other_devices]] | keyword[def] identifier[_update_all_devices] ( identifier[self] ):
literal[string]
identifier[self] . identifier[all_devices] =[]
identifier[self] . identifier[all_devices] . identifier[extend] ( identifier[self] . identifier[keyboards] )
identifier[self] . identifier[all_devices] . identifier[extend] ( identifier[self] . identifier[mice] )
identifier[self] . identifier[all_devices] . identifier[extend] ( identifier[self] . identifier[gamepads] )
identifier[self] . identifier[all_devices] . identifier[extend] ( identifier[self] . identifier[other_devices] ) | def _update_all_devices(self):
"""Update the all_devices list."""
self.all_devices = []
self.all_devices.extend(self.keyboards)
self.all_devices.extend(self.mice)
self.all_devices.extend(self.gamepads)
self.all_devices.extend(self.other_devices) |
def motif3struct_wei(W):
    '''
    Structural motifs are patterns of local connectivity. Motif frequency
    is the frequency of occurrence of motifs around a node. Motif intensity
    and coherence are weighted generalizations of motif frequency.

    Parameters
    ----------
    W : NxN np.ndarray
        weighted directed connection matrix (all weights between 0 and 1)

    Returns
    -------
    I : 13xN np.ndarray
        motif intensity matrix
    Q : 13xN np.ndarray
        motif coherence matrix
    F : 13xN np.ndarray
        motif frequency matrix

    Notes
    -----
    Average intensity and coherence are given by I./F and Q./F.
    '''
    from scipy import io
    import os
    # load the precomputed 3-node motif library shipped with the package:
    # m3 (edge patterns), m3n (numeric motif codes), id3 (motif class ids),
    # n3 (edge counts per motif)
    fname = os.path.join(os.path.dirname(__file__), motiflib)
    mot = io.loadmat(fname)
    m3 = mot['m3']
    m3n = mot['m3n']
    id3 = mot['id3'].squeeze()
    n3 = mot['n3'].squeeze()

    n = len(W)  # number of vertices in W
    I = np.zeros((13, n))  # intensity
    Q = np.zeros((13, n))  # coherence
    F = np.zeros((13, n))  # frequency

    A = binarize(W, copy=True)  # create binary adjmat
    As = np.logical_or(A, A.T)  # symmetrized adjmat

    for u in range(n - 2):
        # v1: neighbors of u (>u)
        V1 = np.append(np.zeros((u,), dtype=int), As[u, u + 1:n + 1])
        for v1 in np.where(V1)[0]:
            # v2: neighbors of v1 (>u)
            V2 = np.append(np.zeros((u,), dtype=int), As[v1, u + 1:n + 1])
            # NOTE(review): V1 is an int 0/1 array, so this is fancy (not
            # boolean) indexing; kept as in the reference implementation,
            # but may warrant verification upstream.
            V2[V1] = 0  # not already in V1
            # and all neighbors of u (>v1)
            V2 = np.logical_or(
                np.append(np.zeros((v1,)), As[u, v1 + 1:n + 1]), V2)
            for v2 in np.where(V2)[0]:
                # binary edge pattern of the (u, v1, v2) subgraph.
                # FIX: last entry was A[v1, 2]; it must be A[v1, v2] to
                # match the weight vector w below.
                a = np.array((A[v1, u], A[v2, u], A[u, v1],
                              A[v2, v1], A[u, v2], A[v1, v2]))
                # encode the six edges as a base-10 number and look up
                # the matching motif in the library
                s = np.uint32(np.sum(np.power(10, np.arange(5, -1, -1)) * a))
                ix = np.squeeze(s == m3n)

                w = np.array((W[v1, u], W[v2, u], W[u, v1],
                              W[v2, v1], W[u, v2], W[v1, v2]))

                M = w * m3[ix, :]
                id = id3[ix] - 1
                l = n3[ix]
                x = np.sum(M, axis=1) / l  # arithmetic mean
                M[M == 0] = 1  # enable geometric mean
                i = np.prod(M, axis=1)**(1 / l)  # intensity
                q = i / x  # coherence

                # add to cumulative counts for all three participating nodes
                I[id, u] += i
                I[id, v1] += i
                I[id, v2] += i
                Q[id, u] += q
                Q[id, v1] += q
                Q[id, v2] += q
                F[id, u] += 1
                F[id, v1] += 1
                # FIX: v2's frequency was previously added to v1 a second
                # time, leaving F[:, v2] undercounted
                F[id, v2] += 1

    return I, Q, F
constant[
Structural motifs are patterns of local connectivity. Motif frequency
is the frequency of occurrence of motifs around a node. Motif intensity
and coherence are weighted generalizations of motif frequency.
Parameters
----------
W : NxN np.ndarray
weighted directed connection matrix (all weights between 0 and 1)
Returns
-------
I : 13xN np.ndarray
motif intensity matrix
Q : 13xN np.ndarray
motif coherence matrix
F : 13xN np.ndarray
motif frequency matrix
Notes
-----
Average intensity and coherence are given by I./F and Q./F.
]
from relative_module[scipy] import module[io]
import module[os]
variable[fname] assign[=] call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[__file__]]], name[motiflib]]]
variable[mot] assign[=] call[name[io].loadmat, parameter[name[fname]]]
variable[m3] assign[=] call[name[mot]][constant[m3]]
variable[m3n] assign[=] call[name[mot]][constant[m3n]]
variable[id3] assign[=] call[call[name[mot]][constant[id3]].squeeze, parameter[]]
variable[n3] assign[=] call[call[name[mot]][constant[n3]].squeeze, parameter[]]
variable[n] assign[=] call[name[len], parameter[name[W]]]
variable[I] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Constant object at 0x7da1b07ae800>, <ast.Name object at 0x7da1b07ae7d0>]]]]
variable[Q] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Constant object at 0x7da1b07ae680>, <ast.Name object at 0x7da1b07ae650>]]]]
variable[F] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Constant object at 0x7da1b07ae500>, <ast.Name object at 0x7da1b07ae4d0>]]]]
variable[A] assign[=] call[name[binarize], parameter[name[W]]]
variable[As] assign[=] call[name[np].logical_or, parameter[name[A], name[A].T]]
for taget[name[u]] in starred[call[name[range], parameter[binary_operation[name[n] - constant[2]]]]] begin[:]
variable[V1] assign[=] call[name[np].append, parameter[call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b07aded0>]]]], call[name[As]][tuple[[<ast.Name object at 0x7da1b07addb0>, <ast.Slice object at 0x7da1b07add80>]]]]]
for taget[name[v1]] in starred[call[call[name[np].where, parameter[name[V1]]]][constant[0]]] begin[:]
variable[V2] assign[=] call[name[np].append, parameter[call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b07ad900>]]]], call[name[As]][tuple[[<ast.Name object at 0x7da1b07ad7e0>, <ast.Slice object at 0x7da1b07ad7b0>]]]]]
call[name[V2]][name[V1]] assign[=] constant[0]
variable[V2] assign[=] call[name[np].logical_or, parameter[call[name[np].append, parameter[call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b07ad330>]]]], call[name[As]][tuple[[<ast.Name object at 0x7da1b07ad270>, <ast.Slice object at 0x7da1b07ad240>]]]]], name[V2]]]
for taget[name[v2]] in starred[call[call[name[np].where, parameter[name[V2]]]][constant[0]]] begin[:]
variable[a] assign[=] call[name[np].array, parameter[tuple[[<ast.Subscript object at 0x7da1b07acdf0>, <ast.Subscript object at 0x7da1b07acd00>, <ast.Subscript object at 0x7da1b07acc10>, <ast.Subscript object at 0x7da1b07acb20>, <ast.Subscript object at 0x7da1b07aca30>, <ast.Subscript object at 0x7da1b07ac940>]]]]
variable[s] assign[=] call[name[np].uint32, parameter[call[name[np].sum, parameter[binary_operation[call[name[np].power, parameter[constant[10], call[name[np].arange, parameter[constant[5], <ast.UnaryOp object at 0x7da1b07ac520>, <ast.UnaryOp object at 0x7da1b07ac4c0>]]]] * name[a]]]]]]
variable[ix] assign[=] call[name[np].squeeze, parameter[compare[name[s] equal[==] name[m3n]]]]
variable[w] assign[=] call[name[np].array, parameter[tuple[[<ast.Subscript object at 0x7da1b0831480>, <ast.Subscript object at 0x7da1b0831fc0>, <ast.Subscript object at 0x7da1b0772080>, <ast.Subscript object at 0x7da1b0772170>, <ast.Subscript object at 0x7da1b0772260>, <ast.Subscript object at 0x7da1b0772350>]]]]
variable[M] assign[=] binary_operation[name[w] * call[name[m3]][tuple[[<ast.Name object at 0x7da1b0772590>, <ast.Slice object at 0x7da1b07725c0>]]]]
variable[id] assign[=] binary_operation[call[name[id3]][name[ix]] - constant[1]]
variable[l] assign[=] call[name[n3]][name[ix]]
variable[x] assign[=] binary_operation[call[name[np].sum, parameter[name[M]]] / name[l]]
call[name[M]][compare[name[M] equal[==] constant[0]]] assign[=] constant[1]
variable[i] assign[=] binary_operation[call[name[np].prod, parameter[name[M]]] ** binary_operation[constant[1] / name[l]]]
variable[q] assign[=] binary_operation[name[i] / name[x]]
<ast.AugAssign object at 0x7da1b0797b80>
<ast.AugAssign object at 0x7da1b0795f90>
<ast.AugAssign object at 0x7da1b0795570>
<ast.AugAssign object at 0x7da1b07940a0>
<ast.AugAssign object at 0x7da1b07940d0>
<ast.AugAssign object at 0x7da1b0795630>
<ast.AugAssign object at 0x7da1b070f010>
<ast.AugAssign object at 0x7da1b070f430>
<ast.AugAssign object at 0x7da1b070f460>
return[tuple[[<ast.Name object at 0x7da1b070ee60>, <ast.Name object at 0x7da1b070ee00>, <ast.Name object at 0x7da1b070ee30>]]] | keyword[def] identifier[motif3struct_wei] ( identifier[W] ):
literal[string]
keyword[from] identifier[scipy] keyword[import] identifier[io]
keyword[import] identifier[os]
identifier[fname] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] ), identifier[motiflib] )
identifier[mot] = identifier[io] . identifier[loadmat] ( identifier[fname] )
identifier[m3] = identifier[mot] [ literal[string] ]
identifier[m3n] = identifier[mot] [ literal[string] ]
identifier[id3] = identifier[mot] [ literal[string] ]. identifier[squeeze] ()
identifier[n3] = identifier[mot] [ literal[string] ]. identifier[squeeze] ()
identifier[n] = identifier[len] ( identifier[W] )
identifier[I] = identifier[np] . identifier[zeros] (( literal[int] , identifier[n] ))
identifier[Q] = identifier[np] . identifier[zeros] (( literal[int] , identifier[n] ))
identifier[F] = identifier[np] . identifier[zeros] (( literal[int] , identifier[n] ))
identifier[A] = identifier[binarize] ( identifier[W] , identifier[copy] = keyword[True] )
identifier[As] = identifier[np] . identifier[logical_or] ( identifier[A] , identifier[A] . identifier[T] )
keyword[for] identifier[u] keyword[in] identifier[range] ( identifier[n] - literal[int] ):
identifier[V1] = identifier[np] . identifier[append] ( identifier[np] . identifier[zeros] (( identifier[u] ,), identifier[dtype] = identifier[int] ), identifier[As] [ identifier[u] , identifier[u] + literal[int] : identifier[n] + literal[int] ])
keyword[for] identifier[v1] keyword[in] identifier[np] . identifier[where] ( identifier[V1] )[ literal[int] ]:
identifier[V2] = identifier[np] . identifier[append] ( identifier[np] . identifier[zeros] (( identifier[u] ,), identifier[dtype] = identifier[int] ), identifier[As] [ identifier[v1] , identifier[u] + literal[int] : identifier[n] + literal[int] ])
identifier[V2] [ identifier[V1] ]= literal[int]
identifier[V2] = identifier[np] . identifier[logical_or] (
identifier[np] . identifier[append] ( identifier[np] . identifier[zeros] (( identifier[v1] ,)), identifier[As] [ identifier[u] , identifier[v1] + literal[int] : identifier[n] + literal[int] ]), identifier[V2] )
keyword[for] identifier[v2] keyword[in] identifier[np] . identifier[where] ( identifier[V2] )[ literal[int] ]:
identifier[a] = identifier[np] . identifier[array] (( identifier[A] [ identifier[v1] , identifier[u] ], identifier[A] [ identifier[v2] , identifier[u] ], identifier[A] [ identifier[u] , identifier[v1] ],
identifier[A] [ identifier[v2] , identifier[v1] ], identifier[A] [ identifier[u] , identifier[v2] ], identifier[A] [ identifier[v1] , literal[int] ]))
identifier[s] = identifier[np] . identifier[uint32] ( identifier[np] . identifier[sum] ( identifier[np] . identifier[power] ( literal[int] , identifier[np] . identifier[arange] ( literal[int] ,- literal[int] ,- literal[int] ))* identifier[a] ))
identifier[ix] = identifier[np] . identifier[squeeze] ( identifier[s] == identifier[m3n] )
identifier[w] = identifier[np] . identifier[array] (( identifier[W] [ identifier[v1] , identifier[u] ], identifier[W] [ identifier[v2] , identifier[u] ], identifier[W] [ identifier[u] , identifier[v1] ],
identifier[W] [ identifier[v2] , identifier[v1] ], identifier[W] [ identifier[u] , identifier[v2] ], identifier[W] [ identifier[v1] , identifier[v2] ]))
identifier[M] = identifier[w] * identifier[m3] [ identifier[ix] ,:]
identifier[id] = identifier[id3] [ identifier[ix] ]- literal[int]
identifier[l] = identifier[n3] [ identifier[ix] ]
identifier[x] = identifier[np] . identifier[sum] ( identifier[M] , identifier[axis] = literal[int] )/ identifier[l]
identifier[M] [ identifier[M] == literal[int] ]= literal[int]
identifier[i] = identifier[np] . identifier[prod] ( identifier[M] , identifier[axis] = literal[int] )**( literal[int] / identifier[l] )
identifier[q] = identifier[i] / identifier[x]
identifier[I] [ identifier[id] , identifier[u] ]+= identifier[i]
identifier[I] [ identifier[id] , identifier[v1] ]+= identifier[i]
identifier[I] [ identifier[id] , identifier[v2] ]+= identifier[i]
identifier[Q] [ identifier[id] , identifier[u] ]+= identifier[q]
identifier[Q] [ identifier[id] , identifier[v1] ]+= identifier[q]
identifier[Q] [ identifier[id] , identifier[v2] ]+= identifier[q]
identifier[F] [ identifier[id] , identifier[u] ]+= literal[int]
identifier[F] [ identifier[id] , identifier[v1] ]+= literal[int]
identifier[F] [ identifier[id] , identifier[v1] ]+= literal[int]
keyword[return] identifier[I] , identifier[Q] , identifier[F] | def motif3struct_wei(W):
"""
Structural motifs are patterns of local connectivity. Motif frequency
is the frequency of occurrence of motifs around a node. Motif intensity
and coherence are weighted generalizations of motif frequency.
Parameters
----------
W : NxN np.ndarray
weighted directed connection matrix (all weights between 0 and 1)
Returns
-------
I : 13xN np.ndarray
motif intensity matrix
Q : 13xN np.ndarray
motif coherence matrix
F : 13xN np.ndarray
motif frequency matrix
Notes
-----
Average intensity and coherence are given by I./F and Q./F.
"""
from scipy import io
import os
fname = os.path.join(os.path.dirname(__file__), motiflib)
mot = io.loadmat(fname)
m3 = mot['m3']
m3n = mot['m3n']
id3 = mot['id3'].squeeze()
n3 = mot['n3'].squeeze()
n = len(W) # number of vertices in W
I = np.zeros((13, n)) # intensity
Q = np.zeros((13, n)) # coherence
F = np.zeros((13, n)) # frequency
A = binarize(W, copy=True) # create binary adjmat
As = np.logical_or(A, A.T) # symmetrized adjmat
for u in range(n - 2):
# v1: neighbors of u (>u)
V1 = np.append(np.zeros((u,), dtype=int), As[u, u + 1:n + 1])
for v1 in np.where(V1)[0]:
# v2: neighbors of v1 (>u)
V2 = np.append(np.zeros((u,), dtype=int), As[v1, u + 1:n + 1])
V2[V1] = 0 # not already in V1
# and all neighbors of u (>v1)
V2 = np.logical_or(np.append(np.zeros((v1,)), As[u, v1 + 1:n + 1]), V2)
for v2 in np.where(V2)[0]:
a = np.array((A[v1, u], A[v2, u], A[u, v1], A[v2, v1], A[u, v2], A[v1, 2]))
s = np.uint32(np.sum(np.power(10, np.arange(5, -1, -1)) * a))
ix = np.squeeze(s == m3n)
w = np.array((W[v1, u], W[v2, u], W[u, v1], W[v2, v1], W[u, v2], W[v1, v2]))
M = w * m3[ix, :]
id = id3[ix] - 1
l = n3[ix]
x = np.sum(M, axis=1) / l # arithmetic mean
M[M == 0] = 1 # enable geometric mean
i = np.prod(M, axis=1) ** (1 / l) # intensity
q = i / x # coherence
# add to cumulative counts
I[id, u] += i
I[id, v1] += i
I[id, v2] += i
Q[id, u] += q
Q[id, v1] += q
Q[id, v2] += q
F[id, u] += 1
F[id, v1] += 1
F[id, v1] += 1 # depends on [control=['for'], data=['v2']] # depends on [control=['for'], data=['v1']] # depends on [control=['for'], data=['u']]
return (I, Q, F) |
def is_valid_ipv6(ip_str):
"""
Check the validity of an IPv6 address
"""
try:
socket.inet_pton(socket.AF_INET6, ip_str)
except socket.error:
return False
return True | def function[is_valid_ipv6, parameter[ip_str]]:
constant[
Check the validity of an IPv6 address
]
<ast.Try object at 0x7da207f01120>
return[constant[True]] | keyword[def] identifier[is_valid_ipv6] ( identifier[ip_str] ):
literal[string]
keyword[try] :
identifier[socket] . identifier[inet_pton] ( identifier[socket] . identifier[AF_INET6] , identifier[ip_str] )
keyword[except] identifier[socket] . identifier[error] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def is_valid_ipv6(ip_str):
"""
Check the validity of an IPv6 address
"""
try:
socket.inet_pton(socket.AF_INET6, ip_str) # depends on [control=['try'], data=[]]
except socket.error:
return False # depends on [control=['except'], data=[]]
return True |
def uuidToOoid(uuid,timestamp=None, depth= None):
""" Create an ooid from a 32-hex-digit string in regular uuid format.
uuid: must be uuid in expected format: xxxxxxxx-xxxx-xxxx-xxxx-xxxxx7777777
timestamp: the year-month-day is encoded in the ooid. If none, use current day
depth: the expected storage depth is encoded in the ooid. If non, use the defaultDepth
returns a new opaque id string holding the first 24 digits of the provided uuid and encoded date and depth info
"""
if not timestamp:
timestamp = utc_now().date()
if not depth:
depth = defaultDepth
assert depth <= 4 and depth >=1
return "%s%d%02d%02d%02d" %(uuid[:-7],depth,timestamp.year%100,timestamp.month,timestamp.day) | def function[uuidToOoid, parameter[uuid, timestamp, depth]]:
constant[ Create an ooid from a 32-hex-digit string in regular uuid format.
uuid: must be uuid in expected format: xxxxxxxx-xxxx-xxxx-xxxx-xxxxx7777777
timestamp: the year-month-day is encoded in the ooid. If none, use current day
depth: the expected storage depth is encoded in the ooid. If non, use the defaultDepth
returns a new opaque id string holding the first 24 digits of the provided uuid and encoded date and depth info
]
if <ast.UnaryOp object at 0x7da20c6c4f40> begin[:]
variable[timestamp] assign[=] call[call[name[utc_now], parameter[]].date, parameter[]]
if <ast.UnaryOp object at 0x7da20c6c4b20> begin[:]
variable[depth] assign[=] name[defaultDepth]
assert[<ast.BoolOp object at 0x7da20c6c47f0>]
return[binary_operation[constant[%s%d%02d%02d%02d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da20c6c41f0>, <ast.Name object at 0x7da20c6c6ad0>, <ast.BinOp object at 0x7da20c6c6020>, <ast.Attribute object at 0x7da20c6c7df0>, <ast.Attribute object at 0x7da20c6c6320>]]]] | keyword[def] identifier[uuidToOoid] ( identifier[uuid] , identifier[timestamp] = keyword[None] , identifier[depth] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[timestamp] :
identifier[timestamp] = identifier[utc_now] (). identifier[date] ()
keyword[if] keyword[not] identifier[depth] :
identifier[depth] = identifier[defaultDepth]
keyword[assert] identifier[depth] <= literal[int] keyword[and] identifier[depth] >= literal[int]
keyword[return] literal[string] %( identifier[uuid] [:- literal[int] ], identifier[depth] , identifier[timestamp] . identifier[year] % literal[int] , identifier[timestamp] . identifier[month] , identifier[timestamp] . identifier[day] ) | def uuidToOoid(uuid, timestamp=None, depth=None):
""" Create an ooid from a 32-hex-digit string in regular uuid format.
uuid: must be uuid in expected format: xxxxxxxx-xxxx-xxxx-xxxx-xxxxx7777777
timestamp: the year-month-day is encoded in the ooid. If none, use current day
depth: the expected storage depth is encoded in the ooid. If non, use the defaultDepth
returns a new opaque id string holding the first 24 digits of the provided uuid and encoded date and depth info
"""
if not timestamp:
timestamp = utc_now().date() # depends on [control=['if'], data=[]]
if not depth:
depth = defaultDepth # depends on [control=['if'], data=[]]
assert depth <= 4 and depth >= 1
return '%s%d%02d%02d%02d' % (uuid[:-7], depth, timestamp.year % 100, timestamp.month, timestamp.day) |
def _LDM(cpu, insn_id, base, regs):
"""
LDM (Load Multiple) loads a non-empty subset, or possibly all, of the general-purpose registers from
sequential memory locations. It is useful for block loads, stack operations and procedure exit sequences.
:param int insn_id: should be one of ARM_INS_LDM, ARM_INS_LDMIB, ARM_INS_LDMDA, ARM_INS_LDMDB
:param Armv7Operand base: Specifies the base register.
:param list[Armv7Operand] regs:
Is a list of registers. It specifies the set of registers to be loaded by the LDM instruction.
The registers are loaded in sequence, the lowest-numbered register from the lowest memory
address (start_address), through to the highest-numbered register from the highest memory
address (end_address). If the PC is specified in the register list (opcode bit[15] is set),
the instruction causes a branch to the address (data) loaded into the PC.
It's technically UNKNOWN if you writeback to a register you loaded into, but we let it slide.
"""
if cpu.instruction.usermode:
raise NotImplementedError("Use of the S bit is not supported")
increment = insn_id in (cs.arm.ARM_INS_LDM, cs.arm.ARM_INS_LDMIB)
after = insn_id in (cs.arm.ARM_INS_LDM, cs.arm.ARM_INS_LDMDA)
address = base.read()
for reg in regs:
if not after:
address += (1 if increment else -1) * (reg.size // 8)
reg.write(cpu.read_int(address, reg.size))
if reg.reg in ('PC', 'R15'):
# The general-purpose registers loaded can include the PC. If they do, the word loaded for the PC is
# treated as an address and a branch occurs to that address. In ARMv5 and above, bit[0] of the loaded
# value determines whether execution continues after this branch in ARM state or in Thumb state, as
# though a BX instruction had been executed.
cpu._set_mode_by_val(cpu.PC)
cpu.PC = cpu.PC & ~1
if after:
address += (1 if increment else -1) * (reg.size // 8)
if cpu.instruction.writeback:
base.writeback(address) | def function[_LDM, parameter[cpu, insn_id, base, regs]]:
constant[
LDM (Load Multiple) loads a non-empty subset, or possibly all, of the general-purpose registers from
sequential memory locations. It is useful for block loads, stack operations and procedure exit sequences.
:param int insn_id: should be one of ARM_INS_LDM, ARM_INS_LDMIB, ARM_INS_LDMDA, ARM_INS_LDMDB
:param Armv7Operand base: Specifies the base register.
:param list[Armv7Operand] regs:
Is a list of registers. It specifies the set of registers to be loaded by the LDM instruction.
The registers are loaded in sequence, the lowest-numbered register from the lowest memory
address (start_address), through to the highest-numbered register from the highest memory
address (end_address). If the PC is specified in the register list (opcode bit[15] is set),
the instruction causes a branch to the address (data) loaded into the PC.
It's technically UNKNOWN if you writeback to a register you loaded into, but we let it slide.
]
if name[cpu].instruction.usermode begin[:]
<ast.Raise object at 0x7da1b0086770>
variable[increment] assign[=] compare[name[insn_id] in tuple[[<ast.Attribute object at 0x7da1b0085390>, <ast.Attribute object at 0x7da1b0084820>]]]
variable[after] assign[=] compare[name[insn_id] in tuple[[<ast.Attribute object at 0x7da1b0085990>, <ast.Attribute object at 0x7da1b0087fa0>]]]
variable[address] assign[=] call[name[base].read, parameter[]]
for taget[name[reg]] in starred[name[regs]] begin[:]
if <ast.UnaryOp object at 0x7da1b0084250> begin[:]
<ast.AugAssign object at 0x7da1b0087a30>
call[name[reg].write, parameter[call[name[cpu].read_int, parameter[name[address], name[reg].size]]]]
if compare[name[reg].reg in tuple[[<ast.Constant object at 0x7da1b0086d10>, <ast.Constant object at 0x7da1b0087dc0>]]] begin[:]
call[name[cpu]._set_mode_by_val, parameter[name[cpu].PC]]
name[cpu].PC assign[=] binary_operation[name[cpu].PC <ast.BitAnd object at 0x7da2590d6b60> <ast.UnaryOp object at 0x7da1b000bc70>]
if name[after] begin[:]
<ast.AugAssign object at 0x7da1b000a2f0>
if name[cpu].instruction.writeback begin[:]
call[name[base].writeback, parameter[name[address]]] | keyword[def] identifier[_LDM] ( identifier[cpu] , identifier[insn_id] , identifier[base] , identifier[regs] ):
literal[string]
keyword[if] identifier[cpu] . identifier[instruction] . identifier[usermode] :
keyword[raise] identifier[NotImplementedError] ( literal[string] )
identifier[increment] = identifier[insn_id] keyword[in] ( identifier[cs] . identifier[arm] . identifier[ARM_INS_LDM] , identifier[cs] . identifier[arm] . identifier[ARM_INS_LDMIB] )
identifier[after] = identifier[insn_id] keyword[in] ( identifier[cs] . identifier[arm] . identifier[ARM_INS_LDM] , identifier[cs] . identifier[arm] . identifier[ARM_INS_LDMDA] )
identifier[address] = identifier[base] . identifier[read] ()
keyword[for] identifier[reg] keyword[in] identifier[regs] :
keyword[if] keyword[not] identifier[after] :
identifier[address] +=( literal[int] keyword[if] identifier[increment] keyword[else] - literal[int] )*( identifier[reg] . identifier[size] // literal[int] )
identifier[reg] . identifier[write] ( identifier[cpu] . identifier[read_int] ( identifier[address] , identifier[reg] . identifier[size] ))
keyword[if] identifier[reg] . identifier[reg] keyword[in] ( literal[string] , literal[string] ):
identifier[cpu] . identifier[_set_mode_by_val] ( identifier[cpu] . identifier[PC] )
identifier[cpu] . identifier[PC] = identifier[cpu] . identifier[PC] &~ literal[int]
keyword[if] identifier[after] :
identifier[address] +=( literal[int] keyword[if] identifier[increment] keyword[else] - literal[int] )*( identifier[reg] . identifier[size] // literal[int] )
keyword[if] identifier[cpu] . identifier[instruction] . identifier[writeback] :
identifier[base] . identifier[writeback] ( identifier[address] ) | def _LDM(cpu, insn_id, base, regs):
"""
LDM (Load Multiple) loads a non-empty subset, or possibly all, of the general-purpose registers from
sequential memory locations. It is useful for block loads, stack operations and procedure exit sequences.
:param int insn_id: should be one of ARM_INS_LDM, ARM_INS_LDMIB, ARM_INS_LDMDA, ARM_INS_LDMDB
:param Armv7Operand base: Specifies the base register.
:param list[Armv7Operand] regs:
Is a list of registers. It specifies the set of registers to be loaded by the LDM instruction.
The registers are loaded in sequence, the lowest-numbered register from the lowest memory
address (start_address), through to the highest-numbered register from the highest memory
address (end_address). If the PC is specified in the register list (opcode bit[15] is set),
the instruction causes a branch to the address (data) loaded into the PC.
It's technically UNKNOWN if you writeback to a register you loaded into, but we let it slide.
"""
if cpu.instruction.usermode:
raise NotImplementedError('Use of the S bit is not supported') # depends on [control=['if'], data=[]]
increment = insn_id in (cs.arm.ARM_INS_LDM, cs.arm.ARM_INS_LDMIB)
after = insn_id in (cs.arm.ARM_INS_LDM, cs.arm.ARM_INS_LDMDA)
address = base.read()
for reg in regs:
if not after:
address += (1 if increment else -1) * (reg.size // 8) # depends on [control=['if'], data=[]]
reg.write(cpu.read_int(address, reg.size))
if reg.reg in ('PC', 'R15'):
# The general-purpose registers loaded can include the PC. If they do, the word loaded for the PC is
# treated as an address and a branch occurs to that address. In ARMv5 and above, bit[0] of the loaded
# value determines whether execution continues after this branch in ARM state or in Thumb state, as
# though a BX instruction had been executed.
cpu._set_mode_by_val(cpu.PC)
cpu.PC = cpu.PC & ~1 # depends on [control=['if'], data=[]]
if after:
address += (1 if increment else -1) * (reg.size // 8) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['reg']]
if cpu.instruction.writeback:
base.writeback(address) # depends on [control=['if'], data=[]] |
def encode_example(self, bbox):
"""See base class for details."""
# Validate the coordinates
for coordinate in bbox:
if not isinstance(coordinate, float):
raise ValueError(
'BBox coordinates should be float. Got {}.'.format(bbox))
if not 0.0 <= coordinate <= 1.0:
raise ValueError(
'BBox coordinates should be between 0 and 1. Got {}.'.format(bbox))
if bbox.xmax < bbox.xmin or bbox.ymax < bbox.ymin:
raise ValueError(
'BBox coordinates should have min <= max. Got {}.'.format(bbox))
return super(BBoxFeature, self).encode_example(
[bbox.ymin, bbox.xmin, bbox.ymax, bbox.xmax]
) | def function[encode_example, parameter[self, bbox]]:
constant[See base class for details.]
for taget[name[coordinate]] in starred[name[bbox]] begin[:]
if <ast.UnaryOp object at 0x7da1b20671c0> begin[:]
<ast.Raise object at 0x7da1b2066440>
if <ast.UnaryOp object at 0x7da1b2041210> begin[:]
<ast.Raise object at 0x7da1b2041150>
if <ast.BoolOp object at 0x7da1b2043a90> begin[:]
<ast.Raise object at 0x7da1b2043130>
return[call[call[name[super], parameter[name[BBoxFeature], name[self]]].encode_example, parameter[list[[<ast.Attribute object at 0x7da1b2042890>, <ast.Attribute object at 0x7da1b2042410>, <ast.Attribute object at 0x7da1b2042ce0>, <ast.Attribute object at 0x7da1b20414e0>]]]]] | keyword[def] identifier[encode_example] ( identifier[self] , identifier[bbox] ):
literal[string]
keyword[for] identifier[coordinate] keyword[in] identifier[bbox] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[coordinate] , identifier[float] ):
keyword[raise] identifier[ValueError] (
literal[string] . identifier[format] ( identifier[bbox] ))
keyword[if] keyword[not] literal[int] <= identifier[coordinate] <= literal[int] :
keyword[raise] identifier[ValueError] (
literal[string] . identifier[format] ( identifier[bbox] ))
keyword[if] identifier[bbox] . identifier[xmax] < identifier[bbox] . identifier[xmin] keyword[or] identifier[bbox] . identifier[ymax] < identifier[bbox] . identifier[ymin] :
keyword[raise] identifier[ValueError] (
literal[string] . identifier[format] ( identifier[bbox] ))
keyword[return] identifier[super] ( identifier[BBoxFeature] , identifier[self] ). identifier[encode_example] (
[ identifier[bbox] . identifier[ymin] , identifier[bbox] . identifier[xmin] , identifier[bbox] . identifier[ymax] , identifier[bbox] . identifier[xmax] ]
) | def encode_example(self, bbox):
"""See base class for details."""
# Validate the coordinates
for coordinate in bbox:
if not isinstance(coordinate, float):
raise ValueError('BBox coordinates should be float. Got {}.'.format(bbox)) # depends on [control=['if'], data=[]]
if not 0.0 <= coordinate <= 1.0:
raise ValueError('BBox coordinates should be between 0 and 1. Got {}.'.format(bbox)) # depends on [control=['if'], data=[]]
if bbox.xmax < bbox.xmin or bbox.ymax < bbox.ymin:
raise ValueError('BBox coordinates should have min <= max. Got {}.'.format(bbox)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['coordinate']]
return super(BBoxFeature, self).encode_example([bbox.ymin, bbox.xmin, bbox.ymax, bbox.xmax]) |
def read_byte(self, addr):
"""read_byte(addr) -> result
Perform SMBus Read Byte transaction.
"""
self._set_addr(addr)
result = SMBUS.i2c_smbus_read_byte(self._fd)
if result == -1:
raise IOError(ffi.errno)
return result | def function[read_byte, parameter[self, addr]]:
constant[read_byte(addr) -> result
Perform SMBus Read Byte transaction.
]
call[name[self]._set_addr, parameter[name[addr]]]
variable[result] assign[=] call[name[SMBUS].i2c_smbus_read_byte, parameter[name[self]._fd]]
if compare[name[result] equal[==] <ast.UnaryOp object at 0x7da2054a7820>] begin[:]
<ast.Raise object at 0x7da2054a71f0>
return[name[result]] | keyword[def] identifier[read_byte] ( identifier[self] , identifier[addr] ):
literal[string]
identifier[self] . identifier[_set_addr] ( identifier[addr] )
identifier[result] = identifier[SMBUS] . identifier[i2c_smbus_read_byte] ( identifier[self] . identifier[_fd] )
keyword[if] identifier[result] ==- literal[int] :
keyword[raise] identifier[IOError] ( identifier[ffi] . identifier[errno] )
keyword[return] identifier[result] | def read_byte(self, addr):
"""read_byte(addr) -> result
Perform SMBus Read Byte transaction.
"""
self._set_addr(addr)
result = SMBUS.i2c_smbus_read_byte(self._fd)
if result == -1:
raise IOError(ffi.errno) # depends on [control=['if'], data=[]]
return result |
def invalidated(self,
targets,
invalidate_dependents=False,
silent=False,
fingerprint_strategy=None,
topological_order=False):
"""Checks targets for invalidation, first checking the artifact cache.
Subclasses call this to figure out what to work on.
:API: public
:param targets: The targets to check for changes.
:param invalidate_dependents: If True then any targets depending on changed targets are
invalidated.
:param silent: If true, suppress logging information about target invalidation.
:param fingerprint_strategy: A FingerprintStrategy instance, which can do per task,
finer grained fingerprinting of a given Target.
:param topological_order: Whether to invalidate in dependency order.
If no exceptions are thrown by work in the block, the build cache is updated for the targets.
Note: the artifact cache is not updated. That must be done manually.
:returns: Yields an InvalidationCheck object reflecting the targets.
:rtype: InvalidationCheck
"""
invalidation_check = self._do_invalidation_check(fingerprint_strategy,
invalidate_dependents,
targets,
topological_order)
self._maybe_create_results_dirs(invalidation_check.all_vts)
if invalidation_check.invalid_vts and self.artifact_cache_reads_enabled():
with self.context.new_workunit('cache'):
cached_vts, uncached_vts, uncached_causes = \
self.check_artifact_cache(self.check_artifact_cache_for(invalidation_check))
if cached_vts:
cached_targets = [vt.target for vt in cached_vts]
self.context.run_tracker.artifact_cache_stats.add_hits(self._task_name, cached_targets)
if not silent:
self._report_targets('Using cached artifacts for ', cached_targets, '.')
if uncached_vts:
uncached_targets = [vt.target for vt in uncached_vts]
self.context.run_tracker.artifact_cache_stats.add_misses(self._task_name,
uncached_targets,
uncached_causes)
if not silent:
self._report_targets('No cached artifacts for ', uncached_targets, '.')
# Now that we've checked the cache, re-partition whatever is still invalid.
invalidation_check = InvalidationCheck(invalidation_check.all_vts, uncached_vts)
if not silent:
targets = []
for vt in invalidation_check.invalid_vts:
targets.extend(vt.targets)
if len(targets):
target_address_references = [t.address.reference() for t in targets]
msg_elements = [
'Invalidated ',
items_to_report_element(target_address_references, 'target'),
'.',
]
self.context.log.info(*msg_elements)
self._update_invalidation_report(invalidation_check, 'pre-check')
# Cache has been checked to create the full list of invalid VTs.
# Only copy previous_results for this subset of VTs.
if self.incremental:
for vts in invalidation_check.invalid_vts:
vts.copy_previous_results()
# This may seem odd: why would we need to invalidate a VersionedTargetSet that is already
# invalid? But the name force_invalidate() is slightly misleading in this context - what it
# actually does is delete the key file created at the end of the last successful task run.
# This is necessary to avoid the following scenario:
#
# 1) In state A: Task suceeds and writes some output. Key is recorded by the invalidator.
# 2) In state B: Task fails, but writes some output. Key is not recorded.
# 3) After reverting back to state A: The current key is the same as the one recorded at the
# end of step 1), so it looks like no work needs to be done, but actually the task
# must re-run, to overwrite the output written in step 2.
#
# Deleting the file ensures that if a task fails, there is no key for which we might think
# we're in a valid state.
for vts in invalidation_check.invalid_vts:
vts.force_invalidate()
# Yield the result, and then mark the targets as up to date.
yield invalidation_check
self._update_invalidation_report(invalidation_check, 'post-check')
for vt in invalidation_check.invalid_vts:
vt.update()
# Background work to clean up previous builds.
if self.context.options.for_global_scope().workdir_max_build_entries is not None:
self._launch_background_workdir_cleanup(invalidation_check.all_vts) | def function[invalidated, parameter[self, targets, invalidate_dependents, silent, fingerprint_strategy, topological_order]]:
constant[Checks targets for invalidation, first checking the artifact cache.
Subclasses call this to figure out what to work on.
:API: public
:param targets: The targets to check for changes.
:param invalidate_dependents: If True then any targets depending on changed targets are
invalidated.
:param silent: If true, suppress logging information about target invalidation.
:param fingerprint_strategy: A FingerprintStrategy instance, which can do per task,
finer grained fingerprinting of a given Target.
:param topological_order: Whether to invalidate in dependency order.
If no exceptions are thrown by work in the block, the build cache is updated for the targets.
Note: the artifact cache is not updated. That must be done manually.
:returns: Yields an InvalidationCheck object reflecting the targets.
:rtype: InvalidationCheck
]
variable[invalidation_check] assign[=] call[name[self]._do_invalidation_check, parameter[name[fingerprint_strategy], name[invalidate_dependents], name[targets], name[topological_order]]]
call[name[self]._maybe_create_results_dirs, parameter[name[invalidation_check].all_vts]]
if <ast.BoolOp object at 0x7da1b2279180> begin[:]
with call[name[self].context.new_workunit, parameter[constant[cache]]] begin[:]
<ast.Tuple object at 0x7da1b2279d80> assign[=] call[name[self].check_artifact_cache, parameter[call[name[self].check_artifact_cache_for, parameter[name[invalidation_check]]]]]
if name[cached_vts] begin[:]
variable[cached_targets] assign[=] <ast.ListComp object at 0x7da1b2279c90>
call[name[self].context.run_tracker.artifact_cache_stats.add_hits, parameter[name[self]._task_name, name[cached_targets]]]
if <ast.UnaryOp object at 0x7da1b227a410> begin[:]
call[name[self]._report_targets, parameter[constant[Using cached artifacts for ], name[cached_targets], constant[.]]]
if name[uncached_vts] begin[:]
variable[uncached_targets] assign[=] <ast.ListComp object at 0x7da1b227a170>
call[name[self].context.run_tracker.artifact_cache_stats.add_misses, parameter[name[self]._task_name, name[uncached_targets], name[uncached_causes]]]
if <ast.UnaryOp object at 0x7da1b227ad70> begin[:]
call[name[self]._report_targets, parameter[constant[No cached artifacts for ], name[uncached_targets], constant[.]]]
variable[invalidation_check] assign[=] call[name[InvalidationCheck], parameter[name[invalidation_check].all_vts, name[uncached_vts]]]
if <ast.UnaryOp object at 0x7da1b227aef0> begin[:]
variable[targets] assign[=] list[[]]
for taget[name[vt]] in starred[name[invalidation_check].invalid_vts] begin[:]
call[name[targets].extend, parameter[name[vt].targets]]
if call[name[len], parameter[name[targets]]] begin[:]
variable[target_address_references] assign[=] <ast.ListComp object at 0x7da1b22795a0>
variable[msg_elements] assign[=] list[[<ast.Constant object at 0x7da1b2278700>, <ast.Call object at 0x7da1b227b610>, <ast.Constant object at 0x7da1b22782e0>]]
call[name[self].context.log.info, parameter[<ast.Starred object at 0x7da1b227b9a0>]]
call[name[self]._update_invalidation_report, parameter[name[invalidation_check], constant[pre-check]]]
if name[self].incremental begin[:]
for taget[name[vts]] in starred[name[invalidation_check].invalid_vts] begin[:]
call[name[vts].copy_previous_results, parameter[]]
for taget[name[vts]] in starred[name[invalidation_check].invalid_vts] begin[:]
call[name[vts].force_invalidate, parameter[]]
<ast.Yield object at 0x7da1b2278280>
call[name[self]._update_invalidation_report, parameter[name[invalidation_check], constant[post-check]]]
for taget[name[vt]] in starred[name[invalidation_check].invalid_vts] begin[:]
call[name[vt].update, parameter[]]
if compare[call[name[self].context.options.for_global_scope, parameter[]].workdir_max_build_entries is_not constant[None]] begin[:]
call[name[self]._launch_background_workdir_cleanup, parameter[name[invalidation_check].all_vts]] | keyword[def] identifier[invalidated] ( identifier[self] ,
identifier[targets] ,
identifier[invalidate_dependents] = keyword[False] ,
identifier[silent] = keyword[False] ,
identifier[fingerprint_strategy] = keyword[None] ,
identifier[topological_order] = keyword[False] ):
literal[string]
identifier[invalidation_check] = identifier[self] . identifier[_do_invalidation_check] ( identifier[fingerprint_strategy] ,
identifier[invalidate_dependents] ,
identifier[targets] ,
identifier[topological_order] )
identifier[self] . identifier[_maybe_create_results_dirs] ( identifier[invalidation_check] . identifier[all_vts] )
keyword[if] identifier[invalidation_check] . identifier[invalid_vts] keyword[and] identifier[self] . identifier[artifact_cache_reads_enabled] ():
keyword[with] identifier[self] . identifier[context] . identifier[new_workunit] ( literal[string] ):
identifier[cached_vts] , identifier[uncached_vts] , identifier[uncached_causes] = identifier[self] . identifier[check_artifact_cache] ( identifier[self] . identifier[check_artifact_cache_for] ( identifier[invalidation_check] ))
keyword[if] identifier[cached_vts] :
identifier[cached_targets] =[ identifier[vt] . identifier[target] keyword[for] identifier[vt] keyword[in] identifier[cached_vts] ]
identifier[self] . identifier[context] . identifier[run_tracker] . identifier[artifact_cache_stats] . identifier[add_hits] ( identifier[self] . identifier[_task_name] , identifier[cached_targets] )
keyword[if] keyword[not] identifier[silent] :
identifier[self] . identifier[_report_targets] ( literal[string] , identifier[cached_targets] , literal[string] )
keyword[if] identifier[uncached_vts] :
identifier[uncached_targets] =[ identifier[vt] . identifier[target] keyword[for] identifier[vt] keyword[in] identifier[uncached_vts] ]
identifier[self] . identifier[context] . identifier[run_tracker] . identifier[artifact_cache_stats] . identifier[add_misses] ( identifier[self] . identifier[_task_name] ,
identifier[uncached_targets] ,
identifier[uncached_causes] )
keyword[if] keyword[not] identifier[silent] :
identifier[self] . identifier[_report_targets] ( literal[string] , identifier[uncached_targets] , literal[string] )
identifier[invalidation_check] = identifier[InvalidationCheck] ( identifier[invalidation_check] . identifier[all_vts] , identifier[uncached_vts] )
keyword[if] keyword[not] identifier[silent] :
identifier[targets] =[]
keyword[for] identifier[vt] keyword[in] identifier[invalidation_check] . identifier[invalid_vts] :
identifier[targets] . identifier[extend] ( identifier[vt] . identifier[targets] )
keyword[if] identifier[len] ( identifier[targets] ):
identifier[target_address_references] =[ identifier[t] . identifier[address] . identifier[reference] () keyword[for] identifier[t] keyword[in] identifier[targets] ]
identifier[msg_elements] =[
literal[string] ,
identifier[items_to_report_element] ( identifier[target_address_references] , literal[string] ),
literal[string] ,
]
identifier[self] . identifier[context] . identifier[log] . identifier[info] (* identifier[msg_elements] )
identifier[self] . identifier[_update_invalidation_report] ( identifier[invalidation_check] , literal[string] )
keyword[if] identifier[self] . identifier[incremental] :
keyword[for] identifier[vts] keyword[in] identifier[invalidation_check] . identifier[invalid_vts] :
identifier[vts] . identifier[copy_previous_results] ()
keyword[for] identifier[vts] keyword[in] identifier[invalidation_check] . identifier[invalid_vts] :
identifier[vts] . identifier[force_invalidate] ()
keyword[yield] identifier[invalidation_check]
identifier[self] . identifier[_update_invalidation_report] ( identifier[invalidation_check] , literal[string] )
keyword[for] identifier[vt] keyword[in] identifier[invalidation_check] . identifier[invalid_vts] :
identifier[vt] . identifier[update] ()
keyword[if] identifier[self] . identifier[context] . identifier[options] . identifier[for_global_scope] (). identifier[workdir_max_build_entries] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_launch_background_workdir_cleanup] ( identifier[invalidation_check] . identifier[all_vts] ) | def invalidated(self, targets, invalidate_dependents=False, silent=False, fingerprint_strategy=None, topological_order=False):
"""Checks targets for invalidation, first checking the artifact cache.
Subclasses call this to figure out what to work on.
:API: public
:param targets: The targets to check for changes.
:param invalidate_dependents: If True then any targets depending on changed targets are
invalidated.
:param silent: If true, suppress logging information about target invalidation.
:param fingerprint_strategy: A FingerprintStrategy instance, which can do per task,
finer grained fingerprinting of a given Target.
:param topological_order: Whether to invalidate in dependency order.
If no exceptions are thrown by work in the block, the build cache is updated for the targets.
Note: the artifact cache is not updated. That must be done manually.
:returns: Yields an InvalidationCheck object reflecting the targets.
:rtype: InvalidationCheck
"""
invalidation_check = self._do_invalidation_check(fingerprint_strategy, invalidate_dependents, targets, topological_order)
self._maybe_create_results_dirs(invalidation_check.all_vts)
if invalidation_check.invalid_vts and self.artifact_cache_reads_enabled():
with self.context.new_workunit('cache'):
(cached_vts, uncached_vts, uncached_causes) = self.check_artifact_cache(self.check_artifact_cache_for(invalidation_check)) # depends on [control=['with'], data=[]]
if cached_vts:
cached_targets = [vt.target for vt in cached_vts]
self.context.run_tracker.artifact_cache_stats.add_hits(self._task_name, cached_targets)
if not silent:
self._report_targets('Using cached artifacts for ', cached_targets, '.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if uncached_vts:
uncached_targets = [vt.target for vt in uncached_vts]
self.context.run_tracker.artifact_cache_stats.add_misses(self._task_name, uncached_targets, uncached_causes)
if not silent:
self._report_targets('No cached artifacts for ', uncached_targets, '.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Now that we've checked the cache, re-partition whatever is still invalid.
invalidation_check = InvalidationCheck(invalidation_check.all_vts, uncached_vts) # depends on [control=['if'], data=[]]
if not silent:
targets = []
for vt in invalidation_check.invalid_vts:
targets.extend(vt.targets) # depends on [control=['for'], data=['vt']]
if len(targets):
target_address_references = [t.address.reference() for t in targets]
msg_elements = ['Invalidated ', items_to_report_element(target_address_references, 'target'), '.']
self.context.log.info(*msg_elements) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
self._update_invalidation_report(invalidation_check, 'pre-check')
# Cache has been checked to create the full list of invalid VTs.
# Only copy previous_results for this subset of VTs.
if self.incremental:
for vts in invalidation_check.invalid_vts:
vts.copy_previous_results() # depends on [control=['for'], data=['vts']] # depends on [control=['if'], data=[]]
# This may seem odd: why would we need to invalidate a VersionedTargetSet that is already
# invalid? But the name force_invalidate() is slightly misleading in this context - what it
# actually does is delete the key file created at the end of the last successful task run.
# This is necessary to avoid the following scenario:
#
# 1) In state A: Task suceeds and writes some output. Key is recorded by the invalidator.
# 2) In state B: Task fails, but writes some output. Key is not recorded.
# 3) After reverting back to state A: The current key is the same as the one recorded at the
# end of step 1), so it looks like no work needs to be done, but actually the task
# must re-run, to overwrite the output written in step 2.
#
# Deleting the file ensures that if a task fails, there is no key for which we might think
# we're in a valid state.
for vts in invalidation_check.invalid_vts:
vts.force_invalidate() # depends on [control=['for'], data=['vts']]
# Yield the result, and then mark the targets as up to date.
yield invalidation_check
self._update_invalidation_report(invalidation_check, 'post-check')
for vt in invalidation_check.invalid_vts:
vt.update() # depends on [control=['for'], data=['vt']]
# Background work to clean up previous builds.
if self.context.options.for_global_scope().workdir_max_build_entries is not None:
self._launch_background_workdir_cleanup(invalidation_check.all_vts) # depends on [control=['if'], data=[]] |
def __skeleton_base(graph, image, boundary_term, neighbourhood_function, spacing):
    """
    Base of the skeleton for voxel based boundary term calculation.
    This function holds the low level procedures shared by nearly all boundary terms.
    @param graph An initialized graph.GCGraph object
    @type graph.GCGraph
    @param image The image containing the voxel intensity values
    @type image numpy.ndarray
    @param boundary_term A function to compute the boundary term over an array of
                         absolute intensity differences
    @type boundary_term function
    @param neighbourhood_function A function that takes two arrays of neighbouring pixels
                                  and computes an intensity term from them that is
                                  returned as a single array of the same shape
    @type neighbourhood_function function
    @param spacing A sequence containing the slice spacing used for weighting the
                   computed neighbourhood weight value for different dimensions. If
                   False, no distance based weighting of the graph edges is performed.
    @param spacing sequence | False
    """
    image = scipy.asarray(image)
    # NOTE: the builtin float produces the same float64 dtype as the old
    # scipy.float_ alias, which was removed in NumPy/SciPy 2.0.
    image = image.astype(float)
    # iterate over the image dimensions and for each create the appropriate edges
    # and compute the associated weights
    for dim in range(image.ndim):
        # construct slice-objects for the current dimension; NumPy requires the
        # multi-dimensional index to be a tuple (indexing with a list of slices
        # was deprecated and now raises), hence the tuple() below
        slices_exclude_last = [slice(None)] * image.ndim
        slices_exclude_last[dim] = slice(-1)
        slices_exclude_first = [slice(None)] * image.ndim
        slices_exclude_first[dim] = slice(1, None)
        # compute difference between all layers in the current dimensions direction
        neighbourhood_intensity_term = neighbourhood_function(
            image[tuple(slices_exclude_last)], image[tuple(slices_exclude_first)])
        # apply boundary term
        neighbourhood_intensity_term = boundary_term(neighbourhood_intensity_term)
        # compute key offset for relative key difference
        offset_key = [1 if i == dim else 0 for i in range(image.ndim)]
        offset = __flatten_index(offset_key, image.shape)
        # generate index offset function for index dependent offset; integer floor
        # division replaces int(x / divider): it is exact for arbitrarily large
        # flat indices, whereas the float division could lose precision
        idx_offset_divider = (image.shape[dim] - 1) * offset
        idx_offset = lambda x: (x // idx_offset_divider) * offset
        # weight the computed distance in dimension dim by the corresponding
        # slice spacing provided
        if spacing:
            neighbourhood_intensity_term /= spacing[dim]
        for key, value in enumerate(neighbourhood_intensity_term.ravel()):
            # apply index dependent offset
            key += idx_offset(key)
            # add edges and set the weight
            graph.set_nweight(key, key + offset, value, value)
constant[
Base of the skeleton for voxel based boundary term calculation.
This function holds the low level procedures shared by nearly all boundary terms.
@param graph An initialized graph.GCGraph object
@type graph.GCGraph
@param image The image containing the voxel intensity values
@type image numpy.ndarray
@param boundary_term A function to compute the boundary term over an array of
absolute intensity differences
@type boundary_term function
@param neighbourhood_function A function that takes two arrays of neighbouring pixels
and computes an intensity term from them that is
returned as a single array of the same shape
@type neighbourhood_function function
@param spacing A sequence containing the slice spacing used for weighting the
computed neighbourhood weight value for different dimensions. If
False, no distance based weighting of the graph edges is performed.
@param spacing sequence | False
]
variable[image] assign[=] call[name[scipy].asarray, parameter[name[image]]]
variable[image] assign[=] call[name[image].astype, parameter[name[scipy].float_]]
for taget[name[dim]] in starred[call[name[range], parameter[name[image].ndim]]] begin[:]
variable[slices_exclude_last] assign[=] binary_operation[list[[<ast.Call object at 0x7da1b1516560>]] * name[image].ndim]
call[name[slices_exclude_last]][name[dim]] assign[=] call[name[slice], parameter[<ast.UnaryOp object at 0x7da1b1517280>]]
variable[slices_exclude_first] assign[=] binary_operation[list[[<ast.Call object at 0x7da1b1517190>]] * name[image].ndim]
call[name[slices_exclude_first]][name[dim]] assign[=] call[name[slice], parameter[constant[1], constant[None]]]
variable[neighbourhood_intensity_term] assign[=] call[name[neighbourhood_function], parameter[call[name[image]][name[slices_exclude_last]], call[name[image]][name[slices_exclude_first]]]]
variable[neighbourhood_intensity_term] assign[=] call[name[boundary_term], parameter[name[neighbourhood_intensity_term]]]
variable[offset_key] assign[=] <ast.ListComp object at 0x7da20c9918d0>
variable[offset] assign[=] call[name[__flatten_index], parameter[name[offset_key], name[image].shape]]
variable[idx_offset_divider] assign[=] binary_operation[binary_operation[call[name[image].shape][name[dim]] - constant[1]] * name[offset]]
variable[idx_offset] assign[=] <ast.Lambda object at 0x7da1b12db700>
if name[spacing] begin[:]
<ast.AugAssign object at 0x7da1b12d8580>
for taget[tuple[[<ast.Name object at 0x7da1b12da470>, <ast.Name object at 0x7da1b12d8940>]]] in starred[call[name[enumerate], parameter[call[name[neighbourhood_intensity_term].ravel, parameter[]]]]] begin[:]
<ast.AugAssign object at 0x7da1b12d9810>
call[name[graph].set_nweight, parameter[name[key], binary_operation[name[key] + name[offset]], name[value], name[value]]] | keyword[def] identifier[__skeleton_base] ( identifier[graph] , identifier[image] , identifier[boundary_term] , identifier[neighbourhood_function] , identifier[spacing] ):
literal[string]
identifier[image] = identifier[scipy] . identifier[asarray] ( identifier[image] )
identifier[image] = identifier[image] . identifier[astype] ( identifier[scipy] . identifier[float_] )
keyword[for] identifier[dim] keyword[in] identifier[range] ( identifier[image] . identifier[ndim] ):
identifier[slices_exclude_last] =[ identifier[slice] ( keyword[None] )]* identifier[image] . identifier[ndim]
identifier[slices_exclude_last] [ identifier[dim] ]= identifier[slice] (- literal[int] )
identifier[slices_exclude_first] =[ identifier[slice] ( keyword[None] )]* identifier[image] . identifier[ndim]
identifier[slices_exclude_first] [ identifier[dim] ]= identifier[slice] ( literal[int] , keyword[None] )
identifier[neighbourhood_intensity_term] = identifier[neighbourhood_function] ( identifier[image] [ identifier[slices_exclude_last] ], identifier[image] [ identifier[slices_exclude_first] ])
identifier[neighbourhood_intensity_term] = identifier[boundary_term] ( identifier[neighbourhood_intensity_term] )
identifier[offset_key] =[ literal[int] keyword[if] identifier[i] == identifier[dim] keyword[else] literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[image] . identifier[ndim] )]
identifier[offset] = identifier[__flatten_index] ( identifier[offset_key] , identifier[image] . identifier[shape] )
identifier[idx_offset_divider] =( identifier[image] . identifier[shape] [ identifier[dim] ]- literal[int] )* identifier[offset]
identifier[idx_offset] = keyword[lambda] identifier[x] : identifier[int] ( identifier[x] / identifier[idx_offset_divider] )* identifier[offset]
keyword[if] identifier[spacing] : identifier[neighbourhood_intensity_term] /= identifier[spacing] [ identifier[dim] ]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[enumerate] ( identifier[neighbourhood_intensity_term] . identifier[ravel] ()):
identifier[key] += identifier[idx_offset] ( identifier[key] )
identifier[graph] . identifier[set_nweight] ( identifier[key] , identifier[key] + identifier[offset] , identifier[value] , identifier[value] ) | def __skeleton_base(graph, image, boundary_term, neighbourhood_function, spacing):
"""
Base of the skeleton for voxel based boundary term calculation.
This function holds the low level procedures shared by nearly all boundary terms.
@param graph An initialized graph.GCGraph object
@type graph.GCGraph
@param image The image containing the voxel intensity values
@type image numpy.ndarray
@param boundary_term A function to compute the boundary term over an array of
absolute intensity differences
@type boundary_term function
@param neighbourhood_function A function that takes two arrays of neighbouring pixels
and computes an intensity term from them that is
returned as a single array of the same shape
@type neighbourhood_function function
@param spacing A sequence containing the slice spacing used for weighting the
computed neighbourhood weight value for different dimensions. If
False, no distance based weighting of the graph edges is performed.
@param spacing sequence | False
"""
image = scipy.asarray(image)
image = image.astype(scipy.float_)
# iterate over the image dimensions and for each create the appropriate edges and compute the associated weights
for dim in range(image.ndim):
# construct slice-objects for the current dimension
slices_exclude_last = [slice(None)] * image.ndim
slices_exclude_last[dim] = slice(-1)
slices_exclude_first = [slice(None)] * image.ndim
slices_exclude_first[dim] = slice(1, None)
# compute difference between all layers in the current dimensions direction
neighbourhood_intensity_term = neighbourhood_function(image[slices_exclude_last], image[slices_exclude_first])
# apply boundary term
neighbourhood_intensity_term = boundary_term(neighbourhood_intensity_term)
# compute key offset for relative key difference
offset_key = [1 if i == dim else 0 for i in range(image.ndim)]
offset = __flatten_index(offset_key, image.shape)
# generate index offset function for index dependent offset
idx_offset_divider = (image.shape[dim] - 1) * offset
idx_offset = lambda x: int(x / idx_offset_divider) * offset
# weight the computed distanced in dimension dim by the corresponding slice spacing provided
if spacing:
neighbourhood_intensity_term /= spacing[dim] # depends on [control=['if'], data=[]]
for (key, value) in enumerate(neighbourhood_intensity_term.ravel()):
# apply index dependent offset
key += idx_offset(key)
# add edges and set the weight
graph.set_nweight(key, key + offset, value, value) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['dim']] |
def generate_dot(self, fd, name, parts=0, urls=None,
                     graph_options=None, node_options=None,
                     edge_options=None):
        """
        Generate a graphviz dot graph from the classes that
        were passed in to __init__.

        *fd* is a Python file-like object to write to.

        *name* is the name of the graph

        *parts* limits how many dotted name parts are shown per class

        *urls* is a dictionary mapping class names to http urls

        *graph_options*, *node_options*, *edge_options* are
        dictionaries containing key/value pairs to pass on as graphviz
        properties.
        """
        # None defaults avoid the shared-mutable-default-argument pitfall;
        # callers passing dicts explicitly behave exactly as before.
        urls = {} if urls is None else urls
        g_options = self.default_graph_options.copy()
        g_options.update(graph_options or {})
        n_options = self.default_node_options.copy()
        n_options.update(node_options or {})
        e_options = self.default_edge_options.copy()
        e_options.update(edge_options or {})

        fd.write('digraph %s {\n' % name)
        fd.write(self._format_graph_options(g_options))

        for cls in self.all_classes:
            if not self.show_builtins and cls in __builtins__.values():
                continue

            # Use a dedicated variable instead of rebinding the *name*
            # parameter (the graph name), which the original code shadowed.
            cls_name = self.class_name(cls, parts)

            # Write the node
            this_node_options = n_options.copy()
            url = urls.get(self.class_name(cls))
            if url is not None:
                this_node_options['URL'] = '"%s"' % url
            fd.write(' "%s" [%s];\n' %
                     (cls_name, self._format_node_options(this_node_options)))

            # Write the edges (one per base class)
            for base in cls.__bases__:
                if not self.show_builtins and base in __builtins__.values():
                    continue

                base_name = self.class_name(base, parts)
                # NOTE(review): edge attributes are rendered with the node
                # formatter in the original code; preserved as-is.
                fd.write(' "%s" -> "%s" [%s];\n' %
                         (base_name, cls_name,
                          self._format_node_options(e_options)))
        fd.write('}\n')
constant[
Generate a graphviz dot graph from the classes that
were passed in to __init__.
*fd* is a Python file-like object to write to.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
*graph_options*, *node_options*, *edge_options* are
dictionaries containing key/value pairs to pass on as graphviz
properties.
]
variable[g_options] assign[=] call[name[self].default_graph_options.copy, parameter[]]
call[name[g_options].update, parameter[name[graph_options]]]
variable[n_options] assign[=] call[name[self].default_node_options.copy, parameter[]]
call[name[n_options].update, parameter[name[node_options]]]
variable[e_options] assign[=] call[name[self].default_edge_options.copy, parameter[]]
call[name[e_options].update, parameter[name[edge_options]]]
call[name[fd].write, parameter[binary_operation[constant[digraph %s {
] <ast.Mod object at 0x7da2590d6920> name[name]]]]
call[name[fd].write, parameter[call[name[self]._format_graph_options, parameter[name[g_options]]]]]
for taget[name[cls]] in starred[name[self].all_classes] begin[:]
if <ast.BoolOp object at 0x7da207f029e0> begin[:]
continue
variable[name] assign[=] call[name[self].class_name, parameter[name[cls], name[parts]]]
variable[this_node_options] assign[=] call[name[n_options].copy, parameter[]]
variable[url] assign[=] call[name[urls].get, parameter[call[name[self].class_name, parameter[name[cls]]]]]
if compare[name[url] is_not constant[None]] begin[:]
call[name[this_node_options]][constant[URL]] assign[=] binary_operation[constant["%s"] <ast.Mod object at 0x7da2590d6920> name[url]]
call[name[fd].write, parameter[binary_operation[constant[ "%s" [%s];
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da207f9b400>, <ast.Call object at 0x7da207f9a9e0>]]]]]
for taget[name[base]] in starred[name[cls].__bases__] begin[:]
if <ast.BoolOp object at 0x7da207f98070> begin[:]
continue
variable[base_name] assign[=] call[name[self].class_name, parameter[name[base], name[parts]]]
call[name[fd].write, parameter[binary_operation[constant[ "%s" -> "%s" [%s];
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da207f9b8e0>, <ast.Name object at 0x7da207f99f30>, <ast.Call object at 0x7da207f9b730>]]]]]
call[name[fd].write, parameter[constant[}
]]] | keyword[def] identifier[generate_dot] ( identifier[self] , identifier[fd] , identifier[name] , identifier[parts] = literal[int] , identifier[urls] ={},
identifier[graph_options] ={}, identifier[node_options] ={},
identifier[edge_options] ={}):
literal[string]
identifier[g_options] = identifier[self] . identifier[default_graph_options] . identifier[copy] ()
identifier[g_options] . identifier[update] ( identifier[graph_options] )
identifier[n_options] = identifier[self] . identifier[default_node_options] . identifier[copy] ()
identifier[n_options] . identifier[update] ( identifier[node_options] )
identifier[e_options] = identifier[self] . identifier[default_edge_options] . identifier[copy] ()
identifier[e_options] . identifier[update] ( identifier[edge_options] )
identifier[fd] . identifier[write] ( literal[string] % identifier[name] )
identifier[fd] . identifier[write] ( identifier[self] . identifier[_format_graph_options] ( identifier[g_options] ))
keyword[for] identifier[cls] keyword[in] identifier[self] . identifier[all_classes] :
keyword[if] keyword[not] identifier[self] . identifier[show_builtins] keyword[and] identifier[cls] keyword[in] identifier[__builtins__] . identifier[values] ():
keyword[continue]
identifier[name] = identifier[self] . identifier[class_name] ( identifier[cls] , identifier[parts] )
identifier[this_node_options] = identifier[n_options] . identifier[copy] ()
identifier[url] = identifier[urls] . identifier[get] ( identifier[self] . identifier[class_name] ( identifier[cls] ))
keyword[if] identifier[url] keyword[is] keyword[not] keyword[None] :
identifier[this_node_options] [ literal[string] ]= literal[string] % identifier[url]
identifier[fd] . identifier[write] ( literal[string] %
( identifier[name] , identifier[self] . identifier[_format_node_options] ( identifier[this_node_options] )))
keyword[for] identifier[base] keyword[in] identifier[cls] . identifier[__bases__] :
keyword[if] keyword[not] identifier[self] . identifier[show_builtins] keyword[and] identifier[base] keyword[in] identifier[__builtins__] . identifier[values] ():
keyword[continue]
identifier[base_name] = identifier[self] . identifier[class_name] ( identifier[base] , identifier[parts] )
identifier[fd] . identifier[write] ( literal[string] %
( identifier[base_name] , identifier[name] ,
identifier[self] . identifier[_format_node_options] ( identifier[e_options] )))
identifier[fd] . identifier[write] ( literal[string] ) | def generate_dot(self, fd, name, parts=0, urls={}, graph_options={}, node_options={}, edge_options={}):
"""
Generate a graphviz dot graph from the classes that
were passed in to __init__.
*fd* is a Python file-like object to write to.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
*graph_options*, *node_options*, *edge_options* are
dictionaries containing key/value pairs to pass on as graphviz
properties.
"""
g_options = self.default_graph_options.copy()
g_options.update(graph_options)
n_options = self.default_node_options.copy()
n_options.update(node_options)
e_options = self.default_edge_options.copy()
e_options.update(edge_options)
fd.write('digraph %s {\n' % name)
fd.write(self._format_graph_options(g_options))
for cls in self.all_classes:
if not self.show_builtins and cls in __builtins__.values():
continue # depends on [control=['if'], data=[]]
name = self.class_name(cls, parts)
# Write the node
this_node_options = n_options.copy()
url = urls.get(self.class_name(cls))
if url is not None:
this_node_options['URL'] = '"%s"' % url # depends on [control=['if'], data=['url']]
fd.write(' "%s" [%s];\n' % (name, self._format_node_options(this_node_options)))
# Write the edges
for base in cls.__bases__:
if not self.show_builtins and base in __builtins__.values():
continue # depends on [control=['if'], data=[]]
base_name = self.class_name(base, parts)
fd.write(' "%s" -> "%s" [%s];\n' % (base_name, name, self._format_node_options(e_options))) # depends on [control=['for'], data=['base']] # depends on [control=['for'], data=['cls']]
fd.write('}\n') |
def load_activation_profiles(self):
        """
        :class:`~zhmcclient.ActivationProfileManager`: Access to the
        :term:`Load Activation Profiles <load Activation Profile>` in this
        CPC.
        """
        # Lazy initialization: build the manager only on first access and
        # reuse the cached instance on every subsequent call.
        manager = self._load_activation_profiles
        if not manager:
            manager = ActivationProfileManager(self, profile_type='load')
            self._load_activation_profiles = manager
        return manager
constant[
:class:`~zhmcclient.ActivationProfileManager`: Access to the
:term:`Load Activation Profiles <load Activation Profile>` in this
CPC.
]
if <ast.UnaryOp object at 0x7da2041daa40> begin[:]
name[self]._load_activation_profiles assign[=] call[name[ActivationProfileManager], parameter[name[self]]]
return[name[self]._load_activation_profiles] | keyword[def] identifier[load_activation_profiles] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_load_activation_profiles] :
identifier[self] . identifier[_load_activation_profiles] = identifier[ActivationProfileManager] ( identifier[self] , identifier[profile_type] = literal[string] )
keyword[return] identifier[self] . identifier[_load_activation_profiles] | def load_activation_profiles(self):
"""
:class:`~zhmcclient.ActivationProfileManager`: Access to the
:term:`Load Activation Profiles <load Activation Profile>` in this
CPC.
"""
# We do here some lazy loading.
if not self._load_activation_profiles:
self._load_activation_profiles = ActivationProfileManager(self, profile_type='load') # depends on [control=['if'], data=[]]
return self._load_activation_profiles |
def run(self, cmd, fn=None, globals=None, locals=None):
        """Run the command `cmd` under the tracer.

        `cmd` may be a source string (compiled here, with the source kept in
        `compile_cache` for later lookup) or a pre-compiled code object.
        `globals`/`locals` default to `__main__`'s namespace.  When `fn` is
        given, a temporary LineBreakpoint is registered on the first
        executable line of that file before tracing starts.
        """
        if globals is None:
            import __main__
            globals = __main__.__dict__
        if locals is None:
            locals = globals
        self.reset()
        if isinstance(cmd, str):
            str_cmd = cmd
            cmd = compile(str_cmd, fn or "<wdb>", "exec")
            self.compile_cache[id(cmd)] = str_cmd
        # lno must be pre-initialized: the original code raised a NameError
        # in the `if lno is not None` check below whenever fn was not given.
        lno = None
        if fn:
            from linecache import getline
            candidate = 1
            while True:
                line = getline(fn, candidate, globals)
                if not line:
                    # linecache.getline() returns '' (never None) past the
                    # end of the file or on error; the original `is None`
                    # test was dead code and made this loop never terminate.
                    break
                if executable_line(line):
                    lno = candidate
                    break
                candidate += 1
        self.start_trace()
        if lno is not None:
            self.breakpoints.add(LineBreakpoint(fn, lno, temporary=True))
        try:
            execute(cmd, globals, locals)
        finally:
            self.stop_trace()
constant[Run the cmd `cmd` with trace]
if compare[name[globals] is constant[None]] begin[:]
import module[__main__]
variable[globals] assign[=] name[__main__].__dict__
if compare[name[locals] is constant[None]] begin[:]
variable[locals] assign[=] name[globals]
call[name[self].reset, parameter[]]
if call[name[isinstance], parameter[name[cmd], name[str]]] begin[:]
variable[str_cmd] assign[=] name[cmd]
variable[cmd] assign[=] call[name[compile], parameter[name[str_cmd], <ast.BoolOp object at 0x7da18eb55690>, constant[exec]]]
call[name[self].compile_cache][call[name[id], parameter[name[cmd]]]] assign[=] name[str_cmd]
if name[fn] begin[:]
from relative_module[linecache] import module[getline]
variable[lno] assign[=] constant[1]
while constant[True] begin[:]
variable[line] assign[=] call[name[getline], parameter[name[fn], name[lno], name[globals]]]
if compare[name[line] is constant[None]] begin[:]
variable[lno] assign[=] constant[None]
break
if call[name[executable_line], parameter[name[line]]] begin[:]
break
<ast.AugAssign object at 0x7da18f58f5e0>
call[name[self].start_trace, parameter[]]
if compare[name[lno] is_not constant[None]] begin[:]
call[name[self].breakpoints.add, parameter[call[name[LineBreakpoint], parameter[name[fn], name[lno]]]]]
<ast.Try object at 0x7da2049634c0> | keyword[def] identifier[run] ( identifier[self] , identifier[cmd] , identifier[fn] = keyword[None] , identifier[globals] = keyword[None] , identifier[locals] = keyword[None] ):
literal[string]
keyword[if] identifier[globals] keyword[is] keyword[None] :
keyword[import] identifier[__main__]
identifier[globals] = identifier[__main__] . identifier[__dict__]
keyword[if] identifier[locals] keyword[is] keyword[None] :
identifier[locals] = identifier[globals]
identifier[self] . identifier[reset] ()
keyword[if] identifier[isinstance] ( identifier[cmd] , identifier[str] ):
identifier[str_cmd] = identifier[cmd]
identifier[cmd] = identifier[compile] ( identifier[str_cmd] , identifier[fn] keyword[or] literal[string] , literal[string] )
identifier[self] . identifier[compile_cache] [ identifier[id] ( identifier[cmd] )]= identifier[str_cmd]
keyword[if] identifier[fn] :
keyword[from] identifier[linecache] keyword[import] identifier[getline]
identifier[lno] = literal[int]
keyword[while] keyword[True] :
identifier[line] = identifier[getline] ( identifier[fn] , identifier[lno] , identifier[globals] )
keyword[if] identifier[line] keyword[is] keyword[None] :
identifier[lno] = keyword[None]
keyword[break]
keyword[if] identifier[executable_line] ( identifier[line] ):
keyword[break]
identifier[lno] += literal[int]
identifier[self] . identifier[start_trace] ()
keyword[if] identifier[lno] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[breakpoints] . identifier[add] ( identifier[LineBreakpoint] ( identifier[fn] , identifier[lno] , identifier[temporary] = keyword[True] ))
keyword[try] :
identifier[execute] ( identifier[cmd] , identifier[globals] , identifier[locals] )
keyword[finally] :
identifier[self] . identifier[stop_trace] () | def run(self, cmd, fn=None, globals=None, locals=None):
"""Run the cmd `cmd` with trace"""
if globals is None:
import __main__
globals = __main__.__dict__ # depends on [control=['if'], data=['globals']]
if locals is None:
locals = globals # depends on [control=['if'], data=['locals']]
self.reset()
if isinstance(cmd, str):
str_cmd = cmd
cmd = compile(str_cmd, fn or '<wdb>', 'exec')
self.compile_cache[id(cmd)] = str_cmd # depends on [control=['if'], data=[]]
if fn:
from linecache import getline
lno = 1
while True:
line = getline(fn, lno, globals)
if line is None:
lno = None
break # depends on [control=['if'], data=[]]
if executable_line(line):
break # depends on [control=['if'], data=[]]
lno += 1 # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
self.start_trace()
if lno is not None:
self.breakpoints.add(LineBreakpoint(fn, lno, temporary=True)) # depends on [control=['if'], data=['lno']]
try:
execute(cmd, globals, locals) # depends on [control=['try'], data=[]]
finally:
self.stop_trace() |
def textvalidation(self, warnonly=None):
        """Run text validation on this element.

        Compares the text stored directly on this element (strict) with the
        text recomposed from its descendants (non-strict), after whitespace
        normalisation, and queues any text offset for later validation via
        the document's ``offsetvalidationbuffer``.

        Parameters:
            warnonly (bool): Warn only (True) or raise exceptions (False).
                If set to None then this value will be determined based on
                the document's FoLiA version (warn only before FoLiA v1.5).

        Returns:
            bool: True when the texts are consistent (or validation was
            skipped), False when an inconsistency was found in warn-only
            mode.

        Raises:
            InconsistentText: On inconsistency when not in warn-only mode.
        """
        if warnonly is None and self.doc and self.doc.version:
            warnonly = (checkversion(self.doc.version, '1.5.0') < 0) #warn only for documents older than FoLiA v1.5
        valid = True
        # Check every text class the document declares independently.
        for cls in self.doc.textclasses:
            # Linebreak/Whitespace carry no comparable text of their own.
            if self.hastext(cls, strict=True) and not isinstance(self, (Linebreak, Whitespace)):
                if self.doc and self.doc.debug: print("[PyNLPl FoLiA DEBUG] Text validation on " + repr(self),file=stderr)
                correctionhandling = self.findcorrectionhandling(cls)
                if correctionhandling is None:
                    #skipping text validation, correction is too complex (nested) to handle for now; just assume valid (benefit of the doubt)
                    if self.doc and self.doc.debug: print("[PyNLPl FoLiA DEBUG] SKIPPING Text validation on " + repr(self) + ", too complex to handle (nested corrections or inconsistent use)",file=stderr)
                    return True #just assume it's valid then
                # strict=True: text as stored on this element; strict=False:
                # text recomposed from children -- presumed semantics of
                # self.text(); confirm against its implementation.
                strictnormtext = self.text(cls,retaintokenisation=False,strict=True, normalize_spaces=True)
                deepnormtext = self.text(cls,retaintokenisation=False,strict=False, normalize_spaces=True)
                if strictnormtext != deepnormtext:
                    valid = False
                    # Locate the first differing character for the message.
                    # NOTE(review): if one text is a strict prefix of the
                    # other, no character differs and deviation stays 0.
                    deviation = 0
                    for i, (c1,c2) in enumerate(zip(strictnormtext,deepnormtext)):
                        if c1 != c2:
                            deviation = i
                            break
                    msg = "Text for " + self.__class__.__name__ + ", ID " + str(self.id) + ", class " + cls + ", is inconsistent: EXPECTED (after normalization) *****>\n" + deepnormtext + "\n****> BUT FOUND (after normalization) ****>\n" + strictnormtext + "\n******* DEVIATION POINT: " + strictnormtext[max(0,deviation-10):deviation] + "<*HERE*>" + strictnormtext[deviation:deviation+10]
                    if warnonly:
                        print("TEXT VALIDATION ERROR: " + msg,file=sys.stderr)
                    else:
                        raise InconsistentText(msg)
                #validate offsets
                tc = self.textcontent(cls)
                if tc.offset is not None:
                    #we can't validate the reference of this element yet since it may point to higher level elements still being created!! we store it in a buffer that will
                    #be processed by pendingvalidation() after parsing and prior to serialisation
                    if self.doc and self.doc.debug: print("[PyNLPl FoLiA DEBUG] Queing element for later offset validation: " + repr(self),file=stderr)
                    self.doc.offsetvalidationbuffer.append( (self, cls) )
        return valid
constant[Run text validation on this element. Checks whether any text redundancy is consistent and whether offsets are valid.
Parameters:
warnonly (bool): Warn only (True) or raise exceptions (False). If set to None then this value will be determined based on the document's FoLiA version (Warn only before FoLiA v1.5)
Returns:
bool
]
if <ast.BoolOp object at 0x7da18f58fb20> begin[:]
variable[warnonly] assign[=] compare[call[name[checkversion], parameter[name[self].doc.version, constant[1.5.0]]] less[<] constant[0]]
variable[valid] assign[=] constant[True]
for taget[name[cls]] in starred[name[self].doc.textclasses] begin[:]
if <ast.BoolOp object at 0x7da20c992d10> begin[:]
if <ast.BoolOp object at 0x7da20c990d60> begin[:]
call[name[print], parameter[binary_operation[constant[[PyNLPl FoLiA DEBUG] Text validation on ] + call[name[repr], parameter[name[self]]]]]]
variable[correctionhandling] assign[=] call[name[self].findcorrectionhandling, parameter[name[cls]]]
if compare[name[correctionhandling] is constant[None]] begin[:]
if <ast.BoolOp object at 0x7da20c993730> begin[:]
call[name[print], parameter[binary_operation[binary_operation[constant[[PyNLPl FoLiA DEBUG] SKIPPING Text validation on ] + call[name[repr], parameter[name[self]]]] + constant[, too complex to handle (nested corrections or inconsistent use)]]]]
return[constant[True]]
variable[strictnormtext] assign[=] call[name[self].text, parameter[name[cls]]]
variable[deepnormtext] assign[=] call[name[self].text, parameter[name[cls]]]
if compare[name[strictnormtext] not_equal[!=] name[deepnormtext]] begin[:]
variable[valid] assign[=] constant[False]
variable[deviation] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da20c992ec0>, <ast.Tuple object at 0x7da20c990280>]]] in starred[call[name[enumerate], parameter[call[name[zip], parameter[name[strictnormtext], name[deepnormtext]]]]]] begin[:]
if compare[name[c1] not_equal[!=] name[c2]] begin[:]
variable[deviation] assign[=] name[i]
break
variable[msg] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[Text for ] + name[self].__class__.__name__] + constant[, ID ]] + call[name[str], parameter[name[self].id]]] + constant[, class ]] + name[cls]] + constant[, is inconsistent: EXPECTED (after normalization) *****>
]] + name[deepnormtext]] + constant[
****> BUT FOUND (after normalization) ****>
]] + name[strictnormtext]] + constant[
******* DEVIATION POINT: ]] + call[name[strictnormtext]][<ast.Slice object at 0x7da20c991270>]] + constant[<*HERE*>]] + call[name[strictnormtext]][<ast.Slice object at 0x7da20c9937c0>]]
if name[warnonly] begin[:]
call[name[print], parameter[binary_operation[constant[TEXT VALIDATION ERROR: ] + name[msg]]]]
variable[tc] assign[=] call[name[self].textcontent, parameter[name[cls]]]
if compare[name[tc].offset is_not constant[None]] begin[:]
if <ast.BoolOp object at 0x7da1b0e3a530> begin[:]
call[name[print], parameter[binary_operation[constant[[PyNLPl FoLiA DEBUG] Queing element for later offset validation: ] + call[name[repr], parameter[name[self]]]]]]
call[name[self].doc.offsetvalidationbuffer.append, parameter[tuple[[<ast.Name object at 0x7da1b0e38f10>, <ast.Name object at 0x7da1b0e3a110>]]]]
return[name[valid]] | keyword[def] identifier[textvalidation] ( identifier[self] , identifier[warnonly] = keyword[None] ):
literal[string]
keyword[if] identifier[warnonly] keyword[is] keyword[None] keyword[and] identifier[self] . identifier[doc] keyword[and] identifier[self] . identifier[doc] . identifier[version] :
identifier[warnonly] =( identifier[checkversion] ( identifier[self] . identifier[doc] . identifier[version] , literal[string] )< literal[int] )
identifier[valid] = keyword[True]
keyword[for] identifier[cls] keyword[in] identifier[self] . identifier[doc] . identifier[textclasses] :
keyword[if] identifier[self] . identifier[hastext] ( identifier[cls] , identifier[strict] = keyword[True] ) keyword[and] keyword[not] identifier[isinstance] ( identifier[self] ,( identifier[Linebreak] , identifier[Whitespace] )):
keyword[if] identifier[self] . identifier[doc] keyword[and] identifier[self] . identifier[doc] . identifier[debug] : identifier[print] ( literal[string] + identifier[repr] ( identifier[self] ), identifier[file] = identifier[stderr] )
identifier[correctionhandling] = identifier[self] . identifier[findcorrectionhandling] ( identifier[cls] )
keyword[if] identifier[correctionhandling] keyword[is] keyword[None] :
keyword[if] identifier[self] . identifier[doc] keyword[and] identifier[self] . identifier[doc] . identifier[debug] : identifier[print] ( literal[string] + identifier[repr] ( identifier[self] )+ literal[string] , identifier[file] = identifier[stderr] )
keyword[return] keyword[True]
identifier[strictnormtext] = identifier[self] . identifier[text] ( identifier[cls] , identifier[retaintokenisation] = keyword[False] , identifier[strict] = keyword[True] , identifier[normalize_spaces] = keyword[True] )
identifier[deepnormtext] = identifier[self] . identifier[text] ( identifier[cls] , identifier[retaintokenisation] = keyword[False] , identifier[strict] = keyword[False] , identifier[normalize_spaces] = keyword[True] )
keyword[if] identifier[strictnormtext] != identifier[deepnormtext] :
identifier[valid] = keyword[False]
identifier[deviation] = literal[int]
keyword[for] identifier[i] ,( identifier[c1] , identifier[c2] ) keyword[in] identifier[enumerate] ( identifier[zip] ( identifier[strictnormtext] , identifier[deepnormtext] )):
keyword[if] identifier[c1] != identifier[c2] :
identifier[deviation] = identifier[i]
keyword[break]
identifier[msg] = literal[string] + identifier[self] . identifier[__class__] . identifier[__name__] + literal[string] + identifier[str] ( identifier[self] . identifier[id] )+ literal[string] + identifier[cls] + literal[string] + identifier[deepnormtext] + literal[string] + identifier[strictnormtext] + literal[string] + identifier[strictnormtext] [ identifier[max] ( literal[int] , identifier[deviation] - literal[int] ): identifier[deviation] ]+ literal[string] + identifier[strictnormtext] [ identifier[deviation] : identifier[deviation] + literal[int] ]
keyword[if] identifier[warnonly] :
identifier[print] ( literal[string] + identifier[msg] , identifier[file] = identifier[sys] . identifier[stderr] )
keyword[else] :
keyword[raise] identifier[InconsistentText] ( identifier[msg] )
identifier[tc] = identifier[self] . identifier[textcontent] ( identifier[cls] )
keyword[if] identifier[tc] . identifier[offset] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[self] . identifier[doc] keyword[and] identifier[self] . identifier[doc] . identifier[debug] : identifier[print] ( literal[string] + identifier[repr] ( identifier[self] ), identifier[file] = identifier[stderr] )
identifier[self] . identifier[doc] . identifier[offsetvalidationbuffer] . identifier[append] (( identifier[self] , identifier[cls] ))
keyword[return] identifier[valid] | def textvalidation(self, warnonly=None):
"""Run text validation on this element. Checks whether any text redundancy is consistent and whether offsets are valid.
Parameters:
warnonly (bool): Warn only (True) or raise exceptions (False). If set to None then this value will be determined based on the document's FoLiA version (Warn only before FoLiA v1.5)
Returns:
bool
"""
if warnonly is None and self.doc and self.doc.version:
warnonly = checkversion(self.doc.version, '1.5.0') < 0 #warn only for documents older than FoLiA v1.5 # depends on [control=['if'], data=[]]
valid = True
for cls in self.doc.textclasses:
if self.hastext(cls, strict=True) and (not isinstance(self, (Linebreak, Whitespace))):
if self.doc and self.doc.debug:
print('[PyNLPl FoLiA DEBUG] Text validation on ' + repr(self), file=stderr) # depends on [control=['if'], data=[]]
correctionhandling = self.findcorrectionhandling(cls)
if correctionhandling is None:
#skipping text validation, correction is too complex (nested) to handle for now; just assume valid (benefit of the doubt)
if self.doc and self.doc.debug:
print('[PyNLPl FoLiA DEBUG] SKIPPING Text validation on ' + repr(self) + ', too complex to handle (nested corrections or inconsistent use)', file=stderr) # depends on [control=['if'], data=[]]
return True #just assume it's valid then # depends on [control=['if'], data=[]]
strictnormtext = self.text(cls, retaintokenisation=False, strict=True, normalize_spaces=True)
deepnormtext = self.text(cls, retaintokenisation=False, strict=False, normalize_spaces=True)
if strictnormtext != deepnormtext:
valid = False
deviation = 0
for (i, (c1, c2)) in enumerate(zip(strictnormtext, deepnormtext)):
if c1 != c2:
deviation = i
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
msg = 'Text for ' + self.__class__.__name__ + ', ID ' + str(self.id) + ', class ' + cls + ', is inconsistent: EXPECTED (after normalization) *****>\n' + deepnormtext + '\n****> BUT FOUND (after normalization) ****>\n' + strictnormtext + '\n******* DEVIATION POINT: ' + strictnormtext[max(0, deviation - 10):deviation] + '<*HERE*>' + strictnormtext[deviation:deviation + 10]
if warnonly:
print('TEXT VALIDATION ERROR: ' + msg, file=sys.stderr) # depends on [control=['if'], data=[]]
else:
raise InconsistentText(msg) # depends on [control=['if'], data=['strictnormtext', 'deepnormtext']]
#validate offsets
tc = self.textcontent(cls)
if tc.offset is not None:
#we can't validate the reference of this element yet since it may point to higher level elements still being created!! we store it in a buffer that will
#be processed by pendingvalidation() after parsing and prior to serialisation
if self.doc and self.doc.debug:
print('[PyNLPl FoLiA DEBUG] Queing element for later offset validation: ' + repr(self), file=stderr) # depends on [control=['if'], data=[]]
self.doc.offsetvalidationbuffer.append((self, cls)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['cls']]
return valid |
def _set_line_indent(src, line, indent):
    '''
    Re-indent ``line`` using the leading whitespace of ``src``.

    When ``indent`` is falsy the line is returned untouched; otherwise the
    run of leading tabs/spaces from ``src`` is collected into ``idt`` and
    prepended to the stripped line.
    '''
    if not indent:
        # Indentation matching disabled -- pass the line through as-is.
        return line
    idt = []
    for ch in src:
        if ch in ('\t', ' '):
            # Still inside the leading-whitespace run of the source line.
            idt.append(ch)
        else:
            break
return ''.join(idt) + line.lstrip() | def function[_set_line_indent, parameter[src, line, indent]]:
constant[
Indent the line with the source line.
]
if <ast.UnaryOp object at 0x7da18f58f820> begin[:]
return[name[line]]
variable[idt] assign[=] list[[]]
for taget[name[c]] in starred[name[src]] begin[:]
if compare[name[c] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da18f58dae0>, <ast.Constant object at 0x7da18f58ed70>]]] begin[:]
break
call[name[idt].append, parameter[name[c]]]
return[binary_operation[call[constant[].join, parameter[name[idt]]] + call[name[line].lstrip, parameter[]]]] | keyword[def] identifier[_set_line_indent] ( identifier[src] , identifier[line] , identifier[indent] ):
literal[string]
keyword[if] keyword[not] identifier[indent] :
keyword[return] identifier[line]
identifier[idt] =[]
keyword[for] identifier[c] keyword[in] identifier[src] :
keyword[if] identifier[c] keyword[not] keyword[in] [ literal[string] , literal[string] ]:
keyword[break]
identifier[idt] . identifier[append] ( identifier[c] )
keyword[return] literal[string] . identifier[join] ( identifier[idt] )+ identifier[line] . identifier[lstrip] () | def _set_line_indent(src, line, indent):
"""
Indent the line with the source line.
"""
if not indent:
return line # depends on [control=['if'], data=[]]
idt = []
for c in src:
if c not in ['\t', ' ']:
break # depends on [control=['if'], data=[]]
idt.append(c) # depends on [control=['for'], data=['c']]
return ''.join(idt) + line.lstrip() |
def user_list(user=None, host=None, port=None, maintenance_db=None,
              password=None, runas=None, return_password=False):
    '''
    Return a dict with information about users of a Postgres server,
    keyed by role name. Each value records the role's privileges,
    connection limit, group membership, expiry time and default settings.
    Set return_password to True to also get the password hash in the result.
    Returns False when the server version cannot be determined.
    CLI Example:
    .. code-block:: bash
        salt '*' postgres.user_list
    '''
    ret = {}
    # The server version determines which catalog columns may be queried.
    ver = _parsed_version(user=user,
                          host=host,
                          port=port,
                          maintenance_db=maintenance_db,
                          password=password,
                          runas=runas)
    if ver:
        # pg_roles.rolreplication only exists from PostgreSQL 9.1 onwards.
        if ver >= _LooseVersion('9.1'):
            replication_column = 'pg_roles.rolreplication'
        else:
            replication_column = 'NULL'
        # pg_roles.rolcatupdate was dropped in PostgreSQL 9.5, so select
        # NULL there instead.
        if ver >= _LooseVersion('9.5'):
            rolcatupdate_column = 'NULL'
        else:
            rolcatupdate_column = 'pg_roles.rolcatupdate'
    else:
        log.error('Could not retrieve Postgres version. Is Postgresql server running?')
        return False
    # will return empty string if return_password = False
    _x = lambda s: s if return_password else ''
    # Build the catalog query; the password column and the joins providing
    # it are only spliced in (via _x) when return_password is requested.
    query = (''.join([
        'SELECT '
        'pg_roles.rolname as "name",'
        'pg_roles.rolsuper as "superuser", '
        'pg_roles.rolinherit as "inherits privileges", '
        'pg_roles.rolcreaterole as "can create roles", '
        'pg_roles.rolcreatedb as "can create databases", '
        '{0} as "can update system catalogs", '
        'pg_roles.rolcanlogin as "can login", '
        '{1} as "replication", '
        'pg_roles.rolconnlimit as "connections", '
        '(SELECT array_agg(pg_roles2.rolname)'
        ' FROM pg_catalog.pg_auth_members'
        ' JOIN pg_catalog.pg_roles pg_roles2 ON (pg_auth_members.roleid = pg_roles2.oid)'
        ' WHERE pg_auth_members.member = pg_roles.oid) as "groups",'
        'pg_roles.rolvaliduntil::timestamp(0) as "expiry time", '
        'pg_roles.rolconfig as "defaults variables" '
        , _x(', COALESCE(pg_shadow.passwd, pg_authid.rolpassword) as "password" '),
        'FROM pg_roles '
        , _x('LEFT JOIN pg_authid ON pg_roles.oid = pg_authid.oid ')
        , _x('LEFT JOIN pg_shadow ON pg_roles.oid = pg_shadow.usesysid')
    ]).format(rolcatupdate_column, replication_column))
    rows = psql_query(query,
                      runas=runas,
                      host=host,
                      user=user,
                      port=port,
                      maintenance_db=maintenance_db,
                      password=password)
    def get_bool(rowdict, key):
        '''
        Returns the boolean value of the key, instead of 't' and 'f' strings.
        Any other value (e.g. NULL or a number) maps to None.
        '''
        if rowdict[key] == 't':
            return True
        elif rowdict[key] == 'f':
            return False
        else:
            return None
    for row in rows:
        retrow = {}
        # Convert the textual 't'/'f' catalog flags into real booleans.
        # NOTE(review): 'connections' (rolconnlimit) is an integer column,
        # so get_bool maps it to None -- looks like a latent bug; confirm
        # whether callers expect the numeric limit here.
        for key in ('superuser', 'inherits privileges', 'can create roles',
                    'can create databases', 'can update system catalogs',
                    'can login', 'replication', 'connections'):
            retrow[key] = get_bool(row, key)
        # Parse timestamp columns; unparsable or empty values become None.
        for date_key in ('expiry time',):
            try:
                retrow[date_key] = datetime.datetime.strptime(
                    row[date_key], '%Y-%m-%d %H:%M:%S')
            except ValueError:
                retrow[date_key] = None
        retrow['defaults variables'] = row['defaults variables']
        if return_password:
            retrow['password'] = row['password']
        # use csv reader to handle quoted roles correctly
        retrow['groups'] = list(csv.reader([row['groups'].strip('{}')]))[0]
        ret[row['name']] = retrow
return ret | def function[user_list, parameter[user, host, port, maintenance_db, password, runas, return_password]]:
constant[
Return a dict with information about users of a Postgres server.
Set return_password to True to get password hash in the result.
CLI Example:
.. code-block:: bash
salt '*' postgres.user_list
]
variable[ret] assign[=] dictionary[[], []]
variable[ver] assign[=] call[name[_parsed_version], parameter[]]
if name[ver] begin[:]
if compare[name[ver] greater_or_equal[>=] call[name[_LooseVersion], parameter[constant[9.1]]]] begin[:]
variable[replication_column] assign[=] constant[pg_roles.rolreplication]
if compare[name[ver] greater_or_equal[>=] call[name[_LooseVersion], parameter[constant[9.5]]]] begin[:]
variable[rolcatupdate_column] assign[=] constant[NULL]
variable[_x] assign[=] <ast.Lambda object at 0x7da18dc07460>
variable[query] assign[=] call[call[constant[].join, parameter[list[[<ast.Constant object at 0x7da18dc06b00>, <ast.Call object at 0x7da18dc06dd0>, <ast.Constant object at 0x7da18dc07220>, <ast.Call object at 0x7da18dc04640>, <ast.Call object at 0x7da18dc05840>]]]].format, parameter[name[rolcatupdate_column], name[replication_column]]]
variable[rows] assign[=] call[name[psql_query], parameter[name[query]]]
def function[get_bool, parameter[rowdict, key]]:
constant[
Returns the boolean value of the key, instead of 't' and 'f' strings.
]
if compare[call[name[rowdict]][name[key]] equal[==] constant[t]] begin[:]
return[constant[True]]
for taget[name[row]] in starred[name[rows]] begin[:]
variable[retrow] assign[=] dictionary[[], []]
for taget[name[key]] in starred[tuple[[<ast.Constant object at 0x7da18dc054e0>, <ast.Constant object at 0x7da18dc07100>, <ast.Constant object at 0x7da18dc06500>, <ast.Constant object at 0x7da18dc05510>, <ast.Constant object at 0x7da18dc07280>, <ast.Constant object at 0x7da18dc07340>, <ast.Constant object at 0x7da18dc041c0>, <ast.Constant object at 0x7da18dc05c90>]]] begin[:]
call[name[retrow]][name[key]] assign[=] call[name[get_bool], parameter[name[row], name[key]]]
for taget[name[date_key]] in starred[tuple[[<ast.Constant object at 0x7da18dc05300>]]] begin[:]
<ast.Try object at 0x7da18dc047f0>
call[name[retrow]][constant[defaults variables]] assign[=] call[name[row]][constant[defaults variables]]
if name[return_password] begin[:]
call[name[retrow]][constant[password]] assign[=] call[name[row]][constant[password]]
call[name[retrow]][constant[groups]] assign[=] call[call[name[list], parameter[call[name[csv].reader, parameter[list[[<ast.Call object at 0x7da18dc06530>]]]]]]][constant[0]]
call[name[ret]][call[name[row]][constant[name]]] assign[=] name[retrow]
return[name[ret]] | keyword[def] identifier[user_list] ( identifier[user] = keyword[None] , identifier[host] = keyword[None] , identifier[port] = keyword[None] , identifier[maintenance_db] = keyword[None] ,
identifier[password] = keyword[None] , identifier[runas] = keyword[None] , identifier[return_password] = keyword[False] ):
literal[string]
identifier[ret] ={}
identifier[ver] = identifier[_parsed_version] ( identifier[user] = identifier[user] ,
identifier[host] = identifier[host] ,
identifier[port] = identifier[port] ,
identifier[maintenance_db] = identifier[maintenance_db] ,
identifier[password] = identifier[password] ,
identifier[runas] = identifier[runas] )
keyword[if] identifier[ver] :
keyword[if] identifier[ver] >= identifier[_LooseVersion] ( literal[string] ):
identifier[replication_column] = literal[string]
keyword[else] :
identifier[replication_column] = literal[string]
keyword[if] identifier[ver] >= identifier[_LooseVersion] ( literal[string] ):
identifier[rolcatupdate_column] = literal[string]
keyword[else] :
identifier[rolcatupdate_column] = literal[string]
keyword[else] :
identifier[log] . identifier[error] ( literal[string] )
keyword[return] keyword[False]
identifier[_x] = keyword[lambda] identifier[s] : identifier[s] keyword[if] identifier[return_password] keyword[else] literal[string]
identifier[query] =( literal[string] . identifier[join] ([
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
, identifier[_x] ( literal[string] ),
literal[string]
, identifier[_x] ( literal[string] )
, identifier[_x] ( literal[string] )
]). identifier[format] ( identifier[rolcatupdate_column] , identifier[replication_column] ))
identifier[rows] = identifier[psql_query] ( identifier[query] ,
identifier[runas] = identifier[runas] ,
identifier[host] = identifier[host] ,
identifier[user] = identifier[user] ,
identifier[port] = identifier[port] ,
identifier[maintenance_db] = identifier[maintenance_db] ,
identifier[password] = identifier[password] )
keyword[def] identifier[get_bool] ( identifier[rowdict] , identifier[key] ):
literal[string]
keyword[if] identifier[rowdict] [ identifier[key] ]== literal[string] :
keyword[return] keyword[True]
keyword[elif] identifier[rowdict] [ identifier[key] ]== literal[string] :
keyword[return] keyword[False]
keyword[else] :
keyword[return] keyword[None]
keyword[for] identifier[row] keyword[in] identifier[rows] :
identifier[retrow] ={}
keyword[for] identifier[key] keyword[in] ( literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ):
identifier[retrow] [ identifier[key] ]= identifier[get_bool] ( identifier[row] , identifier[key] )
keyword[for] identifier[date_key] keyword[in] ( literal[string] ,):
keyword[try] :
identifier[retrow] [ identifier[date_key] ]= identifier[datetime] . identifier[datetime] . identifier[strptime] (
identifier[row] [ identifier[date_key] ], literal[string] )
keyword[except] identifier[ValueError] :
identifier[retrow] [ identifier[date_key] ]= keyword[None]
identifier[retrow] [ literal[string] ]= identifier[row] [ literal[string] ]
keyword[if] identifier[return_password] :
identifier[retrow] [ literal[string] ]= identifier[row] [ literal[string] ]
identifier[retrow] [ literal[string] ]= identifier[list] ( identifier[csv] . identifier[reader] ([ identifier[row] [ literal[string] ]. identifier[strip] ( literal[string] )]))[ literal[int] ]
identifier[ret] [ identifier[row] [ literal[string] ]]= identifier[retrow]
keyword[return] identifier[ret] | def user_list(user=None, host=None, port=None, maintenance_db=None, password=None, runas=None, return_password=False):
"""
Return a dict with information about users of a Postgres server.
Set return_password to True to get password hash in the result.
CLI Example:
.. code-block:: bash
salt '*' postgres.user_list
"""
ret = {}
ver = _parsed_version(user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas)
if ver:
if ver >= _LooseVersion('9.1'):
replication_column = 'pg_roles.rolreplication' # depends on [control=['if'], data=[]]
else:
replication_column = 'NULL'
if ver >= _LooseVersion('9.5'):
rolcatupdate_column = 'NULL' # depends on [control=['if'], data=[]]
else:
rolcatupdate_column = 'pg_roles.rolcatupdate' # depends on [control=['if'], data=[]]
else:
log.error('Could not retrieve Postgres version. Is Postgresql server running?')
return False
# will return empty string if return_password = False
_x = lambda s: s if return_password else ''
query = ''.join(['SELECT pg_roles.rolname as "name",pg_roles.rolsuper as "superuser", pg_roles.rolinherit as "inherits privileges", pg_roles.rolcreaterole as "can create roles", pg_roles.rolcreatedb as "can create databases", {0} as "can update system catalogs", pg_roles.rolcanlogin as "can login", {1} as "replication", pg_roles.rolconnlimit as "connections", (SELECT array_agg(pg_roles2.rolname) FROM pg_catalog.pg_auth_members JOIN pg_catalog.pg_roles pg_roles2 ON (pg_auth_members.roleid = pg_roles2.oid) WHERE pg_auth_members.member = pg_roles.oid) as "groups",pg_roles.rolvaliduntil::timestamp(0) as "expiry time", pg_roles.rolconfig as "defaults variables" ', _x(', COALESCE(pg_shadow.passwd, pg_authid.rolpassword) as "password" '), 'FROM pg_roles ', _x('LEFT JOIN pg_authid ON pg_roles.oid = pg_authid.oid '), _x('LEFT JOIN pg_shadow ON pg_roles.oid = pg_shadow.usesysid')]).format(rolcatupdate_column, replication_column)
rows = psql_query(query, runas=runas, host=host, user=user, port=port, maintenance_db=maintenance_db, password=password)
def get_bool(rowdict, key):
"""
Returns the boolean value of the key, instead of 't' and 'f' strings.
"""
if rowdict[key] == 't':
return True # depends on [control=['if'], data=[]]
elif rowdict[key] == 'f':
return False # depends on [control=['if'], data=[]]
else:
return None
for row in rows:
retrow = {}
for key in ('superuser', 'inherits privileges', 'can create roles', 'can create databases', 'can update system catalogs', 'can login', 'replication', 'connections'):
retrow[key] = get_bool(row, key) # depends on [control=['for'], data=['key']]
for date_key in ('expiry time',):
try:
retrow[date_key] = datetime.datetime.strptime(row[date_key], '%Y-%m-%d %H:%M:%S') # depends on [control=['try'], data=[]]
except ValueError:
retrow[date_key] = None # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['date_key']]
retrow['defaults variables'] = row['defaults variables']
if return_password:
retrow['password'] = row['password'] # depends on [control=['if'], data=[]]
# use csv reader to handle quoted roles correctly
retrow['groups'] = list(csv.reader([row['groups'].strip('{}')]))[0]
ret[row['name']] = retrow # depends on [control=['for'], data=['row']]
return ret |
def send(self, *args, **kwargs):
        """Send messages to another service that is connected to the currently
        running service via the recipe. Uses either the default channel name
        (set via the set_default_channel method) when the recipe step defines
        named output channels, or the step's unnamed output definition.

        Positional and keyword arguments are forwarded unchanged to the
        underlying send call.

        Raises ValueError when no transport object or no recipe step with a
        selected step is attached to this wrapper.
        """
        # A transport reference is mandatory -- without one nothing can be
        # delivered anywhere.
        if not self.transport:
            raise ValueError(
                "This RecipeWrapper object does not contain "
                "a reference to a transport object."
            )
        # Likewise a recipe with a currently selected step is required to
        # know where the message should go.
        if not self.recipe_step:
            raise ValueError(
                "This RecipeWrapper object does not contain "
                "a recipe with a selected step."
            )
        if "output" not in self.recipe_step:
            # The current recipe step does not have output channels.
            return
        if isinstance(self.recipe_step["output"], dict):
            # The current recipe step does have named output channels.
            if self.default_channel:
                # Use named output channel
                self.send_to(self.default_channel, *args, **kwargs)
            # NOTE(review): with named channels but no default channel set,
            # the message is silently dropped -- presumably intentional;
            # confirm callers are expected to use send_to() directly then.
        else:
            # The current recipe step has unnamed output channels; forward
            # the message straight to those destinations.
self._send_to_destinations(self.recipe_step["output"], *args, **kwargs) | def function[send, parameter[self]]:
constant[Send messages to another service that is connected to the currently
running service via the recipe. The 'send' method will either use a
default channel name, set via the set_default_channel method, or an
unnamed output definition.
]
if <ast.UnaryOp object at 0x7da2044c1a80> begin[:]
<ast.Raise object at 0x7da2044c1b10>
if <ast.UnaryOp object at 0x7da2044c20b0> begin[:]
<ast.Raise object at 0x7da2044c2500>
if compare[constant[output] <ast.NotIn object at 0x7da2590d7190> name[self].recipe_step] begin[:]
return[None]
if call[name[isinstance], parameter[call[name[self].recipe_step][constant[output]], name[dict]]] begin[:]
if name[self].default_channel begin[:]
call[name[self].send_to, parameter[name[self].default_channel, <ast.Starred object at 0x7da2044c0790>]] | keyword[def] identifier[send] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[transport] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string]
)
keyword[if] keyword[not] identifier[self] . identifier[recipe_step] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string]
)
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[recipe_step] :
keyword[return]
keyword[if] identifier[isinstance] ( identifier[self] . identifier[recipe_step] [ literal[string] ], identifier[dict] ):
keyword[if] identifier[self] . identifier[default_channel] :
identifier[self] . identifier[send_to] ( identifier[self] . identifier[default_channel] ,* identifier[args] ,** identifier[kwargs] )
keyword[else] :
identifier[self] . identifier[_send_to_destinations] ( identifier[self] . identifier[recipe_step] [ literal[string] ],* identifier[args] ,** identifier[kwargs] ) | def send(self, *args, **kwargs):
"""Send messages to another service that is connected to the currently
running service via the recipe. The 'send' method will either use a
default channel name, set via the set_default_channel method, or an
unnamed output definition.
"""
if not self.transport:
raise ValueError('This RecipeWrapper object does not contain a reference to a transport object.') # depends on [control=['if'], data=[]]
if not self.recipe_step:
raise ValueError('This RecipeWrapper object does not contain a recipe with a selected step.') # depends on [control=['if'], data=[]]
if 'output' not in self.recipe_step:
# The current recipe step does not have output channels.
return # depends on [control=['if'], data=[]]
if isinstance(self.recipe_step['output'], dict):
# The current recipe step does have named output channels.
if self.default_channel:
# Use named output channel
self.send_to(self.default_channel, *args, **kwargs) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# The current recipe step does have unnamed output channels.
self._send_to_destinations(self.recipe_step['output'], *args, **kwargs) |
def do_pdp_descriptor(conf, cert=None, enc_cert=None):
    """ Create a Policy Decision Point descriptor.

    :param conf: configuration object, queried for the "pdp" endpoints,
        name-id formats and the metadata key usage
    :param cert: signing certificate; when given, a key descriptor is
        attached to the result
    :param enc_cert: encryption certificate forwarded to do_key_descriptor
    :return: a populated md.PDPDescriptor instance
    """
    pdp = md.PDPDescriptor()
    # Advertise which protocol the PDP supports (the SAML protocol namespace).
    pdp.protocol_support_enumeration = samlp.NAMESPACE
    endps = conf.getattr("endpoints", "pdp")
    if endps:
        # Attach every configured PDP endpoint as an attribute on the
        # descriptor, using the endpoint name as the attribute name.
        for (endpoint, instlist) in do_endpoints(endps,
                                                 ENDPOINTS["pdp"]).items():
            setattr(pdp, endpoint, instlist)
    _do_nameid_format(pdp, conf, "pdp")
    if cert:
        # Only add key material when a certificate was actually supplied.
        pdp.key_descriptor = do_key_descriptor(cert, enc_cert,
                                               use=conf.metadata_key_usage)
return pdp | def function[do_pdp_descriptor, parameter[conf, cert, enc_cert]]:
constant[ Create a Policy Decision Point descriptor ]
variable[pdp] assign[=] call[name[md].PDPDescriptor, parameter[]]
name[pdp].protocol_support_enumeration assign[=] name[samlp].NAMESPACE
variable[endps] assign[=] call[name[conf].getattr, parameter[constant[endpoints], constant[pdp]]]
if name[endps] begin[:]
for taget[tuple[[<ast.Name object at 0x7da20c9922f0>, <ast.Name object at 0x7da20c993700>]]] in starred[call[call[name[do_endpoints], parameter[name[endps], call[name[ENDPOINTS]][constant[pdp]]]].items, parameter[]]] begin[:]
call[name[setattr], parameter[name[pdp], name[endpoint], name[instlist]]]
call[name[_do_nameid_format], parameter[name[pdp], name[conf], constant[pdp]]]
if name[cert] begin[:]
name[pdp].key_descriptor assign[=] call[name[do_key_descriptor], parameter[name[cert], name[enc_cert]]]
return[name[pdp]] | keyword[def] identifier[do_pdp_descriptor] ( identifier[conf] , identifier[cert] = keyword[None] , identifier[enc_cert] = keyword[None] ):
literal[string]
identifier[pdp] = identifier[md] . identifier[PDPDescriptor] ()
identifier[pdp] . identifier[protocol_support_enumeration] = identifier[samlp] . identifier[NAMESPACE]
identifier[endps] = identifier[conf] . identifier[getattr] ( literal[string] , literal[string] )
keyword[if] identifier[endps] :
keyword[for] ( identifier[endpoint] , identifier[instlist] ) keyword[in] identifier[do_endpoints] ( identifier[endps] ,
identifier[ENDPOINTS] [ literal[string] ]). identifier[items] ():
identifier[setattr] ( identifier[pdp] , identifier[endpoint] , identifier[instlist] )
identifier[_do_nameid_format] ( identifier[pdp] , identifier[conf] , literal[string] )
keyword[if] identifier[cert] :
identifier[pdp] . identifier[key_descriptor] = identifier[do_key_descriptor] ( identifier[cert] , identifier[enc_cert] ,
identifier[use] = identifier[conf] . identifier[metadata_key_usage] )
keyword[return] identifier[pdp] | def do_pdp_descriptor(conf, cert=None, enc_cert=None):
""" Create a Policy Decision Point descriptor """
pdp = md.PDPDescriptor()
pdp.protocol_support_enumeration = samlp.NAMESPACE
endps = conf.getattr('endpoints', 'pdp')
if endps:
for (endpoint, instlist) in do_endpoints(endps, ENDPOINTS['pdp']).items():
setattr(pdp, endpoint, instlist) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
_do_nameid_format(pdp, conf, 'pdp')
if cert:
pdp.key_descriptor = do_key_descriptor(cert, enc_cert, use=conf.metadata_key_usage) # depends on [control=['if'], data=[]]
return pdp |
def truncate(self, path, size):
        """
        Change the size of the file specified by ``path``. This usually
        extends or shrinks the size of the file, just like the `~file.truncate`
        method on Python file objects.

        :param str path: path of the file to modify
        :param int size: the new size of the file
        """
        # Resolve the path against the current remote working directory.
        path = self._adjust_cwd(path)
        self._log(DEBUG, "truncate({!r}, {!r})".format(path, size))
        # The resize is expressed as a SETSTAT request carrying only an
        # st_size attribute; the server applies it to the remote file.
        attr = SFTPAttributes()
        attr.st_size = size
self._request(CMD_SETSTAT, path, attr) | def function[truncate, parameter[self, path, size]]:
constant[
Change the size of the file specified by ``path``. This usually
extends or shrinks the size of the file, just like the `~file.truncate`
method on Python file objects.
:param str path: path of the file to modify
:param int size: the new size of the file
]
variable[path] assign[=] call[name[self]._adjust_cwd, parameter[name[path]]]
call[name[self]._log, parameter[name[DEBUG], call[constant[truncate({!r}, {!r})].format, parameter[name[path], name[size]]]]]
variable[attr] assign[=] call[name[SFTPAttributes], parameter[]]
name[attr].st_size assign[=] name[size]
call[name[self]._request, parameter[name[CMD_SETSTAT], name[path], name[attr]]] | keyword[def] identifier[truncate] ( identifier[self] , identifier[path] , identifier[size] ):
literal[string]
identifier[path] = identifier[self] . identifier[_adjust_cwd] ( identifier[path] )
identifier[self] . identifier[_log] ( identifier[DEBUG] , literal[string] . identifier[format] ( identifier[path] , identifier[size] ))
identifier[attr] = identifier[SFTPAttributes] ()
identifier[attr] . identifier[st_size] = identifier[size]
identifier[self] . identifier[_request] ( identifier[CMD_SETSTAT] , identifier[path] , identifier[attr] ) | def truncate(self, path, size):
"""
Change the size of the file specified by ``path``. This usually
extends or shrinks the size of the file, just like the `~file.truncate`
method on Python file objects.
:param str path: path of the file to modify
:param int size: the new size of the file
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'truncate({!r}, {!r})'.format(path, size))
attr = SFTPAttributes()
attr.st_size = size
self._request(CMD_SETSTAT, path, attr) |
def bake_content(request):
    """Invoke the baking process - trigger post-publication.

    Looks up the book identified by the ``ident_hash`` request parameter
    and either notifies the 'post_publication' channel directly (when the
    module is already in state 5) or moves the module into state 5.
    Raises HTTPNotFound for unknown/invalid ident hashes and
    HTTPBadRequest when the version is missing or the target is not a book.
    """
    ident_hash = request.matchdict['ident_hash']
    try:
        # NOTE(review): ``id`` shadows the builtin and is unused below;
        # only ``version`` is actually consulted.
        id, version = split_ident_hash(ident_hash)
    except IdentHashError:
        raise httpexceptions.HTTPNotFound()
    if not version:
        # Baking requires an explicit book version, not just the uuid.
        raise httpexceptions.HTTPBadRequest('must specify the version')
    with db_connect() as db_conn:
        with db_conn.cursor() as cursor:
            # Fetch whether the module is a Collection (book), its current
            # state and its internal ident in one round-trip.
            cursor.execute("""\
SELECT bool(portal_type = 'Collection'), stateid, module_ident
FROM modules
WHERE ident_hash(uuid, major_version, minor_version) = %s
""", (ident_hash,))
            try:
                is_binder, stateid, module_ident = cursor.fetchone()
            except TypeError:
                # fetchone() returned None: no module with that ident hash.
                raise httpexceptions.HTTPNotFound()
            if not is_binder:
                # Only whole books (Collections) can be baked.
                raise httpexceptions.HTTPBadRequest(
                    '{} is not a book'.format(ident_hash))
            if stateid == 5:
                # Already in state 5: fire the notification directly with a
                # JSON payload naming the module and a timestamp.
                cursor.execute("""\
SELECT pg_notify('post_publication',
'{"module_ident": '||%s||',
 "ident_hash": "'||%s||'",
 "timestamp": "'||CURRENT_TIMESTAMP||'"}')
""", (module_ident, ident_hash))
            else:
                # Move the module into state 5.
                # NOTE(review): 5 appears to denote the post-publication
                # state -- confirm against the modulestates table.
                cursor.execute("""\
UPDATE modules SET stateid = 5
WHERE ident_hash(uuid, major_version, minor_version) = %s
""", (ident_hash,)) | def function[bake_content, parameter[request]]:
constant[Invoke the baking process - trigger post-publication]
variable[ident_hash] assign[=] call[name[request].matchdict][constant[ident_hash]]
<ast.Try object at 0x7da20c6e4a00>
if <ast.UnaryOp object at 0x7da20c6e6980> begin[:]
<ast.Raise object at 0x7da20c6e47c0>
with call[name[db_connect], parameter[]] begin[:]
with call[name[db_conn].cursor, parameter[]] begin[:]
call[name[cursor].execute, parameter[constant[SELECT bool(portal_type = 'Collection'), stateid, module_ident
FROM modules
WHERE ident_hash(uuid, major_version, minor_version) = %s
], tuple[[<ast.Name object at 0x7da1b00db040>]]]]
<ast.Try object at 0x7da1b00d9f60>
if <ast.UnaryOp object at 0x7da1b00d8a90> begin[:]
<ast.Raise object at 0x7da1b00da1a0>
if compare[name[stateid] equal[==] constant[5]] begin[:]
call[name[cursor].execute, parameter[constant[SELECT pg_notify('post_publication',
'{"module_ident": '||%s||',
"ident_hash": "'||%s||'",
"timestamp": "'||CURRENT_TIMESTAMP||'"}')
], tuple[[<ast.Name object at 0x7da1b00dbbb0>, <ast.Name object at 0x7da1b00d9e70>]]]] | keyword[def] identifier[bake_content] ( identifier[request] ):
literal[string]
identifier[ident_hash] = identifier[request] . identifier[matchdict] [ literal[string] ]
keyword[try] :
identifier[id] , identifier[version] = identifier[split_ident_hash] ( identifier[ident_hash] )
keyword[except] identifier[IdentHashError] :
keyword[raise] identifier[httpexceptions] . identifier[HTTPNotFound] ()
keyword[if] keyword[not] identifier[version] :
keyword[raise] identifier[httpexceptions] . identifier[HTTPBadRequest] ( literal[string] )
keyword[with] identifier[db_connect] () keyword[as] identifier[db_conn] :
keyword[with] identifier[db_conn] . identifier[cursor] () keyword[as] identifier[cursor] :
identifier[cursor] . identifier[execute] ( literal[string] ,( identifier[ident_hash] ,))
keyword[try] :
identifier[is_binder] , identifier[stateid] , identifier[module_ident] = identifier[cursor] . identifier[fetchone] ()
keyword[except] identifier[TypeError] :
keyword[raise] identifier[httpexceptions] . identifier[HTTPNotFound] ()
keyword[if] keyword[not] identifier[is_binder] :
keyword[raise] identifier[httpexceptions] . identifier[HTTPBadRequest] (
literal[string] . identifier[format] ( identifier[ident_hash] ))
keyword[if] identifier[stateid] == literal[int] :
identifier[cursor] . identifier[execute] ( literal[string] ,( identifier[module_ident] , identifier[ident_hash] ))
keyword[else] :
identifier[cursor] . identifier[execute] ( literal[string] ,( identifier[ident_hash] ,)) | def bake_content(request):
"""Invoke the baking process - trigger post-publication"""
ident_hash = request.matchdict['ident_hash']
try:
(id, version) = split_ident_hash(ident_hash) # depends on [control=['try'], data=[]]
except IdentHashError:
raise httpexceptions.HTTPNotFound() # depends on [control=['except'], data=[]]
if not version:
raise httpexceptions.HTTPBadRequest('must specify the version') # depends on [control=['if'], data=[]]
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("SELECT bool(portal_type = 'Collection'), stateid, module_ident\nFROM modules\nWHERE ident_hash(uuid, major_version, minor_version) = %s\n", (ident_hash,))
try:
(is_binder, stateid, module_ident) = cursor.fetchone() # depends on [control=['try'], data=[]]
except TypeError:
raise httpexceptions.HTTPNotFound() # depends on [control=['except'], data=[]]
if not is_binder:
raise httpexceptions.HTTPBadRequest('{} is not a book'.format(ident_hash)) # depends on [control=['if'], data=[]]
if stateid == 5:
cursor.execute('SELECT pg_notify(\'post_publication\',\n\'{"module_ident": \'||%s||\',\n "ident_hash": "\'||%s||\'",\n "timestamp": "\'||CURRENT_TIMESTAMP||\'"}\')\n', (module_ident, ident_hash)) # depends on [control=['if'], data=[]]
else:
cursor.execute('UPDATE modules SET stateid = 5\nWHERE ident_hash(uuid, major_version, minor_version) = %s\n', (ident_hash,)) # depends on [control=['with'], data=['cursor']] # depends on [control=['with'], data=['db_conn']] |
def get_shipment(self, resource_id):
        """Retrieve a single shipment by a shipment's ID.

        :param resource_id: identifier of the shipment to fetch
        :return: the shipment resource returned by the Shipments endpoint
        """
return Shipments(self.client).on(self).get(resource_id) | def function[get_shipment, parameter[self, resource_id]]:
constant[Retrieve a single shipment by a shipment's ID.]
return[call[call[call[name[Shipments], parameter[name[self].client]].on, parameter[name[self]]].get, parameter[name[resource_id]]]] | keyword[def] identifier[get_shipment] ( identifier[self] , identifier[resource_id] ):
literal[string]
keyword[return] identifier[Shipments] ( identifier[self] . identifier[client] ). identifier[on] ( identifier[self] ). identifier[get] ( identifier[resource_id] ) | def get_shipment(self, resource_id):
"""Retrieve a single shipment by a shipment's ID."""
return Shipments(self.client).on(self).get(resource_id) |
def match_hostname(cert, hostname):
    """Verify that *cert* (in decoded format as returned by
    SSLSocket.getpeercert()) matches the *hostname*.

    RFC 2818 and RFC 6125 rules are followed. *hostname* may also be an
    IP address literal, in which case it is matched against 'IP Address'
    subjectAltName entries of the certificate.

    Raises CertificateError on failure. On success, the function
    returns nothing.
    """
    if not cert:
        raise ValueError("empty or no certificate, match_hostname needs a "
                         "SSL socket or SSL context with either "
                         "CERT_OPTIONAL or CERT_REQUIRED")
    try:
        # If hostname parses as an IP address, match IP SAN entries instead
        # of DNS names below.
        host_ip = ipaddress.ip_address(six.text_type(hostname))
    except ValueError:
        # Not an IP address (common case)
        host_ip = None
    # Collects the names we compared against, for the error message.
    dnsnames = []
    san = cert.get('subjectAltName', ())
    for key, value in san:
        if key == 'DNS':
            if host_ip is None and _dnsname_match(value, hostname):
                return
            dnsnames.append(value)
        elif key == 'IP Address':
            if host_ip is not None and _ipaddress_match(value, host_ip):
                return
            dnsnames.append(value)
    if not dnsnames:
        # The subject is only checked when there is no dNSName entry
        # in subjectAltName
        for sub in cert.get('subject', ()):
            for key, value in sub:
                # XXX according to RFC 2818, the most specific Common Name
                # must be used.
                if key == 'commonName':
                    if _dnsname_match(value, hostname):
                        return
                    dnsnames.append(value)
    # Nothing matched: raise with as much context as we gathered.
    if len(dnsnames) > 1:
        raise CertificateError(
            "hostname %r doesn't match either of %s"
            % (hostname, ', '.join(map(repr, dnsnames))))
    elif len(dnsnames) == 1:
        raise CertificateError(
            "hostname %r doesn't match %r"
            % (hostname, dnsnames[0])
        )
    else:
        raise CertificateError(
            "no appropriate commonName or "
            "subjectAltName fields were found"
        )
constant[Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
]
if <ast.UnaryOp object at 0x7da204960670> begin[:]
<ast.Raise object at 0x7da2049630d0>
<ast.Try object at 0x7da204963940>
variable[dnsnames] assign[=] list[[]]
variable[san] assign[=] call[name[cert].get, parameter[constant[subjectAltName], tuple[[]]]]
for taget[tuple[[<ast.Name object at 0x7da204963dc0>, <ast.Name object at 0x7da204963850>]]] in starred[name[san]] begin[:]
if compare[name[key] equal[==] constant[DNS]] begin[:]
if <ast.BoolOp object at 0x7da204960790> begin[:]
return[None]
call[name[dnsnames].append, parameter[name[value]]]
if <ast.UnaryOp object at 0x7da204962710> begin[:]
for taget[name[sub]] in starred[call[name[cert].get, parameter[constant[subject], tuple[[]]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da204961e70>, <ast.Name object at 0x7da204963ca0>]]] in starred[name[sub]] begin[:]
if compare[name[key] equal[==] constant[commonName]] begin[:]
if call[name[_dnsname_match], parameter[name[value], name[hostname]]] begin[:]
return[None]
call[name[dnsnames].append, parameter[name[value]]]
if compare[call[name[len], parameter[name[dnsnames]]] greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da20c7c85e0> | keyword[def] identifier[match_hostname] ( identifier[cert] , identifier[hostname] ):
literal[string]
keyword[if] keyword[not] identifier[cert] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
literal[string] )
keyword[try] :
identifier[host_ip] = identifier[ipaddress] . identifier[ip_address] ( identifier[six] . identifier[text_type] ( identifier[hostname] ))
keyword[except] identifier[ValueError] :
identifier[host_ip] = keyword[None]
identifier[dnsnames] =[]
identifier[san] = identifier[cert] . identifier[get] ( literal[string] ,())
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[san] :
keyword[if] identifier[key] == literal[string] :
keyword[if] identifier[host_ip] keyword[is] keyword[None] keyword[and] identifier[_dnsname_match] ( identifier[value] , identifier[hostname] ):
keyword[return]
identifier[dnsnames] . identifier[append] ( identifier[value] )
keyword[elif] identifier[key] == literal[string] :
keyword[if] identifier[host_ip] keyword[is] keyword[not] keyword[None] keyword[and] identifier[_ipaddress_match] ( identifier[value] , identifier[host_ip] ):
keyword[return]
identifier[dnsnames] . identifier[append] ( identifier[value] )
keyword[if] keyword[not] identifier[dnsnames] :
keyword[for] identifier[sub] keyword[in] identifier[cert] . identifier[get] ( literal[string] ,()):
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[sub] :
keyword[if] identifier[key] == literal[string] :
keyword[if] identifier[_dnsname_match] ( identifier[value] , identifier[hostname] ):
keyword[return]
identifier[dnsnames] . identifier[append] ( identifier[value] )
keyword[if] identifier[len] ( identifier[dnsnames] )> literal[int] :
keyword[raise] identifier[CertificateError] (
literal[string]
%( identifier[hostname] , literal[string] . identifier[join] ( identifier[map] ( identifier[repr] , identifier[dnsnames] ))))
keyword[elif] identifier[len] ( identifier[dnsnames] )== literal[int] :
keyword[raise] identifier[CertificateError] (
literal[string]
%( identifier[hostname] , identifier[dnsnames] [ literal[int] ])
)
keyword[else] :
keyword[raise] identifier[CertificateError] (
literal[string]
literal[string]
) | def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError('empty or no certificate, match_hostname needs a SSL socket or SSL context with either CERT_OPTIONAL or CERT_REQUIRED') # depends on [control=['if'], data=[]]
try:
host_ip = ipaddress.ip_address(six.text_type(hostname)) # depends on [control=['try'], data=[]]
except ValueError:
# Not an IP address (common case)
host_ip = None # depends on [control=['except'], data=[]]
dnsnames = []
san = cert.get('subjectAltName', ())
for (key, value) in san:
if key == 'DNS':
if host_ip is None and _dnsname_match(value, hostname):
return # depends on [control=['if'], data=[]]
dnsnames.append(value) # depends on [control=['if'], data=[]]
elif key == 'IP Address':
if host_ip is not None and _ipaddress_match(value, host_ip):
return # depends on [control=['if'], data=[]]
dnsnames.append(value) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for (key, value) in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return # depends on [control=['if'], data=[]]
dnsnames.append(value) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['sub']] # depends on [control=['if'], data=[]]
if len(dnsnames) > 1:
raise CertificateError("hostname %r doesn't match either of %s" % (hostname, ', '.join(map(repr, dnsnames)))) # depends on [control=['if'], data=[]]
elif len(dnsnames) == 1:
raise CertificateError("hostname %r doesn't match %r" % (hostname, dnsnames[0])) # depends on [control=['if'], data=[]]
else:
raise CertificateError('no appropriate commonName or subjectAltName fields were found') |
def SetActiveBreakpoints(self, breakpoints_data):
    """Synchronizes local breakpoint state with the updated active list.

    Breakpoints no longer listed are cleared, newly listed ones are
    instantiated, and completion bookkeeping is pruned.

    Args:
      breakpoints_data: updated list of active breakpoints.
    """
    with self._lock:
        active_ids = set(entry['id'] for entry in breakpoints_data)
        # Tear down breakpoints that disappeared from the active list.
        for stale_id in six.viewkeys(self._active) - active_ids:
            self._active.pop(stale_id).Clear()
        # Instantiate breakpoints we have neither created nor completed.
        new_ids = active_ids - six.viewkeys(self._active) - self._completed
        self._active.update(
            [(entry['id'],
              python_breakpoint.PythonBreakpoint(
                  entry,
                  self._hub_client,
                  self,
                  self.data_visibility_policy))
             for entry in breakpoints_data
             if entry['id'] in new_ids])
        # IDs absent from breakpoints_data are confirmed removed by the hub
        # and will never reappear (the backend never reuses breakpoint IDs),
        # so their completion records can be forgotten.
        self._completed &= active_ids
        # With active breakpoints the next expiration is unknown (datetime.min
        # forces a recomputation); with none, nothing can expire.
        self._next_expiration = datetime.min if self._active else datetime.max
self._next_expiration = datetime.max | def function[SetActiveBreakpoints, parameter[self, breakpoints_data]]:
constant[Adds new breakpoints and removes missing ones.
Args:
breakpoints_data: updated list of active breakpoints.
]
with name[self]._lock begin[:]
variable[ids] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da2045653f0>]]
for taget[name[breakpoint_id]] in starred[binary_operation[call[name[six].viewkeys, parameter[name[self]._active]] - name[ids]]] begin[:]
call[call[name[self]._active.pop, parameter[name[breakpoint_id]]].Clear, parameter[]]
call[name[self]._active.update, parameter[<ast.ListComp object at 0x7da204565d80>]]
<ast.AugAssign object at 0x7da20c6aa470>
if name[self]._active begin[:]
name[self]._next_expiration assign[=] name[datetime].min | keyword[def] identifier[SetActiveBreakpoints] ( identifier[self] , identifier[breakpoints_data] ):
literal[string]
keyword[with] identifier[self] . identifier[_lock] :
identifier[ids] = identifier[set] ([ identifier[x] [ literal[string] ] keyword[for] identifier[x] keyword[in] identifier[breakpoints_data] ])
keyword[for] identifier[breakpoint_id] keyword[in] identifier[six] . identifier[viewkeys] ( identifier[self] . identifier[_active] )- identifier[ids] :
identifier[self] . identifier[_active] . identifier[pop] ( identifier[breakpoint_id] ). identifier[Clear] ()
identifier[self] . identifier[_active] . identifier[update] ([
( identifier[x] [ literal[string] ],
identifier[python_breakpoint] . identifier[PythonBreakpoint] (
identifier[x] ,
identifier[self] . identifier[_hub_client] ,
identifier[self] ,
identifier[self] . identifier[data_visibility_policy] ))
keyword[for] identifier[x] keyword[in] identifier[breakpoints_data]
keyword[if] identifier[x] [ literal[string] ] keyword[in] identifier[ids] - identifier[six] . identifier[viewkeys] ( identifier[self] . identifier[_active] )- identifier[self] . identifier[_completed] ])
identifier[self] . identifier[_completed] &= identifier[ids]
keyword[if] identifier[self] . identifier[_active] :
identifier[self] . identifier[_next_expiration] = identifier[datetime] . identifier[min]
keyword[else] :
identifier[self] . identifier[_next_expiration] = identifier[datetime] . identifier[max] | def SetActiveBreakpoints(self, breakpoints_data):
"""Adds new breakpoints and removes missing ones.
Args:
breakpoints_data: updated list of active breakpoints.
"""
with self._lock:
ids = set([x['id'] for x in breakpoints_data])
# Clear breakpoints that no longer show up in active breakpoints list.
for breakpoint_id in six.viewkeys(self._active) - ids:
self._active.pop(breakpoint_id).Clear() # depends on [control=['for'], data=['breakpoint_id']]
# Create new breakpoints.
self._active.update([(x['id'], python_breakpoint.PythonBreakpoint(x, self._hub_client, self, self.data_visibility_policy)) for x in breakpoints_data if x['id'] in ids - six.viewkeys(self._active) - self._completed])
# Remove entries from completed_breakpoints_ that weren't listed in
# breakpoints_data vector. These are confirmed to have been removed by the
# hub and the debuglet can now assume that they will never show up ever
# again. The backend never reuses breakpoint IDs.
self._completed &= ids
if self._active:
self._next_expiration = datetime.min # Not known. # depends on [control=['if'], data=[]]
else:
self._next_expiration = datetime.max # depends on [control=['with'], data=[]] |
def fade_out(self, duration=3):
    """Gradually dim the light to darkness, then switch it off entirely.

    `duration` controls how long the fade takes, in seconds (default 3).
    """
    parent = super(RgbLight, self)
    parent.fade_out(duration)
    self.off()
constant[Turns off the light by gradually fading it out.
The optional `duration` parameter allows for control
of the fade out duration (in seconds)]
call[call[name[super], parameter[name[RgbLight], name[self]]].fade_out, parameter[name[duration]]]
call[name[self].off, parameter[]] | keyword[def] identifier[fade_out] ( identifier[self] , identifier[duration] = literal[int] ):
literal[string]
identifier[super] ( identifier[RgbLight] , identifier[self] ). identifier[fade_out] ( identifier[duration] )
identifier[self] . identifier[off] () | def fade_out(self, duration=3):
"""Turns off the light by gradually fading it out.
The optional `duration` parameter allows for control
of the fade out duration (in seconds)"""
super(RgbLight, self).fade_out(duration)
self.off() |
def QA_SU_save_financial_files():
    """本地存储financialdata

    Downloads the zipped financial-report files and upserts each ``gpcw``
    file into the ``financial`` MongoDB collection, keyed uniquely on
    (code, report_date) so repeated runs are idempotent.
    """
    download_financialzip()
    coll = DATABASE.financial
    # Unique compound index: duplicate (code, report_date) rows are rejected
    # on insert, which makes re-running this function safe.
    coll.create_index(
        [("code", ASCENDING), ("report_date", ASCENDING)], unique=True)
    for item in os.listdir(download_path):
        if item[0:4] != 'gpcw':
            print(
                "file ", item, " is not start with gpcw , seems not a financial file , ignore!")
            continue
        # File names look like gpcwYYYYMMDD.*; the trailing 8 digits are the
        # report date.
        date = int(item.split('.')[0][-8:])
        print('QUANTAXIS NOW SAVING {}'.format(date))
        # 3600 is a heuristic for "this report date is already fully stored";
        # query the count once instead of twice.
        saved_count = coll.find({'report_date': date}).count()
        if saved_count < 3600:
            print(saved_count)
            data = QA_util_to_json_from_pandas(parse_filelist([item]).reset_index(
            ).drop_duplicates(subset=['code', 'report_date']).sort_index())
            # data["crawl_date"] = str(datetime.date.today())
            try:
                coll.insert_many(data, ordered=False)
            except MemoryError:
                # The unordered bulk insert can exhaust memory on very large
                # batches; retry with the slower ordered insert.
                coll.insert_many(data, ordered=True)
            except pymongo.bulk.BulkWriteError:
                # Duplicate-key errors from the unique index mean the rows
                # are already stored; that is fine for an upsert-style run.
                pass
            # Any other exception now propagates instead of being silently
            # swallowed, as the previous isinstance-based handler did.
        else:
            print('ALL READY IN DATABASE')
    print('SUCCESSFULLY SAVE/UPDATE FINANCIAL DATA')
print('SUCCESSFULLY SAVE/UPDATE FINANCIAL DATA') | def function[QA_SU_save_financial_files, parameter[]]:
constant[本地存储financialdata
]
call[name[download_financialzip], parameter[]]
variable[coll] assign[=] name[DATABASE].financial
call[name[coll].create_index, parameter[list[[<ast.Tuple object at 0x7da1b1f46440>, <ast.Tuple object at 0x7da1b1f44dc0>]]]]
for taget[name[item]] in starred[call[name[os].listdir, parameter[name[download_path]]]] begin[:]
if compare[call[name[item]][<ast.Slice object at 0x7da1b1f470d0>] not_equal[!=] constant[gpcw]] begin[:]
call[name[print], parameter[constant[file ], name[item], constant[ is not start with gpcw , seems not a financial file , ignore!]]]
continue
variable[date] assign[=] call[name[int], parameter[call[call[call[name[item].split, parameter[constant[.]]]][constant[0]]][<ast.Slice object at 0x7da1b1f47a30>]]]
call[name[print], parameter[call[constant[QUANTAXIS NOW SAVING {}].format, parameter[name[date]]]]]
if compare[call[call[name[coll].find, parameter[dictionary[[<ast.Constant object at 0x7da1b1f47130>], [<ast.Name object at 0x7da1b1f47dc0>]]]].count, parameter[]] less[<] constant[3600]] begin[:]
call[name[print], parameter[call[call[name[coll].find, parameter[dictionary[[<ast.Constant object at 0x7da1b1f44280>], [<ast.Name object at 0x7da1b1f458d0>]]]].count, parameter[]]]]
variable[data] assign[=] call[name[QA_util_to_json_from_pandas], parameter[call[call[call[call[name[parse_filelist], parameter[list[[<ast.Name object at 0x7da1b1f45ea0>]]]].reset_index, parameter[]].drop_duplicates, parameter[]].sort_index, parameter[]]]]
<ast.Try object at 0x7da1b1f445e0>
call[name[print], parameter[constant[SUCCESSFULLY SAVE/UPDATE FINANCIAL DATA]]] | keyword[def] identifier[QA_SU_save_financial_files] ():
literal[string]
identifier[download_financialzip] ()
identifier[coll] = identifier[DATABASE] . identifier[financial]
identifier[coll] . identifier[create_index] (
[( literal[string] , identifier[ASCENDING] ),( literal[string] , identifier[ASCENDING] )], identifier[unique] = keyword[True] )
keyword[for] identifier[item] keyword[in] identifier[os] . identifier[listdir] ( identifier[download_path] ):
keyword[if] identifier[item] [ literal[int] : literal[int] ]!= literal[string] :
identifier[print] (
literal[string] , identifier[item] , literal[string] )
keyword[continue]
identifier[date] = identifier[int] ( identifier[item] . identifier[split] ( literal[string] )[ literal[int] ][- literal[int] :])
identifier[print] ( literal[string] . identifier[format] ( identifier[date] ))
keyword[if] identifier[coll] . identifier[find] ({ literal[string] : identifier[date] }). identifier[count] ()< literal[int] :
identifier[print] ( identifier[coll] . identifier[find] ({ literal[string] : identifier[date] }). identifier[count] ())
identifier[data] = identifier[QA_util_to_json_from_pandas] ( identifier[parse_filelist] ([ identifier[item] ]). identifier[reset_index] (
). identifier[drop_duplicates] ( identifier[subset] =[ literal[string] , literal[string] ]). identifier[sort_index] ())
keyword[try] :
identifier[coll] . identifier[insert_many] ( identifier[data] , identifier[ordered] = keyword[False] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[if] identifier[isinstance] ( identifier[e] , identifier[MemoryError] ):
identifier[coll] . identifier[insert_many] ( identifier[data] , identifier[ordered] = keyword[True] )
keyword[elif] identifier[isinstance] ( identifier[e] , identifier[pymongo] . identifier[bulk] . identifier[BulkWriteError] ):
keyword[pass]
keyword[else] :
identifier[print] ( literal[string] )
identifier[print] ( literal[string] ) | def QA_SU_save_financial_files():
"""本地存储financialdata
"""
download_financialzip()
coll = DATABASE.financial
coll.create_index([('code', ASCENDING), ('report_date', ASCENDING)], unique=True)
for item in os.listdir(download_path):
if item[0:4] != 'gpcw':
print('file ', item, ' is not start with gpcw , seems not a financial file , ignore!')
continue # depends on [control=['if'], data=[]]
date = int(item.split('.')[0][-8:])
print('QUANTAXIS NOW SAVING {}'.format(date))
if coll.find({'report_date': date}).count() < 3600:
print(coll.find({'report_date': date}).count())
data = QA_util_to_json_from_pandas(parse_filelist([item]).reset_index().drop_duplicates(subset=['code', 'report_date']).sort_index())
# data["crawl_date"] = str(datetime.date.today())
try:
coll.insert_many(data, ordered=False) # depends on [control=['try'], data=[]]
except Exception as e:
if isinstance(e, MemoryError):
coll.insert_many(data, ordered=True) # depends on [control=['if'], data=[]]
elif isinstance(e, pymongo.bulk.BulkWriteError):
pass # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
else:
print('ALL READY IN DATABASE') # depends on [control=['for'], data=['item']]
print('SUCCESSFULLY SAVE/UPDATE FINANCIAL DATA') |
def _next_code_point(val, val_iter, yield_char=False, to_int=lambda x: x):
    """Provides the next *code point* in the given Unicode sequence.
    This generator function yields complete character code points, never incomplete surrogates. When a low surrogate is
    found without following a high surrogate, this function raises ``ValueError`` for having encountered an unpaired
    low surrogate. When the provided iterator ends on a high surrogate, this function yields ``None``. This is the
    **only** case in which this function yields ``None``. When this occurs, the user may append additional data to the
    input unicode sequence and resume iterating through another ``next`` on this generator. When this function receives
    ``next`` after yielding ``None``, it *reinitializes the unicode iterator*. This means that this feature can only
    be used for values that contain an ``__iter__`` implementation that remains at the current position in the data
    when called (e.g. :class:`BufferQueue`). At this point, there are only two possible outcomes:
    * If next code point is a valid low surrogate, this function yields the combined code point represented by the
      surrogate pair.
    * Otherwise, this function raises ``ValueError`` for having encountered an unpaired high surrogate.
    Args:
        val (unicode|BufferQueue): A unicode sequence or unicode BufferQueue over which to iterate.
        val_iter (Iterator[unicode|BufferQueue]): The unicode sequence iterator over ``val`` from which to generate the
            next integer code point in the range ``0x0`` to ``0x10FFFF``.
        yield_char (Optional[bool]): If True **and** the character code point resulted from a surrogate pair, this
            function will yield a :class:`CodePoint` representing the character code point and containing the original
            unicode character. This is useful when the original unicode character will be needed again because UCS2
            Python builds will error when trying to convert code points greater than 0xFFFF back into their
            unicode character representations. This avoids requiring the user to mathematically re-derive the
            surrogate pair in order to successfully convert the code point back to a unicode character.
        to_int (Optional[callable]): A function to call on each element of val_iter to convert that element to an int.
    """
    try:
        high = next(val_iter)
    except StopIteration:
        # Sequence exhausted: end the generator without yielding anything.
        return
    low = None
    code_point = to_int(high)
    # Surrogate-range constants are module-level; presumably the standard
    # UTF-16 ranges (low: U+DC00..U+DFFF, high: U+D800..U+DBFF) — defined
    # outside this block, so not verifiable here.
    if _LOW_SURROGATE_START <= code_point <= _LOW_SURROGATE_END:
        # A low surrogate may only appear after a high surrogate.
        raise ValueError('Unpaired low surrogate in Unicode sequence: %d' % code_point)
    elif _HIGH_SURROGATE_START <= code_point <= _HIGH_SURROGATE_END:
        def combine_surrogates():
            # Pull the expected low surrogate and merge the pair into one
            # supplementary-plane code point.
            low_surrogate = next(val_iter)
            low_code_point = to_int(low_surrogate)
            if low_code_point < _LOW_SURROGATE_START or low_code_point > _LOW_SURROGATE_END:
                raise ValueError('Unpaired high surrogate: %d' % code_point)
            # Decode the surrogates
            real_code_point = _NON_BMP_OFFSET
            real_code_point += (code_point - _HIGH_SURROGATE_START) << 10
            real_code_point += (low_code_point - _LOW_SURROGATE_START)
            return real_code_point, low_surrogate
        try:
            code_point, low = combine_surrogates()
        except StopIteration:
            # Input ended mid-pair. Yield None so the caller can append more
            # data, then retry once resumed (see the docstring contract).
            yield None
            val_iter = iter(val)  # More data has appeared in val.
            code_point, low = combine_surrogates()
    if yield_char and low is not None:
        # Wrap the combined code point so callers keep access to the original
        # surrogate characters (important on UCS2 builds).
        out = CodePoint(code_point)
        if isinstance(val, six.text_type):
            # Iterating over a text type returns text types.
            out.char = high + low
        else:
            # Otherwise the iterator produced ints; rebuild the characters.
            out.char = six.unichr(high) + six.unichr(low)
    else:
        out = code_point
    yield out
yield out | def function[_next_code_point, parameter[val, val_iter, yield_char, to_int]]:
constant[Provides the next *code point* in the given Unicode sequence.
This generator function yields complete character code points, never incomplete surrogates. When a low surrogate is
found without following a high surrogate, this function raises ``ValueError`` for having encountered an unpaired
low surrogate. When the provided iterator ends on a high surrogate, this function yields ``None``. This is the
**only** case in which this function yields ``None``. When this occurs, the user may append additional data to the
input unicode sequence and resume iterating through another ``next`` on this generator. When this function receives
``next`` after yielding ``None``, it *reinitializes the unicode iterator*. This means that this feature can only
be used for values that contain an ``__iter__`` implementation that remains at the current position in the data
when called (e.g. :class:`BufferQueue`). At this point, there are only two possible outcomes:
* If next code point is a valid low surrogate, this function yields the combined code point represented by the
surrogate pair.
* Otherwise, this function raises ``ValueError`` for having encountered an unpaired high surrogate.
Args:
val (unicode|BufferQueue): A unicode sequence or unicode BufferQueue over which to iterate.
val_iter (Iterator[unicode|BufferQueue]): The unicode sequence iterator over ``val`` from which to generate the
next integer code point in the range ``0x0`` to ``0x10FFFF``.
yield_char (Optional[bool]): If True **and** the character code point resulted from a surrogate pair, this
function will yield a :class:`CodePoint` representing the character code point and containing the original
unicode character. This is useful when the original unicode character will be needed again because UCS2
Python builds will error when trying to convert code points greater than 0xFFFF back into their
unicode character representations. This avoids requiring the user to mathematically re-derive the
surrogate pair in order to successfully convert the code point back to a unicode character.
to_int (Optional[callable]): A function to call on each element of val_iter to convert that element to an int.
]
<ast.Try object at 0x7da1b15f48e0>
variable[low] assign[=] constant[None]
variable[code_point] assign[=] call[name[to_int], parameter[name[high]]]
if compare[name[_LOW_SURROGATE_START] less_or_equal[<=] name[code_point]] begin[:]
<ast.Raise object at 0x7da1b15f43d0>
if <ast.BoolOp object at 0x7da1b15f61a0> begin[:]
variable[out] assign[=] call[name[CodePoint], parameter[name[code_point]]]
if call[name[isinstance], parameter[name[val], name[six].text_type]] begin[:]
name[out].char assign[=] binary_operation[name[high] + name[low]]
<ast.Yield object at 0x7da1b15f6950> | keyword[def] identifier[_next_code_point] ( identifier[val] , identifier[val_iter] , identifier[yield_char] = keyword[False] , identifier[to_int] = keyword[lambda] identifier[x] : identifier[x] ):
literal[string]
keyword[try] :
identifier[high] = identifier[next] ( identifier[val_iter] )
keyword[except] identifier[StopIteration] :
keyword[return]
identifier[low] = keyword[None]
identifier[code_point] = identifier[to_int] ( identifier[high] )
keyword[if] identifier[_LOW_SURROGATE_START] <= identifier[code_point] <= identifier[_LOW_SURROGATE_END] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[code_point] )
keyword[elif] identifier[_HIGH_SURROGATE_START] <= identifier[code_point] <= identifier[_HIGH_SURROGATE_END] :
keyword[def] identifier[combine_surrogates] ():
identifier[low_surrogate] = identifier[next] ( identifier[val_iter] )
identifier[low_code_point] = identifier[to_int] ( identifier[low_surrogate] )
keyword[if] identifier[low_code_point] < identifier[_LOW_SURROGATE_START] keyword[or] identifier[low_code_point] > identifier[_LOW_SURROGATE_END] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[code_point] )
identifier[real_code_point] = identifier[_NON_BMP_OFFSET]
identifier[real_code_point] +=( identifier[code_point] - identifier[_HIGH_SURROGATE_START] )<< literal[int]
identifier[real_code_point] +=( identifier[low_code_point] - identifier[_LOW_SURROGATE_START] )
keyword[return] identifier[real_code_point] , identifier[low_surrogate]
keyword[try] :
identifier[code_point] , identifier[low] = identifier[combine_surrogates] ()
keyword[except] identifier[StopIteration] :
keyword[yield] keyword[None]
identifier[val_iter] = identifier[iter] ( identifier[val] )
identifier[code_point] , identifier[low] = identifier[combine_surrogates] ()
keyword[if] identifier[yield_char] keyword[and] identifier[low] keyword[is] keyword[not] keyword[None] :
identifier[out] = identifier[CodePoint] ( identifier[code_point] )
keyword[if] identifier[isinstance] ( identifier[val] , identifier[six] . identifier[text_type] ):
identifier[out] . identifier[char] = identifier[high] + identifier[low]
keyword[else] :
identifier[out] . identifier[char] = identifier[six] . identifier[unichr] ( identifier[high] )+ identifier[six] . identifier[unichr] ( identifier[low] )
keyword[else] :
identifier[out] = identifier[code_point]
keyword[yield] identifier[out] | def _next_code_point(val, val_iter, yield_char=False, to_int=lambda x: x):
"""Provides the next *code point* in the given Unicode sequence.
This generator function yields complete character code points, never incomplete surrogates. When a low surrogate is
found without following a high surrogate, this function raises ``ValueError`` for having encountered an unpaired
low surrogate. When the provided iterator ends on a high surrogate, this function yields ``None``. This is the
**only** case in which this function yields ``None``. When this occurs, the user may append additional data to the
input unicode sequence and resume iterating through another ``next`` on this generator. When this function receives
``next`` after yielding ``None``, it *reinitializes the unicode iterator*. This means that this feature can only
be used for values that contain an ``__iter__`` implementation that remains at the current position in the data
when called (e.g. :class:`BufferQueue`). At this point, there are only two possible outcomes:
* If next code point is a valid low surrogate, this function yields the combined code point represented by the
surrogate pair.
* Otherwise, this function raises ``ValueError`` for having encountered an unpaired high surrogate.
Args:
val (unicode|BufferQueue): A unicode sequence or unicode BufferQueue over which to iterate.
val_iter (Iterator[unicode|BufferQueue]): The unicode sequence iterator over ``val`` from which to generate the
next integer code point in the range ``0x0`` to ``0x10FFFF``.
yield_char (Optional[bool]): If True **and** the character code point resulted from a surrogate pair, this
function will yield a :class:`CodePoint` representing the character code point and containing the original
unicode character. This is useful when the original unicode character will be needed again because UCS2
Python builds will error when trying to convert code points greater than 0xFFFF back into their
unicode character representations. This avoids requiring the user to mathematically re-derive the
surrogate pair in order to successfully convert the code point back to a unicode character.
to_int (Optional[callable]): A function to call on each element of val_iter to convert that element to an int.
"""
try:
high = next(val_iter) # depends on [control=['try'], data=[]]
except StopIteration:
return # depends on [control=['except'], data=[]]
low = None
code_point = to_int(high)
if _LOW_SURROGATE_START <= code_point <= _LOW_SURROGATE_END:
raise ValueError('Unpaired low surrogate in Unicode sequence: %d' % code_point) # depends on [control=['if'], data=['code_point']]
elif _HIGH_SURROGATE_START <= code_point <= _HIGH_SURROGATE_END:
def combine_surrogates():
low_surrogate = next(val_iter)
low_code_point = to_int(low_surrogate)
if low_code_point < _LOW_SURROGATE_START or low_code_point > _LOW_SURROGATE_END:
raise ValueError('Unpaired high surrogate: %d' % code_point) # depends on [control=['if'], data=[]]
# Decode the surrogates
real_code_point = _NON_BMP_OFFSET
real_code_point += code_point - _HIGH_SURROGATE_START << 10
real_code_point += low_code_point - _LOW_SURROGATE_START
return (real_code_point, low_surrogate)
try:
(code_point, low) = combine_surrogates() # depends on [control=['try'], data=[]]
except StopIteration:
yield None
val_iter = iter(val) # More data has appeared in val.
(code_point, low) = combine_surrogates() # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['_HIGH_SURROGATE_START', 'code_point']]
if yield_char and low is not None:
out = CodePoint(code_point)
if isinstance(val, six.text_type):
# Iterating over a text type returns text types.
out.char = high + low # depends on [control=['if'], data=[]]
else:
out.char = six.unichr(high) + six.unichr(low) # depends on [control=['if'], data=[]]
else:
out = code_point
yield out |
def p_funcvardecl(self, p):
    """funcvardecl : decl
    | integerdecl
    """
    # NOTE: the docstring above is the PLY grammar rule and must not change.
    if isinstance(p[1], Decl):
        # Function bodies may only declare inputs, regs, and integers.
        for declared in p[1].list:
            if not isinstance(declared, (Input, Reg, Integer)):
                raise ParseError("Syntax Error")
    p[0] = p[1]
    p.set_lineno(0, p.lineno(1))
constant[funcvardecl : decl
| integerdecl
]
if call[name[isinstance], parameter[call[name[p]][constant[1]], name[Decl]]] begin[:]
for taget[name[r]] in starred[call[name[p]][constant[1]].list] begin[:]
if <ast.BoolOp object at 0x7da1b1630e20> begin[:]
<ast.Raise object at 0x7da1b1632710>
call[name[p]][constant[0]] assign[=] call[name[p]][constant[1]]
call[name[p].set_lineno, parameter[constant[0], call[name[p].lineno, parameter[constant[1]]]]] | keyword[def] identifier[p_funcvardecl] ( identifier[self] , identifier[p] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[p] [ literal[int] ], identifier[Decl] ):
keyword[for] identifier[r] keyword[in] identifier[p] [ literal[int] ]. identifier[list] :
keyword[if] ( keyword[not] identifier[isinstance] ( identifier[r] , identifier[Input] ) keyword[and] keyword[not] identifier[isinstance] ( identifier[r] , identifier[Reg] )
keyword[and] keyword[not] identifier[isinstance] ( identifier[r] , identifier[Integer] )):
keyword[raise] identifier[ParseError] ( literal[string] )
identifier[p] [ literal[int] ]= identifier[p] [ literal[int] ]
identifier[p] . identifier[set_lineno] ( literal[int] , identifier[p] . identifier[lineno] ( literal[int] )) | def p_funcvardecl(self, p):
"""funcvardecl : decl
| integerdecl
"""
if isinstance(p[1], Decl):
for r in p[1].list:
if not isinstance(r, Input) and (not isinstance(r, Reg)) and (not isinstance(r, Integer)):
raise ParseError('Syntax Error') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['r']] # depends on [control=['if'], data=[]]
p[0] = p[1]
p.set_lineno(0, p.lineno(1)) |
def fix_2to3(source,
aggressive=True, select=None, ignore=None, filename='',
where='global', verbose=False):
"""Fix various deprecated code (via lib2to3)."""
if not aggressive:
return source
select = select or []
ignore = ignore or []
return refactor(source,
code_to_2to3(select=select,
ignore=ignore,
where=where,
verbose=verbose),
filename=filename) | def function[fix_2to3, parameter[source, aggressive, select, ignore, filename, where, verbose]]:
constant[Fix various deprecated code (via lib2to3).]
if <ast.UnaryOp object at 0x7da2044c08b0> begin[:]
return[name[source]]
variable[select] assign[=] <ast.BoolOp object at 0x7da2044c2260>
variable[ignore] assign[=] <ast.BoolOp object at 0x7da2044c0070>
return[call[name[refactor], parameter[name[source], call[name[code_to_2to3], parameter[]]]]] | keyword[def] identifier[fix_2to3] ( identifier[source] ,
identifier[aggressive] = keyword[True] , identifier[select] = keyword[None] , identifier[ignore] = keyword[None] , identifier[filename] = literal[string] ,
identifier[where] = literal[string] , identifier[verbose] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[aggressive] :
keyword[return] identifier[source]
identifier[select] = identifier[select] keyword[or] []
identifier[ignore] = identifier[ignore] keyword[or] []
keyword[return] identifier[refactor] ( identifier[source] ,
identifier[code_to_2to3] ( identifier[select] = identifier[select] ,
identifier[ignore] = identifier[ignore] ,
identifier[where] = identifier[where] ,
identifier[verbose] = identifier[verbose] ),
identifier[filename] = identifier[filename] ) | def fix_2to3(source, aggressive=True, select=None, ignore=None, filename='', where='global', verbose=False):
"""Fix various deprecated code (via lib2to3)."""
if not aggressive:
return source # depends on [control=['if'], data=[]]
select = select or []
ignore = ignore or []
return refactor(source, code_to_2to3(select=select, ignore=ignore, where=where, verbose=verbose), filename=filename) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.