code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def print_level(log_function, fmt, level, *args):
"""Print a formatted message to stdout prepended by spaces. Useful for
printing hierarchical information, like bullet lists.
Note:
If the application is running in "Silent Mode"
(i.e., ``_SILENT == True``), this function will return
immediately and no message will be printed.
Args:
log_function: The function that will be called to output the formatted
message.
fmt (str): A Python formatted string.
level (int): Used to determing how many spaces to print. The formula
is ``' ' * level ``.
*args: Variable length list of arguments. Values are plugged into the
format string.
Examples:
>>> print_level("%s %d", 0, "TEST", 0)
TEST 0
>>> print_level("%s %d", 1, "TEST", 1)
TEST 1
>>> print_level("%s %d", 2, "TEST", 2)
TEST 2
"""
if _SILENT:
return
msg = fmt % args
spaces = ' ' * level
log_function("%s%s" % (spaces, msg)) | def function[print_level, parameter[log_function, fmt, level]]:
constant[Print a formatted message to stdout prepended by spaces. Useful for
printing hierarchical information, like bullet lists.
Note:
If the application is running in "Silent Mode"
(i.e., ``_SILENT == True``), this function will return
immediately and no message will be printed.
Args:
log_function: The function that will be called to output the formatted
message.
fmt (str): A Python formatted string.
level (int): Used to determing how many spaces to print. The formula
is ``' ' * level ``.
*args: Variable length list of arguments. Values are plugged into the
format string.
Examples:
>>> print_level("%s %d", 0, "TEST", 0)
TEST 0
>>> print_level("%s %d", 1, "TEST", 1)
TEST 1
>>> print_level("%s %d", 2, "TEST", 2)
TEST 2
]
if name[_SILENT] begin[:]
return[None]
variable[msg] assign[=] binary_operation[name[fmt] <ast.Mod object at 0x7da2590d6920> name[args]]
variable[spaces] assign[=] binary_operation[constant[ ] * name[level]]
call[name[log_function], parameter[binary_operation[constant[%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0fc7e20>, <ast.Name object at 0x7da1b0fc4b50>]]]]] | keyword[def] identifier[print_level] ( identifier[log_function] , identifier[fmt] , identifier[level] ,* identifier[args] ):
literal[string]
keyword[if] identifier[_SILENT] :
keyword[return]
identifier[msg] = identifier[fmt] % identifier[args]
identifier[spaces] = literal[string] * identifier[level]
identifier[log_function] ( literal[string] %( identifier[spaces] , identifier[msg] )) | def print_level(log_function, fmt, level, *args):
"""Print a formatted message to stdout prepended by spaces. Useful for
printing hierarchical information, like bullet lists.
Note:
If the application is running in "Silent Mode"
(i.e., ``_SILENT == True``), this function will return
immediately and no message will be printed.
Args:
log_function: The function that will be called to output the formatted
message.
fmt (str): A Python formatted string.
level (int): Used to determing how many spaces to print. The formula
is ``' ' * level ``.
*args: Variable length list of arguments. Values are plugged into the
format string.
Examples:
>>> print_level("%s %d", 0, "TEST", 0)
TEST 0
>>> print_level("%s %d", 1, "TEST", 1)
TEST 1
>>> print_level("%s %d", 2, "TEST", 2)
TEST 2
"""
if _SILENT:
return # depends on [control=['if'], data=[]]
msg = fmt % args
spaces = ' ' * level
log_function('%s%s' % (spaces, msg)) |
def write_file_to_zip_with_neutral_metadata(zfile, filename, content):
"""
Write the string `content` to `filename` in the open ZipFile `zfile`.
Args:
zfile (ZipFile): open ZipFile to write the content into
filename (str): the file path within the zip file to write into
content (str): the content to write into the zip
Returns: None
"""
info = zipfile.ZipInfo(filename, date_time=(2015, 10, 21, 7, 28, 0))
info.compress_type = zipfile.ZIP_DEFLATED
info.comment = "".encode()
info.create_system = 0
zfile.writestr(info, content) | def function[write_file_to_zip_with_neutral_metadata, parameter[zfile, filename, content]]:
constant[
Write the string `content` to `filename` in the open ZipFile `zfile`.
Args:
zfile (ZipFile): open ZipFile to write the content into
filename (str): the file path within the zip file to write into
content (str): the content to write into the zip
Returns: None
]
variable[info] assign[=] call[name[zipfile].ZipInfo, parameter[name[filename]]]
name[info].compress_type assign[=] name[zipfile].ZIP_DEFLATED
name[info].comment assign[=] call[constant[].encode, parameter[]]
name[info].create_system assign[=] constant[0]
call[name[zfile].writestr, parameter[name[info], name[content]]] | keyword[def] identifier[write_file_to_zip_with_neutral_metadata] ( identifier[zfile] , identifier[filename] , identifier[content] ):
literal[string]
identifier[info] = identifier[zipfile] . identifier[ZipInfo] ( identifier[filename] , identifier[date_time] =( literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ))
identifier[info] . identifier[compress_type] = identifier[zipfile] . identifier[ZIP_DEFLATED]
identifier[info] . identifier[comment] = literal[string] . identifier[encode] ()
identifier[info] . identifier[create_system] = literal[int]
identifier[zfile] . identifier[writestr] ( identifier[info] , identifier[content] ) | def write_file_to_zip_with_neutral_metadata(zfile, filename, content):
"""
Write the string `content` to `filename` in the open ZipFile `zfile`.
Args:
zfile (ZipFile): open ZipFile to write the content into
filename (str): the file path within the zip file to write into
content (str): the content to write into the zip
Returns: None
"""
info = zipfile.ZipInfo(filename, date_time=(2015, 10, 21, 7, 28, 0))
info.compress_type = zipfile.ZIP_DEFLATED
info.comment = ''.encode()
info.create_system = 0
zfile.writestr(info, content) |
def default_metric_definitions(cls, toolkit):
"""Provides default metric definitions based on provided toolkit.
Args:
toolkit(sagemaker.rl.RLToolkit): RL Toolkit to be used for training.
Returns:
list: metric definitions
"""
if toolkit is RLToolkit.COACH:
return [
{'Name': 'reward-training',
'Regex': '^Training>.*Total reward=(.*?),'},
{'Name': 'reward-testing',
'Regex': '^Testing>.*Total reward=(.*?),'}
]
elif toolkit is RLToolkit.RAY:
float_regex = "[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?" # noqa: W605, E501 pylint: disable=anomalous-backslash-in-string
return [
{'Name': 'episode_reward_mean',
'Regex': 'episode_reward_mean: (%s)' % float_regex},
{'Name': 'episode_reward_max',
'Regex': 'episode_reward_max: (%s)' % float_regex}
] | def function[default_metric_definitions, parameter[cls, toolkit]]:
constant[Provides default metric definitions based on provided toolkit.
Args:
toolkit(sagemaker.rl.RLToolkit): RL Toolkit to be used for training.
Returns:
list: metric definitions
]
if compare[name[toolkit] is name[RLToolkit].COACH] begin[:]
return[list[[<ast.Dict object at 0x7da1b21c6ce0>, <ast.Dict object at 0x7da1b21c6dd0>]]] | keyword[def] identifier[default_metric_definitions] ( identifier[cls] , identifier[toolkit] ):
literal[string]
keyword[if] identifier[toolkit] keyword[is] identifier[RLToolkit] . identifier[COACH] :
keyword[return] [
{ literal[string] : literal[string] ,
literal[string] : literal[string] },
{ literal[string] : literal[string] ,
literal[string] : literal[string] }
]
keyword[elif] identifier[toolkit] keyword[is] identifier[RLToolkit] . identifier[RAY] :
identifier[float_regex] = literal[string]
keyword[return] [
{ literal[string] : literal[string] ,
literal[string] : literal[string] % identifier[float_regex] },
{ literal[string] : literal[string] ,
literal[string] : literal[string] % identifier[float_regex] }
] | def default_metric_definitions(cls, toolkit):
"""Provides default metric definitions based on provided toolkit.
Args:
toolkit(sagemaker.rl.RLToolkit): RL Toolkit to be used for training.
Returns:
list: metric definitions
"""
if toolkit is RLToolkit.COACH:
return [{'Name': 'reward-training', 'Regex': '^Training>.*Total reward=(.*?),'}, {'Name': 'reward-testing', 'Regex': '^Testing>.*Total reward=(.*?),'}] # depends on [control=['if'], data=[]]
elif toolkit is RLToolkit.RAY:
float_regex = '[-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?' # noqa: W605, E501 pylint: disable=anomalous-backslash-in-string
return [{'Name': 'episode_reward_mean', 'Regex': 'episode_reward_mean: (%s)' % float_regex}, {'Name': 'episode_reward_max', 'Regex': 'episode_reward_max: (%s)' % float_regex}] # depends on [control=['if'], data=[]] |
def validate(self):
"""Validate that the GlobalContextField is correctly representable."""
if not isinstance(self.location, Location):
raise TypeError(u'Expected Location location, got: {} {}'
.format(type(self.location).__name__, self.location))
if self.location.field is None:
raise AssertionError(u'Received Location without a field: {}'
.format(self.location))
if not is_graphql_type(self.field_type):
raise ValueError(u'Invalid value of "field_type": {}'.format(self.field_type)) | def function[validate, parameter[self]]:
constant[Validate that the GlobalContextField is correctly representable.]
if <ast.UnaryOp object at 0x7da1b17cef20> begin[:]
<ast.Raise object at 0x7da1b17cfb20>
if compare[name[self].location.field is constant[None]] begin[:]
<ast.Raise object at 0x7da1b17ce470>
if <ast.UnaryOp object at 0x7da1b17ccb50> begin[:]
<ast.Raise object at 0x7da1b17cc220> | keyword[def] identifier[validate] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[self] . identifier[location] , identifier[Location] ):
keyword[raise] identifier[TypeError] ( literal[string]
. identifier[format] ( identifier[type] ( identifier[self] . identifier[location] ). identifier[__name__] , identifier[self] . identifier[location] ))
keyword[if] identifier[self] . identifier[location] . identifier[field] keyword[is] keyword[None] :
keyword[raise] identifier[AssertionError] ( literal[string]
. identifier[format] ( identifier[self] . identifier[location] ))
keyword[if] keyword[not] identifier[is_graphql_type] ( identifier[self] . identifier[field_type] ):
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[self] . identifier[field_type] )) | def validate(self):
"""Validate that the GlobalContextField is correctly representable."""
if not isinstance(self.location, Location):
raise TypeError(u'Expected Location location, got: {} {}'.format(type(self.location).__name__, self.location)) # depends on [control=['if'], data=[]]
if self.location.field is None:
raise AssertionError(u'Received Location without a field: {}'.format(self.location)) # depends on [control=['if'], data=[]]
if not is_graphql_type(self.field_type):
raise ValueError(u'Invalid value of "field_type": {}'.format(self.field_type)) # depends on [control=['if'], data=[]] |
def GroupsUsersPost(self, parameters, group_id):
"""
Add users to a group in CommonSense.
@param parameters (dictonary) - Dictionary containing the users to add.
@return (bool) - Boolean indicating whether GroupsPost was successful.
"""
if self.__SenseApiCall__('/groups/{group_id}/users.json'.format(group_id = group_id), 'POST', parameters = parameters):
return True
else:
self.__error__ = "api call unsuccessful"
return False | def function[GroupsUsersPost, parameter[self, parameters, group_id]]:
constant[
Add users to a group in CommonSense.
@param parameters (dictonary) - Dictionary containing the users to add.
@return (bool) - Boolean indicating whether GroupsPost was successful.
]
if call[name[self].__SenseApiCall__, parameter[call[constant[/groups/{group_id}/users.json].format, parameter[]], constant[POST]]] begin[:]
return[constant[True]] | keyword[def] identifier[GroupsUsersPost] ( identifier[self] , identifier[parameters] , identifier[group_id] ):
literal[string]
keyword[if] identifier[self] . identifier[__SenseApiCall__] ( literal[string] . identifier[format] ( identifier[group_id] = identifier[group_id] ), literal[string] , identifier[parameters] = identifier[parameters] ):
keyword[return] keyword[True]
keyword[else] :
identifier[self] . identifier[__error__] = literal[string]
keyword[return] keyword[False] | def GroupsUsersPost(self, parameters, group_id):
"""
Add users to a group in CommonSense.
@param parameters (dictonary) - Dictionary containing the users to add.
@return (bool) - Boolean indicating whether GroupsPost was successful.
"""
if self.__SenseApiCall__('/groups/{group_id}/users.json'.format(group_id=group_id), 'POST', parameters=parameters):
return True # depends on [control=['if'], data=[]]
else:
self.__error__ = 'api call unsuccessful'
return False |
def dump(result):
"""Dump result into a string, useful for debugging."""
if isinstance(result, dict):
# Result is a search result.
statuses = result['results']
else:
# Result is a lookup result.
statuses = result
status_str_list = []
for status in statuses:
status_str_list.append(textwrap.dedent(u"""
@{screen_name} -- https://twitter.com/{screen_name}
{text}
""").strip().format(
screen_name=status['actor']['preferredUsername'],
text=status['body']))
return u'\n\n'.join(status_str_list) | def function[dump, parameter[result]]:
constant[Dump result into a string, useful for debugging.]
if call[name[isinstance], parameter[name[result], name[dict]]] begin[:]
variable[statuses] assign[=] call[name[result]][constant[results]]
variable[status_str_list] assign[=] list[[]]
for taget[name[status]] in starred[name[statuses]] begin[:]
call[name[status_str_list].append, parameter[call[call[call[name[textwrap].dedent, parameter[constant[
@{screen_name} -- https://twitter.com/{screen_name}
{text}
]]].strip, parameter[]].format, parameter[]]]]
return[call[constant[
].join, parameter[name[status_str_list]]]] | keyword[def] identifier[dump] ( identifier[result] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[result] , identifier[dict] ):
identifier[statuses] = identifier[result] [ literal[string] ]
keyword[else] :
identifier[statuses] = identifier[result]
identifier[status_str_list] =[]
keyword[for] identifier[status] keyword[in] identifier[statuses] :
identifier[status_str_list] . identifier[append] ( identifier[textwrap] . identifier[dedent] ( literal[string] ). identifier[strip] (). identifier[format] (
identifier[screen_name] = identifier[status] [ literal[string] ][ literal[string] ],
identifier[text] = identifier[status] [ literal[string] ]))
keyword[return] literal[string] . identifier[join] ( identifier[status_str_list] ) | def dump(result):
"""Dump result into a string, useful for debugging."""
if isinstance(result, dict):
# Result is a search result.
statuses = result['results'] # depends on [control=['if'], data=[]]
else:
# Result is a lookup result.
statuses = result
status_str_list = []
for status in statuses:
status_str_list.append(textwrap.dedent(u'\n @{screen_name} -- https://twitter.com/{screen_name}\n {text}\n ').strip().format(screen_name=status['actor']['preferredUsername'], text=status['body'])) # depends on [control=['for'], data=['status']]
return u'\n\n'.join(status_str_list) |
def cylinders(shape: List[int], radius: int, ncylinders: int,
phi_max: float = 0, theta_max: float = 90):
r"""
Generates a binary image of overlapping cylinders. This is a good
approximation of a fibrous mat.
Parameters
----------
shape : list
The size of the image to generate in [Nx, Ny, Nz] where N is the
number of voxels. 2D images are not permitted.
radius : scalar
The radius of the cylinders in voxels
ncylinders : scalar
The number of cylinders to add to the domain. Adjust this value to
control the final porosity, which is not easily specified since
cylinders overlap and intersect different fractions of the domain.
theta_max : scalar
A value between 0 and 90 that controls the amount of rotation *in the*
XY plane, with 0 meaning all fibers point in the X-direction, and
90 meaning they are randomly rotated about the Z axis by as much
as +/- 90 degrees.
phi_max : scalar
A value between 0 and 90 that controls the amount that the fibers
lie *out of* the XY plane, with 0 meaning all fibers lie in the XY
plane, and 90 meaning that fibers are randomly oriented out of the
plane by as much as +/- 90 degrees.
Returns
-------
image : ND-array
A boolean array with ``True`` values denoting the pore space
"""
shape = sp.array(shape)
if sp.size(shape) == 1:
shape = sp.full((3, ), int(shape))
elif sp.size(shape) == 2:
raise Exception("2D cylinders don't make sense")
R = sp.sqrt(sp.sum(sp.square(shape))).astype(int)
im = sp.zeros(shape)
# Adjust max angles to be between 0 and 90
if (phi_max > 90) or (phi_max < 0):
raise Exception('phi_max must be betwen 0 and 90')
if (theta_max > 90) or (theta_max < 0):
raise Exception('theta_max must be betwen 0 and 90')
n = 0
while n < ncylinders:
# Choose a random starting point in domain
x = sp.rand(3)*shape
# Chose a random phi and theta within given ranges
phi = (sp.pi/2 - sp.pi*sp.rand())*phi_max/90
theta = (sp.pi/2 - sp.pi*sp.rand())*theta_max/90
X0 = R*sp.array([sp.cos(phi)*sp.cos(theta),
sp.cos(phi)*sp.sin(theta),
sp.sin(phi)])
[X0, X1] = [x + X0, x - X0]
crds = line_segment(X0, X1)
lower = ~sp.any(sp.vstack(crds).T < [0, 0, 0], axis=1)
upper = ~sp.any(sp.vstack(crds).T >= shape, axis=1)
valid = upper*lower
if sp.any(valid):
im[crds[0][valid], crds[1][valid], crds[2][valid]] = 1
n += 1
im = sp.array(im, dtype=bool)
dt = spim.distance_transform_edt(~im) < radius
return ~dt | def function[cylinders, parameter[shape, radius, ncylinders, phi_max, theta_max]]:
constant[
Generates a binary image of overlapping cylinders. This is a good
approximation of a fibrous mat.
Parameters
----------
shape : list
The size of the image to generate in [Nx, Ny, Nz] where N is the
number of voxels. 2D images are not permitted.
radius : scalar
The radius of the cylinders in voxels
ncylinders : scalar
The number of cylinders to add to the domain. Adjust this value to
control the final porosity, which is not easily specified since
cylinders overlap and intersect different fractions of the domain.
theta_max : scalar
A value between 0 and 90 that controls the amount of rotation *in the*
XY plane, with 0 meaning all fibers point in the X-direction, and
90 meaning they are randomly rotated about the Z axis by as much
as +/- 90 degrees.
phi_max : scalar
A value between 0 and 90 that controls the amount that the fibers
lie *out of* the XY plane, with 0 meaning all fibers lie in the XY
plane, and 90 meaning that fibers are randomly oriented out of the
plane by as much as +/- 90 degrees.
Returns
-------
image : ND-array
A boolean array with ``True`` values denoting the pore space
]
variable[shape] assign[=] call[name[sp].array, parameter[name[shape]]]
if compare[call[name[sp].size, parameter[name[shape]]] equal[==] constant[1]] begin[:]
variable[shape] assign[=] call[name[sp].full, parameter[tuple[[<ast.Constant object at 0x7da1b06a0070>]], call[name[int], parameter[name[shape]]]]]
variable[R] assign[=] call[call[name[sp].sqrt, parameter[call[name[sp].sum, parameter[call[name[sp].square, parameter[name[shape]]]]]]].astype, parameter[name[int]]]
variable[im] assign[=] call[name[sp].zeros, parameter[name[shape]]]
if <ast.BoolOp object at 0x7da1b053a140> begin[:]
<ast.Raise object at 0x7da1b0539de0>
if <ast.BoolOp object at 0x7da1b053bfd0> begin[:]
<ast.Raise object at 0x7da1b053beb0>
variable[n] assign[=] constant[0]
while compare[name[n] less[<] name[ncylinders]] begin[:]
variable[x] assign[=] binary_operation[call[name[sp].rand, parameter[constant[3]]] * name[shape]]
variable[phi] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[sp].pi / constant[2]] - binary_operation[name[sp].pi * call[name[sp].rand, parameter[]]]] * name[phi_max]] / constant[90]]
variable[theta] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[sp].pi / constant[2]] - binary_operation[name[sp].pi * call[name[sp].rand, parameter[]]]] * name[theta_max]] / constant[90]]
variable[X0] assign[=] binary_operation[name[R] * call[name[sp].array, parameter[list[[<ast.BinOp object at 0x7da1b0510790>, <ast.BinOp object at 0x7da1b0513250>, <ast.Call object at 0x7da1b0512530>]]]]]
<ast.List object at 0x7da1b0510d90> assign[=] list[[<ast.BinOp object at 0x7da1b0513fa0>, <ast.BinOp object at 0x7da1b0510460>]]
variable[crds] assign[=] call[name[line_segment], parameter[name[X0], name[X1]]]
variable[lower] assign[=] <ast.UnaryOp object at 0x7da1b0512860>
variable[upper] assign[=] <ast.UnaryOp object at 0x7da1b05124a0>
variable[valid] assign[=] binary_operation[name[upper] * name[lower]]
if call[name[sp].any, parameter[name[valid]]] begin[:]
call[name[im]][tuple[[<ast.Subscript object at 0x7da1b0513f70>, <ast.Subscript object at 0x7da1b0510970>, <ast.Subscript object at 0x7da1b0511840>]]] assign[=] constant[1]
<ast.AugAssign object at 0x7da1b0512740>
variable[im] assign[=] call[name[sp].array, parameter[name[im]]]
variable[dt] assign[=] compare[call[name[spim].distance_transform_edt, parameter[<ast.UnaryOp object at 0x7da1b0511ed0>]] less[<] name[radius]]
return[<ast.UnaryOp object at 0x7da1b0511cc0>] | keyword[def] identifier[cylinders] ( identifier[shape] : identifier[List] [ identifier[int] ], identifier[radius] : identifier[int] , identifier[ncylinders] : identifier[int] ,
identifier[phi_max] : identifier[float] = literal[int] , identifier[theta_max] : identifier[float] = literal[int] ):
literal[string]
identifier[shape] = identifier[sp] . identifier[array] ( identifier[shape] )
keyword[if] identifier[sp] . identifier[size] ( identifier[shape] )== literal[int] :
identifier[shape] = identifier[sp] . identifier[full] (( literal[int] ,), identifier[int] ( identifier[shape] ))
keyword[elif] identifier[sp] . identifier[size] ( identifier[shape] )== literal[int] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[R] = identifier[sp] . identifier[sqrt] ( identifier[sp] . identifier[sum] ( identifier[sp] . identifier[square] ( identifier[shape] ))). identifier[astype] ( identifier[int] )
identifier[im] = identifier[sp] . identifier[zeros] ( identifier[shape] )
keyword[if] ( identifier[phi_max] > literal[int] ) keyword[or] ( identifier[phi_max] < literal[int] ):
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] ( identifier[theta_max] > literal[int] ) keyword[or] ( identifier[theta_max] < literal[int] ):
keyword[raise] identifier[Exception] ( literal[string] )
identifier[n] = literal[int]
keyword[while] identifier[n] < identifier[ncylinders] :
identifier[x] = identifier[sp] . identifier[rand] ( literal[int] )* identifier[shape]
identifier[phi] =( identifier[sp] . identifier[pi] / literal[int] - identifier[sp] . identifier[pi] * identifier[sp] . identifier[rand] ())* identifier[phi_max] / literal[int]
identifier[theta] =( identifier[sp] . identifier[pi] / literal[int] - identifier[sp] . identifier[pi] * identifier[sp] . identifier[rand] ())* identifier[theta_max] / literal[int]
identifier[X0] = identifier[R] * identifier[sp] . identifier[array] ([ identifier[sp] . identifier[cos] ( identifier[phi] )* identifier[sp] . identifier[cos] ( identifier[theta] ),
identifier[sp] . identifier[cos] ( identifier[phi] )* identifier[sp] . identifier[sin] ( identifier[theta] ),
identifier[sp] . identifier[sin] ( identifier[phi] )])
[ identifier[X0] , identifier[X1] ]=[ identifier[x] + identifier[X0] , identifier[x] - identifier[X0] ]
identifier[crds] = identifier[line_segment] ( identifier[X0] , identifier[X1] )
identifier[lower] =~ identifier[sp] . identifier[any] ( identifier[sp] . identifier[vstack] ( identifier[crds] ). identifier[T] <[ literal[int] , literal[int] , literal[int] ], identifier[axis] = literal[int] )
identifier[upper] =~ identifier[sp] . identifier[any] ( identifier[sp] . identifier[vstack] ( identifier[crds] ). identifier[T] >= identifier[shape] , identifier[axis] = literal[int] )
identifier[valid] = identifier[upper] * identifier[lower]
keyword[if] identifier[sp] . identifier[any] ( identifier[valid] ):
identifier[im] [ identifier[crds] [ literal[int] ][ identifier[valid] ], identifier[crds] [ literal[int] ][ identifier[valid] ], identifier[crds] [ literal[int] ][ identifier[valid] ]]= literal[int]
identifier[n] += literal[int]
identifier[im] = identifier[sp] . identifier[array] ( identifier[im] , identifier[dtype] = identifier[bool] )
identifier[dt] = identifier[spim] . identifier[distance_transform_edt] (~ identifier[im] )< identifier[radius]
keyword[return] ~ identifier[dt] | def cylinders(shape: List[int], radius: int, ncylinders: int, phi_max: float=0, theta_max: float=90):
"""
Generates a binary image of overlapping cylinders. This is a good
approximation of a fibrous mat.
Parameters
----------
shape : list
The size of the image to generate in [Nx, Ny, Nz] where N is the
number of voxels. 2D images are not permitted.
radius : scalar
The radius of the cylinders in voxels
ncylinders : scalar
The number of cylinders to add to the domain. Adjust this value to
control the final porosity, which is not easily specified since
cylinders overlap and intersect different fractions of the domain.
theta_max : scalar
A value between 0 and 90 that controls the amount of rotation *in the*
XY plane, with 0 meaning all fibers point in the X-direction, and
90 meaning they are randomly rotated about the Z axis by as much
as +/- 90 degrees.
phi_max : scalar
A value between 0 and 90 that controls the amount that the fibers
lie *out of* the XY plane, with 0 meaning all fibers lie in the XY
plane, and 90 meaning that fibers are randomly oriented out of the
plane by as much as +/- 90 degrees.
Returns
-------
image : ND-array
A boolean array with ``True`` values denoting the pore space
"""
shape = sp.array(shape)
if sp.size(shape) == 1:
shape = sp.full((3,), int(shape)) # depends on [control=['if'], data=[]]
elif sp.size(shape) == 2:
raise Exception("2D cylinders don't make sense") # depends on [control=['if'], data=[]]
R = sp.sqrt(sp.sum(sp.square(shape))).astype(int)
im = sp.zeros(shape)
# Adjust max angles to be between 0 and 90
if phi_max > 90 or phi_max < 0:
raise Exception('phi_max must be betwen 0 and 90') # depends on [control=['if'], data=[]]
if theta_max > 90 or theta_max < 0:
raise Exception('theta_max must be betwen 0 and 90') # depends on [control=['if'], data=[]]
n = 0
while n < ncylinders:
# Choose a random starting point in domain
x = sp.rand(3) * shape
# Chose a random phi and theta within given ranges
phi = (sp.pi / 2 - sp.pi * sp.rand()) * phi_max / 90
theta = (sp.pi / 2 - sp.pi * sp.rand()) * theta_max / 90
X0 = R * sp.array([sp.cos(phi) * sp.cos(theta), sp.cos(phi) * sp.sin(theta), sp.sin(phi)])
[X0, X1] = [x + X0, x - X0]
crds = line_segment(X0, X1)
lower = ~sp.any(sp.vstack(crds).T < [0, 0, 0], axis=1)
upper = ~sp.any(sp.vstack(crds).T >= shape, axis=1)
valid = upper * lower
if sp.any(valid):
im[crds[0][valid], crds[1][valid], crds[2][valid]] = 1
n += 1 # depends on [control=['if'], data=[]] # depends on [control=['while'], data=['n']]
im = sp.array(im, dtype=bool)
dt = spim.distance_transform_edt(~im) < radius
return ~dt |
def weighted_temperature(self, how='geometric_series'):
r"""
A new temperature vector is generated containing a multi-day
average temperature as needed in the load profile function.
Parameters
----------
how : string
string which type to return ("geometric_series" or "mean")
Notes
-----
Equation for the mathematical series of the average
tempaerature [1]_:
.. math::
T=\frac{T_{D}+0.5\cdot T_{D-1}+0.25\cdot T_{D-2}+
0.125\cdot T_{D-3}}{1+0.5+0.25+0.125}
with :math:`T_D` = Average temperature on the present day
:math:`T_{D-i}` = Average temperature on the day - i
References
----------
.. [1] `BDEW <https://www.avacon.de/cps/rde/xbcr/avacon/15-06-30_Leitfaden_Abwicklung_SLP_Gas.pdf>`_,
BDEW Documentation for heat profiles.
"""
# calculate daily mean temperature
temperature = self.df['temperature'].resample('D').mean().reindex(
self.df.index).fillna(method='ffill').fillna(method='bfill')
if how == 'geometric_series':
temperature_mean = (temperature + 0.5 * np.roll(temperature, 24) +
0.25 * np.roll(temperature, 48) +
0.125 * np.roll(temperature, 72)) / 1.875
elif how == 'mean':
temperature_mean = temperature
else:
temperature_mean = None
return temperature_mean | def function[weighted_temperature, parameter[self, how]]:
constant[
A new temperature vector is generated containing a multi-day
average temperature as needed in the load profile function.
Parameters
----------
how : string
string which type to return ("geometric_series" or "mean")
Notes
-----
Equation for the mathematical series of the average
tempaerature [1]_:
.. math::
T=\frac{T_{D}+0.5\cdot T_{D-1}+0.25\cdot T_{D-2}+
0.125\cdot T_{D-3}}{1+0.5+0.25+0.125}
with :math:`T_D` = Average temperature on the present day
:math:`T_{D-i}` = Average temperature on the day - i
References
----------
.. [1] `BDEW <https://www.avacon.de/cps/rde/xbcr/avacon/15-06-30_Leitfaden_Abwicklung_SLP_Gas.pdf>`_,
BDEW Documentation for heat profiles.
]
variable[temperature] assign[=] call[call[call[call[call[call[name[self].df][constant[temperature]].resample, parameter[constant[D]]].mean, parameter[]].reindex, parameter[name[self].df.index]].fillna, parameter[]].fillna, parameter[]]
if compare[name[how] equal[==] constant[geometric_series]] begin[:]
variable[temperature_mean] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[temperature] + binary_operation[constant[0.5] * call[name[np].roll, parameter[name[temperature], constant[24]]]]] + binary_operation[constant[0.25] * call[name[np].roll, parameter[name[temperature], constant[48]]]]] + binary_operation[constant[0.125] * call[name[np].roll, parameter[name[temperature], constant[72]]]]] / constant[1.875]]
return[name[temperature_mean]] | keyword[def] identifier[weighted_temperature] ( identifier[self] , identifier[how] = literal[string] ):
literal[string]
identifier[temperature] = identifier[self] . identifier[df] [ literal[string] ]. identifier[resample] ( literal[string] ). identifier[mean] (). identifier[reindex] (
identifier[self] . identifier[df] . identifier[index] ). identifier[fillna] ( identifier[method] = literal[string] ). identifier[fillna] ( identifier[method] = literal[string] )
keyword[if] identifier[how] == literal[string] :
identifier[temperature_mean] =( identifier[temperature] + literal[int] * identifier[np] . identifier[roll] ( identifier[temperature] , literal[int] )+
literal[int] * identifier[np] . identifier[roll] ( identifier[temperature] , literal[int] )+
literal[int] * identifier[np] . identifier[roll] ( identifier[temperature] , literal[int] ))/ literal[int]
keyword[elif] identifier[how] == literal[string] :
identifier[temperature_mean] = identifier[temperature]
keyword[else] :
identifier[temperature_mean] = keyword[None]
keyword[return] identifier[temperature_mean] | def weighted_temperature(self, how='geometric_series'):
"""
A new temperature vector is generated containing a multi-day
average temperature as needed in the load profile function.
Parameters
----------
how : string
string which type to return ("geometric_series" or "mean")
Notes
-----
Equation for the mathematical series of the average
tempaerature [1]_:
.. math::
T=\\frac{T_{D}+0.5\\cdot T_{D-1}+0.25\\cdot T_{D-2}+
0.125\\cdot T_{D-3}}{1+0.5+0.25+0.125}
with :math:`T_D` = Average temperature on the present day
:math:`T_{D-i}` = Average temperature on the day - i
References
----------
.. [1] `BDEW <https://www.avacon.de/cps/rde/xbcr/avacon/15-06-30_Leitfaden_Abwicklung_SLP_Gas.pdf>`_,
BDEW Documentation for heat profiles.
"""
# calculate daily mean temperature
temperature = self.df['temperature'].resample('D').mean().reindex(self.df.index).fillna(method='ffill').fillna(method='bfill')
if how == 'geometric_series':
temperature_mean = (temperature + 0.5 * np.roll(temperature, 24) + 0.25 * np.roll(temperature, 48) + 0.125 * np.roll(temperature, 72)) / 1.875 # depends on [control=['if'], data=[]]
elif how == 'mean':
temperature_mean = temperature # depends on [control=['if'], data=[]]
else:
temperature_mean = None
return temperature_mean |
def _add_fluent_indexes(self):
"""
Add the index commands fluently specified on columns:
"""
for column in self._columns:
for index in ['primary', 'unique', 'index']:
column_index = column.get(index)
if column_index is True:
getattr(self, index)(column.name)
break
elif column_index:
getattr(self, index)(column.name, column_index)
break | def function[_add_fluent_indexes, parameter[self]]:
constant[
Add the index commands fluently specified on columns:
]
for taget[name[column]] in starred[name[self]._columns] begin[:]
for taget[name[index]] in starred[list[[<ast.Constant object at 0x7da1b10244c0>, <ast.Constant object at 0x7da1b1024c40>, <ast.Constant object at 0x7da1b1025810>]]] begin[:]
variable[column_index] assign[=] call[name[column].get, parameter[name[index]]]
if compare[name[column_index] is constant[True]] begin[:]
call[call[name[getattr], parameter[name[self], name[index]]], parameter[name[column].name]]
break | keyword[def] identifier[_add_fluent_indexes] ( identifier[self] ):
literal[string]
keyword[for] identifier[column] keyword[in] identifier[self] . identifier[_columns] :
keyword[for] identifier[index] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[column_index] = identifier[column] . identifier[get] ( identifier[index] )
keyword[if] identifier[column_index] keyword[is] keyword[True] :
identifier[getattr] ( identifier[self] , identifier[index] )( identifier[column] . identifier[name] )
keyword[break]
keyword[elif] identifier[column_index] :
identifier[getattr] ( identifier[self] , identifier[index] )( identifier[column] . identifier[name] , identifier[column_index] )
keyword[break] | def _add_fluent_indexes(self):
"""
Add the index commands fluently specified on columns:
"""
for column in self._columns:
for index in ['primary', 'unique', 'index']:
column_index = column.get(index)
if column_index is True:
getattr(self, index)(column.name)
break # depends on [control=['if'], data=[]]
elif column_index:
getattr(self, index)(column.name, column_index)
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['index']] # depends on [control=['for'], data=['column']] |
def liftover_cpra(self, chromosome, position, verbose=False):
"""
Given chromosome, position in 1-based co-ordinates,
This will use pyliftover to liftover a CPRA, will return a (c,p) tuple or raise NonUniqueLiftover if no unique
and strand maintaining liftover is possible
:param chromosome: string with the chromosome as it's represented in the from_genome
:param position: position on chromosome (will be cast to int)
:return: ((str) chromosome, (int) position) or None if no liftover
"""
chromosome = str(chromosome)
position = int(position)
# Perform the liftover lookup, shift the position by 1 as pyliftover deals in 0-based co-ords
new = self.liftover.convert_coordinate(chromosome, position - 1)
# This has to be here as new will be NoneType when the chromosome doesn't exist in the chainfile
if new:
# If the liftover is unique
if len(new) == 1:
# If the liftover hasn't changed strand
if new[0][2] == "+":
# Set the co-ordinates to the lifted-over ones and write out
new_chromosome = str(new[0][0])
# Shift the position forward by one to convert back to a 1-based co-ords
new_position = int(new[0][1]) + 1
return new_chromosome, new_position
else:
exception_string = "{},{} has a flipped strand in liftover: {}".format(chromosome, position, new)
else:
exception_string = "{},{} lifts over to multiple positions: {}".format(chromosome, position, new)
elif new is None:
exception_string = "Chromosome '{}' provided not in chain file".format(chromosome)
if verbose:
logging.error(exception_string)
return None, None | def function[liftover_cpra, parameter[self, chromosome, position, verbose]]:
constant[
Given chromosome, position in 1-based co-ordinates,
This will use pyliftover to liftover a CPRA, will return a (c,p) tuple or raise NonUniqueLiftover if no unique
and strand maintaining liftover is possible
:param chromosome: string with the chromosome as it's represented in the from_genome
:param position: position on chromosome (will be cast to int)
:return: ((str) chromosome, (int) position) or None if no liftover
]
variable[chromosome] assign[=] call[name[str], parameter[name[chromosome]]]
variable[position] assign[=] call[name[int], parameter[name[position]]]
variable[new] assign[=] call[name[self].liftover.convert_coordinate, parameter[name[chromosome], binary_operation[name[position] - constant[1]]]]
if name[new] begin[:]
if compare[call[name[len], parameter[name[new]]] equal[==] constant[1]] begin[:]
if compare[call[call[name[new]][constant[0]]][constant[2]] equal[==] constant[+]] begin[:]
variable[new_chromosome] assign[=] call[name[str], parameter[call[call[name[new]][constant[0]]][constant[0]]]]
variable[new_position] assign[=] binary_operation[call[name[int], parameter[call[call[name[new]][constant[0]]][constant[1]]]] + constant[1]]
return[tuple[[<ast.Name object at 0x7da20c76c2b0>, <ast.Name object at 0x7da20c76c370>]]]
if name[verbose] begin[:]
call[name[logging].error, parameter[name[exception_string]]]
return[tuple[[<ast.Constant object at 0x7da20c76da80>, <ast.Constant object at 0x7da18bccbd30>]]] | keyword[def] identifier[liftover_cpra] ( identifier[self] , identifier[chromosome] , identifier[position] , identifier[verbose] = keyword[False] ):
literal[string]
identifier[chromosome] = identifier[str] ( identifier[chromosome] )
identifier[position] = identifier[int] ( identifier[position] )
identifier[new] = identifier[self] . identifier[liftover] . identifier[convert_coordinate] ( identifier[chromosome] , identifier[position] - literal[int] )
keyword[if] identifier[new] :
keyword[if] identifier[len] ( identifier[new] )== literal[int] :
keyword[if] identifier[new] [ literal[int] ][ literal[int] ]== literal[string] :
identifier[new_chromosome] = identifier[str] ( identifier[new] [ literal[int] ][ literal[int] ])
identifier[new_position] = identifier[int] ( identifier[new] [ literal[int] ][ literal[int] ])+ literal[int]
keyword[return] identifier[new_chromosome] , identifier[new_position]
keyword[else] :
identifier[exception_string] = literal[string] . identifier[format] ( identifier[chromosome] , identifier[position] , identifier[new] )
keyword[else] :
identifier[exception_string] = literal[string] . identifier[format] ( identifier[chromosome] , identifier[position] , identifier[new] )
keyword[elif] identifier[new] keyword[is] keyword[None] :
identifier[exception_string] = literal[string] . identifier[format] ( identifier[chromosome] )
keyword[if] identifier[verbose] :
identifier[logging] . identifier[error] ( identifier[exception_string] )
keyword[return] keyword[None] , keyword[None] | def liftover_cpra(self, chromosome, position, verbose=False):
"""
Given chromosome, position in 1-based co-ordinates,
This will use pyliftover to liftover a CPRA, will return a (c,p) tuple or raise NonUniqueLiftover if no unique
and strand maintaining liftover is possible
:param chromosome: string with the chromosome as it's represented in the from_genome
:param position: position on chromosome (will be cast to int)
:return: ((str) chromosome, (int) position) or None if no liftover
"""
chromosome = str(chromosome)
position = int(position)
# Perform the liftover lookup, shift the position by 1 as pyliftover deals in 0-based co-ords
new = self.liftover.convert_coordinate(chromosome, position - 1)
# This has to be here as new will be NoneType when the chromosome doesn't exist in the chainfile
if new:
# If the liftover is unique
if len(new) == 1:
# If the liftover hasn't changed strand
if new[0][2] == '+':
# Set the co-ordinates to the lifted-over ones and write out
new_chromosome = str(new[0][0])
# Shift the position forward by one to convert back to a 1-based co-ords
new_position = int(new[0][1]) + 1
return (new_chromosome, new_position) # depends on [control=['if'], data=[]]
else:
exception_string = '{},{} has a flipped strand in liftover: {}'.format(chromosome, position, new) # depends on [control=['if'], data=[]]
else:
exception_string = '{},{} lifts over to multiple positions: {}'.format(chromosome, position, new) # depends on [control=['if'], data=[]]
elif new is None:
exception_string = "Chromosome '{}' provided not in chain file".format(chromosome) # depends on [control=['if'], data=[]]
if verbose:
logging.error(exception_string) # depends on [control=['if'], data=[]]
return (None, None) |
def add_cats(self, axis, cat_data):
'''
Add categories to rows or columns using cat_data array of objects. Each object in cat_data is a dictionary with one key (category title) and value (rows/column names) that have this category. Categories will be added onto the existing categories and will be added in the order of the objects in the array.
Example ``cat_data``::
[
{
"title": "First Category",
"cats": {
"true": [
"ROS1",
"AAK1"
]
}
},
{
"title": "Second Category",
"cats": {
"something": [
"PDK4"
]
}
}
]
'''
for inst_data in cat_data:
categories.add_cats(self, axis, inst_data) | def function[add_cats, parameter[self, axis, cat_data]]:
constant[
Add categories to rows or columns using cat_data array of objects. Each object in cat_data is a dictionary with one key (category title) and value (rows/column names) that have this category. Categories will be added onto the existing categories and will be added in the order of the objects in the array.
Example ``cat_data``::
[
{
"title": "First Category",
"cats": {
"true": [
"ROS1",
"AAK1"
]
}
},
{
"title": "Second Category",
"cats": {
"something": [
"PDK4"
]
}
}
]
]
for taget[name[inst_data]] in starred[name[cat_data]] begin[:]
call[name[categories].add_cats, parameter[name[self], name[axis], name[inst_data]]] | keyword[def] identifier[add_cats] ( identifier[self] , identifier[axis] , identifier[cat_data] ):
literal[string]
keyword[for] identifier[inst_data] keyword[in] identifier[cat_data] :
identifier[categories] . identifier[add_cats] ( identifier[self] , identifier[axis] , identifier[inst_data] ) | def add_cats(self, axis, cat_data):
"""
Add categories to rows or columns using cat_data array of objects. Each object in cat_data is a dictionary with one key (category title) and value (rows/column names) that have this category. Categories will be added onto the existing categories and will be added in the order of the objects in the array.
Example ``cat_data``::
[
{
"title": "First Category",
"cats": {
"true": [
"ROS1",
"AAK1"
]
}
},
{
"title": "Second Category",
"cats": {
"something": [
"PDK4"
]
}
}
]
"""
for inst_data in cat_data:
categories.add_cats(self, axis, inst_data) # depends on [control=['for'], data=['inst_data']] |
def forward_reference(self, slot_range: str, owning_class: str) -> bool:
""" Determine whether slot_range is a forward reference """
for cname in self.schema.classes:
if cname == owning_class:
return True # Occurs on or after
elif cname == slot_range:
return False # Occurs before
return True | def function[forward_reference, parameter[self, slot_range, owning_class]]:
constant[ Determine whether slot_range is a forward reference ]
for taget[name[cname]] in starred[name[self].schema.classes] begin[:]
if compare[name[cname] equal[==] name[owning_class]] begin[:]
return[constant[True]]
return[constant[True]] | keyword[def] identifier[forward_reference] ( identifier[self] , identifier[slot_range] : identifier[str] , identifier[owning_class] : identifier[str] )-> identifier[bool] :
literal[string]
keyword[for] identifier[cname] keyword[in] identifier[self] . identifier[schema] . identifier[classes] :
keyword[if] identifier[cname] == identifier[owning_class] :
keyword[return] keyword[True]
keyword[elif] identifier[cname] == identifier[slot_range] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def forward_reference(self, slot_range: str, owning_class: str) -> bool:
""" Determine whether slot_range is a forward reference """
for cname in self.schema.classes:
if cname == owning_class:
return True # Occurs on or after # depends on [control=['if'], data=[]]
elif cname == slot_range:
return False # Occurs before # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['cname']]
return True |
def get_process_id_list(self, sentry_unit, process_name,
expect_success=True, pgrep_full=False):
"""Get a list of process ID(s) from a single sentry juju unit
for a single process name.
:param sentry_unit: Amulet sentry instance (juju unit)
:param process_name: Process name
:param expect_success: If False, expect the PID to be missing,
raise if it is present.
:returns: List of process IDs
"""
if pgrep_full:
cmd = 'pgrep -f "{}"'.format(process_name)
else:
cmd = 'pidof -x "{}"'.format(process_name)
if not expect_success:
cmd += " || exit 0 && exit 1"
output, code = sentry_unit.run(cmd)
if code != 0:
msg = ('{} `{}` returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code, output))
amulet.raise_status(amulet.FAIL, msg=msg)
return str(output).split() | def function[get_process_id_list, parameter[self, sentry_unit, process_name, expect_success, pgrep_full]]:
constant[Get a list of process ID(s) from a single sentry juju unit
for a single process name.
:param sentry_unit: Amulet sentry instance (juju unit)
:param process_name: Process name
:param expect_success: If False, expect the PID to be missing,
raise if it is present.
:returns: List of process IDs
]
if name[pgrep_full] begin[:]
variable[cmd] assign[=] call[constant[pgrep -f "{}"].format, parameter[name[process_name]]]
if <ast.UnaryOp object at 0x7da2041db460> begin[:]
<ast.AugAssign object at 0x7da2041db8b0>
<ast.Tuple object at 0x7da2041dbb50> assign[=] call[name[sentry_unit].run, parameter[name[cmd]]]
if compare[name[code] not_equal[!=] constant[0]] begin[:]
variable[msg] assign[=] call[constant[{} `{}` returned {} {}].format, parameter[call[name[sentry_unit].info][constant[unit_name]], name[cmd], name[code], name[output]]]
call[name[amulet].raise_status, parameter[name[amulet].FAIL]]
return[call[call[name[str], parameter[name[output]]].split, parameter[]]] | keyword[def] identifier[get_process_id_list] ( identifier[self] , identifier[sentry_unit] , identifier[process_name] ,
identifier[expect_success] = keyword[True] , identifier[pgrep_full] = keyword[False] ):
literal[string]
keyword[if] identifier[pgrep_full] :
identifier[cmd] = literal[string] . identifier[format] ( identifier[process_name] )
keyword[else] :
identifier[cmd] = literal[string] . identifier[format] ( identifier[process_name] )
keyword[if] keyword[not] identifier[expect_success] :
identifier[cmd] += literal[string]
identifier[output] , identifier[code] = identifier[sentry_unit] . identifier[run] ( identifier[cmd] )
keyword[if] identifier[code] != literal[int] :
identifier[msg] =( literal[string]
literal[string] . identifier[format] ( identifier[sentry_unit] . identifier[info] [ literal[string] ],
identifier[cmd] , identifier[code] , identifier[output] ))
identifier[amulet] . identifier[raise_status] ( identifier[amulet] . identifier[FAIL] , identifier[msg] = identifier[msg] )
keyword[return] identifier[str] ( identifier[output] ). identifier[split] () | def get_process_id_list(self, sentry_unit, process_name, expect_success=True, pgrep_full=False):
"""Get a list of process ID(s) from a single sentry juju unit
for a single process name.
:param sentry_unit: Amulet sentry instance (juju unit)
:param process_name: Process name
:param expect_success: If False, expect the PID to be missing,
raise if it is present.
:returns: List of process IDs
"""
if pgrep_full:
cmd = 'pgrep -f "{}"'.format(process_name) # depends on [control=['if'], data=[]]
else:
cmd = 'pidof -x "{}"'.format(process_name)
if not expect_success:
cmd += ' || exit 0 && exit 1' # depends on [control=['if'], data=[]]
(output, code) = sentry_unit.run(cmd)
if code != 0:
msg = '{} `{}` returned {} {}'.format(sentry_unit.info['unit_name'], cmd, code, output)
amulet.raise_status(amulet.FAIL, msg=msg) # depends on [control=['if'], data=['code']]
return str(output).split() |
def notify_rejection(analysisrequest):
"""
Notifies via email that a given Analysis Request has been rejected. The
notification is sent to the Client contacts assigned to the Analysis
Request.
:param analysisrequest: Analysis Request to which the notification refers
:returns: true if success
"""
# We do this imports here to avoid circular dependencies until we deal
# better with this notify_rejection thing.
from bika.lims.browser.analysisrequest.reject import \
AnalysisRequestRejectPdfView, AnalysisRequestRejectEmailView
arid = analysisrequest.getId()
# This is the template to render for the pdf that will be either attached
# to the email and attached the the Analysis Request for further access
tpl = AnalysisRequestRejectPdfView(analysisrequest, analysisrequest.REQUEST)
html = tpl.template()
html = safe_unicode(html).encode('utf-8')
filename = '%s-rejected' % arid
pdf_fn = tempfile.mktemp(suffix=".pdf")
pdf = createPdf(htmlreport=html, outfile=pdf_fn)
if pdf:
# Attach the pdf to the Analysis Request
attid = analysisrequest.aq_parent.generateUniqueId('Attachment')
att = _createObjectByType(
"Attachment", analysisrequest.aq_parent, attid)
att.setAttachmentFile(open(pdf_fn))
# Awkward workaround to rename the file
attf = att.getAttachmentFile()
attf.filename = '%s.pdf' % filename
att.setAttachmentFile(attf)
att.unmarkCreationFlag()
renameAfterCreation(att)
analysisrequest.addAttachment(att)
os.remove(pdf_fn)
# This is the message for the email's body
tpl = AnalysisRequestRejectEmailView(
analysisrequest, analysisrequest.REQUEST)
html = tpl.template()
html = safe_unicode(html).encode('utf-8')
# compose and send email.
mailto = []
lab = analysisrequest.bika_setup.laboratory
mailfrom = formataddr((encode_header(lab.getName()), lab.getEmailAddress()))
mailsubject = _('%s has been rejected') % arid
contacts = [analysisrequest.getContact()] + analysisrequest.getCCContact()
for contact in contacts:
name = to_utf8(contact.getFullname())
email = to_utf8(contact.getEmailAddress())
if email:
mailto.append(formataddr((encode_header(name), email)))
if not mailto:
return False
mime_msg = MIMEMultipart('related')
mime_msg['Subject'] = mailsubject
mime_msg['From'] = mailfrom
mime_msg['To'] = ','.join(mailto)
mime_msg.preamble = 'This is a multi-part MIME message.'
msg_txt = MIMEText(html, _subtype='html')
mime_msg.attach(msg_txt)
if pdf:
attachPdf(mime_msg, pdf, filename)
try:
host = getToolByName(analysisrequest, 'MailHost')
host.send(mime_msg.as_string(), immediate=True)
except:
logger.warning(
"Email with subject %s was not sent (SMTP connection error)" % mailsubject)
return True | def function[notify_rejection, parameter[analysisrequest]]:
constant[
Notifies via email that a given Analysis Request has been rejected. The
notification is sent to the Client contacts assigned to the Analysis
Request.
:param analysisrequest: Analysis Request to which the notification refers
:returns: true if success
]
from relative_module[bika.lims.browser.analysisrequest.reject] import module[AnalysisRequestRejectPdfView], module[AnalysisRequestRejectEmailView]
variable[arid] assign[=] call[name[analysisrequest].getId, parameter[]]
variable[tpl] assign[=] call[name[AnalysisRequestRejectPdfView], parameter[name[analysisrequest], name[analysisrequest].REQUEST]]
variable[html] assign[=] call[name[tpl].template, parameter[]]
variable[html] assign[=] call[call[name[safe_unicode], parameter[name[html]]].encode, parameter[constant[utf-8]]]
variable[filename] assign[=] binary_operation[constant[%s-rejected] <ast.Mod object at 0x7da2590d6920> name[arid]]
variable[pdf_fn] assign[=] call[name[tempfile].mktemp, parameter[]]
variable[pdf] assign[=] call[name[createPdf], parameter[]]
if name[pdf] begin[:]
variable[attid] assign[=] call[name[analysisrequest].aq_parent.generateUniqueId, parameter[constant[Attachment]]]
variable[att] assign[=] call[name[_createObjectByType], parameter[constant[Attachment], name[analysisrequest].aq_parent, name[attid]]]
call[name[att].setAttachmentFile, parameter[call[name[open], parameter[name[pdf_fn]]]]]
variable[attf] assign[=] call[name[att].getAttachmentFile, parameter[]]
name[attf].filename assign[=] binary_operation[constant[%s.pdf] <ast.Mod object at 0x7da2590d6920> name[filename]]
call[name[att].setAttachmentFile, parameter[name[attf]]]
call[name[att].unmarkCreationFlag, parameter[]]
call[name[renameAfterCreation], parameter[name[att]]]
call[name[analysisrequest].addAttachment, parameter[name[att]]]
call[name[os].remove, parameter[name[pdf_fn]]]
variable[tpl] assign[=] call[name[AnalysisRequestRejectEmailView], parameter[name[analysisrequest], name[analysisrequest].REQUEST]]
variable[html] assign[=] call[name[tpl].template, parameter[]]
variable[html] assign[=] call[call[name[safe_unicode], parameter[name[html]]].encode, parameter[constant[utf-8]]]
variable[mailto] assign[=] list[[]]
variable[lab] assign[=] name[analysisrequest].bika_setup.laboratory
variable[mailfrom] assign[=] call[name[formataddr], parameter[tuple[[<ast.Call object at 0x7da18f00fd30>, <ast.Call object at 0x7da18f00c760>]]]]
variable[mailsubject] assign[=] binary_operation[call[name[_], parameter[constant[%s has been rejected]]] <ast.Mod object at 0x7da2590d6920> name[arid]]
variable[contacts] assign[=] binary_operation[list[[<ast.Call object at 0x7da18f00e380>]] + call[name[analysisrequest].getCCContact, parameter[]]]
for taget[name[contact]] in starred[name[contacts]] begin[:]
variable[name] assign[=] call[name[to_utf8], parameter[call[name[contact].getFullname, parameter[]]]]
variable[email] assign[=] call[name[to_utf8], parameter[call[name[contact].getEmailAddress, parameter[]]]]
if name[email] begin[:]
call[name[mailto].append, parameter[call[name[formataddr], parameter[tuple[[<ast.Call object at 0x7da1b1d49f00>, <ast.Name object at 0x7da1b1d48190>]]]]]]
if <ast.UnaryOp object at 0x7da1b1d4a7a0> begin[:]
return[constant[False]]
variable[mime_msg] assign[=] call[name[MIMEMultipart], parameter[constant[related]]]
call[name[mime_msg]][constant[Subject]] assign[=] name[mailsubject]
call[name[mime_msg]][constant[From]] assign[=] name[mailfrom]
call[name[mime_msg]][constant[To]] assign[=] call[constant[,].join, parameter[name[mailto]]]
name[mime_msg].preamble assign[=] constant[This is a multi-part MIME message.]
variable[msg_txt] assign[=] call[name[MIMEText], parameter[name[html]]]
call[name[mime_msg].attach, parameter[name[msg_txt]]]
if name[pdf] begin[:]
call[name[attachPdf], parameter[name[mime_msg], name[pdf], name[filename]]]
<ast.Try object at 0x7da1b1d4b7f0>
return[constant[True]] | keyword[def] identifier[notify_rejection] ( identifier[analysisrequest] ):
literal[string]
keyword[from] identifier[bika] . identifier[lims] . identifier[browser] . identifier[analysisrequest] . identifier[reject] keyword[import] identifier[AnalysisRequestRejectPdfView] , identifier[AnalysisRequestRejectEmailView]
identifier[arid] = identifier[analysisrequest] . identifier[getId] ()
identifier[tpl] = identifier[AnalysisRequestRejectPdfView] ( identifier[analysisrequest] , identifier[analysisrequest] . identifier[REQUEST] )
identifier[html] = identifier[tpl] . identifier[template] ()
identifier[html] = identifier[safe_unicode] ( identifier[html] ). identifier[encode] ( literal[string] )
identifier[filename] = literal[string] % identifier[arid]
identifier[pdf_fn] = identifier[tempfile] . identifier[mktemp] ( identifier[suffix] = literal[string] )
identifier[pdf] = identifier[createPdf] ( identifier[htmlreport] = identifier[html] , identifier[outfile] = identifier[pdf_fn] )
keyword[if] identifier[pdf] :
identifier[attid] = identifier[analysisrequest] . identifier[aq_parent] . identifier[generateUniqueId] ( literal[string] )
identifier[att] = identifier[_createObjectByType] (
literal[string] , identifier[analysisrequest] . identifier[aq_parent] , identifier[attid] )
identifier[att] . identifier[setAttachmentFile] ( identifier[open] ( identifier[pdf_fn] ))
identifier[attf] = identifier[att] . identifier[getAttachmentFile] ()
identifier[attf] . identifier[filename] = literal[string] % identifier[filename]
identifier[att] . identifier[setAttachmentFile] ( identifier[attf] )
identifier[att] . identifier[unmarkCreationFlag] ()
identifier[renameAfterCreation] ( identifier[att] )
identifier[analysisrequest] . identifier[addAttachment] ( identifier[att] )
identifier[os] . identifier[remove] ( identifier[pdf_fn] )
identifier[tpl] = identifier[AnalysisRequestRejectEmailView] (
identifier[analysisrequest] , identifier[analysisrequest] . identifier[REQUEST] )
identifier[html] = identifier[tpl] . identifier[template] ()
identifier[html] = identifier[safe_unicode] ( identifier[html] ). identifier[encode] ( literal[string] )
identifier[mailto] =[]
identifier[lab] = identifier[analysisrequest] . identifier[bika_setup] . identifier[laboratory]
identifier[mailfrom] = identifier[formataddr] (( identifier[encode_header] ( identifier[lab] . identifier[getName] ()), identifier[lab] . identifier[getEmailAddress] ()))
identifier[mailsubject] = identifier[_] ( literal[string] )% identifier[arid]
identifier[contacts] =[ identifier[analysisrequest] . identifier[getContact] ()]+ identifier[analysisrequest] . identifier[getCCContact] ()
keyword[for] identifier[contact] keyword[in] identifier[contacts] :
identifier[name] = identifier[to_utf8] ( identifier[contact] . identifier[getFullname] ())
identifier[email] = identifier[to_utf8] ( identifier[contact] . identifier[getEmailAddress] ())
keyword[if] identifier[email] :
identifier[mailto] . identifier[append] ( identifier[formataddr] (( identifier[encode_header] ( identifier[name] ), identifier[email] )))
keyword[if] keyword[not] identifier[mailto] :
keyword[return] keyword[False]
identifier[mime_msg] = identifier[MIMEMultipart] ( literal[string] )
identifier[mime_msg] [ literal[string] ]= identifier[mailsubject]
identifier[mime_msg] [ literal[string] ]= identifier[mailfrom]
identifier[mime_msg] [ literal[string] ]= literal[string] . identifier[join] ( identifier[mailto] )
identifier[mime_msg] . identifier[preamble] = literal[string]
identifier[msg_txt] = identifier[MIMEText] ( identifier[html] , identifier[_subtype] = literal[string] )
identifier[mime_msg] . identifier[attach] ( identifier[msg_txt] )
keyword[if] identifier[pdf] :
identifier[attachPdf] ( identifier[mime_msg] , identifier[pdf] , identifier[filename] )
keyword[try] :
identifier[host] = identifier[getToolByName] ( identifier[analysisrequest] , literal[string] )
identifier[host] . identifier[send] ( identifier[mime_msg] . identifier[as_string] (), identifier[immediate] = keyword[True] )
keyword[except] :
identifier[logger] . identifier[warning] (
literal[string] % identifier[mailsubject] )
keyword[return] keyword[True] | def notify_rejection(analysisrequest):
"""
Notifies via email that a given Analysis Request has been rejected. The
notification is sent to the Client contacts assigned to the Analysis
Request.
:param analysisrequest: Analysis Request to which the notification refers
:returns: true if success
"""
# We do this imports here to avoid circular dependencies until we deal
# better with this notify_rejection thing.
from bika.lims.browser.analysisrequest.reject import AnalysisRequestRejectPdfView, AnalysisRequestRejectEmailView
arid = analysisrequest.getId()
# This is the template to render for the pdf that will be either attached
# to the email and attached the the Analysis Request for further access
tpl = AnalysisRequestRejectPdfView(analysisrequest, analysisrequest.REQUEST)
html = tpl.template()
html = safe_unicode(html).encode('utf-8')
filename = '%s-rejected' % arid
pdf_fn = tempfile.mktemp(suffix='.pdf')
pdf = createPdf(htmlreport=html, outfile=pdf_fn)
if pdf:
# Attach the pdf to the Analysis Request
attid = analysisrequest.aq_parent.generateUniqueId('Attachment')
att = _createObjectByType('Attachment', analysisrequest.aq_parent, attid)
att.setAttachmentFile(open(pdf_fn))
# Awkward workaround to rename the file
attf = att.getAttachmentFile()
attf.filename = '%s.pdf' % filename
att.setAttachmentFile(attf)
att.unmarkCreationFlag()
renameAfterCreation(att)
analysisrequest.addAttachment(att)
os.remove(pdf_fn) # depends on [control=['if'], data=[]]
# This is the message for the email's body
tpl = AnalysisRequestRejectEmailView(analysisrequest, analysisrequest.REQUEST)
html = tpl.template()
html = safe_unicode(html).encode('utf-8')
# compose and send email.
mailto = []
lab = analysisrequest.bika_setup.laboratory
mailfrom = formataddr((encode_header(lab.getName()), lab.getEmailAddress()))
mailsubject = _('%s has been rejected') % arid
contacts = [analysisrequest.getContact()] + analysisrequest.getCCContact()
for contact in contacts:
name = to_utf8(contact.getFullname())
email = to_utf8(contact.getEmailAddress())
if email:
mailto.append(formataddr((encode_header(name), email))) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['contact']]
if not mailto:
return False # depends on [control=['if'], data=[]]
mime_msg = MIMEMultipart('related')
mime_msg['Subject'] = mailsubject
mime_msg['From'] = mailfrom
mime_msg['To'] = ','.join(mailto)
mime_msg.preamble = 'This is a multi-part MIME message.'
msg_txt = MIMEText(html, _subtype='html')
mime_msg.attach(msg_txt)
if pdf:
attachPdf(mime_msg, pdf, filename) # depends on [control=['if'], data=[]]
try:
host = getToolByName(analysisrequest, 'MailHost')
host.send(mime_msg.as_string(), immediate=True) # depends on [control=['try'], data=[]]
except:
logger.warning('Email with subject %s was not sent (SMTP connection error)' % mailsubject) # depends on [control=['except'], data=[]]
return True |
def update_delivery_note_item(self, delivery_note_item_id, delivery_note_item_dict):
"""
Updates a delivery note item
:param delivery_note_item_id: delivery note item id
:param delivery_note_item_dict: dict
:return: dict
"""
return self._create_put_request(
resource=DELIVERY_NOTE_ITEMS,
billomat_id=delivery_note_item_id,
send_data=delivery_note_item_dict
) | def function[update_delivery_note_item, parameter[self, delivery_note_item_id, delivery_note_item_dict]]:
constant[
Updates a delivery note item
:param delivery_note_item_id: delivery note item id
:param delivery_note_item_dict: dict
:return: dict
]
return[call[name[self]._create_put_request, parameter[]]] | keyword[def] identifier[update_delivery_note_item] ( identifier[self] , identifier[delivery_note_item_id] , identifier[delivery_note_item_dict] ):
literal[string]
keyword[return] identifier[self] . identifier[_create_put_request] (
identifier[resource] = identifier[DELIVERY_NOTE_ITEMS] ,
identifier[billomat_id] = identifier[delivery_note_item_id] ,
identifier[send_data] = identifier[delivery_note_item_dict]
) | def update_delivery_note_item(self, delivery_note_item_id, delivery_note_item_dict):
"""
Updates a delivery note item
:param delivery_note_item_id: delivery note item id
:param delivery_note_item_dict: dict
:return: dict
"""
return self._create_put_request(resource=DELIVERY_NOTE_ITEMS, billomat_id=delivery_note_item_id, send_data=delivery_note_item_dict) |
def cached_model_file(model_name='anon_model', file=None, model_code=None, cache_dir=None,
                      fit_cachefile=None, include_prefix=False):
    """Compute the filename of the cached, compiled Stan model pickle for the
    given model name and Stan code/file.

    The model prefix embeds a digest of the model code together with the
    pystan and Cython versions, so a change to code or toolchain produces a
    distinct cache entry.

    :param model_name: logical model name; sanitized before use
    :param file: optional path to a file containing the Stan model code;
        when given, its contents replace ``model_code``
    :param model_code: optional Stan model code as a string
    :param cache_dir: cache directory; a default is resolved when None
    :param fit_cachefile: optional name/path of an existing fit cache file;
        its parsed prefix overrides the computed one on mismatch
    :param include_prefix: if True, return ``(model_prefix, model_cachefile)``
    :return: model cache filename, or ``(prefix, filename)`` when
        ``include_prefix`` is True
    """
    cache_dir = _get_cache_dir(cache_dir)
    model_name = _sanitize_model_name(model_name)
    ## compute model prefix as "<name>.<digest of code + toolchain versions>"
    if file:
        model_code = _read_file(file)
    if model_code:
        model_prefix = '.'.join([model_name, _make_digest(dict(model_code=model_code,
                                                               pystan=pystan.__version__,
                                                               cython=Cython.__version__))])
    else: ## handle case where no model code given
        if file is not None:
            logger.info('Note - no model code detected from given file: {}'.format(file))
        else:
            logger.info('Note - no model code detected (neither file nor model_code given)')
    # NOTE(review): if neither model_code nor fit_cachefile is supplied,
    # `model_prefix` is never bound and the join below raises NameError —
    # confirm callers always provide one of the two.
    ## parse model_prefix from fit_cachefile if given
    if fit_cachefile:
        # if necessary, impute cache_dir from filepath
        if fit_cachefile != os.path.basename(fit_cachefile):
            cache_dir, fit_cachefile = os.path.split(os.path.abspath(fit_cachefile))
        # if fit_cachefile given, parse to get fit_model_prefix
        # (strips the trailing ".stanfit..." suffix from the basename)
        fit_model_prefix = re.sub(string=os.path.basename(fit_cachefile), pattern='(.*).stanfit.*', repl='\\1')
        if model_code:
            if fit_model_prefix != model_prefix:
                logger.warning('Computed model prefix does not match that used to estimate model. Using prefix matching fit_cachefile')
        # the prefix parsed from the fit cache file always wins
        model_prefix = fit_model_prefix
    # compute path to model cachefile: "<prefix>.stanmodel.pkl"
    model_cachefile = '.'.join([model_prefix, 'stanmodel', 'pkl'])
    if include_prefix:
        return model_prefix, model_cachefile
    return model_cachefile
constant[ Given model name & stan model code/file, compute path to cached stan fit
if include_prefix, returns (model_prefix, model_cachefile)
]
variable[cache_dir] assign[=] call[name[_get_cache_dir], parameter[name[cache_dir]]]
variable[model_name] assign[=] call[name[_sanitize_model_name], parameter[name[model_name]]]
if name[file] begin[:]
variable[model_code] assign[=] call[name[_read_file], parameter[name[file]]]
if name[model_code] begin[:]
variable[model_prefix] assign[=] call[constant[.].join, parameter[list[[<ast.Name object at 0x7da18f58dd80>, <ast.Call object at 0x7da18f58f190>]]]]
if name[fit_cachefile] begin[:]
if compare[name[fit_cachefile] not_equal[!=] call[name[os].path.basename, parameter[name[fit_cachefile]]]] begin[:]
<ast.Tuple object at 0x7da18f58fb20> assign[=] call[name[os].path.split, parameter[call[name[os].path.abspath, parameter[name[fit_cachefile]]]]]
variable[fit_model_prefix] assign[=] call[name[re].sub, parameter[]]
if name[model_code] begin[:]
if compare[name[fit_model_prefix] not_equal[!=] name[model_prefix]] begin[:]
call[name[logger].warning, parameter[constant[Computed model prefix does not match that used to estimate model. Using prefix matching fit_cachefile]]]
variable[model_prefix] assign[=] name[fit_model_prefix]
variable[model_cachefile] assign[=] call[constant[.].join, parameter[list[[<ast.Name object at 0x7da18f58f6d0>, <ast.Constant object at 0x7da18f58c8e0>, <ast.Constant object at 0x7da18f58e560>]]]]
if name[include_prefix] begin[:]
return[tuple[[<ast.Name object at 0x7da18f58c7c0>, <ast.Name object at 0x7da18f58f640>]]]
return[name[model_cachefile]] | keyword[def] identifier[cached_model_file] ( identifier[model_name] = literal[string] , identifier[file] = keyword[None] , identifier[model_code] = keyword[None] , identifier[cache_dir] = keyword[None] ,
identifier[fit_cachefile] = keyword[None] , identifier[include_prefix] = keyword[False] ):
literal[string]
identifier[cache_dir] = identifier[_get_cache_dir] ( identifier[cache_dir] )
identifier[model_name] = identifier[_sanitize_model_name] ( identifier[model_name] )
keyword[if] identifier[file] :
identifier[model_code] = identifier[_read_file] ( identifier[file] )
keyword[if] identifier[model_code] :
identifier[model_prefix] = literal[string] . identifier[join] ([ identifier[model_name] , identifier[_make_digest] ( identifier[dict] ( identifier[model_code] = identifier[model_code] ,
identifier[pystan] = identifier[pystan] . identifier[__version__] ,
identifier[cython] = identifier[Cython] . identifier[__version__] ))])
keyword[else] :
keyword[if] identifier[file] keyword[is] keyword[not] keyword[None] :
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[file] ))
keyword[else] :
identifier[logger] . identifier[info] ( literal[string] )
keyword[if] identifier[fit_cachefile] :
keyword[if] identifier[fit_cachefile] != identifier[os] . identifier[path] . identifier[basename] ( identifier[fit_cachefile] ):
identifier[cache_dir] , identifier[fit_cachefile] = identifier[os] . identifier[path] . identifier[split] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[fit_cachefile] ))
identifier[fit_model_prefix] = identifier[re] . identifier[sub] ( identifier[string] = identifier[os] . identifier[path] . identifier[basename] ( identifier[fit_cachefile] ), identifier[pattern] = literal[string] , identifier[repl] = literal[string] )
keyword[if] identifier[model_code] :
keyword[if] identifier[fit_model_prefix] != identifier[model_prefix] :
identifier[logger] . identifier[warning] ( literal[string] )
identifier[model_prefix] = identifier[fit_model_prefix]
identifier[model_cachefile] = literal[string] . identifier[join] ([ identifier[model_prefix] , literal[string] , literal[string] ])
keyword[if] identifier[include_prefix] :
keyword[return] identifier[model_prefix] , identifier[model_cachefile]
keyword[return] identifier[model_cachefile] | def cached_model_file(model_name='anon_model', file=None, model_code=None, cache_dir=None, fit_cachefile=None, include_prefix=False):
""" Given model name & stan model code/file, compute path to cached stan fit
if include_prefix, returns (model_prefix, model_cachefile)
"""
cache_dir = _get_cache_dir(cache_dir)
model_name = _sanitize_model_name(model_name)
## compute model prefix
if file:
model_code = _read_file(file) # depends on [control=['if'], data=[]]
if model_code:
model_prefix = '.'.join([model_name, _make_digest(dict(model_code=model_code, pystan=pystan.__version__, cython=Cython.__version__))]) # depends on [control=['if'], data=[]] ## handle case where no model code given
elif file is not None:
logger.info('Note - no model code detected from given file: {}'.format(file)) # depends on [control=['if'], data=['file']]
else:
logger.info('Note - no model code detected (neither file nor model_code given)')
## parse model_prefix from fit_cachefile if given
if fit_cachefile:
# if necessary, impute cache_dir from filepath
if fit_cachefile != os.path.basename(fit_cachefile):
(cache_dir, fit_cachefile) = os.path.split(os.path.abspath(fit_cachefile)) # depends on [control=['if'], data=['fit_cachefile']]
# if fit_cachefile given, parse to get fit_model_prefix
fit_model_prefix = re.sub(string=os.path.basename(fit_cachefile), pattern='(.*).stanfit.*', repl='\\1')
if model_code:
if fit_model_prefix != model_prefix:
logger.warning('Computed model prefix does not match that used to estimate model. Using prefix matching fit_cachefile') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
model_prefix = fit_model_prefix # depends on [control=['if'], data=[]]
# compute path to model cachefile
model_cachefile = '.'.join([model_prefix, 'stanmodel', 'pkl'])
if include_prefix:
return (model_prefix, model_cachefile) # depends on [control=['if'], data=[]]
return model_cachefile |
def get_network(context, id, fields=None):
    """Retrieve a network.
    : param context: neutron api request context
    : param id: UUID representing the network to fetch.
    : param fields: a list of strings that are valid keys in a
        network dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
        object in neutron/api/v2/attributes.py. Only these fields
        will be returned.
    """
    LOG.info("get_network %s for tenant %s fields %s" %
             (id, context.tenant_id, fields))
    # Single-record lookup, including the network's subnets.
    lookup_args = dict(context=context, limit=None, sorts=['id'], marker=None,
                       page_reverse=False, id=id, join_subnets=True,
                       scope=db_api.ONE)
    network = db_api.network_find(**lookup_args)
    if network:
        return v._make_network_dict(network, fields=fields)
    raise n_exc.NetworkNotFound(net_id=id)
constant[Retrieve a network.
: param context: neutron api request context
: param id: UUID representing the network to fetch.
: param fields: a list of strings that are valid keys in a
network dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
]
call[name[LOG].info, parameter[binary_operation[constant[get_network %s for tenant %s fields %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b00f7e80>, <ast.Attribute object at 0x7da1b00f48b0>, <ast.Name object at 0x7da1b00f4fd0>]]]]]
variable[network] assign[=] call[name[db_api].network_find, parameter[]]
if <ast.UnaryOp object at 0x7da20c795240> begin[:]
<ast.Raise object at 0x7da20c7942e0>
return[call[name[v]._make_network_dict, parameter[name[network]]]] | keyword[def] identifier[get_network] ( identifier[context] , identifier[id] , identifier[fields] = keyword[None] ):
literal[string]
identifier[LOG] . identifier[info] ( literal[string] %
( identifier[id] , identifier[context] . identifier[tenant_id] , identifier[fields] ))
identifier[network] = identifier[db_api] . identifier[network_find] ( identifier[context] = identifier[context] , identifier[limit] = keyword[None] , identifier[sorts] =[ literal[string] ],
identifier[marker] = keyword[None] , identifier[page_reverse] = keyword[False] ,
identifier[id] = identifier[id] , identifier[join_subnets] = keyword[True] , identifier[scope] = identifier[db_api] . identifier[ONE] )
keyword[if] keyword[not] identifier[network] :
keyword[raise] identifier[n_exc] . identifier[NetworkNotFound] ( identifier[net_id] = identifier[id] )
keyword[return] identifier[v] . identifier[_make_network_dict] ( identifier[network] , identifier[fields] = identifier[fields] ) | def get_network(context, id, fields=None):
"""Retrieve a network.
: param context: neutron api request context
: param id: UUID representing the network to fetch.
: param fields: a list of strings that are valid keys in a
network dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
"""
LOG.info('get_network %s for tenant %s fields %s' % (id, context.tenant_id, fields))
network = db_api.network_find(context=context, limit=None, sorts=['id'], marker=None, page_reverse=False, id=id, join_subnets=True, scope=db_api.ONE)
if not network:
raise n_exc.NetworkNotFound(net_id=id) # depends on [control=['if'], data=[]]
return v._make_network_dict(network, fields=fields) |
def broker_metadata(self, broker_id):
    """Get BrokerMetadata
    Arguments:
        broker_id (int): node_id for a broker to check
    Returns:
        BrokerMetadata or None if not found
    """
    # Prefer the discovered broker table; fall back to bootstrap brokers.
    known = self._brokers.get(broker_id)
    if known:
        return known
    return self._bootstrap_brokers.get(broker_id)
constant[Get BrokerMetadata
Arguments:
broker_id (int): node_id for a broker to check
Returns:
BrokerMetadata or None if not found
]
return[<ast.BoolOp object at 0x7da1b1c2a6e0>] | keyword[def] identifier[broker_metadata] ( identifier[self] , identifier[broker_id] ):
literal[string]
keyword[return] identifier[self] . identifier[_brokers] . identifier[get] ( identifier[broker_id] ) keyword[or] identifier[self] . identifier[_bootstrap_brokers] . identifier[get] ( identifier[broker_id] ) | def broker_metadata(self, broker_id):
"""Get BrokerMetadata
Arguments:
broker_id (int): node_id for a broker to check
Returns:
BrokerMetadata or None if not found
"""
return self._brokers.get(broker_id) or self._bootstrap_brokers.get(broker_id) |
def addSubsequenceOfFeature(self, parentid):
    """
    This will add reciprocal triples like:
    feature <is subsequence of> parent
    parent has_subsequence feature
    :param graph:
    :param parentid:
    :return:
    """
    triples = (
        (self.fid, self.globaltt['is subsequence of'], parentid),
        # the reciprocal edge could be inferred by reasoning, but is
        # materialized here during ETL
        (parentid, self.globaltt['has subsequence'], self.fid),
    )
    for subject, predicate, obj in triples:
        self.graph.addTriple(subject, predicate, obj)
    return
constant[
This will add reciprocal triples like:
feature <is subsequence of> parent
parent has_subsequence feature
:param graph:
:param parentid:
:return:
]
call[name[self].graph.addTriple, parameter[name[self].fid, call[name[self].globaltt][constant[is subsequence of]], name[parentid]]]
call[name[self].graph.addTriple, parameter[name[parentid], call[name[self].globaltt][constant[has subsequence]], name[self].fid]]
return[None] | keyword[def] identifier[addSubsequenceOfFeature] ( identifier[self] , identifier[parentid] ):
literal[string]
identifier[self] . identifier[graph] . identifier[addTriple] ( identifier[self] . identifier[fid] , identifier[self] . identifier[globaltt] [ literal[string] ], identifier[parentid] )
identifier[self] . identifier[graph] . identifier[addTriple] ( identifier[parentid] , identifier[self] . identifier[globaltt] [ literal[string] ], identifier[self] . identifier[fid] )
keyword[return] | def addSubsequenceOfFeature(self, parentid):
"""
This will add reciprocal triples like:
feature <is subsequence of> parent
parent has_subsequence feature
:param graph:
:param parentid:
:return:
"""
self.graph.addTriple(self.fid, self.globaltt['is subsequence of'], parentid)
# this should be expected to be done in reasoning not ETL
self.graph.addTriple(parentid, self.globaltt['has subsequence'], self.fid)
return |
def wait_displayed(element, timeout=None, fail_on_timeout=None):
    """
    Wait until element becomes visible or time out.
    Returns true is element became visible, otherwise false.
    If timeout is not specified or 0, then uses specific element wait timeout.
    :param element:
    :param timeout:
    :param fail_on_timeout:
    :return:
    """
    # A falsy timeout (None or 0) falls back to the element's own wait timeout.
    effective_timeout = timeout if timeout else element.wait_timeout

    def _visible():
        return element.is_displayed()

    return wait(_visible, effective_timeout, fail_on_timeout)
constant[
Wait until element becomes visible or time out.
Returns true is element became visible, otherwise false.
If timeout is not specified or 0, then uses specific element wait timeout.
:param element:
:param timeout:
:param fail_on_timeout:
:return:
]
return[call[name[wait], parameter[<ast.Lambda object at 0x7da1b149e710>, <ast.BoolOp object at 0x7da1b149fdc0>, name[fail_on_timeout]]]] | keyword[def] identifier[wait_displayed] ( identifier[element] , identifier[timeout] = keyword[None] , identifier[fail_on_timeout] = keyword[None] ):
literal[string]
keyword[return] identifier[wait] ( keyword[lambda] : identifier[element] . identifier[is_displayed] (), identifier[timeout] keyword[or] identifier[element] . identifier[wait_timeout] , identifier[fail_on_timeout] ) | def wait_displayed(element, timeout=None, fail_on_timeout=None):
"""
Wait until element becomes visible or time out.
Returns true is element became visible, otherwise false.
If timeout is not specified or 0, then uses specific element wait timeout.
:param element:
:param timeout:
:param fail_on_timeout:
:return:
"""
return wait(lambda : element.is_displayed(), timeout or element.wait_timeout, fail_on_timeout) |
def CreateAdsWithCustomizations(client, adgroup_ids, feed_name):
  """Creates ExpandedTextAds that use ad customizations for specified AdGroups.
  Args:
    client: an AdWordsClient instance.
    adgroup_ids: a list containing the AdGroup ids to add ExpandedTextAds to.
    feed_name: the name of the feed used to apply customizations.
  Raises:
    GoogleAdsError: if no ExpandedTextAds were added.
  """
  adgroup_ad_service = client.GetService('AdGroupAdService', 'v201809')

  # One shared creative: the {=Feed.Attr} placeholders are filled in from
  # matched feed items at serving time, so the same ad shows different
  # values in each ad group.
  shared_ad = {
      'xsi_type': 'ExpandedTextAd',
      'headlinePart1': 'Luxury Cruise to {=%s.Name}' % feed_name,
      'headlinePart2': 'Only {=%s.Price}' % feed_name,
      'description': 'Offer ends in {=countdown(%s.Date)}!' % feed_name,
      'finalUrls': ['http://www.example.com'],
  }

  # Build one ADD operation per target ad group, all using the same ad.
  operations = []
  for adgroup_id in adgroup_ids:
    operations.append({
        'operator': 'ADD',
        'operand': {
            'adGroupId': adgroup_id,
            'ad': shared_ad
        }
    })

  response = adgroup_ad_service.mutate(operations)

  if not (response and 'value' in response):
    raise errors.GoogleAdsError('No ads were added.')
  for ad in response['value']:
    print ('Created an ad with ID "%s", type "%s", and status "%s".'
           % (ad['ad']['id'], ad['ad']['Ad.Type'], ad['status']))
constant[Creates ExpandedTextAds that use ad customizations for specified AdGroups.
Args:
client: an AdWordsClient instance.
adgroup_ids: a list containing the AdGroup ids to add ExpandedTextAds to.
feed_name: the name of the feed used to apply customizations.
Raises:
GoogleAdsError: if no ExpandedTextAds were added.
]
variable[adgroup_ad_service] assign[=] call[name[client].GetService, parameter[constant[AdGroupAdService], constant[v201809]]]
variable[expanded_text_ad] assign[=] dictionary[[<ast.Constant object at 0x7da1b1b3ff40>, <ast.Constant object at 0x7da1b1b3ffd0>, <ast.Constant object at 0x7da1b1b3fd30>, <ast.Constant object at 0x7da1b1b3ff70>, <ast.Constant object at 0x7da1b1b3c820>], [<ast.Constant object at 0x7da1b1b3c8e0>, <ast.BinOp object at 0x7da1b1b3c700>, <ast.BinOp object at 0x7da1b1b3ca60>, <ast.BinOp object at 0x7da1b1b3f970>, <ast.List object at 0x7da1b1b3f3d0>]]
variable[operations] assign[=] <ast.ListComp object at 0x7da1b1b3fb80>
variable[response] assign[=] call[name[adgroup_ad_service].mutate, parameter[name[operations]]]
if <ast.BoolOp object at 0x7da1b1b3cd30> begin[:]
for taget[name[ad]] in starred[call[name[response]][constant[value]]] begin[:]
call[name[print], parameter[binary_operation[constant[Created an ad with ID "%s", type "%s", and status "%s".] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b1b3f8b0>, <ast.Subscript object at 0x7da1b1b3f190>, <ast.Subscript object at 0x7da1b1b2b220>]]]]] | keyword[def] identifier[CreateAdsWithCustomizations] ( identifier[client] , identifier[adgroup_ids] , identifier[feed_name] ):
literal[string]
identifier[adgroup_ad_service] = identifier[client] . identifier[GetService] ( literal[string] , literal[string] )
identifier[expanded_text_ad] ={
literal[string] : literal[string] ,
literal[string] : literal[string] % identifier[feed_name] ,
literal[string] : literal[string] % identifier[feed_name] ,
literal[string] : literal[string] % identifier[feed_name] ,
literal[string] :[ literal[string] ],
}
identifier[operations] =[{
literal[string] : literal[string] ,
literal[string] :{
literal[string] : identifier[adgroup] ,
literal[string] : identifier[expanded_text_ad]
}
} keyword[for] identifier[adgroup] keyword[in] identifier[adgroup_ids] ]
identifier[response] = identifier[adgroup_ad_service] . identifier[mutate] ( identifier[operations] )
keyword[if] identifier[response] keyword[and] literal[string] keyword[in] identifier[response] :
keyword[for] identifier[ad] keyword[in] identifier[response] [ literal[string] ]:
identifier[print] ( literal[string]
%( identifier[ad] [ literal[string] ][ literal[string] ], identifier[ad] [ literal[string] ][ literal[string] ], identifier[ad] [ literal[string] ]))
keyword[else] :
keyword[raise] identifier[errors] . identifier[GoogleAdsError] ( literal[string] ) | def CreateAdsWithCustomizations(client, adgroup_ids, feed_name):
"""Creates ExpandedTextAds that use ad customizations for specified AdGroups.
Args:
client: an AdWordsClient instance.
adgroup_ids: a list containing the AdGroup ids to add ExpandedTextAds to.
feed_name: the name of the feed used to apply customizations.
Raises:
GoogleAdsError: if no ExpandedTextAds were added.
"""
# Get the AdGroupAdService
adgroup_ad_service = client.GetService('AdGroupAdService', 'v201809')
expanded_text_ad = {'xsi_type': 'ExpandedTextAd', 'headlinePart1': 'Luxury Cruise to {=%s.Name}' % feed_name, 'headlinePart2': 'Only {=%s.Price}' % feed_name, 'description': 'Offer ends in {=countdown(%s.Date)}!' % feed_name, 'finalUrls': ['http://www.example.com']}
# We add the same ad to both ad groups. When they serve, they will show
# different values, since they match different feed items.
operations = [{'operator': 'ADD', 'operand': {'adGroupId': adgroup, 'ad': expanded_text_ad}} for adgroup in adgroup_ids]
response = adgroup_ad_service.mutate(operations)
if response and 'value' in response:
for ad in response['value']:
print('Created an ad with ID "%s", type "%s", and status "%s".' % (ad['ad']['id'], ad['ad']['Ad.Type'], ad['status'])) # depends on [control=['for'], data=['ad']] # depends on [control=['if'], data=[]]
else:
raise errors.GoogleAdsError('No ads were added.') |
def snmp_server_agtconfig_location(self, **kwargs):
    """Build the netconf XML for snmp-server/agtconfig/location and hand it
    to the callback (the instance default callback unless one is supplied).
    """
    config = ET.Element("config")
    snmp_server = ET.SubElement(config, "snmp-server",
                                xmlns="urn:brocade.com:mgmt:brocade-snmp")
    agtconfig = ET.SubElement(snmp_server, "agtconfig")
    # 'location' is required; KeyError here signals a missing argument.
    ET.SubElement(agtconfig, "location").text = kwargs.pop('location')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[snmp_server] assign[=] call[name[ET].SubElement, parameter[name[config], constant[snmp-server]]]
variable[agtconfig] assign[=] call[name[ET].SubElement, parameter[name[snmp_server], constant[agtconfig]]]
variable[location] assign[=] call[name[ET].SubElement, parameter[name[agtconfig], constant[location]]]
name[location].text assign[=] call[name[kwargs].pop, parameter[constant[location]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[snmp_server_agtconfig_location] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[snmp_server] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[agtconfig] = identifier[ET] . identifier[SubElement] ( identifier[snmp_server] , literal[string] )
identifier[location] = identifier[ET] . identifier[SubElement] ( identifier[agtconfig] , literal[string] )
identifier[location] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def snmp_server_agtconfig_location(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
snmp_server = ET.SubElement(config, 'snmp-server', xmlns='urn:brocade.com:mgmt:brocade-snmp')
agtconfig = ET.SubElement(snmp_server, 'agtconfig')
location = ET.SubElement(agtconfig, 'location')
location.text = kwargs.pop('location')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def visibility(vis: Number, unit: str = 'm') -> str:
    """
    Formats a visibility element into a string with both km and sm values
    Ex: 8km ( 5sm )
    """
    # Nothing to format without a value or with an unsupported unit.
    if not vis or unit not in ('m', 'sm'):
        return ''
    # Special raw codes have fixed, pre-translated spellings.
    if vis.repr in VIS_REPR:
        return VIS_REPR[vis.repr]
    if unit == 'm':
        # meters -> show km, with a statute-mile conversion
        primary = round(vis.value / 1000, 1)
        secondary = round(vis.value * 0.000621371, 1)
        secondary_unit = 'sm'
        unit = 'km'
    elif unit == 'sm':
        # statute miles -> show sm, with a km conversion
        primary = vis.value
        secondary = round(vis.value / 0.621371, 1)
        secondary_unit = 'km'
    value = str(primary).replace('.0', '')
    converted = str(secondary).replace('.0', '') + secondary_unit  # type: ignore
    return f'{value}{unit} ({converted})'
constant[
Formats a visibility element into a string with both km and sm values
Ex: 8km ( 5sm )
]
if <ast.UnaryOp object at 0x7da2047e8d60> begin[:]
return[constant[]]
if compare[name[vis].repr in name[VIS_REPR]] begin[:]
return[call[name[VIS_REPR]][name[vis].repr]]
if compare[name[unit] equal[==] constant[m]] begin[:]
variable[converted] assign[=] binary_operation[name[vis].value * constant[0.000621371]]
variable[converted] assign[=] binary_operation[call[call[name[str], parameter[call[name[round], parameter[name[converted], constant[1]]]]].replace, parameter[constant[.0], constant[]]] + constant[sm]]
variable[value] assign[=] call[call[name[str], parameter[call[name[round], parameter[binary_operation[name[vis].value / constant[1000]], constant[1]]]]].replace, parameter[constant[.0], constant[]]]
variable[unit] assign[=] constant[km]
return[<ast.JoinedStr object at 0x7da204564100>] | keyword[def] identifier[visibility] ( identifier[vis] : identifier[Number] , identifier[unit] : identifier[str] = literal[string] )-> identifier[str] :
literal[string]
keyword[if] keyword[not] ( identifier[vis] keyword[and] identifier[unit] keyword[in] ( literal[string] , literal[string] )):
keyword[return] literal[string]
keyword[if] identifier[vis] . identifier[repr] keyword[in] identifier[VIS_REPR] :
keyword[return] identifier[VIS_REPR] [ identifier[vis] . identifier[repr] ]
keyword[if] identifier[unit] == literal[string] :
identifier[converted] = identifier[vis] . identifier[value] * literal[int]
identifier[converted] = identifier[str] ( identifier[round] ( identifier[converted] , literal[int] )). identifier[replace] ( literal[string] , literal[string] )+ literal[string]
identifier[value] = identifier[str] ( identifier[round] ( identifier[vis] . identifier[value] / literal[int] , literal[int] )). identifier[replace] ( literal[string] , literal[string] )
identifier[unit] = literal[string]
keyword[elif] identifier[unit] == literal[string] :
identifier[converted] = identifier[vis] . identifier[value] / literal[int]
identifier[converted] = identifier[str] ( identifier[round] ( identifier[converted] , literal[int] )). identifier[replace] ( literal[string] , literal[string] )+ literal[string]
identifier[value] = identifier[str] ( identifier[vis] . identifier[value] ). identifier[replace] ( literal[string] , literal[string] )
keyword[return] literal[string] | def visibility(vis: Number, unit: str='m') -> str:
"""
Formats a visibility element into a string with both km and sm values
Ex: 8km ( 5sm )
"""
if not (vis and unit in ('m', 'sm')):
return '' # depends on [control=['if'], data=[]]
if vis.repr in VIS_REPR:
return VIS_REPR[vis.repr] # depends on [control=['if'], data=['VIS_REPR']]
if unit == 'm':
converted = vis.value * 0.000621371
converted = str(round(converted, 1)).replace('.0', '') + 'sm' # type: ignore
value = str(round(vis.value / 1000, 1)).replace('.0', '')
unit = 'km' # depends on [control=['if'], data=['unit']]
elif unit == 'sm':
converted = vis.value / 0.621371
converted = str(round(converted, 1)).replace('.0', '') + 'km' # type: ignore
value = str(vis.value).replace('.0', '') # depends on [control=['if'], data=[]]
return f'{value}{unit} ({converted})' |
def solve(self):
    '''
    Solves the single period consumption-saving problem using the method of
    endogenous gridpoints. Solution includes a consumption function cFunc
    (using cubic or linear splines), a marginal value function vPfunc, a min-
    imum acceptable level of normalized market resources mNrmMin, normalized
    human wealth hNrm, and bounding MPCs MPCmin and MPCmax. It might also
    have a value function vFunc and marginal marginal value function vPPfunc.
    Parameters
    ----------
    none
    Returns
    -------
    solution : ConsumerSolution
        The solution to the single period consumption-saving problem.
    '''
    # End-of-period assets and end-of-period marginal value
    end_of_prd_assets = self.prepareToCalcEndOfPrdvP()
    end_of_prd_vP = self.calcEndOfPrdvP()
    # Choose the consumption-function interpolator implied by CubicBool,
    # then build the basic solution for this period.
    interpolator = self.makeCubiccFunc if self.CubicBool else self.makeLinearcFunc
    solution = self.makeBasicSolution(end_of_prd_vP, end_of_prd_assets,
                                      interpolator=interpolator)
    solution = self.addMPCandHumanWealth(solution)  # add a few things
    solution = self.addSSmNrm(solution)             # find steady state m
    # Attach the value function if requested, plus the marginal marginal
    # value function when cubic splines are in use (prep for next period).
    if self.vFuncBool:
        solution = self.addvFunc(solution, end_of_prd_vP)
    if self.CubicBool:
        solution = self.addvPPfunc(solution)
    return solution
constant[
Solves the single period consumption-saving problem using the method of
endogenous gridpoints. Solution includes a consumption function cFunc
(using cubic or linear splines), a marginal value function vPfunc, a min-
imum acceptable level of normalized market resources mNrmMin, normalized
human wealth hNrm, and bounding MPCs MPCmin and MPCmax. It might also
have a value function vFunc and marginal marginal value function vPPfunc.
Parameters
----------
none
Returns
-------
solution : ConsumerSolution
The solution to the single period consumption-saving problem.
]
variable[aNrm] assign[=] call[name[self].prepareToCalcEndOfPrdvP, parameter[]]
variable[EndOfPrdvP] assign[=] call[name[self].calcEndOfPrdvP, parameter[]]
if name[self].CubicBool begin[:]
variable[solution] assign[=] call[name[self].makeBasicSolution, parameter[name[EndOfPrdvP], name[aNrm]]]
variable[solution] assign[=] call[name[self].addMPCandHumanWealth, parameter[name[solution]]]
variable[solution] assign[=] call[name[self].addSSmNrm, parameter[name[solution]]]
if name[self].vFuncBool begin[:]
variable[solution] assign[=] call[name[self].addvFunc, parameter[name[solution], name[EndOfPrdvP]]]
if name[self].CubicBool begin[:]
variable[solution] assign[=] call[name[self].addvPPfunc, parameter[name[solution]]]
return[name[solution]] | keyword[def] identifier[solve] ( identifier[self] ):
literal[string]
identifier[aNrm] = identifier[self] . identifier[prepareToCalcEndOfPrdvP] ()
identifier[EndOfPrdvP] = identifier[self] . identifier[calcEndOfPrdvP] ()
keyword[if] identifier[self] . identifier[CubicBool] :
identifier[solution] = identifier[self] . identifier[makeBasicSolution] ( identifier[EndOfPrdvP] , identifier[aNrm] , identifier[interpolator] = identifier[self] . identifier[makeCubiccFunc] )
keyword[else] :
identifier[solution] = identifier[self] . identifier[makeBasicSolution] ( identifier[EndOfPrdvP] , identifier[aNrm] , identifier[interpolator] = identifier[self] . identifier[makeLinearcFunc] )
identifier[solution] = identifier[self] . identifier[addMPCandHumanWealth] ( identifier[solution] )
identifier[solution] = identifier[self] . identifier[addSSmNrm] ( identifier[solution] )
keyword[if] identifier[self] . identifier[vFuncBool] :
identifier[solution] = identifier[self] . identifier[addvFunc] ( identifier[solution] , identifier[EndOfPrdvP] )
keyword[if] identifier[self] . identifier[CubicBool] :
identifier[solution] = identifier[self] . identifier[addvPPfunc] ( identifier[solution] )
keyword[return] identifier[solution] | def solve(self):
"""
Solves the single period consumption-saving problem using the method of
endogenous gridpoints. Solution includes a consumption function cFunc
(using cubic or linear splines), a marginal value function vPfunc, a min-
imum acceptable level of normalized market resources mNrmMin, normalized
human wealth hNrm, and bounding MPCs MPCmin and MPCmax. It might also
have a value function vFunc and marginal marginal value function vPPfunc.
Parameters
----------
none
Returns
-------
solution : ConsumerSolution
The solution to the single period consumption-saving problem.
"""
# Make arrays of end-of-period assets and end-of-period marginal value
aNrm = self.prepareToCalcEndOfPrdvP()
EndOfPrdvP = self.calcEndOfPrdvP()
# Construct a basic solution for this period
if self.CubicBool:
solution = self.makeBasicSolution(EndOfPrdvP, aNrm, interpolator=self.makeCubiccFunc) # depends on [control=['if'], data=[]]
else:
solution = self.makeBasicSolution(EndOfPrdvP, aNrm, interpolator=self.makeLinearcFunc)
solution = self.addMPCandHumanWealth(solution) # add a few things
solution = self.addSSmNrm(solution) # find steady state m
# Add the value function if requested, as well as the marginal marginal
# value function if cubic splines were used (to prepare for next period)
if self.vFuncBool:
solution = self.addvFunc(solution, EndOfPrdvP) # depends on [control=['if'], data=[]]
if self.CubicBool:
solution = self.addvPPfunc(solution) # depends on [control=['if'], data=[]]
return solution |
def get_built_artifacts(self, id, **kwargs):
    """
    Gets artifacts built for specific Build Record
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.get_built_artifacts(id, callback=callback_function)
    :param callback function: The callback function
    for asynchronous request. (optional)
    :param int id: BuildRecord id (required)
    :param int page_index: Page Index
    :param int page_size: Pagination size
    :param str sort: Sorting RSQL
    :param str q: RSQL Query
    :return: ArtifactPage
    If the method is called asynchronously,
    returns the request thread.
    """
    # Ask the lower-level call to return only the deserialized body,
    # not the full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        # Asynchronous path: the underlying call returns the request
        # thread and invokes `callback` with the response when it arrives.
        return self.get_built_artifacts_with_http_info(id, **kwargs)
    else:
        # Synchronous path: block until the HTTP request completes and
        # return the deserialized response body.
        (data) = self.get_built_artifacts_with_http_info(id, **kwargs)
        return data | def function[get_built_artifacts, parameter[self, id]]:
constant[
Gets artifacts built for specific Build Record
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_built_artifacts(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: BuildRecord id (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:param str q: RSQL Query
:return: ArtifactPage
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[callback]]] begin[:]
return[call[name[self].get_built_artifacts_with_http_info, parameter[name[id]]]] | keyword[def] identifier[get_built_artifacts] ( identifier[self] , identifier[id] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[get_built_artifacts_with_http_info] ( identifier[id] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[get_built_artifacts_with_http_info] ( identifier[id] ,** identifier[kwargs] )
keyword[return] identifier[data] | def get_built_artifacts(self, id, **kwargs):
"""
Gets artifacts built for specific Build Record
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_built_artifacts(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: BuildRecord id (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:param str q: RSQL Query
:return: ArtifactPage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_built_artifacts_with_http_info(id, **kwargs) # depends on [control=['if'], data=[]]
else:
data = self.get_built_artifacts_with_http_info(id, **kwargs)
return data |
def _validate_yourls_response(response, data):
    """Validate response from YOURLS server.

    Checks both the HTTP status and the API-level status reported inside
    the JSON body, raising a specific exception for each failure mode.

    :param response: HTTP response object from the YOURLS API — presumably
        a ``requests.Response`` (it must provide ``raise_for_status()`` and
        ``json()``); verify against the caller.
    :param data: dict of request parameters; ``data['keyword']`` is used to
        enrich keyword-conflict errors.
    :return: the parsed JSON body (dict) on success, or when the body
        carries no ``status``/``code``/``message`` keys to interpret.
    :raises YOURLSKeywordExistsError: the requested keyword is already taken.
    :raises YOURLSURLExistsError: the URL has already been shortened.
    :raises YOURLSAPIError: any other API-reported failure.
    :raises HTTPError: the original HTTP error, reraised when the error
        body is not JSON.
    """
    try:
        response.raise_for_status()
    except HTTPError as http_exc:
        # Collect full HTTPError information so we can reraise later if required.
        http_error_info = sys.exc_info()
        # We will reraise outside of try..except block to prevent exception
        # chaining showing wrong traceback when we try and parse JSON response.
        reraise = False
        try:
            jsondata = response.json()
        except ValueError:
            # Body is not JSON — nothing more to interpret; reraise the
            # original HTTPError below, outside this handler.
            reraise = True
        else:
            logger.debug('Received error {response} with JSON {json}',
                         response=response, json=jsondata)
            _handle_api_error_with_json(http_exc, jsondata, response)
        if reraise:
            six.reraise(*http_error_info)
    else:
        # We have a valid HTTP response, but we need to check what the API says
        # about the request.
        jsondata = response.json()
        logger.debug('Received {response} with JSON {json}', response=response,
                     json=jsondata)
        # Only interpret the body when all three status fields are present.
        if {'status', 'code', 'message'} <= set(jsondata.keys()):
            status = jsondata['status']
            code = jsondata['code']
            message = jsondata['message']
            if status == 'fail':
                # Map API error codes onto the package's exception hierarchy.
                if code == 'error:keyword':
                    raise YOURLSKeywordExistsError(message, keyword=data['keyword'])
                elif code == 'error:url':
                    url = _json_to_shortened_url(jsondata['url'], jsondata['shorturl'])
                    raise YOURLSURLExistsError(message, url=url)
                else:
                    raise YOURLSAPIError(message)
            else:
                return jsondata
        else:
            # Without status, nothing special needs to be handled.
            return jsondata | def function[_validate_yourls_response, parameter[response, data]]:
constant[Validate response from YOURLS server.]
<ast.Try object at 0x7da1b025db10> | keyword[def] identifier[_validate_yourls_response] ( identifier[response] , identifier[data] ):
literal[string]
keyword[try] :
identifier[response] . identifier[raise_for_status] ()
keyword[except] identifier[HTTPError] keyword[as] identifier[http_exc] :
identifier[http_error_info] = identifier[sys] . identifier[exc_info] ()
identifier[reraise] = keyword[False]
keyword[try] :
identifier[jsondata] = identifier[response] . identifier[json] ()
keyword[except] identifier[ValueError] :
identifier[reraise] = keyword[True]
keyword[else] :
identifier[logger] . identifier[debug] ( literal[string] ,
identifier[response] = identifier[response] , identifier[json] = identifier[jsondata] )
identifier[_handle_api_error_with_json] ( identifier[http_exc] , identifier[jsondata] , identifier[response] )
keyword[if] identifier[reraise] :
identifier[six] . identifier[reraise] (* identifier[http_error_info] )
keyword[else] :
identifier[jsondata] = identifier[response] . identifier[json] ()
identifier[logger] . identifier[debug] ( literal[string] , identifier[response] = identifier[response] ,
identifier[json] = identifier[jsondata] )
keyword[if] { literal[string] , literal[string] , literal[string] }<= identifier[set] ( identifier[jsondata] . identifier[keys] ()):
identifier[status] = identifier[jsondata] [ literal[string] ]
identifier[code] = identifier[jsondata] [ literal[string] ]
identifier[message] = identifier[jsondata] [ literal[string] ]
keyword[if] identifier[status] == literal[string] :
keyword[if] identifier[code] == literal[string] :
keyword[raise] identifier[YOURLSKeywordExistsError] ( identifier[message] , identifier[keyword] = identifier[data] [ literal[string] ])
keyword[elif] identifier[code] == literal[string] :
identifier[url] = identifier[_json_to_shortened_url] ( identifier[jsondata] [ literal[string] ], identifier[jsondata] [ literal[string] ])
keyword[raise] identifier[YOURLSURLExistsError] ( identifier[message] , identifier[url] = identifier[url] )
keyword[else] :
keyword[raise] identifier[YOURLSAPIError] ( identifier[message] )
keyword[else] :
keyword[return] identifier[jsondata]
keyword[else] :
keyword[return] identifier[jsondata] | def _validate_yourls_response(response, data):
"""Validate response from YOURLS server."""
try:
response.raise_for_status() # depends on [control=['try'], data=[]]
except HTTPError as http_exc:
# Collect full HTTPError information so we can reraise later if required.
http_error_info = sys.exc_info()
# We will reraise outside of try..except block to prevent exception
# chaining showing wrong traceback when we try and parse JSON response.
reraise = False
try:
jsondata = response.json() # depends on [control=['try'], data=[]]
except ValueError:
reraise = True # depends on [control=['except'], data=[]]
else:
logger.debug('Received error {response} with JSON {json}', response=response, json=jsondata)
_handle_api_error_with_json(http_exc, jsondata, response)
if reraise:
six.reraise(*http_error_info) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['http_exc']]
else:
# We have a valid HTTP response, but we need to check what the API says
# about the request.
jsondata = response.json()
logger.debug('Received {response} with JSON {json}', response=response, json=jsondata)
if {'status', 'code', 'message'} <= set(jsondata.keys()):
status = jsondata['status']
code = jsondata['code']
message = jsondata['message']
if status == 'fail':
if code == 'error:keyword':
raise YOURLSKeywordExistsError(message, keyword=data['keyword']) # depends on [control=['if'], data=[]]
elif code == 'error:url':
url = _json_to_shortened_url(jsondata['url'], jsondata['shorturl'])
raise YOURLSURLExistsError(message, url=url) # depends on [control=['if'], data=[]]
else:
raise YOURLSAPIError(message) # depends on [control=['if'], data=[]]
else:
return jsondata # depends on [control=['if'], data=[]]
else:
# Without status, nothing special needs to be handled.
return jsondata |
def read(self, prompt='', clean=lambda x: x):
    """ Display a prompt and ask user for input
    A function to clean the user input can be passed as ``clean`` argument.
    This function takes a single value, which is the string user entered,
    and returns a cleaned value. Default is a pass-through function, which
    is an equivalent of::
    def clean(val):
        return val
    """
    # NOTE(review): `read` here resolves to a module-level input helper,
    # not this method — confirm it is imported/defined elsewhere in the
    # module. A single space is appended to separate prompt from input.
    ans = read(prompt + ' ')
    return clean(ans) | def function[read, parameter[self, prompt, clean]]:
constant[ Display a prompt and ask user for input
A function to clean the user input can be passed as ``clean`` argument.
This function takes a single value, which is the string user entered,
and returns a cleaned value. Default is a pass-through function, which
is an equivalent of::
def clean(val):
return val
]
variable[ans] assign[=] call[name[read], parameter[binary_operation[name[prompt] + constant[ ]]]]
return[call[name[clean], parameter[name[ans]]]] | keyword[def] identifier[read] ( identifier[self] , identifier[prompt] = literal[string] , identifier[clean] = keyword[lambda] identifier[x] : identifier[x] ):
literal[string]
identifier[ans] = identifier[read] ( identifier[prompt] + literal[string] )
keyword[return] identifier[clean] ( identifier[ans] ) | def read(self, prompt='', clean=lambda x: x):
""" Display a prompt and ask user for input
A function to clean the user input can be passed as ``clean`` argument.
This function takes a single value, which is the string user entered,
and returns a cleaned value. Default is a pass-through function, which
is an equivalent of::
def clean(val):
return val
"""
ans = read(prompt + ' ')
return clean(ans) |
def find_difference_contour(self):
    """Find the ratio and loss/gain contours.
    This method finds the ratio contour and the Loss/Gain contour values.
    Its inputs are the two datasets for comparison where the second is the control
    to compare against the first.
    The input data sets need to be the same shape.

    Side effects: ``self.ratio_comp_value`` is defaulted to
    ``self.comparison_value`` when unset, and ``self.comp1``/``self.comp2``
    are replaced by masked copies (masked below ``ratio_comp_value``).

    Returns:
        2-element tuple containing
        - **diff** (*2D array of floats*): Ratio contour values.
        - **loss_gain_contour** (*2D array of floats*): loss/gain contour values.
    """
    # Default the ratio cut to the comparison value when not supplied.
    self.ratio_comp_value = (self.comparison_value if self.ratio_comp_value is None
                             else self.ratio_comp_value)
    # Indices gained/lost: cells that cross comparison_value in one
    # direction between the test (comp1) and control (comp2) datasets.
    inds_gained = np.where((self.comp1 >= self.comparison_value)
                           & (self.comp2 < self.comparison_value))
    inds_lost = np.where((self.comp1 < self.comparison_value)
                         & (self.comp2 >= self.comparison_value))
    # Mask out cells below the ratio threshold; np.ma propagates the mask
    # through the arithmetic below, excluding those cells from the ratio.
    self.comp1 = np.ma.masked_where(self.comp1 < self.ratio_comp_value, self.comp1)
    self.comp2 = np.ma.masked_where(self.comp2 < self.ratio_comp_value, self.comp2)
    # Set diff to the ratio for purposes of determining ratio differences.
    diff = self.comp1/self.comp2
    # The following determines the log10 of the ratio difference.
    # If it is extremely small, we neglect and put it as zero
    # (limits chosen to resemble ratios of less than 1.05 and greater than 0.952).
    # Note: -log10(1/diff) == log10(diff); the piecewise sum zeroes the
    # dead band 0.952 < diff < 1.05.
    diff = (np.log10(diff)*(diff >= 1.05)
            + (-np.log10(1.0/diff)) * (diff <= 0.952)
            + 0.0*((diff < 1.05) & (diff > 0.952)))
    # Initialize loss/gain: 0 = unchanged, -1 = lost, +1 = gained.
    loss_gain_contour = np.zeros(np.shape(self.comp1))
    # fill out loss/gain
    loss_gain_contour[inds_lost] = -1
    loss_gain_contour[inds_gained] = 1
    return diff, loss_gain_contour | def function[find_difference_contour, parameter[self]]:
constant[Find the ratio and loss/gain contours.
This method finds the ratio contour and the Loss/Gain contour values.
Its inputs are the two datasets for comparison where the second is the control
to compare against the first.
The input data sets need to be the same shape.
Returns:
2-element tuple containing
- **diff** (*2D array of floats*): Ratio contour values.
- **loss_gain_contour** (*2D array of floats*): loss/gain contour values.
]
name[self].ratio_comp_value assign[=] <ast.IfExp object at 0x7da18f58e6b0>
variable[inds_gained] assign[=] call[name[np].where, parameter[binary_operation[compare[name[self].comp1 greater_or_equal[>=] name[self].comparison_value] <ast.BitAnd object at 0x7da2590d6b60> compare[name[self].comp2 less[<] name[self].comparison_value]]]]
variable[inds_lost] assign[=] call[name[np].where, parameter[binary_operation[compare[name[self].comp1 less[<] name[self].comparison_value] <ast.BitAnd object at 0x7da2590d6b60> compare[name[self].comp2 greater_or_equal[>=] name[self].comparison_value]]]]
name[self].comp1 assign[=] call[name[np].ma.masked_where, parameter[compare[name[self].comp1 less[<] name[self].ratio_comp_value], name[self].comp1]]
name[self].comp2 assign[=] call[name[np].ma.masked_where, parameter[compare[name[self].comp2 less[<] name[self].ratio_comp_value], name[self].comp2]]
variable[diff] assign[=] binary_operation[name[self].comp1 / name[self].comp2]
variable[diff] assign[=] binary_operation[binary_operation[binary_operation[call[name[np].log10, parameter[name[diff]]] * compare[name[diff] greater_or_equal[>=] constant[1.05]]] + binary_operation[<ast.UnaryOp object at 0x7da20c76eb00> * compare[name[diff] less_or_equal[<=] constant[0.952]]]] + binary_operation[constant[0.0] * binary_operation[compare[name[diff] less[<] constant[1.05]] <ast.BitAnd object at 0x7da2590d6b60> compare[name[diff] greater[>] constant[0.952]]]]]
variable[loss_gain_contour] assign[=] call[name[np].zeros, parameter[call[name[np].shape, parameter[name[self].comp1]]]]
call[name[loss_gain_contour]][name[inds_lost]] assign[=] <ast.UnaryOp object at 0x7da20c76f9d0>
call[name[loss_gain_contour]][name[inds_gained]] assign[=] constant[1]
return[tuple[[<ast.Name object at 0x7da20c76f100>, <ast.Name object at 0x7da20c76eef0>]]] | keyword[def] identifier[find_difference_contour] ( identifier[self] ):
literal[string]
identifier[self] . identifier[ratio_comp_value] =( identifier[self] . identifier[comparison_value] keyword[if] identifier[self] . identifier[ratio_comp_value] keyword[is] keyword[None]
keyword[else] identifier[self] . identifier[ratio_comp_value] )
identifier[inds_gained] = identifier[np] . identifier[where] (( identifier[self] . identifier[comp1] >= identifier[self] . identifier[comparison_value] )
&( identifier[self] . identifier[comp2] < identifier[self] . identifier[comparison_value] ))
identifier[inds_lost] = identifier[np] . identifier[where] (( identifier[self] . identifier[comp1] < identifier[self] . identifier[comparison_value] )
&( identifier[self] . identifier[comp2] >= identifier[self] . identifier[comparison_value] ))
identifier[self] . identifier[comp1] = identifier[np] . identifier[ma] . identifier[masked_where] ( identifier[self] . identifier[comp1] < identifier[self] . identifier[ratio_comp_value] , identifier[self] . identifier[comp1] )
identifier[self] . identifier[comp2] = identifier[np] . identifier[ma] . identifier[masked_where] ( identifier[self] . identifier[comp2] < identifier[self] . identifier[ratio_comp_value] , identifier[self] . identifier[comp2] )
identifier[diff] = identifier[self] . identifier[comp1] / identifier[self] . identifier[comp2]
identifier[diff] =( identifier[np] . identifier[log10] ( identifier[diff] )*( identifier[diff] >= literal[int] )
+(- identifier[np] . identifier[log10] ( literal[int] / identifier[diff] ))*( identifier[diff] <= literal[int] )
+ literal[int] *(( identifier[diff] < literal[int] )&( identifier[diff] > literal[int] )))
identifier[loss_gain_contour] = identifier[np] . identifier[zeros] ( identifier[np] . identifier[shape] ( identifier[self] . identifier[comp1] ))
identifier[loss_gain_contour] [ identifier[inds_lost] ]=- literal[int]
identifier[loss_gain_contour] [ identifier[inds_gained] ]= literal[int]
keyword[return] identifier[diff] , identifier[loss_gain_contour] | def find_difference_contour(self):
"""Find the ratio and loss/gain contours.
This method finds the ratio contour and the Loss/Gain contour values.
Its inputs are the two datasets for comparison where the second is the control
to compare against the first.
The input data sets need to be the same shape.
Returns:
2-element tuple containing
- **diff** (*2D array of floats*): Ratio contour values.
- **loss_gain_contour** (*2D array of floats*): loss/gain contour values.
"""
# set contour to test and control contour
self.ratio_comp_value = self.comparison_value if self.ratio_comp_value is None else self.ratio_comp_value
# indices of loss,gained.
inds_gained = np.where((self.comp1 >= self.comparison_value) & (self.comp2 < self.comparison_value))
inds_lost = np.where((self.comp1 < self.comparison_value) & (self.comp2 >= self.comparison_value))
self.comp1 = np.ma.masked_where(self.comp1 < self.ratio_comp_value, self.comp1)
self.comp2 = np.ma.masked_where(self.comp2 < self.ratio_comp_value, self.comp2)
# set diff to ratio for purposed of determining raito differences
diff = self.comp1 / self.comp2
# the following determines the log10 of the ratio difference.
# If it is extremely small, we neglect and put it as zero
# (limits chosen to resemble ratios of less than 1.05 and greater than 0.952)
diff = np.log10(diff) * (diff >= 1.05) + -np.log10(1.0 / diff) * (diff <= 0.952) + 0.0 * ((diff < 1.05) & (diff > 0.952))
# initialize loss/gain
loss_gain_contour = np.zeros(np.shape(self.comp1))
# fill out loss/gain
loss_gain_contour[inds_lost] = -1
loss_gain_contour[inds_gained] = 1
return (diff, loss_gain_contour) |
def how_vulnerable(
    chain,
    blackbox_mapping,
    sanitiser_nodes,
    potential_sanitiser,
    blackbox_assignments,
    interactive,
    vuln_deets
):
    """Iterates through the chain of nodes and checks the blackbox nodes against the blackbox mapping and sanitiser dictionary.
    Note: potential_sanitiser is the only hack here, it is because we do not take p-use's into account yet.
    e.g. we can only say potentially instead of definitely sanitised in the path_traversal_sanitised_2.py test.
    Args:
        chain(list(Node)): A path of nodes between source and sink.
        blackbox_mapping(dict): A map of blackbox functions containing whether or not they propagate taint.
        sanitiser_nodes(set): A set of nodes that are sanitisers for the sink.
        potential_sanitiser(Node): An if or elif node that can potentially cause sanitisation.
        blackbox_assignments(set[AssignmentNode]): set of blackbox assignments, includes the ReturnNode's of BBorBInode's.
        interactive(bool): determines if we ask the user about blackbox functions not in the mapping file.
        vuln_deets(dict): vulnerability details.
    Returns:
        A VulnerabilityType depending on how vulnerable the chain is.
        (Returned together with the possibly-updated `interactive` flag.)
    """
    for i, current_node in enumerate(chain):
        # A sanitiser anywhere on the path makes the whole chain safe.
        if current_node in sanitiser_nodes:
            vuln_deets['sanitiser'] = current_node
            vuln_deets['confident'] = True
            return VulnerabilityType.SANITISED, interactive
        if isinstance(current_node, BBorBInode):
            if current_node.func_name in blackbox_mapping['propagates']:
                # Known to propagate taint; keep walking the chain.
                continue
            elif current_node.func_name in blackbox_mapping['does_not_propagate']:
                # Known to stop taint; the chain is a false positive.
                return VulnerabilityType.FALSE, interactive
            elif interactive:
                # Unknown blackbox function: ask the user and record the
                # answer in blackbox_mapping for the rest of the run.
                user_says = input(
                    'Is the return value of {} with tainted argument "{}" vulnerable? ([Y]es/[N]o/[S]top asking)'.format(
                        current_node.label,
                        chain[i - 1].left_hand_side
                    )
                ).lower()
                if user_says.startswith('s'):
                    # User opted out of further questions: disable
                    # interactive mode and report this chain as unknown.
                    interactive = False
                    vuln_deets['unknown_assignment'] = current_node
                    return VulnerabilityType.UNKNOWN, interactive
                if user_says.startswith('n'):
                    blackbox_mapping['does_not_propagate'].append(current_node.func_name)
                    return VulnerabilityType.FALSE, interactive
                # Any other answer is treated as "yes, propagates".
                blackbox_mapping['propagates'].append(current_node.func_name)
            else:
                vuln_deets['unknown_assignment'] = current_node
                return VulnerabilityType.UNKNOWN, interactive
    if potential_sanitiser:
        # Only "potentially" sanitised — p-uses are not tracked yet (see
        # the docstring note), so confidence is lowered.
        vuln_deets['sanitiser'] = potential_sanitiser
        vuln_deets['confident'] = False
        return VulnerabilityType.SANITISED, interactive
    return VulnerabilityType.TRUE, interactive | def function[how_vulnerable, parameter[chain, blackbox_mapping, sanitiser_nodes, potential_sanitiser, blackbox_assignments, interactive, vuln_deets]]:
constant[Iterates through the chain of nodes and checks the blackbox nodes against the blackbox mapping and sanitiser dictionary.
Note: potential_sanitiser is the only hack here, it is because we do not take p-use's into account yet.
e.g. we can only say potentially instead of definitely sanitised in the path_traversal_sanitised_2.py test.
Args:
chain(list(Node)): A path of nodes between source and sink.
blackbox_mapping(dict): A map of blackbox functions containing whether or not they propagate taint.
sanitiser_nodes(set): A set of nodes that are sanitisers for the sink.
potential_sanitiser(Node): An if or elif node that can potentially cause sanitisation.
blackbox_assignments(set[AssignmentNode]): set of blackbox assignments, includes the ReturnNode's of BBorBInode's.
interactive(bool): determines if we ask the user about blackbox functions not in the mapping file.
vuln_deets(dict): vulnerability details.
Returns:
A VulnerabilityType depending on how vulnerable the chain is.
]
for taget[tuple[[<ast.Name object at 0x7da1b1ec2fe0>, <ast.Name object at 0x7da1b1ec1de0>]]] in starred[call[name[enumerate], parameter[name[chain]]]] begin[:]
if compare[name[current_node] in name[sanitiser_nodes]] begin[:]
call[name[vuln_deets]][constant[sanitiser]] assign[=] name[current_node]
call[name[vuln_deets]][constant[confident]] assign[=] constant[True]
return[tuple[[<ast.Attribute object at 0x7da1b1ec35e0>, <ast.Name object at 0x7da1b1ec27a0>]]]
if call[name[isinstance], parameter[name[current_node], name[BBorBInode]]] begin[:]
if compare[name[current_node].func_name in call[name[blackbox_mapping]][constant[propagates]]] begin[:]
continue
if name[potential_sanitiser] begin[:]
call[name[vuln_deets]][constant[sanitiser]] assign[=] name[potential_sanitiser]
call[name[vuln_deets]][constant[confident]] assign[=] constant[False]
return[tuple[[<ast.Attribute object at 0x7da1b1e7d270>, <ast.Name object at 0x7da1b1e7c310>]]]
return[tuple[[<ast.Attribute object at 0x7da1b1e7e9b0>, <ast.Name object at 0x7da1b1e7ec80>]]] | keyword[def] identifier[how_vulnerable] (
identifier[chain] ,
identifier[blackbox_mapping] ,
identifier[sanitiser_nodes] ,
identifier[potential_sanitiser] ,
identifier[blackbox_assignments] ,
identifier[interactive] ,
identifier[vuln_deets]
):
literal[string]
keyword[for] identifier[i] , identifier[current_node] keyword[in] identifier[enumerate] ( identifier[chain] ):
keyword[if] identifier[current_node] keyword[in] identifier[sanitiser_nodes] :
identifier[vuln_deets] [ literal[string] ]= identifier[current_node]
identifier[vuln_deets] [ literal[string] ]= keyword[True]
keyword[return] identifier[VulnerabilityType] . identifier[SANITISED] , identifier[interactive]
keyword[if] identifier[isinstance] ( identifier[current_node] , identifier[BBorBInode] ):
keyword[if] identifier[current_node] . identifier[func_name] keyword[in] identifier[blackbox_mapping] [ literal[string] ]:
keyword[continue]
keyword[elif] identifier[current_node] . identifier[func_name] keyword[in] identifier[blackbox_mapping] [ literal[string] ]:
keyword[return] identifier[VulnerabilityType] . identifier[FALSE] , identifier[interactive]
keyword[elif] identifier[interactive] :
identifier[user_says] = identifier[input] (
literal[string] . identifier[format] (
identifier[current_node] . identifier[label] ,
identifier[chain] [ identifier[i] - literal[int] ]. identifier[left_hand_side]
)
). identifier[lower] ()
keyword[if] identifier[user_says] . identifier[startswith] ( literal[string] ):
identifier[interactive] = keyword[False]
identifier[vuln_deets] [ literal[string] ]= identifier[current_node]
keyword[return] identifier[VulnerabilityType] . identifier[UNKNOWN] , identifier[interactive]
keyword[if] identifier[user_says] . identifier[startswith] ( literal[string] ):
identifier[blackbox_mapping] [ literal[string] ]. identifier[append] ( identifier[current_node] . identifier[func_name] )
keyword[return] identifier[VulnerabilityType] . identifier[FALSE] , identifier[interactive]
identifier[blackbox_mapping] [ literal[string] ]. identifier[append] ( identifier[current_node] . identifier[func_name] )
keyword[else] :
identifier[vuln_deets] [ literal[string] ]= identifier[current_node]
keyword[return] identifier[VulnerabilityType] . identifier[UNKNOWN] , identifier[interactive]
keyword[if] identifier[potential_sanitiser] :
identifier[vuln_deets] [ literal[string] ]= identifier[potential_sanitiser]
identifier[vuln_deets] [ literal[string] ]= keyword[False]
keyword[return] identifier[VulnerabilityType] . identifier[SANITISED] , identifier[interactive]
keyword[return] identifier[VulnerabilityType] . identifier[TRUE] , identifier[interactive] | def how_vulnerable(chain, blackbox_mapping, sanitiser_nodes, potential_sanitiser, blackbox_assignments, interactive, vuln_deets):
"""Iterates through the chain of nodes and checks the blackbox nodes against the blackbox mapping and sanitiser dictionary.
Note: potential_sanitiser is the only hack here, it is because we do not take p-use's into account yet.
e.g. we can only say potentially instead of definitely sanitised in the path_traversal_sanitised_2.py test.
Args:
chain(list(Node)): A path of nodes between source and sink.
blackbox_mapping(dict): A map of blackbox functions containing whether or not they propagate taint.
sanitiser_nodes(set): A set of nodes that are sanitisers for the sink.
potential_sanitiser(Node): An if or elif node that can potentially cause sanitisation.
blackbox_assignments(set[AssignmentNode]): set of blackbox assignments, includes the ReturnNode's of BBorBInode's.
interactive(bool): determines if we ask the user about blackbox functions not in the mapping file.
vuln_deets(dict): vulnerability details.
Returns:
A VulnerabilityType depending on how vulnerable the chain is.
"""
for (i, current_node) in enumerate(chain):
if current_node in sanitiser_nodes:
vuln_deets['sanitiser'] = current_node
vuln_deets['confident'] = True
return (VulnerabilityType.SANITISED, interactive) # depends on [control=['if'], data=['current_node']]
if isinstance(current_node, BBorBInode):
if current_node.func_name in blackbox_mapping['propagates']:
continue # depends on [control=['if'], data=[]]
elif current_node.func_name in blackbox_mapping['does_not_propagate']:
return (VulnerabilityType.FALSE, interactive) # depends on [control=['if'], data=[]]
elif interactive:
user_says = input('Is the return value of {} with tainted argument "{}" vulnerable? ([Y]es/[N]o/[S]top asking)'.format(current_node.label, chain[i - 1].left_hand_side)).lower()
if user_says.startswith('s'):
interactive = False
vuln_deets['unknown_assignment'] = current_node
return (VulnerabilityType.UNKNOWN, interactive) # depends on [control=['if'], data=[]]
if user_says.startswith('n'):
blackbox_mapping['does_not_propagate'].append(current_node.func_name)
return (VulnerabilityType.FALSE, interactive) # depends on [control=['if'], data=[]]
blackbox_mapping['propagates'].append(current_node.func_name) # depends on [control=['if'], data=[]]
else:
vuln_deets['unknown_assignment'] = current_node
return (VulnerabilityType.UNKNOWN, interactive) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if potential_sanitiser:
vuln_deets['sanitiser'] = potential_sanitiser
vuln_deets['confident'] = False
return (VulnerabilityType.SANITISED, interactive) # depends on [control=['if'], data=[]]
return (VulnerabilityType.TRUE, interactive) |
def hr_diagram(cluster_name, output=None):
    """Create a :class:`~bokeh.plotting.figure.Figure` to create an H-R
    diagram using the cluster_name; then show it.

    :param cluster_name: name of the cluster whose data to fetch and plot.
    :param output: accepted but currently unused by this function.
    """
    cluster = get_hr_data(cluster_name)
    pf = hr_diagram_figure(cluster)
    show_with_bokeh_server(pf) | def function[hr_diagram, parameter[cluster_name, output]]:
constant[Create a :class:`~bokeh.plotting.figure.Figure` to create an H-R
diagram using the cluster_name; then show it.
Re
]
variable[cluster] assign[=] call[name[get_hr_data], parameter[name[cluster_name]]]
variable[pf] assign[=] call[name[hr_diagram_figure], parameter[name[cluster]]]
call[name[show_with_bokeh_server], parameter[name[pf]]] | keyword[def] identifier[hr_diagram] ( identifier[cluster_name] , identifier[output] = keyword[None] ):
literal[string]
identifier[cluster] = identifier[get_hr_data] ( identifier[cluster_name] )
identifier[pf] = identifier[hr_diagram_figure] ( identifier[cluster] )
identifier[show_with_bokeh_server] ( identifier[pf] ) | def hr_diagram(cluster_name, output=None):
"""Create a :class:`~bokeh.plotting.figure.Figure` to create an H-R
diagram using the cluster_name; then show it.
Re
"""
cluster = get_hr_data(cluster_name)
pf = hr_diagram_figure(cluster)
show_with_bokeh_server(pf) |
def makePlot(args):
    """
    Make the plot with parallax performance predictions.
    :argument args: command line arguments (dict-like; keys used here are
        'gmagAbscissa', 'pdfOutput', 'pngOutput')
    """
    # Grid in Gaia G magnitude and (V-I) colours for three spectral types.
    gmag=np.linspace(5.7,20.0,101)
    vminiB1V=vminiFromSpt('B1V')
    vminiG2V=vminiFromSpt('G2V')
    vminiM6V=vminiFromSpt('M6V')
    # Corresponding V magnitudes via the G-V colour transformation.
    vmagB1V=gmag-gminvFromVmini(vminiB1V)
    vmagG2V=gmag-gminvFromVmini(vminiG2V)
    vmagM6V=gmag-gminvFromVmini(vminiM6V)
    # Sky-averaged parallax errors plus the min/max envelope over the sky.
    sigparB1V=parallaxErrorSkyAvg(gmag,vminiB1V)
    sigparB1Vmin=parallaxMinError(gmag,vminiB1V)
    sigparB1Vmax=parallaxMaxError(gmag,vminiB1V)
    sigparG2V=parallaxErrorSkyAvg(gmag,vminiG2V)
    sigparG2Vmin=parallaxMinError(gmag,vminiG2V)
    sigparG2Vmax=parallaxMaxError(gmag,vminiG2V)
    sigparM6V=parallaxErrorSkyAvg(gmag,vminiM6V)
    sigparM6Vmin=parallaxMinError(gmag,vminiM6V)
    sigparM6Vmax=parallaxMaxError(gmag,vminiM6V)
    fig=plt.figure(figsize=(10,6.5))
    if (args['gmagAbscissa']):
        # Plot errors against G magnitude, one curve per spectral type.
        plt.semilogy(gmag, sigparB1V, 'b', label='B1V')
        plt.semilogy(gmag, sigparG2V, 'g', label='G2V')
        plt.semilogy(gmag, sigparM6V, 'r', label='M6V')
        plt.xlim((5,20))
        plt.ylim((4,1000))
        plt.legend(loc=4)
        plt.xlabel('$G$ [mag]')
    else:
        # Plot against V magnitude, with shaded min/max sky-variation bands
        # for B1V and M6V (the G2V curve is intentionally commented out).
        ax=fig.add_subplot(111)
        plt.semilogy(vmagB1V, sigparB1V, 'b', label='B1V')
        #plt.semilogy(vmagG2V, sigparG2V, 'g', label='G2V')
        plt.semilogy(vmagM6V, sigparM6V, 'r', label='M6V')
        plt.fill_between(vmagB1V, sigparB1Vmin, sigparB1Vmax, color='b', alpha=0.3)
        plt.fill_between(vmagM6V, sigparM6Vmin, sigparM6Vmax, color='r', alpha=0.3)
        plt.xlim((5,22.5))
        plt.ylim((4,1000))
        plt.text(17.2,190,'B1V',color='b')
        plt.text(18,20,'M6V',color='r')
        plt.xlabel('$V$ [mag]')
        # Annotate the error-budget regimes on the figure.
        plt.text(7,17,'calibration noise floor', size=12, bbox=dict(boxstyle="round,pad=0.3",
            ec=(0.0, 0.0, 0.0),
            fc=(1.0, 1.0, 1.0),
            ))
        plt.text(14.75,80,'photon noise', rotation=45, size=12, bbox=dict(boxstyle="round,pad=0.3",
            ec=(0.0, 0.0, 0.0),
            fc=(1.0, 1.0, 1.0),
            ))
        ax.annotate('non-uniformity\nover the sky', xy=(21.5, 320), xycoords='data',
            xytext=(21.5,80), textcoords='data', ha='center', size='12',
            bbox=dict(boxstyle="round,pad=0.3",ec=(0,0,0),fc=(1,1,1)),
            arrowprops=dict(facecolor='black', shrink=0.15, width=1,
                headwidth=6),
            horizontalalignment='right', verticalalignment='top',
            )
        ax.annotate('', xy=(21.5, 500), xycoords='data',
            xytext=(21.5,950), textcoords='data', ha='center', size='12',
            arrowprops=dict(facecolor='black', shrink=0.15, width=1,
                headwidth=6),
            horizontalalignment='right', verticalalignment='bottom',
            )
        plt.xticks(np.arange(6,24,2))
    # Common y-axis formatting for both branches (plain, non-scientific).
    ax = plt.gca().yaxis
    ax.set_major_formatter(matplotlib.ticker.ScalarFormatter())
    plt.ticklabel_format(axis='y',style='plain')
    plt.grid(which='both')
    # NOTE(review): '\m' in the label below is an invalid escape sequence
    # (DeprecationWarning on modern Python) — prefer a raw string; the
    # rendered text is unchanged either way.
    plt.ylabel('End-of-mission parallax standard error [$\mu$as]')
    if (args['pdfOutput']):
        plt.savefig('ParallaxErrors.pdf')
    elif (args['pngOutput']):
        plt.savefig('ParallaxErrors.png')
    else:
        plt.show() | def function[makePlot, parameter[args]]:
constant[
Make the plot with parallax performance predictions.
:argument args: command line arguments
]
variable[gmag] assign[=] call[name[np].linspace, parameter[constant[5.7], constant[20.0], constant[101]]]
variable[vminiB1V] assign[=] call[name[vminiFromSpt], parameter[constant[B1V]]]
variable[vminiG2V] assign[=] call[name[vminiFromSpt], parameter[constant[G2V]]]
variable[vminiM6V] assign[=] call[name[vminiFromSpt], parameter[constant[M6V]]]
variable[vmagB1V] assign[=] binary_operation[name[gmag] - call[name[gminvFromVmini], parameter[name[vminiB1V]]]]
variable[vmagG2V] assign[=] binary_operation[name[gmag] - call[name[gminvFromVmini], parameter[name[vminiG2V]]]]
variable[vmagM6V] assign[=] binary_operation[name[gmag] - call[name[gminvFromVmini], parameter[name[vminiM6V]]]]
variable[sigparB1V] assign[=] call[name[parallaxErrorSkyAvg], parameter[name[gmag], name[vminiB1V]]]
variable[sigparB1Vmin] assign[=] call[name[parallaxMinError], parameter[name[gmag], name[vminiB1V]]]
variable[sigparB1Vmax] assign[=] call[name[parallaxMaxError], parameter[name[gmag], name[vminiB1V]]]
variable[sigparG2V] assign[=] call[name[parallaxErrorSkyAvg], parameter[name[gmag], name[vminiG2V]]]
variable[sigparG2Vmin] assign[=] call[name[parallaxMinError], parameter[name[gmag], name[vminiG2V]]]
variable[sigparG2Vmax] assign[=] call[name[parallaxMaxError], parameter[name[gmag], name[vminiG2V]]]
variable[sigparM6V] assign[=] call[name[parallaxErrorSkyAvg], parameter[name[gmag], name[vminiM6V]]]
variable[sigparM6Vmin] assign[=] call[name[parallaxMinError], parameter[name[gmag], name[vminiM6V]]]
variable[sigparM6Vmax] assign[=] call[name[parallaxMaxError], parameter[name[gmag], name[vminiM6V]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
if call[name[args]][constant[gmagAbscissa]] begin[:]
call[name[plt].semilogy, parameter[name[gmag], name[sigparB1V], constant[b]]]
call[name[plt].semilogy, parameter[name[gmag], name[sigparG2V], constant[g]]]
call[name[plt].semilogy, parameter[name[gmag], name[sigparM6V], constant[r]]]
call[name[plt].xlim, parameter[tuple[[<ast.Constant object at 0x7da1b26ac610>, <ast.Constant object at 0x7da1b26af820>]]]]
call[name[plt].ylim, parameter[tuple[[<ast.Constant object at 0x7da1b26af340>, <ast.Constant object at 0x7da1b26ad330>]]]]
call[name[plt].legend, parameter[]]
call[name[plt].xlabel, parameter[constant[$G$ [mag]]]]
call[name[plt].xticks, parameter[call[name[np].arange, parameter[constant[6], constant[24], constant[2]]]]]
variable[ax] assign[=] call[name[plt].gca, parameter[]].yaxis
call[name[ax].set_major_formatter, parameter[call[name[matplotlib].ticker.ScalarFormatter, parameter[]]]]
call[name[plt].ticklabel_format, parameter[]]
call[name[plt].grid, parameter[]]
call[name[plt].ylabel, parameter[constant[End-of-mission parallax standard error [$\mu$as]]]]
if call[name[args]][constant[pdfOutput]] begin[:]
call[name[plt].savefig, parameter[constant[ParallaxErrors.pdf]]] | keyword[def] identifier[makePlot] ( identifier[args] ):
literal[string]
identifier[gmag] = identifier[np] . identifier[linspace] ( literal[int] , literal[int] , literal[int] )
identifier[vminiB1V] = identifier[vminiFromSpt] ( literal[string] )
identifier[vminiG2V] = identifier[vminiFromSpt] ( literal[string] )
identifier[vminiM6V] = identifier[vminiFromSpt] ( literal[string] )
identifier[vmagB1V] = identifier[gmag] - identifier[gminvFromVmini] ( identifier[vminiB1V] )
identifier[vmagG2V] = identifier[gmag] - identifier[gminvFromVmini] ( identifier[vminiG2V] )
identifier[vmagM6V] = identifier[gmag] - identifier[gminvFromVmini] ( identifier[vminiM6V] )
identifier[sigparB1V] = identifier[parallaxErrorSkyAvg] ( identifier[gmag] , identifier[vminiB1V] )
identifier[sigparB1Vmin] = identifier[parallaxMinError] ( identifier[gmag] , identifier[vminiB1V] )
identifier[sigparB1Vmax] = identifier[parallaxMaxError] ( identifier[gmag] , identifier[vminiB1V] )
identifier[sigparG2V] = identifier[parallaxErrorSkyAvg] ( identifier[gmag] , identifier[vminiG2V] )
identifier[sigparG2Vmin] = identifier[parallaxMinError] ( identifier[gmag] , identifier[vminiG2V] )
identifier[sigparG2Vmax] = identifier[parallaxMaxError] ( identifier[gmag] , identifier[vminiG2V] )
identifier[sigparM6V] = identifier[parallaxErrorSkyAvg] ( identifier[gmag] , identifier[vminiM6V] )
identifier[sigparM6Vmin] = identifier[parallaxMinError] ( identifier[gmag] , identifier[vminiM6V] )
identifier[sigparM6Vmax] = identifier[parallaxMaxError] ( identifier[gmag] , identifier[vminiM6V] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] , literal[int] ))
keyword[if] ( identifier[args] [ literal[string] ]):
identifier[plt] . identifier[semilogy] ( identifier[gmag] , identifier[sigparB1V] , literal[string] , identifier[label] = literal[string] )
identifier[plt] . identifier[semilogy] ( identifier[gmag] , identifier[sigparG2V] , literal[string] , identifier[label] = literal[string] )
identifier[plt] . identifier[semilogy] ( identifier[gmag] , identifier[sigparM6V] , literal[string] , identifier[label] = literal[string] )
identifier[plt] . identifier[xlim] (( literal[int] , literal[int] ))
identifier[plt] . identifier[ylim] (( literal[int] , literal[int] ))
identifier[plt] . identifier[legend] ( identifier[loc] = literal[int] )
identifier[plt] . identifier[xlabel] ( literal[string] )
keyword[else] :
identifier[ax] = identifier[fig] . identifier[add_subplot] ( literal[int] )
identifier[plt] . identifier[semilogy] ( identifier[vmagB1V] , identifier[sigparB1V] , literal[string] , identifier[label] = literal[string] )
identifier[plt] . identifier[semilogy] ( identifier[vmagM6V] , identifier[sigparM6V] , literal[string] , identifier[label] = literal[string] )
identifier[plt] . identifier[fill_between] ( identifier[vmagB1V] , identifier[sigparB1Vmin] , identifier[sigparB1Vmax] , identifier[color] = literal[string] , identifier[alpha] = literal[int] )
identifier[plt] . identifier[fill_between] ( identifier[vmagM6V] , identifier[sigparM6Vmin] , identifier[sigparM6Vmax] , identifier[color] = literal[string] , identifier[alpha] = literal[int] )
identifier[plt] . identifier[xlim] (( literal[int] , literal[int] ))
identifier[plt] . identifier[ylim] (( literal[int] , literal[int] ))
identifier[plt] . identifier[text] ( literal[int] , literal[int] , literal[string] , identifier[color] = literal[string] )
identifier[plt] . identifier[text] ( literal[int] , literal[int] , literal[string] , identifier[color] = literal[string] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[text] ( literal[int] , literal[int] , literal[string] , identifier[size] = literal[int] , identifier[bbox] = identifier[dict] ( identifier[boxstyle] = literal[string] ,
identifier[ec] =( literal[int] , literal[int] , literal[int] ),
identifier[fc] =( literal[int] , literal[int] , literal[int] ),
))
identifier[plt] . identifier[text] ( literal[int] , literal[int] , literal[string] , identifier[rotation] = literal[int] , identifier[size] = literal[int] , identifier[bbox] = identifier[dict] ( identifier[boxstyle] = literal[string] ,
identifier[ec] =( literal[int] , literal[int] , literal[int] ),
identifier[fc] =( literal[int] , literal[int] , literal[int] ),
))
identifier[ax] . identifier[annotate] ( literal[string] , identifier[xy] =( literal[int] , literal[int] ), identifier[xycoords] = literal[string] ,
identifier[xytext] =( literal[int] , literal[int] ), identifier[textcoords] = literal[string] , identifier[ha] = literal[string] , identifier[size] = literal[string] ,
identifier[bbox] = identifier[dict] ( identifier[boxstyle] = literal[string] , identifier[ec] =( literal[int] , literal[int] , literal[int] ), identifier[fc] =( literal[int] , literal[int] , literal[int] )),
identifier[arrowprops] = identifier[dict] ( identifier[facecolor] = literal[string] , identifier[shrink] = literal[int] , identifier[width] = literal[int] ,
identifier[headwidth] = literal[int] ),
identifier[horizontalalignment] = literal[string] , identifier[verticalalignment] = literal[string] ,
)
identifier[ax] . identifier[annotate] ( literal[string] , identifier[xy] =( literal[int] , literal[int] ), identifier[xycoords] = literal[string] ,
identifier[xytext] =( literal[int] , literal[int] ), identifier[textcoords] = literal[string] , identifier[ha] = literal[string] , identifier[size] = literal[string] ,
identifier[arrowprops] = identifier[dict] ( identifier[facecolor] = literal[string] , identifier[shrink] = literal[int] , identifier[width] = literal[int] ,
identifier[headwidth] = literal[int] ),
identifier[horizontalalignment] = literal[string] , identifier[verticalalignment] = literal[string] ,
)
identifier[plt] . identifier[xticks] ( identifier[np] . identifier[arange] ( literal[int] , literal[int] , literal[int] ))
identifier[ax] = identifier[plt] . identifier[gca] (). identifier[yaxis]
identifier[ax] . identifier[set_major_formatter] ( identifier[matplotlib] . identifier[ticker] . identifier[ScalarFormatter] ())
identifier[plt] . identifier[ticklabel_format] ( identifier[axis] = literal[string] , identifier[style] = literal[string] )
identifier[plt] . identifier[grid] ( identifier[which] = literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
keyword[if] ( identifier[args] [ literal[string] ]):
identifier[plt] . identifier[savefig] ( literal[string] )
keyword[elif] ( identifier[args] [ literal[string] ]):
identifier[plt] . identifier[savefig] ( literal[string] )
keyword[else] :
identifier[plt] . identifier[show] () | def makePlot(args):
"""
Make the plot with parallax performance predictions.
:argument args: command line arguments
"""
gmag = np.linspace(5.7, 20.0, 101)
vminiB1V = vminiFromSpt('B1V')
vminiG2V = vminiFromSpt('G2V')
vminiM6V = vminiFromSpt('M6V')
vmagB1V = gmag - gminvFromVmini(vminiB1V)
vmagG2V = gmag - gminvFromVmini(vminiG2V)
vmagM6V = gmag - gminvFromVmini(vminiM6V)
sigparB1V = parallaxErrorSkyAvg(gmag, vminiB1V)
sigparB1Vmin = parallaxMinError(gmag, vminiB1V)
sigparB1Vmax = parallaxMaxError(gmag, vminiB1V)
sigparG2V = parallaxErrorSkyAvg(gmag, vminiG2V)
sigparG2Vmin = parallaxMinError(gmag, vminiG2V)
sigparG2Vmax = parallaxMaxError(gmag, vminiG2V)
sigparM6V = parallaxErrorSkyAvg(gmag, vminiM6V)
sigparM6Vmin = parallaxMinError(gmag, vminiM6V)
sigparM6Vmax = parallaxMaxError(gmag, vminiM6V)
fig = plt.figure(figsize=(10, 6.5))
if args['gmagAbscissa']:
plt.semilogy(gmag, sigparB1V, 'b', label='B1V')
plt.semilogy(gmag, sigparG2V, 'g', label='G2V')
plt.semilogy(gmag, sigparM6V, 'r', label='M6V')
plt.xlim((5, 20))
plt.ylim((4, 1000))
plt.legend(loc=4)
plt.xlabel('$G$ [mag]') # depends on [control=['if'], data=[]]
else:
ax = fig.add_subplot(111)
plt.semilogy(vmagB1V, sigparB1V, 'b', label='B1V')
#plt.semilogy(vmagG2V, sigparG2V, 'g', label='G2V')
plt.semilogy(vmagM6V, sigparM6V, 'r', label='M6V')
plt.fill_between(vmagB1V, sigparB1Vmin, sigparB1Vmax, color='b', alpha=0.3)
plt.fill_between(vmagM6V, sigparM6Vmin, sigparM6Vmax, color='r', alpha=0.3)
plt.xlim((5, 22.5))
plt.ylim((4, 1000))
plt.text(17.2, 190, 'B1V', color='b')
plt.text(18, 20, 'M6V', color='r')
plt.xlabel('$V$ [mag]')
plt.text(7, 17, 'calibration noise floor', size=12, bbox=dict(boxstyle='round,pad=0.3', ec=(0.0, 0.0, 0.0), fc=(1.0, 1.0, 1.0)))
plt.text(14.75, 80, 'photon noise', rotation=45, size=12, bbox=dict(boxstyle='round,pad=0.3', ec=(0.0, 0.0, 0.0), fc=(1.0, 1.0, 1.0)))
ax.annotate('non-uniformity\nover the sky', xy=(21.5, 320), xycoords='data', xytext=(21.5, 80), textcoords='data', ha='center', size='12', bbox=dict(boxstyle='round,pad=0.3', ec=(0, 0, 0), fc=(1, 1, 1)), arrowprops=dict(facecolor='black', shrink=0.15, width=1, headwidth=6), horizontalalignment='right', verticalalignment='top')
ax.annotate('', xy=(21.5, 500), xycoords='data', xytext=(21.5, 950), textcoords='data', ha='center', size='12', arrowprops=dict(facecolor='black', shrink=0.15, width=1, headwidth=6), horizontalalignment='right', verticalalignment='bottom')
plt.xticks(np.arange(6, 24, 2))
ax = plt.gca().yaxis
ax.set_major_formatter(matplotlib.ticker.ScalarFormatter())
plt.ticklabel_format(axis='y', style='plain')
plt.grid(which='both')
plt.ylabel('End-of-mission parallax standard error [$\\mu$as]')
if args['pdfOutput']:
plt.savefig('ParallaxErrors.pdf') # depends on [control=['if'], data=[]]
elif args['pngOutput']:
plt.savefig('ParallaxErrors.png') # depends on [control=['if'], data=[]]
else:
plt.show() |
def __get_min_reads(current_provisioning, min_provisioned_reads, log_tag):
    """ Get the minimum number of reads to current_provisioning

    :type current_provisioning: int
    :param current_provisioning: Current provisioned reads
    :type min_provisioned_reads: int
    :param min_provisioned_reads: Configured min provisioned reads
    :type log_tag: str
    :param log_tag: Prefix for the log
    :returns: int -- Minimum number of reads
    """
    # Fallback value to ensure that we always have at least 1 read
    reads = 1
    if min_provisioned_reads:
        reads = int(min_provisioned_reads)
        if reads > int(current_provisioning * 2):
            # DynamoDB caps a single scale-up at 100% of the current
            # provisioning, so the configured minimum cannot be reached.
            reads = int(current_provisioning * 2)
            logger.debug(
                '{0} - '
                'Cannot reach min-provisioned-reads as max scale up '
                'is 100% of current provisioning'.format(log_tag))
    # Log the value actually used: it differs from the configured minimum
    # when that minimum was capped above or was not configured at all.
    logger.debug(
        '{0} - Setting min provisioned reads to {1}'.format(
            log_tag, reads))
    return reads
constant[ Get the minimum number of reads to current_provisioning
:type current_provisioning: int
:param current_provisioning: Current provisioned reads
:type min_provisioned_reads: int
:param min_provisioned_reads: Configured min provisioned reads
:type log_tag: str
:param log_tag: Prefix for the log
:returns: int -- Minimum number of reads
]
variable[reads] assign[=] constant[1]
if name[min_provisioned_reads] begin[:]
variable[reads] assign[=] call[name[int], parameter[name[min_provisioned_reads]]]
if compare[name[reads] greater[>] call[name[int], parameter[binary_operation[name[current_provisioning] * constant[2]]]]] begin[:]
variable[reads] assign[=] call[name[int], parameter[binary_operation[name[current_provisioning] * constant[2]]]]
call[name[logger].debug, parameter[call[constant[{0} - Cannot reach min-provisioned-reads as max scale up is 100% of current provisioning].format, parameter[name[log_tag]]]]]
call[name[logger].debug, parameter[call[constant[{0} - Setting min provisioned reads to {1}].format, parameter[name[log_tag], name[min_provisioned_reads]]]]]
return[name[reads]] | keyword[def] identifier[__get_min_reads] ( identifier[current_provisioning] , identifier[min_provisioned_reads] , identifier[log_tag] ):
literal[string]
identifier[reads] = literal[int]
keyword[if] identifier[min_provisioned_reads] :
identifier[reads] = identifier[int] ( identifier[min_provisioned_reads] )
keyword[if] identifier[reads] > identifier[int] ( identifier[current_provisioning] * literal[int] ):
identifier[reads] = identifier[int] ( identifier[current_provisioning] * literal[int] )
identifier[logger] . identifier[debug] (
literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[log_tag] ))
identifier[logger] . identifier[debug] (
literal[string] . identifier[format] (
identifier[log_tag] , identifier[min_provisioned_reads] ))
keyword[return] identifier[reads] | def __get_min_reads(current_provisioning, min_provisioned_reads, log_tag):
""" Get the minimum number of reads to current_provisioning
:type current_provisioning: int
:param current_provisioning: Current provisioned reads
:type min_provisioned_reads: int
:param min_provisioned_reads: Configured min provisioned reads
:type log_tag: str
:param log_tag: Prefix for the log
:returns: int -- Minimum number of reads
"""
# Fallback value to ensure that we always have at least 1 read
reads = 1
if min_provisioned_reads:
reads = int(min_provisioned_reads)
if reads > int(current_provisioning * 2):
reads = int(current_provisioning * 2)
logger.debug('{0} - Cannot reach min-provisioned-reads as max scale up is 100% of current provisioning'.format(log_tag)) # depends on [control=['if'], data=['reads']] # depends on [control=['if'], data=[]]
logger.debug('{0} - Setting min provisioned reads to {1}'.format(log_tag, min_provisioned_reads))
return reads |
def mkdir(self, tid, mode):
    """
    Directory creation. Search is performed.

    Parameters
    ----------
    tid : str
        Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator.
    mode : int
        Ignored.
    """
    path_type = self.PathType.get(tid)
    # Only search-directory locations may be created.
    if path_type is self.PathType.file or path_type is self.PathType.invalid:
        raise FuseOSError(errno.EPERM)
    # Refuse to shadow an already-existing entry.
    if self.__exists(tid):
        raise FuseOSError(errno.EEXIST)
    try:
        actions = YTActions(tid[0])
        actions.updateResults()
    except ConnectionError:
        raise FuseOSError(errno.ENETDOWN)
    # Nothing failed above, so registering the directory entry is legit.
    self.searches[tid[0]] = actions
    return 0
constant[
Directory creation. Search is performed.
Parameters
----------
tid : str
Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator.
mode : int
Ignored.
]
variable[pt] assign[=] call[name[self].PathType.get, parameter[name[tid]]]
if <ast.BoolOp object at 0x7da18eb56560> begin[:]
<ast.Raise object at 0x7da18f812c80>
if call[name[self].__exists, parameter[name[tid]]] begin[:]
<ast.Raise object at 0x7da18f812920>
<ast.Try object at 0x7da18f8119c0>
call[name[self].searches][call[name[tid]][constant[0]]] assign[=] name[dir_ent]
return[constant[0]] | keyword[def] identifier[mkdir] ( identifier[self] , identifier[tid] , identifier[mode] ):
literal[string]
identifier[pt] = identifier[self] . identifier[PathType] . identifier[get] ( identifier[tid] )
keyword[if] identifier[pt] keyword[is] identifier[self] . identifier[PathType] . identifier[invalid] keyword[or] identifier[pt] keyword[is] identifier[self] . identifier[PathType] . identifier[file] :
keyword[raise] identifier[FuseOSError] ( identifier[errno] . identifier[EPERM] )
keyword[if] identifier[self] . identifier[__exists] ( identifier[tid] ):
keyword[raise] identifier[FuseOSError] ( identifier[errno] . identifier[EEXIST] )
keyword[try] :
identifier[dir_ent] = identifier[YTActions] ( identifier[tid] [ literal[int] ])
identifier[dir_ent] . identifier[updateResults] ()
keyword[except] identifier[ConnectionError] :
keyword[raise] identifier[FuseOSError] ( identifier[errno] . identifier[ENETDOWN] )
identifier[self] . identifier[searches] [ identifier[tid] [ literal[int] ]]= identifier[dir_ent]
keyword[return] literal[int] | def mkdir(self, tid, mode):
"""
Directory creation. Search is performed.
Parameters
----------
tid : str
Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator.
mode : int
Ignored.
"""
pt = self.PathType.get(tid)
if pt is self.PathType.invalid or pt is self.PathType.file:
raise FuseOSError(errno.EPERM) # depends on [control=['if'], data=[]]
if self.__exists(tid):
raise FuseOSError(errno.EEXIST) # depends on [control=['if'], data=[]]
try:
dir_ent = YTActions(tid[0])
dir_ent.updateResults() # depends on [control=['try'], data=[]]
except ConnectionError:
raise FuseOSError(errno.ENETDOWN) # depends on [control=['except'], data=[]]
self.searches[tid[0]] = dir_ent # now adding directory entry is legit, nothing failed.
return 0 |
def set_parameter_bounds(self, name, par, bounds):
    """Set the bounds on the scaled value of a parameter.

    Parameters
    ----------
    name : str
        Source name.
    par : str
        Parameter name.
    bounds : list
        Upper and lower bound.
    """
    # Resolve the flat parameter index for this source/parameter pair,
    # apply the bounds, then push the change back into the model state.
    par_index = self.like.par_index(name, par)
    self.like[par_index].setBounds(*bounds)
    self._sync_params(name)
constant[Set the bounds on the scaled value of a parameter.
Parameters
----------
name : str
Source name.
par : str
Parameter name.
bounds : list
Upper and lower bound.
]
variable[idx] assign[=] call[name[self].like.par_index, parameter[name[name], name[par]]]
call[call[name[self].like][name[idx]].setBounds, parameter[<ast.Starred object at 0x7da20c76c1f0>]]
call[name[self]._sync_params, parameter[name[name]]] | keyword[def] identifier[set_parameter_bounds] ( identifier[self] , identifier[name] , identifier[par] , identifier[bounds] ):
literal[string]
identifier[idx] = identifier[self] . identifier[like] . identifier[par_index] ( identifier[name] , identifier[par] )
identifier[self] . identifier[like] [ identifier[idx] ]. identifier[setBounds] (* identifier[bounds] )
identifier[self] . identifier[_sync_params] ( identifier[name] ) | def set_parameter_bounds(self, name, par, bounds):
"""Set the bounds on the scaled value of a parameter.
Parameters
----------
name : str
Source name.
par : str
Parameter name.
bounds : list
Upper and lower bound.
"""
idx = self.like.par_index(name, par)
self.like[idx].setBounds(*bounds)
self._sync_params(name) |
def _broadcast_item(self, row_lookup, col_lookup, item, to_shape):
    """Use numpy to broadcast or reshape item.

    Notes:
        - Numpy is memory efficient, there shouldn't be performance issue.
    """
    # A DataFrame/Series handed to __setitem__ may be larger than the
    # region being overwritten, so align it to the target labels first.
    if isinstance(item, (pandas.Series, pandas.DataFrame, DataFrame)):
        if any(idx not in item.index for idx in row_lookup):
            raise ValueError(
                "Must have equal len keys and value when setting with "
                "an iterable"
            )
        if hasattr(item, "columns"):
            if any(idx not in item.columns for idx in col_lookup):
                raise ValueError(
                    "Must have equal len keys and value when setting "
                    "with an iterable"
                )
            item = item.reindex(index=row_lookup, columns=col_lookup)
        else:
            item = item.reindex(index=row_lookup)
    try:
        arr = np.array(item)
        # Prefer an exact reshape when the element counts match;
        # otherwise fall back to numpy broadcasting rules.
        if np.prod(to_shape) == np.prod(arr.shape):
            return arr.reshape(to_shape)
        return np.broadcast_to(arr, to_shape)
    except ValueError:
        from_shape = np.array(item).shape
        raise ValueError(
            "could not broadcast input array from shape {from_shape} into shape "
            "{to_shape}".format(from_shape=from_shape, to_shape=to_shape)
        )
constant[Use numpy to broadcast or reshape item.
Notes:
- Numpy is memory efficient, there shouldn't be performance issue.
]
if call[name[isinstance], parameter[name[item], tuple[[<ast.Attribute object at 0x7da18f00d240>, <ast.Attribute object at 0x7da18f00ce80>, <ast.Name object at 0x7da18f00ead0>]]]] begin[:]
if <ast.UnaryOp object at 0x7da18f00ce50> begin[:]
<ast.Raise object at 0x7da18f00c2e0>
if call[name[hasattr], parameter[name[item], constant[columns]]] begin[:]
if <ast.UnaryOp object at 0x7da18f00f4f0> begin[:]
<ast.Raise object at 0x7da18f00f5e0>
variable[item] assign[=] call[name[item].reindex, parameter[]]
<ast.Try object at 0x7da18f09fb80> | keyword[def] identifier[_broadcast_item] ( identifier[self] , identifier[row_lookup] , identifier[col_lookup] , identifier[item] , identifier[to_shape] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[item] ,( identifier[pandas] . identifier[Series] , identifier[pandas] . identifier[DataFrame] , identifier[DataFrame] )):
keyword[if] keyword[not] identifier[all] ( identifier[idx] keyword[in] identifier[item] . identifier[index] keyword[for] identifier[idx] keyword[in] identifier[row_lookup] ):
keyword[raise] identifier[ValueError] (
literal[string]
literal[string]
)
keyword[if] identifier[hasattr] ( identifier[item] , literal[string] ):
keyword[if] keyword[not] identifier[all] ( identifier[idx] keyword[in] identifier[item] . identifier[columns] keyword[for] identifier[idx] keyword[in] identifier[col_lookup] ):
keyword[raise] identifier[ValueError] (
literal[string]
literal[string]
)
identifier[item] = identifier[item] . identifier[reindex] ( identifier[index] = identifier[row_lookup] , identifier[columns] = identifier[col_lookup] )
keyword[else] :
identifier[item] = identifier[item] . identifier[reindex] ( identifier[index] = identifier[row_lookup] )
keyword[try] :
identifier[item] = identifier[np] . identifier[array] ( identifier[item] )
keyword[if] identifier[np] . identifier[prod] ( identifier[to_shape] )== identifier[np] . identifier[prod] ( identifier[item] . identifier[shape] ):
keyword[return] identifier[item] . identifier[reshape] ( identifier[to_shape] )
keyword[else] :
keyword[return] identifier[np] . identifier[broadcast_to] ( identifier[item] , identifier[to_shape] )
keyword[except] identifier[ValueError] :
identifier[from_shape] = identifier[np] . identifier[array] ( identifier[item] ). identifier[shape]
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] . identifier[format] ( identifier[from_shape] = identifier[from_shape] , identifier[to_shape] = identifier[to_shape] )
) | def _broadcast_item(self, row_lookup, col_lookup, item, to_shape):
"""Use numpy to broadcast or reshape item.
Notes:
- Numpy is memory efficient, there shouldn't be performance issue.
"""
# It is valid to pass a DataFrame or Series to __setitem__ that is larger than
# the target the user is trying to overwrite. This
if isinstance(item, (pandas.Series, pandas.DataFrame, DataFrame)):
if not all((idx in item.index for idx in row_lookup)):
raise ValueError('Must have equal len keys and value when setting with an iterable') # depends on [control=['if'], data=[]]
if hasattr(item, 'columns'):
if not all((idx in item.columns for idx in col_lookup)):
raise ValueError('Must have equal len keys and value when setting with an iterable') # depends on [control=['if'], data=[]]
item = item.reindex(index=row_lookup, columns=col_lookup) # depends on [control=['if'], data=[]]
else:
item = item.reindex(index=row_lookup) # depends on [control=['if'], data=[]]
try:
item = np.array(item)
if np.prod(to_shape) == np.prod(item.shape):
return item.reshape(to_shape) # depends on [control=['if'], data=[]]
else:
return np.broadcast_to(item, to_shape) # depends on [control=['try'], data=[]]
except ValueError:
from_shape = np.array(item).shape
raise ValueError('could not broadcast input array from shape {from_shape} into shape {to_shape}'.format(from_shape=from_shape, to_shape=to_shape)) # depends on [control=['except'], data=[]] |
def set_editor_cursor(self, editor, cursor):
    """Set the cursor of an editor.

    Builds a fresh cursor on the editor's document at the same
    position/anchor as *cursor* (preserving any selection) and installs
    it, instead of sharing the caller's cursor object.
    """
    pos = cursor.position()
    anchor = cursor.anchor()
    # The cursor must be created on the editor's document: a bare
    # QTextCursor() is a null cursor and ignores position changes.
    new_cursor = QTextCursor(editor.document())
    if pos == anchor:
        new_cursor.setPosition(pos)
    else:
        # Recreate the selection: place the anchor first, then extend
        # to the position while keeping the anchor fixed.
        new_cursor.setPosition(anchor)
        new_cursor.setPosition(pos, QTextCursor.KeepAnchor)
    editor.setTextCursor(new_cursor)
constant[Set the cursor of an editor.]
variable[pos] assign[=] call[name[cursor].position, parameter[]]
variable[anchor] assign[=] call[name[cursor].anchor, parameter[]]
variable[new_cursor] assign[=] call[name[QTextCursor], parameter[]]
if compare[name[pos] equal[==] name[anchor]] begin[:]
call[name[new_cursor].movePosition, parameter[name[pos]]]
call[name[editor].setTextCursor, parameter[name[cursor]]] | keyword[def] identifier[set_editor_cursor] ( identifier[self] , identifier[editor] , identifier[cursor] ):
literal[string]
identifier[pos] = identifier[cursor] . identifier[position] ()
identifier[anchor] = identifier[cursor] . identifier[anchor] ()
identifier[new_cursor] = identifier[QTextCursor] ()
keyword[if] identifier[pos] == identifier[anchor] :
identifier[new_cursor] . identifier[movePosition] ( identifier[pos] )
keyword[else] :
identifier[new_cursor] . identifier[movePosition] ( identifier[anchor] )
identifier[new_cursor] . identifier[movePosition] ( identifier[pos] , identifier[QTextCursor] . identifier[KeepAnchor] )
identifier[editor] . identifier[setTextCursor] ( identifier[cursor] ) | def set_editor_cursor(self, editor, cursor):
"""Set the cursor of an editor."""
pos = cursor.position()
anchor = cursor.anchor()
new_cursor = QTextCursor()
if pos == anchor:
new_cursor.movePosition(pos) # depends on [control=['if'], data=['pos']]
else:
new_cursor.movePosition(anchor)
new_cursor.movePosition(pos, QTextCursor.KeepAnchor)
editor.setTextCursor(cursor) |
def eclipse_depth(mafn, Rp, Rs, b, u1=0.394, u2=0.261, max_only=False, npts=100, force_1d=False):
    """Calculate the average (or maximum) eclipse depth along a transit chord.

    Parameters
    ----------
    mafn : callable
        Transit light-curve function ``mafn(k, z, u1, u2)`` returning the
        relative flux for radius ratio ``k`` at normalized separation(s) ``z``.
    Rp : float
        Planet radius in Earth radii.
    Rs : float
        Stellar radius in Solar radii.
    b : float or ndarray
        Impact parameter(s); assumes ``b <= 1`` (sqrt(1 - b**2) below).
    u1, u2 : float
        Quadratic limb-darkening coefficients.
    max_only : bool
        If True, return only the maximum depth (flux deficit at separation b).
    npts : int
        Number of sample points along the chord.
    force_1d : bool
        If True, skip reshaping the flux array in the vectorized branch.

    Returns
    -------
    float or ndarray
        Mean eclipse depth along the chord, or the maximum depth when
        ``max_only`` is set.
    """
    k = Rp * REARTH / (Rs * RSUN)
    if max_only:
        return 1 - mafn(k, b, u1, u2)
    if np.size(b) == 1:
        # Scalar b: sample the chord from mid-transit to egress.
        x = np.linspace(0, np.sqrt(1 - b**2), npts)
        y = b
        zs = np.sqrt(x**2 + y**2)
        fs = mafn(k, zs, u1, u2)  # returns array of shape (nks, nzs)
        depth = 1 - fs
    else:
        xmax = np.sqrt(1 - b**2)
        # BUGFIX: 'Nones' was an undefined name (NameError); np.newaxis is
        # spelled None when adding a broadcast axis.
        x = np.linspace(0, 1, npts) * xmax[:, None]
        y = b[:, None]
        zs = np.sqrt(x**2 + y**2)
        fs = mafn(k, zs.ravel(), u1, u2)
        if not force_1d:
            # BUGFIX: bare 'size' was undefined; use np.size.
            fs = fs.reshape(np.size(k), *zs.shape)
        depth = 1 - fs
    meandepth = np.squeeze(depth.mean(axis=depth.ndim - 1))
    return meandepth
constant[ Calculates average (or max) eclipse depth
***why does b>1 take so freaking long?...
]
variable[k] assign[=] binary_operation[binary_operation[name[Rp] * name[REARTH]] / binary_operation[name[Rs] * name[RSUN]]]
if name[max_only] begin[:]
return[binary_operation[constant[1] - call[name[mafn], parameter[name[k], name[b], name[u1], name[u2]]]]]
if compare[call[name[np].size, parameter[name[b]]] equal[==] constant[1]] begin[:]
variable[x] assign[=] call[name[np].linspace, parameter[constant[0], call[name[np].sqrt, parameter[binary_operation[constant[1] - binary_operation[name[b] ** constant[2]]]]], name[npts]]]
variable[y] assign[=] name[b]
variable[zs] assign[=] call[name[np].sqrt, parameter[binary_operation[binary_operation[name[x] ** constant[2]] + binary_operation[name[y] ** constant[2]]]]]
variable[fs] assign[=] call[name[mafn], parameter[name[k], name[zs], name[u1], name[u2]]]
variable[depth] assign[=] binary_operation[constant[1] - name[fs]]
variable[meandepth] assign[=] call[name[np].squeeze, parameter[call[name[depth].mean, parameter[]]]]
return[name[meandepth]] | keyword[def] identifier[eclipse_depth] ( identifier[mafn] , identifier[Rp] , identifier[Rs] , identifier[b] , identifier[u1] = literal[int] , identifier[u2] = literal[int] , identifier[max_only] = keyword[False] , identifier[npts] = literal[int] , identifier[force_1d] = keyword[False] ):
literal[string]
identifier[k] = identifier[Rp] * identifier[REARTH] /( identifier[Rs] * identifier[RSUN] )
keyword[if] identifier[max_only] :
keyword[return] literal[int] - identifier[mafn] ( identifier[k] , identifier[b] , identifier[u1] , identifier[u2] )
keyword[if] identifier[np] . identifier[size] ( identifier[b] )== literal[int] :
identifier[x] = identifier[np] . identifier[linspace] ( literal[int] , identifier[np] . identifier[sqrt] ( literal[int] - identifier[b] ** literal[int] ), identifier[npts] )
identifier[y] = identifier[b]
identifier[zs] = identifier[np] . identifier[sqrt] ( identifier[x] ** literal[int] + identifier[y] ** literal[int] )
identifier[fs] = identifier[mafn] ( identifier[k] , identifier[zs] , identifier[u1] , identifier[u2] )
identifier[depth] = literal[int] - identifier[fs]
keyword[else] :
identifier[xmax] = identifier[np] . identifier[sqrt] ( literal[int] - identifier[b] ** literal[int] )
identifier[x] = identifier[np] . identifier[linspace] ( literal[int] , literal[int] , identifier[npts] )* identifier[xmax] [:, identifier[Nones] ]
identifier[y] = identifier[b] [:, keyword[None] ]
identifier[zs] = identifier[np] . identifier[sqrt] ( identifier[x] ** literal[int] + identifier[y] ** literal[int] )
identifier[fs] = identifier[mafn] ( identifier[k] , identifier[zs] . identifier[ravel] (), identifier[u1] , identifier[u2] )
keyword[if] keyword[not] identifier[force_1d] :
identifier[fs] = identifier[fs] . identifier[reshape] ( identifier[size] ( identifier[k] ),* identifier[zs] . identifier[shape] )
identifier[depth] = literal[int] - identifier[fs]
identifier[meandepth] = identifier[np] . identifier[squeeze] ( identifier[depth] . identifier[mean] ( identifier[axis] = identifier[depth] . identifier[ndim] - literal[int] ))
keyword[return] identifier[meandepth] | def eclipse_depth(mafn, Rp, Rs, b, u1=0.394, u2=0.261, max_only=False, npts=100, force_1d=False):
""" Calculates average (or max) eclipse depth
***why does b>1 take so freaking long?...
"""
k = Rp * REARTH / (Rs * RSUN)
if max_only:
return 1 - mafn(k, b, u1, u2) # depends on [control=['if'], data=[]]
if np.size(b) == 1:
x = np.linspace(0, np.sqrt(1 - b ** 2), npts)
y = b
zs = np.sqrt(x ** 2 + y ** 2)
fs = mafn(k, zs, u1, u2) # returns array of shape (nks,nzs)
depth = 1 - fs # depends on [control=['if'], data=[]]
else:
xmax = np.sqrt(1 - b ** 2)
x = np.linspace(0, 1, npts) * xmax[:, Nones]
y = b[:, None]
zs = np.sqrt(x ** 2 + y ** 2)
fs = mafn(k, zs.ravel(), u1, u2)
if not force_1d:
fs = fs.reshape(size(k), *zs.shape) # depends on [control=['if'], data=[]]
depth = 1 - fs
meandepth = np.squeeze(depth.mean(axis=depth.ndim - 1))
return meandepth |
def expects(self, call_name):
    """Declare that *call_name* must be called.

    .. doctest::
        :hide:
        >>> import fudge
        >>> fudge.clear_expectations()
        >>> fudge.clear_calls()
    An error is raised at verification time if the method *call_name*
    was never invoked. I.E.::
        >>> session = Fake('session').expects('open').expects('close')
        >>> session.open()
        >>> fudge.verify()
        Traceback (most recent call last):
        ...
        AssertionError: fake:session.close() was not called
    .. note::
        To additionally verify call order, use
        :func:`fudge.Fake.remember_order`; each call declared via
        :func:`fudge.Fake.next_call` after ``expects(...)`` then joins the
        expected order. Calling ``expects()`` again for the same method is
        equivalent to declaring :func:`fudge.Fake.next_call`.
    """
    # A repeated declaration is shorthand for chaining another call.
    if call_name in self._declared_calls:
        return self.next_call(for_method=call_name)
    self._last_declared_call_name = call_name
    expected = ExpectedCall(self, call_name, call_order=self._expected_call_order)
    self._declare_call(call_name, expected)
    return self
constant[Expect a call.
.. doctest::
:hide:
>>> import fudge
>>> fudge.clear_expectations()
>>> fudge.clear_calls()
If the method *call_name* is never called, then raise an error. I.E.::
>>> session = Fake('session').expects('open').expects('close')
>>> session.open()
>>> fudge.verify()
Traceback (most recent call last):
...
AssertionError: fake:session.close() was not called
.. note::
If you want to also verify the order these calls are made in,
use :func:`fudge.Fake.remember_order`. When using :func:`fudge.Fake.next_call`
after ``expects(...)``, each new call will be part of the expected order
Declaring ``expects()`` multiple times is the same as
declaring :func:`fudge.Fake.next_call`
]
if compare[name[call_name] in name[self]._declared_calls] begin[:]
return[call[name[self].next_call, parameter[]]]
name[self]._last_declared_call_name assign[=] name[call_name]
variable[c] assign[=] call[name[ExpectedCall], parameter[name[self], name[call_name]]]
call[name[self]._declare_call, parameter[name[call_name], name[c]]]
return[name[self]] | keyword[def] identifier[expects] ( identifier[self] , identifier[call_name] ):
literal[string]
keyword[if] identifier[call_name] keyword[in] identifier[self] . identifier[_declared_calls] :
keyword[return] identifier[self] . identifier[next_call] ( identifier[for_method] = identifier[call_name] )
identifier[self] . identifier[_last_declared_call_name] = identifier[call_name]
identifier[c] = identifier[ExpectedCall] ( identifier[self] , identifier[call_name] , identifier[call_order] = identifier[self] . identifier[_expected_call_order] )
identifier[self] . identifier[_declare_call] ( identifier[call_name] , identifier[c] )
keyword[return] identifier[self] | def expects(self, call_name):
"""Expect a call.
.. doctest::
:hide:
>>> import fudge
>>> fudge.clear_expectations()
>>> fudge.clear_calls()
If the method *call_name* is never called, then raise an error. I.E.::
>>> session = Fake('session').expects('open').expects('close')
>>> session.open()
>>> fudge.verify()
Traceback (most recent call last):
...
AssertionError: fake:session.close() was not called
.. note::
If you want to also verify the order these calls are made in,
use :func:`fudge.Fake.remember_order`. When using :func:`fudge.Fake.next_call`
after ``expects(...)``, each new call will be part of the expected order
Declaring ``expects()`` multiple times is the same as
declaring :func:`fudge.Fake.next_call`
"""
if call_name in self._declared_calls:
return self.next_call(for_method=call_name) # depends on [control=['if'], data=['call_name']]
self._last_declared_call_name = call_name
c = ExpectedCall(self, call_name, call_order=self._expected_call_order)
self._declare_call(call_name, c)
return self |
def get_associated_resource(self, task):
    """
    Retrieve a resource associated with a task.
    Args:
        task: task dict
    Returns:
        tuple: task (updated), the entity found (dict)
    """
    if not task:
        raise HPOneViewUnknownType(MSG_INVALID_TASK)
    # Only task- and backup-category dicts carry an associated resource.
    if task['category'] not in ('tasks', 'backups'):
        # it is an error if type is not in obj, so let the except flow
        raise HPOneViewUnknownType(MSG_UNKNOWN_OBJECT_TYPE)
    task_type = task['type']
    if task_type == 'TaskResourceV2':
        resource_uri = task['associatedResource']['resourceUri']
        if resource_uri and resource_uri.startswith("/rest/appliance/support-dumps/"):
            # Support dumps return the raw URI instead of a fetched entity.
            return task, resource_uri
    elif task_type == 'BACKUP':
        # Refresh the task and take the backup's own URI as the resource.
        task = self._connection.get(task['taskUri'])
        resource_uri = task['uri']
    else:
        raise HPOneViewInvalidResource(MSG_TASK_TYPE_UNRECONIZED % task_type)
    entity = self._connection.get(resource_uri) if resource_uri else {}
    return task, entity
constant[
Retrieve a resource associated with a task.
Args:
task: task dict
Returns:
tuple: task (updated), the entity found (dict)
]
if <ast.UnaryOp object at 0x7da18f58cc10> begin[:]
<ast.Raise object at 0x7da18f58ca00>
if <ast.BoolOp object at 0x7da18f58d570> begin[:]
<ast.Raise object at 0x7da18f58ddb0>
if compare[call[name[task]][constant[type]] equal[==] constant[TaskResourceV2]] begin[:]
variable[resource_uri] assign[=] call[call[name[task]][constant[associatedResource]]][constant[resourceUri]]
if <ast.BoolOp object at 0x7da18f58d5a0> begin[:]
return[tuple[[<ast.Name object at 0x7da18f58e800>, <ast.Name object at 0x7da18f58e020>]]]
variable[entity] assign[=] dictionary[[], []]
if name[resource_uri] begin[:]
variable[entity] assign[=] call[name[self]._connection.get, parameter[name[resource_uri]]]
return[tuple[[<ast.Name object at 0x7da20e955b70>, <ast.Name object at 0x7da20e957a30>]]] | keyword[def] identifier[get_associated_resource] ( identifier[self] , identifier[task] ):
literal[string]
keyword[if] keyword[not] identifier[task] :
keyword[raise] identifier[HPOneViewUnknownType] ( identifier[MSG_INVALID_TASK] )
keyword[if] identifier[task] [ literal[string] ]!= literal[string] keyword[and] identifier[task] [ literal[string] ]!= literal[string] :
keyword[raise] identifier[HPOneViewUnknownType] ( identifier[MSG_UNKNOWN_OBJECT_TYPE] )
keyword[if] identifier[task] [ literal[string] ]== literal[string] :
identifier[resource_uri] = identifier[task] [ literal[string] ][ literal[string] ]
keyword[if] identifier[resource_uri] keyword[and] identifier[resource_uri] . identifier[startswith] ( literal[string] ):
keyword[return] identifier[task] , identifier[resource_uri]
keyword[elif] identifier[task] [ literal[string] ]== literal[string] :
identifier[task] = identifier[self] . identifier[_connection] . identifier[get] ( identifier[task] [ literal[string] ])
identifier[resource_uri] = identifier[task] [ literal[string] ]
keyword[else] :
keyword[raise] identifier[HPOneViewInvalidResource] ( identifier[MSG_TASK_TYPE_UNRECONIZED] % identifier[task] [ literal[string] ])
identifier[entity] ={}
keyword[if] identifier[resource_uri] :
identifier[entity] = identifier[self] . identifier[_connection] . identifier[get] ( identifier[resource_uri] )
keyword[return] identifier[task] , identifier[entity] | def get_associated_resource(self, task):
"""
Retrieve a resource associated with a task.
Args:
task: task dict
Returns:
tuple: task (updated), the entity found (dict)
"""
if not task:
raise HPOneViewUnknownType(MSG_INVALID_TASK) # depends on [control=['if'], data=[]]
if task['category'] != 'tasks' and task['category'] != 'backups':
# it is an error if type is not in obj, so let the except flow
raise HPOneViewUnknownType(MSG_UNKNOWN_OBJECT_TYPE) # depends on [control=['if'], data=[]]
if task['type'] == 'TaskResourceV2':
resource_uri = task['associatedResource']['resourceUri']
if resource_uri and resource_uri.startswith('/rest/appliance/support-dumps/'):
# Specific for support dumps
return (task, resource_uri) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif task['type'] == 'BACKUP':
task = self._connection.get(task['taskUri'])
resource_uri = task['uri'] # depends on [control=['if'], data=[]]
else:
raise HPOneViewInvalidResource(MSG_TASK_TYPE_UNRECONIZED % task['type'])
entity = {}
if resource_uri:
entity = self._connection.get(resource_uri) # depends on [control=['if'], data=[]]
return (task, entity) |
def plot_ray_tracing_individual(
        tracer, mask=None, extract_array_from_mask=False, zoom_around_mask=False, positions=None,
        should_plot_image_plane_image=False,
        should_plot_source_plane=False,
        should_plot_convergence=False,
        should_plot_potential=False,
        should_plot_deflections=False,
        units='arcsec',
        output_path=None, output_format='show'):
    """Plot individual quantities of a ray-tracing analysis.

    Each ``should_plot_*`` flag independently selects one quantity for
    plotting: the image-plane image, convergence, potential, source-plane
    image and the (y, x) deflection-angle maps.

    Parameters
    -----------
    tracer
        The tracer whose quantities are plotted.
    mask : optional
        Mask applied to the arrays before plotting.
    extract_array_from_mask : bool
        Whether to extract the masked region of each array before plotting.
    zoom_around_mask : bool
        Whether to zoom the figure around the masked region.
    positions : optional
        Image-plane positions overlaid on the image-plane image.
    units : str
        The units of the figure axes (e.g. 'arcsec').
    output_path : str
        The path where figures are output if the output format is a file
        format (e.g. png, fits).
    output_format : str
        How figures are output. File formats (e.g. png, fits) write to
        disk; 'show' displays them in the python interpreter window.
    """
    if should_plot_image_plane_image:
        plot_image_plane_image(
            tracer=tracer, mask=mask, extract_array_from_mask=extract_array_from_mask,
            zoom_around_mask=zoom_around_mask, positions=positions,
            units=units,
            output_path=output_path, output_format=output_format)
    if should_plot_convergence:
        plot_convergence(
            tracer=tracer, mask=mask, extract_array_from_mask=extract_array_from_mask,
            zoom_around_mask=zoom_around_mask,
            units=units,
            output_path=output_path, output_format=output_format)
    if should_plot_potential:
        plot_potential(
            tracer=tracer, mask=mask, extract_array_from_mask=extract_array_from_mask,
            zoom_around_mask=zoom_around_mask,
            units=units,
            output_path=output_path, output_format=output_format)
    if should_plot_source_plane:
        plane_plotters.plot_plane_image(
            plane=tracer.source_plane, positions=None, plot_grid=False,
            units=units,
            output_path=output_path, output_filename='tracer_source_plane', output_format=output_format)
    if should_plot_deflections:
        # FIX: the flag was previously tested twice in two separate,
        # identical conditionals; plot both components under one check.
        plot_deflections_y(
            tracer=tracer, mask=mask, extract_array_from_mask=extract_array_from_mask,
            zoom_around_mask=zoom_around_mask,
            units=units,
            output_path=output_path, output_format=output_format)
        plot_deflections_x(
            tracer=tracer, mask=mask, extract_array_from_mask=extract_array_from_mask,
            zoom_around_mask=zoom_around_mask,
            units=units,
            output_path=output_path, output_format=output_format)
constant[Plot the observed _tracer of an analysis, using the *CCD* class object.
The visualization and output type can be fully customized.
Parameters
-----------
tracer : autolens.ccd.tracer.CCD
Class containing the _tracer, noise_map-mappers and PSF that are to be plotted.
The font size of the figure ylabel.
output_path : str
The path where the _tracer is output if the output_type is a file format (e.g. png, fits)
output_format : str
How the _tracer is output. File formats (e.g. png, fits) output the _tracer to harddisk. 'show' displays the _tracer in the python interpreter window.
]
if name[should_plot_image_plane_image] begin[:]
call[name[plot_image_plane_image], parameter[]]
if name[should_plot_convergence] begin[:]
call[name[plot_convergence], parameter[]]
if name[should_plot_potential] begin[:]
call[name[plot_potential], parameter[]]
if name[should_plot_source_plane] begin[:]
call[name[plane_plotters].plot_plane_image, parameter[]]
if name[should_plot_deflections] begin[:]
call[name[plot_deflections_y], parameter[]]
if name[should_plot_deflections] begin[:]
call[name[plot_deflections_x], parameter[]] | keyword[def] identifier[plot_ray_tracing_individual] (
identifier[tracer] , identifier[mask] = keyword[None] , identifier[extract_array_from_mask] = keyword[False] , identifier[zoom_around_mask] = keyword[False] , identifier[positions] = keyword[None] ,
identifier[should_plot_image_plane_image] = keyword[False] ,
identifier[should_plot_source_plane] = keyword[False] ,
identifier[should_plot_convergence] = keyword[False] ,
identifier[should_plot_potential] = keyword[False] ,
identifier[should_plot_deflections] = keyword[False] ,
identifier[units] = literal[string] ,
identifier[output_path] = keyword[None] , identifier[output_format] = literal[string] ):
literal[string]
keyword[if] identifier[should_plot_image_plane_image] :
identifier[plot_image_plane_image] (
identifier[tracer] = identifier[tracer] , identifier[mask] = identifier[mask] , identifier[extract_array_from_mask] = identifier[extract_array_from_mask] ,
identifier[zoom_around_mask] = identifier[zoom_around_mask] , identifier[positions] = identifier[positions] ,
identifier[units] = identifier[units] ,
identifier[output_path] = identifier[output_path] , identifier[output_format] = identifier[output_format] )
keyword[if] identifier[should_plot_convergence] :
identifier[plot_convergence] (
identifier[tracer] = identifier[tracer] , identifier[mask] = identifier[mask] , identifier[extract_array_from_mask] = identifier[extract_array_from_mask] ,
identifier[zoom_around_mask] = identifier[zoom_around_mask] ,
identifier[units] = identifier[units] ,
identifier[output_path] = identifier[output_path] , identifier[output_format] = identifier[output_format] )
keyword[if] identifier[should_plot_potential] :
identifier[plot_potential] (
identifier[tracer] = identifier[tracer] , identifier[mask] = identifier[mask] , identifier[extract_array_from_mask] = identifier[extract_array_from_mask] ,
identifier[zoom_around_mask] = identifier[zoom_around_mask] ,
identifier[units] = identifier[units] ,
identifier[output_path] = identifier[output_path] , identifier[output_format] = identifier[output_format] )
keyword[if] identifier[should_plot_source_plane] :
identifier[plane_plotters] . identifier[plot_plane_image] (
identifier[plane] = identifier[tracer] . identifier[source_plane] , identifier[positions] = keyword[None] , identifier[plot_grid] = keyword[False] ,
identifier[units] = identifier[units] ,
identifier[output_path] = identifier[output_path] , identifier[output_filename] = literal[string] , identifier[output_format] = identifier[output_format] )
keyword[if] identifier[should_plot_deflections] :
identifier[plot_deflections_y] (
identifier[tracer] = identifier[tracer] , identifier[mask] = identifier[mask] , identifier[extract_array_from_mask] = identifier[extract_array_from_mask] ,
identifier[zoom_around_mask] = identifier[zoom_around_mask] ,
identifier[units] = identifier[units] ,
identifier[output_path] = identifier[output_path] , identifier[output_format] = identifier[output_format] )
keyword[if] identifier[should_plot_deflections] :
identifier[plot_deflections_x] (
identifier[tracer] = identifier[tracer] , identifier[mask] = identifier[mask] , identifier[extract_array_from_mask] = identifier[extract_array_from_mask] ,
identifier[zoom_around_mask] = identifier[zoom_around_mask] ,
identifier[units] = identifier[units] ,
identifier[output_path] = identifier[output_path] , identifier[output_format] = identifier[output_format] ) | def plot_ray_tracing_individual(tracer, mask=None, extract_array_from_mask=False, zoom_around_mask=False, positions=None, should_plot_image_plane_image=False, should_plot_source_plane=False, should_plot_convergence=False, should_plot_potential=False, should_plot_deflections=False, units='arcsec', output_path=None, output_format='show'):
"""Plot the observed _tracer of an analysis, using the *CCD* class object.
The visualization and output type can be fully customized.
Parameters
-----------
tracer : autolens.ccd.tracer.CCD
Class containing the _tracer, noise_map-mappers and PSF that are to be plotted.
The font size of the figure ylabel.
output_path : str
The path where the _tracer is output if the output_type is a file format (e.g. png, fits)
output_format : str
How the _tracer is output. File formats (e.g. png, fits) output the _tracer to harddisk. 'show' displays the _tracer in the python interpreter window.
"""
if should_plot_image_plane_image:
plot_image_plane_image(tracer=tracer, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, positions=positions, units=units, output_path=output_path, output_format=output_format) # depends on [control=['if'], data=[]]
if should_plot_convergence:
plot_convergence(tracer=tracer, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, units=units, output_path=output_path, output_format=output_format) # depends on [control=['if'], data=[]]
if should_plot_potential:
plot_potential(tracer=tracer, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, units=units, output_path=output_path, output_format=output_format) # depends on [control=['if'], data=[]]
if should_plot_source_plane:
plane_plotters.plot_plane_image(plane=tracer.source_plane, positions=None, plot_grid=False, units=units, output_path=output_path, output_filename='tracer_source_plane', output_format=output_format) # depends on [control=['if'], data=[]]
if should_plot_deflections:
plot_deflections_y(tracer=tracer, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, units=units, output_path=output_path, output_format=output_format) # depends on [control=['if'], data=[]]
if should_plot_deflections:
plot_deflections_x(tracer=tracer, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, units=units, output_path=output_path, output_format=output_format) # depends on [control=['if'], data=[]] |
def same_network(atree, btree) -> bool:
    """True if given trees share the same structure of powernodes,
    independently of (power)node names,
    and same edge topology between (power)nodes.
    """
    # Hierarchy is checked first; topology is only compared when it matches.
    hierarchy_matches = same_hierarchy(atree, btree)
    return hierarchy_matches and same_topology(atree, btree)
constant[True if given trees share the same structure of powernodes,
independently of (power)node names,
and same edge topology between (power)nodes.
]
return[<ast.BoolOp object at 0x7da207f03b80>] | keyword[def] identifier[same_network] ( identifier[atree] , identifier[btree] )-> identifier[bool] :
literal[string]
keyword[return] identifier[same_hierarchy] ( identifier[atree] , identifier[btree] ) keyword[and] identifier[same_topology] ( identifier[atree] , identifier[btree] ) | def same_network(atree, btree) -> bool:
"""True if given trees share the same structure of powernodes,
independently of (power)node names,
and same edge topology between (power)nodes.
"""
return same_hierarchy(atree, btree) and same_topology(atree, btree) |
def run_create_admin(*args):
    '''
    Create the default administrator account, unless it already exists.
    '''
    # NOTE(review): credentials are hard-coded in source; consider loading
    # them from configuration or the environment instead of shipping a
    # default password.
    post_data = {
        'user_name': 'giser',
        'user_email': 'giser@osgeo.cn',
        'user_pass': '131322',
        'role': '3300',
    }
    if MUser.get_by_name(post_data['user_name']):
        # FIX: report the actual user name instead of a duplicated literal,
        # so the message stays correct if post_data changes.
        print('User {user_name} already exists.'.format(
            user_name=post_data['user_name']))
    else:
        MUser.create_user(post_data)
constant[
creating the default administrator.
]
variable[post_data] assign[=] dictionary[[<ast.Constant object at 0x7da1b04f5750>, <ast.Constant object at 0x7da1b04f4c40>, <ast.Constant object at 0x7da1b04f5a20>, <ast.Constant object at 0x7da1b04f61a0>], [<ast.Constant object at 0x7da1b04f59c0>, <ast.Constant object at 0x7da1b04f6110>, <ast.Constant object at 0x7da1b04f4580>, <ast.Constant object at 0x7da1b04f4970>]]
if call[name[MUser].get_by_name, parameter[call[name[post_data]][constant[user_name]]]] begin[:]
call[name[print], parameter[call[constant[User {user_name} already exists.].format, parameter[]]]] | keyword[def] identifier[run_create_admin] (* identifier[args] ):
literal[string]
identifier[post_data] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
keyword[if] identifier[MUser] . identifier[get_by_name] ( identifier[post_data] [ literal[string] ]):
identifier[print] ( literal[string] . identifier[format] ( identifier[user_name] = literal[string] ))
keyword[else] :
identifier[MUser] . identifier[create_user] ( identifier[post_data] ) | def run_create_admin(*args):
"""
creating the default administrator.
"""
post_data = {'user_name': 'giser', 'user_email': 'giser@osgeo.cn', 'user_pass': '131322', 'role': '3300'}
if MUser.get_by_name(post_data['user_name']):
print('User {user_name} already exists.'.format(user_name='giser')) # depends on [control=['if'], data=[]]
else:
MUser.create_user(post_data) |
def path_encloses_pt(pt, opt, path):
    """Return True if *pt* is a point enclosed by *path*.

    *path* must be a Path object satisfying ``path.isclosed() == True``.
    *opt* is a point known to be OUTSIDE *path*; a ray from *pt* to *opt*
    crosses the boundary an odd number of times iff *pt* is inside
    (even-odd / ray-casting rule).
    """
    assert path.isclosed()
    intersections = Path(Line(pt, opt)).intersect(path)
    # Idiom: return the parity test directly instead of if/else True/False.
    return len(intersections) % 2 == 1
constant[returns true if pt is a point enclosed by path (which must be a Path
object satisfying path.isclosed==True). opt is a point you know is
NOT enclosed by path.]
assert[call[name[path].isclosed, parameter[]]]
variable[intersections] assign[=] call[call[name[Path], parameter[call[name[Line], parameter[name[pt], name[opt]]]]].intersect, parameter[name[path]]]
if binary_operation[call[name[len], parameter[name[intersections]]] <ast.Mod object at 0x7da2590d6920> constant[2]] begin[:]
return[constant[True]] | keyword[def] identifier[path_encloses_pt] ( identifier[pt] , identifier[opt] , identifier[path] ):
literal[string]
keyword[assert] identifier[path] . identifier[isclosed] ()
identifier[intersections] = identifier[Path] ( identifier[Line] ( identifier[pt] , identifier[opt] )). identifier[intersect] ( identifier[path] )
keyword[if] identifier[len] ( identifier[intersections] )% literal[int] :
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False] | def path_encloses_pt(pt, opt, path):
"""returns true if pt is a point enclosed by path (which must be a Path
object satisfying path.isclosed==True). opt is a point you know is
NOT enclosed by path."""
assert path.isclosed()
intersections = Path(Line(pt, opt)).intersect(path)
if len(intersections) % 2:
return True # depends on [control=['if'], data=[]]
else:
return False |
def from_envvar(self, variable_name: str, silent: bool=False) -> None:
    """Load the configuration from a location specified in the environment.
    This will load a cfg file using :meth:`from_pyfile` from the
    location specified in the environment, for example the two blocks
    below are equivalent.
    .. code-block:: python
        app.config.from_envvar('CONFIG')
    .. code-block:: python
        filename = os.environ['CONFIG']
        app.config.from_pyfile(filename)

    Arguments:
        variable_name: Name of the environment variable holding the path
            to the configuration file.
        silent: If True, a missing environment variable is ignored and the
            method returns without loading anything.

    Raises:
        RuntimeError: If the variable is absent and *silent* is False.
    """
    value = os.environ.get(variable_name)
    if value is None:
        if silent:
            # BUGFIX: previously fell through and called from_pyfile(None)
            # when the variable was missing and silent=True.
            return None
        raise RuntimeError(
            f"Environment variable {variable_name} is not present, cannot load config",
        )
    return self.from_pyfile(value)
constant[Load the configuration from a location specified in the environment.
This will load a cfg file using :meth:`from_pyfile` from the
location specified in the environment, for example the two blocks
below are equivalent.
.. code-block:: python
app.config.from_envvar('CONFIG')
.. code-block:: python
filename = os.environ['CONFIG']
app.config.from_pyfile(filename)
]
variable[value] assign[=] call[name[os].environ.get, parameter[name[variable_name]]]
if <ast.BoolOp object at 0x7da18f58c280> begin[:]
<ast.Raise object at 0x7da18f58c250>
return[call[name[self].from_pyfile, parameter[name[value]]]] | keyword[def] identifier[from_envvar] ( identifier[self] , identifier[variable_name] : identifier[str] , identifier[silent] : identifier[bool] = keyword[False] )-> keyword[None] :
literal[string]
identifier[value] = identifier[os] . identifier[environ] . identifier[get] ( identifier[variable_name] )
keyword[if] identifier[value] keyword[is] keyword[None] keyword[and] keyword[not] identifier[silent] :
keyword[raise] identifier[RuntimeError] (
literal[string] ,
)
keyword[return] identifier[self] . identifier[from_pyfile] ( identifier[value] ) | def from_envvar(self, variable_name: str, silent: bool=False) -> None:
"""Load the configuration from a location specified in the environment.
This will load a cfg file using :meth:`from_pyfile` from the
location specified in the environment, for example the two blocks
below are equivalent.
.. code-block:: python
app.config.from_envvar('CONFIG')
.. code-block:: python
filename = os.environ['CONFIG']
app.config.from_pyfile(filename)
"""
value = os.environ.get(variable_name)
if value is None and (not silent):
raise RuntimeError(f'Environment variable {variable_name} is not present, cannot load config') # depends on [control=['if'], data=[]]
return self.from_pyfile(value) |
def qteReplayKeysequenceHook(self, msgObj):
    """
    Replay the recorded macro key sequence.

    No-op when nothing has been recorded yet, or while a recording is
    still in progress.
    """
    keyseq = self.recorded_keysequence
    # Same short-circuit order as before: emptiness first, then the
    # recording flag.
    if keyseq.toString() == '' or self.qteRecording:
        return
    # Simulate the key presses.
    self.qteMain.qteEmulateKeypresses(keyseq)
constant[
Replay the macro sequence.
]
if compare[call[name[self].recorded_keysequence.toString, parameter[]] equal[==] constant[]] begin[:]
return[None]
if name[self].qteRecording begin[:]
return[None]
call[name[self].qteMain.qteEmulateKeypresses, parameter[name[self].recorded_keysequence]] | keyword[def] identifier[qteReplayKeysequenceHook] ( identifier[self] , identifier[msgObj] ):
literal[string]
keyword[if] identifier[self] . identifier[recorded_keysequence] . identifier[toString] ()== literal[string] :
keyword[return]
keyword[if] identifier[self] . identifier[qteRecording] :
keyword[return]
identifier[self] . identifier[qteMain] . identifier[qteEmulateKeypresses] ( identifier[self] . identifier[recorded_keysequence] ) | def qteReplayKeysequenceHook(self, msgObj):
"""
Replay the macro sequence.
"""
# Quit if there is nothing to replay.
if self.recorded_keysequence.toString() == '':
return # depends on [control=['if'], data=[]]
# Stop the recording before the replay, if necessary.
if self.qteRecording:
return # depends on [control=['if'], data=[]]
# Simulate the key presses.
self.qteMain.qteEmulateKeypresses(self.recorded_keysequence) |
def write(self, parallel=True):
"""
Writes the configuration to disk.
"""
# Write the Solution files
solutions = sorted(self._solutions, key=lambda x: x.Name)
with VSGWriteCommand('Writing VSG Solution', solutions, parallel) as command:
command.execute()
# Write the Projects files
projects = set(sorted((p for s in solutions for p in s.Projects), key=lambda x: x.Name))
with VSGWriteCommand('Writing VSG Projects', projects, parallel) as command:
command.execute()
# Register the registerables
registerables = set(sorted((p for s in solutions for p in s.Projects), key=lambda x: x.Name))
with VSGRegisterCommand('Registering Project Registerables', registerables) as command:
command.execute() | def function[write, parameter[self, parallel]]:
constant[
Writes the configuration to disk.
]
variable[solutions] assign[=] call[name[sorted], parameter[name[self]._solutions]]
with call[name[VSGWriteCommand], parameter[constant[Writing VSG Solution], name[solutions], name[parallel]]] begin[:]
call[name[command].execute, parameter[]]
variable[projects] assign[=] call[name[set], parameter[call[name[sorted], parameter[<ast.GeneratorExp object at 0x7da1b255a140>]]]]
with call[name[VSGWriteCommand], parameter[constant[Writing VSG Projects], name[projects], name[parallel]]] begin[:]
call[name[command].execute, parameter[]]
variable[registerables] assign[=] call[name[set], parameter[call[name[sorted], parameter[<ast.GeneratorExp object at 0x7da1b25964a0>]]]]
with call[name[VSGRegisterCommand], parameter[constant[Registering Project Registerables], name[registerables]]] begin[:]
call[name[command].execute, parameter[]] | keyword[def] identifier[write] ( identifier[self] , identifier[parallel] = keyword[True] ):
literal[string]
identifier[solutions] = identifier[sorted] ( identifier[self] . identifier[_solutions] , identifier[key] = keyword[lambda] identifier[x] : identifier[x] . identifier[Name] )
keyword[with] identifier[VSGWriteCommand] ( literal[string] , identifier[solutions] , identifier[parallel] ) keyword[as] identifier[command] :
identifier[command] . identifier[execute] ()
identifier[projects] = identifier[set] ( identifier[sorted] (( identifier[p] keyword[for] identifier[s] keyword[in] identifier[solutions] keyword[for] identifier[p] keyword[in] identifier[s] . identifier[Projects] ), identifier[key] = keyword[lambda] identifier[x] : identifier[x] . identifier[Name] ))
keyword[with] identifier[VSGWriteCommand] ( literal[string] , identifier[projects] , identifier[parallel] ) keyword[as] identifier[command] :
identifier[command] . identifier[execute] ()
identifier[registerables] = identifier[set] ( identifier[sorted] (( identifier[p] keyword[for] identifier[s] keyword[in] identifier[solutions] keyword[for] identifier[p] keyword[in] identifier[s] . identifier[Projects] ), identifier[key] = keyword[lambda] identifier[x] : identifier[x] . identifier[Name] ))
keyword[with] identifier[VSGRegisterCommand] ( literal[string] , identifier[registerables] ) keyword[as] identifier[command] :
identifier[command] . identifier[execute] () | def write(self, parallel=True):
"""
Writes the configuration to disk.
"""
# Write the Solution files
solutions = sorted(self._solutions, key=lambda x: x.Name)
with VSGWriteCommand('Writing VSG Solution', solutions, parallel) as command:
command.execute() # depends on [control=['with'], data=['command']]
# Write the Projects files
projects = set(sorted((p for s in solutions for p in s.Projects), key=lambda x: x.Name))
with VSGWriteCommand('Writing VSG Projects', projects, parallel) as command:
command.execute() # depends on [control=['with'], data=['command']]
# Register the registerables
registerables = set(sorted((p for s in solutions for p in s.Projects), key=lambda x: x.Name))
with VSGRegisterCommand('Registering Project Registerables', registerables) as command:
command.execute() # depends on [control=['with'], data=['command']] |
def send_mail_worker(config, mail, event):
"""Worker task to send out an email, which is a blocking process unless it is threaded"""
log = ""
try:
if config.get('ssl', True):
server = SMTP_SSL(config['server'], port=config['port'], timeout=30)
else:
server = SMTP(config['server'], port=config['port'], timeout=30)
if config['tls']:
log += 'Starting TLS\n'
server.starttls()
if config['username'] != '':
log += 'Logging in with ' + str(config['username']) + "\n"
server.login(config['username'], config['password'])
else:
log += 'No username, trying anonymous access\n'
log += 'Sending Mail\n'
response_send = server.send_message(mail)
server.quit()
except timeout as e:
log += 'Could not send email: ' + str(e) + "\n"
return False, log, event
log += 'Server response:' + str(response_send)
return True, log, event | def function[send_mail_worker, parameter[config, mail, event]]:
constant[Worker task to send out an email, which is a blocking process unless it is threaded]
variable[log] assign[=] constant[]
<ast.Try object at 0x7da1b0ff2e30>
<ast.AugAssign object at 0x7da1b0ff3fa0>
return[tuple[[<ast.Constant object at 0x7da1b0f477c0>, <ast.Name object at 0x7da1b0f44d00>, <ast.Name object at 0x7da1b0f47dc0>]]] | keyword[def] identifier[send_mail_worker] ( identifier[config] , identifier[mail] , identifier[event] ):
literal[string]
identifier[log] = literal[string]
keyword[try] :
keyword[if] identifier[config] . identifier[get] ( literal[string] , keyword[True] ):
identifier[server] = identifier[SMTP_SSL] ( identifier[config] [ literal[string] ], identifier[port] = identifier[config] [ literal[string] ], identifier[timeout] = literal[int] )
keyword[else] :
identifier[server] = identifier[SMTP] ( identifier[config] [ literal[string] ], identifier[port] = identifier[config] [ literal[string] ], identifier[timeout] = literal[int] )
keyword[if] identifier[config] [ literal[string] ]:
identifier[log] += literal[string]
identifier[server] . identifier[starttls] ()
keyword[if] identifier[config] [ literal[string] ]!= literal[string] :
identifier[log] += literal[string] + identifier[str] ( identifier[config] [ literal[string] ])+ literal[string]
identifier[server] . identifier[login] ( identifier[config] [ literal[string] ], identifier[config] [ literal[string] ])
keyword[else] :
identifier[log] += literal[string]
identifier[log] += literal[string]
identifier[response_send] = identifier[server] . identifier[send_message] ( identifier[mail] )
identifier[server] . identifier[quit] ()
keyword[except] identifier[timeout] keyword[as] identifier[e] :
identifier[log] += literal[string] + identifier[str] ( identifier[e] )+ literal[string]
keyword[return] keyword[False] , identifier[log] , identifier[event]
identifier[log] += literal[string] + identifier[str] ( identifier[response_send] )
keyword[return] keyword[True] , identifier[log] , identifier[event] | def send_mail_worker(config, mail, event):
"""Worker task to send out an email, which is a blocking process unless it is threaded"""
log = ''
try:
if config.get('ssl', True):
server = SMTP_SSL(config['server'], port=config['port'], timeout=30) # depends on [control=['if'], data=[]]
else:
server = SMTP(config['server'], port=config['port'], timeout=30)
if config['tls']:
log += 'Starting TLS\n'
server.starttls() # depends on [control=['if'], data=[]]
if config['username'] != '':
log += 'Logging in with ' + str(config['username']) + '\n'
server.login(config['username'], config['password']) # depends on [control=['if'], data=[]]
else:
log += 'No username, trying anonymous access\n'
log += 'Sending Mail\n'
response_send = server.send_message(mail)
server.quit() # depends on [control=['try'], data=[]]
except timeout as e:
log += 'Could not send email: ' + str(e) + '\n'
return (False, log, event) # depends on [control=['except'], data=['e']]
log += 'Server response:' + str(response_send)
return (True, log, event) |
def register_entity(self, entity_value, entity_type, alias_of=None):
"""
Register an entity to be tagged in potential parse results
Args:
entity_value(str): the value/proper name of an entity instance (Ex: "The Big Bang Theory")
entity_type(str): the type/tag of an entity instance (Ex: "Television Show")
"""
if alias_of:
self.trie.insert(entity_value.lower(), data=(alias_of, entity_type))
else:
self.trie.insert(entity_value.lower(), data=(entity_value, entity_type))
self.trie.insert(entity_type.lower(), data=(entity_type, 'Concept')) | def function[register_entity, parameter[self, entity_value, entity_type, alias_of]]:
constant[
Register an entity to be tagged in potential parse results
Args:
entity_value(str): the value/proper name of an entity instance (Ex: "The Big Bang Theory")
entity_type(str): the type/tag of an entity instance (Ex: "Television Show")
]
if name[alias_of] begin[:]
call[name[self].trie.insert, parameter[call[name[entity_value].lower, parameter[]]]] | keyword[def] identifier[register_entity] ( identifier[self] , identifier[entity_value] , identifier[entity_type] , identifier[alias_of] = keyword[None] ):
literal[string]
keyword[if] identifier[alias_of] :
identifier[self] . identifier[trie] . identifier[insert] ( identifier[entity_value] . identifier[lower] (), identifier[data] =( identifier[alias_of] , identifier[entity_type] ))
keyword[else] :
identifier[self] . identifier[trie] . identifier[insert] ( identifier[entity_value] . identifier[lower] (), identifier[data] =( identifier[entity_value] , identifier[entity_type] ))
identifier[self] . identifier[trie] . identifier[insert] ( identifier[entity_type] . identifier[lower] (), identifier[data] =( identifier[entity_type] , literal[string] )) | def register_entity(self, entity_value, entity_type, alias_of=None):
"""
Register an entity to be tagged in potential parse results
Args:
entity_value(str): the value/proper name of an entity instance (Ex: "The Big Bang Theory")
entity_type(str): the type/tag of an entity instance (Ex: "Television Show")
"""
if alias_of:
self.trie.insert(entity_value.lower(), data=(alias_of, entity_type)) # depends on [control=['if'], data=[]]
else:
self.trie.insert(entity_value.lower(), data=(entity_value, entity_type))
self.trie.insert(entity_type.lower(), data=(entity_type, 'Concept')) |
def listen(self, max_seconds=30):
"""Waits to receive up to two bytes for up to max_seconds"""
if not self.connection:
self.connect()
start = time.time()
conn, _, err = select([self.connection], [], [
self.connection], max_seconds)
try:
if len(err) > 0:
raise socket.error("Couldn't open socket")
message = b''
while True:
if time.time() - start > max_seconds:
raise socket.error(
"Timeout of %s seconds waiting for W&B process" % max_seconds)
res = self.connection.recv(1024)
term = res.find(b'\0')
if term != -1:
message += res[:term]
break
else:
message += res
message = json.loads(message.decode('utf8'))
if message['status'] == 'done':
return True, None
elif message['status'] == 'ready':
return True, message
elif message['status'] == 'launch_error':
return False, None
else:
raise socket.error("Invalid status: %s" % message['status'])
except (socket.error, ValueError) as e:
util.sentry_exc(e)
return False, None | def function[listen, parameter[self, max_seconds]]:
constant[Waits to receive up to two bytes for up to max_seconds]
if <ast.UnaryOp object at 0x7da1b07f5300> begin[:]
call[name[self].connect, parameter[]]
variable[start] assign[=] call[name[time].time, parameter[]]
<ast.Tuple object at 0x7da1b07f5060> assign[=] call[name[select], parameter[list[[<ast.Attribute object at 0x7da1b07f4be0>]], list[[]], list[[<ast.Attribute object at 0x7da1b07f6440>]], name[max_seconds]]]
<ast.Try object at 0x7da1b07f4640> | keyword[def] identifier[listen] ( identifier[self] , identifier[max_seconds] = literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[connection] :
identifier[self] . identifier[connect] ()
identifier[start] = identifier[time] . identifier[time] ()
identifier[conn] , identifier[_] , identifier[err] = identifier[select] ([ identifier[self] . identifier[connection] ],[],[
identifier[self] . identifier[connection] ], identifier[max_seconds] )
keyword[try] :
keyword[if] identifier[len] ( identifier[err] )> literal[int] :
keyword[raise] identifier[socket] . identifier[error] ( literal[string] )
identifier[message] = literal[string]
keyword[while] keyword[True] :
keyword[if] identifier[time] . identifier[time] ()- identifier[start] > identifier[max_seconds] :
keyword[raise] identifier[socket] . identifier[error] (
literal[string] % identifier[max_seconds] )
identifier[res] = identifier[self] . identifier[connection] . identifier[recv] ( literal[int] )
identifier[term] = identifier[res] . identifier[find] ( literal[string] )
keyword[if] identifier[term] !=- literal[int] :
identifier[message] += identifier[res] [: identifier[term] ]
keyword[break]
keyword[else] :
identifier[message] += identifier[res]
identifier[message] = identifier[json] . identifier[loads] ( identifier[message] . identifier[decode] ( literal[string] ))
keyword[if] identifier[message] [ literal[string] ]== literal[string] :
keyword[return] keyword[True] , keyword[None]
keyword[elif] identifier[message] [ literal[string] ]== literal[string] :
keyword[return] keyword[True] , identifier[message]
keyword[elif] identifier[message] [ literal[string] ]== literal[string] :
keyword[return] keyword[False] , keyword[None]
keyword[else] :
keyword[raise] identifier[socket] . identifier[error] ( literal[string] % identifier[message] [ literal[string] ])
keyword[except] ( identifier[socket] . identifier[error] , identifier[ValueError] ) keyword[as] identifier[e] :
identifier[util] . identifier[sentry_exc] ( identifier[e] )
keyword[return] keyword[False] , keyword[None] | def listen(self, max_seconds=30):
"""Waits to receive up to two bytes for up to max_seconds"""
if not self.connection:
self.connect() # depends on [control=['if'], data=[]]
start = time.time()
(conn, _, err) = select([self.connection], [], [self.connection], max_seconds)
try:
if len(err) > 0:
raise socket.error("Couldn't open socket") # depends on [control=['if'], data=[]]
message = b''
while True:
if time.time() - start > max_seconds:
raise socket.error('Timeout of %s seconds waiting for W&B process' % max_seconds) # depends on [control=['if'], data=['max_seconds']]
res = self.connection.recv(1024)
term = res.find(b'\x00')
if term != -1:
message += res[:term]
break # depends on [control=['if'], data=['term']]
else:
message += res # depends on [control=['while'], data=[]]
message = json.loads(message.decode('utf8'))
if message['status'] == 'done':
return (True, None) # depends on [control=['if'], data=[]]
elif message['status'] == 'ready':
return (True, message) # depends on [control=['if'], data=[]]
elif message['status'] == 'launch_error':
return (False, None) # depends on [control=['if'], data=[]]
else:
raise socket.error('Invalid status: %s' % message['status']) # depends on [control=['try'], data=[]]
except (socket.error, ValueError) as e:
util.sentry_exc(e)
return (False, None) # depends on [control=['except'], data=['e']] |
def GetDefault(self, container=None):
"""Return boolean value."""
return rdfvalue.RDFBool(
super(ProtoBoolean, self).GetDefault(container=container)) | def function[GetDefault, parameter[self, container]]:
constant[Return boolean value.]
return[call[name[rdfvalue].RDFBool, parameter[call[call[name[super], parameter[name[ProtoBoolean], name[self]]].GetDefault, parameter[]]]]] | keyword[def] identifier[GetDefault] ( identifier[self] , identifier[container] = keyword[None] ):
literal[string]
keyword[return] identifier[rdfvalue] . identifier[RDFBool] (
identifier[super] ( identifier[ProtoBoolean] , identifier[self] ). identifier[GetDefault] ( identifier[container] = identifier[container] )) | def GetDefault(self, container=None):
"""Return boolean value."""
return rdfvalue.RDFBool(super(ProtoBoolean, self).GetDefault(container=container)) |
def main(argv):
"""
Main program.
@return: none
"""
global g_script_name
global g_parse_log_path
g_script_name = os.path.basename(argv[0])
parse_args(argv)
if (g_parse_log_path is None):
print("")
print("ERROR: -f not specified")
usage()
d = Dataset(g_parse_log_path)
d.parse()
d.emit_header()
for i in range(0, g_num_rows):
d.emit_one_row() | def function[main, parameter[argv]]:
constant[
Main program.
@return: none
]
<ast.Global object at 0x7da207f02a70>
<ast.Global object at 0x7da207f01690>
variable[g_script_name] assign[=] call[name[os].path.basename, parameter[call[name[argv]][constant[0]]]]
call[name[parse_args], parameter[name[argv]]]
if compare[name[g_parse_log_path] is constant[None]] begin[:]
call[name[print], parameter[constant[]]]
call[name[print], parameter[constant[ERROR: -f not specified]]]
call[name[usage], parameter[]]
variable[d] assign[=] call[name[Dataset], parameter[name[g_parse_log_path]]]
call[name[d].parse, parameter[]]
call[name[d].emit_header, parameter[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], name[g_num_rows]]]] begin[:]
call[name[d].emit_one_row, parameter[]] | keyword[def] identifier[main] ( identifier[argv] ):
literal[string]
keyword[global] identifier[g_script_name]
keyword[global] identifier[g_parse_log_path]
identifier[g_script_name] = identifier[os] . identifier[path] . identifier[basename] ( identifier[argv] [ literal[int] ])
identifier[parse_args] ( identifier[argv] )
keyword[if] ( identifier[g_parse_log_path] keyword[is] keyword[None] ):
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[usage] ()
identifier[d] = identifier[Dataset] ( identifier[g_parse_log_path] )
identifier[d] . identifier[parse] ()
identifier[d] . identifier[emit_header] ()
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[g_num_rows] ):
identifier[d] . identifier[emit_one_row] () | def main(argv):
"""
Main program.
@return: none
"""
global g_script_name
global g_parse_log_path
g_script_name = os.path.basename(argv[0])
parse_args(argv)
if g_parse_log_path is None:
print('')
print('ERROR: -f not specified')
usage() # depends on [control=['if'], data=[]]
d = Dataset(g_parse_log_path)
d.parse()
d.emit_header()
for i in range(0, g_num_rows):
d.emit_one_row() # depends on [control=['for'], data=[]] |
def _open_response(self, objects, namespace, pull_type, **params):
"""
Build an open... response once the objects have been extracted from
the repository.
"""
max_obj_cnt = params['MaxObjectCount']
if max_obj_cnt is None:
max_obj_cnt = _DEFAULT_MAX_OBJECT_COUNT
default_server_timeout = 40
timeout = default_server_timeout if params['OperationTimeout'] is None \
else params['OperationTimeout']
if len(objects) <= max_obj_cnt:
eos = u'TRUE'
context_id = ""
rtn_inst_names = objects
else:
eos = u'FALSE'
context_id = self._create_contextid()
# TODO:ks Future. Use the timeout along with response delay. Then
# user could timeout pulls. This means adding timer test to
# pulls and close. Timer should be used to close old contexts
# also.
self.enumeration_contexts[context_id] = {'pull_type': pull_type,
'data': objects,
'namespace': namespace,
'time': time.clock(),
'interoptimeout': timeout}
rtn_inst_names = objects[0:max_obj_cnt]
del objects[0: max_obj_cnt]
return self._make_pull_imethod_resp(rtn_inst_names, eos, context_id) | def function[_open_response, parameter[self, objects, namespace, pull_type]]:
constant[
Build an open... response once the objects have been extracted from
the repository.
]
variable[max_obj_cnt] assign[=] call[name[params]][constant[MaxObjectCount]]
if compare[name[max_obj_cnt] is constant[None]] begin[:]
variable[max_obj_cnt] assign[=] name[_DEFAULT_MAX_OBJECT_COUNT]
variable[default_server_timeout] assign[=] constant[40]
variable[timeout] assign[=] <ast.IfExp object at 0x7da20cabce20>
if compare[call[name[len], parameter[name[objects]]] less_or_equal[<=] name[max_obj_cnt]] begin[:]
variable[eos] assign[=] constant[TRUE]
variable[context_id] assign[=] constant[]
variable[rtn_inst_names] assign[=] name[objects]
return[call[name[self]._make_pull_imethod_resp, parameter[name[rtn_inst_names], name[eos], name[context_id]]]] | keyword[def] identifier[_open_response] ( identifier[self] , identifier[objects] , identifier[namespace] , identifier[pull_type] ,** identifier[params] ):
literal[string]
identifier[max_obj_cnt] = identifier[params] [ literal[string] ]
keyword[if] identifier[max_obj_cnt] keyword[is] keyword[None] :
identifier[max_obj_cnt] = identifier[_DEFAULT_MAX_OBJECT_COUNT]
identifier[default_server_timeout] = literal[int]
identifier[timeout] = identifier[default_server_timeout] keyword[if] identifier[params] [ literal[string] ] keyword[is] keyword[None] keyword[else] identifier[params] [ literal[string] ]
keyword[if] identifier[len] ( identifier[objects] )<= identifier[max_obj_cnt] :
identifier[eos] = literal[string]
identifier[context_id] = literal[string]
identifier[rtn_inst_names] = identifier[objects]
keyword[else] :
identifier[eos] = literal[string]
identifier[context_id] = identifier[self] . identifier[_create_contextid] ()
identifier[self] . identifier[enumeration_contexts] [ identifier[context_id] ]={ literal[string] : identifier[pull_type] ,
literal[string] : identifier[objects] ,
literal[string] : identifier[namespace] ,
literal[string] : identifier[time] . identifier[clock] (),
literal[string] : identifier[timeout] }
identifier[rtn_inst_names] = identifier[objects] [ literal[int] : identifier[max_obj_cnt] ]
keyword[del] identifier[objects] [ literal[int] : identifier[max_obj_cnt] ]
keyword[return] identifier[self] . identifier[_make_pull_imethod_resp] ( identifier[rtn_inst_names] , identifier[eos] , identifier[context_id] ) | def _open_response(self, objects, namespace, pull_type, **params):
"""
Build an open... response once the objects have been extracted from
the repository.
"""
max_obj_cnt = params['MaxObjectCount']
if max_obj_cnt is None:
max_obj_cnt = _DEFAULT_MAX_OBJECT_COUNT # depends on [control=['if'], data=['max_obj_cnt']]
default_server_timeout = 40
timeout = default_server_timeout if params['OperationTimeout'] is None else params['OperationTimeout']
if len(objects) <= max_obj_cnt:
eos = u'TRUE'
context_id = ''
rtn_inst_names = objects # depends on [control=['if'], data=[]]
else:
eos = u'FALSE'
context_id = self._create_contextid()
# TODO:ks Future. Use the timeout along with response delay. Then
# user could timeout pulls. This means adding timer test to
# pulls and close. Timer should be used to close old contexts
# also.
self.enumeration_contexts[context_id] = {'pull_type': pull_type, 'data': objects, 'namespace': namespace, 'time': time.clock(), 'interoptimeout': timeout}
rtn_inst_names = objects[0:max_obj_cnt]
del objects[0:max_obj_cnt]
return self._make_pull_imethod_resp(rtn_inst_names, eos, context_id) |
def shift(self, top=None, right=None, bottom=None, left=None):
"""
Shift all polygons from one or more image sides, i.e. move them on the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift all polygons from the top.
right : None or int, optional
Amount of pixels by which to shift all polygons from the right.
bottom : None or int, optional
Amount of pixels by which to shift all polygons from the bottom.
left : None or int, optional
Amount of pixels by which to shift all polygons from the left.
Returns
-------
imgaug.PolygonsOnImage
Shifted polygons.
"""
polys_new = [
poly.shift(top=top, right=right, bottom=bottom, left=left)
for poly
in self.polygons
]
return PolygonsOnImage(polys_new, shape=self.shape) | def function[shift, parameter[self, top, right, bottom, left]]:
constant[
Shift all polygons from one or more image sides, i.e. move them on the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift all polygons from the top.
right : None or int, optional
Amount of pixels by which to shift all polygons from the right.
bottom : None or int, optional
Amount of pixels by which to shift all polygons from the bottom.
left : None or int, optional
Amount of pixels by which to shift all polygons from the left.
Returns
-------
imgaug.PolygonsOnImage
Shifted polygons.
]
variable[polys_new] assign[=] <ast.ListComp object at 0x7da20c7ca050>
return[call[name[PolygonsOnImage], parameter[name[polys_new]]]] | keyword[def] identifier[shift] ( identifier[self] , identifier[top] = keyword[None] , identifier[right] = keyword[None] , identifier[bottom] = keyword[None] , identifier[left] = keyword[None] ):
literal[string]
identifier[polys_new] =[
identifier[poly] . identifier[shift] ( identifier[top] = identifier[top] , identifier[right] = identifier[right] , identifier[bottom] = identifier[bottom] , identifier[left] = identifier[left] )
keyword[for] identifier[poly]
keyword[in] identifier[self] . identifier[polygons]
]
keyword[return] identifier[PolygonsOnImage] ( identifier[polys_new] , identifier[shape] = identifier[self] . identifier[shape] ) | def shift(self, top=None, right=None, bottom=None, left=None):
"""
Shift all polygons from one or more image sides, i.e. move them on the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift all polygons from the top.
right : None or int, optional
Amount of pixels by which to shift all polygons from the right.
bottom : None or int, optional
Amount of pixels by which to shift all polygons from the bottom.
left : None or int, optional
Amount of pixels by which to shift all polygons from the left.
Returns
-------
imgaug.PolygonsOnImage
Shifted polygons.
"""
polys_new = [poly.shift(top=top, right=right, bottom=bottom, left=left) for poly in self.polygons]
return PolygonsOnImage(polys_new, shape=self.shape) |
def hasBidAsk(self) -> bool:
"""
See if this ticker has a valid bid and ask.
"""
return (
self.bid != -1 and not isNan(self.bid) and self.bidSize > 0 and
self.ask != -1 and not isNan(self.ask) and self.askSize > 0) | def function[hasBidAsk, parameter[self]]:
constant[
See if this ticker has a valid bid and ask.
]
return[<ast.BoolOp object at 0x7da20e962ef0>] | keyword[def] identifier[hasBidAsk] ( identifier[self] )-> identifier[bool] :
literal[string]
keyword[return] (
identifier[self] . identifier[bid] !=- literal[int] keyword[and] keyword[not] identifier[isNan] ( identifier[self] . identifier[bid] ) keyword[and] identifier[self] . identifier[bidSize] > literal[int] keyword[and]
identifier[self] . identifier[ask] !=- literal[int] keyword[and] keyword[not] identifier[isNan] ( identifier[self] . identifier[ask] ) keyword[and] identifier[self] . identifier[askSize] > literal[int] ) | def hasBidAsk(self) -> bool:
"""
See if this ticker has a valid bid and ask.
"""
return self.bid != -1 and (not isNan(self.bid)) and (self.bidSize > 0) and (self.ask != -1) and (not isNan(self.ask)) and (self.askSize > 0) |
def _add_combined_condition_to_template(self, template_dict, condition_name, conditions_to_combine):
"""
Add top-level template condition that combines the given list of conditions.
:param dict template_dict: SAM template dictionary
:param string condition_name: Name of top-level template condition
:param list conditions_to_combine: List of conditions that should be combined (via OR operator) to form
top-level condition.
"""
# defensive precondition check
if not conditions_to_combine or len(conditions_to_combine) < 2:
raise ValueError('conditions_to_combine must have at least 2 conditions')
template_conditions = template_dict.setdefault('Conditions', {})
new_template_conditions = make_combined_condition(sorted(list(conditions_to_combine)), condition_name)
for name, definition in new_template_conditions.items():
template_conditions[name] = definition | def function[_add_combined_condition_to_template, parameter[self, template_dict, condition_name, conditions_to_combine]]:
constant[
Add top-level template condition that combines the given list of conditions.
:param dict template_dict: SAM template dictionary
:param string condition_name: Name of top-level template condition
:param list conditions_to_combine: List of conditions that should be combined (via OR operator) to form
top-level condition.
]
if <ast.BoolOp object at 0x7da2041d8dc0> begin[:]
<ast.Raise object at 0x7da1b1e15780>
variable[template_conditions] assign[=] call[name[template_dict].setdefault, parameter[constant[Conditions], dictionary[[], []]]]
variable[new_template_conditions] assign[=] call[name[make_combined_condition], parameter[call[name[sorted], parameter[call[name[list], parameter[name[conditions_to_combine]]]]], name[condition_name]]]
for taget[tuple[[<ast.Name object at 0x7da1b1e156c0>, <ast.Name object at 0x7da1b1e14970>]]] in starred[call[name[new_template_conditions].items, parameter[]]] begin[:]
call[name[template_conditions]][name[name]] assign[=] name[definition] | keyword[def] identifier[_add_combined_condition_to_template] ( identifier[self] , identifier[template_dict] , identifier[condition_name] , identifier[conditions_to_combine] ):
literal[string]
keyword[if] keyword[not] identifier[conditions_to_combine] keyword[or] identifier[len] ( identifier[conditions_to_combine] )< literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[template_conditions] = identifier[template_dict] . identifier[setdefault] ( literal[string] ,{})
identifier[new_template_conditions] = identifier[make_combined_condition] ( identifier[sorted] ( identifier[list] ( identifier[conditions_to_combine] )), identifier[condition_name] )
keyword[for] identifier[name] , identifier[definition] keyword[in] identifier[new_template_conditions] . identifier[items] ():
identifier[template_conditions] [ identifier[name] ]= identifier[definition] | def _add_combined_condition_to_template(self, template_dict, condition_name, conditions_to_combine):
"""
Add top-level template condition that combines the given list of conditions.
:param dict template_dict: SAM template dictionary
:param string condition_name: Name of top-level template condition
:param list conditions_to_combine: List of conditions that should be combined (via OR operator) to form
top-level condition.
"""
# defensive precondition check
if not conditions_to_combine or len(conditions_to_combine) < 2:
raise ValueError('conditions_to_combine must have at least 2 conditions') # depends on [control=['if'], data=[]]
template_conditions = template_dict.setdefault('Conditions', {})
new_template_conditions = make_combined_condition(sorted(list(conditions_to_combine)), condition_name)
for (name, definition) in new_template_conditions.items():
template_conditions[name] = definition # depends on [control=['for'], data=[]] |
def maybe_send(self, recipient: Address, message: Message):
""" Send message to recipient if the transport is running. """
if not is_binary_address(recipient):
raise InvalidAddress('Invalid address {}'.format(pex(recipient)))
messagedata = message.encode()
host_port = self.get_host_port(recipient)
self.maybe_sendraw(host_port, messagedata) | def function[maybe_send, parameter[self, recipient, message]]:
constant[ Send message to recipient if the transport is running. ]
if <ast.UnaryOp object at 0x7da1b1713580> begin[:]
<ast.Raise object at 0x7da1b1711570>
variable[messagedata] assign[=] call[name[message].encode, parameter[]]
variable[host_port] assign[=] call[name[self].get_host_port, parameter[name[recipient]]]
call[name[self].maybe_sendraw, parameter[name[host_port], name[messagedata]]] | keyword[def] identifier[maybe_send] ( identifier[self] , identifier[recipient] : identifier[Address] , identifier[message] : identifier[Message] ):
literal[string]
keyword[if] keyword[not] identifier[is_binary_address] ( identifier[recipient] ):
keyword[raise] identifier[InvalidAddress] ( literal[string] . identifier[format] ( identifier[pex] ( identifier[recipient] )))
identifier[messagedata] = identifier[message] . identifier[encode] ()
identifier[host_port] = identifier[self] . identifier[get_host_port] ( identifier[recipient] )
identifier[self] . identifier[maybe_sendraw] ( identifier[host_port] , identifier[messagedata] ) | def maybe_send(self, recipient: Address, message: Message):
""" Send message to recipient if the transport is running. """
if not is_binary_address(recipient):
raise InvalidAddress('Invalid address {}'.format(pex(recipient))) # depends on [control=['if'], data=[]]
messagedata = message.encode()
host_port = self.get_host_port(recipient)
self.maybe_sendraw(host_port, messagedata) |
def dump(self, stream, state, mevm, conc_tx=None):
    """
    Concretize and write a human readable version of the transaction into the stream. Used during testcase
    generation.
    :param stream: Output stream to write to. Typically a file.
    :param manticore.ethereum.State state: state that the tx exists in
    :param manticore.ethereum.ManticoreEVM mevm: manticore instance
    :param conc_tx: optional pre-concretized transaction; if None it is computed via self.concretize(state)
    :return: True if the dumped CALL calldata or return data was symbolic, False otherwise
    """
    # Imported locally (not at module level) to break an import cycle.
    from ..ethereum import ABI  # circular imports
    from ..ethereum.manticore import flagged
    is_something_symbolic = False
    if conc_tx is None:
        conc_tx = self.concretize(state)
    # The result if any RETURN or REVERT
    stream.write("Type: %s (%d)\n" % (self.sort, self.depth))
    # Resolve concrete caller/target addresses to human-readable account names;
    # flagged(...) appends a marker when the original (symbolic) field was symbolic.
    caller_solution = conc_tx.caller
    caller_name = mevm.account_name(caller_solution)
    stream.write("From: %s(0x%x) %s\n" % (caller_name, caller_solution, flagged(issymbolic(self.caller))))
    address_solution = conc_tx.address
    address_name = mevm.account_name(address_solution)
    stream.write("To: %s(0x%x) %s\n" % (address_name, address_solution, flagged(issymbolic(self.address))))
    stream.write("Value: %d %s\n" % (conc_tx.value, flagged(issymbolic(self.value))))
    stream.write("Gas used: %d %s\n" % (conc_tx.gas, flagged(issymbolic(self.gas))))
    tx_data = conc_tx.data
    stream.write("Data: 0x{} {}\n".format(binascii.hexlify(tx_data).decode(), flagged(issymbolic(self.data))))
    if self.return_data is not None:
        return_data = conc_tx.return_data
        stream.write("Return_data: 0x{} {}\n".format(binascii.hexlify(return_data).decode(), flagged(issymbolic(self.return_data))))
    metadata = mevm.get_metadata(self.address)
    if self.sort == 'CREATE':
        if metadata is not None:
            # Constructor arguments follow the init bytecode in the creation data.
            conc_args_data = conc_tx.data[len(metadata._init_bytecode):]
            arguments = ABI.deserialize(metadata.get_constructor_arguments(), conc_args_data)
            # TODO confirm: arguments should all be concrete?
            is_argument_symbolic = any(map(issymbolic, arguments))  # is this redundant since arguments are all concrete?
            stream.write('Function call:\n')
            stream.write("Constructor(")
            stream.write(','.join(map(repr, map(state.solve_one, arguments))))  # is this redundant since arguments are all concrete?
            stream.write(') -> %s %s\n' % (self.result, flagged(is_argument_symbolic)))
    if self.sort == 'CALL':
        if metadata is not None:
            calldata = conc_tx.data
            is_calldata_symbolic = issymbolic(self.data)
            # First 4 bytes of calldata are the ABI function selector.
            function_id = calldata[:4]  # hope there is enough data
            signature = metadata.get_func_signature(function_id)
            function_name = metadata.get_func_name(function_id)
            if signature:
                _, arguments = ABI.deserialize(signature, calldata)
            else:
                # Unknown selector: show the raw calldata as the single argument.
                arguments = (calldata,)
            return_data = None
            # BUGFIX: previously is_return_symbolic was only bound inside the
            # RETURN branch, so a non-RETURN result (e.g. REVERT) with concrete
            # calldata raised NameError on the final `or` below.
            is_return_symbolic = False
            if self.result == 'RETURN':
                ret_types = metadata.get_func_return_types(function_id)
                return_data = conc_tx.return_data
                return_values = ABI.deserialize(ret_types, return_data)  # function return
                is_return_symbolic = issymbolic(self.return_data)
            stream.write('\n')
            stream.write("Function call:\n")
            stream.write("%s(" % function_name)
            stream.write(','.join(map(repr, arguments)))
            stream.write(') -> %s %s\n' % (self.result, flagged(is_calldata_symbolic)))
            if return_data is not None:
                # Unwrap single-element tuples for nicer display.
                if len(return_values) == 1:
                    return_values = return_values[0]
                stream.write('return: %r %s\n' % (return_values, flagged(is_return_symbolic)))
            is_something_symbolic = is_calldata_symbolic or is_return_symbolic
    stream.write('\n\n')
    return is_something_symbolic
constant[
Concretize and write a human readable version of the transaction into the stream. Used during testcase
generation.
:param stream: Output stream to write to. Typically a file.
:param manticore.ethereum.State state: state that the tx exists in
:param manticore.ethereum.ManticoreEVM mevm: manticore instance
:return:
]
from relative_module[ethereum] import module[ABI]
from relative_module[ethereum.manticore] import module[flagged]
variable[is_something_symbolic] assign[=] constant[False]
if compare[name[conc_tx] is constant[None]] begin[:]
variable[conc_tx] assign[=] call[name[self].concretize, parameter[name[state]]]
call[name[stream].write, parameter[binary_operation[constant[Type: %s (%d)
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da2049626b0>, <ast.Attribute object at 0x7da204960fa0>]]]]]
variable[caller_solution] assign[=] name[conc_tx].caller
variable[caller_name] assign[=] call[name[mevm].account_name, parameter[name[caller_solution]]]
call[name[stream].write, parameter[binary_operation[constant[From: %s(0x%x) %s
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da204963bb0>, <ast.Name object at 0x7da2049621a0>, <ast.Call object at 0x7da2049625c0>]]]]]
variable[address_solution] assign[=] name[conc_tx].address
variable[address_name] assign[=] call[name[mevm].account_name, parameter[name[address_solution]]]
call[name[stream].write, parameter[binary_operation[constant[To: %s(0x%x) %s
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da204960ca0>, <ast.Name object at 0x7da204962da0>, <ast.Call object at 0x7da204962b00>]]]]]
call[name[stream].write, parameter[binary_operation[constant[Value: %d %s
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da204961030>, <ast.Call object at 0x7da204962710>]]]]]
call[name[stream].write, parameter[binary_operation[constant[Gas used: %d %s
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da204963f70>, <ast.Call object at 0x7da204962a10>]]]]]
variable[tx_data] assign[=] name[conc_tx].data
call[name[stream].write, parameter[call[constant[Data: 0x{} {}
].format, parameter[call[call[name[binascii].hexlify, parameter[name[tx_data]]].decode, parameter[]], call[name[flagged], parameter[call[name[issymbolic], parameter[name[self].data]]]]]]]]
if compare[name[self].return_data is_not constant[None]] begin[:]
variable[return_data] assign[=] name[conc_tx].return_data
call[name[stream].write, parameter[call[constant[Return_data: 0x{} {}
].format, parameter[call[call[name[binascii].hexlify, parameter[name[return_data]]].decode, parameter[]], call[name[flagged], parameter[call[name[issymbolic], parameter[name[self].return_data]]]]]]]]
variable[metadata] assign[=] call[name[mevm].get_metadata, parameter[name[self].address]]
if compare[name[self].sort equal[==] constant[CREATE]] begin[:]
if compare[name[metadata] is_not constant[None]] begin[:]
variable[conc_args_data] assign[=] call[name[conc_tx].data][<ast.Slice object at 0x7da1b0084fa0>]
variable[arguments] assign[=] call[name[ABI].deserialize, parameter[call[name[metadata].get_constructor_arguments, parameter[]], name[conc_args_data]]]
variable[is_argument_symbolic] assign[=] call[name[any], parameter[call[name[map], parameter[name[issymbolic], name[arguments]]]]]
call[name[stream].write, parameter[constant[Function call:
]]]
call[name[stream].write, parameter[constant[Constructor(]]]
call[name[stream].write, parameter[call[constant[,].join, parameter[call[name[map], parameter[name[repr], call[name[map], parameter[name[state].solve_one, name[arguments]]]]]]]]]
call[name[stream].write, parameter[binary_operation[constant[) -> %s %s
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b0086da0>, <ast.Call object at 0x7da1b0084190>]]]]]
if compare[name[self].sort equal[==] constant[CALL]] begin[:]
if compare[name[metadata] is_not constant[None]] begin[:]
variable[calldata] assign[=] name[conc_tx].data
variable[is_calldata_symbolic] assign[=] call[name[issymbolic], parameter[name[self].data]]
variable[function_id] assign[=] call[name[calldata]][<ast.Slice object at 0x7da1b0086590>]
variable[signature] assign[=] call[name[metadata].get_func_signature, parameter[name[function_id]]]
variable[function_name] assign[=] call[name[metadata].get_func_name, parameter[name[function_id]]]
if name[signature] begin[:]
<ast.Tuple object at 0x7da1b0084e20> assign[=] call[name[ABI].deserialize, parameter[name[signature], name[calldata]]]
variable[return_data] assign[=] constant[None]
if compare[name[self].result equal[==] constant[RETURN]] begin[:]
variable[ret_types] assign[=] call[name[metadata].get_func_return_types, parameter[name[function_id]]]
variable[return_data] assign[=] name[conc_tx].return_data
variable[return_values] assign[=] call[name[ABI].deserialize, parameter[name[ret_types], name[return_data]]]
variable[is_return_symbolic] assign[=] call[name[issymbolic], parameter[name[self].return_data]]
call[name[stream].write, parameter[constant[
]]]
call[name[stream].write, parameter[constant[Function call:
]]]
call[name[stream].write, parameter[binary_operation[constant[%s(] <ast.Mod object at 0x7da2590d6920> name[function_name]]]]
call[name[stream].write, parameter[call[constant[,].join, parameter[call[name[map], parameter[name[repr], name[arguments]]]]]]]
call[name[stream].write, parameter[binary_operation[constant[) -> %s %s
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20c76e6b0>, <ast.Call object at 0x7da20c76f9a0>]]]]]
if compare[name[return_data] is_not constant[None]] begin[:]
if compare[call[name[len], parameter[name[return_values]]] equal[==] constant[1]] begin[:]
variable[return_values] assign[=] call[name[return_values]][constant[0]]
call[name[stream].write, parameter[binary_operation[constant[return: %r %s
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c76e980>, <ast.Call object at 0x7da20c76f0a0>]]]]]
variable[is_something_symbolic] assign[=] <ast.BoolOp object at 0x7da20c76e830>
call[name[stream].write, parameter[constant[
]]]
return[name[is_something_symbolic]] | keyword[def] identifier[dump] ( identifier[self] , identifier[stream] , identifier[state] , identifier[mevm] , identifier[conc_tx] = keyword[None] ):
literal[string]
keyword[from] .. identifier[ethereum] keyword[import] identifier[ABI]
keyword[from] .. identifier[ethereum] . identifier[manticore] keyword[import] identifier[flagged]
identifier[is_something_symbolic] = keyword[False]
keyword[if] identifier[conc_tx] keyword[is] keyword[None] :
identifier[conc_tx] = identifier[self] . identifier[concretize] ( identifier[state] )
identifier[stream] . identifier[write] ( literal[string] %( identifier[self] . identifier[sort] , identifier[self] . identifier[depth] ))
identifier[caller_solution] = identifier[conc_tx] . identifier[caller]
identifier[caller_name] = identifier[mevm] . identifier[account_name] ( identifier[caller_solution] )
identifier[stream] . identifier[write] ( literal[string] %( identifier[caller_name] , identifier[caller_solution] , identifier[flagged] ( identifier[issymbolic] ( identifier[self] . identifier[caller] ))))
identifier[address_solution] = identifier[conc_tx] . identifier[address]
identifier[address_name] = identifier[mevm] . identifier[account_name] ( identifier[address_solution] )
identifier[stream] . identifier[write] ( literal[string] %( identifier[address_name] , identifier[address_solution] , identifier[flagged] ( identifier[issymbolic] ( identifier[self] . identifier[address] ))))
identifier[stream] . identifier[write] ( literal[string] %( identifier[conc_tx] . identifier[value] , identifier[flagged] ( identifier[issymbolic] ( identifier[self] . identifier[value] ))))
identifier[stream] . identifier[write] ( literal[string] %( identifier[conc_tx] . identifier[gas] , identifier[flagged] ( identifier[issymbolic] ( identifier[self] . identifier[gas] ))))
identifier[tx_data] = identifier[conc_tx] . identifier[data]
identifier[stream] . identifier[write] ( literal[string] . identifier[format] ( identifier[binascii] . identifier[hexlify] ( identifier[tx_data] ). identifier[decode] (), identifier[flagged] ( identifier[issymbolic] ( identifier[self] . identifier[data] ))))
keyword[if] identifier[self] . identifier[return_data] keyword[is] keyword[not] keyword[None] :
identifier[return_data] = identifier[conc_tx] . identifier[return_data]
identifier[stream] . identifier[write] ( literal[string] . identifier[format] ( identifier[binascii] . identifier[hexlify] ( identifier[return_data] ). identifier[decode] (), identifier[flagged] ( identifier[issymbolic] ( identifier[self] . identifier[return_data] ))))
identifier[metadata] = identifier[mevm] . identifier[get_metadata] ( identifier[self] . identifier[address] )
keyword[if] identifier[self] . identifier[sort] == literal[string] :
keyword[if] identifier[metadata] keyword[is] keyword[not] keyword[None] :
identifier[conc_args_data] = identifier[conc_tx] . identifier[data] [ identifier[len] ( identifier[metadata] . identifier[_init_bytecode] ):]
identifier[arguments] = identifier[ABI] . identifier[deserialize] ( identifier[metadata] . identifier[get_constructor_arguments] (), identifier[conc_args_data] )
identifier[is_argument_symbolic] = identifier[any] ( identifier[map] ( identifier[issymbolic] , identifier[arguments] ))
identifier[stream] . identifier[write] ( literal[string] )
identifier[stream] . identifier[write] ( literal[string] )
identifier[stream] . identifier[write] ( literal[string] . identifier[join] ( identifier[map] ( identifier[repr] , identifier[map] ( identifier[state] . identifier[solve_one] , identifier[arguments] ))))
identifier[stream] . identifier[write] ( literal[string] %( identifier[self] . identifier[result] , identifier[flagged] ( identifier[is_argument_symbolic] )))
keyword[if] identifier[self] . identifier[sort] == literal[string] :
keyword[if] identifier[metadata] keyword[is] keyword[not] keyword[None] :
identifier[calldata] = identifier[conc_tx] . identifier[data]
identifier[is_calldata_symbolic] = identifier[issymbolic] ( identifier[self] . identifier[data] )
identifier[function_id] = identifier[calldata] [: literal[int] ]
identifier[signature] = identifier[metadata] . identifier[get_func_signature] ( identifier[function_id] )
identifier[function_name] = identifier[metadata] . identifier[get_func_name] ( identifier[function_id] )
keyword[if] identifier[signature] :
identifier[_] , identifier[arguments] = identifier[ABI] . identifier[deserialize] ( identifier[signature] , identifier[calldata] )
keyword[else] :
identifier[arguments] =( identifier[calldata] ,)
identifier[return_data] = keyword[None]
keyword[if] identifier[self] . identifier[result] == literal[string] :
identifier[ret_types] = identifier[metadata] . identifier[get_func_return_types] ( identifier[function_id] )
identifier[return_data] = identifier[conc_tx] . identifier[return_data]
identifier[return_values] = identifier[ABI] . identifier[deserialize] ( identifier[ret_types] , identifier[return_data] )
identifier[is_return_symbolic] = identifier[issymbolic] ( identifier[self] . identifier[return_data] )
identifier[stream] . identifier[write] ( literal[string] )
identifier[stream] . identifier[write] ( literal[string] )
identifier[stream] . identifier[write] ( literal[string] % identifier[function_name] )
identifier[stream] . identifier[write] ( literal[string] . identifier[join] ( identifier[map] ( identifier[repr] , identifier[arguments] )))
identifier[stream] . identifier[write] ( literal[string] %( identifier[self] . identifier[result] , identifier[flagged] ( identifier[is_calldata_symbolic] )))
keyword[if] identifier[return_data] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[len] ( identifier[return_values] )== literal[int] :
identifier[return_values] = identifier[return_values] [ literal[int] ]
identifier[stream] . identifier[write] ( literal[string] %( identifier[return_values] , identifier[flagged] ( identifier[is_return_symbolic] )))
identifier[is_something_symbolic] = identifier[is_calldata_symbolic] keyword[or] identifier[is_return_symbolic]
identifier[stream] . identifier[write] ( literal[string] )
keyword[return] identifier[is_something_symbolic] | def dump(self, stream, state, mevm, conc_tx=None):
"""
Concretize and write a human readable version of the transaction into the stream. Used during testcase
generation.
:param stream: Output stream to write to. Typically a file.
:param manticore.ethereum.State state: state that the tx exists in
:param manticore.ethereum.ManticoreEVM mevm: manticore instance
:return:
"""
from ..ethereum import ABI # circular imports
from ..ethereum.manticore import flagged
is_something_symbolic = False
if conc_tx is None:
conc_tx = self.concretize(state) # depends on [control=['if'], data=['conc_tx']]
# The result if any RETURN or REVERT
stream.write('Type: %s (%d)\n' % (self.sort, self.depth))
caller_solution = conc_tx.caller
caller_name = mevm.account_name(caller_solution)
stream.write('From: %s(0x%x) %s\n' % (caller_name, caller_solution, flagged(issymbolic(self.caller))))
address_solution = conc_tx.address
address_name = mevm.account_name(address_solution)
stream.write('To: %s(0x%x) %s\n' % (address_name, address_solution, flagged(issymbolic(self.address))))
stream.write('Value: %d %s\n' % (conc_tx.value, flagged(issymbolic(self.value))))
stream.write('Gas used: %d %s\n' % (conc_tx.gas, flagged(issymbolic(self.gas))))
tx_data = conc_tx.data
stream.write('Data: 0x{} {}\n'.format(binascii.hexlify(tx_data).decode(), flagged(issymbolic(self.data))))
if self.return_data is not None:
return_data = conc_tx.return_data
stream.write('Return_data: 0x{} {}\n'.format(binascii.hexlify(return_data).decode(), flagged(issymbolic(self.return_data)))) # depends on [control=['if'], data=[]]
metadata = mevm.get_metadata(self.address)
if self.sort == 'CREATE':
if metadata is not None:
conc_args_data = conc_tx.data[len(metadata._init_bytecode):]
arguments = ABI.deserialize(metadata.get_constructor_arguments(), conc_args_data)
# TODO confirm: arguments should all be concrete?
is_argument_symbolic = any(map(issymbolic, arguments)) # is this redundant since arguments are all concrete?
stream.write('Function call:\n')
stream.write('Constructor(')
stream.write(','.join(map(repr, map(state.solve_one, arguments)))) # is this redundant since arguments are all concrete?
stream.write(') -> %s %s\n' % (self.result, flagged(is_argument_symbolic))) # depends on [control=['if'], data=['metadata']] # depends on [control=['if'], data=[]]
if self.sort == 'CALL':
if metadata is not None:
calldata = conc_tx.data
is_calldata_symbolic = issymbolic(self.data)
function_id = calldata[:4] # hope there is enough data
signature = metadata.get_func_signature(function_id)
function_name = metadata.get_func_name(function_id)
if signature:
(_, arguments) = ABI.deserialize(signature, calldata) # depends on [control=['if'], data=[]]
else:
arguments = (calldata,)
return_data = None
if self.result == 'RETURN':
ret_types = metadata.get_func_return_types(function_id)
return_data = conc_tx.return_data
return_values = ABI.deserialize(ret_types, return_data) # function return # depends on [control=['if'], data=[]]
is_return_symbolic = issymbolic(self.return_data)
stream.write('\n')
stream.write('Function call:\n')
stream.write('%s(' % function_name)
stream.write(','.join(map(repr, arguments)))
stream.write(') -> %s %s\n' % (self.result, flagged(is_calldata_symbolic)))
if return_data is not None:
if len(return_values) == 1:
return_values = return_values[0] # depends on [control=['if'], data=[]]
stream.write('return: %r %s\n' % (return_values, flagged(is_return_symbolic))) # depends on [control=['if'], data=[]]
is_something_symbolic = is_calldata_symbolic or is_return_symbolic # depends on [control=['if'], data=['metadata']] # depends on [control=['if'], data=[]]
stream.write('\n\n')
return is_something_symbolic |
def surface(x, y, z):
    """Build a 3D surface plot chart.

    Parameters
    ----------
    x : array-like, optional
    y : array-like, optional
    z : array-like, optional

    Returns
    -------
    Chart
    """
    trace = go.Surface(x=x, y=y, z=z)
    return Chart(data=[trace])
constant[Surface plot.
Parameters
----------
x : array-like, optional
y : array-like, optional
z : array-like, optional
Returns
-------
Chart
]
variable[data] assign[=] list[[<ast.Call object at 0x7da1b10504c0>]]
return[call[name[Chart], parameter[]]] | keyword[def] identifier[surface] ( identifier[x] , identifier[y] , identifier[z] ):
literal[string]
identifier[data] =[ identifier[go] . identifier[Surface] ( identifier[x] = identifier[x] , identifier[y] = identifier[y] , identifier[z] = identifier[z] )]
keyword[return] identifier[Chart] ( identifier[data] = identifier[data] ) | def surface(x, y, z):
"""Surface plot.
Parameters
----------
x : array-like, optional
y : array-like, optional
z : array-like, optional
Returns
-------
Chart
"""
data = [go.Surface(x=x, y=y, z=z)]
return Chart(data=data) |
def zonearea(idf, zonename, debug=False):
    """Return the floor area of the named zone.

    Falls back to the roof/ceiling area when the zone has no FLOOR surfaces.
    """
    zone = idf.getobject('ZONE', zonename)
    all_surfaces = idf.idfobjects['BuildingSurface:Detailed'.upper()]
    zone_surfaces = [surf for surf in all_surfaces if surf.Zone_Name == zone.Name]
    floors = [surf for surf in zone_surfaces if surf.Surface_Type.upper() == 'FLOOR']
    if debug:
        print(len(floors))
        print([floor.area for floor in floors])
    if floors:
        return zonearea_floor(idf, zonename)
    return zonearea_roofceiling(idf, zonename)
constant[zone area]
variable[zone] assign[=] call[name[idf].getobject, parameter[constant[ZONE], name[zonename]]]
variable[surfs] assign[=] call[name[idf].idfobjects][call[constant[BuildingSurface:Detailed].upper, parameter[]]]
variable[zone_surfs] assign[=] <ast.ListComp object at 0x7da20c76d2a0>
variable[floors] assign[=] <ast.ListComp object at 0x7da20c76dde0>
if name[debug] begin[:]
call[name[print], parameter[call[name[len], parameter[name[floors]]]]]
call[name[print], parameter[<ast.ListComp object at 0x7da20c76cbb0>]]
if compare[name[floors] not_equal[!=] list[[]]] begin[:]
variable[area] assign[=] call[name[zonearea_floor], parameter[name[idf], name[zonename]]]
return[name[area]] | keyword[def] identifier[zonearea] ( identifier[idf] , identifier[zonename] , identifier[debug] = keyword[False] ):
literal[string]
identifier[zone] = identifier[idf] . identifier[getobject] ( literal[string] , identifier[zonename] )
identifier[surfs] = identifier[idf] . identifier[idfobjects] [ literal[string] . identifier[upper] ()]
identifier[zone_surfs] =[ identifier[s] keyword[for] identifier[s] keyword[in] identifier[surfs] keyword[if] identifier[s] . identifier[Zone_Name] == identifier[zone] . identifier[Name] ]
identifier[floors] =[ identifier[s] keyword[for] identifier[s] keyword[in] identifier[zone_surfs] keyword[if] identifier[s] . identifier[Surface_Type] . identifier[upper] ()== literal[string] ]
keyword[if] identifier[debug] :
identifier[print] ( identifier[len] ( identifier[floors] ))
identifier[print] ([ identifier[floor] . identifier[area] keyword[for] identifier[floor] keyword[in] identifier[floors] ])
keyword[if] identifier[floors] !=[]:
identifier[area] = identifier[zonearea_floor] ( identifier[idf] , identifier[zonename] )
keyword[else] :
identifier[area] = identifier[zonearea_roofceiling] ( identifier[idf] , identifier[zonename] )
keyword[return] identifier[area] | def zonearea(idf, zonename, debug=False):
"""zone area"""
zone = idf.getobject('ZONE', zonename)
surfs = idf.idfobjects['BuildingSurface:Detailed'.upper()]
zone_surfs = [s for s in surfs if s.Zone_Name == zone.Name]
floors = [s for s in zone_surfs if s.Surface_Type.upper() == 'FLOOR']
if debug:
print(len(floors))
print([floor.area for floor in floors]) # depends on [control=['if'], data=[]]
# area = sum([floor.area for floor in floors])
if floors != []:
area = zonearea_floor(idf, zonename) # depends on [control=['if'], data=[]]
else:
area = zonearea_roofceiling(idf, zonename)
return area |
def _ros_read_images(self, stream_buffer, number, staleness_limit=10.):
    """Read images from a ROS image-buffer service.

    Parameters
    ----------
    stream_buffer : string
        Absolute path to the image buffer service.
    number : int
        The number of frames to get. Must be less than the image buffer
        service's current buffer size.
    staleness_limit : float, optional
        Max value of how many seconds old the oldest image is. If the oldest
        image grabbed is older than this value, a RuntimeError is raised.
        If None, staleness is ignored.

    Returns
    -------
    list of numpy.ndarray
        One array per image, in reverse chronological order (newest first).

    Raises
    ------
    RuntimeError
        If the oldest returned frame is older than ``staleness_limit`` seconds.
    """
    rospy.wait_for_service(stream_buffer, timeout=self.timeout)
    ros_image_buffer = rospy.ServiceProxy(stream_buffer, ImageBuffer)
    ret = ros_image_buffer(number, 1)
    # `is not None` instead of the non-idiomatic `not ... == None`.
    if staleness_limit is not None:
        # The last timestamp corresponds to the oldest frame (ages in seconds,
        # per the error message contract below).
        if ret.timestamps[-1] > staleness_limit:
            raise RuntimeError("Got data {0} seconds old, more than allowed {1} seconds"
                               .format(ret.timestamps[-1], staleness_limit))
    data = ret.data.reshape(ret.data_dim1, ret.data_dim2, ret.data_dim3).astype(ret.dtype)
    # Special handling for 1 element, since dstack's behavior is different
    if number == 1:
        return [data]
    return np.dsplit(data, number)
constant[ Reads images from a stream buffer
Parameters
----------
stream_buffer : string
absolute path to the image buffer service
number : int
The number of frames to get. Must be less than the image buffer service's
current buffer size
staleness_limit : float, optional
Max value of how many seconds old the oldest image is. If the oldest image
grabbed is older than this value, a RuntimeError is thrown.
If None, staleness is ignored.
Returns
-------
List of nump.ndarray objects, each one an image
Images are in reverse chronological order (newest first)
]
call[name[rospy].wait_for_service, parameter[name[stream_buffer]]]
variable[ros_image_buffer] assign[=] call[name[rospy].ServiceProxy, parameter[name[stream_buffer], name[ImageBuffer]]]
variable[ret] assign[=] call[name[ros_image_buffer], parameter[name[number], constant[1]]]
if <ast.UnaryOp object at 0x7da20e954700> begin[:]
if compare[call[name[ret].timestamps][<ast.UnaryOp object at 0x7da20e954ca0>] greater[>] name[staleness_limit]] begin[:]
<ast.Raise object at 0x7da20e957370>
variable[data] assign[=] call[call[name[ret].data.reshape, parameter[name[ret].data_dim1, name[ret].data_dim2, name[ret].data_dim3]].astype, parameter[name[ret].dtype]]
if compare[name[number] equal[==] constant[1]] begin[:]
return[list[[<ast.Name object at 0x7da18ede75e0>]]]
return[call[name[np].dsplit, parameter[name[data], name[number]]]] | keyword[def] identifier[_ros_read_images] ( identifier[self] , identifier[stream_buffer] , identifier[number] , identifier[staleness_limit] = literal[int] ):
literal[string]
identifier[rospy] . identifier[wait_for_service] ( identifier[stream_buffer] , identifier[timeout] = identifier[self] . identifier[timeout] )
identifier[ros_image_buffer] = identifier[rospy] . identifier[ServiceProxy] ( identifier[stream_buffer] , identifier[ImageBuffer] )
identifier[ret] = identifier[ros_image_buffer] ( identifier[number] , literal[int] )
keyword[if] keyword[not] identifier[staleness_limit] == keyword[None] :
keyword[if] identifier[ret] . identifier[timestamps] [- literal[int] ]> identifier[staleness_limit] :
keyword[raise] identifier[RuntimeError] ( literal[string]
. identifier[format] ( identifier[ret] . identifier[timestamps] [- literal[int] ], identifier[staleness_limit] ))
identifier[data] = identifier[ret] . identifier[data] . identifier[reshape] ( identifier[ret] . identifier[data_dim1] , identifier[ret] . identifier[data_dim2] , identifier[ret] . identifier[data_dim3] ). identifier[astype] ( identifier[ret] . identifier[dtype] )
keyword[if] identifier[number] == literal[int] :
keyword[return] [ identifier[data] ]
keyword[return] identifier[np] . identifier[dsplit] ( identifier[data] , identifier[number] ) | def _ros_read_images(self, stream_buffer, number, staleness_limit=10.0):
""" Reads images from a stream buffer
Parameters
----------
stream_buffer : string
absolute path to the image buffer service
number : int
The number of frames to get. Must be less than the image buffer service's
current buffer size
staleness_limit : float, optional
Max value of how many seconds old the oldest image is. If the oldest image
grabbed is older than this value, a RuntimeError is thrown.
If None, staleness is ignored.
Returns
-------
List of nump.ndarray objects, each one an image
Images are in reverse chronological order (newest first)
"""
rospy.wait_for_service(stream_buffer, timeout=self.timeout)
ros_image_buffer = rospy.ServiceProxy(stream_buffer, ImageBuffer)
ret = ros_image_buffer(number, 1)
if not staleness_limit == None:
if ret.timestamps[-1] > staleness_limit:
raise RuntimeError('Got data {0} seconds old, more than allowed {1} seconds'.format(ret.timestamps[-1], staleness_limit)) # depends on [control=['if'], data=['staleness_limit']] # depends on [control=['if'], data=[]]
data = ret.data.reshape(ret.data_dim1, ret.data_dim2, ret.data_dim3).astype(ret.dtype)
# Special handling for 1 element, since dstack's behavior is different
if number == 1:
return [data] # depends on [control=['if'], data=[]]
return np.dsplit(data, number) |
def merge_likelihood_headers(filenames, outfile):
    """
    Merge header information from likelihood files.

    Parameters:
    -----------
    filenames : input filenames
    outfile : the merged file to write

    Returns:
    --------
    data : the data being written
    """
    filenames = np.atleast_1d(filenames)
    ext = 'PIX_DATA'
    # nside is read from the first file; all inputs are assumed to share it.
    nside = fitsio.read_header(filenames[0], ext=ext)['LKDNSIDE']

    keys = ['STELLAR', 'NINSIDE', 'NANNULUS']
    data_dict = odict(PIXEL=[])
    for k in keys:
        data_dict[k] = []

    for i, filename in enumerate(filenames):
        # Lazy %-style args avoid formatting when DEBUG is disabled.
        logger.debug('(%i/%i) %s', i + 1, len(filenames), filename)
        header = fitsio.read_header(filename, ext=ext)
        data_dict['PIXEL'].append(header['LKDPIX'])
        for key in keys:
            data_dict[key].append(header[key])
        # Release the FITS header before reading the next file.
        del header

    data_dict['PIXEL'] = np.array(data_dict['PIXEL'], dtype=int)
    for key in keys:
        data_dict[key] = np.array(data_dict[key], dtype='f4')

    write_partial_map(outfile, data_dict, nside)
    return data_dict
constant[
Merge header information from likelihood files.
Parameters:
-----------
filenames : input filenames
oufile : the merged file to write
Returns:
--------
data : the data being written
]
variable[filenames] assign[=] call[name[np].atleast_1d, parameter[name[filenames]]]
variable[ext] assign[=] constant[PIX_DATA]
variable[nside] assign[=] call[call[name[fitsio].read_header, parameter[call[name[filenames]][constant[0]]]]][constant[LKDNSIDE]]
variable[keys] assign[=] list[[<ast.Constant object at 0x7da1b24ec130>, <ast.Constant object at 0x7da1b24ed540>, <ast.Constant object at 0x7da1b24ed9c0>]]
variable[data_dict] assign[=] call[name[odict], parameter[]]
for taget[name[k]] in starred[name[keys]] begin[:]
call[name[data_dict]][name[k]] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b24ed210>, <ast.Name object at 0x7da1b24ed960>]]] in starred[call[name[enumerate], parameter[name[filenames]]]] begin[:]
call[name[logger].debug, parameter[binary_operation[constant[(%i/%i) %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.BinOp object at 0x7da1b24ec310>, <ast.Call object at 0x7da1b24ed660>, <ast.Name object at 0x7da1b244ab00>]]]]]
variable[header] assign[=] call[name[fitsio].read_header, parameter[name[filename]]]
call[call[name[data_dict]][constant[PIXEL]].append, parameter[call[name[header]][constant[LKDPIX]]]]
for taget[name[key]] in starred[name[keys]] begin[:]
call[call[name[data_dict]][name[key]].append, parameter[call[name[header]][name[key]]]]
<ast.Delete object at 0x7da1b24486a0>
call[name[data_dict]][constant[PIXEL]] assign[=] call[name[np].array, parameter[call[name[data_dict]][constant[PIXEL]]]]
for taget[name[key]] in starred[name[keys]] begin[:]
call[name[data_dict]][name[key]] assign[=] call[name[np].array, parameter[call[name[data_dict]][name[key]]]]
call[name[write_partial_map], parameter[name[outfile], name[data_dict], name[nside]]]
return[name[data_dict]] | keyword[def] identifier[merge_likelihood_headers] ( identifier[filenames] , identifier[outfile] ):
literal[string]
identifier[filenames] = identifier[np] . identifier[atleast_1d] ( identifier[filenames] )
identifier[ext] = literal[string]
identifier[nside] = identifier[fitsio] . identifier[read_header] ( identifier[filenames] [ literal[int] ], identifier[ext] = identifier[ext] )[ literal[string] ]
identifier[keys] =[ literal[string] , literal[string] , literal[string] ]
identifier[data_dict] = identifier[odict] ( identifier[PIXEL] =[])
keyword[for] identifier[k] keyword[in] identifier[keys] :
identifier[data_dict] [ identifier[k] ]=[]
keyword[for] identifier[i] , identifier[filename] keyword[in] identifier[enumerate] ( identifier[filenames] ):
identifier[logger] . identifier[debug] ( literal[string] %( identifier[i] + literal[int] , identifier[len] ( identifier[filenames] ), identifier[filename] ))
identifier[header] = identifier[fitsio] . identifier[read_header] ( identifier[filename] , identifier[ext] = identifier[ext] )
identifier[data_dict] [ literal[string] ]. identifier[append] ( identifier[header] [ literal[string] ])
keyword[for] identifier[key] keyword[in] identifier[keys] :
identifier[data_dict] [ identifier[key] ]. identifier[append] ( identifier[header] [ identifier[key] ])
keyword[del] identifier[header]
identifier[data_dict] [ literal[string] ]= identifier[np] . identifier[array] ( identifier[data_dict] [ literal[string] ], identifier[dtype] = identifier[int] )
keyword[for] identifier[key] keyword[in] identifier[keys] :
identifier[data_dict] [ identifier[key] ]= identifier[np] . identifier[array] ( identifier[data_dict] [ identifier[key] ], identifier[dtype] = literal[string] )
identifier[write_partial_map] ( identifier[outfile] , identifier[data_dict] , identifier[nside] )
keyword[return] identifier[data_dict] | def merge_likelihood_headers(filenames, outfile):
"""
Merge header information from likelihood files.
Parameters:
-----------
filenames : input filenames
oufile : the merged file to write
Returns:
--------
data : the data being written
"""
filenames = np.atleast_1d(filenames)
ext = 'PIX_DATA'
nside = fitsio.read_header(filenames[0], ext=ext)['LKDNSIDE']
keys = ['STELLAR', 'NINSIDE', 'NANNULUS']
data_dict = odict(PIXEL=[])
for k in keys:
data_dict[k] = [] # depends on [control=['for'], data=['k']]
for (i, filename) in enumerate(filenames):
logger.debug('(%i/%i) %s' % (i + 1, len(filenames), filename))
header = fitsio.read_header(filename, ext=ext)
data_dict['PIXEL'].append(header['LKDPIX'])
for key in keys:
data_dict[key].append(header[key]) # depends on [control=['for'], data=['key']]
del header # depends on [control=['for'], data=[]]
data_dict['PIXEL'] = np.array(data_dict['PIXEL'], dtype=int)
for key in keys:
data_dict[key] = np.array(data_dict[key], dtype='f4') # depends on [control=['for'], data=['key']]
#import pdb; pdb.set_trace()
write_partial_map(outfile, data_dict, nside)
return data_dict |
def reorder_pinned_topics_courses(self, order, course_id):
        """
        Reorder pinned topics.

        Puts the pinned discussion topics in the specified order.
        All pinned topics should be included.
        """
        # REQUIRED path parameter: the course ID.
        path = {"course_id": course_id}
        # REQUIRED form field: ids of the pinned discussion topics in the
        # desired order (for example, "order=104,102,103").
        data = {"order": order}
        params = {}
        url = "/api/v1/courses/{course_id}/discussion_topics/reorder".format(**path)
        self.logger.debug("POST /api/v1/courses/{course_id}/discussion_topics/reorder with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("POST", url, data=data, params=params, no_data=True)
constant[
Reorder pinned topics.
Puts the pinned discussion topics in the specified order.
All pinned topics should be included.
]
variable[path] assign[=] dictionary[[], []]
variable[data] assign[=] dictionary[[], []]
variable[params] assign[=] dictionary[[], []]
constant[ID]
call[name[path]][constant[course_id]] assign[=] name[course_id]
constant[The ids of the pinned discussion topics in the desired order.
(For example, "order=104,102,103".)]
call[name[data]][constant[order]] assign[=] name[order]
call[name[self].logger.debug, parameter[call[constant[POST /api/v1/courses/{course_id}/discussion_topics/reorder with query params: {params} and form data: {data}].format, parameter[]]]]
return[call[name[self].generic_request, parameter[constant[POST], call[constant[/api/v1/courses/{course_id}/discussion_topics/reorder].format, parameter[]]]]] | keyword[def] identifier[reorder_pinned_topics_courses] ( identifier[self] , identifier[order] , identifier[course_id] ):
literal[string]
identifier[path] ={}
identifier[data] ={}
identifier[params] ={}
literal[string]
identifier[path] [ literal[string] ]= identifier[course_id]
literal[string]
identifier[data] [ literal[string] ]= identifier[order]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[params] = identifier[params] , identifier[data] = identifier[data] ,** identifier[path] ))
keyword[return] identifier[self] . identifier[generic_request] ( literal[string] , literal[string] . identifier[format] (** identifier[path] ), identifier[data] = identifier[data] , identifier[params] = identifier[params] , identifier[no_data] = keyword[True] ) | def reorder_pinned_topics_courses(self, order, course_id):
"""
Reorder pinned topics.
Puts the pinned discussion topics in the specified order.
All pinned topics should be included.
"""
path = {}
data = {}
params = {} # REQUIRED - PATH - course_id
'ID'
path['course_id'] = course_id # REQUIRED - order
'The ids of the pinned discussion topics in the desired order.\n (For example, "order=104,102,103".)'
data['order'] = order
self.logger.debug('POST /api/v1/courses/{course_id}/discussion_topics/reorder with query params: {params} and form data: {data}'.format(params=params, data=data, **path))
return self.generic_request('POST', '/api/v1/courses/{course_id}/discussion_topics/reorder'.format(**path), data=data, params=params, no_data=True) |
def video_loss(top_out, targets, model_hparams, vocab_size, weights_fn):
  """Compute loss numerator and denominator for one shard of output."""
  del vocab_size  # unused arg
  # Collapse the leading two dimensions into one flat axis before the loss.
  flat_logits = tf.reshape(top_out, [-1] + common_layers.shape_list(top_out)[2:])
  flat_targets = tf.reshape(targets, [-1] + common_layers.shape_list(targets)[2:])
  cutoff = getattr(model_hparams, "video_modality_loss_cutoff", 0.01)
  return common_layers.padded_cross_entropy(
      flat_logits,
      flat_targets,
      model_hparams.label_smoothing,
      cutoff=cutoff,
      weights_fn=weights_fn)
constant[Compute loss numerator and denominator for one shard of output.]
<ast.Delete object at 0x7da1b201c760>
variable[logits] assign[=] name[top_out]
variable[logits] assign[=] call[name[tf].reshape, parameter[name[logits], binary_operation[list[[<ast.UnaryOp object at 0x7da1b201d510>]] + call[call[name[common_layers].shape_list, parameter[name[logits]]]][<ast.Slice object at 0x7da1b201d570>]]]]
variable[targets] assign[=] call[name[tf].reshape, parameter[name[targets], binary_operation[list[[<ast.UnaryOp object at 0x7da1b201d660>]] + call[call[name[common_layers].shape_list, parameter[name[targets]]]][<ast.Slice object at 0x7da1b201feb0>]]]]
variable[cutoff] assign[=] call[name[getattr], parameter[name[model_hparams], constant[video_modality_loss_cutoff], constant[0.01]]]
return[call[name[common_layers].padded_cross_entropy, parameter[name[logits], name[targets], name[model_hparams].label_smoothing]]] | keyword[def] identifier[video_loss] ( identifier[top_out] , identifier[targets] , identifier[model_hparams] , identifier[vocab_size] , identifier[weights_fn] ):
literal[string]
keyword[del] identifier[vocab_size]
identifier[logits] = identifier[top_out]
identifier[logits] = identifier[tf] . identifier[reshape] ( identifier[logits] ,[- literal[int] ]+ identifier[common_layers] . identifier[shape_list] ( identifier[logits] )[ literal[int] :])
identifier[targets] = identifier[tf] . identifier[reshape] ( identifier[targets] ,[- literal[int] ]+ identifier[common_layers] . identifier[shape_list] ( identifier[targets] )[ literal[int] :])
identifier[cutoff] = identifier[getattr] ( identifier[model_hparams] , literal[string] , literal[int] )
keyword[return] identifier[common_layers] . identifier[padded_cross_entropy] (
identifier[logits] ,
identifier[targets] ,
identifier[model_hparams] . identifier[label_smoothing] ,
identifier[cutoff] = identifier[cutoff] ,
identifier[weights_fn] = identifier[weights_fn] ) | def video_loss(top_out, targets, model_hparams, vocab_size, weights_fn):
"""Compute loss numerator and denominator for one shard of output."""
del vocab_size # unused arg
logits = top_out
logits = tf.reshape(logits, [-1] + common_layers.shape_list(logits)[2:])
targets = tf.reshape(targets, [-1] + common_layers.shape_list(targets)[2:])
cutoff = getattr(model_hparams, 'video_modality_loss_cutoff', 0.01)
return common_layers.padded_cross_entropy(logits, targets, model_hparams.label_smoothing, cutoff=cutoff, weights_fn=weights_fn) |
def _parse_lists(self, match):
        '''Parse lists.'''
        groups = match.groups()
        # Group 4 (the list name) missing means this is a plain username,
        # which is not parsed here.
        if groups[3] is None:
            return match.group(0)
        pre, at_char, user, list_name = groups
        list_name = list_name[1:]  # drop the leading separator character
        if self._include_spans:
            record = (user, list_name, match.span(0))
        else:
            record = (user, list_name)
        self._lists.append(record)
        if self._html:
            return '%s%s' % (pre, self.format_list(at_char, user, list_name))
constant[Parse lists.]
if compare[call[name[match].group, parameter[constant[4]]] is constant[None]] begin[:]
return[call[name[match].group, parameter[constant[0]]]]
<ast.Tuple object at 0x7da1b0ec0df0> assign[=] call[name[match].groups, parameter[]]
variable[list_name] assign[=] call[name[list_name]][<ast.Slice object at 0x7da1b0ec21d0>]
if name[self]._include_spans begin[:]
call[name[self]._lists.append, parameter[tuple[[<ast.Name object at 0x7da1b0ec0850>, <ast.Name object at 0x7da1b0ec00d0>, <ast.Call object at 0x7da1b0ec0310>]]]]
if name[self]._html begin[:]
return[binary_operation[constant[%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0ef1a20>, <ast.Call object at 0x7da1b0ef1a80>]]]] | keyword[def] identifier[_parse_lists] ( identifier[self] , identifier[match] ):
literal[string]
keyword[if] identifier[match] . identifier[group] ( literal[int] ) keyword[is] keyword[None] :
keyword[return] identifier[match] . identifier[group] ( literal[int] )
identifier[pre] , identifier[at_char] , identifier[user] , identifier[list_name] = identifier[match] . identifier[groups] ()
identifier[list_name] = identifier[list_name] [ literal[int] :]
keyword[if] identifier[self] . identifier[_include_spans] :
identifier[self] . identifier[_lists] . identifier[append] (( identifier[user] , identifier[list_name] , identifier[match] . identifier[span] ( literal[int] )))
keyword[else] :
identifier[self] . identifier[_lists] . identifier[append] (( identifier[user] , identifier[list_name] ))
keyword[if] identifier[self] . identifier[_html] :
keyword[return] literal[string] %( identifier[pre] , identifier[self] . identifier[format_list] ( identifier[at_char] , identifier[user] , identifier[list_name] )) | def _parse_lists(self, match):
"""Parse lists."""
# Don't parse usernames here
if match.group(4) is None:
return match.group(0) # depends on [control=['if'], data=[]]
(pre, at_char, user, list_name) = match.groups()
list_name = list_name[1:]
if self._include_spans:
self._lists.append((user, list_name, match.span(0))) # depends on [control=['if'], data=[]]
else:
self._lists.append((user, list_name))
if self._html:
return '%s%s' % (pre, self.format_list(at_char, user, list_name)) # depends on [control=['if'], data=[]] |
def set_timezone(self, timezone: str):
        """ sets the timezone for the AP. e.g. "Europe/Berlin"

        Args:
            timezone(str): the new timezone
        """
        payload = json.dumps({"timezoneId": timezone})
        return self._restCall("home/setTimezone", body=payload)
constant[ sets the timezone for the AP. e.g. "Europe/Berlin"
Args:
timezone(str): the new timezone
]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da18ede66e0>], [<ast.Name object at 0x7da18ede4d00>]]
return[call[name[self]._restCall, parameter[constant[home/setTimezone]]]] | keyword[def] identifier[set_timezone] ( identifier[self] , identifier[timezone] : identifier[str] ):
literal[string]
identifier[data] ={ literal[string] : identifier[timezone] }
keyword[return] identifier[self] . identifier[_restCall] ( literal[string] , identifier[body] = identifier[json] . identifier[dumps] ( identifier[data] )) | def set_timezone(self, timezone: str):
""" sets the timezone for the AP. e.g. "Europe/Berlin"
Args:
timezone(str): the new timezone
"""
data = {'timezoneId': timezone}
return self._restCall('home/setTimezone', body=json.dumps(data)) |
def process(self):
        """ Calls the external cleanser scripts to (optionally) purge the meta data and then
            send the contents of the dropbox via email.

            Status values set here (in ``self.status``):
              * u'100 processor running' while attachments are being processed
              * '490 cleanser failure but notify success' when cleansing failed
                but the dirty archive was preserved (notification still proceeds)
              * '900 success', '605 smtp failure', or '610 smtp error (...)' as
                the outcome of notifying the editors
            NOTE(review): ``self.status_int`` appears to be the numeric prefix of
            ``self.status`` (500-599 = cleanser failure, 800 = unsupported
            attachment) -- confirm against the class definition.

            Returns:
                The final ``self.status`` string.
        """
        if self.num_attachments > 0:
            self.status = u'100 processor running'
            # Keep an uncleansed copy so it can be preserved if cleansing fails.
            fs_dirty_archive = self._create_backup()
            # calling _process_attachments has the side-effect of updating `send_attachments`
            self._process_attachments()
            if self.status_int < 500 and not self.send_attachments:
                self._create_archive()
        if self.status_int >= 500 and self.status_int < 600:
            # cleansing failed
            # if configured, we need to move the uncleansed archive to
            # the appropriate folder and notify the editors
            if 'dropbox_dirty_archive_url_format' in self.settings:
                # create_archive
                shutil.move(
                    fs_dirty_archive,
                    '%s/%s.zip.pgp' % (self.container.fs_archive_dirty, self.drop_id))
            # update status
            # it's now considered 'successful-ish' again
            self.status = '490 cleanser failure but notify success'
        if self.status_int == 800:
            # at least one attachment was not supported
            # if configured, we need to move the uncleansed archive to
            # the appropriate folder and notify the editors
            if 'dropbox_dirty_archive_url_format' in self.settings:
                # create_archive
                shutil.move(
                    fs_dirty_archive,
                    '%s/%s.zip.pgp' % (self.container.fs_archive_dirty, self.drop_id))
        if self.status_int < 500 or self.status_int == 800:
            # Cleansing succeeded (or the dirty archive was preserved):
            # notify the editors by email.
            try:
                if self._notify_editors() > 0:
                    if self.status_int < 500:
                        self.status = '900 success'
                else:
                    self.status = '605 smtp failure'
            except Exception:
                import traceback
                tb = traceback.format_exc()
                self.status = '610 smtp error (%s)' % tb
        self.cleanup()
        return self.status
constant[ Calls the external cleanser scripts to (optionally) purge the meta data and then
send the contents of the dropbox via email.
]
if compare[name[self].num_attachments greater[>] constant[0]] begin[:]
name[self].status assign[=] constant[100 processor running]
variable[fs_dirty_archive] assign[=] call[name[self]._create_backup, parameter[]]
call[name[self]._process_attachments, parameter[]]
if <ast.BoolOp object at 0x7da1b2347340> begin[:]
call[name[self]._create_archive, parameter[]]
if <ast.BoolOp object at 0x7da20c6e4be0> begin[:]
if compare[constant[dropbox_dirty_archive_url_format] in name[self].settings] begin[:]
call[name[shutil].move, parameter[name[fs_dirty_archive], binary_operation[constant[%s/%s.zip.pgp] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20c6e76d0>, <ast.Attribute object at 0x7da20c6e7f40>]]]]]
name[self].status assign[=] constant[490 cleanser failure but notify success]
if compare[name[self].status_int equal[==] constant[800]] begin[:]
if compare[constant[dropbox_dirty_archive_url_format] in name[self].settings] begin[:]
call[name[shutil].move, parameter[name[fs_dirty_archive], binary_operation[constant[%s/%s.zip.pgp] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20c6e64d0>, <ast.Attribute object at 0x7da20c6e4ee0>]]]]]
if <ast.BoolOp object at 0x7da20c6e6050> begin[:]
<ast.Try object at 0x7da18bcc83d0>
call[name[self].cleanup, parameter[]]
return[name[self].status] | keyword[def] identifier[process] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[num_attachments] > literal[int] :
identifier[self] . identifier[status] = literal[string]
identifier[fs_dirty_archive] = identifier[self] . identifier[_create_backup] ()
identifier[self] . identifier[_process_attachments] ()
keyword[if] identifier[self] . identifier[status_int] < literal[int] keyword[and] keyword[not] identifier[self] . identifier[send_attachments] :
identifier[self] . identifier[_create_archive] ()
keyword[if] identifier[self] . identifier[status_int] >= literal[int] keyword[and] identifier[self] . identifier[status_int] < literal[int] :
keyword[if] literal[string] keyword[in] identifier[self] . identifier[settings] :
identifier[shutil] . identifier[move] (
identifier[fs_dirty_archive] ,
literal[string] %( identifier[self] . identifier[container] . identifier[fs_archive_dirty] , identifier[self] . identifier[drop_id] ))
identifier[self] . identifier[status] = literal[string]
keyword[if] identifier[self] . identifier[status_int] == literal[int] :
keyword[if] literal[string] keyword[in] identifier[self] . identifier[settings] :
identifier[shutil] . identifier[move] (
identifier[fs_dirty_archive] ,
literal[string] %( identifier[self] . identifier[container] . identifier[fs_archive_dirty] , identifier[self] . identifier[drop_id] ))
keyword[if] identifier[self] . identifier[status_int] < literal[int] keyword[or] identifier[self] . identifier[status_int] == literal[int] :
keyword[try] :
keyword[if] identifier[self] . identifier[_notify_editors] ()> literal[int] :
keyword[if] identifier[self] . identifier[status_int] < literal[int] :
identifier[self] . identifier[status] = literal[string]
keyword[else] :
identifier[self] . identifier[status] = literal[string]
keyword[except] identifier[Exception] :
keyword[import] identifier[traceback]
identifier[tb] = identifier[traceback] . identifier[format_exc] ()
identifier[self] . identifier[status] = literal[string] % identifier[tb]
identifier[self] . identifier[cleanup] ()
keyword[return] identifier[self] . identifier[status] | def process(self):
""" Calls the external cleanser scripts to (optionally) purge the meta data and then
send the contents of the dropbox via email.
"""
if self.num_attachments > 0:
self.status = u'100 processor running'
fs_dirty_archive = self._create_backup()
# calling _process_attachments has the side-effect of updating `send_attachments`
self._process_attachments()
if self.status_int < 500 and (not self.send_attachments):
self._create_archive() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self.status_int >= 500 and self.status_int < 600:
# cleansing failed
# if configured, we need to move the uncleansed archive to
# the appropriate folder and notify the editors
if 'dropbox_dirty_archive_url_format' in self.settings:
# create_archive
shutil.move(fs_dirty_archive, '%s/%s.zip.pgp' % (self.container.fs_archive_dirty, self.drop_id))
# update status
# it's now considered 'successful-ish' again
self.status = '490 cleanser failure but notify success' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self.status_int == 800:
# at least one attachment was not supported
# if configured, we need to move the uncleansed archive to
# the appropriate folder and notify the editors
if 'dropbox_dirty_archive_url_format' in self.settings:
# create_archive
shutil.move(fs_dirty_archive, '%s/%s.zip.pgp' % (self.container.fs_archive_dirty, self.drop_id)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self.status_int < 500 or self.status_int == 800:
try:
if self._notify_editors() > 0:
if self.status_int < 500:
self.status = '900 success' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
self.status = '605 smtp failure' # depends on [control=['try'], data=[]]
except Exception:
import traceback
tb = traceback.format_exc()
self.status = '610 smtp error (%s)' % tb # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
self.cleanup()
return self.status |
def print_title(title: str) -> None:
    """
    Helper function to print a title.
    """
    banner = "**** {} ".format(title)
    # Pad the banner with asterisks out to 80 columns.
    padding = "*" * (80 - len(banner))
    print("{}{}{}{}".format(Style.BRIGHT, Fore.GREEN, banner, padding))
constant[
Helper function to print a title.
]
variable[msg] assign[=] call[constant[**** {} ].format, parameter[name[title]]]
call[name[print], parameter[call[constant[{}{}{}{}].format, parameter[name[Style].BRIGHT, name[Fore].GREEN, name[msg], binary_operation[constant[*] * binary_operation[constant[80] - call[name[len], parameter[name[msg]]]]]]]]] | keyword[def] identifier[print_title] ( identifier[title] : identifier[str] )-> keyword[None] :
literal[string]
identifier[msg] = literal[string] . identifier[format] ( identifier[title] )
identifier[print] ( literal[string] . identifier[format] ( identifier[Style] . identifier[BRIGHT] , identifier[Fore] . identifier[GREEN] , identifier[msg] , literal[string] *( literal[int] - identifier[len] ( identifier[msg] )))) | def print_title(title: str) -> None:
"""
Helper function to print a title.
"""
msg = '**** {} '.format(title)
print('{}{}{}{}'.format(Style.BRIGHT, Fore.GREEN, msg, '*' * (80 - len(msg)))) |
def getAllRegexpNames(regexpList = None):
    '''
    Method that recovers the names of the <RegexpObject> in a given list.

    :param regexpList: list of <RegexpObject>. If None, all the available <RegexpObject> will be recovered.

    :return: Array of strings containing the available regexps.
    '''
    # PEP 8: compare to None with `is`, not `==` (an object's __eq__ could
    # otherwise hijack the check).
    if regexpList is None:
        regexpList = getAllRegexp()
    # 'all' is a pseudo-name that selects every regexp at once.
    return ['all'] + [r.name for r in regexpList]
constant[
Method that recovers the names of the <RegexpObject> in a given list.
:param regexpList: list of <RegexpObject>. If None, all the available <RegexpObject> will be recovered.
:return: Array of strings containing the available regexps.
]
if compare[name[regexpList] equal[==] constant[None]] begin[:]
variable[regexpList] assign[=] call[name[getAllRegexp], parameter[]]
variable[listNames] assign[=] list[[<ast.Constant object at 0x7da20c6c7310>]]
for taget[name[r]] in starred[name[regexpList]] begin[:]
call[name[listNames].append, parameter[name[r].name]]
return[name[listNames]] | keyword[def] identifier[getAllRegexpNames] ( identifier[regexpList] = keyword[None] ):
literal[string]
keyword[if] identifier[regexpList] == keyword[None] :
identifier[regexpList] = identifier[getAllRegexp] ()
identifier[listNames] =[ literal[string] ]
keyword[for] identifier[r] keyword[in] identifier[regexpList] :
identifier[listNames] . identifier[append] ( identifier[r] . identifier[name] )
keyword[return] identifier[listNames] | def getAllRegexpNames(regexpList=None):
"""
Method that recovers the names of the <RegexpObject> in a given list.
:param regexpList: list of <RegexpObject>. If None, all the available <RegexpObject> will be recovered.
:return: Array of strings containing the available regexps.
"""
if regexpList == None:
regexpList = getAllRegexp() # depends on [control=['if'], data=['regexpList']]
listNames = ['all'] # going through the regexpList
for r in regexpList:
listNames.append(r.name) # depends on [control=['for'], data=['r']]
return listNames |
def file_query(database, file_name, **connection_args):
    '''
    Run an arbitrary SQL query from the specified file and return
    the number of affected rows.

    .. versionadded:: 2017.7.0

    database
        database to run script inside

    file_name
        File name of the script. This can be on the minion, or a file that is
        reachable by the fileserver

    CLI Example:

    .. code-block:: bash

        salt '*' mysql.file_query mydb file_name=/tmp/sqlfile.sql
        salt '*' mysql.file_query mydb file_name=salt://sqlfile.sql

    Return data:

    .. code-block:: python

        {'query time': {'human': '39.0ms', 'raw': '0.03899'}, 'rows affected': 1L}
    '''
    # Remote sources (salt://, http://, ...) are cached to the minion first.
    if any(file_name.startswith(proto) for proto in ('salt://', 'http://', 'https://', 'swift://', 's3://')):
        file_name = __salt__['cp.cache_file'](file_name)
    if os.path.exists(file_name):
        with salt.utils.files.fopen(file_name, 'r') as ifile:
            contents = salt.utils.stringutils.to_unicode(ifile.read())
    else:
        log.error('File "%s" does not exist', file_name)
        return False
    query_string = ""
    # Accumulator merging the results of every statement found in the file.
    ret = {'rows returned': 0, 'columns': [], 'results': [], 'rows affected': 0, 'query time': {'raw': 0}}
    for line in contents.splitlines():
        if re.match(r'--', line):  # ignore sql comments
            continue
        if not re.search(r'[^-;]+;', line):  # keep appending lines that don't end in ;
            query_string = query_string + line
        else:
            query_string = query_string + line  # append lines that end with ; and run query
            query_result = query(database, query_string, **connection_args)
            query_string = ""
            if query_result is False:
                # Fail out on error
                return False
            # Fold this statement's results into the running totals; keys may
            # be absent depending on the statement type.
            if 'query time' in query_result:
                ret['query time']['raw'] += float(query_result['query time']['raw'])
            if 'rows returned' in query_result:
                ret['rows returned'] += query_result['rows returned']
            if 'columns' in query_result:
                ret['columns'].append(query_result['columns'])
            if 'results' in query_result:
                ret['results'].append(query_result['results'])
            if 'rows affected' in query_result:
                ret['rows affected'] += query_result['rows affected']
    ret['query time']['human'] = six.text_type(round(float(ret['query time']['raw']), 2)) + 's'
    ret['query time']['raw'] = round(float(ret['query time']['raw']), 5)
    # Remove empty keys in ret
    ret = {k: v for k, v in six.iteritems(ret) if v}
    return ret
constant[
Run an arbitrary SQL query from the specified file and return the
the number of affected rows.
.. versionadded:: 2017.7.0
database
database to run script inside
file_name
File name of the script. This can be on the minion, or a file that is reachable by the fileserver
CLI Example:
.. code-block:: bash
salt '*' mysql.file_query mydb file_name=/tmp/sqlfile.sql
salt '*' mysql.file_query mydb file_name=salt://sqlfile.sql
Return data:
.. code-block:: python
{'query time': {'human': '39.0ms', 'raw': '0.03899'}, 'rows affected': 1L}
]
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da18f58ed10>]] begin[:]
variable[file_name] assign[=] call[call[name[__salt__]][constant[cp.cache_file]], parameter[name[file_name]]]
if call[name[os].path.exists, parameter[name[file_name]]] begin[:]
with call[name[salt].utils.files.fopen, parameter[name[file_name], constant[r]]] begin[:]
variable[contents] assign[=] call[name[salt].utils.stringutils.to_unicode, parameter[call[name[ifile].read, parameter[]]]]
variable[query_string] assign[=] constant[]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da18f58e560>, <ast.Constant object at 0x7da18f58f6d0>, <ast.Constant object at 0x7da18f58e830>, <ast.Constant object at 0x7da18f58ea70>, <ast.Constant object at 0x7da18f58d630>], [<ast.Constant object at 0x7da18f58fc10>, <ast.List object at 0x7da18f58f1f0>, <ast.List object at 0x7da18f58ceb0>, <ast.Constant object at 0x7da18f58c520>, <ast.Dict object at 0x7da18f58e710>]]
for taget[name[line]] in starred[call[name[contents].splitlines, parameter[]]] begin[:]
if call[name[re].match, parameter[constant[--], name[line]]] begin[:]
continue
if <ast.UnaryOp object at 0x7da18f58fa00> begin[:]
variable[query_string] assign[=] binary_operation[name[query_string] + name[line]]
call[call[name[ret]][constant[query time]]][constant[human]] assign[=] binary_operation[call[name[six].text_type, parameter[call[name[round], parameter[call[name[float], parameter[call[call[name[ret]][constant[query time]]][constant[raw]]]], constant[2]]]]] + constant[s]]
call[call[name[ret]][constant[query time]]][constant[raw]] assign[=] call[name[round], parameter[call[name[float], parameter[call[call[name[ret]][constant[query time]]][constant[raw]]]], constant[5]]]
variable[ret] assign[=] <ast.DictComp object at 0x7da18fe91b10>
return[name[ret]] | keyword[def] identifier[file_query] ( identifier[database] , identifier[file_name] ,** identifier[connection_args] ):
literal[string]
keyword[if] identifier[any] ( identifier[file_name] . identifier[startswith] ( identifier[proto] ) keyword[for] identifier[proto] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] )):
identifier[file_name] = identifier[__salt__] [ literal[string] ]( identifier[file_name] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[file_name] ):
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[file_name] , literal[string] ) keyword[as] identifier[ifile] :
identifier[contents] = identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_unicode] ( identifier[ifile] . identifier[read] ())
keyword[else] :
identifier[log] . identifier[error] ( literal[string] , identifier[file_name] )
keyword[return] keyword[False]
identifier[query_string] = literal[string]
identifier[ret] ={ literal[string] : literal[int] , literal[string] :[], literal[string] :[], literal[string] : literal[int] , literal[string] :{ literal[string] : literal[int] }}
keyword[for] identifier[line] keyword[in] identifier[contents] . identifier[splitlines] ():
keyword[if] identifier[re] . identifier[match] ( literal[string] , identifier[line] ):
keyword[continue]
keyword[if] keyword[not] identifier[re] . identifier[search] ( literal[string] , identifier[line] ):
identifier[query_string] = identifier[query_string] + identifier[line]
keyword[else] :
identifier[query_string] = identifier[query_string] + identifier[line]
identifier[query_result] = identifier[query] ( identifier[database] , identifier[query_string] ,** identifier[connection_args] )
identifier[query_string] = literal[string]
keyword[if] identifier[query_result] keyword[is] keyword[False] :
keyword[return] keyword[False]
keyword[if] literal[string] keyword[in] identifier[query_result] :
identifier[ret] [ literal[string] ][ literal[string] ]+= identifier[float] ( identifier[query_result] [ literal[string] ][ literal[string] ])
keyword[if] literal[string] keyword[in] identifier[query_result] :
identifier[ret] [ literal[string] ]+= identifier[query_result] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[query_result] :
identifier[ret] [ literal[string] ]. identifier[append] ( identifier[query_result] [ literal[string] ])
keyword[if] literal[string] keyword[in] identifier[query_result] :
identifier[ret] [ literal[string] ]. identifier[append] ( identifier[query_result] [ literal[string] ])
keyword[if] literal[string] keyword[in] identifier[query_result] :
identifier[ret] [ literal[string] ]+= identifier[query_result] [ literal[string] ]
identifier[ret] [ literal[string] ][ literal[string] ]= identifier[six] . identifier[text_type] ( identifier[round] ( identifier[float] ( identifier[ret] [ literal[string] ][ literal[string] ]), literal[int] ))+ literal[string]
identifier[ret] [ literal[string] ][ literal[string] ]= identifier[round] ( identifier[float] ( identifier[ret] [ literal[string] ][ literal[string] ]), literal[int] )
identifier[ret] ={ identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[six] . identifier[iteritems] ( identifier[ret] ) keyword[if] identifier[v] }
keyword[return] identifier[ret] | def file_query(database, file_name, **connection_args):
"""
Run an arbitrary SQL query from the specified file and return the
the number of affected rows.
.. versionadded:: 2017.7.0
database
database to run script inside
file_name
File name of the script. This can be on the minion, or a file that is reachable by the fileserver
CLI Example:
.. code-block:: bash
salt '*' mysql.file_query mydb file_name=/tmp/sqlfile.sql
salt '*' mysql.file_query mydb file_name=salt://sqlfile.sql
Return data:
.. code-block:: python
{'query time': {'human': '39.0ms', 'raw': '0.03899'}, 'rows affected': 1L}
"""
if any((file_name.startswith(proto) for proto in ('salt://', 'http://', 'https://', 'swift://', 's3://'))):
file_name = __salt__['cp.cache_file'](file_name) # depends on [control=['if'], data=[]]
if os.path.exists(file_name):
with salt.utils.files.fopen(file_name, 'r') as ifile:
contents = salt.utils.stringutils.to_unicode(ifile.read()) # depends on [control=['with'], data=['ifile']] # depends on [control=['if'], data=[]]
else:
log.error('File "%s" does not exist', file_name)
return False
query_string = ''
ret = {'rows returned': 0, 'columns': [], 'results': [], 'rows affected': 0, 'query time': {'raw': 0}}
for line in contents.splitlines():
if re.match('--', line): # ignore sql comments
continue # depends on [control=['if'], data=[]]
if not re.search('[^-;]+;', line): # keep appending lines that don't end in ;
query_string = query_string + line # depends on [control=['if'], data=[]]
else:
query_string = query_string + line # append lines that end with ; and run query
query_result = query(database, query_string, **connection_args)
query_string = ''
if query_result is False:
# Fail out on error
return False # depends on [control=['if'], data=[]]
if 'query time' in query_result:
ret['query time']['raw'] += float(query_result['query time']['raw']) # depends on [control=['if'], data=['query_result']]
if 'rows returned' in query_result:
ret['rows returned'] += query_result['rows returned'] # depends on [control=['if'], data=['query_result']]
if 'columns' in query_result:
ret['columns'].append(query_result['columns']) # depends on [control=['if'], data=['query_result']]
if 'results' in query_result:
ret['results'].append(query_result['results']) # depends on [control=['if'], data=['query_result']]
if 'rows affected' in query_result:
ret['rows affected'] += query_result['rows affected'] # depends on [control=['if'], data=['query_result']] # depends on [control=['for'], data=['line']]
ret['query time']['human'] = six.text_type(round(float(ret['query time']['raw']), 2)) + 's'
ret['query time']['raw'] = round(float(ret['query time']['raw']), 5)
# Remove empty keys in ret
ret = {k: v for (k, v) in six.iteritems(ret) if v}
return ret |
def has_in_watched(self, watched):
    """
    Check whether the authenticated user is subscribed to (watching) *watched*.

    :calls: `GET /repos/:owner/:repo/subscription <http://developer.github.com/v3/activity/watching>`_
    :param watched: :class:`github.Repository.Repository`
    :rtype: bool
    """
    assert isinstance(watched, github.Repository.Repository), watched
    url = "/repos/" + watched._identity + "/subscription"
    # A 200 response means a subscription record exists; any other status
    # (typically 404) means the user is not watching the repository.
    status, _headers, _data = self._requester.requestJson("GET", url)
    return status == 200
constant[
:calls: `GET /repos/:owner/:repo/subscription <http://developer.github.com/v3/activity/watching>`_
:param watched: :class:`github.Repository.Repository`
:rtype: bool
]
assert[call[name[isinstance], parameter[name[watched], name[github].Repository.Repository]]]
<ast.Tuple object at 0x7da1b2126e30> assign[=] call[name[self]._requester.requestJson, parameter[constant[GET], binary_operation[binary_operation[constant[/repos/] + name[watched]._identity] + constant[/subscription]]]]
return[compare[name[status] equal[==] constant[200]]] | keyword[def] identifier[has_in_watched] ( identifier[self] , identifier[watched] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[watched] , identifier[github] . identifier[Repository] . identifier[Repository] ), identifier[watched]
identifier[status] , identifier[headers] , identifier[data] = identifier[self] . identifier[_requester] . identifier[requestJson] (
literal[string] ,
literal[string] + identifier[watched] . identifier[_identity] + literal[string]
)
keyword[return] identifier[status] == literal[int] | def has_in_watched(self, watched):
"""
:calls: `GET /repos/:owner/:repo/subscription <http://developer.github.com/v3/activity/watching>`_
:param watched: :class:`github.Repository.Repository`
:rtype: bool
"""
assert isinstance(watched, github.Repository.Repository), watched
(status, headers, data) = self._requester.requestJson('GET', '/repos/' + watched._identity + '/subscription')
return status == 200 |
def bar(self, subset=None, axis=0, color='#d65f5f', width=100,
        align='left', vmin=None, vmax=None):
    """
    Draw bar chart in the cell backgrounds.

    Parameters
    ----------
    subset : IndexSlice, optional
        A valid slice for `data` to limit the style application to.
    axis : {0 or 'index', 1 or 'columns', None}, default 0
        Apply to each column (``axis=0`` or ``'index'``), to each row
        (``axis=1`` or ``'columns'``), or to the entire DataFrame at once
        with ``axis=None``.
    color : str or 2-tuple/list
        A single color used for both negative and positive numbers, or a
        pair where the first element is the color_negative and the second
        is the color_positive (eg: ['#d65f5f', '#5fba7d']).
    width : float, default 100
        A number between 0 and 100. The largest value will cover `width`
        percent of the cell's width.
    align : {'left', 'zero',' mid'}, default 'left'
        How to align the bars with the cells.

        - 'left' : the min value starts at the left of the cell.
        - 'zero' : a value of zero is located at the center of the cell.
        - 'mid' : the center of the cell is at (max-min)/2, or
          if values are all negative (positive) the zero is aligned
          at the right (left) of the cell.

        .. versionadded:: 0.20.0

    vmin : float, optional
        Minimum bar value, defining the left hand limit of the bar drawing
        range; lower values are clipped to `vmin`. When None (default),
        the minimum value of the data is used.

        .. versionadded:: 0.24.0

    vmax : float, optional
        Maximum bar value, defining the right hand limit of the bar drawing
        range; higher values are clipped to `vmax`. When None (default),
        the maximum value of the data is used.

        .. versionadded:: 0.24.0

    Returns
    -------
    self : Styler
    """
    if align not in ('left', 'zero', 'mid'):
        raise ValueError("`align` must be one of {'left', 'zero',' mid'}")

    # Normalise ``color`` into a [negative, positive] pair; an input of
    # length 2 is passed through untouched.
    if is_list_like(color):
        if len(color) > 2:
            raise ValueError("`color` must be string or a list-like"
                             " of length 2: [`color_neg`, `color_pos`]"
                             " (eg: color=['#d65f5f', '#5fba7d'])")
        if len(color) == 1:
            color = [color[0], color[0]]
    else:
        color = [color, color]

    subset = _non_reducing_slice(_maybe_numeric_slice(self.data, subset))
    self.apply(self._bar, subset=subset, axis=axis, align=align,
               colors=color, width=width, vmin=vmin, vmax=vmax)
    return self
constant[
Draw bar chart in the cell backgrounds.
Parameters
----------
subset : IndexSlice, optional
A valid slice for `data` to limit the style application to.
axis : {0 or 'index', 1 or 'columns', None}, default 0
apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
with ``axis=None``.
color : str or 2-tuple/list
If a str is passed, the color is the same for both
negative and positive numbers. If 2-tuple/list is used, the
first element is the color_negative and the second is the
color_positive (eg: ['#d65f5f', '#5fba7d']).
width : float, default 100
A number between 0 or 100. The largest value will cover `width`
percent of the cell's width.
align : {'left', 'zero',' mid'}, default 'left'
How to align the bars with the cells.
- 'left' : the min value starts at the left of the cell.
- 'zero' : a value of zero is located at the center of the cell.
- 'mid' : the center of the cell is at (max-min)/2, or
if values are all negative (positive) the zero is aligned
at the right (left) of the cell.
.. versionadded:: 0.20.0
vmin : float, optional
Minimum bar value, defining the left hand limit
of the bar drawing range, lower values are clipped to `vmin`.
When None (default): the minimum value of the data will be used.
.. versionadded:: 0.24.0
vmax : float, optional
Maximum bar value, defining the right hand limit
of the bar drawing range, higher values are clipped to `vmax`.
When None (default): the maximum value of the data will be used.
.. versionadded:: 0.24.0
Returns
-------
self : Styler
]
if compare[name[align] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da207f01180>, <ast.Constant object at 0x7da207f02c50>, <ast.Constant object at 0x7da207f00a30>]]] begin[:]
<ast.Raise object at 0x7da207f02d40>
if <ast.UnaryOp object at 0x7da207f007c0> begin[:]
variable[color] assign[=] list[[<ast.Name object at 0x7da207f03bb0>, <ast.Name object at 0x7da207f00bb0>]]
variable[subset] assign[=] call[name[_maybe_numeric_slice], parameter[name[self].data, name[subset]]]
variable[subset] assign[=] call[name[_non_reducing_slice], parameter[name[subset]]]
call[name[self].apply, parameter[name[self]._bar]]
return[name[self]] | keyword[def] identifier[bar] ( identifier[self] , identifier[subset] = keyword[None] , identifier[axis] = literal[int] , identifier[color] = literal[string] , identifier[width] = literal[int] ,
identifier[align] = literal[string] , identifier[vmin] = keyword[None] , identifier[vmax] = keyword[None] ):
literal[string]
keyword[if] identifier[align] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] ( identifier[is_list_like] ( identifier[color] )):
identifier[color] =[ identifier[color] , identifier[color] ]
keyword[elif] identifier[len] ( identifier[color] )== literal[int] :
identifier[color] =[ identifier[color] [ literal[int] ], identifier[color] [ literal[int] ]]
keyword[elif] identifier[len] ( identifier[color] )> literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
literal[string] )
identifier[subset] = identifier[_maybe_numeric_slice] ( identifier[self] . identifier[data] , identifier[subset] )
identifier[subset] = identifier[_non_reducing_slice] ( identifier[subset] )
identifier[self] . identifier[apply] ( identifier[self] . identifier[_bar] , identifier[subset] = identifier[subset] , identifier[axis] = identifier[axis] ,
identifier[align] = identifier[align] , identifier[colors] = identifier[color] , identifier[width] = identifier[width] ,
identifier[vmin] = identifier[vmin] , identifier[vmax] = identifier[vmax] )
keyword[return] identifier[self] | def bar(self, subset=None, axis=0, color='#d65f5f', width=100, align='left', vmin=None, vmax=None):
"""
Draw bar chart in the cell backgrounds.
Parameters
----------
subset : IndexSlice, optional
A valid slice for `data` to limit the style application to.
axis : {0 or 'index', 1 or 'columns', None}, default 0
apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
with ``axis=None``.
color : str or 2-tuple/list
If a str is passed, the color is the same for both
negative and positive numbers. If 2-tuple/list is used, the
first element is the color_negative and the second is the
color_positive (eg: ['#d65f5f', '#5fba7d']).
width : float, default 100
A number between 0 or 100. The largest value will cover `width`
percent of the cell's width.
align : {'left', 'zero',' mid'}, default 'left'
How to align the bars with the cells.
- 'left' : the min value starts at the left of the cell.
- 'zero' : a value of zero is located at the center of the cell.
- 'mid' : the center of the cell is at (max-min)/2, or
if values are all negative (positive) the zero is aligned
at the right (left) of the cell.
.. versionadded:: 0.20.0
vmin : float, optional
Minimum bar value, defining the left hand limit
of the bar drawing range, lower values are clipped to `vmin`.
When None (default): the minimum value of the data will be used.
.. versionadded:: 0.24.0
vmax : float, optional
Maximum bar value, defining the right hand limit
of the bar drawing range, higher values are clipped to `vmax`.
When None (default): the maximum value of the data will be used.
.. versionadded:: 0.24.0
Returns
-------
self : Styler
"""
if align not in ('left', 'zero', 'mid'):
raise ValueError("`align` must be one of {'left', 'zero',' mid'}") # depends on [control=['if'], data=[]]
if not is_list_like(color):
color = [color, color] # depends on [control=['if'], data=[]]
elif len(color) == 1:
color = [color[0], color[0]] # depends on [control=['if'], data=[]]
elif len(color) > 2:
raise ValueError("`color` must be string or a list-like of length 2: [`color_neg`, `color_pos`] (eg: color=['#d65f5f', '#5fba7d'])") # depends on [control=['if'], data=[]]
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(self._bar, subset=subset, axis=axis, align=align, colors=color, width=width, vmin=vmin, vmax=vmax)
return self |
def e_164(msisdn: str) -> str:
    """
    Return *msisdn* formatted as an E.164 international number.
    """
    # The phonenumbers library requires a leading "+" to identify the
    # country, so normalise the input to exactly one "+" prefix first.
    normalised = "+" + msisdn.lstrip("+")
    parsed = phonenumbers.parse(normalised, None)
    return phonenumbers.format_number(parsed, phonenumbers.PhoneNumberFormat.E164)
constant[
Returns the msisdn in E.164 international format.
]
variable[number] assign[=] call[name[phonenumbers].parse, parameter[call[constant[+{}].format, parameter[call[name[msisdn].lstrip, parameter[constant[+]]]]], constant[None]]]
return[call[name[phonenumbers].format_number, parameter[name[number], name[phonenumbers].PhoneNumberFormat.E164]]] | keyword[def] identifier[e_164] ( identifier[msisdn] : identifier[str] )-> identifier[str] :
literal[string]
identifier[number] = identifier[phonenumbers] . identifier[parse] ( literal[string] . identifier[format] ( identifier[msisdn] . identifier[lstrip] ( literal[string] )), keyword[None] )
keyword[return] identifier[phonenumbers] . identifier[format_number] ( identifier[number] , identifier[phonenumbers] . identifier[PhoneNumberFormat] . identifier[E164] ) | def e_164(msisdn: str) -> str:
"""
Returns the msisdn in E.164 international format.
"""
# Phonenumbers library requires the + to identify the country, so we add it if it
# does not already exist
number = phonenumbers.parse('+{}'.format(msisdn.lstrip('+')), None)
return phonenumbers.format_number(number, phonenumbers.PhoneNumberFormat.E164) |
def serialize_yaml_tofile(filename, resource):
    """
    Serialize a K8S resource to a YAML-formatted file.

    Args:
        filename: Path of the file to (over)write.
        resource: The resource object to serialize; anything accepted by
            ``yaml.dump``.
    """
    # The previous implementation used the Python-2-only ``file()`` builtin
    # and never closed the handle; ``with open(...)`` fixes both while
    # keeping the output identical.
    with open(filename, "w") as stream:
        yaml.dump(resource, stream, default_flow_style=False)
constant[
Serializes a K8S resource to YAML-formatted file.
]
variable[stream] assign[=] call[name[file], parameter[name[filename], constant[w]]]
call[name[yaml].dump, parameter[name[resource], name[stream]]] | keyword[def] identifier[serialize_yaml_tofile] ( identifier[filename] , identifier[resource] ):
literal[string]
identifier[stream] = identifier[file] ( identifier[filename] , literal[string] )
identifier[yaml] . identifier[dump] ( identifier[resource] , identifier[stream] , identifier[default_flow_style] = keyword[False] ) | def serialize_yaml_tofile(filename, resource):
"""
Serializes a K8S resource to YAML-formatted file.
"""
stream = file(filename, 'w')
yaml.dump(resource, stream, default_flow_style=False) |
def compute_followup_snr_series(data_reader, htilde, trig_time,
                                duration=0.095, check_state=True,
                                coinc_window=0.05):
    """Compute a short stretch of SNR time series around a trigger.

    Given a StrainBuffer, a template frequency series and a trigger time,
    compute a portion of the SNR time series centered on the trigger for
    its rapid sky localization and followup.

    If the trigger time is too close to the boundary of the valid data
    segment the SNR series is calculated anyway and might be slightly
    contaminated by filter and wrap-around effects. For reasonable
    durations this will only affect a small fraction of the triggers and
    probably in a negligible way.

    Parameters
    ----------
    data_reader : StrainBuffer
        The StrainBuffer object to read strain data from.
    htilde : FrequencySeries
        The frequency series containing the template waveform.
    trig_time : {float, lal.LIGOTimeGPS}
        The trigger time.
    duration : float (optional)
        Duration of the computed SNR series in seconds. If omitted, it
        defaults to twice the Earth light travel time plus 10 ms of timing
        uncertainty.
    check_state : boolean
        If True, and the detector was offline or flagged for bad data
        quality at any point during the inspiral, then return None instead.
    coinc_window : float (optional)
        Maximum possible time between coincident triggers at different
        detectors. This is needed to properly determine data padding.

    Returns
    -------
    snr : TimeSeries
        The portion of SNR around the trigger. None if the detector is
        offline or has bad data quality, and check_state is True.
    """
    if check_state:
        # Span of data involved in the filter: the template length before
        # the trigger plus half the requested window on each side.
        seg_start = trig_time - duration / 2 - htilde.length_in_time
        seg_end = trig_time + duration / 2
        seg_len = seg_end - seg_start
        # The detector must have been observing over the whole span ...
        if data_reader.state is not None \
                and not data_reader.state.is_extent_valid(seg_start, seg_len):
            return None
        # ... and the data quality must be good, including dq padding on
        # both sides of the span.
        if data_reader.dq is not None \
                and not data_reader.dq.is_extent_valid(
                    seg_start - data_reader.dq_padding,
                    seg_len + 2 * data_reader.dq_padding):
            return None

    stilde = data_reader.overwhitened_data(htilde.delta_f)
    snr, _, norm = matched_filter_core(htilde, stilde,
                                       h_norm=htilde.sigmasq(stilde.psd))

    # Check that the requested window (plus coincidence padding on the
    # left) fits inside the uncorrupted portion of the filtered segment.
    valid_end = int(len(snr) - data_reader.trim_padding)
    valid_start = int(valid_end - data_reader.blocksize * snr.sample_rate)
    half_dur_samples = int(snr.sample_rate * duration / 2)
    coinc_samples = int(snr.sample_rate * coinc_window)
    valid_start -= half_dur_samples + coinc_samples
    valid_end += half_dur_samples
    if valid_start < 0 or valid_end > len(snr) - 1:
        raise ValueError(
            'Requested SNR duration ({0} s) too long'.format(duration))

    # Onsource slice for Bayestar followup, centered on the trigger sample.
    onsource_idx = int(round(float(trig_time - snr.start_time)
                             * snr.sample_rate))
    onsource_slice = slice(onsource_idx - half_dur_samples,
                           onsource_idx + half_dur_samples + 1)
    return snr[onsource_slice] * norm
constant[Given a StrainBuffer, a template frequency series and a trigger time,
compute a portion of the SNR time series centered on the trigger for its
rapid sky localization and followup.
If the trigger time is too close to the boundary of the valid data segment
the SNR series is calculated anyway and might be slightly contaminated by
filter and wrap-around effects. For reasonable durations this will only
affect a small fraction of the triggers and probably in a negligible way.
Parameters
----------
data_reader : StrainBuffer
The StrainBuffer object to read strain data from.
htilde : FrequencySeries
The frequency series containing the template waveform.
trig_time : {float, lal.LIGOTimeGPS}
The trigger time.
duration : float (optional)
Duration of the computed SNR series in seconds. If omitted, it defaults
to twice the Earth light travel time plus 10 ms of timing uncertainty.
check_state : boolean
If True, and the detector was offline or flagged for bad data quality
at any point during the inspiral, then return (None, None) instead.
coinc_window : float (optional)
Maximum possible time between coincident triggers at different
detectors. This is needed to properly determine data padding.
Returns
-------
snr : TimeSeries
The portion of SNR around the trigger. None if the detector is offline
or has bad data quality, and check_state is True.
]
if name[check_state] begin[:]
variable[state_start_time] assign[=] binary_operation[binary_operation[name[trig_time] - binary_operation[name[duration] / constant[2]]] - name[htilde].length_in_time]
variable[state_end_time] assign[=] binary_operation[name[trig_time] + binary_operation[name[duration] / constant[2]]]
variable[state_duration] assign[=] binary_operation[name[state_end_time] - name[state_start_time]]
if compare[name[data_reader].state is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da20c9936a0> begin[:]
return[constant[None]]
variable[dq_start_time] assign[=] binary_operation[name[state_start_time] - name[data_reader].dq_padding]
variable[dq_duration] assign[=] binary_operation[name[state_duration] + binary_operation[constant[2] * name[data_reader].dq_padding]]
if compare[name[data_reader].dq is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da20c991cf0> begin[:]
return[constant[None]]
variable[stilde] assign[=] call[name[data_reader].overwhitened_data, parameter[name[htilde].delta_f]]
<ast.Tuple object at 0x7da20c992320> assign[=] call[name[matched_filter_core], parameter[name[htilde], name[stilde]]]
variable[valid_end] assign[=] call[name[int], parameter[binary_operation[call[name[len], parameter[name[snr]]] - name[data_reader].trim_padding]]]
variable[valid_start] assign[=] call[name[int], parameter[binary_operation[name[valid_end] - binary_operation[name[data_reader].blocksize * name[snr].sample_rate]]]]
variable[half_dur_samples] assign[=] call[name[int], parameter[binary_operation[binary_operation[name[snr].sample_rate * name[duration]] / constant[2]]]]
variable[coinc_samples] assign[=] call[name[int], parameter[binary_operation[name[snr].sample_rate * name[coinc_window]]]]
<ast.AugAssign object at 0x7da20c990220>
<ast.AugAssign object at 0x7da20c992f50>
if <ast.BoolOp object at 0x7da20c992b00> begin[:]
<ast.Raise object at 0x7da20c9926b0>
variable[onsource_idx] assign[=] binary_operation[call[name[float], parameter[binary_operation[name[trig_time] - name[snr].start_time]]] * name[snr].sample_rate]
variable[onsource_idx] assign[=] call[name[int], parameter[call[name[round], parameter[name[onsource_idx]]]]]
variable[onsource_slice] assign[=] call[name[slice], parameter[binary_operation[name[onsource_idx] - name[half_dur_samples]], binary_operation[binary_operation[name[onsource_idx] + name[half_dur_samples]] + constant[1]]]]
return[binary_operation[call[name[snr]][name[onsource_slice]] * name[norm]]] | keyword[def] identifier[compute_followup_snr_series] ( identifier[data_reader] , identifier[htilde] , identifier[trig_time] ,
identifier[duration] = literal[int] , identifier[check_state] = keyword[True] ,
identifier[coinc_window] = literal[int] ):
literal[string]
keyword[if] identifier[check_state] :
identifier[state_start_time] = identifier[trig_time] - identifier[duration] / literal[int] - identifier[htilde] . identifier[length_in_time]
identifier[state_end_time] = identifier[trig_time] + identifier[duration] / literal[int]
identifier[state_duration] = identifier[state_end_time] - identifier[state_start_time]
keyword[if] identifier[data_reader] . identifier[state] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[data_reader] . identifier[state] . identifier[is_extent_valid] ( identifier[state_start_time] ,
identifier[state_duration] ):
keyword[return] keyword[None]
identifier[dq_start_time] = identifier[state_start_time] - identifier[data_reader] . identifier[dq_padding]
identifier[dq_duration] = identifier[state_duration] + literal[int] * identifier[data_reader] . identifier[dq_padding]
keyword[if] identifier[data_reader] . identifier[dq] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[data_reader] . identifier[dq] . identifier[is_extent_valid] ( identifier[dq_start_time] , identifier[dq_duration] ):
keyword[return] keyword[None]
identifier[stilde] = identifier[data_reader] . identifier[overwhitened_data] ( identifier[htilde] . identifier[delta_f] )
identifier[snr] , identifier[_] , identifier[norm] = identifier[matched_filter_core] ( identifier[htilde] , identifier[stilde] ,
identifier[h_norm] = identifier[htilde] . identifier[sigmasq] ( identifier[stilde] . identifier[psd] ))
identifier[valid_end] = identifier[int] ( identifier[len] ( identifier[snr] )- identifier[data_reader] . identifier[trim_padding] )
identifier[valid_start] = identifier[int] ( identifier[valid_end] - identifier[data_reader] . identifier[blocksize] * identifier[snr] . identifier[sample_rate] )
identifier[half_dur_samples] = identifier[int] ( identifier[snr] . identifier[sample_rate] * identifier[duration] / literal[int] )
identifier[coinc_samples] = identifier[int] ( identifier[snr] . identifier[sample_rate] * identifier[coinc_window] )
identifier[valid_start] -= identifier[half_dur_samples] + identifier[coinc_samples]
identifier[valid_end] += identifier[half_dur_samples]
keyword[if] identifier[valid_start] < literal[int] keyword[or] identifier[valid_end] > identifier[len] ( identifier[snr] )- literal[int] :
keyword[raise] identifier[ValueError] (( literal[string]
literal[string] ). identifier[format] ( identifier[duration] ))
identifier[onsource_idx] = identifier[float] ( identifier[trig_time] - identifier[snr] . identifier[start_time] )* identifier[snr] . identifier[sample_rate]
identifier[onsource_idx] = identifier[int] ( identifier[round] ( identifier[onsource_idx] ))
identifier[onsource_slice] = identifier[slice] ( identifier[onsource_idx] - identifier[half_dur_samples] ,
identifier[onsource_idx] + identifier[half_dur_samples] + literal[int] )
keyword[return] identifier[snr] [ identifier[onsource_slice] ]* identifier[norm] | def compute_followup_snr_series(data_reader, htilde, trig_time, duration=0.095, check_state=True, coinc_window=0.05):
"""Given a StrainBuffer, a template frequency series and a trigger time,
compute a portion of the SNR time series centered on the trigger for its
rapid sky localization and followup.
If the trigger time is too close to the boundary of the valid data segment
the SNR series is calculated anyway and might be slightly contaminated by
filter and wrap-around effects. For reasonable durations this will only
affect a small fraction of the triggers and probably in a negligible way.
Parameters
----------
data_reader : StrainBuffer
The StrainBuffer object to read strain data from.
htilde : FrequencySeries
The frequency series containing the template waveform.
trig_time : {float, lal.LIGOTimeGPS}
The trigger time.
duration : float (optional)
Duration of the computed SNR series in seconds. If omitted, it defaults
to twice the Earth light travel time plus 10 ms of timing uncertainty.
check_state : boolean
If True, and the detector was offline or flagged for bad data quality
at any point during the inspiral, then return (None, None) instead.
coinc_window : float (optional)
Maximum possible time between coincident triggers at different
detectors. This is needed to properly determine data padding.
Returns
-------
snr : TimeSeries
The portion of SNR around the trigger. None if the detector is offline
or has bad data quality, and check_state is True.
"""
if check_state:
# was the detector observing for the full amount of involved data?
state_start_time = trig_time - duration / 2 - htilde.length_in_time
state_end_time = trig_time + duration / 2
state_duration = state_end_time - state_start_time
if data_reader.state is not None:
if not data_reader.state.is_extent_valid(state_start_time, state_duration):
return None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# was the data quality ok for the full amount of involved data?
dq_start_time = state_start_time - data_reader.dq_padding
dq_duration = state_duration + 2 * data_reader.dq_padding
if data_reader.dq is not None:
if not data_reader.dq.is_extent_valid(dq_start_time, dq_duration):
return None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
stilde = data_reader.overwhitened_data(htilde.delta_f)
(snr, _, norm) = matched_filter_core(htilde, stilde, h_norm=htilde.sigmasq(stilde.psd))
valid_end = int(len(snr) - data_reader.trim_padding)
valid_start = int(valid_end - data_reader.blocksize * snr.sample_rate)
half_dur_samples = int(snr.sample_rate * duration / 2)
coinc_samples = int(snr.sample_rate * coinc_window)
valid_start -= half_dur_samples + coinc_samples
valid_end += half_dur_samples
if valid_start < 0 or valid_end > len(snr) - 1:
raise ValueError('Requested SNR duration ({0} s) too long'.format(duration)) # depends on [control=['if'], data=[]]
# Onsource slice for Bayestar followup
onsource_idx = float(trig_time - snr.start_time) * snr.sample_rate
onsource_idx = int(round(onsource_idx))
onsource_slice = slice(onsource_idx - half_dur_samples, onsource_idx + half_dur_samples + 1)
return snr[onsource_slice] * norm |
def list_apps(site):
    '''
    Get all configured IIS applications for the specified site.

    Args:
        site (str): The IIS site name.

    Returns: A dictionary of the application names and properties.

    CLI Example:

    .. code-block:: bash

        salt '*' win_iis.list_apps site
    '''
    ps_cmd = [
        "Get-WebApplication -Site '{0}'".format(site),
        r"| Select-Object applicationPool, path, PhysicalPath, preloadEnabled,",
        r"@{ Name='name'; Expression={ $_.path.Split('/', 2)[-1] } },",
        r"@{ Name='protocols'; Expression={ @( $_.enabledProtocols.Split(',')",
        r"| Foreach-Object { $_.Trim() } ) } }",
    ]

    cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)

    try:
        items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)
    except ValueError:
        raise CommandExecutionError('Unable to parse return data as Json.')

    ret = dict()
    for item in items:
        # PowerShell returns protocols as an empty dict (none configured),
        # a bare string (exactly one), or a dict with 'Count'/'value' keys
        # (several) -- normalize all three shapes into a flat list.
        raw_protocols = item['protocols']
        protocols = list()
        if not isinstance(raw_protocols, dict):
            protocols.append(raw_protocols)
        elif 'value' in raw_protocols:
            protocols += raw_protocols['value']

        ret[item['name']] = {
            'apppool': item['applicationPool'],
            'path': item['path'],
            'preload': item['preloadEnabled'],
            'protocols': protocols,
            'sourcepath': item['PhysicalPath'],
        }

    if not ret:
        log.warning('No apps found in output: %s', cmd_ret)

    return ret
constant[
Get all configured IIS applications for the specified site.
Args:
site (str): The IIS site name.
Returns: A dictionary of the application names and properties.
CLI Example:
.. code-block:: bash
salt '*' win_iis.list_apps site
]
variable[ret] assign[=] call[name[dict], parameter[]]
variable[ps_cmd] assign[=] call[name[list], parameter[]]
call[name[ps_cmd].append, parameter[call[constant[Get-WebApplication -Site '{0}'].format, parameter[name[site]]]]]
call[name[ps_cmd].append, parameter[constant[| Select-Object applicationPool, path, PhysicalPath, preloadEnabled,]]]
call[name[ps_cmd].append, parameter[constant[@{ Name='name'; Expression={ $_.path.Split('/', 2)[-1] } },]]]
call[name[ps_cmd].append, parameter[constant[@{ Name='protocols'; Expression={ @( $_.enabledProtocols.Split(',')]]]
call[name[ps_cmd].append, parameter[constant[| Foreach-Object { $_.Trim() } ) } }]]]
variable[cmd_ret] assign[=] call[name[_srvmgr], parameter[]]
<ast.Try object at 0x7da1b21e0eb0>
for taget[name[item]] in starred[name[items]] begin[:]
variable[protocols] assign[=] call[name[list], parameter[]]
if call[name[isinstance], parameter[call[name[item]][constant[protocols]], name[dict]]] begin[:]
if compare[constant[value] in call[name[item]][constant[protocols]]] begin[:]
<ast.AugAssign object at 0x7da1b21e2830>
call[name[ret]][call[name[item]][constant[name]]] assign[=] dictionary[[<ast.Constant object at 0x7da1b21e31f0>, <ast.Constant object at 0x7da1b21e0520>, <ast.Constant object at 0x7da1b21e18d0>, <ast.Constant object at 0x7da1b21e3820>, <ast.Constant object at 0x7da1b21e1300>], [<ast.Subscript object at 0x7da1b21e33d0>, <ast.Subscript object at 0x7da1b21e0220>, <ast.Subscript object at 0x7da1b21e2560>, <ast.Name object at 0x7da1b21e19f0>, <ast.Subscript object at 0x7da1b21e1a50>]]
if <ast.UnaryOp object at 0x7da1b21e28f0> begin[:]
call[name[log].warning, parameter[constant[No apps found in output: %s], name[cmd_ret]]]
return[name[ret]] | keyword[def] identifier[list_apps] ( identifier[site] ):
literal[string]
identifier[ret] = identifier[dict] ()
identifier[ps_cmd] = identifier[list] ()
identifier[ps_cmd] . identifier[append] ( literal[string] . identifier[format] ( identifier[site] ))
identifier[ps_cmd] . identifier[append] ( literal[string] )
identifier[ps_cmd] . identifier[append] ( literal[string] )
identifier[ps_cmd] . identifier[append] ( literal[string] )
identifier[ps_cmd] . identifier[append] ( literal[string] )
identifier[cmd_ret] = identifier[_srvmgr] ( identifier[cmd] = identifier[ps_cmd] , identifier[return_json] = keyword[True] )
keyword[try] :
identifier[items] = identifier[salt] . identifier[utils] . identifier[json] . identifier[loads] ( identifier[cmd_ret] [ literal[string] ], identifier[strict] = keyword[False] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[CommandExecutionError] ( literal[string] )
keyword[for] identifier[item] keyword[in] identifier[items] :
identifier[protocols] = identifier[list] ()
keyword[if] identifier[isinstance] ( identifier[item] [ literal[string] ], identifier[dict] ):
keyword[if] literal[string] keyword[in] identifier[item] [ literal[string] ]:
identifier[protocols] += identifier[item] [ literal[string] ][ literal[string] ]
keyword[else] :
identifier[protocols] . identifier[append] ( identifier[item] [ literal[string] ])
identifier[ret] [ identifier[item] [ literal[string] ]]={ literal[string] : identifier[item] [ literal[string] ],
literal[string] : identifier[item] [ literal[string] ],
literal[string] : identifier[item] [ literal[string] ],
literal[string] : identifier[protocols] ,
literal[string] : identifier[item] [ literal[string] ]}
keyword[if] keyword[not] identifier[ret] :
identifier[log] . identifier[warning] ( literal[string] , identifier[cmd_ret] )
keyword[return] identifier[ret] | def list_apps(site):
"""
Get all configured IIS applications for the specified site.
Args:
site (str): The IIS site name.
Returns: A dictionary of the application names and properties.
CLI Example:
.. code-block:: bash
salt '*' win_iis.list_apps site
"""
ret = dict()
ps_cmd = list()
ps_cmd.append("Get-WebApplication -Site '{0}'".format(site))
ps_cmd.append('| Select-Object applicationPool, path, PhysicalPath, preloadEnabled,')
ps_cmd.append("@{ Name='name'; Expression={ $_.path.Split('/', 2)[-1] } },")
ps_cmd.append("@{ Name='protocols'; Expression={ @( $_.enabledProtocols.Split(',')")
ps_cmd.append('| Foreach-Object { $_.Trim() } ) } }')
cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)
try:
items = salt.utils.json.loads(cmd_ret['stdout'], strict=False) # depends on [control=['try'], data=[]]
except ValueError:
raise CommandExecutionError('Unable to parse return data as Json.') # depends on [control=['except'], data=[]]
for item in items:
protocols = list()
# If there are no associated protocols, protocols will be an empty dict,
# if there is one protocol, it will be a string, and if there are
# multiple, it will be a dict with 'Count' and 'value' as the keys.
if isinstance(item['protocols'], dict):
if 'value' in item['protocols']:
protocols += item['protocols']['value'] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
protocols.append(item['protocols'])
ret[item['name']] = {'apppool': item['applicationPool'], 'path': item['path'], 'preload': item['preloadEnabled'], 'protocols': protocols, 'sourcepath': item['PhysicalPath']} # depends on [control=['for'], data=['item']]
if not ret:
log.warning('No apps found in output: %s', cmd_ret) # depends on [control=['if'], data=[]]
return ret |
def path(self):
    """:return: path of the underlying mapped file
    :raise ValueError: if attached path is not a path"""
    path_or_fd = self._rlist.path_or_fd()
    # an int here means the mapping was created from a raw file descriptor,
    # so there is no filesystem path to hand back
    if isinstance(path_or_fd, int):
        raise ValueError("Path queried although mapping was applied to a file descriptor")
    return path_or_fd
constant[:return: path of the underlying mapped file
:raise ValueError: if attached path is not a path]
if call[name[isinstance], parameter[call[name[self]._rlist.path_or_fd, parameter[]], name[int]]] begin[:]
<ast.Raise object at 0x7da1b04991b0>
return[call[name[self]._rlist.path_or_fd, parameter[]]] | keyword[def] identifier[path] ( identifier[self] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[self] . identifier[_rlist] . identifier[path_or_fd] (), identifier[int] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[self] . identifier[_rlist] . identifier[path_or_fd] () | def path(self):
""":return: path of the underlying mapped file
:raise ValueError: if attached path is not a path"""
if isinstance(self._rlist.path_or_fd(), int):
raise ValueError('Path queried although mapping was applied to a file descriptor') # depends on [control=['if'], data=[]]
# END handle type
return self._rlist.path_or_fd() |
def expires(self):
    """
    The date and time after which the certificate will be considered invalid.
    """
    # notAfter comes back as a byte string like b'20301231235959Z'
    # (see the strptime format below); decode it before parsing
    raw = self._cert.get_notAfter().decode('ascii')
    parsed = datetime.datetime.strptime(raw, '%Y%m%d%H%M%SZ')
    # the trailing 'Z' denotes UTC, so attach an explicit UTC timezone
    return parsed.replace(tzinfo=datetime.timezone.utc)
constant[
The date and time after which the certificate will be considered invalid.
]
variable[not_after] assign[=] call[call[name[self]._cert.get_notAfter, parameter[]].decode, parameter[constant[ascii]]]
return[call[call[name[datetime].datetime.strptime, parameter[name[not_after], constant[%Y%m%d%H%M%SZ]]].replace, parameter[]]] | keyword[def] identifier[expires] ( identifier[self] ):
literal[string]
identifier[not_after] = identifier[self] . identifier[_cert] . identifier[get_notAfter] (). identifier[decode] ( literal[string] )
keyword[return] identifier[datetime] . identifier[datetime] . identifier[strptime] ( identifier[not_after] , literal[string] ). identifier[replace] (
identifier[tzinfo] = identifier[datetime] . identifier[timezone] . identifier[utc] ) | def expires(self):
"""
The date and time after which the certificate will be considered invalid.
"""
not_after = self._cert.get_notAfter().decode('ascii')
return datetime.datetime.strptime(not_after, '%Y%m%d%H%M%SZ').replace(tzinfo=datetime.timezone.utc) |
def calculate_y_ticks(self, plot_height):
    """Calculate the y-axis items dependent on the plot height."""
    data_min = self.calibrated_data_min
    data_range = self.calibrated_data_max - data_min
    ticker = self.y_ticker
    result = list()
    for tick_value, tick_label in zip(ticker.values, ticker.labels):
        if data_range == 0.0:
            # degenerate range: park every tick at the vertical midpoint
            y_pos = plot_height - plot_height * 0.5
        else:
            # map the calibrated value into pixel space, y axis inverted
            y_pos = plot_height - plot_height * (tick_value - data_min) / data_range
        # keep only ticks that land inside the plot area
        if 0 <= y_pos <= plot_height:
            result.append((y_pos, tick_label))
    return result
constant[Calculate the y-axis items dependent on the plot height.]
variable[calibrated_data_min] assign[=] name[self].calibrated_data_min
variable[calibrated_data_max] assign[=] name[self].calibrated_data_max
variable[calibrated_data_range] assign[=] binary_operation[name[calibrated_data_max] - name[calibrated_data_min]]
variable[ticker] assign[=] name[self].y_ticker
variable[y_ticks] assign[=] call[name[list], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20e9562c0>, <ast.Name object at 0x7da20e9579d0>]]] in starred[call[name[zip], parameter[name[ticker].values, name[ticker].labels]]] begin[:]
if compare[name[calibrated_data_range] not_equal[!=] constant[0.0]] begin[:]
variable[y_tick] assign[=] binary_operation[name[plot_height] - binary_operation[binary_operation[name[plot_height] * binary_operation[name[tick_value] - name[calibrated_data_min]]] / name[calibrated_data_range]]]
if <ast.BoolOp object at 0x7da20e957d30> begin[:]
call[name[y_ticks].append, parameter[tuple[[<ast.Name object at 0x7da20e956560>, <ast.Name object at 0x7da20e956320>]]]]
return[name[y_ticks]] | keyword[def] identifier[calculate_y_ticks] ( identifier[self] , identifier[plot_height] ):
literal[string]
identifier[calibrated_data_min] = identifier[self] . identifier[calibrated_data_min]
identifier[calibrated_data_max] = identifier[self] . identifier[calibrated_data_max]
identifier[calibrated_data_range] = identifier[calibrated_data_max] - identifier[calibrated_data_min]
identifier[ticker] = identifier[self] . identifier[y_ticker]
identifier[y_ticks] = identifier[list] ()
keyword[for] identifier[tick_value] , identifier[tick_label] keyword[in] identifier[zip] ( identifier[ticker] . identifier[values] , identifier[ticker] . identifier[labels] ):
keyword[if] identifier[calibrated_data_range] != literal[int] :
identifier[y_tick] = identifier[plot_height] - identifier[plot_height] *( identifier[tick_value] - identifier[calibrated_data_min] )/ identifier[calibrated_data_range]
keyword[else] :
identifier[y_tick] = identifier[plot_height] - identifier[plot_height] * literal[int]
keyword[if] identifier[y_tick] >= literal[int] keyword[and] identifier[y_tick] <= identifier[plot_height] :
identifier[y_ticks] . identifier[append] (( identifier[y_tick] , identifier[tick_label] ))
keyword[return] identifier[y_ticks] | def calculate_y_ticks(self, plot_height):
"""Calculate the y-axis items dependent on the plot height."""
calibrated_data_min = self.calibrated_data_min
calibrated_data_max = self.calibrated_data_max
calibrated_data_range = calibrated_data_max - calibrated_data_min
ticker = self.y_ticker
y_ticks = list()
for (tick_value, tick_label) in zip(ticker.values, ticker.labels):
if calibrated_data_range != 0.0:
y_tick = plot_height - plot_height * (tick_value - calibrated_data_min) / calibrated_data_range # depends on [control=['if'], data=['calibrated_data_range']]
else:
y_tick = plot_height - plot_height * 0.5
if y_tick >= 0 and y_tick <= plot_height:
y_ticks.append((y_tick, tick_label)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return y_ticks |
def stop(self):
    """
    Stop the process triggered by :meth:`start`.

    Terminates the child process if it is still alive, runs the internal
    cleanup, and releases the pipe handle so that ``start()`` can be
    triggered again afterwards.

    Raises:
        LoopExceptionError: if ``raise_error`` is set and the loop
            process exited with exit code 255 (the code used to signal
            that the loop function itself failed).
    """
    if self.is_alive():
        self._proc.terminate()
    if self._proc is not None:
        self.__cleanup()
        if self.raise_error and self._proc.exitcode == 255:
            # fixed garbled wording of the original message
            # ("return non zero exticode")
            raise LoopExceptionError(
                "the loop function returned non-zero exitcode ({})!\n".format(self._proc.exitcode)
                + "see log (INFO level) for traceback information")
        self.pipe_handler.close()
        self._proc = None
constant[
stops the process triggered by start
Setting the shared memory boolean run to false, which should prevent
the loop from repeating. Call __cleanup to make sure the process
stopped. After that we could trigger start() again.
]
if call[name[self].is_alive, parameter[]] begin[:]
call[name[self]._proc.terminate, parameter[]]
if compare[name[self]._proc is_not constant[None]] begin[:]
call[name[self].__cleanup, parameter[]]
if name[self].raise_error begin[:]
if compare[name[self]._proc.exitcode equal[==] constant[255]] begin[:]
<ast.Raise object at 0x7da2054a4970>
call[name[self].pipe_handler.close, parameter[]]
name[self]._proc assign[=] constant[None] | keyword[def] identifier[stop] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[is_alive] ():
identifier[self] . identifier[_proc] . identifier[terminate] ()
keyword[if] identifier[self] . identifier[_proc] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[__cleanup] ()
keyword[if] identifier[self] . identifier[raise_error] :
keyword[if] identifier[self] . identifier[_proc] . identifier[exitcode] == literal[int] :
keyword[raise] identifier[LoopExceptionError] ( literal[string] . identifier[format] ( identifier[self] . identifier[_proc] . identifier[exitcode] )+
literal[string] )
identifier[self] . identifier[pipe_handler] . identifier[close] ()
identifier[self] . identifier[_proc] = keyword[None] | def stop(self):
"""
stops the process triggered by start
Setting the shared memory boolean run to false, which should prevent
the loop from repeating. Call __cleanup to make sure the process
stopped. After that we could trigger start() again.
"""
if self.is_alive():
self._proc.terminate() # depends on [control=['if'], data=[]]
if self._proc is not None:
self.__cleanup()
if self.raise_error:
if self._proc.exitcode == 255:
raise LoopExceptionError('the loop function return non zero exticode ({})!\n'.format(self._proc.exitcode) + 'see log (INFO level) for traceback information') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
self.pipe_handler.close()
self._proc = None |
def move_todo_list(self, list_id, to):
    """
    Reposition a to-do list relative to the other lists in the project.

    A list at position 1 shows up at the top of the page, so moving
    lists around is how they are prioritized. Positions below 1, or
    above the number of lists in the project, are forced into the
    valid range (inclusive) by the service.
    """
    request = ET.Element('request')
    destination = ET.SubElement(request, 'to')
    destination.text = str(int(to))
    return self._request('/todos/move_list/%u' % list_id, request)
constant[
This allows you to reposition a list relative to the other lists in
the project. A list with position 1 will show up at the top of the
page. Moving lists around lets you prioritize. Moving a list to a
position less than 1, or more than the number of lists in a project,
will force the position to be between 1 and the number of lists
(inclusive).
]
variable[path] assign[=] binary_operation[constant[/todos/move_list/%u] <ast.Mod object at 0x7da2590d6920> name[list_id]]
variable[req] assign[=] call[name[ET].Element, parameter[constant[request]]]
call[name[ET].SubElement, parameter[name[req], constant[to]]].text assign[=] call[name[str], parameter[call[name[int], parameter[name[to]]]]]
return[call[name[self]._request, parameter[name[path], name[req]]]] | keyword[def] identifier[move_todo_list] ( identifier[self] , identifier[list_id] , identifier[to] ):
literal[string]
identifier[path] = literal[string] % identifier[list_id]
identifier[req] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[ET] . identifier[SubElement] ( identifier[req] , literal[string] ). identifier[text] = identifier[str] ( identifier[int] ( identifier[to] ))
keyword[return] identifier[self] . identifier[_request] ( identifier[path] , identifier[req] ) | def move_todo_list(self, list_id, to):
"""
This allows you to reposition a list relative to the other lists in
the project. A list with position 1 will show up at the top of the
page. Moving lists around lets you prioritize. Moving a list to a
position less than 1, or more than the number of lists in a project,
will force the position to be between 1 and the number of lists
(inclusive).
"""
path = '/todos/move_list/%u' % list_id
req = ET.Element('request')
ET.SubElement(req, 'to').text = str(int(to))
return self._request(path, req) |
async def _receive_loop(self):
"""Receive the response to a request made to the Watchman service.
Note that when trying to receive a PDU from the Watchman service,
we might get a unilateral response to a subscription or log, so these
are processed and queued up for later retrieval. This function only
returns when a non-unilateral response is received."""
try:
while True:
response = await self.connection.receive()
if self._is_unilateral(response):
await self._process_unilateral_response(response)
else:
await self.bilateral_response_queue.put(response)
except Exception as ex:
await self._broadcast_exception(ex)
# We may get a cancel exception on close, so don't close again.
if not self._closed:
self.close() | <ast.AsyncFunctionDef object at 0x7da1b1f97760> | keyword[async] keyword[def] identifier[_receive_loop] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[while] keyword[True] :
identifier[response] = keyword[await] identifier[self] . identifier[connection] . identifier[receive] ()
keyword[if] identifier[self] . identifier[_is_unilateral] ( identifier[response] ):
keyword[await] identifier[self] . identifier[_process_unilateral_response] ( identifier[response] )
keyword[else] :
keyword[await] identifier[self] . identifier[bilateral_response_queue] . identifier[put] ( identifier[response] )
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
keyword[await] identifier[self] . identifier[_broadcast_exception] ( identifier[ex] )
keyword[if] keyword[not] identifier[self] . identifier[_closed] :
identifier[self] . identifier[close] () | async def _receive_loop(self):
"""Receive the response to a request made to the Watchman service.
Note that when trying to receive a PDU from the Watchman service,
we might get a unilateral response to a subscription or log, so these
are processed and queued up for later retrieval. This function only
returns when a non-unilateral response is received."""
try:
while True:
response = await self.connection.receive()
if self._is_unilateral(response):
await self._process_unilateral_response(response) # depends on [control=['if'], data=[]]
else:
await self.bilateral_response_queue.put(response) # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]]
except Exception as ex:
await self._broadcast_exception(ex)
# We may get a cancel exception on close, so don't close again.
if not self._closed:
self.close() # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['ex']] |
def _apply_axis_properties(self, axis, rot=None, fontsize=None):
""" Tick creation within matplotlib is reasonably expensive and is
internally deferred until accessed as Ticks are created/destroyed
multiple times per draw. It's therefore beneficial for us to avoid
accessing unless we will act on the Tick.
"""
if rot is not None or fontsize is not None:
# rot=0 is a valid setting, hence the explicit None check
labels = axis.get_majorticklabels() + axis.get_minorticklabels()
for label in labels:
if rot is not None:
label.set_rotation(rot)
if fontsize is not None:
label.set_fontsize(fontsize) | def function[_apply_axis_properties, parameter[self, axis, rot, fontsize]]:
constant[ Tick creation within matplotlib is reasonably expensive and is
internally deferred until accessed as Ticks are created/destroyed
multiple times per draw. It's therefore beneficial for us to avoid
accessing unless we will act on the Tick.
]
if <ast.BoolOp object at 0x7da207f038b0> begin[:]
variable[labels] assign[=] binary_operation[call[name[axis].get_majorticklabels, parameter[]] + call[name[axis].get_minorticklabels, parameter[]]]
for taget[name[label]] in starred[name[labels]] begin[:]
if compare[name[rot] is_not constant[None]] begin[:]
call[name[label].set_rotation, parameter[name[rot]]]
if compare[name[fontsize] is_not constant[None]] begin[:]
call[name[label].set_fontsize, parameter[name[fontsize]]] | keyword[def] identifier[_apply_axis_properties] ( identifier[self] , identifier[axis] , identifier[rot] = keyword[None] , identifier[fontsize] = keyword[None] ):
literal[string]
keyword[if] identifier[rot] keyword[is] keyword[not] keyword[None] keyword[or] identifier[fontsize] keyword[is] keyword[not] keyword[None] :
identifier[labels] = identifier[axis] . identifier[get_majorticklabels] ()+ identifier[axis] . identifier[get_minorticklabels] ()
keyword[for] identifier[label] keyword[in] identifier[labels] :
keyword[if] identifier[rot] keyword[is] keyword[not] keyword[None] :
identifier[label] . identifier[set_rotation] ( identifier[rot] )
keyword[if] identifier[fontsize] keyword[is] keyword[not] keyword[None] :
identifier[label] . identifier[set_fontsize] ( identifier[fontsize] ) | def _apply_axis_properties(self, axis, rot=None, fontsize=None):
""" Tick creation within matplotlib is reasonably expensive and is
internally deferred until accessed as Ticks are created/destroyed
multiple times per draw. It's therefore beneficial for us to avoid
accessing unless we will act on the Tick.
"""
if rot is not None or fontsize is not None:
# rot=0 is a valid setting, hence the explicit None check
labels = axis.get_majorticklabels() + axis.get_minorticklabels()
for label in labels:
if rot is not None:
label.set_rotation(rot) # depends on [control=['if'], data=['rot']]
if fontsize is not None:
label.set_fontsize(fontsize) # depends on [control=['if'], data=['fontsize']] # depends on [control=['for'], data=['label']] # depends on [control=['if'], data=[]] |
def setup_formatters(self, *args):
    """Build the per-column formatter list from the first observed row.

    Args:
        *args: the cells of the first row; each cell's type selects the
            formatter class for its column via ``type2fmt``.
    """
    built = []
    offset = 0
    # built-in columns come first: row number, timestamp, time difference
    if self.rownum:
        built.append(fmt.RowNumberFormatter.setup(0))
        offset += 1
    if self.timestamp:
        built.append(fmt.DatetimeFormatter.setup(
            datetime.datetime.now(),
            fmt='{:%Y-%m-%d %H:%M:%S.%f}'.format,
            col_width=26))
        offset += 1
    if self.time_diff:
        built.append(fmt.TimeDeltaFormatter.setup(0))
        offset += 1
    # one formatter per user-defined column, chosen by the cell's type
    for index, cell in enumerate(args):
        formatter_cls = type2fmt.get(type(cell), fmt.GenericFormatter)
        options = {}
        # column width: global default, overridden per-index, then per-name
        if self.default_colwidth is not None:
            options['col_width'] = self.default_colwidth
        if index in self.column_widths:
            options['col_width'] = self.column_widths[index]
        elif self.columns and self.columns[index + offset] in self.column_widths:
            options['col_width'] = self.column_widths[self.columns[index + offset]]
        # formatter function: float default, overridden per-index, then per-name
        if formatter_cls == fmt.FloatFormatter and self.float_format is not None:
            options['fmt'] = self.float_format
        if index in self.column_formatters:
            options['fmt'] = self.column_formatters[index]
        elif self.columns and self.columns[index + offset] in self.column_formatters:
            options['fmt'] = self.column_formatters[self.columns[index + offset]]
        built.append(formatter_cls.setup(cell, **options))
    self.formatters = built
constant[Setup formatters by observing the first row.
Args:
*args: row cells
]
variable[formatters] assign[=] list[[]]
variable[col_offset] assign[=] constant[0]
if name[self].rownum begin[:]
call[name[formatters].append, parameter[call[name[fmt].RowNumberFormatter.setup, parameter[constant[0]]]]]
<ast.AugAssign object at 0x7da1afe59ab0>
if name[self].timestamp begin[:]
call[name[formatters].append, parameter[call[name[fmt].DatetimeFormatter.setup, parameter[call[name[datetime].datetime.now, parameter[]]]]]]
<ast.AugAssign object at 0x7da1afe1bfd0>
if name[self].time_diff begin[:]
call[name[formatters].append, parameter[call[name[fmt].TimeDeltaFormatter.setup, parameter[constant[0]]]]]
<ast.AugAssign object at 0x7da1afe19c90>
for taget[tuple[[<ast.Name object at 0x7da1afe19270>, <ast.Name object at 0x7da1afe1ae60>]]] in starred[call[name[enumerate], parameter[name[args]]]] begin[:]
variable[fmt_class] assign[=] call[name[type2fmt].get, parameter[call[name[type], parameter[name[value]]], name[fmt].GenericFormatter]]
variable[kwargs] assign[=] dictionary[[], []]
if compare[name[self].default_colwidth is_not constant[None]] begin[:]
call[name[kwargs]][constant[col_width]] assign[=] name[self].default_colwidth
if compare[name[coli] in name[self].column_widths] begin[:]
call[name[kwargs]][constant[col_width]] assign[=] call[name[self].column_widths][name[coli]]
if <ast.BoolOp object at 0x7da1afe19150> begin[:]
call[name[kwargs]][constant[fmt]] assign[=] name[self].float_format
if compare[name[coli] in name[self].column_formatters] begin[:]
call[name[kwargs]][constant[fmt]] assign[=] call[name[self].column_formatters][name[coli]]
variable[formatter] assign[=] call[name[fmt_class].setup, parameter[name[value]]]
call[name[formatters].append, parameter[name[formatter]]]
name[self].formatters assign[=] name[formatters] | keyword[def] identifier[setup_formatters] ( identifier[self] ,* identifier[args] ):
literal[string]
identifier[formatters] =[]
identifier[col_offset] = literal[int]
keyword[if] identifier[self] . identifier[rownum] :
identifier[formatters] . identifier[append] ( identifier[fmt] . identifier[RowNumberFormatter] . identifier[setup] ( literal[int] ))
identifier[col_offset] += literal[int]
keyword[if] identifier[self] . identifier[timestamp] :
identifier[formatters] . identifier[append] ( identifier[fmt] . identifier[DatetimeFormatter] . identifier[setup] (
identifier[datetime] . identifier[datetime] . identifier[now] (),
identifier[fmt] = literal[string] . identifier[format] ,
identifier[col_width] = literal[int] ))
identifier[col_offset] += literal[int]
keyword[if] identifier[self] . identifier[time_diff] :
identifier[formatters] . identifier[append] ( identifier[fmt] . identifier[TimeDeltaFormatter] . identifier[setup] ( literal[int] ))
identifier[col_offset] += literal[int]
keyword[for] identifier[coli] , identifier[value] keyword[in] identifier[enumerate] ( identifier[args] ):
identifier[fmt_class] = identifier[type2fmt] . identifier[get] ( identifier[type] ( identifier[value] ), identifier[fmt] . identifier[GenericFormatter] )
identifier[kwargs] ={}
keyword[if] identifier[self] . identifier[default_colwidth] keyword[is] keyword[not] keyword[None] :
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[default_colwidth]
keyword[if] identifier[coli] keyword[in] identifier[self] . identifier[column_widths] :
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[column_widths] [ identifier[coli] ]
keyword[elif] identifier[self] . identifier[columns] keyword[and] identifier[self] . identifier[columns] [ identifier[coli] + identifier[col_offset] ] keyword[in] identifier[self] . identifier[column_widths] :
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[column_widths] [ identifier[self] . identifier[columns] [ identifier[coli] + identifier[col_offset] ]]
keyword[if] identifier[fmt_class] == identifier[fmt] . identifier[FloatFormatter] keyword[and] identifier[self] . identifier[float_format] keyword[is] keyword[not] keyword[None] :
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[float_format]
keyword[if] identifier[coli] keyword[in] identifier[self] . identifier[column_formatters] :
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[column_formatters] [ identifier[coli] ]
keyword[elif] identifier[self] . identifier[columns] keyword[and] identifier[self] . identifier[columns] [ identifier[coli] + identifier[col_offset] ] keyword[in] identifier[self] . identifier[column_formatters] :
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[column_formatters] [ identifier[self] . identifier[columns] [ identifier[coli] + identifier[col_offset] ]]
identifier[formatter] = identifier[fmt_class] . identifier[setup] ( identifier[value] ,** identifier[kwargs] )
identifier[formatters] . identifier[append] ( identifier[formatter] )
identifier[self] . identifier[formatters] = identifier[formatters] | def setup_formatters(self, *args):
"""Setup formatters by observing the first row.
Args:
*args: row cells
"""
formatters = []
col_offset = 0
# initialize formatters for row-id, timestamp and time-diff columns
if self.rownum:
formatters.append(fmt.RowNumberFormatter.setup(0))
col_offset += 1 # depends on [control=['if'], data=[]]
if self.timestamp:
formatters.append(fmt.DatetimeFormatter.setup(datetime.datetime.now(), fmt='{:%Y-%m-%d %H:%M:%S.%f}'.format, col_width=26))
col_offset += 1 # depends on [control=['if'], data=[]]
if self.time_diff:
formatters.append(fmt.TimeDeltaFormatter.setup(0))
col_offset += 1 # depends on [control=['if'], data=[]]
# initialize formatters for user-defined columns
for (coli, value) in enumerate(args):
fmt_class = type2fmt.get(type(value), fmt.GenericFormatter)
kwargs = {}
# set column width
if self.default_colwidth is not None:
kwargs['col_width'] = self.default_colwidth # depends on [control=['if'], data=[]]
if coli in self.column_widths:
kwargs['col_width'] = self.column_widths[coli] # depends on [control=['if'], data=['coli']]
elif self.columns and self.columns[coli + col_offset] in self.column_widths:
kwargs['col_width'] = self.column_widths[self.columns[coli + col_offset]] # depends on [control=['if'], data=[]]
# set formatter function
if fmt_class == fmt.FloatFormatter and self.float_format is not None:
kwargs['fmt'] = self.float_format # depends on [control=['if'], data=[]]
if coli in self.column_formatters:
kwargs['fmt'] = self.column_formatters[coli] # depends on [control=['if'], data=['coli']]
elif self.columns and self.columns[coli + col_offset] in self.column_formatters:
kwargs['fmt'] = self.column_formatters[self.columns[coli + col_offset]] # depends on [control=['if'], data=[]]
formatter = fmt_class.setup(value, **kwargs)
formatters.append(formatter) # depends on [control=['for'], data=[]]
self.formatters = formatters |
def flatten_unique(l: Iterable) -> List:
    """Flatten *l* recursively and return its unique non-iterable items.

    Strings are treated as atoms (not iterated character by character).
    First-seen order is preserved.
    """
    # An OrderedDict doubles as an insertion-ordered set: setdefault only
    # records the first occurrence of each item.
    seen = OrderedDict()
    for item in l:
        if isinstance(item, Iterable) and not isinstance(item, str):
            for sub in flatten_unique(item):
                seen.setdefault(sub, None)
        else:
            seen.setdefault(item, None)
    return list(seen)
constant[ Return a list of UNIQUE non-list items in l ]
variable[rval] assign[=] call[name[OrderedDict], parameter[]]
for taget[name[e]] in starred[name[l]] begin[:]
if <ast.BoolOp object at 0x7da204565bd0> begin[:]
for taget[name[ev]] in starred[call[name[flatten_unique], parameter[name[e]]]] begin[:]
call[name[rval]][name[ev]] assign[=] constant[None]
return[call[name[list], parameter[call[name[rval].keys, parameter[]]]]] | keyword[def] identifier[flatten_unique] ( identifier[l] : identifier[Iterable] )-> identifier[List] :
literal[string]
identifier[rval] = identifier[OrderedDict] ()
keyword[for] identifier[e] keyword[in] identifier[l] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[e] , identifier[str] ) keyword[and] identifier[isinstance] ( identifier[e] , identifier[Iterable] ):
keyword[for] identifier[ev] keyword[in] identifier[flatten_unique] ( identifier[e] ):
identifier[rval] [ identifier[ev] ]= keyword[None]
keyword[else] :
identifier[rval] [ identifier[e] ]= keyword[None]
keyword[return] identifier[list] ( identifier[rval] . identifier[keys] ()) | def flatten_unique(l: Iterable) -> List:
""" Return a list of UNIQUE non-list items in l """
rval = OrderedDict()
for e in l:
if not isinstance(e, str) and isinstance(e, Iterable):
for ev in flatten_unique(e):
rval[ev] = None # depends on [control=['for'], data=['ev']] # depends on [control=['if'], data=[]]
else:
rval[e] = None # depends on [control=['for'], data=['e']]
return list(rval.keys()) |
def _get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """
    Returns the mean and standard deviations computed as an equally
    weighted (0.2 each) combination of five GMPEs: Pezeshk et al.
    (2011), Atkinson (2008'), Silva et al. (2002; single- and
    double-corner saturation) and Atkinson & Boore (2006).
    See documentation for method `GroundShakingIntensityModel` in
    :class:~`openquake.hazardlib.gsim.base.GSIM`
    """
    # distances
    # Shallow-copy the distance context so rjb/rrup can be replaced with
    # repi-derived equivalents without mutating the caller's object.
    distsl = copy.copy(dists)
    distsl.rjb, distsl.rrup = \
        utils.get_equivalent_distances_east(rup.mag, dists.repi)
    #
    # Pezeshk et al. 2011 - Rrup
    mean1, stds1 = super().get_mean_and_stddevs(sites, rup, distsl, imt,
                                                stddev_types)
    # Adjust to B/C site conditions (see apply_correction_to_BC).
    mean1 = self.apply_correction_to_BC(mean1, imt, distsl)
    #
    # Atkinson 2008 - Rjb
    gmpe = Atkinson2008prime()
    mean2, stds2 = gmpe.get_mean_and_stddevs(sites, rup, distsl, imt,
                                             stddev_types)
    #
    # Silva et al. 2002 - Rjb
    gmpe = SilvaEtAl2002SingleCornerSaturation()
    mean4, stds4 = gmpe.get_mean_and_stddevs(sites, rup, distsl, imt,
                                             stddev_types)
    mean4 = self.apply_correction_to_BC(mean4, imt, distsl)
    #
    # Silva et al. 2002 - Rjb
    gmpe = SilvaEtAl2002DoubleCornerSaturation()
    mean5, stds5 = gmpe.get_mean_and_stddevs(sites, rup, distsl, imt,
                                             stddev_types)
    mean5 = self.apply_correction_to_BC(mean5, imt, distsl)
    #
    # distances
    # Recompute the equivalent distances with ab06=True: Atkinson &
    # Boore (2006) uses its own distance conversion, so distsl is
    # deliberately overwritten before the last GMPE call.
    distsl.rjb, distsl.rrup = \
        utils.get_equivalent_distances_east(rup.mag, dists.repi, ab06=True)
    #
    # Atkinson and Boore 2006 - Rrup
    gmpe = AtkinsonBoore2006Modified2011()
    mean3, stds3 = gmpe.get_mean_and_stddevs(sites, rup, distsl, imt,
                                             stddev_types)
    # Computing adjusted mean and stds
    # Equal 0.2 weight for each of the five models.
    mean_adj = mean1*0.2 + mean2*0.2 + mean3*0.2 + mean4*0.2 + mean5*0.2
    # Note that in this case we do not apply a triangular smoothing on
    # distance as explained at page 996 of Atkinson and Adams (2013)
    # for the calculation of the standard deviation
    # Sigmas are averaged after the exp/log round-trip, i.e. in linear
    # space rather than in the (log) units they are expressed in.
    stds_adj = np.log(np.exp(stds1)*0.2 + np.exp(stds2)*0.2 +
                      np.exp(stds3)*0.2 + np.exp(stds4)*0.2 +
                      np.exp(stds5)*0.2)
    #
    return mean_adj, stds_adj
constant[
Returns only the mean values.
See documentation for method `GroundShakingIntensityModel` in
:class:~`openquake.hazardlib.gsim.base.GSIM`
]
variable[distsl] assign[=] call[name[copy].copy, parameter[name[dists]]]
<ast.Tuple object at 0x7da18ede64d0> assign[=] call[name[utils].get_equivalent_distances_east, parameter[name[rup].mag, name[dists].repi]]
<ast.Tuple object at 0x7da18ede5810> assign[=] call[call[name[super], parameter[]].get_mean_and_stddevs, parameter[name[sites], name[rup], name[distsl], name[imt], name[stddev_types]]]
variable[mean1] assign[=] call[name[self].apply_correction_to_BC, parameter[name[mean1], name[imt], name[distsl]]]
variable[gmpe] assign[=] call[name[Atkinson2008prime], parameter[]]
<ast.Tuple object at 0x7da18ede6350> assign[=] call[name[gmpe].get_mean_and_stddevs, parameter[name[sites], name[rup], name[distsl], name[imt], name[stddev_types]]]
variable[gmpe] assign[=] call[name[SilvaEtAl2002SingleCornerSaturation], parameter[]]
<ast.Tuple object at 0x7da18ede6020> assign[=] call[name[gmpe].get_mean_and_stddevs, parameter[name[sites], name[rup], name[distsl], name[imt], name[stddev_types]]]
variable[mean4] assign[=] call[name[self].apply_correction_to_BC, parameter[name[mean4], name[imt], name[distsl]]]
variable[gmpe] assign[=] call[name[SilvaEtAl2002DoubleCornerSaturation], parameter[]]
<ast.Tuple object at 0x7da18ede5b10> assign[=] call[name[gmpe].get_mean_and_stddevs, parameter[name[sites], name[rup], name[distsl], name[imt], name[stddev_types]]]
variable[mean5] assign[=] call[name[self].apply_correction_to_BC, parameter[name[mean5], name[imt], name[distsl]]]
<ast.Tuple object at 0x7da18bccbdc0> assign[=] call[name[utils].get_equivalent_distances_east, parameter[name[rup].mag, name[dists].repi]]
variable[gmpe] assign[=] call[name[AtkinsonBoore2006Modified2011], parameter[]]
<ast.Tuple object at 0x7da18ede7970> assign[=] call[name[gmpe].get_mean_and_stddevs, parameter[name[sites], name[rup], name[distsl], name[imt], name[stddev_types]]]
variable[mean_adj] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[mean1] * constant[0.2]] + binary_operation[name[mean2] * constant[0.2]]] + binary_operation[name[mean3] * constant[0.2]]] + binary_operation[name[mean4] * constant[0.2]]] + binary_operation[name[mean5] * constant[0.2]]]
variable[stds_adj] assign[=] call[name[np].log, parameter[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[call[name[np].exp, parameter[name[stds1]]] * constant[0.2]] + binary_operation[call[name[np].exp, parameter[name[stds2]]] * constant[0.2]]] + binary_operation[call[name[np].exp, parameter[name[stds3]]] * constant[0.2]]] + binary_operation[call[name[np].exp, parameter[name[stds4]]] * constant[0.2]]] + binary_operation[call[name[np].exp, parameter[name[stds5]]] * constant[0.2]]]]]
return[tuple[[<ast.Name object at 0x7da18ede77c0>, <ast.Name object at 0x7da18ede6d40>]]] | keyword[def] identifier[_get_mean_and_stddevs] ( identifier[self] , identifier[sites] , identifier[rup] , identifier[dists] , identifier[imt] , identifier[stddev_types] ):
literal[string]
identifier[distsl] = identifier[copy] . identifier[copy] ( identifier[dists] )
identifier[distsl] . identifier[rjb] , identifier[distsl] . identifier[rrup] = identifier[utils] . identifier[get_equivalent_distances_east] ( identifier[rup] . identifier[mag] , identifier[dists] . identifier[repi] )
identifier[mean1] , identifier[stds1] = identifier[super] (). identifier[get_mean_and_stddevs] ( identifier[sites] , identifier[rup] , identifier[distsl] , identifier[imt] ,
identifier[stddev_types] )
identifier[mean1] = identifier[self] . identifier[apply_correction_to_BC] ( identifier[mean1] , identifier[imt] , identifier[distsl] )
identifier[gmpe] = identifier[Atkinson2008prime] ()
identifier[mean2] , identifier[stds2] = identifier[gmpe] . identifier[get_mean_and_stddevs] ( identifier[sites] , identifier[rup] , identifier[distsl] , identifier[imt] ,
identifier[stddev_types] )
identifier[gmpe] = identifier[SilvaEtAl2002SingleCornerSaturation] ()
identifier[mean4] , identifier[stds4] = identifier[gmpe] . identifier[get_mean_and_stddevs] ( identifier[sites] , identifier[rup] , identifier[distsl] , identifier[imt] ,
identifier[stddev_types] )
identifier[mean4] = identifier[self] . identifier[apply_correction_to_BC] ( identifier[mean4] , identifier[imt] , identifier[distsl] )
identifier[gmpe] = identifier[SilvaEtAl2002DoubleCornerSaturation] ()
identifier[mean5] , identifier[stds5] = identifier[gmpe] . identifier[get_mean_and_stddevs] ( identifier[sites] , identifier[rup] , identifier[distsl] , identifier[imt] ,
identifier[stddev_types] )
identifier[mean5] = identifier[self] . identifier[apply_correction_to_BC] ( identifier[mean5] , identifier[imt] , identifier[distsl] )
identifier[distsl] . identifier[rjb] , identifier[distsl] . identifier[rrup] = identifier[utils] . identifier[get_equivalent_distances_east] ( identifier[rup] . identifier[mag] , identifier[dists] . identifier[repi] , identifier[ab06] = keyword[True] )
identifier[gmpe] = identifier[AtkinsonBoore2006Modified2011] ()
identifier[mean3] , identifier[stds3] = identifier[gmpe] . identifier[get_mean_and_stddevs] ( identifier[sites] , identifier[rup] , identifier[distsl] , identifier[imt] ,
identifier[stddev_types] )
identifier[mean_adj] = identifier[mean1] * literal[int] + identifier[mean2] * literal[int] + identifier[mean3] * literal[int] + identifier[mean4] * literal[int] + identifier[mean5] * literal[int]
identifier[stds_adj] = identifier[np] . identifier[log] ( identifier[np] . identifier[exp] ( identifier[stds1] )* literal[int] + identifier[np] . identifier[exp] ( identifier[stds2] )* literal[int] +
identifier[np] . identifier[exp] ( identifier[stds3] )* literal[int] + identifier[np] . identifier[exp] ( identifier[stds4] )* literal[int] +
identifier[np] . identifier[exp] ( identifier[stds5] )* literal[int] )
keyword[return] identifier[mean_adj] , identifier[stds_adj] | def _get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
Returns only the mean values.
See documentation for method `GroundShakingIntensityModel` in
:class:~`openquake.hazardlib.gsim.base.GSIM`
"""
# distances
distsl = copy.copy(dists)
(distsl.rjb, distsl.rrup) = utils.get_equivalent_distances_east(rup.mag, dists.repi)
#
# Pezeshk et al. 2011 - Rrup
(mean1, stds1) = super().get_mean_and_stddevs(sites, rup, distsl, imt, stddev_types)
mean1 = self.apply_correction_to_BC(mean1, imt, distsl)
#
# Atkinson 2008 - Rjb
gmpe = Atkinson2008prime()
(mean2, stds2) = gmpe.get_mean_and_stddevs(sites, rup, distsl, imt, stddev_types)
#
# Silva et al. 2002 - Rjb
gmpe = SilvaEtAl2002SingleCornerSaturation()
(mean4, stds4) = gmpe.get_mean_and_stddevs(sites, rup, distsl, imt, stddev_types)
mean4 = self.apply_correction_to_BC(mean4, imt, distsl)
#
# Silva et al. 2002 - Rjb
gmpe = SilvaEtAl2002DoubleCornerSaturation()
(mean5, stds5) = gmpe.get_mean_and_stddevs(sites, rup, distsl, imt, stddev_types)
mean5 = self.apply_correction_to_BC(mean5, imt, distsl)
#
# distances
(distsl.rjb, distsl.rrup) = utils.get_equivalent_distances_east(rup.mag, dists.repi, ab06=True)
#
# Atkinson and Boore 2006 - Rrup
gmpe = AtkinsonBoore2006Modified2011()
(mean3, stds3) = gmpe.get_mean_and_stddevs(sites, rup, distsl, imt, stddev_types)
# Computing adjusted mean and stds
mean_adj = mean1 * 0.2 + mean2 * 0.2 + mean3 * 0.2 + mean4 * 0.2 + mean5 * 0.2
# Note that in this case we do not apply a triangular smoothing on
# distance as explained at page 996 of Atkinson and Adams (2013)
# for the calculation of the standard deviation
stds_adj = np.log(np.exp(stds1) * 0.2 + np.exp(stds2) * 0.2 + np.exp(stds3) * 0.2 + np.exp(stds4) * 0.2 + np.exp(stds5) * 0.2)
#
return (mean_adj, stds_adj) |
def zremrangebylex(self, name, min, max):
    """
    Remove all elements in the sorted set ``name`` between the
    lexicographical range specified by ``min`` and ``max``.
    Returns the number of elements removed.
    """
    # Build the command tuple once and delegate to the generic executor;
    # the reply is the count of removed members.
    command = ('ZREMRANGEBYLEX', name, min, max)
    return self.execute_command(*command)
constant[
Remove all elements in the sorted set ``name`` between the
lexicographical range specified by ``min`` and ``max``.
Returns the number of elements removed.
]
return[call[name[self].execute_command, parameter[constant[ZREMRANGEBYLEX], name[name], name[min], name[max]]]] | keyword[def] identifier[zremrangebylex] ( identifier[self] , identifier[name] , identifier[min] , identifier[max] ):
literal[string]
keyword[return] identifier[self] . identifier[execute_command] ( literal[string] , identifier[name] , identifier[min] , identifier[max] ) | def zremrangebylex(self, name, min, max):
"""
Remove all elements in the sorted set ``name`` between the
lexicographical range specified by ``min`` and ``max``.
Returns the number of elements removed.
"""
return self.execute_command('ZREMRANGEBYLEX', name, min, max) |
def get(self):
    """Return the layout options.

    Filters ``self.options`` down to the keys present in
    ``self._allowed_layout``.
    """
    # Iterate the items view directly; the original wrapped it in
    # list(), building a throwaway copy for no benefit.
    return {k: v for k, v in self.options.items() if k in self._allowed_layout}
constant[Get layout options.]
return[<ast.DictComp object at 0x7da1b18e5ab0>] | keyword[def] identifier[get] ( identifier[self] ):
literal[string]
keyword[return] { identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[list] ( identifier[self] . identifier[options] . identifier[items] ()) keyword[if] identifier[k] keyword[in] identifier[self] . identifier[_allowed_layout] } | def get(self):
"""Get layout options."""
return {k: v for (k, v) in list(self.options.items()) if k in self._allowed_layout} |
def register(self):
    """
    Register this machine with the Insights service.

    Returns:
        A 4-tuple ``(message, hostname, group, display_name)``.  On
        connection failure the first element is an error string and the
        rest are empty strings.
    """
    client_hostname = determine_hostname()
    # This will undo a blacklist
    logger.debug("API: Create system")
    system = self.create_system(new_machine_id=False)
    # create_system signals a connection failure by returning False.
    if system is False:
        return ('Could not reach the Insights service to register.', '', '', '')
    # If we get a 409, we know we need to generate a new machine-id
    if system.status_code == 409:
        system = self.create_system(new_machine_id=True)
    self.handle_fail_rcs(system)
    logger.debug("System: %s", system.json())
    message = system.headers.get("x-rh-message", "")
    # Do grouping
    if self.config.group is not None:
        self.do_group()
    # Display registration success message to STDOUT and logs
    if system.status_code == 201:
        try:
            system_json = system.json()
            machine_id = system_json["machine_id"]
            account_number = system_json["account_number"]
            # Lazy %-style args: the message is only built if the INFO
            # level is actually emitted.
            logger.info("You successfully registered %s to account %s.",
                        machine_id, account_number)
        except Exception:
            # Was a bare 'except:', which would also trap
            # KeyboardInterrupt/SystemExit.  It also re-called
            # system.json() here, which re-raised (and escaped) when the
            # JSON decoding itself was the failure; log the raw response
            # object instead.  The 201 still indicates valid registration.
            logger.debug('Received invalid JSON on system registration.')
            logger.debug('API still indicates valid registration with 201 status code.')
            logger.debug(system)
    if self.config.group is not None:
        return (message, client_hostname, self.config.group, self.config.display_name)
    elif self.config.display_name is not None:
        return (message, client_hostname, "None", self.config.display_name)
    else:
        return (message, client_hostname, "None", "")
constant[
Register this machine
]
variable[client_hostname] assign[=] call[name[determine_hostname], parameter[]]
call[name[logger].debug, parameter[constant[API: Create system]]]
variable[system] assign[=] call[name[self].create_system, parameter[]]
if compare[name[system] is constant[False]] begin[:]
return[tuple[[<ast.Constant object at 0x7da2054a4c40>, <ast.Constant object at 0x7da2054a7220>, <ast.Constant object at 0x7da2054a5de0>, <ast.Constant object at 0x7da2054a45b0>]]]
if compare[name[system].status_code equal[==] constant[409]] begin[:]
variable[system] assign[=] call[name[self].create_system, parameter[]]
call[name[self].handle_fail_rcs, parameter[name[system]]]
call[name[logger].debug, parameter[constant[System: %s], call[name[system].json, parameter[]]]]
variable[message] assign[=] call[name[system].headers.get, parameter[constant[x-rh-message], constant[]]]
if compare[name[self].config.group is_not constant[None]] begin[:]
call[name[self].do_group, parameter[]]
if compare[name[system].status_code equal[==] constant[201]] begin[:]
<ast.Try object at 0x7da2054a52d0>
if compare[name[self].config.group is_not constant[None]] begin[:]
return[tuple[[<ast.Name object at 0x7da2054a5d80>, <ast.Name object at 0x7da2054a6ce0>, <ast.Attribute object at 0x7da2054a46a0>, <ast.Attribute object at 0x7da2054a4a30>]]] | keyword[def] identifier[register] ( identifier[self] ):
literal[string]
identifier[client_hostname] = identifier[determine_hostname] ()
identifier[logger] . identifier[debug] ( literal[string] )
identifier[system] = identifier[self] . identifier[create_system] ( identifier[new_machine_id] = keyword[False] )
keyword[if] identifier[system] keyword[is] keyword[False] :
keyword[return] ( literal[string] , literal[string] , literal[string] , literal[string] )
keyword[if] identifier[system] . identifier[status_code] == literal[int] :
identifier[system] = identifier[self] . identifier[create_system] ( identifier[new_machine_id] = keyword[True] )
identifier[self] . identifier[handle_fail_rcs] ( identifier[system] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[system] . identifier[json] ())
identifier[message] = identifier[system] . identifier[headers] . identifier[get] ( literal[string] , literal[string] )
keyword[if] identifier[self] . identifier[config] . identifier[group] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[do_group] ()
keyword[if] identifier[system] . identifier[status_code] == literal[int] :
keyword[try] :
identifier[system_json] = identifier[system] . identifier[json] ()
identifier[machine_id] = identifier[system_json] [ literal[string] ]
identifier[account_number] = identifier[system_json] [ literal[string] ]
identifier[logger] . identifier[info] ( literal[string] %( identifier[machine_id] , identifier[account_number] ))
keyword[except] :
identifier[logger] . identifier[debug] ( literal[string] )
identifier[logger] . identifier[debug] ( literal[string] )
identifier[logger] . identifier[debug] ( identifier[system] )
identifier[logger] . identifier[debug] ( identifier[system] . identifier[json] ())
keyword[if] identifier[self] . identifier[config] . identifier[group] keyword[is] keyword[not] keyword[None] :
keyword[return] ( identifier[message] , identifier[client_hostname] , identifier[self] . identifier[config] . identifier[group] , identifier[self] . identifier[config] . identifier[display_name] )
keyword[elif] identifier[self] . identifier[config] . identifier[display_name] keyword[is] keyword[not] keyword[None] :
keyword[return] ( identifier[message] , identifier[client_hostname] , literal[string] , identifier[self] . identifier[config] . identifier[display_name] )
keyword[else] :
keyword[return] ( identifier[message] , identifier[client_hostname] , literal[string] , literal[string] ) | def register(self):
"""
Register this machine
"""
client_hostname = determine_hostname()
# This will undo a blacklist
logger.debug('API: Create system')
system = self.create_system(new_machine_id=False)
if system is False:
return ('Could not reach the Insights service to register.', '', '', '') # depends on [control=['if'], data=[]]
# If we get a 409, we know we need to generate a new machine-id
if system.status_code == 409:
system = self.create_system(new_machine_id=True) # depends on [control=['if'], data=[]]
self.handle_fail_rcs(system)
logger.debug('System: %s', system.json())
message = system.headers.get('x-rh-message', '')
# Do grouping
if self.config.group is not None:
self.do_group() # depends on [control=['if'], data=[]]
# Display registration success messasge to STDOUT and logs
if system.status_code == 201:
try:
system_json = system.json()
machine_id = system_json['machine_id']
account_number = system_json['account_number']
logger.info('You successfully registered %s to account %s.' % (machine_id, account_number)) # depends on [control=['try'], data=[]]
except:
logger.debug('Received invalid JSON on system registration.')
logger.debug('API still indicates valid registration with 201 status code.')
logger.debug(system)
logger.debug(system.json()) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if self.config.group is not None:
return (message, client_hostname, self.config.group, self.config.display_name) # depends on [control=['if'], data=[]]
elif self.config.display_name is not None:
return (message, client_hostname, 'None', self.config.display_name) # depends on [control=['if'], data=[]]
else:
return (message, client_hostname, 'None', '') |
def sign(self, identity, digest):
    """Sign *digest* on the hardware device and return it as an (r, s) pair."""
    log.info('please confirm GPG signature on %s for "%s"...',
             self.device, identity.to_string())
    # NIST P-256 signs only the first 256 bits of the digest.
    if identity.curve_name == formats.CURVE_NIST256:
        digest = digest[:32]
    log.debug('signing digest: %s', util.hexlify(digest))
    with self.device:
        blob = self.device.sign(blob=digest, identity=identity)
    # The device returns r || s as two 32-byte big-endian integers.
    r = util.bytes2num(blob[:32])
    s = util.bytes2num(blob[32:])
    return (r, s)
constant[Sign the digest and return a serialized signature.]
call[name[log].info, parameter[constant[please confirm GPG signature on %s for "%s"...], name[self].device, call[name[identity].to_string, parameter[]]]]
if compare[name[identity].curve_name equal[==] name[formats].CURVE_NIST256] begin[:]
variable[digest] assign[=] call[name[digest]][<ast.Slice object at 0x7da1b1241c30>]
call[name[log].debug, parameter[constant[signing digest: %s], call[name[util].hexlify, parameter[name[digest]]]]]
with name[self].device begin[:]
variable[sig] assign[=] call[name[self].device.sign, parameter[]]
return[tuple[[<ast.Call object at 0x7da1b1241b10>, <ast.Call object at 0x7da1b1212440>]]] | keyword[def] identifier[sign] ( identifier[self] , identifier[identity] , identifier[digest] ):
literal[string]
identifier[log] . identifier[info] ( literal[string] ,
identifier[self] . identifier[device] , identifier[identity] . identifier[to_string] ())
keyword[if] identifier[identity] . identifier[curve_name] == identifier[formats] . identifier[CURVE_NIST256] :
identifier[digest] = identifier[digest] [: literal[int] ]
identifier[log] . identifier[debug] ( literal[string] , identifier[util] . identifier[hexlify] ( identifier[digest] ))
keyword[with] identifier[self] . identifier[device] :
identifier[sig] = identifier[self] . identifier[device] . identifier[sign] ( identifier[blob] = identifier[digest] , identifier[identity] = identifier[identity] )
keyword[return] ( identifier[util] . identifier[bytes2num] ( identifier[sig] [: literal[int] ]), identifier[util] . identifier[bytes2num] ( identifier[sig] [ literal[int] :])) | def sign(self, identity, digest):
"""Sign the digest and return a serialized signature."""
log.info('please confirm GPG signature on %s for "%s"...', self.device, identity.to_string())
if identity.curve_name == formats.CURVE_NIST256:
digest = digest[:32] # sign the first 256 bits # depends on [control=['if'], data=[]]
log.debug('signing digest: %s', util.hexlify(digest))
with self.device:
sig = self.device.sign(blob=digest, identity=identity) # depends on [control=['with'], data=[]]
return (util.bytes2num(sig[:32]), util.bytes2num(sig[32:])) |
def get(self, resource="", rid=None, **query):
    """
    Retrieves the resource with given id 'rid', or all resources of given type.
    Keep in mind that the API returns a list for any query that doesn't specify an ID, even when applying
    a limit=1 filter.
    Also be aware that float values tend to come back as strings ("2.0000" instead of 2.0)
    Keyword arguments can be parsed for filtering the query, for example:
        connection.get('products', limit=3, min_price=10.5)
    (see Bigcommerce resource documentation).
    """
    if rid:
        # Join the id onto the resource path, adding a slash only when
        # the caller didn't already end the path with one.
        separator = '' if resource[-1] == '/' else '/'
        resource = '{}{}{}'.format(resource, separator, rid)
    response = self._run_method('GET', resource, query=query)
    return self._handle_response(resource, response)
constant[
Retrieves the resource with given id 'rid', or all resources of given type.
Keep in mind that the API returns a list for any query that doesn't specify an ID, even when applying
a limit=1 filter.
Also be aware that float values tend to come back as strings ("2.0000" instead of 2.0)
Keyword arguments can be parsed for filtering the query, for example:
connection.get('products', limit=3, min_price=10.5)
(see Bigcommerce resource documentation).
]
if name[rid] begin[:]
if compare[call[name[resource]][<ast.UnaryOp object at 0x7da204960160>] not_equal[!=] constant[/]] begin[:]
<ast.AugAssign object at 0x7da1b031ccd0>
<ast.AugAssign object at 0x7da1b031c6a0>
variable[response] assign[=] call[name[self]._run_method, parameter[constant[GET], name[resource]]]
return[call[name[self]._handle_response, parameter[name[resource], name[response]]]] | keyword[def] identifier[get] ( identifier[self] , identifier[resource] = literal[string] , identifier[rid] = keyword[None] ,** identifier[query] ):
literal[string]
keyword[if] identifier[rid] :
keyword[if] identifier[resource] [- literal[int] ]!= literal[string] :
identifier[resource] += literal[string]
identifier[resource] += identifier[str] ( identifier[rid] )
identifier[response] = identifier[self] . identifier[_run_method] ( literal[string] , identifier[resource] , identifier[query] = identifier[query] )
keyword[return] identifier[self] . identifier[_handle_response] ( identifier[resource] , identifier[response] ) | def get(self, resource='', rid=None, **query):
"""
Retrieves the resource with given id 'rid', or all resources of given type.
Keep in mind that the API returns a list for any query that doesn't specify an ID, even when applying
a limit=1 filter.
Also be aware that float values tend to come back as strings ("2.0000" instead of 2.0)
Keyword arguments can be parsed for filtering the query, for example:
connection.get('products', limit=3, min_price=10.5)
(see Bigcommerce resource documentation).
"""
if rid:
if resource[-1] != '/':
resource += '/' # depends on [control=['if'], data=[]]
resource += str(rid) # depends on [control=['if'], data=[]]
response = self._run_method('GET', resource, query=query)
return self._handle_response(resource, response) |
def SetCredentials(api_username, api_passwd):
    """Establish API username and password associated with APIv2 commands."""
    # Record the credentials at module scope and flip the v2 flag so
    # subsequent APIv2 calls know authentication has been configured.
    global V2_API_USERNAME, V2_API_PASSWD, _V2_ENABLED
    V2_API_USERNAME = api_username
    V2_API_PASSWD = api_passwd
    _V2_ENABLED = True
constant[Establish API username and password associated with APIv2 commands.]
<ast.Global object at 0x7da1b244be80>
<ast.Global object at 0x7da1b2449300>
<ast.Global object at 0x7da1b2448e20>
variable[_V2_ENABLED] assign[=] constant[True]
variable[V2_API_USERNAME] assign[=] name[api_username]
variable[V2_API_PASSWD] assign[=] name[api_passwd] | keyword[def] identifier[SetCredentials] ( identifier[api_username] , identifier[api_passwd] ):
literal[string]
keyword[global] identifier[V2_API_USERNAME]
keyword[global] identifier[V2_API_PASSWD]
keyword[global] identifier[_V2_ENABLED]
identifier[_V2_ENABLED] = keyword[True]
identifier[V2_API_USERNAME] = identifier[api_username]
identifier[V2_API_PASSWD] = identifier[api_passwd] | def SetCredentials(api_username, api_passwd):
"""Establish API username and password associated with APIv2 commands."""
global V2_API_USERNAME
global V2_API_PASSWD
global _V2_ENABLED
_V2_ENABLED = True
V2_API_USERNAME = api_username
V2_API_PASSWD = api_passwd |
def get_mapping(self, other):
    """
    Return the self-to-other node mapping, or None if no isomorphism exists.
    """
    # Take the first isomorphism (if any) and invert it, since the
    # matcher yields other-to-self pairs.
    mapping = next(self._matcher(other).isomorphisms_iter(), None)
    if not mapping:
        return None
    return {dst: src for src, dst in mapping.items()}
constant[
get self to other mapping
]
variable[m] assign[=] call[name[next], parameter[call[call[name[self]._matcher, parameter[name[other]]].isomorphisms_iter, parameter[]], constant[None]]]
if name[m] begin[:]
return[<ast.DictComp object at 0x7da18fe918a0>] | keyword[def] identifier[get_mapping] ( identifier[self] , identifier[other] ):
literal[string]
identifier[m] = identifier[next] ( identifier[self] . identifier[_matcher] ( identifier[other] ). identifier[isomorphisms_iter] (), keyword[None] )
keyword[if] identifier[m] :
keyword[return] { identifier[v] : identifier[k] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[m] . identifier[items] ()} | def get_mapping(self, other):
"""
get self to other mapping
"""
m = next(self._matcher(other).isomorphisms_iter(), None)
if m:
return {v: k for (k, v) in m.items()} # depends on [control=['if'], data=[]] |
def make_assignment(instr, queue, stack):
    """
    Make an ast.Assign node.
    """
    value = make_expr(stack)
    # Chained assignments ('a = b = c') emit a DUP_TOP before every LHS
    # target except the last, so keep consuming targets while DUP_TOPs
    # appear, then build one final target from the non-DUP_TOP
    # instruction.
    targets = []
    current = instr
    while isinstance(current, instrs.DUP_TOP):
        targets.append(make_assign_target(queue.popleft(), queue, stack))
        current = queue.popleft()
    targets.append(make_assign_target(current, queue, stack))
    return ast.Assign(targets=targets, value=value)
constant[
Make an ast.Assign node.
]
variable[value] assign[=] call[name[make_expr], parameter[name[stack]]]
variable[targets] assign[=] list[[]]
while call[name[isinstance], parameter[name[instr], name[instrs].DUP_TOP]] begin[:]
call[name[targets].append, parameter[call[name[make_assign_target], parameter[call[name[queue].popleft, parameter[]], name[queue], name[stack]]]]]
variable[instr] assign[=] call[name[queue].popleft, parameter[]]
call[name[targets].append, parameter[call[name[make_assign_target], parameter[name[instr], name[queue], name[stack]]]]]
return[call[name[ast].Assign, parameter[]]] | keyword[def] identifier[make_assignment] ( identifier[instr] , identifier[queue] , identifier[stack] ):
literal[string]
identifier[value] = identifier[make_expr] ( identifier[stack] )
identifier[targets] =[]
keyword[while] identifier[isinstance] ( identifier[instr] , identifier[instrs] . identifier[DUP_TOP] ):
identifier[targets] . identifier[append] ( identifier[make_assign_target] ( identifier[queue] . identifier[popleft] (), identifier[queue] , identifier[stack] ))
identifier[instr] = identifier[queue] . identifier[popleft] ()
identifier[targets] . identifier[append] ( identifier[make_assign_target] ( identifier[instr] , identifier[queue] , identifier[stack] ))
keyword[return] identifier[ast] . identifier[Assign] ( identifier[targets] = identifier[targets] , identifier[value] = identifier[value] ) | def make_assignment(instr, queue, stack):
"""
Make an ast.Assign node.
"""
value = make_expr(stack)
# Make assignment targets.
# If there are multiple assignments (e.g. 'a = b = c'),
# each LHS expression except the last is preceded by a DUP_TOP instruction.
# Thus, we make targets until we don't see a DUP_TOP, and then make one
# more.
targets = []
while isinstance(instr, instrs.DUP_TOP):
targets.append(make_assign_target(queue.popleft(), queue, stack))
instr = queue.popleft() # depends on [control=['while'], data=[]]
targets.append(make_assign_target(instr, queue, stack))
return ast.Assign(targets=targets, value=value) |
def get(self, repository, snapshot, params=None):
"""
Retrieve information about a snapshot.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html>`_
:arg repository: A repository name
:arg snapshot: A comma-separated list of snapshot names
:arg ignore_unavailable: Whether to ignore unavailable snapshots,
defaults to false which means a NotFoundError `snapshot_missing_exception` is thrown
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg verbose: Whether to show verbose snapshot info or only show the
basic info found in the repository index blob
"""
for param in (repository, snapshot):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('GET', _make_path('_snapshot',
repository, snapshot), params=params) | def function[get, parameter[self, repository, snapshot, params]]:
constant[
Retrieve information about a snapshot.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html>`_
:arg repository: A repository name
:arg snapshot: A comma-separated list of snapshot names
:arg ignore_unavailable: Whether to ignore unavailable snapshots,
defaults to false which means a NotFoundError `snapshot_missing_exception` is thrown
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg verbose: Whether to show verbose snapshot info or only show the
basic info found in the repository index blob
]
for taget[name[param]] in starred[tuple[[<ast.Name object at 0x7da18f720fa0>, <ast.Name object at 0x7da18f723550>]]] begin[:]
if compare[name[param] in name[SKIP_IN_PATH]] begin[:]
<ast.Raise object at 0x7da18f722da0>
return[call[name[self].transport.perform_request, parameter[constant[GET], call[name[_make_path], parameter[constant[_snapshot], name[repository], name[snapshot]]]]]] | keyword[def] identifier[get] ( identifier[self] , identifier[repository] , identifier[snapshot] , identifier[params] = keyword[None] ):
literal[string]
keyword[for] identifier[param] keyword[in] ( identifier[repository] , identifier[snapshot] ):
keyword[if] identifier[param] keyword[in] identifier[SKIP_IN_PATH] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[self] . identifier[transport] . identifier[perform_request] ( literal[string] , identifier[_make_path] ( literal[string] ,
identifier[repository] , identifier[snapshot] ), identifier[params] = identifier[params] ) | def get(self, repository, snapshot, params=None):
"""
Retrieve information about a snapshot.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html>`_
:arg repository: A repository name
:arg snapshot: A comma-separated list of snapshot names
:arg ignore_unavailable: Whether to ignore unavailable snapshots,
defaults to false which means a NotFoundError `snapshot_missing_exception` is thrown
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg verbose: Whether to show verbose snapshot info or only show the
basic info found in the repository index blob
"""
for param in (repository, snapshot):
if param in SKIP_IN_PATH:
raise ValueError('Empty value passed for a required argument.') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['param']]
return self.transport.perform_request('GET', _make_path('_snapshot', repository, snapshot), params=params) |
def url_quote (url):
"""Quote given URL."""
if not url_is_absolute(url):
return document_quote(url)
urlparts = list(urlparse.urlsplit(url))
urlparts[0] = url_quote_part(urlparts[0]) # scheme
urlparts[1] = url_quote_part(urlparts[1], ':') # host
urlparts[2] = url_quote_part(urlparts[2], '/=,') # path
urlparts[3] = url_quote_part(urlparts[3], '&=,') # query
l = []
for k, v, sep in parse_qsl(urlparts[3], True): # query
k = url_quote_part(k, '/-:,;')
if v:
v = url_quote_part(v, '/-:,;')
l.append("%s=%s%s" % (k, v, sep))
else:
l.append("%s%s" % (k, sep))
urlparts[3] = ''.join(l)
urlparts[4] = url_quote_part(urlparts[4]) # anchor
return urlunsplit(urlparts) | def function[url_quote, parameter[url]]:
constant[Quote given URL.]
if <ast.UnaryOp object at 0x7da18dc04880> begin[:]
return[call[name[document_quote], parameter[name[url]]]]
variable[urlparts] assign[=] call[name[list], parameter[call[name[urlparse].urlsplit, parameter[name[url]]]]]
call[name[urlparts]][constant[0]] assign[=] call[name[url_quote_part], parameter[call[name[urlparts]][constant[0]]]]
call[name[urlparts]][constant[1]] assign[=] call[name[url_quote_part], parameter[call[name[urlparts]][constant[1]], constant[:]]]
call[name[urlparts]][constant[2]] assign[=] call[name[url_quote_part], parameter[call[name[urlparts]][constant[2]], constant[/=,]]]
call[name[urlparts]][constant[3]] assign[=] call[name[url_quote_part], parameter[call[name[urlparts]][constant[3]], constant[&=,]]]
variable[l] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18dc06d10>, <ast.Name object at 0x7da18dc04a00>, <ast.Name object at 0x7da18dc04130>]]] in starred[call[name[parse_qsl], parameter[call[name[urlparts]][constant[3]], constant[True]]]] begin[:]
variable[k] assign[=] call[name[url_quote_part], parameter[name[k], constant[/-:,;]]]
if name[v] begin[:]
variable[v] assign[=] call[name[url_quote_part], parameter[name[v], constant[/-:,;]]]
call[name[l].append, parameter[binary_operation[constant[%s=%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18dc06950>, <ast.Name object at 0x7da18dc04460>, <ast.Name object at 0x7da18dc04070>]]]]]
call[name[urlparts]][constant[3]] assign[=] call[constant[].join, parameter[name[l]]]
call[name[urlparts]][constant[4]] assign[=] call[name[url_quote_part], parameter[call[name[urlparts]][constant[4]]]]
return[call[name[urlunsplit], parameter[name[urlparts]]]] | keyword[def] identifier[url_quote] ( identifier[url] ):
literal[string]
keyword[if] keyword[not] identifier[url_is_absolute] ( identifier[url] ):
keyword[return] identifier[document_quote] ( identifier[url] )
identifier[urlparts] = identifier[list] ( identifier[urlparse] . identifier[urlsplit] ( identifier[url] ))
identifier[urlparts] [ literal[int] ]= identifier[url_quote_part] ( identifier[urlparts] [ literal[int] ])
identifier[urlparts] [ literal[int] ]= identifier[url_quote_part] ( identifier[urlparts] [ literal[int] ], literal[string] )
identifier[urlparts] [ literal[int] ]= identifier[url_quote_part] ( identifier[urlparts] [ literal[int] ], literal[string] )
identifier[urlparts] [ literal[int] ]= identifier[url_quote_part] ( identifier[urlparts] [ literal[int] ], literal[string] )
identifier[l] =[]
keyword[for] identifier[k] , identifier[v] , identifier[sep] keyword[in] identifier[parse_qsl] ( identifier[urlparts] [ literal[int] ], keyword[True] ):
identifier[k] = identifier[url_quote_part] ( identifier[k] , literal[string] )
keyword[if] identifier[v] :
identifier[v] = identifier[url_quote_part] ( identifier[v] , literal[string] )
identifier[l] . identifier[append] ( literal[string] %( identifier[k] , identifier[v] , identifier[sep] ))
keyword[else] :
identifier[l] . identifier[append] ( literal[string] %( identifier[k] , identifier[sep] ))
identifier[urlparts] [ literal[int] ]= literal[string] . identifier[join] ( identifier[l] )
identifier[urlparts] [ literal[int] ]= identifier[url_quote_part] ( identifier[urlparts] [ literal[int] ])
keyword[return] identifier[urlunsplit] ( identifier[urlparts] ) | def url_quote(url):
"""Quote given URL."""
if not url_is_absolute(url):
return document_quote(url) # depends on [control=['if'], data=[]]
urlparts = list(urlparse.urlsplit(url))
urlparts[0] = url_quote_part(urlparts[0]) # scheme
urlparts[1] = url_quote_part(urlparts[1], ':') # host
urlparts[2] = url_quote_part(urlparts[2], '/=,') # path
urlparts[3] = url_quote_part(urlparts[3], '&=,') # query
l = []
for (k, v, sep) in parse_qsl(urlparts[3], True): # query
k = url_quote_part(k, '/-:,;')
if v:
v = url_quote_part(v, '/-:,;')
l.append('%s=%s%s' % (k, v, sep)) # depends on [control=['if'], data=[]]
else:
l.append('%s%s' % (k, sep)) # depends on [control=['for'], data=[]]
urlparts[3] = ''.join(l)
urlparts[4] = url_quote_part(urlparts[4]) # anchor
return urlunsplit(urlparts) |
def main():
"""Main part of the download script."""
# Read config file. This has to get updated via git
project_root = utils.get_project_root()
infofile = os.path.join(project_root, "raw-datasets/info.yml")
logging.info("Read '%s'...", infofile)
with open(infofile, 'r') as ymlfile:
datasets = yaml.load(ymlfile)
for dataset in datasets:
local_path_file = os.path.join(project_root, dataset['online_path'])
i = 0
while not is_file_consistent(local_path_file, dataset['md5']) and i < 3:
if os.path.isfile(local_path_file):
local_file_size = os.path.getsize(local_path_file)
logging.info("MD5 codes differ. ")
logging.info("The file size of the downloaded file is %s.",
utils.sizeof_fmt(local_file_size))
logging.info("Download the file '%s'...", dataset['online_path'])
urllib.urlretrieve(dataset['url'], local_path_file)
i += 1
if i < 10:
logging.info("Found '%s'.", dataset['online_path']) | def function[main, parameter[]]:
constant[Main part of the download script.]
variable[project_root] assign[=] call[name[utils].get_project_root, parameter[]]
variable[infofile] assign[=] call[name[os].path.join, parameter[name[project_root], constant[raw-datasets/info.yml]]]
call[name[logging].info, parameter[constant[Read '%s'...], name[infofile]]]
with call[name[open], parameter[name[infofile], constant[r]]] begin[:]
variable[datasets] assign[=] call[name[yaml].load, parameter[name[ymlfile]]]
for taget[name[dataset]] in starred[name[datasets]] begin[:]
variable[local_path_file] assign[=] call[name[os].path.join, parameter[name[project_root], call[name[dataset]][constant[online_path]]]]
variable[i] assign[=] constant[0]
while <ast.BoolOp object at 0x7da1b282a830> begin[:]
if call[name[os].path.isfile, parameter[name[local_path_file]]] begin[:]
variable[local_file_size] assign[=] call[name[os].path.getsize, parameter[name[local_path_file]]]
call[name[logging].info, parameter[constant[MD5 codes differ. ]]]
call[name[logging].info, parameter[constant[The file size of the downloaded file is %s.], call[name[utils].sizeof_fmt, parameter[name[local_file_size]]]]]
call[name[logging].info, parameter[constant[Download the file '%s'...], call[name[dataset]][constant[online_path]]]]
call[name[urllib].urlretrieve, parameter[call[name[dataset]][constant[url]], name[local_path_file]]]
<ast.AugAssign object at 0x7da1b2872590>
if compare[name[i] less[<] constant[10]] begin[:]
call[name[logging].info, parameter[constant[Found '%s'.], call[name[dataset]][constant[online_path]]]] | keyword[def] identifier[main] ():
literal[string]
identifier[project_root] = identifier[utils] . identifier[get_project_root] ()
identifier[infofile] = identifier[os] . identifier[path] . identifier[join] ( identifier[project_root] , literal[string] )
identifier[logging] . identifier[info] ( literal[string] , identifier[infofile] )
keyword[with] identifier[open] ( identifier[infofile] , literal[string] ) keyword[as] identifier[ymlfile] :
identifier[datasets] = identifier[yaml] . identifier[load] ( identifier[ymlfile] )
keyword[for] identifier[dataset] keyword[in] identifier[datasets] :
identifier[local_path_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[project_root] , identifier[dataset] [ literal[string] ])
identifier[i] = literal[int]
keyword[while] keyword[not] identifier[is_file_consistent] ( identifier[local_path_file] , identifier[dataset] [ literal[string] ]) keyword[and] identifier[i] < literal[int] :
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[local_path_file] ):
identifier[local_file_size] = identifier[os] . identifier[path] . identifier[getsize] ( identifier[local_path_file] )
identifier[logging] . identifier[info] ( literal[string] )
identifier[logging] . identifier[info] ( literal[string] ,
identifier[utils] . identifier[sizeof_fmt] ( identifier[local_file_size] ))
identifier[logging] . identifier[info] ( literal[string] , identifier[dataset] [ literal[string] ])
identifier[urllib] . identifier[urlretrieve] ( identifier[dataset] [ literal[string] ], identifier[local_path_file] )
identifier[i] += literal[int]
keyword[if] identifier[i] < literal[int] :
identifier[logging] . identifier[info] ( literal[string] , identifier[dataset] [ literal[string] ]) | def main():
"""Main part of the download script."""
# Read config file. This has to get updated via git
project_root = utils.get_project_root()
infofile = os.path.join(project_root, 'raw-datasets/info.yml')
logging.info("Read '%s'...", infofile)
with open(infofile, 'r') as ymlfile:
datasets = yaml.load(ymlfile) # depends on [control=['with'], data=['ymlfile']]
for dataset in datasets:
local_path_file = os.path.join(project_root, dataset['online_path'])
i = 0
while not is_file_consistent(local_path_file, dataset['md5']) and i < 3:
if os.path.isfile(local_path_file):
local_file_size = os.path.getsize(local_path_file)
logging.info('MD5 codes differ. ')
logging.info('The file size of the downloaded file is %s.', utils.sizeof_fmt(local_file_size)) # depends on [control=['if'], data=[]]
logging.info("Download the file '%s'...", dataset['online_path'])
urllib.urlretrieve(dataset['url'], local_path_file)
i += 1 # depends on [control=['while'], data=[]]
if i < 10:
logging.info("Found '%s'.", dataset['online_path']) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['dataset']] |
def update(self, message):
"""
Updates the states in the primary AlarmDecoder object based on
the LRR message provided.
:param message: LRR message object
:type message: :py:class:`~alarmdecoder.messages.LRRMessage`
"""
# Firmware version < 2.2a.8.6
if message.version == 1:
if message.event_type == 'ALARM_PANIC':
self._alarmdecoder._update_panic_status(True)
elif message.event_type == 'CANCEL':
self._alarmdecoder._update_panic_status(False)
# Firmware version >= 2.2a.8.6
elif message.version == 2:
source = message.event_source
if source == LRR_EVENT_TYPE.CID:
self._handle_cid_message(message)
elif source == LRR_EVENT_TYPE.DSC:
self._handle_dsc_message(message)
elif source == LRR_EVENT_TYPE.ADEMCO:
self._handle_ademco_message(message)
elif source == LRR_EVENT_TYPE.ALARMDECODER:
self._handle_alarmdecoder_message(message)
elif source == LRR_EVENT_TYPE.UNKNOWN:
self._handle_unknown_message(message)
else:
pass | def function[update, parameter[self, message]]:
constant[
Updates the states in the primary AlarmDecoder object based on
the LRR message provided.
:param message: LRR message object
:type message: :py:class:`~alarmdecoder.messages.LRRMessage`
]
if compare[name[message].version equal[==] constant[1]] begin[:]
if compare[name[message].event_type equal[==] constant[ALARM_PANIC]] begin[:]
call[name[self]._alarmdecoder._update_panic_status, parameter[constant[True]]] | keyword[def] identifier[update] ( identifier[self] , identifier[message] ):
literal[string]
keyword[if] identifier[message] . identifier[version] == literal[int] :
keyword[if] identifier[message] . identifier[event_type] == literal[string] :
identifier[self] . identifier[_alarmdecoder] . identifier[_update_panic_status] ( keyword[True] )
keyword[elif] identifier[message] . identifier[event_type] == literal[string] :
identifier[self] . identifier[_alarmdecoder] . identifier[_update_panic_status] ( keyword[False] )
keyword[elif] identifier[message] . identifier[version] == literal[int] :
identifier[source] = identifier[message] . identifier[event_source]
keyword[if] identifier[source] == identifier[LRR_EVENT_TYPE] . identifier[CID] :
identifier[self] . identifier[_handle_cid_message] ( identifier[message] )
keyword[elif] identifier[source] == identifier[LRR_EVENT_TYPE] . identifier[DSC] :
identifier[self] . identifier[_handle_dsc_message] ( identifier[message] )
keyword[elif] identifier[source] == identifier[LRR_EVENT_TYPE] . identifier[ADEMCO] :
identifier[self] . identifier[_handle_ademco_message] ( identifier[message] )
keyword[elif] identifier[source] == identifier[LRR_EVENT_TYPE] . identifier[ALARMDECODER] :
identifier[self] . identifier[_handle_alarmdecoder_message] ( identifier[message] )
keyword[elif] identifier[source] == identifier[LRR_EVENT_TYPE] . identifier[UNKNOWN] :
identifier[self] . identifier[_handle_unknown_message] ( identifier[message] )
keyword[else] :
keyword[pass] | def update(self, message):
"""
Updates the states in the primary AlarmDecoder object based on
the LRR message provided.
:param message: LRR message object
:type message: :py:class:`~alarmdecoder.messages.LRRMessage`
"""
# Firmware version < 2.2a.8.6
if message.version == 1:
if message.event_type == 'ALARM_PANIC':
self._alarmdecoder._update_panic_status(True) # depends on [control=['if'], data=[]]
elif message.event_type == 'CANCEL':
self._alarmdecoder._update_panic_status(False) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Firmware version >= 2.2a.8.6
elif message.version == 2:
source = message.event_source
if source == LRR_EVENT_TYPE.CID:
self._handle_cid_message(message) # depends on [control=['if'], data=[]]
elif source == LRR_EVENT_TYPE.DSC:
self._handle_dsc_message(message) # depends on [control=['if'], data=[]]
elif source == LRR_EVENT_TYPE.ADEMCO:
self._handle_ademco_message(message) # depends on [control=['if'], data=[]]
elif source == LRR_EVENT_TYPE.ALARMDECODER:
self._handle_alarmdecoder_message(message) # depends on [control=['if'], data=[]]
elif source == LRR_EVENT_TYPE.UNKNOWN:
self._handle_unknown_message(message) # depends on [control=['if'], data=[]]
else:
pass # depends on [control=['if'], data=[]] |
def is_config(python2=None, python3=None, windows=None, linux=None, osx=None):
"""Check if a specific configuration of Python version and operating system
matches the user's setup. Mostly used to display targeted error messages.
python2 (bool): spaCy is executed with Python 2.x.
python3 (bool): spaCy is executed with Python 3.x.
windows (bool): spaCy is executed on Windows.
linux (bool): spaCy is executed on Linux.
osx (bool): spaCy is executed on OS X or macOS.
RETURNS (bool): Whether the configuration matches the user's platform.
DOCS: https://spacy.io/api/top-level#compat.is_config
"""
return (
python2 in (None, is_python2)
and python3 in (None, is_python3)
and windows in (None, is_windows)
and linux in (None, is_linux)
and osx in (None, is_osx)
) | def function[is_config, parameter[python2, python3, windows, linux, osx]]:
constant[Check if a specific configuration of Python version and operating system
matches the user's setup. Mostly used to display targeted error messages.
python2 (bool): spaCy is executed with Python 2.x.
python3 (bool): spaCy is executed with Python 3.x.
windows (bool): spaCy is executed on Windows.
linux (bool): spaCy is executed on Linux.
osx (bool): spaCy is executed on OS X or macOS.
RETURNS (bool): Whether the configuration matches the user's platform.
DOCS: https://spacy.io/api/top-level#compat.is_config
]
return[<ast.BoolOp object at 0x7da1b1ef95a0>] | keyword[def] identifier[is_config] ( identifier[python2] = keyword[None] , identifier[python3] = keyword[None] , identifier[windows] = keyword[None] , identifier[linux] = keyword[None] , identifier[osx] = keyword[None] ):
literal[string]
keyword[return] (
identifier[python2] keyword[in] ( keyword[None] , identifier[is_python2] )
keyword[and] identifier[python3] keyword[in] ( keyword[None] , identifier[is_python3] )
keyword[and] identifier[windows] keyword[in] ( keyword[None] , identifier[is_windows] )
keyword[and] identifier[linux] keyword[in] ( keyword[None] , identifier[is_linux] )
keyword[and] identifier[osx] keyword[in] ( keyword[None] , identifier[is_osx] )
) | def is_config(python2=None, python3=None, windows=None, linux=None, osx=None):
"""Check if a specific configuration of Python version and operating system
matches the user's setup. Mostly used to display targeted error messages.
python2 (bool): spaCy is executed with Python 2.x.
python3 (bool): spaCy is executed with Python 3.x.
windows (bool): spaCy is executed on Windows.
linux (bool): spaCy is executed on Linux.
osx (bool): spaCy is executed on OS X or macOS.
RETURNS (bool): Whether the configuration matches the user's platform.
DOCS: https://spacy.io/api/top-level#compat.is_config
"""
return python2 in (None, is_python2) and python3 in (None, is_python3) and (windows in (None, is_windows)) and (linux in (None, is_linux)) and (osx in (None, is_osx)) |
def append_path_payment_op(self,
destination,
send_code,
send_issuer,
send_max,
dest_code,
dest_issuer,
dest_amount,
path,
source=None):
"""Append a :class:`PathPayment <stellar_base.operation.PathPayment>`
operation to the list of operations.
:param str destination: The destination address (Account ID) for the
payment.
:param str send_code: The asset code for the source asset deducted from
the source account.
:param send_issuer: The address of the issuer of the source asset.
:type send_issuer: str, None
:param str send_max: The maximum amount of send asset to deduct
(excluding fees).
:param str dest_code: The asset code for the final destination asset
sent to the recipient.
:param dest_issuer: Account address that receives the payment.
:type dest_issuer: str, None
:param str dest_amount: The amount of destination asset the destination
account receives.
:param list path: A list of asset tuples, each tuple containing a
(asset_code, asset_issuer) for each asset in the path. For the native
asset, `None` is used for the asset_issuer.
:param str source: The source address of the path payment.
:return: This builder instance.
"""
# path: a list of asset tuple which contains asset_code and asset_issuer,
# [(asset_code, asset_issuer), (asset_code, asset_issuer)] for native asset you can deliver
# ('XLM', None)
send_asset = Asset(send_code, send_issuer)
dest_asset = Asset(dest_code, dest_issuer)
assets = []
for p in path:
assets.append(Asset(p[0], p[1]))
op = operation.PathPayment(destination, send_asset, send_max,
dest_asset, dest_amount, assets, source)
return self.append_op(op) | def function[append_path_payment_op, parameter[self, destination, send_code, send_issuer, send_max, dest_code, dest_issuer, dest_amount, path, source]]:
constant[Append a :class:`PathPayment <stellar_base.operation.PathPayment>`
operation to the list of operations.
:param str destination: The destination address (Account ID) for the
payment.
:param str send_code: The asset code for the source asset deducted from
the source account.
:param send_issuer: The address of the issuer of the source asset.
:type send_issuer: str, None
:param str send_max: The maximum amount of send asset to deduct
(excluding fees).
:param str dest_code: The asset code for the final destination asset
sent to the recipient.
:param dest_issuer: Account address that receives the payment.
:type dest_issuer: str, None
:param str dest_amount: The amount of destination asset the destination
account receives.
:param list path: A list of asset tuples, each tuple containing a
(asset_code, asset_issuer) for each asset in the path. For the native
asset, `None` is used for the asset_issuer.
:param str source: The source address of the path payment.
:return: This builder instance.
]
variable[send_asset] assign[=] call[name[Asset], parameter[name[send_code], name[send_issuer]]]
variable[dest_asset] assign[=] call[name[Asset], parameter[name[dest_code], name[dest_issuer]]]
variable[assets] assign[=] list[[]]
for taget[name[p]] in starred[name[path]] begin[:]
call[name[assets].append, parameter[call[name[Asset], parameter[call[name[p]][constant[0]], call[name[p]][constant[1]]]]]]
variable[op] assign[=] call[name[operation].PathPayment, parameter[name[destination], name[send_asset], name[send_max], name[dest_asset], name[dest_amount], name[assets], name[source]]]
return[call[name[self].append_op, parameter[name[op]]]] | keyword[def] identifier[append_path_payment_op] ( identifier[self] ,
identifier[destination] ,
identifier[send_code] ,
identifier[send_issuer] ,
identifier[send_max] ,
identifier[dest_code] ,
identifier[dest_issuer] ,
identifier[dest_amount] ,
identifier[path] ,
identifier[source] = keyword[None] ):
literal[string]
identifier[send_asset] = identifier[Asset] ( identifier[send_code] , identifier[send_issuer] )
identifier[dest_asset] = identifier[Asset] ( identifier[dest_code] , identifier[dest_issuer] )
identifier[assets] =[]
keyword[for] identifier[p] keyword[in] identifier[path] :
identifier[assets] . identifier[append] ( identifier[Asset] ( identifier[p] [ literal[int] ], identifier[p] [ literal[int] ]))
identifier[op] = identifier[operation] . identifier[PathPayment] ( identifier[destination] , identifier[send_asset] , identifier[send_max] ,
identifier[dest_asset] , identifier[dest_amount] , identifier[assets] , identifier[source] )
keyword[return] identifier[self] . identifier[append_op] ( identifier[op] ) | def append_path_payment_op(self, destination, send_code, send_issuer, send_max, dest_code, dest_issuer, dest_amount, path, source=None):
"""Append a :class:`PathPayment <stellar_base.operation.PathPayment>`
operation to the list of operations.
:param str destination: The destination address (Account ID) for the
payment.
:param str send_code: The asset code for the source asset deducted from
the source account.
:param send_issuer: The address of the issuer of the source asset.
:type send_issuer: str, None
:param str send_max: The maximum amount of send asset to deduct
(excluding fees).
:param str dest_code: The asset code for the final destination asset
sent to the recipient.
:param dest_issuer: Account address that receives the payment.
:type dest_issuer: str, None
:param str dest_amount: The amount of destination asset the destination
account receives.
:param list path: A list of asset tuples, each tuple containing a
(asset_code, asset_issuer) for each asset in the path. For the native
asset, `None` is used for the asset_issuer.
:param str source: The source address of the path payment.
:return: This builder instance.
"""
# path: a list of asset tuple which contains asset_code and asset_issuer,
# [(asset_code, asset_issuer), (asset_code, asset_issuer)] for native asset you can deliver
# ('XLM', None)
send_asset = Asset(send_code, send_issuer)
dest_asset = Asset(dest_code, dest_issuer)
assets = []
for p in path:
assets.append(Asset(p[0], p[1])) # depends on [control=['for'], data=['p']]
op = operation.PathPayment(destination, send_asset, send_max, dest_asset, dest_amount, assets, source)
return self.append_op(op) |
def union_update(self, other):
"""Update the set, adding any elements from other which are not
already in the set.
@param other: the collection of items with which to update the set
@type other: Set object
"""
if not isinstance(other, Set):
raise ValueError('other must be a Set instance')
if self is other:
return
for item in other.items:
self.add(item) | def function[union_update, parameter[self, other]]:
constant[Update the set, adding any elements from other which are not
already in the set.
@param other: the collection of items with which to update the set
@type other: Set object
]
if <ast.UnaryOp object at 0x7da18c4cc2e0> begin[:]
<ast.Raise object at 0x7da18f720b20>
if compare[name[self] is name[other]] begin[:]
return[None]
for taget[name[item]] in starred[name[other].items] begin[:]
call[name[self].add, parameter[name[item]]] | keyword[def] identifier[union_update] ( identifier[self] , identifier[other] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[other] , identifier[Set] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[self] keyword[is] identifier[other] :
keyword[return]
keyword[for] identifier[item] keyword[in] identifier[other] . identifier[items] :
identifier[self] . identifier[add] ( identifier[item] ) | def union_update(self, other):
"""Update the set, adding any elements from other which are not
already in the set.
@param other: the collection of items with which to update the set
@type other: Set object
"""
if not isinstance(other, Set):
raise ValueError('other must be a Set instance') # depends on [control=['if'], data=[]]
if self is other:
return # depends on [control=['if'], data=[]]
for item in other.items:
self.add(item) # depends on [control=['for'], data=['item']] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.