| code (string, lengths 75-104k) | code_sememe (string, lengths 47-309k) | token_type (string, lengths 215-214k) | code_dependency (string, lengths 75-155k) |
|---|---|---|---|
def makeResetPacket(ID, param):
"""
Resets a servo to one of 3 reset states:
XL320_RESET_ALL = 0xFF
XL320_RESET_ALL_BUT_ID = 0x01
XL320_RESET_ALL_BUT_ID_BAUD_RATE = 0x02
"""
if param not in [0x01, 0x02, 0xff]:
        raise Exception('Packet.makeResetPacket invalid parameter {}'.format(param))
    pkt = makePacket(ID, xl320.XL320_RESET, None, [param])
return pkt
|
def function[makeResetPacket, parameter[ID, param]]:
constant[
Resets a servo to one of 3 reset states:
XL320_RESET_ALL = 0xFF
XL320_RESET_ALL_BUT_ID = 0x01
XL320_RESET_ALL_BUT_ID_BAUD_RATE = 0x02
]
if compare[name[param] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da20c76fb20>, <ast.Constant object at 0x7da20c76f310>, <ast.Constant object at 0x7da20c76ca60>]]] begin[:]
<ast.Raise object at 0x7da20c76e5c0>
    variable[pkt] assign[=] call[name[makePacket], parameter[name[ID], name[xl320].XL320_RESET, constant[None], list[[<ast.Name object at 0x7da20c76c0a0>]]]]
return[name[pkt]]
|
keyword[def] identifier[makeResetPacket] ( identifier[ID] , identifier[param] ):
literal[string]
keyword[if] identifier[param] keyword[not] keyword[in] [ literal[int] , literal[int] , literal[int] ]:
keyword[raise] identifier[Exception] ( literal[string] . identifier[format] ( identifier[param] ))
identifier[pkt] = identifier[makePacket] ( identifier[ID] , identifier[xl320] . identifier[XL320_RESET] , keyword[None] ,[ identifier[param] ])
keyword[return] identifier[pkt]
|
def makeResetPacket(ID, param):
"""
Resets a servo to one of 3 reset states:
XL320_RESET_ALL = 0xFF
XL320_RESET_ALL_BUT_ID = 0x01
XL320_RESET_ALL_BUT_ID_BAUD_RATE = 0x02
"""
if param not in [1, 2, 255]:
        raise Exception('Packet.makeResetPacket invalid parameter {}'.format(param)) # depends on [control=['if'], data=['param']]
    pkt = makePacket(ID, xl320.XL320_RESET, None, [param])
return pkt
|
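The first row's `makeResetPacket` validates the reset level, then delegates to a packet builder; both `makePacket` and the `xl320` constants it leans on live outside the row. A minimal sketch of the same validate-then-build pattern, with hypothetical stand-ins for both:

```python
XL320_RESET = 0x06  # stand-in; the real instruction code lives in the xl320 module

def make_packet(servo_id, instruction, address, params):
    # Toy builder: the real makePacket also adds header, length and CRC fields.
    return [servo_id, instruction] + list(params)

def make_reset_packet(servo_id, param):
    """Build a reset packet for one of the three documented reset levels."""
    if param not in (0x01, 0x02, 0xFF):
        raise ValueError('invalid reset parameter {}'.format(param))
    return make_packet(servo_id, XL320_RESET, None, [param])

print(make_reset_packet(1, 0xFF))  # [1, 6, 255]
```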
def token(self):
""" returns the token for the site """
if self._token is None or \
datetime.datetime.now() >= self._token_expires_on:
result = self._getTokenArcMap()
if 'error' in result:
self._valid = False
self._message = result
else:
self._valid = True
self._message = "Token Generated"
return self._token
|
def function[token, parameter[self]]:
constant[ returns the token for the site ]
if <ast.BoolOp object at 0x7da1b12903d0> begin[:]
variable[result] assign[=] call[name[self]._getTokenArcMap, parameter[]]
if compare[constant[error] in name[result]] begin[:]
name[self]._valid assign[=] constant[False]
name[self]._message assign[=] name[result]
return[name[self]._token]
|
keyword[def] identifier[token] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_token] keyword[is] keyword[None] keyword[or] identifier[datetime] . identifier[datetime] . identifier[now] ()>= identifier[self] . identifier[_token_expires_on] :
identifier[result] = identifier[self] . identifier[_getTokenArcMap] ()
keyword[if] literal[string] keyword[in] identifier[result] :
identifier[self] . identifier[_valid] = keyword[False]
identifier[self] . identifier[_message] = identifier[result]
keyword[else] :
identifier[self] . identifier[_valid] = keyword[True]
identifier[self] . identifier[_message] = literal[string]
keyword[return] identifier[self] . identifier[_token]
|
def token(self):
""" returns the token for the site """
if self._token is None or datetime.datetime.now() >= self._token_expires_on:
result = self._getTokenArcMap()
if 'error' in result:
self._valid = False
self._message = result # depends on [control=['if'], data=['result']]
else:
self._valid = True
self._message = 'Token Generated' # depends on [control=['if'], data=[]]
return self._token
|
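The second row is an expiry-guarded cache: regenerate the token only when it is missing or past `_token_expires_on`. A self-contained sketch of that guard, with a plain callable standing in for `_getTokenArcMap` and the `_valid`/`_message` bookkeeping dropped:

```python
import datetime

class TokenCache:
    def __init__(self, fetch, lifetime_seconds=3600):
        self._fetch = fetch  # stand-in for _getTokenArcMap
        self._lifetime = datetime.timedelta(seconds=lifetime_seconds)
        self._token = None
        self._token_expires_on = datetime.datetime.min

    def token(self):
        # Refresh only when the cached token is absent or expired.
        if self._token is None or datetime.datetime.now() >= self._token_expires_on:
            self._token = self._fetch()
            self._token_expires_on = datetime.datetime.now() + self._lifetime
        return self._token

cache = TokenCache(lambda: 'abc123')
assert cache.token() == 'abc123'
```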
def list_directories_and_files(self, share_name, directory_name=None,
num_results=None, marker=None, timeout=None):
'''
Returns a generator to list the directories and files under the specified share.
The generator will lazily follow the continuation tokens returned by
the service and stop when all directories and files have been returned or
num_results is reached.
If num_results is specified and the share has more than that number of
        files and directories, the generator will have a populated next_marker field once it
finishes. This marker can be used to create a new generator if more
results are desired.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param int num_results:
Specifies the maximum number of files to return,
including all directory elements. If the request does not specify
num_results or specifies a value greater than 5,000, the server will
return up to 5,000 items. Setting num_results to a value less than
or equal to zero results in error response code 400 (Bad Request).
:param str marker:
An opaque continuation token. This value can be retrieved from the
next_marker field of a previous generator object if num_results was
specified and that generator has finished enumerating results. If
specified, this generator will begin returning results from the point
where the previous generator stopped.
:param int timeout:
The timeout parameter is expressed in seconds.
'''
args = (share_name, directory_name)
kwargs = {'marker': marker, 'max_results': num_results, 'timeout': timeout}
resp = self._list_directories_and_files(*args, **kwargs)
return ListGenerator(resp, self._list_directories_and_files, args, kwargs)
|
def function[list_directories_and_files, parameter[self, share_name, directory_name, num_results, marker, timeout]]:
constant[
Returns a generator to list the directories and files under the specified share.
The generator will lazily follow the continuation tokens returned by
the service and stop when all directories and files have been returned or
num_results is reached.
If num_results is specified and the share has more than that number of
        files and directories, the generator will have a populated next_marker field once it
finishes. This marker can be used to create a new generator if more
results are desired.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param int num_results:
Specifies the maximum number of files to return,
including all directory elements. If the request does not specify
num_results or specifies a value greater than 5,000, the server will
return up to 5,000 items. Setting num_results to a value less than
or equal to zero results in error response code 400 (Bad Request).
:param str marker:
An opaque continuation token. This value can be retrieved from the
next_marker field of a previous generator object if num_results was
specified and that generator has finished enumerating results. If
specified, this generator will begin returning results from the point
where the previous generator stopped.
:param int timeout:
The timeout parameter is expressed in seconds.
]
variable[args] assign[=] tuple[[<ast.Name object at 0x7da1b197c910>, <ast.Name object at 0x7da1b197d660>]]
variable[kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da1b197c3a0>, <ast.Constant object at 0x7da1b197e4a0>, <ast.Constant object at 0x7da20c990610>], [<ast.Name object at 0x7da20c990820>, <ast.Name object at 0x7da20c991a20>, <ast.Name object at 0x7da20c990100>]]
variable[resp] assign[=] call[name[self]._list_directories_and_files, parameter[<ast.Starred object at 0x7da20c990fd0>]]
return[call[name[ListGenerator], parameter[name[resp], name[self]._list_directories_and_files, name[args], name[kwargs]]]]
|
keyword[def] identifier[list_directories_and_files] ( identifier[self] , identifier[share_name] , identifier[directory_name] = keyword[None] ,
identifier[num_results] = keyword[None] , identifier[marker] = keyword[None] , identifier[timeout] = keyword[None] ):
literal[string]
identifier[args] =( identifier[share_name] , identifier[directory_name] )
identifier[kwargs] ={ literal[string] : identifier[marker] , literal[string] : identifier[num_results] , literal[string] : identifier[timeout] }
identifier[resp] = identifier[self] . identifier[_list_directories_and_files] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[ListGenerator] ( identifier[resp] , identifier[self] . identifier[_list_directories_and_files] , identifier[args] , identifier[kwargs] )
|
def list_directories_and_files(self, share_name, directory_name=None, num_results=None, marker=None, timeout=None):
"""
Returns a generator to list the directories and files under the specified share.
The generator will lazily follow the continuation tokens returned by
the service and stop when all directories and files have been returned or
num_results is reached.
If num_results is specified and the share has more than that number of
        files and directories, the generator will have a populated next_marker field once it
finishes. This marker can be used to create a new generator if more
results are desired.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param int num_results:
Specifies the maximum number of files to return,
including all directory elements. If the request does not specify
num_results or specifies a value greater than 5,000, the server will
return up to 5,000 items. Setting num_results to a value less than
or equal to zero results in error response code 400 (Bad Request).
:param str marker:
An opaque continuation token. This value can be retrieved from the
next_marker field of a previous generator object if num_results was
specified and that generator has finished enumerating results. If
specified, this generator will begin returning results from the point
where the previous generator stopped.
:param int timeout:
The timeout parameter is expressed in seconds.
"""
args = (share_name, directory_name)
kwargs = {'marker': marker, 'max_results': num_results, 'timeout': timeout}
resp = self._list_directories_and_files(*args, **kwargs)
return ListGenerator(resp, self._list_directories_and_files, args, kwargs)
|
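The third row hands the first response plus the request callable to `ListGenerator`, which lazily re-issues the call with each continuation marker. A simplified sketch of that pattern; the page shape used here (a dict with `items` and an optional `marker`) is hypothetical, while the real generator reads Azure responses:

```python
class LazyListGenerator:
    def __init__(self, first_page, fetch, args, kwargs):
        self._page, self._fetch = first_page, fetch
        self._args, self._kwargs = args, dict(kwargs)

    def __iter__(self):
        page = self._page
        while True:
            for item in page['items']:
                yield item
            if not page.get('marker'):   # no continuation token: done
                return
            self._kwargs['marker'] = page['marker']
            page = self._fetch(*self._args, **self._kwargs)

PAGES = {None: {'items': ['a', 'b'], 'marker': 'p2'},
         'p2': {'items': ['c'], 'marker': None}}
fetch = lambda share, marker=None, **kw: PAGES[marker]
gen = LazyListGenerator(fetch('myshare'), fetch, ('myshare',), {})
assert list(gen) == ['a', 'b', 'c']
```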
def all(self, endpoint, *args, **kwargs):
"""Retrieve all the data of a paginated endpoint, using GET.
:returns: The endpoint unpaginated data
:rtype: dict
"""
# 1. Initialize the pagination parameters.
kwargs.setdefault('params', {})['offset'] = 0
kwargs.setdefault('params', {})['limit'] = self.limit
kwargs['__method__'] = 'get'
# 2. Create an initial paginated request.
payload = self.request(endpoint, *args, **kwargs)
has_next = payload.get('result', {}).setdefault(
'meta', {'next': None}
)['next']
# 3. Loop until the end
while has_next:
# 4. Increment the offset
kwargs['params']['offset'] += self.limit
# 5. Query again
_payload = self.request(endpoint, *args, **kwargs)
# 6. Add the paginated data to the global one
payload['result']['data'].extend(_payload['result']['data'])
# 7. Compute has_next
has_next = _payload['result']['meta']['next']
del payload['result']['meta']
return payload
|
def function[all, parameter[self, endpoint]]:
constant[Retrieve all the data of a paginated endpoint, using GET.
:returns: The endpoint unpaginated data
:rtype: dict
]
call[call[name[kwargs].setdefault, parameter[constant[params], dictionary[[], []]]]][constant[offset]] assign[=] constant[0]
call[call[name[kwargs].setdefault, parameter[constant[params], dictionary[[], []]]]][constant[limit]] assign[=] name[self].limit
call[name[kwargs]][constant[__method__]] assign[=] constant[get]
variable[payload] assign[=] call[name[self].request, parameter[name[endpoint], <ast.Starred object at 0x7da1b133e650>]]
variable[has_next] assign[=] call[call[call[name[payload].get, parameter[constant[result], dictionary[[], []]]].setdefault, parameter[constant[meta], dictionary[[<ast.Constant object at 0x7da1b133f6d0>], [<ast.Constant object at 0x7da1b133eef0>]]]]][constant[next]]
while name[has_next] begin[:]
<ast.AugAssign object at 0x7da1b133f7c0>
variable[_payload] assign[=] call[name[self].request, parameter[name[endpoint], <ast.Starred object at 0x7da1b133c4f0>]]
call[call[call[name[payload]][constant[result]]][constant[data]].extend, parameter[call[call[name[_payload]][constant[result]]][constant[data]]]]
variable[has_next] assign[=] call[call[call[name[_payload]][constant[result]]][constant[meta]]][constant[next]]
<ast.Delete object at 0x7da1b133fcd0>
return[name[payload]]
|
keyword[def] identifier[all] ( identifier[self] , identifier[endpoint] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] . identifier[setdefault] ( literal[string] ,{})[ literal[string] ]= literal[int]
identifier[kwargs] . identifier[setdefault] ( literal[string] ,{})[ literal[string] ]= identifier[self] . identifier[limit]
identifier[kwargs] [ literal[string] ]= literal[string]
identifier[payload] = identifier[self] . identifier[request] ( identifier[endpoint] ,* identifier[args] ,** identifier[kwargs] )
identifier[has_next] = identifier[payload] . identifier[get] ( literal[string] ,{}). identifier[setdefault] (
literal[string] ,{ literal[string] : keyword[None] }
)[ literal[string] ]
keyword[while] identifier[has_next] :
identifier[kwargs] [ literal[string] ][ literal[string] ]+= identifier[self] . identifier[limit]
identifier[_payload] = identifier[self] . identifier[request] ( identifier[endpoint] ,* identifier[args] ,** identifier[kwargs] )
identifier[payload] [ literal[string] ][ literal[string] ]. identifier[extend] ( identifier[_payload] [ literal[string] ][ literal[string] ])
identifier[has_next] = identifier[_payload] [ literal[string] ][ literal[string] ][ literal[string] ]
keyword[del] identifier[payload] [ literal[string] ][ literal[string] ]
keyword[return] identifier[payload]
|
def all(self, endpoint, *args, **kwargs):
"""Retrieve all the data of a paginated endpoint, using GET.
:returns: The endpoint unpaginated data
:rtype: dict
"""
# 1. Initialize the pagination parameters.
kwargs.setdefault('params', {})['offset'] = 0
kwargs.setdefault('params', {})['limit'] = self.limit
kwargs['__method__'] = 'get'
# 2. Create an initial paginated request.
payload = self.request(endpoint, *args, **kwargs)
has_next = payload.get('result', {}).setdefault('meta', {'next': None})['next']
# 3. Loop until the end
while has_next:
# 4. Increment the offset
kwargs['params']['offset'] += self.limit
# 5. Query again
_payload = self.request(endpoint, *args, **kwargs)
# 6. Add the paginated data to the global one
payload['result']['data'].extend(_payload['result']['data'])
# 7. Compute has_next
has_next = _payload['result']['meta']['next'] # depends on [control=['while'], data=[]]
del payload['result']['meta']
return payload
|
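The fourth row's `all` is offset/limit pagination folded into one payload: bump `offset` by `limit`, extend `result.data`, stop when `meta.next` goes falsy, then drop `meta`. The same loop in isolation, run against a fake three-item endpoint:

```python
def fetch_all(request, endpoint, limit=2):
    # request is any callable returning
    # {'result': {'data': [...], 'meta': {'next': <token or None>}}}
    params = {'offset': 0, 'limit': limit}
    payload = request(endpoint, **params)
    has_next = payload['result']['meta']['next']
    while has_next:
        params['offset'] += limit
        page = request(endpoint, **params)
        payload['result']['data'].extend(page['result']['data'])
        has_next = page['result']['meta']['next']
    del payload['result']['meta']
    return payload

ITEMS = [1, 2, 3]
def fake_request(endpoint, offset, limit):
    more = offset + limit < len(ITEMS)
    return {'result': {'data': ITEMS[offset:offset + limit],
                       'meta': {'next': more or None}}}

assert fetch_all(fake_request, '/items')['result']['data'] == [1, 2, 3]
```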
def get_all_conversion_chains(self, from_type: Type[Any] = JOKER, to_type: Type[Any] = JOKER)\
-> Tuple[List[Converter], List[Converter], List[Converter]]:
"""
Utility method to find all converters or conversion chains matching the provided query.
:param from_type: a required type of input object, or JOKER for 'wildcard'(*) .
WARNING: "from_type=AnyObject/object/Any" means
"all converters able to source from anything", which is different from "from_type=JOKER" which means "all
converters whatever their source type".
:param to_type: a required type of output object, or JOKER for 'wildcard'(*) .
WARNING: "to_type=AnyObject/object/Any" means "all
converters able to produce any type of object", which is different from "to_type=JOKER" which means "all
converters whatever type they are able to produce".
:return: a tuple of lists of matching converters, by type of *dest_type* match : generic, approximate, exact
"""
pass
|
def function[get_all_conversion_chains, parameter[self, from_type, to_type]]:
constant[
Utility method to find all converters or conversion chains matching the provided query.
:param from_type: a required type of input object, or JOKER for 'wildcard'(*) .
WARNING: "from_type=AnyObject/object/Any" means
"all converters able to source from anything", which is different from "from_type=JOKER" which means "all
converters whatever their source type".
:param to_type: a required type of output object, or JOKER for 'wildcard'(*) .
WARNING: "to_type=AnyObject/object/Any" means "all
converters able to produce any type of object", which is different from "to_type=JOKER" which means "all
converters whatever type they are able to produce".
:return: a tuple of lists of matching converters, by type of *dest_type* match : generic, approximate, exact
]
pass
|
keyword[def] identifier[get_all_conversion_chains] ( identifier[self] , identifier[from_type] : identifier[Type] [ identifier[Any] ]= identifier[JOKER] , identifier[to_type] : identifier[Type] [ identifier[Any] ]= identifier[JOKER] )-> identifier[Tuple] [ identifier[List] [ identifier[Converter] ], identifier[List] [ identifier[Converter] ], identifier[List] [ identifier[Converter] ]]:
literal[string]
keyword[pass]
|
def get_all_conversion_chains(self, from_type: Type[Any]=JOKER, to_type: Type[Any]=JOKER) -> Tuple[List[Converter], List[Converter], List[Converter]]:
"""
Utility method to find all converters or conversion chains matching the provided query.
:param from_type: a required type of input object, or JOKER for 'wildcard'(*) .
WARNING: "from_type=AnyObject/object/Any" means
"all converters able to source from anything", which is different from "from_type=JOKER" which means "all
converters whatever their source type".
:param to_type: a required type of output object, or JOKER for 'wildcard'(*) .
WARNING: "to_type=AnyObject/object/Any" means "all
converters able to produce any type of object", which is different from "to_type=JOKER" which means "all
converters whatever type they are able to produce".
:return: a tuple of lists of matching converters, by type of *dest_type* match : generic, approximate, exact
"""
pass
|
def get_comments_are_moderated(instance):
"""
Check if comments are moderated for the instance
"""
if not IS_INSTALLED:
return False
try:
# Get the moderator which is installed for this model.
mod = moderator._registry[instance.__class__]
except KeyError:
# No moderator = no moderation
return False
# Check the 'auto_moderate_field', 'moderate_after',
# by reusing the basic Django policies.
return CommentModerator.moderate(mod, None, instance, None)
|
def function[get_comments_are_moderated, parameter[instance]]:
constant[
Check if comments are moderated for the instance
]
if <ast.UnaryOp object at 0x7da1b27e1960> begin[:]
return[constant[False]]
<ast.Try object at 0x7da1b27e3430>
return[call[name[CommentModerator].moderate, parameter[name[mod], constant[None], name[instance], constant[None]]]]
|
keyword[def] identifier[get_comments_are_moderated] ( identifier[instance] ):
literal[string]
keyword[if] keyword[not] identifier[IS_INSTALLED] :
keyword[return] keyword[False]
keyword[try] :
identifier[mod] = identifier[moderator] . identifier[_registry] [ identifier[instance] . identifier[__class__] ]
keyword[except] identifier[KeyError] :
keyword[return] keyword[False]
keyword[return] identifier[CommentModerator] . identifier[moderate] ( identifier[mod] , keyword[None] , identifier[instance] , keyword[None] )
|
def get_comments_are_moderated(instance):
"""
Check if comments are moderated for the instance
"""
if not IS_INSTALLED:
return False # depends on [control=['if'], data=[]]
try:
# Get the moderator which is installed for this model.
mod = moderator._registry[instance.__class__] # depends on [control=['try'], data=[]]
except KeyError:
# No moderator = no moderation
return False # depends on [control=['except'], data=[]]
# Check the 'auto_moderate_field', 'moderate_after',
# by reusing the basic Django policies.
return CommentModerator.moderate(mod, None, instance, None)
|
def _probe_characteristics(self, conn, services, timeout=5.0):
"""Probe gatt services for all associated characteristics in a BLE device
Args:
conn (int): the connection handle to probe
services (dict): a dictionary of services produced by probe_services()
timeout (float): the maximum number of seconds to spend in any single task
"""
for service in services.values():
success, result = self._enumerate_handles(conn, service['start_handle'],
service['end_handle'])
if not success:
return False, None
attributes = result['attributes']
service['characteristics'] = {}
last_char = None
for handle, attribute in attributes.items():
if attribute['uuid'].hex[-4:] == '0328':
success, result = self._read_handle(conn, handle, timeout)
if not success:
return False, None
value = result['data']
char = parse_characteristic_declaration(value)
service['characteristics'][char['uuid']] = char
last_char = char
elif attribute['uuid'].hex[-4:] == '0229':
if last_char is None:
return False, None
success, result = self._read_handle(conn, handle, timeout)
if not success:
return False, None
value = result['data']
assert len(value) == 2
value, = unpack("<H", value)
last_char['client_configuration'] = {'handle': handle, 'value': value}
return True, {'services': services}
|
def function[_probe_characteristics, parameter[self, conn, services, timeout]]:
constant[Probe gatt services for all associated characteristics in a BLE device
Args:
conn (int): the connection handle to probe
services (dict): a dictionary of services produced by probe_services()
timeout (float): the maximum number of seconds to spend in any single task
]
for taget[name[service]] in starred[call[name[services].values, parameter[]]] begin[:]
<ast.Tuple object at 0x7da20c993c70> assign[=] call[name[self]._enumerate_handles, parameter[name[conn], call[name[service]][constant[start_handle]], call[name[service]][constant[end_handle]]]]
if <ast.UnaryOp object at 0x7da18fe90940> begin[:]
return[tuple[[<ast.Constant object at 0x7da18fe91e70>, <ast.Constant object at 0x7da18fe934c0>]]]
variable[attributes] assign[=] call[name[result]][constant[attributes]]
call[name[service]][constant[characteristics]] assign[=] dictionary[[], []]
variable[last_char] assign[=] constant[None]
for taget[tuple[[<ast.Name object at 0x7da18fe91390>, <ast.Name object at 0x7da18fe91bd0>]]] in starred[call[name[attributes].items, parameter[]]] begin[:]
if compare[call[call[name[attribute]][constant[uuid]].hex][<ast.Slice object at 0x7da18fe92ec0>] equal[==] constant[0328]] begin[:]
<ast.Tuple object at 0x7da18fe91150> assign[=] call[name[self]._read_handle, parameter[name[conn], name[handle], name[timeout]]]
if <ast.UnaryOp object at 0x7da18fe90b20> begin[:]
return[tuple[[<ast.Constant object at 0x7da18fe93b80>, <ast.Constant object at 0x7da18fe919c0>]]]
variable[value] assign[=] call[name[result]][constant[data]]
variable[char] assign[=] call[name[parse_characteristic_declaration], parameter[name[value]]]
call[call[name[service]][constant[characteristics]]][call[name[char]][constant[uuid]]] assign[=] name[char]
variable[last_char] assign[=] name[char]
return[tuple[[<ast.Constant object at 0x7da207f01600>, <ast.Dict object at 0x7da207f02740>]]]
|
keyword[def] identifier[_probe_characteristics] ( identifier[self] , identifier[conn] , identifier[services] , identifier[timeout] = literal[int] ):
literal[string]
keyword[for] identifier[service] keyword[in] identifier[services] . identifier[values] ():
identifier[success] , identifier[result] = identifier[self] . identifier[_enumerate_handles] ( identifier[conn] , identifier[service] [ literal[string] ],
identifier[service] [ literal[string] ])
keyword[if] keyword[not] identifier[success] :
keyword[return] keyword[False] , keyword[None]
identifier[attributes] = identifier[result] [ literal[string] ]
identifier[service] [ literal[string] ]={}
identifier[last_char] = keyword[None]
keyword[for] identifier[handle] , identifier[attribute] keyword[in] identifier[attributes] . identifier[items] ():
keyword[if] identifier[attribute] [ literal[string] ]. identifier[hex] [- literal[int] :]== literal[string] :
identifier[success] , identifier[result] = identifier[self] . identifier[_read_handle] ( identifier[conn] , identifier[handle] , identifier[timeout] )
keyword[if] keyword[not] identifier[success] :
keyword[return] keyword[False] , keyword[None]
identifier[value] = identifier[result] [ literal[string] ]
identifier[char] = identifier[parse_characteristic_declaration] ( identifier[value] )
identifier[service] [ literal[string] ][ identifier[char] [ literal[string] ]]= identifier[char]
identifier[last_char] = identifier[char]
keyword[elif] identifier[attribute] [ literal[string] ]. identifier[hex] [- literal[int] :]== literal[string] :
keyword[if] identifier[last_char] keyword[is] keyword[None] :
keyword[return] keyword[False] , keyword[None]
identifier[success] , identifier[result] = identifier[self] . identifier[_read_handle] ( identifier[conn] , identifier[handle] , identifier[timeout] )
keyword[if] keyword[not] identifier[success] :
keyword[return] keyword[False] , keyword[None]
identifier[value] = identifier[result] [ literal[string] ]
keyword[assert] identifier[len] ( identifier[value] )== literal[int]
identifier[value] ,= identifier[unpack] ( literal[string] , identifier[value] )
identifier[last_char] [ literal[string] ]={ literal[string] : identifier[handle] , literal[string] : identifier[value] }
keyword[return] keyword[True] ,{ literal[string] : identifier[services] }
|
def _probe_characteristics(self, conn, services, timeout=5.0):
"""Probe gatt services for all associated characteristics in a BLE device
Args:
conn (int): the connection handle to probe
services (dict): a dictionary of services produced by probe_services()
timeout (float): the maximum number of seconds to spend in any single task
"""
for service in services.values():
(success, result) = self._enumerate_handles(conn, service['start_handle'], service['end_handle'])
if not success:
return (False, None) # depends on [control=['if'], data=[]]
attributes = result['attributes']
service['characteristics'] = {}
last_char = None
for (handle, attribute) in attributes.items():
if attribute['uuid'].hex[-4:] == '0328':
(success, result) = self._read_handle(conn, handle, timeout)
if not success:
return (False, None) # depends on [control=['if'], data=[]]
value = result['data']
char = parse_characteristic_declaration(value)
service['characteristics'][char['uuid']] = char
last_char = char # depends on [control=['if'], data=[]]
elif attribute['uuid'].hex[-4:] == '0229':
if last_char is None:
return (False, None) # depends on [control=['if'], data=[]]
(success, result) = self._read_handle(conn, handle, timeout)
if not success:
return (False, None) # depends on [control=['if'], data=[]]
value = result['data']
assert len(value) == 2
(value,) = unpack('<H', value)
last_char['client_configuration'] = {'handle': handle, 'value': value} # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['service']]
return (True, {'services': services})
|
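In the seventh row, the suffix checks on `attribute['uuid'].hex` appear to match 16-bit GATT UUIDs stored byte-reversed: '0328' for 0x2803 (characteristic declaration) and '0229' for 0x2902 (client characteristic configuration). The descriptor itself is a 2-byte little-endian bitfield, which is all the `unpack` line decodes:

```python
from struct import unpack

raw = b'\x01\x00'            # as the row receives it in result['data']
assert len(raw) == 2
value, = unpack('<H', raw)   # trailing comma unpacks the 1-tuple, as in the row
# Bit 0 enables notifications, bit 1 enables indications.
print(bool(value & 0x1), bool(value & 0x2))  # True False
```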
def __params_descriptor(self, message_type, request_kind, path, method_id,
request_params_class):
"""Describe the parameters of a method.
If the message_type is not a ResourceContainer, will fall back to
__params_descriptor_without_container (which will eventually be deprecated).
If the message type is a ResourceContainer, then all path/query parameters
will come from the ResourceContainer. This method will also make sure all
path parameters are covered by the message fields.
Args:
message_type: messages.Message or ResourceContainer class, Message with
parameters to describe.
request_kind: The type of request being made.
path: string, HTTP path to method.
method_id: string, Unique method identifier (e.g. 'myapi.items.method')
request_params_class: messages.Message, the original params message when
using a ResourceContainer. Otherwise, this should be null.
Returns:
A tuple (dict, list of string): Descriptor of the parameters, Order of the
parameters.
"""
path_parameter_dict = self.__get_path_parameters(path)
if request_params_class is None:
if path_parameter_dict:
_logger.warning('Method %s specifies path parameters but you are not '
'using a ResourceContainer; instead, you are using %r. '
'This will fail in future releases; please switch to '
'using ResourceContainer as soon as possible.',
method_id, type(message_type))
return self.__params_descriptor_without_container(
message_type, request_kind, path)
# From here, we can assume message_type is from a ResourceContainer.
message_type = request_params_class
params = {}
# Make sure all path parameters are covered.
for field_name, matched_path_parameters in path_parameter_dict.iteritems():
field = message_type.field_by_name(field_name)
self.__validate_path_parameters(field, matched_path_parameters)
# Add all fields, sort by field.number since we have parameterOrder.
for field in sorted(message_type.all_fields(), key=lambda f: f.number):
matched_path_parameters = path_parameter_dict.get(field.name, [])
self.__add_parameter(field, matched_path_parameters, params)
return params
|
def function[__params_descriptor, parameter[self, message_type, request_kind, path, method_id, request_params_class]]:
constant[Describe the parameters of a method.
If the message_type is not a ResourceContainer, will fall back to
__params_descriptor_without_container (which will eventually be deprecated).
If the message type is a ResourceContainer, then all path/query parameters
will come from the ResourceContainer. This method will also make sure all
path parameters are covered by the message fields.
Args:
message_type: messages.Message or ResourceContainer class, Message with
parameters to describe.
request_kind: The type of request being made.
path: string, HTTP path to method.
method_id: string, Unique method identifier (e.g. 'myapi.items.method')
request_params_class: messages.Message, the original params message when
using a ResourceContainer. Otherwise, this should be null.
Returns:
A tuple (dict, list of string): Descriptor of the parameters, Order of the
parameters.
]
variable[path_parameter_dict] assign[=] call[name[self].__get_path_parameters, parameter[name[path]]]
if compare[name[request_params_class] is constant[None]] begin[:]
if name[path_parameter_dict] begin[:]
call[name[_logger].warning, parameter[constant[Method %s specifies path parameters but you are not using a ResourceContainer; instead, you are using %r. This will fail in future releases; please switch to using ResourceContainer as soon as possible.], name[method_id], call[name[type], parameter[name[message_type]]]]]
return[call[name[self].__params_descriptor_without_container, parameter[name[message_type], name[request_kind], name[path]]]]
variable[message_type] assign[=] name[request_params_class]
variable[params] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b0f59c60>, <ast.Name object at 0x7da1b0f5ada0>]]] in starred[call[name[path_parameter_dict].iteritems, parameter[]]] begin[:]
variable[field] assign[=] call[name[message_type].field_by_name, parameter[name[field_name]]]
call[name[self].__validate_path_parameters, parameter[name[field], name[matched_path_parameters]]]
for taget[name[field]] in starred[call[name[sorted], parameter[call[name[message_type].all_fields, parameter[]]]]] begin[:]
variable[matched_path_parameters] assign[=] call[name[path_parameter_dict].get, parameter[name[field].name, list[[]]]]
call[name[self].__add_parameter, parameter[name[field], name[matched_path_parameters], name[params]]]
return[name[params]]
|
keyword[def] identifier[__params_descriptor] ( identifier[self] , identifier[message_type] , identifier[request_kind] , identifier[path] , identifier[method_id] ,
identifier[request_params_class] ):
literal[string]
identifier[path_parameter_dict] = identifier[self] . identifier[__get_path_parameters] ( identifier[path] )
keyword[if] identifier[request_params_class] keyword[is] keyword[None] :
keyword[if] identifier[path_parameter_dict] :
identifier[_logger] . identifier[warning] ( literal[string]
literal[string]
literal[string]
literal[string] ,
identifier[method_id] , identifier[type] ( identifier[message_type] ))
keyword[return] identifier[self] . identifier[__params_descriptor_without_container] (
identifier[message_type] , identifier[request_kind] , identifier[path] )
identifier[message_type] = identifier[request_params_class]
identifier[params] ={}
keyword[for] identifier[field_name] , identifier[matched_path_parameters] keyword[in] identifier[path_parameter_dict] . identifier[iteritems] ():
identifier[field] = identifier[message_type] . identifier[field_by_name] ( identifier[field_name] )
identifier[self] . identifier[__validate_path_parameters] ( identifier[field] , identifier[matched_path_parameters] )
keyword[for] identifier[field] keyword[in] identifier[sorted] ( identifier[message_type] . identifier[all_fields] (), identifier[key] = keyword[lambda] identifier[f] : identifier[f] . identifier[number] ):
identifier[matched_path_parameters] = identifier[path_parameter_dict] . identifier[get] ( identifier[field] . identifier[name] ,[])
identifier[self] . identifier[__add_parameter] ( identifier[field] , identifier[matched_path_parameters] , identifier[params] )
keyword[return] identifier[params]
|
def __params_descriptor(self, message_type, request_kind, path, method_id, request_params_class):
"""Describe the parameters of a method.
If the message_type is not a ResourceContainer, will fall back to
__params_descriptor_without_container (which will eventually be deprecated).
If the message type is a ResourceContainer, then all path/query parameters
will come from the ResourceContainer. This method will also make sure all
path parameters are covered by the message fields.
Args:
message_type: messages.Message or ResourceContainer class, Message with
parameters to describe.
request_kind: The type of request being made.
path: string, HTTP path to method.
method_id: string, Unique method identifier (e.g. 'myapi.items.method')
request_params_class: messages.Message, the original params message when
using a ResourceContainer. Otherwise, this should be null.
Returns:
A tuple (dict, list of string): Descriptor of the parameters, Order of the
parameters.
"""
path_parameter_dict = self.__get_path_parameters(path)
if request_params_class is None:
if path_parameter_dict:
_logger.warning('Method %s specifies path parameters but you are not using a ResourceContainer; instead, you are using %r. This will fail in future releases; please switch to using ResourceContainer as soon as possible.', method_id, type(message_type)) # depends on [control=['if'], data=[]]
return self.__params_descriptor_without_container(message_type, request_kind, path) # depends on [control=['if'], data=[]]
# From here, we can assume message_type is from a ResourceContainer.
message_type = request_params_class
params = {}
# Make sure all path parameters are covered.
for (field_name, matched_path_parameters) in path_parameter_dict.iteritems():
field = message_type.field_by_name(field_name)
self.__validate_path_parameters(field, matched_path_parameters) # depends on [control=['for'], data=[]]
# Add all fields, sort by field.number since we have parameterOrder.
for field in sorted(message_type.all_fields(), key=lambda f: f.number):
matched_path_parameters = path_parameter_dict.get(field.name, [])
self.__add_parameter(field, matched_path_parameters, params) # depends on [control=['for'], data=['field']]
return params
|
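The eighth row leans on `self.__get_path_parameters(path)`, which is not shown; from its use it maps each templated field name to the path parameters that matched it. A hypothetical stand-in that recovers `{placeholder}` names from an HTTP path template:

```python
import re

def get_path_parameters(path):
    # Hypothetical reconstruction: map each {placeholder} in the template
    # to the literal segment(s) where it appears.
    params = {}
    for match in re.finditer(r'{([^}]+)}', path):
        params.setdefault(match.group(1), []).append(match.group(0))
    return params

print(get_path_parameters('/items/{itemId}/parts/{partId}'))
# {'itemId': ['{itemId}'], 'partId': ['{partId}']}
```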
def __respond_with_list(self, data):
"""
Builds a python list from a json object
:param data: the json object
:returns: a nested list
"""
response = []
if isinstance(data, dict):
data.pop('seq', None)
data = list(data.values())
for item in data:
values = item
if isinstance(item, list) or isinstance(item, dict):
values = self.__respond_with_list(item)
if isinstance(values, list) and len(values) == 1:
response.extend(values)
else:
response.append(values)
return response
|
def function[__respond_with_list, parameter[self, data]]:
constant[
Builds a python list from a json object
:param data: the json object
:returns: a nested list
]
variable[response] assign[=] list[[]]
if call[name[isinstance], parameter[name[data], name[dict]]] begin[:]
call[name[data].pop, parameter[constant[seq], constant[None]]]
variable[data] assign[=] call[name[list], parameter[call[name[data].values, parameter[]]]]
for taget[name[item]] in starred[name[data]] begin[:]
variable[values] assign[=] name[item]
if <ast.BoolOp object at 0x7da1b2345810> begin[:]
variable[values] assign[=] call[name[self].__respond_with_list, parameter[name[item]]]
if <ast.BoolOp object at 0x7da20c6e7df0> begin[:]
call[name[response].extend, parameter[name[values]]]
return[name[response]]
|
keyword[def] identifier[__respond_with_list] ( identifier[self] , identifier[data] ):
literal[string]
identifier[response] =[]
keyword[if] identifier[isinstance] ( identifier[data] , identifier[dict] ):
identifier[data] . identifier[pop] ( literal[string] , keyword[None] )
identifier[data] = identifier[list] ( identifier[data] . identifier[values] ())
keyword[for] identifier[item] keyword[in] identifier[data] :
identifier[values] = identifier[item]
keyword[if] identifier[isinstance] ( identifier[item] , identifier[list] ) keyword[or] identifier[isinstance] ( identifier[item] , identifier[dict] ):
identifier[values] = identifier[self] . identifier[__respond_with_list] ( identifier[item] )
keyword[if] identifier[isinstance] ( identifier[values] , identifier[list] ) keyword[and] identifier[len] ( identifier[values] )== literal[int] :
identifier[response] . identifier[extend] ( identifier[values] )
keyword[else] :
identifier[response] . identifier[append] ( identifier[values] )
keyword[return] identifier[response]
|
def __respond_with_list(self, data):
"""
Builds a python list from a json object
:param data: the json object
:returns: a nested list
"""
response = []
if isinstance(data, dict):
data.pop('seq', None)
data = list(data.values()) # depends on [control=['if'], data=[]]
for item in data:
values = item
if isinstance(item, list) or isinstance(item, dict):
values = self.__respond_with_list(item) # depends on [control=['if'], data=[]]
if isinstance(values, list) and len(values) == 1:
response.extend(values) # depends on [control=['if'], data=[]]
else:
response.append(values) # depends on [control=['for'], data=['item']]
return response
|
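The ninth row's flattener is easy to exercise standalone. The copy below behaves the same, except that it works on a copy instead of popping `'seq'` out of the caller's dict:

```python
def respond_with_list(data):
    response = []
    if isinstance(data, dict):
        data = dict(data)       # the original mutates the caller's dict here
        data.pop('seq', None)
        data = list(data.values())
    for item in data:
        values = item
        if isinstance(item, (list, dict)):
            values = respond_with_list(item)
        if isinstance(values, list) and len(values) == 1:
            response.extend(values)   # splice single-element lists upward
        else:
            response.append(values)
    return response

print(respond_with_list({'seq': 9, 'a': 1, 'b': [2, [3]]}))  # [1, [2, 3]]
```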
def to_dict(self, converter=None):
"""Returns a copy dict of the current object
If a converter function is given, pass each value to it.
Per default the values are converted by `self.stringify`.
"""
if converter is None:
converter = self.stringify
out = dict()
for k, v in self.iteritems():
out[k] = converter(v)
return out
|
def function[to_dict, parameter[self, converter]]:
constant[Returns a copy dict of the current object
If a converter function is given, pass each value to it.
Per default the values are converted by `self.stringify`.
]
if compare[name[converter] is constant[None]] begin[:]
variable[converter] assign[=] name[self].stringify
variable[out] assign[=] call[name[dict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b1b84b20>, <ast.Name object at 0x7da1b1b84400>]]] in starred[call[name[self].iteritems, parameter[]]] begin[:]
call[name[out]][name[k]] assign[=] call[name[converter], parameter[name[v]]]
return[name[out]]
|
keyword[def] identifier[to_dict] ( identifier[self] , identifier[converter] = keyword[None] ):
literal[string]
keyword[if] identifier[converter] keyword[is] keyword[None] :
identifier[converter] = identifier[self] . identifier[stringify]
identifier[out] = identifier[dict] ()
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[iteritems] ():
identifier[out] [ identifier[k] ]= identifier[converter] ( identifier[v] )
keyword[return] identifier[out]
|
def to_dict(self, converter=None):
"""Returns a copy dict of the current object
If a converter function is given, pass each value to it.
Per default the values are converted by `self.stringify`.
"""
if converter is None:
converter = self.stringify # depends on [control=['if'], data=['converter']]
out = dict()
for (k, v) in self.iteritems():
out[k] = converter(v) # depends on [control=['for'], data=[]]
return out
|
def get_object(self, name):
"""Retrieve an object by a dotted name relative to the space."""
parts = name.split(".")
child = parts.pop(0)
if parts:
return self.spaces[child].get_object(".".join(parts))
else:
return self._namespace_impl[child]
|
def function[get_object, parameter[self, name]]:
constant[Retrieve an object by a dotted name relative to the space.]
variable[parts] assign[=] call[name[name].split, parameter[constant[.]]]
variable[child] assign[=] call[name[parts].pop, parameter[constant[0]]]
if name[parts] begin[:]
return[call[call[name[self].spaces][name[child]].get_object, parameter[call[constant[.].join, parameter[name[parts]]]]]]
|
keyword[def] identifier[get_object] ( identifier[self] , identifier[name] ):
literal[string]
identifier[parts] = identifier[name] . identifier[split] ( literal[string] )
identifier[child] = identifier[parts] . identifier[pop] ( literal[int] )
keyword[if] identifier[parts] :
keyword[return] identifier[self] . identifier[spaces] [ identifier[child] ]. identifier[get_object] ( literal[string] . identifier[join] ( identifier[parts] ))
keyword[else] :
keyword[return] identifier[self] . identifier[_namespace_impl] [ identifier[child] ]
|
def get_object(self, name):
"""Retrieve an object by a dotted name relative to the space."""
parts = name.split('.')
child = parts.pop(0)
if parts:
return self.spaces[child].get_object('.'.join(parts)) # depends on [control=['if'], data=[]]
else:
return self._namespace_impl[child]
|
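The eleventh row resolves dotted names by recursing through child spaces until one segment remains. A minimal tree the method works against, assuming only the two attributes it touches (`spaces` and `_namespace_impl`):

```python
class Space:
    def __init__(self, namespace=None, spaces=None):
        self._namespace_impl = namespace or {}   # leaf objects by name
        self.spaces = spaces or {}               # child spaces by name

    def get_object(self, name):
        parts = name.split('.')
        child = parts.pop(0)
        if parts:   # more segments: descend into the child space
            return self.spaces[child].get_object('.'.join(parts))
        return self._namespace_impl[child]

root = Space(spaces={'model': Space(namespace={'x': 42})})
assert root.get_object('model.x') == 42
```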
def get_single_external_tool_accounts(self, account_id, external_tool_id):
"""
Get a single external tool.
Returns the specified external tool.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - account_id
"""ID"""
path["account_id"] = account_id
# REQUIRED - PATH - external_tool_id
"""ID"""
path["external_tool_id"] = external_tool_id
self.logger.debug("GET /api/v1/accounts/{account_id}/external_tools/{external_tool_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/accounts/{account_id}/external_tools/{external_tool_id}".format(**path), data=data, params=params, no_data=True)
|
def function[get_single_external_tool_accounts, parameter[self, account_id, external_tool_id]]:
constant[
Get a single external tool.
Returns the specified external tool.
]
variable[path] assign[=] dictionary[[], []]
variable[data] assign[=] dictionary[[], []]
variable[params] assign[=] dictionary[[], []]
constant[ID]
call[name[path]][constant[account_id]] assign[=] name[account_id]
constant[ID]
call[name[path]][constant[external_tool_id]] assign[=] name[external_tool_id]
call[name[self].logger.debug, parameter[call[constant[GET /api/v1/accounts/{account_id}/external_tools/{external_tool_id} with query params: {params} and form data: {data}].format, parameter[]]]]
return[call[name[self].generic_request, parameter[constant[GET], call[constant[/api/v1/accounts/{account_id}/external_tools/{external_tool_id}].format, parameter[]]]]]
|
keyword[def] identifier[get_single_external_tool_accounts] ( identifier[self] , identifier[account_id] , identifier[external_tool_id] ):
literal[string]
identifier[path] ={}
identifier[data] ={}
identifier[params] ={}
literal[string]
identifier[path] [ literal[string] ]= identifier[account_id]
literal[string]
identifier[path] [ literal[string] ]= identifier[external_tool_id]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[params] = identifier[params] , identifier[data] = identifier[data] ,** identifier[path] ))
keyword[return] identifier[self] . identifier[generic_request] ( literal[string] , literal[string] . identifier[format] (** identifier[path] ), identifier[data] = identifier[data] , identifier[params] = identifier[params] , identifier[no_data] = keyword[True] )
|
def get_single_external_tool_accounts(self, account_id, external_tool_id):
"""
Get a single external tool.
Returns the specified external tool.
"""
path = {}
data = {}
params = {} # REQUIRED - PATH - account_id
'ID'
path['account_id'] = account_id # REQUIRED - PATH - external_tool_id
'ID'
path['external_tool_id'] = external_tool_id
self.logger.debug('GET /api/v1/accounts/{account_id}/external_tools/{external_tool_id} with query params: {params} and form data: {data}'.format(params=params, data=data, **path))
return self.generic_request('GET', '/api/v1/accounts/{account_id}/external_tools/{external_tool_id}'.format(**path), data=data, params=params, no_data=True)
|
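The twelfth row builds its URL by splatting the `path` dict into `str.format`; the same trick in isolation:

```python
path = {'account_id': 1, 'external_tool_id': 7}
url = '/api/v1/accounts/{account_id}/external_tools/{external_tool_id}'.format(**path)
print(url)  # /api/v1/accounts/1/external_tools/7
```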
def join(self, timeout=None):
"""Overwrites `threading.Thread.join`, to allow handling of exceptions thrown by threads
from within the main app."""
Thread.join(self, timeout=timeout)
if self.exc:
msg = "Thread '%s' threw an exception `%s`: %s" \
% (self.getName(), self.exc[0].__name__, self.exc[1])
new_exc = LaneExecutionError(msg)
if PY3:
raise new_exc.with_traceback(self.exc[2]) # pylint: disable=no-member
else:
raise (new_exc.__class__, new_exc, self.exc[2])
|
def function[join, parameter[self, timeout]]:
    constant[Overrides `threading.Thread.join` to allow handling of exceptions thrown by threads
from within the main app.]
call[name[Thread].join, parameter[name[self]]]
if name[self].exc begin[:]
variable[msg] assign[=] binary_operation[constant[Thread '%s' threw an exception `%s`: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da2054a7940>, <ast.Attribute object at 0x7da2054a6dd0>, <ast.Subscript object at 0x7da2054a7a90>]]]
variable[new_exc] assign[=] call[name[LaneExecutionError], parameter[name[msg]]]
if name[PY3] begin[:]
<ast.Raise object at 0x7da2054a5f30>
|
keyword[def] identifier[join] ( identifier[self] , identifier[timeout] = keyword[None] ):
literal[string]
identifier[Thread] . identifier[join] ( identifier[self] , identifier[timeout] = identifier[timeout] )
keyword[if] identifier[self] . identifier[exc] :
identifier[msg] = literal[string] %( identifier[self] . identifier[getName] (), identifier[self] . identifier[exc] [ literal[int] ]. identifier[__name__] , identifier[self] . identifier[exc] [ literal[int] ])
identifier[new_exc] = identifier[LaneExecutionError] ( identifier[msg] )
keyword[if] identifier[PY3] :
keyword[raise] identifier[new_exc] . identifier[with_traceback] ( identifier[self] . identifier[exc] [ literal[int] ])
keyword[else] :
keyword[raise] ( identifier[new_exc] . identifier[__class__] , identifier[new_exc] , identifier[self] . identifier[exc] [ literal[int] ])
|
def join(self, timeout=None):
"""Overwrites `threading.Thread.join`, to allow handling of exceptions thrown by threads
from within the main app."""
Thread.join(self, timeout=timeout)
if self.exc:
msg = "Thread '%s' threw an exception `%s`: %s" % (self.getName(), self.exc[0].__name__, self.exc[1])
new_exc = LaneExecutionError(msg)
if PY3:
raise new_exc.with_traceback(self.exc[2]) # pylint: disable=no-member # depends on [control=['if'], data=[]]
else:
raise (new_exc.__class__, new_exc, self.exc[2]) # depends on [control=['if'], data=[]]
|
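The thirteenth row stores a `sys.exc_info()` triple somewhere in `run` (not shown) and replays it from `join`, so a worker's failure surfaces in the thread that waits on it. A self-contained Python 3 sketch of the same idea, with a plain `RuntimeError` standing in for `LaneExecutionError`:

```python
import sys
from threading import Thread

class PropagatingThread(Thread):
    def run(self):
        self.exc = None
        try:
            super().run()                 # runs the usual target
        except Exception:
            self.exc = sys.exc_info()     # (type, value, traceback)

    def join(self, timeout=None):
        super().join(timeout=timeout)
        if self.exc:
            msg = "Thread '%s' threw an exception `%s`: %s" % (
                self.name, self.exc[0].__name__, self.exc[1])
            raise RuntimeError(msg).with_traceback(self.exc[2])

t = PropagatingThread(target=lambda: 1 / 0)
t.start()
try:
    t.join()
except RuntimeError as err:
    print(err)   # e.g. Thread 'Thread-1' threw an exception `ZeroDivisionError`: ...
```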
def handle(self, request, buffer_size):
"""
Handle a message
:param request: the request socket.
:param buffer_size: the buffer size.
:return: True if success, False otherwise
"""
logger = self.logger
data = self.__receive(request, buffer_size)
if data is None:
return False
else:
arr = array('B',data)
for message in split_array(arr,StxEtxHandler.ETX):
if message[0] == StxEtxHandler.STX:
message = message[1:]
logger.debug(message)
result = self.handler_function(bytearray(message))
if self.component_type == StreamComponent.PROCESSOR:
if not self.__send(request, result):
return False
return True
|
def function[handle, parameter[self, request, buffer_size]]:
constant[
Handle a message
:param request: the request socket.
:param buffer_size: the buffer size.
:return: True if success, False otherwise
]
variable[logger] assign[=] name[self].logger
variable[data] assign[=] call[name[self].__receive, parameter[name[request], name[buffer_size]]]
if compare[name[data] is constant[None]] begin[:]
return[constant[False]]
return[constant[True]]
|
keyword[def] identifier[handle] ( identifier[self] , identifier[request] , identifier[buffer_size] ):
literal[string]
identifier[logger] = identifier[self] . identifier[logger]
identifier[data] = identifier[self] . identifier[__receive] ( identifier[request] , identifier[buffer_size] )
keyword[if] identifier[data] keyword[is] keyword[None] :
keyword[return] keyword[False]
keyword[else] :
identifier[arr] = identifier[array] ( literal[string] , identifier[data] )
keyword[for] identifier[message] keyword[in] identifier[split_array] ( identifier[arr] , identifier[StxEtxHandler] . identifier[ETX] ):
keyword[if] identifier[message] [ literal[int] ]== identifier[StxEtxHandler] . identifier[STX] :
identifier[message] = identifier[message] [ literal[int] :]
identifier[logger] . identifier[debug] ( identifier[message] )
identifier[result] = identifier[self] . identifier[handler_function] ( identifier[bytearray] ( identifier[message] ))
keyword[if] identifier[self] . identifier[component_type] == identifier[StreamComponent] . identifier[PROCESSOR] :
keyword[if] keyword[not] identifier[self] . identifier[__send] ( identifier[request] , identifier[result] ):
keyword[return] keyword[False]
keyword[return] keyword[True]
|
def handle(self, request, buffer_size):
"""
Handle a message
:param request: the request socket.
:param buffer_size: the buffer size.
:return: True if success, False otherwise
"""
logger = self.logger
data = self.__receive(request, buffer_size)
if data is None:
return False # depends on [control=['if'], data=[]]
else:
arr = array('B', data)
for message in split_array(arr, StxEtxHandler.ETX):
if message[0] == StxEtxHandler.STX:
message = message[1:] # depends on [control=['if'], data=[]]
logger.debug(message)
result = self.handler_function(bytearray(message))
if self.component_type == StreamComponent.PROCESSOR:
if not self.__send(request, result):
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['message']]
return True
|
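The fourteenth row consumes STX/ETX-framed messages via a `split_array` helper that is not shown. A hypothetical equivalent over `bytes`, splitting on ETX and stripping a leading STX exactly as the handler does:

```python
STX, ETX = 0x02, 0x03

def split_frames(data):
    # Hypothetical stand-in for split_array(arr, StxEtxHandler.ETX).
    for frame in bytes(data).split(bytes([ETX])):
        if frame:
            yield frame[1:] if frame[0] == STX else frame

stream = bytes([STX]) + b'hello' + bytes([ETX]) + bytes([STX]) + b'world' + bytes([ETX])
print([f.decode() for f in split_frames(stream)])  # ['hello', 'world']
```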
def facets_on_hull(self):
"""
Find which facets of the mesh are on the convex hull.
Returns
---------
on_hull : (len(mesh.facets),) bool
is A facet on the meshes convex hull or not
"""
# facets plane, origin and normal
normals = self.facets_normal
origins = self.facets_origin
# (n,3) convex hull vertices
convex = self.convex_hull.vertices.view(np.ndarray).copy()
# boolean mask for which facets are on convex hull
on_hull = np.zeros(len(self.facets), dtype=np.bool)
for i, normal, origin in zip(range(len(normals)), normals, origins):
# a facet plane is on the convex hull if every vertex
# of the convex hull is behind that plane
# which we are checking with dot products
dot = np.dot(normal, (convex - origin).T)
on_hull[i] = (dot < tol.merge).all()
return on_hull
|
def function[facets_on_hull, parameter[self]]:
constant[
Find which facets of the mesh are on the convex hull.
Returns
---------
on_hull : (len(mesh.facets),) bool
is A facet on the meshes convex hull or not
]
variable[normals] assign[=] name[self].facets_normal
variable[origins] assign[=] name[self].facets_origin
variable[convex] assign[=] call[call[name[self].convex_hull.vertices.view, parameter[name[np].ndarray]].copy, parameter[]]
variable[on_hull] assign[=] call[name[np].zeros, parameter[call[name[len], parameter[name[self].facets]]]]
for taget[tuple[[<ast.Name object at 0x7da20c76d030>, <ast.Name object at 0x7da20c76f6d0>, <ast.Name object at 0x7da20c76d780>]]] in starred[call[name[zip], parameter[call[name[range], parameter[call[name[len], parameter[name[normals]]]]], name[normals], name[origins]]]] begin[:]
variable[dot] assign[=] call[name[np].dot, parameter[name[normal], binary_operation[name[convex] - name[origin]].T]]
call[name[on_hull]][name[i]] assign[=] call[compare[name[dot] less[<] name[tol].merge].all, parameter[]]
return[name[on_hull]]
|
keyword[def] identifier[facets_on_hull] ( identifier[self] ):
literal[string]
identifier[normals] = identifier[self] . identifier[facets_normal]
identifier[origins] = identifier[self] . identifier[facets_origin]
identifier[convex] = identifier[self] . identifier[convex_hull] . identifier[vertices] . identifier[view] ( identifier[np] . identifier[ndarray] ). identifier[copy] ()
identifier[on_hull] = identifier[np] . identifier[zeros] ( identifier[len] ( identifier[self] . identifier[facets] ), identifier[dtype] = identifier[np] . identifier[bool] )
keyword[for] identifier[i] , identifier[normal] , identifier[origin] keyword[in] identifier[zip] ( identifier[range] ( identifier[len] ( identifier[normals] )), identifier[normals] , identifier[origins] ):
identifier[dot] = identifier[np] . identifier[dot] ( identifier[normal] ,( identifier[convex] - identifier[origin] ). identifier[T] )
identifier[on_hull] [ identifier[i] ]=( identifier[dot] < identifier[tol] . identifier[merge] ). identifier[all] ()
keyword[return] identifier[on_hull]
|
def facets_on_hull(self):
"""
Find which facets of the mesh are on the convex hull.
Returns
---------
on_hull : (len(mesh.facets),) bool
is A facet on the meshes convex hull or not
"""
# facets plane, origin and normal
normals = self.facets_normal
origins = self.facets_origin
# (n,3) convex hull vertices
convex = self.convex_hull.vertices.view(np.ndarray).copy()
# boolean mask for which facets are on convex hull
on_hull = np.zeros(len(self.facets), dtype=np.bool)
for (i, normal, origin) in zip(range(len(normals)), normals, origins):
# a facet plane is on the convex hull if every vertex
# of the convex hull is behind that plane
# which we are checking with dot products
dot = np.dot(normal, (convex - origin).T)
on_hull[i] = (dot < tol.merge).all() # depends on [control=['for'], data=[]]
return on_hull
|
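The fifteenth row's hull test is a plane half-space check: a facet lies on the hull when the dot product of its normal with every hull vertex (taken relative to the facet origin) stays below a merge tolerance. The test for a single facet, in isolation:

```python
import numpy as np

normal = np.array([0.0, 0.0, 1.0])   # facet plane normal
origin = np.array([0.0, 0.0, 1.0])   # a point on the facet plane
hull_vertices = np.array([[0, 0, 0], [1, 0, 1], [0, 1, 1]], dtype=float)

tol_merge = 1e-8                     # stand-in for the module's tol.merge
dot = np.dot(normal, (hull_vertices - origin).T)
print((dot < tol_merge).all())       # True: every vertex is behind the plane
```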
def vis_detection(im_orig, detections, class_names, thresh=0.7):
"""visualize [cls, conf, x1, y1, x2, y2]"""
import matplotlib.pyplot as plt
import random
plt.imshow(im_orig)
colors = [(random.random(), random.random(), random.random()) for _ in class_names]
for [cls, conf, x1, y1, x2, y2] in detections:
cls = int(cls)
if cls > 0 and conf > thresh:
rect = plt.Rectangle((x1, y1), x2 - x1, y2 - y1,
fill=False, edgecolor=colors[cls], linewidth=3.5)
plt.gca().add_patch(rect)
plt.gca().text(x1, y1 - 2, '{:s} {:.3f}'.format(class_names[cls], conf),
bbox=dict(facecolor=colors[cls], alpha=0.5), fontsize=12, color='white')
plt.show()
|
def function[vis_detection, parameter[im_orig, detections, class_names, thresh]]:
constant[visualize [cls, conf, x1, y1, x2, y2]]
import module[matplotlib.pyplot] as alias[plt]
import module[random]
call[name[plt].imshow, parameter[name[im_orig]]]
variable[colors] assign[=] <ast.ListComp object at 0x7da2054a6860>
for taget[list[[<ast.Name object at 0x7da2054a5780>, <ast.Name object at 0x7da2054a7c10>, <ast.Name object at 0x7da2054a7070>, <ast.Name object at 0x7da2054a47c0>, <ast.Name object at 0x7da2054a6680>, <ast.Name object at 0x7da2054a7af0>]]] in starred[name[detections]] begin[:]
variable[cls] assign[=] call[name[int], parameter[name[cls]]]
if <ast.BoolOp object at 0x7da2054a54e0> begin[:]
variable[rect] assign[=] call[name[plt].Rectangle, parameter[tuple[[<ast.Name object at 0x7da2054a4a30>, <ast.Name object at 0x7da2054a5e70>]], binary_operation[name[x2] - name[x1]], binary_operation[name[y2] - name[y1]]]]
call[call[name[plt].gca, parameter[]].add_patch, parameter[name[rect]]]
call[call[name[plt].gca, parameter[]].text, parameter[name[x1], binary_operation[name[y1] - constant[2]], call[constant[{:s} {:.3f}].format, parameter[call[name[class_names]][name[cls]], name[conf]]]]]
call[name[plt].show, parameter[]]
|
keyword[def] identifier[vis_detection] ( identifier[im_orig] , identifier[detections] , identifier[class_names] , identifier[thresh] = literal[int] ):
literal[string]
keyword[import] identifier[matplotlib] . identifier[pyplot] keyword[as] identifier[plt]
keyword[import] identifier[random]
identifier[plt] . identifier[imshow] ( identifier[im_orig] )
identifier[colors] =[( identifier[random] . identifier[random] (), identifier[random] . identifier[random] (), identifier[random] . identifier[random] ()) keyword[for] identifier[_] keyword[in] identifier[class_names] ]
keyword[for] [ identifier[cls] , identifier[conf] , identifier[x1] , identifier[y1] , identifier[x2] , identifier[y2] ] keyword[in] identifier[detections] :
identifier[cls] = identifier[int] ( identifier[cls] )
keyword[if] identifier[cls] > literal[int] keyword[and] identifier[conf] > identifier[thresh] :
identifier[rect] = identifier[plt] . identifier[Rectangle] (( identifier[x1] , identifier[y1] ), identifier[x2] - identifier[x1] , identifier[y2] - identifier[y1] ,
identifier[fill] = keyword[False] , identifier[edgecolor] = identifier[colors] [ identifier[cls] ], identifier[linewidth] = literal[int] )
identifier[plt] . identifier[gca] (). identifier[add_patch] ( identifier[rect] )
identifier[plt] . identifier[gca] (). identifier[text] ( identifier[x1] , identifier[y1] - literal[int] , literal[string] . identifier[format] ( identifier[class_names] [ identifier[cls] ], identifier[conf] ),
identifier[bbox] = identifier[dict] ( identifier[facecolor] = identifier[colors] [ identifier[cls] ], identifier[alpha] = literal[int] ), identifier[fontsize] = literal[int] , identifier[color] = literal[string] )
identifier[plt] . identifier[show] ()
|
def vis_detection(im_orig, detections, class_names, thresh=0.7):
"""visualize [cls, conf, x1, y1, x2, y2]"""
import matplotlib.pyplot as plt
import random
plt.imshow(im_orig)
colors = [(random.random(), random.random(), random.random()) for _ in class_names]
for [cls, conf, x1, y1, x2, y2] in detections:
cls = int(cls)
if cls > 0 and conf > thresh:
rect = plt.Rectangle((x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor=colors[cls], linewidth=3.5)
plt.gca().add_patch(rect)
plt.gca().text(x1, y1 - 2, '{:s} {:.3f}'.format(class_names[cls], conf), bbox=dict(facecolor=colors[cls], alpha=0.5), fontsize=12, color='white') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
plt.show()
|
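A quick way to exercise vis_detection, assuming matplotlib is installed; the blank canvas, the single detection row, and the class names below are invented for illustration (index 0 plays the background class that the cls > 0 check skips):
import numpy as np

im = np.zeros((200, 200, 3), dtype=np.uint8)   # blank RGB canvas
dets = [[1, 0.95, 30, 40, 120, 160]]           # one [cls, conf, x1, y1, x2, y2] row
vis_detection(im, dets, class_names=['__background__', 'person'], thresh=0.7)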
def getElementsByName(self, name):
    """ get elements with given name,
    return list of element objects matching 'name'
    :param name: element name, case sensitive; if elements are
    auto-generated from LteParser, the name should be lower cased.
    """
    try:
        # list() keeps the documented list return type on Python 3,
        # where filter() would otherwise yield a lazy iterator
        return list(filter(lambda x: x.name == name, self._lattice_eleobjlist))
    except AttributeError:
        return []
|
def function[getElementsByName, parameter[self, name]]:
    constant[ get elements with given name,
    return list of element objects matching 'name'
    :param name: element name, case sensitive; if elements are
    auto-generated from LteParser, the name should be lower cased.
    ]
<ast.Try object at 0x7da1b08112d0>
|
keyword[def] identifier[getElementsByName] ( identifier[self] , identifier[name] ):
literal[string]
keyword[try] :
        keyword[return] identifier[list] ( identifier[filter] ( keyword[lambda] identifier[x] : identifier[x] . identifier[name] == identifier[name] , identifier[self] . identifier[_lattice_eleobjlist] ))
    keyword[except] identifier[AttributeError] :
keyword[return] []
|
def getElementsByName(self, name):
    """ get elements with given name,
    return list of element objects matching 'name'
    :param name: element name, case sensitive; if elements are
    auto-generated from LteParser, the name should be lower cased.
    """
    try:
        return list(filter(lambda x: x.name == name, self._lattice_eleobjlist)) # depends on [control=['try'], data=[]]
    except AttributeError:
        return [] # depends on [control=['except'], data=[]]
|
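A minimal sketch of the lookup; SimpleNamespace objects stand in for the real lattice elements, which only need a name attribute here:
from types import SimpleNamespace

lat = SimpleNamespace(_lattice_eleobjlist=[SimpleNamespace(name='q1'),
                                           SimpleNamespace(name='b1')])
print(getElementsByName(lat, 'q1'))   # one matching element
print(getElementsByName(lat, 'q9'))   # []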
def _checkIfDatabaseIsEmpty(self, successHandler=None, failHandler=None):
"""
Check if database contains any tables.
:param successHandler: <function(<bool>)> method called if interrogation was successful where the first argument
is a boolean flag specifying if the database is empty or not
:param failHandler: <function(<str>)> method called if interrogation failed where the first argument is the
error message
:return: <void>
"""
def failCallback(error):
errorMessage = str(error)
if isinstance(error, Failure):
errorMessage = error.getErrorMessage()
if failHandler is not None:
reactor.callInThread(failHandler, errorMessage)
def selectCallback(transaction, successHandler):
querySelect = \
"SELECT `TABLE_NAME` " \
"FROM INFORMATION_SCHEMA.TABLES " \
"WHERE " \
"`TABLE_SCHEMA` = %s" \
";"
try:
transaction.execute(
querySelect,
(self.application.config["viper.mysql"]["name"],)
)
tables = transaction.fetchall()
except Exception as e:
failCallback(e)
return
if successHandler is not None:
reactor.callInThread(successHandler, len(tables) == 0)
interaction = self.runInteraction(selectCallback, successHandler)
interaction.addErrback(failCallback)
|
def function[_checkIfDatabaseIsEmpty, parameter[self, successHandler, failHandler]]:
constant[
Check if database contains any tables.
:param successHandler: <function(<bool>)> method called if interrogation was successful where the first argument
is a boolean flag specifying if the database is empty or not
:param failHandler: <function(<str>)> method called if interrogation failed where the first argument is the
error message
:return: <void>
]
def function[failCallback, parameter[error]]:
variable[errorMessage] assign[=] call[name[str], parameter[name[error]]]
if call[name[isinstance], parameter[name[error], name[Failure]]] begin[:]
variable[errorMessage] assign[=] call[name[error].getErrorMessage, parameter[]]
if compare[name[failHandler] is_not constant[None]] begin[:]
call[name[reactor].callInThread, parameter[name[failHandler], name[errorMessage]]]
def function[selectCallback, parameter[transaction, successHandler]]:
variable[querySelect] assign[=] constant[SELECT `TABLE_NAME` FROM INFORMATION_SCHEMA.TABLES WHERE `TABLE_SCHEMA` = %s;]
<ast.Try object at 0x7da18fe93c70>
if compare[name[successHandler] is_not constant[None]] begin[:]
call[name[reactor].callInThread, parameter[name[successHandler], compare[call[name[len], parameter[name[tables]]] equal[==] constant[0]]]]
variable[interaction] assign[=] call[name[self].runInteraction, parameter[name[selectCallback], name[successHandler]]]
call[name[interaction].addErrback, parameter[name[failCallback]]]
|
keyword[def] identifier[_checkIfDatabaseIsEmpty] ( identifier[self] , identifier[successHandler] = keyword[None] , identifier[failHandler] = keyword[None] ):
literal[string]
keyword[def] identifier[failCallback] ( identifier[error] ):
identifier[errorMessage] = identifier[str] ( identifier[error] )
keyword[if] identifier[isinstance] ( identifier[error] , identifier[Failure] ):
identifier[errorMessage] = identifier[error] . identifier[getErrorMessage] ()
keyword[if] identifier[failHandler] keyword[is] keyword[not] keyword[None] :
identifier[reactor] . identifier[callInThread] ( identifier[failHandler] , identifier[errorMessage] )
keyword[def] identifier[selectCallback] ( identifier[transaction] , identifier[successHandler] ):
identifier[querySelect] = literal[string] literal[string] literal[string] literal[string] literal[string]
keyword[try] :
identifier[transaction] . identifier[execute] (
identifier[querySelect] ,
( identifier[self] . identifier[application] . identifier[config] [ literal[string] ][ literal[string] ],)
)
identifier[tables] = identifier[transaction] . identifier[fetchall] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[failCallback] ( identifier[e] )
keyword[return]
keyword[if] identifier[successHandler] keyword[is] keyword[not] keyword[None] :
identifier[reactor] . identifier[callInThread] ( identifier[successHandler] , identifier[len] ( identifier[tables] )== literal[int] )
identifier[interaction] = identifier[self] . identifier[runInteraction] ( identifier[selectCallback] , identifier[successHandler] )
identifier[interaction] . identifier[addErrback] ( identifier[failCallback] )
|
def _checkIfDatabaseIsEmpty(self, successHandler=None, failHandler=None):
"""
Check if database contains any tables.
:param successHandler: <function(<bool>)> method called if interrogation was successful where the first argument
is a boolean flag specifying if the database is empty or not
:param failHandler: <function(<str>)> method called if interrogation failed where the first argument is the
error message
:return: <void>
"""
def failCallback(error):
errorMessage = str(error)
if isinstance(error, Failure):
errorMessage = error.getErrorMessage() # depends on [control=['if'], data=[]]
if failHandler is not None:
reactor.callInThread(failHandler, errorMessage) # depends on [control=['if'], data=['failHandler']]
def selectCallback(transaction, successHandler):
querySelect = 'SELECT `TABLE_NAME` FROM INFORMATION_SCHEMA.TABLES WHERE `TABLE_SCHEMA` = %s;'
try:
transaction.execute(querySelect, (self.application.config['viper.mysql']['name'],))
tables = transaction.fetchall() # depends on [control=['try'], data=[]]
except Exception as e:
failCallback(e)
return # depends on [control=['except'], data=['e']]
if successHandler is not None:
reactor.callInThread(successHandler, len(tables) == 0) # depends on [control=['if'], data=['successHandler']]
interaction = self.runInteraction(selectCallback, successHandler)
interaction.addErrback(failCallback)
|
def createSomeItems(store, itemType, values, counter):
"""
Create some instances of a particular type in a store.
"""
for i in counter:
itemType(store=store, **values)
|
def function[createSomeItems, parameter[store, itemType, values, counter]]:
constant[
Create some instances of a particular type in a store.
]
for taget[name[i]] in starred[name[counter]] begin[:]
call[name[itemType], parameter[]]
|
keyword[def] identifier[createSomeItems] ( identifier[store] , identifier[itemType] , identifier[values] , identifier[counter] ):
literal[string]
keyword[for] identifier[i] keyword[in] identifier[counter] :
identifier[itemType] ( identifier[store] = identifier[store] ,** identifier[values] )
|
def createSomeItems(store, itemType, values, counter):
"""
Create some instances of a particular type in a store.
"""
for i in counter:
itemType(store=store, **values) # depends on [control=['for'], data=[]]
|
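counter is just an iterable that fixes how many items get created; this sketch substitutes a hypothetical print-only callable for the real item type:
def fake_item(**kwargs):                         # stand-in item constructor
    print('created with', kwargs)

createSomeItems(store=None, itemType=fake_item,
                values={'flavor': 'vanilla'}, counter=range(3))
# prints "created with {'store': None, 'flavor': 'vanilla'}" three times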
def send_confirm_email_email(self, user, user_email):
"""Send the 'email confirmation' email."""
# Verify config settings
if not self.user_manager.USER_ENABLE_EMAIL: return
if not self.user_manager.USER_ENABLE_CONFIRM_EMAIL: return
# The confirm_email email is sent to a specific user_email.email or user.email
email = user_email.email if user_email else user.email
# Generate a confirm_email_link
object_id = user_email.id if user_email else user.id
token = self.user_manager.generate_token(object_id)
confirm_email_link = url_for('user.confirm_email', token=token, _external=True)
# Render email from templates and send it via the configured EmailAdapter
self._render_and_send_email(
email,
user,
self.user_manager.USER_CONFIRM_EMAIL_TEMPLATE,
confirm_email_link=confirm_email_link,
)
|
def function[send_confirm_email_email, parameter[self, user, user_email]]:
constant[Send the 'email confirmation' email.]
if <ast.UnaryOp object at 0x7da18bc70850> begin[:]
return[None]
if <ast.UnaryOp object at 0x7da18bc70bb0> begin[:]
return[None]
variable[email] assign[=] <ast.IfExp object at 0x7da18bc717e0>
variable[object_id] assign[=] <ast.IfExp object at 0x7da18bc703d0>
variable[token] assign[=] call[name[self].user_manager.generate_token, parameter[name[object_id]]]
variable[confirm_email_link] assign[=] call[name[url_for], parameter[constant[user.confirm_email]]]
call[name[self]._render_and_send_email, parameter[name[email], name[user], name[self].user_manager.USER_CONFIRM_EMAIL_TEMPLATE]]
|
keyword[def] identifier[send_confirm_email_email] ( identifier[self] , identifier[user] , identifier[user_email] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[user_manager] . identifier[USER_ENABLE_EMAIL] : keyword[return]
keyword[if] keyword[not] identifier[self] . identifier[user_manager] . identifier[USER_ENABLE_CONFIRM_EMAIL] : keyword[return]
identifier[email] = identifier[user_email] . identifier[email] keyword[if] identifier[user_email] keyword[else] identifier[user] . identifier[email]
identifier[object_id] = identifier[user_email] . identifier[id] keyword[if] identifier[user_email] keyword[else] identifier[user] . identifier[id]
identifier[token] = identifier[self] . identifier[user_manager] . identifier[generate_token] ( identifier[object_id] )
identifier[confirm_email_link] = identifier[url_for] ( literal[string] , identifier[token] = identifier[token] , identifier[_external] = keyword[True] )
identifier[self] . identifier[_render_and_send_email] (
identifier[email] ,
identifier[user] ,
identifier[self] . identifier[user_manager] . identifier[USER_CONFIRM_EMAIL_TEMPLATE] ,
identifier[confirm_email_link] = identifier[confirm_email_link] ,
)
|
def send_confirm_email_email(self, user, user_email):
"""Send the 'email confirmation' email."""
# Verify config settings
if not self.user_manager.USER_ENABLE_EMAIL:
return # depends on [control=['if'], data=[]]
if not self.user_manager.USER_ENABLE_CONFIRM_EMAIL:
return # depends on [control=['if'], data=[]]
# The confirm_email email is sent to a specific user_email.email or user.email
email = user_email.email if user_email else user.email
# Generate a confirm_email_link
object_id = user_email.id if user_email else user.id
token = self.user_manager.generate_token(object_id)
confirm_email_link = url_for('user.confirm_email', token=token, _external=True)
# Render email from templates and send it via the configured EmailAdapter
self._render_and_send_email(email, user, self.user_manager.USER_CONFIRM_EMAIL_TEMPLATE, confirm_email_link=confirm_email_link)
|
def formula_1980(household, period, parameters):
'''
To compute this allowance, the 'rent' value must be provided for the same month, but 'housing_occupancy_status' is not necessary.
'''
return household('rent', period) * parameters(period).benefits.housing_allowance
|
def function[formula_1980, parameter[household, period, parameters]]:
constant[
To compute this allowance, the 'rent' value must be provided for the same month, but 'housing_occupancy_status' is not necessary.
]
return[binary_operation[call[name[household], parameter[constant[rent], name[period]]] * call[name[parameters], parameter[name[period]]].benefits.housing_allowance]]
|
keyword[def] identifier[formula_1980] ( identifier[household] , identifier[period] , identifier[parameters] ):
literal[string]
keyword[return] identifier[household] ( literal[string] , identifier[period] )* identifier[parameters] ( identifier[period] ). identifier[benefits] . identifier[housing_allowance]
|
def formula_1980(household, period, parameters):
"""
To compute this allowance, the 'rent' value must be provided for the same month, but 'housing_occupancy_status' is not necessary.
"""
return household('rent', period) * parameters(period).benefits.housing_allowance
|
def create_payload(self):
"""Remove ``smart_class_parameter_id`` or ``smart_variable_id``"""
payload = super(OverrideValue, self).create_payload()
if hasattr(self, 'smart_class_parameter'):
del payload['smart_class_parameter_id']
if hasattr(self, 'smart_variable'):
del payload['smart_variable_id']
return payload
|
def function[create_payload, parameter[self]]:
constant[Remove ``smart_class_parameter_id`` or ``smart_variable_id``]
variable[payload] assign[=] call[call[name[super], parameter[name[OverrideValue], name[self]]].create_payload, parameter[]]
if call[name[hasattr], parameter[name[self], constant[smart_class_parameter]]] begin[:]
<ast.Delete object at 0x7da18bccbca0>
if call[name[hasattr], parameter[name[self], constant[smart_variable]]] begin[:]
<ast.Delete object at 0x7da18bccba30>
return[name[payload]]
|
keyword[def] identifier[create_payload] ( identifier[self] ):
literal[string]
identifier[payload] = identifier[super] ( identifier[OverrideValue] , identifier[self] ). identifier[create_payload] ()
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[del] identifier[payload] [ literal[string] ]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[del] identifier[payload] [ literal[string] ]
keyword[return] identifier[payload]
|
def create_payload(self):
"""Remove ``smart_class_parameter_id`` or ``smart_variable_id``"""
payload = super(OverrideValue, self).create_payload()
if hasattr(self, 'smart_class_parameter'):
del payload['smart_class_parameter_id'] # depends on [control=['if'], data=[]]
if hasattr(self, 'smart_variable'):
del payload['smart_variable_id'] # depends on [control=['if'], data=[]]
return payload
|
def polygonize(self, width, cap_style_line=CAP_STYLE.flat, cap_style_point=CAP_STYLE.round):
"""Turns line or point into a buffered polygon."""
shape = self._shape
if isinstance(shape, (LineString, MultiLineString)):
return self.__class__(
shape.buffer(width / 2, cap_style=cap_style_line),
self.crs
)
elif isinstance(shape, (Point, MultiPoint)):
return self.__class__(
shape.buffer(width / 2, cap_style=cap_style_point),
self.crs
)
else:
return self
|
def function[polygonize, parameter[self, width, cap_style_line, cap_style_point]]:
constant[Turns line or point into a buffered polygon.]
variable[shape] assign[=] name[self]._shape
if call[name[isinstance], parameter[name[shape], tuple[[<ast.Name object at 0x7da2054a4e80>, <ast.Name object at 0x7da2054a5990>]]]] begin[:]
return[call[name[self].__class__, parameter[call[name[shape].buffer, parameter[binary_operation[name[width] / constant[2]]]], name[self].crs]]]
|
keyword[def] identifier[polygonize] ( identifier[self] , identifier[width] , identifier[cap_style_line] = identifier[CAP_STYLE] . identifier[flat] , identifier[cap_style_point] = identifier[CAP_STYLE] . identifier[round] ):
literal[string]
identifier[shape] = identifier[self] . identifier[_shape]
keyword[if] identifier[isinstance] ( identifier[shape] ,( identifier[LineString] , identifier[MultiLineString] )):
keyword[return] identifier[self] . identifier[__class__] (
identifier[shape] . identifier[buffer] ( identifier[width] / literal[int] , identifier[cap_style] = identifier[cap_style_line] ),
identifier[self] . identifier[crs]
)
keyword[elif] identifier[isinstance] ( identifier[shape] ,( identifier[Point] , identifier[MultiPoint] )):
keyword[return] identifier[self] . identifier[__class__] (
identifier[shape] . identifier[buffer] ( identifier[width] / literal[int] , identifier[cap_style] = identifier[cap_style_point] ),
identifier[self] . identifier[crs]
)
keyword[else] :
keyword[return] identifier[self]
|
def polygonize(self, width, cap_style_line=CAP_STYLE.flat, cap_style_point=CAP_STYLE.round):
"""Turns line or point into a buffered polygon."""
shape = self._shape
if isinstance(shape, (LineString, MultiLineString)):
return self.__class__(shape.buffer(width / 2, cap_style=cap_style_line), self.crs) # depends on [control=['if'], data=[]]
elif isinstance(shape, (Point, MultiPoint)):
return self.__class__(shape.buffer(width / 2, cap_style=cap_style_point), self.crs) # depends on [control=['if'], data=[]]
else:
return self
|
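polygonize is a thin wrapper over shapely's buffer(); the same operation in bare shapely, with an invented geometry and the integer code 2 standing in for CAP_STYLE.flat:
from shapely.geometry import LineString

line = LineString([(0, 0), (10, 0)])
poly = line.buffer(2 / 2, cap_style=2)   # width=2 -> radius 1; 2 == flat cap
print(poly.geom_type, poly.area)         # Polygon 20.0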
def signed_session(self, session=None):
"""
        Sign the requests session with the token. This method is called every time a request goes on the wire.
The user is responsible for updating the token with the preferred tool/SDK.
In general there are two options:
- override this method to update the token in a preferred way and set Authorization header on session
- not override this method, and have a timer that triggers periodically to update the token on this class
The second option is recommended as it tends to be more performance-friendly.
:param session: The session to configure for authentication
:type session: requests.Session
:rtype: requests.Session
"""
session = session or requests.Session()
session.headers['Authorization'] = "Bearer {}".format(self.token)
return session
|
def function[signed_session, parameter[self, session]]:
constant[
    Sign the requests session with the token. This method is called every time a request goes on the wire.
The user is responsible for updating the token with the preferred tool/SDK.
In general there are two options:
- override this method to update the token in a preferred way and set Authorization header on session
- not override this method, and have a timer that triggers periodically to update the token on this class
The second option is recommended as it tends to be more performance-friendly.
:param session: The session to configure for authentication
:type session: requests.Session
:rtype: requests.Session
]
variable[session] assign[=] <ast.BoolOp object at 0x7da1b1d0fb80>
call[name[session].headers][constant[Authorization]] assign[=] call[constant[Bearer {}].format, parameter[name[self].token]]
return[name[session]]
|
keyword[def] identifier[signed_session] ( identifier[self] , identifier[session] = keyword[None] ):
literal[string]
identifier[session] = identifier[session] keyword[or] identifier[requests] . identifier[Session] ()
identifier[session] . identifier[headers] [ literal[string] ]= literal[string] . identifier[format] ( identifier[self] . identifier[token] )
keyword[return] identifier[session]
|
def signed_session(self, session=None):
"""
        Sign the requests session with the token. This method is called every time a request goes on the wire.
The user is responsible for updating the token with the preferred tool/SDK.
In general there are two options:
- override this method to update the token in a preferred way and set Authorization header on session
- not override this method, and have a timer that triggers periodically to update the token on this class
The second option is recommended as it tends to be more performance-friendly.
:param session: The session to configure for authentication
:type session: requests.Session
:rtype: requests.Session
"""
session = session or requests.Session()
session.headers['Authorization'] = 'Bearer {}'.format(self.token)
return session
|
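The same pattern outside the class, as a sketch that assumes only the requests package:
import requests

def make_signed_session(token, session=None):
    # reuse or create a session, then attach the bearer token header
    session = session or requests.Session()
    session.headers['Authorization'] = 'Bearer {}'.format(token)
    return session

s = make_signed_session('example-token')
print(s.headers['Authorization'])   # Bearer example-token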
def new_fully_connected(self, name: str, output_count: int, relu=True, input_layer_name: str=None):
"""
Creates a new fully connected layer.
:param name: name for the layer.
:param output_count: number of outputs of the fully connected layer.
    :param relu: boolean flag to set if ReLU should be applied at the end of this layer.
:param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of
the network.
"""
with tf.variable_scope(name):
input_layer = self.__network.get_layer(input_layer_name)
vectorized_input, dimension = self.vectorize_input(input_layer)
weights = self.__make_var('weights', shape=[dimension, output_count])
biases = self.__make_var('biases', shape=[output_count])
operation = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
fc = operation(vectorized_input, weights, biases, name=name)
self.__network.add_layer(name, layer_output=fc)
|
def function[new_fully_connected, parameter[self, name, output_count, relu, input_layer_name]]:
constant[
Creates a new fully connected layer.
:param name: name for the layer.
:param output_count: number of outputs of the fully connected layer.
    :param relu: boolean flag to set if ReLU should be applied at the end of this layer.
:param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of
the network.
]
with call[name[tf].variable_scope, parameter[name[name]]] begin[:]
variable[input_layer] assign[=] call[name[self].__network.get_layer, parameter[name[input_layer_name]]]
<ast.Tuple object at 0x7da1b1bdf8e0> assign[=] call[name[self].vectorize_input, parameter[name[input_layer]]]
variable[weights] assign[=] call[name[self].__make_var, parameter[constant[weights]]]
variable[biases] assign[=] call[name[self].__make_var, parameter[constant[biases]]]
variable[operation] assign[=] <ast.IfExp object at 0x7da1b1b60bb0>
variable[fc] assign[=] call[name[operation], parameter[name[vectorized_input], name[weights], name[biases]]]
call[name[self].__network.add_layer, parameter[name[name]]]
|
keyword[def] identifier[new_fully_connected] ( identifier[self] , identifier[name] : identifier[str] , identifier[output_count] : identifier[int] , identifier[relu] = keyword[True] , identifier[input_layer_name] : identifier[str] = keyword[None] ):
literal[string]
keyword[with] identifier[tf] . identifier[variable_scope] ( identifier[name] ):
identifier[input_layer] = identifier[self] . identifier[__network] . identifier[get_layer] ( identifier[input_layer_name] )
identifier[vectorized_input] , identifier[dimension] = identifier[self] . identifier[vectorize_input] ( identifier[input_layer] )
identifier[weights] = identifier[self] . identifier[__make_var] ( literal[string] , identifier[shape] =[ identifier[dimension] , identifier[output_count] ])
identifier[biases] = identifier[self] . identifier[__make_var] ( literal[string] , identifier[shape] =[ identifier[output_count] ])
identifier[operation] = identifier[tf] . identifier[nn] . identifier[relu_layer] keyword[if] identifier[relu] keyword[else] identifier[tf] . identifier[nn] . identifier[xw_plus_b]
identifier[fc] = identifier[operation] ( identifier[vectorized_input] , identifier[weights] , identifier[biases] , identifier[name] = identifier[name] )
identifier[self] . identifier[__network] . identifier[add_layer] ( identifier[name] , identifier[layer_output] = identifier[fc] )
|
def new_fully_connected(self, name: str, output_count: int, relu=True, input_layer_name: str=None):
"""
Creates a new fully connected layer.
:param name: name for the layer.
:param output_count: number of outputs of the fully connected layer.
    :param relu: boolean flag to set if ReLU should be applied at the end of this layer.
:param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of
the network.
"""
with tf.variable_scope(name):
input_layer = self.__network.get_layer(input_layer_name)
(vectorized_input, dimension) = self.vectorize_input(input_layer)
weights = self.__make_var('weights', shape=[dimension, output_count])
biases = self.__make_var('biases', shape=[output_count])
operation = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
fc = operation(vectorized_input, weights, biases, name=name) # depends on [control=['with'], data=[]]
self.__network.add_layer(name, layer_output=fc)
|
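What the two TensorFlow ops at the end compute, spelled out in NumPy with invented shapes:
import numpy as np

x = np.random.rand(4, 8)     # vectorized input: batch of 4, dimension 8
W = np.random.rand(8, 16)    # 'weights' variable: [dimension, output_count]
b = np.random.rand(16)       # 'biases' variable: [output_count]

xw_plus_b = x @ W + b                    # tf.nn.xw_plus_b
relu_layer = np.maximum(xw_plus_b, 0.0)  # tf.nn.relu_layer
print(relu_layer.shape)                  # (4, 16)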
def inet_pton(af, addr):
"""Convert an IP address from text representation into binary form."""
# Will replace Net/Net6 objects
addr = plain_str(addr)
# Use inet_pton if available
try:
return socket.inet_pton(af, addr)
except AttributeError:
try:
return _INET_PTON[af](addr)
except KeyError:
raise socket.error("Address family not supported by protocol")
|
def function[inet_pton, parameter[af, addr]]:
constant[Convert an IP address from text representation into binary form.]
variable[addr] assign[=] call[name[plain_str], parameter[name[addr]]]
<ast.Try object at 0x7da1b1c19ea0>
|
keyword[def] identifier[inet_pton] ( identifier[af] , identifier[addr] ):
literal[string]
identifier[addr] = identifier[plain_str] ( identifier[addr] )
keyword[try] :
keyword[return] identifier[socket] . identifier[inet_pton] ( identifier[af] , identifier[addr] )
keyword[except] identifier[AttributeError] :
keyword[try] :
keyword[return] identifier[_INET_PTON] [ identifier[af] ]( identifier[addr] )
keyword[except] identifier[KeyError] :
keyword[raise] identifier[socket] . identifier[error] ( literal[string] )
|
def inet_pton(af, addr):
"""Convert an IP address from text representation into binary form."""
# Will replace Net/Net6 objects
addr = plain_str(addr)
# Use inet_pton if available
try:
return socket.inet_pton(af, addr) # depends on [control=['try'], data=[]]
except AttributeError:
try:
return _INET_PTON[af](addr) # depends on [control=['try'], data=[]]
except KeyError:
raise socket.error('Address family not supported by protocol') # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
|
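The wrapper above only adds a pure-Python fallback (_INET_PTON) for platforms whose socket module lacks inet_pton; where the stdlib call exists, it behaves exactly like this:
import socket

print(socket.inet_pton(socket.AF_INET, '192.0.2.1'))   # b'\xc0\x00\x02\x01'
print(len(socket.inet_pton(socket.AF_INET6, '::1')))   # 16 bytes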
def get_encoded_text(container, xpath):
"""Return text for element at xpath in the container xml if it is there.
Parameters
----------
container : xml.etree.ElementTree.Element
The element to be searched in.
xpath : str
The path to be looked for.
Returns
-------
result : str
"""
try:
return "".join(container.find(xpath, ns).itertext())
except AttributeError:
return None
|
def function[get_encoded_text, parameter[container, xpath]]:
constant[Return text for element at xpath in the container xml if it is there.
Parameters
----------
container : xml.etree.ElementTree.Element
The element to be searched in.
xpath : str
The path to be looked for.
Returns
-------
result : str
]
<ast.Try object at 0x7da1b23465c0>
|
keyword[def] identifier[get_encoded_text] ( identifier[container] , identifier[xpath] ):
literal[string]
keyword[try] :
keyword[return] literal[string] . identifier[join] ( identifier[container] . identifier[find] ( identifier[xpath] , identifier[ns] ). identifier[itertext] ())
keyword[except] identifier[AttributeError] :
keyword[return] keyword[None]
|
def get_encoded_text(container, xpath):
"""Return text for element at xpath in the container xml if it is there.
Parameters
----------
container : xml.etree.ElementTree.Element
The element to be searched in.
xpath : str
The path to be looked for.
Returns
-------
result : str
"""
try:
return ''.join(container.find(xpath, ns).itertext()) # depends on [control=['try'], data=[]]
except AttributeError:
return None # depends on [control=['except'], data=[]]
|
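A quick check against a namespace-free tree; the module-level ns mapping the function closes over is stubbed with an empty dict:
import xml.etree.ElementTree as ET

ns = {}   # stand-in for the module-level namespace mapping
root = ET.fromstring('<doc><title>Hello <b>world</b></title></doc>')
print(get_encoded_text(root, 'title'))    # Hello world
print(get_encoded_text(root, 'missing'))  # None (find() returned None)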
def waitForAllConnectionsToClose(self):
"""
Wait for all currently-open connections to enter the 'CLOSED' state.
Currently this is only usable from test fixtures.
"""
if not self._connections:
return self._stop()
return self._allConnectionsClosed.deferred().addBoth(self._stop)
|
def function[waitForAllConnectionsToClose, parameter[self]]:
constant[
Wait for all currently-open connections to enter the 'CLOSED' state.
Currently this is only usable from test fixtures.
]
if <ast.UnaryOp object at 0x7da2047e8cd0> begin[:]
return[call[name[self]._stop, parameter[]]]
return[call[call[name[self]._allConnectionsClosed.deferred, parameter[]].addBoth, parameter[name[self]._stop]]]
|
keyword[def] identifier[waitForAllConnectionsToClose] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_connections] :
keyword[return] identifier[self] . identifier[_stop] ()
keyword[return] identifier[self] . identifier[_allConnectionsClosed] . identifier[deferred] (). identifier[addBoth] ( identifier[self] . identifier[_stop] )
|
def waitForAllConnectionsToClose(self):
"""
Wait for all currently-open connections to enter the 'CLOSED' state.
Currently this is only usable from test fixtures.
"""
if not self._connections:
return self._stop() # depends on [control=['if'], data=[]]
return self._allConnectionsClosed.deferred().addBoth(self._stop)
|
def acl_middleware(callback):
"""Returns a aiohttp_auth.acl middleware factory for use by the aiohttp
application object.
Args:
callback: This is a callable which takes a user_id (as returned from
the auth.get_auth function), and expects a sequence of permitted ACL
            groups to be returned. This can be an empty tuple to represent no
explicit permissions, or None to explicitly forbid this particular
user_id. Note that the user_id passed may be None if no
authenticated user exists.
Returns:
        An aiohttp middleware factory.
"""
async def _acl_middleware_factory(app, handler):
async def _middleware_handler(request):
# Save the policy in the request
request[GROUPS_KEY] = callback
# Call the next handler in the chain
return await handler(request)
return _middleware_handler
return _acl_middleware_factory
|
def function[acl_middleware, parameter[callback]]:
    constant[Returns an aiohttp_auth.acl middleware factory for use by the aiohttp
application object.
Args:
callback: This is a callable which takes a user_id (as returned from
the auth.get_auth function), and expects a sequence of permitted ACL
            groups to be returned. This can be an empty tuple to represent no
explicit permissions, or None to explicitly forbid this particular
user_id. Note that the user_id passed may be None if no
authenticated user exists.
Returns:
        An aiohttp middleware factory.
]
<ast.AsyncFunctionDef object at 0x7da18c4cfe80>
return[name[_acl_middleware_factory]]
|
keyword[def] identifier[acl_middleware] ( identifier[callback] ):
literal[string]
keyword[async] keyword[def] identifier[_acl_middleware_factory] ( identifier[app] , identifier[handler] ):
keyword[async] keyword[def] identifier[_middleware_handler] ( identifier[request] ):
identifier[request] [ identifier[GROUPS_KEY] ]= identifier[callback]
keyword[return] keyword[await] identifier[handler] ( identifier[request] )
keyword[return] identifier[_middleware_handler]
keyword[return] identifier[_acl_middleware_factory]
|
def acl_middleware(callback):
"""Returns a aiohttp_auth.acl middleware factory for use by the aiohttp
application object.
Args:
callback: This is a callable which takes a user_id (as returned from
the auth.get_auth function), and expects a sequence of permitted ACL
            groups to be returned. This can be an empty tuple to represent no
explicit permissions, or None to explicitly forbid this particular
user_id. Note that the user_id passed may be None if no
authenticated user exists.
Returns:
        An aiohttp middleware factory.
"""
async def _acl_middleware_factory(app, handler):
async def _middleware_handler(request):
# Save the policy in the request
request[GROUPS_KEY] = callback
# Call the next handler in the chain
return await handler(request)
return _middleware_handler
return _acl_middleware_factory
|
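One way the callback contract might look in practice; the group names and the 'banned' id below are invented:
def groups_for(user_id):
    if user_id is None:
        return ()                 # anonymous: no explicit permissions
    if user_id == 'banned':
        return None               # explicitly forbidden
    return ('user', 'editor')     # everyone else

middleware = acl_middleware(groups_for)
# 'middleware' would then go into the aiohttp application's middleware list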
def ExpandRecursiveGlobs(cls, path, path_separator):
"""Expands recursive like globs present in an artifact path.
If a path ends in '**', with up to two optional digits such as '**10',
the '**' will recursively match all files and zero or more directories
from the specified path. The optional digits indicate the recursion depth.
By default recursion depth is 10 directories.
If the glob is followed by the specified path segment separator, only
directories and subdirectories will be matched.
Args:
path (str): path to be expanded.
path_separator (str): path segment separator.
Returns:
list[str]: String path expanded for each glob.
"""
glob_regex = r'(.*)?{0:s}\*\*(\d{{1,2}})?({0:s})?$'.format(
re.escape(path_separator))
match = re.search(glob_regex, path)
if not match:
return [path]
skip_first = False
if match.group(3):
skip_first = True
if match.group(2):
iterations = int(match.group(2))
else:
iterations = cls._RECURSIVE_GLOB_LIMIT
logger.warning((
'Path "{0:s}" contains fully recursive glob, limiting to 10 '
'levels').format(path))
return cls.AppendPathEntries(
match.group(1), path_separator, iterations, skip_first)
|
def function[ExpandRecursiveGlobs, parameter[cls, path, path_separator]]:
    constant[Expands recursive-like globs present in an artifact path.
If a path ends in '**', with up to two optional digits such as '**10',
the '**' will recursively match all files and zero or more directories
from the specified path. The optional digits indicate the recursion depth.
By default recursion depth is 10 directories.
If the glob is followed by the specified path segment separator, only
directories and subdirectories will be matched.
Args:
path (str): path to be expanded.
path_separator (str): path segment separator.
Returns:
list[str]: String path expanded for each glob.
]
variable[glob_regex] assign[=] call[constant[(.*)?{0:s}\*\*(\d{{1,2}})?({0:s})?$].format, parameter[call[name[re].escape, parameter[name[path_separator]]]]]
variable[match] assign[=] call[name[re].search, parameter[name[glob_regex], name[path]]]
if <ast.UnaryOp object at 0x7da2044c3670> begin[:]
return[list[[<ast.Name object at 0x7da2044c3460>]]]
variable[skip_first] assign[=] constant[False]
if call[name[match].group, parameter[constant[3]]] begin[:]
variable[skip_first] assign[=] constant[True]
if call[name[match].group, parameter[constant[2]]] begin[:]
variable[iterations] assign[=] call[name[int], parameter[call[name[match].group, parameter[constant[2]]]]]
return[call[name[cls].AppendPathEntries, parameter[call[name[match].group, parameter[constant[1]]], name[path_separator], name[iterations], name[skip_first]]]]
|
keyword[def] identifier[ExpandRecursiveGlobs] ( identifier[cls] , identifier[path] , identifier[path_separator] ):
literal[string]
identifier[glob_regex] = literal[string] . identifier[format] (
identifier[re] . identifier[escape] ( identifier[path_separator] ))
identifier[match] = identifier[re] . identifier[search] ( identifier[glob_regex] , identifier[path] )
keyword[if] keyword[not] identifier[match] :
keyword[return] [ identifier[path] ]
identifier[skip_first] = keyword[False]
keyword[if] identifier[match] . identifier[group] ( literal[int] ):
identifier[skip_first] = keyword[True]
keyword[if] identifier[match] . identifier[group] ( literal[int] ):
identifier[iterations] = identifier[int] ( identifier[match] . identifier[group] ( literal[int] ))
keyword[else] :
identifier[iterations] = identifier[cls] . identifier[_RECURSIVE_GLOB_LIMIT]
identifier[logger] . identifier[warning] ((
literal[string]
literal[string] ). identifier[format] ( identifier[path] ))
keyword[return] identifier[cls] . identifier[AppendPathEntries] (
identifier[match] . identifier[group] ( literal[int] ), identifier[path_separator] , identifier[iterations] , identifier[skip_first] )
|
def ExpandRecursiveGlobs(cls, path, path_separator):
"""Expands recursive like globs present in an artifact path.
If a path ends in '**', with up to two optional digits such as '**10',
the '**' will recursively match all files and zero or more directories
from the specified path. The optional digits indicate the recursion depth.
By default recursion depth is 10 directories.
If the glob is followed by the specified path segment separator, only
directories and subdirectories will be matched.
Args:
path (str): path to be expanded.
path_separator (str): path segment separator.
Returns:
list[str]: String path expanded for each glob.
"""
glob_regex = '(.*)?{0:s}\\*\\*(\\d{{1,2}})?({0:s})?$'.format(re.escape(path_separator))
match = re.search(glob_regex, path)
if not match:
return [path] # depends on [control=['if'], data=[]]
skip_first = False
if match.group(3):
skip_first = True # depends on [control=['if'], data=[]]
if match.group(2):
iterations = int(match.group(2)) # depends on [control=['if'], data=[]]
else:
iterations = cls._RECURSIVE_GLOB_LIMIT
logger.warning('Path "{0:s}" contains fully recursive glob, limiting to 10 levels'.format(path))
return cls.AppendPathEntries(match.group(1), path_separator, iterations, skip_first)
|
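The trailing-glob grammar the regex accepts can be checked standalone; the paths are invented:
import re

glob_regex = r'(.*)?{0:s}\*\*(\d{{1,2}})?({0:s})?$'.format(re.escape('/'))
for path in ('/var/log/**', '/var/log/**4', '/var/log/**/', '/var/log'):
    m = re.search(glob_regex, path)
    print(path, '->', m.groups() if m else 'no recursive glob')
# /var/log/**  -> ('/var/log', None, None)   default depth, files and dirs
# /var/log/**4 -> ('/var/log', '4', None)    depth 4
# /var/log/**/ -> ('/var/log', None, '/')    directories only
# /var/log     -> no recursive glob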
def removeNotification(self, notificationId):
"""Destroy a notification, hiding it first if it currently shown to the user."""
fn = self.function_table.removeNotification
result = fn(notificationId)
return result
|
def function[removeNotification, parameter[self, notificationId]]:
    constant[Destroy a notification, hiding it first if it is currently shown to the user.]
variable[fn] assign[=] name[self].function_table.removeNotification
variable[result] assign[=] call[name[fn], parameter[name[notificationId]]]
return[name[result]]
|
keyword[def] identifier[removeNotification] ( identifier[self] , identifier[notificationId] ):
literal[string]
identifier[fn] = identifier[self] . identifier[function_table] . identifier[removeNotification]
identifier[result] = identifier[fn] ( identifier[notificationId] )
keyword[return] identifier[result]
|
def removeNotification(self, notificationId):
"""Destroy a notification, hiding it first if it currently shown to the user."""
fn = self.function_table.removeNotification
result = fn(notificationId)
return result
|
def pcre(tgt, minion_id=None):
'''
Return True if the minion ID matches the given pcre target
minion_id
Specify the minion ID to match against the target expression
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' match.pcre '.*'
'''
if minion_id is not None:
opts = copy.copy(__opts__)
if not isinstance(minion_id, six.string_types):
minion_id = six.text_type(minion_id)
opts['id'] = minion_id
else:
opts = __opts__
matchers = salt.loader.matchers(opts)
try:
        # pass the locally-built opts so a minion_id override takes effect
        return matchers['pcre_match.match'](tgt, opts=opts)
except Exception as exc:
log.exception(exc)
return False
|
def function[pcre, parameter[tgt, minion_id]]:
constant[
Return True if the minion ID matches the given pcre target
minion_id
Specify the minion ID to match against the target expression
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' match.pcre '.*'
]
if compare[name[minion_id] is_not constant[None]] begin[:]
variable[opts] assign[=] call[name[copy].copy, parameter[name[__opts__]]]
if <ast.UnaryOp object at 0x7da1b1c27730> begin[:]
variable[minion_id] assign[=] call[name[six].text_type, parameter[name[minion_id]]]
call[name[opts]][constant[id]] assign[=] name[minion_id]
variable[matchers] assign[=] call[name[salt].loader.matchers, parameter[name[opts]]]
<ast.Try object at 0x7da1b1c27c10>
|
keyword[def] identifier[pcre] ( identifier[tgt] , identifier[minion_id] = keyword[None] ):
literal[string]
keyword[if] identifier[minion_id] keyword[is] keyword[not] keyword[None] :
identifier[opts] = identifier[copy] . identifier[copy] ( identifier[__opts__] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[minion_id] , identifier[six] . identifier[string_types] ):
identifier[minion_id] = identifier[six] . identifier[text_type] ( identifier[minion_id] )
identifier[opts] [ literal[string] ]= identifier[minion_id]
keyword[else] :
identifier[opts] = identifier[__opts__]
identifier[matchers] = identifier[salt] . identifier[loader] . identifier[matchers] ( identifier[opts] )
keyword[try] :
        keyword[return] identifier[matchers] [ literal[string] ]( identifier[tgt] , identifier[opts] = identifier[opts] )
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
identifier[log] . identifier[exception] ( identifier[exc] )
keyword[return] keyword[False]
|
def pcre(tgt, minion_id=None):
"""
Return True if the minion ID matches the given pcre target
minion_id
Specify the minion ID to match against the target expression
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' match.pcre '.*'
"""
if minion_id is not None:
opts = copy.copy(__opts__)
if not isinstance(minion_id, six.string_types):
minion_id = six.text_type(minion_id) # depends on [control=['if'], data=[]]
opts['id'] = minion_id # depends on [control=['if'], data=['minion_id']]
else:
opts = __opts__
matchers = salt.loader.matchers(opts)
try:
        return matchers['pcre_match.match'](tgt, opts=opts) # depends on [control=['try'], data=[]]
except Exception as exc:
log.exception(exc)
return False # depends on [control=['except'], data=['exc']]
|
def delete(name, timeout=90):
'''
Delete the named service
Args:
name (str): The name of the service to delete
timeout (int):
The time in seconds to wait for the service to be deleted before
returning. This is necessary because a service must be stopped
before it can be deleted. Default is 90 seconds
.. versionadded:: 2017.7.9,2018.3.4
Returns:
bool: ``True`` if successful, otherwise ``False``. Also returns ``True``
if the service is not present
CLI Example:
.. code-block:: bash
salt '*' service.delete <service name>
'''
handle_scm = win32service.OpenSCManager(
None, None, win32service.SC_MANAGER_CONNECT)
try:
handle_svc = win32service.OpenService(
handle_scm, name, win32service.SERVICE_ALL_ACCESS)
except pywintypes.error as exc:
win32service.CloseServiceHandle(handle_scm)
if exc.winerror != 1060:
raise CommandExecutionError(
'Failed to open {0}. {1}'.format(name, exc.strerror))
log.debug('Service "%s" is not present', name)
return True
try:
win32service.DeleteService(handle_svc)
except pywintypes.error as exc:
raise CommandExecutionError(
'Failed to delete {0}. {1}'.format(name, exc.strerror))
finally:
log.debug('Cleaning up')
win32service.CloseServiceHandle(handle_scm)
win32service.CloseServiceHandle(handle_svc)
end_time = time.time() + int(timeout)
while name in get_all() and time.time() < end_time:
time.sleep(1)
return name not in get_all()
|
def function[delete, parameter[name, timeout]]:
constant[
Delete the named service
Args:
name (str): The name of the service to delete
timeout (int):
The time in seconds to wait for the service to be deleted before
returning. This is necessary because a service must be stopped
before it can be deleted. Default is 90 seconds
.. versionadded:: 2017.7.9,2018.3.4
Returns:
bool: ``True`` if successful, otherwise ``False``. Also returns ``True``
if the service is not present
CLI Example:
.. code-block:: bash
salt '*' service.delete <service name>
]
variable[handle_scm] assign[=] call[name[win32service].OpenSCManager, parameter[constant[None], constant[None], name[win32service].SC_MANAGER_CONNECT]]
<ast.Try object at 0x7da20e954190>
<ast.Try object at 0x7da18c4cd750>
variable[end_time] assign[=] binary_operation[call[name[time].time, parameter[]] + call[name[int], parameter[name[timeout]]]]
while <ast.BoolOp object at 0x7da18c4ccbe0> begin[:]
call[name[time].sleep, parameter[constant[1]]]
return[compare[name[name] <ast.NotIn object at 0x7da2590d7190> call[name[get_all], parameter[]]]]
|
keyword[def] identifier[delete] ( identifier[name] , identifier[timeout] = literal[int] ):
literal[string]
identifier[handle_scm] = identifier[win32service] . identifier[OpenSCManager] (
keyword[None] , keyword[None] , identifier[win32service] . identifier[SC_MANAGER_CONNECT] )
keyword[try] :
identifier[handle_svc] = identifier[win32service] . identifier[OpenService] (
identifier[handle_scm] , identifier[name] , identifier[win32service] . identifier[SERVICE_ALL_ACCESS] )
keyword[except] identifier[pywintypes] . identifier[error] keyword[as] identifier[exc] :
identifier[win32service] . identifier[CloseServiceHandle] ( identifier[handle_scm] )
keyword[if] identifier[exc] . identifier[winerror] != literal[int] :
keyword[raise] identifier[CommandExecutionError] (
literal[string] . identifier[format] ( identifier[name] , identifier[exc] . identifier[strerror] ))
identifier[log] . identifier[debug] ( literal[string] , identifier[name] )
keyword[return] keyword[True]
keyword[try] :
identifier[win32service] . identifier[DeleteService] ( identifier[handle_svc] )
keyword[except] identifier[pywintypes] . identifier[error] keyword[as] identifier[exc] :
keyword[raise] identifier[CommandExecutionError] (
literal[string] . identifier[format] ( identifier[name] , identifier[exc] . identifier[strerror] ))
keyword[finally] :
identifier[log] . identifier[debug] ( literal[string] )
identifier[win32service] . identifier[CloseServiceHandle] ( identifier[handle_scm] )
identifier[win32service] . identifier[CloseServiceHandle] ( identifier[handle_svc] )
identifier[end_time] = identifier[time] . identifier[time] ()+ identifier[int] ( identifier[timeout] )
keyword[while] identifier[name] keyword[in] identifier[get_all] () keyword[and] identifier[time] . identifier[time] ()< identifier[end_time] :
identifier[time] . identifier[sleep] ( literal[int] )
keyword[return] identifier[name] keyword[not] keyword[in] identifier[get_all] ()
|
def delete(name, timeout=90):
"""
Delete the named service
Args:
name (str): The name of the service to delete
timeout (int):
The time in seconds to wait for the service to be deleted before
returning. This is necessary because a service must be stopped
before it can be deleted. Default is 90 seconds
.. versionadded:: 2017.7.9,2018.3.4
Returns:
bool: ``True`` if successful, otherwise ``False``. Also returns ``True``
if the service is not present
CLI Example:
.. code-block:: bash
salt '*' service.delete <service name>
"""
handle_scm = win32service.OpenSCManager(None, None, win32service.SC_MANAGER_CONNECT)
try:
handle_svc = win32service.OpenService(handle_scm, name, win32service.SERVICE_ALL_ACCESS) # depends on [control=['try'], data=[]]
except pywintypes.error as exc:
win32service.CloseServiceHandle(handle_scm)
if exc.winerror != 1060:
raise CommandExecutionError('Failed to open {0}. {1}'.format(name, exc.strerror)) # depends on [control=['if'], data=[]]
log.debug('Service "%s" is not present', name)
return True # depends on [control=['except'], data=['exc']]
try:
win32service.DeleteService(handle_svc) # depends on [control=['try'], data=[]]
except pywintypes.error as exc:
raise CommandExecutionError('Failed to delete {0}. {1}'.format(name, exc.strerror)) # depends on [control=['except'], data=['exc']]
finally:
log.debug('Cleaning up')
win32service.CloseServiceHandle(handle_scm)
win32service.CloseServiceHandle(handle_svc)
end_time = time.time() + int(timeout)
while name in get_all() and time.time() < end_time:
time.sleep(1) # depends on [control=['while'], data=[]]
return name not in get_all()
|
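The tail of delete() is a poll-until-timeout loop; factored out as a generic helper (the name wait_until is invented), the pattern reads:
import time

def wait_until(predicate, timeout=90, interval=1):
    # poll until predicate() turns true or the timeout expires
    end_time = time.time() + timeout
    while not predicate() and time.time() < end_time:
        time.sleep(interval)
    return predicate()

# delete() effectively ends with:
#   return wait_until(lambda: name not in get_all(), timeout=timeout)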
def get_training_status(self, model_id, token=None, url=API_TRAIN_MODEL):
""" Gets status on the training process once you create a model
:param model_id: string, id of the model to check
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
        h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
|
def function[get_training_status, parameter[self, model_id, token, url]]:
constant[ Gets status on the training process once you create a model
:param model_id: string, id of the model to check
returns: requests object
]
variable[auth] assign[=] binary_operation[constant[Bearer ] + call[name[self].check_for_token, parameter[name[token]]]]
variable[h] assign[=] dictionary[[<ast.Constant object at 0x7da1b2344df0>, <ast.Constant object at 0x7da1b2347220>], [<ast.Name object at 0x7da1b23473a0>, <ast.Constant object at 0x7da1b2346fb0>]]
variable[the_url] assign[=] binary_operation[binary_operation[name[url] + constant[/]] + name[model_id]]
variable[r] assign[=] call[name[requests].get, parameter[name[the_url]]]
return[name[r]]
|
keyword[def] identifier[get_training_status] ( identifier[self] , identifier[model_id] , identifier[token] = keyword[None] , identifier[url] = identifier[API_TRAIN_MODEL] ):
literal[string]
identifier[auth] = literal[string] + identifier[self] . identifier[check_for_token] ( identifier[token] )
identifier[h] ={ literal[string] : identifier[auth] , literal[string] : literal[string] }
identifier[the_url] = identifier[url] + literal[string] + identifier[model_id]
identifier[r] = identifier[requests] . identifier[get] ( identifier[the_url] , identifier[headers] = identifier[h] )
keyword[return] identifier[r]
|
def get_training_status(self, model_id, token=None, url=API_TRAIN_MODEL):
""" Gets status on the training process once you create a model
:param model_id: string, id of the model to check
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
|
def consume(self, queue, consumer, consumer_tag='', no_local=False,
no_ack=True, exclusive=False, nowait=True, ticket=None,
cb=None):
'''
        Start a queue consumer. If `cb` is supplied, it will be called when
        the broker confirms that the consumer is registered.
'''
nowait = nowait and self.allow_nowait() and not cb
if nowait and consumer_tag == '':
consumer_tag = self._generate_consumer_tag()
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(queue).\
write_shortstr(consumer_tag).\
write_bits(no_local, no_ack, exclusive, nowait).\
write_table({}) # unused according to spec
self.send_frame(MethodFrame(self.channel_id, 60, 20, args))
if not nowait:
self._pending_consumers.append((consumer, cb))
self.channel.add_synchronous_cb(self._recv_consume_ok)
else:
self._consumer_cb[consumer_tag] = consumer
|
def function[consume, parameter[self, queue, consumer, consumer_tag, no_local, no_ack, exclusive, nowait, ticket, cb]]:
constant[
    Start a queue consumer. If `cb` is supplied, it will be called when
    the broker confirms that the consumer is registered.
]
variable[nowait] assign[=] <ast.BoolOp object at 0x7da1b0626e30>
if <ast.BoolOp object at 0x7da1b0627b50> begin[:]
variable[consumer_tag] assign[=] call[name[self]._generate_consumer_tag, parameter[]]
variable[args] assign[=] call[name[Writer], parameter[]]
call[call[call[call[call[name[args].write_short, parameter[<ast.BoolOp object at 0x7da1b0627430>]].write_shortstr, parameter[name[queue]]].write_shortstr, parameter[name[consumer_tag]]].write_bits, parameter[name[no_local], name[no_ack], name[exclusive], name[nowait]]].write_table, parameter[dictionary[[], []]]]
call[name[self].send_frame, parameter[call[name[MethodFrame], parameter[name[self].channel_id, constant[60], constant[20], name[args]]]]]
if <ast.UnaryOp object at 0x7da1b069d6c0> begin[:]
call[name[self]._pending_consumers.append, parameter[tuple[[<ast.Name object at 0x7da20c6e5c30>, <ast.Name object at 0x7da20c6e4430>]]]]
call[name[self].channel.add_synchronous_cb, parameter[name[self]._recv_consume_ok]]
|
keyword[def] identifier[consume] ( identifier[self] , identifier[queue] , identifier[consumer] , identifier[consumer_tag] = literal[string] , identifier[no_local] = keyword[False] ,
identifier[no_ack] = keyword[True] , identifier[exclusive] = keyword[False] , identifier[nowait] = keyword[True] , identifier[ticket] = keyword[None] ,
identifier[cb] = keyword[None] ):
literal[string]
identifier[nowait] = identifier[nowait] keyword[and] identifier[self] . identifier[allow_nowait] () keyword[and] keyword[not] identifier[cb]
keyword[if] identifier[nowait] keyword[and] identifier[consumer_tag] == literal[string] :
identifier[consumer_tag] = identifier[self] . identifier[_generate_consumer_tag] ()
identifier[args] = identifier[Writer] ()
identifier[args] . identifier[write_short] ( identifier[ticket] keyword[or] identifier[self] . identifier[default_ticket] ). identifier[write_shortstr] ( identifier[queue] ). identifier[write_shortstr] ( identifier[consumer_tag] ). identifier[write_bits] ( identifier[no_local] , identifier[no_ack] , identifier[exclusive] , identifier[nowait] ). identifier[write_table] ({})
identifier[self] . identifier[send_frame] ( identifier[MethodFrame] ( identifier[self] . identifier[channel_id] , literal[int] , literal[int] , identifier[args] ))
keyword[if] keyword[not] identifier[nowait] :
identifier[self] . identifier[_pending_consumers] . identifier[append] (( identifier[consumer] , identifier[cb] ))
identifier[self] . identifier[channel] . identifier[add_synchronous_cb] ( identifier[self] . identifier[_recv_consume_ok] )
keyword[else] :
identifier[self] . identifier[_consumer_cb] [ identifier[consumer_tag] ]= identifier[consumer]
|
def consume(self, queue, consumer, consumer_tag='', no_local=False, no_ack=True, exclusive=False, nowait=True, ticket=None, cb=None):
"""
    Start a queue consumer. If `cb` is supplied, it will be called when
    the broker confirms that the consumer is registered.
"""
nowait = nowait and self.allow_nowait() and (not cb)
if nowait and consumer_tag == '':
consumer_tag = self._generate_consumer_tag() # depends on [control=['if'], data=[]]
args = Writer()
args.write_short(ticket or self.default_ticket).write_shortstr(queue).write_shortstr(consumer_tag).write_bits(no_local, no_ack, exclusive, nowait).write_table({}) # unused according to spec
self.send_frame(MethodFrame(self.channel_id, 60, 20, args))
if not nowait:
self._pending_consumers.append((consumer, cb))
self.channel.add_synchronous_cb(self._recv_consume_ok) # depends on [control=['if'], data=[]]
else:
self._consumer_cb[consumer_tag] = consumer
|
def _representable(value: Any) -> bool:
"""
Check whether we want to represent the value in the error message on contract breach.
We do not want to represent classes, methods, modules and functions.
:param value: value related to an AST node
:return: True if we want to represent it in the violation error
"""
return not inspect.isclass(value) and not inspect.isfunction(value) and not inspect.ismethod(value) and not \
inspect.ismodule(value) and not inspect.isbuiltin(value)
|
def function[_representable, parameter[value]]:
constant[
Check whether we want to represent the value in the error message on contract breach.
We do not want to represent classes, methods, modules and functions.
:param value: value related to an AST node
:return: True if we want to represent it in the violation error
]
return[<ast.BoolOp object at 0x7da1b10b1f30>]
|
keyword[def] identifier[_representable] ( identifier[value] : identifier[Any] )-> identifier[bool] :
literal[string]
keyword[return] keyword[not] identifier[inspect] . identifier[isclass] ( identifier[value] ) keyword[and] keyword[not] identifier[inspect] . identifier[isfunction] ( identifier[value] ) keyword[and] keyword[not] identifier[inspect] . identifier[ismethod] ( identifier[value] ) keyword[and] keyword[not] identifier[inspect] . identifier[ismodule] ( identifier[value] ) keyword[and] keyword[not] identifier[inspect] . identifier[isbuiltin] ( identifier[value] )
|
def _representable(value: Any) -> bool:
"""
Check whether we want to represent the value in the error message on contract breach.
We do not want to represent classes, methods, modules and functions.
:param value: value related to an AST node
:return: True if we want to represent it in the violation error
"""
return not inspect.isclass(value) and (not inspect.isfunction(value)) and (not inspect.ismethod(value)) and (not inspect.ismodule(value)) and (not inspect.isbuiltin(value))
|
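Spot checks of the predicate, assuming the module-level import inspect and from typing import Any that the function relies on:
import math

print(_representable(42), _representable('x'), _representable([1, 2]))
# True True True: plain values get shown in violation messages
print(_representable(math), _representable(len), _representable(int))
# False False False: modules, builtins and classes are suppressed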
def to_definition(self):
"""
Converts the name instance to a pyqode.core.share.Definition
"""
icon = {
Name.Type.Root: icons.ICON_MIMETYPE,
Name.Type.Division: icons.ICON_DIVISION,
Name.Type.Section: icons.ICON_SECTION,
Name.Type.Variable: icons.ICON_VAR,
Name.Type.Paragraph: icons.ICON_FUNC
}[self.node_type]
d = Definition(self.name, self.line, self.column, icon, self.description)
for ch in self.children:
d.add_child(ch.to_definition())
return d
|
def function[to_definition, parameter[self]]:
constant[
Converts the name instance to a pyqode.core.share.Definition
]
variable[icon] assign[=] call[dictionary[[<ast.Attribute object at 0x7da18dc06680>, <ast.Attribute object at 0x7da18dc078b0>, <ast.Attribute object at 0x7da18dc07f40>, <ast.Attribute object at 0x7da18dc06890>, <ast.Attribute object at 0x7da18dc07eb0>], [<ast.Attribute object at 0x7da18dc04700>, <ast.Attribute object at 0x7da18dc04310>, <ast.Attribute object at 0x7da18dc04250>, <ast.Attribute object at 0x7da18dc06fe0>, <ast.Attribute object at 0x7da18dc058a0>]]][name[self].node_type]
variable[d] assign[=] call[name[Definition], parameter[name[self].name, name[self].line, name[self].column, name[icon], name[self].description]]
for taget[name[ch]] in starred[name[self].children] begin[:]
call[name[d].add_child, parameter[call[name[ch].to_definition, parameter[]]]]
return[name[d]]
|
keyword[def] identifier[to_definition] ( identifier[self] ):
literal[string]
identifier[icon] ={
identifier[Name] . identifier[Type] . identifier[Root] : identifier[icons] . identifier[ICON_MIMETYPE] ,
identifier[Name] . identifier[Type] . identifier[Division] : identifier[icons] . identifier[ICON_DIVISION] ,
identifier[Name] . identifier[Type] . identifier[Section] : identifier[icons] . identifier[ICON_SECTION] ,
identifier[Name] . identifier[Type] . identifier[Variable] : identifier[icons] . identifier[ICON_VAR] ,
identifier[Name] . identifier[Type] . identifier[Paragraph] : identifier[icons] . identifier[ICON_FUNC]
}[ identifier[self] . identifier[node_type] ]
identifier[d] = identifier[Definition] ( identifier[self] . identifier[name] , identifier[self] . identifier[line] , identifier[self] . identifier[column] , identifier[icon] , identifier[self] . identifier[description] )
keyword[for] identifier[ch] keyword[in] identifier[self] . identifier[children] :
identifier[d] . identifier[add_child] ( identifier[ch] . identifier[to_definition] ())
keyword[return] identifier[d]
|
def to_definition(self):
"""
Converts the name instance to a pyqode.core.share.Definition
"""
icon = {Name.Type.Root: icons.ICON_MIMETYPE, Name.Type.Division: icons.ICON_DIVISION, Name.Type.Section: icons.ICON_SECTION, Name.Type.Variable: icons.ICON_VAR, Name.Type.Paragraph: icons.ICON_FUNC}[self.node_type]
d = Definition(self.name, self.line, self.column, icon, self.description)
for ch in self.children:
d.add_child(ch.to_definition()) # depends on [control=['for'], data=['ch']]
return d
|
def save_element_as_image_file(self, selector, file_name, folder=None):
""" Take a screenshot of an element and save it as an image file.
If no folder is specified, will save it to the current folder. """
element = self.find_element(selector)
element_png = element.screenshot_as_png
if len(file_name.split('.')[0]) < 1:
raise Exception("Error: file_name length must be > 0.")
if not file_name.endswith(".png"):
file_name = file_name + ".png"
image_file_path = None
if folder:
if folder.endswith("/"):
folder = folder[:-1]
if len(folder) > 0:
self.create_folder(folder)
image_file_path = "%s/%s" % (folder, file_name)
if not image_file_path:
image_file_path = file_name
with open(image_file_path, "wb") as file:
file.write(element_png)
|
def function[save_element_as_image_file, parameter[self, selector, file_name, folder]]:
constant[ Take a screenshot of an element and save it as an image file.
If no folder is specified, will save it to the current folder. ]
variable[element] assign[=] call[name[self].find_element, parameter[name[selector]]]
variable[element_png] assign[=] name[element].screenshot_as_png
if compare[call[name[len], parameter[call[call[name[file_name].split, parameter[constant[.]]]][constant[0]]]] less[<] constant[1]] begin[:]
<ast.Raise object at 0x7da1b1bca2f0>
if <ast.UnaryOp object at 0x7da1b1bca6b0> begin[:]
variable[file_name] assign[=] binary_operation[name[file_name] + constant[.png]]
variable[image_file_path] assign[=] constant[None]
if name[folder] begin[:]
if call[name[folder].endswith, parameter[constant[/]]] begin[:]
variable[folder] assign[=] call[name[folder]][<ast.Slice object at 0x7da1b1bc81c0>]
if compare[call[name[len], parameter[name[folder]]] greater[>] constant[0]] begin[:]
call[name[self].create_folder, parameter[name[folder]]]
variable[image_file_path] assign[=] binary_operation[constant[%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1bca680>, <ast.Name object at 0x7da1b1bcb880>]]]
if <ast.UnaryOp object at 0x7da1b1bcad10> begin[:]
variable[image_file_path] assign[=] name[file_name]
with call[name[open], parameter[name[image_file_path], constant[wb]]] begin[:]
call[name[file].write, parameter[name[element_png]]]
|
keyword[def] identifier[save_element_as_image_file] ( identifier[self] , identifier[selector] , identifier[file_name] , identifier[folder] = keyword[None] ):
literal[string]
identifier[element] = identifier[self] . identifier[find_element] ( identifier[selector] )
identifier[element_png] = identifier[element] . identifier[screenshot_as_png]
keyword[if] identifier[len] ( identifier[file_name] . identifier[split] ( literal[string] )[ literal[int] ])< literal[int] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] keyword[not] identifier[file_name] . identifier[endswith] ( literal[string] ):
identifier[file_name] = identifier[file_name] + literal[string]
identifier[image_file_path] = keyword[None]
keyword[if] identifier[folder] :
keyword[if] identifier[folder] . identifier[endswith] ( literal[string] ):
identifier[folder] = identifier[folder] [:- literal[int] ]
keyword[if] identifier[len] ( identifier[folder] )> literal[int] :
identifier[self] . identifier[create_folder] ( identifier[folder] )
identifier[image_file_path] = literal[string] %( identifier[folder] , identifier[file_name] )
keyword[if] keyword[not] identifier[image_file_path] :
identifier[image_file_path] = identifier[file_name]
keyword[with] identifier[open] ( identifier[image_file_path] , literal[string] ) keyword[as] identifier[file] :
identifier[file] . identifier[write] ( identifier[element_png] )
|
def save_element_as_image_file(self, selector, file_name, folder=None):
""" Take a screenshot of an element and save it as an image file.
If no folder is specified, will save it to the current folder. """
element = self.find_element(selector)
element_png = element.screenshot_as_png
if len(file_name.split('.')[0]) < 1:
raise Exception('Error: file_name length must be > 0.') # depends on [control=['if'], data=[]]
if not file_name.endswith('.png'):
file_name = file_name + '.png' # depends on [control=['if'], data=[]]
image_file_path = None
if folder:
if folder.endswith('/'):
folder = folder[:-1] # depends on [control=['if'], data=[]]
if len(folder) > 0:
self.create_folder(folder)
image_file_path = '%s/%s' % (folder, file_name) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not image_file_path:
image_file_path = file_name # depends on [control=['if'], data=[]]
with open(image_file_path, 'wb') as file:
file.write(element_png) # depends on [control=['with'], data=['file']]
|
def validate_int(value):
""" Integer validator """
if value and not isinstance(value, int):
try:
int(str(value))
except (TypeError, ValueError):
raise ValidationError('not a valid number')
return value
|
def function[validate_int, parameter[value]]:
constant[ Integer validator ]
if <ast.BoolOp object at 0x7da20c76eda0> begin[:]
<ast.Try object at 0x7da20c76d9f0>
return[name[value]]
|
keyword[def] identifier[validate_int] ( identifier[value] ):
literal[string]
keyword[if] identifier[value] keyword[and] keyword[not] identifier[isinstance] ( identifier[value] , identifier[int] ):
keyword[try] :
identifier[int] ( identifier[str] ( identifier[value] ))
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValidationError] ( literal[string] )
keyword[return] identifier[value]
|
def validate_int(value):
""" Integer validator """
if value and (not isinstance(value, int)):
try:
int(str(value)) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValidationError('not a valid number') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return value
|
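A runnable sketch of the validator, with a local stand-in for the framework's ValidationError (an assumption; the real class comes from the surrounding project). Note that falsy values such as None, 0 or '' skip the check entirely:

class ValidationError(Exception):  # stand-in, not the project's class
    pass

def validate_int(value):
    if value and not isinstance(value, int):
        try:
            int(str(value))
        except (TypeError, ValueError):
            raise ValidationError('not a valid number')
    return value

print(validate_int(7))       # 7
print(validate_int("12"))    # "12" -- numeric strings pass through unchanged
try:
    validate_int("twelve")
except ValidationError as ex:
    print(ex)                # not a valid number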
def get_paragraph(self):
"""
Write a paragraph
        of 5 to 12 sentences.
"""
self.text = ''
for x in range(randint(5, 12)):
sentence = self._write_sentence()
self.text = self.text + sentence
return self.text
|
def function[get_paragraph, parameter[self]]:
constant[
Write a paragraph
    of 5 to 12 sentences.
]
name[self].text assign[=] constant[]
for taget[name[x]] in starred[call[name[range], parameter[call[name[randint], parameter[constant[5], constant[12]]]]]] begin[:]
variable[sentence] assign[=] call[name[self]._write_sentence, parameter[]]
name[self].text assign[=] binary_operation[name[self].text + name[sentence]]
return[name[self].text]
|
keyword[def] identifier[get_paragraph] ( identifier[self] ):
literal[string]
identifier[self] . identifier[text] = literal[string]
keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[randint] ( literal[int] , literal[int] )):
identifier[sentence] = identifier[self] . identifier[_write_sentence] ()
identifier[self] . identifier[text] = identifier[self] . identifier[text] + identifier[sentence]
keyword[return] identifier[self] . identifier[text]
|
def get_paragraph(self):
"""
Write a paragraph
of 5 sentences.
"""
self.text = ''
for x in range(randint(5, 12)):
sentence = self._write_sentence()
self.text = self.text + sentence # depends on [control=['for'], data=[]]
return self.text
|
def set_bucket_type_props(self, transport, bucket_type, props):
"""
set_bucket_type_props(bucket_type, props)
Sets properties for the given bucket-type.
.. note:: This request is automatically retried :attr:`retries`
times if it fails due to network error.
:param bucket_type: the bucket-type whose properties will be set
:type bucket_type: BucketType
:param props: the properties to set
:type props: dict
"""
_validate_bucket_props(props)
return transport.set_bucket_type_props(bucket_type, props)
|
def function[set_bucket_type_props, parameter[self, transport, bucket_type, props]]:
constant[
set_bucket_type_props(bucket_type, props)
Sets properties for the given bucket-type.
.. note:: This request is automatically retried :attr:`retries`
times if it fails due to network error.
:param bucket_type: the bucket-type whose properties will be set
:type bucket_type: BucketType
:param props: the properties to set
:type props: dict
]
call[name[_validate_bucket_props], parameter[name[props]]]
return[call[name[transport].set_bucket_type_props, parameter[name[bucket_type], name[props]]]]
|
keyword[def] identifier[set_bucket_type_props] ( identifier[self] , identifier[transport] , identifier[bucket_type] , identifier[props] ):
literal[string]
identifier[_validate_bucket_props] ( identifier[props] )
keyword[return] identifier[transport] . identifier[set_bucket_type_props] ( identifier[bucket_type] , identifier[props] )
|
def set_bucket_type_props(self, transport, bucket_type, props):
"""
set_bucket_type_props(bucket_type, props)
Sets properties for the given bucket-type.
.. note:: This request is automatically retried :attr:`retries`
times if it fails due to network error.
:param bucket_type: the bucket-type whose properties will be set
:type bucket_type: BucketType
:param props: the properties to set
:type props: dict
"""
_validate_bucket_props(props)
return transport.set_bucket_type_props(bucket_type, props)
|
def legend_scaler(legend_values, max_labels=10.0):
"""
Downsamples the number of legend values so that there isn't a collision
of text on the legend colorbar (within reason). The colorbar seems to
support ~10 entries as a maximum.
"""
if len(legend_values) < max_labels:
legend_ticks = legend_values
else:
spacer = int(math.ceil(len(legend_values)/max_labels))
legend_ticks = []
for i in legend_values[::spacer]:
legend_ticks += [i]
legend_ticks += ['']*(spacer-1)
return legend_ticks
|
def function[legend_scaler, parameter[legend_values, max_labels]]:
constant[
Downsamples the number of legend values so that there isn't a collision
of text on the legend colorbar (within reason). The colorbar seems to
support ~10 entries as a maximum.
]
if compare[call[name[len], parameter[name[legend_values]]] less[<] name[max_labels]] begin[:]
variable[legend_ticks] assign[=] name[legend_values]
return[name[legend_ticks]]
|
keyword[def] identifier[legend_scaler] ( identifier[legend_values] , identifier[max_labels] = literal[int] ):
literal[string]
keyword[if] identifier[len] ( identifier[legend_values] )< identifier[max_labels] :
identifier[legend_ticks] = identifier[legend_values]
keyword[else] :
identifier[spacer] = identifier[int] ( identifier[math] . identifier[ceil] ( identifier[len] ( identifier[legend_values] )/ identifier[max_labels] ))
identifier[legend_ticks] =[]
keyword[for] identifier[i] keyword[in] identifier[legend_values] [:: identifier[spacer] ]:
identifier[legend_ticks] +=[ identifier[i] ]
identifier[legend_ticks] +=[ literal[string] ]*( identifier[spacer] - literal[int] )
keyword[return] identifier[legend_ticks]
|
def legend_scaler(legend_values, max_labels=10.0):
"""
Downsamples the number of legend values so that there isn't a collision
of text on the legend colorbar (within reason). The colorbar seems to
support ~10 entries as a maximum.
"""
if len(legend_values) < max_labels:
legend_ticks = legend_values # depends on [control=['if'], data=[]]
else:
spacer = int(math.ceil(len(legend_values) / max_labels))
legend_ticks = []
for i in legend_values[::spacer]:
legend_ticks += [i]
legend_ticks += [''] * (spacer - 1) # depends on [control=['for'], data=['i']]
return legend_ticks
|
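A worked trace of the else-branch, mirroring the loop body above so the arithmetic is visible (25 labels against the default ceiling of 10):

import math

legend_values = list(range(25))
spacer = int(math.ceil(len(legend_values) / 10.0))  # ceil(2.5) -> 3
legend_ticks = []
for i in legend_values[::spacer]:                   # 0, 3, 6, ..., 24
    legend_ticks += [i]
    legend_ticks += [''] * (spacer - 1)             # blanks keep colorbar spacing
print(legend_ticks[:6])                             # [0, '', '', 3, '', '']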
def register_on_snapshot_deleted(self, callback):
"""Set the callback function to consume on snapshot deleted events.
Callback receives a ISnapshotDeletedEvent object.
Returns the callback_id
"""
event_type = library.VBoxEventType.on_snapshot_deleted
return self.event_source.register_callback(callback, event_type)
|
def function[register_on_snapshot_deleted, parameter[self, callback]]:
constant[Set the callback function to consume on snapshot deleted events.
Callback receives a ISnapshotDeletedEvent object.
Returns the callback_id
]
variable[event_type] assign[=] name[library].VBoxEventType.on_snapshot_deleted
return[call[name[self].event_source.register_callback, parameter[name[callback], name[event_type]]]]
|
keyword[def] identifier[register_on_snapshot_deleted] ( identifier[self] , identifier[callback] ):
literal[string]
identifier[event_type] = identifier[library] . identifier[VBoxEventType] . identifier[on_snapshot_deleted]
keyword[return] identifier[self] . identifier[event_source] . identifier[register_callback] ( identifier[callback] , identifier[event_type] )
|
def register_on_snapshot_deleted(self, callback):
"""Set the callback function to consume on snapshot deleted events.
Callback receives a ISnapshotDeletedEvent object.
Returns the callback_id
"""
event_type = library.VBoxEventType.on_snapshot_deleted
return self.event_source.register_callback(callback, event_type)
|
def get_field_description(f):
"""Get the type description of a GRPC Message field."""
type_name = get_field_type(f)
if type_name == 'MESSAGE' and \
{sf.name for sf in f.message_type.fields} == {'key', 'value'}:
return 'map<string, string>'
elif type_name == 'MESSAGE':
return f.message_type.full_name
elif type_name == 'ENUM':
return f.enum_type.full_name
else:
return type_name.lower()
|
def function[get_field_description, parameter[f]]:
constant[Get the type description of a GRPC Message field.]
variable[type_name] assign[=] call[name[get_field_type], parameter[name[f]]]
if <ast.BoolOp object at 0x7da1b191e1a0> begin[:]
return[constant[map<string, string>]]
|
keyword[def] identifier[get_field_description] ( identifier[f] ):
literal[string]
identifier[type_name] = identifier[get_field_type] ( identifier[f] )
keyword[if] identifier[type_name] == literal[string] keyword[and] { identifier[sf] . identifier[name] keyword[for] identifier[sf] keyword[in] identifier[f] . identifier[message_type] . identifier[fields] }=={ literal[string] , literal[string] }:
keyword[return] literal[string]
keyword[elif] identifier[type_name] == literal[string] :
keyword[return] identifier[f] . identifier[message_type] . identifier[full_name]
keyword[elif] identifier[type_name] == literal[string] :
keyword[return] identifier[f] . identifier[enum_type] . identifier[full_name]
keyword[else] :
keyword[return] identifier[type_name] . identifier[lower] ()
|
def get_field_description(f):
"""Get the type description of a GRPC Message field."""
type_name = get_field_type(f)
if type_name == 'MESSAGE' and {sf.name for sf in f.message_type.fields} == {'key', 'value'}:
return 'map<string, string>' # depends on [control=['if'], data=[]]
elif type_name == 'MESSAGE':
return f.message_type.full_name # depends on [control=['if'], data=[]]
elif type_name == 'ENUM':
return f.enum_type.full_name # depends on [control=['if'], data=[]]
else:
return type_name.lower()
|
def _run_parallel_multiprocess(self):
"""Run processes from queue
"""
_log.debug("run.parallel.multiprocess.start")
processes = []
ProcRunner.instance = self
for i in range(self._ncores):
self._status.running(i)
proc = multiprocessing.Process(target=ProcRunner.run, args=(i,))
proc.start()
processes.append(proc)
for i in range(self._ncores):
processes[i].join()
code = processes[i].exitcode
self._status.success(i) if 0 == code else self._status.fail(i)
_log.debug("run.parallel.multiprocess.end states={}".format(self._status))
|
def function[_run_parallel_multiprocess, parameter[self]]:
constant[Run processes from queue
]
call[name[_log].debug, parameter[constant[run.parallel.multiprocess.start]]]
variable[processes] assign[=] list[[]]
name[ProcRunner].instance assign[=] name[self]
for taget[name[i]] in starred[call[name[range], parameter[name[self]._ncores]]] begin[:]
call[name[self]._status.running, parameter[name[i]]]
variable[proc] assign[=] call[name[multiprocessing].Process, parameter[]]
call[name[proc].start, parameter[]]
call[name[processes].append, parameter[name[proc]]]
for taget[name[i]] in starred[call[name[range], parameter[name[self]._ncores]]] begin[:]
call[call[name[processes]][name[i]].join, parameter[]]
variable[code] assign[=] call[name[processes]][name[i]].exitcode
<ast.IfExp object at 0x7da18fe91d20>
call[name[_log].debug, parameter[call[constant[run.parallel.multiprocess.end states={}].format, parameter[name[self]._status]]]]
|
keyword[def] identifier[_run_parallel_multiprocess] ( identifier[self] ):
literal[string]
identifier[_log] . identifier[debug] ( literal[string] )
identifier[processes] =[]
identifier[ProcRunner] . identifier[instance] = identifier[self]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[_ncores] ):
identifier[self] . identifier[_status] . identifier[running] ( identifier[i] )
identifier[proc] = identifier[multiprocessing] . identifier[Process] ( identifier[target] = identifier[ProcRunner] . identifier[run] , identifier[args] =( identifier[i] ,))
identifier[proc] . identifier[start] ()
identifier[processes] . identifier[append] ( identifier[proc] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[_ncores] ):
identifier[processes] [ identifier[i] ]. identifier[join] ()
identifier[code] = identifier[processes] [ identifier[i] ]. identifier[exitcode]
identifier[self] . identifier[_status] . identifier[success] ( identifier[i] ) keyword[if] literal[int] == identifier[code] keyword[else] identifier[self] . identifier[_status] . identifier[fail] ( identifier[i] )
identifier[_log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[_status] ))
|
def _run_parallel_multiprocess(self):
"""Run processes from queue
"""
_log.debug('run.parallel.multiprocess.start')
processes = []
ProcRunner.instance = self
for i in range(self._ncores):
self._status.running(i)
proc = multiprocessing.Process(target=ProcRunner.run, args=(i,))
proc.start()
processes.append(proc) # depends on [control=['for'], data=['i']]
for i in range(self._ncores):
processes[i].join()
code = processes[i].exitcode
self._status.success(i) if 0 == code else self._status.fail(i) # depends on [control=['for'], data=['i']]
_log.debug('run.parallel.multiprocess.end states={}'.format(self._status))
|
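The ProcRunner queue machinery is not shown in this row, so here is a generic sketch of the same start/join/exitcode pattern (the worker body is illustrative):

import multiprocessing

def worker(i):
    print("core %d running" % i)  # returning normally yields exit code 0

if __name__ == "__main__":
    procs = [multiprocessing.Process(target=worker, args=(i,)) for i in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
        print(p.exitcode)  # 0 maps to success(i), anything else to fail(i)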
def describe_formatted(self, name, database=None):
"""
Retrieve results of DESCRIBE FORMATTED command. See Impala
documentation for more.
Parameters
----------
name : string
Table name. Can be fully qualified (with database)
database : string, optional
"""
from ibis.impala.metadata import parse_metadata
stmt = self._table_command(
'DESCRIBE FORMATTED', name, database=database
)
query = ImpalaQuery(self, stmt)
result = query.execute()
# Leave formatting to pandas
for c in result.columns:
result[c] = result[c].str.strip()
return parse_metadata(result)
|
def function[describe_formatted, parameter[self, name, database]]:
constant[
Retrieve results of DESCRIBE FORMATTED command. See Impala
documentation for more.
Parameters
----------
name : string
Table name. Can be fully qualified (with database)
database : string, optional
]
from relative_module[ibis.impala.metadata] import module[parse_metadata]
variable[stmt] assign[=] call[name[self]._table_command, parameter[constant[DESCRIBE FORMATTED], name[name]]]
variable[query] assign[=] call[name[ImpalaQuery], parameter[name[self], name[stmt]]]
variable[result] assign[=] call[name[query].execute, parameter[]]
for taget[name[c]] in starred[name[result].columns] begin[:]
call[name[result]][name[c]] assign[=] call[call[name[result]][name[c]].str.strip, parameter[]]
return[call[name[parse_metadata], parameter[name[result]]]]
|
keyword[def] identifier[describe_formatted] ( identifier[self] , identifier[name] , identifier[database] = keyword[None] ):
literal[string]
keyword[from] identifier[ibis] . identifier[impala] . identifier[metadata] keyword[import] identifier[parse_metadata]
identifier[stmt] = identifier[self] . identifier[_table_command] (
literal[string] , identifier[name] , identifier[database] = identifier[database]
)
identifier[query] = identifier[ImpalaQuery] ( identifier[self] , identifier[stmt] )
identifier[result] = identifier[query] . identifier[execute] ()
keyword[for] identifier[c] keyword[in] identifier[result] . identifier[columns] :
identifier[result] [ identifier[c] ]= identifier[result] [ identifier[c] ]. identifier[str] . identifier[strip] ()
keyword[return] identifier[parse_metadata] ( identifier[result] )
|
def describe_formatted(self, name, database=None):
"""
Retrieve results of DESCRIBE FORMATTED command. See Impala
documentation for more.
Parameters
----------
name : string
Table name. Can be fully qualified (with database)
database : string, optional
"""
from ibis.impala.metadata import parse_metadata
stmt = self._table_command('DESCRIBE FORMATTED', name, database=database)
query = ImpalaQuery(self, stmt)
result = query.execute()
# Leave formatting to pandas
for c in result.columns:
result[c] = result[c].str.strip() # depends on [control=['for'], data=['c']]
return parse_metadata(result)
|
def contourf_to_geojson_overlap(contourf, geojson_filepath=None, min_angle_deg=None,
ndigits=5, unit='', stroke_width=1, fill_opacity=.9,
geojson_properties=None, strdump=False, serialize=True):
"""Transform matplotlib.contourf to geojson with overlapping filled contours."""
polygon_features = []
contourf_idx = 0
for collection in contourf.collections:
color = collection.get_facecolor()
for path in collection.get_paths():
for coord in path.to_polygons():
if min_angle_deg:
coord = keep_high_angle(coord, min_angle_deg)
coord = np.around(coord, ndigits) if ndigits else coord
polygon = Polygon(coordinates=[coord.tolist()])
fcolor = rgb2hex(color[0])
properties = set_contourf_properties(stroke_width, fcolor, fill_opacity, contourf.levels, contourf_idx, unit)
if geojson_properties:
properties.update(geojson_properties)
feature = Feature(geometry=polygon, properties=properties)
polygon_features.append(feature)
contourf_idx += 1
feature_collection = FeatureCollection(polygon_features)
return _render_feature_collection(feature_collection, geojson_filepath, strdump, serialize)
|
def function[contourf_to_geojson_overlap, parameter[contourf, geojson_filepath, min_angle_deg, ndigits, unit, stroke_width, fill_opacity, geojson_properties, strdump, serialize]]:
constant[Transform matplotlib.contourf to geojson with overlapping filled contours.]
variable[polygon_features] assign[=] list[[]]
variable[contourf_idx] assign[=] constant[0]
for taget[name[collection]] in starred[name[contourf].collections] begin[:]
variable[color] assign[=] call[name[collection].get_facecolor, parameter[]]
for taget[name[path]] in starred[call[name[collection].get_paths, parameter[]]] begin[:]
for taget[name[coord]] in starred[call[name[path].to_polygons, parameter[]]] begin[:]
if name[min_angle_deg] begin[:]
variable[coord] assign[=] call[name[keep_high_angle], parameter[name[coord], name[min_angle_deg]]]
variable[coord] assign[=] <ast.IfExp object at 0x7da1b10a7850>
variable[polygon] assign[=] call[name[Polygon], parameter[]]
variable[fcolor] assign[=] call[name[rgb2hex], parameter[call[name[color]][constant[0]]]]
variable[properties] assign[=] call[name[set_contourf_properties], parameter[name[stroke_width], name[fcolor], name[fill_opacity], name[contourf].levels, name[contourf_idx], name[unit]]]
if name[geojson_properties] begin[:]
call[name[properties].update, parameter[name[geojson_properties]]]
variable[feature] assign[=] call[name[Feature], parameter[]]
call[name[polygon_features].append, parameter[name[feature]]]
<ast.AugAssign object at 0x7da2047ea560>
variable[feature_collection] assign[=] call[name[FeatureCollection], parameter[name[polygon_features]]]
return[call[name[_render_feature_collection], parameter[name[feature_collection], name[geojson_filepath], name[strdump], name[serialize]]]]
|
keyword[def] identifier[contourf_to_geojson_overlap] ( identifier[contourf] , identifier[geojson_filepath] = keyword[None] , identifier[min_angle_deg] = keyword[None] ,
identifier[ndigits] = literal[int] , identifier[unit] = literal[string] , identifier[stroke_width] = literal[int] , identifier[fill_opacity] = literal[int] ,
identifier[geojson_properties] = keyword[None] , identifier[strdump] = keyword[False] , identifier[serialize] = keyword[True] ):
literal[string]
identifier[polygon_features] =[]
identifier[contourf_idx] = literal[int]
keyword[for] identifier[collection] keyword[in] identifier[contourf] . identifier[collections] :
identifier[color] = identifier[collection] . identifier[get_facecolor] ()
keyword[for] identifier[path] keyword[in] identifier[collection] . identifier[get_paths] ():
keyword[for] identifier[coord] keyword[in] identifier[path] . identifier[to_polygons] ():
keyword[if] identifier[min_angle_deg] :
identifier[coord] = identifier[keep_high_angle] ( identifier[coord] , identifier[min_angle_deg] )
identifier[coord] = identifier[np] . identifier[around] ( identifier[coord] , identifier[ndigits] ) keyword[if] identifier[ndigits] keyword[else] identifier[coord]
identifier[polygon] = identifier[Polygon] ( identifier[coordinates] =[ identifier[coord] . identifier[tolist] ()])
identifier[fcolor] = identifier[rgb2hex] ( identifier[color] [ literal[int] ])
identifier[properties] = identifier[set_contourf_properties] ( identifier[stroke_width] , identifier[fcolor] , identifier[fill_opacity] , identifier[contourf] . identifier[levels] , identifier[contourf_idx] , identifier[unit] )
keyword[if] identifier[geojson_properties] :
identifier[properties] . identifier[update] ( identifier[geojson_properties] )
identifier[feature] = identifier[Feature] ( identifier[geometry] = identifier[polygon] , identifier[properties] = identifier[properties] )
identifier[polygon_features] . identifier[append] ( identifier[feature] )
identifier[contourf_idx] += literal[int]
identifier[feature_collection] = identifier[FeatureCollection] ( identifier[polygon_features] )
keyword[return] identifier[_render_feature_collection] ( identifier[feature_collection] , identifier[geojson_filepath] , identifier[strdump] , identifier[serialize] )
|
def contourf_to_geojson_overlap(contourf, geojson_filepath=None, min_angle_deg=None, ndigits=5, unit='', stroke_width=1, fill_opacity=0.9, geojson_properties=None, strdump=False, serialize=True):
"""Transform matplotlib.contourf to geojson with overlapping filled contours."""
polygon_features = []
contourf_idx = 0
for collection in contourf.collections:
color = collection.get_facecolor()
for path in collection.get_paths():
for coord in path.to_polygons():
if min_angle_deg:
coord = keep_high_angle(coord, min_angle_deg) # depends on [control=['if'], data=[]]
coord = np.around(coord, ndigits) if ndigits else coord
polygon = Polygon(coordinates=[coord.tolist()])
fcolor = rgb2hex(color[0])
properties = set_contourf_properties(stroke_width, fcolor, fill_opacity, contourf.levels, contourf_idx, unit)
if geojson_properties:
properties.update(geojson_properties) # depends on [control=['if'], data=[]]
feature = Feature(geometry=polygon, properties=properties)
polygon_features.append(feature) # depends on [control=['for'], data=['coord']] # depends on [control=['for'], data=['path']]
contourf_idx += 1 # depends on [control=['for'], data=['collection']]
feature_collection = FeatureCollection(polygon_features)
return _render_feature_collection(feature_collection, geojson_filepath, strdump, serialize)
|
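A hypothetical end-to-end call, assuming the function above and its helpers (keep_high_angle, set_contourf_properties, _render_feature_collection) are importable from the same module; matplotlib and numpy are required:

import numpy as np
import matplotlib.pyplot as plt

x = y = np.linspace(-3.0, 3.0, 60)
X, Y = np.meshgrid(x, y)
Z = np.exp(-(X ** 2 + Y ** 2))
cf = plt.contourf(X, Y, Z, levels=5)
# strdump=True should return the GeoJSON string rather than writing a file,
# judging by the parameter name -- an assumption, not a verified contract.
geojson_str = contourf_to_geojson_overlap(cf, ndigits=3, strdump=True)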
def get_all():
'''
Return a list of all available services
CLI Example:
.. code-block:: bash
salt '*' s6.get_all
'''
if not SERVICE_DIR:
raise CommandExecutionError("Could not find service directory.")
service_list = [dirname for dirname
in os.listdir(SERVICE_DIR)
if not dirname.startswith('.')]
return sorted(service_list)
|
def function[get_all, parameter[]]:
constant[
Return a list of all available services
CLI Example:
.. code-block:: bash
salt '*' s6.get_all
]
if <ast.UnaryOp object at 0x7da20c6c5ea0> begin[:]
<ast.Raise object at 0x7da20c6c4e80>
variable[service_list] assign[=] <ast.ListComp object at 0x7da20c6c6b00>
return[call[name[sorted], parameter[name[service_list]]]]
|
keyword[def] identifier[get_all] ():
literal[string]
keyword[if] keyword[not] identifier[SERVICE_DIR] :
keyword[raise] identifier[CommandExecutionError] ( literal[string] )
identifier[service_list] =[ identifier[dirname] keyword[for] identifier[dirname]
keyword[in] identifier[os] . identifier[listdir] ( identifier[SERVICE_DIR] )
keyword[if] keyword[not] identifier[dirname] . identifier[startswith] ( literal[string] )]
keyword[return] identifier[sorted] ( identifier[service_list] )
|
def get_all():
"""
Return a list of all available services
CLI Example:
.. code-block:: bash
salt '*' s6.get_all
"""
if not SERVICE_DIR:
raise CommandExecutionError('Could not find service directory.') # depends on [control=['if'], data=[]]
service_list = [dirname for dirname in os.listdir(SERVICE_DIR) if not dirname.startswith('.')]
return sorted(service_list)
|
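The filtering core of get_all, exercised on a plain list so it runs without a real s6 service directory (the entries are made up):

entries = ['nginx', '.s6-svscan', 'sshd', '.hidden']
print(sorted(d for d in entries if not d.startswith('.')))
# ['nginx', 'sshd']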
def read(self, subpath=None):
"""
Returns the UTF-8 content of the specified subpath.
subpath is expected to already have been normalized.
Raises ReadmeNotFoundError if a README for the specified subpath
does not exist.
Raises werkzeug.exceptions.NotFound if the resulting path
would fall out of the root directory.
"""
is_binary = self.is_binary(subpath)
filename = self.readme_for(subpath)
try:
if is_binary:
return self._read_binary(filename)
return self._read_text(filename)
# OSError for Python 3 base class, EnvironmentError for Python 2
except (OSError, EnvironmentError) as ex:
if ex.errno == errno.ENOENT:
raise ReadmeNotFoundError(filename)
raise
|
def function[read, parameter[self, subpath]]:
constant[
Returns the UTF-8 content of the specified subpath.
subpath is expected to already have been normalized.
Raises ReadmeNotFoundError if a README for the specified subpath
does not exist.
Raises werkzeug.exceptions.NotFound if the resulting path
would fall out of the root directory.
]
variable[is_binary] assign[=] call[name[self].is_binary, parameter[name[subpath]]]
variable[filename] assign[=] call[name[self].readme_for, parameter[name[subpath]]]
<ast.Try object at 0x7da1b1d07640>
|
keyword[def] identifier[read] ( identifier[self] , identifier[subpath] = keyword[None] ):
literal[string]
identifier[is_binary] = identifier[self] . identifier[is_binary] ( identifier[subpath] )
identifier[filename] = identifier[self] . identifier[readme_for] ( identifier[subpath] )
keyword[try] :
keyword[if] identifier[is_binary] :
keyword[return] identifier[self] . identifier[_read_binary] ( identifier[filename] )
keyword[return] identifier[self] . identifier[_read_text] ( identifier[filename] )
keyword[except] ( identifier[OSError] , identifier[EnvironmentError] ) keyword[as] identifier[ex] :
keyword[if] identifier[ex] . identifier[errno] == identifier[errno] . identifier[ENOENT] :
keyword[raise] identifier[ReadmeNotFoundError] ( identifier[filename] )
keyword[raise]
|
def read(self, subpath=None):
"""
Returns the UTF-8 content of the specified subpath.
subpath is expected to already have been normalized.
Raises ReadmeNotFoundError if a README for the specified subpath
does not exist.
Raises werkzeug.exceptions.NotFound if the resulting path
would fall out of the root directory.
"""
is_binary = self.is_binary(subpath)
filename = self.readme_for(subpath)
try:
if is_binary:
return self._read_binary(filename) # depends on [control=['if'], data=[]]
return self._read_text(filename) # depends on [control=['try'], data=[]]
# OSError for Python 3 base class, EnvironmentError for Python 2
except (OSError, EnvironmentError) as ex:
if ex.errno == errno.ENOENT:
raise ReadmeNotFoundError(filename) # depends on [control=['if'], data=[]]
raise # depends on [control=['except'], data=['ex']]
|
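The errno dispatch in read, isolated into a runnable sketch with a local stand-in for ReadmeNotFoundError (the real class lives in the surrounding project):

import errno

class ReadmeNotFoundError(Exception):  # stand-in for the project's exception
    pass

def read_text(filename):
    try:
        with open(filename, encoding='utf-8') as fh:
            return fh.read()
    except OSError as ex:
        if ex.errno == errno.ENOENT:       # missing file -> domain-specific error
            raise ReadmeNotFoundError(filename)
        raise                              # any other I/O failure propagates

try:
    read_text('no-such-readme.md')
except ReadmeNotFoundError as ex:
    print('missing:', ex)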
def absent(name, persist=False, comment=True, mods=None):
'''
Verify that the named kernel module is not loaded
name
The name of the kernel module to verify is not loaded
persist
Remove module from ``/etc/modules``
comment
Comment out module in ``/etc/modules`` rather than remove it
mods
A list of modules to verify are unloaded. If this argument is used,
the ``name`` argument, although still required, is not used, and
becomes a placeholder
.. versionadded:: 2016.3.0
'''
if not isinstance(mods, (list, tuple)):
mods = [name]
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
loaded_mods = __salt__['kmod.mod_list']()
if persist:
persist_mods = __salt__['kmod.mod_list'](True)
# Union of loaded modules and persistent modules
loaded_mods = list(set(loaded_mods) | set(persist_mods))
# Intersection of proposed modules and loaded modules
to_unload = list(set(mods) & set(loaded_mods))
if to_unload:
if __opts__['test']:
ret['result'] = None
if len(to_unload) == 1:
_append_comment(ret, 'Kernel module {0} is set to be removed'.format(to_unload[0]))
elif len(to_unload) > 1:
_append_comment(ret, 'Kernel modules {0} are set to be removed'.format(', '.join(to_unload)))
return ret
# Unload modules and collect results
unloaded = {'yes': [], 'no': [], 'failed': []}
for mod in to_unload:
unload_result = __salt__['kmod.remove'](mod, persist, comment)
if isinstance(unload_result, (list, tuple)):
if unload_result:
for module in unload_result:
ret['changes'][module] = 'removed'
unloaded['yes'].append(mod)
else:
ret['result'] = False
unloaded['no'].append(mod)
else:
ret['result'] = False
unloaded['failed'].append([mod, unload_result])
# Update comment with results
if len(unloaded['yes']) == 1:
_append_comment(ret, 'Removed kernel module {0}'.format(unloaded['yes'][0]))
elif len(unloaded['yes']) > 1:
_append_comment(ret, 'Removed kernel modules {0}'.format(', '.join(unloaded['yes'])))
if len(unloaded['no']) == 1:
_append_comment(ret, 'Failed to remove kernel module {0}'.format(unloaded['no'][0]))
if len(unloaded['no']) > 1:
_append_comment(ret, 'Failed to remove kernel modules {0}'.format(', '.join(unloaded['no'])))
if unloaded['failed']:
for mod, msg in unloaded['failed']:
_append_comment(ret, 'Failed to remove kernel module {0}: {1}'.format(mod, msg))
return ret
else:
if len(mods) == 1:
ret['comment'] = 'Kernel module {0} is already removed'.format(mods[0])
else:
ret['comment'] = 'Kernel modules {0} are already removed'.format(', '.join(mods))
return ret
|
def function[absent, parameter[name, persist, comment, mods]]:
constant[
Verify that the named kernel module is not loaded
name
The name of the kernel module to verify is not loaded
persist
Remove module from ``/etc/modules``
comment
Comment out module in ``/etc/modules`` rather than remove it
mods
A list of modules to verify are unloaded. If this argument is used,
the ``name`` argument, although still required, is not used, and
becomes a placeholder
.. versionadded:: 2016.3.0
]
if <ast.UnaryOp object at 0x7da1b1c1bd30> begin[:]
variable[mods] assign[=] list[[<ast.Name object at 0x7da1b1c1bb50>]]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c1ba90>, <ast.Constant object at 0x7da1b1c1ba60>, <ast.Constant object at 0x7da1b1c1ba30>, <ast.Constant object at 0x7da1b1c1ba00>], [<ast.Name object at 0x7da1b1c1b9d0>, <ast.Constant object at 0x7da1b1c1b9a0>, <ast.Dict object at 0x7da1b1c1b970>, <ast.Constant object at 0x7da1b1c1b940>]]
variable[loaded_mods] assign[=] call[call[name[__salt__]][constant[kmod.mod_list]], parameter[]]
if name[persist] begin[:]
variable[persist_mods] assign[=] call[call[name[__salt__]][constant[kmod.mod_list]], parameter[constant[True]]]
variable[loaded_mods] assign[=] call[name[list], parameter[binary_operation[call[name[set], parameter[name[loaded_mods]]] <ast.BitOr object at 0x7da2590d6aa0> call[name[set], parameter[name[persist_mods]]]]]]
variable[to_unload] assign[=] call[name[list], parameter[binary_operation[call[name[set], parameter[name[mods]]] <ast.BitAnd object at 0x7da2590d6b60> call[name[set], parameter[name[loaded_mods]]]]]]
if name[to_unload] begin[:]
if call[name[__opts__]][constant[test]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[None]
if compare[call[name[len], parameter[name[to_unload]]] equal[==] constant[1]] begin[:]
call[name[_append_comment], parameter[name[ret], call[constant[Kernel module {0} is set to be removed].format, parameter[call[name[to_unload]][constant[0]]]]]]
return[name[ret]]
variable[unloaded] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c1a860>, <ast.Constant object at 0x7da1b1c1a830>, <ast.Constant object at 0x7da1b1c1a800>], [<ast.List object at 0x7da1b1c1a7d0>, <ast.List object at 0x7da1b1c1a7a0>, <ast.List object at 0x7da1b1c1a770>]]
for taget[name[mod]] in starred[name[to_unload]] begin[:]
variable[unload_result] assign[=] call[call[name[__salt__]][constant[kmod.remove]], parameter[name[mod], name[persist], name[comment]]]
if call[name[isinstance], parameter[name[unload_result], tuple[[<ast.Name object at 0x7da1b1c1a410>, <ast.Name object at 0x7da1b1c1a3e0>]]]] begin[:]
if name[unload_result] begin[:]
for taget[name[module]] in starred[name[unload_result]] begin[:]
call[call[name[ret]][constant[changes]]][name[module]] assign[=] constant[removed]
call[call[name[unloaded]][constant[yes]].append, parameter[name[mod]]]
if compare[call[name[len], parameter[call[name[unloaded]][constant[yes]]]] equal[==] constant[1]] begin[:]
call[name[_append_comment], parameter[name[ret], call[constant[Removed kernel module {0}].format, parameter[call[call[name[unloaded]][constant[yes]]][constant[0]]]]]]
if compare[call[name[len], parameter[call[name[unloaded]][constant[no]]]] equal[==] constant[1]] begin[:]
call[name[_append_comment], parameter[name[ret], call[constant[Failed to remove kernel module {0}].format, parameter[call[call[name[unloaded]][constant[no]]][constant[0]]]]]]
if compare[call[name[len], parameter[call[name[unloaded]][constant[no]]]] greater[>] constant[1]] begin[:]
call[name[_append_comment], parameter[name[ret], call[constant[Failed to remove kernel modules {0}].format, parameter[call[constant[, ].join, parameter[call[name[unloaded]][constant[no]]]]]]]]
if call[name[unloaded]][constant[failed]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b2137280>, <ast.Name object at 0x7da1b2137a00>]]] in starred[call[name[unloaded]][constant[failed]]] begin[:]
call[name[_append_comment], parameter[name[ret], call[constant[Failed to remove kernel module {0}: {1}].format, parameter[name[mod], name[msg]]]]]
return[name[ret]]
|
keyword[def] identifier[absent] ( identifier[name] , identifier[persist] = keyword[False] , identifier[comment] = keyword[True] , identifier[mods] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[mods] ,( identifier[list] , identifier[tuple] )):
identifier[mods] =[ identifier[name] ]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] : keyword[True] ,
literal[string] :{},
literal[string] : literal[string] }
identifier[loaded_mods] = identifier[__salt__] [ literal[string] ]()
keyword[if] identifier[persist] :
identifier[persist_mods] = identifier[__salt__] [ literal[string] ]( keyword[True] )
identifier[loaded_mods] = identifier[list] ( identifier[set] ( identifier[loaded_mods] )| identifier[set] ( identifier[persist_mods] ))
identifier[to_unload] = identifier[list] ( identifier[set] ( identifier[mods] )& identifier[set] ( identifier[loaded_mods] ))
keyword[if] identifier[to_unload] :
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[None]
keyword[if] identifier[len] ( identifier[to_unload] )== literal[int] :
identifier[_append_comment] ( identifier[ret] , literal[string] . identifier[format] ( identifier[to_unload] [ literal[int] ]))
keyword[elif] identifier[len] ( identifier[to_unload] )> literal[int] :
identifier[_append_comment] ( identifier[ret] , literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[to_unload] )))
keyword[return] identifier[ret]
identifier[unloaded] ={ literal[string] :[], literal[string] :[], literal[string] :[]}
keyword[for] identifier[mod] keyword[in] identifier[to_unload] :
identifier[unload_result] = identifier[__salt__] [ literal[string] ]( identifier[mod] , identifier[persist] , identifier[comment] )
keyword[if] identifier[isinstance] ( identifier[unload_result] ,( identifier[list] , identifier[tuple] )):
keyword[if] identifier[unload_result] :
keyword[for] identifier[module] keyword[in] identifier[unload_result] :
identifier[ret] [ literal[string] ][ identifier[module] ]= literal[string]
identifier[unloaded] [ literal[string] ]. identifier[append] ( identifier[mod] )
keyword[else] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[unloaded] [ literal[string] ]. identifier[append] ( identifier[mod] )
keyword[else] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[unloaded] [ literal[string] ]. identifier[append] ([ identifier[mod] , identifier[unload_result] ])
keyword[if] identifier[len] ( identifier[unloaded] [ literal[string] ])== literal[int] :
identifier[_append_comment] ( identifier[ret] , literal[string] . identifier[format] ( identifier[unloaded] [ literal[string] ][ literal[int] ]))
keyword[elif] identifier[len] ( identifier[unloaded] [ literal[string] ])> literal[int] :
identifier[_append_comment] ( identifier[ret] , literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[unloaded] [ literal[string] ])))
keyword[if] identifier[len] ( identifier[unloaded] [ literal[string] ])== literal[int] :
identifier[_append_comment] ( identifier[ret] , literal[string] . identifier[format] ( identifier[unloaded] [ literal[string] ][ literal[int] ]))
keyword[if] identifier[len] ( identifier[unloaded] [ literal[string] ])> literal[int] :
identifier[_append_comment] ( identifier[ret] , literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[unloaded] [ literal[string] ])))
keyword[if] identifier[unloaded] [ literal[string] ]:
keyword[for] identifier[mod] , identifier[msg] keyword[in] identifier[unloaded] [ literal[string] ]:
identifier[_append_comment] ( identifier[ret] , literal[string] . identifier[format] ( identifier[mod] , identifier[msg] ))
keyword[return] identifier[ret]
keyword[else] :
keyword[if] identifier[len] ( identifier[mods] )== literal[int] :
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[mods] [ literal[int] ])
keyword[else] :
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[mods] ))
keyword[return] identifier[ret]
|
def absent(name, persist=False, comment=True, mods=None):
"""
Verify that the named kernel module is not loaded
name
The name of the kernel module to verify is not loaded
persist
Remove module from ``/etc/modules``
comment
Comment out module in ``/etc/modules`` rather than remove it
mods
A list of modules to verify are unloaded. If this argument is used,
the ``name`` argument, although still required, is not used, and
becomes a placeholder
.. versionadded:: 2016.3.0
"""
if not isinstance(mods, (list, tuple)):
mods = [name] # depends on [control=['if'], data=[]]
ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''}
loaded_mods = __salt__['kmod.mod_list']()
if persist:
persist_mods = __salt__['kmod.mod_list'](True)
# Union of loaded modules and persistent modules
loaded_mods = list(set(loaded_mods) | set(persist_mods)) # depends on [control=['if'], data=[]]
# Intersection of proposed modules and loaded modules
to_unload = list(set(mods) & set(loaded_mods))
if to_unload:
if __opts__['test']:
ret['result'] = None
if len(to_unload) == 1:
_append_comment(ret, 'Kernel module {0} is set to be removed'.format(to_unload[0])) # depends on [control=['if'], data=[]]
elif len(to_unload) > 1:
_append_comment(ret, 'Kernel modules {0} are set to be removed'.format(', '.join(to_unload))) # depends on [control=['if'], data=[]]
return ret # depends on [control=['if'], data=[]]
# Unload modules and collect results
unloaded = {'yes': [], 'no': [], 'failed': []}
for mod in to_unload:
unload_result = __salt__['kmod.remove'](mod, persist, comment)
if isinstance(unload_result, (list, tuple)):
if unload_result:
for module in unload_result:
ret['changes'][module] = 'removed' # depends on [control=['for'], data=['module']]
unloaded['yes'].append(mod) # depends on [control=['if'], data=[]]
else:
ret['result'] = False
unloaded['no'].append(mod) # depends on [control=['if'], data=[]]
else:
ret['result'] = False
unloaded['failed'].append([mod, unload_result]) # depends on [control=['for'], data=['mod']]
# Update comment with results
if len(unloaded['yes']) == 1:
_append_comment(ret, 'Removed kernel module {0}'.format(unloaded['yes'][0])) # depends on [control=['if'], data=[]]
elif len(unloaded['yes']) > 1:
_append_comment(ret, 'Removed kernel modules {0}'.format(', '.join(unloaded['yes']))) # depends on [control=['if'], data=[]]
if len(unloaded['no']) == 1:
_append_comment(ret, 'Failed to remove kernel module {0}'.format(unloaded['no'][0])) # depends on [control=['if'], data=[]]
if len(unloaded['no']) > 1:
_append_comment(ret, 'Failed to remove kernel modules {0}'.format(', '.join(unloaded['no']))) # depends on [control=['if'], data=[]]
if unloaded['failed']:
for (mod, msg) in unloaded['failed']:
_append_comment(ret, 'Failed to remove kernel module {0}: {1}'.format(mod, msg)) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
return ret # depends on [control=['if'], data=[]]
else:
if len(mods) == 1:
ret['comment'] = 'Kernel module {0} is already removed'.format(mods[0]) # depends on [control=['if'], data=[]]
else:
ret['comment'] = 'Kernel modules {0} are already removed'.format(', '.join(mods))
return ret
|
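The union/intersection bookkeeping that decides what to unload, traced on sample module lists (the names are illustrative):

mods = ['dummy', 'kvm']
loaded_mods = ['loop', 'kvm', 'ext4']
persist_mods = ['dummy']

loaded_mods = list(set(loaded_mods) | set(persist_mods))  # union when persist is set
to_unload = sorted(set(mods) & set(loaded_mods))          # only modules actually present
print(to_unload)                                          # ['dummy', 'kvm']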
def query(self, **kwargs):
"""
Query the data source, subselecting data. Available keyword arguments
include
- model
- scenario
- region
- variable
Example
-------
```
Connection.query(model='MESSAGE', scenario='SSP2*',
variable=['Emissions|CO2', 'Primary Energy'])
```
"""
headers = {
'Authorization': 'Bearer {}'.format(self.auth()),
'Content-Type': 'application/json',
}
data = json.dumps(self._query_post_data(**kwargs))
url = self.base_url + 'runs/bulk/ts'
r = requests.post(url, headers=headers, data=data)
# refactor returned json object to be castable to an IamDataFrame
df = (
pd.read_json(r.content, orient='records')
.drop(columns='runId')
.rename(columns={'time': 'subannual'})
)
# check if returned dataframe has subannual disaggregation, drop if not
if pd.Series([i in [-1, 'year'] for i in df.subannual]).all():
df.drop(columns='subannual', inplace=True)
# check if there are multiple version for any model/scenario
lst = (
df[META_IDX + ['version']].drop_duplicates()
.groupby(META_IDX).count().version
)
if max(lst) > 1:
raise ValueError('multiple versions for {}'.format(
lst[lst > 1].index.to_list()))
df.drop(columns='version', inplace=True)
return df
|
def function[query, parameter[self]]:
constant[
Query the data source, subselecting data. Available keyword arguments
include
- model
- scenario
- region
- variable
Example
-------
```
Connection.query(model='MESSAGE', scenario='SSP2*',
variable=['Emissions|CO2', 'Primary Energy'])
```
]
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da1b0f183a0>, <ast.Constant object at 0x7da1b0f18100>], [<ast.Call object at 0x7da1b0f18310>, <ast.Constant object at 0x7da1b0f18af0>]]
variable[data] assign[=] call[name[json].dumps, parameter[call[name[self]._query_post_data, parameter[]]]]
variable[url] assign[=] binary_operation[name[self].base_url + constant[runs/bulk/ts]]
variable[r] assign[=] call[name[requests].post, parameter[name[url]]]
variable[df] assign[=] call[call[call[name[pd].read_json, parameter[name[r].content]].drop, parameter[]].rename, parameter[]]
if call[call[name[pd].Series, parameter[<ast.ListComp object at 0x7da18fe933a0>]].all, parameter[]] begin[:]
call[name[df].drop, parameter[]]
variable[lst] assign[=] call[call[call[call[name[df]][binary_operation[name[META_IDX] + list[[<ast.Constant object at 0x7da18bccac50>]]]].drop_duplicates, parameter[]].groupby, parameter[name[META_IDX]]].count, parameter[]].version
if compare[call[name[max], parameter[name[lst]]] greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da18bcc8df0>
call[name[df].drop, parameter[]]
return[name[df]]
|
keyword[def] identifier[query] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[headers] ={
literal[string] : literal[string] . identifier[format] ( identifier[self] . identifier[auth] ()),
literal[string] : literal[string] ,
}
identifier[data] = identifier[json] . identifier[dumps] ( identifier[self] . identifier[_query_post_data] (** identifier[kwargs] ))
identifier[url] = identifier[self] . identifier[base_url] + literal[string]
identifier[r] = identifier[requests] . identifier[post] ( identifier[url] , identifier[headers] = identifier[headers] , identifier[data] = identifier[data] )
identifier[df] =(
identifier[pd] . identifier[read_json] ( identifier[r] . identifier[content] , identifier[orient] = literal[string] )
. identifier[drop] ( identifier[columns] = literal[string] )
. identifier[rename] ( identifier[columns] ={ literal[string] : literal[string] })
)
keyword[if] identifier[pd] . identifier[Series] ([ identifier[i] keyword[in] [- literal[int] , literal[string] ] keyword[for] identifier[i] keyword[in] identifier[df] . identifier[subannual] ]). identifier[all] ():
identifier[df] . identifier[drop] ( identifier[columns] = literal[string] , identifier[inplace] = keyword[True] )
identifier[lst] =(
identifier[df] [ identifier[META_IDX] +[ literal[string] ]]. identifier[drop_duplicates] ()
. identifier[groupby] ( identifier[META_IDX] ). identifier[count] (). identifier[version]
)
keyword[if] identifier[max] ( identifier[lst] )> literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] (
identifier[lst] [ identifier[lst] > literal[int] ]. identifier[index] . identifier[to_list] ()))
identifier[df] . identifier[drop] ( identifier[columns] = literal[string] , identifier[inplace] = keyword[True] )
keyword[return] identifier[df]
|
def query(self, **kwargs):
"""
Query the data source, subselecting data. Available keyword arguments
include
- model
- scenario
- region
- variable
Example
-------
```
Connection.query(model='MESSAGE', scenario='SSP2*',
variable=['Emissions|CO2', 'Primary Energy'])
```
"""
headers = {'Authorization': 'Bearer {}'.format(self.auth()), 'Content-Type': 'application/json'}
data = json.dumps(self._query_post_data(**kwargs))
url = self.base_url + 'runs/bulk/ts'
r = requests.post(url, headers=headers, data=data)
# refactor returned json object to be castable to an IamDataFrame
df = pd.read_json(r.content, orient='records').drop(columns='runId').rename(columns={'time': 'subannual'})
# check if returned dataframe has subannual disaggregation, drop if not
if pd.Series([i in [-1, 'year'] for i in df.subannual]).all():
df.drop(columns='subannual', inplace=True) # depends on [control=['if'], data=[]]
    # check if there are multiple versions for any model/scenario
lst = df[META_IDX + ['version']].drop_duplicates().groupby(META_IDX).count().version
if max(lst) > 1:
raise ValueError('multiple versions for {}'.format(lst[lst > 1].index.to_list())) # depends on [control=['if'], data=[]]
df.drop(columns='version', inplace=True)
return df
|
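The multiple-version guard near the end of query, reproduced on a toy frame (pandas required; the duplicate versions are contrived):

import pandas as pd

META_IDX = ['model', 'scenario']
df = pd.DataFrame({'model': ['m', 'm'], 'scenario': ['s', 's'],
                   'version': [1, 2], 'value': [0.1, 0.2]})
lst = (df[META_IDX + ['version']].drop_duplicates()
       .groupby(META_IDX).count().version)
if max(lst) > 1:
    print('multiple versions for', lst[lst > 1].index.to_list())
# multiple versions for [('m', 's')]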
def extract_common_fields(self, data):
"""Extract fields from a basic user query."""
email = None
for curr_email in data.get("emails", []):
email = email or curr_email.get("email")
if curr_email.get("verified", False) and \
curr_email.get("primary", False):
email = curr_email.get("email")
return dict(
email=email,
id=data.get('id'),
name=data.get('name'),
first_name=data.get('first_name'),
last_name=data.get('last_name'),
image_url=data.get('image_url')
)
|
def function[extract_common_fields, parameter[self, data]]:
constant[Extract fields from a basic user query.]
variable[email] assign[=] constant[None]
for taget[name[curr_email]] in starred[call[name[data].get, parameter[constant[emails], list[[]]]]] begin[:]
variable[email] assign[=] <ast.BoolOp object at 0x7da204963bb0>
if <ast.BoolOp object at 0x7da204962b30> begin[:]
variable[email] assign[=] call[name[curr_email].get, parameter[constant[email]]]
return[call[name[dict], parameter[]]]
|
keyword[def] identifier[extract_common_fields] ( identifier[self] , identifier[data] ):
literal[string]
identifier[email] = keyword[None]
keyword[for] identifier[curr_email] keyword[in] identifier[data] . identifier[get] ( literal[string] ,[]):
identifier[email] = identifier[email] keyword[or] identifier[curr_email] . identifier[get] ( literal[string] )
keyword[if] identifier[curr_email] . identifier[get] ( literal[string] , keyword[False] ) keyword[and] identifier[curr_email] . identifier[get] ( literal[string] , keyword[False] ):
identifier[email] = identifier[curr_email] . identifier[get] ( literal[string] )
keyword[return] identifier[dict] (
identifier[email] = identifier[email] ,
identifier[id] = identifier[data] . identifier[get] ( literal[string] ),
identifier[name] = identifier[data] . identifier[get] ( literal[string] ),
identifier[first_name] = identifier[data] . identifier[get] ( literal[string] ),
identifier[last_name] = identifier[data] . identifier[get] ( literal[string] ),
identifier[image_url] = identifier[data] . identifier[get] ( literal[string] )
)
|
def extract_common_fields(self, data):
"""Extract fields from a basic user query."""
email = None
for curr_email in data.get('emails', []):
email = email or curr_email.get('email')
if curr_email.get('verified', False) and curr_email.get('primary', False):
email = curr_email.get('email') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['curr_email']]
return dict(email=email, id=data.get('id'), name=data.get('name'), first_name=data.get('first_name'), last_name=data.get('last_name'), image_url=data.get('image_url'))
|
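The email-selection loop from extract_common_fields, inlined over a sample payload to show that a verified primary address overrides the first one seen (the data is made up):

data = {'emails': [
    {'email': 'first@example.com', 'verified': False},
    {'email': 'main@example.com', 'verified': True, 'primary': True},
]}

email = None
for curr_email in data.get('emails', []):
    email = email or curr_email.get('email')        # remember the first address
    if curr_email.get('verified', False) and curr_email.get('primary', False):
        email = curr_email.get('email')             # verified primary wins
print(email)  # main@example.com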
def cmd_string(name, cmd):
# type: (AName, ACmd) -> ADefine
"""Define a string parameter coming from a shell command to be used within
this YAML file. Trailing newlines will be stripped."""
value = subprocess.check_output(cmd, shell=True).rstrip("\n")
return Define(name, value)
|
def function[cmd_string, parameter[name, cmd]]:
constant[Define a string parameter coming from a shell command to be used within
this YAML file. Trailing newlines will be stripped.]
variable[value] assign[=] call[call[name[subprocess].check_output, parameter[name[cmd]]].rstrip, parameter[constant[
]]]
return[call[name[Define], parameter[name[name], name[value]]]]
|
keyword[def] identifier[cmd_string] ( identifier[name] , identifier[cmd] ):
literal[string]
identifier[value] = identifier[subprocess] . identifier[check_output] ( identifier[cmd] , identifier[shell] = keyword[True] ). identifier[rstrip] ( literal[string] )
keyword[return] identifier[Define] ( identifier[name] , identifier[value] )
|
def cmd_string(name, cmd):
# type: (AName, ACmd) -> ADefine
'Define a string parameter coming from a shell command to be used within\n this YAML file. Trailing newlines will be stripped.'
value = subprocess.check_output(cmd, shell=True).rstrip('\n')
return Define(name, value)
|
def py2round(value):
"""Round values as in Python 2, for Python 3 compatibility.
All x.5 values are rounded away from zero.
In Python 3, this has changed to avoid bias: when x is even,
rounding is towards zero, when x is odd, rounding is away
from zero. Thus, in Python 3, round(2.5) results in 2,
round(3.5) is 4.
Python 3 also returns an int; Python 2 returns a float.
"""
if value > 0:
return float(floor(float(value)+0.5))
else:
return float(ceil(float(value)-0.5))
|
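A quick check of the behaviour described in the docstring above: Python 3's built-in round() uses banker's rounding, while py2round always rounds x.5 away from zero and returns a float.

from math import ceil, floor

def py2round(value):
    if value > 0:
        return float(floor(float(value) + 0.5))
    else:
        return float(ceil(float(value) - 0.5))

assert round(2.5) == 2 and round(3.5) == 4          # Python 3: round half to even
assert py2round(2.5) == 3.0 and py2round(3.5) == 4.0
assert py2round(-2.5) == -3.0                       # away from zero on both sides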
def function[py2round, parameter[value]]:
constant[Round values as in Python 2, for Python 3 compatibility.
All x.5 values are rounded away from zero.
In Python 3, this has changed to avoid bias: when x is even,
rounding is towards zero, when x is odd, rounding is away
from zero. Thus, in Python 3, round(2.5) results in 2,
round(3.5) is 4.
Python 3 also returns an int; Python 2 returns a float.
]
if compare[name[value] greater[>] constant[0]] begin[:]
return[call[name[float], parameter[call[name[floor], parameter[binary_operation[call[name[float], parameter[name[value]]] + constant[0.5]]]]]]]
|
keyword[def] identifier[py2round] ( identifier[value] ):
literal[string]
keyword[if] identifier[value] > literal[int] :
keyword[return] identifier[float] ( identifier[floor] ( identifier[float] ( identifier[value] )+ literal[int] ))
keyword[else] :
keyword[return] identifier[float] ( identifier[ceil] ( identifier[float] ( identifier[value] )- literal[int] ))
|
def py2round(value):
"""Round values as in Python 2, for Python 3 compatibility.
All x.5 values are rounded away from zero.
In Python 3, this has changed to avoid bias: when x is even,
rounding is towards zero, when x is odd, rounding is away
from zero. Thus, in Python 3, round(2.5) results in 2,
round(3.5) is 4.
Python 3 also returns an int; Python 2 returns a float.
"""
if value > 0:
return float(floor(float(value) + 0.5)) # depends on [control=['if'], data=['value']]
else:
return float(ceil(float(value) - 0.5))
|
def refresh(self):
"""!
\~english
Update current view content to display
Supported: JMRPiDisplay_SSD1306 and Adafruit SSD1306 driver
\~chinese
更新当前视图内容到显示屏
支持: JMRPiDisplay_SSD1306 和 Adafruit SSD1306 driver
"""
try:
# support for RPiDisplay SSD1306 driver
self.Display.setImage( self._catchCurrentViewContent() )
except:
try:
# support for Adafruit SSD1306 driver
self.Display.image( self._catchCurrentViewContent() )
except:
                raise RuntimeError("Cannot update image to buffer.")
self.Display.display()
|
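A standalone sketch of the driver-fallback pattern used in refresh() above, with two invented stub drivers. The original catches any failure with a bare except; this sketch narrows the probe to AttributeError for clarity.

class RPiStyleDisplay(object):
    def setImage(self, img): print("setImage:", img)
    def display(self): print("flushed")

class AdafruitStyleDisplay(object):
    def image(self, img): print("image:", img)
    def display(self): print("flushed")

def push(display, content):
    try:
        display.setImage(content)     # JMRPiDisplay-style API
    except AttributeError:
        display.image(content)        # Adafruit-style API
    display.display()

push(RPiStyleDisplay(), "frame-1")
push(AdafruitStyleDisplay(), "frame-2")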
def function[refresh, parameter[self]]:
constant[!
\~english
Update current view content to display
Supported: JMRPiDisplay_SSD1306 and Adafruit SSD1306 driver
\~chinese
更新当前视图内容到显示屏
支持: JMRPiDisplay_SSD1306 和 Adafruit SSD1306 driver
]
<ast.Try object at 0x7da18ede4400>
call[name[self].Display.display, parameter[]]
|
keyword[def] identifier[refresh] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[self] . identifier[Display] . identifier[setImage] ( identifier[self] . identifier[_catchCurrentViewContent] ())
keyword[except] :
keyword[try] :
identifier[self] . identifier[Display] . identifier[image] ( identifier[self] . identifier[_catchCurrentViewContent] ())
keyword[except] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[self] . identifier[Display] . identifier[display] ()
|
def refresh(self):
"""!
\\~english
Update current view content to display
Supported: JMRPiDisplay_SSD1306 and Adafruit SSD1306 driver
\\~chinese
更新当前视图内容到显示屏
支持: JMRPiDisplay_SSD1306 和 Adafruit SSD1306 driver
"""
try:
# support for RPiDisplay SSD1306 driver
self.Display.setImage(self._catchCurrentViewContent()) # depends on [control=['try'], data=[]]
except:
try:
# support for Adafruit SSD1306 driver
self.Display.image(self._catchCurrentViewContent()) # depends on [control=['try'], data=[]]
except:
raise RuntimeError('Cannot update image to buffer.') # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
self.Display.display()
|
def get_positions(self):
"""
Returns a list of positions.
http://dev.wheniwork.com/#listing-positions
"""
url = "/2/positions"
data = self._get_resource(url)
positions = []
for entry in data['positions']:
positions.append(self.position_from_json(entry))
return positions
|
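A hypothetical end-to-end sketch of the listing pattern in get_positions above: the resource call returns a JSON document with a 'positions' array, and each entry is mapped through a constructor. FakeClient and Position are invented stand-ins for the real client and model classes.

class Position(object):
    def __init__(self, entry): self.name = entry["name"]

class FakeClient(object):
    def _get_resource(self, url):
        return {"positions": [{"name": "Cashier"}, {"name": "Manager"}]}
    def position_from_json(self, entry):
        return Position(entry)
    def get_positions(self):
        data = self._get_resource("/2/positions")
        return [self.position_from_json(e) for e in data["positions"]]

print([p.name for p in FakeClient().get_positions()])  # ['Cashier', 'Manager']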
def function[get_positions, parameter[self]]:
constant[
Returns a list of positions.
http://dev.wheniwork.com/#listing-positions
]
variable[url] assign[=] constant[/2/positions]
variable[data] assign[=] call[name[self]._get_resource, parameter[name[url]]]
variable[positions] assign[=] list[[]]
for taget[name[entry]] in starred[call[name[data]][constant[positions]]] begin[:]
call[name[positions].append, parameter[call[name[self].position_from_json, parameter[name[entry]]]]]
return[name[positions]]
|
keyword[def] identifier[get_positions] ( identifier[self] ):
literal[string]
identifier[url] = literal[string]
identifier[data] = identifier[self] . identifier[_get_resource] ( identifier[url] )
identifier[positions] =[]
keyword[for] identifier[entry] keyword[in] identifier[data] [ literal[string] ]:
identifier[positions] . identifier[append] ( identifier[self] . identifier[position_from_json] ( identifier[entry] ))
keyword[return] identifier[positions]
|
def get_positions(self):
"""
Returns a list of positions.
http://dev.wheniwork.com/#listing-positions
"""
url = '/2/positions'
data = self._get_resource(url)
positions = []
for entry in data['positions']:
positions.append(self.position_from_json(entry)) # depends on [control=['for'], data=['entry']]
return positions
|
def lifetimes(self, dates, include_start_date, country_codes):
"""
Compute a DataFrame representing asset lifetimes for the specified date
range.
Parameters
----------
dates : pd.DatetimeIndex
The dates for which to compute lifetimes.
include_start_date : bool
Whether or not to count the asset as alive on its start_date.
This is useful in a backtesting context where `lifetimes` is being
used to signify "do I have data for this asset as of the morning of
this date?" For many financial metrics, (e.g. daily close), data
isn't available for an asset until the end of the asset's first
day.
country_codes : iterable[str]
The country codes to get lifetimes for.
Returns
-------
lifetimes : pd.DataFrame
A frame of dtype bool with `dates` as index and an Int64Index of
assets as columns. The value at `lifetimes.loc[date, asset]` will
be True iff `asset` existed on `date`. If `include_start_date` is
False, then lifetimes.loc[date, asset] will be false when date ==
asset.start_date.
See Also
--------
numpy.putmask
zipline.pipeline.engine.SimplePipelineEngine._compute_root_mask
"""
if isinstance(country_codes, string_types):
raise TypeError(
"Got string {!r} instead of an iterable of strings in "
"AssetFinder.lifetimes.".format(country_codes),
)
# normalize to a cache-key so that we can memoize results.
country_codes = frozenset(country_codes)
lifetimes = self._asset_lifetimes.get(country_codes)
if lifetimes is None:
self._asset_lifetimes[country_codes] = lifetimes = (
self._compute_asset_lifetimes(country_codes)
)
raw_dates = as_column(dates.asi8)
if include_start_date:
mask = lifetimes.start <= raw_dates
else:
mask = lifetimes.start < raw_dates
mask &= (raw_dates <= lifetimes.end)
return pd.DataFrame(mask, index=dates, columns=lifetimes.sid)
|
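A minimal numpy sketch of the mask construction at the end of lifetimes() above: dates become a column vector, per-asset start/end stamps a row each, and broadcasting yields an (n_dates, n_assets) boolean grid. All values here are invented integers standing in for nanosecond timestamps.

import numpy as np

dates = np.array([1, 2, 3, 4])[:, None]   # as_column(dates.asi8)
start = np.array([1, 3])                  # per-asset start stamps
end = np.array([2, 4])                    # per-asset end stamps

include_start_date = False
mask = (start <= dates) if include_start_date else (start < dates)
mask &= dates <= end
print(mask)  # row i, column j: was asset j alive on date i?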
def function[lifetimes, parameter[self, dates, include_start_date, country_codes]]:
constant[
Compute a DataFrame representing asset lifetimes for the specified date
range.
Parameters
----------
dates : pd.DatetimeIndex
The dates for which to compute lifetimes.
include_start_date : bool
Whether or not to count the asset as alive on its start_date.
This is useful in a backtesting context where `lifetimes` is being
used to signify "do I have data for this asset as of the morning of
this date?" For many financial metrics, (e.g. daily close), data
isn't available for an asset until the end of the asset's first
day.
country_codes : iterable[str]
The country codes to get lifetimes for.
Returns
-------
lifetimes : pd.DataFrame
A frame of dtype bool with `dates` as index and an Int64Index of
assets as columns. The value at `lifetimes.loc[date, asset]` will
be True iff `asset` existed on `date`. If `include_start_date` is
False, then lifetimes.loc[date, asset] will be false when date ==
asset.start_date.
See Also
--------
numpy.putmask
zipline.pipeline.engine.SimplePipelineEngine._compute_root_mask
]
if call[name[isinstance], parameter[name[country_codes], name[string_types]]] begin[:]
<ast.Raise object at 0x7da1b2041030>
variable[country_codes] assign[=] call[name[frozenset], parameter[name[country_codes]]]
variable[lifetimes] assign[=] call[name[self]._asset_lifetimes.get, parameter[name[country_codes]]]
if compare[name[lifetimes] is constant[None]] begin[:]
call[name[self]._asset_lifetimes][name[country_codes]] assign[=] call[name[self]._compute_asset_lifetimes, parameter[name[country_codes]]]
variable[raw_dates] assign[=] call[name[as_column], parameter[name[dates].asi8]]
if name[include_start_date] begin[:]
variable[mask] assign[=] compare[name[lifetimes].start less_or_equal[<=] name[raw_dates]]
<ast.AugAssign object at 0x7da1b2040940>
return[call[name[pd].DataFrame, parameter[name[mask]]]]
|
keyword[def] identifier[lifetimes] ( identifier[self] , identifier[dates] , identifier[include_start_date] , identifier[country_codes] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[country_codes] , identifier[string_types] ):
keyword[raise] identifier[TypeError] (
literal[string]
literal[string] . identifier[format] ( identifier[country_codes] ),
)
identifier[country_codes] = identifier[frozenset] ( identifier[country_codes] )
identifier[lifetimes] = identifier[self] . identifier[_asset_lifetimes] . identifier[get] ( identifier[country_codes] )
keyword[if] identifier[lifetimes] keyword[is] keyword[None] :
identifier[self] . identifier[_asset_lifetimes] [ identifier[country_codes] ]= identifier[lifetimes] =(
identifier[self] . identifier[_compute_asset_lifetimes] ( identifier[country_codes] )
)
identifier[raw_dates] = identifier[as_column] ( identifier[dates] . identifier[asi8] )
keyword[if] identifier[include_start_date] :
identifier[mask] = identifier[lifetimes] . identifier[start] <= identifier[raw_dates]
keyword[else] :
identifier[mask] = identifier[lifetimes] . identifier[start] < identifier[raw_dates]
identifier[mask] &=( identifier[raw_dates] <= identifier[lifetimes] . identifier[end] )
keyword[return] identifier[pd] . identifier[DataFrame] ( identifier[mask] , identifier[index] = identifier[dates] , identifier[columns] = identifier[lifetimes] . identifier[sid] )
|
def lifetimes(self, dates, include_start_date, country_codes):
"""
Compute a DataFrame representing asset lifetimes for the specified date
range.
Parameters
----------
dates : pd.DatetimeIndex
The dates for which to compute lifetimes.
include_start_date : bool
Whether or not to count the asset as alive on its start_date.
This is useful in a backtesting context where `lifetimes` is being
used to signify "do I have data for this asset as of the morning of
this date?" For many financial metrics, (e.g. daily close), data
isn't available for an asset until the end of the asset's first
day.
country_codes : iterable[str]
The country codes to get lifetimes for.
Returns
-------
lifetimes : pd.DataFrame
A frame of dtype bool with `dates` as index and an Int64Index of
assets as columns. The value at `lifetimes.loc[date, asset]` will
be True iff `asset` existed on `date`. If `include_start_date` is
False, then lifetimes.loc[date, asset] will be false when date ==
asset.start_date.
See Also
--------
numpy.putmask
zipline.pipeline.engine.SimplePipelineEngine._compute_root_mask
"""
if isinstance(country_codes, string_types):
raise TypeError('Got string {!r} instead of an iterable of strings in AssetFinder.lifetimes.'.format(country_codes)) # depends on [control=['if'], data=[]]
# normalize to a cache-key so that we can memoize results.
country_codes = frozenset(country_codes)
lifetimes = self._asset_lifetimes.get(country_codes)
if lifetimes is None:
self._asset_lifetimes[country_codes] = lifetimes = self._compute_asset_lifetimes(country_codes) # depends on [control=['if'], data=['lifetimes']]
raw_dates = as_column(dates.asi8)
if include_start_date:
mask = lifetimes.start <= raw_dates # depends on [control=['if'], data=[]]
else:
mask = lifetimes.start < raw_dates
mask &= raw_dates <= lifetimes.end
return pd.DataFrame(mask, index=dates, columns=lifetimes.sid)
|
def get_after(self, timestamp, s=None):
"""
Find all the (available) logs that are after the given time stamp.
If `s` is not supplied, then all lines are used. Otherwise, only the
    lines that contain `s` are used. `s` can be either a single string or a
    string list. For a list, all keywords in the list must be found in each
line.
This method then finds all lines which have a time stamp after the
given `timestamp`. Lines that do not contain a time stamp
are considered to be part of the previous line and are therefore
included if the last log line was included or excluded otherwise.
Time stamps are recognised by converting the time format into a
regular expression which matches the time format in the string. This
is then searched for in each line in turn. Only lines with a time
stamp matching this expression will trigger the decision to include
or exclude lines. Therefore, if the log for some reason does not
contain a time stamp that matches this format, no lines will be
returned.
The time format is given in ``strptime()`` format, in the object's
``time_format`` property. Users of the object should **not** change
this property; instead, the parser should subclass
:class:`LogFileOutput` and change the ``time_format`` property.
Some logs, regrettably, change time stamps formats across different
lines, or change time stamp formats in different versions of the
program. In order to accommodate this, the timestamp format can be a
list of ``strptime()`` format strings. These are combined as
alternatives in the regular expression, and are given to ``strptime``
in order. These can also be listed as the values of a dict, e.g.::
{'pre_10.1.5': '%y%m%d %H:%M:%S', 'post_10.1.5': '%Y-%m-%d %H:%M:%S'}
.. note::
Some logs - notably /var/log/messages - do not contain a year
        in the timestamp. This is detected by the absence of a '%y' or '%Y' in
the time format. If that year field is absent, the year is assumed
to be the year in the given timestamp being sought. Some attempt is
made to handle logs with a rollover from December to January, by
finding when the log's timestamp (with current year assumed) is over
eleven months (specifically, 330 days) ahead of or behind the
timestamp date and shifting that log date by 365 days so that it is
more likely to be in the sought range. This paragraph is sponsored
by syslog.
Parameters:
timestamp(datetime.datetime): lines before this time are ignored.
s(str or list): one or more strings to search for.
If not supplied, all available lines are searched.
Yields:
dict:
The parsed lines with timestamps after this date in
the same format they were supplied. It at least contains the
``raw_message`` as a key.
Raises:
ParseException: If the format conversion string contains a
format that we don't recognise. In particular, no attempt is
made to recognise or parse the time zone or other obscure
values like day of year or week of year.
"""
time_format = self.time_format
# Annoyingly, strptime insists that it get the whole time string and
# nothing but the time string. However, for most logs we only have a
# string with the timestamp in it. We can't just catch the ValueError
# because at that point we do not actually have a valid datetime
# object. So we convert the time format string to a regex, use that
# to find just the timestamp, and then use strptime on that. Thanks,
# Python. All these need to cope with different languages and
# character sets. Note that we don't include time zone or other
# outputs (e.g. day-of-year) that don't usually occur in time stamps.
format_conversion_for = {
'a': r'\w{3}', 'A': r'\w+', # Week day name
'w': r'[0123456]', # Week day number
'd': r'([0 ][123456789]|[12]\d|3[01])', # Day of month
'b': r'\w{3}', 'B': r'\w+', # Month name
'm': r'([0 ]\d|1[012])', # Month number
'y': r'\d{2}', 'Y': r'\d{4}', # Year
'H': r'([01 ]\d|2[0123])', # Hour - 24 hour format
'I': r'([0 ]?\d|1[012])', # Hour - 12 hour format
'p': r'\w{2}', # AM / PM
'M': r'([012345]\d)', # Minutes
'S': r'([012345]\d|60)', # Seconds, including leap second
'f': r'\d{6}', # Microseconds
}
# Construct the regex from the time string
timefmt_re = re.compile(r'%(\w)')
def replacer(match):
if match.group(1) in format_conversion_for:
return format_conversion_for[match.group(1)]
else:
raise ParseException(
"get_after does not understand strptime format '{c}'".format(
c=match.group(0)
)
)
# Please do not attempt to be tricky and put a regular expression
# inside your time format, as we are going to also use it in
# strptime too and that may not work out so well.
# Check time_format - must be string or list. Set the 'logs_have_year'
# flag and timestamp parser function appropriately.
# Grab values of dict as a list first
if isinstance(time_format, dict):
time_format = list(time_format.values())
if isinstance(time_format, six.string_types):
logs_have_year = ('%Y' in time_format or '%y' in time_format)
time_re = re.compile('(' + timefmt_re.sub(replacer, time_format) + ')')
# Curry strptime with time_format string.
def test_parser(logstamp):
return datetime.datetime.strptime(logstamp, time_format)
parse_fn = test_parser
elif isinstance(time_format, list):
logs_have_year = all('%Y' in tf or '%y' in tf for tf in time_format)
time_re = re.compile('(' + '|'.join(
timefmt_re.sub(replacer, tf) for tf in time_format
) + ')')
def test_all_parsers(logstamp):
# One of these must match, because the regex has selected only
# strings that will match.
for tf in time_format:
try:
ts = datetime.datetime.strptime(logstamp, tf)
except ValueError:
pass
return ts
parse_fn = test_all_parsers
else:
raise ParseException(
"get_after does not recognise time formats of type {t}".format(
t=type(time_format)
)
)
# Most logs will appear in string format, but some logs (e.g.
# Messages) are available in list-of-dicts format. So we choose one
# of two 'date_compare' functions. HOWEVER: we still have to check
# the string found for a valid date, because log parsing often fails.
# Because of generators, we check this per line
# Now try to find the time stamp in each log line and add lines to
# our output if they are currently being included in the log.
eleven_months = datetime.timedelta(days=330)
including_lines = False
search_by_expression = self._valid_search(s)
for line in self.lines:
# If `s` is not None, keywords must be found in the line
if s and not search_by_expression(line):
continue
# Otherwise, search all lines
match = time_re.search(line)
if match:
logstamp = parse_fn(match.group(0))
if not logs_have_year:
# Substitute timestamp year for logstamp year
logstamp = logstamp.replace(year=timestamp.year)
if logstamp - timestamp > eleven_months:
# If timestamp in January and log in December, move
# log to previous year
logstamp = logstamp.replace(year=timestamp.year - 1)
elif timestamp - logstamp > eleven_months:
# If timestamp in December and log in January, move
# log to next year
logstamp = logstamp.replace(year=timestamp.year + 1)
if logstamp >= timestamp:
# Later - include
including_lines = True
yield self._parse_line(line)
else:
# Earlier - start excluding
including_lines = False
else:
# If we're including lines, add this continuation line
if including_lines:
yield self._parse_line(line)
|
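A compact sketch of the strptime-to-regex trick described in the docstring above: each %X directive is replaced by a regex fragment so the timestamp can be located inside a longer log line before strptime() parses it. The conversion table here is abbreviated to the directives the sample format needs.

import datetime
import re

fragment_for = {'Y': r'\d{4}', 'm': r'([0 ]\d|1[012])',
                'd': r'([0 ][1-9]|[12]\d|3[01])',
                'H': r'([01 ]\d|2[0123])', 'M': r'([012345]\d)',
                'S': r'([012345]\d|60)'}

time_format = '%Y-%m-%d %H:%M:%S'
time_re = re.compile('(' + re.sub(r'%(\w)',
                                  lambda m: fragment_for[m.group(1)],
                                  time_format) + ')')

line = 'worker[17]: 2023-11-05 12:34:56 restarting'
match = time_re.search(line)
print(datetime.datetime.strptime(match.group(0), time_format))
# -> 2023-11-05 12:34:56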
def function[get_after, parameter[self, timestamp, s]]:
constant[
Find all the (available) logs that are after the given time stamp.
If `s` is not supplied, then all lines are used. Otherwise, only the
    lines that contain `s` are used. `s` can be either a single string or a
    string list. For a list, all keywords in the list must be found in each
line.
This method then finds all lines which have a time stamp after the
given `timestamp`. Lines that do not contain a time stamp
are considered to be part of the previous line and are therefore
included if the last log line was included or excluded otherwise.
Time stamps are recognised by converting the time format into a
regular expression which matches the time format in the string. This
is then searched for in each line in turn. Only lines with a time
stamp matching this expression will trigger the decision to include
or exclude lines. Therefore, if the log for some reason does not
contain a time stamp that matches this format, no lines will be
returned.
The time format is given in ``strptime()`` format, in the object's
``time_format`` property. Users of the object should **not** change
this property; instead, the parser should subclass
:class:`LogFileOutput` and change the ``time_format`` property.
Some logs, regrettably, change time stamps formats across different
lines, or change time stamp formats in different versions of the
program. In order to accommodate this, the timestamp format can be a
list of ``strptime()`` format strings. These are combined as
alternatives in the regular expression, and are given to ``strptime``
in order. These can also be listed as the values of a dict, e.g.::
{'pre_10.1.5': '%y%m%d %H:%M:%S', 'post_10.1.5': '%Y-%m-%d %H:%M:%S'}
.. note::
Some logs - notably /var/log/messages - do not contain a year
        in the timestamp. This is detected by the absence of a '%y' or '%Y' in
the time format. If that year field is absent, the year is assumed
to be the year in the given timestamp being sought. Some attempt is
made to handle logs with a rollover from December to January, by
finding when the log's timestamp (with current year assumed) is over
eleven months (specifically, 330 days) ahead of or behind the
timestamp date and shifting that log date by 365 days so that it is
more likely to be in the sought range. This paragraph is sponsored
by syslog.
Parameters:
timestamp(datetime.datetime): lines before this time are ignored.
s(str or list): one or more strings to search for.
If not supplied, all available lines are searched.
Yields:
dict:
The parsed lines with timestamps after this date in
the same format they were supplied. It at least contains the
``raw_message`` as a key.
Raises:
ParseException: If the format conversion string contains a
format that we don't recognise. In particular, no attempt is
made to recognise or parse the time zone or other obscure
values like day of year or week of year.
]
variable[time_format] assign[=] name[self].time_format
variable[format_conversion_for] assign[=] dictionary[[<ast.Constant object at 0x7da2046239d0>, <ast.Constant object at 0x7da204620a00>, <ast.Constant object at 0x7da204622cb0>, <ast.Constant object at 0x7da204620370>, <ast.Constant object at 0x7da2046214e0>, <ast.Constant object at 0x7da204621870>, <ast.Constant object at 0x7da2046222c0>, <ast.Constant object at 0x7da204623730>, <ast.Constant object at 0x7da2046205b0>, <ast.Constant object at 0x7da204621180>, <ast.Constant object at 0x7da204621810>, <ast.Constant object at 0x7da2046226b0>, <ast.Constant object at 0x7da2046208b0>, <ast.Constant object at 0x7da204622950>, <ast.Constant object at 0x7da2046213f0>], [<ast.Constant object at 0x7da204620e50>, <ast.Constant object at 0x7da204620580>, <ast.Constant object at 0x7da204622e00>, <ast.Constant object at 0x7da204623e80>, <ast.Constant object at 0x7da2046237f0>, <ast.Constant object at 0x7da204621750>, <ast.Constant object at 0x7da204620bb0>, <ast.Constant object at 0x7da204623910>, <ast.Constant object at 0x7da204623ca0>, <ast.Constant object at 0x7da204622380>, <ast.Constant object at 0x7da204622e90>, <ast.Constant object at 0x7da2046227d0>, <ast.Constant object at 0x7da204623640>, <ast.Constant object at 0x7da204620160>, <ast.Constant object at 0x7da204622080>]]
variable[timefmt_re] assign[=] call[name[re].compile, parameter[constant[%(\w)]]]
def function[replacer, parameter[match]]:
if compare[call[name[match].group, parameter[constant[1]]] in name[format_conversion_for]] begin[:]
return[call[name[format_conversion_for]][call[name[match].group, parameter[constant[1]]]]]
if call[name[isinstance], parameter[name[time_format], name[dict]]] begin[:]
variable[time_format] assign[=] call[name[list], parameter[call[name[time_format].values, parameter[]]]]
if call[name[isinstance], parameter[name[time_format], name[six].string_types]] begin[:]
variable[logs_have_year] assign[=] <ast.BoolOp object at 0x7da204622ec0>
variable[time_re] assign[=] call[name[re].compile, parameter[binary_operation[binary_operation[constant[(] + call[name[timefmt_re].sub, parameter[name[replacer], name[time_format]]]] + constant[)]]]]
def function[test_parser, parameter[logstamp]]:
return[call[name[datetime].datetime.strptime, parameter[name[logstamp], name[time_format]]]]
variable[parse_fn] assign[=] name[test_parser]
variable[eleven_months] assign[=] call[name[datetime].timedelta, parameter[]]
variable[including_lines] assign[=] constant[False]
variable[search_by_expression] assign[=] call[name[self]._valid_search, parameter[name[s]]]
for taget[name[line]] in starred[name[self].lines] begin[:]
if <ast.BoolOp object at 0x7da20c9917e0> begin[:]
continue
variable[match] assign[=] call[name[time_re].search, parameter[name[line]]]
if name[match] begin[:]
variable[logstamp] assign[=] call[name[parse_fn], parameter[call[name[match].group, parameter[constant[0]]]]]
if <ast.UnaryOp object at 0x7da20c990940> begin[:]
variable[logstamp] assign[=] call[name[logstamp].replace, parameter[]]
if compare[binary_operation[name[logstamp] - name[timestamp]] greater[>] name[eleven_months]] begin[:]
variable[logstamp] assign[=] call[name[logstamp].replace, parameter[]]
if compare[name[logstamp] greater_or_equal[>=] name[timestamp]] begin[:]
variable[including_lines] assign[=] constant[True]
<ast.Yield object at 0x7da20c993070>
|
keyword[def] identifier[get_after] ( identifier[self] , identifier[timestamp] , identifier[s] = keyword[None] ):
literal[string]
identifier[time_format] = identifier[self] . identifier[time_format]
identifier[format_conversion_for] ={
literal[string] : literal[string] , literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] , literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] , literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
identifier[timefmt_re] = identifier[re] . identifier[compile] ( literal[string] )
keyword[def] identifier[replacer] ( identifier[match] ):
keyword[if] identifier[match] . identifier[group] ( literal[int] ) keyword[in] identifier[format_conversion_for] :
keyword[return] identifier[format_conversion_for] [ identifier[match] . identifier[group] ( literal[int] )]
keyword[else] :
keyword[raise] identifier[ParseException] (
literal[string] . identifier[format] (
identifier[c] = identifier[match] . identifier[group] ( literal[int] )
)
)
keyword[if] identifier[isinstance] ( identifier[time_format] , identifier[dict] ):
identifier[time_format] = identifier[list] ( identifier[time_format] . identifier[values] ())
keyword[if] identifier[isinstance] ( identifier[time_format] , identifier[six] . identifier[string_types] ):
identifier[logs_have_year] =( literal[string] keyword[in] identifier[time_format] keyword[or] literal[string] keyword[in] identifier[time_format] )
identifier[time_re] = identifier[re] . identifier[compile] ( literal[string] + identifier[timefmt_re] . identifier[sub] ( identifier[replacer] , identifier[time_format] )+ literal[string] )
keyword[def] identifier[test_parser] ( identifier[logstamp] ):
keyword[return] identifier[datetime] . identifier[datetime] . identifier[strptime] ( identifier[logstamp] , identifier[time_format] )
identifier[parse_fn] = identifier[test_parser]
keyword[elif] identifier[isinstance] ( identifier[time_format] , identifier[list] ):
identifier[logs_have_year] = identifier[all] ( literal[string] keyword[in] identifier[tf] keyword[or] literal[string] keyword[in] identifier[tf] keyword[for] identifier[tf] keyword[in] identifier[time_format] )
identifier[time_re] = identifier[re] . identifier[compile] ( literal[string] + literal[string] . identifier[join] (
identifier[timefmt_re] . identifier[sub] ( identifier[replacer] , identifier[tf] ) keyword[for] identifier[tf] keyword[in] identifier[time_format]
)+ literal[string] )
keyword[def] identifier[test_all_parsers] ( identifier[logstamp] ):
keyword[for] identifier[tf] keyword[in] identifier[time_format] :
keyword[try] :
identifier[ts] = identifier[datetime] . identifier[datetime] . identifier[strptime] ( identifier[logstamp] , identifier[tf] )
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[return] identifier[ts]
identifier[parse_fn] = identifier[test_all_parsers]
keyword[else] :
keyword[raise] identifier[ParseException] (
literal[string] . identifier[format] (
identifier[t] = identifier[type] ( identifier[time_format] )
)
)
identifier[eleven_months] = identifier[datetime] . identifier[timedelta] ( identifier[days] = literal[int] )
identifier[including_lines] = keyword[False]
identifier[search_by_expression] = identifier[self] . identifier[_valid_search] ( identifier[s] )
keyword[for] identifier[line] keyword[in] identifier[self] . identifier[lines] :
keyword[if] identifier[s] keyword[and] keyword[not] identifier[search_by_expression] ( identifier[line] ):
keyword[continue]
identifier[match] = identifier[time_re] . identifier[search] ( identifier[line] )
keyword[if] identifier[match] :
identifier[logstamp] = identifier[parse_fn] ( identifier[match] . identifier[group] ( literal[int] ))
keyword[if] keyword[not] identifier[logs_have_year] :
identifier[logstamp] = identifier[logstamp] . identifier[replace] ( identifier[year] = identifier[timestamp] . identifier[year] )
keyword[if] identifier[logstamp] - identifier[timestamp] > identifier[eleven_months] :
identifier[logstamp] = identifier[logstamp] . identifier[replace] ( identifier[year] = identifier[timestamp] . identifier[year] - literal[int] )
keyword[elif] identifier[timestamp] - identifier[logstamp] > identifier[eleven_months] :
identifier[logstamp] = identifier[logstamp] . identifier[replace] ( identifier[year] = identifier[timestamp] . identifier[year] + literal[int] )
keyword[if] identifier[logstamp] >= identifier[timestamp] :
identifier[including_lines] = keyword[True]
keyword[yield] identifier[self] . identifier[_parse_line] ( identifier[line] )
keyword[else] :
identifier[including_lines] = keyword[False]
keyword[else] :
keyword[if] identifier[including_lines] :
keyword[yield] identifier[self] . identifier[_parse_line] ( identifier[line] )
|
def get_after(self, timestamp, s=None):
"""
Find all the (available) logs that are after the given time stamp.
If `s` is not supplied, then all lines are used. Otherwise, only the
    lines that contain `s` are used. `s` can be either a single string or a
    string list. For a list, all keywords in the list must be found in each
line.
This method then finds all lines which have a time stamp after the
given `timestamp`. Lines that do not contain a time stamp
are considered to be part of the previous line and are therefore
included if the last log line was included or excluded otherwise.
Time stamps are recognised by converting the time format into a
regular expression which matches the time format in the string. This
is then searched for in each line in turn. Only lines with a time
stamp matching this expression will trigger the decision to include
or exclude lines. Therefore, if the log for some reason does not
contain a time stamp that matches this format, no lines will be
returned.
The time format is given in ``strptime()`` format, in the object's
``time_format`` property. Users of the object should **not** change
this property; instead, the parser should subclass
:class:`LogFileOutput` and change the ``time_format`` property.
Some logs, regrettably, change time stamps formats across different
lines, or change time stamp formats in different versions of the
program. In order to accommodate this, the timestamp format can be a
list of ``strptime()`` format strings. These are combined as
alternatives in the regular expression, and are given to ``strptime``
in order. These can also be listed as the values of a dict, e.g.::
{'pre_10.1.5': '%y%m%d %H:%M:%S', 'post_10.1.5': '%Y-%m-%d %H:%M:%S'}
.. note::
Some logs - notably /var/log/messages - do not contain a year
        in the timestamp. This is detected by the absence of a '%y' or '%Y' in
the time format. If that year field is absent, the year is assumed
to be the year in the given timestamp being sought. Some attempt is
made to handle logs with a rollover from December to January, by
finding when the log's timestamp (with current year assumed) is over
eleven months (specifically, 330 days) ahead of or behind the
timestamp date and shifting that log date by 365 days so that it is
more likely to be in the sought range. This paragraph is sponsored
by syslog.
Parameters:
timestamp(datetime.datetime): lines before this time are ignored.
s(str or list): one or more strings to search for.
If not supplied, all available lines are searched.
Yields:
dict:
The parsed lines with timestamps after this date in
the same format they were supplied. It at least contains the
``raw_message`` as a key.
Raises:
ParseException: If the format conversion string contains a
format that we don't recognise. In particular, no attempt is
made to recognise or parse the time zone or other obscure
values like day of year or week of year.
"""
time_format = self.time_format
# Annoyingly, strptime insists that it get the whole time string and
# nothing but the time string. However, for most logs we only have a
# string with the timestamp in it. We can't just catch the ValueError
# because at that point we do not actually have a valid datetime
# object. So we convert the time format string to a regex, use that
# to find just the timestamp, and then use strptime on that. Thanks,
# Python. All these need to cope with different languages and
# character sets. Note that we don't include time zone or other
# outputs (e.g. day-of-year) that don't usually occur in time stamps.
# Week day name
# Week day number
# Day of month
# Month name
# Month number
# Year
# Hour - 24 hour format
# Hour - 12 hour format
# AM / PM
# Minutes
# Seconds, including leap second
# Microseconds
format_conversion_for = {'a': '\\w{3}', 'A': '\\w+', 'w': '[0123456]', 'd': '([0 ][123456789]|[12]\\d|3[01])', 'b': '\\w{3}', 'B': '\\w+', 'm': '([0 ]\\d|1[012])', 'y': '\\d{2}', 'Y': '\\d{4}', 'H': '([01 ]\\d|2[0123])', 'I': '([0 ]?\\d|1[012])', 'p': '\\w{2}', 'M': '([012345]\\d)', 'S': '([012345]\\d|60)', 'f': '\\d{6}'}
# Construct the regex from the time string
timefmt_re = re.compile('%(\\w)')
def replacer(match):
if match.group(1) in format_conversion_for:
return format_conversion_for[match.group(1)] # depends on [control=['if'], data=['format_conversion_for']]
else:
raise ParseException("get_after does not understand strptime format '{c}'".format(c=match.group(0)))
# Please do not attempt to be tricky and put a regular expression
# inside your time format, as we are going to also use it in
# strptime too and that may not work out so well.
# Check time_format - must be string or list. Set the 'logs_have_year'
# flag and timestamp parser function appropriately.
# Grab values of dict as a list first
if isinstance(time_format, dict):
time_format = list(time_format.values()) # depends on [control=['if'], data=[]]
if isinstance(time_format, six.string_types):
logs_have_year = '%Y' in time_format or '%y' in time_format
time_re = re.compile('(' + timefmt_re.sub(replacer, time_format) + ')')
# Curry strptime with time_format string.
def test_parser(logstamp):
return datetime.datetime.strptime(logstamp, time_format)
parse_fn = test_parser # depends on [control=['if'], data=[]]
elif isinstance(time_format, list):
logs_have_year = all(('%Y' in tf or '%y' in tf for tf in time_format))
time_re = re.compile('(' + '|'.join((timefmt_re.sub(replacer, tf) for tf in time_format)) + ')')
def test_all_parsers(logstamp):
# One of these must match, because the regex has selected only
# strings that will match.
for tf in time_format:
try:
ts = datetime.datetime.strptime(logstamp, tf) # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['tf']]
return ts
parse_fn = test_all_parsers # depends on [control=['if'], data=[]]
else:
raise ParseException('get_after does not recognise time formats of type {t}'.format(t=type(time_format)))
# Most logs will appear in string format, but some logs (e.g.
# Messages) are available in list-of-dicts format. So we choose one
# of two 'date_compare' functions. HOWEVER: we still have to check
# the string found for a valid date, because log parsing often fails.
# Because of generators, we check this per line
# Now try to find the time stamp in each log line and add lines to
# our output if they are currently being included in the log.
eleven_months = datetime.timedelta(days=330)
including_lines = False
search_by_expression = self._valid_search(s)
for line in self.lines:
# If `s` is not None, keywords must be found in the line
if s and (not search_by_expression(line)):
continue # depends on [control=['if'], data=[]]
# Otherwise, search all lines
match = time_re.search(line)
if match:
logstamp = parse_fn(match.group(0))
if not logs_have_year:
# Substitute timestamp year for logstamp year
logstamp = logstamp.replace(year=timestamp.year)
if logstamp - timestamp > eleven_months:
# If timestamp in January and log in December, move
# log to previous year
logstamp = logstamp.replace(year=timestamp.year - 1) # depends on [control=['if'], data=[]]
elif timestamp - logstamp > eleven_months:
# If timestamp in December and log in January, move
# log to next year
logstamp = logstamp.replace(year=timestamp.year + 1) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if logstamp >= timestamp:
# Later - include
including_lines = True
yield self._parse_line(line) # depends on [control=['if'], data=[]]
else:
# Earlier - start excluding
including_lines = False # depends on [control=['if'], data=[]]
# If we're including lines, add this continuation line
elif including_lines:
yield self._parse_line(line) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
|
def timeit(method):
"""Compute the download time."""
def wrapper(*args, **kwargs):
start = time.time()
result = method(*args, **kwargs)
end = time.time()
click.echo('Cost {}s'.format(int(end-start)))
return result
return wrapper
|
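A usage sketch for the timeit decorator above; click.echo is swapped for print() so the snippet runs without the click dependency installed.

import time

def timeit(method):
    def wrapper(*args, **kwargs):
        start = time.time()
        result = method(*args, **kwargs)
        end = time.time()
        print('Cost {}s'.format(int(end - start)))
        return result
    return wrapper

@timeit
def slow_add(a, b):
    time.sleep(1)
    return a + b

print(slow_add(2, 3))  # prints 'Cost 1s', then 5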
def function[timeit, parameter[method]]:
constant[Compute the download time.]
def function[wrapper, parameter[]]:
variable[start] assign[=] call[name[time].time, parameter[]]
variable[result] assign[=] call[name[method], parameter[<ast.Starred object at 0x7da1b07b9ed0>]]
variable[end] assign[=] call[name[time].time, parameter[]]
call[name[click].echo, parameter[call[constant[Cost {}s].format, parameter[call[name[int], parameter[binary_operation[name[end] - name[start]]]]]]]]
return[name[result]]
return[name[wrapper]]
|
keyword[def] identifier[timeit] ( identifier[method] ):
literal[string]
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
identifier[start] = identifier[time] . identifier[time] ()
identifier[result] = identifier[method] (* identifier[args] ,** identifier[kwargs] )
identifier[end] = identifier[time] . identifier[time] ()
identifier[click] . identifier[echo] ( literal[string] . identifier[format] ( identifier[int] ( identifier[end] - identifier[start] )))
keyword[return] identifier[result]
keyword[return] identifier[wrapper]
|
def timeit(method):
"""Compute the download time."""
def wrapper(*args, **kwargs):
start = time.time()
result = method(*args, **kwargs)
end = time.time()
click.echo('Cost {}s'.format(int(end - start)))
return result
return wrapper
|
def alias_book(self, book_id, alias_id):
"""Adds an ``Id`` to a ``Book`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Book`` is determined by the
provider. The new ``Id`` performs as an alias to the primary
``Id``. If the alias is a pointer to another book, it is
reassigned to the given book ``Id``.
arg: book_id (osid.id.Id): the ``Id`` of a ``Book``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is already assigned
raise: NotFound - ``book_id`` not found
raise: NullArgument - ``book_id`` or ``alias_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinLookupSession.alias_bin_template
if self._catalog_session is not None:
return self._catalog_session.alias_catalog(catalog_id=book_id, alias_id=alias_id)
self._alias_id(primary_id=book_id, equivalent_id=alias_id)
|
def function[alias_book, parameter[self, book_id, alias_id]]:
constant[Adds an ``Id`` to a ``Book`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Book`` is determined by the
provider. The new ``Id`` performs as an alias to the primary
``Id``. If the alias is a pointer to another book, it is
reassigned to the given book ``Id``.
arg: book_id (osid.id.Id): the ``Id`` of a ``Book``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is already assigned
raise: NotFound - ``book_id`` not found
raise: NullArgument - ``book_id`` or ``alias_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
]
if compare[name[self]._catalog_session is_not constant[None]] begin[:]
return[call[name[self]._catalog_session.alias_catalog, parameter[]]]
call[name[self]._alias_id, parameter[]]
|
keyword[def] identifier[alias_book] ( identifier[self] , identifier[book_id] , identifier[alias_id] ):
literal[string]
keyword[if] identifier[self] . identifier[_catalog_session] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[_catalog_session] . identifier[alias_catalog] ( identifier[catalog_id] = identifier[book_id] , identifier[alias_id] = identifier[alias_id] )
identifier[self] . identifier[_alias_id] ( identifier[primary_id] = identifier[book_id] , identifier[equivalent_id] = identifier[alias_id] )
|
def alias_book(self, book_id, alias_id):
"""Adds an ``Id`` to a ``Book`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Book`` is determined by the
provider. The new ``Id`` performs as an alias to the primary
``Id``. If the alias is a pointer to another book, it is
reassigned to the given book ``Id``.
arg: book_id (osid.id.Id): the ``Id`` of a ``Book``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is already assigned
raise: NotFound - ``book_id`` not found
raise: NullArgument - ``book_id`` or ``alias_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinLookupSession.alias_bin_template
if self._catalog_session is not None:
return self._catalog_session.alias_catalog(catalog_id=book_id, alias_id=alias_id) # depends on [control=['if'], data=[]]
self._alias_id(primary_id=book_id, equivalent_id=alias_id)
|
def nan_maximum_filter(arr, ksize):
'''
    same as scipy.ndimage.maximum_filter,
    but ignoring NaN values
'''
out = np.empty_like(arr)
_calc(arr, out, ksize//2)
return out
|
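The compiled helper _calc is not shown in this record, so below is a slow pure-numpy reference of the intended behaviour: a sliding maximum over a window of ksize that skips NaN entries instead of letting them propagate, unlike scipy's maximum_filter.

import numpy as np

def nan_maximum_filter_ref(arr, ksize):
    r = ksize // 2
    out = np.empty_like(arr)
    for i in range(len(arr)):
        window = arr[max(0, i - r):i + r + 1]
        out[i] = np.nanmax(window)  # NaNs are ignored, not propagated
    return out

a = np.array([1.0, np.nan, 3.0, 2.0])
print(nan_maximum_filter_ref(a, 3))  # [1. 3. 3. 3.]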
def function[nan_maximum_filter, parameter[arr, ksize]]:
constant[
    same as scipy.ndimage.maximum_filter,
    but ignoring NaN values
]
variable[out] assign[=] call[name[np].empty_like, parameter[name[arr]]]
call[name[_calc], parameter[name[arr], name[out], binary_operation[name[ksize] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]]]
return[name[out]]
|
keyword[def] identifier[nan_maximum_filter] ( identifier[arr] , identifier[ksize] ):
literal[string]
identifier[out] = identifier[np] . identifier[empty_like] ( identifier[arr] )
identifier[_calc] ( identifier[arr] , identifier[out] , identifier[ksize] // literal[int] )
keyword[return] identifier[out]
|
def nan_maximum_filter(arr, ksize):
"""
    same as scipy.ndimage.maximum_filter,
    but ignoring NaN values
"""
out = np.empty_like(arr)
_calc(arr, out, ksize // 2)
return out
|
def count(self, index=None, doc_type=None, body=None, **query_params):
"""
Execute a query and get the number of matches for that query.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-count.html>`_
:param index: A comma-separated list of indices to restrict the results
:param doc_type: A comma-separated list of types to restrict the results
:param body: A query to restrict the results specified with the Query DSL
(optional)
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg analyze_wildcard: Specify whether wildcard and prefix queries
should be analyzed (default: false)
:arg analyzer: The analyzer to use for the query string
:arg default_operator: The default operator for query string query (AND
or OR), default 'OR', valid choices are: 'AND', 'OR'
:arg df: The field to use as default where no field prefix is given in
the query string
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg lenient: Specify whether format-based query failures (such as
providing text to a numeric field) should be ignored
:arg lowercase_expanded_terms: Specify whether query terms should be
lowercased
:arg min_score: Include only documents with a specific `_score` value in
the result
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg q: Query in the Lucene query string syntax
:arg routing: Specific routing value
"""
if doc_type and not index:
index = EsConst.ALL_VALUES
path = self._es_parser.make_path(index, doc_type, EsMethods.COUNT)
result = yield self._perform_request(HttpMethod.GET, path, body, params=query_params)
returnValue(result)
|
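A small sketch of the index-defaulting rule at the top of count() above: a doc_type given without an index widens the query to all indices. '_all' and '_count' mirror the assumed values of EsConst.ALL_VALUES and EsMethods.COUNT; the real client then issues an HTTP GET against the resulting path.

def make_count_path(index=None, doc_type=None):
    if doc_type and not index:
        index = '_all'  # assumed value of EsConst.ALL_VALUES
    parts = [p for p in (index, doc_type, '_count') if p]
    return '/' + '/'.join(parts)

print(make_count_path('logs'))            # /logs/_count
print(make_count_path(doc_type='event'))  # /_all/event/_count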
def function[count, parameter[self, index, doc_type, body]]:
constant[
Execute a query and get the number of matches for that query.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-count.html>`_
:param index: A comma-separated list of indices to restrict the results
:param doc_type: A comma-separated list of types to restrict the results
:param body: A query to restrict the results specified with the Query DSL
(optional)
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg analyze_wildcard: Specify whether wildcard and prefix queries
should be analyzed (default: false)
:arg analyzer: The analyzer to use for the query string
:arg default_operator: The default operator for query string query (AND
or OR), default 'OR', valid choices are: 'AND', 'OR'
:arg df: The field to use as default where no field prefix is given in
the query string
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg lenient: Specify whether format-based query failures (such as
providing text to a numeric field) should be ignored
:arg lowercase_expanded_terms: Specify whether query terms should be
lowercased
:arg min_score: Include only documents with a specific `_score` value in
the result
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg q: Query in the Lucene query string syntax
:arg routing: Specific routing value
]
if <ast.BoolOp object at 0x7da18c4cf2b0> begin[:]
variable[index] assign[=] name[EsConst].ALL_VALUES
variable[path] assign[=] call[name[self]._es_parser.make_path, parameter[name[index], name[doc_type], name[EsMethods].COUNT]]
variable[result] assign[=] <ast.Yield object at 0x7da18c4ce0b0>
call[name[returnValue], parameter[name[result]]]
|
keyword[def] identifier[count] ( identifier[self] , identifier[index] = keyword[None] , identifier[doc_type] = keyword[None] , identifier[body] = keyword[None] ,** identifier[query_params] ):
literal[string]
keyword[if] identifier[doc_type] keyword[and] keyword[not] identifier[index] :
identifier[index] = identifier[EsConst] . identifier[ALL_VALUES]
identifier[path] = identifier[self] . identifier[_es_parser] . identifier[make_path] ( identifier[index] , identifier[doc_type] , identifier[EsMethods] . identifier[COUNT] )
identifier[result] = keyword[yield] identifier[self] . identifier[_perform_request] ( identifier[HttpMethod] . identifier[GET] , identifier[path] , identifier[body] , identifier[params] = identifier[query_params] )
identifier[returnValue] ( identifier[result] )
|
def count(self, index=None, doc_type=None, body=None, **query_params):
"""
Execute a query and get the number of matches for that query.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-count.html>`_
:param index: A comma-separated list of indices to restrict the results
:param doc_type: A comma-separated list of types to restrict the results
:param body: A query to restrict the results specified with the Query DSL
(optional)
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg analyze_wildcard: Specify whether wildcard and prefix queries
should be analyzed (default: false)
:arg analyzer: The analyzer to use for the query string
:arg default_operator: The default operator for query string query (AND
or OR), default 'OR', valid choices are: 'AND', 'OR'
:arg df: The field to use as default where no field prefix is given in
the query string
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg lenient: Specify whether format-based query failures (such as
providing text to a numeric field) should be ignored
:arg lowercase_expanded_terms: Specify whether query terms should be
lowercased
:arg min_score: Include only documents with a specific `_score` value in
the result
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg q: Query in the Lucene query string syntax
:arg routing: Specific routing value
"""
if doc_type and (not index):
index = EsConst.ALL_VALUES # depends on [control=['if'], data=[]]
path = self._es_parser.make_path(index, doc_type, EsMethods.COUNT)
result = (yield self._perform_request(HttpMethod.GET, path, body, params=query_params))
returnValue(result)
|
def put(self, lo, hi):
"""Copy 16-bit value to result."""
self.buf.append(lo)
self.buf.append(hi)
self.pos += 1
|
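A tiny demonstration of the byte order implied by put() above: appending the low byte before the high byte is little-endian, so the pair round-trips through struct's '<H' format.

import struct

buf = bytearray()
value = 0x1234
lo, hi = value & 0xFF, value >> 8
buf.append(lo)
buf.append(hi)
assert bytes(buf) == struct.pack('<H', value)  # b'\x34\x12'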
def function[put, parameter[self, lo, hi]]:
constant[Copy 16-bit value to result.]
call[name[self].buf.append, parameter[name[lo]]]
call[name[self].buf.append, parameter[name[hi]]]
<ast.AugAssign object at 0x7da20e957100>
|
keyword[def] identifier[put] ( identifier[self] , identifier[lo] , identifier[hi] ):
literal[string]
identifier[self] . identifier[buf] . identifier[append] ( identifier[lo] )
identifier[self] . identifier[buf] . identifier[append] ( identifier[hi] )
identifier[self] . identifier[pos] += literal[int]
|
def put(self, lo, hi):
"""Copy 16-bit value to result."""
self.buf.append(lo)
self.buf.append(hi)
self.pos += 1
|
def element_abund_marco(i_decay, stable_isotope_list,
stable_isotope_identifier,
mass_fractions_array_not_decayed,
mass_fractions_array_decayed):
'''
Given an array of isotopic abundances not decayed and a similar
    array of isotopic abundances decayed, here element abundances
and production factors for elements are calculated
'''
    # this is done in a really simple way; it could surely be done better, in a couple of loops.
    # I keep this, since I only have to copy over the old script. Falk will probably redo it.
#import numpy as np
#from NuGridPy import utils as u
global elem_abund
elem_abund = np.zeros(z_bismuth)
global elem_abund_decayed
elem_abund_decayed = np.zeros(z_bismuth)
global elem_prod_fac
elem_prod_fac = np.zeros(z_bismuth)
global elem_prod_fac_decayed
elem_prod_fac_decayed = np.zeros(z_bismuth)
    # notice that elem_abund includes all contributions, both from stable and unstable isotopes at
# that moment.
for i in range(z_bismuth):
dummy = 0.
for j in range(len(spe)):
if znum_int[j] == i+1 and stable_isotope_identifier[j] > 0.5:
dummy = dummy + float(mass_fractions_array_not_decayed[j])
elem_abund[i] = dummy
for i in range(z_bismuth):
if index_stable[i] == 1:
elem_prod_fac[i] = float(old_div(elem_abund[i],solar_elem_abund[i]))
elif index_stable[i] == 0:
elem_prod_fac[i] = 0.
if i_decay == 2:
for i in range(z_bismuth):
dummy = 0.
for j in range(len(mass_fractions_array_decayed)):
if znum_int[cl[stable_isotope_list[j].capitalize()]] == i+1:
#print znum_int[cl[stable[j].capitalize()]],cl[stable[j].capitalize()],stable[j]
dummy = dummy + float(mass_fractions_array_decayed[j])
elem_abund_decayed[i] = dummy
for i in range(z_bismuth):
if index_stable[i] == 1:
elem_prod_fac_decayed[i] = float(old_div(elem_abund_decayed[i],solar_elem_abund[i]))
elif index_stable[i] == 0:
elem_prod_fac_decayed[i] = 0.
|
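A compact numpy sketch of what the loops above compute: elemental abundances are the sums of isotopic mass fractions sharing a proton number Z, and the production factor divides each elemental abundance by its solar value. All numbers are invented for illustration.

import numpy as np

znum = np.array([1, 1, 2, 2])                   # Z of each isotope (2x H, 2x He)
mass_frac = np.array([0.70, 0.01, 0.27, 0.02])  # isotopic mass fractions
solar = np.array([0.715, 0.27])                 # solar abundance per element

elem_abund = np.array([mass_frac[znum == z].sum() for z in (1, 2)])
elem_prod_fac = elem_abund / solar
print(elem_abund)     # [0.71 0.29]
print(elem_prod_fac)  # approx. [0.993 1.074]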
def function[element_abund_marco, parameter[i_decay, stable_isotope_list, stable_isotope_identifier, mass_fractions_array_not_decayed, mass_fractions_array_decayed]]:
constant[
Given an array of isotopic abundances not decayed and a similar
    array of isotopic abundances decayed, here element abundances
and production factors for elements are calculated
]
<ast.Global object at 0x7da18dc9a950>
variable[elem_abund] assign[=] call[name[np].zeros, parameter[name[z_bismuth]]]
<ast.Global object at 0x7da18dc990f0>
variable[elem_abund_decayed] assign[=] call[name[np].zeros, parameter[name[z_bismuth]]]
<ast.Global object at 0x7da18dc98730>
variable[elem_prod_fac] assign[=] call[name[np].zeros, parameter[name[z_bismuth]]]
<ast.Global object at 0x7da18dc99e70>
variable[elem_prod_fac_decayed] assign[=] call[name[np].zeros, parameter[name[z_bismuth]]]
for taget[name[i]] in starred[call[name[range], parameter[name[z_bismuth]]]] begin[:]
variable[dummy] assign[=] constant[0.0]
for taget[name[j]] in starred[call[name[range], parameter[call[name[len], parameter[name[spe]]]]]] begin[:]
if <ast.BoolOp object at 0x7da18c4cf910> begin[:]
variable[dummy] assign[=] binary_operation[name[dummy] + call[name[float], parameter[call[name[mass_fractions_array_not_decayed]][name[j]]]]]
call[name[elem_abund]][name[i]] assign[=] name[dummy]
for taget[name[i]] in starred[call[name[range], parameter[name[z_bismuth]]]] begin[:]
if compare[call[name[index_stable]][name[i]] equal[==] constant[1]] begin[:]
call[name[elem_prod_fac]][name[i]] assign[=] call[name[float], parameter[call[name[old_div], parameter[call[name[elem_abund]][name[i]], call[name[solar_elem_abund]][name[i]]]]]]
if compare[name[i_decay] equal[==] constant[2]] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[name[z_bismuth]]]] begin[:]
variable[dummy] assign[=] constant[0.0]
for taget[name[j]] in starred[call[name[range], parameter[call[name[len], parameter[name[mass_fractions_array_decayed]]]]]] begin[:]
if compare[call[name[znum_int]][call[name[cl]][call[call[name[stable_isotope_list]][name[j]].capitalize, parameter[]]]] equal[==] binary_operation[name[i] + constant[1]]] begin[:]
variable[dummy] assign[=] binary_operation[name[dummy] + call[name[float], parameter[call[name[mass_fractions_array_decayed]][name[j]]]]]
call[name[elem_abund_decayed]][name[i]] assign[=] name[dummy]
for taget[name[i]] in starred[call[name[range], parameter[name[z_bismuth]]]] begin[:]
if compare[call[name[index_stable]][name[i]] equal[==] constant[1]] begin[:]
call[name[elem_prod_fac_decayed]][name[i]] assign[=] call[name[float], parameter[call[name[old_div], parameter[call[name[elem_abund_decayed]][name[i]], call[name[solar_elem_abund]][name[i]]]]]]
|
keyword[def] identifier[element_abund_marco] ( identifier[i_decay] , identifier[stable_isotope_list] ,
identifier[stable_isotope_identifier] ,
identifier[mass_fractions_array_not_decayed] ,
identifier[mass_fractions_array_decayed] ):
literal[string]
keyword[global] identifier[elem_abund]
identifier[elem_abund] = identifier[np] . identifier[zeros] ( identifier[z_bismuth] )
keyword[global] identifier[elem_abund_decayed]
identifier[elem_abund_decayed] = identifier[np] . identifier[zeros] ( identifier[z_bismuth] )
keyword[global] identifier[elem_prod_fac]
identifier[elem_prod_fac] = identifier[np] . identifier[zeros] ( identifier[z_bismuth] )
keyword[global] identifier[elem_prod_fac_decayed]
identifier[elem_prod_fac_decayed] = identifier[np] . identifier[zeros] ( identifier[z_bismuth] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[z_bismuth] ):
identifier[dummy] = literal[int]
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[len] ( identifier[spe] )):
keyword[if] identifier[znum_int] [ identifier[j] ]== identifier[i] + literal[int] keyword[and] identifier[stable_isotope_identifier] [ identifier[j] ]> literal[int] :
identifier[dummy] = identifier[dummy] + identifier[float] ( identifier[mass_fractions_array_not_decayed] [ identifier[j] ])
identifier[elem_abund] [ identifier[i] ]= identifier[dummy]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[z_bismuth] ):
keyword[if] identifier[index_stable] [ identifier[i] ]== literal[int] :
identifier[elem_prod_fac] [ identifier[i] ]= identifier[float] ( identifier[old_div] ( identifier[elem_abund] [ identifier[i] ], identifier[solar_elem_abund] [ identifier[i] ]))
keyword[elif] identifier[index_stable] [ identifier[i] ]== literal[int] :
identifier[elem_prod_fac] [ identifier[i] ]= literal[int]
keyword[if] identifier[i_decay] == literal[int] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[z_bismuth] ):
identifier[dummy] = literal[int]
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[len] ( identifier[mass_fractions_array_decayed] )):
keyword[if] identifier[znum_int] [ identifier[cl] [ identifier[stable_isotope_list] [ identifier[j] ]. identifier[capitalize] ()]]== identifier[i] + literal[int] :
identifier[dummy] = identifier[dummy] + identifier[float] ( identifier[mass_fractions_array_decayed] [ identifier[j] ])
identifier[elem_abund_decayed] [ identifier[i] ]= identifier[dummy]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[z_bismuth] ):
keyword[if] identifier[index_stable] [ identifier[i] ]== literal[int] :
identifier[elem_prod_fac_decayed] [ identifier[i] ]= identifier[float] ( identifier[old_div] ( identifier[elem_abund_decayed] [ identifier[i] ], identifier[solar_elem_abund] [ identifier[i] ]))
keyword[elif] identifier[index_stable] [ identifier[i] ]== literal[int] :
identifier[elem_prod_fac_decayed] [ identifier[i] ]= literal[int]
|
def element_abund_marco(i_decay, stable_isotope_list, stable_isotope_identifier, mass_fractions_array_not_decayed, mass_fractions_array_decayed):
"""
    Given an array of isotopic abundances not decayed and a similar
    array of isotopic abundances decayed, elemental abundances and
    elemental production factors are calculated here.
"""
# this way is done in a really simple way. May be done better for sure, in a couple of loops.
# I keep this, since I have only to copy over old script. Falk will probably redo it.
#import numpy as np
#from NuGridPy import utils as u
global elem_abund
elem_abund = np.zeros(z_bismuth)
global elem_abund_decayed
elem_abund_decayed = np.zeros(z_bismuth)
global elem_prod_fac
elem_prod_fac = np.zeros(z_bismuth)
global elem_prod_fac_decayed
elem_prod_fac_decayed = np.zeros(z_bismuth)
# notice that elem_abund include all contribution, both from stables and unstables in
# that moment.
for i in range(z_bismuth):
dummy = 0.0
for j in range(len(spe)):
if znum_int[j] == i + 1 and stable_isotope_identifier[j] > 0.5:
dummy = dummy + float(mass_fractions_array_not_decayed[j]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['j']]
elem_abund[i] = dummy # depends on [control=['for'], data=['i']]
for i in range(z_bismuth):
if index_stable[i] == 1:
elem_prod_fac[i] = float(old_div(elem_abund[i], solar_elem_abund[i])) # depends on [control=['if'], data=[]]
elif index_stable[i] == 0:
elem_prod_fac[i] = 0.0 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
if i_decay == 2:
for i in range(z_bismuth):
dummy = 0.0
for j in range(len(mass_fractions_array_decayed)):
if znum_int[cl[stable_isotope_list[j].capitalize()]] == i + 1:
#print znum_int[cl[stable[j].capitalize()]],cl[stable[j].capitalize()],stable[j]
dummy = dummy + float(mass_fractions_array_decayed[j]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['j']]
elem_abund_decayed[i] = dummy # depends on [control=['for'], data=['i']]
for i in range(z_bismuth):
if index_stable[i] == 1:
elem_prod_fac_decayed[i] = float(old_div(elem_abund_decayed[i], solar_elem_abund[i])) # depends on [control=['if'], data=[]]
elif index_stable[i] == 0:
elem_prod_fac_decayed[i] = 0.0 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
|
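
A minimal, self-contained sketch of the undecayed summation step in
element_abund_marco above. The small arrays are illustrative stand-ins for
the module-level globals (z_bismuth, znum_int, stable_isotope_identifier,
spe), not real nucleosynthesis data.

import numpy as np

z_max = 3                                  # stand-in for z_bismuth
znum_int = np.array([1, 1, 2, 3])          # proton number per species
stable_flag = np.array([1., 0., 1., 1.])   # > 0.5 means stable
mass_fractions = np.array([0.5, 0.1, 0.3, 0.1])

elem_abund = np.zeros(z_max)
for i in range(z_max):
    sel = (znum_int == i + 1) & (stable_flag > 0.5)
    elem_abund[i] = mass_fractions[sel].sum()

print(elem_abund)  # [0.5 0.3 0.1]
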
def get_oqparam(job_ini, pkg=None, calculators=None, hc_id=None, validate=1,
**kw):
"""
Parse a dictionary of parameters from an INI-style config file.
:param job_ini:
Path to configuration file/archive or dictionary of parameters
:param pkg:
Python package where to find the configuration file (optional)
:param calculators:
Sequence of calculator names (optional) used to restrict the
valid choices for `calculation_mode`
:param hc_id:
Not None only when called from a post calculation
:param validate:
Flag. By default it is true and the parameters are validated
:param kw:
String-valued keyword arguments used to override the job.ini parameters
:returns:
An :class:`openquake.commonlib.oqvalidation.OqParam` instance
        containing the validated and cast parameters/values parsed from
the job.ini file as well as a subdictionary 'inputs' containing
absolute paths to all of the files referenced in the job.ini, keyed by
the parameter name.
"""
# UGLY: this is here to avoid circular imports
from openquake.calculators import base
OqParam.calculation_mode.validator.choices = tuple(
calculators or base.calculators)
if not isinstance(job_ini, dict):
basedir = os.path.dirname(pkg.__file__) if pkg else ''
job_ini = get_params([os.path.join(basedir, job_ini)])
if hc_id:
job_ini.update(hazard_calculation_id=str(hc_id))
job_ini.update(kw)
oqparam = OqParam(**job_ini)
if validate:
oqparam.validate()
return oqparam
|
def function[get_oqparam, parameter[job_ini, pkg, calculators, hc_id, validate]]:
constant[
Parse a dictionary of parameters from an INI-style config file.
:param job_ini:
Path to configuration file/archive or dictionary of parameters
:param pkg:
Python package where to find the configuration file (optional)
:param calculators:
Sequence of calculator names (optional) used to restrict the
valid choices for `calculation_mode`
:param hc_id:
Not None only when called from a post calculation
:param validate:
Flag. By default it is true and the parameters are validated
:param kw:
String-valued keyword arguments used to override the job.ini parameters
:returns:
An :class:`openquake.commonlib.oqvalidation.OqParam` instance
        containing the validated and cast parameters/values parsed from
the job.ini file as well as a subdictionary 'inputs' containing
absolute paths to all of the files referenced in the job.ini, keyed by
the parameter name.
]
from relative_module[openquake.calculators] import module[base]
name[OqParam].calculation_mode.validator.choices assign[=] call[name[tuple], parameter[<ast.BoolOp object at 0x7da1b15f26b0>]]
if <ast.UnaryOp object at 0x7da1b15f0bb0> begin[:]
variable[basedir] assign[=] <ast.IfExp object at 0x7da1b15f2080>
variable[job_ini] assign[=] call[name[get_params], parameter[list[[<ast.Call object at 0x7da1b15f1660>]]]]
if name[hc_id] begin[:]
call[name[job_ini].update, parameter[]]
call[name[job_ini].update, parameter[name[kw]]]
variable[oqparam] assign[=] call[name[OqParam], parameter[]]
if name[validate] begin[:]
call[name[oqparam].validate, parameter[]]
return[name[oqparam]]
|
keyword[def] identifier[get_oqparam] ( identifier[job_ini] , identifier[pkg] = keyword[None] , identifier[calculators] = keyword[None] , identifier[hc_id] = keyword[None] , identifier[validate] = literal[int] ,
** identifier[kw] ):
literal[string]
keyword[from] identifier[openquake] . identifier[calculators] keyword[import] identifier[base]
identifier[OqParam] . identifier[calculation_mode] . identifier[validator] . identifier[choices] = identifier[tuple] (
identifier[calculators] keyword[or] identifier[base] . identifier[calculators] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[job_ini] , identifier[dict] ):
identifier[basedir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[pkg] . identifier[__file__] ) keyword[if] identifier[pkg] keyword[else] literal[string]
identifier[job_ini] = identifier[get_params] ([ identifier[os] . identifier[path] . identifier[join] ( identifier[basedir] , identifier[job_ini] )])
keyword[if] identifier[hc_id] :
identifier[job_ini] . identifier[update] ( identifier[hazard_calculation_id] = identifier[str] ( identifier[hc_id] ))
identifier[job_ini] . identifier[update] ( identifier[kw] )
identifier[oqparam] = identifier[OqParam] (** identifier[job_ini] )
keyword[if] identifier[validate] :
identifier[oqparam] . identifier[validate] ()
keyword[return] identifier[oqparam]
|
def get_oqparam(job_ini, pkg=None, calculators=None, hc_id=None, validate=1, **kw):
"""
Parse a dictionary of parameters from an INI-style config file.
:param job_ini:
Path to configuration file/archive or dictionary of parameters
:param pkg:
Python package where to find the configuration file (optional)
:param calculators:
Sequence of calculator names (optional) used to restrict the
valid choices for `calculation_mode`
:param hc_id:
Not None only when called from a post calculation
:param validate:
Flag. By default it is true and the parameters are validated
:param kw:
String-valued keyword arguments used to override the job.ini parameters
:returns:
An :class:`openquake.commonlib.oqvalidation.OqParam` instance
        containing the validated and cast parameters/values parsed from
the job.ini file as well as a subdictionary 'inputs' containing
absolute paths to all of the files referenced in the job.ini, keyed by
the parameter name.
"""
# UGLY: this is here to avoid circular imports
from openquake.calculators import base
OqParam.calculation_mode.validator.choices = tuple(calculators or base.calculators)
if not isinstance(job_ini, dict):
basedir = os.path.dirname(pkg.__file__) if pkg else ''
job_ini = get_params([os.path.join(basedir, job_ini)]) # depends on [control=['if'], data=[]]
if hc_id:
job_ini.update(hazard_calculation_id=str(hc_id)) # depends on [control=['if'], data=[]]
job_ini.update(kw)
oqparam = OqParam(**job_ini)
if validate:
oqparam.validate() # depends on [control=['if'], data=[]]
return oqparam
|
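
A hypothetical call of get_oqparam with a dictionary instead of a job.ini
path. It assumes the OpenQuake engine is importable; the two keys below are
illustrative, and a real job normally needs many more, which is why
validation is skipped here.

params = {
    'calculation_mode': 'classical',
    'description': 'demo job',
}
oq = get_oqparam(params, validate=0)  # dict input, no file lookup needed
print(oq.calculation_mode)            # classical
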
def ticket_field_show(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/ticket_fields#show-ticket-field"
api_path = "/api/v2/ticket_fields/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs)
|
def function[ticket_field_show, parameter[self, id]]:
constant[https://developer.zendesk.com/rest_api/docs/core/ticket_fields#show-ticket-field]
variable[api_path] assign[=] constant[/api/v2/ticket_fields/{id}.json]
variable[api_path] assign[=] call[name[api_path].format, parameter[]]
return[call[name[self].call, parameter[name[api_path]]]]
|
keyword[def] identifier[ticket_field_show] ( identifier[self] , identifier[id] ,** identifier[kwargs] ):
literal[string]
identifier[api_path] = literal[string]
identifier[api_path] = identifier[api_path] . identifier[format] ( identifier[id] = identifier[id] )
keyword[return] identifier[self] . identifier[call] ( identifier[api_path] ,** identifier[kwargs] )
|
def ticket_field_show(self, id, **kwargs):
"""https://developer.zendesk.com/rest_api/docs/core/ticket_fields#show-ticket-field"""
api_path = '/api/v2/ticket_fields/{id}.json'
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs)
|
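
Hypothetical usage, assuming client is an instance of the API wrapper that
defines ticket_field_show and that a Zendesk ticket field with this id
exists; the response shape follows Zendesk's documented JSON.

field = client.ticket_field_show(id=27642)
print(field['ticket_field']['title'])
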
def circular_gaussian_kernel(sd,radius):
"""Create a 2-d Gaussian convolution kernel
sd - standard deviation of the gaussian in pixels
radius - build a circular kernel that convolves all points in the circle
bounded by this radius
"""
i,j = np.mgrid[-radius:radius+1,-radius:radius+1].astype(float) / radius
mask = i**2 + j**2 <= 1
i = i * radius / sd
j = j * radius / sd
kernel = np.zeros((2*radius+1,2*radius+1))
kernel[mask] = np.e ** (-(i[mask]**2+j[mask]**2) /
(2 * sd **2))
#
# Normalize the kernel so that there is no net effect on a uniform image
#
kernel = kernel / np.sum(kernel)
return kernel
|
def function[circular_gaussian_kernel, parameter[sd, radius]]:
constant[Create a 2-d Gaussian convolution kernel
sd - standard deviation of the gaussian in pixels
radius - build a circular kernel that convolves all points in the circle
bounded by this radius
]
<ast.Tuple object at 0x7da1b0528bb0> assign[=] binary_operation[call[call[name[np].mgrid][tuple[[<ast.Slice object at 0x7da1b0529060>, <ast.Slice object at 0x7da18f09ef50>]]].astype, parameter[name[float]]] / name[radius]]
variable[mask] assign[=] compare[binary_operation[binary_operation[name[i] ** constant[2]] + binary_operation[name[j] ** constant[2]]] less_or_equal[<=] constant[1]]
variable[i] assign[=] binary_operation[binary_operation[name[i] * name[radius]] / name[sd]]
variable[j] assign[=] binary_operation[binary_operation[name[j] * name[radius]] / name[sd]]
variable[kernel] assign[=] call[name[np].zeros, parameter[tuple[[<ast.BinOp object at 0x7da1b052bc70>, <ast.BinOp object at 0x7da1b0528190>]]]]
call[name[kernel]][name[mask]] assign[=] binary_operation[name[np].e ** binary_operation[<ast.UnaryOp object at 0x7da1b0528ac0> / binary_operation[constant[2] * binary_operation[name[sd] ** constant[2]]]]]
variable[kernel] assign[=] binary_operation[name[kernel] / call[name[np].sum, parameter[name[kernel]]]]
return[name[kernel]]
|
keyword[def] identifier[circular_gaussian_kernel] ( identifier[sd] , identifier[radius] ):
literal[string]
identifier[i] , identifier[j] = identifier[np] . identifier[mgrid] [- identifier[radius] : identifier[radius] + literal[int] ,- identifier[radius] : identifier[radius] + literal[int] ]. identifier[astype] ( identifier[float] )/ identifier[radius]
identifier[mask] = identifier[i] ** literal[int] + identifier[j] ** literal[int] <= literal[int]
identifier[i] = identifier[i] * identifier[radius] / identifier[sd]
identifier[j] = identifier[j] * identifier[radius] / identifier[sd]
identifier[kernel] = identifier[np] . identifier[zeros] (( literal[int] * identifier[radius] + literal[int] , literal[int] * identifier[radius] + literal[int] ))
identifier[kernel] [ identifier[mask] ]= identifier[np] . identifier[e] **(-( identifier[i] [ identifier[mask] ]** literal[int] + identifier[j] [ identifier[mask] ]** literal[int] )/
( literal[int] * identifier[sd] ** literal[int] ))
identifier[kernel] = identifier[kernel] / identifier[np] . identifier[sum] ( identifier[kernel] )
keyword[return] identifier[kernel]
|
def circular_gaussian_kernel(sd, radius):
"""Create a 2-d Gaussian convolution kernel
sd - standard deviation of the gaussian in pixels
radius - build a circular kernel that convolves all points in the circle
bounded by this radius
"""
(i, j) = np.mgrid[-radius:radius + 1, -radius:radius + 1].astype(float) / radius
mask = i ** 2 + j ** 2 <= 1
i = i * radius / sd
j = j * radius / sd
kernel = np.zeros((2 * radius + 1, 2 * radius + 1))
kernel[mask] = np.e ** (-(i[mask] ** 2 + j[mask] ** 2) / (2 * sd ** 2))
#
# Normalize the kernel so that there is no net effect on a uniform image
#
kernel = kernel / np.sum(kernel)
return kernel
|
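
A quick sanity check of circular_gaussian_kernel above; only numpy is
required, so this runs as-is: the kernel is normalized and peaks at its
center.

import numpy as np

kernel = circular_gaussian_kernel(sd=2.0, radius=5)
print(kernel.shape)                   # (11, 11)
print(np.isclose(kernel.sum(), 1.0))  # True: normalized to unit sum
print(kernel[5, 5] == kernel.max())   # True: peak at the center
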
def raw_to_bv(self):
"""
        A counterpart to FP.raw_to_bv - converts this string to an equivalent bit-vector of the same length.
"""
if self.symbolic:
return BVS(next(iter(self.variables)).replace(self.STRING_TYPE_IDENTIFIER, self.GENERATED_BVS_IDENTIFIER), self.length)
else:
return BVV(ord(self.args[0]), self.length)
|
def function[raw_to_bv, parameter[self]]:
constant[
        A counterpart to FP.raw_to_bv - converts this string to an equivalent bit-vector of the same length.
]
if name[self].symbolic begin[:]
return[call[name[BVS], parameter[call[call[name[next], parameter[call[name[iter], parameter[name[self].variables]]]].replace, parameter[name[self].STRING_TYPE_IDENTIFIER, name[self].GENERATED_BVS_IDENTIFIER]], name[self].length]]]
|
keyword[def] identifier[raw_to_bv] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[symbolic] :
keyword[return] identifier[BVS] ( identifier[next] ( identifier[iter] ( identifier[self] . identifier[variables] )). identifier[replace] ( identifier[self] . identifier[STRING_TYPE_IDENTIFIER] , identifier[self] . identifier[GENERATED_BVS_IDENTIFIER] ), identifier[self] . identifier[length] )
keyword[else] :
keyword[return] identifier[BVV] ( identifier[ord] ( identifier[self] . identifier[args] [ literal[int] ]), identifier[self] . identifier[length] )
|
def raw_to_bv(self):
"""
        A counterpart to FP.raw_to_bv - converts this string to an equivalent bit-vector of the same length.
"""
if self.symbolic:
return BVS(next(iter(self.variables)).replace(self.STRING_TYPE_IDENTIFIER, self.GENERATED_BVS_IDENTIFIER), self.length) # depends on [control=['if'], data=[]]
else:
return BVV(ord(self.args[0]), self.length)
|
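
A self-contained sketch of just the symbolic-name rewrite performed in
raw_to_bv above; the two identifier values below are assumed placeholders,
not the class's real constants.

STRING_TYPE_IDENTIFIER = 'STRING'  # assumed placeholder value
GENERATED_BVS_IDENTIFIER = 'BVS'   # assumed placeholder value

var_name = 'STRING_input_3_8'
bv_name = var_name.replace(STRING_TYPE_IDENTIFIER, GENERATED_BVS_IDENTIFIER)
print(bv_name)  # BVS_input_3_8
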
def get_vnet(access_token, subscription_id, resource_group, vnet_name):
'''Get details about the named virtual network.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vnet_name (str): Name of the VNet.
Returns:
HTTP response. VNet JSON body.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Network/virtualNetworks/', vnet_name,
'?api-version=', NETWORK_API])
return do_get(endpoint, access_token)
|
def function[get_vnet, parameter[access_token, subscription_id, resource_group, vnet_name]]:
constant[Get details about the named virtual network.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vnet_name (str): Name of the VNet.
Returns:
HTTP response. VNet JSON body.
]
variable[endpoint] assign[=] call[constant[].join, parameter[list[[<ast.Call object at 0x7da1b056afe0>, <ast.Constant object at 0x7da1b0569000>, <ast.Name object at 0x7da1b0568be0>, <ast.Constant object at 0x7da1b0568460>, <ast.Name object at 0x7da1b0568f40>, <ast.Constant object at 0x7da1b0568eb0>, <ast.Name object at 0x7da1b0568400>, <ast.Constant object at 0x7da1b056ba60>, <ast.Name object at 0x7da1b056b3d0>]]]]
return[call[name[do_get], parameter[name[endpoint], name[access_token]]]]
|
keyword[def] identifier[get_vnet] ( identifier[access_token] , identifier[subscription_id] , identifier[resource_group] , identifier[vnet_name] ):
literal[string]
identifier[endpoint] = literal[string] . identifier[join] ([ identifier[get_rm_endpoint] (),
literal[string] , identifier[subscription_id] ,
literal[string] , identifier[resource_group] ,
literal[string] , identifier[vnet_name] ,
literal[string] , identifier[NETWORK_API] ])
keyword[return] identifier[do_get] ( identifier[endpoint] , identifier[access_token] )
|
def get_vnet(access_token, subscription_id, resource_group, vnet_name):
"""Get details about the named virtual network.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vnet_name (str): Name of the VNet.
Returns:
HTTP response. VNet JSON body.
"""
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/virtualNetworks/', vnet_name, '?api-version=', NETWORK_API])
return do_get(endpoint, access_token)
|
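
A hypothetical call with placeholder identifiers; it assumes a valid Azure
bearer token and that get_rm_endpoint, NETWORK_API and do_get from the
surrounding module are available.

vnet = get_vnet(
    access_token='<bearer-token>',  # placeholder, not a real token
    subscription_id='11111111-2222-3333-4444-555555555555',
    resource_group='my-rg',
    vnet_name='my-vnet',
)
print(vnet)  # HTTP response carrying the VNet JSON body
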
def recode_curesim_reads(
curesim_fastq_fo,
rnf_fastq_fo,
fai_fo,
genome_id,
number_of_read_tuples=10**9,
recode_random=False,
):
"""Recode CuReSim output FASTQ file to the RNF-compatible output FASTQ file.
Args:
curesim_fastq_fo (file object): File object of CuReSim FASTQ file.
        rnf_fastq_fo (file object): File object of RNF FASTQ.
fai_fo (file object): File object for FAI file of the reference genome.
genome_id (int): RNF genome ID to be used.
number_of_read_tuples (int): Expected number of read tuples (to estimate number of digits in RNF).
recode_random (bool): Recode random reads.
Raises:
ValueError
"""
curesim_pattern = re.compile('@(.*)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)')
"""
CuReSim read name format
@<#1>_<#2>_<#3>_<#4>_<#5>_<#6>_<#7>_<#8>
1: contig name
2: original position
3: strand (0=forward;1=reverse)
4: random read (0=non-random;1=random)
5: number of insertions
6: number of deletions
    7: number of substitutions
8: read number (unique within a genome)
"""
max_seq_len = 0
fai_index = rnftools.utils.FaIdx(fai_fo=fai_fo)
read_tuple_id_width = len(format(number_of_read_tuples, 'x'))
fq_creator = rnftools.rnfformat.FqCreator(
fastq_fo=rnf_fastq_fo,
read_tuple_id_width=read_tuple_id_width,
genome_id_width=2,
chr_id_width=fai_index.chr_id_width,
coor_width=fai_index.coor_width,
info_reads_in_tuple=True,
info_simulator="curesim",
)
# parsing FQ file
read_tuple_id = 0
i = 0
for line in curesim_fastq_fo:
if i % 4 == 0:
m = curesim_pattern.search(line)
if m is None:
rnftools.utils.error(
"Read '{}' was not generated by CuReSim.".format(line[1:]), program="RNFtools",
subprogram="MIShmash", exception=ValueError
)
contig_name = m.group(1)
start_pos = int(m.group(2))
direction = "R" if int(m.group(3)) else "F"
random = bool(m.group(4))
ins_nb = int(m.group(5))
del_nb = int(m.group(6))
subst_nb = int(m.group(7))
rd_id = int(m.group(8))
end_pos = start_pos - 1 - ins_nb + del_nb
chr_id = 0
random = contig_name[:4] == "rand"
# TODO: uncomment when the chromosome naming bug in curesim is corrected
# chr_id = self.dict_chr_ids[contig_name] if self.dict_chr_ids!={} else "0"
elif i % 4 == 1:
bases = line.strip()
end_pos += len(bases)
if recode_random:
left = 0
right = 0
else:
left = start_pos + 1
right = end_pos
segment = rnftools.rnfformat.Segment(
genome_id=genome_id,
chr_id=chr_id,
direction=direction,
left=left,
right=right,
)
elif i % 4 == 2:
pass
elif i % 4 == 3:
qualities = line.strip()
if random == recode_random:
fq_creator.add_read(
read_tuple_id=read_tuple_id,
bases=bases,
qualities=qualities,
segments=[segment],
)
read_tuple_id += 1
i += 1
fq_creator.flush_read_tuple()
|
def function[recode_curesim_reads, parameter[curesim_fastq_fo, rnf_fastq_fo, fai_fo, genome_id, number_of_read_tuples, recode_random]]:
constant[Recode CuReSim output FASTQ file to the RNF-compatible output FASTQ file.
Args:
curesim_fastq_fo (file object): File object of CuReSim FASTQ file.
fastq_rnf_fo (file object): File object of RNF FASTQ.
fai_fo (file object): File object for FAI file of the reference genome.
genome_id (int): RNF genome ID to be used.
number_of_read_tuples (int): Expected number of read tuples (to estimate number of digits in RNF).
recode_random (bool): Recode random reads.
Raises:
ValueError
]
variable[curesim_pattern] assign[=] call[name[re].compile, parameter[constant[@(.*)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)]]]
constant[
CuReSim read name format
@<#1>_<#2>_<#3>_<#4>_<#5>_<#6>_<#7>_<#8>
1: contig name
2: original position
3: strand (0=forward;1=reverse)
4: random read (0=non-random;1=random)
5: number of insertions
6: number of deletions
    7: number of substitutions
8: read number (unique within a genome)
]
variable[max_seq_len] assign[=] constant[0]
variable[fai_index] assign[=] call[name[rnftools].utils.FaIdx, parameter[]]
variable[read_tuple_id_width] assign[=] call[name[len], parameter[call[name[format], parameter[name[number_of_read_tuples], constant[x]]]]]
variable[fq_creator] assign[=] call[name[rnftools].rnfformat.FqCreator, parameter[]]
variable[read_tuple_id] assign[=] constant[0]
variable[i] assign[=] constant[0]
for taget[name[line]] in starred[name[curesim_fastq_fo]] begin[:]
if compare[binary_operation[name[i] <ast.Mod object at 0x7da2590d6920> constant[4]] equal[==] constant[0]] begin[:]
variable[m] assign[=] call[name[curesim_pattern].search, parameter[name[line]]]
if compare[name[m] is constant[None]] begin[:]
call[name[rnftools].utils.error, parameter[call[constant[Read '{}' was not generated by CuReSim.].format, parameter[call[name[line]][<ast.Slice object at 0x7da1b0b7cb80>]]]]]
variable[contig_name] assign[=] call[name[m].group, parameter[constant[1]]]
variable[start_pos] assign[=] call[name[int], parameter[call[name[m].group, parameter[constant[2]]]]]
variable[direction] assign[=] <ast.IfExp object at 0x7da1b0b7c430>
variable[random] assign[=] call[name[bool], parameter[call[name[m].group, parameter[constant[4]]]]]
variable[ins_nb] assign[=] call[name[int], parameter[call[name[m].group, parameter[constant[5]]]]]
variable[del_nb] assign[=] call[name[int], parameter[call[name[m].group, parameter[constant[6]]]]]
variable[subst_nb] assign[=] call[name[int], parameter[call[name[m].group, parameter[constant[7]]]]]
variable[rd_id] assign[=] call[name[int], parameter[call[name[m].group, parameter[constant[8]]]]]
variable[end_pos] assign[=] binary_operation[binary_operation[binary_operation[name[start_pos] - constant[1]] - name[ins_nb]] + name[del_nb]]
variable[chr_id] assign[=] constant[0]
variable[random] assign[=] compare[call[name[contig_name]][<ast.Slice object at 0x7da18f09dc90>] equal[==] constant[rand]]
<ast.AugAssign object at 0x7da1b0b0ef20>
call[name[fq_creator].flush_read_tuple, parameter[]]
|
keyword[def] identifier[recode_curesim_reads] (
identifier[curesim_fastq_fo] ,
identifier[rnf_fastq_fo] ,
identifier[fai_fo] ,
identifier[genome_id] ,
identifier[number_of_read_tuples] = literal[int] ** literal[int] ,
identifier[recode_random] = keyword[False] ,
):
literal[string]
identifier[curesim_pattern] = identifier[re] . identifier[compile] ( literal[string] )
literal[string]
identifier[max_seq_len] = literal[int]
identifier[fai_index] = identifier[rnftools] . identifier[utils] . identifier[FaIdx] ( identifier[fai_fo] = identifier[fai_fo] )
identifier[read_tuple_id_width] = identifier[len] ( identifier[format] ( identifier[number_of_read_tuples] , literal[string] ))
identifier[fq_creator] = identifier[rnftools] . identifier[rnfformat] . identifier[FqCreator] (
identifier[fastq_fo] = identifier[rnf_fastq_fo] ,
identifier[read_tuple_id_width] = identifier[read_tuple_id_width] ,
identifier[genome_id_width] = literal[int] ,
identifier[chr_id_width] = identifier[fai_index] . identifier[chr_id_width] ,
identifier[coor_width] = identifier[fai_index] . identifier[coor_width] ,
identifier[info_reads_in_tuple] = keyword[True] ,
identifier[info_simulator] = literal[string] ,
)
identifier[read_tuple_id] = literal[int]
identifier[i] = literal[int]
keyword[for] identifier[line] keyword[in] identifier[curesim_fastq_fo] :
keyword[if] identifier[i] % literal[int] == literal[int] :
identifier[m] = identifier[curesim_pattern] . identifier[search] ( identifier[line] )
keyword[if] identifier[m] keyword[is] keyword[None] :
identifier[rnftools] . identifier[utils] . identifier[error] (
literal[string] . identifier[format] ( identifier[line] [ literal[int] :]), identifier[program] = literal[string] ,
identifier[subprogram] = literal[string] , identifier[exception] = identifier[ValueError]
)
identifier[contig_name] = identifier[m] . identifier[group] ( literal[int] )
identifier[start_pos] = identifier[int] ( identifier[m] . identifier[group] ( literal[int] ))
identifier[direction] = literal[string] keyword[if] identifier[int] ( identifier[m] . identifier[group] ( literal[int] )) keyword[else] literal[string]
identifier[random] = identifier[bool] ( identifier[m] . identifier[group] ( literal[int] ))
identifier[ins_nb] = identifier[int] ( identifier[m] . identifier[group] ( literal[int] ))
identifier[del_nb] = identifier[int] ( identifier[m] . identifier[group] ( literal[int] ))
identifier[subst_nb] = identifier[int] ( identifier[m] . identifier[group] ( literal[int] ))
identifier[rd_id] = identifier[int] ( identifier[m] . identifier[group] ( literal[int] ))
identifier[end_pos] = identifier[start_pos] - literal[int] - identifier[ins_nb] + identifier[del_nb]
identifier[chr_id] = literal[int]
identifier[random] = identifier[contig_name] [: literal[int] ]== literal[string]
keyword[elif] identifier[i] % literal[int] == literal[int] :
identifier[bases] = identifier[line] . identifier[strip] ()
identifier[end_pos] += identifier[len] ( identifier[bases] )
keyword[if] identifier[recode_random] :
identifier[left] = literal[int]
identifier[right] = literal[int]
keyword[else] :
identifier[left] = identifier[start_pos] + literal[int]
identifier[right] = identifier[end_pos]
identifier[segment] = identifier[rnftools] . identifier[rnfformat] . identifier[Segment] (
identifier[genome_id] = identifier[genome_id] ,
identifier[chr_id] = identifier[chr_id] ,
identifier[direction] = identifier[direction] ,
identifier[left] = identifier[left] ,
identifier[right] = identifier[right] ,
)
keyword[elif] identifier[i] % literal[int] == literal[int] :
keyword[pass]
keyword[elif] identifier[i] % literal[int] == literal[int] :
identifier[qualities] = identifier[line] . identifier[strip] ()
keyword[if] identifier[random] == identifier[recode_random] :
identifier[fq_creator] . identifier[add_read] (
identifier[read_tuple_id] = identifier[read_tuple_id] ,
identifier[bases] = identifier[bases] ,
identifier[qualities] = identifier[qualities] ,
identifier[segments] =[ identifier[segment] ],
)
identifier[read_tuple_id] += literal[int]
identifier[i] += literal[int]
identifier[fq_creator] . identifier[flush_read_tuple] ()
|
def recode_curesim_reads(curesim_fastq_fo, rnf_fastq_fo, fai_fo, genome_id, number_of_read_tuples=10 ** 9, recode_random=False):
"""Recode CuReSim output FASTQ file to the RNF-compatible output FASTQ file.
Args:
curesim_fastq_fo (file object): File object of CuReSim FASTQ file.
        rnf_fastq_fo (file object): File object of RNF FASTQ.
fai_fo (file object): File object for FAI file of the reference genome.
genome_id (int): RNF genome ID to be used.
number_of_read_tuples (int): Expected number of read tuples (to estimate number of digits in RNF).
recode_random (bool): Recode random reads.
Raises:
ValueError
"""
curesim_pattern = re.compile('@(.*)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)')
    '\n\t\t\tCuReSim read name format\n\n\t\t\t@<#1>_<#2>_<#3>_<#4>_<#5>_<#6>_<#7>_<#8>\n\n\t\t\t1: contig name\n\t\t\t2: original position\n\t\t\t3: strand (0=forward;1=reverse)\n\t\t\t4: random read (0=non-random;1=random)\n\t\t\t5: number of insertions\n\t\t\t6: number of deletions\n\t\t\t7: number of substitutions\n\t\t\t8: read number (unique within a genome)\n\t\t'
max_seq_len = 0
fai_index = rnftools.utils.FaIdx(fai_fo=fai_fo)
read_tuple_id_width = len(format(number_of_read_tuples, 'x'))
fq_creator = rnftools.rnfformat.FqCreator(fastq_fo=rnf_fastq_fo, read_tuple_id_width=read_tuple_id_width, genome_id_width=2, chr_id_width=fai_index.chr_id_width, coor_width=fai_index.coor_width, info_reads_in_tuple=True, info_simulator='curesim')
# parsing FQ file
read_tuple_id = 0
i = 0
for line in curesim_fastq_fo:
if i % 4 == 0:
m = curesim_pattern.search(line)
if m is None:
rnftools.utils.error("Read '{}' was not generated by CuReSim.".format(line[1:]), program='RNFtools', subprogram='MIShmash', exception=ValueError) # depends on [control=['if'], data=[]]
contig_name = m.group(1)
start_pos = int(m.group(2))
direction = 'R' if int(m.group(3)) else 'F'
random = bool(m.group(4))
ins_nb = int(m.group(5))
del_nb = int(m.group(6))
subst_nb = int(m.group(7))
rd_id = int(m.group(8))
end_pos = start_pos - 1 - ins_nb + del_nb
chr_id = 0
random = contig_name[:4] == 'rand' # depends on [control=['if'], data=[]]
# TODO: uncomment when the chromosome naming bug in curesim is corrected
# chr_id = self.dict_chr_ids[contig_name] if self.dict_chr_ids!={} else "0"
elif i % 4 == 1:
bases = line.strip()
end_pos += len(bases)
if recode_random:
left = 0
right = 0 # depends on [control=['if'], data=[]]
else:
left = start_pos + 1
right = end_pos
segment = rnftools.rnfformat.Segment(genome_id=genome_id, chr_id=chr_id, direction=direction, left=left, right=right) # depends on [control=['if'], data=[]]
elif i % 4 == 2:
pass # depends on [control=['if'], data=[]]
elif i % 4 == 3:
qualities = line.strip()
if random == recode_random:
fq_creator.add_read(read_tuple_id=read_tuple_id, bases=bases, qualities=qualities, segments=[segment]) # depends on [control=['if'], data=[]]
read_tuple_id += 1 # depends on [control=['if'], data=[]]
i += 1 # depends on [control=['for'], data=['line']]
fq_creator.flush_read_tuple()
|
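
A self-contained demonstration of the read-name parsing alone, applying the
same regular expression to a made-up read name that follows the documented
CuReSim format.

import re

curesim_pattern = re.compile(
    '@(.*)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)')

name = '@chr1_1500_0_0_1_0_2_42'  # made-up: contig chr1, pos 1500, forward
m = curesim_pattern.search(name)
print(m.groups())
# ('chr1', '1500', '0', '0', '1', '0', '2', '42')
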
def clean(self):
""" Remove intermediate files created
"""
if not util.is_blank(self.catalog.catname) and os.path.exists(self.catalog.catname):
os.remove(self.catalog.catname)
|
def function[clean, parameter[self]]:
constant[ Remove intermediate files created
]
if <ast.BoolOp object at 0x7da1b1badb70> begin[:]
call[name[os].remove, parameter[name[self].catalog.catname]]
|
keyword[def] identifier[clean] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[util] . identifier[is_blank] ( identifier[self] . identifier[catalog] . identifier[catname] ) keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[catalog] . identifier[catname] ):
identifier[os] . identifier[remove] ( identifier[self] . identifier[catalog] . identifier[catname] )
|
def clean(self):
""" Remove intermediate files created
"""
if not util.is_blank(self.catalog.catname) and os.path.exists(self.catalog.catname):
os.remove(self.catalog.catname) # depends on [control=['if'], data=[]]
|
def ParseJavaFlags(self, start_line=0):
"""Parse Java style flags (com.google.common.flags)."""
    # The Java flags printout starts with a "Standard flags" "module"
    # that doesn't follow the standard module syntax.
modname = 'Standard flags' # name of current module
self.module_list.append(modname)
self.modules.setdefault(modname, [])
modlist = self.modules[modname]
flag = None
for line_num in range(start_line, len(self.output)): # collect flags
line = self.output[line_num].rstrip()
logging.vlog(2, 'Line: "%s"' % line)
if not line: # blank lines terminate module
if flag: # save last flag
modlist.append(flag)
flag = None
continue
mobj = self.module_java_re.match(line)
if mobj: # start of a new module
modname = mobj.group(1)
logging.debug('Module: %s' % line)
if flag:
modlist.append(flag)
self.module_list.append(modname)
self.modules.setdefault(modname, [])
modlist = self.modules[modname]
flag = None
continue
mobj = self.flag_java_re.match(line)
if mobj: # start of a new flag
if flag: # save last flag
modlist.append(flag)
logging.debug('Flag: %s' % line)
flag = Flag(mobj.group(1), mobj.group(2))
continue
# append to flag help. type and default are part of the main text
if flag:
flag.help += ' ' + line.strip()
else:
logging.info('Extra: %s' % line)
if flag:
modlist.append(flag)
|
def function[ParseJavaFlags, parameter[self, start_line]]:
constant[Parse Java style flags (com.google.common.flags).]
variable[modname] assign[=] constant[Standard flags]
call[name[self].module_list.append, parameter[name[modname]]]
call[name[self].modules.setdefault, parameter[name[modname], list[[]]]]
variable[modlist] assign[=] call[name[self].modules][name[modname]]
variable[flag] assign[=] constant[None]
for taget[name[line_num]] in starred[call[name[range], parameter[name[start_line], call[name[len], parameter[name[self].output]]]]] begin[:]
variable[line] assign[=] call[call[name[self].output][name[line_num]].rstrip, parameter[]]
call[name[logging].vlog, parameter[constant[2], binary_operation[constant[Line: "%s"] <ast.Mod object at 0x7da2590d6920> name[line]]]]
if <ast.UnaryOp object at 0x7da18f58c8e0> begin[:]
if name[flag] begin[:]
call[name[modlist].append, parameter[name[flag]]]
variable[flag] assign[=] constant[None]
continue
variable[mobj] assign[=] call[name[self].module_java_re.match, parameter[name[line]]]
if name[mobj] begin[:]
variable[modname] assign[=] call[name[mobj].group, parameter[constant[1]]]
call[name[logging].debug, parameter[binary_operation[constant[Module: %s] <ast.Mod object at 0x7da2590d6920> name[line]]]]
if name[flag] begin[:]
call[name[modlist].append, parameter[name[flag]]]
call[name[self].module_list.append, parameter[name[modname]]]
call[name[self].modules.setdefault, parameter[name[modname], list[[]]]]
variable[modlist] assign[=] call[name[self].modules][name[modname]]
variable[flag] assign[=] constant[None]
continue
variable[mobj] assign[=] call[name[self].flag_java_re.match, parameter[name[line]]]
if name[mobj] begin[:]
if name[flag] begin[:]
call[name[modlist].append, parameter[name[flag]]]
call[name[logging].debug, parameter[binary_operation[constant[Flag: %s] <ast.Mod object at 0x7da2590d6920> name[line]]]]
variable[flag] assign[=] call[name[Flag], parameter[call[name[mobj].group, parameter[constant[1]]], call[name[mobj].group, parameter[constant[2]]]]]
continue
if name[flag] begin[:]
<ast.AugAssign object at 0x7da1b26aed40>
if name[flag] begin[:]
call[name[modlist].append, parameter[name[flag]]]
|
keyword[def] identifier[ParseJavaFlags] ( identifier[self] , identifier[start_line] = literal[int] ):
literal[string]
identifier[modname] = literal[string]
identifier[self] . identifier[module_list] . identifier[append] ( identifier[modname] )
identifier[self] . identifier[modules] . identifier[setdefault] ( identifier[modname] ,[])
identifier[modlist] = identifier[self] . identifier[modules] [ identifier[modname] ]
identifier[flag] = keyword[None]
keyword[for] identifier[line_num] keyword[in] identifier[range] ( identifier[start_line] , identifier[len] ( identifier[self] . identifier[output] )):
identifier[line] = identifier[self] . identifier[output] [ identifier[line_num] ]. identifier[rstrip] ()
identifier[logging] . identifier[vlog] ( literal[int] , literal[string] % identifier[line] )
keyword[if] keyword[not] identifier[line] :
keyword[if] identifier[flag] :
identifier[modlist] . identifier[append] ( identifier[flag] )
identifier[flag] = keyword[None]
keyword[continue]
identifier[mobj] = identifier[self] . identifier[module_java_re] . identifier[match] ( identifier[line] )
keyword[if] identifier[mobj] :
identifier[modname] = identifier[mobj] . identifier[group] ( literal[int] )
identifier[logging] . identifier[debug] ( literal[string] % identifier[line] )
keyword[if] identifier[flag] :
identifier[modlist] . identifier[append] ( identifier[flag] )
identifier[self] . identifier[module_list] . identifier[append] ( identifier[modname] )
identifier[self] . identifier[modules] . identifier[setdefault] ( identifier[modname] ,[])
identifier[modlist] = identifier[self] . identifier[modules] [ identifier[modname] ]
identifier[flag] = keyword[None]
keyword[continue]
identifier[mobj] = identifier[self] . identifier[flag_java_re] . identifier[match] ( identifier[line] )
keyword[if] identifier[mobj] :
keyword[if] identifier[flag] :
identifier[modlist] . identifier[append] ( identifier[flag] )
identifier[logging] . identifier[debug] ( literal[string] % identifier[line] )
identifier[flag] = identifier[Flag] ( identifier[mobj] . identifier[group] ( literal[int] ), identifier[mobj] . identifier[group] ( literal[int] ))
keyword[continue]
keyword[if] identifier[flag] :
identifier[flag] . identifier[help] += literal[string] + identifier[line] . identifier[strip] ()
keyword[else] :
identifier[logging] . identifier[info] ( literal[string] % identifier[line] )
keyword[if] identifier[flag] :
identifier[modlist] . identifier[append] ( identifier[flag] )
|
def ParseJavaFlags(self, start_line=0):
"""Parse Java style flags (com.google.common.flags)."""
    # The Java flags printout starts with a "Standard flags" "module"
    # that doesn't follow the standard module syntax.
modname = 'Standard flags' # name of current module
self.module_list.append(modname)
self.modules.setdefault(modname, [])
modlist = self.modules[modname]
flag = None
for line_num in range(start_line, len(self.output)): # collect flags
line = self.output[line_num].rstrip()
logging.vlog(2, 'Line: "%s"' % line)
if not line: # blank lines terminate module
if flag: # save last flag
modlist.append(flag)
flag = None # depends on [control=['if'], data=[]]
continue # depends on [control=['if'], data=[]]
mobj = self.module_java_re.match(line)
if mobj: # start of a new module
modname = mobj.group(1)
logging.debug('Module: %s' % line)
if flag:
modlist.append(flag) # depends on [control=['if'], data=[]]
self.module_list.append(modname)
self.modules.setdefault(modname, [])
modlist = self.modules[modname]
flag = None
continue # depends on [control=['if'], data=[]]
mobj = self.flag_java_re.match(line)
if mobj: # start of a new flag
if flag: # save last flag
modlist.append(flag) # depends on [control=['if'], data=[]]
logging.debug('Flag: %s' % line)
flag = Flag(mobj.group(1), mobj.group(2))
continue # depends on [control=['if'], data=[]]
# append to flag help. type and default are part of the main text
if flag:
flag.help += ' ' + line.strip() # depends on [control=['if'], data=[]]
else:
logging.info('Extra: %s' % line) # depends on [control=['for'], data=['line_num']]
if flag:
modlist.append(flag) # depends on [control=['if'], data=[]]
|
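
The patterns module_java_re and flag_java_re are defined outside this
excerpt, so the stand-ins below are assumptions chosen only to illustrate
the module/flag dispatch; the real expressions may differ.

import re

module_java_re = re.compile(r'^([\w.]+):$')        # assumed stand-in
flag_java_re = re.compile(r'^\s*--(\S+)\s+(.*)$')  # assumed stand-in

sample = ['com.example.Main:',
          '  --verbose whether to log verbosely',
          '    continuation of the help text',
          '']
for line in sample:
    line = line.rstrip()
    mobj = module_java_re.match(line)
    if mobj:
        print('module:', mobj.group(1))
        continue
    mobj = flag_java_re.match(line)
    if mobj:
        print('flag:', mobj.groups())
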
def invoke(ctx, data_file):
"""Invoke the command synchronously"""
click.echo('invoking')
response = ctx.invoke(data_file.read())
log_data = base64.b64decode(response['LogResult'])
click.echo(log_data)
click.echo('Response:')
click.echo(response['Payload'].read())
click.echo('done')
|
def function[invoke, parameter[ctx, data_file]]:
    constant[Invoke the Lambda function synchronously and echo its log output and response]
call[name[click].echo, parameter[constant[invoking]]]
variable[response] assign[=] call[name[ctx].invoke, parameter[call[name[data_file].read, parameter[]]]]
variable[log_data] assign[=] call[name[base64].b64decode, parameter[call[name[response]][constant[LogResult]]]]
call[name[click].echo, parameter[name[log_data]]]
call[name[click].echo, parameter[constant[Response:]]]
call[name[click].echo, parameter[call[call[name[response]][constant[Payload]].read, parameter[]]]]
call[name[click].echo, parameter[constant[done]]]
|
keyword[def] identifier[invoke] ( identifier[ctx] , identifier[data_file] ):
literal[string]
identifier[click] . identifier[echo] ( literal[string] )
identifier[response] = identifier[ctx] . identifier[invoke] ( identifier[data_file] . identifier[read] ())
identifier[log_data] = identifier[base64] . identifier[b64decode] ( identifier[response] [ literal[string] ])
identifier[click] . identifier[echo] ( identifier[log_data] )
identifier[click] . identifier[echo] ( literal[string] )
identifier[click] . identifier[echo] ( identifier[response] [ literal[string] ]. identifier[read] ())
identifier[click] . identifier[echo] ( literal[string] )
|
def invoke(ctx, data_file):
"""Invoke the command synchronously"""
click.echo('invoking')
response = ctx.invoke(data_file.read())
log_data = base64.b64decode(response['LogResult'])
click.echo(log_data)
click.echo('Response:')
click.echo(response['Payload'].read())
click.echo('done')
|
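
The same synchronous-invoke pattern sketched directly with boto3, which is
presumably what ctx wraps above; the function name and payload are
hypothetical.

import base64
import boto3

client = boto3.client('lambda')
response = client.invoke(
    FunctionName='my-function',  # hypothetical function name
    Payload=b'{"key": "value"}',
    LogType='Tail',              # ask Lambda to return the log tail
)
print(base64.b64decode(response['LogResult']).decode())  # execution log
print(response['Payload'].read())                        # response body
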
def _format_arg(arg):
"""Validate one exception specification contract tuple/list."""
# Check that the argument conforms to one of the acceptable types, a string
# (when the default exception type is used), an exception type (when the
# default exception message is used) or a list/tuple to specify both
# exception type and exception message
if not (_isexception(arg) or isinstance(arg, (str, tuple, list))):
raise TypeError("Illegal custom contract exception definition")
# Check that when the argument is a list or tuple, they only have at most
# 2 items, the exception type and the exception message
if isinstance(arg, (tuple, list)) and ((not arg) or (len(arg) > 2)):
raise TypeError("Illegal custom contract exception definition")
# When only an exception message is given (and the default RuntimeError
# exception type is used), check that the message is not empty
if isinstance(arg, str) and (not arg):
raise ValueError("Empty custom contract exception message")
# When only an exception message is defined,
# use the default exception type
if isinstance(arg, str):
return {"msg": arg, "type": RuntimeError}
# When only an exception type is defined,
# use the default exception message
if _isexception(arg):
return {"msg": "Argument `*[argument_name]*` is not valid", "type": arg}
    # If a list/tuple definition is used, check that if it is not a string, it is
# a valid exception type (i.e. that it actually raises an exception)
if (len(arg) == 1) and (not isinstance(arg[0], str)) and (not _isexception(arg[0])):
raise TypeError("Illegal custom contract exception definition")
if (len(arg) == 2) and (
not (
(isinstance(arg[0], str) and _isexception(arg[1]))
or (isinstance(arg[1], str) and _isexception(arg[0]))
)
):
raise TypeError("Illegal custom contract exception definition")
# Check that the exception definition has a non-empty exception message
# when a list/tuple definition is used
if (len(arg) == 1) and isinstance(arg[0], str) and (not arg[0]):
raise ValueError("Empty custom contract exception message")
if (len(arg) == 2) and (
(isinstance(arg[0], str) and (not arg[0]))
or (isinstance(arg[1], str) and (not arg[1]))
):
raise ValueError("Empty custom contract exception message")
# Return conforming dictionary with default exception type and exception
# message applied (if necessary)
if len(arg) == 1:
return {
"msg": (
arg[0]
if isinstance(arg[0], str)
else "Argument `*[argument_name]*` is not valid"
),
"type": arg[0] if _isexception(arg[0]) else RuntimeError,
}
# len(arg) == 2
return {
"msg": arg[0] if isinstance(arg[0], str) else arg[1],
"type": arg[0] if _isexception(arg[0]) else arg[1],
}
|
def function[_format_arg, parameter[arg]]:
constant[Validate one exception specification contract tuple/list.]
if <ast.UnaryOp object at 0x7da20c6e7850> begin[:]
<ast.Raise object at 0x7da20c6e5d20>
if <ast.BoolOp object at 0x7da20c6e4c10> begin[:]
<ast.Raise object at 0x7da20c6e4ac0>
if <ast.BoolOp object at 0x7da20c6e4730> begin[:]
<ast.Raise object at 0x7da20c6e52d0>
if call[name[isinstance], parameter[name[arg], name[str]]] begin[:]
return[dictionary[[<ast.Constant object at 0x7da20c6e5600>, <ast.Constant object at 0x7da20c6e51e0>], [<ast.Name object at 0x7da20c6e6740>, <ast.Name object at 0x7da20c6e7220>]]]
if call[name[_isexception], parameter[name[arg]]] begin[:]
return[dictionary[[<ast.Constant object at 0x7da20c6e64d0>, <ast.Constant object at 0x7da20c6e7640>], [<ast.Constant object at 0x7da20c6e46a0>, <ast.Name object at 0x7da20c6e6290>]]]
if <ast.BoolOp object at 0x7da20c6e5780> begin[:]
<ast.Raise object at 0x7da20c6e4430>
if <ast.BoolOp object at 0x7da20c6e60b0> begin[:]
<ast.Raise object at 0x7da2054a5450>
if <ast.BoolOp object at 0x7da2054a73a0> begin[:]
<ast.Raise object at 0x7da2054a5930>
if <ast.BoolOp object at 0x7da2054a4130> begin[:]
<ast.Raise object at 0x7da1b2372b30>
if compare[call[name[len], parameter[name[arg]]] equal[==] constant[1]] begin[:]
return[dictionary[[<ast.Constant object at 0x7da1b2373910>, <ast.Constant object at 0x7da1b2370f40>], [<ast.IfExp object at 0x7da1b2371a50>, <ast.IfExp object at 0x7da1b2370be0>]]]
return[dictionary[[<ast.Constant object at 0x7da2054a7e20>, <ast.Constant object at 0x7da2054a6710>], [<ast.IfExp object at 0x7da2054a4850>, <ast.IfExp object at 0x7da2054a4af0>]]]
|
keyword[def] identifier[_format_arg] ( identifier[arg] ):
literal[string]
keyword[if] keyword[not] ( identifier[_isexception] ( identifier[arg] ) keyword[or] identifier[isinstance] ( identifier[arg] ,( identifier[str] , identifier[tuple] , identifier[list] ))):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[arg] ,( identifier[tuple] , identifier[list] )) keyword[and] (( keyword[not] identifier[arg] ) keyword[or] ( identifier[len] ( identifier[arg] )> literal[int] )):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[arg] , identifier[str] ) keyword[and] ( keyword[not] identifier[arg] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[arg] , identifier[str] ):
keyword[return] { literal[string] : identifier[arg] , literal[string] : identifier[RuntimeError] }
keyword[if] identifier[_isexception] ( identifier[arg] ):
keyword[return] { literal[string] : literal[string] , literal[string] : identifier[arg] }
keyword[if] ( identifier[len] ( identifier[arg] )== literal[int] ) keyword[and] ( keyword[not] identifier[isinstance] ( identifier[arg] [ literal[int] ], identifier[str] )) keyword[and] ( keyword[not] identifier[_isexception] ( identifier[arg] [ literal[int] ])):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] ( identifier[len] ( identifier[arg] )== literal[int] ) keyword[and] (
keyword[not] (
( identifier[isinstance] ( identifier[arg] [ literal[int] ], identifier[str] ) keyword[and] identifier[_isexception] ( identifier[arg] [ literal[int] ]))
keyword[or] ( identifier[isinstance] ( identifier[arg] [ literal[int] ], identifier[str] ) keyword[and] identifier[_isexception] ( identifier[arg] [ literal[int] ]))
)
):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] ( identifier[len] ( identifier[arg] )== literal[int] ) keyword[and] identifier[isinstance] ( identifier[arg] [ literal[int] ], identifier[str] ) keyword[and] ( keyword[not] identifier[arg] [ literal[int] ]):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] ( identifier[len] ( identifier[arg] )== literal[int] ) keyword[and] (
( identifier[isinstance] ( identifier[arg] [ literal[int] ], identifier[str] ) keyword[and] ( keyword[not] identifier[arg] [ literal[int] ]))
keyword[or] ( identifier[isinstance] ( identifier[arg] [ literal[int] ], identifier[str] ) keyword[and] ( keyword[not] identifier[arg] [ literal[int] ]))
):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[len] ( identifier[arg] )== literal[int] :
keyword[return] {
literal[string] :(
identifier[arg] [ literal[int] ]
keyword[if] identifier[isinstance] ( identifier[arg] [ literal[int] ], identifier[str] )
keyword[else] literal[string]
),
literal[string] : identifier[arg] [ literal[int] ] keyword[if] identifier[_isexception] ( identifier[arg] [ literal[int] ]) keyword[else] identifier[RuntimeError] ,
}
keyword[return] {
literal[string] : identifier[arg] [ literal[int] ] keyword[if] identifier[isinstance] ( identifier[arg] [ literal[int] ], identifier[str] ) keyword[else] identifier[arg] [ literal[int] ],
literal[string] : identifier[arg] [ literal[int] ] keyword[if] identifier[_isexception] ( identifier[arg] [ literal[int] ]) keyword[else] identifier[arg] [ literal[int] ],
}
|
def _format_arg(arg):
"""Validate one exception specification contract tuple/list."""
# Check that the argument conforms to one of the acceptable types, a string
# (when the default exception type is used), an exception type (when the
# default exception message is used) or a list/tuple to specify both
# exception type and exception message
if not (_isexception(arg) or isinstance(arg, (str, tuple, list))):
raise TypeError('Illegal custom contract exception definition') # depends on [control=['if'], data=[]]
# Check that when the argument is a list or tuple, they only have at most
# 2 items, the exception type and the exception message
if isinstance(arg, (tuple, list)) and (not arg or len(arg) > 2):
raise TypeError('Illegal custom contract exception definition') # depends on [control=['if'], data=[]]
# When only an exception message is given (and the default RuntimeError
# exception type is used), check that the message is not empty
if isinstance(arg, str) and (not arg):
raise ValueError('Empty custom contract exception message') # depends on [control=['if'], data=[]]
# When only an exception message is defined,
# use the default exception type
if isinstance(arg, str):
return {'msg': arg, 'type': RuntimeError} # depends on [control=['if'], data=[]]
# When only an exception type is defined,
# use the default exception message
if _isexception(arg):
return {'msg': 'Argument `*[argument_name]*` is not valid', 'type': arg} # depends on [control=['if'], data=[]]
    # If a list/tuple definition is used, check that if it is not a string, it is
# a valid exception type (i.e. that it actually raises an exception)
if len(arg) == 1 and (not isinstance(arg[0], str)) and (not _isexception(arg[0])):
raise TypeError('Illegal custom contract exception definition') # depends on [control=['if'], data=[]]
if len(arg) == 2 and (not (isinstance(arg[0], str) and _isexception(arg[1]) or (isinstance(arg[1], str) and _isexception(arg[0])))):
raise TypeError('Illegal custom contract exception definition') # depends on [control=['if'], data=[]]
# Check that the exception definition has a non-empty exception message
# when a list/tuple definition is used
if len(arg) == 1 and isinstance(arg[0], str) and (not arg[0]):
raise ValueError('Empty custom contract exception message') # depends on [control=['if'], data=[]]
if len(arg) == 2 and (isinstance(arg[0], str) and (not arg[0]) or (isinstance(arg[1], str) and (not arg[1]))):
raise ValueError('Empty custom contract exception message') # depends on [control=['if'], data=[]]
# Return conforming dictionary with default exception type and exception
# message applied (if necessary)
if len(arg) == 1:
return {'msg': arg[0] if isinstance(arg[0], str) else 'Argument `*[argument_name]*` is not valid', 'type': arg[0] if _isexception(arg[0]) else RuntimeError} # depends on [control=['if'], data=[]]
# len(arg) == 2
return {'msg': arg[0] if isinstance(arg[0], str) else arg[1], 'type': arg[0] if _isexception(arg[0]) else arg[1]}
|
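
A runnable demonstration of the three accepted input shapes for
_format_arg; _isexception is defined elsewhere in the module, so a minimal
stub is assumed here for a standalone run.

def _isexception(obj):
    # Minimal stub: treat exception classes as valid exception types.
    return isinstance(obj, type) and issubclass(obj, Exception)

print(_format_arg('Invalid value'))
# {'msg': 'Invalid value', 'type': <class 'RuntimeError'>}
print(_format_arg(ValueError))
# {'msg': 'Argument `*[argument_name]*` is not valid', 'type': <class 'ValueError'>}
print(_format_arg(('Invalid value', ValueError)))
# {'msg': 'Invalid value', 'type': <class 'ValueError'>}
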
def compare(self, path, prefixed_path, source_storage):
"""
Returns True if the file should be copied.
"""
# First try a method on the command named compare_<comparison_method>
# If that doesn't exist, create a comparitor that calls methods on the
# storage with the name <comparison_method>, passing them the name.
comparitor = getattr(self, 'compare_%s' % self.comparison_method, None)
if not comparitor:
comparitor = self._create_comparitor(self.comparison_method)
return comparitor(path, prefixed_path, source_storage)
|
def function[compare, parameter[self, path, prefixed_path, source_storage]]:
constant[
Returns True if the file should be copied.
]
variable[comparitor] assign[=] call[name[getattr], parameter[name[self], binary_operation[constant[compare_%s] <ast.Mod object at 0x7da2590d6920> name[self].comparison_method], constant[None]]]
if <ast.UnaryOp object at 0x7da18f09c9d0> begin[:]
variable[comparitor] assign[=] call[name[self]._create_comparitor, parameter[name[self].comparison_method]]
return[call[name[comparitor], parameter[name[path], name[prefixed_path], name[source_storage]]]]
|
keyword[def] identifier[compare] ( identifier[self] , identifier[path] , identifier[prefixed_path] , identifier[source_storage] ):
literal[string]
identifier[comparitor] = identifier[getattr] ( identifier[self] , literal[string] % identifier[self] . identifier[comparison_method] , keyword[None] )
keyword[if] keyword[not] identifier[comparitor] :
identifier[comparitor] = identifier[self] . identifier[_create_comparitor] ( identifier[self] . identifier[comparison_method] )
keyword[return] identifier[comparitor] ( identifier[path] , identifier[prefixed_path] , identifier[source_storage] )
|
def compare(self, path, prefixed_path, source_storage):
"""
Returns True if the file should be copied.
"""
# First try a method on the command named compare_<comparison_method>
# If that doesn't exist, create a comparitor that calls methods on the
# storage with the name <comparison_method>, passing them the name.
comparitor = getattr(self, 'compare_%s' % self.comparison_method, None)
if not comparitor:
comparitor = self._create_comparitor(self.comparison_method) # depends on [control=['if'], data=[]]
return comparitor(path, prefixed_path, source_storage)
|
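A minimal standalone sketch of the getattr-based dispatch used by compare above; the class name, the 'modified_time' method, and the storage handling are hypothetical stand-ins, not the original implementation:

class Collector:
    comparison_method = 'modified_time'

    def compare_modified_time(self, path, prefixed_path, source_storage):
        # specialised comparison, found first via getattr()
        return True

    def _create_comparitor(self, name):
        # fallback: defer to a same-named method on the storage object
        return lambda path, prefixed_path, storage: getattr(storage, name)(path)

    def compare(self, path, prefixed_path, source_storage):
        comparitor = getattr(self, 'compare_%s' % self.comparison_method, None)
        if not comparitor:
            comparitor = self._create_comparitor(self.comparison_method)
        return comparitor(path, prefixed_path, source_storage)

print(Collector().compare('css/app.css', 'static/css/app.css', None))  # True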
def load_yaml_file(filename):
""" Load a YAML file from disk, throw a ParserError on failure."""
try:
with open(filename, 'r') as f:
return yaml.safe_load(f)
except IOError as e:
raise ParserError('Error opening ' + filename + ': ' + e.message)
except ValueError as e:
raise ParserError('Error parsing processes in {}: {}'
.format(filename, e.message))
|
def function[load_yaml_file, parameter[filename]]:
constant[ Load a YAML file from disk, throw a ParserError on failure.]
<ast.Try object at 0x7da1b0334940>
|
keyword[def] identifier[load_yaml_file] ( identifier[filename] ):
literal[string]
keyword[try] :
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] :
keyword[return] identifier[yaml] . identifier[safe_load] ( identifier[f] )
keyword[except] identifier[IOError] keyword[as] identifier[e] :
keyword[raise] identifier[ParserError] ( literal[string] + identifier[filename] + literal[string] + identifier[e] . identifier[message] )
keyword[except] identifier[ValueError] keyword[as] identifier[e] :
keyword[raise] identifier[ParserError] ( literal[string]
. identifier[format] ( identifier[filename] , identifier[e] . identifier[message] ))
|
def load_yaml_file(filename):
""" Load a YAML file from disk, throw a ParserError on failure."""
try:
with open(filename, 'r') as f:
return yaml.safe_load(f) # depends on [control=['with'], data=['f']] # depends on [control=['try'], data=[]]
except IOError as e:
raise ParserError('Error opening ' + filename + ': ' + e.message) # depends on [control=['except'], data=['e']]
except ValueError as e:
raise ParserError('Error parsing processes in {}: {}'.format(filename, e.message)) # depends on [control=['except'], data=['e']]
|
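Two portability caveats in load_yaml_file above: exceptions lost the .message attribute in Python 3 (use str(e)), and yaml.safe_load reports parse failures with yaml.YAMLError, which is not a ValueError subclass, so the second handler never fires on bad YAML. A hedged Python 3 sketch (ParserError stands in for the original project's exception class):

import yaml

class ParserError(Exception):
    pass

def load_yaml_file(filename):
    """Load a YAML file from disk, raising ParserError on failure."""
    try:
        with open(filename, 'r') as f:
            return yaml.safe_load(f)
    except IOError as e:
        raise ParserError('Error opening {}: {}'.format(filename, e))
    except yaml.YAMLError as e:
        raise ParserError('Error parsing processes in {}: {}'.format(filename, e))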
def _check_index(self):
"""Check that the index is without the bounds of _history."""
assert 0 <= self._index <= len(self._history) - 1
# There should always be the base item at least.
assert len(self._history) >= 1
|
def function[_check_index, parameter[self]]:
    constant[Check that the index is within the bounds of _history.]
assert[compare[constant[0] less_or_equal[<=] name[self]._index]]
assert[compare[call[name[len], parameter[name[self]._history]] greater_or_equal[>=] constant[1]]]
|
keyword[def] identifier[_check_index] ( identifier[self] ):
literal[string]
keyword[assert] literal[int] <= identifier[self] . identifier[_index] <= identifier[len] ( identifier[self] . identifier[_history] )- literal[int]
keyword[assert] identifier[len] ( identifier[self] . identifier[_history] )>= literal[int]
|
def _check_index(self):
"""Check that the index is without the bounds of _history."""
assert 0 <= self._index <= len(self._history) - 1
# There should always be the base item at least.
assert len(self._history) >= 1
|
def export_as_file(self, filepath, hyperparameters):
"""Generates a Python file with the importable base learner set to ``hyperparameters``
This function generates a Python file in the specified file path that contains
the base learner as an importable variable stored in ``base_learner``. The base
learner will be set to the appropriate hyperparameters through ``set_params``.
Args:
filepath (str, unicode): File path to save file in
hyperparameters (dict): Dictionary to use for ``set_params``
"""
if not filepath.endswith('.py'):
filepath += '.py'
file_contents = ''
file_contents += self.source
file_contents += '\n\nbase_learner.set_params(**{})\n'.format(hyperparameters)
file_contents += '\nmeta_feature_generator = "{}"\n'.format(self.meta_feature_generator)
with open(filepath, 'wb') as f:
f.write(file_contents.encode('utf8'))
|
def function[export_as_file, parameter[self, filepath, hyperparameters]]:
constant[Generates a Python file with the importable base learner set to ``hyperparameters``
This function generates a Python file in the specified file path that contains
the base learner as an importable variable stored in ``base_learner``. The base
learner will be set to the appropriate hyperparameters through ``set_params``.
Args:
filepath (str, unicode): File path to save file in
hyperparameters (dict): Dictionary to use for ``set_params``
]
if <ast.UnaryOp object at 0x7da1b26acac0> begin[:]
<ast.AugAssign object at 0x7da1b26ac6d0>
variable[file_contents] assign[=] constant[]
<ast.AugAssign object at 0x7da1b26ac4c0>
<ast.AugAssign object at 0x7da1b26acf40>
<ast.AugAssign object at 0x7da1b26afe20>
with call[name[open], parameter[name[filepath], constant[wb]]] begin[:]
call[name[f].write, parameter[call[name[file_contents].encode, parameter[constant[utf8]]]]]
|
keyword[def] identifier[export_as_file] ( identifier[self] , identifier[filepath] , identifier[hyperparameters] ):
literal[string]
keyword[if] keyword[not] identifier[filepath] . identifier[endswith] ( literal[string] ):
identifier[filepath] += literal[string]
identifier[file_contents] = literal[string]
identifier[file_contents] += identifier[self] . identifier[source]
identifier[file_contents] += literal[string] . identifier[format] ( identifier[hyperparameters] )
identifier[file_contents] += literal[string] . identifier[format] ( identifier[self] . identifier[meta_feature_generator] )
keyword[with] identifier[open] ( identifier[filepath] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[file_contents] . identifier[encode] ( literal[string] ))
|
def export_as_file(self, filepath, hyperparameters):
"""Generates a Python file with the importable base learner set to ``hyperparameters``
This function generates a Python file in the specified file path that contains
the base learner as an importable variable stored in ``base_learner``. The base
learner will be set to the appropriate hyperparameters through ``set_params``.
Args:
filepath (str, unicode): File path to save file in
hyperparameters (dict): Dictionary to use for ``set_params``
"""
if not filepath.endswith('.py'):
filepath += '.py' # depends on [control=['if'], data=[]]
file_contents = ''
file_contents += self.source
file_contents += '\n\nbase_learner.set_params(**{})\n'.format(hyperparameters)
file_contents += '\nmeta_feature_generator = "{}"\n'.format(self.meta_feature_generator)
with open(filepath, 'wb') as f:
f.write(file_contents.encode('utf8')) # depends on [control=['with'], data=['f']]
|
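The export above is plain string concatenation; for hypothetical inputs (names below are illustrative, not from the original source), the generated file looks like this:

source = ("from sklearn.tree import DecisionTreeClassifier\n"
          "base_learner = DecisionTreeClassifier()")
file_contents = source
file_contents += '\n\nbase_learner.set_params(**{})\n'.format({'max_depth': 3})
file_contents += '\nmeta_feature_generator = "{}"\n'.format('predict_proba')
print(file_contents)
# from sklearn.tree import DecisionTreeClassifier
# base_learner = DecisionTreeClassifier()
#
# base_learner.set_params(**{'max_depth': 3})
#
# meta_feature_generator = "predict_proba"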
def mixer(servo1, servo2, mixtype=1, gain=0.5):
'''mix two servos'''
s1 = servo1 - 1500
s2 = servo2 - 1500
v1 = (s1-s2)*gain
v2 = (s1+s2)*gain
if mixtype == 2:
v2 = -v2
elif mixtype == 3:
v1 = -v1
elif mixtype == 4:
v1 = -v1
v2 = -v2
if v1 > 600:
v1 = 600
elif v1 < -600:
v1 = -600
if v2 > 600:
v2 = 600
elif v2 < -600:
v2 = -600
return (1500+v1,1500+v2)
|
def function[mixer, parameter[servo1, servo2, mixtype, gain]]:
constant[mix two servos]
variable[s1] assign[=] binary_operation[name[servo1] - constant[1500]]
variable[s2] assign[=] binary_operation[name[servo2] - constant[1500]]
variable[v1] assign[=] binary_operation[binary_operation[name[s1] - name[s2]] * name[gain]]
variable[v2] assign[=] binary_operation[binary_operation[name[s1] + name[s2]] * name[gain]]
if compare[name[mixtype] equal[==] constant[2]] begin[:]
variable[v2] assign[=] <ast.UnaryOp object at 0x7da1b17dc310>
if compare[name[v1] greater[>] constant[600]] begin[:]
variable[v1] assign[=] constant[600]
if compare[name[v2] greater[>] constant[600]] begin[:]
variable[v2] assign[=] constant[600]
return[tuple[[<ast.BinOp object at 0x7da18f58d720>, <ast.BinOp object at 0x7da18f58ed10>]]]
|
keyword[def] identifier[mixer] ( identifier[servo1] , identifier[servo2] , identifier[mixtype] = literal[int] , identifier[gain] = literal[int] ):
literal[string]
identifier[s1] = identifier[servo1] - literal[int]
identifier[s2] = identifier[servo2] - literal[int]
identifier[v1] =( identifier[s1] - identifier[s2] )* identifier[gain]
identifier[v2] =( identifier[s1] + identifier[s2] )* identifier[gain]
keyword[if] identifier[mixtype] == literal[int] :
identifier[v2] =- identifier[v2]
keyword[elif] identifier[mixtype] == literal[int] :
identifier[v1] =- identifier[v1]
keyword[elif] identifier[mixtype] == literal[int] :
identifier[v1] =- identifier[v1]
identifier[v2] =- identifier[v2]
keyword[if] identifier[v1] > literal[int] :
identifier[v1] = literal[int]
keyword[elif] identifier[v1] <- literal[int] :
identifier[v1] =- literal[int]
keyword[if] identifier[v2] > literal[int] :
identifier[v2] = literal[int]
keyword[elif] identifier[v2] <- literal[int] :
identifier[v2] =- literal[int]
keyword[return] ( literal[int] + identifier[v1] , literal[int] + identifier[v2] )
|
def mixer(servo1, servo2, mixtype=1, gain=0.5):
"""mix two servos"""
s1 = servo1 - 1500
s2 = servo2 - 1500
v1 = (s1 - s2) * gain
v2 = (s1 + s2) * gain
if mixtype == 2:
v2 = -v2 # depends on [control=['if'], data=[]]
elif mixtype == 3:
v1 = -v1 # depends on [control=['if'], data=[]]
elif mixtype == 4:
v1 = -v1
v2 = -v2 # depends on [control=['if'], data=[]]
if v1 > 600:
v1 = 600 # depends on [control=['if'], data=['v1']]
elif v1 < -600:
v1 = -600 # depends on [control=['if'], data=['v1']]
if v2 > 600:
v2 = 600 # depends on [control=['if'], data=['v2']]
elif v2 < -600:
v2 = -600 # depends on [control=['if'], data=['v2']]
return (1500 + v1, 1500 + v2)
|
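Worked example of the mixing arithmetic above (assumes mixer is importable from this module; centre pulse width is 1500):

# mixtype=1, gain=0.5: servo1=1600, servo2=1400 -> s1=100, s2=-100
# v1 = (100 - (-100)) * 0.5 = 100.0, v2 = (100 + (-100)) * 0.5 = 0.0
print(mixer(1600, 1400))            # (1600.0, 1500.0)
print(mixer(1900, 1100, gain=2.0))  # v1 hits the +600 clamp -> (2100, 1500.0)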
def threshold(self, value):
"""Threshold used to determine if your content qualifies as spam.
On a scale from 1 to 10, with 10 being most strict, or most likely to
be considered as spam.
:param value: Threshold used to determine if your content qualifies as
spam.
On a scale from 1 to 10, with 10 being most strict, or
most likely to be considered as spam.
:type value: int
"""
if isinstance(value, SpamThreshold):
self._threshold = value
else:
self._threshold = SpamThreshold(value)
|
def function[threshold, parameter[self, value]]:
constant[Threshold used to determine if your content qualifies as spam.
On a scale from 1 to 10, with 10 being most strict, or most likely to
be considered as spam.
:param value: Threshold used to determine if your content qualifies as
spam.
On a scale from 1 to 10, with 10 being most strict, or
most likely to be considered as spam.
:type value: int
]
if call[name[isinstance], parameter[name[value], name[SpamThreshold]]] begin[:]
name[self]._threshold assign[=] name[value]
|
keyword[def] identifier[threshold] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[SpamThreshold] ):
identifier[self] . identifier[_threshold] = identifier[value]
keyword[else] :
identifier[self] . identifier[_threshold] = identifier[SpamThreshold] ( identifier[value] )
|
def threshold(self, value):
"""Threshold used to determine if your content qualifies as spam.
On a scale from 1 to 10, with 10 being most strict, or most likely to
be considered as spam.
:param value: Threshold used to determine if your content qualifies as
spam.
On a scale from 1 to 10, with 10 being most strict, or
most likely to be considered as spam.
:type value: int
"""
if isinstance(value, SpamThreshold):
self._threshold = value # depends on [control=['if'], data=[]]
else:
self._threshold = SpamThreshold(value)
|
def fsplit(pred, objs):
"""Split a list into two classes according to the predicate."""
t = []
f = []
for obj in objs:
if pred(obj):
t.append(obj)
else:
f.append(obj)
return (t, f)
|
def function[fsplit, parameter[pred, objs]]:
constant[Split a list into two classes according to the predicate.]
variable[t] assign[=] list[[]]
variable[f] assign[=] list[[]]
for taget[name[obj]] in starred[name[objs]] begin[:]
if call[name[pred], parameter[name[obj]]] begin[:]
call[name[t].append, parameter[name[obj]]]
return[tuple[[<ast.Name object at 0x7da2041da290>, <ast.Name object at 0x7da2041d9930>]]]
|
keyword[def] identifier[fsplit] ( identifier[pred] , identifier[objs] ):
literal[string]
identifier[t] =[]
identifier[f] =[]
keyword[for] identifier[obj] keyword[in] identifier[objs] :
keyword[if] identifier[pred] ( identifier[obj] ):
identifier[t] . identifier[append] ( identifier[obj] )
keyword[else] :
identifier[f] . identifier[append] ( identifier[obj] )
keyword[return] ( identifier[t] , identifier[f] )
|
def fsplit(pred, objs):
"""Split a list into two classes according to the predicate."""
t = []
f = []
for obj in objs:
if pred(obj):
t.append(obj) # depends on [control=['if'], data=[]]
else:
f.append(obj) # depends on [control=['for'], data=['obj']]
return (t, f)
|
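A quick usage sketch for fsplit above (illustrative only):

evens, odds = fsplit(lambda n: n % 2 == 0, range(6))
print(evens, odds)  # [0, 2, 4] [1, 3, 5]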
def encode(self):
"""Base64-encode the data contained in the reply when appropriate.
:return: encoded data.
:returntype: `unicode`
"""
if self.data is None:
return ""
elif not self.data:
return "="
else:
ret = standard_b64encode(self.data)
return ret.decode("us-ascii")
|
def function[encode, parameter[self]]:
constant[Base64-encode the data contained in the reply when appropriate.
:return: encoded data.
:returntype: `unicode`
]
if compare[name[self].data is constant[None]] begin[:]
return[constant[]]
|
keyword[def] identifier[encode] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[data] keyword[is] keyword[None] :
keyword[return] literal[string]
keyword[elif] keyword[not] identifier[self] . identifier[data] :
keyword[return] literal[string]
keyword[else] :
identifier[ret] = identifier[standard_b64encode] ( identifier[self] . identifier[data] )
keyword[return] identifier[ret] . identifier[decode] ( literal[string] )
|
def encode(self):
"""Base64-encode the data contained in the reply when appropriate.
:return: encoded data.
:returntype: `unicode`
"""
if self.data is None:
return '' # depends on [control=['if'], data=[]]
elif not self.data:
return '=' # depends on [control=['if'], data=[]]
else:
ret = standard_b64encode(self.data)
return ret.decode('us-ascii')
|
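The method distinguishes three cases: no data at all (empty string), zero-length data (a bare "="), and a real payload (standard base64). A standalone sketch of the same mapping, assuming standard_b64encode comes from the stdlib base64 module:

from base64 import standard_b64encode

def encode(data):
    if data is None:
        return ""
    elif not data:
        return "="
    return standard_b64encode(data).decode("us-ascii")

print(repr(encode(None)))    # ''
print(repr(encode(b"")))     # '='
print(repr(encode(b"abc")))  # 'YWJj'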
def get_user_roles(self, user):
"""Get roles associated with the given user.
Args:
user (string): User name.
Returns:
(list): List of roles that user has.
Raises:
requests.HTTPError on failure.
"""
return self.service.get_user_roles(
user, self.url_prefix, self.auth, self.session, self.session_send_opts)
|
def function[get_user_roles, parameter[self, user]]:
constant[Get roles associated with the given user.
Args:
user (string): User name.
Returns:
(list): List of roles that user has.
Raises:
requests.HTTPError on failure.
]
return[call[name[self].service.get_user_roles, parameter[name[user], name[self].url_prefix, name[self].auth, name[self].session, name[self].session_send_opts]]]
|
keyword[def] identifier[get_user_roles] ( identifier[self] , identifier[user] ):
literal[string]
keyword[return] identifier[self] . identifier[service] . identifier[get_user_roles] (
identifier[user] , identifier[self] . identifier[url_prefix] , identifier[self] . identifier[auth] , identifier[self] . identifier[session] , identifier[self] . identifier[session_send_opts] )
|
def get_user_roles(self, user):
"""Get roles associated with the given user.
Args:
user (string): User name.
Returns:
(list): List of roles that user has.
Raises:
requests.HTTPError on failure.
"""
return self.service.get_user_roles(user, self.url_prefix, self.auth, self.session, self.session_send_opts)
|
def _to_dict(objects):
'''
Potentially interprets a string as JSON for usage with mongo
'''
try:
if isinstance(objects, six.string_types):
objects = salt.utils.json.loads(objects)
except ValueError as err:
log.error("Could not parse objects: %s", err)
raise err
return objects
|
def function[_to_dict, parameter[objects]]:
constant[
Potentially interprets a string as JSON for usage with mongo
]
<ast.Try object at 0x7da18bc73820>
return[name[objects]]
|
keyword[def] identifier[_to_dict] ( identifier[objects] ):
literal[string]
keyword[try] :
keyword[if] identifier[isinstance] ( identifier[objects] , identifier[six] . identifier[string_types] ):
identifier[objects] = identifier[salt] . identifier[utils] . identifier[json] . identifier[loads] ( identifier[objects] )
keyword[except] identifier[ValueError] keyword[as] identifier[err] :
identifier[log] . identifier[error] ( literal[string] , identifier[err] )
keyword[raise] identifier[err]
keyword[return] identifier[objects]
|
def _to_dict(objects):
"""
Potentially interprets a string as JSON for usage with mongo
"""
try:
if isinstance(objects, six.string_types):
objects = salt.utils.json.loads(objects) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except ValueError as err:
log.error('Could not parse objects: %s', err)
raise err # depends on [control=['except'], data=['err']]
return objects
|
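Stripped of the salt/six plumbing, _to_dict above is json.loads applied only to string input, with everything else passed through. A Python 3 equivalent sketch:

import json

def to_dict(objects):
    # strings are parsed as JSON; structured values are returned as-is
    if isinstance(objects, str):
        return json.loads(objects)
    return objects

print(to_dict('{"a": 1}'))  # {'a': 1}
print(to_dict({'a': 1}))    # {'a': 1} (unchanged)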
def _proc_loop(proc_id, alive, queue, fn):
"""Thread loop for generating data
Parameters
----------
proc_id: int
Process id
alive: multiprocessing.Value
variable for signaling whether process should continue or not
queue: multiprocessing.Queue
queue for passing data back
fn: function
function object that returns a sample to be pushed into the queue
"""
print("proc {} started".format(proc_id))
try:
while alive.value:
data = fn()
put_success = False
while alive.value and not put_success:
try:
queue.put(data, timeout=0.5)
put_success = True
except QFullExcept:
# print("Queue Full")
pass
except KeyboardInterrupt:
print("W: interrupt received, stopping process {} ...".format(proc_id))
print("Closing process {}".format(proc_id))
queue.close()
|
def function[_proc_loop, parameter[proc_id, alive, queue, fn]]:
constant[Thread loop for generating data
Parameters
----------
proc_id: int
Process id
alive: multiprocessing.Value
variable for signaling whether process should continue or not
queue: multiprocessing.Queue
queue for passing data back
fn: function
function object that returns a sample to be pushed into the queue
]
call[name[print], parameter[call[constant[proc {} started].format, parameter[name[proc_id]]]]]
<ast.Try object at 0x7da18f810fa0>
call[name[print], parameter[call[constant[Closing process {}].format, parameter[name[proc_id]]]]]
call[name[queue].close, parameter[]]
|
keyword[def] identifier[_proc_loop] ( identifier[proc_id] , identifier[alive] , identifier[queue] , identifier[fn] ):
literal[string]
identifier[print] ( literal[string] . identifier[format] ( identifier[proc_id] ))
keyword[try] :
keyword[while] identifier[alive] . identifier[value] :
identifier[data] = identifier[fn] ()
identifier[put_success] = keyword[False]
keyword[while] identifier[alive] . identifier[value] keyword[and] keyword[not] identifier[put_success] :
keyword[try] :
identifier[queue] . identifier[put] ( identifier[data] , identifier[timeout] = literal[int] )
identifier[put_success] = keyword[True]
keyword[except] identifier[QFullExcept] :
keyword[pass]
keyword[except] identifier[KeyboardInterrupt] :
identifier[print] ( literal[string] . identifier[format] ( identifier[proc_id] ))
identifier[print] ( literal[string] . identifier[format] ( identifier[proc_id] ))
identifier[queue] . identifier[close] ()
|
def _proc_loop(proc_id, alive, queue, fn):
"""Thread loop for generating data
Parameters
----------
proc_id: int
Process id
alive: multiprocessing.Value
variable for signaling whether process should continue or not
queue: multiprocessing.Queue
queue for passing data back
fn: function
function object that returns a sample to be pushed into the queue
"""
print('proc {} started'.format(proc_id))
try:
while alive.value:
data = fn()
put_success = False
while alive.value and (not put_success):
try:
queue.put(data, timeout=0.5)
put_success = True # depends on [control=['try'], data=[]]
except QFullExcept:
# print("Queue Full")
pass # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]]
except KeyboardInterrupt:
print('W: interrupt received, stopping process {} ...'.format(proc_id)) # depends on [control=['except'], data=[]]
print('Closing process {}'.format(proc_id))
queue.close()
|
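A minimal harness for the producer loop above, assuming _proc_loop is importable at module level and that QFullExcept is an alias for the stdlib queue.Full (an assumption; the original module defines it elsewhere):

import multiprocessing as mp
import queue

QFullExcept = queue.Full  # assumed alias used by _proc_loop

def sample_fn():
    return 42

if __name__ == '__main__':
    alive = mp.Value('b', True)
    data_queue = mp.Queue(maxsize=2)
    proc = mp.Process(target=_proc_loop, args=(0, alive, data_queue, sample_fn))
    proc.start()
    print(data_queue.get())  # 42
    alive.value = False      # signal the loop to stop
    while proc.is_alive():   # drain so the child's feeder thread can flush
        try:
            data_queue.get(timeout=0.1)
        except queue.Empty:
            pass
    proc.join()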
def _wrapper(func, *args, **kwargs):
'Decorator for the methods that follow'
try:
if func.__name__ == "init":
# init may not fail, as its return code is just stored as
# private_data field of struct fuse_context
return func(*args, **kwargs) or 0
else:
try:
return func(*args, **kwargs) or 0
except OSError as e:
if e.errno > 0:
log.debug(
"FUSE operation %s raised a %s, returning errno %s.",
func.__name__, type(e), e.errno, exc_info=True)
return -e.errno
else:
log.error(
"FUSE operation %s raised an OSError with negative "
"errno %s, returning errno.EINVAL.",
func.__name__, e.errno, exc_info=True)
return -errno.EINVAL
except Exception:
log.error("Uncaught exception from FUSE operation %s, "
"returning errno.EINVAL.",
func.__name__, exc_info=True)
return -errno.EINVAL
except BaseException as e:
self.__critical_exception = e
log.critical(
"Uncaught critical exception from FUSE operation %s, aborting.",
func.__name__, exc_info=True)
# the raised exception (even SystemExit) will be caught by FUSE
# potentially causing SIGSEGV, so tell system to stop/interrupt FUSE
fuse_exit()
return -errno.EFAULT
|
def function[_wrapper, parameter[func]]:
constant[Decorator for the methods that follow]
<ast.Try object at 0x7da2047eaad0>
|
keyword[def] identifier[_wrapper] ( identifier[func] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
keyword[if] identifier[func] . identifier[__name__] == literal[string] :
keyword[return] identifier[func] (* identifier[args] ,** identifier[kwargs] ) keyword[or] literal[int]
keyword[else] :
keyword[try] :
keyword[return] identifier[func] (* identifier[args] ,** identifier[kwargs] ) keyword[or] literal[int]
keyword[except] identifier[OSError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[errno] > literal[int] :
identifier[log] . identifier[debug] (
literal[string] ,
identifier[func] . identifier[__name__] , identifier[type] ( identifier[e] ), identifier[e] . identifier[errno] , identifier[exc_info] = keyword[True] )
keyword[return] - identifier[e] . identifier[errno]
keyword[else] :
identifier[log] . identifier[error] (
literal[string]
literal[string] ,
identifier[func] . identifier[__name__] , identifier[e] . identifier[errno] , identifier[exc_info] = keyword[True] )
keyword[return] - identifier[errno] . identifier[EINVAL]
keyword[except] identifier[Exception] :
identifier[log] . identifier[error] ( literal[string]
literal[string] ,
identifier[func] . identifier[__name__] , identifier[exc_info] = keyword[True] )
keyword[return] - identifier[errno] . identifier[EINVAL]
keyword[except] identifier[BaseException] keyword[as] identifier[e] :
identifier[self] . identifier[__critical_exception] = identifier[e]
identifier[log] . identifier[critical] (
literal[string] ,
identifier[func] . identifier[__name__] , identifier[exc_info] = keyword[True] )
identifier[fuse_exit] ()
keyword[return] - identifier[errno] . identifier[EFAULT]
|
def _wrapper(func, *args, **kwargs):
"""Decorator for the methods that follow"""
try:
if func.__name__ == 'init':
# init may not fail, as its return code is just stored as
# private_data field of struct fuse_context
return func(*args, **kwargs) or 0 # depends on [control=['if'], data=[]]
else:
try:
return func(*args, **kwargs) or 0 # depends on [control=['try'], data=[]]
except OSError as e:
if e.errno > 0:
log.debug('FUSE operation %s raised a %s, returning errno %s.', func.__name__, type(e), e.errno, exc_info=True)
return -e.errno # depends on [control=['if'], data=[]]
else:
log.error('FUSE operation %s raised an OSError with negative errno %s, returning errno.EINVAL.', func.__name__, e.errno, exc_info=True)
return -errno.EINVAL # depends on [control=['except'], data=['e']]
except Exception:
log.error('Uncaught exception from FUSE operation %s, returning errno.EINVAL.', func.__name__, exc_info=True)
return -errno.EINVAL # depends on [control=['except'], data=[]] # depends on [control=['try'], data=[]]
except BaseException as e:
self.__critical_exception = e
log.critical('Uncaught critical exception from FUSE operation %s, aborting.', func.__name__, exc_info=True)
# the raised exception (even SystemExit) will be caught by FUSE
# potentially causing SIGSEGV, so tell system to stop/interrupt FUSE
fuse_exit()
return -errno.EFAULT # depends on [control=['except'], data=['e']]
|
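The convention encoded by _wrapper is the raw FUSE one: an operation returns 0 (or a positive value) on success and a negative errno on failure, with EINVAL as the catch-all. A stripped-down sketch of just that mapping, without the logging and fuse_exit machinery:

import errno

def to_fuse_result(func, *args, **kwargs):
    # success -> 0/positive; failure -> negative errno
    try:
        return func(*args, **kwargs) or 0
    except OSError as e:
        return -e.errno if e.errno and e.errno > 0 else -errno.EINVAL
    except Exception:
        return -errno.EINVAL

def missing():
    raise OSError(errno.ENOENT, 'no such file')

print(to_fuse_result(lambda: None))  # 0
print(to_fuse_result(missing))       # -2 (== -errno.ENOENT)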
def get_asset_content_form_for_create(self, asset_id, asset_content_record_types):
"""Gets an asset content form for creating new assets.
arg: asset_id (osid.id.Id): the ``Id`` of an ``Asset``
arg: asset_content_record_types (osid.type.Type[]): array of
asset content record types
return: (osid.repository.AssetContentForm) - the asset content
form
raise: NotFound - ``asset_id`` is not found
raise: NullArgument - ``asset_id`` or
``asset_content_record_types`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - unable to get form for requested record
types
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.learning.ActivityAdminSession.get_activity_form_for_create_template
if not isinstance(asset_id, ABCId):
raise errors.InvalidArgument('argument is not a valid OSID Id')
for arg in asset_content_record_types:
if not isinstance(arg, ABCType):
raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type')
if asset_content_record_types == []:
# WHY are we passing repository_id = self._catalog_id below, seems redundant:
obj_form = objects.AssetContentForm(
repository_id=self._catalog_id,
asset_id=asset_id,
catalog_id=self._catalog_id,
runtime=self._runtime,
proxy=self._proxy)
else:
obj_form = objects.AssetContentForm(
repository_id=self._catalog_id,
record_types=asset_content_record_types,
asset_id=asset_id,
catalog_id=self._catalog_id,
runtime=self._runtime,
proxy=self._proxy)
obj_form._for_update = False
self._forms[obj_form.get_id().get_identifier()] = not CREATED
return obj_form
|
def function[get_asset_content_form_for_create, parameter[self, asset_id, asset_content_record_types]]:
constant[Gets an asset content form for creating new assets.
arg: asset_id (osid.id.Id): the ``Id`` of an ``Asset``
arg: asset_content_record_types (osid.type.Type[]): array of
asset content record types
return: (osid.repository.AssetContentForm) - the asset content
form
raise: NotFound - ``asset_id`` is not found
raise: NullArgument - ``asset_id`` or
``asset_content_record_types`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - unable to get form for requested record
types
*compliance: mandatory -- This method must be implemented.*
]
if <ast.UnaryOp object at 0x7da1b0a70910> begin[:]
<ast.Raise object at 0x7da1b0a70af0>
for taget[name[arg]] in starred[name[asset_content_record_types]] begin[:]
if <ast.UnaryOp object at 0x7da1b092fcd0> begin[:]
<ast.Raise object at 0x7da1b092e050>
if compare[name[asset_content_record_types] equal[==] list[[]]] begin[:]
variable[obj_form] assign[=] call[name[objects].AssetContentForm, parameter[]]
name[obj_form]._for_update assign[=] constant[False]
call[name[self]._forms][call[call[name[obj_form].get_id, parameter[]].get_identifier, parameter[]]] assign[=] <ast.UnaryOp object at 0x7da1b26aee30>
return[name[obj_form]]
|
keyword[def] identifier[get_asset_content_form_for_create] ( identifier[self] , identifier[asset_id] , identifier[asset_content_record_types] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[asset_id] , identifier[ABCId] ):
keyword[raise] identifier[errors] . identifier[InvalidArgument] ( literal[string] )
keyword[for] identifier[arg] keyword[in] identifier[asset_content_record_types] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[arg] , identifier[ABCType] ):
keyword[raise] identifier[errors] . identifier[InvalidArgument] ( literal[string] )
keyword[if] identifier[asset_content_record_types] ==[]:
identifier[obj_form] = identifier[objects] . identifier[AssetContentForm] (
identifier[repository_id] = identifier[self] . identifier[_catalog_id] ,
identifier[asset_id] = identifier[asset_id] ,
identifier[catalog_id] = identifier[self] . identifier[_catalog_id] ,
identifier[runtime] = identifier[self] . identifier[_runtime] ,
identifier[proxy] = identifier[self] . identifier[_proxy] )
keyword[else] :
identifier[obj_form] = identifier[objects] . identifier[AssetContentForm] (
identifier[repository_id] = identifier[self] . identifier[_catalog_id] ,
identifier[record_types] = identifier[asset_content_record_types] ,
identifier[asset_id] = identifier[asset_id] ,
identifier[catalog_id] = identifier[self] . identifier[_catalog_id] ,
identifier[runtime] = identifier[self] . identifier[_runtime] ,
identifier[proxy] = identifier[self] . identifier[_proxy] )
identifier[obj_form] . identifier[_for_update] = keyword[False]
identifier[self] . identifier[_forms] [ identifier[obj_form] . identifier[get_id] (). identifier[get_identifier] ()]= keyword[not] identifier[CREATED]
keyword[return] identifier[obj_form]
|
def get_asset_content_form_for_create(self, asset_id, asset_content_record_types):
"""Gets an asset content form for creating new assets.
arg: asset_id (osid.id.Id): the ``Id`` of an ``Asset``
arg: asset_content_record_types (osid.type.Type[]): array of
asset content record types
return: (osid.repository.AssetContentForm) - the asset content
form
raise: NotFound - ``asset_id`` is not found
raise: NullArgument - ``asset_id`` or
``asset_content_record_types`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - unable to get form for requested record
types
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.learning.ActivityAdminSession.get_activity_form_for_create_template
if not isinstance(asset_id, ABCId):
raise errors.InvalidArgument('argument is not a valid OSID Id') # depends on [control=['if'], data=[]]
for arg in asset_content_record_types:
if not isinstance(arg, ABCType):
raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['arg']]
if asset_content_record_types == []:
# WHY are we passing repository_id = self._catalog_id below, seems redundant:
obj_form = objects.AssetContentForm(repository_id=self._catalog_id, asset_id=asset_id, catalog_id=self._catalog_id, runtime=self._runtime, proxy=self._proxy) # depends on [control=['if'], data=[]]
else:
obj_form = objects.AssetContentForm(repository_id=self._catalog_id, record_types=asset_content_record_types, asset_id=asset_id, catalog_id=self._catalog_id, runtime=self._runtime, proxy=self._proxy)
obj_form._for_update = False
self._forms[obj_form.get_id().get_identifier()] = not CREATED
return obj_form
|
def drawSquiggle(self, p1, p2, breadth = 2):
"""Draw a squiggly line from p1 to p2.
"""
p1 = Point(p1)
p2 = Point(p2)
        S = p2 - p1                        # vector from start to end
rad = abs(S) # distance of points
cnt = 4 * int(round(rad / (4 * breadth), 0)) # always take full phases
if cnt < 4:
raise ValueError("points too close")
mb = rad / cnt # revised breadth
matrix = TOOLS._hor_matrix(p1, p2) # normalize line to x-axis
i_mat = ~matrix # get original position
k = 2.4142135623765633 # y of drawCurve helper point
points = [] # stores edges
for i in range (1, cnt):
if i % 4 == 1: # point "above" connection
p = Point(i, -k) * mb
elif i % 4 == 3: # point "below" connection
p = Point(i, k) * mb
else: # else on connection line
p = Point(i, 0) * mb
points.append(p * i_mat)
points = [p1] + points + [p2]
cnt = len(points)
i = 0
while i + 2 < cnt:
self.drawCurve(points[i], points[i+1], points[i+2])
i += 2
return p2
|
def function[drawSquiggle, parameter[self, p1, p2, breadth]]:
constant[Draw a squiggly line from p1 to p2.
]
variable[p1] assign[=] call[name[Point], parameter[name[p1]]]
variable[p2] assign[=] call[name[Point], parameter[name[p2]]]
variable[S] assign[=] binary_operation[name[p2] - name[p1]]
variable[rad] assign[=] call[name[abs], parameter[name[S]]]
variable[cnt] assign[=] binary_operation[constant[4] * call[name[int], parameter[call[name[round], parameter[binary_operation[name[rad] / binary_operation[constant[4] * name[breadth]]], constant[0]]]]]]
if compare[name[cnt] less[<] constant[4]] begin[:]
<ast.Raise object at 0x7da1b18c2290>
variable[mb] assign[=] binary_operation[name[rad] / name[cnt]]
variable[matrix] assign[=] call[name[TOOLS]._hor_matrix, parameter[name[p1], name[p2]]]
variable[i_mat] assign[=] <ast.UnaryOp object at 0x7da1b18c1e40>
variable[k] assign[=] constant[2.4142135623765633]
variable[points] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], name[cnt]]]] begin[:]
if compare[binary_operation[name[i] <ast.Mod object at 0x7da2590d6920> constant[4]] equal[==] constant[1]] begin[:]
variable[p] assign[=] binary_operation[call[name[Point], parameter[name[i], <ast.UnaryOp object at 0x7da1b18c1c00>]] * name[mb]]
call[name[points].append, parameter[binary_operation[name[p] * name[i_mat]]]]
variable[points] assign[=] binary_operation[binary_operation[list[[<ast.Name object at 0x7da1b18c1b70>]] + name[points]] + list[[<ast.Name object at 0x7da1b18c0130>]]]
variable[cnt] assign[=] call[name[len], parameter[name[points]]]
variable[i] assign[=] constant[0]
while compare[binary_operation[name[i] + constant[2]] less[<] name[cnt]] begin[:]
call[name[self].drawCurve, parameter[call[name[points]][name[i]], call[name[points]][binary_operation[name[i] + constant[1]]], call[name[points]][binary_operation[name[i] + constant[2]]]]]
<ast.AugAssign object at 0x7da1b18c0340>
return[name[p2]]
|
keyword[def] identifier[drawSquiggle] ( identifier[self] , identifier[p1] , identifier[p2] , identifier[breadth] = literal[int] ):
literal[string]
identifier[p1] = identifier[Point] ( identifier[p1] )
identifier[p2] = identifier[Point] ( identifier[p2] )
identifier[S] = identifier[p2] - identifier[p1]
identifier[rad] = identifier[abs] ( identifier[S] )
identifier[cnt] = literal[int] * identifier[int] ( identifier[round] ( identifier[rad] /( literal[int] * identifier[breadth] ), literal[int] ))
keyword[if] identifier[cnt] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[mb] = identifier[rad] / identifier[cnt]
identifier[matrix] = identifier[TOOLS] . identifier[_hor_matrix] ( identifier[p1] , identifier[p2] )
identifier[i_mat] =~ identifier[matrix]
identifier[k] = literal[int]
identifier[points] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[cnt] ):
keyword[if] identifier[i] % literal[int] == literal[int] :
identifier[p] = identifier[Point] ( identifier[i] ,- identifier[k] )* identifier[mb]
keyword[elif] identifier[i] % literal[int] == literal[int] :
identifier[p] = identifier[Point] ( identifier[i] , identifier[k] )* identifier[mb]
keyword[else] :
identifier[p] = identifier[Point] ( identifier[i] , literal[int] )* identifier[mb]
identifier[points] . identifier[append] ( identifier[p] * identifier[i_mat] )
identifier[points] =[ identifier[p1] ]+ identifier[points] +[ identifier[p2] ]
identifier[cnt] = identifier[len] ( identifier[points] )
identifier[i] = literal[int]
keyword[while] identifier[i] + literal[int] < identifier[cnt] :
identifier[self] . identifier[drawCurve] ( identifier[points] [ identifier[i] ], identifier[points] [ identifier[i] + literal[int] ], identifier[points] [ identifier[i] + literal[int] ])
identifier[i] += literal[int]
keyword[return] identifier[p2]
|
def drawSquiggle(self, p1, p2, breadth=2):
"""Draw a squiggly line from p1 to p2.
"""
p1 = Point(p1)
p2 = Point(p2)
    S = p2 - p1 # vector from start to end
rad = abs(S) # distance of points
cnt = 4 * int(round(rad / (4 * breadth), 0)) # always take full phases
if cnt < 4:
raise ValueError('points too close') # depends on [control=['if'], data=[]]
mb = rad / cnt # revised breadth
matrix = TOOLS._hor_matrix(p1, p2) # normalize line to x-axis
i_mat = ~matrix # get original position
k = 2.4142135623765633 # y of drawCurve helper point
points = [] # stores edges
for i in range(1, cnt):
if i % 4 == 1: # point "above" connection
p = Point(i, -k) * mb # depends on [control=['if'], data=[]]
elif i % 4 == 3: # point "below" connection
p = Point(i, k) * mb # depends on [control=['if'], data=[]]
else: # else on connection line
p = Point(i, 0) * mb
points.append(p * i_mat) # depends on [control=['for'], data=['i']]
points = [p1] + points + [p2]
cnt = len(points)
i = 0
while i + 2 < cnt:
self.drawCurve(points[i], points[i + 1], points[i + 2])
i += 2 # depends on [control=['while'], data=[]]
return p2
|
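The constant k ≈ 1 + √2 is the y-offset of the Bézier helper point handed to drawCurve; points are consumed in overlapping triples, stepping by two, so consecutive curve segments share endpoints. A usage sketch against the camelCase PyMuPDF API this method belongs to (the newPage/newShape names are assumptions tied to the older releases that shipped drawSquiggle):

import fitz  # PyMuPDF

doc = fitz.open()  # new, empty PDF
page = doc.newPage()
shape = page.newShape()
shape.drawSquiggle(fitz.Point(72, 150), fitz.Point(400, 150), breadth=4)
shape.finish(color=(0, 0, 1), width=1)
shape.commit()
doc.save("squiggle.pdf")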
def symbols_expanding_pre_event_input_accelerators(editor, event):
"""
Implements symbols expanding pre event input accelerators.
:param editor: Document editor.
:type editor: QWidget
:param event: Event being handled.
:type event: QEvent
:return: Process event.
:rtype: bool
"""
process_event = True
symbols_pairs = get_editor_capability(editor, "symbols_pairs")
if not symbols_pairs:
return process_event
text = foundations.strings.to_string(event.text())
if text in symbols_pairs:
cursor = editor.textCursor()
if not is_symbols_pair_complete(editor, text):
cursor.insertText(event.text())
else:
if not cursor.hasSelection():
cursor.insertText(event.text())
# TODO: Provide an efficient code alternative.
# position = cursor.position()
# cursor.movePosition(QTextCursor.EndOfLine, QTextCursor.KeepAnchor)
# selected_text = foundations.strings.to_string(cursor.selectedText())
# cursor.setPosition(position)
# if not selected_text.strip():
cursor.insertText(symbols_pairs[text])
cursor.movePosition(QTextCursor.Left, QTextCursor.MoveAnchor)
else:
selected_text = cursor.selectedText()
cursor.insertText(event.text())
cursor.insertText(selected_text)
cursor.insertText(symbols_pairs[text])
editor.setTextCursor(cursor)
process_event = False
if event.key() in (Qt.Key_Backspace,):
cursor = editor.textCursor()
cursor.movePosition(QTextCursor.Left, QTextCursor.KeepAnchor)
left_text = cursor.selectedText()
foundations.common.repeat(lambda: cursor.movePosition(QTextCursor.Right, QTextCursor.KeepAnchor), 2)
right_text = cursor.selectedText()
if symbols_pairs.get(foundations.strings.to_string(left_text)) == foundations.strings.to_string(right_text):
cursor.deleteChar()
return process_event
|
def function[symbols_expanding_pre_event_input_accelerators, parameter[editor, event]]:
constant[
Implements symbols expanding pre event input accelerators.
:param editor: Document editor.
:type editor: QWidget
:param event: Event being handled.
:type event: QEvent
:return: Process event.
:rtype: bool
]
variable[process_event] assign[=] constant[True]
variable[symbols_pairs] assign[=] call[name[get_editor_capability], parameter[name[editor], constant[symbols_pairs]]]
if <ast.UnaryOp object at 0x7da204622fb0> begin[:]
return[name[process_event]]
variable[text] assign[=] call[name[foundations].strings.to_string, parameter[call[name[event].text, parameter[]]]]
if compare[name[text] in name[symbols_pairs]] begin[:]
variable[cursor] assign[=] call[name[editor].textCursor, parameter[]]
if <ast.UnaryOp object at 0x7da204620070> begin[:]
call[name[cursor].insertText, parameter[call[name[event].text, parameter[]]]]
call[name[editor].setTextCursor, parameter[name[cursor]]]
variable[process_event] assign[=] constant[False]
if compare[call[name[event].key, parameter[]] in tuple[[<ast.Attribute object at 0x7da204622bf0>]]] begin[:]
variable[cursor] assign[=] call[name[editor].textCursor, parameter[]]
call[name[cursor].movePosition, parameter[name[QTextCursor].Left, name[QTextCursor].KeepAnchor]]
variable[left_text] assign[=] call[name[cursor].selectedText, parameter[]]
call[name[foundations].common.repeat, parameter[<ast.Lambda object at 0x7da1b08ad870>, constant[2]]]
variable[right_text] assign[=] call[name[cursor].selectedText, parameter[]]
if compare[call[name[symbols_pairs].get, parameter[call[name[foundations].strings.to_string, parameter[name[left_text]]]]] equal[==] call[name[foundations].strings.to_string, parameter[name[right_text]]]] begin[:]
call[name[cursor].deleteChar, parameter[]]
return[name[process_event]]
|
keyword[def] identifier[symbols_expanding_pre_event_input_accelerators] ( identifier[editor] , identifier[event] ):
literal[string]
identifier[process_event] = keyword[True]
identifier[symbols_pairs] = identifier[get_editor_capability] ( identifier[editor] , literal[string] )
keyword[if] keyword[not] identifier[symbols_pairs] :
keyword[return] identifier[process_event]
identifier[text] = identifier[foundations] . identifier[strings] . identifier[to_string] ( identifier[event] . identifier[text] ())
keyword[if] identifier[text] keyword[in] identifier[symbols_pairs] :
identifier[cursor] = identifier[editor] . identifier[textCursor] ()
keyword[if] keyword[not] identifier[is_symbols_pair_complete] ( identifier[editor] , identifier[text] ):
identifier[cursor] . identifier[insertText] ( identifier[event] . identifier[text] ())
keyword[else] :
keyword[if] keyword[not] identifier[cursor] . identifier[hasSelection] ():
identifier[cursor] . identifier[insertText] ( identifier[event] . identifier[text] ())
identifier[cursor] . identifier[insertText] ( identifier[symbols_pairs] [ identifier[text] ])
identifier[cursor] . identifier[movePosition] ( identifier[QTextCursor] . identifier[Left] , identifier[QTextCursor] . identifier[MoveAnchor] )
keyword[else] :
identifier[selected_text] = identifier[cursor] . identifier[selectedText] ()
identifier[cursor] . identifier[insertText] ( identifier[event] . identifier[text] ())
identifier[cursor] . identifier[insertText] ( identifier[selected_text] )
identifier[cursor] . identifier[insertText] ( identifier[symbols_pairs] [ identifier[text] ])
identifier[editor] . identifier[setTextCursor] ( identifier[cursor] )
identifier[process_event] = keyword[False]
keyword[if] identifier[event] . identifier[key] () keyword[in] ( identifier[Qt] . identifier[Key_Backspace] ,):
identifier[cursor] = identifier[editor] . identifier[textCursor] ()
identifier[cursor] . identifier[movePosition] ( identifier[QTextCursor] . identifier[Left] , identifier[QTextCursor] . identifier[KeepAnchor] )
identifier[left_text] = identifier[cursor] . identifier[selectedText] ()
identifier[foundations] . identifier[common] . identifier[repeat] ( keyword[lambda] : identifier[cursor] . identifier[movePosition] ( identifier[QTextCursor] . identifier[Right] , identifier[QTextCursor] . identifier[KeepAnchor] ), literal[int] )
identifier[right_text] = identifier[cursor] . identifier[selectedText] ()
keyword[if] identifier[symbols_pairs] . identifier[get] ( identifier[foundations] . identifier[strings] . identifier[to_string] ( identifier[left_text] ))== identifier[foundations] . identifier[strings] . identifier[to_string] ( identifier[right_text] ):
identifier[cursor] . identifier[deleteChar] ()
keyword[return] identifier[process_event]
|
def symbols_expanding_pre_event_input_accelerators(editor, event):
"""
Implements symbols expanding pre event input accelerators.
:param editor: Document editor.
:type editor: QWidget
:param event: Event being handled.
:type event: QEvent
:return: Process event.
:rtype: bool
"""
process_event = True
symbols_pairs = get_editor_capability(editor, 'symbols_pairs')
if not symbols_pairs:
return process_event # depends on [control=['if'], data=[]]
text = foundations.strings.to_string(event.text())
if text in symbols_pairs:
cursor = editor.textCursor()
if not is_symbols_pair_complete(editor, text):
cursor.insertText(event.text()) # depends on [control=['if'], data=[]]
elif not cursor.hasSelection():
cursor.insertText(event.text())
# TODO: Provide an efficient code alternative.
# position = cursor.position()
# cursor.movePosition(QTextCursor.EndOfLine, QTextCursor.KeepAnchor)
# selected_text = foundations.strings.to_string(cursor.selectedText())
# cursor.setPosition(position)
# if not selected_text.strip():
cursor.insertText(symbols_pairs[text])
cursor.movePosition(QTextCursor.Left, QTextCursor.MoveAnchor) # depends on [control=['if'], data=[]]
else:
selected_text = cursor.selectedText()
cursor.insertText(event.text())
cursor.insertText(selected_text)
cursor.insertText(symbols_pairs[text])
editor.setTextCursor(cursor)
process_event = False # depends on [control=['if'], data=['text', 'symbols_pairs']]
if event.key() in (Qt.Key_Backspace,):
cursor = editor.textCursor()
cursor.movePosition(QTextCursor.Left, QTextCursor.KeepAnchor)
left_text = cursor.selectedText()
foundations.common.repeat(lambda : cursor.movePosition(QTextCursor.Right, QTextCursor.KeepAnchor), 2)
right_text = cursor.selectedText()
if symbols_pairs.get(foundations.strings.to_string(left_text)) == foundations.strings.to_string(right_text):
cursor.deleteChar() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return process_event
|
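A pure-Python model of the editing behaviour above (hypothetical helper, no Qt): typing an opening symbol with no selection inserts the pair and leaves the caret between the two halves; with a selection it wraps the selection; Backspace between a matching pair deletes both halves.

PAIRS = {'(': ')', '[': ']', '"': '"'}

def expand(typed, selection=''):
    closing = PAIRS.get(typed)
    if closing is None:
        return typed + selection
    if selection:
        return typed + selection + closing  # wrap the selection
    return typed + closing                  # caret would sit between the pair

print(expand('('))          # ()
print(expand('[', 'item'))  # [item]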
def concatenate(*args, **kwargs):
"""
Concatenates the given strings.
Usage::
{% load libs_tags %}
{% concatenate "foo" "bar" as new_string %}
{% concatenate "foo" "bar" divider="_" as another_string %}
The above would result in the strings "foobar" and "foo_bar".
"""
divider = kwargs.get('divider', '')
result = ''
for arg in args:
if result == '':
result += arg
else:
result += '{0}{1}'.format(divider, arg)
return result
|
def function[concatenate, parameter[]]:
constant[
Concatenates the given strings.
Usage::
{% load libs_tags %}
{% concatenate "foo" "bar" as new_string %}
{% concatenate "foo" "bar" divider="_" as another_string %}
The above would result in the strings "foobar" and "foo_bar".
]
variable[divider] assign[=] call[name[kwargs].get, parameter[constant[divider], constant[]]]
variable[result] assign[=] constant[]
for taget[name[arg]] in starred[name[args]] begin[:]
if compare[name[result] equal[==] constant[]] begin[:]
<ast.AugAssign object at 0x7da1b25392a0>
return[name[result]]
|
keyword[def] identifier[concatenate] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[divider] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[result] = literal[string]
keyword[for] identifier[arg] keyword[in] identifier[args] :
keyword[if] identifier[result] == literal[string] :
identifier[result] += identifier[arg]
keyword[else] :
identifier[result] += literal[string] . identifier[format] ( identifier[divider] , identifier[arg] )
keyword[return] identifier[result]
|
def concatenate(*args, **kwargs):
"""
Concatenates the given strings.
Usage::
{% load libs_tags %}
{% concatenate "foo" "bar" as new_string %}
{% concatenate "foo" "bar" divider="_" as another_string %}
The above would result in the strings "foobar" and "foo_bar".
"""
divider = kwargs.get('divider', '')
result = ''
for arg in args:
if result == '':
result += arg # depends on [control=['if'], data=['result']]
else:
result += '{0}{1}'.format(divider, arg) # depends on [control=['for'], data=['arg']]
return result
|
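Outside a template, the tag function above behaves like a divider-aware join (the divider is skipped before the first fragment, so the result equals divider.join(args)):

print(concatenate('foo', 'bar'))                # foobar
print(concatenate('foo', 'bar', divider='_'))   # foo_bar
print(concatenate('a', 'b', 'c', divider='-'))  # a-b-c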
def orientality(self):
""" Returns the orientality of the object. """
sun = self.chart.getObject(const.SUN)
return orientality(self.obj, sun)
|
def function[orientality, parameter[self]]:
constant[ Returns the orientality of the object. ]
variable[sun] assign[=] call[name[self].chart.getObject, parameter[name[const].SUN]]
return[call[name[orientality], parameter[name[self].obj, name[sun]]]]
|
keyword[def] identifier[orientality] ( identifier[self] ):
literal[string]
identifier[sun] = identifier[self] . identifier[chart] . identifier[getObject] ( identifier[const] . identifier[SUN] )
keyword[return] identifier[orientality] ( identifier[self] . identifier[obj] , identifier[sun] )
|
def orientality(self):
""" Returns the orientality of the object. """
sun = self.chart.getObject(const.SUN)
return orientality(self.obj, sun)
|
def fillTriangle(self, x0, y0, x1, y1, x2, y2, color=None, aa=False):
"""
Draw filled triangle with points x0,y0 - x1,y1 - x2,y2
:param aa: if True, use Bresenham's algorithm for line drawing;
otherwise use Xiaolin Wu's algorithm
"""
md.fill_triangle(self.set, x0, y0, x1, y1, x2, y2, color, aa)
|
def function[fillTriangle, parameter[self, x0, y0, x1, y1, x2, y2, color, aa]]:
constant[
Draw filled triangle with points x0,y0 - x1,y1 - x2,y2
:param aa: if True, use Bresenham's algorithm for line drawing;
otherwise use Xiaolin Wu's algorithm
]
call[name[md].fill_triangle, parameter[name[self].set, name[x0], name[y0], name[x1], name[y1], name[x2], name[y2], name[color], name[aa]]]
|
keyword[def] identifier[fillTriangle] ( identifier[self] , identifier[x0] , identifier[y0] , identifier[x1] , identifier[y1] , identifier[x2] , identifier[y2] , identifier[color] = keyword[None] , identifier[aa] = keyword[False] ):
literal[string]
identifier[md] . identifier[fill_triangle] ( identifier[self] . identifier[set] , identifier[x0] , identifier[y0] , identifier[x1] , identifier[y1] , identifier[x2] , identifier[y2] , identifier[color] , identifier[aa] )
|
def fillTriangle(self, x0, y0, x1, y1, x2, y2, color=None, aa=False):
"""
Draw filled triangle with points x0,y0 - x1,y1 - x2,y2
:param aa: if True, use Bresenham's algorithm for line drawing;
otherwise use Xiaolin Wu's algorithm
"""
md.fill_triangle(self.set, x0, y0, x1, y1, x2, y2, color, aa)
|
async def get_session_data(self):
"""Get Tautulli sessions."""
cmd = 'get_activity'
url = self.base_url + cmd
try:
async with async_timeout.timeout(8, loop=self._loop):
response = await self._session.get(url)
logger("Status from Tautulli: " + str(response.status))
self.tautulli_session_data = await response.json()
logger(self.tautulli_session_data)
except (asyncio.TimeoutError, aiohttp.ClientError, socket.gaierror,
AttributeError) as error:
msg = "Can not load data from Tautulli: {} - {}".format(url, error)
logger(msg, 40)
|
<ast.AsyncFunctionDef object at 0x7da18f811450>
|
keyword[async] keyword[def] identifier[get_session_data] ( identifier[self] ):
literal[string]
identifier[cmd] = literal[string]
identifier[url] = identifier[self] . identifier[base_url] + identifier[cmd]
keyword[try] :
keyword[async] keyword[with] identifier[async_timeout] . identifier[timeout] ( literal[int] , identifier[loop] = identifier[self] . identifier[_loop] ):
identifier[response] = keyword[await] identifier[self] . identifier[_session] . identifier[get] ( identifier[url] )
identifier[logger] ( literal[string] + identifier[str] ( identifier[response] . identifier[status] ))
identifier[self] . identifier[tautulli_session_data] = keyword[await] identifier[response] . identifier[json] ()
identifier[logger] ( identifier[self] . identifier[tautulli_session_data] )
keyword[except] ( identifier[asyncio] . identifier[TimeoutError] , identifier[aiohttp] . identifier[ClientError] , identifier[socket] . identifier[gaierror] ,
identifier[AttributeError] ) keyword[as] identifier[error] :
identifier[msg] = literal[string] . identifier[format] ( identifier[url] , identifier[error] )
identifier[logger] ( identifier[msg] , literal[int] )
|
async def get_session_data(self):
"""Get Tautulli sessions."""
cmd = 'get_activity'
url = self.base_url + cmd
try:
async with async_timeout.timeout(8, loop=self._loop):
response = await self._session.get(url)
logger('Status from Tautulli: ' + str(response.status))
self.tautulli_session_data = await response.json()
logger(self.tautulli_session_data) # depends on [control=['try'], data=[]]
except (asyncio.TimeoutError, aiohttp.ClientError, socket.gaierror, AttributeError) as error:
msg = 'Can not load data from Tautulli: {} - {}'.format(url, error)
logger(msg, 40) # depends on [control=['except'], data=['error']]
|
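The same bounded-wait fetch pattern in isolation, assuming aiohttp and async_timeout are installed (the loop= keyword used above is deprecated in newer async_timeout releases, so it is omitted here; the URL is illustrative):

import asyncio
import aiohttp
import async_timeout

async def fetch_activity(base_url):
    # mirrors the pattern above: bounded wait, GET, parse JSON
    async with aiohttp.ClientSession() as session:
        async with async_timeout.timeout(8):
            resp = await session.get(base_url + 'get_activity')
            return await resp.json()

# asyncio.run(fetch_activity('http://localhost:8181/api/v2?cmd='))  # illustrative URL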
def run(self):
"""Loop forever, pushing out stats."""
self.graphite.start()
while True:
log.debug('Graphite pusher is sleeping for %d seconds', self.period)
time.sleep(self.period)
log.debug('Pushing stats to Graphite')
try:
self.push()
log.debug('Done pushing stats to Graphite')
except:
log.exception('Exception while pushing stats to Graphite')
raise
|
def function[run, parameter[self]]:
constant[Loop forever, pushing out stats.]
call[name[self].graphite.start, parameter[]]
while constant[True] begin[:]
call[name[log].debug, parameter[constant[Graphite pusher is sleeping for %d seconds], name[self].period]]
call[name[time].sleep, parameter[name[self].period]]
call[name[log].debug, parameter[constant[Pushing stats to Graphite]]]
<ast.Try object at 0x7da20c992950>
|
keyword[def] identifier[run] ( identifier[self] ):
literal[string]
identifier[self] . identifier[graphite] . identifier[start] ()
keyword[while] keyword[True] :
identifier[log] . identifier[debug] ( literal[string] , identifier[self] . identifier[period] )
identifier[time] . identifier[sleep] ( identifier[self] . identifier[period] )
identifier[log] . identifier[debug] ( literal[string] )
keyword[try] :
identifier[self] . identifier[push] ()
identifier[log] . identifier[debug] ( literal[string] )
keyword[except] :
identifier[log] . identifier[exception] ( literal[string] )
keyword[raise]
|
def run(self):
"""Loop forever, pushing out stats."""
self.graphite.start()
while True:
log.debug('Graphite pusher is sleeping for %d seconds', self.period)
time.sleep(self.period)
log.debug('Pushing stats to Graphite')
try:
self.push()
log.debug('Done pushing stats to Graphite') # depends on [control=['try'], data=[]]
except:
log.exception('Exception while pushing stats to Graphite')
raise # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]]
|
def json_schema_type(schema_file: str, **kwargs) -> typing.Type:
"""Create a :class:`~doctor.types.JsonSchema` type.
This function will automatically load the schema and set it as an attribute
of the class along with the description and example.
:param schema_file: The full path to the json schema file to load.
:param kwargs: Can include any attribute defined in
:class:`~doctor.types.JsonSchema`
"""
# Importing here to avoid circular dependencies
from doctor.resource import ResourceSchema
schema = ResourceSchema.from_file(schema_file)
kwargs['schema'] = schema
# Look up the description, example and type in the schema.
definition_key = kwargs.get('definition_key')
if definition_key:
params = [definition_key]
request_schema = schema._create_request_schema(params, params)
try:
definition = request_schema['definitions'][definition_key]
except KeyError:
raise TypeSystemError(
'Definition `{}` is not defined in the schema.'.format(
definition_key))
description = get_value_from_schema(
schema, definition, 'description', definition_key)
example = get_value_from_schema(
schema, definition, 'example', definition_key)
json_type = get_value_from_schema(
schema, definition, 'type', definition_key)
json_type, native_type = get_types(json_type)
kwargs['description'] = description
kwargs['example'] = example
kwargs['json_type'] = json_type
kwargs['native_type'] = native_type
else:
try:
kwargs['description'] = schema.schema['description']
except KeyError:
raise TypeSystemError('Schema is missing a description.')
try:
json_type = schema.schema['type']
except KeyError:
raise TypeSystemError('Schema is missing a type.')
json_type, native_type = get_types(json_type)
kwargs['json_type'] = json_type
kwargs['native_type'] = native_type
try:
kwargs['example'] = schema.schema['example']
except KeyError:
# Attempt to load from properties, if defined.
if schema.schema.get('properties'):
example = {}
for prop, definition in schema.schema['properties'].items():
example[prop] = get_value_from_schema(
schema, definition, 'example', 'root')
kwargs['example'] = example
else:
raise TypeSystemError('Schema is missing an example.')
return type('JsonSchema', (JsonSchema,), kwargs)
|
def function[json_schema_type, parameter[schema_file]]:
constant[Create a :class:`~doctor.types.JsonSchema` type.
This function will automatically load the schema and set it as an attribute
of the class along with the description and example.
:param schema_file: The full path to the json schema file to load.
:param kwargs: Can include any attribute defined in
:class:`~doctor.types.JsonSchema`
]
from relative_module[doctor.resource] import module[ResourceSchema]
variable[schema] assign[=] call[name[ResourceSchema].from_file, parameter[name[schema_file]]]
call[name[kwargs]][constant[schema]] assign[=] name[schema]
variable[definition_key] assign[=] call[name[kwargs].get, parameter[constant[definition_key]]]
if name[definition_key] begin[:]
variable[params] assign[=] list[[<ast.Name object at 0x7da18f58c820>]]
variable[request_schema] assign[=] call[name[schema]._create_request_schema, parameter[name[params], name[params]]]
<ast.Try object at 0x7da18f58f8b0>
variable[description] assign[=] call[name[get_value_from_schema], parameter[name[schema], name[definition], constant[description], name[definition_key]]]
variable[example] assign[=] call[name[get_value_from_schema], parameter[name[schema], name[definition], constant[example], name[definition_key]]]
variable[json_type] assign[=] call[name[get_value_from_schema], parameter[name[schema], name[definition], constant[type], name[definition_key]]]
<ast.Tuple object at 0x7da18f58f670> assign[=] call[name[get_types], parameter[name[json_type]]]
call[name[kwargs]][constant[description]] assign[=] name[description]
call[name[kwargs]][constant[example]] assign[=] name[example]
call[name[kwargs]][constant[json_type]] assign[=] name[json_type]
call[name[kwargs]][constant[native_type]] assign[=] name[native_type]
return[call[name[type], parameter[constant[JsonSchema], tuple[[<ast.Name object at 0x7da18f58d810>]], name[kwargs]]]]
|
keyword[def] identifier[json_schema_type] ( identifier[schema_file] : identifier[str] ,** identifier[kwargs] )-> identifier[typing] . identifier[Type] :
literal[string]
keyword[from] identifier[doctor] . identifier[resource] keyword[import] identifier[ResourceSchema]
identifier[schema] = identifier[ResourceSchema] . identifier[from_file] ( identifier[schema_file] )
identifier[kwargs] [ literal[string] ]= identifier[schema]
identifier[definition_key] = identifier[kwargs] . identifier[get] ( literal[string] )
keyword[if] identifier[definition_key] :
identifier[params] =[ identifier[definition_key] ]
identifier[request_schema] = identifier[schema] . identifier[_create_request_schema] ( identifier[params] , identifier[params] )
keyword[try] :
identifier[definition] = identifier[request_schema] [ literal[string] ][ identifier[definition_key] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[TypeSystemError] (
literal[string] . identifier[format] (
identifier[definition_key] ))
identifier[description] = identifier[get_value_from_schema] (
identifier[schema] , identifier[definition] , literal[string] , identifier[definition_key] )
identifier[example] = identifier[get_value_from_schema] (
identifier[schema] , identifier[definition] , literal[string] , identifier[definition_key] )
identifier[json_type] = identifier[get_value_from_schema] (
identifier[schema] , identifier[definition] , literal[string] , identifier[definition_key] )
identifier[json_type] , identifier[native_type] = identifier[get_types] ( identifier[json_type] )
identifier[kwargs] [ literal[string] ]= identifier[description]
identifier[kwargs] [ literal[string] ]= identifier[example]
identifier[kwargs] [ literal[string] ]= identifier[json_type]
identifier[kwargs] [ literal[string] ]= identifier[native_type]
keyword[else] :
keyword[try] :
identifier[kwargs] [ literal[string] ]= identifier[schema] . identifier[schema] [ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[TypeSystemError] ( literal[string] )
keyword[try] :
identifier[json_type] = identifier[schema] . identifier[schema] [ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[TypeSystemError] ( literal[string] )
identifier[json_type] , identifier[native_type] = identifier[get_types] ( identifier[json_type] )
identifier[kwargs] [ literal[string] ]= identifier[json_type]
identifier[kwargs] [ literal[string] ]= identifier[native_type]
keyword[try] :
identifier[kwargs] [ literal[string] ]= identifier[schema] . identifier[schema] [ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[if] identifier[schema] . identifier[schema] . identifier[get] ( literal[string] ):
identifier[example] ={}
keyword[for] identifier[prop] , identifier[definition] keyword[in] identifier[schema] . identifier[schema] [ literal[string] ]. identifier[items] ():
identifier[example] [ identifier[prop] ]= identifier[get_value_from_schema] (
identifier[schema] , identifier[definition] , literal[string] , literal[string] )
identifier[kwargs] [ literal[string] ]= identifier[example]
keyword[else] :
keyword[raise] identifier[TypeSystemError] ( literal[string] )
keyword[return] identifier[type] ( literal[string] ,( identifier[JsonSchema] ,), identifier[kwargs] )
|
def json_schema_type(schema_file: str, **kwargs) -> typing.Type:
"""Create a :class:`~doctor.types.JsonSchema` type.
This function will automatically load the schema and set it as an attribute
of the class along with the description and example.
:param schema_file: The full path to the json schema file to load.
:param kwargs: Can include any attribute defined in
:class:`~doctor.types.JsonSchema`
"""
# Importing here to avoid circular dependencies
from doctor.resource import ResourceSchema
schema = ResourceSchema.from_file(schema_file)
kwargs['schema'] = schema
# Look up the description, example and type in the schema.
definition_key = kwargs.get('definition_key')
if definition_key:
params = [definition_key]
request_schema = schema._create_request_schema(params, params)
try:
definition = request_schema['definitions'][definition_key] # depends on [control=['try'], data=[]]
except KeyError:
raise TypeSystemError('Definition `{}` is not defined in the schema.'.format(definition_key)) # depends on [control=['except'], data=[]]
description = get_value_from_schema(schema, definition, 'description', definition_key)
example = get_value_from_schema(schema, definition, 'example', definition_key)
json_type = get_value_from_schema(schema, definition, 'type', definition_key)
(json_type, native_type) = get_types(json_type)
kwargs['description'] = description
kwargs['example'] = example
kwargs['json_type'] = json_type
kwargs['native_type'] = native_type # depends on [control=['if'], data=[]]
else:
try:
kwargs['description'] = schema.schema['description'] # depends on [control=['try'], data=[]]
except KeyError:
raise TypeSystemError('Schema is missing a description.') # depends on [control=['except'], data=[]]
try:
json_type = schema.schema['type'] # depends on [control=['try'], data=[]]
except KeyError:
raise TypeSystemError('Schema is missing a type.') # depends on [control=['except'], data=[]]
(json_type, native_type) = get_types(json_type)
kwargs['json_type'] = json_type
kwargs['native_type'] = native_type
try:
kwargs['example'] = schema.schema['example'] # depends on [control=['try'], data=[]]
except KeyError:
# Attempt to load from properties, if defined.
if schema.schema.get('properties'):
example = {}
for (prop, definition) in schema.schema['properties'].items():
example[prop] = get_value_from_schema(schema, definition, 'example', 'root') # depends on [control=['for'], data=[]]
kwargs['example'] = example # depends on [control=['if'], data=[]]
else:
raise TypeSystemError('Schema is missing an example.') # depends on [control=['except'], data=[]]
return type('JsonSchema', (JsonSchema,), kwargs)
|
def _layout_for_domfuzz(self, path):
"""
Update directory to work with DOMFuzz
@type path: str
@param path: A string representation of the fuzzmanager config path
"""
old_dir = os.getcwd()
os.chdir(os.path.join(path))
try:
os.mkdir('dist')
link_name = os.path.join('dist', 'bin')
if self._platform.system == 'Darwin' and self._target == 'firefox':
ff_loc = glob.glob('*.app/Contents/MacOS/firefox')
assert len(ff_loc) == 1
os.symlink(os.path.join(os.pardir, os.path.dirname(ff_loc[0])), # pylint: disable=no-member
link_name)
os.symlink(os.path.join(os.pardir, os.pardir, os.pardir, 'symbols'), # pylint: disable=no-member
os.path.join(os.path.dirname(ff_loc[0]), 'symbols'))
elif self._platform.system == 'Linux':
os.symlink(os.pardir, link_name) # pylint: disable=no-member
elif self._platform.system == 'Windows':
# create a junction point at dist\bin pointing to the firefox.exe path
junction_path.symlink(os.curdir, link_name)
finally:
os.chdir(old_dir)
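A hedged harness for exercising the method on Linux; the host class and platform object are assumptions inferred from the attributes the method reads (`self._platform.system`, `self._target`).

import os

class _Platform:
    system = 'Linux'                 # or 'Darwin' / 'Windows'

class BuildDir:
    _layout_for_domfuzz = _layout_for_domfuzz   # reuse the method above

    def __init__(self, platform, target):
        self._platform = platform
        self._target = target

build_path = '/tmp/build'            # hypothetical unpacked-build directory
os.makedirs(build_path, exist_ok=True)
BuildDir(_Platform(), 'firefox')._layout_for_domfuzz(build_path)
print(os.readlink(os.path.join(build_path, 'dist', 'bin')))  # -> '..'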
|
def function[_layout_for_domfuzz, parameter[self, path]]:
constant[
Update directory to work with DOMFuzz
@type path: str
@param path: A string representation of the fuzzmanager config path
]
variable[old_dir] assign[=] call[name[os].getcwd, parameter[]]
call[name[os].chdir, parameter[call[name[os].path.join, parameter[name[path]]]]]
<ast.Try object at 0x7da20c6aa770>
|
keyword[def] identifier[_layout_for_domfuzz] ( identifier[self] , identifier[path] ):
literal[string]
identifier[old_dir] = identifier[os] . identifier[getcwd] ()
identifier[os] . identifier[chdir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] ))
keyword[try] :
identifier[os] . identifier[mkdir] ( literal[string] )
identifier[link_name] = identifier[os] . identifier[path] . identifier[join] ( literal[string] , literal[string] )
keyword[if] identifier[self] . identifier[_platform] . identifier[system] == literal[string] keyword[and] identifier[self] . identifier[_target] == literal[string] :
identifier[ff_loc] = identifier[glob] . identifier[glob] ( literal[string] )
keyword[assert] identifier[len] ( identifier[ff_loc] )== literal[int]
identifier[os] . identifier[symlink] ( identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[pardir] , identifier[os] . identifier[path] . identifier[dirname] ( identifier[ff_loc] [ literal[int] ])),
identifier[link_name] )
identifier[os] . identifier[symlink] ( identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[pardir] , identifier[os] . identifier[pardir] , identifier[os] . identifier[pardir] , literal[string] ),
identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[ff_loc] [ literal[int] ]), literal[string] ))
keyword[elif] identifier[self] . identifier[_platform] . identifier[system] == literal[string] :
identifier[os] . identifier[symlink] ( identifier[os] . identifier[pardir] , identifier[link_name] )
keyword[elif] identifier[self] . identifier[_platform] . identifier[system] == literal[string] :
identifier[junction_path] . identifier[symlink] ( identifier[os] . identifier[curdir] , identifier[link_name] )
keyword[finally] :
identifier[os] . identifier[chdir] ( identifier[old_dir] )
|
def _layout_for_domfuzz(self, path):
"""
Update directory to work with DOMFuzz
@type path: str
@param path: A string representation of the fuzzmanager config path
"""
old_dir = os.getcwd()
os.chdir(os.path.join(path))
try:
os.mkdir('dist')
link_name = os.path.join('dist', 'bin')
if self._platform.system == 'Darwin' and self._target == 'firefox':
ff_loc = glob.glob('*.app/Contents/MacOS/firefox')
assert len(ff_loc) == 1 # pylint: disable=no-member
os.symlink(os.path.join(os.pardir, os.path.dirname(ff_loc[0])), link_name) # pylint: disable=no-member
os.symlink(os.path.join(os.pardir, os.pardir, os.pardir, 'symbols'), os.path.join(os.path.dirname(ff_loc[0]), 'symbols')) # depends on [control=['if'], data=[]]
elif self._platform.system == 'Linux':
os.symlink(os.pardir, link_name) # pylint: disable=no-member # depends on [control=['if'], data=[]]
elif self._platform.system == 'Windows':
# create a junction point at dist\bin pointing to the firefox.exe path
junction_path.symlink(os.curdir, link_name) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
finally:
os.chdir(old_dir)
|
def _ParseCommentRecord(self, structure):
"""Parse a comment and store appropriate attributes.
Args:
structure (pyparsing.ParseResults): parsed log line.
"""
comment = structure[1]
if comment.startswith('Version'):
_, _, self._version = comment.partition(':')
elif comment.startswith('Software'):
_, _, self._software = comment.partition(':')
elif comment.startswith('Time'):
_, _, time_format = comment.partition(':')
if 'local' in time_format.lower():
self._use_local_timezone = True
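A hedged harness; the host class and the shape of `structure` (any two-item sequence whose second element is the comment text, as pyparsing would yield) are assumptions.

class _Parser:
    _version = _software = None
    _use_local_timezone = False
    _ParseCommentRecord = _ParseCommentRecord   # reuse the method above

p = _Parser()
for text in ('Version: 1.0', 'Software: httpd 2.4', 'Time: Local time'):
    p._ParseCommentRecord(('#', text))

# partition(':') keeps the leading space in the stored values.
print(repr(p._version), repr(p._software), p._use_local_timezone)
# -> ' 1.0' ' httpd 2.4' True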
|
def function[_ParseCommentRecord, parameter[self, structure]]:
constant[Parse a comment and store appropriate attributes.
Args:
structure (pyparsing.ParseResults): parsed log line.
]
variable[comment] assign[=] call[name[structure]][constant[1]]
if call[name[comment].startswith, parameter[constant[Version]]] begin[:]
<ast.Tuple object at 0x7da207f99120> assign[=] call[name[comment].partition, parameter[constant[:]]]
|
keyword[def] identifier[_ParseCommentRecord] ( identifier[self] , identifier[structure] ):
literal[string]
identifier[comment] = identifier[structure] [ literal[int] ]
keyword[if] identifier[comment] . identifier[startswith] ( literal[string] ):
identifier[_] , identifier[_] , identifier[self] . identifier[_version] = identifier[comment] . identifier[partition] ( literal[string] )
keyword[elif] identifier[comment] . identifier[startswith] ( literal[string] ):
identifier[_] , identifier[_] , identifier[self] . identifier[_software] = identifier[comment] . identifier[partition] ( literal[string] )
keyword[elif] identifier[comment] . identifier[startswith] ( literal[string] ):
identifier[_] , identifier[_] , identifier[time_format] = identifier[comment] . identifier[partition] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[time_format] . identifier[lower] ():
identifier[self] . identifier[_use_local_timezone] = keyword[True]
|
def _ParseCommentRecord(self, structure):
"""Parse a comment and store appropriate attributes.
Args:
structure (pyparsing.ParseResults): parsed log line.
"""
comment = structure[1]
if comment.startswith('Version'):
(_, _, self._version) = comment.partition(':') # depends on [control=['if'], data=[]]
elif comment.startswith('Software'):
(_, _, self._software) = comment.partition(':') # depends on [control=['if'], data=[]]
elif comment.startswith('Time'):
(_, _, time_format) = comment.partition(':')
if 'local' in time_format.lower():
self._use_local_timezone = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
|
def publish(self, events):
"""Publish events."""
assert len(events) > 0
with self.create_producer() as producer:
for event in events:
producer.publish(event)
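A hedged sketch of the contract `publish` relies on: `create_producer()` must return a context manager yielding an object with its own `publish(event)` method. The classes below are illustrative stand-ins, not the real API.

from contextlib import contextmanager

class EventBus:
    publish = publish                # reuse the method above

    @contextmanager
    def create_producer(self):
        class _Producer:
            def publish(self, event):
                print('sent:', event)
        yield _Producer()            # real code would open/close a connection

EventBus().publish(['deposit-created', 'withdrawal-created'])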
|
def function[publish, parameter[self, events]]:
constant[Publish events.]
assert[compare[call[name[len], parameter[name[events]]] greater[>] constant[0]]]
with call[name[self].create_producer, parameter[]] begin[:]
for taget[name[event]] in starred[name[events]] begin[:]
call[name[producer].publish, parameter[name[event]]]
|
keyword[def] identifier[publish] ( identifier[self] , identifier[events] ):
literal[string]
keyword[assert] identifier[len] ( identifier[events] )> literal[int]
keyword[with] identifier[self] . identifier[create_producer] () keyword[as] identifier[producer] :
keyword[for] identifier[event] keyword[in] identifier[events] :
identifier[producer] . identifier[publish] ( identifier[event] )
|
def publish(self, events):
"""Publish events."""
assert len(events) > 0
with self.create_producer() as producer:
for event in events:
producer.publish(event) # depends on [control=['for'], data=['event']] # depends on [control=['with'], data=['producer']]
|
def find_id_in_folder(self, name, parent_folder_id=0):
"""Find a folder or a file ID from its name, inside a given folder.
Args:
name (str): Name of the folder or the file to find.
parent_folder_id (int): ID of the folder where to search.
Returns:
int. ID of the file or folder found. None if not found.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
"""
if name is None or len(name) == 0:
return parent_folder_id
offset = 0
resp = self.get_folder_items(parent_folder_id,
limit=1000, offset=offset,
fields_list=['name'])
total = int(resp['total_count'])
while offset < total:
found = self.__find_name(resp, name)
if found is not None:
return found
offset += int(len(resp['entries']))
resp = self.get_folder_items(parent_folder_id,
limit=1000, offset=offset,
fields_list=['name'])
return None
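A hedged harness so the method can run without the real Box API; the stub client, its entries, and the helper are assumptions. The original calls a private `self.__find_name`; because the code above is shown outside a class, the attribute name is not mangled, so the stub attaches the helper under that literal name.

class StubBoxClient:
    find_id_in_folder = find_id_in_folder       # reuse the method above

    def get_folder_items(self, folder_id, limit, offset, fields_list):
        entries = [{'name': 'reports', 'id': 42}, {'name': 'q3.csv', 'id': 77}]
        return {'total_count': len(entries),
                'entries': entries[offset:offset + limit]}

def _find_name(self, resp, name):
    for entry in resp['entries']:
        if entry.get('name') == name:
            return entry['id']
    return None

setattr(StubBoxClient, '__find_name', _find_name)

print(StubBoxClient().find_id_in_folder('q3.csv'))  # -> 77
print(StubBoxClient().find_id_in_folder(None))      # -> 0 (the parent id)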
|
def function[find_id_in_folder, parameter[self, name, parent_folder_id]]:
constant[Find a folder or a file ID from its name, inside a given folder.
Args:
name (str): Name of the folder or the file to find.
parent_folder_id (int): ID of the folder where to search.
Returns:
int. ID of the file or folder found. None if not found.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
]
if <ast.BoolOp object at 0x7da207f99960> begin[:]
return[name[parent_folder_id]]
variable[offset] assign[=] constant[0]
variable[resp] assign[=] call[name[self].get_folder_items, parameter[name[parent_folder_id]]]
variable[total] assign[=] call[name[int], parameter[call[name[resp]][constant[total_count]]]]
while compare[name[offset] less[<] name[total]] begin[:]
variable[found] assign[=] call[name[self].__find_name, parameter[name[resp], name[name]]]
if compare[name[found] is_not constant[None]] begin[:]
return[name[found]]
<ast.AugAssign object at 0x7da207f9b4c0>
variable[resp] assign[=] call[name[self].get_folder_items, parameter[name[parent_folder_id]]]
return[constant[None]]
|
keyword[def] identifier[find_id_in_folder] ( identifier[self] , identifier[name] , identifier[parent_folder_id] = literal[int] ):
literal[string]
keyword[if] identifier[name] keyword[is] keyword[None] keyword[or] identifier[len] ( identifier[name] )== literal[int] :
keyword[return] identifier[parent_folder_id]
identifier[offset] = literal[int]
identifier[resp] = identifier[self] . identifier[get_folder_items] ( identifier[parent_folder_id] ,
identifier[limit] = literal[int] , identifier[offset] = identifier[offset] ,
identifier[fields_list] =[ literal[string] ])
identifier[total] = identifier[int] ( identifier[resp] [ literal[string] ])
keyword[while] identifier[offset] < identifier[total] :
identifier[found] = identifier[self] . identifier[__find_name] ( identifier[resp] , identifier[name] )
keyword[if] identifier[found] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[found]
identifier[offset] += identifier[int] ( identifier[len] ( identifier[resp] [ literal[string] ]))
identifier[resp] = identifier[self] . identifier[get_folder_items] ( identifier[parent_folder_id] ,
identifier[limit] = literal[int] , identifier[offset] = identifier[offset] ,
identifier[fields_list] =[ literal[string] ])
keyword[return] keyword[None]
|
def find_id_in_folder(self, name, parent_folder_id=0):
"""Find a folder or a file ID from its name, inside a given folder.
Args:
name (str): Name of the folder or the file to find.
parent_folder_id (int): ID of the folder where to search.
Returns:
int. ID of the file or folder found. None if not found.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
"""
if name is None or len(name) == 0:
return parent_folder_id # depends on [control=['if'], data=[]]
offset = 0
resp = self.get_folder_items(parent_folder_id, limit=1000, offset=offset, fields_list=['name'])
total = int(resp['total_count'])
while offset < total:
found = self.__find_name(resp, name)
if found is not None:
return found # depends on [control=['if'], data=['found']]
offset += int(len(resp['entries']))
resp = self.get_folder_items(parent_folder_id, limit=1000, offset=offset, fields_list=['name']) # depends on [control=['while'], data=['offset']]
return None
|
def define_symbol(name, open_brace, comma, i, j,
close_brace, variables, **kwds):
r"""Define a nice symbol with matrix indices.
>>> name = "rho"
>>> from sympy import symbols
>>> t, x, y, z = symbols("t, x, y, z", positive=True)
>>> variables = [t, x, y, z]
>>> open_brace = ""
>>> comma = ""
>>> close_brace = ""
>>> i = 0
>>> j = 1
>>> f = define_symbol(name, open_brace, comma, i, j, close_brace,
... variables, positive=True)
>>> print(f)
rho12(t, x, y, z)
"""
if variables is None:
return Symbol(name+open_brace+str(i+1)+comma+str(j+1) +
close_brace, **kwds)
else:
return Function(name+open_brace+str(i+1)+comma+str(j+1) +
close_brace, **kwds)(*variables)
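The branch on `variables` is the design point to note: with `variables=None` the helper returns a plain `sympy.Symbol`, i.e. a constant, while passing variables returns an applied undefined `Function`, so the resulting expression behaves as a function of `t`, `x`, `y`, `z` under later differentiation.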
|
def function[define_symbol, parameter[name, open_brace, comma, i, j, close_brace, variables]]:
constant[Define a nice symbol with matrix indices.
>>> name = "rho"
>>> from sympy import symbols
>>> t, x, y, z = symbols("t, x, y, z", positive=True)
>>> variables = [t, x, y, z]
>>> open_brace = ""
>>> comma = ""
>>> close_brace = ""
>>> i = 0
>>> j = 1
>>> f = define_symbol(name, open_brace, comma, i, j, close_brace,
... variables, positive=True)
>>> print(f)
rho12(t, x, y, z)
]
if compare[name[variables] is constant[None]] begin[:]
return[call[name[Symbol], parameter[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[name] + name[open_brace]] + call[name[str], parameter[binary_operation[name[i] + constant[1]]]]] + name[comma]] + call[name[str], parameter[binary_operation[name[j] + constant[1]]]]] + name[close_brace]]]]]
|
keyword[def] identifier[define_symbol] ( identifier[name] , identifier[open_brace] , identifier[comma] , identifier[i] , identifier[j] ,
identifier[close_brace] , identifier[variables] ,** identifier[kwds] ):
literal[string]
keyword[if] identifier[variables] keyword[is] keyword[None] :
keyword[return] identifier[Symbol] ( identifier[name] + identifier[open_brace] + identifier[str] ( identifier[i] + literal[int] )+ identifier[comma] + identifier[str] ( identifier[j] + literal[int] )+
identifier[close_brace] ,** identifier[kwds] )
keyword[else] :
keyword[return] identifier[Function] ( identifier[name] + identifier[open_brace] + identifier[str] ( identifier[i] + literal[int] )+ identifier[comma] + identifier[str] ( identifier[j] + literal[int] )+
identifier[close_brace] ,** identifier[kwds] )(* identifier[variables] )
|
def define_symbol(name, open_brace, comma, i, j, close_brace, variables, **kwds):
"""Define a nice symbol with matrix indices.
>>> name = "rho"
>>> from sympy import symbols
>>> t, x, y, z = symbols("t, x, y, z", positive=True)
>>> variables = [t, x, y, z]
>>> open_brace = ""
>>> comma = ""
>>> close_brace = ""
>>> i = 0
>>> j = 1
>>> f = define_symbol(name, open_brace, comma, i, j, close_brace,
... variables, positive=True)
>>> print(f)
rho12(t, x, y, z)
"""
if variables is None:
return Symbol(name + open_brace + str(i + 1) + comma + str(j + 1) + close_brace, **kwds) # depends on [control=['if'], data=[]]
else:
return Function(name + open_brace + str(i + 1) + comma + str(j + 1) + close_brace, **kwds)(*variables)
|