code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def schedule_host_downtime(self, host, start_time, end_time, fixed,
                           trigger_id, duration, author, comment):
    """Schedule a host downtime

    Format of the line that triggers function call::

        SCHEDULE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;<fixed>;
        <trigger_id>;<duration>;<author>;<comment>

    :param host: host to schedule downtime
    :type host: alignak.object.host.Host
    :param start_time: downtime start time
    :type start_time: int (POSIX timestamp, assumed -- not enforced here)
    :param end_time: downtime end time
    :type end_time: int (POSIX timestamp, assumed -- not enforced here)
    :param fixed: is downtime fixed
    :type fixed: bool
    :param trigger_id: downtime id that triggered this one
    :type trigger_id: str
    :param duration: downtime duration
    :type duration: int
    :param author: downtime author
    :type author: str
    :param comment: downtime comment
    :type comment: str
    :return: None
    """
    data = {'ref': host.uuid, 'ref_type': host.my_type, 'start_time': start_time,
            'end_time': end_time, 'fixed': fixed, 'trigger_id': trigger_id,
            'duration': duration, 'author': author, 'comment': comment}
    downtime = Downtime(data)
    downtime.add_automatic_comment(host)
    host.add_downtime(downtime)
    # Broadcast the host's new state so brokers see the pending downtime.
    self.send_an_element(host.get_update_status_brok())
    if trigger_id not in ('', 0):
        # Find the triggering downtime anywhere in the known hosts and
        # register this downtime to be fired along with it.
        for item in self.daemon.hosts:
            if trigger_id in item.downtimes:
                # Look the id up on the host that actually owns it; the
                # original used ``host.downtimes`` which raises KeyError
                # whenever the trigger belongs to another host.
                item.downtimes[trigger_id].trigger_me(downtime.uuid)
constant[Schedule a host downtime
Format of the line that triggers function call::
SCHEDULE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;<fixed>;
<trigger_id>;<duration>;<author>;<comment>
:param host: host to schedule downtime
:type host: alignak.object.host.Host
:param start_time: downtime start time
:type start_time:
:param end_time: downtime end time
:type end_time:
:param fixed: is downtime fixed
:type fixed: bool
:param trigger_id: downtime id that triggered this one
:type trigger_id: str
:param duration: downtime duration
:type duration: int
:param author: downtime author
:type author: str
:param comment: downtime comment
:type comment: str
:return: None
]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da2054a6500>, <ast.Constant object at 0x7da2054a4d00>, <ast.Constant object at 0x7da2054a4040>, <ast.Constant object at 0x7da2054a45b0>, <ast.Constant object at 0x7da2054a5330>, <ast.Constant object at 0x7da2054a42b0>, <ast.Constant object at 0x7da2054a57b0>, <ast.Constant object at 0x7da2054a7040>, <ast.Constant object at 0x7da18bc73f70>], [<ast.Attribute object at 0x7da18bc71d80>, <ast.Attribute object at 0x7da18bc71d20>, <ast.Name object at 0x7da18bc715d0>, <ast.Name object at 0x7da18bc73df0>, <ast.Name object at 0x7da18bc71cc0>, <ast.Name object at 0x7da18bc72770>, <ast.Name object at 0x7da18bc72170>, <ast.Name object at 0x7da18bc719f0>, <ast.Name object at 0x7da18bc736a0>]]
variable[downtime] assign[=] call[name[Downtime], parameter[name[data]]]
call[name[downtime].add_automatic_comment, parameter[name[host]]]
call[name[host].add_downtime, parameter[name[downtime]]]
call[name[self].send_an_element, parameter[call[name[host].get_update_status_brok, parameter[]]]]
if compare[name[trigger_id] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da18bc72fe0>, <ast.Constant object at 0x7da18bc71cf0>]]] begin[:]
for taget[name[item]] in starred[name[self].daemon.hosts] begin[:]
if compare[name[trigger_id] in name[item].downtimes] begin[:]
call[call[name[host].downtimes][name[trigger_id]].trigger_me, parameter[name[downtime].uuid]] | keyword[def] identifier[schedule_host_downtime] ( identifier[self] , identifier[host] , identifier[start_time] , identifier[end_time] , identifier[fixed] ,
identifier[trigger_id] , identifier[duration] , identifier[author] , identifier[comment] ):
literal[string]
identifier[data] ={ literal[string] : identifier[host] . identifier[uuid] , literal[string] : identifier[host] . identifier[my_type] , literal[string] : identifier[start_time] ,
literal[string] : identifier[end_time] , literal[string] : identifier[fixed] , literal[string] : identifier[trigger_id] ,
literal[string] : identifier[duration] , literal[string] : identifier[author] , literal[string] : identifier[comment] }
identifier[downtime] = identifier[Downtime] ( identifier[data] )
identifier[downtime] . identifier[add_automatic_comment] ( identifier[host] )
identifier[host] . identifier[add_downtime] ( identifier[downtime] )
identifier[self] . identifier[send_an_element] ( identifier[host] . identifier[get_update_status_brok] ())
keyword[if] identifier[trigger_id] keyword[not] keyword[in] ( literal[string] , literal[int] ):
keyword[for] identifier[item] keyword[in] identifier[self] . identifier[daemon] . identifier[hosts] :
keyword[if] identifier[trigger_id] keyword[in] identifier[item] . identifier[downtimes] :
identifier[host] . identifier[downtimes] [ identifier[trigger_id] ]. identifier[trigger_me] ( identifier[downtime] . identifier[uuid] ) | def schedule_host_downtime(self, host, start_time, end_time, fixed, trigger_id, duration, author, comment):
"""Schedule a host downtime
Format of the line that triggers function call::
SCHEDULE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;<fixed>;
<trigger_id>;<duration>;<author>;<comment>
:param host: host to schedule downtime
:type host: alignak.object.host.Host
:param start_time: downtime start time
:type start_time:
:param end_time: downtime end time
:type end_time:
:param fixed: is downtime fixed
:type fixed: bool
:param trigger_id: downtime id that triggered this one
:type trigger_id: str
:param duration: downtime duration
:type duration: int
:param author: downtime author
:type author: str
:param comment: downtime comment
:type comment: str
:return: None
"""
data = {'ref': host.uuid, 'ref_type': host.my_type, 'start_time': start_time, 'end_time': end_time, 'fixed': fixed, 'trigger_id': trigger_id, 'duration': duration, 'author': author, 'comment': comment}
downtime = Downtime(data)
downtime.add_automatic_comment(host)
host.add_downtime(downtime)
self.send_an_element(host.get_update_status_brok())
if trigger_id not in ('', 0):
for item in self.daemon.hosts:
if trigger_id in item.downtimes:
host.downtimes[trigger_id].trigger_me(downtime.uuid) # depends on [control=['if'], data=['trigger_id']] # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=['trigger_id']] |
def loadSource(self, source, searchpaths=None):
    """Load *source* via ``self._load`` and store the result on ``self.source``.

    :param source: the source to load; forwarded unchanged to ``_load``
    :param searchpaths: optional paths consulted while resolving *source*
        -- presumably directories searched by ``_load``; confirm against
        its contract
    """
    self.source = self._load(source, searchpaths=searchpaths)
constant[load source]
name[self].source assign[=] call[name[self]._load, parameter[name[source]]] | keyword[def] identifier[loadSource] ( identifier[self] , identifier[source] , identifier[searchpaths] = keyword[None] ):
literal[string]
identifier[self] . identifier[source] = identifier[self] . identifier[_load] ( identifier[source] , identifier[searchpaths] = identifier[searchpaths] ) | def loadSource(self, source, searchpaths=None):
"""load source"""
self.source = self._load(source, searchpaths=searchpaths) |
def convert_to_decimal(string):
    """
    Decode the exif-gps triple format into decimal degrees.

    '[51, 4, 30]'      -> 51.075
    '[51, 4, 1234/34]' -> 51.076748...

    Minutes and seconds may each be an integer or a rational
    'numerator/denominator' fraction, as EXIF stores them.

    :param string: coordinate triple, e.g. '[deg, min, sec]'
    :returns: decimal degrees as a float
    :raises ValueError: if *string* is not a GPS triple
    """
    # An integer or an 'a/b' rational, as found in EXIF GPS fields.
    # The original pattern used an escaped space instead of '/', so
    # fractions could never match.
    number_or_fraction = r'\d{1,10}(?:/\d{1,10})?'
    pattern = re.compile(
        r'''\[?\s*              # optional opening bracket
            \d{{1,3}}\s*,\s*    # degrees (up to 3 digits for longitudes)
            {0}\s*,\s*          # minutes (int or fraction)
            {0}\s*              # seconds (int or fraction; no trailing comma)
            \]?\s*$             # optional closing bracket
        '''.format(number_or_fraction), re.VERBOSE)
    if not pattern.match(string):
        raise ValueError('not an EXIF GPS triple: %r' % (string,))

    def to_number(token):
        # Evaluate 'a/b' rationals; plain integers pass through as floats.
        if '/' in token:
            numerator, denominator = token.split('/')
            return int(numerator) * 1.0 / int(denominator)
        return float(int(token))

    degrees, minutes, seconds = (
        part.strip() for part in re.sub(r'[\[\]]', '', string).split(','))
    # Seconds contribute 1/3600 of a degree (the original divided the
    # integer-seconds branch by 60, overstating them by a factor of 60).
    return int(degrees) + to_number(minutes) / 60 + to_number(seconds) / 3600
constant[
Decode the exif-gps format into a decimal point.
'[51, 4, 1234/34]' -> 51.074948366
]
variable[number_or_fraction] assign[=] constant[(?:\d{1,2}) | (?:\d{1,10} \ \d{1,10})]
variable[m] assign[=] call[name[re].compile, parameter[call[constant[\[?\s? # opening bracket
\d{{1,2}}\s?,\s? # first number
{0} \s?,\s? # second number (can be a fraction)
{0} \s?,\s? # third number (can be a fraction)
\]?\s? # closing bracket
].format, parameter[name[number_or_fraction]]], name[re].VERBOSE]]
if <ast.UnaryOp object at 0x7da20c6a8040> begin[:]
<ast.Raise object at 0x7da20c6a8280>
<ast.Tuple object at 0x7da20c6abb20> assign[=] call[call[name[re].sub, parameter[constant[\[|\]], constant[], name[string]]].split, parameter[constant[, ]]]
variable[result] assign[=] call[name[int], parameter[name[h]]]
if compare[constant[/] in name[m]] begin[:]
variable[m] assign[=] call[name[m].split, parameter[constant[/]]]
<ast.AugAssign object at 0x7da20c6ab5b0>
if compare[constant[/] in name[s]] begin[:]
variable[s] assign[=] call[name[s].split, parameter[constant[/]]]
<ast.AugAssign object at 0x7da20eb2bdf0>
return[name[result]] | keyword[def] identifier[convert_to_decimal] ( identifier[string] ):
literal[string]
identifier[number_or_fraction] = literal[string]
identifier[m] = identifier[re] . identifier[compile] ( literal[string] . identifier[format] ( identifier[number_or_fraction] ), identifier[re] . identifier[VERBOSE] )
keyword[if] keyword[not] identifier[m] . identifier[match] ( identifier[string] ):
keyword[raise] identifier[ValueError]
identifier[h] , identifier[m] , identifier[s] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[string] ). identifier[split] ( literal[string] )
identifier[result] = identifier[int] ( identifier[h] )
keyword[if] literal[string] keyword[in] identifier[m] :
identifier[m] = identifier[m] . identifier[split] ( literal[string] )
identifier[result] += identifier[int] ( identifier[m] [ literal[int] ])* literal[int] / identifier[int] ( identifier[m] [ literal[int] ])/ literal[int]
keyword[else] :
identifier[result] += identifier[int] ( identifier[m] )* literal[int] / literal[int]
keyword[if] literal[string] keyword[in] identifier[s] :
identifier[s] = identifier[s] . identifier[split] ( literal[string] )
identifier[result] += identifier[int] ( identifier[s] [ literal[int] ])* literal[int] / identifier[int] ( identifier[s] [ literal[int] ])/ literal[int]
keyword[else] :
identifier[result] += identifier[int] ( identifier[s] )* literal[int] / literal[int]
keyword[return] identifier[result] | def convert_to_decimal(string):
"""
Decode the exif-gps format into a decimal point.
'[51, 4, 1234/34]' -> 51.074948366
"""
number_or_fraction = '(?:\\d{1,2}) | (?:\\d{1,10} \\ \\d{1,10})'
m = re.compile('\\[?\\s? # opening bracket\n \\d{{1,2}}\\s?,\\s? # first number\n {0} \\s?,\\s? # second number (can be a fraction)\n {0} \\s?,\\s? # third number (can be a fraction)\n \\]?\\s? # closing bracket\n '.format(number_or_fraction), re.VERBOSE)
if not m.match(string):
raise ValueError # depends on [control=['if'], data=[]]
(h, m, s) = re.sub('\\[|\\]', '', string).split(', ')
result = int(h)
if '/' in m:
m = m.split('/')
result += int(m[0]) * 1.0 / int(m[1]) / 60 # depends on [control=['if'], data=['m']]
else:
result += int(m) * 1.0 / 60
if '/' in s:
s = s.split('/')
result += int(s[0]) * 1.0 / int(s[1]) / 3600 # depends on [control=['if'], data=['s']]
else:
result += int(s) * 1.0 / 60
return result |
def convert(self, value, param, ctx):
    """Map a kernel name onto one of the allowed choices.

    Accepts the value verbatim, or with an implicit ``-x86_64`` suffix
    appended; anything else is reported as an invalid choice.
    """
    self.gandi = ctx.obj
    for candidate in (value, '%s-x86_64' % value):
        if candidate in self.choices:
            return candidate
    self.fail('invalid choice: %s. (choose from %s)' %
              (value, ', '.join(self.choices)), param, ctx)
constant[ Try to find correct kernel regarding version. ]
name[self].gandi assign[=] name[ctx].obj
if compare[name[value] in name[self].choices] begin[:]
return[name[value]]
variable[new_value] assign[=] binary_operation[constant[%s-x86_64] <ast.Mod object at 0x7da2590d6920> name[value]]
if compare[name[new_value] in name[self].choices] begin[:]
return[name[new_value]]
call[name[self].fail, parameter[binary_operation[constant[invalid choice: %s. (choose from %s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18dc04760>, <ast.Call object at 0x7da18dc047f0>]]], name[param], name[ctx]]] | keyword[def] identifier[convert] ( identifier[self] , identifier[value] , identifier[param] , identifier[ctx] ):
literal[string]
identifier[self] . identifier[gandi] = identifier[ctx] . identifier[obj]
keyword[if] identifier[value] keyword[in] identifier[self] . identifier[choices] :
keyword[return] identifier[value]
identifier[new_value] = literal[string] % identifier[value]
keyword[if] identifier[new_value] keyword[in] identifier[self] . identifier[choices] :
keyword[return] identifier[new_value]
identifier[self] . identifier[fail] ( literal[string] %
( identifier[value] , literal[string] . identifier[join] ( identifier[self] . identifier[choices] )), identifier[param] , identifier[ctx] ) | def convert(self, value, param, ctx):
""" Try to find correct kernel regarding version. """
self.gandi = ctx.obj
# Exact match first
if value in self.choices:
return value # depends on [control=['if'], data=['value']]
# Also try with x86-64 suffix
new_value = '%s-x86_64' % value
if new_value in self.choices:
return new_value # depends on [control=['if'], data=['new_value']]
self.fail('invalid choice: %s. (choose from %s)' % (value, ', '.join(self.choices)), param, ctx) |
async def put_annotations(self, annotation):
    """
    PUT /api/annotations/{annotation}.{_format}

    Update an existing annotation.

    :param annotation: the annotation ID (``\\w+`` string)
    :return: data returned by the API for this annotation
    """
    endpoint = '/api/annotations/{annotation}.{ext}'.format(
        annotation=annotation, ext=self.format)
    return await self.query(endpoint, "put", access_token=self.token)
literal[string]
identifier[params] ={ literal[string] : identifier[self] . identifier[token] }
identifier[url] = literal[string] . identifier[format] (
identifier[annotation] = identifier[annotation] , identifier[ext] = identifier[self] . identifier[format] )
keyword[return] keyword[await] identifier[self] . identifier[query] ( identifier[url] , literal[string] ,** identifier[params] ) | async def put_annotations(self, annotation):
"""
PUT /api/annotations/{annotation}.{_format}
Updates an annotation.
:param annotation \\w+ string The annotation ID
Will returns annotation for this entry
:return data related to the ext
"""
params = {'access_token': self.token}
url = '/api/annotations/{annotation}.{ext}'.format(annotation=annotation, ext=self.format)
return await self.query(url, 'put', **params) |
def get_local_ep(*args, **kwargs):
    """
    Warning:
        DEPRECATED: Use ``globus_sdk.LocalGlobusConnectPersonal().endpoint_id``
        instead.  By default this raises; pass ``warn=False`` to merely warn
        and still return the endpoint id.
    """
    message = ("'get_local_ep()' has been deprecated in favor of "
               "'globus_sdk.LocalGlobusConnectPersonal().endpoint_id'.")
    if kwargs.get("warn", True):
        raise DeprecationWarning(
            message + " To override, pass in 'warn=False'.")
    import warnings
    warnings.warn(message)
    return globus_sdk.LocalGlobusConnectPersonal().endpoint_id
constant[
Warning:
DEPRECATED: Use ``globus_sdk.LocalGlobusConnectPersonal().endpoint_id`` instead.
]
if call[name[kwargs].get, parameter[constant[warn], constant[True]]] begin[:]
<ast.Raise object at 0x7da1b236b0d0>
return[call[name[globus_sdk].LocalGlobusConnectPersonal, parameter[]].endpoint_id] | keyword[def] identifier[get_local_ep] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] , keyword[True] ):
keyword[raise] identifier[DeprecationWarning] ( literal[string]
literal[string]
literal[string] )
keyword[else] :
keyword[import] identifier[warnings]
identifier[warnings] . identifier[warn] ( literal[string]
literal[string] )
keyword[return] identifier[globus_sdk] . identifier[LocalGlobusConnectPersonal] (). identifier[endpoint_id] | def get_local_ep(*args, **kwargs):
"""
Warning:
DEPRECATED: Use ``globus_sdk.LocalGlobusConnectPersonal().endpoint_id`` instead.
"""
if kwargs.get('warn', True):
raise DeprecationWarning("'get_local_ep()' has been deprecated in favor of 'globus_sdk.LocalGlobusConnectPersonal().endpoint_id'. To override, pass in 'warn=False'.") # depends on [control=['if'], data=[]]
else:
import warnings
warnings.warn("'get_local_ep()' has been deprecated in favor of 'globus_sdk.LocalGlobusConnectPersonal().endpoint_id'.")
return globus_sdk.LocalGlobusConnectPersonal().endpoint_id |
def _pump(self):
'''
Attempts to process the next command in the queue if one exists and the
driver is not currently busy.
'''
while (not self._busy) and len(self._queue):
cmd = self._queue.pop(0)
self._name = cmd[2]
try:
cmd[0](*cmd[1])
except Exception as e:
self.notify('error', exception=e)
if self._debug:
traceback.print_exc() | def function[_pump, parameter[self]]:
constant[
Attempts to process the next command in the queue if one exists and the
driver is not currently busy.
]
while <ast.BoolOp object at 0x7da1b1e99990> begin[:]
variable[cmd] assign[=] call[name[self]._queue.pop, parameter[constant[0]]]
name[self]._name assign[=] call[name[cmd]][constant[2]]
<ast.Try object at 0x7da20c796020> | keyword[def] identifier[_pump] ( identifier[self] ):
literal[string]
keyword[while] ( keyword[not] identifier[self] . identifier[_busy] ) keyword[and] identifier[len] ( identifier[self] . identifier[_queue] ):
identifier[cmd] = identifier[self] . identifier[_queue] . identifier[pop] ( literal[int] )
identifier[self] . identifier[_name] = identifier[cmd] [ literal[int] ]
keyword[try] :
identifier[cmd] [ literal[int] ](* identifier[cmd] [ literal[int] ])
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[notify] ( literal[string] , identifier[exception] = identifier[e] )
keyword[if] identifier[self] . identifier[_debug] :
identifier[traceback] . identifier[print_exc] () | def _pump(self):
"""
Attempts to process the next command in the queue if one exists and the
driver is not currently busy.
"""
while not self._busy and len(self._queue):
cmd = self._queue.pop(0)
self._name = cmd[2]
try:
cmd[0](*cmd[1]) # depends on [control=['try'], data=[]]
except Exception as e:
self.notify('error', exception=e)
if self._debug:
traceback.print_exc() # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['e']] # depends on [control=['while'], data=[]] |
def _move_file_to_dest_dir(self, filename):
    '''Move *filename* into the configured ``move_to`` directory.

    The destination must already exist and be a directory; otherwise the
    file is left in place and an error is logged.
    '''
    assert self._params.move_to

    if os.path.isdir(self._params.move_to):
        shutil.move(filename, self._params.move_to)
        # Log after the move succeeds, and log the file actually moved:
        # the original logged ``self._warc_filename`` even when moving a
        # different file (e.g. a sidecar), and logged before moving.
        _logger.debug('Moved {} to {}.', filename, self._params.move_to)
    else:
        _logger.error('{} is not a directory; not moving {}.',
                      self._params.move_to, filename)
constant[Move the file to the ``move_to`` directory.]
assert[name[self]._params.move_to]
if call[name[os].path.isdir, parameter[name[self]._params.move_to]] begin[:]
call[name[_logger].debug, parameter[constant[Moved {} to {}.], name[self]._warc_filename, name[self]._params.move_to]]
call[name[shutil].move, parameter[name[filename], name[self]._params.move_to]] | keyword[def] identifier[_move_file_to_dest_dir] ( identifier[self] , identifier[filename] ):
literal[string]
keyword[assert] identifier[self] . identifier[_params] . identifier[move_to]
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[self] . identifier[_params] . identifier[move_to] ):
identifier[_logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[_warc_filename] ,
identifier[self] . identifier[_params] . identifier[move_to] )
identifier[shutil] . identifier[move] ( identifier[filename] , identifier[self] . identifier[_params] . identifier[move_to] )
keyword[else] :
identifier[_logger] . identifier[error] ( literal[string] ,
identifier[self] . identifier[_params] . identifier[move_to] , identifier[filename] ) | def _move_file_to_dest_dir(self, filename):
"""Move the file to the ``move_to`` directory."""
assert self._params.move_to
if os.path.isdir(self._params.move_to):
_logger.debug('Moved {} to {}.', self._warc_filename, self._params.move_to)
shutil.move(filename, self._params.move_to) # depends on [control=['if'], data=[]]
else:
_logger.error('{} is not a directory; not moving {}.', self._params.move_to, filename) |
def calculate_size(name, permits):
    """Compute the request payload size in bytes.

    The payload carries the target name followed by the ``permits``
    integer, hence the encoded string size plus one int.
    """
    return calculate_size_str(name) + INT_SIZE_IN_BYTES
constant[ Calculates the request payload size]
variable[data_size] assign[=] constant[0]
<ast.AugAssign object at 0x7da1b26af7c0>
<ast.AugAssign object at 0x7da1b26af280>
return[name[data_size]] | keyword[def] identifier[calculate_size] ( identifier[name] , identifier[permits] ):
literal[string]
identifier[data_size] = literal[int]
identifier[data_size] += identifier[calculate_size_str] ( identifier[name] )
identifier[data_size] += identifier[INT_SIZE_IN_BYTES]
keyword[return] identifier[data_size] | def calculate_size(name, permits):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_str(name)
data_size += INT_SIZE_IN_BYTES
return data_size |
async def full_dispatch_websocket(
        self, websocket_context: Optional[WebsocketContext]=None,
) -> Optional[Response]:
    """Adds pre and post processing to the websocket dispatching.

    Arguments:
        websocket_context: The websocket context, optional to match
            the Flask convention.
    """
    # Run any before-first-request callbacks (guarded to fire once), then
    # announce the websocket via the ``websocket_started`` signal.
    await self.try_trigger_before_first_request_functions()
    await websocket_started.send(self)
    try:
        # Preprocessing may short-circuit: a non-None result skips dispatch.
        result = await self.preprocess_websocket(websocket_context)
        if result is None:
            result = await self.dispatch_websocket(websocket_context)
    except Exception as error:
        # Delegate to the registered user error handlers; whatever they
        # return becomes the result to finalize.
        result = await self.handle_user_exception(error)
    return await self.finalize_websocket(result, websocket_context)
identifier[self] , identifier[websocket_context] : identifier[Optional] [ identifier[WebsocketContext] ]= keyword[None] ,
)-> identifier[Optional] [ identifier[Response] ]:
literal[string]
keyword[await] identifier[self] . identifier[try_trigger_before_first_request_functions] ()
keyword[await] identifier[websocket_started] . identifier[send] ( identifier[self] )
keyword[try] :
identifier[result] = keyword[await] identifier[self] . identifier[preprocess_websocket] ( identifier[websocket_context] )
keyword[if] identifier[result] keyword[is] keyword[None] :
identifier[result] = keyword[await] identifier[self] . identifier[dispatch_websocket] ( identifier[websocket_context] )
keyword[except] identifier[Exception] keyword[as] identifier[error] :
identifier[result] = keyword[await] identifier[self] . identifier[handle_user_exception] ( identifier[error] )
keyword[return] keyword[await] identifier[self] . identifier[finalize_websocket] ( identifier[result] , identifier[websocket_context] ) | async def full_dispatch_websocket(self, websocket_context: Optional[WebsocketContext]=None) -> Optional[Response]:
"""Adds pre and post processing to the websocket dispatching.
Arguments:
websocket_context: The websocket context, optional to match
the Flask convention.
"""
await self.try_trigger_before_first_request_functions()
await websocket_started.send(self)
try:
result = await self.preprocess_websocket(websocket_context)
if result is None:
result = await self.dispatch_websocket(websocket_context) # depends on [control=['if'], data=['result']] # depends on [control=['try'], data=[]]
except Exception as error:
result = await self.handle_user_exception(error) # depends on [control=['except'], data=['error']]
return await self.finalize_websocket(result, websocket_context) |
def get_last_depth(self, symbol, _type, _async=False):
    """
    Fetch the current market depth for a trading pair.

    :param symbol: trading pair symbol
    :param _type: depth aggregation level, one of
        { percent10, step0, step1, step2, step3, step4, step5 }
    :param _async: forwarded to the HTTP helper to request async execution
    :return: result of the GET request against ``/market/depth``
    """
    return http_get_request(
        u.MARKET_URL + '/market/depth',
        {'symbol': symbol, 'type': _type},
        _async=_async)
constant[
获取marketdepth
:param symbol
:param type: 可选值:{ percent10, step0, step1, step2, step3, step4, step5 }
:return:
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da18f09f5b0>, <ast.Constant object at 0x7da18f09d6c0>], [<ast.Name object at 0x7da18f09ead0>, <ast.Name object at 0x7da18f09dff0>]]
variable[url] assign[=] binary_operation[name[u].MARKET_URL + constant[/market/depth]]
return[call[name[http_get_request], parameter[name[url], name[params]]]] | keyword[def] identifier[get_last_depth] ( identifier[self] , identifier[symbol] , identifier[_type] , identifier[_async] = keyword[False] ):
literal[string]
identifier[params] ={ literal[string] : identifier[symbol] , literal[string] : identifier[_type] }
identifier[url] = identifier[u] . identifier[MARKET_URL] + literal[string]
keyword[return] identifier[http_get_request] ( identifier[url] , identifier[params] , identifier[_async] = identifier[_async] ) | def get_last_depth(self, symbol, _type, _async=False):
"""
获取marketdepth
:param symbol
:param type: 可选值:{ percent10, step0, step1, step2, step3, step4, step5 }
:return:
"""
params = {'symbol': symbol, 'type': _type}
url = u.MARKET_URL + '/market/depth'
return http_get_request(url, params, _async=_async) |
def _preprocess_inputs(durations, event_observed, timeline, entry, weights):
    """
    Normalise raw user inputs into the arrays expected downstream.

    Returns the tuple ``(durations, event_observed, timeline, entry,
    event_table)`` with everything coerced to flat numpy arrays and the
    event table built via ``survival_table_from_events``.
    """
    n = len(durations)
    durations = np.asarray(pass_for_numeric_dtypes_or_raise_array(durations)).reshape((n,))

    # Missing censoring information means every duration is an observed event.
    if event_observed is None:
        event_observed = np.ones(n, dtype=int)
    else:
        event_observed = np.asarray(event_observed).reshape((n,)).copy().astype(int)

    entry = None if entry is None else np.asarray(entry).reshape((n,))

    event_table = survival_table_from_events(durations, event_observed, entry, weights=weights)

    timeline = event_table.index.values if timeline is None else np.asarray(timeline)

    return durations, event_observed, timeline.astype(float), entry, event_table
constant[
Cleans and confirms input to what lifelines expects downstream
]
variable[n] assign[=] call[name[len], parameter[name[durations]]]
variable[durations] assign[=] call[call[name[np].asarray, parameter[call[name[pass_for_numeric_dtypes_or_raise_array], parameter[name[durations]]]]].reshape, parameter[tuple[[<ast.Name object at 0x7da20c6e6020>]]]]
if compare[name[event_observed] is constant[None]] begin[:]
variable[event_observed] assign[=] call[name[np].ones, parameter[name[n]]]
if compare[name[entry] is_not constant[None]] begin[:]
variable[entry] assign[=] call[call[name[np].asarray, parameter[name[entry]]].reshape, parameter[tuple[[<ast.Name object at 0x7da20c6e5b10>]]]]
variable[event_table] assign[=] call[name[survival_table_from_events], parameter[name[durations], name[event_observed], name[entry]]]
if compare[name[timeline] is constant[None]] begin[:]
variable[timeline] assign[=] name[event_table].index.values
return[tuple[[<ast.Name object at 0x7da20c6e7550>, <ast.Name object at 0x7da20c6e52a0>, <ast.Call object at 0x7da20c6e6f80>, <ast.Name object at 0x7da20c6e5630>, <ast.Name object at 0x7da20c6e64d0>]]] | keyword[def] identifier[_preprocess_inputs] ( identifier[durations] , identifier[event_observed] , identifier[timeline] , identifier[entry] , identifier[weights] ):
literal[string]
identifier[n] = identifier[len] ( identifier[durations] )
identifier[durations] = identifier[np] . identifier[asarray] ( identifier[pass_for_numeric_dtypes_or_raise_array] ( identifier[durations] )). identifier[reshape] (( identifier[n] ,))
keyword[if] identifier[event_observed] keyword[is] keyword[None] :
identifier[event_observed] = identifier[np] . identifier[ones] ( identifier[n] , identifier[dtype] = identifier[int] )
keyword[else] :
identifier[event_observed] = identifier[np] . identifier[asarray] ( identifier[event_observed] ). identifier[reshape] (( identifier[n] ,)). identifier[copy] (). identifier[astype] ( identifier[int] )
keyword[if] identifier[entry] keyword[is] keyword[not] keyword[None] :
identifier[entry] = identifier[np] . identifier[asarray] ( identifier[entry] ). identifier[reshape] (( identifier[n] ,))
identifier[event_table] = identifier[survival_table_from_events] ( identifier[durations] , identifier[event_observed] , identifier[entry] , identifier[weights] = identifier[weights] )
keyword[if] identifier[timeline] keyword[is] keyword[None] :
identifier[timeline] = identifier[event_table] . identifier[index] . identifier[values]
keyword[else] :
identifier[timeline] = identifier[np] . identifier[asarray] ( identifier[timeline] )
keyword[return] ( identifier[durations] , identifier[event_observed] , identifier[timeline] . identifier[astype] ( identifier[float] ), identifier[entry] , identifier[event_table] ) | def _preprocess_inputs(durations, event_observed, timeline, entry, weights):
"""
Cleans and confirms input to what lifelines expects downstream
"""
n = len(durations)
durations = np.asarray(pass_for_numeric_dtypes_or_raise_array(durations)).reshape((n,))
# set to all observed if event_observed is none
if event_observed is None:
event_observed = np.ones(n, dtype=int) # depends on [control=['if'], data=['event_observed']]
else:
event_observed = np.asarray(event_observed).reshape((n,)).copy().astype(int)
if entry is not None:
entry = np.asarray(entry).reshape((n,)) # depends on [control=['if'], data=['entry']]
event_table = survival_table_from_events(durations, event_observed, entry, weights=weights)
if timeline is None:
timeline = event_table.index.values # depends on [control=['if'], data=['timeline']]
else:
timeline = np.asarray(timeline)
return (durations, event_observed, timeline.astype(float), entry, event_table) |
def ip_hide_as_path_holder_as_path_access_list_seq_keyword(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def")
hide_as_path_holder = ET.SubElement(ip, "hide-as-path-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
as_path = ET.SubElement(hide_as_path_holder, "as-path")
access_list = ET.SubElement(as_path, "access-list")
name_key = ET.SubElement(access_list, "name")
name_key.text = kwargs.pop('name')
instance_key = ET.SubElement(access_list, "instance")
instance_key.text = kwargs.pop('instance')
seq_keyword = ET.SubElement(access_list, "seq-keyword")
seq_keyword.text = kwargs.pop('seq_keyword')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[ip_hide_as_path_holder_as_path_access_list_seq_keyword, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[ip] assign[=] call[name[ET].SubElement, parameter[name[config], constant[ip]]]
variable[hide_as_path_holder] assign[=] call[name[ET].SubElement, parameter[name[ip], constant[hide-as-path-holder]]]
variable[as_path] assign[=] call[name[ET].SubElement, parameter[name[hide_as_path_holder], constant[as-path]]]
variable[access_list] assign[=] call[name[ET].SubElement, parameter[name[as_path], constant[access-list]]]
variable[name_key] assign[=] call[name[ET].SubElement, parameter[name[access_list], constant[name]]]
name[name_key].text assign[=] call[name[kwargs].pop, parameter[constant[name]]]
variable[instance_key] assign[=] call[name[ET].SubElement, parameter[name[access_list], constant[instance]]]
name[instance_key].text assign[=] call[name[kwargs].pop, parameter[constant[instance]]]
variable[seq_keyword] assign[=] call[name[ET].SubElement, parameter[name[access_list], constant[seq-keyword]]]
name[seq_keyword].text assign[=] call[name[kwargs].pop, parameter[constant[seq_keyword]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[ip_hide_as_path_holder_as_path_access_list_seq_keyword] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[ip] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[hide_as_path_holder] = identifier[ET] . identifier[SubElement] ( identifier[ip] , literal[string] , identifier[xmlns] = literal[string] )
identifier[as_path] = identifier[ET] . identifier[SubElement] ( identifier[hide_as_path_holder] , literal[string] )
identifier[access_list] = identifier[ET] . identifier[SubElement] ( identifier[as_path] , literal[string] )
identifier[name_key] = identifier[ET] . identifier[SubElement] ( identifier[access_list] , literal[string] )
identifier[name_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[instance_key] = identifier[ET] . identifier[SubElement] ( identifier[access_list] , literal[string] )
identifier[instance_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[seq_keyword] = identifier[ET] . identifier[SubElement] ( identifier[access_list] , literal[string] )
identifier[seq_keyword] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def ip_hide_as_path_holder_as_path_access_list_seq_keyword(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
ip = ET.SubElement(config, 'ip', xmlns='urn:brocade.com:mgmt:brocade-common-def')
hide_as_path_holder = ET.SubElement(ip, 'hide-as-path-holder', xmlns='urn:brocade.com:mgmt:brocade-ip-policy')
as_path = ET.SubElement(hide_as_path_holder, 'as-path')
access_list = ET.SubElement(as_path, 'access-list')
name_key = ET.SubElement(access_list, 'name')
name_key.text = kwargs.pop('name')
instance_key = ET.SubElement(access_list, 'instance')
instance_key.text = kwargs.pop('instance')
seq_keyword = ET.SubElement(access_list, 'seq-keyword')
seq_keyword.text = kwargs.pop('seq_keyword')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def create_pplan(self, topologyName, pplan):
""" create physical plan """
if not pplan or not pplan.IsInitialized():
raise_(StateException("Physical Plan protobuf not init properly",
StateException.EX_TYPE_PROTOBUF_ERROR), sys.exc_info()[2])
path = self.get_pplan_path(topologyName)
LOG.info("Adding topology: {0} to path: {1}".format(
topologyName, path))
pplanString = pplan.SerializeToString()
try:
self.client.create(path, value=pplanString, makepath=True)
return True
except NoNodeError:
raise_(StateException("NoNodeError while creating pplan",
StateException.EX_TYPE_NO_NODE_ERROR), sys.exc_info()[2])
except NodeExistsError:
raise_(StateException("NodeExistsError while creating pplan",
StateException.EX_TYPE_NODE_EXISTS_ERROR), sys.exc_info()[2])
except ZookeeperError:
raise_(StateException("Zookeeper while creating pplan",
StateException.EX_TYPE_ZOOKEEPER_ERROR), sys.exc_info()[2])
except Exception:
# Just re raise the exception.
raise | def function[create_pplan, parameter[self, topologyName, pplan]]:
constant[ create physical plan ]
if <ast.BoolOp object at 0x7da18f58ec20> begin[:]
call[name[raise_], parameter[call[name[StateException], parameter[constant[Physical Plan protobuf not init properly], name[StateException].EX_TYPE_PROTOBUF_ERROR]], call[call[name[sys].exc_info, parameter[]]][constant[2]]]]
variable[path] assign[=] call[name[self].get_pplan_path, parameter[name[topologyName]]]
call[name[LOG].info, parameter[call[constant[Adding topology: {0} to path: {1}].format, parameter[name[topologyName], name[path]]]]]
variable[pplanString] assign[=] call[name[pplan].SerializeToString, parameter[]]
<ast.Try object at 0x7da18c4cdc90> | keyword[def] identifier[create_pplan] ( identifier[self] , identifier[topologyName] , identifier[pplan] ):
literal[string]
keyword[if] keyword[not] identifier[pplan] keyword[or] keyword[not] identifier[pplan] . identifier[IsInitialized] ():
identifier[raise_] ( identifier[StateException] ( literal[string] ,
identifier[StateException] . identifier[EX_TYPE_PROTOBUF_ERROR] ), identifier[sys] . identifier[exc_info] ()[ literal[int] ])
identifier[path] = identifier[self] . identifier[get_pplan_path] ( identifier[topologyName] )
identifier[LOG] . identifier[info] ( literal[string] . identifier[format] (
identifier[topologyName] , identifier[path] ))
identifier[pplanString] = identifier[pplan] . identifier[SerializeToString] ()
keyword[try] :
identifier[self] . identifier[client] . identifier[create] ( identifier[path] , identifier[value] = identifier[pplanString] , identifier[makepath] = keyword[True] )
keyword[return] keyword[True]
keyword[except] identifier[NoNodeError] :
identifier[raise_] ( identifier[StateException] ( literal[string] ,
identifier[StateException] . identifier[EX_TYPE_NO_NODE_ERROR] ), identifier[sys] . identifier[exc_info] ()[ literal[int] ])
keyword[except] identifier[NodeExistsError] :
identifier[raise_] ( identifier[StateException] ( literal[string] ,
identifier[StateException] . identifier[EX_TYPE_NODE_EXISTS_ERROR] ), identifier[sys] . identifier[exc_info] ()[ literal[int] ])
keyword[except] identifier[ZookeeperError] :
identifier[raise_] ( identifier[StateException] ( literal[string] ,
identifier[StateException] . identifier[EX_TYPE_ZOOKEEPER_ERROR] ), identifier[sys] . identifier[exc_info] ()[ literal[int] ])
keyword[except] identifier[Exception] :
keyword[raise] | def create_pplan(self, topologyName, pplan):
""" create physical plan """
if not pplan or not pplan.IsInitialized():
raise_(StateException('Physical Plan protobuf not init properly', StateException.EX_TYPE_PROTOBUF_ERROR), sys.exc_info()[2]) # depends on [control=['if'], data=[]]
path = self.get_pplan_path(topologyName)
LOG.info('Adding topology: {0} to path: {1}'.format(topologyName, path))
pplanString = pplan.SerializeToString()
try:
self.client.create(path, value=pplanString, makepath=True)
return True # depends on [control=['try'], data=[]]
except NoNodeError:
raise_(StateException('NoNodeError while creating pplan', StateException.EX_TYPE_NO_NODE_ERROR), sys.exc_info()[2]) # depends on [control=['except'], data=[]]
except NodeExistsError:
raise_(StateException('NodeExistsError while creating pplan', StateException.EX_TYPE_NODE_EXISTS_ERROR), sys.exc_info()[2]) # depends on [control=['except'], data=[]]
except ZookeeperError:
raise_(StateException('Zookeeper while creating pplan', StateException.EX_TYPE_ZOOKEEPER_ERROR), sys.exc_info()[2]) # depends on [control=['except'], data=[]]
except Exception:
# Just re raise the exception.
raise # depends on [control=['except'], data=[]] |
def depth_march_average_ground_temperature(self, value=None):
"""Corresponds to IDD Field `depth_march_average_ground_temperature`
Args:
value (float): value for IDD Field `depth_march_average_ground_temperature`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `depth_march_average_ground_temperature`'.format(value))
self._depth_march_average_ground_temperature = value | def function[depth_march_average_ground_temperature, parameter[self, value]]:
constant[Corresponds to IDD Field `depth_march_average_ground_temperature`
Args:
value (float): value for IDD Field `depth_march_average_ground_temperature`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
]
if compare[name[value] is_not constant[None]] begin[:]
<ast.Try object at 0x7da1b0ffb580>
name[self]._depth_march_average_ground_temperature assign[=] name[value] | keyword[def] identifier[depth_march_average_ground_temperature] ( identifier[self] , identifier[value] = keyword[None] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[value] = identifier[float] ( identifier[value] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] . identifier[format] ( identifier[value] ))
identifier[self] . identifier[_depth_march_average_ground_temperature] = identifier[value] | def depth_march_average_ground_temperature(self, value=None):
"""Corresponds to IDD Field `depth_march_average_ground_temperature`
Args:
value (float): value for IDD Field `depth_march_average_ground_temperature`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value) # depends on [control=['try'], data=[]]
except ValueError:
raise ValueError('value {} need to be of type float for field `depth_march_average_ground_temperature`'.format(value)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['value']]
self._depth_march_average_ground_temperature = value |
def cv_precompute(self, mask, b):
'''
Pre-compute the matrices :py:obj:`A` and :py:obj:`B`
(cross-validation step only)
for chunk :py:obj:`b`.
'''
# Get current chunk and mask outliers
m1 = self.get_masked_chunk(b)
flux = self.fraw[m1]
K = GetCovariance(self.kernel, self.kernel_params,
self.time[m1], self.fraw_err[m1])
med = np.nanmedian(flux)
# Now mask the validation set
M = lambda x, axis = 0: np.delete(x, mask, axis=axis)
m2 = M(m1)
mK = M(M(K, axis=0), axis=1)
f = M(flux) - med
# Pre-compute the matrices
A = [None for i in range(self.pld_order)]
B = [None for i in range(self.pld_order)]
for n in range(self.pld_order):
# Only compute up to the current PLD order
if self.lam_idx >= n:
X2 = self.X(n, m2)
X1 = self.X(n, m1)
A[n] = np.dot(X2, X2.T)
B[n] = np.dot(X1, X2.T)
del X1, X2
if self.transit_model is None:
C = 0
else:
C = np.zeros((len(m2), len(m2)))
mean_transit_model = med * \
np.sum([tm.depth * tm(self.time[m2])
for tm in self.transit_model], axis=0)
f -= mean_transit_model
for tm in self.transit_model:
X2 = tm(self.time[m2]).reshape(-1, 1)
C += tm.var_depth * np.dot(X2, X2.T)
del X2
return A, B, C, mK, f, m1, m2 | def function[cv_precompute, parameter[self, mask, b]]:
constant[
Pre-compute the matrices :py:obj:`A` and :py:obj:`B`
(cross-validation step only)
for chunk :py:obj:`b`.
]
variable[m1] assign[=] call[name[self].get_masked_chunk, parameter[name[b]]]
variable[flux] assign[=] call[name[self].fraw][name[m1]]
variable[K] assign[=] call[name[GetCovariance], parameter[name[self].kernel, name[self].kernel_params, call[name[self].time][name[m1]], call[name[self].fraw_err][name[m1]]]]
variable[med] assign[=] call[name[np].nanmedian, parameter[name[flux]]]
variable[M] assign[=] <ast.Lambda object at 0x7da1b0fcf550>
variable[m2] assign[=] call[name[M], parameter[name[m1]]]
variable[mK] assign[=] call[name[M], parameter[call[name[M], parameter[name[K]]]]]
variable[f] assign[=] binary_operation[call[name[M], parameter[name[flux]]] - name[med]]
variable[A] assign[=] <ast.ListComp object at 0x7da1b0fcd9f0>
variable[B] assign[=] <ast.ListComp object at 0x7da1b0fcd150>
for taget[name[n]] in starred[call[name[range], parameter[name[self].pld_order]]] begin[:]
if compare[name[self].lam_idx greater_or_equal[>=] name[n]] begin[:]
variable[X2] assign[=] call[name[self].X, parameter[name[n], name[m2]]]
variable[X1] assign[=] call[name[self].X, parameter[name[n], name[m1]]]
call[name[A]][name[n]] assign[=] call[name[np].dot, parameter[name[X2], name[X2].T]]
call[name[B]][name[n]] assign[=] call[name[np].dot, parameter[name[X1], name[X2].T]]
<ast.Delete object at 0x7da1b0fcf850>
if compare[name[self].transit_model is constant[None]] begin[:]
variable[C] assign[=] constant[0]
return[tuple[[<ast.Name object at 0x7da1b0fcee30>, <ast.Name object at 0x7da1b0fcd990>, <ast.Name object at 0x7da1b0fcfbe0>, <ast.Name object at 0x7da1b0fcc1c0>, <ast.Name object at 0x7da1b0e30520>, <ast.Name object at 0x7da1b0e30550>, <ast.Name object at 0x7da1b0e305b0>]]] | keyword[def] identifier[cv_precompute] ( identifier[self] , identifier[mask] , identifier[b] ):
literal[string]
identifier[m1] = identifier[self] . identifier[get_masked_chunk] ( identifier[b] )
identifier[flux] = identifier[self] . identifier[fraw] [ identifier[m1] ]
identifier[K] = identifier[GetCovariance] ( identifier[self] . identifier[kernel] , identifier[self] . identifier[kernel_params] ,
identifier[self] . identifier[time] [ identifier[m1] ], identifier[self] . identifier[fraw_err] [ identifier[m1] ])
identifier[med] = identifier[np] . identifier[nanmedian] ( identifier[flux] )
identifier[M] = keyword[lambda] identifier[x] , identifier[axis] = literal[int] : identifier[np] . identifier[delete] ( identifier[x] , identifier[mask] , identifier[axis] = identifier[axis] )
identifier[m2] = identifier[M] ( identifier[m1] )
identifier[mK] = identifier[M] ( identifier[M] ( identifier[K] , identifier[axis] = literal[int] ), identifier[axis] = literal[int] )
identifier[f] = identifier[M] ( identifier[flux] )- identifier[med]
identifier[A] =[ keyword[None] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[pld_order] )]
identifier[B] =[ keyword[None] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[pld_order] )]
keyword[for] identifier[n] keyword[in] identifier[range] ( identifier[self] . identifier[pld_order] ):
keyword[if] identifier[self] . identifier[lam_idx] >= identifier[n] :
identifier[X2] = identifier[self] . identifier[X] ( identifier[n] , identifier[m2] )
identifier[X1] = identifier[self] . identifier[X] ( identifier[n] , identifier[m1] )
identifier[A] [ identifier[n] ]= identifier[np] . identifier[dot] ( identifier[X2] , identifier[X2] . identifier[T] )
identifier[B] [ identifier[n] ]= identifier[np] . identifier[dot] ( identifier[X1] , identifier[X2] . identifier[T] )
keyword[del] identifier[X1] , identifier[X2]
keyword[if] identifier[self] . identifier[transit_model] keyword[is] keyword[None] :
identifier[C] = literal[int]
keyword[else] :
identifier[C] = identifier[np] . identifier[zeros] (( identifier[len] ( identifier[m2] ), identifier[len] ( identifier[m2] )))
identifier[mean_transit_model] = identifier[med] * identifier[np] . identifier[sum] ([ identifier[tm] . identifier[depth] * identifier[tm] ( identifier[self] . identifier[time] [ identifier[m2] ])
keyword[for] identifier[tm] keyword[in] identifier[self] . identifier[transit_model] ], identifier[axis] = literal[int] )
identifier[f] -= identifier[mean_transit_model]
keyword[for] identifier[tm] keyword[in] identifier[self] . identifier[transit_model] :
identifier[X2] = identifier[tm] ( identifier[self] . identifier[time] [ identifier[m2] ]). identifier[reshape] (- literal[int] , literal[int] )
identifier[C] += identifier[tm] . identifier[var_depth] * identifier[np] . identifier[dot] ( identifier[X2] , identifier[X2] . identifier[T] )
keyword[del] identifier[X2]
keyword[return] identifier[A] , identifier[B] , identifier[C] , identifier[mK] , identifier[f] , identifier[m1] , identifier[m2] | def cv_precompute(self, mask, b):
"""
Pre-compute the matrices :py:obj:`A` and :py:obj:`B`
(cross-validation step only)
for chunk :py:obj:`b`.
"""
# Get current chunk and mask outliers
m1 = self.get_masked_chunk(b)
flux = self.fraw[m1]
K = GetCovariance(self.kernel, self.kernel_params, self.time[m1], self.fraw_err[m1])
med = np.nanmedian(flux)
# Now mask the validation set
M = lambda x, axis=0: np.delete(x, mask, axis=axis)
m2 = M(m1)
mK = M(M(K, axis=0), axis=1)
f = M(flux) - med
# Pre-compute the matrices
A = [None for i in range(self.pld_order)]
B = [None for i in range(self.pld_order)]
for n in range(self.pld_order):
# Only compute up to the current PLD order
if self.lam_idx >= n:
X2 = self.X(n, m2)
X1 = self.X(n, m1)
A[n] = np.dot(X2, X2.T)
B[n] = np.dot(X1, X2.T)
del X1, X2 # depends on [control=['if'], data=['n']] # depends on [control=['for'], data=['n']]
if self.transit_model is None:
C = 0 # depends on [control=['if'], data=[]]
else:
C = np.zeros((len(m2), len(m2)))
mean_transit_model = med * np.sum([tm.depth * tm(self.time[m2]) for tm in self.transit_model], axis=0)
f -= mean_transit_model
for tm in self.transit_model:
X2 = tm(self.time[m2]).reshape(-1, 1)
C += tm.var_depth * np.dot(X2, X2.T)
del X2 # depends on [control=['for'], data=['tm']]
return (A, B, C, mK, f, m1, m2) |
def id_token(access_token, nonce=None, claims_request=None):
"""
Returns data required for an OpenID Connect ID Token according to:
- http://openid.net/specs/openid-connect-basic-1_0.html#IDToken
Arguments:
access_token (:class:`AccessToken`): Associated OAuth2 access token.
nonce (str): Optional nonce to protect against replay attacks.
claims_request (dict): Optional dictionary with the claims request parameters.
Information on the `claims_request` parameter specification:
- http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
Returns an :class:`IDToken` instance with the scopes from the
access_token and the corresponding claims. Claims in the
`claims_request` paramater id_token section will be included *in
addition* to the ones corresponding to the scopes specified in the
`access_token`.
"""
handlers = HANDLERS['id_token']
# Select only the relevant section of the claims request.
claims_request_section = claims_request.get('id_token', {}) if claims_request else {}
scope_request = provider.scope.to_names(access_token.scope)
if nonce:
claims_request_section.update({'nonce': {'value': nonce}})
scopes, claims = collect(
handlers,
access_token,
scope_request=scope_request,
claims_request=claims_request_section,
)
return IDToken(access_token, scopes, claims) | def function[id_token, parameter[access_token, nonce, claims_request]]:
constant[
Returns data required for an OpenID Connect ID Token according to:
- http://openid.net/specs/openid-connect-basic-1_0.html#IDToken
Arguments:
access_token (:class:`AccessToken`): Associated OAuth2 access token.
nonce (str): Optional nonce to protect against replay attacks.
claims_request (dict): Optional dictionary with the claims request parameters.
Information on the `claims_request` parameter specification:
- http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
Returns an :class:`IDToken` instance with the scopes from the
access_token and the corresponding claims. Claims in the
`claims_request` paramater id_token section will be included *in
addition* to the ones corresponding to the scopes specified in the
`access_token`.
]
variable[handlers] assign[=] call[name[HANDLERS]][constant[id_token]]
variable[claims_request_section] assign[=] <ast.IfExp object at 0x7da1b04a76a0>
variable[scope_request] assign[=] call[name[provider].scope.to_names, parameter[name[access_token].scope]]
if name[nonce] begin[:]
call[name[claims_request_section].update, parameter[dictionary[[<ast.Constant object at 0x7da1b04a7730>], [<ast.Dict object at 0x7da1b04a56f0>]]]]
<ast.Tuple object at 0x7da1b04a45b0> assign[=] call[name[collect], parameter[name[handlers], name[access_token]]]
return[call[name[IDToken], parameter[name[access_token], name[scopes], name[claims]]]] | keyword[def] identifier[id_token] ( identifier[access_token] , identifier[nonce] = keyword[None] , identifier[claims_request] = keyword[None] ):
literal[string]
identifier[handlers] = identifier[HANDLERS] [ literal[string] ]
identifier[claims_request_section] = identifier[claims_request] . identifier[get] ( literal[string] ,{}) keyword[if] identifier[claims_request] keyword[else] {}
identifier[scope_request] = identifier[provider] . identifier[scope] . identifier[to_names] ( identifier[access_token] . identifier[scope] )
keyword[if] identifier[nonce] :
identifier[claims_request_section] . identifier[update] ({ literal[string] :{ literal[string] : identifier[nonce] }})
identifier[scopes] , identifier[claims] = identifier[collect] (
identifier[handlers] ,
identifier[access_token] ,
identifier[scope_request] = identifier[scope_request] ,
identifier[claims_request] = identifier[claims_request_section] ,
)
keyword[return] identifier[IDToken] ( identifier[access_token] , identifier[scopes] , identifier[claims] ) | def id_token(access_token, nonce=None, claims_request=None):
"""
Returns data required for an OpenID Connect ID Token according to:
- http://openid.net/specs/openid-connect-basic-1_0.html#IDToken
Arguments:
access_token (:class:`AccessToken`): Associated OAuth2 access token.
nonce (str): Optional nonce to protect against replay attacks.
claims_request (dict): Optional dictionary with the claims request parameters.
Information on the `claims_request` parameter specification:
- http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
Returns an :class:`IDToken` instance with the scopes from the
access_token and the corresponding claims. Claims in the
`claims_request` paramater id_token section will be included *in
addition* to the ones corresponding to the scopes specified in the
`access_token`.
"""
handlers = HANDLERS['id_token']
# Select only the relevant section of the claims request.
claims_request_section = claims_request.get('id_token', {}) if claims_request else {}
scope_request = provider.scope.to_names(access_token.scope)
if nonce:
claims_request_section.update({'nonce': {'value': nonce}}) # depends on [control=['if'], data=[]]
(scopes, claims) = collect(handlers, access_token, scope_request=scope_request, claims_request=claims_request_section)
return IDToken(access_token, scopes, claims) |
def phot_mag(*args, **kwargs):
"""Wrapper around phot which only returns the computed magnitude directly."""
try:
return phot(*args, **kwargs)
except IndexError:
raise TaskError("No photometric records returned for {0}".format(kwargs)) | def function[phot_mag, parameter[]]:
constant[Wrapper around phot which only returns the computed magnitude directly.]
<ast.Try object at 0x7da1b191fb80> | keyword[def] identifier[phot_mag] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
keyword[return] identifier[phot] (* identifier[args] ,** identifier[kwargs] )
keyword[except] identifier[IndexError] :
keyword[raise] identifier[TaskError] ( literal[string] . identifier[format] ( identifier[kwargs] )) | def phot_mag(*args, **kwargs):
"""Wrapper around phot which only returns the computed magnitude directly."""
try:
return phot(*args, **kwargs) # depends on [control=['try'], data=[]]
except IndexError:
raise TaskError('No photometric records returned for {0}'.format(kwargs)) # depends on [control=['except'], data=[]] |
def get_link_url(self, datum=None):
"""Returns the final URL based on the value of ``url``.
If ``url`` is callable it will call the function.
If not, it will then try to call ``reverse`` on ``url``.
Failing that, it will simply return the value of ``url`` as-is.
When called for a row action, the current row data object will be
passed as the first parameter.
"""
if not self.url:
raise NotImplementedError('A LinkAction class must have a '
'url attribute or define its own '
'get_link_url method.')
if callable(self.url):
return self.url(datum, **self.kwargs)
try:
if datum:
obj_id = self.table.get_object_id(datum)
return urls.reverse(self.url, args=(obj_id,))
else:
return urls.reverse(self.url)
except urls.NoReverseMatch as ex:
LOG.info('No reverse found for "%(url)s": %(exception)s',
{'url': self.url, 'exception': ex})
return self.url | def function[get_link_url, parameter[self, datum]]:
constant[Returns the final URL based on the value of ``url``.
If ``url`` is callable it will call the function.
If not, it will then try to call ``reverse`` on ``url``.
Failing that, it will simply return the value of ``url`` as-is.
When called for a row action, the current row data object will be
passed as the first parameter.
]
if <ast.UnaryOp object at 0x7da1b19cc760> begin[:]
<ast.Raise object at 0x7da1b19cf760>
if call[name[callable], parameter[name[self].url]] begin[:]
return[call[name[self].url, parameter[name[datum]]]]
<ast.Try object at 0x7da1b184f340> | keyword[def] identifier[get_link_url] ( identifier[self] , identifier[datum] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[url] :
keyword[raise] identifier[NotImplementedError] ( literal[string]
literal[string]
literal[string] )
keyword[if] identifier[callable] ( identifier[self] . identifier[url] ):
keyword[return] identifier[self] . identifier[url] ( identifier[datum] ,** identifier[self] . identifier[kwargs] )
keyword[try] :
keyword[if] identifier[datum] :
identifier[obj_id] = identifier[self] . identifier[table] . identifier[get_object_id] ( identifier[datum] )
keyword[return] identifier[urls] . identifier[reverse] ( identifier[self] . identifier[url] , identifier[args] =( identifier[obj_id] ,))
keyword[else] :
keyword[return] identifier[urls] . identifier[reverse] ( identifier[self] . identifier[url] )
keyword[except] identifier[urls] . identifier[NoReverseMatch] keyword[as] identifier[ex] :
identifier[LOG] . identifier[info] ( literal[string] ,
{ literal[string] : identifier[self] . identifier[url] , literal[string] : identifier[ex] })
keyword[return] identifier[self] . identifier[url] | def get_link_url(self, datum=None):
"""Returns the final URL based on the value of ``url``.
If ``url`` is callable it will call the function.
If not, it will then try to call ``reverse`` on ``url``.
Failing that, it will simply return the value of ``url`` as-is.
When called for a row action, the current row data object will be
passed as the first parameter.
"""
if not self.url:
raise NotImplementedError('A LinkAction class must have a url attribute or define its own get_link_url method.') # depends on [control=['if'], data=[]]
if callable(self.url):
return self.url(datum, **self.kwargs) # depends on [control=['if'], data=[]]
try:
if datum:
obj_id = self.table.get_object_id(datum)
return urls.reverse(self.url, args=(obj_id,)) # depends on [control=['if'], data=[]]
else:
return urls.reverse(self.url) # depends on [control=['try'], data=[]]
except urls.NoReverseMatch as ex:
LOG.info('No reverse found for "%(url)s": %(exception)s', {'url': self.url, 'exception': ex})
return self.url # depends on [control=['except'], data=['ex']] |
def simulated_binary_crossover(random, mom, dad, args):
    """Return the offspring of simulated binary crossover on the candidates.
    This function performs simulated binary crossover (SBX), following the
    implementation in NSGA-II
    `(Deb et al., ICANNGA 1999) <http://vision.ucsd.edu/~sagarwal/icannga.pdf>`_.
    .. Arguments:
       random -- the random number generator object
       mom -- the first parent candidate
       dad -- the second parent candidate
       args -- a dictionary of keyword arguments
    Optional keyword arguments in args:
    - *crossover_rate* -- the rate at which crossover is performed
      (default 1.0)
    - *sbx_distribution_index* -- the non-negative distribution index
      (default 10)
    A small value of the `sbx_distribution_index` optional argument allows
    solutions far away from parents to be created as child solutions,
    while a large value restricts only near-parent solutions to be created as
    child solutions.
    """
    crossover_rate = args.setdefault('crossover_rate', 1.0)
    # Recombine only with probability `crossover_rate`; otherwise the parents
    # are returned unchanged.
    if random.random() < crossover_rate:
        di = args.setdefault('sbx_distribution_index', 10)
        # Bounder supplying per-gene lower/upper bounds for clamping below.
        bounder = args['_ec'].bounder
        bro = copy.copy(dad)
        sis = copy.copy(mom)
        # Recombine gene-by-gene; genes beyond the shortest of the four
        # sequences are left as the copied parent values.
        for i, (m, d, lb, ub) in enumerate(zip(mom, dad, bounder.lower_bound, bounder.upper_bound)):
            try:
                # Order the gene pair so that m <= d.
                if m > d:
                    m, d = d, m
                # Spread limit based on the distance to the nearest bound
                # (bounded-SBX form; see the docstring reference). Raises
                # ZeroDivisionError when m == d, handled below.
                beta = 1.0 + 2 * min(m - lb, ub - d) / float(d - m)
                alpha = 2.0 - 1.0 / beta**(di + 1.0)
                u = random.random()
                # Sample the spread factor beta_q from the SBX polynomial
                # distribution with index `di`.
                if u <= (1.0 / alpha):
                    beta_q = (u * alpha)**(1.0 / float(di + 1.0))
                else:
                    beta_q = (1.0 / (2.0 - u * alpha))**(1.0 / float(di + 1.0))
                # Children are placed symmetrically about the parents'
                # midpoint, then clamped into [lb, ub].
                bro_val = 0.5 * ((m + d) - beta_q * (d - m))
                bro_val = max(min(bro_val, ub), lb)
                sis_val = 0.5 * ((m + d) + beta_q * (d - m))
                sis_val = max(min(sis_val, ub), lb)
                # Randomly swap which child receives which value.
                if random.random() > 0.5:
                    bro_val, sis_val = sis_val, bro_val
                bro[i] = bro_val
                sis[i] = sis_val
            except ZeroDivisionError:
                # Parents share this gene value (d == m), so the offspring
                # already have legitimate values for every element and no
                # special action is needed here.
                pass
        return [bro, sis]
    else:
        # Crossover skipped for this pair: return the parents unchanged.
        return [mom, dad] | def function[simulated_binary_crossover, parameter[random, mom, dad, args]]:
constant[Return the offspring of simulated binary crossover on the candidates.
This function performs simulated binary crossover (SBX), following the
implementation in NSGA-II
`(Deb et al., ICANNGA 1999) <http://vision.ucsd.edu/~sagarwal/icannga.pdf>`_.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
- *sbx_distribution_index* -- the non-negative distribution index
(default 10)
A small value of the `sbx_distribution_index` optional argument allows
solutions far away from parents to be created as child solutions,
while a large value restricts only near-parent solutions to be created as
child solutions.
]
variable[crossover_rate] assign[=] call[name[args].setdefault, parameter[constant[crossover_rate], constant[1.0]]]
if compare[call[name[random].random, parameter[]] less[<] name[crossover_rate]] begin[:]
variable[di] assign[=] call[name[args].setdefault, parameter[constant[sbx_distribution_index], constant[10]]]
variable[bounder] assign[=] call[name[args]][constant[_ec]].bounder
variable[bro] assign[=] call[name[copy].copy, parameter[name[dad]]]
variable[sis] assign[=] call[name[copy].copy, parameter[name[mom]]]
for taget[tuple[[<ast.Name object at 0x7da18eb54070>, <ast.Tuple object at 0x7da18eb57730>]]] in starred[call[name[enumerate], parameter[call[name[zip], parameter[name[mom], name[dad], name[bounder].lower_bound, name[bounder].upper_bound]]]]] begin[:]
<ast.Try object at 0x7da18eb54370>
return[list[[<ast.Name object at 0x7da1b15e8dc0>, <ast.Name object at 0x7da1b15eae00>]]] | keyword[def] identifier[simulated_binary_crossover] ( identifier[random] , identifier[mom] , identifier[dad] , identifier[args] ):
literal[string]
identifier[crossover_rate] = identifier[args] . identifier[setdefault] ( literal[string] , literal[int] )
keyword[if] identifier[random] . identifier[random] ()< identifier[crossover_rate] :
identifier[di] = identifier[args] . identifier[setdefault] ( literal[string] , literal[int] )
identifier[bounder] = identifier[args] [ literal[string] ]. identifier[bounder]
identifier[bro] = identifier[copy] . identifier[copy] ( identifier[dad] )
identifier[sis] = identifier[copy] . identifier[copy] ( identifier[mom] )
keyword[for] identifier[i] ,( identifier[m] , identifier[d] , identifier[lb] , identifier[ub] ) keyword[in] identifier[enumerate] ( identifier[zip] ( identifier[mom] , identifier[dad] , identifier[bounder] . identifier[lower_bound] , identifier[bounder] . identifier[upper_bound] )):
keyword[try] :
keyword[if] identifier[m] > identifier[d] :
identifier[m] , identifier[d] = identifier[d] , identifier[m]
identifier[beta] = literal[int] + literal[int] * identifier[min] ( identifier[m] - identifier[lb] , identifier[ub] - identifier[d] )/ identifier[float] ( identifier[d] - identifier[m] )
identifier[alpha] = literal[int] - literal[int] / identifier[beta] **( identifier[di] + literal[int] )
identifier[u] = identifier[random] . identifier[random] ()
keyword[if] identifier[u] <=( literal[int] / identifier[alpha] ):
identifier[beta_q] =( identifier[u] * identifier[alpha] )**( literal[int] / identifier[float] ( identifier[di] + literal[int] ))
keyword[else] :
identifier[beta_q] =( literal[int] /( literal[int] - identifier[u] * identifier[alpha] ))**( literal[int] / identifier[float] ( identifier[di] + literal[int] ))
identifier[bro_val] = literal[int] *(( identifier[m] + identifier[d] )- identifier[beta_q] *( identifier[d] - identifier[m] ))
identifier[bro_val] = identifier[max] ( identifier[min] ( identifier[bro_val] , identifier[ub] ), identifier[lb] )
identifier[sis_val] = literal[int] *(( identifier[m] + identifier[d] )+ identifier[beta_q] *( identifier[d] - identifier[m] ))
identifier[sis_val] = identifier[max] ( identifier[min] ( identifier[sis_val] , identifier[ub] ), identifier[lb] )
keyword[if] identifier[random] . identifier[random] ()> literal[int] :
identifier[bro_val] , identifier[sis_val] = identifier[sis_val] , identifier[bro_val]
identifier[bro] [ identifier[i] ]= identifier[bro_val]
identifier[sis] [ identifier[i] ]= identifier[sis_val]
keyword[except] identifier[ZeroDivisionError] :
keyword[pass]
keyword[return] [ identifier[bro] , identifier[sis] ]
keyword[else] :
keyword[return] [ identifier[mom] , identifier[dad] ] | def simulated_binary_crossover(random, mom, dad, args):
"""Return the offspring of simulated binary crossover on the candidates.
This function performs simulated binary crossover (SBX), following the
implementation in NSGA-II
`(Deb et al., ICANNGA 1999) <http://vision.ucsd.edu/~sagarwal/icannga.pdf>`_.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
- *sbx_distribution_index* -- the non-negative distribution index
(default 10)
A small value of the `sbx_distribution_index` optional argument allows
solutions far away from parents to be created as child solutions,
while a large value restricts only near-parent solutions to be created as
child solutions.
"""
crossover_rate = args.setdefault('crossover_rate', 1.0)
if random.random() < crossover_rate:
di = args.setdefault('sbx_distribution_index', 10)
bounder = args['_ec'].bounder
bro = copy.copy(dad)
sis = copy.copy(mom)
for (i, (m, d, lb, ub)) in enumerate(zip(mom, dad, bounder.lower_bound, bounder.upper_bound)):
try:
if m > d:
(m, d) = (d, m) # depends on [control=['if'], data=['m', 'd']]
beta = 1.0 + 2 * min(m - lb, ub - d) / float(d - m)
alpha = 2.0 - 1.0 / beta ** (di + 1.0)
u = random.random()
if u <= 1.0 / alpha:
beta_q = (u * alpha) ** (1.0 / float(di + 1.0)) # depends on [control=['if'], data=['u']]
else:
beta_q = (1.0 / (2.0 - u * alpha)) ** (1.0 / float(di + 1.0))
bro_val = 0.5 * (m + d - beta_q * (d - m))
bro_val = max(min(bro_val, ub), lb)
sis_val = 0.5 * (m + d + beta_q * (d - m))
sis_val = max(min(sis_val, ub), lb)
if random.random() > 0.5:
(bro_val, sis_val) = (sis_val, bro_val) # depends on [control=['if'], data=[]]
bro[i] = bro_val
sis[i] = sis_val # depends on [control=['try'], data=[]]
except ZeroDivisionError:
# The offspring already have legitimate values for every element,
# so no need to take any special action here.
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
return [bro, sis] # depends on [control=['if'], data=[]]
else:
return [mom, dad] |
def list_nodes():
    '''
    Return a list of registered VMs
    CLI Example:
    .. code-block:: bash
    salt '*' vboxmanage.list_nodes
    '''
    nodes = list_nodes_full()
    ret = {}
    # Condense the full node listing down to the standard cloud-style fields.
    for name in nodes:
        info = nodes[name]
        ret[name] = {
            'id': info['UUID'],
            'image': info['Guest OS'],
            'name': info['Name'],
            'state': None,
            'private_ips': [],
            'public_ips': [],
            'size': '{0} RAM, {1} CPU'.format(
                info['Memory size'],
                info['Number of CPUs'],
            ),
        }
    return ret
constant[
Return a list of registered VMs
CLI Example:
.. code-block:: bash
salt '*' vboxmanage.list_nodes
]
variable[ret] assign[=] dictionary[[], []]
variable[nodes] assign[=] call[name[list_nodes_full], parameter[]]
for taget[name[node]] in starred[name[nodes]] begin[:]
call[name[ret]][name[node]] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c0d9f0>, <ast.Constant object at 0x7da1b1c0e980>, <ast.Constant object at 0x7da1b1c0c9d0>, <ast.Constant object at 0x7da1b1c0d600>, <ast.Constant object at 0x7da1b1c0d450>, <ast.Constant object at 0x7da1b1c0e500>], [<ast.Subscript object at 0x7da1b1c0cb80>, <ast.Subscript object at 0x7da1b1c0ff40>, <ast.Subscript object at 0x7da1b1c0fdf0>, <ast.Constant object at 0x7da1b1c0d2a0>, <ast.List object at 0x7da1b1c0c040>, <ast.List object at 0x7da1b1c0e7d0>]]
call[call[name[ret]][name[node]]][constant[size]] assign[=] call[constant[{0} RAM, {1} CPU].format, parameter[call[call[name[nodes]][name[node]]][constant[Memory size]], call[call[name[nodes]][name[node]]][constant[Number of CPUs]]]]
return[name[ret]] | keyword[def] identifier[list_nodes] ():
literal[string]
identifier[ret] ={}
identifier[nodes] = identifier[list_nodes_full] ()
keyword[for] identifier[node] keyword[in] identifier[nodes] :
identifier[ret] [ identifier[node] ]={
literal[string] : identifier[nodes] [ identifier[node] ][ literal[string] ],
literal[string] : identifier[nodes] [ identifier[node] ][ literal[string] ],
literal[string] : identifier[nodes] [ identifier[node] ][ literal[string] ],
literal[string] : keyword[None] ,
literal[string] :[],
literal[string] :[],
}
identifier[ret] [ identifier[node] ][ literal[string] ]= literal[string] . identifier[format] (
identifier[nodes] [ identifier[node] ][ literal[string] ],
identifier[nodes] [ identifier[node] ][ literal[string] ],
)
keyword[return] identifier[ret] | def list_nodes():
"""
Return a list of registered VMs
CLI Example:
.. code-block:: bash
salt '*' vboxmanage.list_nodes
"""
ret = {}
nodes = list_nodes_full()
for node in nodes:
ret[node] = {'id': nodes[node]['UUID'], 'image': nodes[node]['Guest OS'], 'name': nodes[node]['Name'], 'state': None, 'private_ips': [], 'public_ips': []}
ret[node]['size'] = '{0} RAM, {1} CPU'.format(nodes[node]['Memory size'], nodes[node]['Number of CPUs']) # depends on [control=['for'], data=['node']]
return ret |
def corpus(self):
        '''Command to add a corpus to the dsrt library'''
        # Initialize the corpus subcommand's argparser.
        description = '''The corpus subcommand has a number of subcommands of its own, including:
        list\t-\tlists all available corpora in dsrt's library
        add\t-\tadds a corpus to dsrt's library'''
        parser = argparse.ArgumentParser(description=description)
        self.init_corpus_args(parser)
        # Parse only the sub-subcommand token (argv[2]); any further arguments
        # are handled by the dispatched corpus_<name> handler.
        args = parser.parse_args(sys.argv[2:3])
        corpus_command = 'corpus_' + args.corpus_command
        if not hasattr(self, corpus_command):
            print('Unrecognized corpus command.')
            parser.print_help()
            # Fix: use sys.exit instead of the site-provided exit() builtin,
            # which is not guaranteed to exist outside interactive sessions.
            sys.exit(1)
        # Dispatch to the corpus_<name> handler method.
        getattr(self, corpus_command)()
constant[Command to add a corpus to the dsrt library]
variable[description] assign[=] constant[The corpus subcommand has a number of subcommands of its own, including:
list - lists all available corpora in dsrt's library
add - adds a corpus to dsrt's library]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[self].init_corpus_args, parameter[name[parser]]]
variable[args] assign[=] call[name[parser].parse_args, parameter[call[name[sys].argv][<ast.Slice object at 0x7da1b1649a50>]]]
variable[corpus_command] assign[=] binary_operation[constant[corpus_] + name[args].corpus_command]
if <ast.UnaryOp object at 0x7da1b164a0b0> begin[:]
call[name[print], parameter[constant[Unrecognized corpus command.]]]
call[name[parser].print_help, parameter[]]
call[name[exit], parameter[constant[1]]]
call[call[name[getattr], parameter[name[self], name[corpus_command]]], parameter[]] | keyword[def] identifier[corpus] ( identifier[self] ):
literal[string]
identifier[description] = literal[string]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ( identifier[description] = identifier[description] )
identifier[self] . identifier[init_corpus_args] ( identifier[parser] )
identifier[args] = identifier[parser] . identifier[parse_args] ( identifier[sys] . identifier[argv] [ literal[int] : literal[int] ])
identifier[corpus_command] = literal[string] + identifier[args] . identifier[corpus_command]
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , identifier[corpus_command] ):
identifier[print] ( literal[string] )
identifier[parser] . identifier[print_help] ()
identifier[exit] ( literal[int] )
identifier[getattr] ( identifier[self] , identifier[corpus_command] )() | def corpus(self):
"""Command to add a corpus to the dsrt library"""
# Initialize the addcorpus subcommand's argparser
description = "The corpus subcommand has a number of subcommands of its own, including:\n list\t-\tlists all available corpora in dsrt's library\n add\t-\tadds a corpus to dsrt's library"
parser = argparse.ArgumentParser(description=description)
self.init_corpus_args(parser)
# parse the args we got
args = parser.parse_args(sys.argv[2:3])
corpus_command = 'corpus_' + args.corpus_command
if not hasattr(self, corpus_command):
print('Unrecognized corpus command.')
parser.print_help()
exit(1) # depends on [control=['if'], data=[]]
getattr(self, corpus_command)() |
def spectral_pars_from_catalog(cat):
    """Create spectral parameters from 3FGL catalog columns."""
    spectrum_type = cat['SpectrumType']
    pars = get_function_defaults(cat['SpectrumType'])
    # Position of each parameter name inside the catalog's value array.
    par_idxs = dict(
        (name, pos)
        for pos, name in enumerate(get_function_par_names(cat['SpectrumType']))
    )
    for name in pars:
        pars[name]['value'] = cat['param_values'][par_idxs[name]]
    if spectrum_type == 'PowerLaw':
        index = pars['Index']
        # Store the index with a negative sign and a matching scale of -1.
        index['value'] = -1.0 * index['value']
        index['scale'] = -1.0
        pars['Scale']['scale'] = 1.0
        # Bounds bracket the (sign-flipped) value by at least one unit.
        index['max'] = max(5.0, index['value'] + 1.0)
        index['min'] = min(0.0, index['value'] - 1.0)
        pars['Prefactor'] = make_parameter_dict(pars['Prefactor'])
        for name, arg1, arg2 in (('Scale', True, False),
                                 ('Index', False, False)):
            pars[name] = make_parameter_dict(pars[name], arg1, arg2)
    elif spectrum_type == 'LogParabola':
        for name, arg1, arg2 in (('norm', False, True),
                                 ('Eb', True, False),
                                 ('alpha', False, False),
                                 ('beta', False, False)):
            pars[name] = make_parameter_dict(pars[name], arg1, arg2)
    elif spectrum_type == 'PLSuperExpCutoff':
        index1 = pars['Index1']
        index1['value'] = -1.0 * index1['value']
        index1['scale'] = -1.0
        pars['Index2']['scale'] = 1.0
        pars['Prefactor'] = make_parameter_dict(pars['Prefactor'])
        for name, arg1, arg2 in (('Scale', True, False),
                                 ('Index1', False, False),
                                 ('Index2', False, False),
                                 ('Cutoff', False, True)):
            pars[name] = make_parameter_dict(pars[name], arg1, arg2)
    elif spectrum_type == 'PLSuperExpCutoff2':
        index1 = pars['Index1']
        index1['value'] = -1.0 * index1['value']
        index1['scale'] = -1.0
        pars['Index2']['scale'] = 1.0
        pars['Prefactor'] = make_parameter_dict(pars['Prefactor'])
        for name, arg1, arg2 in (('Scale', True, False),
                                 ('Index1', False, False),
                                 ('Index2', False, False),
                                 ('Expfactor', False, True)):
            pars[name] = make_parameter_dict(pars[name], arg1, arg2)
    else:
        raise Exception('Unsupported spectral type:' + spectrum_type)
    return pars
constant[Create spectral parameters from 3FGL catalog columns.]
variable[spectrum_type] assign[=] call[name[cat]][constant[SpectrumType]]
variable[pars] assign[=] call[name[get_function_defaults], parameter[call[name[cat]][constant[SpectrumType]]]]
variable[par_idxs] assign[=] <ast.DictComp object at 0x7da18bcc8190>
for taget[name[k]] in starred[name[pars]] begin[:]
call[call[name[pars]][name[k]]][constant[value]] assign[=] call[call[name[cat]][constant[param_values]]][call[name[par_idxs]][name[k]]]
if compare[name[spectrum_type] equal[==] constant[PowerLaw]] begin[:]
<ast.AugAssign object at 0x7da20e749720>
call[call[name[pars]][constant[Index]]][constant[scale]] assign[=] <ast.UnaryOp object at 0x7da20e748820>
call[call[name[pars]][constant[Scale]]][constant[scale]] assign[=] constant[1.0]
call[call[name[pars]][constant[Index]]][constant[max]] assign[=] call[name[max], parameter[constant[5.0], binary_operation[call[call[name[pars]][constant[Index]]][constant[value]] + constant[1.0]]]]
call[call[name[pars]][constant[Index]]][constant[min]] assign[=] call[name[min], parameter[constant[0.0], binary_operation[call[call[name[pars]][constant[Index]]][constant[value]] - constant[1.0]]]]
call[name[pars]][constant[Prefactor]] assign[=] call[name[make_parameter_dict], parameter[call[name[pars]][constant[Prefactor]]]]
call[name[pars]][constant[Scale]] assign[=] call[name[make_parameter_dict], parameter[call[name[pars]][constant[Scale]], constant[True], constant[False]]]
call[name[pars]][constant[Index]] assign[=] call[name[make_parameter_dict], parameter[call[name[pars]][constant[Index]], constant[False], constant[False]]]
return[name[pars]] | keyword[def] identifier[spectral_pars_from_catalog] ( identifier[cat] ):
literal[string]
identifier[spectrum_type] = identifier[cat] [ literal[string] ]
identifier[pars] = identifier[get_function_defaults] ( identifier[cat] [ literal[string] ])
identifier[par_idxs] ={ identifier[k] : identifier[i] keyword[for] identifier[i] , identifier[k] keyword[in]
identifier[enumerate] ( identifier[get_function_par_names] ( identifier[cat] [ literal[string] ]))}
keyword[for] identifier[k] keyword[in] identifier[pars] :
identifier[pars] [ identifier[k] ][ literal[string] ]= identifier[cat] [ literal[string] ][ identifier[par_idxs] [ identifier[k] ]]
keyword[if] identifier[spectrum_type] == literal[string] :
identifier[pars] [ literal[string] ][ literal[string] ]*=- literal[int]
identifier[pars] [ literal[string] ][ literal[string] ]=- literal[int]
identifier[pars] [ literal[string] ][ literal[string] ]= literal[int]
identifier[pars] [ literal[string] ][ literal[string] ]= identifier[max] ( literal[int] , identifier[pars] [ literal[string] ][ literal[string] ]+ literal[int] )
identifier[pars] [ literal[string] ][ literal[string] ]= identifier[min] ( literal[int] , identifier[pars] [ literal[string] ][ literal[string] ]- literal[int] )
identifier[pars] [ literal[string] ]= identifier[make_parameter_dict] ( identifier[pars] [ literal[string] ])
identifier[pars] [ literal[string] ]= identifier[make_parameter_dict] ( identifier[pars] [ literal[string] ], keyword[True] , keyword[False] )
identifier[pars] [ literal[string] ]= identifier[make_parameter_dict] ( identifier[pars] [ literal[string] ], keyword[False] , keyword[False] )
keyword[elif] identifier[spectrum_type] == literal[string] :
identifier[pars] [ literal[string] ]= identifier[make_parameter_dict] ( identifier[pars] [ literal[string] ], keyword[False] , keyword[True] )
identifier[pars] [ literal[string] ]= identifier[make_parameter_dict] ( identifier[pars] [ literal[string] ], keyword[True] , keyword[False] )
identifier[pars] [ literal[string] ]= identifier[make_parameter_dict] ( identifier[pars] [ literal[string] ], keyword[False] , keyword[False] )
identifier[pars] [ literal[string] ]= identifier[make_parameter_dict] ( identifier[pars] [ literal[string] ], keyword[False] , keyword[False] )
keyword[elif] identifier[spectrum_type] == literal[string] :
identifier[pars] [ literal[string] ][ literal[string] ]*=- literal[int]
identifier[pars] [ literal[string] ][ literal[string] ]=- literal[int]
identifier[pars] [ literal[string] ][ literal[string] ]= literal[int]
identifier[pars] [ literal[string] ]= identifier[make_parameter_dict] ( identifier[pars] [ literal[string] ])
identifier[pars] [ literal[string] ]= identifier[make_parameter_dict] ( identifier[pars] [ literal[string] ], keyword[True] , keyword[False] )
identifier[pars] [ literal[string] ]= identifier[make_parameter_dict] ( identifier[pars] [ literal[string] ], keyword[False] , keyword[False] )
identifier[pars] [ literal[string] ]= identifier[make_parameter_dict] ( identifier[pars] [ literal[string] ], keyword[False] , keyword[False] )
identifier[pars] [ literal[string] ]= identifier[make_parameter_dict] ( identifier[pars] [ literal[string] ], keyword[False] , keyword[True] )
keyword[elif] identifier[spectrum_type] == literal[string] :
identifier[pars] [ literal[string] ][ literal[string] ]*=- literal[int]
identifier[pars] [ literal[string] ][ literal[string] ]=- literal[int]
identifier[pars] [ literal[string] ][ literal[string] ]= literal[int]
identifier[pars] [ literal[string] ]= identifier[make_parameter_dict] ( identifier[pars] [ literal[string] ])
identifier[pars] [ literal[string] ]= identifier[make_parameter_dict] ( identifier[pars] [ literal[string] ], keyword[True] , keyword[False] )
identifier[pars] [ literal[string] ]= identifier[make_parameter_dict] ( identifier[pars] [ literal[string] ], keyword[False] , keyword[False] )
identifier[pars] [ literal[string] ]= identifier[make_parameter_dict] ( identifier[pars] [ literal[string] ], keyword[False] , keyword[False] )
identifier[pars] [ literal[string] ]= identifier[make_parameter_dict] ( identifier[pars] [ literal[string] ], keyword[False] , keyword[True] )
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] + identifier[spectrum_type] )
keyword[return] identifier[pars] | def spectral_pars_from_catalog(cat):
"""Create spectral parameters from 3FGL catalog columns."""
spectrum_type = cat['SpectrumType']
pars = get_function_defaults(cat['SpectrumType'])
par_idxs = {k: i for (i, k) in enumerate(get_function_par_names(cat['SpectrumType']))}
for k in pars:
pars[k]['value'] = cat['param_values'][par_idxs[k]] # depends on [control=['for'], data=['k']]
if spectrum_type == 'PowerLaw':
pars['Index']['value'] *= -1.0
pars['Index']['scale'] = -1.0
pars['Scale']['scale'] = 1.0
pars['Index']['max'] = max(5.0, pars['Index']['value'] + 1.0)
pars['Index']['min'] = min(0.0, pars['Index']['value'] - 1.0)
pars['Prefactor'] = make_parameter_dict(pars['Prefactor'])
pars['Scale'] = make_parameter_dict(pars['Scale'], True, False)
pars['Index'] = make_parameter_dict(pars['Index'], False, False) # depends on [control=['if'], data=[]]
elif spectrum_type == 'LogParabola':
pars['norm'] = make_parameter_dict(pars['norm'], False, True)
pars['Eb'] = make_parameter_dict(pars['Eb'], True, False)
pars['alpha'] = make_parameter_dict(pars['alpha'], False, False)
pars['beta'] = make_parameter_dict(pars['beta'], False, False) # depends on [control=['if'], data=[]]
elif spectrum_type == 'PLSuperExpCutoff':
pars['Index1']['value'] *= -1.0
pars['Index1']['scale'] = -1.0
pars['Index2']['scale'] = 1.0
pars['Prefactor'] = make_parameter_dict(pars['Prefactor'])
pars['Scale'] = make_parameter_dict(pars['Scale'], True, False)
pars['Index1'] = make_parameter_dict(pars['Index1'], False, False)
pars['Index2'] = make_parameter_dict(pars['Index2'], False, False)
pars['Cutoff'] = make_parameter_dict(pars['Cutoff'], False, True) # depends on [control=['if'], data=[]]
elif spectrum_type == 'PLSuperExpCutoff2':
pars['Index1']['value'] *= -1.0
pars['Index1']['scale'] = -1.0
pars['Index2']['scale'] = 1.0
pars['Prefactor'] = make_parameter_dict(pars['Prefactor'])
pars['Scale'] = make_parameter_dict(pars['Scale'], True, False)
pars['Index1'] = make_parameter_dict(pars['Index1'], False, False)
pars['Index2'] = make_parameter_dict(pars['Index2'], False, False)
pars['Expfactor'] = make_parameter_dict(pars['Expfactor'], False, True) # depends on [control=['if'], data=[]]
else:
raise Exception('Unsupported spectral type:' + spectrum_type)
return pars |
def state_to_cookie(state, name, path, encryption_key):
    """
    Saves a state to a cookie
    :type state: satosa.state.State
    :type name: str
    :type path: str
    :type encryption_key: str
    :rtype: http.cookies.SimpleCookie
    :param state: The state to save
    :param name: Name identifier of the cookie
    :param path: Endpoint path the cookie will be associated to
    :param encryption_key: Key to encrypt the state information
    :return: A cookie
    """
    # A state marked for deletion is serialized as an empty cookie that
    # expires immediately (max-age 0); otherwise the state is serialized
    # encrypted with the given key.
    cookie_data = "" if state.delete else state.urlstate(encryption_key)
    max_age = 0 if state.delete else STATE_COOKIE_MAX_AGE
    # Fix: log the effective max-age (0 when deleting), not the
    # STATE_COOKIE_MAX_AGE constant, so the log matches the cookie emitted.
    satosa_logging(logger, logging.DEBUG,
                   "Saving state as cookie, secure: %s, max-age: %s, path: %s" %
                   (STATE_COOKIE_SECURE, max_age, path), state)
    cookie = SimpleCookie()
    cookie[name] = cookie_data
    cookie[name]["secure"] = STATE_COOKIE_SECURE
    cookie[name]["path"] = path
    cookie[name]["max-age"] = max_age
    return cookie
constant[
Saves a state to a cookie
:type state: satosa.state.State
:type name: str
:type path: str
:type encryption_key: str
:rtype: http.cookies.SimpleCookie
:param state: The state to save
:param name: Name identifier of the cookie
:param path: Endpoint path the cookie will be associated to
:param encryption_key: Key to encrypt the state information
:return: A cookie
]
variable[cookie_data] assign[=] <ast.IfExp object at 0x7da1b15184f0>
variable[max_age] assign[=] <ast.IfExp object at 0x7da1b15181f0>
call[name[satosa_logging], parameter[name[logger], name[logging].DEBUG, binary_operation[constant[Saving state as cookie, secure: %s, max-age: %s, path: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b15183d0>, <ast.Name object at 0x7da1b1519c60>, <ast.Name object at 0x7da1b1518a60>]]], name[state]]]
variable[cookie] assign[=] call[name[SimpleCookie], parameter[]]
call[name[cookie]][name[name]] assign[=] name[cookie_data]
call[call[name[cookie]][name[name]]][constant[secure]] assign[=] name[STATE_COOKIE_SECURE]
call[call[name[cookie]][name[name]]][constant[path]] assign[=] name[path]
call[call[name[cookie]][name[name]]][constant[max-age]] assign[=] name[max_age]
return[name[cookie]] | keyword[def] identifier[state_to_cookie] ( identifier[state] , identifier[name] , identifier[path] , identifier[encryption_key] ):
literal[string]
identifier[cookie_data] = literal[string] keyword[if] identifier[state] . identifier[delete] keyword[else] identifier[state] . identifier[urlstate] ( identifier[encryption_key] )
identifier[max_age] = literal[int] keyword[if] identifier[state] . identifier[delete] keyword[else] identifier[STATE_COOKIE_MAX_AGE]
identifier[satosa_logging] ( identifier[logger] , identifier[logging] . identifier[DEBUG] ,
literal[string] %
( identifier[STATE_COOKIE_SECURE] , identifier[STATE_COOKIE_MAX_AGE] , identifier[path] ), identifier[state] )
identifier[cookie] = identifier[SimpleCookie] ()
identifier[cookie] [ identifier[name] ]= identifier[cookie_data]
identifier[cookie] [ identifier[name] ][ literal[string] ]= identifier[STATE_COOKIE_SECURE]
identifier[cookie] [ identifier[name] ][ literal[string] ]= identifier[path]
identifier[cookie] [ identifier[name] ][ literal[string] ]= identifier[max_age]
keyword[return] identifier[cookie] | def state_to_cookie(state, name, path, encryption_key):
"""
Saves a state to a cookie
:type state: satosa.state.State
:type name: str
:type path: str
:type encryption_key: str
:rtype: http.cookies.SimpleCookie
:param state: The state to save
:param name: Name identifier of the cookie
:param path: Endpoint path the cookie will be associated to
:param encryption_key: Key to encrypt the state information
:return: A cookie
"""
cookie_data = '' if state.delete else state.urlstate(encryption_key)
max_age = 0 if state.delete else STATE_COOKIE_MAX_AGE
satosa_logging(logger, logging.DEBUG, 'Saving state as cookie, secure: %s, max-age: %s, path: %s' % (STATE_COOKIE_SECURE, STATE_COOKIE_MAX_AGE, path), state)
cookie = SimpleCookie()
cookie[name] = cookie_data
cookie[name]['secure'] = STATE_COOKIE_SECURE
cookie[name]['path'] = path
cookie[name]['max-age'] = max_age
return cookie |
def skip_redundant(iterable, skipset=None):
    """
    Yield each item of *iterable* at most once, omitting any item that is
    already present in *skipset*. Redundant items are repeated items or
    items in the original skipset. Note: a caller-supplied *skipset* is
    updated in place with every yielded item.
    """
    seen = set() if skipset is None else skipset
    for item in iterable:
        if item in seen:
            continue
        seen.add(item)
        yield item
constant[
Redundant items are repeated items or items in the original skipset.
]
if compare[name[skipset] is constant[None]] begin[:]
variable[skipset] assign[=] call[name[set], parameter[]]
for taget[name[item]] in starred[name[iterable]] begin[:]
if compare[name[item] <ast.NotIn object at 0x7da2590d7190> name[skipset]] begin[:]
call[name[skipset].add, parameter[name[item]]]
<ast.Yield object at 0x7da1b0668e50> | keyword[def] identifier[skip_redundant] ( identifier[iterable] , identifier[skipset] = keyword[None] ):
literal[string]
keyword[if] identifier[skipset] keyword[is] keyword[None] :
identifier[skipset] = identifier[set] ()
keyword[for] identifier[item] keyword[in] identifier[iterable] :
keyword[if] identifier[item] keyword[not] keyword[in] identifier[skipset] :
identifier[skipset] . identifier[add] ( identifier[item] )
keyword[yield] identifier[item] | def skip_redundant(iterable, skipset=None):
"""
Redundant items are repeated items or items in the original skipset.
"""
if skipset is None:
skipset = set() # depends on [control=['if'], data=['skipset']]
for item in iterable:
if item not in skipset:
skipset.add(item)
yield item # depends on [control=['if'], data=['item', 'skipset']] # depends on [control=['for'], data=['item']] |
def optimizer_arguments(self, states, internals, actions, terminal, reward, next_states, next_internals):
        """Build the **kwargs passed to the optimizer for one update step.

        Includes the global timestep, the list of variables to optimize, the
        batch of experience tensors, and the loss/reference functions the
        optimizer may call while performing the update.

        Args:
            states (dict): Dict of state tensors.
            internals (dict): Dict of prior internal state tensors.
            actions (dict): Dict of action tensors.
            terminal: 1D boolean is-terminal tensor.
            reward: 1D (float) rewards tensor.
            next_states (dict): Dict of successor state tensors.
            next_internals (dict): Dict of posterior internal state tensors.

        Returns:
            Optimizer arguments as dict to be used as **kwargs to the optimizer.
        """
        experience = dict(
            states=states,
            internals=internals,
            actions=actions,
            terminal=terminal,
            reward=reward,
            next_states=next_states,
            next_internals=next_internals,
            update=tf.constant(value=True),
        )
        optimizer_kwargs = {
            'time': self.global_timestep,
            'variables': self.get_variables(),
            'arguments': experience,
            'fn_reference': self.fn_reference,
            'fn_loss': self.fn_loss,
        }
        # Distributed setting: the optimizer also needs the global copies.
        if self.global_model is not None:
            optimizer_kwargs['global_variables'] = self.global_model.get_variables()
        return optimizer_kwargs
constant[
Returns the optimizer arguments including the time, the list of variables to optimize,
and various functions which the optimizer might require to perform an update step.
Args:
states (dict): Dict of state tensors.
internals (dict): Dict of prior internal state tensors.
actions (dict): Dict of action tensors.
terminal: 1D boolean is-terminal tensor.
reward: 1D (float) rewards tensor.
next_states (dict): Dict of successor state tensors.
next_internals (dict): Dict of posterior internal state tensors.
Returns:
Optimizer arguments as dict to be used as **kwargs to the optimizer.
]
variable[arguments] assign[=] call[name[dict], parameter[]]
if compare[name[self].global_model is_not constant[None]] begin[:]
call[name[arguments]][constant[global_variables]] assign[=] call[name[self].global_model.get_variables, parameter[]]
return[name[arguments]] | keyword[def] identifier[optimizer_arguments] ( identifier[self] , identifier[states] , identifier[internals] , identifier[actions] , identifier[terminal] , identifier[reward] , identifier[next_states] , identifier[next_internals] ):
literal[string]
identifier[arguments] = identifier[dict] (
identifier[time] = identifier[self] . identifier[global_timestep] ,
identifier[variables] = identifier[self] . identifier[get_variables] (),
identifier[arguments] = identifier[dict] (
identifier[states] = identifier[states] ,
identifier[internals] = identifier[internals] ,
identifier[actions] = identifier[actions] ,
identifier[terminal] = identifier[terminal] ,
identifier[reward] = identifier[reward] ,
identifier[next_states] = identifier[next_states] ,
identifier[next_internals] = identifier[next_internals] ,
identifier[update] = identifier[tf] . identifier[constant] ( identifier[value] = keyword[True] )
),
identifier[fn_reference] = identifier[self] . identifier[fn_reference] ,
identifier[fn_loss] = identifier[self] . identifier[fn_loss]
)
keyword[if] identifier[self] . identifier[global_model] keyword[is] keyword[not] keyword[None] :
identifier[arguments] [ literal[string] ]= identifier[self] . identifier[global_model] . identifier[get_variables] ()
keyword[return] identifier[arguments] | def optimizer_arguments(self, states, internals, actions, terminal, reward, next_states, next_internals):
"""
Returns the optimizer arguments including the time, the list of variables to optimize,
and various functions which the optimizer might require to perform an update step.
Args:
states (dict): Dict of state tensors.
internals (dict): Dict of prior internal state tensors.
actions (dict): Dict of action tensors.
terminal: 1D boolean is-terminal tensor.
reward: 1D (float) rewards tensor.
next_states (dict): Dict of successor state tensors.
next_internals (dict): Dict of posterior internal state tensors.
Returns:
Optimizer arguments as dict to be used as **kwargs to the optimizer.
"""
arguments = dict(time=self.global_timestep, variables=self.get_variables(), arguments=dict(states=states, internals=internals, actions=actions, terminal=terminal, reward=reward, next_states=next_states, next_internals=next_internals, update=tf.constant(value=True)), fn_reference=self.fn_reference, fn_loss=self.fn_loss)
if self.global_model is not None:
arguments['global_variables'] = self.global_model.get_variables() # depends on [control=['if'], data=[]]
return arguments |
def write_pid_file():
    """Write a ``<script name>.pid`` file containing this server's PID.

    Call when setting up a command line testserver.  The file is created
    in the current working directory, named after the running script with
    its extension replaced by ``.pid``.
    """
    # splitext is robust even when the script name does not end in '.py',
    # unlike a hard-coded [:-3] slice.
    base = os.path.splitext(os.path.basename(sys.argv[0]))[0]
    pidfile = base + '.pid'
    # The with-block closes the file; no explicit close() is needed.
    with open(pidfile, 'w') as fh:
        fh.write("%d\n" % os.getpid())
constant[Write a file with the PID of this server instance.
Call when setting up a command line testserver.
]
variable[pidfile] assign[=] binary_operation[call[call[name[os].path.basename, parameter[call[name[sys].argv][constant[0]]]]][<ast.Slice object at 0x7da20c6c5990>] + constant[.pid]]
with call[name[open], parameter[name[pidfile], constant[w]]] begin[:]
call[name[fh].write, parameter[binary_operation[constant[%d
] <ast.Mod object at 0x7da2590d6920> call[name[os].getpid, parameter[]]]]]
call[name[fh].close, parameter[]] | keyword[def] identifier[write_pid_file] ():
literal[string]
identifier[pidfile] = identifier[os] . identifier[path] . identifier[basename] ( identifier[sys] . identifier[argv] [ literal[int] ])[:- literal[int] ]+ literal[string]
keyword[with] identifier[open] ( identifier[pidfile] , literal[string] ) keyword[as] identifier[fh] :
identifier[fh] . identifier[write] ( literal[string] % identifier[os] . identifier[getpid] ())
identifier[fh] . identifier[close] () | def write_pid_file():
"""Write a file with the PID of this server instance.
Call when setting up a command line testserver.
"""
pidfile = os.path.basename(sys.argv[0])[:-3] + '.pid' # strip .py, add .pid
with open(pidfile, 'w') as fh:
fh.write('%d\n' % os.getpid())
fh.close() # depends on [control=['with'], data=['fh']] |
def descriptors(package):
    """Return the descriptor dictionary deserialized from JSON for *package*.

    Args:
        package (str): name of the python package to get settings for.

    Returns:
        dict or None: the parsed descriptor database, or None when the
        descriptor file does not exist on disk.
    """
    from os import path
    dpath = _descriptor_path(package)
    if not path.isfile(dpath):
        return None
    import json
    with open(dpath) as handle:
        return json.load(handle)
constant[Returns a dictionary of descriptors deserialized from JSON for the
specified package.
Args:
package (str): name of the python package to get settings for.
]
from relative_module[os] import module[path]
variable[dpath] assign[=] call[name[_descriptor_path], parameter[name[package]]]
if call[name[path].isfile, parameter[name[dpath]]] begin[:]
import module[json]
with call[name[open], parameter[name[dpath]]] begin[:]
variable[jdb] assign[=] call[name[json].load, parameter[name[f]]]
return[name[jdb]] | keyword[def] identifier[descriptors] ( identifier[package] ):
literal[string]
keyword[from] identifier[os] keyword[import] identifier[path]
identifier[dpath] = identifier[_descriptor_path] ( identifier[package] )
keyword[if] identifier[path] . identifier[isfile] ( identifier[dpath] ):
keyword[import] identifier[json]
keyword[with] identifier[open] ( identifier[dpath] ) keyword[as] identifier[f] :
identifier[jdb] = identifier[json] . identifier[load] ( identifier[f] )
keyword[return] identifier[jdb]
keyword[else] :
keyword[return] keyword[None] | def descriptors(package):
"""Returns a dictionary of descriptors deserialized from JSON for the
specified package.
Args:
package (str): name of the python package to get settings for.
"""
from os import path
dpath = _descriptor_path(package)
if path.isfile(dpath):
import json
with open(dpath) as f:
jdb = json.load(f) # depends on [control=['with'], data=['f']]
return jdb # depends on [control=['if'], data=[]]
else:
return None |
def _extract_methods(self):
        """Obtains the methods used in the service."""
        service = self._service
        if not service.http:
            return
        all_urls = set()
        urls_with_options = set()
        for rule in service.http.rules:
            verb, url = _detect_pattern_option(rule)
            if not (url and verb and rule.selector):
                _logger.error(u'invalid HTTP binding encountered')
                continue
            # Look up (or lazily create) the bookkeeping entry for this method.
            method_info = self._get_or_create_method_info(rule.selector)
            if rule.body:
                method_info.body_field_path = rule.body
            if not self._register(verb, url, method_info):
                continue  # detected an invalid url
            all_urls.add(url)
            if verb == self._OPTIONS:
                urls_with_options.add(url)
        # Synthesize CORS preflight selectors for every URL that does not
        # already declare an OPTIONS binding of its own.
        self._add_cors_options_selectors(all_urls - urls_with_options)
        self._update_usage()
        self._update_system_parameters()
constant[Obtains the methods used in the service.]
variable[service] assign[=] name[self]._service
variable[all_urls] assign[=] call[name[set], parameter[]]
variable[urls_with_options] assign[=] call[name[set], parameter[]]
if <ast.UnaryOp object at 0x7da1b04030d0> begin[:]
return[None]
for taget[name[rule]] in starred[name[service].http.rules] begin[:]
<ast.Tuple object at 0x7da1b0402590> assign[=] call[name[_detect_pattern_option], parameter[name[rule]]]
if <ast.BoolOp object at 0x7da1b0403910> begin[:]
call[name[_logger].error, parameter[constant[invalid HTTP binding encountered]]]
continue
variable[method_info] assign[=] call[name[self]._get_or_create_method_info, parameter[name[rule].selector]]
if name[rule].body begin[:]
name[method_info].body_field_path assign[=] name[rule].body
if <ast.UnaryOp object at 0x7da18f09e530> begin[:]
continue
call[name[all_urls].add, parameter[name[url]]]
if compare[name[http_method] equal[==] name[self]._OPTIONS] begin[:]
call[name[urls_with_options].add, parameter[name[url]]]
call[name[self]._add_cors_options_selectors, parameter[binary_operation[name[all_urls] - name[urls_with_options]]]]
call[name[self]._update_usage, parameter[]]
call[name[self]._update_system_parameters, parameter[]] | keyword[def] identifier[_extract_methods] ( identifier[self] ):
literal[string]
identifier[service] = identifier[self] . identifier[_service]
identifier[all_urls] = identifier[set] ()
identifier[urls_with_options] = identifier[set] ()
keyword[if] keyword[not] identifier[service] . identifier[http] :
keyword[return]
keyword[for] identifier[rule] keyword[in] identifier[service] . identifier[http] . identifier[rules] :
identifier[http_method] , identifier[url] = identifier[_detect_pattern_option] ( identifier[rule] )
keyword[if] keyword[not] identifier[url] keyword[or] keyword[not] identifier[http_method] keyword[or] keyword[not] identifier[rule] . identifier[selector] :
identifier[_logger] . identifier[error] ( literal[string] )
keyword[continue]
identifier[method_info] = identifier[self] . identifier[_get_or_create_method_info] ( identifier[rule] . identifier[selector] )
keyword[if] identifier[rule] . identifier[body] :
identifier[method_info] . identifier[body_field_path] = identifier[rule] . identifier[body]
keyword[if] keyword[not] identifier[self] . identifier[_register] ( identifier[http_method] , identifier[url] , identifier[method_info] ):
keyword[continue]
identifier[all_urls] . identifier[add] ( identifier[url] )
keyword[if] identifier[http_method] == identifier[self] . identifier[_OPTIONS] :
identifier[urls_with_options] . identifier[add] ( identifier[url] )
identifier[self] . identifier[_add_cors_options_selectors] ( identifier[all_urls] - identifier[urls_with_options] )
identifier[self] . identifier[_update_usage] ()
identifier[self] . identifier[_update_system_parameters] () | def _extract_methods(self):
"""Obtains the methods used in the service."""
service = self._service
all_urls = set()
urls_with_options = set()
if not service.http:
return # depends on [control=['if'], data=[]]
for rule in service.http.rules:
(http_method, url) = _detect_pattern_option(rule)
if not url or not http_method or (not rule.selector):
_logger.error(u'invalid HTTP binding encountered')
continue # depends on [control=['if'], data=[]]
# Obtain the method info
method_info = self._get_or_create_method_info(rule.selector)
if rule.body:
method_info.body_field_path = rule.body # depends on [control=['if'], data=[]]
if not self._register(http_method, url, method_info):
continue # detected an invalid url # depends on [control=['if'], data=[]]
all_urls.add(url)
if http_method == self._OPTIONS:
urls_with_options.add(url) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rule']]
self._add_cors_options_selectors(all_urls - urls_with_options)
self._update_usage()
self._update_system_parameters() |
def indexXY(self, index):
        """Coordinates (x, y) of the top-left corner of the test row at *index*

        Re-implemented from :meth:`AbstractDragView<sparkle.gui.abstract_drag_view.AbstractDragView.indexXY>`
        """
        # Only the top left of the selected row matters, so x is always 0.
        target_row = index.row()
        if target_row == -1:
            # Invalid index: place it just past the last row.
            target_row = self.model().rowCount()
        return 0, self.rowHeight(0) * target_row
constant[Coordinates for the test row at *index*
Re-implemented from :meth:`AbstractDragView<sparkle.gui.abstract_drag_view.AbstractDragView.indexXY>`
]
variable[row] assign[=] call[name[index].row, parameter[]]
if compare[name[row] equal[==] <ast.UnaryOp object at 0x7da18bcca3e0>] begin[:]
variable[row] assign[=] call[call[name[self].model, parameter[]].rowCount, parameter[]]
variable[y] assign[=] binary_operation[call[name[self].rowHeight, parameter[constant[0]]] * name[row]]
return[tuple[[<ast.Constant object at 0x7da18bcca170>, <ast.Name object at 0x7da18bcc8700>]]] | keyword[def] identifier[indexXY] ( identifier[self] , identifier[index] ):
literal[string]
identifier[row] = identifier[index] . identifier[row] ()
keyword[if] identifier[row] ==- literal[int] :
identifier[row] = identifier[self] . identifier[model] (). identifier[rowCount] ()
identifier[y] = identifier[self] . identifier[rowHeight] ( literal[int] )* identifier[row]
keyword[return] literal[int] , identifier[y] | def indexXY(self, index):
"""Coordinates for the test row at *index*
Re-implemented from :meth:`AbstractDragView<sparkle.gui.abstract_drag_view.AbstractDragView.indexXY>`
"""
# just want the top left of row selected
row = index.row()
if row == -1:
row = self.model().rowCount() # depends on [control=['if'], data=['row']]
y = self.rowHeight(0) * row
return (0, y) |
def urisplit(uristring):
    """Split a well-formed URI reference string into a tuple with five
    components corresponding to a URI's general structure::
    <scheme>://<authority>/<path>?<query>#<fragment>
    """
    # Bytes input gets the bytes result type; everything else the unicode one.
    result_cls = SplitResultBytes if isinstance(uristring, bytes) else SplitResultUnicode
    return result_cls(*result_cls.RE.match(uristring).groups())
constant[Split a well-formed URI reference string into a tuple with five
components corresponding to a URI's general structure::
<scheme>://<authority>/<path>?<query>#<fragment>
]
if call[name[isinstance], parameter[name[uristring], name[bytes]]] begin[:]
variable[result] assign[=] name[SplitResultBytes]
return[call[name[result], parameter[<ast.Starred object at 0x7da1b25d53f0>]]] | keyword[def] identifier[urisplit] ( identifier[uristring] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[uristring] , identifier[bytes] ):
identifier[result] = identifier[SplitResultBytes]
keyword[else] :
identifier[result] = identifier[SplitResultUnicode]
keyword[return] identifier[result] (* identifier[result] . identifier[RE] . identifier[match] ( identifier[uristring] ). identifier[groups] ()) | def urisplit(uristring):
"""Split a well-formed URI reference string into a tuple with five
components corresponding to a URI's general structure::
<scheme>://<authority>/<path>?<query>#<fragment>
"""
if isinstance(uristring, bytes):
result = SplitResultBytes # depends on [control=['if'], data=[]]
else:
result = SplitResultUnicode
return result(*result.RE.match(uristring).groups()) |
def rCopy(d, f=identityConversion, discardNoneKeys=True, deepCopy=True):
  """Recursively copies a dict and returns the result.
  Args:
    d: The dict to copy.
    f: A function to apply to values when copying that takes the value and the
      list of keys from the root of the dict to the value and returns a value
      for the new dict.
    discardNoneKeys: If True, discard key-value pairs when f returns None for
      the value.
    deepCopy: If True, all values in returned dict are true copies (not the
      same object).
  Returns:
    A new dict with keys and values from d replaced with the result of f.
  """
  # Optionally deep copy the dict so mutations never leak back to the caller.
  if deepCopy:
    d = copy.deepcopy(d)
  newDict = {}
  # Worklist of (key, value, destination-dict, key path from the root).
  # .items() (not the Python-2-only .iteritems()) keeps this portable.
  toCopy = [(k, v, newDict, ()) for k, v in d.items()]
  while toCopy:
    # 'dest' (not 'd') avoids shadowing the source-dict parameter.
    k, v, dest, prevKeys = toCopy.pop()
    prevKeys = prevKeys + (k,)
    if isinstance(v, dict):
      # Nested dict: create the destination and queue its children.
      dest[k] = dict()
      toCopy[0:0] = [(innerK, innerV, dest[k], prevKeys)
                     for innerK, innerV in v.items()]
    else:
      newV = f(v, prevKeys)
      if not discardNoneKeys or newV is not None:
        dest[k] = newV
  return newDict
constant[Recursively copies a dict and returns the result.
Args:
d: The dict to copy.
f: A function to apply to values when copying that takes the value and the
list of keys from the root of the dict to the value and returns a value
for the new dict.
discardNoneKeys: If True, discard key-value pairs when f returns None for
the value.
deepCopy: If True, all values in returned dict are true copies (not the
same object).
Returns:
A new dict with keys and values from d replaced with the result of f.
]
if name[deepCopy] begin[:]
variable[d] assign[=] call[name[copy].deepcopy, parameter[name[d]]]
variable[newDict] assign[=] dictionary[[], []]
variable[toCopy] assign[=] <ast.ListComp object at 0x7da18dc9bd90>
while compare[call[name[len], parameter[name[toCopy]]] greater[>] constant[0]] begin[:]
<ast.Tuple object at 0x7da18dc9b550> assign[=] call[name[toCopy].pop, parameter[]]
variable[prevKeys] assign[=] binary_operation[name[prevKeys] + tuple[[<ast.Name object at 0x7da18dc99210>]]]
if call[name[isinstance], parameter[name[v], name[dict]]] begin[:]
call[name[d]][name[k]] assign[=] call[name[dict], parameter[]]
call[name[toCopy]][<ast.Slice object at 0x7da18dc9b9a0>] assign[=] <ast.ListComp object at 0x7da18dc9ab60>
return[name[newDict]] | keyword[def] identifier[rCopy] ( identifier[d] , identifier[f] = identifier[identityConversion] , identifier[discardNoneKeys] = keyword[True] , identifier[deepCopy] = keyword[True] ):
literal[string]
keyword[if] identifier[deepCopy] :
identifier[d] = identifier[copy] . identifier[deepcopy] ( identifier[d] )
identifier[newDict] ={}
identifier[toCopy] =[( identifier[k] , identifier[v] , identifier[newDict] ,()) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[d] . identifier[iteritems] ()]
keyword[while] identifier[len] ( identifier[toCopy] )> literal[int] :
identifier[k] , identifier[v] , identifier[d] , identifier[prevKeys] = identifier[toCopy] . identifier[pop] ()
identifier[prevKeys] = identifier[prevKeys] +( identifier[k] ,)
keyword[if] identifier[isinstance] ( identifier[v] , identifier[dict] ):
identifier[d] [ identifier[k] ]= identifier[dict] ()
identifier[toCopy] [ literal[int] : literal[int] ]=[( identifier[innerK] , identifier[innerV] , identifier[d] [ identifier[k] ], identifier[prevKeys] )
keyword[for] identifier[innerK] , identifier[innerV] keyword[in] identifier[v] . identifier[iteritems] ()]
keyword[else] :
identifier[newV] = identifier[f] ( identifier[v] , identifier[prevKeys] )
keyword[if] keyword[not] identifier[discardNoneKeys] keyword[or] identifier[newV] keyword[is] keyword[not] keyword[None] :
identifier[d] [ identifier[k] ]= identifier[newV]
keyword[return] identifier[newDict] | def rCopy(d, f=identityConversion, discardNoneKeys=True, deepCopy=True):
"""Recursively copies a dict and returns the result.
Args:
d: The dict to copy.
f: A function to apply to values when copying that takes the value and the
list of keys from the root of the dict to the value and returns a value
for the new dict.
discardNoneKeys: If True, discard key-value pairs when f returns None for
the value.
deepCopy: If True, all values in returned dict are true copies (not the
same object).
Returns:
A new dict with keys and values from d replaced with the result of f.
"""
# Optionally deep copy the dict.
if deepCopy:
d = copy.deepcopy(d) # depends on [control=['if'], data=[]]
newDict = {}
toCopy = [(k, v, newDict, ()) for (k, v) in d.iteritems()]
while len(toCopy) > 0:
(k, v, d, prevKeys) = toCopy.pop()
prevKeys = prevKeys + (k,)
if isinstance(v, dict):
d[k] = dict()
toCopy[0:0] = [(innerK, innerV, d[k], prevKeys) for (innerK, innerV) in v.iteritems()] # depends on [control=['if'], data=[]]
else:
#print k, v, prevKeys
newV = f(v, prevKeys)
if not discardNoneKeys or newV is not None:
d[k] = newV # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
return newDict |
def InitializeDownload(self, http_request, http=None, client=None):
        """Initialize this download by making a request.

        Args:
          http_request: The HttpRequest to use to initialize this download.
          http: The httplib2.Http instance for this request.
          client: If provided, let this client process the final URL before
              sending any additional requests. If client is provided and
              http is not, client.http will be used instead.

        Raises:
          exceptions.UserError: If neither http nor client is provided.
          exceptions.HttpError: If the initial range request (made only when
              auto_transfer is enabled) returns an unacceptable status code.
        """
        self.EnsureUninitialized()
        if http is None and client is None:
            raise exceptions.UserError('Must provide client or http.')
        http = http or client.http
        # Let the client rewrite the URL before the first request is made.
        if client is not None:
            http_request.url = client.FinalizeTransferUrl(http_request.url)
        url = http_request.url
        if self.auto_transfer:
            # Fetch the first byte range immediately: this both records the
            # total size (via __SetTotal) and captures the first chunk so
            # StreamInChunks below does not need a second round trip for it.
            end_byte = self.__ComputeEndByte(0)
            self.__SetRangeHeader(http_request, 0, end_byte)
            response = http_wrapper.MakeRequest(
                self.bytes_http or http, http_request)
            if response.status_code not in self._ACCEPTABLE_STATUSES:
                raise exceptions.HttpError.FromResponse(response)
            self.__initial_response = response
            self.__SetTotal(response.info)
            # Prefer the server-reported content location, when present, as
            # the canonical URL for subsequent range requests.
            url = response.info.get('content-location', response.request_url)
        if client is not None:
            url = client.FinalizeTransferUrl(url)
        self._Initialize(http, url)
        # Unless the user has requested otherwise, we want to just
        # go ahead and pump the bytes now.
        if self.auto_transfer:
            self.StreamInChunks()
constant[Initialize this download by making a request.
Args:
http_request: The HttpRequest to use to initialize this download.
http: The httplib2.Http instance for this request.
client: If provided, let this client process the final URL before
sending any additional requests. If client is provided and
http is not, client.http will be used instead.
]
call[name[self].EnsureUninitialized, parameter[]]
if <ast.BoolOp object at 0x7da1b07d4a60> begin[:]
<ast.Raise object at 0x7da1b07d4c40>
variable[http] assign[=] <ast.BoolOp object at 0x7da1b07d5540>
if compare[name[client] is_not constant[None]] begin[:]
name[http_request].url assign[=] call[name[client].FinalizeTransferUrl, parameter[name[http_request].url]]
variable[url] assign[=] name[http_request].url
if name[self].auto_transfer begin[:]
variable[end_byte] assign[=] call[name[self].__ComputeEndByte, parameter[constant[0]]]
call[name[self].__SetRangeHeader, parameter[name[http_request], constant[0], name[end_byte]]]
variable[response] assign[=] call[name[http_wrapper].MakeRequest, parameter[<ast.BoolOp object at 0x7da1b085a350>, name[http_request]]]
if compare[name[response].status_code <ast.NotIn object at 0x7da2590d7190> name[self]._ACCEPTABLE_STATUSES] begin[:]
<ast.Raise object at 0x7da1b085a890>
name[self].__initial_response assign[=] name[response]
call[name[self].__SetTotal, parameter[name[response].info]]
variable[url] assign[=] call[name[response].info.get, parameter[constant[content-location], name[response].request_url]]
if compare[name[client] is_not constant[None]] begin[:]
variable[url] assign[=] call[name[client].FinalizeTransferUrl, parameter[name[url]]]
call[name[self]._Initialize, parameter[name[http], name[url]]]
if name[self].auto_transfer begin[:]
call[name[self].StreamInChunks, parameter[]] | keyword[def] identifier[InitializeDownload] ( identifier[self] , identifier[http_request] , identifier[http] = keyword[None] , identifier[client] = keyword[None] ):
literal[string]
identifier[self] . identifier[EnsureUninitialized] ()
keyword[if] identifier[http] keyword[is] keyword[None] keyword[and] identifier[client] keyword[is] keyword[None] :
keyword[raise] identifier[exceptions] . identifier[UserError] ( literal[string] )
identifier[http] = identifier[http] keyword[or] identifier[client] . identifier[http]
keyword[if] identifier[client] keyword[is] keyword[not] keyword[None] :
identifier[http_request] . identifier[url] = identifier[client] . identifier[FinalizeTransferUrl] ( identifier[http_request] . identifier[url] )
identifier[url] = identifier[http_request] . identifier[url]
keyword[if] identifier[self] . identifier[auto_transfer] :
identifier[end_byte] = identifier[self] . identifier[__ComputeEndByte] ( literal[int] )
identifier[self] . identifier[__SetRangeHeader] ( identifier[http_request] , literal[int] , identifier[end_byte] )
identifier[response] = identifier[http_wrapper] . identifier[MakeRequest] (
identifier[self] . identifier[bytes_http] keyword[or] identifier[http] , identifier[http_request] )
keyword[if] identifier[response] . identifier[status_code] keyword[not] keyword[in] identifier[self] . identifier[_ACCEPTABLE_STATUSES] :
keyword[raise] identifier[exceptions] . identifier[HttpError] . identifier[FromResponse] ( identifier[response] )
identifier[self] . identifier[__initial_response] = identifier[response]
identifier[self] . identifier[__SetTotal] ( identifier[response] . identifier[info] )
identifier[url] = identifier[response] . identifier[info] . identifier[get] ( literal[string] , identifier[response] . identifier[request_url] )
keyword[if] identifier[client] keyword[is] keyword[not] keyword[None] :
identifier[url] = identifier[client] . identifier[FinalizeTransferUrl] ( identifier[url] )
identifier[self] . identifier[_Initialize] ( identifier[http] , identifier[url] )
keyword[if] identifier[self] . identifier[auto_transfer] :
identifier[self] . identifier[StreamInChunks] () | def InitializeDownload(self, http_request, http=None, client=None):
"""Initialize this download by making a request.
Args:
http_request: The HttpRequest to use to initialize this download.
http: The httplib2.Http instance for this request.
client: If provided, let this client process the final URL before
sending any additional requests. If client is provided and
http is not, client.http will be used instead.
"""
self.EnsureUninitialized()
if http is None and client is None:
raise exceptions.UserError('Must provide client or http.') # depends on [control=['if'], data=[]]
http = http or client.http
if client is not None:
http_request.url = client.FinalizeTransferUrl(http_request.url) # depends on [control=['if'], data=['client']]
url = http_request.url
if self.auto_transfer:
end_byte = self.__ComputeEndByte(0)
self.__SetRangeHeader(http_request, 0, end_byte)
response = http_wrapper.MakeRequest(self.bytes_http or http, http_request)
if response.status_code not in self._ACCEPTABLE_STATUSES:
raise exceptions.HttpError.FromResponse(response) # depends on [control=['if'], data=[]]
self.__initial_response = response
self.__SetTotal(response.info)
url = response.info.get('content-location', response.request_url) # depends on [control=['if'], data=[]]
if client is not None:
url = client.FinalizeTransferUrl(url) # depends on [control=['if'], data=['client']]
self._Initialize(http, url)
# Unless the user has requested otherwise, we want to just
# go ahead and pump the bytes now.
if self.auto_transfer:
self.StreamInChunks() # depends on [control=['if'], data=[]] |
def add_left_space(self, nspace=1):
        """Prefix the first column of every row with ``nspace`` spaces.

        (for texttable 0.8.3)
        """
        prefix = ' ' * nspace
        # Rows are mutated in place so any external aliases see the change.
        for row in self._rows:
            row[0] = ''.join((prefix, row[0]))
constant[elem_add n cols of spaces before the first col.
(for texttable 0.8.3)]
variable[sp] assign[=] binary_operation[constant[ ] * name[nspace]]
for taget[name[item]] in starred[name[self]._rows] begin[:]
call[name[item]][constant[0]] assign[=] binary_operation[name[sp] + call[name[item]][constant[0]]] | keyword[def] identifier[add_left_space] ( identifier[self] , identifier[nspace] = literal[int] ):
literal[string]
identifier[sp] = literal[string] * identifier[nspace]
keyword[for] identifier[item] keyword[in] identifier[self] . identifier[_rows] :
identifier[item] [ literal[int] ]= identifier[sp] + identifier[item] [ literal[int] ] | def add_left_space(self, nspace=1):
"""elem_add n cols of spaces before the first col.
(for texttable 0.8.3)"""
sp = ' ' * nspace
for item in self._rows:
item[0] = sp + item[0] # depends on [control=['for'], data=['item']] |
def make_gunicorn_config(_gunicorn_config_path=''):
    """Render a gunicorn.conf file for launching inside docker.

    The generated config copies every ``GUNICORN_{option}`` environment
    variable into the matching gunicorn setting when gunicorn loads it.

    Notes:
        https://sebest.github.io/post/protips-using-gunicorn-inside-a-docker-image/

    Args:
        _gunicorn_config_path (str): TEST HOOK, path to dump file
    """
    config_source = '''"""AUTOGENERATED BY: prosper.common.flask_utils:gunicorn_config
Based off: https://sebest.github.io/post/protips-using-gunicorn-inside-a-docker-image/
"""
from os import environ
for key, value in environ.items():
    if key.startswith('GUNICORN_'):
        gunicorn_key = key.split('_', 1)[1].lower()
        locals()[gunicorn_key] = value
'''
    target_path = _gunicorn_config_path or 'gunicorn.conf'
    with open(target_path, 'w') as config_fh:
        config_fh.write(config_source)
constant[makes gunicorn.conf file for launching in docker
Notes:
https://sebest.github.io/post/protips-using-gunicorn-inside-a-docker-image/
renders gunicorn.config (python) file in running dir
looks for GUNICORN_{option} in environment vars
Args:
_gunicorn_config_path (str): TEST HOOK, path to dump file
]
variable[gunicorn_py] assign[=] constant["""AUTOGENERATED BY: prosper.common.flask_utils:gunicorn_config
Based off: https://sebest.github.io/post/protips-using-gunicorn-inside-a-docker-image/
"""
from os import environ
for key, value in environ.items():
if key.startswith('GUNICORN_'):
gunicorn_key = key.split('_', 1)[1].lower()
locals()[gunicorn_key] = value
]
variable[gunicorn_file] assign[=] constant[gunicorn.conf]
if name[_gunicorn_config_path] begin[:]
variable[gunicorn_file] assign[=] name[_gunicorn_config_path]
with call[name[open], parameter[name[gunicorn_file], constant[w]]] begin[:]
call[name[gunicorn_cfg].write, parameter[name[gunicorn_py]]] | keyword[def] identifier[make_gunicorn_config] (
identifier[_gunicorn_config_path] = literal[string] ,
):
literal[string]
identifier[gunicorn_py] = literal[string]
identifier[gunicorn_file] = literal[string]
keyword[if] identifier[_gunicorn_config_path] :
identifier[gunicorn_file] = identifier[_gunicorn_config_path]
keyword[with] identifier[open] ( identifier[gunicorn_file] , literal[string] ) keyword[as] identifier[gunicorn_cfg] :
identifier[gunicorn_cfg] . identifier[write] ( identifier[gunicorn_py] ) | def make_gunicorn_config(_gunicorn_config_path=''):
"""makes gunicorn.conf file for launching in docker
Notes:
https://sebest.github.io/post/protips-using-gunicorn-inside-a-docker-image/
renders gunicorn.config (python) file in running dir
looks for GUNICORN_{option} in environment vars
Args:
_gunicorn_config_path (str): TEST HOOK, path to dump file
"""
gunicorn_py = '"""AUTOGENERATED BY: prosper.common.flask_utils:gunicorn_config\nBased off: https://sebest.github.io/post/protips-using-gunicorn-inside-a-docker-image/\n"""\nfrom os import environ\n\nfor key, value in environ.items():\n if key.startswith(\'GUNICORN_\'):\n gunicorn_key = key.split(\'_\', 1)[1].lower()\n locals()[gunicorn_key] = value\n\n'
gunicorn_file = 'gunicorn.conf'
if _gunicorn_config_path:
gunicorn_file = _gunicorn_config_path # depends on [control=['if'], data=[]]
with open(gunicorn_file, 'w') as gunicorn_cfg:
gunicorn_cfg.write(gunicorn_py) # depends on [control=['with'], data=['gunicorn_cfg']] |
def wrap(self, value):
''' Validates ``value`` and then returns a dictionary with each key in
``value`` mapped to its value wrapped with ``DictField.value_type``
'''
self.validate_wrap(value)
ret = {}
for k, v in value.items():
ret[k] = self.value_type.wrap(v)
return ret | def function[wrap, parameter[self, value]]:
constant[ Validates ``value`` and then returns a dictionary with each key in
``value`` mapped to its value wrapped with ``DictField.value_type``
]
call[name[self].validate_wrap, parameter[name[value]]]
variable[ret] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da204623d30>, <ast.Name object at 0x7da204620250>]]] in starred[call[name[value].items, parameter[]]] begin[:]
call[name[ret]][name[k]] assign[=] call[name[self].value_type.wrap, parameter[name[v]]]
return[name[ret]] | keyword[def] identifier[wrap] ( identifier[self] , identifier[value] ):
literal[string]
identifier[self] . identifier[validate_wrap] ( identifier[value] )
identifier[ret] ={}
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[value] . identifier[items] ():
identifier[ret] [ identifier[k] ]= identifier[self] . identifier[value_type] . identifier[wrap] ( identifier[v] )
keyword[return] identifier[ret] | def wrap(self, value):
""" Validates ``value`` and then returns a dictionary with each key in
``value`` mapped to its value wrapped with ``DictField.value_type``
"""
self.validate_wrap(value)
ret = {}
for (k, v) in value.items():
ret[k] = self.value_type.wrap(v) # depends on [control=['for'], data=[]]
return ret |
def json(self, **kwargs):
'''
If the response's body is valid json, we load it as a python dict
and return it.
'''
body = self._decompress(self.encoding)
return _json.loads(body, **kwargs) | def function[json, parameter[self]]:
constant[
If the response's body is valid json, we load it as a python dict
and return it.
]
variable[body] assign[=] call[name[self]._decompress, parameter[name[self].encoding]]
return[call[name[_json].loads, parameter[name[body]]]] | keyword[def] identifier[json] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[body] = identifier[self] . identifier[_decompress] ( identifier[self] . identifier[encoding] )
keyword[return] identifier[_json] . identifier[loads] ( identifier[body] ,** identifier[kwargs] ) | def json(self, **kwargs):
"""
If the response's body is valid json, we load it as a python dict
and return it.
"""
body = self._decompress(self.encoding)
return _json.loads(body, **kwargs) |
def get_configured_providers(self):
'''
Return the configured providers
'''
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
if len(drivers) > 1:
for driver in drivers:
providers.add('{0}:{1}'.format(alias, driver))
continue
providers.add(alias)
return providers | def function[get_configured_providers, parameter[self]]:
constant[
Return the configured providers
]
variable[providers] assign[=] call[name[set], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b2185750>, <ast.Name object at 0x7da1b2187040>]]] in starred[call[name[six].iteritems, parameter[call[name[self].opts][constant[providers]]]]] begin[:]
if compare[call[name[len], parameter[name[drivers]]] greater[>] constant[1]] begin[:]
for taget[name[driver]] in starred[name[drivers]] begin[:]
call[name[providers].add, parameter[call[constant[{0}:{1}].format, parameter[name[alias], name[driver]]]]]
continue
call[name[providers].add, parameter[name[alias]]]
return[name[providers]] | keyword[def] identifier[get_configured_providers] ( identifier[self] ):
literal[string]
identifier[providers] = identifier[set] ()
keyword[for] identifier[alias] , identifier[drivers] keyword[in] identifier[six] . identifier[iteritems] ( identifier[self] . identifier[opts] [ literal[string] ]):
keyword[if] identifier[len] ( identifier[drivers] )> literal[int] :
keyword[for] identifier[driver] keyword[in] identifier[drivers] :
identifier[providers] . identifier[add] ( literal[string] . identifier[format] ( identifier[alias] , identifier[driver] ))
keyword[continue]
identifier[providers] . identifier[add] ( identifier[alias] )
keyword[return] identifier[providers] | def get_configured_providers(self):
"""
Return the configured providers
"""
providers = set()
for (alias, drivers) in six.iteritems(self.opts['providers']):
if len(drivers) > 1:
for driver in drivers:
providers.add('{0}:{1}'.format(alias, driver)) # depends on [control=['for'], data=['driver']]
continue # depends on [control=['if'], data=[]]
providers.add(alias) # depends on [control=['for'], data=[]]
return providers |
def identify_assembly_points(graph, bgtree, target_multicolor, exclude=None, verbose=False, verbose_destination=None):
"""
The main granular assembling function, that IDENTIFIES assembly points, but does not perform the assembly on its own
It DOES NOT change the supplied breakpoint graph in any way!!!
"""
if verbose:
print(">>Identifying assemblies for target multicolor:",
[e.name for e in target_multicolor.multicolors.elements()], file=verbose_destination)
guidance = bgtree.consistent_multicolors[:]
offset = len(Multicolor.split_colors(target_multicolor, guidance=guidance,
account_for_color_multiplicity_in_guidance=False)) - 1
threshold = 1 if offset == 0 else 2
assemblies = [] # the overall result
if exclude is None:
exclude = [] # a container with single colors of genomes, that are to be considered fully assembled
p_t_consistent_multicolors_in_target = Multicolor.split_colors(target_multicolor, guidance=guidance,
account_for_color_multiplicity_in_guidance=False)
t_consistent_multicolors_in_target = []
for tcmc in p_t_consistent_multicolors_in_target:
t_consistent_multicolors_in_target.append(sorted(color.name for color in tcmc.colors))
# we work with each connected component separately, as connected components usually preserve fragmentation points
################################################################################################
#
# its important that we iterate over connected components making each particular one a deepcopy of an
# underlying breakpoint graph connected component
#
################################################################################################
for i, cc in enumerate(graph.connected_components_subgraphs(copy=True)):
# we filter current connected component of uninteresting / ambiguous edges and retrieve a list of
# connected components that are left in the original connected components after filtration
irregular_subnets = get_irregular_subnets(cc, target_multicolor, exclude)
if len(irregular_subnets) > 0 and verbose:
print(">>Processing", str(i) + "th", "connected component", file=verbose_destination)
print("\tcontains", len(irregular_subnets), "subnet groups", file=verbose_destination)
# each subnet can be processed separately
for subnet in irregular_subnets:
supporting_edge_scores = get_support_edge_scores(graph, subnet, target_multicolor, bgtree)
# we create a new dummy graph for the purpose of computing maximum weight matching for support edges in it
new_graph = nx.Graph()
if verbose:
print("\tcontains", len(supporting_edge_scores), "possible assembly points", file=verbose_destination)
# we'll keep track of possible assembly points for future reference
support_edge_dict = {}
for (v1, v2), before, after, ex_data in supporting_edge_scores:
ex_data["tcmc"] = t_consistent_multicolors_in_target
##########################################################################################
#
# INSERT YOUR CODE ASSEMBLY SCORE THRESHOLD FILTRATION HERE IF NEED BE
#
##########################################################################################
if before - after - offset < threshold:
continue
##########################################################################################
#
# by default networkx assumes all edges, that have weight >= 0 are good
#
##########################################################################################
new_graph.add_edge(v1, v2, weight=before - after - offset)
support_edge_dict[(v1, v2)] = (before, after + offset, ex_data)
support_edge_dict[(v2, v1)] = (before, after + offset, ex_data)
maximal_matching = nx.max_weight_matching(new_graph)
if verbose:
print("\t", len(maximal_matching) // 2, "assembly points are identified", file=verbose_destination)
# as networkx provides a maximum matching in a form of adjacency list, every identified edge
# (pair of vertices) is present their twice (i.e. matching[v1]=v2 and matching[v2]=v1)
# we need to make sure we only add every edge only once
visited = set()
for v1, v2 in maximal_matching.items():
if v1 in visited or v2 in visited:
continue
visited.add(v1)
visited.add(v2)
assemblies.append((v1, v2, support_edge_dict[(v1, v2)]))
# we return the result as a list of assembly points that were identified for the targeted multicolor
# as a list of tuples (v1, v2, ("before", "after"))
# where v1 and v2 correspond to assembly point and "before" and "after" are used to compute the assembly score
return assemblies | def function[identify_assembly_points, parameter[graph, bgtree, target_multicolor, exclude, verbose, verbose_destination]]:
constant[
The main granular assembling function, that IDENTIFIES assembly points, but does not perform the assembly on its own
It DOES NOT change the supplied breakpoint graph in any way!!!
]
if name[verbose] begin[:]
call[name[print], parameter[constant[>>Identifying assemblies for target multicolor:], <ast.ListComp object at 0x7da1b141ac80>]]
variable[guidance] assign[=] call[name[bgtree].consistent_multicolors][<ast.Slice object at 0x7da1b141a920>]
variable[offset] assign[=] binary_operation[call[name[len], parameter[call[name[Multicolor].split_colors, parameter[name[target_multicolor]]]]] - constant[1]]
variable[threshold] assign[=] <ast.IfExp object at 0x7da1b1418130>
variable[assemblies] assign[=] list[[]]
if compare[name[exclude] is constant[None]] begin[:]
variable[exclude] assign[=] list[[]]
variable[p_t_consistent_multicolors_in_target] assign[=] call[name[Multicolor].split_colors, parameter[name[target_multicolor]]]
variable[t_consistent_multicolors_in_target] assign[=] list[[]]
for taget[name[tcmc]] in starred[name[p_t_consistent_multicolors_in_target]] begin[:]
call[name[t_consistent_multicolors_in_target].append, parameter[call[name[sorted], parameter[<ast.GeneratorExp object at 0x7da1b1418850>]]]]
for taget[tuple[[<ast.Name object at 0x7da1b1418a60>, <ast.Name object at 0x7da1b1418a90>]]] in starred[call[name[enumerate], parameter[call[name[graph].connected_components_subgraphs, parameter[]]]]] begin[:]
variable[irregular_subnets] assign[=] call[name[get_irregular_subnets], parameter[name[cc], name[target_multicolor], name[exclude]]]
if <ast.BoolOp object at 0x7da1b1418d90> begin[:]
call[name[print], parameter[constant[>>Processing], binary_operation[call[name[str], parameter[name[i]]] + constant[th]], constant[connected component]]]
call[name[print], parameter[constant[ contains], call[name[len], parameter[name[irregular_subnets]]], constant[subnet groups]]]
for taget[name[subnet]] in starred[name[irregular_subnets]] begin[:]
variable[supporting_edge_scores] assign[=] call[name[get_support_edge_scores], parameter[name[graph], name[subnet], name[target_multicolor], name[bgtree]]]
variable[new_graph] assign[=] call[name[nx].Graph, parameter[]]
if name[verbose] begin[:]
call[name[print], parameter[constant[ contains], call[name[len], parameter[name[supporting_edge_scores]]], constant[possible assembly points]]]
variable[support_edge_dict] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Tuple object at 0x7da1b14199c0>, <ast.Name object at 0x7da1b1419a50>, <ast.Name object at 0x7da1b1419a80>, <ast.Name object at 0x7da1b1419ab0>]]] in starred[name[supporting_edge_scores]] begin[:]
call[name[ex_data]][constant[tcmc]] assign[=] name[t_consistent_multicolors_in_target]
if compare[binary_operation[binary_operation[name[before] - name[after]] - name[offset]] less[<] name[threshold]] begin[:]
continue
call[name[new_graph].add_edge, parameter[name[v1], name[v2]]]
call[name[support_edge_dict]][tuple[[<ast.Name object at 0x7da1b141a620>, <ast.Name object at 0x7da1b141a5f0>]]] assign[=] tuple[[<ast.Name object at 0x7da1b141a590>, <ast.BinOp object at 0x7da1b141a560>, <ast.Name object at 0x7da1b141a4d0>]]
call[name[support_edge_dict]][tuple[[<ast.Name object at 0x7da1b141a3e0>, <ast.Name object at 0x7da1b141a3b0>]]] assign[=] tuple[[<ast.Name object at 0x7da1b141a350>, <ast.BinOp object at 0x7da1b141a320>, <ast.Name object at 0x7da1b141a290>]]
variable[maximal_matching] assign[=] call[name[nx].max_weight_matching, parameter[name[new_graph]]]
if name[verbose] begin[:]
call[name[print], parameter[constant[ ], binary_operation[call[name[len], parameter[name[maximal_matching]]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]], constant[assembly points are identified]]]
variable[visited] assign[=] call[name[set], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b1579f60>, <ast.Name object at 0x7da1b1578310>]]] in starred[call[name[maximal_matching].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b15788b0> begin[:]
continue
call[name[visited].add, parameter[name[v1]]]
call[name[visited].add, parameter[name[v2]]]
call[name[assemblies].append, parameter[tuple[[<ast.Name object at 0x7da1b157a860>, <ast.Name object at 0x7da1b1579630>, <ast.Subscript object at 0x7da1b15796c0>]]]]
return[name[assemblies]] | keyword[def] identifier[identify_assembly_points] ( identifier[graph] , identifier[bgtree] , identifier[target_multicolor] , identifier[exclude] = keyword[None] , identifier[verbose] = keyword[False] , identifier[verbose_destination] = keyword[None] ):
literal[string]
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] ,
[ identifier[e] . identifier[name] keyword[for] identifier[e] keyword[in] identifier[target_multicolor] . identifier[multicolors] . identifier[elements] ()], identifier[file] = identifier[verbose_destination] )
identifier[guidance] = identifier[bgtree] . identifier[consistent_multicolors] [:]
identifier[offset] = identifier[len] ( identifier[Multicolor] . identifier[split_colors] ( identifier[target_multicolor] , identifier[guidance] = identifier[guidance] ,
identifier[account_for_color_multiplicity_in_guidance] = keyword[False] ))- literal[int]
identifier[threshold] = literal[int] keyword[if] identifier[offset] == literal[int] keyword[else] literal[int]
identifier[assemblies] =[]
keyword[if] identifier[exclude] keyword[is] keyword[None] :
identifier[exclude] =[]
identifier[p_t_consistent_multicolors_in_target] = identifier[Multicolor] . identifier[split_colors] ( identifier[target_multicolor] , identifier[guidance] = identifier[guidance] ,
identifier[account_for_color_multiplicity_in_guidance] = keyword[False] )
identifier[t_consistent_multicolors_in_target] =[]
keyword[for] identifier[tcmc] keyword[in] identifier[p_t_consistent_multicolors_in_target] :
identifier[t_consistent_multicolors_in_target] . identifier[append] ( identifier[sorted] ( identifier[color] . identifier[name] keyword[for] identifier[color] keyword[in] identifier[tcmc] . identifier[colors] ))
keyword[for] identifier[i] , identifier[cc] keyword[in] identifier[enumerate] ( identifier[graph] . identifier[connected_components_subgraphs] ( identifier[copy] = keyword[True] )):
identifier[irregular_subnets] = identifier[get_irregular_subnets] ( identifier[cc] , identifier[target_multicolor] , identifier[exclude] )
keyword[if] identifier[len] ( identifier[irregular_subnets] )> literal[int] keyword[and] identifier[verbose] :
identifier[print] ( literal[string] , identifier[str] ( identifier[i] )+ literal[string] , literal[string] , identifier[file] = identifier[verbose_destination] )
identifier[print] ( literal[string] , identifier[len] ( identifier[irregular_subnets] ), literal[string] , identifier[file] = identifier[verbose_destination] )
keyword[for] identifier[subnet] keyword[in] identifier[irregular_subnets] :
identifier[supporting_edge_scores] = identifier[get_support_edge_scores] ( identifier[graph] , identifier[subnet] , identifier[target_multicolor] , identifier[bgtree] )
identifier[new_graph] = identifier[nx] . identifier[Graph] ()
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] , identifier[len] ( identifier[supporting_edge_scores] ), literal[string] , identifier[file] = identifier[verbose_destination] )
identifier[support_edge_dict] ={}
keyword[for] ( identifier[v1] , identifier[v2] ), identifier[before] , identifier[after] , identifier[ex_data] keyword[in] identifier[supporting_edge_scores] :
identifier[ex_data] [ literal[string] ]= identifier[t_consistent_multicolors_in_target]
keyword[if] identifier[before] - identifier[after] - identifier[offset] < identifier[threshold] :
keyword[continue]
identifier[new_graph] . identifier[add_edge] ( identifier[v1] , identifier[v2] , identifier[weight] = identifier[before] - identifier[after] - identifier[offset] )
identifier[support_edge_dict] [( identifier[v1] , identifier[v2] )]=( identifier[before] , identifier[after] + identifier[offset] , identifier[ex_data] )
identifier[support_edge_dict] [( identifier[v2] , identifier[v1] )]=( identifier[before] , identifier[after] + identifier[offset] , identifier[ex_data] )
identifier[maximal_matching] = identifier[nx] . identifier[max_weight_matching] ( identifier[new_graph] )
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] , identifier[len] ( identifier[maximal_matching] )// literal[int] , literal[string] , identifier[file] = identifier[verbose_destination] )
identifier[visited] = identifier[set] ()
keyword[for] identifier[v1] , identifier[v2] keyword[in] identifier[maximal_matching] . identifier[items] ():
keyword[if] identifier[v1] keyword[in] identifier[visited] keyword[or] identifier[v2] keyword[in] identifier[visited] :
keyword[continue]
identifier[visited] . identifier[add] ( identifier[v1] )
identifier[visited] . identifier[add] ( identifier[v2] )
identifier[assemblies] . identifier[append] (( identifier[v1] , identifier[v2] , identifier[support_edge_dict] [( identifier[v1] , identifier[v2] )]))
keyword[return] identifier[assemblies] | def identify_assembly_points(graph, bgtree, target_multicolor, exclude=None, verbose=False, verbose_destination=None):
"""
The main granular assembling function, that IDENTIFIES assembly points, but does not perform the assembly on its own
It DOES NOT change the supplied breakpoint graph in any way!!!
"""
if verbose:
print('>>Identifying assemblies for target multicolor:', [e.name for e in target_multicolor.multicolors.elements()], file=verbose_destination) # depends on [control=['if'], data=[]]
guidance = bgtree.consistent_multicolors[:]
offset = len(Multicolor.split_colors(target_multicolor, guidance=guidance, account_for_color_multiplicity_in_guidance=False)) - 1
threshold = 1 if offset == 0 else 2
assemblies = [] # the overall result
if exclude is None:
exclude = [] # a container with single colors of genomes, that are to be considered fully assembled # depends on [control=['if'], data=['exclude']]
p_t_consistent_multicolors_in_target = Multicolor.split_colors(target_multicolor, guidance=guidance, account_for_color_multiplicity_in_guidance=False)
t_consistent_multicolors_in_target = []
for tcmc in p_t_consistent_multicolors_in_target:
t_consistent_multicolors_in_target.append(sorted((color.name for color in tcmc.colors))) # depends on [control=['for'], data=['tcmc']]
# we work with each connected component separately, as connected components usually preserve fragmentation points
################################################################################################
#
# its important that we iterate over connected components making each particular one a deepcopy of an
# underlying breakpoint graph connected component
#
################################################################################################
for (i, cc) in enumerate(graph.connected_components_subgraphs(copy=True)):
# we filter current connected component of uninteresting / ambiguous edges and retrieve a list of
# connected components that are left in the original connected components after filtration
irregular_subnets = get_irregular_subnets(cc, target_multicolor, exclude)
if len(irregular_subnets) > 0 and verbose:
print('>>Processing', str(i) + 'th', 'connected component', file=verbose_destination)
print('\tcontains', len(irregular_subnets), 'subnet groups', file=verbose_destination) # depends on [control=['if'], data=[]]
# each subnet can be processed separately
for subnet in irregular_subnets:
supporting_edge_scores = get_support_edge_scores(graph, subnet, target_multicolor, bgtree)
# we create a new dummy graph for the purpose of computing maximum weight matching for support edges in it
new_graph = nx.Graph()
if verbose:
print('\tcontains', len(supporting_edge_scores), 'possible assembly points', file=verbose_destination) # depends on [control=['if'], data=[]]
# we'll keep track of possible assembly points for future reference
support_edge_dict = {}
for ((v1, v2), before, after, ex_data) in supporting_edge_scores:
ex_data['tcmc'] = t_consistent_multicolors_in_target
##########################################################################################
#
# INSERT YOUR CODE ASSEMBLY SCORE THRESHOLD FILTRATION HERE IF NEED BE
#
##########################################################################################
if before - after - offset < threshold:
continue # depends on [control=['if'], data=[]]
##########################################################################################
#
# by default networkx assumes all edges, that have weight >= 0 are good
#
##########################################################################################
new_graph.add_edge(v1, v2, weight=before - after - offset)
support_edge_dict[v1, v2] = (before, after + offset, ex_data)
support_edge_dict[v2, v1] = (before, after + offset, ex_data) # depends on [control=['for'], data=[]]
maximal_matching = nx.max_weight_matching(new_graph)
if verbose:
print('\t', len(maximal_matching) // 2, 'assembly points are identified', file=verbose_destination) # depends on [control=['if'], data=[]]
# as networkx provides a maximum matching in a form of adjacency list, every identified edge
# (pair of vertices) is present their twice (i.e. matching[v1]=v2 and matching[v2]=v1)
# we need to make sure we only add every edge only once
visited = set()
for (v1, v2) in maximal_matching.items():
if v1 in visited or v2 in visited:
continue # depends on [control=['if'], data=[]]
visited.add(v1)
visited.add(v2)
assemblies.append((v1, v2, support_edge_dict[v1, v2])) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['subnet']] # depends on [control=['for'], data=[]]
# we return the result as a list of assembly points that were identified for the targeted multicolor
# as a list of tuples (v1, v2, ("before", "after"))
# where v1 and v2 correspond to assembly point and "before" and "after" are used to compute the assembly score
return assemblies |
def get_schema(self, table, with_headers=False):
"""Retrieve the database schema for a particular table."""
f = self.fetch('desc ' + wrap(table))
if not isinstance(f[0], list):
f = [f]
# Replace None with ''
schema = [['' if col is None else col for col in row] for row in f]
# If with_headers is True, insert headers to first row before returning
if with_headers:
schema.insert(0, ['Column', 'Type', 'Null', 'Key', 'Default', 'Extra'])
return schema | def function[get_schema, parameter[self, table, with_headers]]:
constant[Retrieve the database schema for a particular table.]
variable[f] assign[=] call[name[self].fetch, parameter[binary_operation[constant[desc ] + call[name[wrap], parameter[name[table]]]]]]
if <ast.UnaryOp object at 0x7da1b0bd0970> begin[:]
variable[f] assign[=] list[[<ast.Name object at 0x7da1b0bd2e90>]]
variable[schema] assign[=] <ast.ListComp object at 0x7da1b0bd2050>
if name[with_headers] begin[:]
call[name[schema].insert, parameter[constant[0], list[[<ast.Constant object at 0x7da1b0bd3220>, <ast.Constant object at 0x7da1b0bd18d0>, <ast.Constant object at 0x7da1b0bd1750>, <ast.Constant object at 0x7da1b0bd0340>, <ast.Constant object at 0x7da1b0bd2b60>, <ast.Constant object at 0x7da1b0bd12d0>]]]]
return[name[schema]] | keyword[def] identifier[get_schema] ( identifier[self] , identifier[table] , identifier[with_headers] = keyword[False] ):
literal[string]
identifier[f] = identifier[self] . identifier[fetch] ( literal[string] + identifier[wrap] ( identifier[table] ))
keyword[if] keyword[not] identifier[isinstance] ( identifier[f] [ literal[int] ], identifier[list] ):
identifier[f] =[ identifier[f] ]
identifier[schema] =[[ literal[string] keyword[if] identifier[col] keyword[is] keyword[None] keyword[else] identifier[col] keyword[for] identifier[col] keyword[in] identifier[row] ] keyword[for] identifier[row] keyword[in] identifier[f] ]
keyword[if] identifier[with_headers] :
identifier[schema] . identifier[insert] ( literal[int] ,[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ])
keyword[return] identifier[schema] | def get_schema(self, table, with_headers=False):
"""Retrieve the database schema for a particular table."""
f = self.fetch('desc ' + wrap(table))
if not isinstance(f[0], list):
f = [f] # depends on [control=['if'], data=[]]
# Replace None with ''
schema = [['' if col is None else col for col in row] for row in f]
# If with_headers is True, insert headers to first row before returning
if with_headers:
schema.insert(0, ['Column', 'Type', 'Null', 'Key', 'Default', 'Extra']) # depends on [control=['if'], data=[]]
return schema |
def _parse_add_url(url):
""" return a tuple (host, job_name) from a url """
parsed_url = urlparse(url)
job_name = None
paths = parsed_url.path.strip("/").split("/")
for i, path in enumerate(paths):
if path == "job" and len(paths) > i:
job_name = paths[i + 1]
if job_name is None:
raise ConfigException("Unable to parse valid job from {0}".format(url))
return (
"{0}://{1}".format(parsed_url.scheme, parsed_url.netloc),
job_name
) | def function[_parse_add_url, parameter[url]]:
constant[ return a tuple (host, job_name) from a url ]
variable[parsed_url] assign[=] call[name[urlparse], parameter[name[url]]]
variable[job_name] assign[=] constant[None]
variable[paths] assign[=] call[call[name[parsed_url].path.strip, parameter[constant[/]]].split, parameter[constant[/]]]
for taget[tuple[[<ast.Name object at 0x7da1b141df90>, <ast.Name object at 0x7da1b141cdf0>]]] in starred[call[name[enumerate], parameter[name[paths]]]] begin[:]
if <ast.BoolOp object at 0x7da1b141e1a0> begin[:]
variable[job_name] assign[=] call[name[paths]][binary_operation[name[i] + constant[1]]]
if compare[name[job_name] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b141ece0>
return[tuple[[<ast.Call object at 0x7da1b141f490>, <ast.Name object at 0x7da1b141c7c0>]]] | keyword[def] identifier[_parse_add_url] ( identifier[url] ):
literal[string]
identifier[parsed_url] = identifier[urlparse] ( identifier[url] )
identifier[job_name] = keyword[None]
identifier[paths] = identifier[parsed_url] . identifier[path] . identifier[strip] ( literal[string] ). identifier[split] ( literal[string] )
keyword[for] identifier[i] , identifier[path] keyword[in] identifier[enumerate] ( identifier[paths] ):
keyword[if] identifier[path] == literal[string] keyword[and] identifier[len] ( identifier[paths] )> identifier[i] :
identifier[job_name] = identifier[paths] [ identifier[i] + literal[int] ]
keyword[if] identifier[job_name] keyword[is] keyword[None] :
keyword[raise] identifier[ConfigException] ( literal[string] . identifier[format] ( identifier[url] ))
keyword[return] (
literal[string] . identifier[format] ( identifier[parsed_url] . identifier[scheme] , identifier[parsed_url] . identifier[netloc] ),
identifier[job_name]
) | def _parse_add_url(url):
""" return a tuple (host, job_name) from a url """
parsed_url = urlparse(url)
job_name = None
paths = parsed_url.path.strip('/').split('/')
for (i, path) in enumerate(paths):
if path == 'job' and len(paths) > i:
job_name = paths[i + 1] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if job_name is None:
raise ConfigException('Unable to parse valid job from {0}'.format(url)) # depends on [control=['if'], data=[]]
return ('{0}://{1}'.format(parsed_url.scheme, parsed_url.netloc), job_name) |
def _read_with_mask(raster, masked):
""" returns if we should read from rasterio using the masked
"""
if masked is None:
mask_flags = raster.mask_flag_enums
per_dataset_mask = all([rasterio.enums.MaskFlags.per_dataset in flags for flags in mask_flags])
masked = per_dataset_mask
return masked | def function[_read_with_mask, parameter[raster, masked]]:
constant[ returns if we should read from rasterio using the masked
]
if compare[name[masked] is constant[None]] begin[:]
variable[mask_flags] assign[=] name[raster].mask_flag_enums
variable[per_dataset_mask] assign[=] call[name[all], parameter[<ast.ListComp object at 0x7da18bc72080>]]
variable[masked] assign[=] name[per_dataset_mask]
return[name[masked]] | keyword[def] identifier[_read_with_mask] ( identifier[raster] , identifier[masked] ):
literal[string]
keyword[if] identifier[masked] keyword[is] keyword[None] :
identifier[mask_flags] = identifier[raster] . identifier[mask_flag_enums]
identifier[per_dataset_mask] = identifier[all] ([ identifier[rasterio] . identifier[enums] . identifier[MaskFlags] . identifier[per_dataset] keyword[in] identifier[flags] keyword[for] identifier[flags] keyword[in] identifier[mask_flags] ])
identifier[masked] = identifier[per_dataset_mask]
keyword[return] identifier[masked] | def _read_with_mask(raster, masked):
""" returns if we should read from rasterio using the masked
"""
if masked is None:
mask_flags = raster.mask_flag_enums
per_dataset_mask = all([rasterio.enums.MaskFlags.per_dataset in flags for flags in mask_flags])
masked = per_dataset_mask # depends on [control=['if'], data=['masked']]
return masked |
def _nonzero(self):
""" Equivalent numpy's nonzero but returns a tuple of Varibles. """
# TODO we should replace dask's native nonzero
# after https://github.com/dask/dask/issues/1076 is implemented.
nonzeros = np.nonzero(self.data)
return tuple(Variable((dim), nz) for nz, dim
in zip(nonzeros, self.dims)) | def function[_nonzero, parameter[self]]:
constant[ Equivalent numpy's nonzero but returns a tuple of Varibles. ]
variable[nonzeros] assign[=] call[name[np].nonzero, parameter[name[self].data]]
return[call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da18f09f250>]]] | keyword[def] identifier[_nonzero] ( identifier[self] ):
literal[string]
identifier[nonzeros] = identifier[np] . identifier[nonzero] ( identifier[self] . identifier[data] )
keyword[return] identifier[tuple] ( identifier[Variable] (( identifier[dim] ), identifier[nz] ) keyword[for] identifier[nz] , identifier[dim]
keyword[in] identifier[zip] ( identifier[nonzeros] , identifier[self] . identifier[dims] )) | def _nonzero(self):
""" Equivalent numpy's nonzero but returns a tuple of Varibles. """
# TODO we should replace dask's native nonzero
# after https://github.com/dask/dask/issues/1076 is implemented.
nonzeros = np.nonzero(self.data)
return tuple((Variable(dim, nz) for (nz, dim) in zip(nonzeros, self.dims))) |
async def _read_data(self):
"""Response reader task."""
last_error = ConnectionClosedError(
"Connection has been closed by server")
while not self._reader.at_eof():
try:
obj = await self._reader.readobj()
except asyncio.CancelledError:
# NOTE: reader can get cancelled from `close()` method only.
last_error = RuntimeError('this is unexpected')
break
except ProtocolError as exc:
# ProtocolError is fatal
# so connection must be closed
if self._in_transaction is not None:
self._transaction_error = exc
last_error = exc
break
except Exception as exc:
# NOTE: for QUIT command connection error can be received
# before response
last_error = exc
break
else:
if (obj == b'' or obj is None) and self._reader.at_eof():
logger.debug("Connection has been closed by server,"
" response: %r", obj)
last_error = ConnectionClosedError("Reader at end of file")
break
if isinstance(obj, MaxClientsError):
last_error = obj
break
if self._in_pubsub:
self._process_pubsub(obj)
else:
self._process_data(obj)
self._closing = True
self._loop.call_soon(self._do_close, last_error) | <ast.AsyncFunctionDef object at 0x7da2041daa70> | keyword[async] keyword[def] identifier[_read_data] ( identifier[self] ):
literal[string]
identifier[last_error] = identifier[ConnectionClosedError] (
literal[string] )
keyword[while] keyword[not] identifier[self] . identifier[_reader] . identifier[at_eof] ():
keyword[try] :
identifier[obj] = keyword[await] identifier[self] . identifier[_reader] . identifier[readobj] ()
keyword[except] identifier[asyncio] . identifier[CancelledError] :
identifier[last_error] = identifier[RuntimeError] ( literal[string] )
keyword[break]
keyword[except] identifier[ProtocolError] keyword[as] identifier[exc] :
keyword[if] identifier[self] . identifier[_in_transaction] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_transaction_error] = identifier[exc]
identifier[last_error] = identifier[exc]
keyword[break]
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
identifier[last_error] = identifier[exc]
keyword[break]
keyword[else] :
keyword[if] ( identifier[obj] == literal[string] keyword[or] identifier[obj] keyword[is] keyword[None] ) keyword[and] identifier[self] . identifier[_reader] . identifier[at_eof] ():
identifier[logger] . identifier[debug] ( literal[string]
literal[string] , identifier[obj] )
identifier[last_error] = identifier[ConnectionClosedError] ( literal[string] )
keyword[break]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[MaxClientsError] ):
identifier[last_error] = identifier[obj]
keyword[break]
keyword[if] identifier[self] . identifier[_in_pubsub] :
identifier[self] . identifier[_process_pubsub] ( identifier[obj] )
keyword[else] :
identifier[self] . identifier[_process_data] ( identifier[obj] )
identifier[self] . identifier[_closing] = keyword[True]
identifier[self] . identifier[_loop] . identifier[call_soon] ( identifier[self] . identifier[_do_close] , identifier[last_error] ) | async def _read_data(self):
"""Response reader task."""
last_error = ConnectionClosedError('Connection has been closed by server')
while not self._reader.at_eof():
try:
obj = await self._reader.readobj() # depends on [control=['try'], data=[]]
except asyncio.CancelledError:
# NOTE: reader can get cancelled from `close()` method only.
last_error = RuntimeError('this is unexpected')
break # depends on [control=['except'], data=[]]
except ProtocolError as exc:
# ProtocolError is fatal
# so connection must be closed
if self._in_transaction is not None:
self._transaction_error = exc # depends on [control=['if'], data=[]]
last_error = exc
break # depends on [control=['except'], data=['exc']]
except Exception as exc:
# NOTE: for QUIT command connection error can be received
# before response
last_error = exc
break # depends on [control=['except'], data=['exc']]
else:
if (obj == b'' or obj is None) and self._reader.at_eof():
logger.debug('Connection has been closed by server, response: %r', obj)
last_error = ConnectionClosedError('Reader at end of file')
break # depends on [control=['if'], data=[]]
if isinstance(obj, MaxClientsError):
last_error = obj
break # depends on [control=['if'], data=[]]
if self._in_pubsub:
self._process_pubsub(obj) # depends on [control=['if'], data=[]]
else:
self._process_data(obj) # depends on [control=['while'], data=[]]
self._closing = True
self._loop.call_soon(self._do_close, last_error) |
def has_predecessor(self, graph, dest, orig, branch, turn, tick, *, forward=None):
"""Return whether an edge connects the destination to the origin at the given time.
Doesn't require the edge's index, which makes it slower than retrieving a
particular edge.
"""
if forward is None:
forward = self.db._forward
return orig in self._get_origcache(graph, dest, branch, turn, tick, forward=forward) | def function[has_predecessor, parameter[self, graph, dest, orig, branch, turn, tick]]:
constant[Return whether an edge connects the destination to the origin at the given time.
Doesn't require the edge's index, which makes it slower than retrieving a
particular edge.
]
if compare[name[forward] is constant[None]] begin[:]
variable[forward] assign[=] name[self].db._forward
return[compare[name[orig] in call[name[self]._get_origcache, parameter[name[graph], name[dest], name[branch], name[turn], name[tick]]]]] | keyword[def] identifier[has_predecessor] ( identifier[self] , identifier[graph] , identifier[dest] , identifier[orig] , identifier[branch] , identifier[turn] , identifier[tick] ,*, identifier[forward] = keyword[None] ):
literal[string]
keyword[if] identifier[forward] keyword[is] keyword[None] :
identifier[forward] = identifier[self] . identifier[db] . identifier[_forward]
keyword[return] identifier[orig] keyword[in] identifier[self] . identifier[_get_origcache] ( identifier[graph] , identifier[dest] , identifier[branch] , identifier[turn] , identifier[tick] , identifier[forward] = identifier[forward] ) | def has_predecessor(self, graph, dest, orig, branch, turn, tick, *, forward=None):
"""Return whether an edge connects the destination to the origin at the given time.
Doesn't require the edge's index, which makes it slower than retrieving a
particular edge.
"""
if forward is None:
forward = self.db._forward # depends on [control=['if'], data=['forward']]
return orig in self._get_origcache(graph, dest, branch, turn, tick, forward=forward) |
def run_idle(self):
"""Run one of the idle callbacks.
Returns:
True if one was called, False if no idle callback was called.
"""
if not self.idlers or self.inactive >= len(self.idlers):
return False
idler = self.idlers.popleft()
callback, args, kwds = idler
_logging_debug('idler: %s', callback.__name__)
res = callback(*args, **kwds)
# See add_idle() for the meaning of the callback return value.
if res is not None:
if res:
self.inactive = 0
else:
self.inactive += 1
self.idlers.append(idler)
else:
_logging_debug('idler %s removed', callback.__name__)
return True | def function[run_idle, parameter[self]]:
constant[Run one of the idle callbacks.
Returns:
True if one was called, False if no idle callback was called.
]
if <ast.BoolOp object at 0x7da18c4ceaa0> begin[:]
return[constant[False]]
variable[idler] assign[=] call[name[self].idlers.popleft, parameter[]]
<ast.Tuple object at 0x7da20c6e5ab0> assign[=] name[idler]
call[name[_logging_debug], parameter[constant[idler: %s], name[callback].__name__]]
variable[res] assign[=] call[name[callback], parameter[<ast.Starred object at 0x7da20c6e5a80>]]
if compare[name[res] is_not constant[None]] begin[:]
if name[res] begin[:]
name[self].inactive assign[=] constant[0]
call[name[self].idlers.append, parameter[name[idler]]]
return[constant[True]] | keyword[def] identifier[run_idle] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[idlers] keyword[or] identifier[self] . identifier[inactive] >= identifier[len] ( identifier[self] . identifier[idlers] ):
keyword[return] keyword[False]
identifier[idler] = identifier[self] . identifier[idlers] . identifier[popleft] ()
identifier[callback] , identifier[args] , identifier[kwds] = identifier[idler]
identifier[_logging_debug] ( literal[string] , identifier[callback] . identifier[__name__] )
identifier[res] = identifier[callback] (* identifier[args] ,** identifier[kwds] )
keyword[if] identifier[res] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[res] :
identifier[self] . identifier[inactive] = literal[int]
keyword[else] :
identifier[self] . identifier[inactive] += literal[int]
identifier[self] . identifier[idlers] . identifier[append] ( identifier[idler] )
keyword[else] :
identifier[_logging_debug] ( literal[string] , identifier[callback] . identifier[__name__] )
keyword[return] keyword[True] | def run_idle(self):
"""Run one of the idle callbacks.
Returns:
True if one was called, False if no idle callback was called.
"""
if not self.idlers or self.inactive >= len(self.idlers):
return False # depends on [control=['if'], data=[]]
idler = self.idlers.popleft()
(callback, args, kwds) = idler
_logging_debug('idler: %s', callback.__name__)
res = callback(*args, **kwds)
# See add_idle() for the meaning of the callback return value.
if res is not None:
if res:
self.inactive = 0 # depends on [control=['if'], data=[]]
else:
self.inactive += 1
self.idlers.append(idler) # depends on [control=['if'], data=['res']]
else:
_logging_debug('idler %s removed', callback.__name__)
return True |
def render(self, **kwargs):
""" Displays the voxels and the control points. """
# Calling parent function
super(VisVoxel, self).render(**kwargs)
# Initialize variables
legend_proxy = []
legend_names = []
# Start plotting of the surface and the control points grid
fig = plt.figure(figsize=self.vconf.figure_size, dpi=self.vconf.figure_dpi)
ax = Axes3D(fig)
# Start plotting
for plot in self._plots:
# Plot control points
if plot['type'] == 'ctrlpts' and self.vconf.display_ctrlpts:
pts = np.array(plot['ptsarr'], dtype=self.vconf.dtype)
ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], color=plot['color'], marker='^', s=20, depthshade=True)
plot_proxy = mpl.lines.Line2D([0], [0], linestyle='none', color=plot['color'], marker='^')
legend_proxy.append(plot_proxy)
legend_names.append(plot['name'])
# Plot evaluated points
if plot['type'] == 'evalpts' and self.vconf.display_evalpts:
faces = np.array(plot['ptsarr'][1], dtype=self.vconf.dtype)
filled = np.array(plot['ptsarr'][2], dtype=self.vconf.dtype)
# Find filled voxels
faces_filled = np.concatenate(faces[filled == 1.0])
# Create a single Poly3DCollection object
pc3d = Poly3DCollection(faces_filled, facecolors=plot['color'], edgecolors='k')
ax.add_collection3d(pc3d)
# Set axis limits
gf_min = np.amin(faces_filled, axis=(0, 1))
gf_max = np.amax(faces_filled, axis=(0, 1))
ax.set_xlim([gf_min[0], gf_max[0]])
ax.set_ylim([gf_min[1], gf_max[1]])
ax.set_zlim([gf_min[2], gf_max[2]])
# Legend
plot_proxy = mpl.lines.Line2D([0], [0], linestyle='none', color=plot['color'], marker='o')
legend_proxy.append(plot_proxy)
legend_names.append(plot['name'])
# Plot bounding box
if plot['type'] == 'bbox' and self.vconf.display_bbox:
ax.plot(pts[:, 0], pts[:, 1], pts[:, 2], color=plot['color'], linestyle='--')
plot_proxy = mpl.lines.Line2D([0], [0], linestyle='--', color=plot['color'])
legend_proxy.append(plot_proxy)
legend_names.append(plot['name'])
# Plot extras
if plot['type'] == 'extras':
ax.plot(pts[:, 0], pts[:, 1], pts[:, 2],
color=plot['color'][0], linestyle='-', linewidth=plot['color'][1])
plot_proxy = mpl.lines.Line2D([0], [0], linestyle='-', color=plot['color'][0])
legend_proxy.append(plot_proxy)
legend_names.append(plot['name'])
# Add legend to 3D plot, @ref: https://stackoverflow.com/a/20505720
if self.vconf.display_legend:
ax.legend(legend_proxy, legend_names, numpoints=1)
# Remove axes
if not self.vconf.display_axes:
plt.axis('off')
# Set axes equal
if self.vconf.axes_equal:
self.vconf.set_axes_equal(ax)
# Axis labels
if self.vconf.display_labels:
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
# Process keyword arguments
fig_filename = kwargs.get('fig_save_as', None)
fig_display = kwargs.get('display_plot', True)
# Display the plot
if fig_display:
plt.show()
else:
fig_filename = self.vconf.figure_image_filename if fig_filename is None else fig_filename
# Save the figure
self.vconf.save_figure_as(fig, fig_filename)
# Return the figure object
return fig | def function[render, parameter[self]]:
constant[ Displays the voxels and the control points. ]
call[call[name[super], parameter[name[VisVoxel], name[self]]].render, parameter[]]
variable[legend_proxy] assign[=] list[[]]
variable[legend_names] assign[=] list[[]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[ax] assign[=] call[name[Axes3D], parameter[name[fig]]]
for taget[name[plot]] in starred[name[self]._plots] begin[:]
if <ast.BoolOp object at 0x7da1b17b6e00> begin[:]
variable[pts] assign[=] call[name[np].array, parameter[call[name[plot]][constant[ptsarr]]]]
call[name[ax].scatter, parameter[call[name[pts]][tuple[[<ast.Slice object at 0x7da1b17b71f0>, <ast.Constant object at 0x7da1b17b7f70>]]], call[name[pts]][tuple[[<ast.Slice object at 0x7da1b17f8820>, <ast.Constant object at 0x7da1b17f92a0>]]], call[name[pts]][tuple[[<ast.Slice object at 0x7da1b17fb970>, <ast.Constant object at 0x7da1b17f9bd0>]]]]]
variable[plot_proxy] assign[=] call[name[mpl].lines.Line2D, parameter[list[[<ast.Constant object at 0x7da1b17fa4a0>]], list[[<ast.Constant object at 0x7da1b17f9a50>]]]]
call[name[legend_proxy].append, parameter[name[plot_proxy]]]
call[name[legend_names].append, parameter[call[name[plot]][constant[name]]]]
if <ast.BoolOp object at 0x7da1b17f9fc0> begin[:]
variable[faces] assign[=] call[name[np].array, parameter[call[call[name[plot]][constant[ptsarr]]][constant[1]]]]
variable[filled] assign[=] call[name[np].array, parameter[call[call[name[plot]][constant[ptsarr]]][constant[2]]]]
variable[faces_filled] assign[=] call[name[np].concatenate, parameter[call[name[faces]][compare[name[filled] equal[==] constant[1.0]]]]]
variable[pc3d] assign[=] call[name[Poly3DCollection], parameter[name[faces_filled]]]
call[name[ax].add_collection3d, parameter[name[pc3d]]]
variable[gf_min] assign[=] call[name[np].amin, parameter[name[faces_filled]]]
variable[gf_max] assign[=] call[name[np].amax, parameter[name[faces_filled]]]
call[name[ax].set_xlim, parameter[list[[<ast.Subscript object at 0x7da1b17fa380>, <ast.Subscript object at 0x7da1b17fb7c0>]]]]
call[name[ax].set_ylim, parameter[list[[<ast.Subscript object at 0x7da1b16a8a90>, <ast.Subscript object at 0x7da1b16abf10>]]]]
call[name[ax].set_zlim, parameter[list[[<ast.Subscript object at 0x7da1b16aba60>, <ast.Subscript object at 0x7da1b16a99f0>]]]]
variable[plot_proxy] assign[=] call[name[mpl].lines.Line2D, parameter[list[[<ast.Constant object at 0x7da1b16a84f0>]], list[[<ast.Constant object at 0x7da1b16abdf0>]]]]
call[name[legend_proxy].append, parameter[name[plot_proxy]]]
call[name[legend_names].append, parameter[call[name[plot]][constant[name]]]]
if <ast.BoolOp object at 0x7da1b16a9120> begin[:]
call[name[ax].plot, parameter[call[name[pts]][tuple[[<ast.Slice object at 0x7da1b16ab7f0>, <ast.Constant object at 0x7da1b16ab6a0>]]], call[name[pts]][tuple[[<ast.Slice object at 0x7da1b16aa9b0>, <ast.Constant object at 0x7da1b16aa320>]]], call[name[pts]][tuple[[<ast.Slice object at 0x7da1b16aa440>, <ast.Constant object at 0x7da1b16a94b0>]]]]]
variable[plot_proxy] assign[=] call[name[mpl].lines.Line2D, parameter[list[[<ast.Constant object at 0x7da1b16a8c70>]], list[[<ast.Constant object at 0x7da1b16aa890>]]]]
call[name[legend_proxy].append, parameter[name[plot_proxy]]]
call[name[legend_names].append, parameter[call[name[plot]][constant[name]]]]
if compare[call[name[plot]][constant[type]] equal[==] constant[extras]] begin[:]
call[name[ax].plot, parameter[call[name[pts]][tuple[[<ast.Slice object at 0x7da1b16a9360>, <ast.Constant object at 0x7da1b16aa950>]]], call[name[pts]][tuple[[<ast.Slice object at 0x7da1b16a9bd0>, <ast.Constant object at 0x7da1b16a8a60>]]], call[name[pts]][tuple[[<ast.Slice object at 0x7da1b16d16f0>, <ast.Constant object at 0x7da1b16d1780>]]]]]
variable[plot_proxy] assign[=] call[name[mpl].lines.Line2D, parameter[list[[<ast.Constant object at 0x7da1b16d05e0>]], list[[<ast.Constant object at 0x7da1b16d0190>]]]]
call[name[legend_proxy].append, parameter[name[plot_proxy]]]
call[name[legend_names].append, parameter[call[name[plot]][constant[name]]]]
if name[self].vconf.display_legend begin[:]
call[name[ax].legend, parameter[name[legend_proxy], name[legend_names]]]
if <ast.UnaryOp object at 0x7da1b16e4520> begin[:]
call[name[plt].axis, parameter[constant[off]]]
if name[self].vconf.axes_equal begin[:]
call[name[self].vconf.set_axes_equal, parameter[name[ax]]]
if name[self].vconf.display_labels begin[:]
call[name[ax].set_xlabel, parameter[constant[x]]]
call[name[ax].set_ylabel, parameter[constant[y]]]
call[name[ax].set_zlabel, parameter[constant[z]]]
variable[fig_filename] assign[=] call[name[kwargs].get, parameter[constant[fig_save_as], constant[None]]]
variable[fig_display] assign[=] call[name[kwargs].get, parameter[constant[display_plot], constant[True]]]
if name[fig_display] begin[:]
call[name[plt].show, parameter[]]
call[name[self].vconf.save_figure_as, parameter[name[fig], name[fig_filename]]]
return[name[fig]] | keyword[def] identifier[render] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[super] ( identifier[VisVoxel] , identifier[self] ). identifier[render] (** identifier[kwargs] )
identifier[legend_proxy] =[]
identifier[legend_names] =[]
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] = identifier[self] . identifier[vconf] . identifier[figure_size] , identifier[dpi] = identifier[self] . identifier[vconf] . identifier[figure_dpi] )
identifier[ax] = identifier[Axes3D] ( identifier[fig] )
keyword[for] identifier[plot] keyword[in] identifier[self] . identifier[_plots] :
keyword[if] identifier[plot] [ literal[string] ]== literal[string] keyword[and] identifier[self] . identifier[vconf] . identifier[display_ctrlpts] :
identifier[pts] = identifier[np] . identifier[array] ( identifier[plot] [ literal[string] ], identifier[dtype] = identifier[self] . identifier[vconf] . identifier[dtype] )
identifier[ax] . identifier[scatter] ( identifier[pts] [:, literal[int] ], identifier[pts] [:, literal[int] ], identifier[pts] [:, literal[int] ], identifier[color] = identifier[plot] [ literal[string] ], identifier[marker] = literal[string] , identifier[s] = literal[int] , identifier[depthshade] = keyword[True] )
identifier[plot_proxy] = identifier[mpl] . identifier[lines] . identifier[Line2D] ([ literal[int] ],[ literal[int] ], identifier[linestyle] = literal[string] , identifier[color] = identifier[plot] [ literal[string] ], identifier[marker] = literal[string] )
identifier[legend_proxy] . identifier[append] ( identifier[plot_proxy] )
identifier[legend_names] . identifier[append] ( identifier[plot] [ literal[string] ])
keyword[if] identifier[plot] [ literal[string] ]== literal[string] keyword[and] identifier[self] . identifier[vconf] . identifier[display_evalpts] :
identifier[faces] = identifier[np] . identifier[array] ( identifier[plot] [ literal[string] ][ literal[int] ], identifier[dtype] = identifier[self] . identifier[vconf] . identifier[dtype] )
identifier[filled] = identifier[np] . identifier[array] ( identifier[plot] [ literal[string] ][ literal[int] ], identifier[dtype] = identifier[self] . identifier[vconf] . identifier[dtype] )
identifier[faces_filled] = identifier[np] . identifier[concatenate] ( identifier[faces] [ identifier[filled] == literal[int] ])
identifier[pc3d] = identifier[Poly3DCollection] ( identifier[faces_filled] , identifier[facecolors] = identifier[plot] [ literal[string] ], identifier[edgecolors] = literal[string] )
identifier[ax] . identifier[add_collection3d] ( identifier[pc3d] )
identifier[gf_min] = identifier[np] . identifier[amin] ( identifier[faces_filled] , identifier[axis] =( literal[int] , literal[int] ))
identifier[gf_max] = identifier[np] . identifier[amax] ( identifier[faces_filled] , identifier[axis] =( literal[int] , literal[int] ))
identifier[ax] . identifier[set_xlim] ([ identifier[gf_min] [ literal[int] ], identifier[gf_max] [ literal[int] ]])
identifier[ax] . identifier[set_ylim] ([ identifier[gf_min] [ literal[int] ], identifier[gf_max] [ literal[int] ]])
identifier[ax] . identifier[set_zlim] ([ identifier[gf_min] [ literal[int] ], identifier[gf_max] [ literal[int] ]])
identifier[plot_proxy] = identifier[mpl] . identifier[lines] . identifier[Line2D] ([ literal[int] ],[ literal[int] ], identifier[linestyle] = literal[string] , identifier[color] = identifier[plot] [ literal[string] ], identifier[marker] = literal[string] )
identifier[legend_proxy] . identifier[append] ( identifier[plot_proxy] )
identifier[legend_names] . identifier[append] ( identifier[plot] [ literal[string] ])
keyword[if] identifier[plot] [ literal[string] ]== literal[string] keyword[and] identifier[self] . identifier[vconf] . identifier[display_bbox] :
identifier[ax] . identifier[plot] ( identifier[pts] [:, literal[int] ], identifier[pts] [:, literal[int] ], identifier[pts] [:, literal[int] ], identifier[color] = identifier[plot] [ literal[string] ], identifier[linestyle] = literal[string] )
identifier[plot_proxy] = identifier[mpl] . identifier[lines] . identifier[Line2D] ([ literal[int] ],[ literal[int] ], identifier[linestyle] = literal[string] , identifier[color] = identifier[plot] [ literal[string] ])
identifier[legend_proxy] . identifier[append] ( identifier[plot_proxy] )
identifier[legend_names] . identifier[append] ( identifier[plot] [ literal[string] ])
keyword[if] identifier[plot] [ literal[string] ]== literal[string] :
identifier[ax] . identifier[plot] ( identifier[pts] [:, literal[int] ], identifier[pts] [:, literal[int] ], identifier[pts] [:, literal[int] ],
identifier[color] = identifier[plot] [ literal[string] ][ literal[int] ], identifier[linestyle] = literal[string] , identifier[linewidth] = identifier[plot] [ literal[string] ][ literal[int] ])
identifier[plot_proxy] = identifier[mpl] . identifier[lines] . identifier[Line2D] ([ literal[int] ],[ literal[int] ], identifier[linestyle] = literal[string] , identifier[color] = identifier[plot] [ literal[string] ][ literal[int] ])
identifier[legend_proxy] . identifier[append] ( identifier[plot_proxy] )
identifier[legend_names] . identifier[append] ( identifier[plot] [ literal[string] ])
keyword[if] identifier[self] . identifier[vconf] . identifier[display_legend] :
identifier[ax] . identifier[legend] ( identifier[legend_proxy] , identifier[legend_names] , identifier[numpoints] = literal[int] )
keyword[if] keyword[not] identifier[self] . identifier[vconf] . identifier[display_axes] :
identifier[plt] . identifier[axis] ( literal[string] )
keyword[if] identifier[self] . identifier[vconf] . identifier[axes_equal] :
identifier[self] . identifier[vconf] . identifier[set_axes_equal] ( identifier[ax] )
keyword[if] identifier[self] . identifier[vconf] . identifier[display_labels] :
identifier[ax] . identifier[set_xlabel] ( literal[string] )
identifier[ax] . identifier[set_ylabel] ( literal[string] )
identifier[ax] . identifier[set_zlabel] ( literal[string] )
identifier[fig_filename] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] )
identifier[fig_display] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[True] )
keyword[if] identifier[fig_display] :
identifier[plt] . identifier[show] ()
keyword[else] :
identifier[fig_filename] = identifier[self] . identifier[vconf] . identifier[figure_image_filename] keyword[if] identifier[fig_filename] keyword[is] keyword[None] keyword[else] identifier[fig_filename]
identifier[self] . identifier[vconf] . identifier[save_figure_as] ( identifier[fig] , identifier[fig_filename] )
keyword[return] identifier[fig] | def render(self, **kwargs):
""" Displays the voxels and the control points. """
# Calling parent function
super(VisVoxel, self).render(**kwargs)
# Initialize variables
legend_proxy = []
legend_names = []
# Start plotting of the surface and the control points grid
fig = plt.figure(figsize=self.vconf.figure_size, dpi=self.vconf.figure_dpi)
ax = Axes3D(fig)
# Start plotting
for plot in self._plots:
# Plot control points
if plot['type'] == 'ctrlpts' and self.vconf.display_ctrlpts:
pts = np.array(plot['ptsarr'], dtype=self.vconf.dtype)
ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], color=plot['color'], marker='^', s=20, depthshade=True)
plot_proxy = mpl.lines.Line2D([0], [0], linestyle='none', color=plot['color'], marker='^')
legend_proxy.append(plot_proxy)
legend_names.append(plot['name']) # depends on [control=['if'], data=[]]
# Plot evaluated points
if plot['type'] == 'evalpts' and self.vconf.display_evalpts:
faces = np.array(plot['ptsarr'][1], dtype=self.vconf.dtype)
filled = np.array(plot['ptsarr'][2], dtype=self.vconf.dtype)
# Find filled voxels
faces_filled = np.concatenate(faces[filled == 1.0])
# Create a single Poly3DCollection object
pc3d = Poly3DCollection(faces_filled, facecolors=plot['color'], edgecolors='k')
ax.add_collection3d(pc3d)
# Set axis limits
gf_min = np.amin(faces_filled, axis=(0, 1))
gf_max = np.amax(faces_filled, axis=(0, 1))
ax.set_xlim([gf_min[0], gf_max[0]])
ax.set_ylim([gf_min[1], gf_max[1]])
ax.set_zlim([gf_min[2], gf_max[2]])
# Legend
plot_proxy = mpl.lines.Line2D([0], [0], linestyle='none', color=plot['color'], marker='o')
legend_proxy.append(plot_proxy)
legend_names.append(plot['name']) # depends on [control=['if'], data=[]]
# Plot bounding box
if plot['type'] == 'bbox' and self.vconf.display_bbox:
ax.plot(pts[:, 0], pts[:, 1], pts[:, 2], color=plot['color'], linestyle='--')
plot_proxy = mpl.lines.Line2D([0], [0], linestyle='--', color=plot['color'])
legend_proxy.append(plot_proxy)
legend_names.append(plot['name']) # depends on [control=['if'], data=[]]
# Plot extras
if plot['type'] == 'extras':
ax.plot(pts[:, 0], pts[:, 1], pts[:, 2], color=plot['color'][0], linestyle='-', linewidth=plot['color'][1])
plot_proxy = mpl.lines.Line2D([0], [0], linestyle='-', color=plot['color'][0])
legend_proxy.append(plot_proxy)
legend_names.append(plot['name']) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['plot']]
# Add legend to 3D plot, @ref: https://stackoverflow.com/a/20505720
if self.vconf.display_legend:
ax.legend(legend_proxy, legend_names, numpoints=1) # depends on [control=['if'], data=[]]
# Remove axes
if not self.vconf.display_axes:
plt.axis('off') # depends on [control=['if'], data=[]]
# Set axes equal
if self.vconf.axes_equal:
self.vconf.set_axes_equal(ax) # depends on [control=['if'], data=[]]
# Axis labels
if self.vconf.display_labels:
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z') # depends on [control=['if'], data=[]]
# Process keyword arguments
fig_filename = kwargs.get('fig_save_as', None)
fig_display = kwargs.get('display_plot', True)
# Display the plot
if fig_display:
plt.show() # depends on [control=['if'], data=[]]
else:
fig_filename = self.vconf.figure_image_filename if fig_filename is None else fig_filename
# Save the figure
self.vconf.save_figure_as(fig, fig_filename)
# Return the figure object
return fig |
def set_data(self, pos=None, color=None, width=None, connect=None,
arrows=None):
"""Set the data used for this visual
Parameters
----------
pos : array
Array of shape (..., 2) or (..., 3) specifying vertex coordinates.
color : Color, tuple, or array
The color to use when drawing the line. If an array is given, it
must be of shape (..., 4) and provide one rgba color per vertex.
Can also be a colormap name, or appropriate `Function`.
width:
The width of the line in px. Line widths > 1px are only
guaranteed to work when using 'agg' method.
connect : str or array
Determines which vertices are connected by lines.
* "strip" causes the line to be drawn with each vertex
connected to the next.
* "segments" causes each pair of vertices to draw an
independent line segment
* numpy arrays specify the exact set of segment pairs to
connect.
arrows : array
A Nx4 matrix where each row contains the x and y coordinate of the
first and second vertex of the arrow body. Remember that the second
vertex is used as center point for the arrow head, and the first
vertex is only used for determining the arrow head orientation.
"""
if arrows is not None:
self._arrows = arrows
self._arrows_changed = True
LineVisual.set_data(self, pos, color, width, connect) | def function[set_data, parameter[self, pos, color, width, connect, arrows]]:
constant[Set the data used for this visual
Parameters
----------
pos : array
Array of shape (..., 2) or (..., 3) specifying vertex coordinates.
color : Color, tuple, or array
The color to use when drawing the line. If an array is given, it
must be of shape (..., 4) and provide one rgba color per vertex.
Can also be a colormap name, or appropriate `Function`.
width:
The width of the line in px. Line widths > 1px are only
guaranteed to work when using 'agg' method.
connect : str or array
Determines which vertices are connected by lines.
* "strip" causes the line to be drawn with each vertex
connected to the next.
* "segments" causes each pair of vertices to draw an
independent line segment
* numpy arrays specify the exact set of segment pairs to
connect.
arrows : array
A Nx4 matrix where each row contains the x and y coordinate of the
first and second vertex of the arrow body. Remember that the second
vertex is used as center point for the arrow head, and the first
vertex is only used for determining the arrow head orientation.
]
if compare[name[arrows] is_not constant[None]] begin[:]
name[self]._arrows assign[=] name[arrows]
name[self]._arrows_changed assign[=] constant[True]
call[name[LineVisual].set_data, parameter[name[self], name[pos], name[color], name[width], name[connect]]] | keyword[def] identifier[set_data] ( identifier[self] , identifier[pos] = keyword[None] , identifier[color] = keyword[None] , identifier[width] = keyword[None] , identifier[connect] = keyword[None] ,
identifier[arrows] = keyword[None] ):
literal[string]
keyword[if] identifier[arrows] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_arrows] = identifier[arrows]
identifier[self] . identifier[_arrows_changed] = keyword[True]
identifier[LineVisual] . identifier[set_data] ( identifier[self] , identifier[pos] , identifier[color] , identifier[width] , identifier[connect] ) | def set_data(self, pos=None, color=None, width=None, connect=None, arrows=None):
"""Set the data used for this visual
Parameters
----------
pos : array
Array of shape (..., 2) or (..., 3) specifying vertex coordinates.
color : Color, tuple, or array
The color to use when drawing the line. If an array is given, it
must be of shape (..., 4) and provide one rgba color per vertex.
Can also be a colormap name, or appropriate `Function`.
width:
The width of the line in px. Line widths > 1px are only
guaranteed to work when using 'agg' method.
connect : str or array
Determines which vertices are connected by lines.
* "strip" causes the line to be drawn with each vertex
connected to the next.
* "segments" causes each pair of vertices to draw an
independent line segment
* numpy arrays specify the exact set of segment pairs to
connect.
arrows : array
A Nx4 matrix where each row contains the x and y coordinate of the
first and second vertex of the arrow body. Remember that the second
vertex is used as center point for the arrow head, and the first
vertex is only used for determining the arrow head orientation.
"""
if arrows is not None:
self._arrows = arrows
self._arrows_changed = True # depends on [control=['if'], data=['arrows']]
LineVisual.set_data(self, pos, color, width, connect) |
def _get_previous_assessment_section(self, assessment_section_id):
    """Gets the previous section before section_id.
    Assumes that section list exists in taken and section_id is in section list.
    Assumes that Section parts only exist as children of Assessments
    """
    section_ids = self._my_map['sections']
    target_id = str(assessment_section_id)
    if section_ids[0] == target_id:
        raise errors.IllegalState('already at the first section')
    # The section immediately preceding the target in the ordered list.
    previous_index = section_ids.index(target_id) - 1
    return self._get_assessment_section(Id(section_ids[previous_index]))
constant[Gets the previous section before section_id.
Assumes that section list exists in taken and section_id is in section list.
Assumes that Section parts only exist as children of Assessments
]
if compare[call[call[name[self]._my_map][constant[sections]]][constant[0]] equal[==] call[name[str], parameter[name[assessment_section_id]]]] begin[:]
<ast.Raise object at 0x7da20c7c8310> | keyword[def] identifier[_get_previous_assessment_section] ( identifier[self] , identifier[assessment_section_id] ):
literal[string]
keyword[if] identifier[self] . identifier[_my_map] [ literal[string] ][ literal[int] ]== identifier[str] ( identifier[assessment_section_id] ):
keyword[raise] identifier[errors] . identifier[IllegalState] ( literal[string] )
keyword[else] :
keyword[return] identifier[self] . identifier[_get_assessment_section] (
identifier[Id] ( identifier[self] . identifier[_my_map] [ literal[string] ][ identifier[self] . identifier[_my_map] [ literal[string] ]. identifier[index] ( identifier[str] ( identifier[assessment_section_id] ))- literal[int] ])) | def _get_previous_assessment_section(self, assessment_section_id):
"""Gets the previous section before section_id.
Assumes that section list exists in taken and section_id is in section list.
Assumes that Section parts only exist as children of Assessments
"""
if self._my_map['sections'][0] == str(assessment_section_id):
raise errors.IllegalState('already at the first section') # depends on [control=['if'], data=[]]
else:
return self._get_assessment_section(Id(self._my_map['sections'][self._my_map['sections'].index(str(assessment_section_id)) - 1])) |
def get_members(self, group_id):
    """
    Returns a list of restclients.GroupMember objects for the group
    identified by the passed group ID.
    """
    self._valid_group_id(group_id)
    url = "{}/group/{}/member".format(self.API, group_id)
    data = self._get_resource(url)
    # Convert every raw JSON entry into a GroupMember object.
    return [self._group_member_from_json(datum) for datum in data.get("data")]
constant[
Returns a list of restclients.GroupMember objects for the group
identified by the passed group ID.
]
call[name[self]._valid_group_id, parameter[name[group_id]]]
variable[url] assign[=] call[constant[{}/group/{}/member].format, parameter[name[self].API, name[group_id]]]
variable[data] assign[=] call[name[self]._get_resource, parameter[name[url]]]
variable[members] assign[=] list[[]]
for taget[name[datum]] in starred[call[name[data].get, parameter[constant[data]]]] begin[:]
call[name[members].append, parameter[call[name[self]._group_member_from_json, parameter[name[datum]]]]]
return[name[members]] | keyword[def] identifier[get_members] ( identifier[self] , identifier[group_id] ):
literal[string]
identifier[self] . identifier[_valid_group_id] ( identifier[group_id] )
identifier[url] = literal[string] . identifier[format] ( identifier[self] . identifier[API] , identifier[group_id] )
identifier[data] = identifier[self] . identifier[_get_resource] ( identifier[url] )
identifier[members] =[]
keyword[for] identifier[datum] keyword[in] identifier[data] . identifier[get] ( literal[string] ):
identifier[members] . identifier[append] ( identifier[self] . identifier[_group_member_from_json] ( identifier[datum] ))
keyword[return] identifier[members] | def get_members(self, group_id):
"""
Returns a list of restclients.GroupMember objects for the group
identified by the passed group ID.
"""
self._valid_group_id(group_id)
url = '{}/group/{}/member'.format(self.API, group_id)
data = self._get_resource(url)
members = []
for datum in data.get('data'):
members.append(self._group_member_from_json(datum)) # depends on [control=['for'], data=['datum']]
return members |
def get_vendor_ies(self, mac_block=None, oui_type=None):
    """vendor information element querying

    :mac_block: str
        first 3 bytes of mac addresses in format of
        00-11-22 or 00:11:22 or 001122
    :oui_type: int
        vendors ie type; None matches every type
    :return: list of vendor information element dicts matching the
        given criteria, or None when mac_block has an invalid format
    """
    vendor_ies = []
    if mac_block is not None:
        if Management.is_valid_mac_oui(mac_block):
            mac_block = mac_block.upper()
            if ':' in mac_block:
                # BUG FIX: str.replace returns a new string (strings are
                # immutable); the original discarded the result, so
                # colon-separated blocks never matched the dash-separated
                # 'oui' payload values. Rebind the normalized value.
                mac_block = mac_block.replace(':', '-')
        else:
            logging.warning("invalid oui macblock")
            return None
    for elem in self.tagged_params:
        tag_num = elem['number']
        if MNGMT_TAGS[tag_num] == 'TAG_VENDOR_SPECIFIC_IE':
            if mac_block is None:
                # No vendor filter: collect every vendor-specific IE.
                vendor_ies.append(elem)
            elif elem['payload']['oui'] == mac_block.encode('ascii'):
                if oui_type is None:
                    vendor_ies.append(elem)
                elif elem['payload']['oui_type'] == oui_type:
                    vendor_ies.append(elem)
    return vendor_ies
constant[vendor information element querying
:mac_block: str
first 3 bytes of mac addresses in format of
00-11-22 or 00:11:22 or 001122
:oui_type: int
vendors ie type
:return: int
is valid mac_block format
-1 is unknown
:return: dict[]
list of oui information elements
-1 on error (invalid v
]
variable[vendor_ies] assign[=] list[[]]
if compare[name[mac_block] is_not constant[None]] begin[:]
if call[name[Management].is_valid_mac_oui, parameter[name[mac_block]]] begin[:]
variable[mac_block] assign[=] call[name[mac_block].upper, parameter[]]
if compare[constant[:] in name[mac_block]] begin[:]
call[name[mac_block].replace, parameter[constant[:], constant[-]]]
for taget[name[elem]] in starred[name[self].tagged_params] begin[:]
variable[tag_num] assign[=] call[name[elem]][constant[number]]
if compare[call[name[MNGMT_TAGS]][name[tag_num]] equal[==] constant[TAG_VENDOR_SPECIFIC_IE]] begin[:]
if compare[name[mac_block] is constant[None]] begin[:]
call[name[vendor_ies].append, parameter[name[elem]]]
return[name[vendor_ies]] | keyword[def] identifier[get_vendor_ies] ( identifier[self] , identifier[mac_block] = keyword[None] , identifier[oui_type] = keyword[None] ):
literal[string]
identifier[vendor_ies] =[]
keyword[if] identifier[mac_block] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[Management] . identifier[is_valid_mac_oui] ( identifier[mac_block] ):
identifier[mac_block] = identifier[mac_block] . identifier[upper] ()
keyword[if] literal[string] keyword[in] identifier[mac_block] :
identifier[mac_block] . identifier[replace] ( literal[string] , literal[string] )
keyword[else] :
identifier[logging] . identifier[warning] ( literal[string] )
keyword[return] keyword[None]
keyword[for] identifier[elem] keyword[in] identifier[self] . identifier[tagged_params] :
identifier[tag_num] = identifier[elem] [ literal[string] ]
keyword[if] identifier[MNGMT_TAGS] [ identifier[tag_num] ]== literal[string] :
keyword[if] identifier[mac_block] keyword[is] keyword[None] :
identifier[vendor_ies] . identifier[append] ( identifier[elem] )
keyword[elif] identifier[elem] [ literal[string] ][ literal[string] ]== identifier[mac_block] . identifier[encode] ( literal[string] ):
keyword[if] identifier[oui_type] keyword[is] keyword[None] :
identifier[vendor_ies] . identifier[append] ( identifier[elem] )
keyword[elif] identifier[elem] [ literal[string] ][ literal[string] ]== identifier[oui_type] :
identifier[vendor_ies] . identifier[append] ( identifier[elem] )
keyword[return] identifier[vendor_ies] | def get_vendor_ies(self, mac_block=None, oui_type=None):
"""vendor information element querying
:mac_block: str
first 3 bytes of mac addresses in format of
00-11-22 or 00:11:22 or 001122
:oui_type: int
vendors ie type
:return: int
is valid mac_block format
-1 is unknown
:return: dict[]
list of oui information elements
-1 on error (invalid v
"""
vendor_ies = []
if mac_block is not None:
if Management.is_valid_mac_oui(mac_block):
mac_block = mac_block.upper()
if ':' in mac_block:
mac_block.replace(':', '-') # depends on [control=['if'], data=['mac_block']] # depends on [control=['if'], data=[]]
else:
logging.warning('invalid oui macblock')
return None # depends on [control=['if'], data=['mac_block']]
for elem in self.tagged_params:
tag_num = elem['number']
if MNGMT_TAGS[tag_num] == 'TAG_VENDOR_SPECIFIC_IE':
if mac_block is None:
vendor_ies.append(elem) # depends on [control=['if'], data=[]]
elif elem['payload']['oui'] == mac_block.encode('ascii'):
if oui_type is None:
vendor_ies.append(elem) # depends on [control=['if'], data=[]]
elif elem['payload']['oui_type'] == oui_type:
vendor_ies.append(elem) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['elem']]
return vendor_ies |
def get_all_letters(self, params=None):
    """
    Get all letters
    This will iterate over all pages until it gets all elements.
    So if the rate limit exceeded it will throw an Exception and you will get nothing
    :param params: search params
    :return: list
    """
    # Fall back to an empty search when no (or falsy) params are given.
    search_params = params if params else {}
    return self._iterate_through_pages(
        self.get_letters_per_page, resource=LETTERS, params=search_params)
constant[
Get all letters
This will iterate over all pages until it gets all elements.
So if the rate limit exceeded it will throw an Exception and you will get nothing
:param params: search params
:return: list
]
if <ast.UnaryOp object at 0x7da20c6e7a30> begin[:]
variable[params] assign[=] dictionary[[], []]
return[call[name[self]._iterate_through_pages, parameter[name[self].get_letters_per_page]]] | keyword[def] identifier[get_all_letters] ( identifier[self] , identifier[params] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[params] :
identifier[params] ={}
keyword[return] identifier[self] . identifier[_iterate_through_pages] ( identifier[self] . identifier[get_letters_per_page] , identifier[resource] = identifier[LETTERS] ,**{ literal[string] : identifier[params] }) | def get_all_letters(self, params=None):
"""
Get all letters
This will iterate over all pages until it gets all elements.
So if the rate limit exceeded it will throw an Exception and you will get nothing
:param params: search params
:return: list
"""
if not params:
params = {} # depends on [control=['if'], data=[]]
return self._iterate_through_pages(self.get_letters_per_page, resource=LETTERS, **{'params': params}) |
def model_train(self):
    """
    Train a TF graph.

    Reads its inputs from attributes set up by the trainer:
    `self.sess` (TF session), `self.X_train` / `self.Y_train` (numpy
    arrays of training inputs/labels), `self.hparams` (training
    hyperparameters, including `batch_size`, `nb_epochs`, `save`,
    `save_steps` and `save_dir`) and `self.runner` (queue runner that
    TrainerSingleGPU or TrainerMultiGPU instantiate at initialization
    time).

    When `hparams.save` is set, the model is checkpointed every
    `hparams.save_steps` epochs and after the final epoch.
    """
    assert self.runner is not None, (
        """Runner is not initialized. TrainerSingleGPU or TrainerMultiGPU
        instantiate a Runner object at initialization time.""")
    hparams = self.hparams
    batch_size = hparams.batch_size
    nb_epochs = hparams.nb_epochs
    train_dir = hparams.save_dir
    filename = 'model.ckpt'
    X_train = self.X_train
    Y_train = self.Y_train
    sess = self.sess
    with sess.as_default():
        X_batch = X_train[:batch_size]
        Y_batch = Y_train[:batch_size]
        self._init_tf(X_batch, Y_batch)

        for epoch in six.moves.xrange(nb_epochs):
            logging.info("Epoch " + str(epoch))

            # Compute number of batches (last batch may be partial).
            nb_batches = int(math.ceil(float(len(X_train)) / batch_size))
            assert nb_batches * batch_size >= len(X_train)

            # Indices to shuffle training set
            index_shuf = list(range(len(X_train)))
            self.rng.shuffle(index_shuf)

            prev = time.time()
            for batch in range(nb_batches):
                # Compute batch start and end indices
                start, end = batch_indices(
                    batch, len(X_train), batch_size)

                # Perform one training step
                self._update_learning_params()

                # Train step
                X_batch = X_train[index_shuf[start:end]]
                Y_batch = Y_train[index_shuf[start:end]]
                self._run({'x_pre': X_batch, 'y': Y_batch})
                self._sync_params()

            # Clean up the queue
            while not self.runner.is_finished():
                self._run()

            self._sync_params(forced=True)

            assert end >= len(X_train), (
                'Not all training examples are used.')
            cur = time.time()
            logging.info("\tEpoch took " + str(cur - prev) + " seconds")
            prev = cur

            self.eval()

            # Save model
            # BUG FIX: `epoch` runs from 0 to nb_epochs - 1, so the
            # original clause `epoch == nb_epochs` was never true and the
            # final epoch was only saved when save_steps happened to
            # divide it. Compare against epoch + 1 so the last epoch is
            # always checkpointed.
            cond = ((epoch + 1) % hparams.save_steps == 0
                    or epoch + 1 == nb_epochs)
            if hparams.save and cond:
                save_path = os.path.join(train_dir, filename)
                saver = tf.train.Saver()
                saver.save(sess, save_path)
                logging.info("Model saved at: " + str(save_path))
    logging.info("Completed model training.")
constant[
Train a TF graph
:param sess: TF session to use when training the graph
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_train: numpy array with training inputs
:param Y_train: numpy array with training outputs
:param hparams.save: boolean controlling the save operation
:param predictions_adv: if set with the adversarial example tensor,
will run adversarial training
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
]
assert[compare[name[self].runner is_not constant[None]]]
variable[hparams] assign[=] name[self].hparams
variable[batch_size] assign[=] name[hparams].batch_size
variable[nb_epochs] assign[=] name[hparams].nb_epochs
variable[train_dir] assign[=] name[hparams].save_dir
variable[filename] assign[=] constant[model.ckpt]
variable[X_train] assign[=] name[self].X_train
variable[Y_train] assign[=] name[self].Y_train
variable[sess] assign[=] name[self].sess
with call[name[sess].as_default, parameter[]] begin[:]
variable[X_batch] assign[=] call[name[X_train]][<ast.Slice object at 0x7da18bc73910>]
variable[Y_batch] assign[=] call[name[Y_train]][<ast.Slice object at 0x7da18bc714e0>]
call[name[self]._init_tf, parameter[name[X_batch], name[Y_batch]]]
for taget[name[epoch]] in starred[call[name[six].moves.xrange, parameter[name[nb_epochs]]]] begin[:]
call[name[logging].info, parameter[binary_operation[constant[Epoch ] + call[name[str], parameter[name[epoch]]]]]]
variable[nb_batches] assign[=] call[name[int], parameter[call[name[math].ceil, parameter[binary_operation[call[name[float], parameter[call[name[len], parameter[name[X_train]]]]] / name[batch_size]]]]]]
assert[compare[binary_operation[name[nb_batches] * name[batch_size]] greater_or_equal[>=] call[name[len], parameter[name[X_train]]]]]
variable[index_shuf] assign[=] call[name[list], parameter[call[name[range], parameter[call[name[len], parameter[name[X_train]]]]]]]
call[name[self].rng.shuffle, parameter[name[index_shuf]]]
variable[prev] assign[=] call[name[time].time, parameter[]]
for taget[name[batch]] in starred[call[name[range], parameter[name[nb_batches]]]] begin[:]
<ast.Tuple object at 0x7da204962b90> assign[=] call[name[batch_indices], parameter[name[batch], call[name[len], parameter[name[X_train]]], name[batch_size]]]
call[name[self]._update_learning_params, parameter[]]
variable[X_batch] assign[=] call[name[X_train]][call[name[index_shuf]][<ast.Slice object at 0x7da204961e10>]]
variable[Y_batch] assign[=] call[name[Y_train]][call[name[index_shuf]][<ast.Slice object at 0x7da204961360>]]
call[name[self]._run, parameter[dictionary[[<ast.Constant object at 0x7da204961810>, <ast.Constant object at 0x7da204961fc0>], [<ast.Name object at 0x7da204962020>, <ast.Name object at 0x7da204963550>]]]]
call[name[self]._sync_params, parameter[]]
while <ast.UnaryOp object at 0x7da2044c15a0> begin[:]
call[name[self]._run, parameter[]]
call[name[self]._sync_params, parameter[]]
assert[compare[name[end] greater_or_equal[>=] call[name[len], parameter[name[X_train]]]]]
variable[cur] assign[=] call[name[time].time, parameter[]]
call[name[logging].info, parameter[binary_operation[binary_operation[constant[ Epoch took ] + call[name[str], parameter[binary_operation[name[cur] - name[prev]]]]] + constant[ seconds]]]]
variable[prev] assign[=] name[cur]
call[name[self].eval, parameter[]]
variable[cond] assign[=] <ast.BoolOp object at 0x7da2044c2b60>
if <ast.BoolOp object at 0x7da2044c1270> begin[:]
variable[save_path] assign[=] call[name[os].path.join, parameter[name[train_dir], name[filename]]]
variable[saver] assign[=] call[name[tf].train.Saver, parameter[]]
call[name[saver].save, parameter[name[sess], name[save_path]]]
call[name[logging].info, parameter[binary_operation[constant[Model saved at: ] + call[name[str], parameter[name[save_path]]]]]]
call[name[logging].info, parameter[constant[Completed model training.]]] | keyword[def] identifier[model_train] ( identifier[self] ):
literal[string]
keyword[assert] identifier[self] . identifier[runner] keyword[is] keyword[not] keyword[None] ,(
literal[string] )
identifier[hparams] = identifier[self] . identifier[hparams]
identifier[batch_size] = identifier[hparams] . identifier[batch_size]
identifier[nb_epochs] = identifier[hparams] . identifier[nb_epochs]
identifier[train_dir] = identifier[hparams] . identifier[save_dir]
identifier[filename] = literal[string]
identifier[X_train] = identifier[self] . identifier[X_train]
identifier[Y_train] = identifier[self] . identifier[Y_train]
identifier[sess] = identifier[self] . identifier[sess]
keyword[with] identifier[sess] . identifier[as_default] ():
identifier[X_batch] = identifier[X_train] [: identifier[batch_size] ]
identifier[Y_batch] = identifier[Y_train] [: identifier[batch_size] ]
identifier[self] . identifier[_init_tf] ( identifier[X_batch] , identifier[Y_batch] )
keyword[for] identifier[epoch] keyword[in] identifier[six] . identifier[moves] . identifier[xrange] ( identifier[nb_epochs] ):
identifier[logging] . identifier[info] ( literal[string] + identifier[str] ( identifier[epoch] ))
identifier[nb_batches] = identifier[int] ( identifier[math] . identifier[ceil] ( identifier[float] ( identifier[len] ( identifier[X_train] ))/ identifier[batch_size] ))
keyword[assert] identifier[nb_batches] * identifier[batch_size] >= identifier[len] ( identifier[X_train] )
identifier[index_shuf] = identifier[list] ( identifier[range] ( identifier[len] ( identifier[X_train] )))
identifier[self] . identifier[rng] . identifier[shuffle] ( identifier[index_shuf] )
identifier[prev] = identifier[time] . identifier[time] ()
keyword[for] identifier[batch] keyword[in] identifier[range] ( identifier[nb_batches] ):
identifier[start] , identifier[end] = identifier[batch_indices] (
identifier[batch] , identifier[len] ( identifier[X_train] ), identifier[batch_size] )
identifier[self] . identifier[_update_learning_params] ()
identifier[X_batch] = identifier[X_train] [ identifier[index_shuf] [ identifier[start] : identifier[end] ]]
identifier[Y_batch] = identifier[Y_train] [ identifier[index_shuf] [ identifier[start] : identifier[end] ]]
identifier[self] . identifier[_run] ({ literal[string] : identifier[X_batch] , literal[string] : identifier[Y_batch] })
identifier[self] . identifier[_sync_params] ()
keyword[while] keyword[not] identifier[self] . identifier[runner] . identifier[is_finished] ():
identifier[self] . identifier[_run] ()
identifier[self] . identifier[_sync_params] ( identifier[forced] = keyword[True] )
keyword[assert] identifier[end] >= identifier[len] ( identifier[X_train] ),(
literal[string] )
identifier[cur] = identifier[time] . identifier[time] ()
identifier[logging] . identifier[info] ( literal[string] + identifier[str] ( identifier[cur] - identifier[prev] )+ literal[string] )
identifier[prev] = identifier[cur]
identifier[self] . identifier[eval] ()
identifier[cond] =(( identifier[epoch] + literal[int] )% identifier[hparams] . identifier[save_steps] == literal[int]
keyword[or] identifier[epoch] == identifier[nb_epochs] )
keyword[if] identifier[hparams] . identifier[save] keyword[and] identifier[cond] :
identifier[save_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[train_dir] , identifier[filename] )
identifier[saver] = identifier[tf] . identifier[train] . identifier[Saver] ()
identifier[saver] . identifier[save] ( identifier[sess] , identifier[save_path] )
identifier[logging] . identifier[info] ( literal[string] + identifier[str] ( identifier[save_path] ))
identifier[logging] . identifier[info] ( literal[string] ) | def model_train(self):
"""
Train a TF graph
:param sess: TF session to use when training the graph
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_train: numpy array with training inputs
:param Y_train: numpy array with training outputs
:param hparams.save: boolean controlling the save operation
:param predictions_adv: if set with the adversarial example tensor,
will run adversarial training
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
"""
assert self.runner is not None, 'Runner is not initialized. TrainerSingleGPU or TrainerMultiGPU\n instantiate a Runner object at initialization time.'
hparams = self.hparams
batch_size = hparams.batch_size
nb_epochs = hparams.nb_epochs
train_dir = hparams.save_dir
filename = 'model.ckpt'
X_train = self.X_train
Y_train = self.Y_train
sess = self.sess
with sess.as_default():
X_batch = X_train[:batch_size]
Y_batch = Y_train[:batch_size]
self._init_tf(X_batch, Y_batch)
for epoch in six.moves.xrange(nb_epochs):
logging.info('Epoch ' + str(epoch))
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_train)) / batch_size))
assert nb_batches * batch_size >= len(X_train)
# Indices to shuffle training set
index_shuf = list(range(len(X_train)))
self.rng.shuffle(index_shuf)
prev = time.time()
for batch in range(nb_batches):
# Compute batch start and end indices
(start, end) = batch_indices(batch, len(X_train), batch_size)
# Perform one training step
self._update_learning_params()
# Train step
X_batch = X_train[index_shuf[start:end]]
Y_batch = Y_train[index_shuf[start:end]]
self._run({'x_pre': X_batch, 'y': Y_batch})
self._sync_params() # depends on [control=['for'], data=['batch']]
# Clean up the queue
while not self.runner.is_finished():
self._run() # depends on [control=['while'], data=[]]
self._sync_params(forced=True)
assert end >= len(X_train), 'Not all training examples are used.'
cur = time.time()
logging.info('\tEpoch took ' + str(cur - prev) + ' seconds')
prev = cur
self.eval()
# Save model
cond = (epoch + 1) % hparams.save_steps == 0 or epoch == nb_epochs
if hparams.save and cond:
save_path = os.path.join(train_dir, filename)
saver = tf.train.Saver()
saver.save(sess, save_path)
logging.info('Model saved at: ' + str(save_path)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['epoch']] # depends on [control=['with'], data=[]]
logging.info('Completed model training.') |
def pivot_bin(self, pivot_columns, value_column, bins=None, **vargs) :
    """Form a table with columns formed by the unique tuples in pivot_columns
    containing counts per bin of the values associated with each tuple in the value_column.
    By default, bins are chosen to contain all values in the value_column. The
    following named arguments from numpy.histogram can be applied to
    specialize bin widths:
    Args:
        ``bins`` (int or sequence of scalars): If bins is an int,
            it defines the number of equal-width bins in the given range
            (10, by default). If bins is a sequence, it defines the bin
            edges, including the rightmost edge, allowing for non-uniform
            bin widths.
        ``range`` ((float, float)): The lower and upper range of
            the bins. If not provided, range contains all values in the
            table. Values outside the range are ignored.
        ``normed`` (bool): If False, the result will contain the number of
            samples in each bin. If True, the result is normalized such that
            the integral over the range is 1.
    Returns:
        A new table whose first column ('bin') holds the shared bin edges
        and whose remaining columns (one per unique pivot tuple, labeled
        by joining the tuple values with '-') hold per-bin counts.
    """
    pivot_columns = _as_labels(pivot_columns)
    selected = self.select(pivot_columns + [value_column])
    # collect=identity keeps each group's raw values as a sequence instead
    # of aggregating them, so they can be histogrammed below.
    grouped = selected.groups(pivot_columns, collect=lambda x:x)
    # refine bins by taking a histogram over all the data, so every group
    # column is counted against the same shared bin edges
    if bins is not None:
        vargs['bins'] = bins
    _, rbins = np.histogram(self[value_column],**vargs)
    # create a table with these bins a first column and counts for each group
    vargs['bins'] = rbins
    binned = type(self)().with_column('bin',rbins)
    for group in grouped.rows:
        # All but the last entry of the group row are the pivot key values;
        # the last entry is the collected value sequence for that key.
        col_label = "-".join(map(str,group[0:-1]))
        col_vals = group[-1]
        counts,_ = np.histogram(col_vals,**vargs)
        # np.histogram returns len(rbins) - 1 counts; pad with a trailing
        # zero so the column length matches the 'bin' edge column.
        binned[col_label] = np.append(counts,0)
    return binned
constant[Form a table with columns formed by the unique tuples in pivot_columns
containing counts per bin of the values associated with each tuple in the value_column.
By default, bins are chosen to contain all values in the value_column. The
following named arguments from numpy.histogram can be applied to
specialize bin widths:
Args:
``bins`` (int or sequence of scalars): If bins is an int,
it defines the number of equal-width bins in the given range
(10, by default). If bins is a sequence, it defines the bin
edges, including the rightmost edge, allowing for non-uniform
bin widths.
``range`` ((float, float)): The lower and upper range of
the bins. If not provided, range contains all values in the
table. Values outside the range are ignored.
``normed`` (bool): If False, the result will contain the number of
samples in each bin. If True, the result is normalized such that
the integral over the range is 1.
]
variable[pivot_columns] assign[=] call[name[_as_labels], parameter[name[pivot_columns]]]
variable[selected] assign[=] call[name[self].select, parameter[binary_operation[name[pivot_columns] + list[[<ast.Name object at 0x7da204566b00>]]]]]
variable[grouped] assign[=] call[name[selected].groups, parameter[name[pivot_columns]]]
if compare[name[bins] is_not constant[None]] begin[:]
call[name[vargs]][constant[bins]] assign[=] name[bins]
<ast.Tuple object at 0x7da204566140> assign[=] call[name[np].histogram, parameter[call[name[self]][name[value_column]]]]
call[name[vargs]][constant[bins]] assign[=] name[rbins]
variable[binned] assign[=] call[call[call[name[type], parameter[name[self]]], parameter[]].with_column, parameter[constant[bin], name[rbins]]]
for taget[name[group]] in starred[name[grouped].rows] begin[:]
variable[col_label] assign[=] call[constant[-].join, parameter[call[name[map], parameter[name[str], call[name[group]][<ast.Slice object at 0x7da204564f70>]]]]]
variable[col_vals] assign[=] call[name[group]][<ast.UnaryOp object at 0x7da204565690>]
<ast.Tuple object at 0x7da2045673d0> assign[=] call[name[np].histogram, parameter[name[col_vals]]]
call[name[binned]][name[col_label]] assign[=] call[name[np].append, parameter[name[counts], constant[0]]]
return[name[binned]] | keyword[def] identifier[pivot_bin] ( identifier[self] , identifier[pivot_columns] , identifier[value_column] , identifier[bins] = keyword[None] ,** identifier[vargs] ):
literal[string]
identifier[pivot_columns] = identifier[_as_labels] ( identifier[pivot_columns] )
identifier[selected] = identifier[self] . identifier[select] ( identifier[pivot_columns] +[ identifier[value_column] ])
identifier[grouped] = identifier[selected] . identifier[groups] ( identifier[pivot_columns] , identifier[collect] = keyword[lambda] identifier[x] : identifier[x] )
keyword[if] identifier[bins] keyword[is] keyword[not] keyword[None] :
identifier[vargs] [ literal[string] ]= identifier[bins]
identifier[_] , identifier[rbins] = identifier[np] . identifier[histogram] ( identifier[self] [ identifier[value_column] ],** identifier[vargs] )
identifier[vargs] [ literal[string] ]= identifier[rbins]
identifier[binned] = identifier[type] ( identifier[self] )(). identifier[with_column] ( literal[string] , identifier[rbins] )
keyword[for] identifier[group] keyword[in] identifier[grouped] . identifier[rows] :
identifier[col_label] = literal[string] . identifier[join] ( identifier[map] ( identifier[str] , identifier[group] [ literal[int] :- literal[int] ]))
identifier[col_vals] = identifier[group] [- literal[int] ]
identifier[counts] , identifier[_] = identifier[np] . identifier[histogram] ( identifier[col_vals] ,** identifier[vargs] )
identifier[binned] [ identifier[col_label] ]= identifier[np] . identifier[append] ( identifier[counts] , literal[int] )
keyword[return] identifier[binned] | def pivot_bin(self, pivot_columns, value_column, bins=None, **vargs):
"""Form a table with columns formed by the unique tuples in pivot_columns
containing counts per bin of the values associated with each tuple in the value_column.
By default, bins are chosen to contain all values in the value_column. The
following named arguments from numpy.histogram can be applied to
specialize bin widths:
Args:
``bins`` (int or sequence of scalars): If bins is an int,
it defines the number of equal-width bins in the given range
(10, by default). If bins is a sequence, it defines the bin
edges, including the rightmost edge, allowing for non-uniform
bin widths.
``range`` ((float, float)): The lower and upper range of
the bins. If not provided, range contains all values in the
table. Values outside the range are ignored.
``normed`` (bool): If False, the result will contain the number of
samples in each bin. If True, the result is normalized such that
the integral over the range is 1.
"""
pivot_columns = _as_labels(pivot_columns)
selected = self.select(pivot_columns + [value_column])
grouped = selected.groups(pivot_columns, collect=lambda x: x)
# refine bins by taking a histogram over all the data
if bins is not None:
vargs['bins'] = bins # depends on [control=['if'], data=['bins']]
(_, rbins) = np.histogram(self[value_column], **vargs)
# create a table with these bins a first column and counts for each group
vargs['bins'] = rbins
binned = type(self)().with_column('bin', rbins)
for group in grouped.rows:
col_label = '-'.join(map(str, group[0:-1]))
col_vals = group[-1]
(counts, _) = np.histogram(col_vals, **vargs)
binned[col_label] = np.append(counts, 0) # depends on [control=['for'], data=['group']]
return binned |
def _delete_text_ngrams(self, text_id):
    """Deletes all n-grams associated with `text_id` from the data
    store.

    :param text_id: database ID of text
    :type text_id: `int`
    """
    params = [text_id]
    # Run both deletes inside a single transaction so the store never
    # keeps n-gram rows without the matching has-ngrams rows.
    with self._conn:
        for statement in (constants.DELETE_TEXT_NGRAMS_SQL,
                          constants.DELETE_TEXT_HAS_NGRAMS_SQL):
            self._conn.execute(statement, params)
constant[Deletes all n-grams associated with `text_id` from the data
store.
:param text_id: database ID of text
:type text_id: `int`
]
with name[self]._conn begin[:]
call[name[self]._conn.execute, parameter[name[constants].DELETE_TEXT_NGRAMS_SQL, list[[<ast.Name object at 0x7da1b191c130>]]]]
call[name[self]._conn.execute, parameter[name[constants].DELETE_TEXT_HAS_NGRAMS_SQL, list[[<ast.Name object at 0x7da1b191cd90>]]]] | keyword[def] identifier[_delete_text_ngrams] ( identifier[self] , identifier[text_id] ):
literal[string]
keyword[with] identifier[self] . identifier[_conn] :
identifier[self] . identifier[_conn] . identifier[execute] ( identifier[constants] . identifier[DELETE_TEXT_NGRAMS_SQL] ,[ identifier[text_id] ])
identifier[self] . identifier[_conn] . identifier[execute] ( identifier[constants] . identifier[DELETE_TEXT_HAS_NGRAMS_SQL] ,[ identifier[text_id] ]) | def _delete_text_ngrams(self, text_id):
"""Deletes all n-grams associated with `text_id` from the data
store.
:param text_id: database ID of text
:type text_id: `int`
"""
with self._conn:
self._conn.execute(constants.DELETE_TEXT_NGRAMS_SQL, [text_id])
self._conn.execute(constants.DELETE_TEXT_HAS_NGRAMS_SQL, [text_id]) # depends on [control=['with'], data=[]] |
def get_google_songs(
    self, include_filters=None, exclude_filters=None, all_includes=False, all_excludes=False,
    uploaded=True, purchased=True):
    """Build the song list from the user's Google Music library.

    Parameters:
        include_filters (list): ``(field, pattern)`` tuples where field is any
            Google Music metadata field available to the Musicmanager client
            and pattern is a Python regex. Songs whose field values match none
            of the patterns are filtered out.
        exclude_filters (list): ``(field, pattern)`` tuples as above. Songs
            whose field values match any of the patterns are filtered out.
        all_includes (bool): If ``True``, every include filter must match for
            a song to be included.
        all_excludes (bool): If ``True``, every exclude filter must match for
            a song to be excluded.
        uploaded (bool): Include uploaded songs. Default: ``True``.
        purchased (bool): Include purchased songs. Default: ``True``.

    Returns:
        A list of Google Music song dicts matching criteria and
        a list of Google Music song dicts filtered out using filter criteria.
    """
    if not (uploaded or purchased):
        raise ValueError("One or both of uploaded/purchased parameters must be True.")

    logger.info("Loading Google Music songs...")

    library = []

    if uploaded:
        library.extend(self.api.get_uploaded_songs())

    if purchased:
        # Purchased songs may duplicate uploaded ones; keep only new entries.
        for track in self.api.get_purchased_songs():
            if track not in library:
                library.append(track)

    matched, excluded = filter_google_songs(
        library,
        include_filters=include_filters,
        exclude_filters=exclude_filters,
        all_includes=all_includes,
        all_excludes=all_excludes,
    )

    logger.info("Filtered {0} Google Music songs".format(len(excluded)))
    logger.info("Loaded {0} Google Music songs".format(len(matched)))

    return matched, excluded
constant[Create song list from user's Google Music library.
Parameters:
include_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Musicmanager client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values don't match any of the given patterns.
exclude_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Musicmanager client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values match any of the given patterns.
all_includes (bool): If ``True``, all include_filters criteria must match to include a song.
all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song.
uploaded (bool): Include uploaded songs. Default: ``True``.
purchased (bool): Include purchased songs. Default: ``True``.
Returns:
A list of Google Music song dicts matching criteria and
a list of Google Music song dicts filtered out using filter criteria.
]
if <ast.BoolOp object at 0x7da1aff74a90> begin[:]
<ast.Raise object at 0x7da1aff75a50>
call[name[logger].info, parameter[constant[Loading Google Music songs...]]]
variable[google_songs] assign[=] list[[]]
if name[uploaded] begin[:]
<ast.AugAssign object at 0x7da1aff75cc0>
if name[purchased] begin[:]
for taget[name[song]] in starred[call[name[self].api.get_purchased_songs, parameter[]]] begin[:]
if compare[name[song] <ast.NotIn object at 0x7da2590d7190> name[google_songs]] begin[:]
call[name[google_songs].append, parameter[name[song]]]
<ast.Tuple object at 0x7da18f813430> assign[=] call[name[filter_google_songs], parameter[name[google_songs]]]
call[name[logger].info, parameter[call[constant[Filtered {0} Google Music songs].format, parameter[call[name[len], parameter[name[filtered_songs]]]]]]]
call[name[logger].info, parameter[call[constant[Loaded {0} Google Music songs].format, parameter[call[name[len], parameter[name[matched_songs]]]]]]]
return[tuple[[<ast.Name object at 0x7da18f810850>, <ast.Name object at 0x7da18f810b80>]]] | keyword[def] identifier[get_google_songs] (
identifier[self] , identifier[include_filters] = keyword[None] , identifier[exclude_filters] = keyword[None] , identifier[all_includes] = keyword[False] , identifier[all_excludes] = keyword[False] ,
identifier[uploaded] = keyword[True] , identifier[purchased] = keyword[True] ):
literal[string]
keyword[if] keyword[not] identifier[uploaded] keyword[and] keyword[not] identifier[purchased] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[logger] . identifier[info] ( literal[string] )
identifier[google_songs] =[]
keyword[if] identifier[uploaded] :
identifier[google_songs] += identifier[self] . identifier[api] . identifier[get_uploaded_songs] ()
keyword[if] identifier[purchased] :
keyword[for] identifier[song] keyword[in] identifier[self] . identifier[api] . identifier[get_purchased_songs] ():
keyword[if] identifier[song] keyword[not] keyword[in] identifier[google_songs] :
identifier[google_songs] . identifier[append] ( identifier[song] )
identifier[matched_songs] , identifier[filtered_songs] = identifier[filter_google_songs] (
identifier[google_songs] , identifier[include_filters] = identifier[include_filters] , identifier[exclude_filters] = identifier[exclude_filters] ,
identifier[all_includes] = identifier[all_includes] , identifier[all_excludes] = identifier[all_excludes]
)
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[len] ( identifier[filtered_songs] )))
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[len] ( identifier[matched_songs] )))
keyword[return] identifier[matched_songs] , identifier[filtered_songs] | def get_google_songs(self, include_filters=None, exclude_filters=None, all_includes=False, all_excludes=False, uploaded=True, purchased=True):
"""Create song list from user's Google Music library.
Parameters:
include_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Musicmanager client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values don't match any of the given patterns.
exclude_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Musicmanager client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values match any of the given patterns.
all_includes (bool): If ``True``, all include_filters criteria must match to include a song.
all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song.
uploaded (bool): Include uploaded songs. Default: ``True``.
purchased (bool): Include purchased songs. Default: ``True``.
Returns:
A list of Google Music song dicts matching criteria and
a list of Google Music song dicts filtered out using filter criteria.
"""
if not uploaded and (not purchased):
raise ValueError('One or both of uploaded/purchased parameters must be True.') # depends on [control=['if'], data=[]]
logger.info('Loading Google Music songs...')
google_songs = []
if uploaded:
google_songs += self.api.get_uploaded_songs() # depends on [control=['if'], data=[]]
if purchased:
for song in self.api.get_purchased_songs():
if song not in google_songs:
google_songs.append(song) # depends on [control=['if'], data=['song', 'google_songs']] # depends on [control=['for'], data=['song']] # depends on [control=['if'], data=[]]
(matched_songs, filtered_songs) = filter_google_songs(google_songs, include_filters=include_filters, exclude_filters=exclude_filters, all_includes=all_includes, all_excludes=all_excludes)
logger.info('Filtered {0} Google Music songs'.format(len(filtered_songs)))
logger.info('Loaded {0} Google Music songs'.format(len(matched_songs)))
return (matched_songs, filtered_songs) |
def get_symbol(num_classes=20, nms_thresh=0.5, force_suppress=False,
               nms_topk=400, **kwargs):
    """
    Build the SSD detection network on the reduced VGG-16 backbone.

    Takes the training symbol, extracts its class/location predictions and
    anchor boxes, converts class scores to probabilities, and appends a
    MultiBoxDetection output with non-maximum suppression.

    Parameters:
    ----------
    num_classes: int
        number of object classes not including background
    nms_thresh : float
        threshold of overlap for non-maximum suppression
    force_suppress : boolean
        whether suppress different class objects
    nms_topk : int
        apply NMS to top K detections

    Returns:
    ----------
    mx.Symbol
    """
    net = get_symbol_train(num_classes)
    internals = net.get_internals()
    cls_preds = internals["multibox_cls_pred_output"]
    loc_preds = internals["multibox_loc_pred_output"]
    anchors = internals["multibox_anchors_output"]
    cls_prob = mx.symbol.softmax(data=cls_preds, axis=1, name='cls_prob')
    return mx.symbol.contrib.MultiBoxDetection(
        cls_prob, loc_preds, anchors,
        name="detection",
        nms_threshold=nms_thresh,
        force_suppress=force_suppress,
        variances=(0.1, 0.1, 0.2, 0.2),
        nms_topk=nms_topk)
constant[
Single-shot multi-box detection with VGG 16 layers ConvNet
This is a modified version, with fc6/fc7 layers replaced by conv layers
And the network is slightly smaller than original VGG 16 network
This is the detection network
Parameters:
----------
num_classes: int
number of object classes not including background
nms_thresh : float
threshold of overlap for non-maximum suppression
force_suppress : boolean
whether suppress different class objects
nms_topk : int
apply NMS to top K detections
Returns:
----------
mx.Symbol
]
variable[net] assign[=] call[name[get_symbol_train], parameter[name[num_classes]]]
variable[cls_preds] assign[=] call[call[name[net].get_internals, parameter[]]][constant[multibox_cls_pred_output]]
variable[loc_preds] assign[=] call[call[name[net].get_internals, parameter[]]][constant[multibox_loc_pred_output]]
variable[anchor_boxes] assign[=] call[call[name[net].get_internals, parameter[]]][constant[multibox_anchors_output]]
variable[cls_prob] assign[=] call[name[mx].symbol.softmax, parameter[]]
variable[out] assign[=] call[name[mx].symbol.contrib.MultiBoxDetection, parameter[<ast.Starred object at 0x7da1b1f221d0>]]
return[name[out]] | keyword[def] identifier[get_symbol] ( identifier[num_classes] = literal[int] , identifier[nms_thresh] = literal[int] , identifier[force_suppress] = keyword[False] ,
identifier[nms_topk] = literal[int] ,** identifier[kwargs] ):
literal[string]
identifier[net] = identifier[get_symbol_train] ( identifier[num_classes] )
identifier[cls_preds] = identifier[net] . identifier[get_internals] ()[ literal[string] ]
identifier[loc_preds] = identifier[net] . identifier[get_internals] ()[ literal[string] ]
identifier[anchor_boxes] = identifier[net] . identifier[get_internals] ()[ literal[string] ]
identifier[cls_prob] = identifier[mx] . identifier[symbol] . identifier[softmax] ( identifier[data] = identifier[cls_preds] , identifier[axis] = literal[int] , identifier[name] = literal[string] )
identifier[out] = identifier[mx] . identifier[symbol] . identifier[contrib] . identifier[MultiBoxDetection] (*[ identifier[cls_prob] , identifier[loc_preds] , identifier[anchor_boxes] ], identifier[name] = literal[string] , identifier[nms_threshold] = identifier[nms_thresh] , identifier[force_suppress] = identifier[force_suppress] ,
identifier[variances] =( literal[int] , literal[int] , literal[int] , literal[int] ), identifier[nms_topk] = identifier[nms_topk] )
keyword[return] identifier[out] | def get_symbol(num_classes=20, nms_thresh=0.5, force_suppress=False, nms_topk=400, **kwargs):
"""
Single-shot multi-box detection with VGG 16 layers ConvNet
This is a modified version, with fc6/fc7 layers replaced by conv layers
And the network is slightly smaller than original VGG 16 network
This is the detection network
Parameters:
----------
num_classes: int
number of object classes not including background
nms_thresh : float
threshold of overlap for non-maximum suppression
force_suppress : boolean
whether suppress different class objects
nms_topk : int
apply NMS to top K detections
Returns:
----------
mx.Symbol
"""
net = get_symbol_train(num_classes)
cls_preds = net.get_internals()['multibox_cls_pred_output']
loc_preds = net.get_internals()['multibox_loc_pred_output']
anchor_boxes = net.get_internals()['multibox_anchors_output']
cls_prob = mx.symbol.softmax(data=cls_preds, axis=1, name='cls_prob')
out = mx.symbol.contrib.MultiBoxDetection(*[cls_prob, loc_preds, anchor_boxes], name='detection', nms_threshold=nms_thresh, force_suppress=force_suppress, variances=(0.1, 0.1, 0.2, 0.2), nms_topk=nms_topk)
return out |
def getHostDetailsByMACAddress(self, macAddress, lanInterfaceId=1, timeout=1):
    """Look up a LAN host by its MAC address and return its details.

    :param str macAddress: MAC address in the form ``38:C9:86:26:7E:38``; be aware that
        the MAC address might be case sensitive, depending on the router
    :param int lanInterfaceId: the id of the LAN interface
    :param float timeout: the timeout to wait for the action to be executed
    :return: the host details if found, otherwise an exception is raised
    :rtype: HostDetails
    """
    serviceNamespace = "{0}{1}".format(
        Lan.getServiceType("getHostDetailsByMACAddress"), lanInterfaceId)
    controlUrl = self.getControlURL(serviceNamespace)
    rawResults = self.execute(controlUrl, serviceNamespace, "GetSpecificHostEntry",
                              timeout=timeout, NewMACAddress=macAddress)
    return HostDetails(rawResults, macAddress=macAddress)
constant[Get host details for a host specified by its MAC address.
:param str macAddress: MAC address in the form ``38:C9:86:26:7E:38``; be aware that the MAC address might
be case sensitive, depending on the router
:param int lanInterfaceId: the id of the LAN interface
:param float timeout: the timeout to wait for the action to be executed
:return: return the host details if found otherwise an Exception will be raised
:rtype: HostDetails
]
variable[namespace] assign[=] binary_operation[call[name[Lan].getServiceType, parameter[constant[getHostDetailsByMACAddress]]] + call[name[str], parameter[name[lanInterfaceId]]]]
variable[uri] assign[=] call[name[self].getControlURL, parameter[name[namespace]]]
variable[results] assign[=] call[name[self].execute, parameter[name[uri], name[namespace], constant[GetSpecificHostEntry]]]
return[call[name[HostDetails], parameter[name[results]]]] | keyword[def] identifier[getHostDetailsByMACAddress] ( identifier[self] , identifier[macAddress] , identifier[lanInterfaceId] = literal[int] , identifier[timeout] = literal[int] ):
literal[string]
identifier[namespace] = identifier[Lan] . identifier[getServiceType] ( literal[string] )+ identifier[str] ( identifier[lanInterfaceId] )
identifier[uri] = identifier[self] . identifier[getControlURL] ( identifier[namespace] )
identifier[results] = identifier[self] . identifier[execute] ( identifier[uri] , identifier[namespace] , literal[string] , identifier[timeout] = identifier[timeout] , identifier[NewMACAddress] = identifier[macAddress] )
keyword[return] identifier[HostDetails] ( identifier[results] , identifier[macAddress] = identifier[macAddress] ) | def getHostDetailsByMACAddress(self, macAddress, lanInterfaceId=1, timeout=1):
"""Get host details for a host specified by its MAC address.
:param str macAddress: MAC address in the form ``38:C9:86:26:7E:38``; be aware that the MAC address might
be case sensitive, depending on the router
:param int lanInterfaceId: the id of the LAN interface
:param float timeout: the timeout to wait for the action to be executed
:return: return the host details if found otherwise an Exception will be raised
:rtype: HostDetails
"""
namespace = Lan.getServiceType('getHostDetailsByMACAddress') + str(lanInterfaceId)
uri = self.getControlURL(namespace)
results = self.execute(uri, namespace, 'GetSpecificHostEntry', timeout=timeout, NewMACAddress=macAddress)
return HostDetails(results, macAddress=macAddress) |
def function(x, y, amp, a_x, a_y, center_x, center_y):
    """Return a torus profile: an ellipse of constant surface brightness.

    The total flux ``amp`` is spread uniformly over the ellipse with
    semi-axes ``a_x`` and ``a_y`` centred on ``(center_x, center_y)``;
    points outside the ellipse evaluate to zero.
    """
    dx = (x - center_x) / a_x
    dy = (y - center_y) / a_y
    area = np.pi * a_x * a_y
    # Indicator of the elliptical region: 1 inside, 0 outside.
    inside = np.zeros_like(x)
    inside[dx ** 2 + dy ** 2 <= 1] = 1
    return amp / area * inside
constant[
returns torus (ellipse with constant surface brightnes) profile
]
variable[x_shift] assign[=] binary_operation[name[x] - name[center_x]]
variable[y_shift] assign[=] binary_operation[name[y] - name[center_y]]
variable[A] assign[=] binary_operation[binary_operation[name[np].pi * name[a_x]] * name[a_y]]
variable[dist] assign[=] binary_operation[binary_operation[binary_operation[name[x_shift] / name[a_x]] ** constant[2]] + binary_operation[binary_operation[name[y_shift] / name[a_y]] ** constant[2]]]
variable[torus] assign[=] call[name[np].zeros_like, parameter[name[x]]]
call[name[torus]][compare[name[dist] less_or_equal[<=] constant[1]]] assign[=] constant[1]
return[binary_operation[binary_operation[name[amp] / name[A]] * name[torus]]] | keyword[def] identifier[function] ( identifier[x] , identifier[y] , identifier[amp] , identifier[a_x] , identifier[a_y] , identifier[center_x] , identifier[center_y] ):
literal[string]
identifier[x_shift] = identifier[x] - identifier[center_x]
identifier[y_shift] = identifier[y] - identifier[center_y]
identifier[A] = identifier[np] . identifier[pi] * identifier[a_x] * identifier[a_y]
identifier[dist] =( identifier[x_shift] / identifier[a_x] )** literal[int] +( identifier[y_shift] / identifier[a_y] )** literal[int]
identifier[torus] = identifier[np] . identifier[zeros_like] ( identifier[x] )
identifier[torus] [ identifier[dist] <= literal[int] ]= literal[int]
keyword[return] identifier[amp] / identifier[A] * identifier[torus] | def function(x, y, amp, a_x, a_y, center_x, center_y):
"""
returns torus (ellipse with constant surface brightnes) profile
"""
x_shift = x - center_x
y_shift = y - center_y
A = np.pi * a_x * a_y
dist = (x_shift / a_x) ** 2 + (y_shift / a_y) ** 2
torus = np.zeros_like(x)
torus[dist <= 1] = 1
return amp / A * torus |
def ask(question, escape=True):
    """Prompt the user with *question* and return the decoded answer.

    :param question: prompt string shown to the user
    :param escape: when True, backslash-escape double quotes in the
        answer so it can be embedded in a quoted string
    :return: the answer decoded as UTF text
    """
    answer = raw_input(question)
    if escape:
        # str.replace returns a new string; the previous code discarded
        # the result, so escaping silently never happened.
        answer = answer.replace('"', '\\"')
    return answer.decode('utf')
constant[Return the answer]
variable[answer] assign[=] call[name[raw_input], parameter[name[question]]]
if name[escape] begin[:]
call[name[answer].replace, parameter[constant["], constant[\"]]]
return[call[name[answer].decode, parameter[constant[utf]]]] | keyword[def] identifier[ask] ( identifier[question] , identifier[escape] = keyword[True] ):
literal[string]
identifier[answer] = identifier[raw_input] ( identifier[question] )
keyword[if] identifier[escape] :
identifier[answer] . identifier[replace] ( literal[string] , literal[string] )
keyword[return] identifier[answer] . identifier[decode] ( literal[string] ) | def ask(question, escape=True):
"""Return the answer"""
answer = raw_input(question)
if escape:
answer.replace('"', '\\"') # depends on [control=['if'], data=[]]
return answer.decode('utf') |
def get_list(self,
             key,
             is_optional=False,
             is_secret=False,
             is_local=False,
             default=None,
             options=None):
    """
    Get the value corresponding to the key, converting comma separated
    values to a list.

    Args:
        key: the dict key.
        is_optional: To raise an error if key was not found.
        is_secret: If the key is a secret.
        is_local: If the key is a local to this service.
        default: default value if is_optional is True.
        options: list/tuple if provided, the value must be one of these values.

    Returns:
        `str`: value corresponding to the key.
    """

    def _to_list(raw):
        # Split on commas, trim whitespace, and drop empty entries.
        return [item for item in (part.strip() for part in raw.split(','))
                if item]

    return self._get_typed_value(key=key,
                                 target_type=list,
                                 type_convert=_to_list,
                                 is_optional=is_optional,
                                 is_secret=is_secret,
                                 is_local=is_local,
                                 default=default,
                                 options=options)
constant[
Get a the value corresponding to the key and converts comma separated values to a list.
Args:
key: the dict key.
is_optional: To raise an error if key was not found.
is_secret: If the key is a secret.
is_local: If the key is a local to this service.
default: default value if is_optional is True.
options: list/tuple if provided, the value must be one of these values.
Returns:
`str`: value corresponding to the key.
]
def function[parse_list, parameter[v]]:
variable[parts] assign[=] call[name[v].split, parameter[constant[,]]]
variable[results] assign[=] list[[]]
for taget[name[part]] in starred[name[parts]] begin[:]
variable[part] assign[=] call[name[part].strip, parameter[]]
if name[part] begin[:]
call[name[results].append, parameter[name[part]]]
return[name[results]]
return[call[name[self]._get_typed_value, parameter[]]] | keyword[def] identifier[get_list] ( identifier[self] ,
identifier[key] ,
identifier[is_optional] = keyword[False] ,
identifier[is_secret] = keyword[False] ,
identifier[is_local] = keyword[False] ,
identifier[default] = keyword[None] ,
identifier[options] = keyword[None] ):
literal[string]
keyword[def] identifier[parse_list] ( identifier[v] ):
identifier[parts] = identifier[v] . identifier[split] ( literal[string] )
identifier[results] =[]
keyword[for] identifier[part] keyword[in] identifier[parts] :
identifier[part] = identifier[part] . identifier[strip] ()
keyword[if] identifier[part] :
identifier[results] . identifier[append] ( identifier[part] )
keyword[return] identifier[results]
keyword[return] identifier[self] . identifier[_get_typed_value] ( identifier[key] = identifier[key] ,
identifier[target_type] = identifier[list] ,
identifier[type_convert] = identifier[parse_list] ,
identifier[is_optional] = identifier[is_optional] ,
identifier[is_secret] = identifier[is_secret] ,
identifier[is_local] = identifier[is_local] ,
identifier[default] = identifier[default] ,
identifier[options] = identifier[options] ) | def get_list(self, key, is_optional=False, is_secret=False, is_local=False, default=None, options=None):
"""
Get a the value corresponding to the key and converts comma separated values to a list.
Args:
key: the dict key.
is_optional: To raise an error if key was not found.
is_secret: If the key is a secret.
is_local: If the key is a local to this service.
default: default value if is_optional is True.
options: list/tuple if provided, the value must be one of these values.
Returns:
`str`: value corresponding to the key.
"""
def parse_list(v):
parts = v.split(',')
results = []
for part in parts:
part = part.strip()
if part:
results.append(part) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['part']]
return results
return self._get_typed_value(key=key, target_type=list, type_convert=parse_list, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options) |
def run_spider(spider_cls, **kwargs):
    """Run a spider inside the reactor and return the scraped items (by default).

    Parameters
    ----------
    spider_cls : scrapy.Spider
        A spider class to run.
    capture_items : bool (default: True)
        If enabled, the scraped items are captured and returned.
    return_crawler : bool (default: False)
        If enabled, the crawler instance is returned. If ``capture_items`` is
        enabled, the scraped items is collected in ``crawler.items``.
    settings : dict, optional
        Custom crawler settings.
    timeout : int, (default: DEFAULT_TIMEOUT)
        Result wait timeout.

    Returns
    -------
    out : list or scrapy.crawler.Crawler instance
        The scraped items by default or the crawler instance if
        ``return_crawler`` is ``True``.

    Raises
    ------
    crochet.TimeoutError
    """
    # ``timeout`` is consumed here; everything else is forwarded to the
    # reactor-side runner.
    wait_timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT)
    return wait_for(wait_timeout, _run_spider_in_reactor, spider_cls, **kwargs)
constant[Runs a spider and returns the scraped items (by default).
Parameters
----------
spider_cls : scrapy.Spider
A spider class to run.
capture_items : bool (default: True)
If enabled, the scraped items are captured and returned.
return_crawler : bool (default: False)
If enabled, the crawler instance is returned. If ``capture_items`` is
enabled, the scraped items is collected in ``crawler.items``.
settings : dict, optional
Custom crawler settings.
timeout : int, (default: DEFAULT_TIMEOUT)
Result wait timeout.
Returns
-------
out : list or scrapy.crawler.Crawler instance
The scraped items by default or the crawler instance if
``return_crawler`` is ``True``.
Raises
------
crochet.TimeoutError
]
variable[timeout] assign[=] call[name[kwargs].pop, parameter[constant[timeout], name[DEFAULT_TIMEOUT]]]
return[call[name[wait_for], parameter[name[timeout], name[_run_spider_in_reactor], name[spider_cls]]]] | keyword[def] identifier[run_spider] ( identifier[spider_cls] ,** identifier[kwargs] ):
literal[string]
identifier[timeout] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[DEFAULT_TIMEOUT] )
keyword[return] identifier[wait_for] ( identifier[timeout] , identifier[_run_spider_in_reactor] , identifier[spider_cls] ,** identifier[kwargs] ) | def run_spider(spider_cls, **kwargs):
"""Runs a spider and returns the scraped items (by default).
Parameters
----------
spider_cls : scrapy.Spider
A spider class to run.
capture_items : bool (default: True)
If enabled, the scraped items are captured and returned.
return_crawler : bool (default: False)
If enabled, the crawler instance is returned. If ``capture_items`` is
enabled, the scraped items is collected in ``crawler.items``.
settings : dict, optional
Custom crawler settings.
timeout : int, (default: DEFAULT_TIMEOUT)
Result wait timeout.
Returns
-------
out : list or scrapy.crawler.Crawler instance
The scraped items by default or the crawler instance if
``return_crawler`` is ``True``.
Raises
------
crochet.TimeoutError
"""
timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT)
return wait_for(timeout, _run_spider_in_reactor, spider_cls, **kwargs) |
def get_used_files():
    """Return the set of file paths held open by running ``scanpy`` processes.

    Iterates over all system processes, keeps those named ``scanpy``, and
    collects the paths of their open files. Processes that terminate while
    being examined are skipped.
    """
    import psutil
    filenames = []
    for proc in psutil.process_iter():
        try:
            # Both proc.name() and proc.open_files() can raise
            # NoSuchProcess if the process exits between enumeration and
            # inspection; the original code only guarded open_files().
            if proc.name() != 'scanpy':
                continue
            for nt in proc.open_files():
                filenames.append(nt.path)
        except psutil.NoSuchProcess:
            pass
    return set(filenames)
constant[Get files used by processes with name scanpy.]
import module[psutil]
variable[loop_over_scanpy_processes] assign[=] <ast.GeneratorExp object at 0x7da207f02800>
variable[filenames] assign[=] list[[]]
for taget[name[proc]] in starred[name[loop_over_scanpy_processes]] begin[:]
<ast.Try object at 0x7da2054a4490>
return[call[name[set], parameter[name[filenames]]]] | keyword[def] identifier[get_used_files] ():
literal[string]
keyword[import] identifier[psutil]
identifier[loop_over_scanpy_processes] =( identifier[proc] keyword[for] identifier[proc] keyword[in] identifier[psutil] . identifier[process_iter] ()
keyword[if] identifier[proc] . identifier[name] ()== literal[string] )
identifier[filenames] =[]
keyword[for] identifier[proc] keyword[in] identifier[loop_over_scanpy_processes] :
keyword[try] :
identifier[flist] = identifier[proc] . identifier[open_files] ()
keyword[for] identifier[nt] keyword[in] identifier[flist] :
identifier[filenames] . identifier[append] ( identifier[nt] . identifier[path] )
keyword[except] identifier[psutil] . identifier[NoSuchProcess] keyword[as] identifier[err] :
keyword[pass]
keyword[return] identifier[set] ( identifier[filenames] ) | def get_used_files():
"""Get files used by processes with name scanpy."""
import psutil
loop_over_scanpy_processes = (proc for proc in psutil.process_iter() if proc.name() == 'scanpy')
filenames = []
for proc in loop_over_scanpy_processes:
try:
flist = proc.open_files()
for nt in flist:
filenames.append(nt.path) # depends on [control=['for'], data=['nt']] # depends on [control=['try'], data=[]]
# This catches a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess as err:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['proc']]
return set(filenames) |
def conditional_http_tween_factory(handler, registry):
    """
    Tween factory that enables Pyramid's conditional-response machinery
    (ETag / Last-Modified handling) for cachable request paths.
    """
    settings = getattr(registry, 'settings', {})

    excluded_paths = []
    if 'not.cachable.list' in settings:
        excluded_paths = settings.get('not.cachable.list').split()

    def conditional_http_tween(request):
        response = handler(request)

        # Paths listed in the settings are left untouched.
        if request.path in excluded_paths:
            return response

        # A Last-Modified header alone is enough to turn on conditional
        # response processing.
        if response.last_modified is not None:
            response.conditional_response = True

        if response.etag is not None:
            # The view supplied an explicit ETag header.
            response.conditional_response = True
        elif (isinstance(response.app_iter, Sequence)
              and len(response.app_iter) == 1
              and response.body is not None):
            # Buffered single-chunk response: we can generate the ETag
            # ourselves.
            response.conditional_response = True
            response.md5_etag()

        return response

    return conditional_http_tween
constant[
Tween that adds ETag headers and tells Pyramid to enable
conditional responses where appropriate.
]
variable[settings] assign[=] <ast.IfExp object at 0x7da1b1803670>
variable[not_cacheble_list] assign[=] list[[]]
if compare[constant[not.cachable.list] in name[settings]] begin[:]
variable[not_cacheble_list] assign[=] call[call[name[settings].get, parameter[constant[not.cachable.list]]].split, parameter[]]
def function[conditional_http_tween, parameter[request]]:
variable[response] assign[=] call[name[handler], parameter[name[request]]]
if compare[name[request].path <ast.NotIn object at 0x7da2590d7190> name[not_cacheble_list]] begin[:]
if compare[name[response].last_modified is_not constant[None]] begin[:]
name[response].conditional_response assign[=] constant[True]
if compare[name[response].etag is_not constant[None]] begin[:]
name[response].conditional_response assign[=] constant[True]
return[name[response]]
return[name[conditional_http_tween]] | keyword[def] identifier[conditional_http_tween_factory] ( identifier[handler] , identifier[registry] ):
literal[string]
identifier[settings] = identifier[registry] . identifier[settings] keyword[if] identifier[hasattr] ( identifier[registry] , literal[string] ) keyword[else] {}
identifier[not_cacheble_list] =[]
keyword[if] literal[string] keyword[in] identifier[settings] :
identifier[not_cacheble_list] = identifier[settings] . identifier[get] ( literal[string] ). identifier[split] ()
keyword[def] identifier[conditional_http_tween] ( identifier[request] ):
identifier[response] = identifier[handler] ( identifier[request] )
keyword[if] identifier[request] . identifier[path] keyword[not] keyword[in] identifier[not_cacheble_list] :
keyword[if] identifier[response] . identifier[last_modified] keyword[is] keyword[not] keyword[None] :
identifier[response] . identifier[conditional_response] = keyword[True]
keyword[if] identifier[response] . identifier[etag] keyword[is] keyword[not] keyword[None] :
identifier[response] . identifier[conditional_response] = keyword[True]
keyword[elif] ( identifier[isinstance] ( identifier[response] . identifier[app_iter] , identifier[Sequence] ) keyword[and]
identifier[len] ( identifier[response] . identifier[app_iter] )== literal[int] ) keyword[and] identifier[response] . identifier[body] keyword[is] keyword[not] keyword[None] :
identifier[response] . identifier[conditional_response] = keyword[True]
identifier[response] . identifier[md5_etag] ()
keyword[return] identifier[response]
keyword[return] identifier[conditional_http_tween] | def conditional_http_tween_factory(handler, registry):
"""
Tween that adds ETag headers and tells Pyramid to enable
conditional responses where appropriate.
"""
settings = registry.settings if hasattr(registry, 'settings') else {}
not_cacheble_list = []
if 'not.cachable.list' in settings:
not_cacheble_list = settings.get('not.cachable.list').split() # depends on [control=['if'], data=['settings']]
def conditional_http_tween(request):
response = handler(request)
if request.path not in not_cacheble_list:
# If the Last-Modified header has been set, we want to enable the
# conditional response processing.
if response.last_modified is not None:
response.conditional_response = True # depends on [control=['if'], data=[]]
# We want to only enable the conditional machinery if either we
# were given an explicit ETag header by the view or we have a
# buffered response and can generate the ETag header ourself.
if response.etag is not None:
response.conditional_response = True # depends on [control=['if'], data=[]]
elif (isinstance(response.app_iter, Sequence) and len(response.app_iter) == 1) and response.body is not None:
response.conditional_response = True
response.md5_etag() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return response
return conditional_http_tween |
def open(filepath, edit_local=False):
    """Open any wt5 file, returning the top-level object (data or collection).

    Parameters
    ----------
    filepath : path-like
        Path to file.
        Can be either a local or remote file (http/ftp).
        Can be compressed with gz/bz2, decompression based on file name.
    edit_local : boolean (optional)
        If True, the file itself will be opened for editing. Otherwise, a
        copy will be created. Default is False.

    Returns
    -------
    WrightTools Collection or Data
        Root-level object in file.
    """
    filepath = os.fspath(filepath)
    ds = np.DataSource(None)
    if edit_local is False:
        # Work on a private temporary copy so the original file is untouched.
        tf = tempfile.mkstemp(prefix="", suffix=".wt5")
        with _open(tf[1], "w+b") as tff:
            # np.DataSource transparently handles remote URLs and gz/bz2
            # decompression based on the file name.
            with ds.open(str(filepath), "rb") as f:
                tff.write(f.read())
        filepath = tf[1]
    # Peek at the root attributes only to decide which class to instantiate.
    # Open read-only and close immediately: the returned object re-opens the
    # file itself, and the previous lingering handle leaked the descriptor
    # (and could interfere with file locking on some platforms).
    with h5py.File(filepath, "r") as f:
        class_name = f["/"].attrs["class"]
        name = f["/"].attrs["name"]
    if class_name == "Data":
        obj = wt_data.Data(filepath=str(filepath), name=name, edit_local=True)
    elif class_name == "Collection":
        obj = wt_collection.Collection(filepath=str(filepath), name=name, edit_local=True)
    else:
        obj = wt_group.Group(filepath=str(filepath), name=name, edit_local=True)
    if edit_local is False:
        # Remember the temp file (fd, path) for cleanup and make sure the
        # object is closed when it is garbage collected.
        setattr(obj, "_tmpfile", tf)
        weakref.finalize(obj, obj.close)
    return obj
return obj | def function[open, parameter[filepath, edit_local]]:
constant[Open any wt5 file, returning the top-level object (data or collection).
Parameters
----------
filepath : path-like
Path to file.
Can be either a local or remote file (http/ftp).
Can be compressed with gz/bz2, decompression based on file name.
edit_local : boolean (optional)
If True, the file itself will be opened for editing. Otherwise, a
copy will be created. Default is False.
Returns
-------
WrightTools Collection or Data
Root-level object in file.
]
variable[filepath] assign[=] call[name[os].fspath, parameter[name[filepath]]]
variable[ds] assign[=] call[name[np].DataSource, parameter[constant[None]]]
if compare[name[edit_local] is constant[False]] begin[:]
variable[tf] assign[=] call[name[tempfile].mkstemp, parameter[]]
with call[name[_open], parameter[call[name[tf]][constant[1]], constant[w+b]]] begin[:]
with call[name[ds].open, parameter[call[name[str], parameter[name[filepath]]], constant[rb]]] begin[:]
call[name[tff].write, parameter[call[name[f].read, parameter[]]]]
variable[filepath] assign[=] call[name[tf]][constant[1]]
variable[f] assign[=] call[name[h5py].File, parameter[name[filepath]]]
variable[class_name] assign[=] call[call[name[f]][constant[/]].attrs][constant[class]]
variable[name] assign[=] call[call[name[f]][constant[/]].attrs][constant[name]]
if compare[name[class_name] equal[==] constant[Data]] begin[:]
variable[obj] assign[=] call[name[wt_data].Data, parameter[]]
if compare[name[edit_local] is constant[False]] begin[:]
call[name[setattr], parameter[name[obj], constant[_tmpfile], name[tf]]]
call[name[weakref].finalize, parameter[name[obj], name[obj].close]]
return[name[obj]] | keyword[def] identifier[open] ( identifier[filepath] , identifier[edit_local] = keyword[False] ):
literal[string]
identifier[filepath] = identifier[os] . identifier[fspath] ( identifier[filepath] )
identifier[ds] = identifier[np] . identifier[DataSource] ( keyword[None] )
keyword[if] identifier[edit_local] keyword[is] keyword[False] :
identifier[tf] = identifier[tempfile] . identifier[mkstemp] ( identifier[prefix] = literal[string] , identifier[suffix] = literal[string] )
keyword[with] identifier[_open] ( identifier[tf] [ literal[int] ], literal[string] ) keyword[as] identifier[tff] :
keyword[with] identifier[ds] . identifier[open] ( identifier[str] ( identifier[filepath] ), literal[string] ) keyword[as] identifier[f] :
identifier[tff] . identifier[write] ( identifier[f] . identifier[read] ())
identifier[filepath] = identifier[tf] [ literal[int] ]
identifier[f] = identifier[h5py] . identifier[File] ( identifier[filepath] )
identifier[class_name] = identifier[f] [ literal[string] ]. identifier[attrs] [ literal[string] ]
identifier[name] = identifier[f] [ literal[string] ]. identifier[attrs] [ literal[string] ]
keyword[if] identifier[class_name] == literal[string] :
identifier[obj] = identifier[wt_data] . identifier[Data] ( identifier[filepath] = identifier[str] ( identifier[filepath] ), identifier[name] = identifier[name] , identifier[edit_local] = keyword[True] )
keyword[elif] identifier[class_name] == literal[string] :
identifier[obj] = identifier[wt_collection] . identifier[Collection] ( identifier[filepath] = identifier[str] ( identifier[filepath] ), identifier[name] = identifier[name] , identifier[edit_local] = keyword[True] )
keyword[else] :
identifier[obj] = identifier[wt_group] . identifier[Group] ( identifier[filepath] = identifier[str] ( identifier[filepath] ), identifier[name] = identifier[name] , identifier[edit_local] = keyword[True] )
keyword[if] identifier[edit_local] keyword[is] keyword[False] :
identifier[setattr] ( identifier[obj] , literal[string] , identifier[tf] )
identifier[weakref] . identifier[finalize] ( identifier[obj] , identifier[obj] . identifier[close] )
keyword[return] identifier[obj] | def open(filepath, edit_local=False):
"""Open any wt5 file, returning the top-level object (data or collection).
Parameters
----------
filepath : path-like
Path to file.
Can be either a local or remote file (http/ftp).
Can be compressed with gz/bz2, decompression based on file name.
edit_local : boolean (optional)
If True, the file itself will be opened for editing. Otherwise, a
copy will be created. Default is False.
Returns
-------
WrightTools Collection or Data
Root-level object in file.
"""
filepath = os.fspath(filepath)
ds = np.DataSource(None)
if edit_local is False:
tf = tempfile.mkstemp(prefix='', suffix='.wt5')
with _open(tf[1], 'w+b') as tff:
with ds.open(str(filepath), 'rb') as f:
tff.write(f.read()) # depends on [control=['with'], data=['f']] # depends on [control=['with'], data=['tff']]
filepath = tf[1] # depends on [control=['if'], data=[]]
f = h5py.File(filepath)
class_name = f['/'].attrs['class']
name = f['/'].attrs['name']
if class_name == 'Data':
obj = wt_data.Data(filepath=str(filepath), name=name, edit_local=True) # depends on [control=['if'], data=[]]
elif class_name == 'Collection':
obj = wt_collection.Collection(filepath=str(filepath), name=name, edit_local=True) # depends on [control=['if'], data=[]]
else:
obj = wt_group.Group(filepath=str(filepath), name=name, edit_local=True)
if edit_local is False:
setattr(obj, '_tmpfile', tf)
weakref.finalize(obj, obj.close) # depends on [control=['if'], data=[]]
return obj |
def _rmv_deps_answer(self):
"""Remove dependencies answer
"""
if self.meta.remove_deps_answer in ["y", "Y"]:
remove_dep = self.meta.remove_deps_answer
else:
try:
remove_dep = raw_input(
"\nRemove dependencies (maybe used by "
"other packages) [y/N]? ")
print("")
except EOFError:
print("") # new line at exit
raise SystemExit()
return remove_dep | def function[_rmv_deps_answer, parameter[self]]:
constant[Remove dependencies answer
]
if compare[name[self].meta.remove_deps_answer in list[[<ast.Constant object at 0x7da18f721420>, <ast.Constant object at 0x7da18f720f10>]]] begin[:]
variable[remove_dep] assign[=] name[self].meta.remove_deps_answer
return[name[remove_dep]] | keyword[def] identifier[_rmv_deps_answer] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[meta] . identifier[remove_deps_answer] keyword[in] [ literal[string] , literal[string] ]:
identifier[remove_dep] = identifier[self] . identifier[meta] . identifier[remove_deps_answer]
keyword[else] :
keyword[try] :
identifier[remove_dep] = identifier[raw_input] (
literal[string]
literal[string] )
identifier[print] ( literal[string] )
keyword[except] identifier[EOFError] :
identifier[print] ( literal[string] )
keyword[raise] identifier[SystemExit] ()
keyword[return] identifier[remove_dep] | def _rmv_deps_answer(self):
"""Remove dependencies answer
"""
if self.meta.remove_deps_answer in ['y', 'Y']:
remove_dep = self.meta.remove_deps_answer # depends on [control=['if'], data=[]]
else:
try:
remove_dep = raw_input('\nRemove dependencies (maybe used by other packages) [y/N]? ')
print('') # depends on [control=['try'], data=[]]
except EOFError:
print('') # new line at exit
raise SystemExit() # depends on [control=['except'], data=[]]
return remove_dep |
def qteSplitApplet(self, applet: (QtmacsApplet, str)=None,
                   splitHoriz: bool=True,
                   windowObj: QtmacsWindow=None):
    """
    Reveal ``applet`` by splitting the space occupied by the
    current applet.
    If ``applet`` is already visible then the method does
    nothing. Furthermore, this method does not change the focus,
    ie. the currently active applet will remain active.
    If ``applet`` is **None** then the next invisible applet
    will be shown. If ``windowObj`` is **None** then the
    currently active window will be used.
    The ``applet`` parameter can either be an instance of
    ``QtmacsApplet`` or a string denoting an applet ID. In the
    latter case the ``qteGetAppletHandle`` method is used to fetch
    the respective applet instance.
    |Args|
    * ``applet`` (**QtmacsApplet**, **str**): the applet to reveal.
    * ``splitHoriz`` (**bool**): whether to split horizontally
      or vertically.
    * ``windowObj`` (**QtmacsWindow**): the window in which to
      reveal ``applet``.
    |Returns|
    * **bool**: if **True**, ``applet`` was revealed. Note: the early
      exit taken when no active window can be determined returns
      **None** rather than **False**.
    |Raises|
    * **QtmacsArgumentError** if at least one argument has an invalid type.
    """
    # If ``newAppObj`` was specified by its ID (ie. a string) then
    # fetch the associated ``QtmacsApplet`` instance. If
    # ``newAppObj`` is already an instance of ``QtmacsApplet``
    # then use it directly.
    if isinstance(applet, str):
        newAppObj = self.qteGetAppletHandle(applet)
    else:
        newAppObj = applet
    # Use the currently active window if none was specified.
    if windowObj is None:
        windowObj = self.qteActiveWindow()
        if windowObj is None:
            # NOTE(review): this branch returns None (not False), which
            # callers testing the documented bool value should be aware of.
            msg = 'Cannot determine the currently active window.'
            self.qteLogger.error(msg, stack_info=True)
            return
    # Convert ``splitHoriz`` to the respective Qt constant.
    if splitHoriz:
        splitOrientation = QtCore.Qt.Horizontal
    else:
        splitOrientation = QtCore.Qt.Vertical
    if newAppObj is None:
        # If no new applet was specified use the next available
        # invisible applet.
        newAppObj = self.qteNextApplet(skipVisible=True,
                                       skipInvisible=False)
    else:
        # Do nothing if the new applet is already visible.
        if newAppObj.qteIsVisible():
            return False
    # If we still have not found an applet then there are no
    # invisible applets left to show. Therefore, splitting makes
    # no sense.
    if newAppObj is None:
        self.qteLogger.warning('All applets are already visible.')
        return False
    # If the root splitter is empty then add the new applet and
    # return immediately.
    if windowObj.qteAppletSplitter.count() == 0:
        windowObj.qteAppletSplitter.qteAddWidget(newAppObj)
        windowObj.qteAppletSplitter.setOrientation(splitOrientation)
        return True
    # ------------------------------------------------------------
    # The root splitter contains at least one widget, if we got
    # this far.
    # ------------------------------------------------------------
    # Shorthand to last active applet in the current window. Query
    # this applet with qteNextApplet method because
    # self._qteActiveApplet may be a mini applet, and we are only
    # interested in genuine applets.
    curApp = self.qteNextApplet(numSkip=0, windowObj=windowObj)
    # Get a reference to the splitter in which the currently
    # active applet lives. This may be the root splitter, or one
    # of its child splitters.
    split = self._qteFindAppletInSplitter(
        curApp, windowObj.qteAppletSplitter)
    if split is None:
        msg = 'Active applet <b>{}</b> not in the layout.'
        msg = msg.format(curApp.qteAppletID())
        self.qteLogger.error(msg, stack_info=True)
        return False
    # If 'curApp' lives in the root splitter, and the root
    # splitter contains only a single element, then simply add the
    # new applet as the second element and return.
    if split is windowObj.qteAppletSplitter:
        if split.count() == 1:
            split.qteAddWidget(newAppObj)
            split.setOrientation(splitOrientation)
            return True
    # ------------------------------------------------------------
    # The splitter (root or not) contains two widgets, if we got
    # this far.
    # ------------------------------------------------------------
    # Determine the index of the applet inside the splitter.
    curAppIdx = split.indexOf(curApp)
    # Create a new splitter and populate it with 'curApp' and the
    # previously invisible ``newAppObj``. Then insert this new splitter at
    # the position where the old applet was taken from. Note: widgets are
    # inserted with ``qteAddWidget`` (because they are ``QtmacsApplet``
    # instances), whereas splitters are added with ``insertWidget``, NOT
    # ``qteInsertWidget``. The reason is that splitters do not require the
    # extra TLC necessary for applets in terms of how and where to show
    # them.
    newSplit = QtmacsSplitter(splitOrientation, windowObj)
    # Reparenting to None detaches ``curApp`` from its old splitter before
    # it is adopted by the freshly created one.
    curApp.setParent(None)
    newSplit.qteAddWidget(curApp)
    newSplit.qteAddWidget(newAppObj)
    split.insertWidget(curAppIdx, newSplit)
    # Adjust the size of two widgets in ``split`` (ie. ``newSplit`` and
    # whatever other widget) to take up equal space. The same adjusment is
    # made for ``newSplit``, but there the ``qteAddWidget`` methods have
    # already taken care of it.
    split.qteAdjustWidgetSizes()
    return True
constant[
Reveal ``applet`` by splitting the space occupied by the
current applet.
If ``applet`` is already visible then the method does
nothing. Furthermore, this method does not change the focus,
ie. the currently active applet will remain active.
If ``applet`` is **None** then the next invisible applet
will be shown. If ``windowObj`` is **None** then the
currently active window will be used.
The ``applet`` parameter can either be an instance of
``QtmacsApplet`` or a string denoting an applet ID. In the
latter case the ``qteGetAppletHandle`` method is used to fetch
the respective applet instance.
|Args|
* ``applet`` (**QtmacsApplet**, **str**): the applet to reveal.
* ``splitHoriz`` (**bool**): whether to split horizontally
or vertically.
* ``windowObj`` (**QtmacsWindow**): the window in which to
reveal ``applet``.
|Returns|
* **bool**: if **True**, ``applet`` was revealed.
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
]
if call[name[isinstance], parameter[name[applet], name[str]]] begin[:]
variable[newAppObj] assign[=] call[name[self].qteGetAppletHandle, parameter[name[applet]]]
if compare[name[windowObj] is constant[None]] begin[:]
variable[windowObj] assign[=] call[name[self].qteActiveWindow, parameter[]]
if compare[name[windowObj] is constant[None]] begin[:]
variable[msg] assign[=] constant[Cannot determine the currently active window.]
call[name[self].qteLogger.error, parameter[name[msg]]]
return[None]
if name[splitHoriz] begin[:]
variable[splitOrientation] assign[=] name[QtCore].Qt.Horizontal
if compare[name[newAppObj] is constant[None]] begin[:]
variable[newAppObj] assign[=] call[name[self].qteNextApplet, parameter[]]
if compare[name[newAppObj] is constant[None]] begin[:]
call[name[self].qteLogger.warning, parameter[constant[All applets are already visible.]]]
return[constant[False]]
if compare[call[name[windowObj].qteAppletSplitter.count, parameter[]] equal[==] constant[0]] begin[:]
call[name[windowObj].qteAppletSplitter.qteAddWidget, parameter[name[newAppObj]]]
call[name[windowObj].qteAppletSplitter.setOrientation, parameter[name[splitOrientation]]]
return[constant[True]]
variable[curApp] assign[=] call[name[self].qteNextApplet, parameter[]]
variable[split] assign[=] call[name[self]._qteFindAppletInSplitter, parameter[name[curApp], name[windowObj].qteAppletSplitter]]
if compare[name[split] is constant[None]] begin[:]
variable[msg] assign[=] constant[Active applet <b>{}</b> not in the layout.]
variable[msg] assign[=] call[name[msg].format, parameter[call[name[curApp].qteAppletID, parameter[]]]]
call[name[self].qteLogger.error, parameter[name[msg]]]
return[constant[False]]
if compare[name[split] is name[windowObj].qteAppletSplitter] begin[:]
if compare[call[name[split].count, parameter[]] equal[==] constant[1]] begin[:]
call[name[split].qteAddWidget, parameter[name[newAppObj]]]
call[name[split].setOrientation, parameter[name[splitOrientation]]]
return[constant[True]]
variable[curAppIdx] assign[=] call[name[split].indexOf, parameter[name[curApp]]]
variable[newSplit] assign[=] call[name[QtmacsSplitter], parameter[name[splitOrientation], name[windowObj]]]
call[name[curApp].setParent, parameter[constant[None]]]
call[name[newSplit].qteAddWidget, parameter[name[curApp]]]
call[name[newSplit].qteAddWidget, parameter[name[newAppObj]]]
call[name[split].insertWidget, parameter[name[curAppIdx], name[newSplit]]]
call[name[split].qteAdjustWidgetSizes, parameter[]]
return[constant[True]] | keyword[def] identifier[qteSplitApplet] ( identifier[self] , identifier[applet] :( identifier[QtmacsApplet] , identifier[str] )= keyword[None] ,
identifier[splitHoriz] : identifier[bool] = keyword[True] ,
identifier[windowObj] : identifier[QtmacsWindow] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[applet] , identifier[str] ):
identifier[newAppObj] = identifier[self] . identifier[qteGetAppletHandle] ( identifier[applet] )
keyword[else] :
identifier[newAppObj] = identifier[applet]
keyword[if] identifier[windowObj] keyword[is] keyword[None] :
identifier[windowObj] = identifier[self] . identifier[qteActiveWindow] ()
keyword[if] identifier[windowObj] keyword[is] keyword[None] :
identifier[msg] = literal[string]
identifier[self] . identifier[qteLogger] . identifier[error] ( identifier[msg] , identifier[stack_info] = keyword[True] )
keyword[return]
keyword[if] identifier[splitHoriz] :
identifier[splitOrientation] = identifier[QtCore] . identifier[Qt] . identifier[Horizontal]
keyword[else] :
identifier[splitOrientation] = identifier[QtCore] . identifier[Qt] . identifier[Vertical]
keyword[if] identifier[newAppObj] keyword[is] keyword[None] :
identifier[newAppObj] = identifier[self] . identifier[qteNextApplet] ( identifier[skipVisible] = keyword[True] ,
identifier[skipInvisible] = keyword[False] )
keyword[else] :
keyword[if] identifier[newAppObj] . identifier[qteIsVisible] ():
keyword[return] keyword[False]
keyword[if] identifier[newAppObj] keyword[is] keyword[None] :
identifier[self] . identifier[qteLogger] . identifier[warning] ( literal[string] )
keyword[return] keyword[False]
keyword[if] identifier[windowObj] . identifier[qteAppletSplitter] . identifier[count] ()== literal[int] :
identifier[windowObj] . identifier[qteAppletSplitter] . identifier[qteAddWidget] ( identifier[newAppObj] )
identifier[windowObj] . identifier[qteAppletSplitter] . identifier[setOrientation] ( identifier[splitOrientation] )
keyword[return] keyword[True]
identifier[curApp] = identifier[self] . identifier[qteNextApplet] ( identifier[numSkip] = literal[int] , identifier[windowObj] = identifier[windowObj] )
identifier[split] = identifier[self] . identifier[_qteFindAppletInSplitter] (
identifier[curApp] , identifier[windowObj] . identifier[qteAppletSplitter] )
keyword[if] identifier[split] keyword[is] keyword[None] :
identifier[msg] = literal[string]
identifier[msg] = identifier[msg] . identifier[format] ( identifier[curApp] . identifier[qteAppletID] ())
identifier[self] . identifier[qteLogger] . identifier[error] ( identifier[msg] , identifier[stack_info] = keyword[True] )
keyword[return] keyword[False]
keyword[if] identifier[split] keyword[is] identifier[windowObj] . identifier[qteAppletSplitter] :
keyword[if] identifier[split] . identifier[count] ()== literal[int] :
identifier[split] . identifier[qteAddWidget] ( identifier[newAppObj] )
identifier[split] . identifier[setOrientation] ( identifier[splitOrientation] )
keyword[return] keyword[True]
identifier[curAppIdx] = identifier[split] . identifier[indexOf] ( identifier[curApp] )
identifier[newSplit] = identifier[QtmacsSplitter] ( identifier[splitOrientation] , identifier[windowObj] )
identifier[curApp] . identifier[setParent] ( keyword[None] )
identifier[newSplit] . identifier[qteAddWidget] ( identifier[curApp] )
identifier[newSplit] . identifier[qteAddWidget] ( identifier[newAppObj] )
identifier[split] . identifier[insertWidget] ( identifier[curAppIdx] , identifier[newSplit] )
identifier[split] . identifier[qteAdjustWidgetSizes] ()
keyword[return] keyword[True] | def qteSplitApplet(self, applet: (QtmacsApplet, str)=None, splitHoriz: bool=True, windowObj: QtmacsWindow=None):
"""
Reveal ``applet`` by splitting the space occupied by the
current applet.
If ``applet`` is already visible then the method does
nothing. Furthermore, this method does not change the focus,
ie. the currently active applet will remain active.
If ``applet`` is **None** then the next invisible applet
will be shown. If ``windowObj`` is **None** then the
currently active window will be used.
The ``applet`` parameter can either be an instance of
``QtmacsApplet`` or a string denoting an applet ID. In the
latter case the ``qteGetAppletHandle`` method is used to fetch
the respective applet instance.
|Args|
* ``applet`` (**QtmacsApplet**, **str**): the applet to reveal.
* ``splitHoriz`` (**bool**): whether to split horizontally
or vertically.
* ``windowObj`` (**QtmacsWindow**): the window in which to
reveal ``applet``.
|Returns|
* **bool**: if **True**, ``applet`` was revealed.
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
# If ``newAppObj`` was specified by its ID (ie. a string) then
# fetch the associated ``QtmacsApplet`` instance. If
# ``newAppObj`` is already an instance of ``QtmacsApplet``
# then use it directly.
if isinstance(applet, str):
newAppObj = self.qteGetAppletHandle(applet) # depends on [control=['if'], data=[]]
else:
newAppObj = applet
# Use the currently active window if none was specified.
if windowObj is None:
windowObj = self.qteActiveWindow()
if windowObj is None:
msg = 'Cannot determine the currently active window.'
self.qteLogger.error(msg, stack_info=True)
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['windowObj']]
# Convert ``splitHoriz`` to the respective Qt constant.
if splitHoriz:
splitOrientation = QtCore.Qt.Horizontal # depends on [control=['if'], data=[]]
else:
splitOrientation = QtCore.Qt.Vertical
if newAppObj is None:
# If no new applet was specified use the next available
# invisible applet.
newAppObj = self.qteNextApplet(skipVisible=True, skipInvisible=False) # depends on [control=['if'], data=['newAppObj']]
# Do nothing if the new applet is already visible.
elif newAppObj.qteIsVisible():
return False # depends on [control=['if'], data=[]]
# If we still have not found an applet then there are no
# invisible applets left to show. Therefore, splitting makes
# no sense.
if newAppObj is None:
self.qteLogger.warning('All applets are already visible.')
return False # depends on [control=['if'], data=[]]
# If the root splitter is empty then add the new applet and
# return immediately.
if windowObj.qteAppletSplitter.count() == 0:
windowObj.qteAppletSplitter.qteAddWidget(newAppObj)
windowObj.qteAppletSplitter.setOrientation(splitOrientation)
return True # depends on [control=['if'], data=[]]
# ------------------------------------------------------------
# The root splitter contains at least one widget, if we got
# this far.
# ------------------------------------------------------------
# Shorthand to last active applet in the current window. Query
# this applet with qteNextApplet method because
# self._qteActiveApplet may be a mini applet, and we are only
# interested in genuine applets.
curApp = self.qteNextApplet(numSkip=0, windowObj=windowObj)
# Get a reference to the splitter in which the currently
# active applet lives. This may be the root splitter, or one
# of its child splitters.
split = self._qteFindAppletInSplitter(curApp, windowObj.qteAppletSplitter)
if split is None:
msg = 'Active applet <b>{}</b> not in the layout.'
msg = msg.format(curApp.qteAppletID())
self.qteLogger.error(msg, stack_info=True)
return False # depends on [control=['if'], data=[]]
# If 'curApp' lives in the root splitter, and the root
# splitter contains only a single element, then simply add the
# new applet as the second element and return.
if split is windowObj.qteAppletSplitter:
if split.count() == 1:
split.qteAddWidget(newAppObj)
split.setOrientation(splitOrientation)
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['split']]
# ------------------------------------------------------------
# The splitter (root or not) contains two widgets, if we got
# this far.
# ------------------------------------------------------------
# Determine the index of the applet inside the splitter.
curAppIdx = split.indexOf(curApp)
# Create a new splitter and populate it with 'curApp' and the
# previously invisible ``newAppObj``. Then insert this new splitter at
# the position where the old applet was taken from. Note: widgets are
# inserted with ``qteAddWidget`` (because they are ``QtmacsApplet``
# instances), whereas splitters are added with ``insertWidget``, NOT
# ``qteInsertWidget``. The reason is that splitters do not require the
# extra TLC necessary for applets in terms of how and where to show
# them.
newSplit = QtmacsSplitter(splitOrientation, windowObj)
curApp.setParent(None)
newSplit.qteAddWidget(curApp)
newSplit.qteAddWidget(newAppObj)
split.insertWidget(curAppIdx, newSplit)
# Adjust the size of two widgets in ``split`` (ie. ``newSplit`` and
# whatever other widget) to take up equal space. The same adjusment is
# made for ``newSplit``, but there the ``qteAddWidget`` methods have
# already taken care of it.
split.qteAdjustWidgetSizes()
return True |
def buildhtml(self):
    """Assemble the complete HTML page for this chart.

    ``buildcontent`` fills ``self.htmlcontent`` with the chart markup
    (html header with css/js plus the nvd3 javascript).  That markup is
    preserved in ``self.content`` and ``self.htmlcontent`` is then
    replaced by the fully rendered page template.
    """
    self.buildcontent()
    chart_markup = self.htmlcontent
    self.content = chart_markup
    self.htmlcontent = self.template_page_nvd3.render(chart=self)
constant[Build the HTML page
Create the htmlheader with css / js
Create html page
Add Js code for nvd3
]
call[name[self].buildcontent, parameter[]]
name[self].content assign[=] name[self].htmlcontent
name[self].htmlcontent assign[=] call[name[self].template_page_nvd3.render, parameter[]] | keyword[def] identifier[buildhtml] ( identifier[self] ):
literal[string]
identifier[self] . identifier[buildcontent] ()
identifier[self] . identifier[content] = identifier[self] . identifier[htmlcontent]
identifier[self] . identifier[htmlcontent] = identifier[self] . identifier[template_page_nvd3] . identifier[render] ( identifier[chart] = identifier[self] ) | def buildhtml(self):
"""Build the HTML page
Create the htmlheader with css / js
Create html page
Add Js code for nvd3
"""
self.buildcontent()
self.content = self.htmlcontent
self.htmlcontent = self.template_page_nvd3.render(chart=self) |
def ratio_value_number_to_time_series_length(self, x):
    """Ratio of the number of distinct values to the series length.

    Thin wrapper around tsfresh's
    `ratio_value_number_to_time_series_length <https://github.com/blue-yonder/tsfresh/blob/master\
    /tsfresh/feature_extraction/feature_calculators.py#L830>`_.
    The result is 1 when every value in the series occurs exactly once
    and smaller otherwise, i.e. ``# unique values / # values``.

    :param x: the time series to calculate the feature of
    :type x: pandas.Series
    :return: the value of this feature
    :rtype: float
    """
    feature_value = feature_calculators.ratio_value_number_to_time_series_length(x)
    logging.debug("ratio value number to time series length by tsfresh calculated")
    return feature_value
constant[
As in tsfresh `ratio_value_number_to_time_series_length <https://github.com/blue-yonder/tsfresh/blob/master /tsfresh/feature_extraction/feature_calculators.py#L830>`_
Returns a factor which is 1 if all values in the time series occur only once,
and below one if this is not the case.
In principle, it just returns: # unique values / # values
:param x: the time series to calculate the feature of
:type x: pandas.Series
:return: the value of this feature
:rtype: float
]
variable[ratio] assign[=] call[name[feature_calculators].ratio_value_number_to_time_series_length, parameter[name[x]]]
call[name[logging].debug, parameter[constant[ratio value number to time series length by tsfresh calculated]]]
return[name[ratio]] | keyword[def] identifier[ratio_value_number_to_time_series_length] ( identifier[self] , identifier[x] ):
literal[string]
identifier[ratio] = identifier[feature_calculators] . identifier[ratio_value_number_to_time_series_length] ( identifier[x] )
identifier[logging] . identifier[debug] ( literal[string] )
keyword[return] identifier[ratio] | def ratio_value_number_to_time_series_length(self, x):
"""
As in tsfresh `ratio_value_number_to_time_series_length <https://github.com/blue-yonder/tsfresh/blob/master /tsfresh/feature_extraction/feature_calculators.py#L830>`_
Returns a factor which is 1 if all values in the time series occur only once,
and below one if this is not the case.
In principle, it just returns: # unique values / # values
:param x: the time series to calculate the feature of
:type x: pandas.Series
:return: the value of this feature
:rtype: float
"""
ratio = feature_calculators.ratio_value_number_to_time_series_length(x)
logging.debug('ratio value number to time series length by tsfresh calculated')
return ratio |
def wheels(opts, whitelist=None, context=None):
    '''
    Returns the wheels modules
    '''
    # Default the shared context to a fresh dict (a mutable default
    # argument would be shared across calls).
    pack_context = {} if context is None else context
    return LazyLoader(
        _module_dirs(opts, 'wheel'),
        opts,
        tag='wheel',
        whitelist=whitelist,
        pack={'__context__': pack_context},
    )
constant[
Returns the wheels modules
]
if compare[name[context] is constant[None]] begin[:]
variable[context] assign[=] dictionary[[], []]
return[call[name[LazyLoader], parameter[call[name[_module_dirs], parameter[name[opts], constant[wheel]]], name[opts]]]] | keyword[def] identifier[wheels] ( identifier[opts] , identifier[whitelist] = keyword[None] , identifier[context] = keyword[None] ):
literal[string]
keyword[if] identifier[context] keyword[is] keyword[None] :
identifier[context] ={}
keyword[return] identifier[LazyLoader] (
identifier[_module_dirs] ( identifier[opts] , literal[string] ),
identifier[opts] ,
identifier[tag] = literal[string] ,
identifier[whitelist] = identifier[whitelist] ,
identifier[pack] ={ literal[string] : identifier[context] },
) | def wheels(opts, whitelist=None, context=None):
"""
Returns the wheels modules
"""
if context is None:
context = {} # depends on [control=['if'], data=['context']]
return LazyLoader(_module_dirs(opts, 'wheel'), opts, tag='wheel', whitelist=whitelist, pack={'__context__': context}) |
def process_request(self, req, resp):
""" Process the request before routing it. """
goldman.sess.req = req
if goldman.config.STORE:
goldman.sess.store = goldman.config.STORE() | def function[process_request, parameter[self, req, resp]]:
constant[ Process the request before routing it. ]
name[goldman].sess.req assign[=] name[req]
if name[goldman].config.STORE begin[:]
name[goldman].sess.store assign[=] call[name[goldman].config.STORE, parameter[]] | keyword[def] identifier[process_request] ( identifier[self] , identifier[req] , identifier[resp] ):
literal[string]
identifier[goldman] . identifier[sess] . identifier[req] = identifier[req]
keyword[if] identifier[goldman] . identifier[config] . identifier[STORE] :
identifier[goldman] . identifier[sess] . identifier[store] = identifier[goldman] . identifier[config] . identifier[STORE] () | def process_request(self, req, resp):
""" Process the request before routing it. """
goldman.sess.req = req
if goldman.config.STORE:
goldman.sess.store = goldman.config.STORE() # depends on [control=['if'], data=[]] |
def listen(self, io_in, io_out, io_err):
"""Listens to provided io stream and writes predictions
to output. In case of errors, the error stream will be used.
"""
for line in io_in:
if line.strip().lower() == 'exit':
break
try:
y_pred = self.process_line(line)
except Exception as e:
io_out.write('[]\n')
io_err.write(
"Error while processing input row: {}"
"{}: {}\n".format(line, type(e), e))
io_err.flush()
else:
io_out.write(ujson.dumps(y_pred.tolist()))
io_out.write('\n')
io_out.flush() | def function[listen, parameter[self, io_in, io_out, io_err]]:
constant[Listens to provided io stream and writes predictions
to output. In case of errors, the error stream will be used.
]
for taget[name[line]] in starred[name[io_in]] begin[:]
if compare[call[call[name[line].strip, parameter[]].lower, parameter[]] equal[==] constant[exit]] begin[:]
break
<ast.Try object at 0x7da20c76cd60> | keyword[def] identifier[listen] ( identifier[self] , identifier[io_in] , identifier[io_out] , identifier[io_err] ):
literal[string]
keyword[for] identifier[line] keyword[in] identifier[io_in] :
keyword[if] identifier[line] . identifier[strip] (). identifier[lower] ()== literal[string] :
keyword[break]
keyword[try] :
identifier[y_pred] = identifier[self] . identifier[process_line] ( identifier[line] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[io_out] . identifier[write] ( literal[string] )
identifier[io_err] . identifier[write] (
literal[string]
literal[string] . identifier[format] ( identifier[line] , identifier[type] ( identifier[e] ), identifier[e] ))
identifier[io_err] . identifier[flush] ()
keyword[else] :
identifier[io_out] . identifier[write] ( identifier[ujson] . identifier[dumps] ( identifier[y_pred] . identifier[tolist] ()))
identifier[io_out] . identifier[write] ( literal[string] )
identifier[io_out] . identifier[flush] () | def listen(self, io_in, io_out, io_err):
"""Listens to provided io stream and writes predictions
to output. In case of errors, the error stream will be used.
"""
for line in io_in:
if line.strip().lower() == 'exit':
break # depends on [control=['if'], data=[]]
try:
y_pred = self.process_line(line) # depends on [control=['try'], data=[]]
except Exception as e:
io_out.write('[]\n')
io_err.write('Error while processing input row: {}{}: {}\n'.format(line, type(e), e))
io_err.flush() # depends on [control=['except'], data=['e']]
else:
io_out.write(ujson.dumps(y_pred.tolist()))
io_out.write('\n')
io_out.flush() # depends on [control=['for'], data=['line']] |
def refresh(self):
"""
::
GET /:login/machines/:id/snapshots/:name
Fetch the existing state and values for the snapshot
and commit the values locally.
"""
data = self.machine.raw_snapshot_data(self.name)
self._save(data) | def function[refresh, parameter[self]]:
constant[
::
GET /:login/machines/:id/snapshots/:name
Fetch the existing state and values for the snapshot
and commit the values locally.
]
variable[data] assign[=] call[name[self].machine.raw_snapshot_data, parameter[name[self].name]]
call[name[self]._save, parameter[name[data]]] | keyword[def] identifier[refresh] ( identifier[self] ):
literal[string]
identifier[data] = identifier[self] . identifier[machine] . identifier[raw_snapshot_data] ( identifier[self] . identifier[name] )
identifier[self] . identifier[_save] ( identifier[data] ) | def refresh(self):
"""
::
GET /:login/machines/:id/snapshots/:name
Fetch the existing state and values for the snapshot
and commit the values locally.
"""
data = self.machine.raw_snapshot_data(self.name)
self._save(data) |
def decompress(self, value: bytes, max_length: int = 0) -> bytes:
"""Decompress a chunk, returning newly-available data.
Some data may be buffered for later processing; `flush` must
be called when there is no more input data to ensure that
all data was processed.
If ``max_length`` is given, some input data may be left over
in ``unconsumed_tail``; you must retrieve this value and pass
it back to a future call to `decompress` if it is not empty.
"""
return self.decompressobj.decompress(value, max_length) | def function[decompress, parameter[self, value, max_length]]:
constant[Decompress a chunk, returning newly-available data.
Some data may be buffered for later processing; `flush` must
be called when there is no more input data to ensure that
all data was processed.
If ``max_length`` is given, some input data may be left over
in ``unconsumed_tail``; you must retrieve this value and pass
it back to a future call to `decompress` if it is not empty.
]
return[call[name[self].decompressobj.decompress, parameter[name[value], name[max_length]]]] | keyword[def] identifier[decompress] ( identifier[self] , identifier[value] : identifier[bytes] , identifier[max_length] : identifier[int] = literal[int] )-> identifier[bytes] :
literal[string]
keyword[return] identifier[self] . identifier[decompressobj] . identifier[decompress] ( identifier[value] , identifier[max_length] ) | def decompress(self, value: bytes, max_length: int=0) -> bytes:
"""Decompress a chunk, returning newly-available data.
Some data may be buffered for later processing; `flush` must
be called when there is no more input data to ensure that
all data was processed.
If ``max_length`` is given, some input data may be left over
in ``unconsumed_tail``; you must retrieve this value and pass
it back to a future call to `decompress` if it is not empty.
"""
return self.decompressobj.decompress(value, max_length) |
def _GetCurrentControlSet(self, key_path_suffix):
"""Virtual key callback to determine the current control set.
Args:
key_path_suffix (str): current control set Windows Registry key path
suffix with leading path separator.
Returns:
WinRegistryKey: the current control set Windows Registry key or None
if not available.
"""
select_key_path = 'HKEY_LOCAL_MACHINE\\System\\Select'
select_key = self.GetKeyByPath(select_key_path)
if not select_key:
return None
# To determine the current control set check:
# 1. The "Current" value.
# 2. The "Default" value.
# 3. The "LastKnownGood" value.
control_set = None
for value_name in ('Current', 'Default', 'LastKnownGood'):
value = select_key.GetValueByName(value_name)
if not value or not value.DataIsInteger():
continue
control_set = value.GetDataAsObject()
# If the control set is 0 then we need to check the other values.
if control_set > 0 or control_set <= 999:
break
if not control_set or control_set <= 0 or control_set > 999:
return None
control_set_path = 'HKEY_LOCAL_MACHINE\\System\\ControlSet{0:03d}'.format(
control_set)
key_path = ''.join([control_set_path, key_path_suffix])
return self.GetKeyByPath(key_path) | def function[_GetCurrentControlSet, parameter[self, key_path_suffix]]:
constant[Virtual key callback to determine the current control set.
Args:
key_path_suffix (str): current control set Windows Registry key path
suffix with leading path separator.
Returns:
WinRegistryKey: the current control set Windows Registry key or None
if not available.
]
variable[select_key_path] assign[=] constant[HKEY_LOCAL_MACHINE\System\Select]
variable[select_key] assign[=] call[name[self].GetKeyByPath, parameter[name[select_key_path]]]
if <ast.UnaryOp object at 0x7da20c6c5780> begin[:]
return[constant[None]]
variable[control_set] assign[=] constant[None]
for taget[name[value_name]] in starred[tuple[[<ast.Constant object at 0x7da20c6c5720>, <ast.Constant object at 0x7da20c6c7820>, <ast.Constant object at 0x7da20c6c48e0>]]] begin[:]
variable[value] assign[=] call[name[select_key].GetValueByName, parameter[name[value_name]]]
if <ast.BoolOp object at 0x7da20c6c68c0> begin[:]
continue
variable[control_set] assign[=] call[name[value].GetDataAsObject, parameter[]]
if <ast.BoolOp object at 0x7da20c6c6c50> begin[:]
break
if <ast.BoolOp object at 0x7da20c6c54e0> begin[:]
return[constant[None]]
variable[control_set_path] assign[=] call[constant[HKEY_LOCAL_MACHINE\System\ControlSet{0:03d}].format, parameter[name[control_set]]]
variable[key_path] assign[=] call[constant[].join, parameter[list[[<ast.Name object at 0x7da20c6c77f0>, <ast.Name object at 0x7da20c6c7d00>]]]]
return[call[name[self].GetKeyByPath, parameter[name[key_path]]]] | keyword[def] identifier[_GetCurrentControlSet] ( identifier[self] , identifier[key_path_suffix] ):
literal[string]
identifier[select_key_path] = literal[string]
identifier[select_key] = identifier[self] . identifier[GetKeyByPath] ( identifier[select_key_path] )
keyword[if] keyword[not] identifier[select_key] :
keyword[return] keyword[None]
identifier[control_set] = keyword[None]
keyword[for] identifier[value_name] keyword[in] ( literal[string] , literal[string] , literal[string] ):
identifier[value] = identifier[select_key] . identifier[GetValueByName] ( identifier[value_name] )
keyword[if] keyword[not] identifier[value] keyword[or] keyword[not] identifier[value] . identifier[DataIsInteger] ():
keyword[continue]
identifier[control_set] = identifier[value] . identifier[GetDataAsObject] ()
keyword[if] identifier[control_set] > literal[int] keyword[or] identifier[control_set] <= literal[int] :
keyword[break]
keyword[if] keyword[not] identifier[control_set] keyword[or] identifier[control_set] <= literal[int] keyword[or] identifier[control_set] > literal[int] :
keyword[return] keyword[None]
identifier[control_set_path] = literal[string] . identifier[format] (
identifier[control_set] )
identifier[key_path] = literal[string] . identifier[join] ([ identifier[control_set_path] , identifier[key_path_suffix] ])
keyword[return] identifier[self] . identifier[GetKeyByPath] ( identifier[key_path] ) | def _GetCurrentControlSet(self, key_path_suffix):
"""Virtual key callback to determine the current control set.
Args:
key_path_suffix (str): current control set Windows Registry key path
suffix with leading path separator.
Returns:
WinRegistryKey: the current control set Windows Registry key or None
if not available.
"""
select_key_path = 'HKEY_LOCAL_MACHINE\\System\\Select'
select_key = self.GetKeyByPath(select_key_path)
if not select_key:
return None # depends on [control=['if'], data=[]]
# To determine the current control set check:
# 1. The "Current" value.
# 2. The "Default" value.
# 3. The "LastKnownGood" value.
control_set = None
for value_name in ('Current', 'Default', 'LastKnownGood'):
value = select_key.GetValueByName(value_name)
if not value or not value.DataIsInteger():
continue # depends on [control=['if'], data=[]]
control_set = value.GetDataAsObject()
# If the control set is 0 then we need to check the other values.
if control_set > 0 or control_set <= 999:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['value_name']]
if not control_set or control_set <= 0 or control_set > 999:
return None # depends on [control=['if'], data=[]]
control_set_path = 'HKEY_LOCAL_MACHINE\\System\\ControlSet{0:03d}'.format(control_set)
key_path = ''.join([control_set_path, key_path_suffix])
return self.GetKeyByPath(key_path) |
def set_value(obj, name, value):
"""A flexible method for setting a value on an object.
If the object implements __setitem__ (such as a dict) performs obj[name] = value, else performs
setattr(obj, name, value).
:obj: the object to set the value on
:name: the name to assign the value to
:value: the value to assign
"""
if hasattr(obj, "__setitem__"):
obj[name] = value
else:
setattr(obj, name, value) | def function[set_value, parameter[obj, name, value]]:
constant[A flexible method for setting a value on an object.
If the object implements __setitem__ (such as a dict) performs obj[name] = value, else performs
setattr(obj, name, value).
:obj: the object to set the value on
:name: the name to assign the value to
:value: the value to assign
]
if call[name[hasattr], parameter[name[obj], constant[__setitem__]]] begin[:]
call[name[obj]][name[name]] assign[=] name[value] | keyword[def] identifier[set_value] ( identifier[obj] , identifier[name] , identifier[value] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[obj] , literal[string] ):
identifier[obj] [ identifier[name] ]= identifier[value]
keyword[else] :
identifier[setattr] ( identifier[obj] , identifier[name] , identifier[value] ) | def set_value(obj, name, value):
"""A flexible method for setting a value on an object.
If the object implements __setitem__ (such as a dict) performs obj[name] = value, else performs
setattr(obj, name, value).
:obj: the object to set the value on
:name: the name to assign the value to
:value: the value to assign
"""
if hasattr(obj, '__setitem__'):
obj[name] = value # depends on [control=['if'], data=[]]
else:
setattr(obj, name, value) |
def _wait_time(self, shard_state, secs, now=datetime.datetime.now):
"""Time to wait until slice_start_time is secs ago from now.
Args:
shard_state: shard state.
secs: duration in seconds.
now: a func that gets now.
Returns:
0 if no wait. A positive int in seconds otherwise. Always around up.
"""
assert shard_state.slice_start_time is not None
delta = now() - shard_state.slice_start_time
duration = datetime.timedelta(seconds=secs)
if delta < duration:
return util.total_seconds(duration - delta)
else:
return 0 | def function[_wait_time, parameter[self, shard_state, secs, now]]:
constant[Time to wait until slice_start_time is secs ago from now.
Args:
shard_state: shard state.
secs: duration in seconds.
now: a func that gets now.
Returns:
0 if no wait. A positive int in seconds otherwise. Always around up.
]
assert[compare[name[shard_state].slice_start_time is_not constant[None]]]
variable[delta] assign[=] binary_operation[call[name[now], parameter[]] - name[shard_state].slice_start_time]
variable[duration] assign[=] call[name[datetime].timedelta, parameter[]]
if compare[name[delta] less[<] name[duration]] begin[:]
return[call[name[util].total_seconds, parameter[binary_operation[name[duration] - name[delta]]]]] | keyword[def] identifier[_wait_time] ( identifier[self] , identifier[shard_state] , identifier[secs] , identifier[now] = identifier[datetime] . identifier[datetime] . identifier[now] ):
literal[string]
keyword[assert] identifier[shard_state] . identifier[slice_start_time] keyword[is] keyword[not] keyword[None]
identifier[delta] = identifier[now] ()- identifier[shard_state] . identifier[slice_start_time]
identifier[duration] = identifier[datetime] . identifier[timedelta] ( identifier[seconds] = identifier[secs] )
keyword[if] identifier[delta] < identifier[duration] :
keyword[return] identifier[util] . identifier[total_seconds] ( identifier[duration] - identifier[delta] )
keyword[else] :
keyword[return] literal[int] | def _wait_time(self, shard_state, secs, now=datetime.datetime.now):
"""Time to wait until slice_start_time is secs ago from now.
Args:
shard_state: shard state.
secs: duration in seconds.
now: a func that gets now.
Returns:
0 if no wait. A positive int in seconds otherwise. Always around up.
"""
assert shard_state.slice_start_time is not None
delta = now() - shard_state.slice_start_time
duration = datetime.timedelta(seconds=secs)
if delta < duration:
return util.total_seconds(duration - delta) # depends on [control=['if'], data=['delta', 'duration']]
else:
return 0 |
def main(self, args=None, prog_name=None, complete_var=None,
standalone_mode=True, **extra):
"""This is the way to invoke a script with all the bells and
whistles as a command line application. This will always terminate
the application after a call. If this is not wanted, ``SystemExit``
needs to be caught.
This method is also available by directly calling the instance of
a :class:`Command`.
.. versionadded:: 3.0
Added the `standalone_mode` flag to control the standalone mode.
:param args: the arguments that should be used for parsing. If not
provided, ``sys.argv[1:]`` is used.
:param prog_name: the program name that should be used. By default
the program name is constructed by taking the file
name from ``sys.argv[0]``.
:param complete_var: the environment variable that controls the
bash completion support. The default is
``"_<prog_name>_COMPLETE"`` with prog_name in
uppercase.
:param standalone_mode: the default behavior is to invoke the script
in standalone mode. Click will then
handle exceptions and convert them into
error messages and the function will never
return but shut down the interpreter. If
this is set to `False` they will be
propagated to the caller and the return
value of this function is the return value
of :meth:`invoke`.
:param extra: extra keyword arguments are forwarded to the context
constructor. See :class:`Context` for more information.
"""
# If we are in Python 3, we will verify that the environment is
# sane at this point or reject further execution to avoid a
# broken script.
if not PY2:
_verify_python3_env()
else:
_check_for_unicode_literals()
if args is None:
args = get_os_args()
else:
args = list(args)
if prog_name is None:
prog_name = make_str(os.path.basename(
sys.argv and sys.argv[0] or __file__))
# Hook for the Bash completion. This only activates if the Bash
# completion is actually enabled, otherwise this is quite a fast
# noop.
_bashcomplete(self, prog_name, complete_var)
try:
try:
with self.make_context(prog_name, args, **extra) as ctx:
rv = self.invoke(ctx)
if not standalone_mode:
return rv
# it's not safe to `ctx.exit(rv)` here!
# note that `rv` may actually contain data like "1" which
# has obvious effects
# more subtle case: `rv=[None, None]` can come out of
# chained commands which all returned `None` -- so it's not
# even always obvious that `rv` indicates success/failure
# by its truthiness/falsiness
ctx.exit()
except (EOFError, KeyboardInterrupt):
echo(file=sys.stderr)
raise Abort()
except ClickException as e:
if not standalone_mode:
raise
e.show()
sys.exit(e.exit_code)
except IOError as e:
if e.errno == errno.EPIPE:
sys.stdout = PacifyFlushWrapper(sys.stdout)
sys.stderr = PacifyFlushWrapper(sys.stderr)
sys.exit(1)
else:
raise
except Exit as e:
if standalone_mode:
sys.exit(e.exit_code)
else:
# in non-standalone mode, return the exit code
# note that this is only reached if `self.invoke` above raises
# an Exit explicitly -- thus bypassing the check there which
# would return its result
# the results of non-standalone execution may therefore be
# somewhat ambiguous: if there are codepaths which lead to
# `ctx.exit(1)` and to `return 1`, the caller won't be able to
# tell the difference between the two
return e.exit_code
except Abort:
if not standalone_mode:
raise
echo('Aborted!', file=sys.stderr)
sys.exit(1) | def function[main, parameter[self, args, prog_name, complete_var, standalone_mode]]:
constant[This is the way to invoke a script with all the bells and
whistles as a command line application. This will always terminate
the application after a call. If this is not wanted, ``SystemExit``
needs to be caught.
This method is also available by directly calling the instance of
a :class:`Command`.
.. versionadded:: 3.0
Added the `standalone_mode` flag to control the standalone mode.
:param args: the arguments that should be used for parsing. If not
provided, ``sys.argv[1:]`` is used.
:param prog_name: the program name that should be used. By default
the program name is constructed by taking the file
name from ``sys.argv[0]``.
:param complete_var: the environment variable that controls the
bash completion support. The default is
``"_<prog_name>_COMPLETE"`` with prog_name in
uppercase.
:param standalone_mode: the default behavior is to invoke the script
in standalone mode. Click will then
handle exceptions and convert them into
error messages and the function will never
return but shut down the interpreter. If
this is set to `False` they will be
propagated to the caller and the return
value of this function is the return value
of :meth:`invoke`.
:param extra: extra keyword arguments are forwarded to the context
constructor. See :class:`Context` for more information.
]
if <ast.UnaryOp object at 0x7da18eb56ef0> begin[:]
call[name[_verify_python3_env], parameter[]]
if compare[name[args] is constant[None]] begin[:]
variable[args] assign[=] call[name[get_os_args], parameter[]]
if compare[name[prog_name] is constant[None]] begin[:]
variable[prog_name] assign[=] call[name[make_str], parameter[call[name[os].path.basename, parameter[<ast.BoolOp object at 0x7da18eb569e0>]]]]
call[name[_bashcomplete], parameter[name[self], name[prog_name], name[complete_var]]]
<ast.Try object at 0x7da18eb571f0> | keyword[def] identifier[main] ( identifier[self] , identifier[args] = keyword[None] , identifier[prog_name] = keyword[None] , identifier[complete_var] = keyword[None] ,
identifier[standalone_mode] = keyword[True] ,** identifier[extra] ):
literal[string]
keyword[if] keyword[not] identifier[PY2] :
identifier[_verify_python3_env] ()
keyword[else] :
identifier[_check_for_unicode_literals] ()
keyword[if] identifier[args] keyword[is] keyword[None] :
identifier[args] = identifier[get_os_args] ()
keyword[else] :
identifier[args] = identifier[list] ( identifier[args] )
keyword[if] identifier[prog_name] keyword[is] keyword[None] :
identifier[prog_name] = identifier[make_str] ( identifier[os] . identifier[path] . identifier[basename] (
identifier[sys] . identifier[argv] keyword[and] identifier[sys] . identifier[argv] [ literal[int] ] keyword[or] identifier[__file__] ))
identifier[_bashcomplete] ( identifier[self] , identifier[prog_name] , identifier[complete_var] )
keyword[try] :
keyword[try] :
keyword[with] identifier[self] . identifier[make_context] ( identifier[prog_name] , identifier[args] ,** identifier[extra] ) keyword[as] identifier[ctx] :
identifier[rv] = identifier[self] . identifier[invoke] ( identifier[ctx] )
keyword[if] keyword[not] identifier[standalone_mode] :
keyword[return] identifier[rv]
identifier[ctx] . identifier[exit] ()
keyword[except] ( identifier[EOFError] , identifier[KeyboardInterrupt] ):
identifier[echo] ( identifier[file] = identifier[sys] . identifier[stderr] )
keyword[raise] identifier[Abort] ()
keyword[except] identifier[ClickException] keyword[as] identifier[e] :
keyword[if] keyword[not] identifier[standalone_mode] :
keyword[raise]
identifier[e] . identifier[show] ()
identifier[sys] . identifier[exit] ( identifier[e] . identifier[exit_code] )
keyword[except] identifier[IOError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[errno] == identifier[errno] . identifier[EPIPE] :
identifier[sys] . identifier[stdout] = identifier[PacifyFlushWrapper] ( identifier[sys] . identifier[stdout] )
identifier[sys] . identifier[stderr] = identifier[PacifyFlushWrapper] ( identifier[sys] . identifier[stderr] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[else] :
keyword[raise]
keyword[except] identifier[Exit] keyword[as] identifier[e] :
keyword[if] identifier[standalone_mode] :
identifier[sys] . identifier[exit] ( identifier[e] . identifier[exit_code] )
keyword[else] :
keyword[return] identifier[e] . identifier[exit_code]
keyword[except] identifier[Abort] :
keyword[if] keyword[not] identifier[standalone_mode] :
keyword[raise]
identifier[echo] ( literal[string] , identifier[file] = identifier[sys] . identifier[stderr] )
identifier[sys] . identifier[exit] ( literal[int] ) | def main(self, args=None, prog_name=None, complete_var=None, standalone_mode=True, **extra):
"""This is the way to invoke a script with all the bells and
whistles as a command line application. This will always terminate
the application after a call. If this is not wanted, ``SystemExit``
needs to be caught.
This method is also available by directly calling the instance of
a :class:`Command`.
.. versionadded:: 3.0
Added the `standalone_mode` flag to control the standalone mode.
:param args: the arguments that should be used for parsing. If not
provided, ``sys.argv[1:]`` is used.
:param prog_name: the program name that should be used. By default
the program name is constructed by taking the file
name from ``sys.argv[0]``.
:param complete_var: the environment variable that controls the
bash completion support. The default is
``"_<prog_name>_COMPLETE"`` with prog_name in
uppercase.
:param standalone_mode: the default behavior is to invoke the script
in standalone mode. Click will then
handle exceptions and convert them into
error messages and the function will never
return but shut down the interpreter. If
this is set to `False` they will be
propagated to the caller and the return
value of this function is the return value
of :meth:`invoke`.
:param extra: extra keyword arguments are forwarded to the context
constructor. See :class:`Context` for more information.
"""
# If we are in Python 3, we will verify that the environment is
# sane at this point or reject further execution to avoid a
# broken script.
if not PY2:
_verify_python3_env() # depends on [control=['if'], data=[]]
else:
_check_for_unicode_literals()
if args is None:
args = get_os_args() # depends on [control=['if'], data=['args']]
else:
args = list(args)
if prog_name is None:
prog_name = make_str(os.path.basename(sys.argv and sys.argv[0] or __file__)) # depends on [control=['if'], data=['prog_name']]
# Hook for the Bash completion. This only activates if the Bash
# completion is actually enabled, otherwise this is quite a fast
# noop.
_bashcomplete(self, prog_name, complete_var)
try:
try:
with self.make_context(prog_name, args, **extra) as ctx:
rv = self.invoke(ctx)
if not standalone_mode:
return rv # depends on [control=['if'], data=[]]
# it's not safe to `ctx.exit(rv)` here!
# note that `rv` may actually contain data like "1" which
# has obvious effects
# more subtle case: `rv=[None, None]` can come out of
# chained commands which all returned `None` -- so it's not
# even always obvious that `rv` indicates success/failure
# by its truthiness/falsiness
ctx.exit() # depends on [control=['with'], data=['ctx']] # depends on [control=['try'], data=[]]
except (EOFError, KeyboardInterrupt):
echo(file=sys.stderr)
raise Abort() # depends on [control=['except'], data=[]]
except ClickException as e:
if not standalone_mode:
raise # depends on [control=['if'], data=[]]
e.show()
sys.exit(e.exit_code) # depends on [control=['except'], data=['e']]
except IOError as e:
if e.errno == errno.EPIPE:
sys.stdout = PacifyFlushWrapper(sys.stdout)
sys.stderr = PacifyFlushWrapper(sys.stderr)
sys.exit(1) # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=['e']] # depends on [control=['try'], data=[]]
except Exit as e:
if standalone_mode:
sys.exit(e.exit_code) # depends on [control=['if'], data=[]]
else:
# in non-standalone mode, return the exit code
# note that this is only reached if `self.invoke` above raises
# an Exit explicitly -- thus bypassing the check there which
# would return its result
# the results of non-standalone execution may therefore be
# somewhat ambiguous: if there are codepaths which lead to
# `ctx.exit(1)` and to `return 1`, the caller won't be able to
# tell the difference between the two
return e.exit_code # depends on [control=['except'], data=['e']]
except Abort:
if not standalone_mode:
raise # depends on [control=['if'], data=[]]
echo('Aborted!', file=sys.stderr)
sys.exit(1) # depends on [control=['except'], data=[]] |
def _get_fit_params(self, x, fit_key):
""" Transforms the input parameter to fit parameters for the 7dq2 model.
That is, maps from
x = [q, chiAx, chiAy, chiAz, chiBx, chiBy, chiBz]
fit_params = [np.log(q), chiAx, chiAy, chiHat, chiBx, chiBy, chi_a]
chiHat is defined in Eq.(3) of 1508.07253, but with chiAz and chiBz instead
of chiA and chiB.
chi_a = (chiAz - chiBz)/2.
"""
q, chiAz, chiBz = x[0], x[3], x[6]
eta = q/(1.+q)**2
chi_wtAvg = (q*chiAz+chiBz)/(1.+q)
chiHat = (chi_wtAvg - 38.*eta/113.*(chiAz + chiBz))/(1. - 76.*eta/113.)
chi_a = (chiAz - chiBz)/2.
fit_params = x
fit_params[0] = np.log(q)
fit_params[3] = chiHat
fit_params[6] = chi_a
return fit_params | def function[_get_fit_params, parameter[self, x, fit_key]]:
constant[ Transforms the input parameter to fit parameters for the 7dq2 model.
That is, maps from
x = [q, chiAx, chiAy, chiAz, chiBx, chiBy, chiBz]
fit_params = [np.log(q), chiAx, chiAy, chiHat, chiBx, chiBy, chi_a]
chiHat is defined in Eq.(3) of 1508.07253, but with chiAz and chiBz instead
of chiA and chiB.
chi_a = (chiAz - chiBz)/2.
]
<ast.Tuple object at 0x7da1b1b9ff10> assign[=] tuple[[<ast.Subscript object at 0x7da1b1b9fb20>, <ast.Subscript object at 0x7da1b1b9d840>, <ast.Subscript object at 0x7da1b1b9fcd0>]]
variable[eta] assign[=] binary_operation[name[q] / binary_operation[binary_operation[constant[1.0] + name[q]] ** constant[2]]]
variable[chi_wtAvg] assign[=] binary_operation[binary_operation[binary_operation[name[q] * name[chiAz]] + name[chiBz]] / binary_operation[constant[1.0] + name[q]]]
variable[chiHat] assign[=] binary_operation[binary_operation[name[chi_wtAvg] - binary_operation[binary_operation[binary_operation[constant[38.0] * name[eta]] / constant[113.0]] * binary_operation[name[chiAz] + name[chiBz]]]] / binary_operation[constant[1.0] - binary_operation[binary_operation[constant[76.0] * name[eta]] / constant[113.0]]]]
variable[chi_a] assign[=] binary_operation[binary_operation[name[chiAz] - name[chiBz]] / constant[2.0]]
variable[fit_params] assign[=] name[x]
call[name[fit_params]][constant[0]] assign[=] call[name[np].log, parameter[name[q]]]
call[name[fit_params]][constant[3]] assign[=] name[chiHat]
call[name[fit_params]][constant[6]] assign[=] name[chi_a]
return[name[fit_params]] | keyword[def] identifier[_get_fit_params] ( identifier[self] , identifier[x] , identifier[fit_key] ):
literal[string]
identifier[q] , identifier[chiAz] , identifier[chiBz] = identifier[x] [ literal[int] ], identifier[x] [ literal[int] ], identifier[x] [ literal[int] ]
identifier[eta] = identifier[q] /( literal[int] + identifier[q] )** literal[int]
identifier[chi_wtAvg] =( identifier[q] * identifier[chiAz] + identifier[chiBz] )/( literal[int] + identifier[q] )
identifier[chiHat] =( identifier[chi_wtAvg] - literal[int] * identifier[eta] / literal[int] *( identifier[chiAz] + identifier[chiBz] ))/( literal[int] - literal[int] * identifier[eta] / literal[int] )
identifier[chi_a] =( identifier[chiAz] - identifier[chiBz] )/ literal[int]
identifier[fit_params] = identifier[x]
identifier[fit_params] [ literal[int] ]= identifier[np] . identifier[log] ( identifier[q] )
identifier[fit_params] [ literal[int] ]= identifier[chiHat]
identifier[fit_params] [ literal[int] ]= identifier[chi_a]
keyword[return] identifier[fit_params] | def _get_fit_params(self, x, fit_key):
""" Transforms the input parameter to fit parameters for the 7dq2 model.
That is, maps from
x = [q, chiAx, chiAy, chiAz, chiBx, chiBy, chiBz]
fit_params = [np.log(q), chiAx, chiAy, chiHat, chiBx, chiBy, chi_a]
chiHat is defined in Eq.(3) of 1508.07253, but with chiAz and chiBz instead
of chiA and chiB.
chi_a = (chiAz - chiBz)/2.
"""
(q, chiAz, chiBz) = (x[0], x[3], x[6])
eta = q / (1.0 + q) ** 2
chi_wtAvg = (q * chiAz + chiBz) / (1.0 + q)
chiHat = (chi_wtAvg - 38.0 * eta / 113.0 * (chiAz + chiBz)) / (1.0 - 76.0 * eta / 113.0)
chi_a = (chiAz - chiBz) / 2.0
fit_params = x
fit_params[0] = np.log(q)
fit_params[3] = chiHat
fit_params[6] = chi_a
return fit_params |
def create_shortcuts(self):
    """Register and return the configurable shortcuts for the console.

    Each entry is registered through ``config_shortcut`` with the same
    context/name pair the rest of the plugin (and user configuration)
    expects, so overrides keep working.
    """
    # (callback, context, name) triples.  Context and name strings must
    # stay exactly as stored in the user's shortcut configuration.
    specs = [
        (self._control.inspect_current_object, 'Console',
         'Inspect current object'),
        (self.clear_console, 'Console', 'Clear shell'),
        (self.ipyclient.restart_kernel, 'ipython_console',
         'Restart kernel'),
        (lambda: self.new_client.emit(), 'ipython_console', 'new tab'),
        (lambda: self._reset_namespace(), 'ipython_console',
         'reset namespace'),
        (self._control.enter_array_inline, 'array_builder',
         'enter array inline'),
        (self._control.enter_array_table, 'array_builder',
         'enter array table'),
        (self.ipyclient.clear_line, 'console', 'clear line'),
    ]
    return [config_shortcut(callback, context=context, name=name,
                            parent=self)
            for callback, context, name in specs]
constant[Create shortcuts for ipyconsole.]
variable[inspect] assign[=] call[name[config_shortcut], parameter[name[self]._control.inspect_current_object]]
variable[clear_console] assign[=] call[name[config_shortcut], parameter[name[self].clear_console]]
variable[restart_kernel] assign[=] call[name[config_shortcut], parameter[name[self].ipyclient.restart_kernel]]
variable[new_tab] assign[=] call[name[config_shortcut], parameter[<ast.Lambda object at 0x7da1b21d4370>]]
variable[reset_namespace] assign[=] call[name[config_shortcut], parameter[<ast.Lambda object at 0x7da1b21d5db0>]]
variable[array_inline] assign[=] call[name[config_shortcut], parameter[name[self]._control.enter_array_inline]]
variable[array_table] assign[=] call[name[config_shortcut], parameter[name[self]._control.enter_array_table]]
variable[clear_line] assign[=] call[name[config_shortcut], parameter[name[self].ipyclient.clear_line]]
return[list[[<ast.Name object at 0x7da20c76cfa0>, <ast.Name object at 0x7da20c76e380>, <ast.Name object at 0x7da20c76dff0>, <ast.Name object at 0x7da20c76ea70>, <ast.Name object at 0x7da20c76d090>, <ast.Name object at 0x7da20c76d1e0>, <ast.Name object at 0x7da20c76d8a0>, <ast.Name object at 0x7da20c76f6a0>]]] | keyword[def] identifier[create_shortcuts] ( identifier[self] ):
literal[string]
identifier[inspect] = identifier[config_shortcut] ( identifier[self] . identifier[_control] . identifier[inspect_current_object] ,
identifier[context] = literal[string] ,
identifier[name] = literal[string] , identifier[parent] = identifier[self] )
identifier[clear_console] = identifier[config_shortcut] ( identifier[self] . identifier[clear_console] , identifier[context] = literal[string] ,
identifier[name] = literal[string] , identifier[parent] = identifier[self] )
identifier[restart_kernel] = identifier[config_shortcut] ( identifier[self] . identifier[ipyclient] . identifier[restart_kernel] ,
identifier[context] = literal[string] ,
identifier[name] = literal[string] , identifier[parent] = identifier[self] )
identifier[new_tab] = identifier[config_shortcut] ( keyword[lambda] : identifier[self] . identifier[new_client] . identifier[emit] (),
identifier[context] = literal[string] , identifier[name] = literal[string] ,
identifier[parent] = identifier[self] )
identifier[reset_namespace] = identifier[config_shortcut] ( keyword[lambda] : identifier[self] . identifier[_reset_namespace] (),
identifier[context] = literal[string] ,
identifier[name] = literal[string] , identifier[parent] = identifier[self] )
identifier[array_inline] = identifier[config_shortcut] ( identifier[self] . identifier[_control] . identifier[enter_array_inline] ,
identifier[context] = literal[string] ,
identifier[name] = literal[string] , identifier[parent] = identifier[self] )
identifier[array_table] = identifier[config_shortcut] ( identifier[self] . identifier[_control] . identifier[enter_array_table] ,
identifier[context] = literal[string] ,
identifier[name] = literal[string] , identifier[parent] = identifier[self] )
identifier[clear_line] = identifier[config_shortcut] ( identifier[self] . identifier[ipyclient] . identifier[clear_line] ,
identifier[context] = literal[string] , identifier[name] = literal[string] ,
identifier[parent] = identifier[self] )
keyword[return] [ identifier[inspect] , identifier[clear_console] , identifier[restart_kernel] , identifier[new_tab] ,
identifier[reset_namespace] , identifier[array_inline] , identifier[array_table] , identifier[clear_line] ] | def create_shortcuts(self):
"""Create shortcuts for ipyconsole."""
inspect = config_shortcut(self._control.inspect_current_object, context='Console', name='Inspect current object', parent=self)
clear_console = config_shortcut(self.clear_console, context='Console', name='Clear shell', parent=self)
restart_kernel = config_shortcut(self.ipyclient.restart_kernel, context='ipython_console', name='Restart kernel', parent=self)
new_tab = config_shortcut(lambda : self.new_client.emit(), context='ipython_console', name='new tab', parent=self)
reset_namespace = config_shortcut(lambda : self._reset_namespace(), context='ipython_console', name='reset namespace', parent=self)
array_inline = config_shortcut(self._control.enter_array_inline, context='array_builder', name='enter array inline', parent=self)
array_table = config_shortcut(self._control.enter_array_table, context='array_builder', name='enter array table', parent=self)
clear_line = config_shortcut(self.ipyclient.clear_line, context='console', name='clear line', parent=self)
return [inspect, clear_console, restart_kernel, new_tab, reset_namespace, array_inline, array_table, clear_line] |
def weather_history_at_id(self, id, start=None, end=None):
    """
    Queries the OWM Weather API for weather history for the specified city ID.
    A list of *Weather* objects is returned. It is possible to query for
    weather history in a closed time period, whose boundaries can be passed
    as optional parameters.
    :param id: the city ID
    :type id: int
    :param start: the object conveying the time value for the start query
        boundary (defaults to ``None``)
    :type start: int, ``datetime.datetime`` or ISO8601-formatted
        string
    :param end: the object conveying the time value for the end query
        boundary (defaults to ``None``)
    :type end: int, ``datetime.datetime`` or ISO8601-formatted string
    :returns: a list of *Weather* instances or ``None`` if history data is
        not available for the specified location
    :raises: *ParseResponseException* when OWM Weather API responses' data
        cannot be parsed, *APICallException* when OWM Weather API can not be
        reached, *ValueError* if the time boundaries are not in the correct
        chronological order, if one of the time boundaries is not ``None``
        and the other is or if one or both of the time boundaries are after
        the current time
    """
    # Explicit raise instead of a bare ``assert``: assertions are stripped
    # under ``python -O`` and would silently skip this validation.  The
    # exception type (AssertionError) is kept for backward compatibility.
    if not isinstance(id, int):
        raise AssertionError("'id' must be an int")
    if id < 0:
        # The check accepts 0, so the message must say "or equal to".
        raise ValueError("'id' value must be greater than or equal to 0")
    params = {'id': id, 'lang': self._language}
    if start is None and end is None:
        # No time window requested: query the full available history.
        pass
    elif start is not None and end is not None:
        unix_start = timeformatutils.to_UNIXtime(start)
        unix_end = timeformatutils.to_UNIXtime(end)
        if unix_start >= unix_end:
            raise ValueError("Error: the start time boundary must " \
                             "precede the end time!")
        current_time = time()
        if unix_start > current_time:
            raise ValueError("Error: the start time boundary must " \
                             "precede the current time!")
        params['start'] = str(unix_start)
        params['end'] = str(unix_end)
    else:
        # Exactly one boundary was given: refuse a half-open window.
        raise ValueError("Error: one of the time boundaries is None, " \
                         "while the other is not!")
    uri = http_client.HttpClient.to_url(CITY_WEATHER_HISTORY_URL,
                                        self._API_key,
                                        self._subscription_type,
                                        self._use_ssl)
    _, json_data = self._wapi.cacheable_get_json(uri, params=params)
    return self._parsers['weather_history'].parse_JSON(json_data)
constant[
Queries the OWM Weather API for weather history for the specified city ID.
A list of *Weather* objects is returned. It is possible to query for
weather history in a closed time period, whose boundaries can be passed
as optional parameters.
:param id: the city ID
:type id: int
:param start: the object conveying the time value for the start query
boundary (defaults to ``None``)
:type start: int, ``datetime.datetime`` or ISO8601-formatted
string
:param end: the object conveying the time value for the end query
boundary (defaults to ``None``)
:type end: int, ``datetime.datetime`` or ISO8601-formatted string
:returns: a list of *Weather* instances or ``None`` if history data is
not available for the specified location
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached, *ValueError* if the time boundaries are not in the correct
chronological order, if one of the time boundaries is not ``None``
and the other is or if one or both of the time boundaries are after
the current time
]
assert[compare[call[name[type], parameter[name[id]]] is name[int]]]
if compare[name[id] less[<] constant[0]] begin[:]
<ast.Raise object at 0x7da18f8111e0>
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da18f812140>, <ast.Constant object at 0x7da18f811e70>], [<ast.Name object at 0x7da18f811b40>, <ast.Attribute object at 0x7da18f813d90>]]
if <ast.BoolOp object at 0x7da18f812050> begin[:]
pass
variable[uri] assign[=] call[name[http_client].HttpClient.to_url, parameter[name[CITY_WEATHER_HISTORY_URL], name[self]._API_key, name[self]._subscription_type, name[self]._use_ssl]]
<ast.Tuple object at 0x7da2054a58a0> assign[=] call[name[self]._wapi.cacheable_get_json, parameter[name[uri]]]
return[call[call[name[self]._parsers][constant[weather_history]].parse_JSON, parameter[name[json_data]]]] | keyword[def] identifier[weather_history_at_id] ( identifier[self] , identifier[id] , identifier[start] = keyword[None] , identifier[end] = keyword[None] ):
literal[string]
keyword[assert] identifier[type] ( identifier[id] ) keyword[is] identifier[int] , literal[string]
keyword[if] identifier[id] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[params] ={ literal[string] : identifier[id] , literal[string] : identifier[self] . identifier[_language] }
keyword[if] identifier[start] keyword[is] keyword[None] keyword[and] identifier[end] keyword[is] keyword[None] :
keyword[pass]
keyword[elif] identifier[start] keyword[is] keyword[not] keyword[None] keyword[and] identifier[end] keyword[is] keyword[not] keyword[None] :
identifier[unix_start] = identifier[timeformatutils] . identifier[to_UNIXtime] ( identifier[start] )
identifier[unix_end] = identifier[timeformatutils] . identifier[to_UNIXtime] ( identifier[end] )
keyword[if] identifier[unix_start] >= identifier[unix_end] :
keyword[raise] identifier[ValueError] ( literal[string] literal[string] )
identifier[current_time] = identifier[time] ()
keyword[if] identifier[unix_start] > identifier[current_time] :
keyword[raise] identifier[ValueError] ( literal[string] literal[string] )
identifier[params] [ literal[string] ]= identifier[str] ( identifier[unix_start] )
identifier[params] [ literal[string] ]= identifier[str] ( identifier[unix_end] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] literal[string] )
identifier[uri] = identifier[http_client] . identifier[HttpClient] . identifier[to_url] ( identifier[CITY_WEATHER_HISTORY_URL] ,
identifier[self] . identifier[_API_key] ,
identifier[self] . identifier[_subscription_type] ,
identifier[self] . identifier[_use_ssl] )
identifier[_] , identifier[json_data] = identifier[self] . identifier[_wapi] . identifier[cacheable_get_json] ( identifier[uri] , identifier[params] = identifier[params] )
keyword[return] identifier[self] . identifier[_parsers] [ literal[string] ]. identifier[parse_JSON] ( identifier[json_data] ) | def weather_history_at_id(self, id, start=None, end=None):
"""
Queries the OWM Weather API for weather history for the specified city ID.
A list of *Weather* objects is returned. It is possible to query for
weather history in a closed time period, whose boundaries can be passed
as optional parameters.
:param id: the city ID
:type id: int
:param start: the object conveying the time value for the start query
boundary (defaults to ``None``)
:type start: int, ``datetime.datetime`` or ISO8601-formatted
string
:param end: the object conveying the time value for the end query
boundary (defaults to ``None``)
:type end: int, ``datetime.datetime`` or ISO8601-formatted string
:returns: a list of *Weather* instances or ``None`` if history data is
not available for the specified location
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached, *ValueError* if the time boundaries are not in the correct
chronological order, if one of the time boundaries is not ``None``
and the other is or if one or both of the time boundaries are after
the current time
"""
assert type(id) is int, "'id' must be an int"
if id < 0:
raise ValueError("'id' value must be greater than 0") # depends on [control=['if'], data=[]]
params = {'id': id, 'lang': self._language}
if start is None and end is None:
pass # depends on [control=['if'], data=[]]
elif start is not None and end is not None:
unix_start = timeformatutils.to_UNIXtime(start)
unix_end = timeformatutils.to_UNIXtime(end)
if unix_start >= unix_end:
raise ValueError('Error: the start time boundary must precede the end time!') # depends on [control=['if'], data=[]]
current_time = time()
if unix_start > current_time:
raise ValueError('Error: the start time boundary must precede the current time!') # depends on [control=['if'], data=[]]
params['start'] = str(unix_start)
params['end'] = str(unix_end) # depends on [control=['if'], data=[]]
else:
raise ValueError('Error: one of the time boundaries is None, while the other is not!')
uri = http_client.HttpClient.to_url(CITY_WEATHER_HISTORY_URL, self._API_key, self._subscription_type, self._use_ssl)
(_, json_data) = self._wapi.cacheable_get_json(uri, params=params)
return self._parsers['weather_history'].parse_JSON(json_data) |
def destroy(self):
    """Detach this item's marker from the owning map, then tear down."""
    item, owner = self.marker, self.parent()
    if item:
        # Drop the map's bookkeeping entry first, then remove the
        # marker widget itself.
        if owner:
            del owner.markers[item.__id__]
        item.remove()
    super(AndroidMapItemBase, self).destroy()
constant[ Remove the marker if it was added to the map when destroying]
variable[marker] assign[=] name[self].marker
variable[parent] assign[=] call[name[self].parent, parameter[]]
if name[marker] begin[:]
if name[parent] begin[:]
<ast.Delete object at 0x7da1b0012740>
call[name[marker].remove, parameter[]]
call[call[name[super], parameter[name[AndroidMapItemBase], name[self]]].destroy, parameter[]] | keyword[def] identifier[destroy] ( identifier[self] ):
literal[string]
identifier[marker] = identifier[self] . identifier[marker]
identifier[parent] = identifier[self] . identifier[parent] ()
keyword[if] identifier[marker] :
keyword[if] identifier[parent] :
keyword[del] identifier[parent] . identifier[markers] [ identifier[marker] . identifier[__id__] ]
identifier[marker] . identifier[remove] ()
identifier[super] ( identifier[AndroidMapItemBase] , identifier[self] ). identifier[destroy] () | def destroy(self):
""" Remove the marker if it was added to the map when destroying"""
marker = self.marker
parent = self.parent()
if marker:
if parent:
del parent.markers[marker.__id__] # depends on [control=['if'], data=[]]
marker.remove() # depends on [control=['if'], data=[]]
super(AndroidMapItemBase, self).destroy() |
def prompt(name, default=None):
    """
    Grab user input from command line.

    Repeatedly asks until a non-empty value is entered; if *default* is
    supplied, an empty reply returns it instead.

    :param name: prompt text
    :param default: default value if no input provided.
    """
    # Show the default in brackets whenever one was supplied.  The old
    # ``default and X or Y`` idiom hid falsy defaults (0, '') from the
    # displayed prompt even though they would still be returned.
    text = name + (' [%s]' % default if default is not None else '')
    # Questions get a bare space, statements a colon separator.
    text += ' ' if name.endswith('?') else ': '
    while True:
        try:
            rv = raw_input(text)
        except NameError:
            # Python 3: raw_input does not exist, use input().
            rv = input(text)
        if rv:
            return rv
        if default is not None:
            return default
constant[
Grab user input from command line.
:param name: prompt text
:param default: default value if no input provided.
]
variable[prompt] assign[=] binary_operation[name[name] + <ast.BoolOp object at 0x7da18eb57cd0>]
<ast.AugAssign object at 0x7da18eb56f50>
while constant[True] begin[:]
<ast.Try object at 0x7da18dc07af0>
if name[rv] begin[:]
return[name[rv]]
if compare[name[default] is_not constant[None]] begin[:]
return[name[default]] | keyword[def] identifier[prompt] ( identifier[name] , identifier[default] = keyword[None] ):
literal[string]
identifier[prompt] = identifier[name] +( identifier[default] keyword[and] literal[string] % identifier[default] keyword[or] literal[string] )
identifier[prompt] += identifier[name] . identifier[endswith] ( literal[string] ) keyword[and] literal[string] keyword[or] literal[string]
keyword[while] keyword[True] :
keyword[try] :
identifier[rv] = identifier[raw_input] ( identifier[prompt] )
keyword[except] identifier[NameError] :
identifier[rv] = identifier[input] ( identifier[prompt] )
keyword[if] identifier[rv] :
keyword[return] identifier[rv]
keyword[if] identifier[default] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[default] | def prompt(name, default=None):
"""
Grab user input from command line.
:param name: prompt text
:param default: default value if no input provided.
"""
prompt = name + (default and ' [%s]' % default or '')
prompt += name.endswith('?') and ' ' or ': '
while True:
try:
rv = raw_input(prompt) # depends on [control=['try'], data=[]]
except NameError:
rv = input(prompt) # depends on [control=['except'], data=[]]
if rv:
return rv # depends on [control=['if'], data=[]]
if default is not None:
return default # depends on [control=['if'], data=['default']] # depends on [control=['while'], data=[]] |
def RepositoryName(self):
    """FullName after removing the local path to the repository.

    If we have a real absolute path name here we can try to do something smart:
    detecting the root of the checkout and truncating /path/to/checkout from
    the name so that we get header guards that don't include things like
    "C:\Documents and Settings\..." or "/home/username/..." in them and thus
    people on different computers who have checked the source out to different
    locations won't see bogus errors.

    Returns the path relative to the detected repository root, or the
    unmodified full name when the file does not exist on disk or no
    SVN/git/hg root can be found above it.
    """
    fullname = self.FullName()
    if os.path.exists(fullname):
      project_dir = os.path.dirname(fullname)
      if os.path.exists(os.path.join(project_dir, ".svn")):
        # If there's a .svn file in the current directory, we recursively look
        # up the directory tree for the top of the SVN checkout
        # (SVN <= 1.6 placed a .svn directory in every checked-out directory).
        root_dir = project_dir
        one_up_dir = os.path.dirname(root_dir)
        while os.path.exists(os.path.join(one_up_dir, ".svn")):
          root_dir = os.path.dirname(root_dir)
          one_up_dir = os.path.dirname(one_up_dir)
        # Strip the checkout root plus its trailing separator.
        prefix = os.path.commonprefix([root_dir, project_dir])
        return fullname[len(prefix) + 1:]
      # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
      # searching up from the current path.
      root_dir = os.path.dirname(fullname)
      # os.path.dirname() of a filesystem root returns the root itself, so
      # this loop terminates at the top of the tree if no VCS dir is found.
      while (root_dir != os.path.dirname(root_dir) and
             not os.path.exists(os.path.join(root_dir, ".git")) and
             not os.path.exists(os.path.join(root_dir, ".hg")) and
             not os.path.exists(os.path.join(root_dir, ".svn"))):
        root_dir = os.path.dirname(root_dir)
      if (os.path.exists(os.path.join(root_dir, ".git")) or
          os.path.exists(os.path.join(root_dir, ".hg")) or
          os.path.exists(os.path.join(root_dir, ".svn"))):
        prefix = os.path.commonprefix([root_dir, project_dir])
        return fullname[len(prefix) + 1:]
    # Don't know what to do; header guard warnings may be wrong...
    return fullname
constant[FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
"C:\Documents and Settings\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
]
variable[fullname] assign[=] call[name[self].FullName, parameter[]]
if call[name[os].path.exists, parameter[name[fullname]]] begin[:]
variable[project_dir] assign[=] call[name[os].path.dirname, parameter[name[fullname]]]
if call[name[os].path.exists, parameter[call[name[os].path.join, parameter[name[project_dir], constant[.svn]]]]] begin[:]
variable[root_dir] assign[=] name[project_dir]
variable[one_up_dir] assign[=] call[name[os].path.dirname, parameter[name[root_dir]]]
while call[name[os].path.exists, parameter[call[name[os].path.join, parameter[name[one_up_dir], constant[.svn]]]]] begin[:]
variable[root_dir] assign[=] call[name[os].path.dirname, parameter[name[root_dir]]]
variable[one_up_dir] assign[=] call[name[os].path.dirname, parameter[name[one_up_dir]]]
variable[prefix] assign[=] call[name[os].path.commonprefix, parameter[list[[<ast.Name object at 0x7da204960400>, <ast.Name object at 0x7da204961840>]]]]
return[call[name[fullname]][<ast.Slice object at 0x7da204960e50>]]
variable[root_dir] assign[=] call[name[os].path.dirname, parameter[name[fullname]]]
while <ast.BoolOp object at 0x7da204960940> begin[:]
variable[root_dir] assign[=] call[name[os].path.dirname, parameter[name[root_dir]]]
if <ast.BoolOp object at 0x7da204962350> begin[:]
variable[prefix] assign[=] call[name[os].path.commonprefix, parameter[list[[<ast.Name object at 0x7da204963a60>, <ast.Name object at 0x7da2049627d0>]]]]
return[call[name[fullname]][<ast.Slice object at 0x7da204960820>]]
return[name[fullname]] | keyword[def] identifier[RepositoryName] ( identifier[self] ):
literal[string]
identifier[fullname] = identifier[self] . identifier[FullName] ()
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[fullname] ):
identifier[project_dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[fullname] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[join] ( identifier[project_dir] , literal[string] )):
identifier[root_dir] = identifier[project_dir]
identifier[one_up_dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[root_dir] )
keyword[while] identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[join] ( identifier[one_up_dir] , literal[string] )):
identifier[root_dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[root_dir] )
identifier[one_up_dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[one_up_dir] )
identifier[prefix] = identifier[os] . identifier[path] . identifier[commonprefix] ([ identifier[root_dir] , identifier[project_dir] ])
keyword[return] identifier[fullname] [ identifier[len] ( identifier[prefix] )+ literal[int] :]
identifier[root_dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[fullname] )
keyword[while] ( identifier[root_dir] != identifier[os] . identifier[path] . identifier[dirname] ( identifier[root_dir] ) keyword[and]
keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[join] ( identifier[root_dir] , literal[string] )) keyword[and]
keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[join] ( identifier[root_dir] , literal[string] )) keyword[and]
keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[join] ( identifier[root_dir] , literal[string] ))):
identifier[root_dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[root_dir] )
keyword[if] ( identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[join] ( identifier[root_dir] , literal[string] )) keyword[or]
identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[join] ( identifier[root_dir] , literal[string] )) keyword[or]
identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[join] ( identifier[root_dir] , literal[string] ))):
identifier[prefix] = identifier[os] . identifier[path] . identifier[commonprefix] ([ identifier[root_dir] , identifier[project_dir] ])
keyword[return] identifier[fullname] [ identifier[len] ( identifier[prefix] )+ literal[int] :]
keyword[return] identifier[fullname] | def RepositoryName(self):
"""FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
"C:\\Documents and Settings\\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
fullname = self.FullName()
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
if os.path.exists(os.path.join(project_dir, '.svn')):
# If there's a .svn file in the current directory, we recursively look
# up the directory tree for the top of the SVN checkout
root_dir = project_dir
one_up_dir = os.path.dirname(root_dir)
while os.path.exists(os.path.join(one_up_dir, '.svn')):
root_dir = os.path.dirname(root_dir)
one_up_dir = os.path.dirname(one_up_dir) # depends on [control=['while'], data=[]]
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:] # depends on [control=['if'], data=[]]
# Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
# searching up from the current path.
root_dir = os.path.dirname(fullname)
while root_dir != os.path.dirname(root_dir) and (not os.path.exists(os.path.join(root_dir, '.git'))) and (not os.path.exists(os.path.join(root_dir, '.hg'))) and (not os.path.exists(os.path.join(root_dir, '.svn'))):
root_dir = os.path.dirname(root_dir) # depends on [control=['while'], data=[]]
if os.path.exists(os.path.join(root_dir, '.git')) or os.path.exists(os.path.join(root_dir, '.hg')) or os.path.exists(os.path.join(root_dir, '.svn')):
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Don't know what to do; header guard warnings may be wrong...
return fullname |
def delete(table, chain=None, position=None, rule=None, family='ipv4'):
    '''
    Delete a rule from the specified table/chain.  The rule may be given
    either in its entirety (standard iptables command format, starting
    with the chain) or by its position in the chain — but not both.
    CLI Examples:
    .. code-block:: bash
        salt '*' iptables.delete filter INPUT position=3
        salt '*' iptables.delete filter INPUT \\
            rule='-m state --state RELATED,ESTABLISHED -j ACCEPT'
    IPv6:
        salt '*' iptables.delete filter INPUT position=3 family=ipv6
        salt '*' iptables.delete filter INPUT \\
            rule='-m state --state RELATED,ESTABLISHED -j ACCEPT' \\
            family=ipv6
    '''
    if position and rule:
        return 'Error: Only specify a position or a rule, not both'
    # A position, when given, takes the place of the rule spec in the
    # generated command line.
    target = position or rule
    wait = '--wait' if _has_option('--wait', family) else ''
    cmd = '{0} {1} -t {2} -D {3} {4}'.format(
        _iptables_cmd(family), wait, table, chain, target)
    return __salt__['cmd.run'](cmd)
constant[
Delete a rule from the specified table/chain, specifying either the rule
in its entirety, or the rule's position in the chain.
This function accepts a rule in a standard iptables command format,
starting with the chain. Trying to force users to adapt to a new
method of creating rules would be irritating at best, and we
already have a parser that can handle it.
CLI Examples:
.. code-block:: bash
salt '*' iptables.delete filter INPUT position=3
salt '*' iptables.delete filter INPUT \
rule='-m state --state RELATED,ESTABLISHED -j ACCEPT'
IPv6:
salt '*' iptables.delete filter INPUT position=3 family=ipv6
salt '*' iptables.delete filter INPUT \
rule='-m state --state RELATED,ESTABLISHED -j ACCEPT' \
family=ipv6
]
if <ast.BoolOp object at 0x7da204962350> begin[:]
return[constant[Error: Only specify a position or a rule, not both]]
if name[position] begin[:]
variable[rule] assign[=] name[position]
variable[wait] assign[=] <ast.IfExp object at 0x7da204961cc0>
variable[cmd] assign[=] call[constant[{0} {1} -t {2} -D {3} {4}].format, parameter[call[name[_iptables_cmd], parameter[name[family]]], name[wait], name[table], name[chain], name[rule]]]
variable[out] assign[=] call[call[name[__salt__]][constant[cmd.run]], parameter[name[cmd]]]
return[name[out]] | keyword[def] identifier[delete] ( identifier[table] , identifier[chain] = keyword[None] , identifier[position] = keyword[None] , identifier[rule] = keyword[None] , identifier[family] = literal[string] ):
literal[string]
keyword[if] identifier[position] keyword[and] identifier[rule] :
keyword[return] literal[string]
keyword[if] identifier[position] :
identifier[rule] = identifier[position]
identifier[wait] = literal[string] keyword[if] identifier[_has_option] ( literal[string] , identifier[family] ) keyword[else] literal[string]
identifier[cmd] = literal[string] . identifier[format] (
identifier[_iptables_cmd] ( identifier[family] ), identifier[wait] , identifier[table] , identifier[chain] , identifier[rule] )
identifier[out] = identifier[__salt__] [ literal[string] ]( identifier[cmd] )
keyword[return] identifier[out] | def delete(table, chain=None, position=None, rule=None, family='ipv4'):
"""
Delete a rule from the specified table/chain, specifying either the rule
in its entirety, or the rule's position in the chain.
This function accepts a rule in a standard iptables command format,
starting with the chain. Trying to force users to adapt to a new
method of creating rules would be irritating at best, and we
already have a parser that can handle it.
CLI Examples:
.. code-block:: bash
salt '*' iptables.delete filter INPUT position=3
salt '*' iptables.delete filter INPUT \\
rule='-m state --state RELATED,ESTABLISHED -j ACCEPT'
IPv6:
salt '*' iptables.delete filter INPUT position=3 family=ipv6
salt '*' iptables.delete filter INPUT \\
rule='-m state --state RELATED,ESTABLISHED -j ACCEPT' \\
family=ipv6
"""
if position and rule:
return 'Error: Only specify a position or a rule, not both' # depends on [control=['if'], data=[]]
if position:
rule = position # depends on [control=['if'], data=[]]
wait = '--wait' if _has_option('--wait', family) else ''
cmd = '{0} {1} -t {2} -D {3} {4}'.format(_iptables_cmd(family), wait, table, chain, rule)
out = __salt__['cmd.run'](cmd)
return out |
def BVirial_Tsonopoulos_extended(T, Tc, Pc, omega, a=0, b=0, species_type='',
dipole=0, order=0):
r'''Calculates the second virial coefficient using the
comprehensive model in [1]_. See the notes for the calculation of `a` and
`b`.
.. math::
\frac{BP_c}{RT_c} = B^{(0)} + \omega B^{(1)} + a B^{(2)} + b B^{(3)}
B^{(0)}=0.1445-0.33/T_r-0.1385/T_r^2-0.0121/T_r^3
B^{(1)} = 0.0637+0.331/T_r^2-0.423/T_r^3 -0.423/T_r^3 - 0.008/T_r^8
B^{(2)} = 1/T_r^6
B^{(3)} = -1/T_r^8
Parameters
----------
T : float
Temperature of fluid [K]
Tc : float
Critical temperature of fluid [K]
Pc : float
Critical pressure of the fluid [Pa]
omega : float
Acentric factor for fluid, [-]
a : float, optional
Fit parameter, calculated based on species_type if a is not given and
species_type matches on of the supported chemical classes.
b : float, optional
Fit parameter, calculated based on species_type if a is not given and
species_type matches on of the supported chemical classes.
species_type : str, optional
One of .
dipole : float
dipole moment, optional, [Debye]
order : int, optional
Order of the calculation. 0 for the calculation of B itself; for 1/2/3,
the first/second/third derivative of B with respect to temperature; and
for -1/-2, the first/second indefinite integral of B with respect to
temperature. No other integrals or derivatives are implemented, and an
exception will be raised if any other order is given.
Returns
-------
B : float
Second virial coefficient in density form or its integral/derivative if
specified, [m^3/mol or m^3/mol/K^order]
Notes
-----
Analytical models for derivatives and integrals are available for orders
-2, -1, 1, 2, and 3, all obtained with SymPy.
To calculate `a` or `b`, the following rules are used:
For 'simple' or 'normal' fluids:
.. math::
a = 0
b = 0
For 'ketone', 'aldehyde', 'alkyl nitrile', 'ether', 'carboxylic acid',
or 'ester' types of chemicals:
.. math::
a = -2.14\times 10^{-4} \mu_r - 4.308 \times 10^{-21} (\mu_r)^8
b = 0
For 'alkyl halide', 'mercaptan', 'sulfide', or 'disulfide' types of
chemicals:
.. math::
a = -2.188\times 10^{-4} (\mu_r)^4 - 7.831 \times 10^{-21} (\mu_r)^8
b = 0
For 'alkanol' types of chemicals (except methanol):
.. math::
a = 0.0878
b = 0.00908 + 0.0006957 \mu_r
For methanol:
.. math::
a = 0.0878
b = 0.0525
For water:
.. math::
a = -0.0109
b = 0
If required, the form of dipole moment used in the calculation of some
types of `a` and `b` values is as follows:
.. math::
\mu_r = 100000\frac{\mu^2(Pc/101325.0)}{Tc^2}
For first temperature derivative of B:
.. math::
\frac{d B^{(0)}}{dT} = \frac{33 Tc}{100 T^{2}} + \frac{277 Tc^{2}}{1000 T^{3}} + \frac{363 Tc^{3}}{10000 T^{4}} + \frac{607 Tc^{8}}{125000 T^{9}}
\frac{d B^{(1)}}{dT} = - \frac{331 Tc^{2}}{500 T^{3}} + \frac{1269 Tc^{3}}{1000 T^{4}} + \frac{8 Tc^{8}}{125 T^{9}}
\frac{d B^{(2)}}{dT} = - \frac{6 Tc^{6}}{T^{7}}
\frac{d B^{(3)}}{dT} = \frac{8 Tc^{8}}{T^{9}}
For the second temperature derivative of B:
.. math::
\frac{d^2 B^{(0)}}{dT^2} = - \frac{3 Tc}{125000 T^{3}} \left(27500 + \frac{34625 Tc}{T} + \frac{6050 Tc^{2}}{T^{2}} + \frac{1821 Tc^{7}}{T^{7}}\right)
\frac{d^2 B^{(1)}}{dT^2} = \frac{3 Tc^{2}}{500 T^{4}} \left(331 - \frac{846 Tc}{T} - \frac{96 Tc^{6}}{T^{6}}\right)
\frac{d^2 B^{(2)}}{dT^2} = \frac{42 Tc^{6}}{T^{8}}
\frac{d^2 B^{(3)}}{dT^2} = - \frac{72 Tc^{8}}{T^{10}}
For the third temperature derivative of B:
.. math::
\frac{d^3 B^{(0)}}{dT^3} = \frac{3 Tc}{12500 T^{4}} \left(8250 + \frac{13850 Tc}{T} + \frac{3025 Tc^{2}}{T^{2}} + \frac{1821 Tc^{7}}{T^{7}}\right)
\frac{d^3 B^{(1)}}{dT^3} = \frac{3 Tc^{2}}{250 T^{5}} \left(-662 + \frac{2115 Tc}{T} + \frac{480 Tc^{6}}{T^{6}}\right)
\frac{d^3 B^{(2)}}{dT^3} = - \frac{336 Tc^{6}}{T^{9}}
\frac{d^3 B^{(3)}}{dT^3} = \frac{720 Tc^{8}}{T^{11}}
For the first indefinite integral of B:
.. math::
\int{B^{(0)}} dT = \frac{289 T}{2000} - \frac{33 Tc}{100} \log{\left (T \right )} + \frac{1}{7000000 T^{7}} \left(969500 T^{6} Tc^{2} + 42350 T^{5} Tc^{3} + 607 Tc^{8}\right)
\int{B^{(1)}} dT = \frac{637 T}{10000} - \frac{1}{70000 T^{7}} \left(23170 T^{6} Tc^{2} - 14805 T^{5} Tc^{3} - 80 Tc^{8}\right)
\int{B^{(2)}} dT = - \frac{Tc^{6}}{5 T^{5}}
\int{B^{(3)}} dT = \frac{Tc^{8}}{7 T^{7}}
For the second indefinite integral of B:
.. math::
\int\int B^{(0)} dT dT = \frac{289 T^{2}}{4000} - \frac{33 T}{100} Tc \log{\left (T \right )} + \frac{33 T}{100} Tc + \frac{277 Tc^{2}}{2000} \log{\left (T \right )} - \frac{1}{42000000 T^{6}} \left(254100 T^{5} Tc^{3} + 607 Tc^{8}\right)
\int\int B^{(1)} dT dT = \frac{637 T^{2}}{20000} - \frac{331 Tc^{2}}{1000} \log{\left (T \right )} - \frac{1}{210000 T^{6}} \left(44415 T^{5} Tc^{3} + 40 Tc^{8}\right)
\int\int B^{(2)} dT dT = \frac{Tc^{6}}{20 T^{4}}
\int\int B^{(3)} dT dT = - \frac{Tc^{8}}{42 T^{6}}
Examples
--------
Example from Perry's Handbook, 8E, p2-499. Matches to a decimal place.
>>> BVirial_Tsonopoulos_extended(430., 405.65, 11.28E6, 0.252608, a=0, b=0, species_type='ketone', dipole=1.469)
-9.679715056695323e-05
References
----------
.. [1] Tsonopoulos, C., and J. L. Heidman. "From the Virial to the Cubic
Equation of State." Fluid Phase Equilibria 57, no. 3 (1990): 261-76.
doi:10.1016/0378-3812(90)85126-U
.. [2] Tsonopoulos, Constantine, and John H. Dymond. "Second Virial
Coefficients of Normal Alkanes, Linear 1-Alkanols (and Water), Alkyl
Ethers, and Their Mixtures." Fluid Phase Equilibria, International
Workshop on Vapour-Liquid Equilibria and Related Properties in Binary
and Ternary Mixtures of Ethers, Alkanes and Alkanols, 133, no. 1-2
(June 1997): 11-34. doi:10.1016/S0378-3812(97)00058-7.
'''
Tr = T/Tc
if order == 0:
B0 = 0.1445 - 0.33/Tr - 0.1385/Tr**2 - 0.0121/Tr**3 - 0.000607/Tr**8
B1 = 0.0637 + 0.331/Tr**2 - 0.423/Tr**3 - 0.008/Tr**8
B2 = 1./Tr**6
B3 = -1./Tr**8
elif order == 1:
B0 = 33*Tc/(100*T**2) + 277*Tc**2/(1000*T**3) + 363*Tc**3/(10000*T**4) + 607*Tc**8/(125000*T**9)
B1 = -331*Tc**2/(500*T**3) + 1269*Tc**3/(1000*T**4) + 8*Tc**8/(125*T**9)
B2 = -6.0*Tc**6/T**7
B3 = 8.0*Tc**8/T**9
elif order == 2:
B0 = -3*Tc*(27500 + 34625*Tc/T + 6050*Tc**2/T**2 + 1821*Tc**7/T**7)/(125000*T**3)
B1 = 3*Tc**2*(331 - 846*Tc/T - 96*Tc**6/T**6)/(500*T**4)
B2 = 42.0*Tc**6/T**8
B3 = -72.0*Tc**8/T**10
elif order == 3:
B0 = 3*Tc*(8250 + 13850*Tc/T + 3025*Tc**2/T**2 + 1821*Tc**7/T**7)/(12500*T**4)
B1 = 3*Tc**2*(-662 + 2115*Tc/T + 480*Tc**6/T**6)/(250*T**5)
B2 = -336.0*Tc**6/T**9
B3 = 720.0*Tc**8/T**11
elif order == -1:
B0 = 289*T/2000. - 33*Tc*log(T)/100. + (969500*T**6*Tc**2 + 42350*T**5*Tc**3 + 607*Tc**8)/(7000000.*T**7)
B1 = 637*T/10000. - (23170*T**6*Tc**2 - 14805*T**5*Tc**3 - 80*Tc**8)/(70000.*T**7)
B2 = -Tc**6/(5*T**5)
B3 = Tc**8/(7*T**7)
elif order == -2:
B0 = 289*T**2/4000. - 33*T*Tc*log(T)/100. + 33*T*Tc/100. + 277*Tc**2*log(T)/2000. - (254100*T**5*Tc**3 + 607*Tc**8)/(42000000.*T**6)
B1 = 637*T**2/20000. - 331*Tc**2*log(T)/1000. - (44415*T**5*Tc**3 + 40*Tc**8)/(210000.*T**6)
B2 = Tc**6/(20*T**4)
B3 = -Tc**8/(42*T**6)
else:
raise Exception('Only orders -2, -1, 0, 1, 2 and 3 are supported.')
if a == 0 and b == 0 and species_type != '':
if species_type == 'simple' or species_type == 'normal':
a, b = 0, 0
elif species_type == 'methyl alcohol':
a, b = 0.0878, 0.0525
elif species_type == 'water':
a, b = -0.0109, 0
elif dipole != 0 and Tc != 0 and Pc != 0:
dipole_r = 1E5*dipole**2*(Pc/101325.0)/Tc**2
if (species_type == 'ketone' or species_type == 'aldehyde'
or species_type == 'alkyl nitrile' or species_type == 'ether'
or species_type == 'carboxylic acid' or species_type == 'ester'):
a, b = -2.14E-4*dipole_r-4.308E-21*dipole_r**8, 0
elif (species_type == 'alkyl halide' or species_type == 'mercaptan'
or species_type == 'sulfide' or species_type == 'disulfide'):
a, b = -2.188E-4*dipole_r**4-7.831E-21*dipole_r**8, 0
elif species_type == 'alkanol':
a, b = 0.0878, 0.00908+0.0006957*dipole_r
Br = B0 + omega*B1 + a*B2 + b*B3
return Br*R*Tc/Pc | def function[BVirial_Tsonopoulos_extended, parameter[T, Tc, Pc, omega, a, b, species_type, dipole, order]]:
constant[Calculates the second virial coefficient using the
comprehensive model in [1]_. See the notes for the calculation of `a` and
`b`.
.. math::
\frac{BP_c}{RT_c} = B^{(0)} + \omega B^{(1)} + a B^{(2)} + b B^{(3)}
B^{(0)}=0.1445-0.33/T_r-0.1385/T_r^2-0.0121/T_r^3
B^{(1)} = 0.0637+0.331/T_r^2-0.423/T_r^3 -0.423/T_r^3 - 0.008/T_r^8
B^{(2)} = 1/T_r^6
B^{(3)} = -1/T_r^8
Parameters
----------
T : float
Temperature of fluid [K]
Tc : float
Critical temperature of fluid [K]
Pc : float
Critical pressure of the fluid [Pa]
omega : float
Acentric factor for fluid, [-]
a : float, optional
Fit parameter, calculated based on species_type if a is not given and
species_type matches on of the supported chemical classes.
b : float, optional
Fit parameter, calculated based on species_type if a is not given and
species_type matches on of the supported chemical classes.
species_type : str, optional
One of .
dipole : float
dipole moment, optional, [Debye]
order : int, optional
Order of the calculation. 0 for the calculation of B itself; for 1/2/3,
the first/second/third derivative of B with respect to temperature; and
for -1/-2, the first/second indefinite integral of B with respect to
temperature. No other integrals or derivatives are implemented, and an
exception will be raised if any other order is given.
Returns
-------
B : float
Second virial coefficient in density form or its integral/derivative if
specified, [m^3/mol or m^3/mol/K^order]
Notes
-----
Analytical models for derivatives and integrals are available for orders
-2, -1, 1, 2, and 3, all obtained with SymPy.
To calculate `a` or `b`, the following rules are used:
For 'simple' or 'normal' fluids:
.. math::
a = 0
b = 0
For 'ketone', 'aldehyde', 'alkyl nitrile', 'ether', 'carboxylic acid',
or 'ester' types of chemicals:
.. math::
a = -2.14\times 10^{-4} \mu_r - 4.308 \times 10^{-21} (\mu_r)^8
b = 0
For 'alkyl halide', 'mercaptan', 'sulfide', or 'disulfide' types of
chemicals:
.. math::
a = -2.188\times 10^{-4} (\mu_r)^4 - 7.831 \times 10^{-21} (\mu_r)^8
b = 0
For 'alkanol' types of chemicals (except methanol):
.. math::
a = 0.0878
b = 0.00908 + 0.0006957 \mu_r
For methanol:
.. math::
a = 0.0878
b = 0.0525
For water:
.. math::
a = -0.0109
b = 0
If required, the form of dipole moment used in the calculation of some
types of `a` and `b` values is as follows:
.. math::
\mu_r = 100000\frac{\mu^2(Pc/101325.0)}{Tc^2}
For first temperature derivative of B:
.. math::
\frac{d B^{(0)}}{dT} = \frac{33 Tc}{100 T^{2}} + \frac{277 Tc^{2}}{1000 T^{3}} + \frac{363 Tc^{3}}{10000 T^{4}} + \frac{607 Tc^{8}}{125000 T^{9}}
\frac{d B^{(1)}}{dT} = - \frac{331 Tc^{2}}{500 T^{3}} + \frac{1269 Tc^{3}}{1000 T^{4}} + \frac{8 Tc^{8}}{125 T^{9}}
\frac{d B^{(2)}}{dT} = - \frac{6 Tc^{6}}{T^{7}}
\frac{d B^{(3)}}{dT} = \frac{8 Tc^{8}}{T^{9}}
For the second temperature derivative of B:
.. math::
\frac{d^2 B^{(0)}}{dT^2} = - \frac{3 Tc}{125000 T^{3}} \left(27500 + \frac{34625 Tc}{T} + \frac{6050 Tc^{2}}{T^{2}} + \frac{1821 Tc^{7}}{T^{7}}\right)
\frac{d^2 B^{(1)}}{dT^2} = \frac{3 Tc^{2}}{500 T^{4}} \left(331 - \frac{846 Tc}{T} - \frac{96 Tc^{6}}{T^{6}}\right)
\frac{d^2 B^{(2)}}{dT^2} = \frac{42 Tc^{6}}{T^{8}}
\frac{d^2 B^{(3)}}{dT^2} = - \frac{72 Tc^{8}}{T^{10}}
For the third temperature derivative of B:
.. math::
\frac{d^3 B^{(0)}}{dT^3} = \frac{3 Tc}{12500 T^{4}} \left(8250 + \frac{13850 Tc}{T} + \frac{3025 Tc^{2}}{T^{2}} + \frac{1821 Tc^{7}}{T^{7}}\right)
\frac{d^3 B^{(1)}}{dT^3} = \frac{3 Tc^{2}}{250 T^{5}} \left(-662 + \frac{2115 Tc}{T} + \frac{480 Tc^{6}}{T^{6}}\right)
\frac{d^3 B^{(2)}}{dT^3} = - \frac{336 Tc^{6}}{T^{9}}
\frac{d^3 B^{(3)}}{dT^3} = \frac{720 Tc^{8}}{T^{11}}
For the first indefinite integral of B:
.. math::
\int{B^{(0)}} dT = \frac{289 T}{2000} - \frac{33 Tc}{100} \log{\left (T \right )} + \frac{1}{7000000 T^{7}} \left(969500 T^{6} Tc^{2} + 42350 T^{5} Tc^{3} + 607 Tc^{8}\right)
\int{B^{(1)}} dT = \frac{637 T}{10000} - \frac{1}{70000 T^{7}} \left(23170 T^{6} Tc^{2} - 14805 T^{5} Tc^{3} - 80 Tc^{8}\right)
\int{B^{(2)}} dT = - \frac{Tc^{6}}{5 T^{5}}
\int{B^{(3)}} dT = \frac{Tc^{8}}{7 T^{7}}
For the second indefinite integral of B:
.. math::
\int\int B^{(0)} dT dT = \frac{289 T^{2}}{4000} - \frac{33 T}{100} Tc \log{\left (T \right )} + \frac{33 T}{100} Tc + \frac{277 Tc^{2}}{2000} \log{\left (T \right )} - \frac{1}{42000000 T^{6}} \left(254100 T^{5} Tc^{3} + 607 Tc^{8}\right)
\int\int B^{(1)} dT dT = \frac{637 T^{2}}{20000} - \frac{331 Tc^{2}}{1000} \log{\left (T \right )} - \frac{1}{210000 T^{6}} \left(44415 T^{5} Tc^{3} + 40 Tc^{8}\right)
\int\int B^{(2)} dT dT = \frac{Tc^{6}}{20 T^{4}}
\int\int B^{(3)} dT dT = - \frac{Tc^{8}}{42 T^{6}}
Examples
--------
Example from Perry's Handbook, 8E, p2-499. Matches to a decimal place.
>>> BVirial_Tsonopoulos_extended(430., 405.65, 11.28E6, 0.252608, a=0, b=0, species_type='ketone', dipole=1.469)
-9.679715056695323e-05
References
----------
.. [1] Tsonopoulos, C., and J. L. Heidman. "From the Virial to the Cubic
Equation of State." Fluid Phase Equilibria 57, no. 3 (1990): 261-76.
doi:10.1016/0378-3812(90)85126-U
.. [2] Tsonopoulos, Constantine, and John H. Dymond. "Second Virial
Coefficients of Normal Alkanes, Linear 1-Alkanols (and Water), Alkyl
Ethers, and Their Mixtures." Fluid Phase Equilibria, International
Workshop on Vapour-Liquid Equilibria and Related Properties in Binary
and Ternary Mixtures of Ethers, Alkanes and Alkanols, 133, no. 1-2
(June 1997): 11-34. doi:10.1016/S0378-3812(97)00058-7.
]
variable[Tr] assign[=] binary_operation[name[T] / name[Tc]]
if compare[name[order] equal[==] constant[0]] begin[:]
variable[B0] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[constant[0.1445] - binary_operation[constant[0.33] / name[Tr]]] - binary_operation[constant[0.1385] / binary_operation[name[Tr] ** constant[2]]]] - binary_operation[constant[0.0121] / binary_operation[name[Tr] ** constant[3]]]] - binary_operation[constant[0.000607] / binary_operation[name[Tr] ** constant[8]]]]
variable[B1] assign[=] binary_operation[binary_operation[binary_operation[constant[0.0637] + binary_operation[constant[0.331] / binary_operation[name[Tr] ** constant[2]]]] - binary_operation[constant[0.423] / binary_operation[name[Tr] ** constant[3]]]] - binary_operation[constant[0.008] / binary_operation[name[Tr] ** constant[8]]]]
variable[B2] assign[=] binary_operation[constant[1.0] / binary_operation[name[Tr] ** constant[6]]]
variable[B3] assign[=] binary_operation[<ast.UnaryOp object at 0x7da204960070> / binary_operation[name[Tr] ** constant[8]]]
if <ast.BoolOp object at 0x7da2054a61a0> begin[:]
if <ast.BoolOp object at 0x7da2054a45b0> begin[:]
<ast.Tuple object at 0x7da2054a55d0> assign[=] tuple[[<ast.Constant object at 0x7da2054a4700>, <ast.Constant object at 0x7da2054a7790>]]
variable[Br] assign[=] binary_operation[binary_operation[binary_operation[name[B0] + binary_operation[name[omega] * name[B1]]] + binary_operation[name[a] * name[B2]]] + binary_operation[name[b] * name[B3]]]
return[binary_operation[binary_operation[binary_operation[name[Br] * name[R]] * name[Tc]] / name[Pc]]] | keyword[def] identifier[BVirial_Tsonopoulos_extended] ( identifier[T] , identifier[Tc] , identifier[Pc] , identifier[omega] , identifier[a] = literal[int] , identifier[b] = literal[int] , identifier[species_type] = literal[string] ,
identifier[dipole] = literal[int] , identifier[order] = literal[int] ):
literal[string]
identifier[Tr] = identifier[T] / identifier[Tc]
keyword[if] identifier[order] == literal[int] :
identifier[B0] = literal[int] - literal[int] / identifier[Tr] - literal[int] / identifier[Tr] ** literal[int] - literal[int] / identifier[Tr] ** literal[int] - literal[int] / identifier[Tr] ** literal[int]
identifier[B1] = literal[int] + literal[int] / identifier[Tr] ** literal[int] - literal[int] / identifier[Tr] ** literal[int] - literal[int] / identifier[Tr] ** literal[int]
identifier[B2] = literal[int] / identifier[Tr] ** literal[int]
identifier[B3] =- literal[int] / identifier[Tr] ** literal[int]
keyword[elif] identifier[order] == literal[int] :
identifier[B0] = literal[int] * identifier[Tc] /( literal[int] * identifier[T] ** literal[int] )+ literal[int] * identifier[Tc] ** literal[int] /( literal[int] * identifier[T] ** literal[int] )+ literal[int] * identifier[Tc] ** literal[int] /( literal[int] * identifier[T] ** literal[int] )+ literal[int] * identifier[Tc] ** literal[int] /( literal[int] * identifier[T] ** literal[int] )
identifier[B1] =- literal[int] * identifier[Tc] ** literal[int] /( literal[int] * identifier[T] ** literal[int] )+ literal[int] * identifier[Tc] ** literal[int] /( literal[int] * identifier[T] ** literal[int] )+ literal[int] * identifier[Tc] ** literal[int] /( literal[int] * identifier[T] ** literal[int] )
identifier[B2] =- literal[int] * identifier[Tc] ** literal[int] / identifier[T] ** literal[int]
identifier[B3] = literal[int] * identifier[Tc] ** literal[int] / identifier[T] ** literal[int]
keyword[elif] identifier[order] == literal[int] :
identifier[B0] =- literal[int] * identifier[Tc] *( literal[int] + literal[int] * identifier[Tc] / identifier[T] + literal[int] * identifier[Tc] ** literal[int] / identifier[T] ** literal[int] + literal[int] * identifier[Tc] ** literal[int] / identifier[T] ** literal[int] )/( literal[int] * identifier[T] ** literal[int] )
identifier[B1] = literal[int] * identifier[Tc] ** literal[int] *( literal[int] - literal[int] * identifier[Tc] / identifier[T] - literal[int] * identifier[Tc] ** literal[int] / identifier[T] ** literal[int] )/( literal[int] * identifier[T] ** literal[int] )
identifier[B2] = literal[int] * identifier[Tc] ** literal[int] / identifier[T] ** literal[int]
identifier[B3] =- literal[int] * identifier[Tc] ** literal[int] / identifier[T] ** literal[int]
keyword[elif] identifier[order] == literal[int] :
identifier[B0] = literal[int] * identifier[Tc] *( literal[int] + literal[int] * identifier[Tc] / identifier[T] + literal[int] * identifier[Tc] ** literal[int] / identifier[T] ** literal[int] + literal[int] * identifier[Tc] ** literal[int] / identifier[T] ** literal[int] )/( literal[int] * identifier[T] ** literal[int] )
identifier[B1] = literal[int] * identifier[Tc] ** literal[int] *(- literal[int] + literal[int] * identifier[Tc] / identifier[T] + literal[int] * identifier[Tc] ** literal[int] / identifier[T] ** literal[int] )/( literal[int] * identifier[T] ** literal[int] )
identifier[B2] =- literal[int] * identifier[Tc] ** literal[int] / identifier[T] ** literal[int]
identifier[B3] = literal[int] * identifier[Tc] ** literal[int] / identifier[T] ** literal[int]
keyword[elif] identifier[order] ==- literal[int] :
identifier[B0] = literal[int] * identifier[T] / literal[int] - literal[int] * identifier[Tc] * identifier[log] ( identifier[T] )/ literal[int] +( literal[int] * identifier[T] ** literal[int] * identifier[Tc] ** literal[int] + literal[int] * identifier[T] ** literal[int] * identifier[Tc] ** literal[int] + literal[int] * identifier[Tc] ** literal[int] )/( literal[int] * identifier[T] ** literal[int] )
identifier[B1] = literal[int] * identifier[T] / literal[int] -( literal[int] * identifier[T] ** literal[int] * identifier[Tc] ** literal[int] - literal[int] * identifier[T] ** literal[int] * identifier[Tc] ** literal[int] - literal[int] * identifier[Tc] ** literal[int] )/( literal[int] * identifier[T] ** literal[int] )
identifier[B2] =- identifier[Tc] ** literal[int] /( literal[int] * identifier[T] ** literal[int] )
identifier[B3] = identifier[Tc] ** literal[int] /( literal[int] * identifier[T] ** literal[int] )
keyword[elif] identifier[order] ==- literal[int] :
identifier[B0] = literal[int] * identifier[T] ** literal[int] / literal[int] - literal[int] * identifier[T] * identifier[Tc] * identifier[log] ( identifier[T] )/ literal[int] + literal[int] * identifier[T] * identifier[Tc] / literal[int] + literal[int] * identifier[Tc] ** literal[int] * identifier[log] ( identifier[T] )/ literal[int] -( literal[int] * identifier[T] ** literal[int] * identifier[Tc] ** literal[int] + literal[int] * identifier[Tc] ** literal[int] )/( literal[int] * identifier[T] ** literal[int] )
identifier[B1] = literal[int] * identifier[T] ** literal[int] / literal[int] - literal[int] * identifier[Tc] ** literal[int] * identifier[log] ( identifier[T] )/ literal[int] -( literal[int] * identifier[T] ** literal[int] * identifier[Tc] ** literal[int] + literal[int] * identifier[Tc] ** literal[int] )/( literal[int] * identifier[T] ** literal[int] )
identifier[B2] = identifier[Tc] ** literal[int] /( literal[int] * identifier[T] ** literal[int] )
identifier[B3] =- identifier[Tc] ** literal[int] /( literal[int] * identifier[T] ** literal[int] )
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[a] == literal[int] keyword[and] identifier[b] == literal[int] keyword[and] identifier[species_type] != literal[string] :
keyword[if] identifier[species_type] == literal[string] keyword[or] identifier[species_type] == literal[string] :
identifier[a] , identifier[b] = literal[int] , literal[int]
keyword[elif] identifier[species_type] == literal[string] :
identifier[a] , identifier[b] = literal[int] , literal[int]
keyword[elif] identifier[species_type] == literal[string] :
identifier[a] , identifier[b] =- literal[int] , literal[int]
keyword[elif] identifier[dipole] != literal[int] keyword[and] identifier[Tc] != literal[int] keyword[and] identifier[Pc] != literal[int] :
identifier[dipole_r] = literal[int] * identifier[dipole] ** literal[int] *( identifier[Pc] / literal[int] )/ identifier[Tc] ** literal[int]
keyword[if] ( identifier[species_type] == literal[string] keyword[or] identifier[species_type] == literal[string]
keyword[or] identifier[species_type] == literal[string] keyword[or] identifier[species_type] == literal[string]
keyword[or] identifier[species_type] == literal[string] keyword[or] identifier[species_type] == literal[string] ):
identifier[a] , identifier[b] =- literal[int] * identifier[dipole_r] - literal[int] * identifier[dipole_r] ** literal[int] , literal[int]
keyword[elif] ( identifier[species_type] == literal[string] keyword[or] identifier[species_type] == literal[string]
keyword[or] identifier[species_type] == literal[string] keyword[or] identifier[species_type] == literal[string] ):
identifier[a] , identifier[b] =- literal[int] * identifier[dipole_r] ** literal[int] - literal[int] * identifier[dipole_r] ** literal[int] , literal[int]
keyword[elif] identifier[species_type] == literal[string] :
identifier[a] , identifier[b] = literal[int] , literal[int] + literal[int] * identifier[dipole_r]
identifier[Br] = identifier[B0] + identifier[omega] * identifier[B1] + identifier[a] * identifier[B2] + identifier[b] * identifier[B3]
keyword[return] identifier[Br] * identifier[R] * identifier[Tc] / identifier[Pc] | def BVirial_Tsonopoulos_extended(T, Tc, Pc, omega, a=0, b=0, species_type='', dipole=0, order=0):
"""Calculates the second virial coefficient using the
comprehensive model in [1]_. See the notes for the calculation of `a` and
`b`.
.. math::
\\frac{BP_c}{RT_c} = B^{(0)} + \\omega B^{(1)} + a B^{(2)} + b B^{(3)}
B^{(0)}=0.1445-0.33/T_r-0.1385/T_r^2-0.0121/T_r^3
B^{(1)} = 0.0637+0.331/T_r^2-0.423/T_r^3 -0.423/T_r^3 - 0.008/T_r^8
B^{(2)} = 1/T_r^6
B^{(3)} = -1/T_r^8
Parameters
----------
T : float
Temperature of fluid [K]
Tc : float
Critical temperature of fluid [K]
Pc : float
Critical pressure of the fluid [Pa]
omega : float
Acentric factor for fluid, [-]
a : float, optional
Fit parameter, calculated based on species_type if a is not given and
species_type matches on of the supported chemical classes.
b : float, optional
Fit parameter, calculated based on species_type if a is not given and
species_type matches on of the supported chemical classes.
species_type : str, optional
One of .
dipole : float
dipole moment, optional, [Debye]
order : int, optional
Order of the calculation. 0 for the calculation of B itself; for 1/2/3,
the first/second/third derivative of B with respect to temperature; and
for -1/-2, the first/second indefinite integral of B with respect to
temperature. No other integrals or derivatives are implemented, and an
exception will be raised if any other order is given.
Returns
-------
B : float
Second virial coefficient in density form or its integral/derivative if
specified, [m^3/mol or m^3/mol/K^order]
Notes
-----
Analytical models for derivatives and integrals are available for orders
-2, -1, 1, 2, and 3, all obtained with SymPy.
To calculate `a` or `b`, the following rules are used:
For 'simple' or 'normal' fluids:
.. math::
a = 0
b = 0
For 'ketone', 'aldehyde', 'alkyl nitrile', 'ether', 'carboxylic acid',
or 'ester' types of chemicals:
.. math::
a = -2.14\\times 10^{-4} \\mu_r - 4.308 \\times 10^{-21} (\\mu_r)^8
b = 0
For 'alkyl halide', 'mercaptan', 'sulfide', or 'disulfide' types of
chemicals:
.. math::
a = -2.188\\times 10^{-4} (\\mu_r)^4 - 7.831 \\times 10^{-21} (\\mu_r)^8
b = 0
For 'alkanol' types of chemicals (except methanol):
.. math::
a = 0.0878
b = 0.00908 + 0.0006957 \\mu_r
For methanol:
.. math::
a = 0.0878
b = 0.0525
For water:
.. math::
a = -0.0109
b = 0
If required, the form of dipole moment used in the calculation of some
types of `a` and `b` values is as follows:
.. math::
\\mu_r = 100000\\frac{\\mu^2(Pc/101325.0)}{Tc^2}
For first temperature derivative of B:
.. math::
\\frac{d B^{(0)}}{dT} = \\frac{33 Tc}{100 T^{2}} + \\frac{277 Tc^{2}}{1000 T^{3}} + \\frac{363 Tc^{3}}{10000 T^{4}} + \\frac{607 Tc^{8}}{125000 T^{9}}
\\frac{d B^{(1)}}{dT} = - \\frac{331 Tc^{2}}{500 T^{3}} + \\frac{1269 Tc^{3}}{1000 T^{4}} + \\frac{8 Tc^{8}}{125 T^{9}}
\\frac{d B^{(2)}}{dT} = - \\frac{6 Tc^{6}}{T^{7}}
\\frac{d B^{(3)}}{dT} = \\frac{8 Tc^{8}}{T^{9}}
For the second temperature derivative of B:
.. math::
\\frac{d^2 B^{(0)}}{dT^2} = - \\frac{3 Tc}{125000 T^{3}} \\left(27500 + \\frac{34625 Tc}{T} + \\frac{6050 Tc^{2}}{T^{2}} + \\frac{1821 Tc^{7}}{T^{7}}\\right)
\\frac{d^2 B^{(1)}}{dT^2} = \\frac{3 Tc^{2}}{500 T^{4}} \\left(331 - \\frac{846 Tc}{T} - \\frac{96 Tc^{6}}{T^{6}}\\right)
\\frac{d^2 B^{(2)}}{dT^2} = \\frac{42 Tc^{6}}{T^{8}}
\\frac{d^2 B^{(3)}}{dT^2} = - \\frac{72 Tc^{8}}{T^{10}}
For the third temperature derivative of B:
.. math::
\\frac{d^3 B^{(0)}}{dT^3} = \\frac{3 Tc}{12500 T^{4}} \\left(8250 + \\frac{13850 Tc}{T} + \\frac{3025 Tc^{2}}{T^{2}} + \\frac{1821 Tc^{7}}{T^{7}}\\right)
\\frac{d^3 B^{(1)}}{dT^3} = \\frac{3 Tc^{2}}{250 T^{5}} \\left(-662 + \\frac{2115 Tc}{T} + \\frac{480 Tc^{6}}{T^{6}}\\right)
\\frac{d^3 B^{(2)}}{dT^3} = - \\frac{336 Tc^{6}}{T^{9}}
\\frac{d^3 B^{(3)}}{dT^3} = \\frac{720 Tc^{8}}{T^{11}}
For the first indefinite integral of B:
.. math::
\\int{B^{(0)}} dT = \\frac{289 T}{2000} - \\frac{33 Tc}{100} \\log{\\left (T \\right )} + \\frac{1}{7000000 T^{7}} \\left(969500 T^{6} Tc^{2} + 42350 T^{5} Tc^{3} + 607 Tc^{8}\\right)
\\int{B^{(1)}} dT = \\frac{637 T}{10000} - \\frac{1}{70000 T^{7}} \\left(23170 T^{6} Tc^{2} - 14805 T^{5} Tc^{3} - 80 Tc^{8}\\right)
\\int{B^{(2)}} dT = - \\frac{Tc^{6}}{5 T^{5}}
\\int{B^{(3)}} dT = \\frac{Tc^{8}}{7 T^{7}}
For the second indefinite integral of B:
.. math::
\\int\\int B^{(0)} dT dT = \\frac{289 T^{2}}{4000} - \\frac{33 T}{100} Tc \\log{\\left (T \\right )} + \\frac{33 T}{100} Tc + \\frac{277 Tc^{2}}{2000} \\log{\\left (T \\right )} - \\frac{1}{42000000 T^{6}} \\left(254100 T^{5} Tc^{3} + 607 Tc^{8}\\right)
\\int\\int B^{(1)} dT dT = \\frac{637 T^{2}}{20000} - \\frac{331 Tc^{2}}{1000} \\log{\\left (T \\right )} - \\frac{1}{210000 T^{6}} \\left(44415 T^{5} Tc^{3} + 40 Tc^{8}\\right)
\\int\\int B^{(2)} dT dT = \\frac{Tc^{6}}{20 T^{4}}
\\int\\int B^{(3)} dT dT = - \\frac{Tc^{8}}{42 T^{6}}
Examples
--------
Example from Perry's Handbook, 8E, p2-499. Matches to a decimal place.
>>> BVirial_Tsonopoulos_extended(430., 405.65, 11.28E6, 0.252608, a=0, b=0, species_type='ketone', dipole=1.469)
-9.679715056695323e-05
References
----------
.. [1] Tsonopoulos, C., and J. L. Heidman. "From the Virial to the Cubic
Equation of State." Fluid Phase Equilibria 57, no. 3 (1990): 261-76.
doi:10.1016/0378-3812(90)85126-U
.. [2] Tsonopoulos, Constantine, and John H. Dymond. "Second Virial
Coefficients of Normal Alkanes, Linear 1-Alkanols (and Water), Alkyl
Ethers, and Their Mixtures." Fluid Phase Equilibria, International
Workshop on Vapour-Liquid Equilibria and Related Properties in Binary
and Ternary Mixtures of Ethers, Alkanes and Alkanols, 133, no. 1-2
(June 1997): 11-34. doi:10.1016/S0378-3812(97)00058-7.
"""
Tr = T / Tc
if order == 0:
B0 = 0.1445 - 0.33 / Tr - 0.1385 / Tr ** 2 - 0.0121 / Tr ** 3 - 0.000607 / Tr ** 8
B1 = 0.0637 + 0.331 / Tr ** 2 - 0.423 / Tr ** 3 - 0.008 / Tr ** 8
B2 = 1.0 / Tr ** 6
B3 = -1.0 / Tr ** 8 # depends on [control=['if'], data=[]]
elif order == 1:
B0 = 33 * Tc / (100 * T ** 2) + 277 * Tc ** 2 / (1000 * T ** 3) + 363 * Tc ** 3 / (10000 * T ** 4) + 607 * Tc ** 8 / (125000 * T ** 9)
B1 = -331 * Tc ** 2 / (500 * T ** 3) + 1269 * Tc ** 3 / (1000 * T ** 4) + 8 * Tc ** 8 / (125 * T ** 9)
B2 = -6.0 * Tc ** 6 / T ** 7
B3 = 8.0 * Tc ** 8 / T ** 9 # depends on [control=['if'], data=[]]
elif order == 2:
B0 = -3 * Tc * (27500 + 34625 * Tc / T + 6050 * Tc ** 2 / T ** 2 + 1821 * Tc ** 7 / T ** 7) / (125000 * T ** 3)
B1 = 3 * Tc ** 2 * (331 - 846 * Tc / T - 96 * Tc ** 6 / T ** 6) / (500 * T ** 4)
B2 = 42.0 * Tc ** 6 / T ** 8
B3 = -72.0 * Tc ** 8 / T ** 10 # depends on [control=['if'], data=[]]
elif order == 3:
B0 = 3 * Tc * (8250 + 13850 * Tc / T + 3025 * Tc ** 2 / T ** 2 + 1821 * Tc ** 7 / T ** 7) / (12500 * T ** 4)
B1 = 3 * Tc ** 2 * (-662 + 2115 * Tc / T + 480 * Tc ** 6 / T ** 6) / (250 * T ** 5)
B2 = -336.0 * Tc ** 6 / T ** 9
B3 = 720.0 * Tc ** 8 / T ** 11 # depends on [control=['if'], data=[]]
elif order == -1:
B0 = 289 * T / 2000.0 - 33 * Tc * log(T) / 100.0 + (969500 * T ** 6 * Tc ** 2 + 42350 * T ** 5 * Tc ** 3 + 607 * Tc ** 8) / (7000000.0 * T ** 7)
B1 = 637 * T / 10000.0 - (23170 * T ** 6 * Tc ** 2 - 14805 * T ** 5 * Tc ** 3 - 80 * Tc ** 8) / (70000.0 * T ** 7)
B2 = -Tc ** 6 / (5 * T ** 5)
B3 = Tc ** 8 / (7 * T ** 7) # depends on [control=['if'], data=[]]
elif order == -2:
B0 = 289 * T ** 2 / 4000.0 - 33 * T * Tc * log(T) / 100.0 + 33 * T * Tc / 100.0 + 277 * Tc ** 2 * log(T) / 2000.0 - (254100 * T ** 5 * Tc ** 3 + 607 * Tc ** 8) / (42000000.0 * T ** 6)
B1 = 637 * T ** 2 / 20000.0 - 331 * Tc ** 2 * log(T) / 1000.0 - (44415 * T ** 5 * Tc ** 3 + 40 * Tc ** 8) / (210000.0 * T ** 6)
B2 = Tc ** 6 / (20 * T ** 4)
B3 = -Tc ** 8 / (42 * T ** 6) # depends on [control=['if'], data=[]]
else:
raise Exception('Only orders -2, -1, 0, 1, 2 and 3 are supported.')
if a == 0 and b == 0 and (species_type != ''):
if species_type == 'simple' or species_type == 'normal':
(a, b) = (0, 0) # depends on [control=['if'], data=[]]
elif species_type == 'methyl alcohol':
(a, b) = (0.0878, 0.0525) # depends on [control=['if'], data=[]]
elif species_type == 'water':
(a, b) = (-0.0109, 0) # depends on [control=['if'], data=[]]
elif dipole != 0 and Tc != 0 and (Pc != 0):
dipole_r = 100000.0 * dipole ** 2 * (Pc / 101325.0) / Tc ** 2
if species_type == 'ketone' or species_type == 'aldehyde' or species_type == 'alkyl nitrile' or (species_type == 'ether') or (species_type == 'carboxylic acid') or (species_type == 'ester'):
(a, b) = (-0.000214 * dipole_r - 4.308e-21 * dipole_r ** 8, 0) # depends on [control=['if'], data=[]]
elif species_type == 'alkyl halide' or species_type == 'mercaptan' or species_type == 'sulfide' or (species_type == 'disulfide'):
(a, b) = (-0.0002188 * dipole_r ** 4 - 7.831e-21 * dipole_r ** 8, 0) # depends on [control=['if'], data=[]]
elif species_type == 'alkanol':
(a, b) = (0.0878, 0.00908 + 0.0006957 * dipole_r) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
Br = B0 + omega * B1 + a * B2 + b * B3
return Br * R * Tc / Pc |
def to_markdown(self):
        """
        Generate a summary in markdown format.

        Builds a single markdown string from ``self.to_report_json()``: a
        summary section, the original association-file header, a table of
        contents, and a per-rule listing of validation messages.
        """
        # NOTE(review): this local deliberately shadows any imported ``json``
        # module for the rest of the method.
        json = self.to_report_json()
        # summary = json['summary']
        s = "# Group: {group} - Dataset: {dataset}\n".format(group=json["group"], dataset=json["dataset"])
        s += "\n## SUMMARY\n\n"
        s += "This report generated on {}\n\n".format(datetime.date.today())
        s += " * Associations: {}\n" . format(json["associations"])
        s += " * Lines in file (incl headers): {}\n" . format(json["lines"])
        s += " * Lines skipped: {}\n" . format(json["skipped_lines"])
        # Header from GAF
        s += "## Header From Original Association File\n\n"
        s += "\n".join(["> {} ".format(head) for head in self.header])
        ## Table of Contents
        # First pass: one TOC link per rule, skipping any rule whose metadata
        # tags intersect the configured suppression tags.
        s += "\n\n## Contents\n\n"
        for rule, messages in sorted(json["messages"].items(), key=lambda t: t[0]):
            # True when any configured suppression tag appears in this
            # rule's metadata tags.
            any_suppress_tag_in_rule_metadata = any([tag in self.config.rule_metadata.get(rule, {}).get("tags", []) for tag in self.config.suppress_rule_reporting_tags])
            # For each tag we say to suppress output for, check if it matches any tag in the rule. If any matches
            if self.config.rule_metadata and any_suppress_tag_in_rule_metadata:
                print("Skipping {rule_num} because the tag(s) '{tag}' are suppressed".format(rule_num=rule, tag=", ".join(self.config.suppress_rule_reporting_tags)))
                continue
            s += "[{rule}](#{rule})\n\n".format(rule=rule)
        # Second pass: the detailed message listing. The suppression check is
        # recomputed here exactly as in the TOC pass above so both passes
        # skip the same rules.
        s += "\n## MESSAGES\n\n"
        for (rule, messages) in sorted(json["messages"].items(), key=lambda t: t[0]):
            any_suppress_tag_in_rule_metadata = any([tag in self.config.rule_metadata.get(rule, {}).get("tags", []) for tag in self.config.suppress_rule_reporting_tags])
            # Skip if the rule metadata has "silent" as a tag
            if self.config.rule_metadata and any_suppress_tag_in_rule_metadata:
                # If there is a rule metadata, and the rule ID is in the config,
                # get the list of tags if present and check for existence of "silent".
                # If contained, continue to the next rule.
                continue
            s += "### {rule}\n\n".format(rule=rule)
            if rule != "other" and self.config.rule_metadata:
                s += "{title}\n\n".format(title=self.config.rule_metadata.get(rule, {}).get("title", ""))
            s += "* total: {amount}\n".format(amount=len(messages))
            if len(messages) > 0:
                s += "#### Messages\n"
            for message in messages:
                # Optional " (object)" suffix when the message names an object.
                obj = " ({})".format(message["obj"]) if message["obj"] else ""
                s += "* {level} - {type}: {message}{obj} -- `{line}`\n".format(level=message["level"], type=message["type"], message=message["message"], line=message["line"], obj=obj)
        # for g in json['groups']:
        #     s += " * {}: {}\n".format(g['level'], g['count'])
        # s += "\n\n"
        # for g in json['groups']:
        #     level = g['level']
        #     msgs = g['messages']
        #     if len(msgs) > 0:
        #         s += "### {}\n\n".format(level)
        #         for m in msgs:
        #             s += " * {}: obj:'{}' \"{}\" `{}`\n".format(m['type'],m['obj'],m['message'],m['line'])
        return s | def function[to_markdown, parameter[self]]:
constant[
Generate a summary in markdown format
]
variable[json] assign[=] call[name[self].to_report_json, parameter[]]
variable[s] assign[=] call[constant[# Group: {group} - Dataset: {dataset}
].format, parameter[]]
<ast.AugAssign object at 0x7da1b08cb490>
<ast.AugAssign object at 0x7da1b08cb100>
<ast.AugAssign object at 0x7da1b08c8ac0>
<ast.AugAssign object at 0x7da1b08c8580>
<ast.AugAssign object at 0x7da1b08c9c60>
<ast.AugAssign object at 0x7da1b08c8820>
<ast.AugAssign object at 0x7da1b08ca110>
<ast.AugAssign object at 0x7da1b08c9960>
for taget[tuple[[<ast.Name object at 0x7da1b08c9900>, <ast.Name object at 0x7da1b08c9930>]]] in starred[call[name[sorted], parameter[call[call[name[json]][constant[messages]].items, parameter[]]]]] begin[:]
variable[any_suppress_tag_in_rule_metadata] assign[=] call[name[any], parameter[<ast.ListComp object at 0x7da1b077b6d0>]]
if <ast.BoolOp object at 0x7da18bcc8700> begin[:]
call[name[print], parameter[call[constant[Skipping {rule_num} because the tag(s) '{tag}' are suppressed].format, parameter[]]]]
continue
<ast.AugAssign object at 0x7da1b0733280>
<ast.AugAssign object at 0x7da1b0733400>
for taget[tuple[[<ast.Name object at 0x7da1b0732e30>, <ast.Name object at 0x7da1b0733610>]]] in starred[call[name[sorted], parameter[call[call[name[json]][constant[messages]].items, parameter[]]]]] begin[:]
variable[any_suppress_tag_in_rule_metadata] assign[=] call[name[any], parameter[<ast.ListComp object at 0x7da1b083ee30>]]
if <ast.BoolOp object at 0x7da1b083cd60> begin[:]
continue
<ast.AugAssign object at 0x7da1b083cdf0>
if <ast.BoolOp object at 0x7da1b083d540> begin[:]
<ast.AugAssign object at 0x7da1b08c96c0>
<ast.AugAssign object at 0x7da1b08c9000>
if compare[call[name[len], parameter[name[messages]]] greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b08cb550>
for taget[name[message]] in starred[name[messages]] begin[:]
variable[obj] assign[=] <ast.IfExp object at 0x7da1b08ca140>
<ast.AugAssign object at 0x7da1b08cae30>
return[name[s]] | keyword[def] identifier[to_markdown] ( identifier[self] ):
literal[string]
identifier[json] = identifier[self] . identifier[to_report_json] ()
identifier[s] = literal[string] . identifier[format] ( identifier[group] = identifier[json] [ literal[string] ], identifier[dataset] = identifier[json] [ literal[string] ])
identifier[s] += literal[string]
identifier[s] += literal[string] . identifier[format] ( identifier[datetime] . identifier[date] . identifier[today] ())
identifier[s] += literal[string] . identifier[format] ( identifier[json] [ literal[string] ])
identifier[s] += literal[string] . identifier[format] ( identifier[json] [ literal[string] ])
identifier[s] += literal[string] . identifier[format] ( identifier[json] [ literal[string] ])
identifier[s] += literal[string]
identifier[s] += literal[string] . identifier[join] ([ literal[string] . identifier[format] ( identifier[head] ) keyword[for] identifier[head] keyword[in] identifier[self] . identifier[header] ])
identifier[s] += literal[string]
keyword[for] identifier[rule] , identifier[messages] keyword[in] identifier[sorted] ( identifier[json] [ literal[string] ]. identifier[items] (), identifier[key] = keyword[lambda] identifier[t] : identifier[t] [ literal[int] ]):
identifier[any_suppress_tag_in_rule_metadata] = identifier[any] ([ identifier[tag] keyword[in] identifier[self] . identifier[config] . identifier[rule_metadata] . identifier[get] ( identifier[rule] ,{}). identifier[get] ( literal[string] ,[]) keyword[for] identifier[tag] keyword[in] identifier[self] . identifier[config] . identifier[suppress_rule_reporting_tags] ])
keyword[if] identifier[self] . identifier[config] . identifier[rule_metadata] keyword[and] identifier[any_suppress_tag_in_rule_metadata] :
identifier[print] ( literal[string] . identifier[format] ( identifier[rule_num] = identifier[rule] , identifier[tag] = literal[string] . identifier[join] ( identifier[self] . identifier[config] . identifier[suppress_rule_reporting_tags] )))
keyword[continue]
identifier[s] += literal[string] . identifier[format] ( identifier[rule] = identifier[rule] )
identifier[s] += literal[string]
keyword[for] ( identifier[rule] , identifier[messages] ) keyword[in] identifier[sorted] ( identifier[json] [ literal[string] ]. identifier[items] (), identifier[key] = keyword[lambda] identifier[t] : identifier[t] [ literal[int] ]):
identifier[any_suppress_tag_in_rule_metadata] = identifier[any] ([ identifier[tag] keyword[in] identifier[self] . identifier[config] . identifier[rule_metadata] . identifier[get] ( identifier[rule] ,{}). identifier[get] ( literal[string] ,[]) keyword[for] identifier[tag] keyword[in] identifier[self] . identifier[config] . identifier[suppress_rule_reporting_tags] ])
keyword[if] identifier[self] . identifier[config] . identifier[rule_metadata] keyword[and] identifier[any_suppress_tag_in_rule_metadata] :
keyword[continue]
identifier[s] += literal[string] . identifier[format] ( identifier[rule] = identifier[rule] )
keyword[if] identifier[rule] != literal[string] keyword[and] identifier[self] . identifier[config] . identifier[rule_metadata] :
identifier[s] += literal[string] . identifier[format] ( identifier[title] = identifier[self] . identifier[config] . identifier[rule_metadata] . identifier[get] ( identifier[rule] ,{}). identifier[get] ( literal[string] , literal[string] ))
identifier[s] += literal[string] . identifier[format] ( identifier[amount] = identifier[len] ( identifier[messages] ))
keyword[if] identifier[len] ( identifier[messages] )> literal[int] :
identifier[s] += literal[string]
keyword[for] identifier[message] keyword[in] identifier[messages] :
identifier[obj] = literal[string] . identifier[format] ( identifier[message] [ literal[string] ]) keyword[if] identifier[message] [ literal[string] ] keyword[else] literal[string]
identifier[s] += literal[string] . identifier[format] ( identifier[level] = identifier[message] [ literal[string] ], identifier[type] = identifier[message] [ literal[string] ], identifier[message] = identifier[message] [ literal[string] ], identifier[line] = identifier[message] [ literal[string] ], identifier[obj] = identifier[obj] )
keyword[return] identifier[s] | def to_markdown(self):
"""
Generate a summary in markdown format
"""
json = self.to_report_json()
# summary = json['summary']
s = '# Group: {group} - Dataset: {dataset}\n'.format(group=json['group'], dataset=json['dataset'])
s += '\n## SUMMARY\n\n'
s += 'This report generated on {}\n\n'.format(datetime.date.today())
s += ' * Associations: {}\n'.format(json['associations'])
s += ' * Lines in file (incl headers): {}\n'.format(json['lines'])
s += ' * Lines skipped: {}\n'.format(json['skipped_lines'])
# Header from GAF
s += '## Header From Original Association File\n\n'
s += '\n'.join(['> {} '.format(head) for head in self.header])
## Table of Contents
s += '\n\n## Contents\n\n'
for (rule, messages) in sorted(json['messages'].items(), key=lambda t: t[0]):
any_suppress_tag_in_rule_metadata = any([tag in self.config.rule_metadata.get(rule, {}).get('tags', []) for tag in self.config.suppress_rule_reporting_tags])
# For each tag we say to suppress output for, check if it matches any tag in the rule. If any matches
if self.config.rule_metadata and any_suppress_tag_in_rule_metadata:
print("Skipping {rule_num} because the tag(s) '{tag}' are suppressed".format(rule_num=rule, tag=', '.join(self.config.suppress_rule_reporting_tags)))
continue # depends on [control=['if'], data=[]]
s += '[{rule}](#{rule})\n\n'.format(rule=rule) # depends on [control=['for'], data=[]]
s += '\n## MESSAGES\n\n'
for (rule, messages) in sorted(json['messages'].items(), key=lambda t: t[0]):
any_suppress_tag_in_rule_metadata = any([tag in self.config.rule_metadata.get(rule, {}).get('tags', []) for tag in self.config.suppress_rule_reporting_tags])
# Skip if the rule metadata has "silent" as a tag
if self.config.rule_metadata and any_suppress_tag_in_rule_metadata:
# If there is a rule metadata, and the rule ID is in the config,
# get the list of tags if present and check for existence of "silent".
# If contained, continue to the next rule.
continue # depends on [control=['if'], data=[]]
s += '### {rule}\n\n'.format(rule=rule)
if rule != 'other' and self.config.rule_metadata:
s += '{title}\n\n'.format(title=self.config.rule_metadata.get(rule, {}).get('title', '')) # depends on [control=['if'], data=[]]
s += '* total: {amount}\n'.format(amount=len(messages))
if len(messages) > 0:
s += '#### Messages\n' # depends on [control=['if'], data=[]]
for message in messages:
obj = ' ({})'.format(message['obj']) if message['obj'] else ''
s += '* {level} - {type}: {message}{obj} -- `{line}`\n'.format(level=message['level'], type=message['type'], message=message['message'], line=message['line'], obj=obj) # depends on [control=['for'], data=['message']] # depends on [control=['for'], data=[]]
# for g in json['groups']:
# s += " * {}: {}\n".format(g['level'], g['count'])
# s += "\n\n"
# for g in json['groups']:
# level = g['level']
# msgs = g['messages']
# if len(msgs) > 0:
# s += "### {}\n\n".format(level)
# for m in msgs:
# s += " * {}: obj:'{}' \"{}\" `{}`\n".format(m['type'],m['obj'],m['message'],m['line'])
return s |
def BuildTemplate(self,
                  context=None,
                  output=None,
                  fleetspeak_service_config=None):
    """Find template builder and call it.

    Args:
        context: optional list of config context strings. A copy is taken,
            so the caller's list is not mutated by the entries appended
            here.
        output: optional output directory; when set, the template path is
            derived from the "PyInstaller.template_filename" config option.
        fleetspeak_service_config: optional fleetspeak service
            configuration, forwarded unchanged to the builder.
    """
    # Copy instead of aliasing: with the previous ``context = context or []``
    # the appends below leaked into the caller's list, so context entries
    # accumulated across repeated calls that reused the same list.
    context = list(context) if context else []
    context.append("Arch:%s" % self.GetArch())
    # Platform context has common platform settings, Target has template
    # build specific stuff.
    self.platform = platform.system()
    context.append("Platform:%s" % self.platform)
    context.append("Target:%s" % self.platform)
    if "Target:Linux" in context:
        # Linux templates additionally need the package format context.
        context.append(self.GetPackageFormat())
    template_path = None
    if output:
        template_path = os.path.join(
            output,
            grr_config.CONFIG.Get(
                "PyInstaller.template_filename", context=context))
    builder_obj = self.GetBuilder(context, fleetspeak_service_config)
    builder_obj.MakeExecutableTemplate(output_file=template_path)
constant[Find template builder and call it.]
variable[context] assign[=] <ast.BoolOp object at 0x7da1b1c18a30>
call[name[context].append, parameter[binary_operation[constant[Arch:%s] <ast.Mod object at 0x7da2590d6920> call[name[self].GetArch, parameter[]]]]]
name[self].platform assign[=] call[name[platform].system, parameter[]]
call[name[context].append, parameter[binary_operation[constant[Platform:%s] <ast.Mod object at 0x7da2590d6920> name[self].platform]]]
call[name[context].append, parameter[binary_operation[constant[Target:%s] <ast.Mod object at 0x7da2590d6920> name[self].platform]]]
if compare[constant[Target:Linux] in name[context]] begin[:]
call[name[context].append, parameter[call[name[self].GetPackageFormat, parameter[]]]]
variable[template_path] assign[=] constant[None]
if name[output] begin[:]
variable[template_path] assign[=] call[name[os].path.join, parameter[name[output], call[name[grr_config].CONFIG.Get, parameter[constant[PyInstaller.template_filename]]]]]
variable[builder_obj] assign[=] call[name[self].GetBuilder, parameter[name[context], name[fleetspeak_service_config]]]
call[name[builder_obj].MakeExecutableTemplate, parameter[]] | keyword[def] identifier[BuildTemplate] ( identifier[self] ,
identifier[context] = keyword[None] ,
identifier[output] = keyword[None] ,
identifier[fleetspeak_service_config] = keyword[None] ):
literal[string]
identifier[context] = identifier[context] keyword[or] []
identifier[context] . identifier[append] ( literal[string] % identifier[self] . identifier[GetArch] ())
identifier[self] . identifier[platform] = identifier[platform] . identifier[system] ()
identifier[context] . identifier[append] ( literal[string] % identifier[self] . identifier[platform] )
identifier[context] . identifier[append] ( literal[string] % identifier[self] . identifier[platform] )
keyword[if] literal[string] keyword[in] identifier[context] :
identifier[context] . identifier[append] ( identifier[self] . identifier[GetPackageFormat] ())
identifier[template_path] = keyword[None]
keyword[if] identifier[output] :
identifier[template_path] = identifier[os] . identifier[path] . identifier[join] (
identifier[output] ,
identifier[grr_config] . identifier[CONFIG] . identifier[Get] (
literal[string] , identifier[context] = identifier[context] ))
identifier[builder_obj] = identifier[self] . identifier[GetBuilder] ( identifier[context] , identifier[fleetspeak_service_config] )
identifier[builder_obj] . identifier[MakeExecutableTemplate] ( identifier[output_file] = identifier[template_path] ) | def BuildTemplate(self, context=None, output=None, fleetspeak_service_config=None):
"""Find template builder and call it."""
context = context or []
context.append('Arch:%s' % self.GetArch())
# Platform context has common platform settings, Target has template build
# specific stuff.
self.platform = platform.system()
context.append('Platform:%s' % self.platform)
context.append('Target:%s' % self.platform)
if 'Target:Linux' in context:
context.append(self.GetPackageFormat()) # depends on [control=['if'], data=['context']]
template_path = None
if output:
template_path = os.path.join(output, grr_config.CONFIG.Get('PyInstaller.template_filename', context=context)) # depends on [control=['if'], data=[]]
builder_obj = self.GetBuilder(context, fleetspeak_service_config)
builder_obj.MakeExecutableTemplate(output_file=template_path) |
def is_vert_aligned_center(c):
    """Return true if all the components are vertically aligned on their center.

    Vertical alignment means that the bounding boxes of each Mention of c
    share a similar x-axis value in the visual rendering of the document. In
    this function the similarity of the x-axis value is based on the center
    of their bounding boxes.

    :param c: The candidate to evaluate
    :rtype: boolean
    """
    if len(c) == 0:
        # ``all`` over an empty candidate is vacuously true.
        return True
    # Hoist the reference bbox (the first component) out of the loop; it was
    # previously recomputed for every component.
    ref_bbox = bbox_from_span(_to_span(c[0]))
    # Use a generator (not a list) so evaluation short-circuits on the first
    # non-visual or misaligned component.
    return all(
        _to_span(c[i]).sentence.is_visual()
        and bbox_vert_aligned_center(bbox_from_span(_to_span(c[i])), ref_bbox)
        for i in range(len(c))
    )
constant[Return true if all the components are vertically aligned on their center.
Vertical alignment means that the bounding boxes of each Mention of c
shares a similar x-axis value in the visual rendering of the document. In
this function the similarity of the x-axis value is based on the center of
their bounding boxes.
:param c: The candidate to evaluate
:rtype: boolean
]
return[call[name[all], parameter[<ast.ListComp object at 0x7da18f00f880>]]] | keyword[def] identifier[is_vert_aligned_center] ( identifier[c] ):
literal[string]
keyword[return] identifier[all] (
[
identifier[_to_span] ( identifier[c] [ identifier[i] ]). identifier[sentence] . identifier[is_visual] ()
keyword[and] identifier[bbox_vert_aligned_center] (
identifier[bbox_from_span] ( identifier[_to_span] ( identifier[c] [ identifier[i] ])), identifier[bbox_from_span] ( identifier[_to_span] ( identifier[c] [ literal[int] ]))
)
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[c] ))
]
) | def is_vert_aligned_center(c):
"""Return true if all the components are vertically aligned on their center.
Vertical alignment means that the bounding boxes of each Mention of c
shares a similar x-axis value in the visual rendering of the document. In
this function the similarity of the x-axis value is based on the center of
their bounding boxes.
:param c: The candidate to evaluate
:rtype: boolean
"""
return all([_to_span(c[i]).sentence.is_visual() and bbox_vert_aligned_center(bbox_from_span(_to_span(c[i])), bbox_from_span(_to_span(c[0]))) for i in range(len(c))]) |
def _read_elem_nodes(self, fid):
    """ Read the nodes from an opened elem.dat file. Correct for CutMcK
    transformations.

    We store three types of nodes in the dict 'nodes':

    * "raw" : as read from the elem.dat file
    * "presort" : pre-sorted so we can directly read node numbers from
      a elec.dat file and use them as indices.
    * "sorted" : completely sorted as in the original grid (before any
      CutMcK)

    For completeness, we also store the following keys:

    * "cutmck_index" : Array containing the indices in "presort" to
      obtain the "sorted" values:
      nodes['sorted'] = nodes['presort'] [nodes['cutmck_index'], :]
    * "rev_cutmck_index" : argsort(cutmck_index)
    """
    nodes = {}
    nr_nodes = self.header['nr_nodes']
    # read in nodes: one line per node, "number x z"
    nodes_raw = np.empty((nr_nodes, 3), dtype=float)
    for nr in range(nr_nodes):
        node_line = fid.readline().lstrip()
        # text-mode np.fromstring is deprecated; split and parse instead
        nodes_raw[nr, :] = np.array(node_line.split(), dtype=float)
    # round node coordinates to 5th decimal point. Sometimes this is
    # important when we deal with mal-formatted node data
    nodes_raw[:, 1:3] = np.round(nodes_raw[:, 1:3], 5)
    # check for CutMcK: without renumbering the first column holds the node
    # numbers 1..N in order. The previous check compared the N-element
    # column against list(range(1, N)) (N-1 elements) with an elementwise
    # `!=`; the shape mismatch made the comparison a scalar True, so CutMcK
    # was always flagged and the else branch was unreachable.
    expected_numbers = np.arange(1, nodes_raw.shape[0] + 1)
    self.header['cutmck'] = not np.array_equal(nodes_raw[:, 0], expected_numbers)
    # Rearrange nodes when CutMcK was used.
    if self.header['cutmck']:
        print('This grid was sorted using CutMcK. The nodes were resorted!')
        nodes_cutmck = np.empty_like(nodes_raw)
        nodes_cutmck_index = np.zeros(nodes_raw.shape[0], dtype=int)
        for node in range(nr_nodes):
            # position (row) where node number ``node + 1`` is stored
            (new_index,) = np.where(nodes_raw[:, 0].astype(int) == node + 1)
            nodes_cutmck[new_index, 1:3] = nodes_raw[node, 1:3]
            nodes_cutmck[new_index, 0] = new_index
            nodes_cutmck_index[node] = new_index[0]
        # sort them
        nodes_sorted = nodes_cutmck[nodes_cutmck_index, :]
        nodes['presort'] = nodes_cutmck
        nodes['cutmck_index'] = nodes_cutmck_index
        nodes['rev_cutmck_index'] = np.argsort(nodes_cutmck_index)
    else:
        nodes_sorted = nodes_raw
        nodes['presort'] = nodes_raw
    # prepare node dict
    nodes['raw'] = nodes_raw
    nodes['sorted'] = nodes_sorted
    self.nodes = nodes
    self.nr_of_nodes = nodes['raw'].shape[0]
constant[ Read the nodes from an opened elem.dat file. Correct for CutMcK
transformations.
We store three typed of nodes in the dict 'nodes':
* "raw" : as read from the elem.dat file
* "presort" : pre-sorted so we can directly read node numbers from
a elec.dat file and use them as indices.
* "sorted" : completely sorted as in the original grid (before any
CutMcK)
For completeness, we also store the following keys:
* "cutmck_index" : Array containing the indices in "presort" to
obtain the "sorted" values:
nodes['sorted'] = nodes['presort'] [nodes['cutmck_index'], :]
* "rev_cutmck_index" : argsort(cutmck_index)
]
variable[nodes] assign[=] dictionary[[], []]
variable[nodes_raw] assign[=] call[name[np].empty, parameter[tuple[[<ast.Subscript object at 0x7da1b2297b20>, <ast.Constant object at 0x7da1b2295540>]]]]
for taget[name[nr]] in starred[call[name[range], parameter[constant[0], call[name[self].header][constant[nr_nodes]]]]] begin[:]
variable[node_line] assign[=] call[call[name[fid].readline, parameter[]].lstrip, parameter[]]
call[name[nodes_raw]][tuple[[<ast.Name object at 0x7da1b2295750>, <ast.Slice object at 0x7da1b2295900>]]] assign[=] call[name[np].fromstring, parameter[name[node_line]]]
call[name[nodes_raw]][tuple[[<ast.Slice object at 0x7da1b2296bf0>, <ast.Slice object at 0x7da1b2296ce0>]]] assign[=] call[name[np].round, parameter[call[name[nodes_raw]][tuple[[<ast.Slice object at 0x7da1b2296560>, <ast.Slice object at 0x7da1b22965f0>]]], constant[5]]]
if compare[call[name[nodes_raw]][tuple[[<ast.Slice object at 0x7da1b2295480>, <ast.Constant object at 0x7da1b2295270>]]] not_equal[!=] call[name[list], parameter[call[name[range], parameter[constant[1], call[name[nodes_raw].shape][constant[0]]]]]]] begin[:]
call[name[self].header][constant[cutmck]] assign[=] constant[True]
call[name[print], parameter[constant[This grid was sorted using CutMcK. The nodes were resorted!]]]
if call[name[self].header][constant[cutmck]] begin[:]
variable[nodes_cutmck] assign[=] call[name[np].empty_like, parameter[name[nodes_raw]]]
variable[nodes_cutmck_index] assign[=] call[name[np].zeros, parameter[call[name[nodes_raw].shape][constant[0]]]]
for taget[name[node]] in starred[call[name[range], parameter[constant[0], call[name[self].header][constant[nr_nodes]]]]] begin[:]
variable[new_index] assign[=] call[name[np].where, parameter[compare[call[call[name[nodes_raw]][tuple[[<ast.Slice object at 0x7da1b2362f80>, <ast.Constant object at 0x7da1b2362380>]]].astype, parameter[name[int]]] equal[==] binary_operation[name[node] + constant[1]]]]]
call[name[nodes_cutmck]][tuple[[<ast.Subscript object at 0x7da1b2360280>, <ast.Slice object at 0x7da1b2360340>]]] assign[=] call[name[nodes_raw]][tuple[[<ast.Name object at 0x7da1b2363220>, <ast.Slice object at 0x7da1b23629e0>]]]
call[name[nodes_cutmck]][tuple[[<ast.Subscript object at 0x7da1b2360460>, <ast.Constant object at 0x7da1b2361ba0>]]] assign[=] call[name[new_index]][constant[0]]
call[name[nodes_cutmck_index]][name[node]] assign[=] call[name[new_index]][constant[0]]
variable[nodes_sorted] assign[=] call[name[nodes_cutmck]][tuple[[<ast.Name object at 0x7da1b2363040>, <ast.Slice object at 0x7da1b2361ea0>]]]
call[name[nodes]][constant[presort]] assign[=] name[nodes_cutmck]
call[name[nodes]][constant[cutmck_index]] assign[=] name[nodes_cutmck_index]
call[name[nodes]][constant[rev_cutmck_index]] assign[=] call[name[np].argsort, parameter[name[nodes_cutmck_index]]]
call[name[nodes]][constant[raw]] assign[=] name[nodes_raw]
call[name[nodes]][constant[sorted]] assign[=] name[nodes_sorted]
name[self].nodes assign[=] name[nodes]
name[self].nr_of_nodes assign[=] call[call[name[nodes]][constant[raw]].shape][constant[0]] | keyword[def] identifier[_read_elem_nodes] ( identifier[self] , identifier[fid] ):
literal[string]
identifier[nodes] ={}
identifier[nodes_raw] = identifier[np] . identifier[empty] (( identifier[self] . identifier[header] [ literal[string] ], literal[int] ), identifier[dtype] = identifier[float] )
keyword[for] identifier[nr] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[header] [ literal[string] ]):
identifier[node_line] = identifier[fid] . identifier[readline] (). identifier[lstrip] ()
identifier[nodes_raw] [ identifier[nr] ,:]= identifier[np] . identifier[fromstring] (
identifier[node_line] , identifier[dtype] = identifier[float] , identifier[sep] = literal[string] )
identifier[nodes_raw] [:, literal[int] : literal[int] ]= identifier[np] . identifier[round] ( identifier[nodes_raw] [:, literal[int] : literal[int] ], literal[int] )
keyword[if] ( identifier[nodes_raw] [:, literal[int] ]!= identifier[list] ( identifier[range] ( literal[int] , identifier[nodes_raw] . identifier[shape] [ literal[int] ]))):
identifier[self] . identifier[header] [ literal[string] ]= keyword[True]
identifier[print] (
literal[string] )
keyword[else] :
identifier[self] . identifier[header] [ literal[string] ]= keyword[False]
keyword[if] ( identifier[self] . identifier[header] [ literal[string] ]):
identifier[nodes_cutmck] = identifier[np] . identifier[empty_like] ( identifier[nodes_raw] )
identifier[nodes_cutmck_index] = identifier[np] . identifier[zeros] ( identifier[nodes_raw] . identifier[shape] [ literal[int] ], identifier[dtype] = identifier[int] )
keyword[for] identifier[node] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[header] [ literal[string] ]):
identifier[new_index] = identifier[np] . identifier[where] ( identifier[nodes_raw] [:, literal[int] ]. identifier[astype] ( identifier[int] )==( identifier[node] + literal[int] ))
identifier[nodes_cutmck] [ identifier[new_index] [ literal[int] ], literal[int] : literal[int] ]= identifier[nodes_raw] [ identifier[node] , literal[int] : literal[int] ]
identifier[nodes_cutmck] [ identifier[new_index] [ literal[int] ], literal[int] ]= identifier[new_index] [ literal[int] ]
identifier[nodes_cutmck_index] [ identifier[node] ]= identifier[new_index] [ literal[int] ]
identifier[nodes_sorted] = identifier[nodes_cutmck] [ identifier[nodes_cutmck_index] ,:]
identifier[nodes] [ literal[string] ]= identifier[nodes_cutmck]
identifier[nodes] [ literal[string] ]= identifier[nodes_cutmck_index]
identifier[nodes] [ literal[string] ]= identifier[np] . identifier[argsort] ( identifier[nodes_cutmck_index] )
keyword[else] :
identifier[nodes_sorted] = identifier[nodes_raw]
identifier[nodes] [ literal[string] ]= identifier[nodes_raw]
identifier[nodes] [ literal[string] ]= identifier[nodes_raw]
identifier[nodes] [ literal[string] ]= identifier[nodes_sorted]
identifier[self] . identifier[nodes] = identifier[nodes]
identifier[self] . identifier[nr_of_nodes] = identifier[nodes] [ literal[string] ]. identifier[shape] [ literal[int] ] | def _read_elem_nodes(self, fid):
""" Read the nodes from an opened elem.dat file. Correct for CutMcK
transformations.
We store three typed of nodes in the dict 'nodes':
* "raw" : as read from the elem.dat file
* "presort" : pre-sorted so we can directly read node numbers from
a elec.dat file and use them as indices.
* "sorted" : completely sorted as in the original grid (before any
CutMcK)
For completeness, we also store the following keys:
* "cutmck_index" : Array containing the indices in "presort" to
obtain the "sorted" values:
nodes['sorted'] = nodes['presort'] [nodes['cutmck_index'], :]
* "rev_cutmck_index" : argsort(cutmck_index)
"""
nodes = {}
# # prepare nodes
# nodes_sorted = np.zeros((number_of_nodes, 3), dtype=float)
# nodes = np.zeros((number_of_nodes, 3), dtype=float)
# read in nodes
nodes_raw = np.empty((self.header['nr_nodes'], 3), dtype=float)
for nr in range(0, self.header['nr_nodes']):
node_line = fid.readline().lstrip()
nodes_raw[nr, :] = np.fromstring(node_line, dtype=float, sep=' ') # depends on [control=['for'], data=['nr']]
# round node coordinates to 5th decimal point. Sometimes this is
# important when we deal with mal-formatted node data
nodes_raw[:, 1:3] = np.round(nodes_raw[:, 1:3], 5)
# check for CutMcK
# The check is based on the first node, but if one node was renumbered,
# so were all the others.
if nodes_raw[:, 0] != list(range(1, nodes_raw.shape[0])):
self.header['cutmck'] = True
print('This grid was sorted using CutMcK. The nodes were resorted!') # depends on [control=['if'], data=[]]
else:
self.header['cutmck'] = False
# Rearrange nodes when CutMcK was used.
if self.header['cutmck']:
nodes_cutmck = np.empty_like(nodes_raw)
nodes_cutmck_index = np.zeros(nodes_raw.shape[0], dtype=int)
for node in range(0, self.header['nr_nodes']):
new_index = np.where(nodes_raw[:, 0].astype(int) == node + 1)
nodes_cutmck[new_index[0], 1:3] = nodes_raw[node, 1:3]
nodes_cutmck[new_index[0], 0] = new_index[0]
nodes_cutmck_index[node] = new_index[0] # depends on [control=['for'], data=['node']]
# sort them
nodes_sorted = nodes_cutmck[nodes_cutmck_index, :]
nodes['presort'] = nodes_cutmck
nodes['cutmck_index'] = nodes_cutmck_index
nodes['rev_cutmck_index'] = np.argsort(nodes_cutmck_index) # depends on [control=['if'], data=[]]
else:
nodes_sorted = nodes_raw
nodes['presort'] = nodes_raw
# prepare node dict
nodes['raw'] = nodes_raw
nodes['sorted'] = nodes_sorted
self.nodes = nodes
self.nr_of_nodes = nodes['raw'].shape[0] |
def tacacs_server_host_retries(self, **kwargs):
    """Auto Generated Code

    Build the brocade-aaa XML payload that configures the retry count for
    a TACACS+ server host, then hand the document to the callback.
    Expects 'hostname' and 'retries' in kwargs; 'callback' is optional and
    defaults to ``self._callback``.
    """
    root = ET.Element("config")
    server_el = ET.SubElement(root, "tacacs-server",
                              xmlns="urn:brocade.com:mgmt:brocade-aaa")
    host_el = ET.SubElement(server_el, "host")
    # Element order matters for the schema: hostname key first, then the
    # retries leaf.
    ET.SubElement(host_el, "hostname").text = kwargs.pop('hostname')
    ET.SubElement(host_el, "retries").text = kwargs.pop('retries')
    callback = kwargs.pop('callback', self._callback)
    return callback(root)
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[tacacs_server] assign[=] call[name[ET].SubElement, parameter[name[config], constant[tacacs-server]]]
variable[host] assign[=] call[name[ET].SubElement, parameter[name[tacacs_server], constant[host]]]
variable[hostname_key] assign[=] call[name[ET].SubElement, parameter[name[host], constant[hostname]]]
name[hostname_key].text assign[=] call[name[kwargs].pop, parameter[constant[hostname]]]
variable[retries] assign[=] call[name[ET].SubElement, parameter[name[host], constant[retries]]]
name[retries].text assign[=] call[name[kwargs].pop, parameter[constant[retries]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[tacacs_server_host_retries] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[tacacs_server] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[host] = identifier[ET] . identifier[SubElement] ( identifier[tacacs_server] , literal[string] )
identifier[hostname_key] = identifier[ET] . identifier[SubElement] ( identifier[host] , literal[string] )
identifier[hostname_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[retries] = identifier[ET] . identifier[SubElement] ( identifier[host] , literal[string] )
identifier[retries] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def tacacs_server_host_retries(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
tacacs_server = ET.SubElement(config, 'tacacs-server', xmlns='urn:brocade.com:mgmt:brocade-aaa')
host = ET.SubElement(tacacs_server, 'host')
hostname_key = ET.SubElement(host, 'hostname')
hostname_key.text = kwargs.pop('hostname')
retries = ET.SubElement(host, 'retries')
retries.text = kwargs.pop('retries')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def bg(self) -> np.array:
"""A uint8 array with the shape (height, width, 3).
You can change the consoles background colors by using this array.
Index this array with ``console.bg[i, j, channel] # order='C'`` or
``console.bg[x, y, channel] # order='F'``.
"""
bg = self._tiles["bg"][..., :3]
if self._order == "F":
bg = bg.transpose(1, 0, 2)
return bg | def function[bg, parameter[self]]:
constant[A uint8 array with the shape (height, width, 3).
You can change the consoles background colors by using this array.
Index this array with ``console.bg[i, j, channel] # order='C'`` or
``console.bg[x, y, channel] # order='F'``.
]
variable[bg] assign[=] call[call[name[self]._tiles][constant[bg]]][tuple[[<ast.Constant object at 0x7da18eb56f20>, <ast.Slice object at 0x7da18eb54400>]]]
if compare[name[self]._order equal[==] constant[F]] begin[:]
variable[bg] assign[=] call[name[bg].transpose, parameter[constant[1], constant[0], constant[2]]]
return[name[bg]] | keyword[def] identifier[bg] ( identifier[self] )-> identifier[np] . identifier[array] :
literal[string]
identifier[bg] = identifier[self] . identifier[_tiles] [ literal[string] ][...,: literal[int] ]
keyword[if] identifier[self] . identifier[_order] == literal[string] :
identifier[bg] = identifier[bg] . identifier[transpose] ( literal[int] , literal[int] , literal[int] )
keyword[return] identifier[bg] | def bg(self) -> np.array:
"""A uint8 array with the shape (height, width, 3).
You can change the consoles background colors by using this array.
Index this array with ``console.bg[i, j, channel] # order='C'`` or
``console.bg[x, y, channel] # order='F'``.
"""
bg = self._tiles['bg'][..., :3]
if self._order == 'F':
bg = bg.transpose(1, 0, 2) # depends on [control=['if'], data=[]]
return bg |
def get_workspace_disk_usage(workspace, summarize=False):
"""Retrieve disk usage information of a workspace."""
command = ['du', '-h']
if summarize:
command.append('-s')
else:
command.append('-a')
command.append(workspace)
disk_usage_info = subprocess.check_output(command).decode().split()
# create pairs of (size, filename)
filesize_pairs = list(zip(disk_usage_info[::2], disk_usage_info[1::2]))
filesizes = []
for filesize_pair in filesize_pairs:
size, name = filesize_pair
# trim workspace path in every file name
filesizes.append({'name': name[len(workspace):],
'size': size})
return filesizes | def function[get_workspace_disk_usage, parameter[workspace, summarize]]:
constant[Retrieve disk usage information of a workspace.]
variable[command] assign[=] list[[<ast.Constant object at 0x7da1b0470430>, <ast.Constant object at 0x7da1b0473010>]]
if name[summarize] begin[:]
call[name[command].append, parameter[constant[-s]]]
call[name[command].append, parameter[name[workspace]]]
variable[disk_usage_info] assign[=] call[call[call[name[subprocess].check_output, parameter[name[command]]].decode, parameter[]].split, parameter[]]
variable[filesize_pairs] assign[=] call[name[list], parameter[call[name[zip], parameter[call[name[disk_usage_info]][<ast.Slice object at 0x7da1b0472d10>], call[name[disk_usage_info]][<ast.Slice object at 0x7da1b0471ae0>]]]]]
variable[filesizes] assign[=] list[[]]
for taget[name[filesize_pair]] in starred[name[filesize_pairs]] begin[:]
<ast.Tuple object at 0x7da1b0470400> assign[=] name[filesize_pair]
call[name[filesizes].append, parameter[dictionary[[<ast.Constant object at 0x7da1b0472f50>, <ast.Constant object at 0x7da1b0470220>], [<ast.Subscript object at 0x7da1b0470d90>, <ast.Name object at 0x7da1b0471f00>]]]]
return[name[filesizes]] | keyword[def] identifier[get_workspace_disk_usage] ( identifier[workspace] , identifier[summarize] = keyword[False] ):
literal[string]
identifier[command] =[ literal[string] , literal[string] ]
keyword[if] identifier[summarize] :
identifier[command] . identifier[append] ( literal[string] )
keyword[else] :
identifier[command] . identifier[append] ( literal[string] )
identifier[command] . identifier[append] ( identifier[workspace] )
identifier[disk_usage_info] = identifier[subprocess] . identifier[check_output] ( identifier[command] ). identifier[decode] (). identifier[split] ()
identifier[filesize_pairs] = identifier[list] ( identifier[zip] ( identifier[disk_usage_info] [:: literal[int] ], identifier[disk_usage_info] [ literal[int] :: literal[int] ]))
identifier[filesizes] =[]
keyword[for] identifier[filesize_pair] keyword[in] identifier[filesize_pairs] :
identifier[size] , identifier[name] = identifier[filesize_pair]
identifier[filesizes] . identifier[append] ({ literal[string] : identifier[name] [ identifier[len] ( identifier[workspace] ):],
literal[string] : identifier[size] })
keyword[return] identifier[filesizes] | def get_workspace_disk_usage(workspace, summarize=False):
"""Retrieve disk usage information of a workspace."""
command = ['du', '-h']
if summarize:
command.append('-s') # depends on [control=['if'], data=[]]
else:
command.append('-a')
command.append(workspace)
disk_usage_info = subprocess.check_output(command).decode().split()
# create pairs of (size, filename)
filesize_pairs = list(zip(disk_usage_info[::2], disk_usage_info[1::2]))
filesizes = []
for filesize_pair in filesize_pairs:
(size, name) = filesize_pair
# trim workspace path in every file name
filesizes.append({'name': name[len(workspace):], 'size': size}) # depends on [control=['for'], data=['filesize_pair']]
return filesizes |
def plugin_categories_to_choices(categories):
"""
Return a tuple of plugin model choices, suitable for a select field.
Each tuple is a ("TypeName", "Title") value.
"""
choices = []
for category, items in categories.items():
if items:
plugin_tuples = tuple((plugin.type_name, plugin.verbose_name) for plugin in items)
if category:
choices.append((category, plugin_tuples))
else:
choices += plugin_tuples
choices.sort(key=lambda item: item[0])
return choices | def function[plugin_categories_to_choices, parameter[categories]]:
constant[
Return a tuple of plugin model choices, suitable for a select field.
Each tuple is a ("TypeName", "Title") value.
]
variable[choices] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b1046b60>, <ast.Name object at 0x7da1b1047ca0>]]] in starred[call[name[categories].items, parameter[]]] begin[:]
if name[items] begin[:]
variable[plugin_tuples] assign[=] call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da1b1045180>]]
if name[category] begin[:]
call[name[choices].append, parameter[tuple[[<ast.Name object at 0x7da1b1044c70>, <ast.Name object at 0x7da1b1045690>]]]]
call[name[choices].sort, parameter[]]
return[name[choices]] | keyword[def] identifier[plugin_categories_to_choices] ( identifier[categories] ):
literal[string]
identifier[choices] =[]
keyword[for] identifier[category] , identifier[items] keyword[in] identifier[categories] . identifier[items] ():
keyword[if] identifier[items] :
identifier[plugin_tuples] = identifier[tuple] (( identifier[plugin] . identifier[type_name] , identifier[plugin] . identifier[verbose_name] ) keyword[for] identifier[plugin] keyword[in] identifier[items] )
keyword[if] identifier[category] :
identifier[choices] . identifier[append] (( identifier[category] , identifier[plugin_tuples] ))
keyword[else] :
identifier[choices] += identifier[plugin_tuples]
identifier[choices] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[item] : identifier[item] [ literal[int] ])
keyword[return] identifier[choices] | def plugin_categories_to_choices(categories):
"""
Return a tuple of plugin model choices, suitable for a select field.
Each tuple is a ("TypeName", "Title") value.
"""
choices = []
for (category, items) in categories.items():
if items:
plugin_tuples = tuple(((plugin.type_name, plugin.verbose_name) for plugin in items))
if category:
choices.append((category, plugin_tuples)) # depends on [control=['if'], data=[]]
else:
choices += plugin_tuples # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
choices.sort(key=lambda item: item[0])
return choices |
def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d | def function[_construct_axes_dict, parameter[self, axes]]:
constant[Return an axes dictionary for myself.]
variable[d] assign[=] <ast.DictComp object at 0x7da18ede7fd0>
call[name[d].update, parameter[name[kwargs]]]
return[name[d]] | keyword[def] identifier[_construct_axes_dict] ( identifier[self] , identifier[axes] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[d] ={ identifier[a] : identifier[self] . identifier[_get_axis] ( identifier[a] ) keyword[for] identifier[a] keyword[in] ( identifier[axes] keyword[or] identifier[self] . identifier[_AXIS_ORDERS] )}
identifier[d] . identifier[update] ( identifier[kwargs] )
keyword[return] identifier[d] | def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = {a: self._get_axis(a) for a in axes or self._AXIS_ORDERS}
d.update(kwargs)
return d |
def _decode(self, data):
"""Decode data, if any
Called before passing to stdout/stderr streams
"""
if self.encoding:
data = data.decode(self.encoding, 'replace')
return data | def function[_decode, parameter[self, data]]:
constant[Decode data, if any
Called before passing to stdout/stderr streams
]
if name[self].encoding begin[:]
variable[data] assign[=] call[name[data].decode, parameter[name[self].encoding, constant[replace]]]
return[name[data]] | keyword[def] identifier[_decode] ( identifier[self] , identifier[data] ):
literal[string]
keyword[if] identifier[self] . identifier[encoding] :
identifier[data] = identifier[data] . identifier[decode] ( identifier[self] . identifier[encoding] , literal[string] )
keyword[return] identifier[data] | def _decode(self, data):
"""Decode data, if any
Called before passing to stdout/stderr streams
"""
if self.encoding:
data = data.decode(self.encoding, 'replace') # depends on [control=['if'], data=[]]
return data |
def forward_messages(
self,
chat_id: Union[int, str],
from_chat_id: Union[int, str],
message_ids: Iterable[int],
disable_notification: bool = None,
as_copy: bool = False,
remove_caption: bool = False
) -> "pyrogram.Messages":
"""Use this method to forward messages of any kind.
Args:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
from_chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the source chat where the original message was sent.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
message_ids (``iterable``):
A list of Message identifiers in the chat specified in *from_chat_id* or a single message id.
Iterators and Generators are also accepted.
disable_notification (``bool``, *optional*):
Sends the message silently.
Users will receive a notification with no sound.
as_copy (``bool``, *optional*):
Pass True to forward messages without the forward header (i.e.: send a copy of the message content).
Defaults to False.
remove_caption (``bool``, *optional*):
If set to True and *as_copy* is enabled as well, media captions are not preserved when copying the
message. Has no effect if *as_copy* is not enabled.
Defaults to False.
Returns:
On success and in case *message_ids* was an iterable, the returned value will be a list of the forwarded
:obj:`Messages <pyrogram.Message>` even if a list contains just one element, otherwise if
*message_ids* was an integer, the single forwarded :obj:`Message <pyrogram.Message>`
is returned.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
"""
is_iterable = not isinstance(message_ids, int)
message_ids = list(message_ids) if is_iterable else [message_ids]
if as_copy:
forwarded_messages = []
for chunk in [message_ids[i:i + 200] for i in range(0, len(message_ids), 200)]:
messages = self.get_messages(chat_id=from_chat_id, message_ids=chunk) # type: pyrogram.Messages
for message in messages.messages:
forwarded_messages.append(
message.forward(
chat_id,
disable_notification=disable_notification,
as_copy=True,
remove_caption=remove_caption
)
)
return pyrogram.Messages(
client=self,
total_count=len(forwarded_messages),
messages=forwarded_messages
) if is_iterable else forwarded_messages[0]
else:
r = self.send(
functions.messages.ForwardMessages(
to_peer=self.resolve_peer(chat_id),
from_peer=self.resolve_peer(from_chat_id),
id=message_ids,
silent=disable_notification or None,
random_id=[self.rnd_id() for _ in message_ids]
)
)
forwarded_messages = []
users = {i.id: i for i in r.users}
chats = {i.id: i for i in r.chats}
for i in r.updates:
if isinstance(i, (types.UpdateNewMessage, types.UpdateNewChannelMessage)):
forwarded_messages.append(
pyrogram.Message._parse(
self, i.message,
users, chats
)
)
return pyrogram.Messages(
client=self,
total_count=len(forwarded_messages),
messages=forwarded_messages
) if is_iterable else forwarded_messages[0] | def function[forward_messages, parameter[self, chat_id, from_chat_id, message_ids, disable_notification, as_copy, remove_caption]]:
constant[Use this method to forward messages of any kind.
Args:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
from_chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the source chat where the original message was sent.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
message_ids (``iterable``):
A list of Message identifiers in the chat specified in *from_chat_id* or a single message id.
Iterators and Generators are also accepted.
disable_notification (``bool``, *optional*):
Sends the message silently.
Users will receive a notification with no sound.
as_copy (``bool``, *optional*):
Pass True to forward messages without the forward header (i.e.: send a copy of the message content).
Defaults to False.
remove_caption (``bool``, *optional*):
If set to True and *as_copy* is enabled as well, media captions are not preserved when copying the
message. Has no effect if *as_copy* is not enabled.
Defaults to False.
Returns:
On success and in case *message_ids* was an iterable, the returned value will be a list of the forwarded
:obj:`Messages <pyrogram.Message>` even if a list contains just one element, otherwise if
*message_ids* was an integer, the single forwarded :obj:`Message <pyrogram.Message>`
is returned.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
]
variable[is_iterable] assign[=] <ast.UnaryOp object at 0x7da1b26ad630>
variable[message_ids] assign[=] <ast.IfExp object at 0x7da1b26afb20>
if name[as_copy] begin[:]
variable[forwarded_messages] assign[=] list[[]]
for taget[name[chunk]] in starred[<ast.ListComp object at 0x7da1b26ac550>] begin[:]
variable[messages] assign[=] call[name[self].get_messages, parameter[]]
for taget[name[message]] in starred[name[messages].messages] begin[:]
call[name[forwarded_messages].append, parameter[call[name[message].forward, parameter[name[chat_id]]]]]
return[<ast.IfExp object at 0x7da1b26ac7f0>] | keyword[def] identifier[forward_messages] (
identifier[self] ,
identifier[chat_id] : identifier[Union] [ identifier[int] , identifier[str] ],
identifier[from_chat_id] : identifier[Union] [ identifier[int] , identifier[str] ],
identifier[message_ids] : identifier[Iterable] [ identifier[int] ],
identifier[disable_notification] : identifier[bool] = keyword[None] ,
identifier[as_copy] : identifier[bool] = keyword[False] ,
identifier[remove_caption] : identifier[bool] = keyword[False]
)-> literal[string] :
literal[string]
identifier[is_iterable] = keyword[not] identifier[isinstance] ( identifier[message_ids] , identifier[int] )
identifier[message_ids] = identifier[list] ( identifier[message_ids] ) keyword[if] identifier[is_iterable] keyword[else] [ identifier[message_ids] ]
keyword[if] identifier[as_copy] :
identifier[forwarded_messages] =[]
keyword[for] identifier[chunk] keyword[in] [ identifier[message_ids] [ identifier[i] : identifier[i] + literal[int] ] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[message_ids] ), literal[int] )]:
identifier[messages] = identifier[self] . identifier[get_messages] ( identifier[chat_id] = identifier[from_chat_id] , identifier[message_ids] = identifier[chunk] )
keyword[for] identifier[message] keyword[in] identifier[messages] . identifier[messages] :
identifier[forwarded_messages] . identifier[append] (
identifier[message] . identifier[forward] (
identifier[chat_id] ,
identifier[disable_notification] = identifier[disable_notification] ,
identifier[as_copy] = keyword[True] ,
identifier[remove_caption] = identifier[remove_caption]
)
)
keyword[return] identifier[pyrogram] . identifier[Messages] (
identifier[client] = identifier[self] ,
identifier[total_count] = identifier[len] ( identifier[forwarded_messages] ),
identifier[messages] = identifier[forwarded_messages]
) keyword[if] identifier[is_iterable] keyword[else] identifier[forwarded_messages] [ literal[int] ]
keyword[else] :
identifier[r] = identifier[self] . identifier[send] (
identifier[functions] . identifier[messages] . identifier[ForwardMessages] (
identifier[to_peer] = identifier[self] . identifier[resolve_peer] ( identifier[chat_id] ),
identifier[from_peer] = identifier[self] . identifier[resolve_peer] ( identifier[from_chat_id] ),
identifier[id] = identifier[message_ids] ,
identifier[silent] = identifier[disable_notification] keyword[or] keyword[None] ,
identifier[random_id] =[ identifier[self] . identifier[rnd_id] () keyword[for] identifier[_] keyword[in] identifier[message_ids] ]
)
)
identifier[forwarded_messages] =[]
identifier[users] ={ identifier[i] . identifier[id] : identifier[i] keyword[for] identifier[i] keyword[in] identifier[r] . identifier[users] }
identifier[chats] ={ identifier[i] . identifier[id] : identifier[i] keyword[for] identifier[i] keyword[in] identifier[r] . identifier[chats] }
keyword[for] identifier[i] keyword[in] identifier[r] . identifier[updates] :
keyword[if] identifier[isinstance] ( identifier[i] ,( identifier[types] . identifier[UpdateNewMessage] , identifier[types] . identifier[UpdateNewChannelMessage] )):
identifier[forwarded_messages] . identifier[append] (
identifier[pyrogram] . identifier[Message] . identifier[_parse] (
identifier[self] , identifier[i] . identifier[message] ,
identifier[users] , identifier[chats]
)
)
keyword[return] identifier[pyrogram] . identifier[Messages] (
identifier[client] = identifier[self] ,
identifier[total_count] = identifier[len] ( identifier[forwarded_messages] ),
identifier[messages] = identifier[forwarded_messages]
) keyword[if] identifier[is_iterable] keyword[else] identifier[forwarded_messages] [ literal[int] ] | def forward_messages(self, chat_id: Union[int, str], from_chat_id: Union[int, str], message_ids: Iterable[int], disable_notification: bool=None, as_copy: bool=False, remove_caption: bool=False) -> 'pyrogram.Messages':
"""Use this method to forward messages of any kind.
Args:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
from_chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the source chat where the original message was sent.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
message_ids (``iterable``):
A list of Message identifiers in the chat specified in *from_chat_id* or a single message id.
Iterators and Generators are also accepted.
disable_notification (``bool``, *optional*):
Sends the message silently.
Users will receive a notification with no sound.
as_copy (``bool``, *optional*):
Pass True to forward messages without the forward header (i.e.: send a copy of the message content).
Defaults to False.
remove_caption (``bool``, *optional*):
If set to True and *as_copy* is enabled as well, media captions are not preserved when copying the
message. Has no effect if *as_copy* is not enabled.
Defaults to False.
Returns:
On success and in case *message_ids* was an iterable, the returned value will be a list of the forwarded
:obj:`Messages <pyrogram.Message>` even if a list contains just one element, otherwise if
*message_ids* was an integer, the single forwarded :obj:`Message <pyrogram.Message>`
is returned.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
"""
is_iterable = not isinstance(message_ids, int)
message_ids = list(message_ids) if is_iterable else [message_ids]
if as_copy:
forwarded_messages = []
for chunk in [message_ids[i:i + 200] for i in range(0, len(message_ids), 200)]:
messages = self.get_messages(chat_id=from_chat_id, message_ids=chunk) # type: pyrogram.Messages
for message in messages.messages:
forwarded_messages.append(message.forward(chat_id, disable_notification=disable_notification, as_copy=True, remove_caption=remove_caption)) # depends on [control=['for'], data=['message']] # depends on [control=['for'], data=['chunk']]
return pyrogram.Messages(client=self, total_count=len(forwarded_messages), messages=forwarded_messages) if is_iterable else forwarded_messages[0] # depends on [control=['if'], data=[]]
else:
r = self.send(functions.messages.ForwardMessages(to_peer=self.resolve_peer(chat_id), from_peer=self.resolve_peer(from_chat_id), id=message_ids, silent=disable_notification or None, random_id=[self.rnd_id() for _ in message_ids]))
forwarded_messages = []
users = {i.id: i for i in r.users}
chats = {i.id: i for i in r.chats}
for i in r.updates:
if isinstance(i, (types.UpdateNewMessage, types.UpdateNewChannelMessage)):
forwarded_messages.append(pyrogram.Message._parse(self, i.message, users, chats)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return pyrogram.Messages(client=self, total_count=len(forwarded_messages), messages=forwarded_messages) if is_iterable else forwarded_messages[0] |
def parse(handle, seqtype=None, robust=False):
'''Wrap SeqIO.parse'''
if seqtype is None:
seqtype = _get_seqtype_from_ext(handle)
if seqtype.startswith('gz-'):
handle = _unzip_handle(handle)
seqtype = seqtype[3:]
# False positive from pylint, both handles are fileobj-like
# pylint: disable=redefined-variable-type
if robust:
if seqtype == "embl":
handle = sanity_check_embl(handle)
elif seqtype == "genbank":
handle = sanity_check_genbank(handle)
elif seqtype == "fasta":
handle = sanity_check_fasta(handle)
# pylint: enable=redefined-variable-type
return SeqIO.parse(handle, seqtype) | def function[parse, parameter[handle, seqtype, robust]]:
constant[Wrap SeqIO.parse]
if compare[name[seqtype] is constant[None]] begin[:]
variable[seqtype] assign[=] call[name[_get_seqtype_from_ext], parameter[name[handle]]]
if call[name[seqtype].startswith, parameter[constant[gz-]]] begin[:]
variable[handle] assign[=] call[name[_unzip_handle], parameter[name[handle]]]
variable[seqtype] assign[=] call[name[seqtype]][<ast.Slice object at 0x7da1b2243d30>]
if name[robust] begin[:]
if compare[name[seqtype] equal[==] constant[embl]] begin[:]
variable[handle] assign[=] call[name[sanity_check_embl], parameter[name[handle]]]
return[call[name[SeqIO].parse, parameter[name[handle], name[seqtype]]]] | keyword[def] identifier[parse] ( identifier[handle] , identifier[seqtype] = keyword[None] , identifier[robust] = keyword[False] ):
literal[string]
keyword[if] identifier[seqtype] keyword[is] keyword[None] :
identifier[seqtype] = identifier[_get_seqtype_from_ext] ( identifier[handle] )
keyword[if] identifier[seqtype] . identifier[startswith] ( literal[string] ):
identifier[handle] = identifier[_unzip_handle] ( identifier[handle] )
identifier[seqtype] = identifier[seqtype] [ literal[int] :]
keyword[if] identifier[robust] :
keyword[if] identifier[seqtype] == literal[string] :
identifier[handle] = identifier[sanity_check_embl] ( identifier[handle] )
keyword[elif] identifier[seqtype] == literal[string] :
identifier[handle] = identifier[sanity_check_genbank] ( identifier[handle] )
keyword[elif] identifier[seqtype] == literal[string] :
identifier[handle] = identifier[sanity_check_fasta] ( identifier[handle] )
keyword[return] identifier[SeqIO] . identifier[parse] ( identifier[handle] , identifier[seqtype] ) | def parse(handle, seqtype=None, robust=False):
"""Wrap SeqIO.parse"""
if seqtype is None:
seqtype = _get_seqtype_from_ext(handle) # depends on [control=['if'], data=['seqtype']]
if seqtype.startswith('gz-'):
handle = _unzip_handle(handle)
seqtype = seqtype[3:] # depends on [control=['if'], data=[]]
# False positive from pylint, both handles are fileobj-like
# pylint: disable=redefined-variable-type
if robust:
if seqtype == 'embl':
handle = sanity_check_embl(handle) # depends on [control=['if'], data=[]]
elif seqtype == 'genbank':
handle = sanity_check_genbank(handle) # depends on [control=['if'], data=[]]
elif seqtype == 'fasta':
handle = sanity_check_fasta(handle) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# pylint: enable=redefined-variable-type
return SeqIO.parse(handle, seqtype) |
def create(self, name, url, description=None, extra=None, is_public=None,
is_protected=None):
"""Create a Job Binary.
:param dict extra: authentication info needed for some job binaries,
containing the keys `user` and `password` for job binary in Swift
or the keys `accesskey`, `secretkey`, and `endpoint` for job
binary in S3
"""
data = {
"name": name,
"url": url
}
self._copy_if_defined(data, description=description, extra=extra,
is_public=is_public, is_protected=is_protected)
return self._create('/job-binaries', data, 'job_binary') | def function[create, parameter[self, name, url, description, extra, is_public, is_protected]]:
constant[Create a Job Binary.
:param dict extra: authentication info needed for some job binaries,
containing the keys `user` and `password` for job binary in Swift
or the keys `accesskey`, `secretkey`, and `endpoint` for job
binary in S3
]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b1b34850>, <ast.Constant object at 0x7da1b1b345b0>], [<ast.Name object at 0x7da1b1b34d60>, <ast.Name object at 0x7da1b1b35bd0>]]
call[name[self]._copy_if_defined, parameter[name[data]]]
return[call[name[self]._create, parameter[constant[/job-binaries], name[data], constant[job_binary]]]] | keyword[def] identifier[create] ( identifier[self] , identifier[name] , identifier[url] , identifier[description] = keyword[None] , identifier[extra] = keyword[None] , identifier[is_public] = keyword[None] ,
identifier[is_protected] = keyword[None] ):
literal[string]
identifier[data] ={
literal[string] : identifier[name] ,
literal[string] : identifier[url]
}
identifier[self] . identifier[_copy_if_defined] ( identifier[data] , identifier[description] = identifier[description] , identifier[extra] = identifier[extra] ,
identifier[is_public] = identifier[is_public] , identifier[is_protected] = identifier[is_protected] )
keyword[return] identifier[self] . identifier[_create] ( literal[string] , identifier[data] , literal[string] ) | def create(self, name, url, description=None, extra=None, is_public=None, is_protected=None):
"""Create a Job Binary.
:param dict extra: authentication info needed for some job binaries,
containing the keys `user` and `password` for job binary in Swift
or the keys `accesskey`, `secretkey`, and `endpoint` for job
binary in S3
"""
data = {'name': name, 'url': url}
self._copy_if_defined(data, description=description, extra=extra, is_public=is_public, is_protected=is_protected)
return self._create('/job-binaries', data, 'job_binary') |
def get_video_url_from_video_id(video_id):
"""Splicing URLs according to video ID to get video details"""
# from js
data = [""] * 256
for index, _ in enumerate(data):
t = index
for i in range(8):
t = -306674912 ^ unsigned_right_shitf(t, 1) if 1 & t else unsigned_right_shitf(t, 1)
data[index] = t
def tmp():
rand_num = random.random()
path = "/video/urls/v/1/toutiao/mp4/{video_id}?r={random_num}".format(video_id=video_id,
random_num=str(rand_num)[2:])
e = o = r = -1
i, a = 0, len(path)
while i < a:
e = ord(path[i])
i += 1
if e < 128:
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ e)]
else:
if e < 2048:
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (192 | e >> 6 & 31))]
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & e))]
else:
if 55296 <= e < 57344:
e = (1023 & e) + 64
i += 1
o = 1023 & t.url(i)
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (240 | e >> 8 & 7))]
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | e >> 2 & 63))]
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | o >> 6 & 15 | (3 & e) << 4))]
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & o))]
else:
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (224 | e >> 12 & 15))]
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | e >> 6 & 63))]
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & e))]
return "https://ib.365yg.com{path}&s={param}".format(path=path, param=unsigned_right_shitf(r ^ -1, 0))
while 1:
url = tmp()
if url.split("=")[-1][0] != "-": # 参数s不能为负数
return url | def function[get_video_url_from_video_id, parameter[video_id]]:
constant[Splicing URLs according to video ID to get video details]
variable[data] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b0528fd0>]] * constant[256]]
for taget[tuple[[<ast.Name object at 0x7da1b052af20>, <ast.Name object at 0x7da1b052a620>]]] in starred[call[name[enumerate], parameter[name[data]]]] begin[:]
variable[t] assign[=] name[index]
for taget[name[i]] in starred[call[name[range], parameter[constant[8]]]] begin[:]
variable[t] assign[=] <ast.IfExp object at 0x7da1b052afe0>
call[name[data]][name[index]] assign[=] name[t]
def function[tmp, parameter[]]:
variable[rand_num] assign[=] call[name[random].random, parameter[]]
variable[path] assign[=] call[constant[/video/urls/v/1/toutiao/mp4/{video_id}?r={random_num}].format, parameter[]]
variable[e] assign[=] <ast.UnaryOp object at 0x7da1b05296f0>
<ast.Tuple object at 0x7da1b0529db0> assign[=] tuple[[<ast.Constant object at 0x7da1b0529e70>, <ast.Call object at 0x7da1b052b2b0>]]
while compare[name[i] less[<] name[a]] begin[:]
variable[e] assign[=] call[name[ord], parameter[call[name[path]][name[i]]]]
<ast.AugAssign object at 0x7da1b0528160>
if compare[name[e] less[<] constant[128]] begin[:]
variable[r] assign[=] binary_operation[call[name[unsigned_right_shitf], parameter[name[r], constant[8]]] <ast.BitXor object at 0x7da2590d6b00> call[name[data]][binary_operation[constant[255] <ast.BitAnd object at 0x7da2590d6b60> binary_operation[name[r] <ast.BitXor object at 0x7da2590d6b00> name[e]]]]]
return[call[constant[https://ib.365yg.com{path}&s={param}].format, parameter[]]]
while constant[1] begin[:]
variable[url] assign[=] call[name[tmp], parameter[]]
if compare[call[call[call[name[url].split, parameter[constant[=]]]][<ast.UnaryOp object at 0x7da2054a7550>]][constant[0]] not_equal[!=] constant[-]] begin[:]
return[name[url]] | keyword[def] identifier[get_video_url_from_video_id] ( identifier[video_id] ):
literal[string]
identifier[data] =[ literal[string] ]* literal[int]
keyword[for] identifier[index] , identifier[_] keyword[in] identifier[enumerate] ( identifier[data] ):
identifier[t] = identifier[index]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ):
identifier[t] =- literal[int] ^ identifier[unsigned_right_shitf] ( identifier[t] , literal[int] ) keyword[if] literal[int] & identifier[t] keyword[else] identifier[unsigned_right_shitf] ( identifier[t] , literal[int] )
identifier[data] [ identifier[index] ]= identifier[t]
keyword[def] identifier[tmp] ():
identifier[rand_num] = identifier[random] . identifier[random] ()
identifier[path] = literal[string] . identifier[format] ( identifier[video_id] = identifier[video_id] ,
identifier[random_num] = identifier[str] ( identifier[rand_num] )[ literal[int] :])
identifier[e] = identifier[o] = identifier[r] =- literal[int]
identifier[i] , identifier[a] = literal[int] , identifier[len] ( identifier[path] )
keyword[while] identifier[i] < identifier[a] :
identifier[e] = identifier[ord] ( identifier[path] [ identifier[i] ])
identifier[i] += literal[int]
keyword[if] identifier[e] < literal[int] :
identifier[r] = identifier[unsigned_right_shitf] ( identifier[r] , literal[int] )^ identifier[data] [ literal[int] &( identifier[r] ^ identifier[e] )]
keyword[else] :
keyword[if] identifier[e] < literal[int] :
identifier[r] = identifier[unsigned_right_shitf] ( identifier[r] , literal[int] )^ identifier[data] [ literal[int] &( identifier[r] ^( literal[int] | identifier[e] >> literal[int] & literal[int] ))]
identifier[r] = identifier[unsigned_right_shitf] ( identifier[r] , literal[int] )^ identifier[data] [ literal[int] &( identifier[r] ^( literal[int] | literal[int] & identifier[e] ))]
keyword[else] :
keyword[if] literal[int] <= identifier[e] < literal[int] :
identifier[e] =( literal[int] & identifier[e] )+ literal[int]
identifier[i] += literal[int]
identifier[o] = literal[int] & identifier[t] . identifier[url] ( identifier[i] )
identifier[r] = identifier[unsigned_right_shitf] ( identifier[r] , literal[int] )^ identifier[data] [ literal[int] &( identifier[r] ^( literal[int] | identifier[e] >> literal[int] & literal[int] ))]
identifier[r] = identifier[unsigned_right_shitf] ( identifier[r] , literal[int] )^ identifier[data] [ literal[int] &( identifier[r] ^( literal[int] | identifier[e] >> literal[int] & literal[int] ))]
identifier[r] = identifier[unsigned_right_shitf] ( identifier[r] , literal[int] )^ identifier[data] [ literal[int] &( identifier[r] ^( literal[int] | identifier[o] >> literal[int] & literal[int] |( literal[int] & identifier[e] )<< literal[int] ))]
identifier[r] = identifier[unsigned_right_shitf] ( identifier[r] , literal[int] )^ identifier[data] [ literal[int] &( identifier[r] ^( literal[int] | literal[int] & identifier[o] ))]
keyword[else] :
identifier[r] = identifier[unsigned_right_shitf] ( identifier[r] , literal[int] )^ identifier[data] [ literal[int] &( identifier[r] ^( literal[int] | identifier[e] >> literal[int] & literal[int] ))]
identifier[r] = identifier[unsigned_right_shitf] ( identifier[r] , literal[int] )^ identifier[data] [ literal[int] &( identifier[r] ^( literal[int] | identifier[e] >> literal[int] & literal[int] ))]
identifier[r] = identifier[unsigned_right_shitf] ( identifier[r] , literal[int] )^ identifier[data] [ literal[int] &( identifier[r] ^( literal[int] | literal[int] & identifier[e] ))]
keyword[return] literal[string] . identifier[format] ( identifier[path] = identifier[path] , identifier[param] = identifier[unsigned_right_shitf] ( identifier[r] ^- literal[int] , literal[int] ))
keyword[while] literal[int] :
identifier[url] = identifier[tmp] ()
keyword[if] identifier[url] . identifier[split] ( literal[string] )[- literal[int] ][ literal[int] ]!= literal[string] :
keyword[return] identifier[url] | def get_video_url_from_video_id(video_id):
"""Splicing URLs according to video ID to get video details"""
# from js
data = [''] * 256
for (index, _) in enumerate(data):
t = index
for i in range(8):
t = -306674912 ^ unsigned_right_shitf(t, 1) if 1 & t else unsigned_right_shitf(t, 1) # depends on [control=['for'], data=[]]
data[index] = t # depends on [control=['for'], data=[]]
def tmp():
rand_num = random.random()
path = '/video/urls/v/1/toutiao/mp4/{video_id}?r={random_num}'.format(video_id=video_id, random_num=str(rand_num)[2:])
e = o = r = -1
(i, a) = (0, len(path))
while i < a:
e = ord(path[i])
i += 1
if e < 128:
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ e)] # depends on [control=['if'], data=['e']]
elif e < 2048:
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (192 | e >> 6 & 31))]
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & e))] # depends on [control=['if'], data=['e']]
elif 55296 <= e < 57344:
e = (1023 & e) + 64
i += 1
o = 1023 & t.url(i)
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (240 | e >> 8 & 7))]
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | e >> 2 & 63))]
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | o >> 6 & 15 | (3 & e) << 4))]
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & o))] # depends on [control=['if'], data=['e']]
else:
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (224 | e >> 12 & 15))]
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | e >> 6 & 63))]
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & e))] # depends on [control=['while'], data=['i']]
return 'https://ib.365yg.com{path}&s={param}'.format(path=path, param=unsigned_right_shitf(r ^ -1, 0))
while 1:
url = tmp()
if url.split('=')[-1][0] != '-': # 参数s不能为负数
return url # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.