code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def _RequestUrl(self, url, verb, data=None):
"""
Request a url.
:param url: The web location we want to retrieve.
:param verb: GET only (for now).
:param data: A dict of (str, unicode) key/value pairs merged into the
query string.
:return: The ``requests`` response object for a GET; ``0`` for any
other verb.
:raises ZillowError: wraps any ``requests.RequestException`` raised by
the underlying HTTP call.
"""
if verb == 'GET':
# Fold the extra parameters into the URL before issuing the request.
url = self._BuildUrl(url, extra_params=data)
try:
return requests.get(
url,
auth=self.__auth,
timeout=self._timeout
)
except requests.RequestException as e:
# Re-raise transport errors (timeouts, DNS, resets, ...) as the
# library's own exception type.
raise ZillowError(str(e))
# Unsupported verbs fall through to a sentinel return value.
return 0 | def function[_RequestUrl, parameter[self, url, verb, data]]:
constant[
Request a url.
:param url: The web location we want to retrieve.
:param verb: GET only (for now).
:param data: A dict of (str, unicode) key/value pairs.
:return:A JSON object.
]
if compare[name[verb] equal[==] constant[GET]] begin[:]
variable[url] assign[=] call[name[self]._BuildUrl, parameter[name[url]]]
<ast.Try object at 0x7da2046225c0>
return[constant[0]] | keyword[def] identifier[_RequestUrl] ( identifier[self] , identifier[url] , identifier[verb] , identifier[data] = keyword[None] ):
literal[string]
keyword[if] identifier[verb] == literal[string] :
identifier[url] = identifier[self] . identifier[_BuildUrl] ( identifier[url] , identifier[extra_params] = identifier[data] )
keyword[try] :
keyword[return] identifier[requests] . identifier[get] (
identifier[url] ,
identifier[auth] = identifier[self] . identifier[__auth] ,
identifier[timeout] = identifier[self] . identifier[_timeout]
)
keyword[except] identifier[requests] . identifier[RequestException] keyword[as] identifier[e] :
keyword[raise] identifier[ZillowError] ( identifier[str] ( identifier[e] ))
keyword[return] literal[int] | def _RequestUrl(self, url, verb, data=None):
"""
Request a url.
:param url: The web location we want to retrieve.
:param verb: GET only (for now).
:param data: A dict of (str, unicode) key/value pairs.
:return:A JSON object.
"""
if verb == 'GET':
url = self._BuildUrl(url, extra_params=data)
try:
return requests.get(url, auth=self.__auth, timeout=self._timeout) # depends on [control=['try'], data=[]]
except requests.RequestException as e:
raise ZillowError(str(e)) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
return 0 |
def validate_quantity(self, value):
"""Validate that the value is of the `Quantity` type.
Reports a validation failure through ``self._error`` instead of
raising directly.
"""
# ``pq`` is presumably the ``quantities`` package -- confirm at the
# import site.
if not isinstance(value, pq.quantity.Quantity):
self._error('%s' % value, "Must be a Python quantity.") | def function[validate_quantity, parameter[self, value]]:
constant[Validate that the value is of the `Quantity` type.]
if <ast.UnaryOp object at 0x7da1b0e648e0> begin[:]
call[name[self]._error, parameter[binary_operation[constant[%s] <ast.Mod object at 0x7da2590d6920> name[value]], constant[Must be a Python quantity.]]] | keyword[def] identifier[validate_quantity] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[pq] . identifier[quantity] . identifier[Quantity] ):
identifier[self] . identifier[_error] ( literal[string] % identifier[value] , literal[string] ) | def validate_quantity(self, value):
"""Validate that the value is of the `Quantity` type."""
if not isinstance(value, pq.quantity.Quantity):
self._error('%s' % value, 'Must be a Python quantity.') # depends on [control=['if'], data=[]] |
def _del_thread(self, dwThreadId):
"""
Private method to remove a thread object from the snapshot.
@type dwThreadId: int
@param dwThreadId: Global thread ID.
"""
try:
aThread = self.__threadDict[dwThreadId]
del self.__threadDict[dwThreadId]
except KeyError:
# Unknown IDs are tolerated: emit a warning instead of raising so
# callers may delete threads that are already gone.
aThread = None
msg = "Unknown thread ID %d" % dwThreadId
warnings.warn(msg, RuntimeWarning)
if aThread:
# Release the removed thread object's internal state.
aThread.clear() | def function[_del_thread, parameter[self, dwThreadId]]:
constant[
Private method to remove a thread object from the snapshot.
@type dwThreadId: int
@param dwThreadId: Global thread ID.
]
<ast.Try object at 0x7da1b08d9330>
if name[aThread] begin[:]
call[name[aThread].clear, parameter[]] | keyword[def] identifier[_del_thread] ( identifier[self] , identifier[dwThreadId] ):
literal[string]
keyword[try] :
identifier[aThread] = identifier[self] . identifier[__threadDict] [ identifier[dwThreadId] ]
keyword[del] identifier[self] . identifier[__threadDict] [ identifier[dwThreadId] ]
keyword[except] identifier[KeyError] :
identifier[aThread] = keyword[None]
identifier[msg] = literal[string] % identifier[dwThreadId]
identifier[warnings] . identifier[warn] ( identifier[msg] , identifier[RuntimeWarning] )
keyword[if] identifier[aThread] :
identifier[aThread] . identifier[clear] () | def _del_thread(self, dwThreadId):
"""
Private method to remove a thread object from the snapshot.
@type dwThreadId: int
@param dwThreadId: Global thread ID.
"""
try:
aThread = self.__threadDict[dwThreadId]
del self.__threadDict[dwThreadId] # depends on [control=['try'], data=[]]
except KeyError:
aThread = None
msg = 'Unknown thread ID %d' % dwThreadId
warnings.warn(msg, RuntimeWarning) # depends on [control=['except'], data=[]]
if aThread:
aThread.clear() # depends on [control=['if'], data=[]] |
def fetch(self, keyDict):
"""Like update(), but for retrieving values.
Fills *keyDict* in place: every existing key gets its value replaced
by the corresponding entry from ``self.tbl``. A key missing from the
table raises ``KeyError``.
"""
for key in keyDict.keys():
keyDict[key] = self.tbl[key] | def function[fetch, parameter[self, keyDict]]:
constant[Like update(), but for retrieving values.
]
for taget[name[key]] in starred[call[name[keyDict].keys, parameter[]]] begin[:]
call[name[keyDict]][name[key]] assign[=] call[name[self].tbl][name[key]] | keyword[def] identifier[fetch] ( identifier[self] , identifier[keyDict] ):
literal[string]
keyword[for] identifier[key] keyword[in] identifier[keyDict] . identifier[keys] ():
identifier[keyDict] [ identifier[key] ]= identifier[self] . identifier[tbl] [ identifier[key] ] | def fetch(self, keyDict):
"""Like update(), but for retrieving values.
"""
for key in keyDict.keys():
keyDict[key] = self.tbl[key] # depends on [control=['for'], data=['key']] |
def get_episode_title(episode: Episode) -> str:
"""Get the episode title.
Japanese title is prioritized.
"""
for title in episode.titles:
if title.lang == 'ja':
return title.title
else:
# ``for``/``else``: runs only when no Japanese title was found;
# fall back to the first listed title.
return episode.titles[0].title | def function[get_episode_title, parameter[episode]]:
constant[Get the episode title.
Japanese title is prioritized.
]
for taget[name[title]] in starred[name[episode].titles] begin[:]
if compare[name[title].lang equal[==] constant[ja]] begin[:]
return[name[title].title] | keyword[def] identifier[get_episode_title] ( identifier[episode] : identifier[Episode] )-> identifier[int] :
literal[string]
keyword[for] identifier[title] keyword[in] identifier[episode] . identifier[titles] :
keyword[if] identifier[title] . identifier[lang] == literal[string] :
keyword[return] identifier[title] . identifier[title]
keyword[else] :
keyword[return] identifier[episode] . identifier[titles] [ literal[int] ]. identifier[title] | def get_episode_title(episode: Episode) -> int:
"""Get the episode title.
Japanese title is prioritized.
"""
for title in episode.titles:
if title.lang == 'ja':
return title.title # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['title']]
else:
return episode.titles[0].title |
def add_country_locations(self, countries, locations=None, use_live=True):
# type: (List[str], Optional[List[str]], bool) -> bool
"""Add a list of countries. If iso 3 codes are not provided, values are parsed and where they are valid country
names, converted to iso 3 codes. If any country is already added, it is ignored.
Args:
countries (List[str]): list of countries to add
locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX.
use_live (bool): Try to get use latest country data from web rather than file in package. Defaults to True.
Returns:
bool: True if all countries added or False if any already present.
"""
allcountriesadded = True
for country in countries:
# Attempt every country even after a duplicate; only the aggregate
# success flag records the failure.
if not self.add_country_location(country, locations=locations, use_live=use_live):
allcountriesadded = False
return allcountriesadded | def function[add_country_locations, parameter[self, countries, locations, use_live]]:
constant[Add a list of countries. If iso 3 codes are not provided, values are parsed and where they are valid country
names, converted to iso 3 codes. If any country is already added, it is ignored.
Args:
countries (List[str]): list of countries to add
locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX.
use_live (bool): Try to get use latest country data from web rather than file in package. Defaults to True.
Returns:
bool: True if all countries added or False if any already present.
]
variable[allcountriesadded] assign[=] constant[True]
for taget[name[country]] in starred[name[countries]] begin[:]
if <ast.UnaryOp object at 0x7da1b0e332e0> begin[:]
variable[allcountriesadded] assign[=] constant[False]
return[name[allcountriesadded]] | keyword[def] identifier[add_country_locations] ( identifier[self] , identifier[countries] , identifier[locations] = keyword[None] , identifier[use_live] = keyword[True] ):
literal[string]
identifier[allcountriesadded] = keyword[True]
keyword[for] identifier[country] keyword[in] identifier[countries] :
keyword[if] keyword[not] identifier[self] . identifier[add_country_location] ( identifier[country] , identifier[locations] = identifier[locations] , identifier[use_live] = identifier[use_live] ):
identifier[allcountriesadded] = keyword[False]
keyword[return] identifier[allcountriesadded] | def add_country_locations(self, countries, locations=None, use_live=True):
# type: (List[str], Optional[List[str]], bool) -> bool
'Add a list of countries. If iso 3 codes are not provided, values are parsed and where they are valid country\n names, converted to iso 3 codes. If any country is already added, it is ignored.\n\n Args:\n countries (List[str]): list of countries to add\n locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX.\n use_live (bool): Try to get use latest country data from web rather than file in package. Defaults to True.\n\n Returns:\n bool: True if all countries added or False if any already present.\n '
allcountriesadded = True
for country in countries:
if not self.add_country_location(country, locations=locations, use_live=use_live):
allcountriesadded = False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['country']]
return allcountriesadded |
def create(self, num):
"""
Creates the environment
in your subclassed create function include the line below
super().build(arg1, arg2, arg2, ...)
"""
# Base implementation only logs; subclasses perform the construction.
self.log.record_process('enviroment.py', 'Creating ' + str(num) + ' environments - ' + self.name) | def function[create, parameter[self, num]]:
constant[
Creates the environment
in your subclassed create function include the line below
super().build(arg1, arg2, arg2, ...)
]
call[name[self].log.record_process, parameter[constant[enviroment.py], binary_operation[binary_operation[binary_operation[constant[Creating ] + call[name[str], parameter[name[num]]]] + constant[ environments - ]] + name[self].name]]] | keyword[def] identifier[create] ( identifier[self] , identifier[num] ):
literal[string]
identifier[self] . identifier[log] . identifier[record_process] ( literal[string] , literal[string] + identifier[str] ( identifier[num] )+ literal[string] + identifier[self] . identifier[name] ) | def create(self, num):
"""
Creates the environment
in your subclassed create function include the line below
super().build(arg1, arg2, arg2, ...)
"""
self.log.record_process('enviroment.py', 'Creating ' + str(num) + ' environments - ' + self.name) |
def can_import(self, file_uris, current_doc=None):
"""
Check that the specified file looks like a directory containing many
pdf files
"""
if len(file_uris) <= 0:
return False
try:
for file_uri in file_uris:
file_uri = self.fs.safe(file_uri)
# Recurse into each URI; a single matching child is enough.
for child in self.fs.recurse(file_uri):
if self.check_file_type(child):
return True
except GLib.GError:
# Unreadable locations mean "cannot import", not an error.
pass
return False | def function[can_import, parameter[self, file_uris, current_doc]]:
constant[
Check that the specified file looks like a directory containing many
pdf files
]
if compare[call[name[len], parameter[name[file_uris]]] less_or_equal[<=] constant[0]] begin[:]
return[constant[False]]
<ast.Try object at 0x7da1b254d5a0>
return[constant[False]] | keyword[def] identifier[can_import] ( identifier[self] , identifier[file_uris] , identifier[current_doc] = keyword[None] ):
literal[string]
keyword[if] identifier[len] ( identifier[file_uris] )<= literal[int] :
keyword[return] keyword[False]
keyword[try] :
keyword[for] identifier[file_uri] keyword[in] identifier[file_uris] :
identifier[file_uri] = identifier[self] . identifier[fs] . identifier[safe] ( identifier[file_uri] )
keyword[for] identifier[child] keyword[in] identifier[self] . identifier[fs] . identifier[recurse] ( identifier[file_uri] ):
keyword[if] identifier[self] . identifier[check_file_type] ( identifier[child] ):
keyword[return] keyword[True]
keyword[except] identifier[GLib] . identifier[GError] :
keyword[pass]
keyword[return] keyword[False] | def can_import(self, file_uris, current_doc=None):
"""
Check that the specified file looks like a directory containing many
pdf files
"""
if len(file_uris) <= 0:
return False # depends on [control=['if'], data=[]]
try:
for file_uri in file_uris:
file_uri = self.fs.safe(file_uri)
for child in self.fs.recurse(file_uri):
if self.check_file_type(child):
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['child']] # depends on [control=['for'], data=['file_uri']] # depends on [control=['try'], data=[]]
except GLib.GError:
pass # depends on [control=['except'], data=[]]
return False |
def _find_listeners():
"""Find GPIB listeners.
Yields the primary addresses (0-30) of devices acknowledging as
listeners on BOARD. ``gpib.ask(BOARD, 1)`` presumably returns the
board's own primary address, which is excluded -- confirm against the
linux-gpib docs.
"""
for i in range(31):
try:
if gpib.listener(BOARD, i) and gpib.ask(BOARD, 1) != i:
yield i
except gpib.GpibError as e:
# A probe failure on one address must not abort the scan.
logger.debug("GPIB error in _find_listeners(): %s", repr(e)) | def function[_find_listeners, parameter[]]:
constant[Find GPIB listeners.
]
for taget[name[i]] in starred[call[name[range], parameter[constant[31]]]] begin[:]
<ast.Try object at 0x7da20c6c4f70> | keyword[def] identifier[_find_listeners] ():
literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ):
keyword[try] :
keyword[if] identifier[gpib] . identifier[listener] ( identifier[BOARD] , identifier[i] ) keyword[and] identifier[gpib] . identifier[ask] ( identifier[BOARD] , literal[int] )!= identifier[i] :
keyword[yield] identifier[i]
keyword[except] identifier[gpib] . identifier[GpibError] keyword[as] identifier[e] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[repr] ( identifier[e] )) | def _find_listeners():
"""Find GPIB listeners.
"""
for i in range(31):
try:
if gpib.listener(BOARD, i) and gpib.ask(BOARD, 1) != i:
yield i # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except gpib.GpibError as e:
logger.debug('GPIB error in _find_listeners(): %s', repr(e)) # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['i']] |
def get_devices(self, device_type='all'):
"""Enumerate PyAudio devices, caching them by name in ``self.devices``.
:param device_type: currently unused -- see the disabled filtering
code below. TODO(review): confirm intended semantics.
:return: the (updated) ``self.devices`` dict of name -> PyAudioDevice.
"""
num_devices = self._pyaudio.get_device_count()
self._logger.debug('Found %d PyAudio devices', num_devices)
for i in range(num_devices):
info = self._pyaudio.get_device_info_by_index(i)
name = info['name']
if name in self.devices:
# Already cached; keep the existing wrapper object.
continue
else:
self.devices[name] = PyAudioDevice(self, info)
return self.devices
# Dead code: disabled filtering logic left behind as a string literal.
"""
if device_type == plugin.audioengine.DEVICE_TYPE_ALL:
return devs
else:
return [device for device in devs if device_type in device.types]
""" | def function[get_devices, parameter[self, device_type]]:
variable[num_devices] assign[=] call[name[self]._pyaudio.get_device_count, parameter[]]
call[name[self]._logger.debug, parameter[constant[Found %d PyAudio devices], name[num_devices]]]
for taget[name[i]] in starred[call[name[range], parameter[name[num_devices]]]] begin[:]
variable[info] assign[=] call[name[self]._pyaudio.get_device_info_by_index, parameter[name[i]]]
variable[name] assign[=] call[name[info]][constant[name]]
if compare[name[name] in name[self].devices] begin[:]
continue
return[name[self].devices]
constant[
if device_type == plugin.audioengine.DEVICE_TYPE_ALL:
return devs
else:
return [device for device in devs if device_type in device.types]
] | keyword[def] identifier[get_devices] ( identifier[self] , identifier[device_type] = literal[string] ):
identifier[num_devices] = identifier[self] . identifier[_pyaudio] . identifier[get_device_count] ()
identifier[self] . identifier[_logger] . identifier[debug] ( literal[string] , identifier[num_devices] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[num_devices] ):
identifier[info] = identifier[self] . identifier[_pyaudio] . identifier[get_device_info_by_index] ( identifier[i] )
identifier[name] = identifier[info] [ literal[string] ]
keyword[if] identifier[name] keyword[in] identifier[self] . identifier[devices] :
keyword[continue]
keyword[else] :
identifier[self] . identifier[devices] [ identifier[name] ]= identifier[PyAudioDevice] ( identifier[self] , identifier[info] )
keyword[return] identifier[self] . identifier[devices]
literal[string] | def get_devices(self, device_type='all'):
num_devices = self._pyaudio.get_device_count()
self._logger.debug('Found %d PyAudio devices', num_devices)
for i in range(num_devices):
info = self._pyaudio.get_device_info_by_index(i)
name = info['name']
if name in self.devices:
continue # depends on [control=['if'], data=[]]
else:
self.devices[name] = PyAudioDevice(self, info) # depends on [control=['for'], data=['i']]
return self.devices
'\n if device_type == plugin.audioengine.DEVICE_TYPE_ALL:\n return devs\n else:\n return [device for device in devs if device_type in device.types]\n ' |
def copy_experiment(experiment):
"""If experiment is a restart, we should resume from last check point.
Copies the original experiment's outputs into the restarted
experiment's output store, announcing progress in the job log. An
``OSError`` during the copy is reported and swallowed so a failed copy
does not abort the restart.
"""
try:
publisher.publish_experiment_job_log(
log_lines='Copying outputs from experiment `{}` into experiment `{}`'.format(
experiment.original_experiment.unique_name, experiment.unique_name
),
experiment_uuid=experiment.uuid.hex,
experiment_name=experiment.unique_name,
job_uuid='all',
)
stores.copy_experiment_outputs(
persistence_outputs_from=experiment.original_experiment.persistence_outputs,
persistence_outputs_to=experiment.persistence_outputs,
experiment_name_from=experiment.original_experiment.unique_name,
experiment_name_to=experiment.unique_name)
except OSError:
# Best-effort: report the failure both to the job log and to the
# server log, then continue.
publisher.publish_experiment_job_log(
log_lines='Could not copy the outputs of experiment `{}` into experiment `{}`'.format(
experiment.original_experiment.unique_name, experiment.unique_name
),
experiment_uuid=experiment.uuid.hex,
experiment_name=experiment.unique_name,
job_uuid='all',
)
_logger.warning(
'Could not copy the outputs of experiment `%s` into experiment `%s`',
experiment.original_experiment.unique_name, experiment.unique_name) | def function[copy_experiment, parameter[experiment]]:
constant[If experiment is a restart, we should resume from last check point]
<ast.Try object at 0x7da20c992410> | keyword[def] identifier[copy_experiment] ( identifier[experiment] ):
literal[string]
keyword[try] :
identifier[publisher] . identifier[publish_experiment_job_log] (
identifier[log_lines] = literal[string] . identifier[format] (
identifier[experiment] . identifier[original_experiment] . identifier[unique_name] , identifier[experiment] . identifier[unique_name]
),
identifier[experiment_uuid] = identifier[experiment] . identifier[uuid] . identifier[hex] ,
identifier[experiment_name] = identifier[experiment] . identifier[unique_name] ,
identifier[job_uuid] = literal[string] ,
)
identifier[stores] . identifier[copy_experiment_outputs] (
identifier[persistence_outputs_from] = identifier[experiment] . identifier[original_experiment] . identifier[persistence_outputs] ,
identifier[persistence_outputs_to] = identifier[experiment] . identifier[persistence_outputs] ,
identifier[experiment_name_from] = identifier[experiment] . identifier[original_experiment] . identifier[unique_name] ,
identifier[experiment_name_to] = identifier[experiment] . identifier[unique_name] )
keyword[except] identifier[OSError] :
identifier[publisher] . identifier[publish_experiment_job_log] (
identifier[log_lines] = literal[string] . identifier[format] (
identifier[experiment] . identifier[original_experiment] . identifier[unique_name] , identifier[experiment] . identifier[unique_name]
),
identifier[experiment_uuid] = identifier[experiment] . identifier[uuid] . identifier[hex] ,
identifier[experiment_name] = identifier[experiment] . identifier[unique_name] ,
identifier[job_uuid] = literal[string] ,
)
identifier[_logger] . identifier[warning] (
literal[string] ,
identifier[experiment] . identifier[original_experiment] . identifier[unique_name] , identifier[experiment] . identifier[unique_name] ) | def copy_experiment(experiment):
"""If experiment is a restart, we should resume from last check point"""
try:
publisher.publish_experiment_job_log(log_lines='Copying outputs from experiment `{}` into experiment `{}`'.format(experiment.original_experiment.unique_name, experiment.unique_name), experiment_uuid=experiment.uuid.hex, experiment_name=experiment.unique_name, job_uuid='all')
stores.copy_experiment_outputs(persistence_outputs_from=experiment.original_experiment.persistence_outputs, persistence_outputs_to=experiment.persistence_outputs, experiment_name_from=experiment.original_experiment.unique_name, experiment_name_to=experiment.unique_name) # depends on [control=['try'], data=[]]
except OSError:
publisher.publish_experiment_job_log(log_lines='Could not copy the outputs of experiment `{}` into experiment `{}`'.format(experiment.original_experiment.unique_name, experiment.unique_name), experiment_uuid=experiment.uuid.hex, experiment_name=experiment.unique_name, job_uuid='all')
_logger.warning('Could not copy the outputs of experiment `%s` into experiment `%s`', experiment.original_experiment.unique_name, experiment.unique_name) # depends on [control=['except'], data=[]] |
def response(code, body='', etag=None, last_modified=None, expires=None, **kw):
"""Helper to build an HTTP response.
Parameters:
code
: An integer status code.
body
: The response body. See `Response.__init__` for details.
etag
: A value for the ETag header. Double quotes will be added unless the
string starts and ends with a double quote.
last_modified
: A value for the Last-Modified header as a datetime.datetime object
or Unix timestamp.
expires
: A value for the Expires header as number of seconds, datetime.timedelta
or datetime.datetime object.
Note: a value of type int or float is interpreted as a number of
seconds in the future, *not* as Unix timestamp.
**kw
: All other keyword arguments are interpreted as response headers.
The names will be converted to header names by replacing
underscores with hyphens and converting to title case
(e.g. `x_powered_by` => `X-Powered-By`).
"""
if etag is not None:
# Quote the ETag unless the caller supplied an already-quoted value.
if not (etag[0] == '"' and etag[-1] == '"'):
etag = '"%s"' % etag
kw['etag'] = etag
if last_modified is not None:
kw['last_modified'] = datetime_to_httpdate(last_modified)
if expires is not None:
if isinstance(expires, datetime):
kw['expires'] = datetime_to_httpdate(expires)
else:
# int/float/timedelta are treated as an offset from "now".
kw['expires'] = timedelta_to_httpdate(expires)
# Turn keyword names into header names: x_powered_by -> X-Powered-By.
headers = [(k.replace('_', '-').title(), v) for k, v in sorted(kw.items())]
return Response(code, headers, body) | def function[response, parameter[code, body, etag, last_modified, expires]]:
constant[Helper to build an HTTP response.
Parameters:
code
: An integer status code.
body
: The response body. See `Response.__init__` for details.
etag
: A value for the ETag header. Double quotes will be added unless the
string starts and ends with a double quote.
last_modified
: A value for the Last-Modified header as a datetime.datetime object
or Unix timestamp.
expires
: A value for the Expires header as number of seconds, datetime.timedelta
or datetime.datetime object.
Note: a value of type int or float is interpreted as a number of
seconds in the future, *not* as Unix timestamp.
**kw
: All other keyword arguments are interpreted as response headers.
The names will be converted to header names by replacing
underscores with hyphens and converting to title case
(e.g. `x_powered_by` => `X-Powered-By`).
]
if compare[name[etag] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da20c6aa950> begin[:]
variable[etag] assign[=] binary_operation[constant["%s"] <ast.Mod object at 0x7da2590d6920> name[etag]]
call[name[kw]][constant[etag]] assign[=] name[etag]
if compare[name[last_modified] is_not constant[None]] begin[:]
call[name[kw]][constant[last_modified]] assign[=] call[name[datetime_to_httpdate], parameter[name[last_modified]]]
if compare[name[expires] is_not constant[None]] begin[:]
if call[name[isinstance], parameter[name[expires], name[datetime]]] begin[:]
call[name[kw]][constant[expires]] assign[=] call[name[datetime_to_httpdate], parameter[name[expires]]]
variable[headers] assign[=] <ast.ListComp object at 0x7da20c6a8640>
return[call[name[Response], parameter[name[code], name[headers], name[body]]]] | keyword[def] identifier[response] ( identifier[code] , identifier[body] = literal[string] , identifier[etag] = keyword[None] , identifier[last_modified] = keyword[None] , identifier[expires] = keyword[None] ,** identifier[kw] ):
literal[string]
keyword[if] identifier[etag] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] ( identifier[etag] [ literal[int] ]== literal[string] keyword[and] identifier[etag] [- literal[int] ]== literal[string] ):
identifier[etag] = literal[string] % identifier[etag]
identifier[kw] [ literal[string] ]= identifier[etag]
keyword[if] identifier[last_modified] keyword[is] keyword[not] keyword[None] :
identifier[kw] [ literal[string] ]= identifier[datetime_to_httpdate] ( identifier[last_modified] )
keyword[if] identifier[expires] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[isinstance] ( identifier[expires] , identifier[datetime] ):
identifier[kw] [ literal[string] ]= identifier[datetime_to_httpdate] ( identifier[expires] )
keyword[else] :
identifier[kw] [ literal[string] ]= identifier[timedelta_to_httpdate] ( identifier[expires] )
identifier[headers] =[( identifier[k] . identifier[replace] ( literal[string] , literal[string] ). identifier[title] (), identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[sorted] ( identifier[kw] . identifier[items] ())]
keyword[return] identifier[Response] ( identifier[code] , identifier[headers] , identifier[body] ) | def response(code, body='', etag=None, last_modified=None, expires=None, **kw):
"""Helper to build an HTTP response.
Parameters:
code
: An integer status code.
body
: The response body. See `Response.__init__` for details.
etag
: A value for the ETag header. Double quotes will be added unless the
string starts and ends with a double quote.
last_modified
: A value for the Last-Modified header as a datetime.datetime object
or Unix timestamp.
expires
: A value for the Expires header as number of seconds, datetime.timedelta
or datetime.datetime object.
Note: a value of type int or float is interpreted as a number of
seconds in the future, *not* as Unix timestamp.
**kw
: All other keyword arguments are interpreted as response headers.
The names will be converted to header names by replacing
underscores with hyphens and converting to title case
(e.g. `x_powered_by` => `X-Powered-By`).
"""
if etag is not None:
if not (etag[0] == '"' and etag[-1] == '"'):
etag = '"%s"' % etag # depends on [control=['if'], data=[]]
kw['etag'] = etag # depends on [control=['if'], data=['etag']]
if last_modified is not None:
kw['last_modified'] = datetime_to_httpdate(last_modified) # depends on [control=['if'], data=['last_modified']]
if expires is not None:
if isinstance(expires, datetime):
kw['expires'] = datetime_to_httpdate(expires) # depends on [control=['if'], data=[]]
else:
kw['expires'] = timedelta_to_httpdate(expires) # depends on [control=['if'], data=['expires']]
headers = [(k.replace('_', '-').title(), v) for (k, v) in sorted(kw.items())]
return Response(code, headers, body) |
def add_letter(self, letter):
"""Add a letter at the cursor pos."""
# NOTE(review): ``assert`` is stripped under ``python -O``; raising
# TypeError/ValueError would be safer input validation.
assert isinstance(letter, str)
assert len(letter) == 1
self.text = self.text[:self.cursor] + letter + self.text[self.cursor:]
# Keep the cursor positioned after the character just inserted.
self.cursor += 1 | def function[add_letter, parameter[self, letter]]:
constant[Add a letter at the cursor pos.]
assert[call[name[isinstance], parameter[name[letter], name[str]]]]
assert[compare[call[name[len], parameter[name[letter]]] equal[==] constant[1]]]
name[self].text assign[=] binary_operation[binary_operation[call[name[self].text][<ast.Slice object at 0x7da20c991180>] + name[letter]] + call[name[self].text][<ast.Slice object at 0x7da20c993790>]]
<ast.AugAssign object at 0x7da20c992110> | keyword[def] identifier[add_letter] ( identifier[self] , identifier[letter] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[letter] , identifier[str] )
keyword[assert] identifier[len] ( identifier[letter] )== literal[int]
identifier[self] . identifier[text] = identifier[self] . identifier[text] [: identifier[self] . identifier[cursor] ]+ identifier[letter] + identifier[self] . identifier[text] [ identifier[self] . identifier[cursor] :]
identifier[self] . identifier[cursor] += literal[int] | def add_letter(self, letter):
"""Add a letter at the cursor pos."""
assert isinstance(letter, str)
assert len(letter) == 1
self.text = self.text[:self.cursor] + letter + self.text[self.cursor:]
self.cursor += 1 |
def addSwitch(self, name=None):
'''
Add a new switch to the topology.
If *name* is None, generate the next unused name of the form ``sN``.
Returns the name actually used.
'''
if name is None:
while True:
name = 's' + str(self.__snum)
self.__snum += 1
# Loop until the generated name is not already a graph node.
if name not in self.__nxgraph:
break
self.__addNode(name, Switch)
return name | def function[addSwitch, parameter[self, name]]:
constant[
Add a new switch to the topology.
]
if compare[name[name] is constant[None]] begin[:]
while constant[True] begin[:]
variable[name] assign[=] binary_operation[constant[s] + call[name[str], parameter[name[self].__snum]]]
<ast.AugAssign object at 0x7da18eb56b60>
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[self].__nxgraph] begin[:]
break
call[name[self].__addNode, parameter[name[name], name[Switch]]]
return[name[name]] | keyword[def] identifier[addSwitch] ( identifier[self] , identifier[name] = keyword[None] ):
literal[string]
keyword[if] identifier[name] keyword[is] keyword[None] :
keyword[while] keyword[True] :
identifier[name] = literal[string] + identifier[str] ( identifier[self] . identifier[__snum] )
identifier[self] . identifier[__snum] += literal[int]
keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[__nxgraph] :
keyword[break]
identifier[self] . identifier[__addNode] ( identifier[name] , identifier[Switch] )
keyword[return] identifier[name] | def addSwitch(self, name=None):
"""
Add a new switch to the topology.
"""
if name is None:
while True:
name = 's' + str(self.__snum)
self.__snum += 1
if name not in self.__nxgraph:
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['if'], data=['name']]
self.__addNode(name, Switch)
return name |
def is_instance_running(self, instance_id):
"""Checks if the instance is up and running.
:param str instance_id: instance identifier
:return: bool - True if running, False otherwise
"""
# Here, it's always better if we update the instance.
instance = self._load_instance(instance_id, force_reload=True)
# 'ACTIVE' is the provider's "running" state for an instance.
return instance.status == 'ACTIVE' | def function[is_instance_running, parameter[self, instance_id]]:
constant[Checks if the instance is up and running.
:param str instance_id: instance identifier
:return: bool - True if running, False otherwise
]
variable[instance] assign[=] call[name[self]._load_instance, parameter[name[instance_id]]]
return[compare[name[instance].status equal[==] constant[ACTIVE]]] | keyword[def] identifier[is_instance_running] ( identifier[self] , identifier[instance_id] ):
literal[string]
identifier[instance] = identifier[self] . identifier[_load_instance] ( identifier[instance_id] , identifier[force_reload] = keyword[True] )
keyword[return] identifier[instance] . identifier[status] == literal[string] | def is_instance_running(self, instance_id):
"""Checks if the instance is up and running.
:param str instance_id: instance identifier
:return: bool - True if running, False otherwise
"""
# Here, it's always better if we update the instance.
instance = self._load_instance(instance_id, force_reload=True)
return instance.status == 'ACTIVE' |
def _add_node(self, agent):
    """Add an Agent as a node to the graph."""
    if agent is None:
        return
    node_label = _get_node_label(agent)
    if isinstance(agent, Agent) and agent.bound_conditions:
        partners = [bc.agent for bc in agent.bound_conditions
                    if bc.is_bound]
        if partners:
            # A bound complex is labeled "A/B/C" and recorded so its
            # members can be linked up later.
            node_label = '/'.join(
                [_get_node_label(agent)] +
                [_get_node_label(p) for p in partners])
            self._complex_nodes.append([agent] + partners)
    node_key = _get_node_key(agent)
    # Each agent becomes a node at most once.
    if node_key in self.existing_nodes:
        return
    self.existing_nodes.append(node_key)
    self.graph.add_node(node_key, label=node_label,
                        **self.node_properties)
constant[Add an Agent as a node to the graph.]
if compare[name[agent] is constant[None]] begin[:]
return[None]
variable[node_label] assign[=] call[name[_get_node_label], parameter[name[agent]]]
if <ast.BoolOp object at 0x7da18bcc8d90> begin[:]
variable[bound_agents] assign[=] <ast.ListComp object at 0x7da18bcc9090>
if name[bound_agents] begin[:]
variable[bound_names] assign[=] <ast.ListComp object at 0x7da18bccb7c0>
variable[node_label] assign[=] binary_operation[binary_operation[call[name[_get_node_label], parameter[name[agent]]] + constant[/]] + call[constant[/].join, parameter[name[bound_names]]]]
call[name[self]._complex_nodes.append, parameter[binary_operation[list[[<ast.Name object at 0x7da18bcca410>]] + name[bound_agents]]]]
variable[node_key] assign[=] call[name[_get_node_key], parameter[name[agent]]]
if compare[name[node_key] in name[self].existing_nodes] begin[:]
return[None]
call[name[self].existing_nodes.append, parameter[name[node_key]]]
call[name[self].graph.add_node, parameter[name[node_key]]] | keyword[def] identifier[_add_node] ( identifier[self] , identifier[agent] ):
literal[string]
keyword[if] identifier[agent] keyword[is] keyword[None] :
keyword[return]
identifier[node_label] = identifier[_get_node_label] ( identifier[agent] )
keyword[if] identifier[isinstance] ( identifier[agent] , identifier[Agent] ) keyword[and] identifier[agent] . identifier[bound_conditions] :
identifier[bound_agents] =[ identifier[bc] . identifier[agent] keyword[for] identifier[bc] keyword[in] identifier[agent] . identifier[bound_conditions] keyword[if]
identifier[bc] . identifier[is_bound] ]
keyword[if] identifier[bound_agents] :
identifier[bound_names] =[ identifier[_get_node_label] ( identifier[a] ) keyword[for] identifier[a] keyword[in] identifier[bound_agents] ]
identifier[node_label] = identifier[_get_node_label] ( identifier[agent] )+ literal[string] + literal[string] . identifier[join] ( identifier[bound_names] )
identifier[self] . identifier[_complex_nodes] . identifier[append] ([ identifier[agent] ]+ identifier[bound_agents] )
keyword[else] :
identifier[node_label] = identifier[_get_node_label] ( identifier[agent] )
identifier[node_key] = identifier[_get_node_key] ( identifier[agent] )
keyword[if] identifier[node_key] keyword[in] identifier[self] . identifier[existing_nodes] :
keyword[return]
identifier[self] . identifier[existing_nodes] . identifier[append] ( identifier[node_key] )
identifier[self] . identifier[graph] . identifier[add_node] ( identifier[node_key] ,
identifier[label] = identifier[node_label] ,
** identifier[self] . identifier[node_properties] ) | def _add_node(self, agent):
"""Add an Agent as a node to the graph."""
if agent is None:
return # depends on [control=['if'], data=[]]
node_label = _get_node_label(agent)
if isinstance(agent, Agent) and agent.bound_conditions:
bound_agents = [bc.agent for bc in agent.bound_conditions if bc.is_bound]
if bound_agents:
bound_names = [_get_node_label(a) for a in bound_agents]
node_label = _get_node_label(agent) + '/' + '/'.join(bound_names)
self._complex_nodes.append([agent] + bound_agents) # depends on [control=['if'], data=[]]
else:
node_label = _get_node_label(agent) # depends on [control=['if'], data=[]]
node_key = _get_node_key(agent)
if node_key in self.existing_nodes:
return # depends on [control=['if'], data=[]]
self.existing_nodes.append(node_key)
self.graph.add_node(node_key, label=node_label, **self.node_properties) |
def get_queryset(self):
    """
    Return the first preferences object for the current site.
    If preferences do not exist create it.
    """
    qs = super(SingletonManager, self).get_queryset()
    # Resolve the active site only when the sites framework is configured.
    site = None
    if getattr(settings, 'SITE_ID', None) is not None:
        site = Site.objects.get_current()
    # Restrict to preferences attached to the resolved site.
    if site is not None:
        qs = qs.filter(sites=settings.SITE_ID)
    if not qs.exists():
        # Lazily create the singleton, attaching it to the site if any.
        created = self.model.objects.create()
        if site is not None:
            created.sites.add(site)
    return qs
constant[
Return the first preferences object for the current site.
If preferences do not exist create it.
]
variable[queryset] assign[=] call[call[name[super], parameter[name[SingletonManager], name[self]]].get_queryset, parameter[]]
variable[current_site] assign[=] constant[None]
if compare[call[name[getattr], parameter[name[settings], constant[SITE_ID], constant[None]]] is_not constant[None]] begin[:]
variable[current_site] assign[=] call[name[Site].objects.get_current, parameter[]]
if compare[name[current_site] is_not constant[None]] begin[:]
variable[queryset] assign[=] call[name[queryset].filter, parameter[]]
if <ast.UnaryOp object at 0x7da1b0fccb80> begin[:]
variable[obj] assign[=] call[name[self].model.objects.create, parameter[]]
if compare[name[current_site] is_not constant[None]] begin[:]
call[name[obj].sites.add, parameter[name[current_site]]]
return[name[queryset]] | keyword[def] identifier[get_queryset] ( identifier[self] ):
literal[string]
identifier[queryset] = identifier[super] ( identifier[SingletonManager] , identifier[self] ). identifier[get_queryset] ()
identifier[current_site] = keyword[None]
keyword[if] identifier[getattr] ( identifier[settings] , literal[string] , keyword[None] ) keyword[is] keyword[not] keyword[None] :
identifier[current_site] = identifier[Site] . identifier[objects] . identifier[get_current] ()
keyword[if] identifier[current_site] keyword[is] keyword[not] keyword[None] :
identifier[queryset] = identifier[queryset] . identifier[filter] ( identifier[sites] = identifier[settings] . identifier[SITE_ID] )
keyword[if] keyword[not] identifier[queryset] . identifier[exists] ():
identifier[obj] = identifier[self] . identifier[model] . identifier[objects] . identifier[create] ()
keyword[if] identifier[current_site] keyword[is] keyword[not] keyword[None] :
identifier[obj] . identifier[sites] . identifier[add] ( identifier[current_site] )
keyword[return] identifier[queryset] | def get_queryset(self):
"""
Return the first preferences object for the current site.
If preferences do not exist create it.
"""
queryset = super(SingletonManager, self).get_queryset()
# Get current site
current_site = None
if getattr(settings, 'SITE_ID', None) is not None:
current_site = Site.objects.get_current() # depends on [control=['if'], data=[]]
# If site found limit queryset to site.
if current_site is not None:
queryset = queryset.filter(sites=settings.SITE_ID) # depends on [control=['if'], data=[]]
if not queryset.exists():
# Create object (for current site) if it doesn't exist.
obj = self.model.objects.create()
if current_site is not None:
obj.sites.add(current_site) # depends on [control=['if'], data=['current_site']] # depends on [control=['if'], data=[]]
return queryset |
def push(**kwargs):
    ''' Force synchronization of directory. '''
    output, err = cli_syncthing_adapter.refresh(**kwargs)
    if output:
        click.echo("%s" % output, err=err)
    if not kwargs['verbose'] or err:
        return
    # Verbose mode: poll every device in turn and render one shared
    # 0-100% progress bar.
    with click.progressbar(
            iterable=None,
            length=100,
            label='Synchronizing') as bar:
        kwargs['progress'] = True
        device_num = 0
        max_devices = 1
        shown = 0  # whole percent already drawn on the bar
        while device_num < max_devices:
            kwargs['device_num'] = device_num
            data, err = cli_syncthing_adapter.refresh(**kwargs)
            device_num = data['device_num']
            max_devices = data['max_devices']
            done = math.floor(data['percent'])
            # Only advance the bar by the newly-completed whole percents.
            if done - shown > 0:
                bar.update(done - shown)
                shown = done
            if device_num < max_devices:
                time.sleep(0.5)
constant[ Force synchronization of directory. ]
<ast.Tuple object at 0x7da1b1f7ca00> assign[=] call[name[cli_syncthing_adapter].refresh, parameter[]]
if name[output] begin[:]
call[name[click].echo, parameter[binary_operation[constant[%s] <ast.Mod object at 0x7da2590d6920> name[output]]]]
if <ast.BoolOp object at 0x7da1b1f20cd0> begin[:]
with call[name[click].progressbar, parameter[]] begin[:]
variable[device_num] assign[=] constant[0]
variable[max_devices] assign[=] constant[1]
variable[prev_percent] assign[=] constant[0]
while constant[True] begin[:]
call[name[kwargs]][constant[progress]] assign[=] constant[True]
call[name[kwargs]][constant[device_num]] assign[=] name[device_num]
<ast.Tuple object at 0x7da1b1f7c9a0> assign[=] call[name[cli_syncthing_adapter].refresh, parameter[]]
variable[device_num] assign[=] call[name[data]][constant[device_num]]
variable[max_devices] assign[=] call[name[data]][constant[max_devices]]
variable[cur_percent] assign[=] binary_operation[call[name[math].floor, parameter[call[name[data]][constant[percent]]]] - name[prev_percent]]
if compare[name[cur_percent] greater[>] constant[0]] begin[:]
call[name[bar].update, parameter[name[cur_percent]]]
variable[prev_percent] assign[=] call[name[math].floor, parameter[call[name[data]][constant[percent]]]]
if compare[name[device_num] less[<] name[max_devices]] begin[:]
call[name[time].sleep, parameter[constant[0.5]]] | keyword[def] identifier[push] (** identifier[kwargs] ):
literal[string]
identifier[output] , identifier[err] = identifier[cli_syncthing_adapter] . identifier[refresh] (** identifier[kwargs] )
keyword[if] identifier[output] :
identifier[click] . identifier[echo] ( literal[string] % identifier[output] , identifier[err] = identifier[err] )
keyword[if] identifier[kwargs] [ literal[string] ] keyword[and] keyword[not] identifier[err] :
keyword[with] identifier[click] . identifier[progressbar] (
identifier[iterable] = keyword[None] ,
identifier[length] = literal[int] ,
identifier[label] = literal[string] ) keyword[as] identifier[bar] :
identifier[device_num] = literal[int]
identifier[max_devices] = literal[int]
identifier[prev_percent] = literal[int]
keyword[while] keyword[True] :
identifier[kwargs] [ literal[string] ]= keyword[True]
identifier[kwargs] [ literal[string] ]= identifier[device_num]
identifier[data] , identifier[err] = identifier[cli_syncthing_adapter] . identifier[refresh] (** identifier[kwargs] )
identifier[device_num] = identifier[data] [ literal[string] ]
identifier[max_devices] = identifier[data] [ literal[string] ]
identifier[cur_percent] = identifier[math] . identifier[floor] ( identifier[data] [ literal[string] ])- identifier[prev_percent]
keyword[if] identifier[cur_percent] > literal[int] :
identifier[bar] . identifier[update] ( identifier[cur_percent] )
identifier[prev_percent] = identifier[math] . identifier[floor] ( identifier[data] [ literal[string] ])
keyword[if] identifier[device_num] < identifier[max_devices] :
identifier[time] . identifier[sleep] ( literal[int] )
keyword[else] :
keyword[break] | def push(**kwargs):
""" Force synchronization of directory. """
(output, err) = cli_syncthing_adapter.refresh(**kwargs)
if output:
click.echo('%s' % output, err=err) # depends on [control=['if'], data=[]]
if kwargs['verbose'] and (not err):
with click.progressbar(iterable=None, length=100, label='Synchronizing') as bar:
device_num = 0
max_devices = 1
prev_percent = 0
while True:
kwargs['progress'] = True
kwargs['device_num'] = device_num
(data, err) = cli_syncthing_adapter.refresh(**kwargs)
device_num = data['device_num']
max_devices = data['max_devices']
cur_percent = math.floor(data['percent']) - prev_percent
if cur_percent > 0:
bar.update(cur_percent)
prev_percent = math.floor(data['percent']) # depends on [control=['if'], data=['cur_percent']]
if device_num < max_devices:
time.sleep(0.5) # depends on [control=['if'], data=[]]
else:
break # depends on [control=['while'], data=[]] # depends on [control=['with'], data=['bar']] # depends on [control=['if'], data=[]] |
def _get_description(prev_description):
    """Get the parsed description file (a dictionary) from another
    parsed description file.
    :param prev_description: a parsed description; must contain the key
        ``'data-source'``, a path relative to the project root.
    :return: the parsed ``info.yml`` of that data source as a dictionary.
    Exits the process (``sys.exit(-1)``) if ``info.yml`` does not exist
    at the expected location.
    """
    current_desc_file = os.path.join(utils.get_project_root(),
                                     prev_description['data-source'],
                                     "info.yml")
    if not os.path.isfile(current_desc_file):
        logging.error("You are probably not in the folder of a model, because "
                      "%s is not a file.", current_desc_file)
        sys.exit(-1)
    with open(current_desc_file, 'r') as ymlfile:
        # safe_load: plain yaml.load() without a Loader is deprecated
        # (PyYAML >= 5.1) and can execute arbitrary Python via YAML tags;
        # info.yml only needs plain-data YAML.
        current_description = yaml.safe_load(ymlfile)
    return current_description
constant[Get the parsed description file (a dictionary) from another
parsed description file.]
variable[current_desc_file] assign[=] call[name[os].path.join, parameter[call[name[utils].get_project_root, parameter[]], call[name[prev_description]][constant[data-source]], constant[info.yml]]]
if <ast.UnaryOp object at 0x7da1b28c6f50> begin[:]
call[name[logging].error, parameter[constant[You are probably not in the folder of a model, because %s is not a file.], name[current_desc_file]]]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da2041d82b0>]]
with call[name[open], parameter[name[current_desc_file], constant[r]]] begin[:]
variable[current_description] assign[=] call[name[yaml].load, parameter[name[ymlfile]]]
return[name[current_description]] | keyword[def] identifier[_get_description] ( identifier[prev_description] ):
literal[string]
identifier[current_desc_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[utils] . identifier[get_project_root] (),
identifier[prev_description] [ literal[string] ],
literal[string] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[current_desc_file] ):
identifier[logging] . identifier[error] ( literal[string]
literal[string] , identifier[current_desc_file] )
identifier[sys] . identifier[exit] (- literal[int] )
keyword[with] identifier[open] ( identifier[current_desc_file] , literal[string] ) keyword[as] identifier[ymlfile] :
identifier[current_description] = identifier[yaml] . identifier[load] ( identifier[ymlfile] )
keyword[return] identifier[current_description] | def _get_description(prev_description):
"""Get the parsed description file (a dictionary) from another
parsed description file."""
current_desc_file = os.path.join(utils.get_project_root(), prev_description['data-source'], 'info.yml')
if not os.path.isfile(current_desc_file):
logging.error('You are probably not in the folder of a model, because %s is not a file.', current_desc_file)
sys.exit(-1) # depends on [control=['if'], data=[]]
with open(current_desc_file, 'r') as ymlfile:
current_description = yaml.load(ymlfile) # depends on [control=['with'], data=['ymlfile']]
return current_description |
def service_desks(self):
    """Get a list of ServiceDesk Resources from the server visible to the current authenticated user.
    :rtype: List[ServiceDesk]
    """
    # The service-desk REST API is experimental and must be opted into
    # explicitly via this header.
    response = self._session.get(
        self._options['server'] + '/rest/servicedeskapi/servicedesk',
        headers={'X-ExperimentalApi': 'opt-in'})
    parsed = json_loads(response)
    return [ServiceDesk(self._options, self._session, raw)
            for raw in parsed['values']]
return projects | def function[service_desks, parameter[self]]:
constant[Get a list of ServiceDesk Resources from the server visible to the current authenticated user.
:rtype: List[ServiceDesk]
]
variable[url] assign[=] binary_operation[call[name[self]._options][constant[server]] + constant[/rest/servicedeskapi/servicedesk]]
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da1b21f33a0>], [<ast.Constant object at 0x7da1b21f3430>]]
variable[r_json] assign[=] call[name[json_loads], parameter[call[name[self]._session.get, parameter[name[url]]]]]
variable[projects] assign[=] <ast.ListComp object at 0x7da1b21f3cd0>
return[name[projects]] | keyword[def] identifier[service_desks] ( identifier[self] ):
literal[string]
identifier[url] = identifier[self] . identifier[_options] [ literal[string] ]+ literal[string]
identifier[headers] ={ literal[string] : literal[string] }
identifier[r_json] = identifier[json_loads] ( identifier[self] . identifier[_session] . identifier[get] ( identifier[url] , identifier[headers] = identifier[headers] ))
identifier[projects] =[ identifier[ServiceDesk] ( identifier[self] . identifier[_options] , identifier[self] . identifier[_session] , identifier[raw_project_json] )
keyword[for] identifier[raw_project_json] keyword[in] identifier[r_json] [ literal[string] ]]
keyword[return] identifier[projects] | def service_desks(self):
"""Get a list of ServiceDesk Resources from the server visible to the current authenticated user.
:rtype: List[ServiceDesk]
"""
url = self._options['server'] + '/rest/servicedeskapi/servicedesk'
headers = {'X-ExperimentalApi': 'opt-in'}
r_json = json_loads(self._session.get(url, headers=headers))
projects = [ServiceDesk(self._options, self._session, raw_project_json) for raw_project_json in r_json['values']]
return projects |
def frequency_from_polarizations(h_plus, h_cross):
    """Return the gravitational-wave frequency as a function of time.
    The frequency is obtained by finite-differencing the instantaneous
    phase of the (h_plus, h_cross) polarizations. The result is one
    sample shorter than the inputs and its epoch is advanced by half a
    sample, since each difference lives between two input samples.
    Parameters
    ----------
    h_plus : TimeSeries
        Plus polarization of the gravitational waveform.
    h_cross : TimeSeries
        Cross polarization of the gravitational waveform.
    Returns
    -------
    GWFrequency : TimeSeries
        Gravitational-wave frequency as a function of time.
    Examples
    --------
    >>> from pycbc.waveform import get_td_waveform, phase_from_polarizations
    >>> hp, hc = get_td_waveform(approximant="TaylorT4", mass1=10, mass2=10,
                         f_lower=30, delta_t=1.0/4096)
    >>> freq = frequency_from_polarizations(hp, hc)
    """
    gw_phase = phase_from_polarizations(h_plus, h_cross)
    # f(t) = (1 / 2*pi) * dphi/dt, with dphi/dt taken as a one-sample
    # finite difference.
    inst_freq = numpy.diff(gw_phase) / (2 * lal.PI * gw_phase.delta_t)
    # Center each frequency sample between the two phase samples it
    # was computed from.
    half_bin_epoch = gw_phase.start_time + gw_phase.delta_t / 2
    return TimeSeries(inst_freq.astype(real_same_precision_as(h_plus)),
                      delta_t=gw_phase.delta_t, epoch=half_bin_epoch)
constant[Return gravitational wave frequency
Return the gravitation-wave frequency as a function of time
from the h_plus and h_cross polarizations of the waveform.
It is 1 bin shorter than the input vectors and the sample times
are advanced half a bin.
Parameters
----------
h_plus : TimeSeries
A PyCBC TimeSeries vector that contains the plus polarization of the
gravitational waveform.
h_cross : TimeSeries
A PyCBC TimeSeries vector that contains the cross polarization of the
gravitational waveform.
Returns
-------
GWFrequency : TimeSeries
A TimeSeries containing the gravitational wave frequency as a function
of time.
Examples
--------
>>> from pycbc.waveform import get_td_waveform, phase_from_polarizations
>>> hp, hc = get_td_waveform(approximant="TaylorT4", mass1=10, mass2=10,
f_lower=30, delta_t=1.0/4096)
>>> freq = frequency_from_polarizations(hp, hc)
]
variable[phase] assign[=] call[name[phase_from_polarizations], parameter[name[h_plus], name[h_cross]]]
variable[freq] assign[=] binary_operation[call[name[numpy].diff, parameter[name[phase]]] / binary_operation[binary_operation[constant[2] * name[lal].PI] * name[phase].delta_t]]
variable[start_time] assign[=] binary_operation[name[phase].start_time + binary_operation[name[phase].delta_t / constant[2]]]
return[call[name[TimeSeries], parameter[call[name[freq].astype, parameter[call[name[real_same_precision_as], parameter[name[h_plus]]]]]]]] | keyword[def] identifier[frequency_from_polarizations] ( identifier[h_plus] , identifier[h_cross] ):
literal[string]
identifier[phase] = identifier[phase_from_polarizations] ( identifier[h_plus] , identifier[h_cross] )
identifier[freq] = identifier[numpy] . identifier[diff] ( identifier[phase] )/( literal[int] * identifier[lal] . identifier[PI] * identifier[phase] . identifier[delta_t] )
identifier[start_time] = identifier[phase] . identifier[start_time] + identifier[phase] . identifier[delta_t] / literal[int]
keyword[return] identifier[TimeSeries] ( identifier[freq] . identifier[astype] ( identifier[real_same_precision_as] ( identifier[h_plus] )),
identifier[delta_t] = identifier[phase] . identifier[delta_t] , identifier[epoch] = identifier[start_time] ) | def frequency_from_polarizations(h_plus, h_cross):
"""Return gravitational wave frequency
Return the gravitation-wave frequency as a function of time
from the h_plus and h_cross polarizations of the waveform.
It is 1 bin shorter than the input vectors and the sample times
are advanced half a bin.
Parameters
----------
h_plus : TimeSeries
A PyCBC TimeSeries vector that contains the plus polarization of the
gravitational waveform.
h_cross : TimeSeries
A PyCBC TimeSeries vector that contains the cross polarization of the
gravitational waveform.
Returns
-------
GWFrequency : TimeSeries
A TimeSeries containing the gravitational wave frequency as a function
of time.
Examples
--------
>>> from pycbc.waveform import get_td_waveform, phase_from_polarizations
>>> hp, hc = get_td_waveform(approximant="TaylorT4", mass1=10, mass2=10,
f_lower=30, delta_t=1.0/4096)
>>> freq = frequency_from_polarizations(hp, hc)
"""
phase = phase_from_polarizations(h_plus, h_cross)
freq = numpy.diff(phase) / (2 * lal.PI * phase.delta_t)
start_time = phase.start_time + phase.delta_t / 2
return TimeSeries(freq.astype(real_same_precision_as(h_plus)), delta_t=phase.delta_t, epoch=start_time) |
def _get_grouper(obj, key=None, axis=0, level=None, sort=True,
                 observed=False, mutated=False, validate=True):
    """
    create and return a BaseGrouper, which is an internal
    mapping of how to create the grouper indexers.
    This may be composed of multiple Grouping objects, indicating
    multiple groupers
    Groupers are ultimately index mappings. They can originate as:
    index mappings, keys to columns, functions, or Groupers
    Groupers enable local references to axis,level,sort, while
    the passed in axis, level, and sort are 'global'.
    This routine tries to figure out what the passing in references
    are and then creates a Grouping for each one, combined into
    a BaseGrouper.
    If observed & we have a categorical grouper, only show the observed
    values
    If validate, then check for key/level overlaps
    """
    # Returns a 3-tuple: (grouper, exclusions, obj) where `exclusions`
    # holds the column names consumed as group keys (a set in the
    # keyed-Grouper early return below, otherwise a list).
    group_axis = obj._get_axis(axis)
    # validate that the passed single level is compatible with the passed
    # axis of the object
    if level is not None:
        # TODO: These if-block and else-block are almost same.
        # MultiIndex instance check is removable, but it seems that there are
        # some processes only for non-MultiIndex in else-block,
        # eg. `obj.index.name != level`. We have to consider carefully whether
        # these are applicable for MultiIndex. Even if these are applicable,
        # we need to check if it makes no side effect to subsequent processes
        # on the outside of this condition.
        # (GH 17621)
        if isinstance(group_axis, MultiIndex):
            if is_list_like(level) and len(level) == 1:
                level = level[0]
            if key is None and is_scalar(level):
                # Get the level values from group_axis
                key = group_axis.get_level_values(level)
                level = None
        else:
            # allow level to be a length-one list-like object
            # (e.g., level=[0])
            # GH 13901
            if is_list_like(level):
                nlevels = len(level)
                if nlevels == 1:
                    level = level[0]
                elif nlevels == 0:
                    raise ValueError('No group keys passed!')
                else:
                    raise ValueError('multiple levels only valid with '
                                     'MultiIndex')
            if isinstance(level, str):
                if obj.index.name != level:
                    raise ValueError('level name {} is not the name of the '
                                     'index'.format(level))
            elif level > 0 or level < -1:
                raise ValueError(
                    'level > 0 or level < -1 only valid with MultiIndex')
            # NOTE: `group_axis` and `group_axis.get_level_values(level)`
            # are same in this section.
            level = None
            key = group_axis
    # a passed-in Grouper, directly convert
    if isinstance(key, Grouper):
        binner, grouper, obj = key._get_grouper(obj, validate=False)
        if key.key is None:
            return grouper, [], obj
        else:
            return grouper, {key.key}, obj
    # already have a BaseGrouper, just return it
    elif isinstance(key, BaseGrouper):
        return key, [], obj
    # In the future, a tuple key will always mean an actual key,
    # not an iterable of keys. In the meantime, we attempt to provide
    # a warning. We can assume that the user wanted a list of keys when
    # the key is not in the index. We just have to be careful with
    # unhashble elements of `key`. Any unhashable elements implies that
    # they wanted a list of keys.
    # https://github.com/pandas-dev/pandas/issues/18314
    is_tuple = isinstance(key, tuple)
    all_hashable = is_tuple and is_hashable(key)
    if is_tuple:
        if ((all_hashable and key not in obj and set(key).issubset(obj))
                or not all_hashable):
            # column names ('a', 'b') -> ['a', 'b']
            # arrays like (a, b) -> [a, b]
            msg = ("Interpreting tuple 'by' as a list of keys, rather than "
                   "a single key. Use 'by=[...]' instead of 'by=(...)'. In "
                   "the future, a tuple will always mean a single key.")
            warnings.warn(msg, FutureWarning, stacklevel=5)
            key = list(key)
    if not isinstance(key, list):
        keys = [key]
        match_axis_length = False
    else:
        keys = key
        match_axis_length = len(keys) == len(group_axis)
    # what are we after, exactly?
    any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
    any_groupers = any(isinstance(g, Grouper) for g in keys)
    any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray))
                        for g in keys)
    # is this an index replacement?
    if (not any_callable and not any_arraylike and not any_groupers and
            match_axis_length and level is None):
        if isinstance(obj, DataFrame):
            all_in_columns_index = all(g in obj.columns or g in
                                       obj.index.names for g in keys)
        elif isinstance(obj, Series):
            all_in_columns_index = all(g in obj.index.names for g in keys)
        # NOTE(review): if obj is neither DataFrame nor Series,
        # all_in_columns_index is unbound here and this raises NameError
        # — confirm callers only pass DataFrame/Series.
        if not all_in_columns_index:
            # not all names resolve to columns/levels: treat `keys` as a
            # single array-like composite key
            keys = [com.asarray_tuplesafe(keys)]
    if isinstance(level, (tuple, list)):
        if key is None:
            keys = [None] * len(level)
        levels = level
    else:
        levels = [level] * len(keys)
    groupings = []
    exclusions = []
    # if the actual grouper should be obj[key]
    def is_in_axis(key):
        if not _is_label_like(key):
            try:
                obj._data.items.get_loc(key)
            except Exception:
                return False
        return True
    # if the grouper is obj[name]
    def is_in_obj(gpr):
        try:
            return id(gpr) == id(obj[gpr.name])
        except Exception:
            return False
    # Build one Grouping per (key, level) pair.
    for i, (gpr, level) in enumerate(zip(keys, levels)):
        if is_in_obj(gpr):  # df.groupby(df['name'])
            in_axis, name = True, gpr.name
            exclusions.append(name)
        elif is_in_axis(gpr):  # df.groupby('name')
            if gpr in obj:
                if validate:
                    obj._check_label_or_level_ambiguity(gpr)
                in_axis, name, gpr = True, gpr, obj[gpr]
                exclusions.append(name)
            elif obj._is_level_reference(gpr):
                in_axis, name, level, gpr = False, None, gpr, None
            else:
                raise KeyError(gpr)
        elif isinstance(gpr, Grouper) and gpr.key is not None:
            # Add key to exclusions
            exclusions.append(gpr.key)
            in_axis, name = False, None
        else:
            in_axis, name = False, None
        if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]:
            raise ValueError(
                ("Length of grouper ({len_gpr}) and axis ({len_axis})"
                 " must be same length"
                 .format(len_gpr=len(gpr), len_axis=obj.shape[axis])))
        # create the Grouping
        # allow us to passing the actual Grouping as the gpr
        ping = (Grouping(group_axis,
                         gpr,
                         obj=obj,
                         name=name,
                         level=level,
                         sort=sort,
                         observed=observed,
                         in_axis=in_axis)
                if not isinstance(gpr, Grouping) else gpr)
        groupings.append(ping)
    if len(groupings) == 0:
        raise ValueError('No group keys passed!')
    # create the internals grouper
    grouper = BaseGrouper(group_axis, groupings, sort=sort, mutated=mutated)
    return grouper, exclusions, obj
constant[
create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
Groupers enable local references to axis,level,sort, while
the passed in axis, level, and sort are 'global'.
This routine tries to figure out what the passing in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
If observed & we have a categorical grouper, only show the observed
values
If validate, then check for key/level overlaps
]
variable[group_axis] assign[=] call[name[obj]._get_axis, parameter[name[axis]]]
if compare[name[level] is_not constant[None]] begin[:]
if call[name[isinstance], parameter[name[group_axis], name[MultiIndex]]] begin[:]
if <ast.BoolOp object at 0x7da1b26af0d0> begin[:]
variable[level] assign[=] call[name[level]][constant[0]]
if <ast.BoolOp object at 0x7da1b26aca60> begin[:]
variable[key] assign[=] call[name[group_axis].get_level_values, parameter[name[level]]]
variable[level] assign[=] constant[None]
if call[name[isinstance], parameter[name[key], name[Grouper]]] begin[:]
<ast.Tuple object at 0x7da18dc99cc0> assign[=] call[name[key]._get_grouper, parameter[name[obj]]]
if compare[name[key].key is constant[None]] begin[:]
return[tuple[[<ast.Name object at 0x7da1b26af910>, <ast.List object at 0x7da1b26aecb0>, <ast.Name object at 0x7da1b26acdf0>]]]
variable[is_tuple] assign[=] call[name[isinstance], parameter[name[key], name[tuple]]]
variable[all_hashable] assign[=] <ast.BoolOp object at 0x7da1b26af6d0>
if name[is_tuple] begin[:]
if <ast.BoolOp object at 0x7da1b26af250> begin[:]
variable[msg] assign[=] constant[Interpreting tuple 'by' as a list of keys, rather than a single key. Use 'by=[...]' instead of 'by=(...)'. In the future, a tuple will always mean a single key.]
call[name[warnings].warn, parameter[name[msg], name[FutureWarning]]]
variable[key] assign[=] call[name[list], parameter[name[key]]]
if <ast.UnaryOp object at 0x7da1b26ad810> begin[:]
variable[keys] assign[=] list[[<ast.Name object at 0x7da1b26af7c0>]]
variable[match_axis_length] assign[=] constant[False]
variable[any_callable] assign[=] call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b26ac430>]]
variable[any_groupers] assign[=] call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b26af4c0>]]
variable[any_arraylike] assign[=] call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b26accd0>]]
if <ast.BoolOp object at 0x7da1b26af7f0> begin[:]
if call[name[isinstance], parameter[name[obj], name[DataFrame]]] begin[:]
variable[all_in_columns_index] assign[=] call[name[all], parameter[<ast.GeneratorExp object at 0x7da1b26ac2b0>]]
if <ast.UnaryOp object at 0x7da1b26afc40> begin[:]
variable[keys] assign[=] list[[<ast.Call object at 0x7da1b26ac700>]]
if call[name[isinstance], parameter[name[level], tuple[[<ast.Name object at 0x7da1b26af400>, <ast.Name object at 0x7da1b26ae1d0>]]]] begin[:]
if compare[name[key] is constant[None]] begin[:]
variable[keys] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b26ac7f0>]] * call[name[len], parameter[name[level]]]]
variable[levels] assign[=] name[level]
variable[groupings] assign[=] list[[]]
variable[exclusions] assign[=] list[[]]
def function[is_in_axis, parameter[key]]:
if <ast.UnaryOp object at 0x7da20c7cb0d0> begin[:]
<ast.Try object at 0x7da20c7c9360>
return[constant[True]]
def function[is_in_obj, parameter[gpr]]:
<ast.Try object at 0x7da20c7c8280>
for taget[tuple[[<ast.Name object at 0x7da20c7caf50>, <ast.Tuple object at 0x7da20c7cae00>]]] in starred[call[name[enumerate], parameter[call[name[zip], parameter[name[keys], name[levels]]]]]] begin[:]
if call[name[is_in_obj], parameter[name[gpr]]] begin[:]
<ast.Tuple object at 0x7da20c7c9f60> assign[=] tuple[[<ast.Constant object at 0x7da20c7caad0>, <ast.Attribute object at 0x7da20c7ca860>]]
call[name[exclusions].append, parameter[name[name]]]
if <ast.BoolOp object at 0x7da204963460> begin[:]
<ast.Raise object at 0x7da204960e80>
variable[ping] assign[=] <ast.IfExp object at 0x7da2049606d0>
call[name[groupings].append, parameter[name[ping]]]
if compare[call[name[len], parameter[name[groupings]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da204961e40>
variable[grouper] assign[=] call[name[BaseGrouper], parameter[name[group_axis], name[groupings]]]
return[tuple[[<ast.Name object at 0x7da18f723ac0>, <ast.Name object at 0x7da18f721a50>, <ast.Name object at 0x7da18f7212d0>]]] | keyword[def] identifier[_get_grouper] ( identifier[obj] , identifier[key] = keyword[None] , identifier[axis] = literal[int] , identifier[level] = keyword[None] , identifier[sort] = keyword[True] ,
identifier[observed] = keyword[False] , identifier[mutated] = keyword[False] , identifier[validate] = keyword[True] ):
literal[string]
identifier[group_axis] = identifier[obj] . identifier[_get_axis] ( identifier[axis] )
keyword[if] identifier[level] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[isinstance] ( identifier[group_axis] , identifier[MultiIndex] ):
keyword[if] identifier[is_list_like] ( identifier[level] ) keyword[and] identifier[len] ( identifier[level] )== literal[int] :
identifier[level] = identifier[level] [ literal[int] ]
keyword[if] identifier[key] keyword[is] keyword[None] keyword[and] identifier[is_scalar] ( identifier[level] ):
identifier[key] = identifier[group_axis] . identifier[get_level_values] ( identifier[level] )
identifier[level] = keyword[None]
keyword[else] :
keyword[if] identifier[is_list_like] ( identifier[level] ):
identifier[nlevels] = identifier[len] ( identifier[level] )
keyword[if] identifier[nlevels] == literal[int] :
identifier[level] = identifier[level] [ literal[int] ]
keyword[elif] identifier[nlevels] == literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
keyword[if] identifier[isinstance] ( identifier[level] , identifier[str] ):
keyword[if] identifier[obj] . identifier[index] . identifier[name] != identifier[level] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[level] ))
keyword[elif] identifier[level] > literal[int] keyword[or] identifier[level] <- literal[int] :
keyword[raise] identifier[ValueError] (
literal[string] )
identifier[level] = keyword[None]
identifier[key] = identifier[group_axis]
keyword[if] identifier[isinstance] ( identifier[key] , identifier[Grouper] ):
identifier[binner] , identifier[grouper] , identifier[obj] = identifier[key] . identifier[_get_grouper] ( identifier[obj] , identifier[validate] = keyword[False] )
keyword[if] identifier[key] . identifier[key] keyword[is] keyword[None] :
keyword[return] identifier[grouper] ,[], identifier[obj]
keyword[else] :
keyword[return] identifier[grouper] ,{ identifier[key] . identifier[key] }, identifier[obj]
keyword[elif] identifier[isinstance] ( identifier[key] , identifier[BaseGrouper] ):
keyword[return] identifier[key] ,[], identifier[obj]
identifier[is_tuple] = identifier[isinstance] ( identifier[key] , identifier[tuple] )
identifier[all_hashable] = identifier[is_tuple] keyword[and] identifier[is_hashable] ( identifier[key] )
keyword[if] identifier[is_tuple] :
keyword[if] (( identifier[all_hashable] keyword[and] identifier[key] keyword[not] keyword[in] identifier[obj] keyword[and] identifier[set] ( identifier[key] ). identifier[issubset] ( identifier[obj] ))
keyword[or] keyword[not] identifier[all_hashable] ):
identifier[msg] =( literal[string]
literal[string]
literal[string] )
identifier[warnings] . identifier[warn] ( identifier[msg] , identifier[FutureWarning] , identifier[stacklevel] = literal[int] )
identifier[key] = identifier[list] ( identifier[key] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[key] , identifier[list] ):
identifier[keys] =[ identifier[key] ]
identifier[match_axis_length] = keyword[False]
keyword[else] :
identifier[keys] = identifier[key]
identifier[match_axis_length] = identifier[len] ( identifier[keys] )== identifier[len] ( identifier[group_axis] )
identifier[any_callable] = identifier[any] ( identifier[callable] ( identifier[g] ) keyword[or] identifier[isinstance] ( identifier[g] , identifier[dict] ) keyword[for] identifier[g] keyword[in] identifier[keys] )
identifier[any_groupers] = identifier[any] ( identifier[isinstance] ( identifier[g] , identifier[Grouper] ) keyword[for] identifier[g] keyword[in] identifier[keys] )
identifier[any_arraylike] = identifier[any] ( identifier[isinstance] ( identifier[g] ,( identifier[list] , identifier[tuple] , identifier[Series] , identifier[Index] , identifier[np] . identifier[ndarray] ))
keyword[for] identifier[g] keyword[in] identifier[keys] )
keyword[if] ( keyword[not] identifier[any_callable] keyword[and] keyword[not] identifier[any_arraylike] keyword[and] keyword[not] identifier[any_groupers] keyword[and]
identifier[match_axis_length] keyword[and] identifier[level] keyword[is] keyword[None] ):
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[DataFrame] ):
identifier[all_in_columns_index] = identifier[all] ( identifier[g] keyword[in] identifier[obj] . identifier[columns] keyword[or] identifier[g] keyword[in]
identifier[obj] . identifier[index] . identifier[names] keyword[for] identifier[g] keyword[in] identifier[keys] )
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[Series] ):
identifier[all_in_columns_index] = identifier[all] ( identifier[g] keyword[in] identifier[obj] . identifier[index] . identifier[names] keyword[for] identifier[g] keyword[in] identifier[keys] )
keyword[if] keyword[not] identifier[all_in_columns_index] :
identifier[keys] =[ identifier[com] . identifier[asarray_tuplesafe] ( identifier[keys] )]
keyword[if] identifier[isinstance] ( identifier[level] ,( identifier[tuple] , identifier[list] )):
keyword[if] identifier[key] keyword[is] keyword[None] :
identifier[keys] =[ keyword[None] ]* identifier[len] ( identifier[level] )
identifier[levels] = identifier[level]
keyword[else] :
identifier[levels] =[ identifier[level] ]* identifier[len] ( identifier[keys] )
identifier[groupings] =[]
identifier[exclusions] =[]
keyword[def] identifier[is_in_axis] ( identifier[key] ):
keyword[if] keyword[not] identifier[_is_label_like] ( identifier[key] ):
keyword[try] :
identifier[obj] . identifier[_data] . identifier[items] . identifier[get_loc] ( identifier[key] )
keyword[except] identifier[Exception] :
keyword[return] keyword[False]
keyword[return] keyword[True]
keyword[def] identifier[is_in_obj] ( identifier[gpr] ):
keyword[try] :
keyword[return] identifier[id] ( identifier[gpr] )== identifier[id] ( identifier[obj] [ identifier[gpr] . identifier[name] ])
keyword[except] identifier[Exception] :
keyword[return] keyword[False]
keyword[for] identifier[i] ,( identifier[gpr] , identifier[level] ) keyword[in] identifier[enumerate] ( identifier[zip] ( identifier[keys] , identifier[levels] )):
keyword[if] identifier[is_in_obj] ( identifier[gpr] ):
identifier[in_axis] , identifier[name] = keyword[True] , identifier[gpr] . identifier[name]
identifier[exclusions] . identifier[append] ( identifier[name] )
keyword[elif] identifier[is_in_axis] ( identifier[gpr] ):
keyword[if] identifier[gpr] keyword[in] identifier[obj] :
keyword[if] identifier[validate] :
identifier[obj] . identifier[_check_label_or_level_ambiguity] ( identifier[gpr] )
identifier[in_axis] , identifier[name] , identifier[gpr] = keyword[True] , identifier[gpr] , identifier[obj] [ identifier[gpr] ]
identifier[exclusions] . identifier[append] ( identifier[name] )
keyword[elif] identifier[obj] . identifier[_is_level_reference] ( identifier[gpr] ):
identifier[in_axis] , identifier[name] , identifier[level] , identifier[gpr] = keyword[False] , keyword[None] , identifier[gpr] , keyword[None]
keyword[else] :
keyword[raise] identifier[KeyError] ( identifier[gpr] )
keyword[elif] identifier[isinstance] ( identifier[gpr] , identifier[Grouper] ) keyword[and] identifier[gpr] . identifier[key] keyword[is] keyword[not] keyword[None] :
identifier[exclusions] . identifier[append] ( identifier[gpr] . identifier[key] )
identifier[in_axis] , identifier[name] = keyword[False] , keyword[None]
keyword[else] :
identifier[in_axis] , identifier[name] = keyword[False] , keyword[None]
keyword[if] identifier[is_categorical_dtype] ( identifier[gpr] ) keyword[and] identifier[len] ( identifier[gpr] )!= identifier[obj] . identifier[shape] [ identifier[axis] ]:
keyword[raise] identifier[ValueError] (
( literal[string]
literal[string]
. identifier[format] ( identifier[len_gpr] = identifier[len] ( identifier[gpr] ), identifier[len_axis] = identifier[obj] . identifier[shape] [ identifier[axis] ])))
identifier[ping] =( identifier[Grouping] ( identifier[group_axis] ,
identifier[gpr] ,
identifier[obj] = identifier[obj] ,
identifier[name] = identifier[name] ,
identifier[level] = identifier[level] ,
identifier[sort] = identifier[sort] ,
identifier[observed] = identifier[observed] ,
identifier[in_axis] = identifier[in_axis] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[gpr] , identifier[Grouping] ) keyword[else] identifier[gpr] )
identifier[groupings] . identifier[append] ( identifier[ping] )
keyword[if] identifier[len] ( identifier[groupings] )== literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[grouper] = identifier[BaseGrouper] ( identifier[group_axis] , identifier[groupings] , identifier[sort] = identifier[sort] , identifier[mutated] = identifier[mutated] )
keyword[return] identifier[grouper] , identifier[exclusions] , identifier[obj] | def _get_grouper(obj, key=None, axis=0, level=None, sort=True, observed=False, mutated=False, validate=True):
"""
create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
Groupers enable local references to axis,level,sort, while
the passed in axis, level, and sort are 'global'.
This routine tries to figure out what the passing in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
If observed & we have a categorical grouper, only show the observed
values
If validate, then check for key/level overlaps
"""
group_axis = obj._get_axis(axis)
# validate that the passed single level is compatible with the passed
# axis of the object
if level is not None:
# TODO: These if-block and else-block are almost same.
# MultiIndex instance check is removable, but it seems that there are
# some processes only for non-MultiIndex in else-block,
# eg. `obj.index.name != level`. We have to consider carefully whether
# these are applicable for MultiIndex. Even if these are applicable,
# we need to check if it makes no side effect to subsequent processes
# on the outside of this condition.
# (GH 17621)
if isinstance(group_axis, MultiIndex):
if is_list_like(level) and len(level) == 1:
level = level[0] # depends on [control=['if'], data=[]]
if key is None and is_scalar(level):
# Get the level values from group_axis
key = group_axis.get_level_values(level)
level = None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# allow level to be a length-one list-like object
# (e.g., level=[0])
# GH 13901
if is_list_like(level):
nlevels = len(level)
if nlevels == 1:
level = level[0] # depends on [control=['if'], data=[]]
elif nlevels == 0:
raise ValueError('No group keys passed!') # depends on [control=['if'], data=[]]
else:
raise ValueError('multiple levels only valid with MultiIndex') # depends on [control=['if'], data=[]]
if isinstance(level, str):
if obj.index.name != level:
raise ValueError('level name {} is not the name of the index'.format(level)) # depends on [control=['if'], data=['level']] # depends on [control=['if'], data=[]]
elif level > 0 or level < -1:
raise ValueError('level > 0 or level < -1 only valid with MultiIndex') # depends on [control=['if'], data=[]]
# NOTE: `group_axis` and `group_axis.get_level_values(level)`
# are same in this section.
level = None
key = group_axis # depends on [control=['if'], data=['level']]
# a passed-in Grouper, directly convert
if isinstance(key, Grouper):
(binner, grouper, obj) = key._get_grouper(obj, validate=False)
if key.key is None:
return (grouper, [], obj) # depends on [control=['if'], data=[]]
else:
return (grouper, {key.key}, obj) # depends on [control=['if'], data=[]]
# already have a BaseGrouper, just return it
elif isinstance(key, BaseGrouper):
return (key, [], obj) # depends on [control=['if'], data=[]]
# In the future, a tuple key will always mean an actual key,
# not an iterable of keys. In the meantime, we attempt to provide
# a warning. We can assume that the user wanted a list of keys when
# the key is not in the index. We just have to be careful with
# unhashble elements of `key`. Any unhashable elements implies that
# they wanted a list of keys.
# https://github.com/pandas-dev/pandas/issues/18314
is_tuple = isinstance(key, tuple)
all_hashable = is_tuple and is_hashable(key)
if is_tuple:
if all_hashable and key not in obj and set(key).issubset(obj) or not all_hashable:
# column names ('a', 'b') -> ['a', 'b']
# arrays like (a, b) -> [a, b]
msg = "Interpreting tuple 'by' as a list of keys, rather than a single key. Use 'by=[...]' instead of 'by=(...)'. In the future, a tuple will always mean a single key."
warnings.warn(msg, FutureWarning, stacklevel=5)
key = list(key) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not isinstance(key, list):
keys = [key]
match_axis_length = False # depends on [control=['if'], data=[]]
else:
keys = key
match_axis_length = len(keys) == len(group_axis)
# what are we after, exactly?
any_callable = any((callable(g) or isinstance(g, dict) for g in keys))
any_groupers = any((isinstance(g, Grouper) for g in keys))
any_arraylike = any((isinstance(g, (list, tuple, Series, Index, np.ndarray)) for g in keys))
# is this an index replacement?
if not any_callable and (not any_arraylike) and (not any_groupers) and match_axis_length and (level is None):
if isinstance(obj, DataFrame):
all_in_columns_index = all((g in obj.columns or g in obj.index.names for g in keys)) # depends on [control=['if'], data=[]]
elif isinstance(obj, Series):
all_in_columns_index = all((g in obj.index.names for g in keys)) # depends on [control=['if'], data=[]]
if not all_in_columns_index:
keys = [com.asarray_tuplesafe(keys)] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if isinstance(level, (tuple, list)):
if key is None:
keys = [None] * len(level) # depends on [control=['if'], data=[]]
levels = level # depends on [control=['if'], data=[]]
else:
levels = [level] * len(keys)
groupings = []
exclusions = []
# if the actual grouper should be obj[key]
def is_in_axis(key):
if not _is_label_like(key):
try:
obj._data.items.get_loc(key) # depends on [control=['try'], data=[]]
except Exception:
return False # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return True
# if the grouper is obj[name]
def is_in_obj(gpr):
try:
return id(gpr) == id(obj[gpr.name]) # depends on [control=['try'], data=[]]
except Exception:
return False # depends on [control=['except'], data=[]]
for (i, (gpr, level)) in enumerate(zip(keys, levels)):
if is_in_obj(gpr): # df.groupby(df['name'])
(in_axis, name) = (True, gpr.name)
exclusions.append(name) # depends on [control=['if'], data=[]]
elif is_in_axis(gpr): # df.groupby('name')
if gpr in obj:
if validate:
obj._check_label_or_level_ambiguity(gpr) # depends on [control=['if'], data=[]]
(in_axis, name, gpr) = (True, gpr, obj[gpr])
exclusions.append(name) # depends on [control=['if'], data=['gpr', 'obj']]
elif obj._is_level_reference(gpr):
(in_axis, name, level, gpr) = (False, None, gpr, None) # depends on [control=['if'], data=[]]
else:
raise KeyError(gpr) # depends on [control=['if'], data=[]]
elif isinstance(gpr, Grouper) and gpr.key is not None:
# Add key to exclusions
exclusions.append(gpr.key)
(in_axis, name) = (False, None) # depends on [control=['if'], data=[]]
else:
(in_axis, name) = (False, None)
if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]:
raise ValueError('Length of grouper ({len_gpr}) and axis ({len_axis}) must be same length'.format(len_gpr=len(gpr), len_axis=obj.shape[axis])) # depends on [control=['if'], data=[]]
# create the Grouping
# allow us to passing the actual Grouping as the gpr
ping = Grouping(group_axis, gpr, obj=obj, name=name, level=level, sort=sort, observed=observed, in_axis=in_axis) if not isinstance(gpr, Grouping) else gpr
groupings.append(ping) # depends on [control=['for'], data=[]]
if len(groupings) == 0:
raise ValueError('No group keys passed!') # depends on [control=['if'], data=[]]
# create the internals grouper
grouper = BaseGrouper(group_axis, groupings, sort=sort, mutated=mutated)
return (grouper, exclusions, obj) |
def mult_selection(model,a,b):
"""mult_selection -- add piecewise relation with multiple selection formulation
Parameters:
- model: a model where to include the piecewise linear relation
- a[k]: x-coordinate of the k-th point in the piecewise linear relation
- b[k]: y-coordinate of the k-th point in the piecewise linear relation
Returns the model with the piecewise linear relation on added variables X, Y, and z.
"""
K = len(a)-1
w,z = {},{}
for k in range(K):
w[k] = model.addVar(lb=-model.infinity()) # do not name variables for avoiding clash
z[k] = model.addVar(vtype="B")
X = model.addVar(lb=a[0], ub=a[K], vtype="C")
Y = model.addVar(lb=-model.infinity())
for k in range(K):
model.addCons(w[k] >= a[k]*z[k])
model.addCons(w[k] <= a[k+1]*z[k])
model.addCons(quicksum(z[k] for k in range(K)) == 1)
model.addCons(X == quicksum(w[k] for k in range(K)))
c = [float(b[k+1]-b[k])/(a[k+1]-a[k]) for k in range(K)]
d = [b[k]-c[k]*a[k] for k in range(K)]
model.addCons(Y == quicksum(d[k]*z[k] + c[k]*w[k] for k in range(K)))
return X,Y,z | def function[mult_selection, parameter[model, a, b]]:
constant[mult_selection -- add piecewise relation with multiple selection formulation
Parameters:
- model: a model where to include the piecewise linear relation
- a[k]: x-coordinate of the k-th point in the piecewise linear relation
- b[k]: y-coordinate of the k-th point in the piecewise linear relation
Returns the model with the piecewise linear relation on added variables X, Y, and z.
]
variable[K] assign[=] binary_operation[call[name[len], parameter[name[a]]] - constant[1]]
<ast.Tuple object at 0x7da20c6a88b0> assign[=] tuple[[<ast.Dict object at 0x7da20c6a9ff0>, <ast.Dict object at 0x7da20c6a9240>]]
for taget[name[k]] in starred[call[name[range], parameter[name[K]]]] begin[:]
call[name[w]][name[k]] assign[=] call[name[model].addVar, parameter[]]
call[name[z]][name[k]] assign[=] call[name[model].addVar, parameter[]]
variable[X] assign[=] call[name[model].addVar, parameter[]]
variable[Y] assign[=] call[name[model].addVar, parameter[]]
for taget[name[k]] in starred[call[name[range], parameter[name[K]]]] begin[:]
call[name[model].addCons, parameter[compare[call[name[w]][name[k]] greater_or_equal[>=] binary_operation[call[name[a]][name[k]] * call[name[z]][name[k]]]]]]
call[name[model].addCons, parameter[compare[call[name[w]][name[k]] less_or_equal[<=] binary_operation[call[name[a]][binary_operation[name[k] + constant[1]]] * call[name[z]][name[k]]]]]]
call[name[model].addCons, parameter[compare[call[name[quicksum], parameter[<ast.GeneratorExp object at 0x7da20c6aab60>]] equal[==] constant[1]]]]
call[name[model].addCons, parameter[compare[name[X] equal[==] call[name[quicksum], parameter[<ast.GeneratorExp object at 0x7da20c6a8700>]]]]]
variable[c] assign[=] <ast.ListComp object at 0x7da20c6abf40>
variable[d] assign[=] <ast.ListComp object at 0x7da20c6aae30>
call[name[model].addCons, parameter[compare[name[Y] equal[==] call[name[quicksum], parameter[<ast.GeneratorExp object at 0x7da20c6a99f0>]]]]]
return[tuple[[<ast.Name object at 0x7da1b1701660>, <ast.Name object at 0x7da1b1701510>, <ast.Name object at 0x7da1b1702350>]]] | keyword[def] identifier[mult_selection] ( identifier[model] , identifier[a] , identifier[b] ):
literal[string]
identifier[K] = identifier[len] ( identifier[a] )- literal[int]
identifier[w] , identifier[z] ={},{}
keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[K] ):
identifier[w] [ identifier[k] ]= identifier[model] . identifier[addVar] ( identifier[lb] =- identifier[model] . identifier[infinity] ())
identifier[z] [ identifier[k] ]= identifier[model] . identifier[addVar] ( identifier[vtype] = literal[string] )
identifier[X] = identifier[model] . identifier[addVar] ( identifier[lb] = identifier[a] [ literal[int] ], identifier[ub] = identifier[a] [ identifier[K] ], identifier[vtype] = literal[string] )
identifier[Y] = identifier[model] . identifier[addVar] ( identifier[lb] =- identifier[model] . identifier[infinity] ())
keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[K] ):
identifier[model] . identifier[addCons] ( identifier[w] [ identifier[k] ]>= identifier[a] [ identifier[k] ]* identifier[z] [ identifier[k] ])
identifier[model] . identifier[addCons] ( identifier[w] [ identifier[k] ]<= identifier[a] [ identifier[k] + literal[int] ]* identifier[z] [ identifier[k] ])
identifier[model] . identifier[addCons] ( identifier[quicksum] ( identifier[z] [ identifier[k] ] keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[K] ))== literal[int] )
identifier[model] . identifier[addCons] ( identifier[X] == identifier[quicksum] ( identifier[w] [ identifier[k] ] keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[K] )))
identifier[c] =[ identifier[float] ( identifier[b] [ identifier[k] + literal[int] ]- identifier[b] [ identifier[k] ])/( identifier[a] [ identifier[k] + literal[int] ]- identifier[a] [ identifier[k] ]) keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[K] )]
identifier[d] =[ identifier[b] [ identifier[k] ]- identifier[c] [ identifier[k] ]* identifier[a] [ identifier[k] ] keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[K] )]
identifier[model] . identifier[addCons] ( identifier[Y] == identifier[quicksum] ( identifier[d] [ identifier[k] ]* identifier[z] [ identifier[k] ]+ identifier[c] [ identifier[k] ]* identifier[w] [ identifier[k] ] keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[K] )))
keyword[return] identifier[X] , identifier[Y] , identifier[z] | def mult_selection(model, a, b):
"""mult_selection -- add piecewise relation with multiple selection formulation
Parameters:
- model: a model where to include the piecewise linear relation
- a[k]: x-coordinate of the k-th point in the piecewise linear relation
- b[k]: y-coordinate of the k-th point in the piecewise linear relation
Returns the model with the piecewise linear relation on added variables X, Y, and z.
"""
K = len(a) - 1
(w, z) = ({}, {})
for k in range(K):
w[k] = model.addVar(lb=-model.infinity()) # do not name variables for avoiding clash
z[k] = model.addVar(vtype='B') # depends on [control=['for'], data=['k']]
X = model.addVar(lb=a[0], ub=a[K], vtype='C')
Y = model.addVar(lb=-model.infinity())
for k in range(K):
model.addCons(w[k] >= a[k] * z[k])
model.addCons(w[k] <= a[k + 1] * z[k]) # depends on [control=['for'], data=['k']]
model.addCons(quicksum((z[k] for k in range(K))) == 1)
model.addCons(X == quicksum((w[k] for k in range(K))))
c = [float(b[k + 1] - b[k]) / (a[k + 1] - a[k]) for k in range(K)]
d = [b[k] - c[k] * a[k] for k in range(K)]
model.addCons(Y == quicksum((d[k] * z[k] + c[k] * w[k] for k in range(K))))
return (X, Y, z) |
def __load_omitted_predictions(self):
"""private: set the omitted_predictions attribute
"""
# if there are no base predictions
if self.predictions is None:
raise Exception("ErrVar.__load_omitted_predictions(): " +
"no 'included' predictions is None")
if self.omitted_predictions_arg is None and \
self.omitted_par_arg is None:
raise Exception("ErrVar.__load_omitted_predictions: " +
"both omitted args are None")
# try to set omitted_predictions by
# extracting from existing predictions
if self.omitted_predictions_arg is None and \
self.omitted_par_arg is not None:
# check to see if omitted par names are in each predictions
found = True
missing_par,missing_pred = None, None
for par_name in self.omitted_jco.col_names:
for prediction in self.predictions_iter:
if par_name not in prediction.row_names:
found = False
missing_par = par_name
missing_pred = prediction.col_names[0]
break
if found:
opreds = []
# need to access the attribute directly,
# not a view of attribute
opred_mat = self._LinearAnalysis__predictions.extract(
row_names=self.omitted_jco.col_names)
opreds = [opred_mat.get(col_names=name) for name in self.forecast_names]
#for prediction in self._LinearAnalysis__predictions:
# opred = prediction.extract(self.omitted_jco.col_names)
# opreds.append(opred)
self.__omitted_predictions = opreds
else:
raise Exception("ErrVar.__load_omitted_predictions(): " +
" omitted parameter " + str(missing_par) +\
" not found in prediction vector " +
str(missing_pred))
elif self.omitted_parcov_arg is not None:
raise NotImplementedError() | def function[__load_omitted_predictions, parameter[self]]:
constant[private: set the omitted_predictions attribute
]
if compare[name[self].predictions is constant[None]] begin[:]
<ast.Raise object at 0x7da20c76e3b0>
if <ast.BoolOp object at 0x7da20c76eb90> begin[:]
<ast.Raise object at 0x7da20c76d900>
if <ast.BoolOp object at 0x7da1b23c6470> begin[:]
variable[found] assign[=] constant[True]
<ast.Tuple object at 0x7da1b23c4d00> assign[=] tuple[[<ast.Constant object at 0x7da1b23c5540>, <ast.Constant object at 0x7da1b23c74c0>]]
for taget[name[par_name]] in starred[name[self].omitted_jco.col_names] begin[:]
for taget[name[prediction]] in starred[name[self].predictions_iter] begin[:]
if compare[name[par_name] <ast.NotIn object at 0x7da2590d7190> name[prediction].row_names] begin[:]
variable[found] assign[=] constant[False]
variable[missing_par] assign[=] name[par_name]
variable[missing_pred] assign[=] call[name[prediction].col_names][constant[0]]
break
if name[found] begin[:]
variable[opreds] assign[=] list[[]]
variable[opred_mat] assign[=] call[name[self]._LinearAnalysis__predictions.extract, parameter[]]
variable[opreds] assign[=] <ast.ListComp object at 0x7da1b23c7040>
name[self].__omitted_predictions assign[=] name[opreds] | keyword[def] identifier[__load_omitted_predictions] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[predictions] keyword[is] keyword[None] :
keyword[raise] identifier[Exception] ( literal[string] +
literal[string] )
keyword[if] identifier[self] . identifier[omitted_predictions_arg] keyword[is] keyword[None] keyword[and] identifier[self] . identifier[omitted_par_arg] keyword[is] keyword[None] :
keyword[raise] identifier[Exception] ( literal[string] +
literal[string] )
keyword[if] identifier[self] . identifier[omitted_predictions_arg] keyword[is] keyword[None] keyword[and] identifier[self] . identifier[omitted_par_arg] keyword[is] keyword[not] keyword[None] :
identifier[found] = keyword[True]
identifier[missing_par] , identifier[missing_pred] = keyword[None] , keyword[None]
keyword[for] identifier[par_name] keyword[in] identifier[self] . identifier[omitted_jco] . identifier[col_names] :
keyword[for] identifier[prediction] keyword[in] identifier[self] . identifier[predictions_iter] :
keyword[if] identifier[par_name] keyword[not] keyword[in] identifier[prediction] . identifier[row_names] :
identifier[found] = keyword[False]
identifier[missing_par] = identifier[par_name]
identifier[missing_pred] = identifier[prediction] . identifier[col_names] [ literal[int] ]
keyword[break]
keyword[if] identifier[found] :
identifier[opreds] =[]
identifier[opred_mat] = identifier[self] . identifier[_LinearAnalysis__predictions] . identifier[extract] (
identifier[row_names] = identifier[self] . identifier[omitted_jco] . identifier[col_names] )
identifier[opreds] =[ identifier[opred_mat] . identifier[get] ( identifier[col_names] = identifier[name] ) keyword[for] identifier[name] keyword[in] identifier[self] . identifier[forecast_names] ]
identifier[self] . identifier[__omitted_predictions] = identifier[opreds]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] +
literal[string] + identifier[str] ( identifier[missing_par] )+ literal[string] +
identifier[str] ( identifier[missing_pred] ))
keyword[elif] identifier[self] . identifier[omitted_parcov_arg] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[NotImplementedError] () | def __load_omitted_predictions(self):
"""private: set the omitted_predictions attribute
"""
# if there are no base predictions
if self.predictions is None:
raise Exception('ErrVar.__load_omitted_predictions(): ' + "no 'included' predictions is None") # depends on [control=['if'], data=[]]
if self.omitted_predictions_arg is None and self.omitted_par_arg is None:
raise Exception('ErrVar.__load_omitted_predictions: ' + 'both omitted args are None') # depends on [control=['if'], data=[]]
# try to set omitted_predictions by
# extracting from existing predictions
if self.omitted_predictions_arg is None and self.omitted_par_arg is not None:
# check to see if omitted par names are in each predictions
found = True
(missing_par, missing_pred) = (None, None)
for par_name in self.omitted_jco.col_names:
for prediction in self.predictions_iter:
if par_name not in prediction.row_names:
found = False
missing_par = par_name
missing_pred = prediction.col_names[0]
break # depends on [control=['if'], data=['par_name']] # depends on [control=['for'], data=['prediction']] # depends on [control=['for'], data=['par_name']]
if found:
opreds = []
# need to access the attribute directly,
# not a view of attribute
opred_mat = self._LinearAnalysis__predictions.extract(row_names=self.omitted_jco.col_names)
opreds = [opred_mat.get(col_names=name) for name in self.forecast_names]
#for prediction in self._LinearAnalysis__predictions:
# opred = prediction.extract(self.omitted_jco.col_names)
# opreds.append(opred)
self.__omitted_predictions = opreds # depends on [control=['if'], data=[]]
else:
raise Exception('ErrVar.__load_omitted_predictions(): ' + ' omitted parameter ' + str(missing_par) + ' not found in prediction vector ' + str(missing_pred)) # depends on [control=['if'], data=[]]
elif self.omitted_parcov_arg is not None:
raise NotImplementedError() # depends on [control=['if'], data=[]] |
def curvature_flipping(script, angle_threshold=1.0, curve_type=0,
                       selected=False):
    """ Flip (swap) mesh edges to optimize a curvature metric, using
    MeshLab's 'Curvature flipping optimization' filter.

    Args:
        script: the FilterScript object or script filename to write
            the filter to.
        angle_threshold (float): To avoid excessive flipping/swapping,
            only couples of faces with a significant dihedral angle
            (i.e. greater than this threshold, in degrees) are
            considered.
        curve_type (int): Choose a metric to compute surface curvature
            on vertices (H = mean curvature, K = Gaussian curvature,
            A = area per vertex). Valid values match the filter's enum:
                0: mean curvature = H
                1: norm squared mean curvature = (H * H) / A
                2: absolute curvature:
                    if (K >= 0) return 2 * H
                    else return 2 * sqrt(H ^ 2 - A * K)
        selected (bool): value for the filter's boolean 'selection'
            parameter (described by MeshLab as 'Update selection');
            presumably restricts the filter to the current selection —
            TODO confirm against the MeshLab filter documentation.

    Layer stack:
        No impacts

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    filter_xml = ''.join([
        '  <filter name="Curvature flipping optimization">\n',
        '    <Param name="selection" ',
        'value="{}" '.format(str(selected).lower()),
        'description="Update selection" ',
        'type="RichBool" ',
        '/>\n',
        '    <Param name="pthreshold" ',
        'value="{}" '.format(angle_threshold),
        'description="Angle Thr (deg)" ',
        'type="RichFloat" ',
        '/>\n',
        '    <Param name="curvtype" ',
        'value="{:d}" '.format(curve_type),
        'description="Curvature metric" ',
        'enum_val0="mean" ',
        'enum_val1="norm squared" ',
        'enum_val2="absolute" ',
        'enum_cardinality="3" ',
        'type="RichEnum" ',
        '/>\n',
        '  </filter>\n'])
    util.write_filter(script, filter_xml)
return None | def function[curvature_flipping, parameter[script, angle_threshold, curve_type, selected]]:
constant[ Use the points and normals to build a surface using the Poisson
Surface reconstruction approach.
Args:
script: the FilterScript object or script filename to write
the filter to.
angle_threshold (float): To avoid excessive flipping/swapping we
consider only couple of faces with a significant diedral angle
(e.g. greater than the indicated threshold).
curve_type (int): Choose a metric to compute surface curvature on vertices
H = mean curv, K = gaussian curv, A = area per vertex
1: Mean curvature = H
2: Norm squared mean curvature = (H * H) / A
3: Absolute curvature:
if(K >= 0) return 2 * H
else return 2 * sqrt(H ^ 2 - A * K)
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
]
variable[filter_xml] assign[=] call[constant[].join, parameter[list[[<ast.Constant object at 0x7da18dc07ee0>, <ast.Constant object at 0x7da18dc07610>, <ast.Call object at 0x7da18dc06620>, <ast.Constant object at 0x7da18dc075b0>, <ast.Constant object at 0x7da18dc04df0>, <ast.Constant object at 0x7da18dc07820>, <ast.Constant object at 0x7da18dc05e10>, <ast.Call object at 0x7da18dc04fa0>, <ast.Constant object at 0x7da18dc05540>, <ast.Constant object at 0x7da18dc06e00>, <ast.Constant object at 0x7da18dc05360>, <ast.Constant object at 0x7da18dc04a30>, <ast.Call object at 0x7da18dc05240>, <ast.Constant object at 0x7da18dc05000>, <ast.Constant object at 0x7da18dc07d90>, <ast.Constant object at 0x7da18dc06290>, <ast.Constant object at 0x7da18dc05c00>, <ast.Constant object at 0x7da18dc057b0>, <ast.Constant object at 0x7da18dc050c0>, <ast.Constant object at 0x7da18dc05930>, <ast.Constant object at 0x7da18dc07a00>]]]]
call[name[util].write_filter, parameter[name[script], name[filter_xml]]]
return[constant[None]] | keyword[def] identifier[curvature_flipping] ( identifier[script] , identifier[angle_threshold] = literal[int] , identifier[curve_type] = literal[int] ,
identifier[selected] = keyword[False] ):
literal[string]
identifier[filter_xml] = literal[string] . identifier[join] ([
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[str] ( identifier[selected] ). identifier[lower] ()),
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[angle_threshold] ),
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[curve_type] ),
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ])
identifier[util] . identifier[write_filter] ( identifier[script] , identifier[filter_xml] )
keyword[return] keyword[None] | def curvature_flipping(script, angle_threshold=1.0, curve_type=0, selected=False):
""" Use the points and normals to build a surface using the Poisson
Surface reconstruction approach.
Args:
script: the FilterScript object or script filename to write
the filter to.
angle_threshold (float): To avoid excessive flipping/swapping we
consider only couple of faces with a significant diedral angle
(e.g. greater than the indicated threshold).
curve_type (int): Choose a metric to compute surface curvature on vertices
H = mean curv, K = gaussian curv, A = area per vertex
1: Mean curvature = H
2: Norm squared mean curvature = (H * H) / A
3: Absolute curvature:
if(K >= 0) return 2 * H
else return 2 * sqrt(H ^ 2 - A * K)
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
"""
filter_xml = ''.join([' <filter name="Curvature flipping optimization">\n', ' <Param name="selection" ', 'value="{}" '.format(str(selected).lower()), 'description="Update selection" ', 'type="RichBool" ', '/>\n', ' <Param name="pthreshold" ', 'value="{}" '.format(angle_threshold), 'description="Angle Thr (deg)" ', 'type="RichFloat" ', '/>\n', ' <Param name="curvtype" ', 'value="{:d}" '.format(curve_type), 'description="Curvature metric" ', 'enum_val0="mean" ', 'enum_val1="norm squared" ', 'enum_val2="absolute" ', 'enum_cardinality="3" ', 'type="RichEnum" ', '/>\n', ' </filter>\n'])
util.write_filter(script, filter_xml)
return None |
def main():
    """Entry point of the module.

    Workflow:
      1. Parse and validate the command-line arguments
         (:py:func:`parseArgs`, :py:func:`checkArgs`).
      2. Read the population file (:py:func:`readPopulations`).
      3. Extract the MDS values (:py:func:`extractData`).
      4. Plot the MDS values (:py:func:`plotMDS`).
    """
    args = parseArgs()
    checkArgs(args)
    # Population assignments read from the population file.
    populations = readPopulations(args.population_file, args.population_order)
    # MDS coordinates and the matching point labels.
    mds_points, point_labels = extractData(args.file, populations,
                                           args.population_order,
                                           args.xaxis, args.yaxis)
    plotMDS(mds_points, args.population_order, point_labels,
            args.population_colors, args.population_alpha,
            args.population_sizes, args.population_markers, args)
args.population_markers, args) | def function[main, parameter[]]:
constant[The main function of the module.
These are the steps:
1. Reads the population file (:py:func:`readPopulations`).
2. Extracts the MDS values (:py:func:`extractData`).
3. Plots the MDS values (:py:func:`plotMDS`).
]
variable[args] assign[=] call[name[parseArgs], parameter[]]
call[name[checkArgs], parameter[name[args]]]
variable[populations] assign[=] call[name[readPopulations], parameter[name[args].population_file, name[args].population_order]]
<ast.Tuple object at 0x7da1b0a651e0> assign[=] call[name[extractData], parameter[name[args].file, name[populations], name[args].population_order, name[args].xaxis, name[args].yaxis]]
call[name[plotMDS], parameter[name[theData], name[args].population_order, name[theLabels], name[args].population_colors, name[args].population_alpha, name[args].population_sizes, name[args].population_markers, name[args]]] | keyword[def] identifier[main] ():
literal[string]
identifier[args] = identifier[parseArgs] ()
identifier[checkArgs] ( identifier[args] )
identifier[populations] = identifier[readPopulations] ( identifier[args] . identifier[population_file] , identifier[args] . identifier[population_order] )
identifier[theData] , identifier[theLabels] = identifier[extractData] ( identifier[args] . identifier[file] , identifier[populations] ,
identifier[args] . identifier[population_order] , identifier[args] . identifier[xaxis] ,
identifier[args] . identifier[yaxis] )
identifier[plotMDS] ( identifier[theData] , identifier[args] . identifier[population_order] , identifier[theLabels] , identifier[args] . identifier[population_colors] ,
identifier[args] . identifier[population_alpha] , identifier[args] . identifier[population_sizes] ,
identifier[args] . identifier[population_markers] , identifier[args] ) | def main():
"""The main function of the module.
These are the steps:
1. Reads the population file (:py:func:`readPopulations`).
2. Extracts the MDS values (:py:func:`extractData`).
3. Plots the MDS values (:py:func:`plotMDS`).
"""
# Getting and checking the options
args = parseArgs()
checkArgs(args)
# Reads the population file
populations = readPopulations(args.population_file, args.population_order)
# Acquire the data
(theData, theLabels) = extractData(args.file, populations, args.population_order, args.xaxis, args.yaxis)
# Plot the data
plotMDS(theData, args.population_order, theLabels, args.population_colors, args.population_alpha, args.population_sizes, args.population_markers, args) |
def socketio_manage(environ, namespaces, request=None, error_handler=None,
                    json_loads=None, json_dumps=None):
    """Main SocketIO management function; call it from within the view
    of your framework of choice.

    ``environ`` is the WSGI environ. It is used to extract the Socket
    object from the underlying server (stored under the 'socketio' key)
    and is attached to both the ``Socket`` and ``Namespace`` objects.

    ``namespaces`` maps namespace strings to ``BaseNamespace``
    subclasses; the empty string '' (``Socket.GLOBAL_NS``) is the
    global namespace, e.g.::

        namespaces={'': GlobalNamespace, '/chat': ChatNamespace}

    ``request`` is optional; it is simply attached to the Socket and
    Namespace objects (accessible as ``self.request``) and is never
    touched by gevent-socketio itself.

    ``error_handler`` overrides the default
    :func:`socketio.virtsocket.default_error_handler`; it must have the
    same signature as the default handler.

    ``json_loads`` / ``json_dumps`` override the default ``json.loads``
    and ``json.dumps`` for every socket created by this manager and all
    namespaces inside.

    This call blocks the current "view"/"controller" to do the
    recv/send work on the socket and dispatch incoming messages to your
    namespaces. It runs *only once* per socket opening, even with a
    long-polling transport: subsequent polls are hooked directly at the
    server level onto the active ``Socket``, so they never see future
    ``request`` or ``environ`` objects. Cookie-based sessions (e.g.
    Beaker) opened here therefore stay open until the socket closes;
    keeping them in sync with your regular GET/POST calls is your
    responsibility.
    """
    socket = environ['socketio']
    socket._set_environ(environ)
    socket._set_namespaces(namespaces)
    # Install the optional hooks only when the caller supplied them.
    optional_hooks = (
        (request, socket._set_request),
        (error_handler, socket._set_error_handler),
        (json_loads, socket._set_json_loads),
        (json_dumps, socket._set_json_dumps),
    )
    for value, setter in optional_hooks:
        if value:
            setter(value)
    # Block this view until the socket's receiver loop terminates.
    gevent.joinall([socket._spawn_receiver_loop()])
    # TODO: double check, what happens to the WSGI request here ? it vanishes ?
return | def function[socketio_manage, parameter[environ, namespaces, request, error_handler, json_loads, json_dumps]]:
constant[Main SocketIO management function, call from within your Framework of
choice's view.
The ``environ`` variable is the WSGI ``environ``. It is used to extract
Socket object from the underlying server (as the 'socketio' key), and will
be attached to both the ``Socket`` and ``Namespace`` objects.
The ``namespaces`` parameter is a dictionary of the namespace string
representation as key, and the BaseNamespace namespace class descendant as
a value. The empty string ('') namespace is the global namespace. You can
use Socket.GLOBAL_NS to be more explicit. So it would look like:
.. code-block:: python
namespaces={'': GlobalNamespace,
'/chat': ChatNamespace}
The ``request`` object is not required, but will probably be useful to pass
framework-specific things into your Socket and Namespace functions. It will
simply be attached to the Socket and Namespace object (accessible through
``self.request`` in both cases), and it is not accessed in any case by the
``gevent-socketio`` library.
Pass in an ``error_handler`` if you want to override the default
error_handler (which is :func:`socketio.virtsocket.default_error_handler`.
The callable you pass in should have the same signature as the default
error handler.
The ``json_loads`` and ``json_dumps`` are overrides for the default
``json.loads`` and ``json.dumps`` function calls. Override these at
the top-most level here. This will affect all sockets created by this
socketio manager, and all namespaces inside.
This function will block the current "view" or "controller" in your
framework to do the recv/send on the socket, and dispatch incoming messages
to your namespaces.
This is a simple example using Pyramid:
.. code-block:: python
def my_view(request):
socketio_manage(request.environ, {'': GlobalNamespace}, request)
NOTE: You must understand that this function is going to be called
*only once* per socket opening, *even though* you are using a long
polling mechanism. The subsequent calls (for long polling) will
be hooked directly at the server-level, to interact with the
active ``Socket`` instance. This means you will *not* get access
to the future ``request`` or ``environ`` objects. This is of
particular importance regarding sessions (like Beaker). The
session will be opened once at the opening of the Socket, and not
closed until the socket is closed. You are responsible for
opening and closing the cookie-based session yourself if you want
to keep its data in sync with the rest of your GET/POST calls.
]
variable[socket] assign[=] call[name[environ]][constant[socketio]]
call[name[socket]._set_environ, parameter[name[environ]]]
call[name[socket]._set_namespaces, parameter[name[namespaces]]]
if name[request] begin[:]
call[name[socket]._set_request, parameter[name[request]]]
if name[error_handler] begin[:]
call[name[socket]._set_error_handler, parameter[name[error_handler]]]
if name[json_loads] begin[:]
call[name[socket]._set_json_loads, parameter[name[json_loads]]]
if name[json_dumps] begin[:]
call[name[socket]._set_json_dumps, parameter[name[json_dumps]]]
variable[receiver_loop] assign[=] call[name[socket]._spawn_receiver_loop, parameter[]]
call[name[gevent].joinall, parameter[list[[<ast.Name object at 0x7da18f09ec20>]]]]
return[None] | keyword[def] identifier[socketio_manage] ( identifier[environ] , identifier[namespaces] , identifier[request] = keyword[None] , identifier[error_handler] = keyword[None] ,
identifier[json_loads] = keyword[None] , identifier[json_dumps] = keyword[None] ):
literal[string]
identifier[socket] = identifier[environ] [ literal[string] ]
identifier[socket] . identifier[_set_environ] ( identifier[environ] )
identifier[socket] . identifier[_set_namespaces] ( identifier[namespaces] )
keyword[if] identifier[request] :
identifier[socket] . identifier[_set_request] ( identifier[request] )
keyword[if] identifier[error_handler] :
identifier[socket] . identifier[_set_error_handler] ( identifier[error_handler] )
keyword[if] identifier[json_loads] :
identifier[socket] . identifier[_set_json_loads] ( identifier[json_loads] )
keyword[if] identifier[json_dumps] :
identifier[socket] . identifier[_set_json_dumps] ( identifier[json_dumps] )
identifier[receiver_loop] = identifier[socket] . identifier[_spawn_receiver_loop] ()
identifier[gevent] . identifier[joinall] ([ identifier[receiver_loop] ])
keyword[return] | def socketio_manage(environ, namespaces, request=None, error_handler=None, json_loads=None, json_dumps=None):
"""Main SocketIO management function, call from within your Framework of
choice's view.
The ``environ`` variable is the WSGI ``environ``. It is used to extract
Socket object from the underlying server (as the 'socketio' key), and will
be attached to both the ``Socket`` and ``Namespace`` objects.
The ``namespaces`` parameter is a dictionary of the namespace string
representation as key, and the BaseNamespace namespace class descendant as
a value. The empty string ('') namespace is the global namespace. You can
use Socket.GLOBAL_NS to be more explicit. So it would look like:
.. code-block:: python
namespaces={'': GlobalNamespace,
'/chat': ChatNamespace}
The ``request`` object is not required, but will probably be useful to pass
framework-specific things into your Socket and Namespace functions. It will
simply be attached to the Socket and Namespace object (accessible through
``self.request`` in both cases), and it is not accessed in any case by the
``gevent-socketio`` library.
Pass in an ``error_handler`` if you want to override the default
error_handler (which is :func:`socketio.virtsocket.default_error_handler`.
The callable you pass in should have the same signature as the default
error handler.
The ``json_loads`` and ``json_dumps`` are overrides for the default
``json.loads`` and ``json.dumps`` function calls. Override these at
the top-most level here. This will affect all sockets created by this
socketio manager, and all namespaces inside.
This function will block the current "view" or "controller" in your
framework to do the recv/send on the socket, and dispatch incoming messages
to your namespaces.
This is a simple example using Pyramid:
.. code-block:: python
def my_view(request):
socketio_manage(request.environ, {'': GlobalNamespace}, request)
NOTE: You must understand that this function is going to be called
*only once* per socket opening, *even though* you are using a long
polling mechanism. The subsequent calls (for long polling) will
be hooked directly at the server-level, to interact with the
active ``Socket`` instance. This means you will *not* get access
to the future ``request`` or ``environ`` objects. This is of
particular importance regarding sessions (like Beaker). The
session will be opened once at the opening of the Socket, and not
closed until the socket is closed. You are responsible for
opening and closing the cookie-based session yourself if you want
to keep its data in sync with the rest of your GET/POST calls.
"""
socket = environ['socketio']
socket._set_environ(environ)
socket._set_namespaces(namespaces)
if request:
socket._set_request(request) # depends on [control=['if'], data=[]]
if error_handler:
socket._set_error_handler(error_handler) # depends on [control=['if'], data=[]]
if json_loads:
socket._set_json_loads(json_loads) # depends on [control=['if'], data=[]]
if json_dumps:
socket._set_json_dumps(json_dumps) # depends on [control=['if'], data=[]]
receiver_loop = socket._spawn_receiver_loop()
gevent.joinall([receiver_loop])
# TODO: double check, what happens to the WSGI request here ? it vanishes ?
return |
def _normalize_request_parameters(self, oauth_params, req_kwargs):
    '''Normalize the request parameters as detailed in the OAuth 1.0
    spec.

    Collects the query parameters and, when the request carries a
    form-urlencoded body, the body data as well; UTF-8 encodes any
    textual values, merges in the OAuth signing parameters, sorts
    everything as the OAuth 1.0/a spec requires and returns the result
    as a URL-encoded string.

    :param oauth_params: OAuth params to sign with.
    :type oauth_params: dict
    :param req_kwargs: Request kwargs to normalize.
    :type req_kwargs: dict
    '''
    params = req_kwargs.get('params', {})
    data = req_kwargs.get('data', {})
    headers = req_kwargs.get('headers', {})
    # Query-string parameters; a None value means "omit this one".
    pairs = [(key, value) for key, value in params.items()
             if value is not None]
    # Form-encoded body data is part of the signature base as well.
    if headers.get('Content-Type') == FORM_URLENCODED:
        pairs.extend(data.items())
    # UTF-8 encode every textual value before signing.
    signable = []
    for key, value in pairs:
        if is_basestring(value) and not isinstance(value, bytes):
            value = value.encode('utf-8')
        signable.append((key, value))
    # Merge in the OAuth parameters, skipping exact duplicates.
    for pair in oauth_params.items():
        if pair in signable:  # pragma: no cover
            continue
        signable.append(pair)
    # Sort as mandated by the OAuth 1.0/a spec, then URL-encode.
    signable.sort()
    return urlencode(signable, True)\
        .replace('+', '%20')\
        .replace('%7E', '~')
.replace('%7E', '~') | def function[_normalize_request_parameters, parameter[self, oauth_params, req_kwargs]]:
constant[
This process normalizes the request parameters as detailed in the OAuth
1.0 spec.
Additionally we apply a `Content-Type` header to the request of the
`FORM_URLENCODE` type if the `Content-Type` was previously set, i.e. if
this is a `POST` or `PUT` request. This ensures the correct header is
set as per spec.
Finally we sort the parameters in preparation for signing and return
a URL encoded string of all normalized parameters.
:param oauth_params: OAuth params to sign with.
:type oauth_params: dict
:param req_kwargs: Request kwargs to normalize.
:type req_kwargs: dict
]
variable[normalized] assign[=] list[[]]
variable[params] assign[=] call[name[req_kwargs].get, parameter[constant[params], dictionary[[], []]]]
variable[data] assign[=] call[name[req_kwargs].get, parameter[constant[data], dictionary[[], []]]]
variable[headers] assign[=] call[name[req_kwargs].get, parameter[constant[headers], dictionary[[], []]]]
for taget[tuple[[<ast.Name object at 0x7da1b0652110>, <ast.Name object at 0x7da1b0653f70>]]] in starred[call[name[params].items, parameter[]]] begin[:]
if compare[name[v] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b0653a60>
if <ast.BoolOp object at 0x7da1b0652260> begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0653250>, <ast.Name object at 0x7da1b0653c40>]]] in starred[call[name[data].items, parameter[]]] begin[:]
<ast.AugAssign object at 0x7da1b06539a0>
variable[all_normalized] assign[=] list[[]]
for taget[name[t]] in starred[name[normalized]] begin[:]
<ast.Tuple object at 0x7da1b0651330> assign[=] name[t]
if <ast.BoolOp object at 0x7da1b0652080> begin[:]
variable[v] assign[=] call[name[v].encode, parameter[constant[utf-8]]]
<ast.AugAssign object at 0x7da1b0651d50>
for taget[tuple[[<ast.Name object at 0x7da1b0651f00>, <ast.Name object at 0x7da1b0651990>]]] in starred[call[name[oauth_params].items, parameter[]]] begin[:]
if compare[tuple[[<ast.Name object at 0x7da1b0653c10>, <ast.Name object at 0x7da1b0651930>]] in name[all_normalized]] begin[:]
continue
<ast.AugAssign object at 0x7da1b0651c00>
call[name[all_normalized].sort, parameter[]]
return[call[call[call[name[urlencode], parameter[name[all_normalized], constant[True]]].replace, parameter[constant[+], constant[%20]]].replace, parameter[constant[%7E], constant[~]]]] | keyword[def] identifier[_normalize_request_parameters] ( identifier[self] , identifier[oauth_params] , identifier[req_kwargs] ):
literal[string]
identifier[normalized] =[]
identifier[params] = identifier[req_kwargs] . identifier[get] ( literal[string] ,{})
identifier[data] = identifier[req_kwargs] . identifier[get] ( literal[string] ,{})
identifier[headers] = identifier[req_kwargs] . identifier[get] ( literal[string] ,{})
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[params] . identifier[items] ():
keyword[if] identifier[v] keyword[is] keyword[not] keyword[None] :
identifier[normalized] +=[( identifier[k] , identifier[v] )]
keyword[if] literal[string] keyword[in] identifier[headers] keyword[and] identifier[headers] [ literal[string] ]== identifier[FORM_URLENCODED] :
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[data] . identifier[items] ():
identifier[normalized] +=[( identifier[k] , identifier[v] )]
identifier[all_normalized] =[]
keyword[for] identifier[t] keyword[in] identifier[normalized] :
identifier[k] , identifier[v] = identifier[t]
keyword[if] identifier[is_basestring] ( identifier[v] ) keyword[and] keyword[not] identifier[isinstance] ( identifier[v] , identifier[bytes] ):
identifier[v] = identifier[v] . identifier[encode] ( literal[string] )
identifier[all_normalized] +=[( identifier[k] , identifier[v] )]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[oauth_params] . identifier[items] ():
keyword[if] ( identifier[k] , identifier[v] ) keyword[in] identifier[all_normalized] :
keyword[continue]
identifier[all_normalized] +=[( identifier[k] , identifier[v] )]
identifier[all_normalized] . identifier[sort] ()
keyword[return] identifier[urlencode] ( identifier[all_normalized] , keyword[True] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ) | def _normalize_request_parameters(self, oauth_params, req_kwargs):
"""
This process normalizes the request parameters as detailed in the OAuth
1.0 spec.
Additionally we apply a `Content-Type` header to the request of the
`FORM_URLENCODE` type if the `Content-Type` was previously set, i.e. if
this is a `POST` or `PUT` request. This ensures the correct header is
set as per spec.
Finally we sort the parameters in preparation for signing and return
a URL encoded string of all normalized parameters.
:param oauth_params: OAuth params to sign with.
:type oauth_params: dict
:param req_kwargs: Request kwargs to normalize.
:type req_kwargs: dict
"""
normalized = []
params = req_kwargs.get('params', {})
data = req_kwargs.get('data', {})
headers = req_kwargs.get('headers', {})
# process request parameters
for (k, v) in params.items():
if v is not None:
normalized += [(k, v)] # depends on [control=['if'], data=['v']] # depends on [control=['for'], data=[]]
# process request data
if 'Content-Type' in headers and headers['Content-Type'] == FORM_URLENCODED:
for (k, v) in data.items():
normalized += [(k, v)] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
# extract values from our list of tuples
all_normalized = []
for t in normalized:
(k, v) = t
if is_basestring(v) and (not isinstance(v, bytes)):
v = v.encode('utf-8') # depends on [control=['if'], data=[]]
all_normalized += [(k, v)] # depends on [control=['for'], data=['t']]
# add in the params from oauth_params for signing
for (k, v) in oauth_params.items():
if (k, v) in all_normalized: # pragma: no cover
continue # depends on [control=['if'], data=[]]
all_normalized += [(k, v)] # depends on [control=['for'], data=[]]
# sort the params as per the OAuth 1.0/a spec
all_normalized.sort()
# finally encode the params as a string
return urlencode(all_normalized, True).replace('+', '%20').replace('%7E', '~') |
def read_frame_nowait(self) -> Optional[DataFrame]:
    """Pop a single frame from the local buffer without blocking.

    Returns None when nothing is buffered but the stream is still
    open. Raises StreamConsumedError when the stream has been fully
    consumed: the buffer is empty and the stream is closed, or a
    queued None (which marks the stream as consumed) is dequeued.
    """
    try:
        frame = self._data_frames.get_nowait()
    except asyncio.QueueEmpty:
        if not self.closed:
            # Nothing available right now, but more may still arrive.
            return None
        raise StreamConsumedError(self.id)
    self._data_frames.task_done()
    # A queued None signals that the stream has been consumed.
    if frame is None:
        raise StreamConsumedError(self.id)
    return frame
return frame | def function[read_frame_nowait, parameter[self]]:
constant[Read a single frame from the local buffer immediately.
If no frames are available but the stream is still open, returns None.
Otherwise, raises StreamConsumedError.
]
<ast.Try object at 0x7da18fe93cd0>
call[name[self]._data_frames.task_done, parameter[]]
if compare[name[frame] is constant[None]] begin[:]
<ast.Raise object at 0x7da18fe93e20>
return[name[frame]] | keyword[def] identifier[read_frame_nowait] ( identifier[self] )-> identifier[Optional] [ identifier[DataFrame] ]:
literal[string]
keyword[try] :
identifier[frame] = identifier[self] . identifier[_data_frames] . identifier[get_nowait] ()
keyword[except] identifier[asyncio] . identifier[QueueEmpty] :
keyword[if] identifier[self] . identifier[closed] :
keyword[raise] identifier[StreamConsumedError] ( identifier[self] . identifier[id] )
keyword[return] keyword[None]
identifier[self] . identifier[_data_frames] . identifier[task_done] ()
keyword[if] identifier[frame] keyword[is] keyword[None] :
keyword[raise] identifier[StreamConsumedError] ( identifier[self] . identifier[id] )
keyword[return] identifier[frame] | def read_frame_nowait(self) -> Optional[DataFrame]:
"""Read a single frame from the local buffer immediately.
If no frames are available but the stream is still open, returns None.
Otherwise, raises StreamConsumedError.
"""
try:
frame = self._data_frames.get_nowait() # depends on [control=['try'], data=[]]
except asyncio.QueueEmpty:
if self.closed:
raise StreamConsumedError(self.id) # depends on [control=['if'], data=[]]
return None # depends on [control=['except'], data=[]]
self._data_frames.task_done()
if frame is None:
raise StreamConsumedError(self.id) # depends on [control=['if'], data=[]]
return frame |
def shutdown(self, timeout=None):
""" Optional. """
try:
shutdown_method = self._proxied_manager.shutdown
except AttributeError:
return
shutdown_method(timeout) | def function[shutdown, parameter[self, timeout]]:
constant[ Optional. ]
<ast.Try object at 0x7da1b053b640>
call[name[shutdown_method], parameter[name[timeout]]] | keyword[def] identifier[shutdown] ( identifier[self] , identifier[timeout] = keyword[None] ):
literal[string]
keyword[try] :
identifier[shutdown_method] = identifier[self] . identifier[_proxied_manager] . identifier[shutdown]
keyword[except] identifier[AttributeError] :
keyword[return]
identifier[shutdown_method] ( identifier[timeout] ) | def shutdown(self, timeout=None):
""" Optional. """
try:
shutdown_method = self._proxied_manager.shutdown # depends on [control=['try'], data=[]]
except AttributeError:
return # depends on [control=['except'], data=[]]
shutdown_method(timeout) |
def expire(self, key, max_age, **opts):
    """Expire *key* ``max_age`` seconds from now.

    Convenience wrapper: computes the absolute deadline from the
    current time and delegates to ``expire_at``, forwarding any extra
    keyword options unchanged.
    """
    deadline = time() + max_age
    self.expire_at(key, deadline, **opts)
self.expire_at(key, time() + max_age, **opts) | def function[expire, parameter[self, key, max_age]]:
constant[Set the maximum age of a given key, in seconds.]
call[name[self].expire_at, parameter[name[key], binary_operation[call[name[time], parameter[]] + name[max_age]]]] | keyword[def] identifier[expire] ( identifier[self] , identifier[key] , identifier[max_age] ,** identifier[opts] ):
literal[string]
identifier[self] . identifier[expire_at] ( identifier[key] , identifier[time] ()+ identifier[max_age] ,** identifier[opts] ) | def expire(self, key, max_age, **opts):
"""Set the maximum age of a given key, in seconds."""
self.expire_at(key, time() + max_age, **opts) |
def is_valid_release_version(version):
    '''Check whether *version* is a valid release code.

    A valid code is exactly six characters long: a literal ``R``, a
    four-digit year in 1990-2049 and a revision letter ``a``-``h``
    (for example ``R2014b``).

    Returns False (instead of raising ``ValueError`` from ``int()``)
    when the year field is not numeric, e.g. ``"Rabcda"``.
    '''
    if version is None or len(version) != 6:
        return False
    if version[0] != 'R' or version[5] not in ('a', 'b', 'c', 'd',
                                               'e', 'f', 'g', 'h'):
        return False
    try:
        year = int(version[1:5])
    except (TypeError, ValueError):
        # Non-numeric year field: invalid, not an exception.
        return False
    return 1990 <= year <= 2049
and version[5] in ('h', 'g', 'f', 'e', 'd', 'c', 'b', 'a') | def function[is_valid_release_version, parameter[version]]:
constant[Checks that the given version code is valid.]
return[<ast.BoolOp object at 0x7da1b10c6350>] | keyword[def] identifier[is_valid_release_version] ( identifier[version] ):
literal[string]
keyword[return] identifier[version] keyword[is] keyword[not] keyword[None] keyword[and] identifier[len] ( identifier[version] )== literal[int] keyword[and] identifier[version] [ literal[int] ]== literal[string] keyword[and] identifier[int] ( identifier[version] [ literal[int] : literal[int] ]) keyword[in] identifier[range] ( literal[int] , literal[int] ) keyword[and] identifier[version] [ literal[int] ] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ) | def is_valid_release_version(version):
"""Checks that the given version code is valid."""
return version is not None and len(version) == 6 and (version[0] == 'R') and (int(version[1:5]) in range(1990, 2050)) and (version[5] in ('h', 'g', 'f', 'e', 'd', 'c', 'b', 'a')) |
def calculate_clock_skew(self):
    """Estimate the clock skew from the collected data points.

    Iteratively discards outliers (points outside one standard deviation
    of the current mean) for up to ``self.clean_steps`` rounds, then
    returns the mean of the surviving points.  Returns ``Decimal("0")``
    when there is no data, or while the spread is still too large (or the
    sample too small) to trust -- callers should keep collecting points.

    Side effect: ``self.data_points`` is replaced by the cleaned list.
    """
    # Average and standard deviation over all collected points.
    n = self.statx_n(self.data_points)
    if n < 1:
        # No data at all -- nothing to estimate.
        return Decimal("0")
    avg = self.statx_avg(self.data_points)
    sdev = self.statx_sdev(self.data_points)

    # Incrementally remove aberration points.
    for _ in range(self.clean_steps):
        # Keep only the points within one sigma of the current average.
        # Only the first n points are considered, matching the count
        # reported by statx_n().
        min_val = avg - sdev
        max_val = avg + sdev
        self.data_points = [
            v for v in self.data_points[:n] if min_val <= v <= max_val
        ]

        # Recompute the average using the "sound" points we kept.
        n = self.statx_n(self.data_points)
        if n < 2:
            # Not enough data left to compute a standard deviation.
            break
        avg = self.statx_avg(self.data_points)
        sdev = self.statx_sdev(self.data_points)
        if sdev <= self.max_sdev or n < self.min_data:
            break

    # If the standard deviation is still too large we cannot update the
    # clock; likewise if we do not yet have the minimum amount of data.
    if sdev > self.max_sdev or n < self.min_data:
        return Decimal("0")
    return avg
constant[
Computer average and standard deviation
using all the data points.
]
variable[n] assign[=] call[name[self].statx_n, parameter[name[self].data_points]]
constant[
Required to be able to compute the standard
deviation.
]
if compare[name[n] less[<] constant[1]] begin[:]
return[call[name[Decimal], parameter[constant[0]]]]
variable[avg] assign[=] call[name[self].statx_avg, parameter[name[self].data_points]]
variable[sdev] assign[=] call[name[self].statx_sdev, parameter[name[self].data_points]]
constant[
Incrementally remove aberration points.
]
for taget[name[k]] in starred[call[name[range], parameter[constant[0], name[self].clean_steps]]] begin[:]
constant[
Remove aberration points: keep only
the sigma range around the average.
]
variable[min_val] assign[=] binary_operation[name[avg] - name[sdev]]
variable[max_val] assign[=] binary_operation[name[avg] + name[sdev]]
variable[cleaned_data_points] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], name[n]]]] begin[:]
variable[v] assign[=] call[name[self].data_points][name[i]]
if <ast.BoolOp object at 0x7da204622c50> begin[:]
continue
call[name[cleaned_data_points].append, parameter[name[v]]]
name[self].data_points assign[=] call[name[cleaned_data_points]][<ast.Slice object at 0x7da1b0627e20>]
constant[
Recompute the new average using the
"sound" points we kept.
]
variable[n] assign[=] call[name[self].statx_n, parameter[name[self].data_points]]
constant[
Not enough data to compute standard
deviation.
]
if compare[name[n] less[<] constant[2]] begin[:]
break
variable[avg] assign[=] call[name[self].statx_avg, parameter[name[self].data_points]]
variable[sdev] assign[=] call[name[self].statx_sdev, parameter[name[self].data_points]]
if <ast.BoolOp object at 0x7da1b0624190> begin[:]
break
constant[
If standard deviation is too large still, we
cannot update our clock. Collect more points.
If we don't have a minimum amount of data,
don't attempt the update yet, continue collecting.
]
if <ast.BoolOp object at 0x7da1b06269e0> begin[:]
return[call[name[Decimal], parameter[constant[0]]]]
return[name[avg]] | keyword[def] identifier[calculate_clock_skew] ( identifier[self] ):
literal[string]
identifier[n] = identifier[self] . identifier[statx_n] ( identifier[self] . identifier[data_points] )
literal[string]
keyword[if] identifier[n] < literal[int] :
keyword[return] identifier[Decimal] ( literal[string] )
identifier[avg] = identifier[self] . identifier[statx_avg] ( identifier[self] . identifier[data_points] )
identifier[sdev] = identifier[self] . identifier[statx_sdev] ( identifier[self] . identifier[data_points] )
literal[string]
keyword[for] identifier[k] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[clean_steps] ):
literal[string]
identifier[min_val] = identifier[avg] - identifier[sdev]
identifier[max_val] = identifier[avg] + identifier[sdev]
identifier[cleaned_data_points] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[n] ):
identifier[v] = identifier[self] . identifier[data_points] [ identifier[i] ]
keyword[if] identifier[v] < identifier[min_val] keyword[or] identifier[v] > identifier[max_val] :
keyword[continue]
identifier[cleaned_data_points] . identifier[append] ( identifier[v] )
identifier[self] . identifier[data_points] = identifier[cleaned_data_points] [:]
literal[string]
identifier[n] = identifier[self] . identifier[statx_n] ( identifier[self] . identifier[data_points] )
literal[string]
keyword[if] identifier[n] < literal[int] :
keyword[break]
identifier[avg] = identifier[self] . identifier[statx_avg] ( identifier[self] . identifier[data_points] )
identifier[sdev] = identifier[self] . identifier[statx_sdev] ( identifier[self] . identifier[data_points] )
keyword[if] identifier[sdev] <= identifier[self] . identifier[max_sdev] keyword[or] identifier[n] < identifier[self] . identifier[min_data] :
keyword[break]
literal[string]
keyword[if] identifier[sdev] > identifier[self] . identifier[max_sdev] keyword[or] identifier[n] < identifier[self] . identifier[min_data] :
keyword[return] identifier[Decimal] ( literal[string] )
keyword[return] identifier[avg] | def calculate_clock_skew(self):
"""
Computer average and standard deviation
using all the data points.
"""
n = self.statx_n(self.data_points)
'\n Required to be able to compute the standard\n deviation.\n '
if n < 1:
return Decimal('0') # depends on [control=['if'], data=[]]
avg = self.statx_avg(self.data_points)
sdev = self.statx_sdev(self.data_points)
'\n Incrementally remove aberration points.\n '
for k in range(0, self.clean_steps):
'\n Remove aberration points: keep only\n the sigma range around the average.\n '
min_val = avg - sdev
max_val = avg + sdev
cleaned_data_points = []
for i in range(0, n):
v = self.data_points[i]
if v < min_val or v > max_val:
continue # depends on [control=['if'], data=[]]
cleaned_data_points.append(v) # depends on [control=['for'], data=['i']]
self.data_points = cleaned_data_points[:]
'\n Recompute the new average using the\n "sound" points we kept.\n '
n = self.statx_n(self.data_points)
'\n Not enough data to compute standard\n deviation.\n '
if n < 2:
break # depends on [control=['if'], data=[]]
avg = self.statx_avg(self.data_points)
sdev = self.statx_sdev(self.data_points)
if sdev <= self.max_sdev or n < self.min_data:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
"\n If standard deviation is too large still, we\n cannot update our clock. Collect more points.\n\n If we don't have a minimum amount of data,\n don't attempt the update yet, continue collecting.\n "
if sdev > self.max_sdev or n < self.min_data:
return Decimal('0') # depends on [control=['if'], data=[]]
return avg |
def read_record_member(self, orcid_id, request_type, token, put_code=None,
                       accept_type='application/orcid+json'):
    """Fetch member-API information about a researcher.

    Thin wrapper that delegates to ``_get_info`` with the member-level
    fetcher bound in.

    Parameters
    ----------
    :param orcid_id: string
        ORCID iD of the queried author.
    :param request_type: string
        Endpoint name, e.g. 'record'.
        See https://members.orcid.org/api/tutorial/read-orcid-records
        for possible values.
    :param token: string
        Token received from OAuth 2 3-legged authorization.
    :param put_code: string | list of strings
        Id of the queried work; for a 'works' request_type this may be a
        list of ids.
    :param accept_type: expected MIME type of received data
    Returns
    -------
    :returns: dict | lxml.etree._Element
        Record(s) as a JSON-compatible dictionary or an XML e-tree,
        depending on accept_type.
    """
    fetch = self._get_member_info
    return self._get_info(orcid_id, fetch, request_type, token, put_code,
                          accept_type)
constant[Get the member info about the researcher.
Parameters
----------
:param orcid_id: string
Id of the queried author.
:param request_type: string
For example: 'record'.
See https://members.orcid.org/api/tutorial/read-orcid-records
for possible values..
:param response_format: string
One of json, xml.
:param token: string
Token received from OAuth 2 3-legged authorization.
:param put_code: string | list of strings
The id of the queried work. In case of 'works' request_type
might be a list of strings
:param accept_type: expected MIME type of received data
Returns
-------
:returns: dict | lxml.etree._Element
Record(s) in JSON-compatible dictionary representation or
in XML E-tree, depending on accept_type specified.
]
return[call[name[self]._get_info, parameter[name[orcid_id], name[self]._get_member_info, name[request_type], name[token], name[put_code], name[accept_type]]]] | keyword[def] identifier[read_record_member] ( identifier[self] , identifier[orcid_id] , identifier[request_type] , identifier[token] , identifier[put_code] = keyword[None] ,
identifier[accept_type] = literal[string] ):
literal[string]
keyword[return] identifier[self] . identifier[_get_info] ( identifier[orcid_id] , identifier[self] . identifier[_get_member_info] , identifier[request_type] ,
identifier[token] , identifier[put_code] , identifier[accept_type] ) | def read_record_member(self, orcid_id, request_type, token, put_code=None, accept_type='application/orcid+json'):
"""Get the member info about the researcher.
Parameters
----------
:param orcid_id: string
Id of the queried author.
:param request_type: string
For example: 'record'.
See https://members.orcid.org/api/tutorial/read-orcid-records
for possible values..
:param response_format: string
One of json, xml.
:param token: string
Token received from OAuth 2 3-legged authorization.
:param put_code: string | list of strings
The id of the queried work. In case of 'works' request_type
might be a list of strings
:param accept_type: expected MIME type of received data
Returns
-------
:returns: dict | lxml.etree._Element
Record(s) in JSON-compatible dictionary representation or
in XML E-tree, depending on accept_type specified.
"""
return self._get_info(orcid_id, self._get_member_info, request_type, token, put_code, accept_type) |
def farray(self):
    """Frequencies at the lower edge of each frequency bin.

    :type: `numpy.ndarray`
    """
    # Full bin bandwidth is 2 * sqrt(pi) * f / Q; the lower edge sits
    # half a bandwidth below the bin centre.
    full_width = 2 * pi ** 0.5 * self.frequencies / self.q
    return self.frequencies - full_width / 2.
constant[Array of frequencies for the lower-edge of each frequency bin
:type: `numpy.ndarray`
]
variable[bandwidths] assign[=] binary_operation[binary_operation[binary_operation[constant[2] * binary_operation[name[pi] ** binary_operation[constant[1] / constant[2.0]]]] * name[self].frequencies] / name[self].q]
return[binary_operation[name[self].frequencies - binary_operation[name[bandwidths] / constant[2.0]]]] | keyword[def] identifier[farray] ( identifier[self] ):
literal[string]
identifier[bandwidths] = literal[int] * identifier[pi] **( literal[int] / literal[int] )* identifier[self] . identifier[frequencies] / identifier[self] . identifier[q]
keyword[return] identifier[self] . identifier[frequencies] - identifier[bandwidths] / literal[int] | def farray(self):
"""Array of frequencies for the lower-edge of each frequency bin
:type: `numpy.ndarray`
"""
bandwidths = 2 * pi ** (1 / 2.0) * self.frequencies / self.q
return self.frequencies - bandwidths / 2.0 |
def disconnect(self, timeout=None, blocking=True):
    """Tear down the chromecast connection.

    :param timeout: a floating point number specifying a timeout for the
                    operation in seconds (or fractions thereof), or None
                    to block forever.
    :param blocking: when True, wait for the disconnection to complete;
                     when False, return immediately after requesting it.
    """
    self.socket_client.disconnect()
    if not blocking:
        return
    self.join(timeout=timeout)
constant[
Disconnects the chromecast and waits for it to terminate.
:param timeout: a floating point number specifying a timeout for the
operation in seconds (or fractions thereof). Or None
to block forever.
:param blocking: If True it will block until the disconnection is
complete, otherwise it will return immediately.
]
call[name[self].socket_client.disconnect, parameter[]]
if name[blocking] begin[:]
call[name[self].join, parameter[]] | keyword[def] identifier[disconnect] ( identifier[self] , identifier[timeout] = keyword[None] , identifier[blocking] = keyword[True] ):
literal[string]
identifier[self] . identifier[socket_client] . identifier[disconnect] ()
keyword[if] identifier[blocking] :
identifier[self] . identifier[join] ( identifier[timeout] = identifier[timeout] ) | def disconnect(self, timeout=None, blocking=True):
"""
Disconnects the chromecast and waits for it to terminate.
:param timeout: a floating point number specifying a timeout for the
operation in seconds (or fractions thereof). Or None
to block forever.
:param blocking: If True it will block until the disconnection is
complete, otherwise it will return immediately.
"""
self.socket_client.disconnect()
if blocking:
self.join(timeout=timeout) # depends on [control=['if'], data=[]] |
def _request(self, *args, **kwargs):
# type (Any) -> Response
"""Make requests using configured :class:`requests.Session`.
Any error details will be extracted to an :class:`HTTPError`
which will contain relevant error details when printed."""
self._amend_request_kwargs(kwargs)
_response = self._requests_session.request(*args, **kwargs)
try:
_response.raise_for_status()
except HTTPError as e:
if e.response is not None:
raise_from(ConjureHTTPError(e), e)
raise e
return _response | def function[_request, parameter[self]]:
constant[Make requests using configured :class:`requests.Session`.
Any error details will be extracted to an :class:`HTTPError`
which will contain relevant error details when printed.]
call[name[self]._amend_request_kwargs, parameter[name[kwargs]]]
variable[_response] assign[=] call[name[self]._requests_session.request, parameter[<ast.Starred object at 0x7da1b0c93a90>]]
<ast.Try object at 0x7da1b0c92b30>
return[name[_response]] | keyword[def] identifier[_request] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[_amend_request_kwargs] ( identifier[kwargs] )
identifier[_response] = identifier[self] . identifier[_requests_session] . identifier[request] (* identifier[args] ,** identifier[kwargs] )
keyword[try] :
identifier[_response] . identifier[raise_for_status] ()
keyword[except] identifier[HTTPError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[response] keyword[is] keyword[not] keyword[None] :
identifier[raise_from] ( identifier[ConjureHTTPError] ( identifier[e] ), identifier[e] )
keyword[raise] identifier[e]
keyword[return] identifier[_response] | def _request(self, *args, **kwargs):
# type (Any) -> Response
'Make requests using configured :class:`requests.Session`.\n Any error details will be extracted to an :class:`HTTPError`\n which will contain relevant error details when printed.'
self._amend_request_kwargs(kwargs)
_response = self._requests_session.request(*args, **kwargs)
try:
_response.raise_for_status() # depends on [control=['try'], data=[]]
except HTTPError as e:
if e.response is not None:
raise_from(ConjureHTTPError(e), e) # depends on [control=['if'], data=[]]
raise e # depends on [control=['except'], data=['e']]
return _response |
def get_queue_settings(self, project_key):
    """Retrieve the queue settings for a service-desk project.

    :param project_key: str
    :return: response of the queues endpoint
    """
    return self.get('rest/servicedeskapi/queues/{}'.format(project_key),
                    headers=self.experimental_headers)
constant[
Get queue settings on project
:param project_key: str
:return:
]
variable[url] assign[=] call[constant[rest/servicedeskapi/queues/{}].format, parameter[name[project_key]]]
return[call[name[self].get, parameter[name[url]]]] | keyword[def] identifier[get_queue_settings] ( identifier[self] , identifier[project_key] ):
literal[string]
identifier[url] = literal[string] . identifier[format] ( identifier[project_key] )
keyword[return] identifier[self] . identifier[get] ( identifier[url] , identifier[headers] = identifier[self] . identifier[experimental_headers] ) | def get_queue_settings(self, project_key):
"""
Get queue settings on project
:param project_key: str
:return:
"""
url = 'rest/servicedeskapi/queues/{}'.format(project_key)
return self.get(url, headers=self.experimental_headers) |
def cli(**settings):
    """Notify about new reviews in AppStore and Google Play in slack.
    Launch command using supervisor or using screen/tmux/etc.
    Reviews are fetched for multiple apps and languages in --beat=300 interval.
    """
    # Configure logging first so everything below can report problems.
    setup_logging(settings)
    settings = setup_languages(settings)
    channels = setup_channel_map(settings)
    app = CriticApp(**dict(settings, channels=channels))
    # Optional Sentry error reporting, enabled only when a DSN is configured.
    if settings['sentry_dsn']:
        app.sentry_client = Client(settings['sentry_dsn'])
        logger.debug('Errors are reported to %s' % settings['sentry_dsn'])
    else:
        app.sentry_client = None
    # --version short-circuits: print and exit without polling anything.
    if settings['version']:
        click.echo('Version %s' % critics.__version__)
        return
    # At least one store must be selected.
    if not (settings['ios'] or settings['android']):
        click.echo('Please choose either --ios or --android')
        return
    loop = tornado.ioloop.IOLoop.instance()
    # If a previously persisted model loads, earlier reviews are already
    # known, so notifications can be sent on the very first poll.
    if app.load_model():
        logger.debug('Model loaded OK, not skipping notify on first run')
        notify = True
    else:
        notify = False
    # Schedule periodic polling per store; interval is --beat seconds
    # (PeriodicCallback takes milliseconds).
    if settings['ios']:
        logger.info('Tracking IOS apps: %s', ', '.join(settings['ios']))
        itunes = tornado.ioloop.PeriodicCallback(partial(app.poll_store, 'ios'),
                                                 1000 * settings['beat'], loop)
        itunes.start()
    if settings['android']:
        logger.info('Tracking Android apps: %s', ', '.join(settings['android']))
        google_play = tornado.ioloop.PeriodicCallback(partial(app.poll_store, 'android'),
                                                      1000 * settings['beat'], loop)
        google_play.start()
    echo_channel_map(channels)
    # Do an immediate first poll before the periodic callbacks kick in.
    if settings['ios']:
        app.poll_store('ios', notify=notify)
    if settings['android']:
        app.poll_store('android', notify=notify)
    # Optional metrics endpoint.  NOTE(review): presumably start_http_server
    # exposes Prometheus-style metrics on this port -- confirm against the
    # module imports.
    if settings['stats']:
        port = int(settings['stats'])
        logger.debug('Serving metrics server on port %s' % port)
        start_http_server(port)
    # Without --daemonize the function returns after the single poll above.
    if settings['daemonize']:
        loop.start()
constant[Notify about new reviews in AppStore and Google Play in slack.
Launch command using supervisor or using screen/tmux/etc.
Reviews are fetched for multiple apps and languages in --beat=300 interval.
]
call[name[setup_logging], parameter[name[settings]]]
variable[settings] assign[=] call[name[setup_languages], parameter[name[settings]]]
variable[channels] assign[=] call[name[setup_channel_map], parameter[name[settings]]]
variable[app] assign[=] call[name[CriticApp], parameter[]]
if call[name[settings]][constant[sentry_dsn]] begin[:]
name[app].sentry_client assign[=] call[name[Client], parameter[call[name[settings]][constant[sentry_dsn]]]]
call[name[logger].debug, parameter[binary_operation[constant[Errors are reported to %s] <ast.Mod object at 0x7da2590d6920> call[name[settings]][constant[sentry_dsn]]]]]
if call[name[settings]][constant[version]] begin[:]
call[name[click].echo, parameter[binary_operation[constant[Version %s] <ast.Mod object at 0x7da2590d6920> name[critics].__version__]]]
return[None]
if <ast.UnaryOp object at 0x7da1b0af4370> begin[:]
call[name[click].echo, parameter[constant[Please choose either --ios or --android]]]
return[None]
variable[loop] assign[=] call[name[tornado].ioloop.IOLoop.instance, parameter[]]
if call[name[app].load_model, parameter[]] begin[:]
call[name[logger].debug, parameter[constant[Model loaded OK, not skipping notify on first run]]]
variable[notify] assign[=] constant[True]
if call[name[settings]][constant[ios]] begin[:]
call[name[logger].info, parameter[constant[Tracking IOS apps: %s], call[constant[, ].join, parameter[call[name[settings]][constant[ios]]]]]]
variable[itunes] assign[=] call[name[tornado].ioloop.PeriodicCallback, parameter[call[name[partial], parameter[name[app].poll_store, constant[ios]]], binary_operation[constant[1000] * call[name[settings]][constant[beat]]], name[loop]]]
call[name[itunes].start, parameter[]]
if call[name[settings]][constant[android]] begin[:]
call[name[logger].info, parameter[constant[Tracking Android apps: %s], call[constant[, ].join, parameter[call[name[settings]][constant[android]]]]]]
variable[google_play] assign[=] call[name[tornado].ioloop.PeriodicCallback, parameter[call[name[partial], parameter[name[app].poll_store, constant[android]]], binary_operation[constant[1000] * call[name[settings]][constant[beat]]], name[loop]]]
call[name[google_play].start, parameter[]]
call[name[echo_channel_map], parameter[name[channels]]]
if call[name[settings]][constant[ios]] begin[:]
call[name[app].poll_store, parameter[constant[ios]]]
if call[name[settings]][constant[android]] begin[:]
call[name[app].poll_store, parameter[constant[android]]]
if call[name[settings]][constant[stats]] begin[:]
variable[port] assign[=] call[name[int], parameter[call[name[settings]][constant[stats]]]]
call[name[logger].debug, parameter[binary_operation[constant[Serving metrics server on port %s] <ast.Mod object at 0x7da2590d6920> name[port]]]]
call[name[start_http_server], parameter[name[port]]]
if call[name[settings]][constant[daemonize]] begin[:]
call[name[loop].start, parameter[]] | keyword[def] identifier[cli] (** identifier[settings] ):
literal[string]
identifier[setup_logging] ( identifier[settings] )
identifier[settings] = identifier[setup_languages] ( identifier[settings] )
identifier[channels] = identifier[setup_channel_map] ( identifier[settings] )
identifier[app] = identifier[CriticApp] (** identifier[dict] ( identifier[settings] , identifier[channels] = identifier[channels] ))
keyword[if] identifier[settings] [ literal[string] ]:
identifier[app] . identifier[sentry_client] = identifier[Client] ( identifier[settings] [ literal[string] ])
identifier[logger] . identifier[debug] ( literal[string] % identifier[settings] [ literal[string] ])
keyword[else] :
identifier[app] . identifier[sentry_client] = keyword[None]
keyword[if] identifier[settings] [ literal[string] ]:
identifier[click] . identifier[echo] ( literal[string] % identifier[critics] . identifier[__version__] )
keyword[return]
keyword[if] keyword[not] ( identifier[settings] [ literal[string] ] keyword[or] identifier[settings] [ literal[string] ]):
identifier[click] . identifier[echo] ( literal[string] )
keyword[return]
identifier[loop] = identifier[tornado] . identifier[ioloop] . identifier[IOLoop] . identifier[instance] ()
keyword[if] identifier[app] . identifier[load_model] ():
identifier[logger] . identifier[debug] ( literal[string] )
identifier[notify] = keyword[True]
keyword[else] :
identifier[notify] = keyword[False]
keyword[if] identifier[settings] [ literal[string] ]:
identifier[logger] . identifier[info] ( literal[string] , literal[string] . identifier[join] ( identifier[settings] [ literal[string] ]))
identifier[itunes] = identifier[tornado] . identifier[ioloop] . identifier[PeriodicCallback] ( identifier[partial] ( identifier[app] . identifier[poll_store] , literal[string] ),
literal[int] * identifier[settings] [ literal[string] ], identifier[loop] )
identifier[itunes] . identifier[start] ()
keyword[if] identifier[settings] [ literal[string] ]:
identifier[logger] . identifier[info] ( literal[string] , literal[string] . identifier[join] ( identifier[settings] [ literal[string] ]))
identifier[google_play] = identifier[tornado] . identifier[ioloop] . identifier[PeriodicCallback] ( identifier[partial] ( identifier[app] . identifier[poll_store] , literal[string] ),
literal[int] * identifier[settings] [ literal[string] ], identifier[loop] )
identifier[google_play] . identifier[start] ()
identifier[echo_channel_map] ( identifier[channels] )
keyword[if] identifier[settings] [ literal[string] ]:
identifier[app] . identifier[poll_store] ( literal[string] , identifier[notify] = identifier[notify] )
keyword[if] identifier[settings] [ literal[string] ]:
identifier[app] . identifier[poll_store] ( literal[string] , identifier[notify] = identifier[notify] )
keyword[if] identifier[settings] [ literal[string] ]:
identifier[port] = identifier[int] ( identifier[settings] [ literal[string] ])
identifier[logger] . identifier[debug] ( literal[string] % identifier[port] )
identifier[start_http_server] ( identifier[port] )
keyword[if] identifier[settings] [ literal[string] ]:
identifier[loop] . identifier[start] () | def cli(**settings):
"""Notify about new reviews in AppStore and Google Play in slack.
Launch command using supervisor or using screen/tmux/etc.
Reviews are fetched for multiple apps and languages in --beat=300 interval.
"""
setup_logging(settings)
settings = setup_languages(settings)
channels = setup_channel_map(settings)
app = CriticApp(**dict(settings, channels=channels))
if settings['sentry_dsn']:
app.sentry_client = Client(settings['sentry_dsn'])
logger.debug('Errors are reported to %s' % settings['sentry_dsn']) # depends on [control=['if'], data=[]]
else:
app.sentry_client = None
if settings['version']:
click.echo('Version %s' % critics.__version__)
return # depends on [control=['if'], data=[]]
if not (settings['ios'] or settings['android']):
click.echo('Please choose either --ios or --android')
return # depends on [control=['if'], data=[]]
loop = tornado.ioloop.IOLoop.instance()
if app.load_model():
logger.debug('Model loaded OK, not skipping notify on first run')
notify = True # depends on [control=['if'], data=[]]
else:
notify = False
if settings['ios']:
logger.info('Tracking IOS apps: %s', ', '.join(settings['ios']))
itunes = tornado.ioloop.PeriodicCallback(partial(app.poll_store, 'ios'), 1000 * settings['beat'], loop)
itunes.start() # depends on [control=['if'], data=[]]
if settings['android']:
logger.info('Tracking Android apps: %s', ', '.join(settings['android']))
google_play = tornado.ioloop.PeriodicCallback(partial(app.poll_store, 'android'), 1000 * settings['beat'], loop)
google_play.start() # depends on [control=['if'], data=[]]
echo_channel_map(channels)
if settings['ios']:
app.poll_store('ios', notify=notify) # depends on [control=['if'], data=[]]
if settings['android']:
app.poll_store('android', notify=notify) # depends on [control=['if'], data=[]]
if settings['stats']:
port = int(settings['stats'])
logger.debug('Serving metrics server on port %s' % port)
start_http_server(port) # depends on [control=['if'], data=[]]
if settings['daemonize']:
loop.start() # depends on [control=['if'], data=[]] |
def push(self, value: Union[int, bytes]) -> None:
    """Validate *value* and push it onto the stack.

    Raises FullStack once the stack already holds 1024 items.
    """
    # Depth check comes first, before item validation, to preserve the
    # original error ordering.
    depth = len(self.values)
    if depth > 1023:
        raise FullStack('Stack limit reached')
    validate_stack_item(value)
    self.values.append(value)
constant[
Push an item onto the stack.
]
if compare[call[name[len], parameter[name[self].values]] greater[>] constant[1023]] begin[:]
<ast.Raise object at 0x7da1b1720c40>
call[name[validate_stack_item], parameter[name[value]]]
call[name[self].values.append, parameter[name[value]]] | keyword[def] identifier[push] ( identifier[self] , identifier[value] : identifier[Union] [ identifier[int] , identifier[bytes] ])-> keyword[None] :
literal[string]
keyword[if] identifier[len] ( identifier[self] . identifier[values] )> literal[int] :
keyword[raise] identifier[FullStack] ( literal[string] )
identifier[validate_stack_item] ( identifier[value] )
identifier[self] . identifier[values] . identifier[append] ( identifier[value] ) | def push(self, value: Union[int, bytes]) -> None:
"""
Push an item onto the stack.
"""
if len(self.values) > 1023:
raise FullStack('Stack limit reached') # depends on [control=['if'], data=[]]
validate_stack_item(value)
self.values.append(value) |
def output(results_dir, config, parent='../../'):
    """Write the report output for the given test run.

    Compiles the collected results, renders graphs for every transaction
    and timer, and writes the report template into *results_dir*.

    :param str results_dir: the directory for the results
    :param dict config: the configuration of the test
    :param str parent: the parent directory
    :return: True when a report was generated, False when there were no
        transactions to report on
    """
    start = time.time()
    print("Compiling results...")
    results_dir = os.path.abspath(results_dir)
    results = ReportResults(config['run_time'], config['results_ts_interval'])
    results.compile_results()
    print("Results compiled in {} seconds\n".format(time.time() - start))
    if results.total_transactions == 0:
        print("No results, cannot create report")
        return False
    print_infos(results)
    data = {
        'report': results,
        'run_time': config['run_time'],
        'ts_interval': config['results_ts_interval'],
        'turrets_config': results.turrets,
        'results': {"all": results.main_results, "timers": results.timers_results}
    }
    print("Generating graphs...")
    # Renamed from `partial`: that local shadowed functools.partial, which
    # this module imports and uses elsewhere.
    graphs_start = time.time()
    generate_graphs(results.main_results, 'All_Transactions', results_dir)
    for key, value in results.timers_results.items():
        generate_graphs(value, key, results_dir)
    print("All graphs generated in {} seconds\n".format(time.time() - graphs_start))
    write_template(data, results_dir, parent)
    print("Full report generated in {} seconds".format(time.time() - start))
    return True
constant[Write the results output for the given test
:param str results_dir: the directory for the results
:param dict config: the configuration of the test
:param str parents: the parent directory
]
variable[start] assign[=] call[name[time].time, parameter[]]
call[name[print], parameter[constant[Compiling results...]]]
variable[results_dir] assign[=] call[name[os].path.abspath, parameter[name[results_dir]]]
variable[results] assign[=] call[name[ReportResults], parameter[call[name[config]][constant[run_time]], call[name[config]][constant[results_ts_interval]]]]
call[name[results].compile_results, parameter[]]
call[name[print], parameter[call[constant[Results compiled in {} seconds
].format, parameter[binary_operation[call[name[time].time, parameter[]] - name[start]]]]]]
if compare[name[results].total_transactions equal[==] constant[0]] begin[:]
call[name[print], parameter[constant[No results, cannot create report]]]
return[constant[False]]
call[name[print_infos], parameter[name[results]]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da18f58ca00>, <ast.Constant object at 0x7da18f58fa30>, <ast.Constant object at 0x7da18f58f070>, <ast.Constant object at 0x7da18f00c520>, <ast.Constant object at 0x7da18f00ccd0>], [<ast.Name object at 0x7da18f00d9f0>, <ast.Subscript object at 0x7da18f00c3a0>, <ast.Subscript object at 0x7da18f00df90>, <ast.Attribute object at 0x7da18f00d4e0>, <ast.Dict object at 0x7da18f00da80>]]
call[name[print], parameter[constant[Generating graphs...]]]
variable[partial] assign[=] call[name[time].time, parameter[]]
call[name[generate_graphs], parameter[name[results].main_results, constant[All_Transactions], name[results_dir]]]
for taget[tuple[[<ast.Name object at 0x7da18bc73dc0>, <ast.Name object at 0x7da18bc73eb0>]]] in starred[call[name[results].timers_results.items, parameter[]]] begin[:]
call[name[generate_graphs], parameter[name[value], name[key], name[results_dir]]]
call[name[print], parameter[call[constant[All graphs generated in {} seconds
].format, parameter[binary_operation[call[name[time].time, parameter[]] - name[partial]]]]]]
call[name[write_template], parameter[name[data], name[results_dir], name[parent]]]
call[name[print], parameter[call[constant[Full report generated in {} seconds].format, parameter[binary_operation[call[name[time].time, parameter[]] - name[start]]]]]]
return[constant[True]] | keyword[def] identifier[output] ( identifier[results_dir] , identifier[config] , identifier[parent] = literal[string] ):
literal[string]
identifier[start] = identifier[time] . identifier[time] ()
identifier[print] ( literal[string] )
identifier[results_dir] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[results_dir] )
identifier[results] = identifier[ReportResults] ( identifier[config] [ literal[string] ], identifier[config] [ literal[string] ])
identifier[results] . identifier[compile_results] ()
identifier[print] ( literal[string] . identifier[format] ( identifier[time] . identifier[time] ()- identifier[start] ))
keyword[if] identifier[results] . identifier[total_transactions] == literal[int] :
identifier[print] ( literal[string] )
keyword[return] keyword[False]
identifier[print_infos] ( identifier[results] )
identifier[data] ={
literal[string] : identifier[results] ,
literal[string] : identifier[config] [ literal[string] ],
literal[string] : identifier[config] [ literal[string] ],
literal[string] : identifier[results] . identifier[turrets] ,
literal[string] :{ literal[string] : identifier[results] . identifier[main_results] , literal[string] : identifier[results] . identifier[timers_results] }
}
identifier[print] ( literal[string] )
identifier[partial] = identifier[time] . identifier[time] ()
identifier[generate_graphs] ( identifier[results] . identifier[main_results] , literal[string] , identifier[results_dir] )
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[results] . identifier[timers_results] . identifier[items] ():
identifier[generate_graphs] ( identifier[value] , identifier[key] , identifier[results_dir] )
identifier[print] ( literal[string] . identifier[format] ( identifier[time] . identifier[time] ()- identifier[partial] ))
identifier[write_template] ( identifier[data] , identifier[results_dir] , identifier[parent] )
identifier[print] ( literal[string] . identifier[format] ( identifier[time] . identifier[time] ()- identifier[start] ))
keyword[return] keyword[True] | def output(results_dir, config, parent='../../'):
"""Write the results output for the given test
:param str results_dir: the directory for the results
:param dict config: the configuration of the test
:param str parents: the parent directory
"""
start = time.time()
print('Compiling results...')
results_dir = os.path.abspath(results_dir)
results = ReportResults(config['run_time'], config['results_ts_interval'])
results.compile_results()
print('Results compiled in {} seconds\n'.format(time.time() - start))
if results.total_transactions == 0:
print('No results, cannot create report')
return False # depends on [control=['if'], data=[]]
print_infos(results)
data = {'report': results, 'run_time': config['run_time'], 'ts_interval': config['results_ts_interval'], 'turrets_config': results.turrets, 'results': {'all': results.main_results, 'timers': results.timers_results}}
print('Generating graphs...')
partial = time.time()
generate_graphs(results.main_results, 'All_Transactions', results_dir)
for (key, value) in results.timers_results.items():
generate_graphs(value, key, results_dir) # depends on [control=['for'], data=[]]
print('All graphs generated in {} seconds\n'.format(time.time() - partial))
write_template(data, results_dir, parent)
print('Full report generated in {} seconds'.format(time.time() - start))
return True |
def by_puuid(self, region, encrypted_puuid):
    """
    Fetch a summoner record using its PUUID.

    :param string region: The region to execute this request on
    :param string encrypted_puuid: PUUID
    :returns: SummonerDTO: represents a summoner
    """
    endpoint = SummonerApiV4Urls.by_puuid(
        region=region,
        encrypted_puuid=encrypted_puuid,
    )
    url, query = endpoint
    return self._raw_request(self.by_puuid.__name__, region, url, query)
constant[
Get a summoner by PUUID.
:param string region: The region to execute this request on
:param string encrypted_puuid: PUUID
:returns: SummonerDTO: represents a summoner
]
<ast.Tuple object at 0x7da1b1d4eec0> assign[=] call[name[SummonerApiV4Urls].by_puuid, parameter[]]
return[call[name[self]._raw_request, parameter[name[self].by_puuid.__name__, name[region], name[url], name[query]]]] | keyword[def] identifier[by_puuid] ( identifier[self] , identifier[region] , identifier[encrypted_puuid] ):
literal[string]
identifier[url] , identifier[query] = identifier[SummonerApiV4Urls] . identifier[by_puuid] (
identifier[region] = identifier[region] , identifier[encrypted_puuid] = identifier[encrypted_puuid]
)
keyword[return] identifier[self] . identifier[_raw_request] ( identifier[self] . identifier[by_puuid] . identifier[__name__] , identifier[region] , identifier[url] , identifier[query] ) | def by_puuid(self, region, encrypted_puuid):
"""
Get a summoner by PUUID.
:param string region: The region to execute this request on
:param string encrypted_puuid: PUUID
:returns: SummonerDTO: represents a summoner
"""
(url, query) = SummonerApiV4Urls.by_puuid(region=region, encrypted_puuid=encrypted_puuid)
return self._raw_request(self.by_puuid.__name__, region, url, query) |
def config(self):
    """
    Resolve the effective configuration for this node.

    Config resolution order:

     if this is a dependency model:
       - own project config
       - in-model config
       - active project config
     if this is a top-level model:
       - active project config
       - in-model config
    """
    # Base settings; seeds and archives override the default materialization.
    base = {"enabled": True, "materialized": "view"}
    if self.node_type == NodeType.Seed:
        base['materialized'] = 'seed'
    elif self.node_type == NodeType.Archive:
        base['materialized'] = 'archive'

    active_config = self.load_config_from_active_project()
    is_top_level = (
        self.active_project.project_name == self.own_project.project_name)

    if is_top_level:
        return self._merge(base, active_config, self.in_model_config)

    own_config = self.load_config_from_own_project()
    return self._merge(base, own_config, self.in_model_config, active_config)
constant[
Config resolution order:
if this is a dependency model:
- own project config
- in-model config
- active project config
if this is a top-level model:
- active project config
- in-model config
]
variable[defaults] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c7e680>, <ast.Constant object at 0x7da1b1c7ca60>], [<ast.Constant object at 0x7da1b1c7d5d0>, <ast.Constant object at 0x7da1b1c7f3a0>]]
if compare[name[self].node_type equal[==] name[NodeType].Seed] begin[:]
call[name[defaults]][constant[materialized]] assign[=] constant[seed]
variable[active_config] assign[=] call[name[self].load_config_from_active_project, parameter[]]
if compare[name[self].active_project.project_name equal[==] name[self].own_project.project_name] begin[:]
variable[cfg] assign[=] call[name[self]._merge, parameter[name[defaults], name[active_config], name[self].in_model_config]]
return[name[cfg]] | keyword[def] identifier[config] ( identifier[self] ):
literal[string]
identifier[defaults] ={ literal[string] : keyword[True] , literal[string] : literal[string] }
keyword[if] identifier[self] . identifier[node_type] == identifier[NodeType] . identifier[Seed] :
identifier[defaults] [ literal[string] ]= literal[string]
keyword[elif] identifier[self] . identifier[node_type] == identifier[NodeType] . identifier[Archive] :
identifier[defaults] [ literal[string] ]= literal[string]
identifier[active_config] = identifier[self] . identifier[load_config_from_active_project] ()
keyword[if] identifier[self] . identifier[active_project] . identifier[project_name] == identifier[self] . identifier[own_project] . identifier[project_name] :
identifier[cfg] = identifier[self] . identifier[_merge] ( identifier[defaults] , identifier[active_config] ,
identifier[self] . identifier[in_model_config] )
keyword[else] :
identifier[own_config] = identifier[self] . identifier[load_config_from_own_project] ()
identifier[cfg] = identifier[self] . identifier[_merge] (
identifier[defaults] , identifier[own_config] , identifier[self] . identifier[in_model_config] , identifier[active_config]
)
keyword[return] identifier[cfg] | def config(self):
"""
Config resolution order:
if this is a dependency model:
- own project config
- in-model config
- active project config
if this is a top-level model:
- active project config
- in-model config
"""
defaults = {'enabled': True, 'materialized': 'view'}
if self.node_type == NodeType.Seed:
defaults['materialized'] = 'seed' # depends on [control=['if'], data=[]]
elif self.node_type == NodeType.Archive:
defaults['materialized'] = 'archive' # depends on [control=['if'], data=[]]
active_config = self.load_config_from_active_project()
if self.active_project.project_name == self.own_project.project_name:
cfg = self._merge(defaults, active_config, self.in_model_config) # depends on [control=['if'], data=[]]
else:
own_config = self.load_config_from_own_project()
cfg = self._merge(defaults, own_config, self.in_model_config, active_config)
return cfg |
def send(self, *args, **kwargs):
    """Open a fresh SMTP connection and send this envelope through it.

    All positional and keyword arguments are forwarded verbatim to the
    :py:class:`envelopes.conn.SMTP` constructor.

    Returns a tuple of the SMTP object and whatever its send method
    returns."""
    connection = SMTP(*args, **kwargs)
    result = connection.send(self)
    return connection, result
constant[Sends the envelope using a freshly created SMTP connection. *args*
and *kwargs* are passed directly to :py:class:`envelopes.conn.SMTP`
constructor.
Returns a tuple of SMTP object and whatever its send method returns.]
variable[conn] assign[=] call[name[SMTP], parameter[<ast.Starred object at 0x7da18ede40d0>]]
variable[send_result] assign[=] call[name[conn].send, parameter[name[self]]]
return[tuple[[<ast.Name object at 0x7da1b11a2aa0>, <ast.Name object at 0x7da1b11a3370>]]] | keyword[def] identifier[send] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[conn] = identifier[SMTP] (* identifier[args] ,** identifier[kwargs] )
identifier[send_result] = identifier[conn] . identifier[send] ( identifier[self] )
keyword[return] identifier[conn] , identifier[send_result] | def send(self, *args, **kwargs):
"""Sends the envelope using a freshly created SMTP connection. *args*
and *kwargs* are passed directly to :py:class:`envelopes.conn.SMTP`
constructor.
Returns a tuple of SMTP object and whatever its send method returns."""
conn = SMTP(*args, **kwargs)
send_result = conn.send(self)
return (conn, send_result) |
async def blob(self, elem=None, elem_type=None, params=None):
    """
    Loads or dumps a blob, depending on whether this archive is in
    reading or writing mode.

    :param elem: instance to dump, or (optionally) a pre-built instance
        to load into; may be None when only ``elem_type`` is given
    :param elem_type: type of the element; defaults to ``type(elem)``
    :param params: extra serialization parameters forwarded to the
        underlying dump/load helpers
    :return: the tracked/loaded object when reading, or the result of
        the dump when writing
    """
    # Fall back to the concrete class of `elem` when no type was given.
    elem_type = elem_type if elem_type else elem.__class__
    version = await self.version(elem_type, params, elem=elem)
    # If this object was already (de)serialized, reuse the tracked result
    # instead of processing it again.
    if self.is_tracked():
        return self.get_tracked()
    # Types that define their own `boost_serialize` hook take over the
    # whole (de)serialization themselves.
    if hasattr(elem_type, 'boost_serialize'):
        elem = elem_type() if elem is None else elem
        self.pop_track()
        return await elem.boost_serialize(self, elem=elem, elem_type=elem_type, params=params, version=version)
    if self.writing:
        self.pop_track()
        return await self.blob_dump(elem=elem, elem_type=elem_type, params=params)
    else:
        # Reading: load the blob and register it with the tracker so later
        # references resolve to the same object.
        obj = await self.blob_load(elem_type=elem_type, params=params, elem=elem)
        return self.track_obj(obj)
literal[string]
identifier[elem_type] = identifier[elem_type] keyword[if] identifier[elem_type] keyword[else] identifier[elem] . identifier[__class__]
identifier[version] = keyword[await] identifier[self] . identifier[version] ( identifier[elem_type] , identifier[params] , identifier[elem] = identifier[elem] )
keyword[if] identifier[self] . identifier[is_tracked] ():
keyword[return] identifier[self] . identifier[get_tracked] ()
keyword[if] identifier[hasattr] ( identifier[elem_type] , literal[string] ):
identifier[elem] = identifier[elem_type] () keyword[if] identifier[elem] keyword[is] keyword[None] keyword[else] identifier[elem]
identifier[self] . identifier[pop_track] ()
keyword[return] keyword[await] identifier[elem] . identifier[boost_serialize] ( identifier[self] , identifier[elem] = identifier[elem] , identifier[elem_type] = identifier[elem_type] , identifier[params] = identifier[params] , identifier[version] = identifier[version] )
keyword[if] identifier[self] . identifier[writing] :
identifier[self] . identifier[pop_track] ()
keyword[return] keyword[await] identifier[self] . identifier[blob_dump] ( identifier[elem] = identifier[elem] , identifier[elem_type] = identifier[elem_type] , identifier[params] = identifier[params] )
keyword[else] :
identifier[obj] = keyword[await] identifier[self] . identifier[blob_load] ( identifier[elem_type] = identifier[elem_type] , identifier[params] = identifier[params] , identifier[elem] = identifier[elem] )
keyword[return] identifier[self] . identifier[track_obj] ( identifier[obj] ) | async def blob(self, elem=None, elem_type=None, params=None):
"""
Loads/dumps blob
:return:
"""
elem_type = elem_type if elem_type else elem.__class__
version = await self.version(elem_type, params, elem=elem)
if self.is_tracked():
return self.get_tracked() # depends on [control=['if'], data=[]]
if hasattr(elem_type, 'boost_serialize'):
elem = elem_type() if elem is None else elem
self.pop_track()
return await elem.boost_serialize(self, elem=elem, elem_type=elem_type, params=params, version=version) # depends on [control=['if'], data=[]]
if self.writing:
self.pop_track()
return await self.blob_dump(elem=elem, elem_type=elem_type, params=params) # depends on [control=['if'], data=[]]
else:
obj = await self.blob_load(elem_type=elem_type, params=params, elem=elem)
return self.track_obj(obj) |
def running_apps(self):
    """Return a list of running user applications."""
    output = self.adb_shell(RUNNING_APPS_CMD)
    if not output:
        return []
    apps = []
    for raw_line in output.splitlines():
        stripped = raw_line.strip()
        if stripped:
            # The app identifier is the last whitespace-separated field.
            apps.append(stripped.rsplit(' ', 1)[-1])
    return apps
constant[Return a list of running user applications.]
variable[ps] assign[=] call[name[self].adb_shell, parameter[name[RUNNING_APPS_CMD]]]
if name[ps] begin[:]
return[<ast.ListComp object at 0x7da20c6e6590>]
return[list[[]]] | keyword[def] identifier[running_apps] ( identifier[self] ):
literal[string]
identifier[ps] = identifier[self] . identifier[adb_shell] ( identifier[RUNNING_APPS_CMD] )
keyword[if] identifier[ps] :
keyword[return] [ identifier[line] . identifier[strip] (). identifier[rsplit] ( literal[string] , literal[int] )[- literal[int] ] keyword[for] identifier[line] keyword[in] identifier[ps] . identifier[splitlines] () keyword[if] identifier[line] . identifier[strip] ()]
keyword[return] [] | def running_apps(self):
"""Return a list of running user applications."""
ps = self.adb_shell(RUNNING_APPS_CMD)
if ps:
return [line.strip().rsplit(' ', 1)[-1] for line in ps.splitlines() if line.strip()] # depends on [control=['if'], data=[]]
return [] |
def visit_literal_block(self, node):
    """Check syntax of code block."""
    # "..code-block:: language" stores the language directly on the node.
    language = node.get('language', None)
    is_code_node = False
    if not language:
        # "..code:: language" records it as the last entry of 'classes'.
        is_code_node = True
        classes = node.get('classes')
        if 'code' not in classes:
            return
        language = classes[-1]

    if language in self.ignore['languages']:
        return

    looks_like_doctest = (
        language == 'doctest' or
        (language == 'python' and
         node.rawsource.lstrip().startswith('>>> ')))
    if looks_like_doctest:
        self.visit_doctest_block(node)
        raise docutils.nodes.SkipNode

    # Each checker takes (source, working_directory) and yields a runnable.
    checkers = {
        'bash': bash_checker,
        'c': c_checker,
        'cpp': cpp_checker,
        'json': lambda source, _: lambda: check_json(source),
        'xml': lambda source, _: lambda: check_xml(source),
        'python': lambda source, _: lambda: check_python(source),
        'rst': lambda source, _: lambda: check_rst(source,
                                                   ignore=self.ignore),
    }
    checker = checkers.get(language)
    if checker is not None:
        self._add_check(node=node,
                        run=checker(node.rawsource, self.working_directory),
                        language=language,
                        is_code_node=is_code_node)

    raise docutils.nodes.SkipNode
constant[Check syntax of code block.]
variable[language] assign[=] call[name[node].get, parameter[constant[language], constant[None]]]
variable[is_code_node] assign[=] constant[False]
if <ast.UnaryOp object at 0x7da1b0747c40> begin[:]
variable[is_code_node] assign[=] constant[True]
variable[classes] assign[=] call[name[node].get, parameter[constant[classes]]]
if compare[constant[code] in name[classes]] begin[:]
variable[language] assign[=] call[name[classes]][<ast.UnaryOp object at 0x7da1b07478b0>]
if compare[name[language] in call[name[self].ignore][constant[languages]]] begin[:]
return[None]
if <ast.BoolOp object at 0x7da1b0747610> begin[:]
call[name[self].visit_doctest_block, parameter[name[node]]]
<ast.Raise object at 0x7da1b0747220>
variable[checker] assign[=] call[dictionary[[<ast.Constant object at 0x7da1b0747070>, <ast.Constant object at 0x7da1b0747040>, <ast.Constant object at 0x7da1b0747010>, <ast.Constant object at 0x7da1b0746fe0>, <ast.Constant object at 0x7da1b0746fb0>, <ast.Constant object at 0x7da1b0746f80>, <ast.Constant object at 0x7da1b0746f50>], [<ast.Name object at 0x7da1b0746f20>, <ast.Name object at 0x7da1b0746ef0>, <ast.Name object at 0x7da1b0746ec0>, <ast.Lambda object at 0x7da1b0746e90>, <ast.Lambda object at 0x7da1b0746ce0>, <ast.Lambda object at 0x7da1b0746b30>, <ast.Lambda object at 0x7da1b0746980>]].get, parameter[name[language]]]
if name[checker] begin[:]
variable[run] assign[=] call[name[checker], parameter[name[node].rawsource, name[self].working_directory]]
call[name[self]._add_check, parameter[]]
<ast.Raise object at 0x7da1b088afb0> | keyword[def] identifier[visit_literal_block] ( identifier[self] , identifier[node] ):
literal[string]
identifier[language] = identifier[node] . identifier[get] ( literal[string] , keyword[None] )
identifier[is_code_node] = keyword[False]
keyword[if] keyword[not] identifier[language] :
identifier[is_code_node] = keyword[True]
identifier[classes] = identifier[node] . identifier[get] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[classes] :
identifier[language] = identifier[classes] [- literal[int] ]
keyword[else] :
keyword[return]
keyword[if] identifier[language] keyword[in] identifier[self] . identifier[ignore] [ literal[string] ]:
keyword[return]
keyword[if] identifier[language] == literal[string] keyword[or] (
identifier[language] == literal[string] keyword[and]
identifier[node] . identifier[rawsource] . identifier[lstrip] (). identifier[startswith] ( literal[string] )):
identifier[self] . identifier[visit_doctest_block] ( identifier[node] )
keyword[raise] identifier[docutils] . identifier[nodes] . identifier[SkipNode]
identifier[checker] ={
literal[string] : identifier[bash_checker] ,
literal[string] : identifier[c_checker] ,
literal[string] : identifier[cpp_checker] ,
literal[string] : keyword[lambda] identifier[source] , identifier[_] : keyword[lambda] : identifier[check_json] ( identifier[source] ),
literal[string] : keyword[lambda] identifier[source] , identifier[_] : keyword[lambda] : identifier[check_xml] ( identifier[source] ),
literal[string] : keyword[lambda] identifier[source] , identifier[_] : keyword[lambda] : identifier[check_python] ( identifier[source] ),
literal[string] : keyword[lambda] identifier[source] , identifier[_] : keyword[lambda] : identifier[check_rst] ( identifier[source] ,
identifier[ignore] = identifier[self] . identifier[ignore] )
}. identifier[get] ( identifier[language] )
keyword[if] identifier[checker] :
identifier[run] = identifier[checker] ( identifier[node] . identifier[rawsource] , identifier[self] . identifier[working_directory] )
identifier[self] . identifier[_add_check] ( identifier[node] = identifier[node] ,
identifier[run] = identifier[run] ,
identifier[language] = identifier[language] ,
identifier[is_code_node] = identifier[is_code_node] )
keyword[raise] identifier[docutils] . identifier[nodes] . identifier[SkipNode] | def visit_literal_block(self, node):
"""Check syntax of code block."""
# For "..code-block:: language"
language = node.get('language', None)
is_code_node = False
if not language:
# For "..code:: language"
is_code_node = True
classes = node.get('classes')
if 'code' in classes:
language = classes[-1] # depends on [control=['if'], data=['classes']]
else:
return # depends on [control=['if'], data=[]]
if language in self.ignore['languages']:
return # depends on [control=['if'], data=[]]
if language == 'doctest' or (language == 'python' and node.rawsource.lstrip().startswith('>>> ')):
self.visit_doctest_block(node)
raise docutils.nodes.SkipNode # depends on [control=['if'], data=[]]
checker = {'bash': bash_checker, 'c': c_checker, 'cpp': cpp_checker, 'json': lambda source, _: lambda : check_json(source), 'xml': lambda source, _: lambda : check_xml(source), 'python': lambda source, _: lambda : check_python(source), 'rst': lambda source, _: lambda : check_rst(source, ignore=self.ignore)}.get(language)
if checker:
run = checker(node.rawsource, self.working_directory)
self._add_check(node=node, run=run, language=language, is_code_node=is_code_node) # depends on [control=['if'], data=[]]
raise docutils.nodes.SkipNode |
def list_channels(self, collection_name, experiment_name):
    """
    List all channels belonging to the named experiment that is part
    of the named collection.

    Args:
        collection_name (string): Name of the parent collection.
        experiment_name (string): Name of the parent experiment.

    Returns:
        (list)

    Raises:
        requests.HTTPError on failure.
    """
    # The concrete channel type is irrelevant for a listing request.
    placeholder_type = 'image'
    template = ChannelResource(
        name='',
        collection_name=collection_name,
        experiment_name=experiment_name,
        type=placeholder_type)
    return self._list_resource(template)
constant[
List all channels belonging to the named experiment that is part
of the named collection.
Args:
collection_name (string): Name of the parent collection.
experiment_name (string): Name of the parent experiment.
Returns:
(list)
Raises:
requests.HTTPError on failure.
]
variable[dont_care] assign[=] constant[image]
variable[chan] assign[=] call[name[ChannelResource], parameter[]]
return[call[name[self]._list_resource, parameter[name[chan]]]] | keyword[def] identifier[list_channels] ( identifier[self] , identifier[collection_name] , identifier[experiment_name] ):
literal[string]
identifier[dont_care] = literal[string]
identifier[chan] = identifier[ChannelResource] (
identifier[name] = literal[string] , identifier[collection_name] = identifier[collection_name] ,
identifier[experiment_name] = identifier[experiment_name] , identifier[type] = identifier[dont_care] )
keyword[return] identifier[self] . identifier[_list_resource] ( identifier[chan] ) | def list_channels(self, collection_name, experiment_name):
"""
List all channels belonging to the named experiment that is part
of the named collection.
Args:
collection_name (string): Name of the parent collection.
experiment_name (string): Name of the parent experiment.
Returns:
(list)
Raises:
requests.HTTPError on failure.
"""
dont_care = 'image'
chan = ChannelResource(name='', collection_name=collection_name, experiment_name=experiment_name, type=dont_care)
return self._list_resource(chan) |
def check_ownership(obj, raise_if_false=True):
    """Meant to be used in `pre_update` hooks on models to enforce ownership

    Admin have all access, and other users need to be referenced on either
    the created_by field that comes with the ``AuditMixin``, or in a field
    named ``owners`` which is expected to be a one-to-many with the User
    model. It is meant to be used in the ModelView's pre_update hook in
    which raising will abort the update.
    """
    if not obj:
        return False

    security_exception = SupersetSecurityException(
        "You don't have the rights to alter [{}]".format(obj))

    if g.user.is_anonymous:
        if raise_if_false:
            raise security_exception
        return False

    if 'Admin' in [role.name for role in get_user_roles()]:
        return True

    session = db.create_scoped_session()
    orig_obj = session.query(obj.__class__).filter_by(id=obj.id).first()

    # Collect candidate owners across the different ORM model shapes.
    owners = list(getattr(orig_obj, 'owners', []))
    for attr in ('owner', 'created_by'):
        if hasattr(orig_obj, attr):
            owners.append(getattr(orig_obj, attr))

    owner_names = [owner.username for owner in owners if owner]
    user_is_owner = (
        g.user and hasattr(g.user, 'username') and
        g.user.username in owner_names)
    if user_is_owner:
        return True

    if raise_if_false:
        raise security_exception
    return False
return False | def function[check_ownership, parameter[obj, raise_if_false]]:
constant[Meant to be used in `pre_update` hooks on models to enforce ownership
Admin have all access, and other users need to be referenced on either
the created_by field that comes with the ``AuditMixin``, or in a field
named ``owners`` which is expected to be a one-to-many with the User
model. It is meant to be used in the ModelView's pre_update hook in
which raising will abort the update.
]
if <ast.UnaryOp object at 0x7da1b2031150> begin[:]
return[constant[False]]
variable[security_exception] assign[=] call[name[SupersetSecurityException], parameter[call[constant[You don't have the rights to alter [{}]].format, parameter[name[obj]]]]]
if name[g].user.is_anonymous begin[:]
if name[raise_if_false] begin[:]
<ast.Raise object at 0x7da1b2033460>
return[constant[False]]
variable[roles] assign[=] <ast.ListComp object at 0x7da1b20334f0>
if compare[constant[Admin] in name[roles]] begin[:]
return[constant[True]]
variable[session] assign[=] call[name[db].create_scoped_session, parameter[]]
variable[orig_obj] assign[=] call[call[call[name[session].query, parameter[name[obj].__class__]].filter_by, parameter[]].first, parameter[]]
variable[owners] assign[=] list[[]]
if call[name[hasattr], parameter[name[orig_obj], constant[owners]]] begin[:]
<ast.AugAssign object at 0x7da1b2032e00>
if call[name[hasattr], parameter[name[orig_obj], constant[owner]]] begin[:]
<ast.AugAssign object at 0x7da1b2033a90>
if call[name[hasattr], parameter[name[orig_obj], constant[created_by]]] begin[:]
<ast.AugAssign object at 0x7da20e9b3520>
variable[owner_names] assign[=] <ast.ListComp object at 0x7da20e9b1f30>
if <ast.BoolOp object at 0x7da20e9b1f90> begin[:]
return[constant[True]]
if name[raise_if_false] begin[:]
<ast.Raise object at 0x7da20e9b1600> | keyword[def] identifier[check_ownership] ( identifier[obj] , identifier[raise_if_false] = keyword[True] ):
literal[string]
keyword[if] keyword[not] identifier[obj] :
keyword[return] keyword[False]
identifier[security_exception] = identifier[SupersetSecurityException] (
literal[string] . identifier[format] ( identifier[obj] ))
keyword[if] identifier[g] . identifier[user] . identifier[is_anonymous] :
keyword[if] identifier[raise_if_false] :
keyword[raise] identifier[security_exception]
keyword[return] keyword[False]
identifier[roles] =[ identifier[r] . identifier[name] keyword[for] identifier[r] keyword[in] identifier[get_user_roles] ()]
keyword[if] literal[string] keyword[in] identifier[roles] :
keyword[return] keyword[True]
identifier[session] = identifier[db] . identifier[create_scoped_session] ()
identifier[orig_obj] = identifier[session] . identifier[query] ( identifier[obj] . identifier[__class__] ). identifier[filter_by] ( identifier[id] = identifier[obj] . identifier[id] ). identifier[first] ()
identifier[owners] =[]
keyword[if] identifier[hasattr] ( identifier[orig_obj] , literal[string] ):
identifier[owners] += identifier[orig_obj] . identifier[owners]
keyword[if] identifier[hasattr] ( identifier[orig_obj] , literal[string] ):
identifier[owners] +=[ identifier[orig_obj] . identifier[owner] ]
keyword[if] identifier[hasattr] ( identifier[orig_obj] , literal[string] ):
identifier[owners] +=[ identifier[orig_obj] . identifier[created_by] ]
identifier[owner_names] =[ identifier[o] . identifier[username] keyword[for] identifier[o] keyword[in] identifier[owners] keyword[if] identifier[o] ]
keyword[if] (
identifier[g] . identifier[user] keyword[and] identifier[hasattr] ( identifier[g] . identifier[user] , literal[string] ) keyword[and]
identifier[g] . identifier[user] . identifier[username] keyword[in] identifier[owner_names] ):
keyword[return] keyword[True]
keyword[if] identifier[raise_if_false] :
keyword[raise] identifier[security_exception]
keyword[else] :
keyword[return] keyword[False] | def check_ownership(obj, raise_if_false=True):
"""Meant to be used in `pre_update` hooks on models to enforce ownership
Admin have all access, and other users need to be referenced on either
the created_by field that comes with the ``AuditMixin``, or in a field
named ``owners`` which is expected to be a one-to-many with the User
model. It is meant to be used in the ModelView's pre_update hook in
which raising will abort the update.
"""
if not obj:
return False # depends on [control=['if'], data=[]]
security_exception = SupersetSecurityException("You don't have the rights to alter [{}]".format(obj))
if g.user.is_anonymous:
if raise_if_false:
raise security_exception # depends on [control=['if'], data=[]]
return False # depends on [control=['if'], data=[]]
roles = [r.name for r in get_user_roles()]
if 'Admin' in roles:
return True # depends on [control=['if'], data=[]]
session = db.create_scoped_session()
orig_obj = session.query(obj.__class__).filter_by(id=obj.id).first()
# Making a list of owners that works across ORM models
owners = []
if hasattr(orig_obj, 'owners'):
owners += orig_obj.owners # depends on [control=['if'], data=[]]
if hasattr(orig_obj, 'owner'):
owners += [orig_obj.owner] # depends on [control=['if'], data=[]]
if hasattr(orig_obj, 'created_by'):
owners += [orig_obj.created_by] # depends on [control=['if'], data=[]]
owner_names = [o.username for o in owners if o]
if g.user and hasattr(g.user, 'username') and (g.user.username in owner_names):
return True # depends on [control=['if'], data=[]]
if raise_if_false:
raise security_exception # depends on [control=['if'], data=[]]
else:
return False |
def createmeta(accountable, project_key, issue_type=None):
    """
    Create new issue.
    """
    metadata = accountable.create_meta(project_key, issue_type)
    # First row is the table header.
    rows = [[
        'project_key', 'issuetype_name', 'field_key', 'field_name', 'required'
    ]]
    for project in metadata:
        project_key_value = project['key']
        for issuetype in project['issuetypes']:
            issuetype_name = issuetype['name']
            for field_key, field in issuetype['fields'].items():
                rows.append([
                    project_key_value,
                    issuetype_name,
                    field_key,
                    field['name'],
                    field['required'],
                ])
    print_table(SingleTable(rows))
constant[
Create new issue.
]
variable[metadata] assign[=] call[name[accountable].create_meta, parameter[name[project_key], name[issue_type]]]
variable[headers] assign[=] list[[<ast.Constant object at 0x7da18f09c970>, <ast.Constant object at 0x7da18f09d660>, <ast.Constant object at 0x7da18f09e0b0>, <ast.Constant object at 0x7da18f09e620>, <ast.Constant object at 0x7da18f09d2d0>]]
variable[rows] assign[=] list[[<ast.Name object at 0x7da18f09efb0>]]
for taget[name[project]] in starred[name[metadata]] begin[:]
variable[key] assign[=] call[name[project]][constant[key]]
variable[issuetypes] assign[=] call[name[project]][constant[issuetypes]]
for taget[name[issuetype]] in starred[name[issuetypes]] begin[:]
variable[name] assign[=] call[name[issuetype]][constant[name]]
variable[fields] assign[=] call[name[issuetype]][constant[fields]]
for taget[tuple[[<ast.Name object at 0x7da18f09ec80>, <ast.Name object at 0x7da18f09e950>]]] in starred[call[name[fields].items, parameter[]]] begin[:]
variable[field_key] assign[=] name[k]
variable[field_name] assign[=] call[name[v]][constant[name]]
variable[required] assign[=] call[name[v]][constant[required]]
call[name[rows].append, parameter[list[[<ast.Name object at 0x7da18f09fe20>, <ast.Name object at 0x7da18f09f4c0>, <ast.Name object at 0x7da18f09feb0>, <ast.Name object at 0x7da18f09ff10>, <ast.Name object at 0x7da18f09d180>]]]]
call[name[print_table], parameter[call[name[SingleTable], parameter[name[rows]]]]] | keyword[def] identifier[createmeta] ( identifier[accountable] , identifier[project_key] , identifier[issue_type] = keyword[None] ):
literal[string]
identifier[metadata] = identifier[accountable] . identifier[create_meta] ( identifier[project_key] , identifier[issue_type] )
identifier[headers] =[
literal[string] , literal[string] , literal[string] , literal[string] , literal[string]
]
identifier[rows] =[ identifier[headers] ]
keyword[for] identifier[project] keyword[in] identifier[metadata] :
identifier[key] = identifier[project] [ literal[string] ]
identifier[issuetypes] = identifier[project] [ literal[string] ]
keyword[for] identifier[issuetype] keyword[in] identifier[issuetypes] :
identifier[name] = identifier[issuetype] [ literal[string] ]
identifier[fields] = identifier[issuetype] [ literal[string] ]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[fields] . identifier[items] ():
identifier[field_key] = identifier[k]
identifier[field_name] = identifier[v] [ literal[string] ]
identifier[required] = identifier[v] [ literal[string] ]
identifier[rows] . identifier[append] ([ identifier[key] , identifier[name] , identifier[field_key] , identifier[field_name] , identifier[required] ])
identifier[print_table] ( identifier[SingleTable] ( identifier[rows] )) | def createmeta(accountable, project_key, issue_type=None):
"""
Create new issue.
"""
metadata = accountable.create_meta(project_key, issue_type)
headers = ['project_key', 'issuetype_name', 'field_key', 'field_name', 'required']
rows = [headers]
for project in metadata:
key = project['key']
issuetypes = project['issuetypes']
for issuetype in issuetypes:
name = issuetype['name']
fields = issuetype['fields']
for (k, v) in fields.items():
field_key = k
field_name = v['name']
required = v['required']
rows.append([key, name, field_key, field_name, required]) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['issuetype']] # depends on [control=['for'], data=['project']]
print_table(SingleTable(rows)) |
def getPayloadStruct(self, attributes, objType=None):
    """ Function getPayloadStruct
    Build the payload structure used for a creation or a modification

    @param attributes: The data to wrap into the payload
    @param objType: NOT USED in this class
    @return RETURN: The payload dict
    """
    payload = {self.payloadObj: attributes}
    # Some resources expect the payload nested under a parent key.
    if self.setInParentPayload:
        payload = {self.parentPayloadObject: payload}
    return payload
constant[ Function getPayloadStruct
Get the payload structure to do a creation or a modification
@param key: The key to modify
@param attribute: The data
@param objType: NOT USED in this class
@return RETURN: The API result
]
if name[self].setInParentPayload begin[:]
return[dictionary[[<ast.Attribute object at 0x7da1b26ad2a0>], [<ast.Dict object at 0x7da1b26acd60>]]] | keyword[def] identifier[getPayloadStruct] ( identifier[self] , identifier[attributes] , identifier[objType] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[setInParentPayload] :
keyword[return] { identifier[self] . identifier[parentPayloadObject] :
{ identifier[self] . identifier[payloadObj] : identifier[attributes] }}
keyword[else] :
keyword[return] { identifier[self] . identifier[payloadObj] : identifier[attributes] } | def getPayloadStruct(self, attributes, objType=None):
""" Function getPayloadStruct
Get the payload structure to do a creation or a modification
@param key: The key to modify
@param attribute: The data
@param objType: NOT USED in this class
@return RETURN: The API result
"""
if self.setInParentPayload:
return {self.parentPayloadObject: {self.payloadObj: attributes}} # depends on [control=['if'], data=[]]
else:
return {self.payloadObj: attributes} |
def titled_box(self, titles, contents, tdir='h', cdir='h'):
    """
    Helper function to build a box containing a list of elements,
    with a title above and/or below, or left and/or right of the
    box. (e.g. a class name on top, or brackets on both sides.)
    The elements given must already have been transformed into
    Tag instances.
    Arguments:
        titles: A pair of strings to display on top and bottom
            (if tdir=='v') or left and right (if tdir=='h').
            If either or both titles are None, they will be
            omitted.
        contents: A list of Tags.
        tdir: tdir=='h' (default) means the titles will be on
            the left and right. tdir=='v' means they will be
            on top and bottom.
        cdir: cdir=='h' (default) means the contents will be
            stacked horizontally. cdir=='v' means they will
            be stacked vertically.
    """
    H = self.H
    # A 2-tuple is an (opening, closing) pair; anything else is a
    # single opening title with no closing one.
    if isinstance(titles, tuple) and len(titles) == 2:
        open_title, close_title = titles
    else:
        open_title, close_title = titles, None
    box = H.div[f'hrepr-titled-{tdir}']
    body = H.div[f'hrepr-contents-{cdir}'].fill(contents)
    if open_title:
        box = box(H.div['hrepr-title'](open_title))
    box = box(body)
    if close_title:
        box = box(H.div['hrepr-title'](close_title))
    return box
constant[
Helper function to build a box containing a list of elements,
with a title above and/or below, or left and/or right of the
box. (e.g. a class name on top, or brackets on both sides.)
The elements given must already have been transformed into
Tag instances.
Arguments:
titles: A pair of strings to display on top and bottom
(if tdir=='v') or left and right (if tdir=='h').
If either or both titles are None, they will be
omitted.
contents: A list of Tags.
tdir: tdir=='h' (default) means the titles will be on
the left and right. tdir=='v' means they will be
on top and bottom.
cdir: cdir=='h' (default) means the contents will be
stacked horizontally. cdir=='v' means they will
be stacked vertically.
]
variable[H] assign[=] name[self].H
def function[wrapt, parameter[x]]:
return[call[call[name[H].div][constant[hrepr-title]], parameter[name[x]]]]
variable[rval] assign[=] call[name[H].div][<ast.JoinedStr object at 0x7da1b26ae830>]
variable[contents] assign[=] call[call[name[H].div][<ast.JoinedStr object at 0x7da1b26afaf0>].fill, parameter[name[contents]]]
if <ast.BoolOp object at 0x7da1b26ae5f0> begin[:]
<ast.Tuple object at 0x7da1b26ac5b0> assign[=] name[titles]
if name[open] begin[:]
variable[rval] assign[=] call[name[rval], parameter[call[name[wrapt], parameter[name[open]]]]]
variable[rval] assign[=] call[name[rval], parameter[name[contents]]]
if name[close] begin[:]
variable[rval] assign[=] call[name[rval], parameter[call[name[wrapt], parameter[name[close]]]]]
return[name[rval]] | keyword[def] identifier[titled_box] ( identifier[self] , identifier[titles] , identifier[contents] , identifier[tdir] = literal[string] , identifier[cdir] = literal[string] ):
literal[string]
identifier[H] = identifier[self] . identifier[H]
keyword[def] identifier[wrapt] ( identifier[x] ):
keyword[return] identifier[H] . identifier[div] [ literal[string] ]( identifier[x] )
identifier[rval] = identifier[H] . identifier[div] [ literal[string] ]
identifier[contents] = identifier[H] . identifier[div] [ literal[string] ]. identifier[fill] ( identifier[contents] )
keyword[if] identifier[isinstance] ( identifier[titles] , identifier[tuple] ) keyword[and] identifier[len] ( identifier[titles] )== literal[int] :
identifier[open] , identifier[close] = identifier[titles]
keyword[else] :
identifier[open] , identifier[close] = identifier[titles] , keyword[None]
keyword[if] identifier[open] :
identifier[rval] = identifier[rval] ( identifier[wrapt] ( identifier[open] ))
identifier[rval] = identifier[rval] ( identifier[contents] )
keyword[if] identifier[close] :
identifier[rval] = identifier[rval] ( identifier[wrapt] ( identifier[close] ))
keyword[return] identifier[rval] | def titled_box(self, titles, contents, tdir='h', cdir='h'):
"""
Helper function to build a box containing a list of elements,
with a title above and/or below, or left and/or right of the
box. (e.g. a class name on top, or brackets on both sides.)
The elements given must already have been transformed into
Tag instances.
Arguments:
titles: A pair of strings to display on top and bottom
(if tdir=='v') or left and right (if tdir=='h').
If either or both titles are None, they will be
omitted.
contents: A list of Tags.
tdir: tdir=='h' (default) means the titles will be on
the left and right. tdir=='v' means they will be
on top and bottom.
cdir: cdir=='h' (default) means the contents will be
stacked horizontally. cdir=='v' means they will
be stacked vertically.
"""
H = self.H
def wrapt(x):
return H.div['hrepr-title'](x)
rval = H.div[f'hrepr-titled-{tdir}']
contents = H.div[f'hrepr-contents-{cdir}'].fill(contents)
if isinstance(titles, tuple) and len(titles) == 2:
(open, close) = titles # depends on [control=['if'], data=[]]
else:
(open, close) = (titles, None)
if open:
rval = rval(wrapt(open)) # depends on [control=['if'], data=[]]
rval = rval(contents)
if close:
rval = rval(wrapt(close)) # depends on [control=['if'], data=[]]
return rval |
def consume_input(self, inp):
    """
    Return True/False if the machine accepts/reject the input.
    Args:
        inp (str): input string to be consumed
    Retunrs:
        bool: A true or false value depending on if the DFA
            accepts the provided input
    Raises:
        RuntimeError: when no outgoing arc of the current state
            accepts the next character (incomplete SFA).
    """
    current = self.states[0]
    for symbol in inp:
        # Follow the first arc whose guard accepts this character.
        for transition in current.arcs:
            if transition.guard.is_sat(symbol):
                current = self.states[transition.dst_state]
                break
        else:
            raise RuntimeError('SFA not complete')
    return current.final
constant[
Return True/False if the machine accepts/reject the input.
Args:
inp (str): input string to be consumed
Retunrs:
bool: A true or false value depending on if the DFA
accepts the provided input
]
variable[cur_state] assign[=] call[name[self].states][constant[0]]
for taget[name[character]] in starred[name[inp]] begin[:]
variable[found] assign[=] constant[False]
for taget[name[arc]] in starred[name[cur_state].arcs] begin[:]
if call[name[arc].guard.is_sat, parameter[name[character]]] begin[:]
variable[cur_state] assign[=] call[name[self].states][name[arc].dst_state]
variable[found] assign[=] constant[True]
break
if <ast.UnaryOp object at 0x7da1b28fd9f0> begin[:]
<ast.Raise object at 0x7da1b28fe7d0>
return[name[cur_state].final] | keyword[def] identifier[consume_input] ( identifier[self] , identifier[inp] ):
literal[string]
identifier[cur_state] = identifier[self] . identifier[states] [ literal[int] ]
keyword[for] identifier[character] keyword[in] identifier[inp] :
identifier[found] = keyword[False]
keyword[for] identifier[arc] keyword[in] identifier[cur_state] . identifier[arcs] :
keyword[if] identifier[arc] . identifier[guard] . identifier[is_sat] ( identifier[character] ):
identifier[cur_state] = identifier[self] . identifier[states] [ identifier[arc] . identifier[dst_state] ]
identifier[found] = keyword[True]
keyword[break]
keyword[if] keyword[not] identifier[found] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[return] identifier[cur_state] . identifier[final] | def consume_input(self, inp):
"""
Return True/False if the machine accepts/reject the input.
Args:
inp (str): input string to be consumed
Retunrs:
bool: A true or false value depending on if the DFA
accepts the provided input
"""
cur_state = self.states[0]
for character in inp:
found = False
for arc in cur_state.arcs:
if arc.guard.is_sat(character):
cur_state = self.states[arc.dst_state]
found = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['arc']]
if not found:
raise RuntimeError('SFA not complete') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['character']]
return cur_state.final |
def usable_cpu_count():
    """Get number of CPUs usable by the current process.
    Takes into consideration cpusets restrictions.
    Returns
    -------
    int
    """
    # Preferred: the affinity mask of this process (Linux only).
    try:
        affinity = os.sched_getaffinity(0)
    except AttributeError:
        pass
    else:
        return len(affinity)
    # Fallback: psutil's affinity; last resort is the raw CPU count.
    try:
        return len(psutil.Process().cpu_affinity())
    except AttributeError:
        return os.cpu_count()
constant[Get number of CPUs usable by the current process.
Takes into consideration cpusets restrictions.
Returns
-------
int
]
<ast.Try object at 0x7da20e955480>
return[name[result]] | keyword[def] identifier[usable_cpu_count] ():
literal[string]
keyword[try] :
identifier[result] = identifier[len] ( identifier[os] . identifier[sched_getaffinity] ( literal[int] ))
keyword[except] identifier[AttributeError] :
keyword[try] :
identifier[result] = identifier[len] ( identifier[psutil] . identifier[Process] (). identifier[cpu_affinity] ())
keyword[except] identifier[AttributeError] :
identifier[result] = identifier[os] . identifier[cpu_count] ()
keyword[return] identifier[result] | def usable_cpu_count():
"""Get number of CPUs usable by the current process.
Takes into consideration cpusets restrictions.
Returns
-------
int
"""
try:
result = len(os.sched_getaffinity(0)) # depends on [control=['try'], data=[]]
except AttributeError:
try:
result = len(psutil.Process().cpu_affinity()) # depends on [control=['try'], data=[]]
except AttributeError:
result = os.cpu_count() # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
return result |
def is_valid_pid_to_be_updated(did):
    """Return True if ``did`` is the PID of an object that can be updated (obsoleted)
    with MNStorage.update()"""
    # The object must exist, and none of the disqualifying states may hold.
    blockers = (is_local_replica, is_archived, is_obsoleted)
    return is_existing_object(did) and all(not blocked(did) for blocked in blockers)
constant[Return True if ``did`` is the PID of an object that can be updated (obsoleted)
with MNStorage.update()]
return[<ast.BoolOp object at 0x7da1b1b68ee0>] | keyword[def] identifier[is_valid_pid_to_be_updated] ( identifier[did] ):
literal[string]
keyword[return] (
identifier[is_existing_object] ( identifier[did] )
keyword[and] keyword[not] identifier[is_local_replica] ( identifier[did] )
keyword[and] keyword[not] identifier[is_archived] ( identifier[did] )
keyword[and] keyword[not] identifier[is_obsoleted] ( identifier[did] )
) | def is_valid_pid_to_be_updated(did):
"""Return True if ``did`` is the PID of an object that can be updated (obsoleted)
with MNStorage.update()"""
return is_existing_object(did) and (not is_local_replica(did)) and (not is_archived(did)) and (not is_obsoleted(did)) |
def _check_and_handle_includes(self, from_file):
    """Look for an optional INCLUDE section in the given file path. If
    the parser set `paths`, it is cleared so that they do not keep
    showing up when additional files are parsed.

    Relative include paths are resolved against the directory of
    ``from_file``. Each included file is parsed at most once; seeing
    the same path twice raises RecursionInConfigFile.
    """
    logger.debug("Check/handle includes from %s", from_file)
    try:
        paths = self._parser.get("INCLUDE", "paths")
    except (config_parser.NoSectionError,
            config_parser.NoOptionError) as exc:
        # No INCLUDE section/option: nothing to do.
        logger.debug("_check_and_handle_includes: EXCEPTION: %s", exc)
        return
    paths_lines = [p.strip() for p in paths.split("\n")]
    logger.debug("paths = %s (wanted just once; CLEARING)", paths_lines)
    # Clear the option so it is not re-processed on later parses.
    self._parser.remove_option("INCLUDE", "paths")
    for f in paths_lines:
        abspath = (f if os.path.isabs(f) else
                   os.path.abspath(
                       os.path.join(os.path.dirname(from_file), f)))
        use_path = os.path.normpath(abspath)
        if use_path in self._parsed_files:
            # BUG FIX: exception constructors do not lazily %-format
            # their arguments the way logging calls do, so the message
            # must be interpolated explicitly.
            raise RecursionInConfigFile(
                "In %s: %s already read" % (from_file, use_path))
        self._parsed_files.append(use_path)
        self._handle_rc_file(use_path)
constant[Look for an optional INCLUDE section in the given file path. If
the parser set `paths`, it is cleared so that they do not keep
showing up when additional files are parsed.
]
call[name[logger].debug, parameter[constant[Check/handle includes from %s], name[from_file]]]
<ast.Try object at 0x7da1b1471a80>
variable[paths_lines] assign[=] <ast.ListComp object at 0x7da1b1473190>
call[name[logger].debug, parameter[constant[paths = %s (wanted just once; CLEARING)], name[paths_lines]]]
call[name[self]._parser.remove_option, parameter[constant[INCLUDE], constant[paths]]]
for taget[name[f]] in starred[name[paths_lines]] begin[:]
variable[abspath] assign[=] <ast.IfExp object at 0x7da20e963dc0>
variable[use_path] assign[=] call[name[os].path.normpath, parameter[name[abspath]]]
if compare[name[use_path] in name[self]._parsed_files] begin[:]
<ast.Raise object at 0x7da20e962470>
call[name[self]._parsed_files.append, parameter[name[use_path]]]
call[name[self]._handle_rc_file, parameter[name[use_path]]] | keyword[def] identifier[_check_and_handle_includes] ( identifier[self] , identifier[from_file] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] , identifier[from_file] )
keyword[try] :
identifier[paths] = identifier[self] . identifier[_parser] . identifier[get] ( literal[string] , literal[string] )
keyword[except] ( identifier[config_parser] . identifier[NoSectionError] ,
identifier[config_parser] . identifier[NoOptionError] ) keyword[as] identifier[exc] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[exc] )
keyword[return]
identifier[paths_lines] =[ identifier[p] . identifier[strip] () keyword[for] identifier[p] keyword[in] identifier[paths] . identifier[split] ( literal[string] )]
identifier[logger] . identifier[debug] ( literal[string] , identifier[paths_lines] )
identifier[self] . identifier[_parser] . identifier[remove_option] ( literal[string] , literal[string] )
keyword[for] identifier[f] keyword[in] identifier[paths_lines] :
identifier[abspath] =( identifier[f] keyword[if] identifier[os] . identifier[path] . identifier[isabs] ( identifier[f] ) keyword[else]
identifier[os] . identifier[path] . identifier[abspath] (
identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[from_file] ), identifier[f] )))
identifier[use_path] = identifier[os] . identifier[path] . identifier[normpath] ( identifier[abspath] )
keyword[if] identifier[use_path] keyword[in] identifier[self] . identifier[_parsed_files] :
keyword[raise] identifier[RecursionInConfigFile] ( literal[string] ,
identifier[from_file] , identifier[use_path] )
identifier[self] . identifier[_parsed_files] . identifier[append] ( identifier[use_path] )
identifier[self] . identifier[_handle_rc_file] ( identifier[use_path] ) | def _check_and_handle_includes(self, from_file):
"""Look for an optional INCLUDE section in the given file path. If
the parser set `paths`, it is cleared so that they do not keep
showing up when additional files are parsed.
"""
logger.debug('Check/handle includes from %s', from_file)
try:
paths = self._parser.get('INCLUDE', 'paths') # depends on [control=['try'], data=[]]
except (config_parser.NoSectionError, config_parser.NoOptionError) as exc:
logger.debug('_check_and_handle_includes: EXCEPTION: %s', exc)
return # depends on [control=['except'], data=['exc']]
paths_lines = [p.strip() for p in paths.split('\n')]
logger.debug('paths = %s (wanted just once; CLEARING)', paths_lines)
self._parser.remove_option('INCLUDE', 'paths')
for f in paths_lines:
abspath = f if os.path.isabs(f) else os.path.abspath(os.path.join(os.path.dirname(from_file), f))
use_path = os.path.normpath(abspath)
if use_path in self._parsed_files:
raise RecursionInConfigFile('In %s: %s already read', from_file, use_path) # depends on [control=['if'], data=['use_path']]
self._parsed_files.append(use_path)
self._handle_rc_file(use_path) # depends on [control=['for'], data=['f']] |
def flat_data(self):
    """
    Pass all the data from modified_data to original_data
    """
    def _flatten(value):
        # Recursively flatten nested models; plain values pass through
        # unchanged (they have no flat_data attribute).
        try:
            value.flat_data()
        except AttributeError:
            pass
        return value

    merged = self.__original_data__
    merged.update(self.__modified_data__)
    deleted = self.__deleted_fields__
    self.__original_data__ = {
        field: _flatten(data)
        for field, data in merged.items()
        if field not in deleted
    }
    self.clear_modified_data()
constant[
Pass all the data from modified_data to original_data
]
def function[flat_field, parameter[value]]:
constant[
Flat field data
]
<ast.Try object at 0x7da1b0aa6b90>
variable[modified_dict] assign[=] name[self].__original_data__
call[name[modified_dict].update, parameter[name[self].__modified_data__]]
name[self].__original_data__ assign[=] <ast.DictComp object at 0x7da1b0aa5d20>
call[name[self].clear_modified_data, parameter[]] | keyword[def] identifier[flat_data] ( identifier[self] ):
literal[string]
keyword[def] identifier[flat_field] ( identifier[value] ):
literal[string]
keyword[try] :
identifier[value] . identifier[flat_data] ()
keyword[return] identifier[value]
keyword[except] identifier[AttributeError] :
keyword[return] identifier[value]
identifier[modified_dict] = identifier[self] . identifier[__original_data__]
identifier[modified_dict] . identifier[update] ( identifier[self] . identifier[__modified_data__] )
identifier[self] . identifier[__original_data__] ={ identifier[k] : identifier[flat_field] ( identifier[v] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[modified_dict] . identifier[items] ()
keyword[if] identifier[k] keyword[not] keyword[in] identifier[self] . identifier[__deleted_fields__] }
identifier[self] . identifier[clear_modified_data] () | def flat_data(self):
"""
Pass all the data from modified_data to original_data
"""
def flat_field(value):
"""
Flat field data
"""
try:
value.flat_data()
return value # depends on [control=['try'], data=[]]
except AttributeError:
return value # depends on [control=['except'], data=[]]
modified_dict = self.__original_data__
modified_dict.update(self.__modified_data__)
self.__original_data__ = {k: flat_field(v) for (k, v) in modified_dict.items() if k not in self.__deleted_fields__}
self.clear_modified_data() |
def infer_format(filename: str) -> str:
    """Return extension identifying format of given filename
    (including the leading dot; empty string when there is none)."""
    return os.path.splitext(filename)[1]
constant[Return extension identifying format of given filename]
<ast.Tuple object at 0x7da20e957e50> assign[=] call[name[os].path.splitext, parameter[name[filename]]]
return[name[ext]] | keyword[def] identifier[infer_format] ( identifier[filename] : identifier[str] )-> identifier[str] :
literal[string]
identifier[_] , identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[filename] )
keyword[return] identifier[ext] | def infer_format(filename: str) -> str:
"""Return extension identifying format of given filename"""
(_, ext) = os.path.splitext(filename)
return ext |
def a(self, **kwargs):
    '''
    Returns the lattice parameter, a, in Angstroms at a given
    temperature, `T`, in Kelvin (default: 300 K).
    '''
    temperature = kwargs.get('T', 300.)
    # Linear thermal correction away from the 300 K reference value.
    correction = self.thermal_expansion(**kwargs) * (temperature - 300.)
    return self.a_300K(**kwargs) + correction
constant[
Returns the lattice parameter, a, in Angstroms at a given
temperature, `T`, in Kelvin (default: 300 K).
]
variable[T] assign[=] call[name[kwargs].get, parameter[constant[T], constant[300.0]]]
return[binary_operation[call[name[self].a_300K, parameter[]] + binary_operation[call[name[self].thermal_expansion, parameter[]] * binary_operation[name[T] - constant[300.0]]]]] | keyword[def] identifier[a] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[T] = identifier[kwargs] . identifier[get] ( literal[string] , literal[int] )
keyword[return] ( identifier[self] . identifier[a_300K] (** identifier[kwargs] )+
identifier[self] . identifier[thermal_expansion] (** identifier[kwargs] )*( identifier[T] - literal[int] )) | def a(self, **kwargs):
"""
Returns the lattice parameter, a, in Angstroms at a given
temperature, `T`, in Kelvin (default: 300 K).
"""
T = kwargs.get('T', 300.0)
return self.a_300K(**kwargs) + self.thermal_expansion(**kwargs) * (T - 300.0) |
def string_to_double_precision_float(s: str) -> float:
    """
    Double precision float in Fortran file will have form 'x.ydz' or 'x.yDz', this cannot be convert directly to float
    by Python ``float`` function, so I wrote this function to help conversion. For example,
    :param s: a string denoting a double precision number
    :return: a Python floating point number
    .. doctest::
        >>> string_to_double_precision_float('1d-82')
        1e-82
        >>> string_to_double_precision_float('1.0D-82')
        1e-82
        >>> string_to_double_precision_float('0.8D234')
        8e+233
        >>> string_to_double_precision_float('.8d234')
        8e+233
    """
    # BUG FIX: the pattern must be a raw string -- "\d" and "\." in a
    # plain string are invalid escape sequences (a SyntaxWarning in
    # recent Python versions). The regex itself is unchanged.
    first, second, exponential = re.match(
        r"(-?\d*)\.?(-?\d*)d(-?\d+)", s, re.IGNORECASE).groups()
    # Rebuild the literal in Python's 'e' notation and convert.
    return float(first + '.' + second + 'e' + exponential)
constant[
Double precision float in Fortran file will have form 'x.ydz' or 'x.yDz', this cannot be convert directly to float
by Python ``float`` function, so I wrote this function to help conversion. For example,
:param s: a string denoting a double precision number
:return: a Python floating point number
.. doctest::
>>> string_to_double_precision_float('1d-82')
1e-82
>>> string_to_double_precision_float('1.0D-82')
1e-82
>>> string_to_double_precision_float('0.8D234')
8e+233
>>> string_to_double_precision_float('.8d234')
8e+233
]
<ast.Tuple object at 0x7da20e961330> assign[=] call[call[name[re].match, parameter[constant[(-?\d*)\.?(-?\d*)d(-?\d+)], name[s], name[re].IGNORECASE]].groups, parameter[]]
return[call[name[float], parameter[binary_operation[binary_operation[binary_operation[binary_operation[name[first] + constant[.]] + name[second]] + constant[e]] + name[exponential]]]]] | keyword[def] identifier[string_to_double_precision_float] ( identifier[s] : identifier[str] )-> identifier[float] :
literal[string]
identifier[first] , identifier[second] , identifier[exponential] = identifier[re] . identifier[match] (
literal[string] , identifier[s] , identifier[re] . identifier[IGNORECASE] ). identifier[groups] ()
keyword[return] identifier[float] ( identifier[first] + literal[string] + identifier[second] + literal[string] + identifier[exponential] ) | def string_to_double_precision_float(s: str) -> float:
"""
Double precision float in Fortran file will have form 'x.ydz' or 'x.yDz', this cannot be convert directly to float
by Python ``float`` function, so I wrote this function to help conversion. For example,
:param s: a string denoting a double precision number
:return: a Python floating point number
.. doctest::
>>> string_to_double_precision_float('1d-82')
1e-82
>>> string_to_double_precision_float('1.0D-82')
1e-82
>>> string_to_double_precision_float('0.8D234')
8e+233
>>> string_to_double_precision_float('.8d234')
8e+233
"""
(first, second, exponential) = re.match('(-?\\d*)\\.?(-?\\d*)d(-?\\d+)', s, re.IGNORECASE).groups()
return float(first + '.' + second + 'e' + exponential) |
def redirect_stdout(new_stdout):
    """Redirect the stdout
    Args:
        new_stdout (io.StringIO): New stdout to use instead
    """
    saved_stdout = sys.stdout
    sys.stdout = new_stdout
    try:
        yield None
    finally:
        # Always restore the original stream, even if the body raised.
        sys.stdout = saved_stdout
constant[Redirect the stdout
Args:
new_stdout (io.StringIO): New stdout to use instead
]
<ast.Tuple object at 0x7da1b007cd30> assign[=] tuple[[<ast.Attribute object at 0x7da1b007c040>, <ast.Name object at 0x7da1b007c3a0>]]
<ast.Try object at 0x7da1b007d540> | keyword[def] identifier[redirect_stdout] ( identifier[new_stdout] ):
literal[string]
identifier[old_stdout] , identifier[sys] . identifier[stdout] = identifier[sys] . identifier[stdout] , identifier[new_stdout]
keyword[try] :
keyword[yield] keyword[None]
keyword[finally] :
identifier[sys] . identifier[stdout] = identifier[old_stdout] | def redirect_stdout(new_stdout):
"""Redirect the stdout
Args:
new_stdout (io.StringIO): New stdout to use instead
"""
(old_stdout, sys.stdout) = (sys.stdout, new_stdout)
try:
yield None # depends on [control=['try'], data=[]]
finally:
sys.stdout = old_stdout |
def new_figure_manager_given_figure(num, figure):
    """
    Create a new figure manager instance for the given figure.
    """
    # Wrap the figure in an Agg canvas, then hand it to the base manager.
    return FigureManagerBase(FigureCanvasAgg(figure), num)
constant[
Create a new figure manager instance for the given figure.
]
variable[canvas] assign[=] call[name[FigureCanvasAgg], parameter[name[figure]]]
variable[manager] assign[=] call[name[FigureManagerBase], parameter[name[canvas], name[num]]]
return[name[manager]] | keyword[def] identifier[new_figure_manager_given_figure] ( identifier[num] , identifier[figure] ):
literal[string]
identifier[canvas] = identifier[FigureCanvasAgg] ( identifier[figure] )
identifier[manager] = identifier[FigureManagerBase] ( identifier[canvas] , identifier[num] )
keyword[return] identifier[manager] | def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasAgg(figure)
manager = FigureManagerBase(canvas, num)
return manager |
def db_uri(self):
    """The connection URL for the remote database. For example:
    postgres://some-long-uid@ec2-52-7-232-59.compute-1.amazonaws.com:5432/d5fou154it1nvt
    """
    output = self.get("DATABASE", subcommand="pg:credentials:url")
    # The URI is the postgres:// URL at the end of the command output.
    found = re.search("(postgres://.*)$", output)
    if found:
        return found.group(1)
    raise NameError(
        "Could not retrieve the DB URI. Check for error output from "
        "heroku above the stack trace."
    )
constant[The connection URL for the remote database. For example:
postgres://some-long-uid@ec2-52-7-232-59.compute-1.amazonaws.com:5432/d5fou154it1nvt
]
variable[output] assign[=] call[name[self].get, parameter[constant[DATABASE]]]
variable[match] assign[=] call[name[re].search, parameter[constant[(postgres://.*)$], name[output]]]
if compare[name[match] is constant[None]] begin[:]
<ast.Raise object at 0x7da18ede7fd0>
return[call[name[match].group, parameter[constant[1]]]] | keyword[def] identifier[db_uri] ( identifier[self] ):
literal[string]
identifier[output] = identifier[self] . identifier[get] ( literal[string] , identifier[subcommand] = literal[string] )
identifier[match] = identifier[re] . identifier[search] ( literal[string] , identifier[output] )
keyword[if] identifier[match] keyword[is] keyword[None] :
keyword[raise] identifier[NameError] (
literal[string]
literal[string]
)
keyword[return] identifier[match] . identifier[group] ( literal[int] ) | def db_uri(self):
"""The connection URL for the remote database. For example:
postgres://some-long-uid@ec2-52-7-232-59.compute-1.amazonaws.com:5432/d5fou154it1nvt
"""
output = self.get('DATABASE', subcommand='pg:credentials:url')
match = re.search('(postgres://.*)$', output)
if match is None:
raise NameError('Could not retrieve the DB URI. Check for error output from heroku above the stack trace.') # depends on [control=['if'], data=[]]
return match.group(1) |
def evaluate_classifier(input_, labels, per_example_weights=None,
                        topk=1, name=PROVIDED, phase=Phase.train):
    """Calculates the total ratio of correct predictions across all examples seen.
    In test and infer mode, this creates variables in the graph collection
    pt.GraphKeys.TEST_VARIABLES and does not add them to
    tf.GraphKeys.ALL_VARIABLES. This means that you must initialize them
    separately from tf.global_variables_initializer().
    In the case of `topk == 1`, this breaks ties left-to-right, in all other cases
    it follows `tf.nn.in_top_k`. *Note*: the tie behavior will change in the
    future.
    Args:
      input_: A rank 2 Tensor or a Pretty Tensor holding the result of the model.
      labels: A float or double `Tensor` containing the target for this layer.
      per_example_weights: Weights that are applied to every example.
      topk: Integer k for 'accuracy at top k' metric.
      name: The name of this layer.
      phase: In training mode the batch accuracy is returned and in eval/infer
        modes a total average is calculated.
    Returns:
      A Pretty Tensor with the ratio of correct to total examples seen.
    Raises:
      ValueError: If labels is not the correct shape.
    """
    # The fraction helper returns (correct, total) plus layer parameters;
    # divide to get the accuracy ratio.
    fraction = input_.evaluate_classifier_fraction(
        labels,
        per_example_weights=per_example_weights,
        topk=topk,
        name=name,
        phase=phase)
    ratio = fraction[0] / fraction[1]
    return input_.with_tensor(ratio, fraction.layer_parameters)
constant[Calculates the total ratio of correct predictions across all examples seen.
In test and infer mode, this creates variables in the graph collection
pt.GraphKeys.TEST_VARIABLES and does not add them to
tf.GraphKeys.ALL_VARIABLES. This means that you must initialize them
separately from tf.global_variables_initializer().
In the case of `topk == 1`, this breaks ties left-to-right, in all other cases
it follows `tf.nn.in_top_k`. *Note*: the tie behavior will change in the
future.
Args:
input_: A rank 2 Tensor or a Pretty Tensor holding the result of the model.
labels: A float or double `Tensor` containing the target for this layer.
per_example_weights: Weights that are applied to every example.
topk: Integer k for 'accuracy at top k' metric.
name: The name of this layer.
phase: In training mode the batch accuracy is returned and in eval/infer
modes a total average is calculated.
Returns:
A Pretty Tensor with the ratio of correct to total examples seen.
Raises:
ValueError: If labels is not the correct shape.
]
variable[result] assign[=] call[name[input_].evaluate_classifier_fraction, parameter[name[labels]]]
return[call[name[input_].with_tensor, parameter[binary_operation[call[name[result]][constant[0]] / call[name[result]][constant[1]]], name[result].layer_parameters]]] | keyword[def] identifier[evaluate_classifier] ( identifier[input_] , identifier[labels] , identifier[per_example_weights] = keyword[None] ,
identifier[topk] = literal[int] , identifier[name] = identifier[PROVIDED] , identifier[phase] = identifier[Phase] . identifier[train] ):
literal[string]
identifier[result] = identifier[input_] . identifier[evaluate_classifier_fraction] (
identifier[labels] ,
identifier[per_example_weights] = identifier[per_example_weights] ,
identifier[topk] = identifier[topk] ,
identifier[name] = identifier[name] ,
identifier[phase] = identifier[phase] )
keyword[return] identifier[input_] . identifier[with_tensor] ( identifier[result] [ literal[int] ]/ identifier[result] [ literal[int] ], identifier[result] . identifier[layer_parameters] ) | def evaluate_classifier(input_, labels, per_example_weights=None, topk=1, name=PROVIDED, phase=Phase.train):
"""Calculates the total ratio of correct predictions across all examples seen.
In test and infer mode, this creates variables in the graph collection
pt.GraphKeys.TEST_VARIABLES and does not add them to
tf.GraphKeys.ALL_VARIABLES. This means that you must initialize them
separately from tf.global_variables_initializer().
In the case of `topk == 1`, this breaks ties left-to-right, in all other cases
it follows `tf.nn.in_top_k`. *Note*: the tie behavior will change in the
future.
Args:
input_: A rank 2 Tensor or a Pretty Tensor holding the result of the model.
labels: A float or double `Tensor` containing the target for this layer.
per_example_weights: Weights that are applied to every example.
topk: Integer k for 'accuracy at top k' metric.
name: The name of this layer.
phase: In training mode the batch accuracy is returned and in eval/infer
modes a total average is calculated.
Returns:
A Pretty Tensor with the ratio of correct to total examples seen.
Raises:
ValueError: If labels is not the correct shape.
"""
result = input_.evaluate_classifier_fraction(labels, per_example_weights=per_example_weights, topk=topk, name=name, phase=phase)
return input_.with_tensor(result[0] / result[1], result.layer_parameters) |
def predict(self, trial_history):
    """Predict the expected final performance for this hyperparameter config.

    Parameters
    ----------
    trial_history: list
        The history performance matrix of each trial.

    Returns
    -------
    float or None
        Expected final result performance of this hyperparameter config,
        or None when too few curve models fit the history well enough.
    """
    self.trial_history = trial_history
    self.point_num = len(trial_history)
    self.fit_theta()
    self.filter_curve()
    if self.effective_model_num < LEAST_FITTED_FUNCTION:
        # The individual curves' predictions are too scattered to combine;
        # more observations are required before a forecast is meaningful.
        return None
    self.mcmc_sampling()
    # Average the combined-model prediction over all sampled weight vectors.
    total = sum(self.f_comb(self.target_pos, self.weight_samples[idx])
                for idx in range(NUM_OF_INSTANCE))
    return total / NUM_OF_INSTANCE
constant[predict the value of target position
Parameters
----------
trial_history: list
The history performance matrix of each trial.
Returns
-------
float
expected final result performance of this hyperparameter config
]
name[self].trial_history assign[=] name[trial_history]
name[self].point_num assign[=] call[name[len], parameter[name[trial_history]]]
call[name[self].fit_theta, parameter[]]
call[name[self].filter_curve, parameter[]]
if compare[name[self].effective_model_num less[<] name[LEAST_FITTED_FUNCTION]] begin[:]
return[constant[None]]
call[name[self].mcmc_sampling, parameter[]]
variable[ret] assign[=] constant[0]
for taget[name[i]] in starred[call[name[range], parameter[name[NUM_OF_INSTANCE]]]] begin[:]
<ast.AugAssign object at 0x7da18bccb9d0>
return[binary_operation[name[ret] / name[NUM_OF_INSTANCE]]] | keyword[def] identifier[predict] ( identifier[self] , identifier[trial_history] ):
literal[string]
identifier[self] . identifier[trial_history] = identifier[trial_history]
identifier[self] . identifier[point_num] = identifier[len] ( identifier[trial_history] )
identifier[self] . identifier[fit_theta] ()
identifier[self] . identifier[filter_curve] ()
keyword[if] identifier[self] . identifier[effective_model_num] < identifier[LEAST_FITTED_FUNCTION] :
keyword[return] keyword[None]
identifier[self] . identifier[mcmc_sampling] ()
identifier[ret] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[NUM_OF_INSTANCE] ):
identifier[ret] += identifier[self] . identifier[f_comb] ( identifier[self] . identifier[target_pos] , identifier[self] . identifier[weight_samples] [ identifier[i] ])
keyword[return] identifier[ret] / identifier[NUM_OF_INSTANCE] | def predict(self, trial_history):
"""predict the value of target position
Parameters
----------
trial_history: list
The history performance matrix of each trial.
Returns
-------
float
expected final result performance of this hyperparameter config
"""
self.trial_history = trial_history
self.point_num = len(trial_history)
self.fit_theta()
self.filter_curve()
if self.effective_model_num < LEAST_FITTED_FUNCTION:
# different curve's predictions are too scattered, requires more information
return None # depends on [control=['if'], data=[]]
self.mcmc_sampling()
ret = 0
for i in range(NUM_OF_INSTANCE):
ret += self.f_comb(self.target_pos, self.weight_samples[i]) # depends on [control=['for'], data=['i']]
return ret / NUM_OF_INSTANCE |
def _evaluate(self,R,z,phi=0.,t=0.):
    """
    NAME:
       _evaluate
    PURPOSE:
       evaluate the potential at R,z
    INPUT:
       R - Galactocentric cylindrical radius
       z - vertical height
       phi - azimuth
       t - time
    OUTPUT:
       Phi(R,z)
    HISTORY:
       2016-11-02 - Started - Bovy (UofT)
    """
    # Convert cylindrical (R, phi, z) to the potential's Cartesian frame,
    # then get the two auxiliary distances used by the closed-form solution.
    x, y, z = self._compute_xyz(R, phi, z, t)
    Tp, Tm = self._compute_TpTm(x, y, z)
    numerator = x - self._a + Tm
    denominator = x + self._a + Tp
    # Same division order as the analytic expression: log(...)/2/a.
    return numpy.log(numerator / denominator) / 2. / self._a
constant[
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2016-11-02 - Started - Bovy (UofT)
]
<ast.Tuple object at 0x7da1b0c416f0> assign[=] call[name[self]._compute_xyz, parameter[name[R], name[phi], name[z], name[t]]]
<ast.Tuple object at 0x7da1b0c423e0> assign[=] call[name[self]._compute_TpTm, parameter[name[x], name[y], name[z]]]
return[binary_operation[binary_operation[call[name[numpy].log, parameter[binary_operation[binary_operation[binary_operation[name[x] - name[self]._a] + name[Tm]] / binary_operation[binary_operation[name[x] + name[self]._a] + name[Tp]]]]] / constant[2.0]] / name[self]._a]] | keyword[def] identifier[_evaluate] ( identifier[self] , identifier[R] , identifier[z] , identifier[phi] = literal[int] , identifier[t] = literal[int] ):
literal[string]
identifier[x] , identifier[y] , identifier[z] = identifier[self] . identifier[_compute_xyz] ( identifier[R] , identifier[phi] , identifier[z] , identifier[t] )
identifier[Tp] , identifier[Tm] = identifier[self] . identifier[_compute_TpTm] ( identifier[x] , identifier[y] , identifier[z] )
keyword[return] identifier[numpy] . identifier[log] (( identifier[x] - identifier[self] . identifier[_a] + identifier[Tm] )/( identifier[x] + identifier[self] . identifier[_a] + identifier[Tp] ))/ literal[int] / identifier[self] . identifier[_a] | def _evaluate(self, R, z, phi=0.0, t=0.0):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2016-11-02 - Started - Bovy (UofT)
"""
(x, y, z) = self._compute_xyz(R, phi, z, t)
(Tp, Tm) = self._compute_TpTm(x, y, z)
return numpy.log((x - self._a + Tm) / (x + self._a + Tp)) / 2.0 / self._a |
def scan_for_devices(timeout: float) -> List[Tuple[str, str]]:
    """Scan for bluetooth low energy devices for *timeout* seconds.

    Returns a list of (address, name) tuples, where name may be None when
    the device did not advertise one.  Note this must be run as root!
    """
    from bluepy.btle import Scanner
    # AD type 9 is the GAP "Complete Local Name" advertisement field.
    return [(dev.addr, dev.getValueText(9))
            for dev in Scanner().scan(timeout)]
constant[Scan for bluetooth low energy devices.
Note this must be run as root!]
from relative_module[bluepy.btle] import module[Scanner]
variable[scanner] assign[=] call[name[Scanner], parameter[]]
variable[result] assign[=] list[[]]
for taget[name[device]] in starred[call[name[scanner].scan, parameter[name[timeout]]]] begin[:]
call[name[result].append, parameter[tuple[[<ast.Attribute object at 0x7da1b0e17820>, <ast.Call object at 0x7da1b0e15b40>]]]]
return[name[result]] | keyword[def] identifier[scan_for_devices] ( identifier[timeout] : identifier[float] )-> identifier[List] [ identifier[Tuple] [ identifier[str] , identifier[str] ]]:
literal[string]
keyword[from] identifier[bluepy] . identifier[btle] keyword[import] identifier[Scanner]
identifier[scanner] = identifier[Scanner] ()
identifier[result] =[]
keyword[for] identifier[device] keyword[in] identifier[scanner] . identifier[scan] ( identifier[timeout] ):
identifier[result] . identifier[append] (( identifier[device] . identifier[addr] , identifier[device] . identifier[getValueText] ( literal[int] )))
keyword[return] identifier[result] | def scan_for_devices(timeout: float) -> List[Tuple[str, str]]:
"""Scan for bluetooth low energy devices.
Note this must be run as root!"""
from bluepy.btle import Scanner
scanner = Scanner()
result = []
for device in scanner.scan(timeout):
result.append((device.addr, device.getValueText(9))) # depends on [control=['for'], data=['device']]
return result |
def _embedding_spectral(matrix, dimensions=3, unit_length=True,
affinity_matrix=None, sigma=1):
"""
Private method to calculate Spectral embedding
:param dimensions: (int)
:return: coordinate matrix (np.array)
"""
if affinity_matrix is None:
aff = rbf(matrix, sigma=sigma)
else:
aff = affinity_matrix
coords = sklearn.manifold.spectral_embedding(aff, dimensions)
return normalise_rows(coords) if unit_length else coords | def function[_embedding_spectral, parameter[matrix, dimensions, unit_length, affinity_matrix, sigma]]:
constant[
Private method to calculate Spectral embedding
:param dimensions: (int)
:return: coordinate matrix (np.array)
]
if compare[name[affinity_matrix] is constant[None]] begin[:]
variable[aff] assign[=] call[name[rbf], parameter[name[matrix]]]
variable[coords] assign[=] call[name[sklearn].manifold.spectral_embedding, parameter[name[aff], name[dimensions]]]
return[<ast.IfExp object at 0x7da18bc71ea0>] | keyword[def] identifier[_embedding_spectral] ( identifier[matrix] , identifier[dimensions] = literal[int] , identifier[unit_length] = keyword[True] ,
identifier[affinity_matrix] = keyword[None] , identifier[sigma] = literal[int] ):
literal[string]
keyword[if] identifier[affinity_matrix] keyword[is] keyword[None] :
identifier[aff] = identifier[rbf] ( identifier[matrix] , identifier[sigma] = identifier[sigma] )
keyword[else] :
identifier[aff] = identifier[affinity_matrix]
identifier[coords] = identifier[sklearn] . identifier[manifold] . identifier[spectral_embedding] ( identifier[aff] , identifier[dimensions] )
keyword[return] identifier[normalise_rows] ( identifier[coords] ) keyword[if] identifier[unit_length] keyword[else] identifier[coords] | def _embedding_spectral(matrix, dimensions=3, unit_length=True, affinity_matrix=None, sigma=1):
"""
Private method to calculate Spectral embedding
:param dimensions: (int)
:return: coordinate matrix (np.array)
"""
if affinity_matrix is None:
aff = rbf(matrix, sigma=sigma) # depends on [control=['if'], data=[]]
else:
aff = affinity_matrix
coords = sklearn.manifold.spectral_embedding(aff, dimensions)
return normalise_rows(coords) if unit_length else coords |
def get_file(self, file_path, mode="r"):
    """
    provide File object for the file at 'file_path' (resolved via cont_path)
    :param file_path: str, path to the file
    :param mode: str, mode used when opening the file
    :return: File instance
    """
    resolved = self.cont_path(file_path)
    return open(resolved, mode=mode)
constant[
provide File object specified via 'file_path'
:param file_path: str, path to the file
:param mode: str, mode used when opening the file
:return: File instance
]
return[call[name[open], parameter[call[name[self].cont_path, parameter[name[file_path]]]]]] | keyword[def] identifier[get_file] ( identifier[self] , identifier[file_path] , identifier[mode] = literal[string] ):
literal[string]
keyword[return] identifier[open] ( identifier[self] . identifier[cont_path] ( identifier[file_path] ), identifier[mode] = identifier[mode] ) | def get_file(self, file_path, mode='r'):
"""
provide File object specified via 'file_path'
:param file_path: str, path to the file
:param mode: str, mode used when opening the file
:return: File instance
"""
return open(self.cont_path(file_path), mode=mode) |
def mutation_jwt_refresh_token_required(fn):
    """
    A decorator to protect a mutation.
    If you decorate a mutation with this, it will ensure that the requester
    has a valid refresh token before allowing the mutation to be called.
    """
    @wraps(fn)
    def wrapper(cls, *args, **kwargs):
        arg_name = current_app.config['JWT_REFRESH_TOKEN_ARGUMENT_NAME']
        token = kwargs.pop(arg_name)
        try:
            verify_refresh_jwt_in_argument(token)
        except Exception as err:
            # Verification failed: return an auth-error payload instead of
            # executing the mutation.
            return cls(AuthInfoField(message=str(err)))
        # NOTE(review): `cls` is consumed here and intentionally not
        # forwarded to `fn` -- this mirrors the original call convention.
        return fn(*args, **kwargs)
    return wrapper
constant[
A decorator to protect a mutation.
If you decorate anmutation with this, it will ensure that the requester
has a valid refresh token before allowing the mutation to be called.
]
def function[wrapper, parameter[cls]]:
variable[token] assign[=] call[name[kwargs].pop, parameter[call[name[current_app].config][constant[JWT_REFRESH_TOKEN_ARGUMENT_NAME]]]]
<ast.Try object at 0x7da1b1ad9ab0>
return[call[name[fn], parameter[<ast.Starred object at 0x7da1b190a9b0>]]]
return[name[wrapper]] | keyword[def] identifier[mutation_jwt_refresh_token_required] ( identifier[fn] ):
literal[string]
@ identifier[wraps] ( identifier[fn] )
keyword[def] identifier[wrapper] ( identifier[cls] ,* identifier[args] ,** identifier[kwargs] ):
identifier[token] = identifier[kwargs] . identifier[pop] ( identifier[current_app] . identifier[config] [ literal[string] ])
keyword[try] :
identifier[verify_refresh_jwt_in_argument] ( identifier[token] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[return] identifier[cls] ( identifier[AuthInfoField] ( identifier[message] = identifier[str] ( identifier[e] )))
keyword[return] identifier[fn] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[wrapper] | def mutation_jwt_refresh_token_required(fn):
"""
A decorator to protect a mutation.
If you decorate anmutation with this, it will ensure that the requester
has a valid refresh token before allowing the mutation to be called.
"""
@wraps(fn)
def wrapper(cls, *args, **kwargs):
token = kwargs.pop(current_app.config['JWT_REFRESH_TOKEN_ARGUMENT_NAME'])
try:
verify_refresh_jwt_in_argument(token) # depends on [control=['try'], data=[]]
except Exception as e:
return cls(AuthInfoField(message=str(e))) # depends on [control=['except'], data=['e']]
return fn(*args, **kwargs)
return wrapper |
def detect(self, text):
    """Detect language of the input text.

    Batch detection is supported: passing a list returns one result per item.

    :param text: The source text(s) whose language you want to identify.
    :type text: UTF-8 :class:`str`; :class:`unicode`; string sequence
    :rtype: Detected
    :rtype: :class:`list` (when a list is passed)

    Basic usage:
        >>> from googletrans import Translator
        >>> translator = Translator()
        >>> translator.detect('This sentence is written in English.')
        <Detected lang=en confidence=0.22348526>
    Advanced usage:
        >>> langs = translator.detect(['한국어', '日本語', 'English', 'le français'])
        >>> for lang in langs:
        ...    print(lang.lang, lang.confidence)
    """
    if isinstance(text, list):
        # Batch mode: detect each item independently.
        return [self.detect(item) for item in text]

    data = self._translate(text, dest='en', src='auto')
    # Actual source language that will be recognized by Google Translator
    # when the src passed is equal to auto.
    src = ''
    confidence = 0.0
    try:
        src = ''.join(data[8][0])
        confidence = data[8][-2][0]
    except Exception:  # pragma: nocover
        # Best effort: the response layout can vary, fall back to defaults.
        pass
    return Detected(lang=src, confidence=confidence)
constant[Detect language of the input text
:param text: The source text(s) whose language you want to identify.
Batch detection is supported via sequence input.
:type text: UTF-8 :class:`str`; :class:`unicode`; string sequence (list, tuple, iterator, generator)
:rtype: Detected
:rtype: :class:`list` (when a list is passed)
Basic usage:
>>> from googletrans import Translator
>>> translator = Translator()
>>> translator.detect('이 문장은 한글로 쓰여졌습니다.')
<Detected lang=ko confidence=0.27041003>
>>> translator.detect('この文章は日本語で書かれました。')
<Detected lang=ja confidence=0.64889508>
>>> translator.detect('This sentence is written in English.')
<Detected lang=en confidence=0.22348526>
>>> translator.detect('Tiu frazo estas skribita en Esperanto.')
<Detected lang=eo confidence=0.10538048>
Advanced usage:
>>> langs = translator.detect(['한국어', '日本語', 'English', 'le français'])
>>> for lang in langs:
... print(lang.lang, lang.confidence)
ko 1
ja 0.92929292
en 0.96954316
fr 0.043500196
]
if call[name[isinstance], parameter[name[text], name[list]]] begin[:]
variable[result] assign[=] list[[]]
for taget[name[item]] in starred[name[text]] begin[:]
variable[lang] assign[=] call[name[self].detect, parameter[name[item]]]
call[name[result].append, parameter[name[lang]]]
return[name[result]]
variable[data] assign[=] call[name[self]._translate, parameter[name[text]]]
variable[src] assign[=] constant[]
variable[confidence] assign[=] constant[0.0]
<ast.Try object at 0x7da20c7c88b0>
variable[result] assign[=] call[name[Detected], parameter[]]
return[name[result]] | keyword[def] identifier[detect] ( identifier[self] , identifier[text] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[text] , identifier[list] ):
identifier[result] =[]
keyword[for] identifier[item] keyword[in] identifier[text] :
identifier[lang] = identifier[self] . identifier[detect] ( identifier[item] )
identifier[result] . identifier[append] ( identifier[lang] )
keyword[return] identifier[result]
identifier[data] = identifier[self] . identifier[_translate] ( identifier[text] , identifier[dest] = literal[string] , identifier[src] = literal[string] )
identifier[src] = literal[string]
identifier[confidence] = literal[int]
keyword[try] :
identifier[src] = literal[string] . identifier[join] ( identifier[data] [ literal[int] ][ literal[int] ])
identifier[confidence] = identifier[data] [ literal[int] ][- literal[int] ][ literal[int] ]
keyword[except] identifier[Exception] :
keyword[pass]
identifier[result] = identifier[Detected] ( identifier[lang] = identifier[src] , identifier[confidence] = identifier[confidence] )
keyword[return] identifier[result] | def detect(self, text):
"""Detect language of the input text
:param text: The source text(s) whose language you want to identify.
Batch detection is supported via sequence input.
:type text: UTF-8 :class:`str`; :class:`unicode`; string sequence (list, tuple, iterator, generator)
:rtype: Detected
:rtype: :class:`list` (when a list is passed)
Basic usage:
>>> from googletrans import Translator
>>> translator = Translator()
>>> translator.detect('이 문장은 한글로 쓰여졌습니다.')
<Detected lang=ko confidence=0.27041003>
>>> translator.detect('この文章は日本語で書かれました。')
<Detected lang=ja confidence=0.64889508>
>>> translator.detect('This sentence is written in English.')
<Detected lang=en confidence=0.22348526>
>>> translator.detect('Tiu frazo estas skribita en Esperanto.')
<Detected lang=eo confidence=0.10538048>
Advanced usage:
>>> langs = translator.detect(['한국어', '日本語', 'English', 'le français'])
>>> for lang in langs:
... print(lang.lang, lang.confidence)
ko 1
ja 0.92929292
en 0.96954316
fr 0.043500196
"""
if isinstance(text, list):
result = []
for item in text:
lang = self.detect(item)
result.append(lang) # depends on [control=['for'], data=['item']]
return result # depends on [control=['if'], data=[]]
data = self._translate(text, dest='en', src='auto')
# actual source language that will be recognized by Google Translator when the
# src passed is equal to auto.
src = ''
confidence = 0.0
try:
src = ''.join(data[8][0])
confidence = data[8][-2][0] # depends on [control=['try'], data=[]]
except Exception: # pragma: nocover
pass # depends on [control=['except'], data=[]]
result = Detected(lang=src, confidence=confidence)
return result |
def users_update_many(self, data, external_ids=None, ids=None, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/users#update-many-users"
    api_path = "/api/v2/users/update_many.json"
    # Merge an explicit caller-supplied query dict first, then the
    # convenience id filters.
    api_query = {}
    if "query" in kwargs:
        api_query.update(kwargs.pop("query"))
    if external_ids:
        api_query["external_ids"] = external_ids
    if ids:
        api_query["ids"] = ids
    return self.call(api_path, query=api_query, method="PUT", data=data, **kwargs)
constant[https://developer.zendesk.com/rest_api/docs/core/users#update-many-users]
variable[api_path] assign[=] constant[/api/v2/users/update_many.json]
variable[api_query] assign[=] dictionary[[], []]
if compare[constant[query] in call[name[kwargs].keys, parameter[]]] begin[:]
call[name[api_query].update, parameter[call[name[kwargs]][constant[query]]]]
<ast.Delete object at 0x7da1b0e26e30>
if name[external_ids] begin[:]
call[name[api_query].update, parameter[dictionary[[<ast.Constant object at 0x7da1b0e26710>], [<ast.Name object at 0x7da1b0e26230>]]]]
if name[ids] begin[:]
call[name[api_query].update, parameter[dictionary[[<ast.Constant object at 0x7da1b0d4a3b0>], [<ast.Name object at 0x7da1b0e26b60>]]]]
return[call[name[self].call, parameter[name[api_path]]]] | keyword[def] identifier[users_update_many] ( identifier[self] , identifier[data] , identifier[external_ids] = keyword[None] , identifier[ids] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[api_path] = literal[string]
identifier[api_query] ={}
keyword[if] literal[string] keyword[in] identifier[kwargs] . identifier[keys] ():
identifier[api_query] . identifier[update] ( identifier[kwargs] [ literal[string] ])
keyword[del] identifier[kwargs] [ literal[string] ]
keyword[if] identifier[external_ids] :
identifier[api_query] . identifier[update] ({
literal[string] : identifier[external_ids] ,
})
keyword[if] identifier[ids] :
identifier[api_query] . identifier[update] ({
literal[string] : identifier[ids] ,
})
keyword[return] identifier[self] . identifier[call] ( identifier[api_path] , identifier[query] = identifier[api_query] , identifier[method] = literal[string] , identifier[data] = identifier[data] ,** identifier[kwargs] ) | def users_update_many(self, data, external_ids=None, ids=None, **kwargs):
"""https://developer.zendesk.com/rest_api/docs/core/users#update-many-users"""
api_path = '/api/v2/users/update_many.json'
api_query = {}
if 'query' in kwargs.keys():
api_query.update(kwargs['query'])
del kwargs['query'] # depends on [control=['if'], data=[]]
if external_ids:
api_query.update({'external_ids': external_ids}) # depends on [control=['if'], data=[]]
if ids:
api_query.update({'ids': ids}) # depends on [control=['if'], data=[]]
return self.call(api_path, query=api_query, method='PUT', data=data, **kwargs) |
def bytes(self):
    """Serialize user-data fields d1..d14 into a 14-byte bytearray.

    Fields that are None are emitted as 0x00.
    """
    out = bytearray()
    for index in range(1, 15):
        value = self._userdata['d' + str(index)]
        out.append(value if value is not None else 0x00)
    return out
constant[Emit the address in bytes format.]
variable[byteout] assign[=] call[name[bytearray], parameter[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], constant[15]]]] begin[:]
variable[key] assign[=] binary_operation[constant[d] + call[name[str], parameter[name[i]]]]
if compare[call[name[self]._userdata][name[key]] is_not constant[None]] begin[:]
call[name[byteout].append, parameter[call[name[self]._userdata][name[key]]]]
return[name[byteout]] | keyword[def] identifier[bytes] ( identifier[self] ):
literal[string]
identifier[byteout] = identifier[bytearray] ()
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] ):
identifier[key] = literal[string] + identifier[str] ( identifier[i] )
keyword[if] identifier[self] . identifier[_userdata] [ identifier[key] ] keyword[is] keyword[not] keyword[None] :
identifier[byteout] . identifier[append] ( identifier[self] . identifier[_userdata] [ identifier[key] ])
keyword[else] :
identifier[byteout] . identifier[append] ( literal[int] )
keyword[return] identifier[byteout] | def bytes(self):
"""Emit the address in bytes format."""
byteout = bytearray()
for i in range(1, 15):
key = 'd' + str(i)
if self._userdata[key] is not None:
byteout.append(self._userdata[key]) # depends on [control=['if'], data=[]]
else:
byteout.append(0) # depends on [control=['for'], data=['i']]
return byteout |
def configfile(f):
    """ This decorator will parse a configuration file in YAML format
    and store the resulting dictionary in ``ctx.config``.

    The path of the configuration file is taken from ``ctx.obj["configfile"]``.
    """
    @click.pass_context
    def new_func(ctx, *args, **kwargs):
        # Use a context manager so the file handle is always closed (the
        # original `yaml.load(open(...))` leaked it), and safe_load so
        # arbitrary YAML tags cannot instantiate objects / run code.
        # safe_load also works on PyYAML >= 6.0, where yaml.load() without
        # an explicit Loader raises TypeError.
        with open(ctx.obj["configfile"]) as config_fp:
            ctx.config = yaml.safe_load(config_fp)
        return ctx.invoke(f, *args, **kwargs)
    return update_wrapper(new_func, f)
constant[ This decorator will parse a configuration file in YAML format
and store the dictionary in ``ctx.blockchain.config``
]
def function[new_func, parameter[ctx]]:
name[ctx].config assign[=] call[name[yaml].load, parameter[call[name[open], parameter[call[name[ctx].obj][constant[configfile]]]]]]
return[call[name[ctx].invoke, parameter[name[f], <ast.Starred object at 0x7da1b1038730>]]]
return[call[name[update_wrapper], parameter[name[new_func], name[f]]]] | keyword[def] identifier[configfile] ( identifier[f] ):
literal[string]
@ identifier[click] . identifier[pass_context]
keyword[def] identifier[new_func] ( identifier[ctx] ,* identifier[args] ,** identifier[kwargs] ):
identifier[ctx] . identifier[config] = identifier[yaml] . identifier[load] ( identifier[open] ( identifier[ctx] . identifier[obj] [ literal[string] ]))
keyword[return] identifier[ctx] . identifier[invoke] ( identifier[f] ,* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[update_wrapper] ( identifier[new_func] , identifier[f] ) | def configfile(f):
""" This decorator will parse a configuration file in YAML format
and store the dictionary in ``ctx.blockchain.config``
"""
@click.pass_context
def new_func(ctx, *args, **kwargs):
ctx.config = yaml.load(open(ctx.obj['configfile']))
return ctx.invoke(f, *args, **kwargs)
return update_wrapper(new_func, f) |
def select(soup, selector):
    """Run a CSS selector against a parsed document.

    soup should be a BeautifulSoup instance; selector is a CSS selector
    specifying the elements you want to retrieve.  Tokens are applied left
    to right, each narrowing the previous match set (i.e. only the
    descendant/whitespace combinator is supported).  Supported token forms:
    attribute selectors (``tag[attr=value]``), id (``tag#id``), class
    (``tag.klass``), the universal selector ``*`` and plain tag names.
    Returns a (possibly empty) list of matching elements.
    """
    tokens = selector.split()
    current_context = [soup]
    for token in tokens:
        m = attribselect_re.match(token)
        if m:
            # Attribute selector, e.g. a[href^=http].  An empty tag part
            # means "any tag"; findAll accepts True as a wildcard.
            tag, attribute, operator, value = m.groups()
            if not tag:
                tag = True
            checker = attribute_checker(operator, attribute, value)
            found = []
            for context in current_context:
                found.extend([el for el in context.findAll(tag) if checker(el)])
            current_context = found
            continue
        if '#' in token:
            # ID selector.  IDs are assumed unique: only the first context
            # is searched and at most one element survives.
            tag, id = token.split('#', 1)
            if not tag:
                tag = True
            el = current_context[0].find(tag, {'id': id})
            if not el:
                return []  # No match
            current_context = [el]
            continue
        if '.' in token:
            # Class selector: matches when klass is one of the
            # whitespace-separated names in the class attribute.
            # NOTE(review): assumes the 'class' attribute value is a string
            # (BeautifulSoup 3 findAll style); BS4 may hand the lambda a
            # list -- confirm before switching parser versions.
            tag, klass = token.split('.', 1)
            if not tag:
                tag = True
            found = []
            for context in current_context:
                found.extend(
                    context.findAll(tag,
                        {'class': lambda attr: attr and klass in attr.split()}
                    )
                )
            current_context = found
            continue
        if token == '*':
            # Star selector: every element under the current contexts.
            found = []
            for context in current_context:
                found.extend(context.findAll(True))
            current_context = found
            continue
        # Here we should just have a regular tag name; anything else is an
        # unsupported token and yields no matches.
        if not tag_re.match(token):
            return []
        found = []
        for context in current_context:
            found.extend(context.findAll(token))
        current_context = found
    return current_context
constant[
soup should be a BeautifulSoup instance; selector is a CSS selector
specifying the elements you want to retrieve.
]
variable[tokens] assign[=] call[name[selector].split, parameter[]]
variable[current_context] assign[=] list[[<ast.Name object at 0x7da1b1e1b820>]]
for taget[name[token]] in starred[name[tokens]] begin[:]
variable[m] assign[=] call[name[attribselect_re].match, parameter[name[token]]]
if name[m] begin[:]
<ast.Tuple object at 0x7da1b1e1ba60> assign[=] call[name[m].groups, parameter[]]
if <ast.UnaryOp object at 0x7da1b1e1b2b0> begin[:]
variable[tag] assign[=] constant[True]
variable[checker] assign[=] call[name[attribute_checker], parameter[name[operator], name[attribute], name[value]]]
variable[found] assign[=] list[[]]
for taget[name[context]] in starred[name[current_context]] begin[:]
call[name[found].extend, parameter[<ast.ListComp object at 0x7da1b1f28580>]]
variable[current_context] assign[=] name[found]
continue
if compare[constant[#] in name[token]] begin[:]
<ast.Tuple object at 0x7da1b1f29570> assign[=] call[name[token].split, parameter[constant[#], constant[1]]]
if <ast.UnaryOp object at 0x7da18f09e530> begin[:]
variable[tag] assign[=] constant[True]
variable[el] assign[=] call[call[name[current_context]][constant[0]].find, parameter[name[tag], dictionary[[<ast.Constant object at 0x7da18f09c370>], [<ast.Name object at 0x7da18f09f0d0>]]]]
if <ast.UnaryOp object at 0x7da18f09e3b0> begin[:]
return[list[[]]]
variable[current_context] assign[=] list[[<ast.Name object at 0x7da18f09f7f0>]]
continue
if compare[constant[.] in name[token]] begin[:]
<ast.Tuple object at 0x7da18f09df30> assign[=] call[name[token].split, parameter[constant[.], constant[1]]]
if <ast.UnaryOp object at 0x7da18f09ff10> begin[:]
variable[tag] assign[=] constant[True]
variable[found] assign[=] list[[]]
for taget[name[context]] in starred[name[current_context]] begin[:]
call[name[found].extend, parameter[call[name[context].findAll, parameter[name[tag], dictionary[[<ast.Constant object at 0x7da18f09dd80>], [<ast.Lambda object at 0x7da18f09d120>]]]]]]
variable[current_context] assign[=] name[found]
continue
if compare[name[token] equal[==] constant[*]] begin[:]
variable[found] assign[=] list[[]]
for taget[name[context]] in starred[name[current_context]] begin[:]
call[name[found].extend, parameter[call[name[context].findAll, parameter[constant[True]]]]]
variable[current_context] assign[=] name[found]
continue
if <ast.UnaryOp object at 0x7da18f09fbb0> begin[:]
return[list[[]]]
variable[found] assign[=] list[[]]
for taget[name[context]] in starred[name[current_context]] begin[:]
call[name[found].extend, parameter[call[name[context].findAll, parameter[name[token]]]]]
variable[current_context] assign[=] name[found]
return[name[current_context]] | keyword[def] identifier[select] ( identifier[soup] , identifier[selector] ):
literal[string]
identifier[tokens] = identifier[selector] . identifier[split] ()
identifier[current_context] =[ identifier[soup] ]
keyword[for] identifier[token] keyword[in] identifier[tokens] :
identifier[m] = identifier[attribselect_re] . identifier[match] ( identifier[token] )
keyword[if] identifier[m] :
identifier[tag] , identifier[attribute] , identifier[operator] , identifier[value] = identifier[m] . identifier[groups] ()
keyword[if] keyword[not] identifier[tag] :
identifier[tag] = keyword[True]
identifier[checker] = identifier[attribute_checker] ( identifier[operator] , identifier[attribute] , identifier[value] )
identifier[found] =[]
keyword[for] identifier[context] keyword[in] identifier[current_context] :
identifier[found] . identifier[extend] ([ identifier[el] keyword[for] identifier[el] keyword[in] identifier[context] . identifier[findAll] ( identifier[tag] ) keyword[if] identifier[checker] ( identifier[el] )])
identifier[current_context] = identifier[found]
keyword[continue]
keyword[if] literal[string] keyword[in] identifier[token] :
identifier[tag] , identifier[id] = identifier[token] . identifier[split] ( literal[string] , literal[int] )
keyword[if] keyword[not] identifier[tag] :
identifier[tag] = keyword[True]
identifier[el] = identifier[current_context] [ literal[int] ]. identifier[find] ( identifier[tag] ,{ literal[string] : identifier[id] })
keyword[if] keyword[not] identifier[el] :
keyword[return] []
identifier[current_context] =[ identifier[el] ]
keyword[continue]
keyword[if] literal[string] keyword[in] identifier[token] :
identifier[tag] , identifier[klass] = identifier[token] . identifier[split] ( literal[string] , literal[int] )
keyword[if] keyword[not] identifier[tag] :
identifier[tag] = keyword[True]
identifier[found] =[]
keyword[for] identifier[context] keyword[in] identifier[current_context] :
identifier[found] . identifier[extend] (
identifier[context] . identifier[findAll] ( identifier[tag] ,
{ literal[string] : keyword[lambda] identifier[attr] : identifier[attr] keyword[and] identifier[klass] keyword[in] identifier[attr] . identifier[split] ()}
)
)
identifier[current_context] = identifier[found]
keyword[continue]
keyword[if] identifier[token] == literal[string] :
identifier[found] =[]
keyword[for] identifier[context] keyword[in] identifier[current_context] :
identifier[found] . identifier[extend] ( identifier[context] . identifier[findAll] ( keyword[True] ))
identifier[current_context] = identifier[found]
keyword[continue]
keyword[if] keyword[not] identifier[tag_re] . identifier[match] ( identifier[token] ):
keyword[return] []
identifier[found] =[]
keyword[for] identifier[context] keyword[in] identifier[current_context] :
identifier[found] . identifier[extend] ( identifier[context] . identifier[findAll] ( identifier[token] ))
identifier[current_context] = identifier[found]
keyword[return] identifier[current_context] | def select(soup, selector):
"""
soup should be a BeautifulSoup instance; selector is a CSS selector
specifying the elements you want to retrieve.
"""
tokens = selector.split()
current_context = [soup]
for token in tokens:
m = attribselect_re.match(token)
if m:
# Attribute selector
(tag, attribute, operator, value) = m.groups()
if not tag:
tag = True # depends on [control=['if'], data=[]]
checker = attribute_checker(operator, attribute, value)
found = []
for context in current_context:
found.extend([el for el in context.findAll(tag) if checker(el)]) # depends on [control=['for'], data=['context']]
current_context = found
continue # depends on [control=['if'], data=[]]
if '#' in token:
# ID selector
(tag, id) = token.split('#', 1)
if not tag:
tag = True # depends on [control=['if'], data=[]]
el = current_context[0].find(tag, {'id': id})
if not el:
return [] # No match # depends on [control=['if'], data=[]]
current_context = [el]
continue # depends on [control=['if'], data=['token']]
if '.' in token:
# Class selector
(tag, klass) = token.split('.', 1)
if not tag:
tag = True # depends on [control=['if'], data=[]]
found = []
for context in current_context:
found.extend(context.findAll(tag, {'class': lambda attr: attr and klass in attr.split()})) # depends on [control=['for'], data=['context']]
current_context = found
continue # depends on [control=['if'], data=['token']]
if token == '*':
# Star selector
found = []
for context in current_context:
found.extend(context.findAll(True)) # depends on [control=['for'], data=['context']]
current_context = found
continue # depends on [control=['if'], data=[]]
# Here we should just have a regular tag
if not tag_re.match(token):
return [] # depends on [control=['if'], data=[]]
found = []
for context in current_context:
found.extend(context.findAll(token)) # depends on [control=['for'], data=['context']]
current_context = found # depends on [control=['for'], data=['token']]
return current_context |
def setRedYellowGreenState(self, tlsID, state):
    """setRedYellowGreenState(string, string) -> None

    Set the complete signal state of the named traffic light.  ``state``
    holds one light definition per controlled link, drawn from
    "rugGyYuoO" (red, red-yellow, green, yellow, off); lower-case letters
    mean the approaching stream has to decelerate.
    """
    # Forward the request as a plain string command over the TraCI link.
    command = tc.CMD_SET_TL_VARIABLE
    variable = tc.TL_RED_YELLOW_GREEN_STATE
    self._connection._sendStringCmd(command, variable, tlsID, state)
constant[setRedYellowGreenState(string, string) -> None
Sets the named tl's state as a tuple of light definitions from
rugGyYuoO, for red, red-yellow, green, yellow, off, where lower case letters mean that the stream has to decelerate.
]
call[name[self]._connection._sendStringCmd, parameter[name[tc].CMD_SET_TL_VARIABLE, name[tc].TL_RED_YELLOW_GREEN_STATE, name[tlsID], name[state]]] | keyword[def] identifier[setRedYellowGreenState] ( identifier[self] , identifier[tlsID] , identifier[state] ):
literal[string]
identifier[self] . identifier[_connection] . identifier[_sendStringCmd] (
identifier[tc] . identifier[CMD_SET_TL_VARIABLE] , identifier[tc] . identifier[TL_RED_YELLOW_GREEN_STATE] , identifier[tlsID] , identifier[state] ) | def setRedYellowGreenState(self, tlsID, state):
"""setRedYellowGreenState(string, string) -> None
Sets the named tl's state as a tuple of light definitions from
rugGyYuoO, for red, red-yellow, green, yellow, off, where lower case letters mean that the stream has to decelerate.
"""
self._connection._sendStringCmd(tc.CMD_SET_TL_VARIABLE, tc.TL_RED_YELLOW_GREEN_STATE, tlsID, state) |
def body(self):
    """
    AMQP plumbing: open the blocking connection, obtain a channel, and run
    the receive loop, acknowledging each message the handler accepts.
    Warning:
        Don't override this method!
    """
    self.connection = pika.BlockingConnection(self.connection_param)
    self.channel = self.connection.channel()
    # Deliveries are handed to .onMessageReceived(); a truthy return value
    # acknowledges the message, otherwise it is left un-acked.
    consumer = self.channel.consume(self.queue)
    for method_frame, properties, payload in consumer:
        accepted = self.onMessageReceived(method_frame, properties, payload)
        if accepted:
            self.ack(method_frame.delivery_tag)
constant[
This method just handles AMQP connection details and receive loop.
Warning:
Don't override this method!
]
name[self].connection assign[=] call[name[pika].BlockingConnection, parameter[name[self].connection_param]]
name[self].channel assign[=] call[name[self].connection.channel, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b09bf2e0>, <ast.Name object at 0x7da1b09be350>, <ast.Name object at 0x7da1b09bcdf0>]]] in starred[call[name[self].channel.consume, parameter[name[self].queue]]] begin[:]
if call[name[self].onMessageReceived, parameter[name[method_frame], name[properties], name[body]]] begin[:]
call[name[self].ack, parameter[name[method_frame].delivery_tag]] | keyword[def] identifier[body] ( identifier[self] ):
literal[string]
identifier[self] . identifier[connection] = identifier[pika] . identifier[BlockingConnection] ( identifier[self] . identifier[connection_param] )
identifier[self] . identifier[channel] = identifier[self] . identifier[connection] . identifier[channel] ()
keyword[for] identifier[method_frame] , identifier[properties] , identifier[body] keyword[in] identifier[self] . identifier[channel] . identifier[consume] ( identifier[self] . identifier[queue] ):
keyword[if] identifier[self] . identifier[onMessageReceived] ( identifier[method_frame] , identifier[properties] , identifier[body] ):
identifier[self] . identifier[ack] ( identifier[method_frame] . identifier[delivery_tag] ) | def body(self):
"""
This method just handles AMQP connection details and receive loop.
Warning:
Don't override this method!
"""
self.connection = pika.BlockingConnection(self.connection_param)
self.channel = self.connection.channel()
# receive messages and put them to .onMessageReceived() callback
for (method_frame, properties, body) in self.channel.consume(self.queue):
if self.onMessageReceived(method_frame, properties, body):
self.ack(method_frame.delivery_tag) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def cluster_forget(self, node_id):
    """Remove a node from the nodes table."""
    # CLUSTER FORGET replies with a simple "OK" status on success.
    return wait_ok(self.execute(b'CLUSTER', b'FORGET', node_id))
constant[Remove a node from the nodes table.]
variable[fut] assign[=] call[name[self].execute, parameter[constant[b'CLUSTER'], constant[b'FORGET'], name[node_id]]]
return[call[name[wait_ok], parameter[name[fut]]]] | keyword[def] identifier[cluster_forget] ( identifier[self] , identifier[node_id] ):
literal[string]
identifier[fut] = identifier[self] . identifier[execute] ( literal[string] , literal[string] , identifier[node_id] )
keyword[return] identifier[wait_ok] ( identifier[fut] ) | def cluster_forget(self, node_id):
"""Remove a node from the nodes table."""
fut = self.execute(b'CLUSTER', b'FORGET', node_id)
return wait_ok(fut) |
def enable_key(self):
    """Enable an existing API Key.

    Prompts for an API key ID and asks the exchange to re-enable it,
    re-prompting on failure.

    Fixes over the original:
    - the bare ``except:`` (which also swallowed KeyboardInterrupt and
      SystemExit, making the prompt impossible to abort) is narrowed to
      ``except Exception``;
    - the retry is an explicit loop instead of self-recursion, so repeated
      failures can no longer exhaust the call stack.
    """
    print("This command will enable a disabled key.")
    while True:
        apiKeyID = input("API Key ID: ")
        try:
            key = self._curl_bitmex("/apiKey/enable",
                                    postdict={"apiKeyID": apiKeyID})
        except Exception:
            print("Unable to enable key, please try again.")
            continue
        print("Key with ID %s enabled." % key["id"])
        return
constant[Enable an existing API Key.]
call[name[print], parameter[constant[This command will enable a disabled key.]]]
variable[apiKeyID] assign[=] call[name[input], parameter[constant[API Key ID: ]]]
<ast.Try object at 0x7da18ede5e10> | keyword[def] identifier[enable_key] ( identifier[self] ):
literal[string]
identifier[print] ( literal[string] )
identifier[apiKeyID] = identifier[input] ( literal[string] )
keyword[try] :
identifier[key] = identifier[self] . identifier[_curl_bitmex] ( literal[string] ,
identifier[postdict] ={ literal[string] : identifier[apiKeyID] })
identifier[print] ( literal[string] % identifier[key] [ literal[string] ])
keyword[except] :
identifier[print] ( literal[string] )
identifier[self] . identifier[enable_key] () | def enable_key(self):
"""Enable an existing API Key."""
print('This command will enable a disabled key.')
apiKeyID = input('API Key ID: ')
try:
key = self._curl_bitmex('/apiKey/enable', postdict={'apiKeyID': apiKeyID})
print('Key with ID %s enabled.' % key['id']) # depends on [control=['try'], data=[]]
except:
print('Unable to enable key, please try again.')
self.enable_key() # depends on [control=['except'], data=[]] |
def tfmerXL_lm_split(model:nn.Module) -> List[nn.Module]:
    "Split a RNN `model` in groups for differential learning rates."
    encoder = model[0]
    third = len(encoder.layers) // 3
    # First group: the first third of the layers plus the u/v positional biases.
    first = list(encoder.layers[:third]) + [ParameterModule(encoder.u), ParameterModule(encoder.v)]
    second = list(encoder.layers[third:2 * third])
    remainder = list(encoder.layers[2 * third:])
    # Embedding and the model head travel together in the last group.
    head = [encoder.encoder, model[1]]
    return [first, second, remainder, head]
constant[Split a RNN `model` in groups for differential learning rates.]
variable[encoder] assign[=] call[name[model]][constant[0]]
variable[n] assign[=] binary_operation[call[name[len], parameter[name[encoder].layers]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[3]]
variable[groups] assign[=] list[[<ast.BinOp object at 0x7da1b202a0e0>]]
return[binary_operation[name[groups] + list[[<ast.Call object at 0x7da1b20299f0>, <ast.Call object at 0x7da1b202ad70>, <ast.List object at 0x7da1b20283a0>]]]] | keyword[def] identifier[tfmerXL_lm_split] ( identifier[model] : identifier[nn] . identifier[Module] )-> identifier[List] [ identifier[nn] . identifier[Module] ]:
literal[string]
identifier[encoder] = identifier[model] [ literal[int] ]
identifier[n] = identifier[len] ( identifier[encoder] . identifier[layers] )// literal[int]
identifier[groups] =[ identifier[list] ( identifier[encoder] . identifier[layers] [: identifier[n] ])+[ identifier[ParameterModule] ( identifier[encoder] . identifier[u] ), identifier[ParameterModule] ( identifier[encoder] . identifier[v] )]]
keyword[return] identifier[groups] +[ identifier[list] ( identifier[encoder] . identifier[layers] [ identifier[n] : literal[int] * identifier[n] ]), identifier[list] ( identifier[encoder] . identifier[layers] [ literal[int] * identifier[n] :]),[ identifier[encoder] . identifier[encoder] , identifier[model] [ literal[int] ]]] | def tfmerXL_lm_split(model: nn.Module) -> List[nn.Module]:
"""Split a RNN `model` in groups for differential learning rates."""
encoder = model[0]
n = len(encoder.layers) // 3
groups = [list(encoder.layers[:n]) + [ParameterModule(encoder.u), ParameterModule(encoder.v)]]
return groups + [list(encoder.layers[n:2 * n]), list(encoder.layers[2 * n:]), [encoder.encoder, model[1]]] |
def search_url(self, searchterm):
    """Search for URLs
    :type searchterm: str
    :rtype: list
    """
    # Restrict the attribute search to the MISP URL attribute types.
    url_types = self.__mispurltypes()
    return self.__search(type_attribute=url_types, value=searchterm)
constant[Search for URLs
:type searchterm: str
:rtype: list
]
return[call[name[self].__search, parameter[]]] | keyword[def] identifier[search_url] ( identifier[self] , identifier[searchterm] ):
literal[string]
keyword[return] identifier[self] . identifier[__search] ( identifier[type_attribute] = identifier[self] . identifier[__mispurltypes] (), identifier[value] = identifier[searchterm] ) | def search_url(self, searchterm):
"""Search for URLs
:type searchterm: str
:rtype: list
"""
return self.__search(type_attribute=self.__mispurltypes(), value=searchterm) |
def use_custom_term_frequencies(self, custom_term_frequencies):
    '''
    Parameters
    ----------
    pd.Series
        term -> frequency
    Returns
    -------
    PriorFactory
    '''
    # Align the custom counts to the existing prior vocabulary; terms
    # absent from the custom series contribute zero.
    aligned = custom_term_frequencies.reindex(self.priors.index)
    self.priors = self.priors + aligned.fillna(0)
    return self
constant[
Parameters
----------
pd.Series
term -> frequency
Returns
-------
PriorFactory
]
<ast.AugAssign object at 0x7da1b1b84ee0>
return[name[self]] | keyword[def] identifier[use_custom_term_frequencies] ( identifier[self] , identifier[custom_term_frequencies] ):
literal[string]
identifier[self] . identifier[priors] += identifier[custom_term_frequencies] . identifier[reindex] ( identifier[self] . identifier[priors] . identifier[index] ). identifier[fillna] ( literal[int] )
keyword[return] identifier[self] | def use_custom_term_frequencies(self, custom_term_frequencies):
"""
Parameters
----------
pd.Series
term -> frequency
Returns
-------
PriorFactory
"""
self.priors += custom_term_frequencies.reindex(self.priors.index).fillna(0)
return self |
def get_text_query(query_string, search_fields):
    """
    Builds a query for both included & excluded terms in a text search.
    """
    include_terms, exclude_terms = get_text_tokenizer(query_string)
    include_q = get_query_includes(include_terms, search_fields)
    exclude_q = get_query_excludes(exclude_terms, search_fields)
    if not exclude_q:
        # Nothing to exclude: the include query (possibly empty) stands alone.
        return include_q
    if include_q:
        # Require the included terms while negating the excluded ones.
        return include_q & ~exclude_q
    # Only exclusions were given.
    return ~exclude_q
constant[
Builds a query for both included & excluded terms in a text search.
]
<ast.Tuple object at 0x7da18f09f610> assign[=] call[name[get_text_tokenizer], parameter[name[query_string]]]
variable[include_q] assign[=] call[name[get_query_includes], parameter[name[include_terms], name[search_fields]]]
variable[exclude_q] assign[=] call[name[get_query_excludes], parameter[name[exclude_terms], name[search_fields]]]
variable[query] assign[=] constant[None]
if <ast.BoolOp object at 0x7da18f09fdc0> begin[:]
variable[query] assign[=] binary_operation[name[include_q] <ast.BitAnd object at 0x7da2590d6b60> <ast.UnaryOp object at 0x7da18f09fac0>]
return[name[query]] | keyword[def] identifier[get_text_query] ( identifier[query_string] , identifier[search_fields] ):
literal[string]
identifier[include_terms] , identifier[exclude_terms] = identifier[get_text_tokenizer] ( identifier[query_string] )
identifier[include_q] = identifier[get_query_includes] ( identifier[include_terms] , identifier[search_fields] )
identifier[exclude_q] = identifier[get_query_excludes] ( identifier[exclude_terms] , identifier[search_fields] )
identifier[query] = keyword[None]
keyword[if] identifier[include_q] keyword[and] identifier[exclude_q] :
identifier[query] = identifier[include_q] &~ identifier[exclude_q]
keyword[elif] keyword[not] identifier[exclude_q] :
identifier[query] = identifier[include_q]
keyword[else] :
identifier[query] =~ identifier[exclude_q]
keyword[return] identifier[query] | def get_text_query(query_string, search_fields):
"""
Builds a query for both included & excluded terms in a text search.
"""
(include_terms, exclude_terms) = get_text_tokenizer(query_string)
include_q = get_query_includes(include_terms, search_fields)
exclude_q = get_query_excludes(exclude_terms, search_fields)
query = None
if include_q and exclude_q:
query = include_q & ~exclude_q # depends on [control=['if'], data=[]]
elif not exclude_q:
query = include_q # depends on [control=['if'], data=[]]
else:
query = ~exclude_q
return query |
def extract_xyz_matrix_from_pdb_chain(pdb_lines, chain_id, atoms_of_interest = backbone_atoms, expected_num_residues = None, expected_num_residue_atoms = None, fail_on_model_records = True, include_all_columns = False):
    '''Returns a pandas dataframe of X, Y, Z coordinates for the PDB chain.

    :param pdb_lines: iterable of PDB-format record lines.
    :param chain_id: single-character chain identifier (column 22 of ATOM records).
    :param atoms_of_interest: atom names forwarded to the residue-range extractor.
    :param fail_on_model_records: if True, raise when MODEL records are present.

    Note: This function is not intended to handle structures with MODELs e.g. from NMR although the fail_on_model_records
    check is optional for convenience in case the chain of the first model is to be parsed.

    Fix: the chain-termination check now runs *before* a record is appended, so
    the first ATOM record of the following chain is no longer leaked into the
    extracted lines (the original appended it and only then broke the loop).
    '''
    if fail_on_model_records and any(l.startswith('MODEL') for l in pdb_lines):
        raise Exception('This function does not handle files with MODEL records. Please split those file by model first.')
    new_pdb_lines = []
    found_chain = False
    for l in pdb_lines:
        # Stop at the end of the target chain: TER record, a new MODEL, or a
        # record belonging to a different chain. Checked first so that foreign
        # records are never appended.
        if found_chain and (l.strip() == 'TER' or l.startswith('MODEL') or (len(l) > 21 and l[21] != chain_id)):
            break
        if l.startswith('ATOM '):
            if l[21] == chain_id:
                found_chain = True
            if found_chain:
                new_pdb_lines.append(l)
    return PDB.extract_xyz_matrix_from_pdb_residue_range(new_pdb_lines, atoms_of_interest = atoms_of_interest, expected_num_residues = expected_num_residues, expected_num_residue_atoms = expected_num_residue_atoms, include_all_columns = include_all_columns)
constant[Returns a pandas dataframe of X, Y, Z coordinates for the PDB chain.
Note: This function is not intended to handle structures with MODELs e.g. from NMR although the fail_on_model_records
check is optional for convenience in case the chain of the first model is to be parsed.]
if <ast.BoolOp object at 0x7da20c6abf40> begin[:]
<ast.Raise object at 0x7da20c6a8790>
variable[new_pdb_lines] assign[=] list[[]]
variable[found_chain] assign[=] constant[False]
for taget[name[l]] in starred[name[pdb_lines]] begin[:]
if call[name[l].startswith, parameter[constant[ATOM ]]] begin[:]
if compare[call[name[l]][constant[21]] equal[==] name[chain_id]] begin[:]
variable[found_chain] assign[=] constant[True]
if name[found_chain] begin[:]
call[name[new_pdb_lines].append, parameter[name[l]]]
if <ast.BoolOp object at 0x7da18ede44f0> begin[:]
break
return[call[name[PDB].extract_xyz_matrix_from_pdb_residue_range, parameter[name[new_pdb_lines]]]] | keyword[def] identifier[extract_xyz_matrix_from_pdb_chain] ( identifier[pdb_lines] , identifier[chain_id] , identifier[atoms_of_interest] = identifier[backbone_atoms] , identifier[expected_num_residues] = keyword[None] , identifier[expected_num_residue_atoms] = keyword[None] , identifier[fail_on_model_records] = keyword[True] , identifier[include_all_columns] = keyword[False] ):
literal[string]
keyword[if] identifier[fail_on_model_records] keyword[and] [ identifier[l] keyword[for] identifier[l] keyword[in] identifier[pdb_lines] keyword[if] identifier[l] . identifier[startswith] ( literal[string] )]:
keyword[raise] identifier[Exception] ( literal[string] )
identifier[new_pdb_lines] =[]
identifier[found_chain] = keyword[False]
keyword[for] identifier[l] keyword[in] identifier[pdb_lines] :
keyword[if] identifier[l] . identifier[startswith] ( literal[string] ):
keyword[if] identifier[l] [ literal[int] ]== identifier[chain_id] :
identifier[found_chain] = keyword[True]
keyword[if] identifier[found_chain] :
identifier[new_pdb_lines] . identifier[append] ( identifier[l] )
keyword[if] identifier[found_chain] keyword[and] ( identifier[l] . identifier[strip] ()== literal[string] keyword[or] identifier[l] . identifier[startswith] ( literal[string] ) keyword[or] ( identifier[len] ( identifier[l] )> literal[int] keyword[and] identifier[l] [ literal[int] ]!= identifier[chain_id] )):
keyword[break]
keyword[return] identifier[PDB] . identifier[extract_xyz_matrix_from_pdb_residue_range] ( identifier[new_pdb_lines] , identifier[atoms_of_interest] = identifier[atoms_of_interest] , identifier[expected_num_residues] = identifier[expected_num_residues] , identifier[expected_num_residue_atoms] = identifier[expected_num_residue_atoms] , identifier[include_all_columns] = identifier[include_all_columns] ) | def extract_xyz_matrix_from_pdb_chain(pdb_lines, chain_id, atoms_of_interest=backbone_atoms, expected_num_residues=None, expected_num_residue_atoms=None, fail_on_model_records=True, include_all_columns=False):
"""Returns a pandas dataframe of X, Y, Z coordinates for the PDB chain.
Note: This function is not intended to handle structures with MODELs e.g. from NMR although the fail_on_model_records
check is optional for convenience in case the chain of the first model is to be parsed."""
if fail_on_model_records and [l for l in pdb_lines if l.startswith('MODEL')]:
raise Exception('This function does not handle files with MODEL records. Please split those file by model first.') # depends on [control=['if'], data=[]]
new_pdb_lines = []
found_chain = False
for l in pdb_lines:
if l.startswith('ATOM '):
if l[21] == chain_id:
found_chain = True
if found_chain:
new_pdb_lines.append(l) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if found_chain and (l.strip() == 'TER' or l.startswith('MODEL') or (len(l) > 21 and l[21] != chain_id)):
# Do not cross over into other chains or models
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['l']]
return PDB.extract_xyz_matrix_from_pdb_residue_range(new_pdb_lines, atoms_of_interest=atoms_of_interest, expected_num_residues=expected_num_residues, expected_num_residue_atoms=expected_num_residue_atoms, include_all_columns=include_all_columns) |
def Services(self, field, show_select_column=True):
    """Render Analyses Services Listing Table
    """
    # Prefer an explicit `instance` attribute; fall back to the field's parent.
    instance = getattr(self, "instance", field.aq_parent)
    view = api.get_view("table_analyses_services",
                        context=instance,
                        request=self.REQUEST)
    # Listing hooks must run before the contents table is rendered.
    view.update()
    view.before_render()
    return view.ajax_contents_table()
constant[Render Analyses Services Listing Table
]
variable[instance] assign[=] call[name[getattr], parameter[name[self], constant[instance], name[field].aq_parent]]
variable[table] assign[=] call[name[api].get_view, parameter[constant[table_analyses_services]]]
call[name[table].update, parameter[]]
call[name[table].before_render, parameter[]]
return[call[name[table].ajax_contents_table, parameter[]]] | keyword[def] identifier[Services] ( identifier[self] , identifier[field] , identifier[show_select_column] = keyword[True] ):
literal[string]
identifier[instance] = identifier[getattr] ( identifier[self] , literal[string] , identifier[field] . identifier[aq_parent] )
identifier[table] = identifier[api] . identifier[get_view] ( literal[string] ,
identifier[context] = identifier[instance] ,
identifier[request] = identifier[self] . identifier[REQUEST] )
identifier[table] . identifier[update] ()
identifier[table] . identifier[before_render] ()
keyword[return] identifier[table] . identifier[ajax_contents_table] () | def Services(self, field, show_select_column=True):
"""Render Analyses Services Listing Table
"""
instance = getattr(self, 'instance', field.aq_parent)
table = api.get_view('table_analyses_services', context=instance, request=self.REQUEST)
# Call listing hooks
table.update()
table.before_render()
return table.ajax_contents_table() |
def get_mnist(data_type="train", location="/tmp/mnist"):
    """
    Get mnist dataset with features and label as ndarray.
    Data would be downloaded automatically if it doesn't present at the specific location.
    :param data_type: "train" for training data and "test" for testing data.
    :param location: Location to store mnist dataset.
    :return: (features: ndarray, label: ndarray)
    """
    features, labels = mnist.read_data_sets(location, data_type)
    # Labels are shifted from 0-based to 1-based indexing.
    return features, labels + 1
constant[
Get mnist dataset with features and label as ndarray.
Data would be downloaded automatically if it doesn't present at the specific location.
:param data_type: "train" for training data and "test" for testing data.
:param location: Location to store mnist dataset.
:return: (features: ndarray, label: ndarray)
]
<ast.Tuple object at 0x7da204345f30> assign[=] call[name[mnist].read_data_sets, parameter[name[location], name[data_type]]]
return[tuple[[<ast.Name object at 0x7da2043451b0>, <ast.BinOp object at 0x7da204346200>]]] | keyword[def] identifier[get_mnist] ( identifier[data_type] = literal[string] , identifier[location] = literal[string] ):
literal[string]
identifier[X] , identifier[Y] = identifier[mnist] . identifier[read_data_sets] ( identifier[location] , identifier[data_type] )
keyword[return] identifier[X] , identifier[Y] + literal[int] | def get_mnist(data_type='train', location='/tmp/mnist'):
"""
Get mnist dataset with features and label as ndarray.
Data would be downloaded automatically if it doesn't present at the specific location.
:param data_type: "train" for training data and "test" for testing data.
:param location: Location to store mnist dataset.
:return: (features: ndarray, label: ndarray)
"""
(X, Y) = mnist.read_data_sets(location, data_type)
return (X, Y + 1) |
def is_owner(package, abspath):
    """Determine whether `abspath` belongs to `package`."""
    # A package without file or location metadata cannot own anything.
    if 'files' not in package or 'location' not in package:
        return False
    location = package['location']
    owned = (os.path.abspath(os.path.join(location, f)) for f in package['files'])
    return abspath in owned
constant[Determine whether `abspath` belongs to `package`.]
<ast.Try object at 0x7da204564fd0>
variable[paths] assign[=] <ast.GeneratorExp object at 0x7da204565240>
return[compare[name[abspath] in name[paths]]] | keyword[def] identifier[is_owner] ( identifier[package] , identifier[abspath] ):
literal[string]
keyword[try] :
identifier[files] = identifier[package] [ literal[string] ]
identifier[location] = identifier[package] [ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[return] keyword[False]
identifier[paths] =( identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[location] , identifier[f] ))
keyword[for] identifier[f] keyword[in] identifier[files] )
keyword[return] identifier[abspath] keyword[in] identifier[paths] | def is_owner(package, abspath):
"""Determine whether `abspath` belongs to `package`."""
try:
files = package['files']
location = package['location'] # depends on [control=['try'], data=[]]
except KeyError:
return False # depends on [control=['except'], data=[]]
paths = (os.path.abspath(os.path.join(location, f)) for f in files)
return abspath in paths |
def generate_simple_chemical_query(self, name=None, chemical_formula=None, property_name=None, property_value=None,
                                   property_min=None, property_max=None, property_units=None, reference_doi=None,
                                   include_datasets=[], exclude_datasets=[], from_index=None, size=None):
    """
    This method generates a :class:`PifSystemReturningQuery` object using the
    supplied arguments. All arguments that accept lists have logical OR's on the queries that they generate.
    This means that, for example, simple_chemical_search(name=['A', 'B']) will match records that have name
    equal to 'A' or 'B'.
    Results will be pulled into the extracted field of the :class:`PifSearchHit` objects that are returned. The
    name will appear under the key "name", chemical formula under "chemical_formula", property name under
    "property_name", value of the property under "property_value", units of the property under "property_units",
    and reference DOI under "reference_doi".
    This method is only meant for execution of very simple queries. More complex queries must use the search method
    that accepts a :class:`PifSystemReturningQuery` object.
    :param name: One or more strings with the names of the chemical system to match.
    :type name: str or list of str
    :param chemical_formula: One or more strings with the chemical formulas to match.
    :type chemical_formula: str or list of str
    :param property_name: One or more strings with the names of the property to match.
    :type property_name: str or list of str
    :param property_value: One or more strings or numbers with the exact values to match.
    :type property_value: str or int or float or list of str or int or float
    :param property_min: A single string or number with the minimum value to match.
    :type property_min: str or int or float
    :param property_max: A single string or number with the maximum value to match.
    :type property_max: str or int or float
    :param property_units: One or more strings with the property units to match.
    :type property_units: str or list of str
    :param reference_doi: One or more strings with the DOI to match.
    :type reference_doi: str or list of str
    :param include_datasets: One or more integers with dataset IDs to match.
    :type include_datasets: int or list of int
    :param exclude_datasets: One or more integers with dataset IDs that must not match.
    :type exclude_datasets: int or list of int
    :param from_index: Index of the first record to match.
    :type from_index: int
    :param size: Total number of records to return.
    :type size: int
    :return: A query to to be submitted with the pif_search method
    :rtype: :class:`PifSystemReturningQuery`
    """
    # NOTE(review): the mutable defaults include_datasets=[] / exclude_datasets=[]
    # are never mutated below, so they are harmless here, but None defaults would
    # be more conventional.
    pif_system_query = PifSystemQuery()
    # Each filter list is OR'd: a record matches if any single filter matches.
    # extract_as labels where each matched value lands in the search-hit output.
    pif_system_query.names = FieldQuery(
        extract_as='name',
        filter=[Filter(equal=i) for i in self._get_list(name)])
    pif_system_query.chemical_formula = ChemicalFieldQuery(
        extract_as='chemical_formula',
        filter=[ChemicalFilter(equal=i) for i in self._get_list(chemical_formula)])
    pif_system_query.references = ReferenceQuery(doi=FieldQuery(
        extract_as='reference_doi',
        filter=[Filter(equal=i) for i in self._get_list(reference_doi)]))
    # Generate the parts of the property query
    property_name_query = FieldQuery(
        extract_as='property_name',
        filter=[Filter(equal=i) for i in self._get_list(property_name)])
    property_units_query = FieldQuery(
        extract_as='property_units',
        filter=[Filter(equal=i) for i in self._get_list(property_units)])
    property_value_query = FieldQuery(
        extract_as='property_value',
        filter=[])
    # Exact property values are OR'd together with the optional min/max range.
    for i in self._get_list(property_value):
        property_value_query.filter.append(Filter(equal=i))
    if property_min is not None or property_max is not None:
        property_value_query.filter.append(Filter(min=property_min, max=property_max))
    # Generate the full property query
    pif_system_query.properties = PropertyQuery(
        name=property_name_query,
        value=property_value_query,
        units=property_units_query)
    # Generate the dataset query: MUST to require, MUST_NOT to forbid dataset IDs.
    dataset_query = list()
    if include_datasets:
        dataset_query.append(DatasetQuery(logic='MUST', id=[Filter(equal=i) for i in include_datasets]))
    if exclude_datasets:
        dataset_query.append(DatasetQuery(logic='MUST_NOT', id=[Filter(equal=i) for i in exclude_datasets]))
    # Assemble the final returning query; score_relevance ranks hits by relevance.
    pif_system_returning_query = PifSystemReturningQuery(
        query=DataQuery(
            system=pif_system_query,
            dataset=dataset_query),
        from_index=from_index,
        size=size,
        score_relevance=True)
    return pif_system_returning_query
constant[
This method generates a :class:`PifSystemReturningQuery` object using the
supplied arguments. All arguments that accept lists have logical OR's on the queries that they generate.
This means that, for example, simple_chemical_search(name=['A', 'B']) will match records that have name
equal to 'A' or 'B'.
Results will be pulled into the extracted field of the :class:`PifSearchHit` objects that are returned. The
name will appear under the key "name", chemical formula under "chemical_formula", property name under
"property_name", value of the property under "property_value", units of the property under "property_units",
and reference DOI under "reference_doi".
This method is only meant for execution of very simple queries. More complex queries must use the search method
that accepts a :class:`PifSystemReturningQuery` object.
:param name: One or more strings with the names of the chemical system to match.
:type name: str or list of str
:param chemical_formula: One or more strings with the chemical formulas to match.
:type chemical_formula: str or list of str
:param property_name: One or more strings with the names of the property to match.
:type property_name: str or list of str
:param property_value: One or more strings or numbers with the exact values to match.
:type property_value: str or int or float or list of str or int or float
:param property_min: A single string or number with the minimum value to match.
:type property_min: str or int or float
:param property_max: A single string or number with the maximum value to match.
:type property_max: str or int or float
:param property_units: One or more strings with the property units to match.
:type property_units: str or list of str
:param reference_doi: One or more strings with the DOI to match.
:type reference_doin: str or list of str
:param include_datasets: One or more integers with dataset IDs to match.
:type include_datasets: int or list of int
:param exclude_datasets: One or more integers with dataset IDs that must not match.
:type exclude_datasets: int or list of int
:param from_index: Index of the first record to match.
:type from_index: int
:param size: Total number of records to return.
:type size: int
:return: A query to to be submitted with the pif_search method
:rtype: :class:`PifSystemReturningQuery`
]
variable[pif_system_query] assign[=] call[name[PifSystemQuery], parameter[]]
name[pif_system_query].names assign[=] call[name[FieldQuery], parameter[]]
name[pif_system_query].chemical_formula assign[=] call[name[ChemicalFieldQuery], parameter[]]
name[pif_system_query].references assign[=] call[name[ReferenceQuery], parameter[]]
variable[property_name_query] assign[=] call[name[FieldQuery], parameter[]]
variable[property_units_query] assign[=] call[name[FieldQuery], parameter[]]
variable[property_value_query] assign[=] call[name[FieldQuery], parameter[]]
for taget[name[i]] in starred[call[name[self]._get_list, parameter[name[property_value]]]] begin[:]
call[name[property_value_query].filter.append, parameter[call[name[Filter], parameter[]]]]
if <ast.BoolOp object at 0x7da18c4cc370> begin[:]
call[name[property_value_query].filter.append, parameter[call[name[Filter], parameter[]]]]
name[pif_system_query].properties assign[=] call[name[PropertyQuery], parameter[]]
variable[dataset_query] assign[=] call[name[list], parameter[]]
if name[include_datasets] begin[:]
call[name[dataset_query].append, parameter[call[name[DatasetQuery], parameter[]]]]
if name[exclude_datasets] begin[:]
call[name[dataset_query].append, parameter[call[name[DatasetQuery], parameter[]]]]
variable[pif_system_returning_query] assign[=] call[name[PifSystemReturningQuery], parameter[]]
return[name[pif_system_returning_query]] | keyword[def] identifier[generate_simple_chemical_query] ( identifier[self] , identifier[name] = keyword[None] , identifier[chemical_formula] = keyword[None] , identifier[property_name] = keyword[None] , identifier[property_value] = keyword[None] ,
identifier[property_min] = keyword[None] , identifier[property_max] = keyword[None] , identifier[property_units] = keyword[None] , identifier[reference_doi] = keyword[None] ,
identifier[include_datasets] =[], identifier[exclude_datasets] =[], identifier[from_index] = keyword[None] , identifier[size] = keyword[None] ):
literal[string]
identifier[pif_system_query] = identifier[PifSystemQuery] ()
identifier[pif_system_query] . identifier[names] = identifier[FieldQuery] (
identifier[extract_as] = literal[string] ,
identifier[filter] =[ identifier[Filter] ( identifier[equal] = identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[self] . identifier[_get_list] ( identifier[name] )])
identifier[pif_system_query] . identifier[chemical_formula] = identifier[ChemicalFieldQuery] (
identifier[extract_as] = literal[string] ,
identifier[filter] =[ identifier[ChemicalFilter] ( identifier[equal] = identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[self] . identifier[_get_list] ( identifier[chemical_formula] )])
identifier[pif_system_query] . identifier[references] = identifier[ReferenceQuery] ( identifier[doi] = identifier[FieldQuery] (
identifier[extract_as] = literal[string] ,
identifier[filter] =[ identifier[Filter] ( identifier[equal] = identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[self] . identifier[_get_list] ( identifier[reference_doi] )]))
identifier[property_name_query] = identifier[FieldQuery] (
identifier[extract_as] = literal[string] ,
identifier[filter] =[ identifier[Filter] ( identifier[equal] = identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[self] . identifier[_get_list] ( identifier[property_name] )])
identifier[property_units_query] = identifier[FieldQuery] (
identifier[extract_as] = literal[string] ,
identifier[filter] =[ identifier[Filter] ( identifier[equal] = identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[self] . identifier[_get_list] ( identifier[property_units] )])
identifier[property_value_query] = identifier[FieldQuery] (
identifier[extract_as] = literal[string] ,
identifier[filter] =[])
keyword[for] identifier[i] keyword[in] identifier[self] . identifier[_get_list] ( identifier[property_value] ):
identifier[property_value_query] . identifier[filter] . identifier[append] ( identifier[Filter] ( identifier[equal] = identifier[i] ))
keyword[if] identifier[property_min] keyword[is] keyword[not] keyword[None] keyword[or] identifier[property_max] keyword[is] keyword[not] keyword[None] :
identifier[property_value_query] . identifier[filter] . identifier[append] ( identifier[Filter] ( identifier[min] = identifier[property_min] , identifier[max] = identifier[property_max] ))
identifier[pif_system_query] . identifier[properties] = identifier[PropertyQuery] (
identifier[name] = identifier[property_name_query] ,
identifier[value] = identifier[property_value_query] ,
identifier[units] = identifier[property_units_query] )
identifier[dataset_query] = identifier[list] ()
keyword[if] identifier[include_datasets] :
identifier[dataset_query] . identifier[append] ( identifier[DatasetQuery] ( identifier[logic] = literal[string] , identifier[id] =[ identifier[Filter] ( identifier[equal] = identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[include_datasets] ]))
keyword[if] identifier[exclude_datasets] :
identifier[dataset_query] . identifier[append] ( identifier[DatasetQuery] ( identifier[logic] = literal[string] , identifier[id] =[ identifier[Filter] ( identifier[equal] = identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[exclude_datasets] ]))
identifier[pif_system_returning_query] = identifier[PifSystemReturningQuery] (
identifier[query] = identifier[DataQuery] (
identifier[system] = identifier[pif_system_query] ,
identifier[dataset] = identifier[dataset_query] ),
identifier[from_index] = identifier[from_index] ,
identifier[size] = identifier[size] ,
identifier[score_relevance] = keyword[True] )
keyword[return] identifier[pif_system_returning_query] | def generate_simple_chemical_query(self, name=None, chemical_formula=None, property_name=None, property_value=None, property_min=None, property_max=None, property_units=None, reference_doi=None, include_datasets=[], exclude_datasets=[], from_index=None, size=None):
"""
This method generates a :class:`PifSystemReturningQuery` object using the
supplied arguments. All arguments that accept lists have logical OR's on the queries that they generate.
This means that, for example, simple_chemical_search(name=['A', 'B']) will match records that have name
equal to 'A' or 'B'.
Results will be pulled into the extracted field of the :class:`PifSearchHit` objects that are returned. The
name will appear under the key "name", chemical formula under "chemical_formula", property name under
"property_name", value of the property under "property_value", units of the property under "property_units",
and reference DOI under "reference_doi".
This method is only meant for execution of very simple queries. More complex queries must use the search method
that accepts a :class:`PifSystemReturningQuery` object.
:param name: One or more strings with the names of the chemical system to match.
:type name: str or list of str
:param chemical_formula: One or more strings with the chemical formulas to match.
:type chemical_formula: str or list of str
:param property_name: One or more strings with the names of the property to match.
:type property_name: str or list of str
:param property_value: One or more strings or numbers with the exact values to match.
:type property_value: str or int or float or list of str or int or float
:param property_min: A single string or number with the minimum value to match.
:type property_min: str or int or float
:param property_max: A single string or number with the maximum value to match.
:type property_max: str or int or float
:param property_units: One or more strings with the property units to match.
:type property_units: str or list of str
:param reference_doi: One or more strings with the DOI to match.
:type reference_doin: str or list of str
:param include_datasets: One or more integers with dataset IDs to match.
:type include_datasets: int or list of int
:param exclude_datasets: One or more integers with dataset IDs that must not match.
:type exclude_datasets: int or list of int
:param from_index: Index of the first record to match.
:type from_index: int
:param size: Total number of records to return.
:type size: int
:return: A query to to be submitted with the pif_search method
:rtype: :class:`PifSystemReturningQuery`
"""
pif_system_query = PifSystemQuery()
pif_system_query.names = FieldQuery(extract_as='name', filter=[Filter(equal=i) for i in self._get_list(name)])
pif_system_query.chemical_formula = ChemicalFieldQuery(extract_as='chemical_formula', filter=[ChemicalFilter(equal=i) for i in self._get_list(chemical_formula)])
pif_system_query.references = ReferenceQuery(doi=FieldQuery(extract_as='reference_doi', filter=[Filter(equal=i) for i in self._get_list(reference_doi)]))
# Generate the parts of the property query
property_name_query = FieldQuery(extract_as='property_name', filter=[Filter(equal=i) for i in self._get_list(property_name)])
property_units_query = FieldQuery(extract_as='property_units', filter=[Filter(equal=i) for i in self._get_list(property_units)])
property_value_query = FieldQuery(extract_as='property_value', filter=[])
for i in self._get_list(property_value):
property_value_query.filter.append(Filter(equal=i)) # depends on [control=['for'], data=['i']]
if property_min is not None or property_max is not None:
property_value_query.filter.append(Filter(min=property_min, max=property_max)) # depends on [control=['if'], data=[]]
# Generate the full property query
pif_system_query.properties = PropertyQuery(name=property_name_query, value=property_value_query, units=property_units_query)
# Generate the dataset query
dataset_query = list()
if include_datasets:
dataset_query.append(DatasetQuery(logic='MUST', id=[Filter(equal=i) for i in include_datasets])) # depends on [control=['if'], data=[]]
if exclude_datasets:
dataset_query.append(DatasetQuery(logic='MUST_NOT', id=[Filter(equal=i) for i in exclude_datasets])) # depends on [control=['if'], data=[]]
# Run the query
pif_system_returning_query = PifSystemReturningQuery(query=DataQuery(system=pif_system_query, dataset=dataset_query), from_index=from_index, size=size, score_relevance=True)
return pif_system_returning_query |
def safe_filter(error_output=''):
"""
A safe filter decorator only raising errors when ``THUMBNAIL_DEBUG`` is
``True`` otherwise returning ``error_output``.
"""
def inner(f):
@wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as err:
if sorl_settings.THUMBNAIL_DEBUG:
raise
logger.error('Thumbnail filter failed: %s' % str(err),
exc_info=sys.exc_info())
return error_output
return wrapper
return inner | def function[safe_filter, parameter[error_output]]:
constant[
A safe filter decorator only raising errors when ``THUMBNAIL_DEBUG`` is
``True`` otherwise returning ``error_output``.
]
def function[inner, parameter[f]]:
def function[wrapper, parameter[]]:
<ast.Try object at 0x7da1b20b5180>
return[name[wrapper]]
return[name[inner]] | keyword[def] identifier[safe_filter] ( identifier[error_output] = literal[string] ):
literal[string]
keyword[def] identifier[inner] ( identifier[f] ):
@ identifier[wraps] ( identifier[f] )
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
keyword[try] :
keyword[return] identifier[f] (* identifier[args] ,** identifier[kwargs] )
keyword[except] identifier[Exception] keyword[as] identifier[err] :
keyword[if] identifier[sorl_settings] . identifier[THUMBNAIL_DEBUG] :
keyword[raise]
identifier[logger] . identifier[error] ( literal[string] % identifier[str] ( identifier[err] ),
identifier[exc_info] = identifier[sys] . identifier[exc_info] ())
keyword[return] identifier[error_output]
keyword[return] identifier[wrapper]
keyword[return] identifier[inner] | def safe_filter(error_output=''):
"""
A safe filter decorator only raising errors when ``THUMBNAIL_DEBUG`` is
``True`` otherwise returning ``error_output``.
"""
def inner(f):
@wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs) # depends on [control=['try'], data=[]]
except Exception as err:
if sorl_settings.THUMBNAIL_DEBUG:
raise # depends on [control=['if'], data=[]]
logger.error('Thumbnail filter failed: %s' % str(err), exc_info=sys.exc_info())
return error_output # depends on [control=['except'], data=['err']]
return wrapper
return inner |
def rescale_array_to_z1z2(array, z1z2=(-1.0, 1.0)):
"""Rescale the values in a numpy array to the [z1,z2] interval.
The transformation is carried out following the relation
array_rs = b_flux * array - c_flux
as explained in Appendix B1 of Cardiel (2009, MNRAS, 396, 680)
Parameters
----------
array : numpy array
Numpy array to be rescaled.
z1z2 : tuple, floats
Minimum and maximum values in the returned array.
Returns
-------
array_rs : numpy array
Array with rescaled values.
coef_rs : tuple, floats
Coefficients b_flux and c_flux employed in the rescaling
operation.
"""
if type(array) is not np.ndarray:
raise ValueError("array=" + str(array) + " must be a numpy.ndarray")
array_min = array.min()
array_max = array.max()
z1, z2 = z1z2
delta = array_max - array_min
b_flux = (z2 - z1) / delta
c_flux = (z2 * array_min - z1 * array_max) / delta
array_rs = b_flux * array - c_flux
return array_rs, (b_flux, c_flux) | def function[rescale_array_to_z1z2, parameter[array, z1z2]]:
constant[Rescale the values in a numpy array to the [z1,z2] interval.
The transformation is carried out following the relation
array_rs = b_flux * array - c_flux
as explained in Appendix B1 of Cardiel (2009, MNRAS, 396, 680)
Parameters
----------
array : numpy array
Numpy array to be rescaled.
z1z2 : tuple, floats
Minimum and maximum values in the returned array.
Returns
-------
array_rs : numpy array
Array with rescaled values.
coef_rs : tuple, floats
Coefficients b_flux and c_flux employed in the rescaling
operation.
]
if compare[call[name[type], parameter[name[array]]] is_not name[np].ndarray] begin[:]
<ast.Raise object at 0x7da20c6e5750>
variable[array_min] assign[=] call[name[array].min, parameter[]]
variable[array_max] assign[=] call[name[array].max, parameter[]]
<ast.Tuple object at 0x7da20c6e4a90> assign[=] name[z1z2]
variable[delta] assign[=] binary_operation[name[array_max] - name[array_min]]
variable[b_flux] assign[=] binary_operation[binary_operation[name[z2] - name[z1]] / name[delta]]
variable[c_flux] assign[=] binary_operation[binary_operation[binary_operation[name[z2] * name[array_min]] - binary_operation[name[z1] * name[array_max]]] / name[delta]]
variable[array_rs] assign[=] binary_operation[binary_operation[name[b_flux] * name[array]] - name[c_flux]]
return[tuple[[<ast.Name object at 0x7da20c7c9930>, <ast.Tuple object at 0x7da20c7cb820>]]] | keyword[def] identifier[rescale_array_to_z1z2] ( identifier[array] , identifier[z1z2] =(- literal[int] , literal[int] )):
literal[string]
keyword[if] identifier[type] ( identifier[array] ) keyword[is] keyword[not] identifier[np] . identifier[ndarray] :
keyword[raise] identifier[ValueError] ( literal[string] + identifier[str] ( identifier[array] )+ literal[string] )
identifier[array_min] = identifier[array] . identifier[min] ()
identifier[array_max] = identifier[array] . identifier[max] ()
identifier[z1] , identifier[z2] = identifier[z1z2]
identifier[delta] = identifier[array_max] - identifier[array_min]
identifier[b_flux] =( identifier[z2] - identifier[z1] )/ identifier[delta]
identifier[c_flux] =( identifier[z2] * identifier[array_min] - identifier[z1] * identifier[array_max] )/ identifier[delta]
identifier[array_rs] = identifier[b_flux] * identifier[array] - identifier[c_flux]
keyword[return] identifier[array_rs] ,( identifier[b_flux] , identifier[c_flux] ) | def rescale_array_to_z1z2(array, z1z2=(-1.0, 1.0)):
"""Rescale the values in a numpy array to the [z1,z2] interval.
The transformation is carried out following the relation
array_rs = b_flux * array - c_flux
as explained in Appendix B1 of Cardiel (2009, MNRAS, 396, 680)
Parameters
----------
array : numpy array
Numpy array to be rescaled.
z1z2 : tuple, floats
Minimum and maximum values in the returned array.
Returns
-------
array_rs : numpy array
Array with rescaled values.
coef_rs : tuple, floats
Coefficients b_flux and c_flux employed in the rescaling
operation.
"""
if type(array) is not np.ndarray:
raise ValueError('array=' + str(array) + ' must be a numpy.ndarray') # depends on [control=['if'], data=[]]
array_min = array.min()
array_max = array.max()
(z1, z2) = z1z2
delta = array_max - array_min
b_flux = (z2 - z1) / delta
c_flux = (z2 * array_min - z1 * array_max) / delta
array_rs = b_flux * array - c_flux
return (array_rs, (b_flux, c_flux)) |
def ndb_put(self, entity):
"""Like put(), but for NDB entities."""
assert ndb is not None and isinstance(entity, ndb.Model)
self.ndb_puts.append(entity) | def function[ndb_put, parameter[self, entity]]:
constant[Like put(), but for NDB entities.]
assert[<ast.BoolOp object at 0x7da20e9546d0>]
call[name[self].ndb_puts.append, parameter[name[entity]]] | keyword[def] identifier[ndb_put] ( identifier[self] , identifier[entity] ):
literal[string]
keyword[assert] identifier[ndb] keyword[is] keyword[not] keyword[None] keyword[and] identifier[isinstance] ( identifier[entity] , identifier[ndb] . identifier[Model] )
identifier[self] . identifier[ndb_puts] . identifier[append] ( identifier[entity] ) | def ndb_put(self, entity):
"""Like put(), but for NDB entities."""
assert ndb is not None and isinstance(entity, ndb.Model)
self.ndb_puts.append(entity) |
def rangify(number_list):
"""Assumes the list is sorted."""
if not number_list:
return number_list
ranges = []
range_start = prev_num = number_list[0]
for num in number_list[1:]:
if num != (prev_num + 1):
ranges.append((range_start, prev_num))
range_start = num
prev_num = num
ranges.append((range_start, prev_num))
return ranges | def function[rangify, parameter[number_list]]:
constant[Assumes the list is sorted.]
if <ast.UnaryOp object at 0x7da1b0f3eec0> begin[:]
return[name[number_list]]
variable[ranges] assign[=] list[[]]
variable[range_start] assign[=] call[name[number_list]][constant[0]]
for taget[name[num]] in starred[call[name[number_list]][<ast.Slice object at 0x7da1b0f3e470>]] begin[:]
if compare[name[num] not_equal[!=] binary_operation[name[prev_num] + constant[1]]] begin[:]
call[name[ranges].append, parameter[tuple[[<ast.Name object at 0x7da1b0f3d570>, <ast.Name object at 0x7da1b0f3c910>]]]]
variable[range_start] assign[=] name[num]
variable[prev_num] assign[=] name[num]
call[name[ranges].append, parameter[tuple[[<ast.Name object at 0x7da1b0f3fca0>, <ast.Name object at 0x7da1b0f3e6b0>]]]]
return[name[ranges]] | keyword[def] identifier[rangify] ( identifier[number_list] ):
literal[string]
keyword[if] keyword[not] identifier[number_list] :
keyword[return] identifier[number_list]
identifier[ranges] =[]
identifier[range_start] = identifier[prev_num] = identifier[number_list] [ literal[int] ]
keyword[for] identifier[num] keyword[in] identifier[number_list] [ literal[int] :]:
keyword[if] identifier[num] !=( identifier[prev_num] + literal[int] ):
identifier[ranges] . identifier[append] (( identifier[range_start] , identifier[prev_num] ))
identifier[range_start] = identifier[num]
identifier[prev_num] = identifier[num]
identifier[ranges] . identifier[append] (( identifier[range_start] , identifier[prev_num] ))
keyword[return] identifier[ranges] | def rangify(number_list):
"""Assumes the list is sorted."""
if not number_list:
return number_list # depends on [control=['if'], data=[]]
ranges = []
range_start = prev_num = number_list[0]
for num in number_list[1:]:
if num != prev_num + 1:
ranges.append((range_start, prev_num))
range_start = num # depends on [control=['if'], data=['num']]
prev_num = num # depends on [control=['for'], data=['num']]
ranges.append((range_start, prev_num))
return ranges |
def _remove_debug_only(self):
"""Iterate through each handler removing the invalid dictConfig key of
debug_only.
"""
LOGGER.debug('Removing debug only from handlers')
for handler in self.config[self.HANDLERS]:
if self.DEBUG_ONLY in self.config[self.HANDLERS][handler]:
del self.config[self.HANDLERS][handler][self.DEBUG_ONLY] | def function[_remove_debug_only, parameter[self]]:
constant[Iterate through each handler removing the invalid dictConfig key of
debug_only.
]
call[name[LOGGER].debug, parameter[constant[Removing debug only from handlers]]]
for taget[name[handler]] in starred[call[name[self].config][name[self].HANDLERS]] begin[:]
if compare[name[self].DEBUG_ONLY in call[call[name[self].config][name[self].HANDLERS]][name[handler]]] begin[:]
<ast.Delete object at 0x7da207f00760> | keyword[def] identifier[_remove_debug_only] ( identifier[self] ):
literal[string]
identifier[LOGGER] . identifier[debug] ( literal[string] )
keyword[for] identifier[handler] keyword[in] identifier[self] . identifier[config] [ identifier[self] . identifier[HANDLERS] ]:
keyword[if] identifier[self] . identifier[DEBUG_ONLY] keyword[in] identifier[self] . identifier[config] [ identifier[self] . identifier[HANDLERS] ][ identifier[handler] ]:
keyword[del] identifier[self] . identifier[config] [ identifier[self] . identifier[HANDLERS] ][ identifier[handler] ][ identifier[self] . identifier[DEBUG_ONLY] ] | def _remove_debug_only(self):
"""Iterate through each handler removing the invalid dictConfig key of
debug_only.
"""
LOGGER.debug('Removing debug only from handlers')
for handler in self.config[self.HANDLERS]:
if self.DEBUG_ONLY in self.config[self.HANDLERS][handler]:
del self.config[self.HANDLERS][handler][self.DEBUG_ONLY] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['handler']] |
def select(cls, **context):
"""
Selects records for the class based on the inputted \
options. If no db is specified, then the current \
global database will be used. If the inflated flag is specified, then \
the results will be inflated to class instances.
If the flag is left as None, then results will be auto-inflated if no
columns were supplied. If columns were supplied, then the results will
not be inflated by default.
If the groupBy flag is specified, then the groupBy columns will be added
to the beginning of the ordered search (to ensure proper paging). See
the Table.groupRecords methods for more details.
:note From version 0.6.0 on, this method now accepts a mutable
keyword dictionary of values. You can supply any member
value for either the <orb.LookupOptions> or
<orb.Context>, as well as the keyword 'lookup' to
an instance of <orb.LookupOptions> and 'context' for
an instance of the <orb.Context>
:return [ <cls>, .. ] || { <variant> grp: <variant> result, .. }
"""
rset_type = getattr(cls, 'Collection', orb.Collection)
return rset_type(model=cls, **context) | def function[select, parameter[cls]]:
constant[
Selects records for the class based on the inputted options. If no db is specified, then the current global database will be used. If the inflated flag is specified, then the results will be inflated to class instances.
If the flag is left as None, then results will be auto-inflated if no
columns were supplied. If columns were supplied, then the results will
not be inflated by default.
If the groupBy flag is specified, then the groupBy columns will be added
to the beginning of the ordered search (to ensure proper paging). See
the Table.groupRecords methods for more details.
:note From version 0.6.0 on, this method now accepts a mutable
keyword dictionary of values. You can supply any member
value for either the <orb.LookupOptions> or
<orb.Context>, as well as the keyword 'lookup' to
an instance of <orb.LookupOptions> and 'context' for
an instance of the <orb.Context>
:return [ <cls>, .. ] || { <variant> grp: <variant> result, .. }
]
variable[rset_type] assign[=] call[name[getattr], parameter[name[cls], constant[Collection], name[orb].Collection]]
return[call[name[rset_type], parameter[]]] | keyword[def] identifier[select] ( identifier[cls] ,** identifier[context] ):
literal[string]
identifier[rset_type] = identifier[getattr] ( identifier[cls] , literal[string] , identifier[orb] . identifier[Collection] )
keyword[return] identifier[rset_type] ( identifier[model] = identifier[cls] ,** identifier[context] ) | def select(cls, **context):
"""
Selects records for the class based on the inputted options. If no db is specified, then the current global database will be used. If the inflated flag is specified, then the results will be inflated to class instances.
If the flag is left as None, then results will be auto-inflated if no
columns were supplied. If columns were supplied, then the results will
not be inflated by default.
If the groupBy flag is specified, then the groupBy columns will be added
to the beginning of the ordered search (to ensure proper paging). See
the Table.groupRecords methods for more details.
:note From version 0.6.0 on, this method now accepts a mutable
keyword dictionary of values. You can supply any member
value for either the <orb.LookupOptions> or
<orb.Context>, as well as the keyword 'lookup' to
an instance of <orb.LookupOptions> and 'context' for
an instance of the <orb.Context>
:return [ <cls>, .. ] || { <variant> grp: <variant> result, .. }
"""
rset_type = getattr(cls, 'Collection', orb.Collection)
return rset_type(model=cls, **context) |
def timer(name, reservoir_type="uniform", *reservoir_args, **reservoir_kwargs):
"""
Time-measuring context manager: the time spent in the wrapped block
if measured and added to the named metric.
"""
hmetric = get_or_create_histogram(name, reservoir_type, *reservoir_args, **reservoir_kwargs)
t1 = time.time()
yield
t2 = time.time()
hmetric.notify(t2 - t1) | def function[timer, parameter[name, reservoir_type]]:
constant[
Time-measuring context manager: the time spent in the wrapped block
if measured and added to the named metric.
]
variable[hmetric] assign[=] call[name[get_or_create_histogram], parameter[name[name], name[reservoir_type], <ast.Starred object at 0x7da1b0ce42e0>]]
variable[t1] assign[=] call[name[time].time, parameter[]]
<ast.Yield object at 0x7da1b0ce6f50>
variable[t2] assign[=] call[name[time].time, parameter[]]
call[name[hmetric].notify, parameter[binary_operation[name[t2] - name[t1]]]] | keyword[def] identifier[timer] ( identifier[name] , identifier[reservoir_type] = literal[string] ,* identifier[reservoir_args] ,** identifier[reservoir_kwargs] ):
literal[string]
identifier[hmetric] = identifier[get_or_create_histogram] ( identifier[name] , identifier[reservoir_type] ,* identifier[reservoir_args] ,** identifier[reservoir_kwargs] )
identifier[t1] = identifier[time] . identifier[time] ()
keyword[yield]
identifier[t2] = identifier[time] . identifier[time] ()
identifier[hmetric] . identifier[notify] ( identifier[t2] - identifier[t1] ) | def timer(name, reservoir_type='uniform', *reservoir_args, **reservoir_kwargs):
"""
Time-measuring context manager: the time spent in the wrapped block
if measured and added to the named metric.
"""
hmetric = get_or_create_histogram(name, reservoir_type, *reservoir_args, **reservoir_kwargs)
t1 = time.time()
yield
t2 = time.time()
hmetric.notify(t2 - t1) |
def get_case(word, correction):
"""
Best guess of intended case.
manchester => manchester
chilton => Chilton
AAvTech => AAvTech
THe => The
imho => IMHO
"""
if word.istitle():
return correction.title()
if word.isupper():
return correction.upper()
if correction == word and not word.islower():
return word
if len(word) > 2 and word[:2].isupper():
return correction.title()
if not known_as_lower([correction]): #expensive
try:
return CASE_MAPPED[correction]
except KeyError:
pass
return correction | def function[get_case, parameter[word, correction]]:
constant[
Best guess of intended case.
manchester => manchester
chilton => Chilton
AAvTech => AAvTech
THe => The
imho => IMHO
]
if call[name[word].istitle, parameter[]] begin[:]
return[call[name[correction].title, parameter[]]]
if call[name[word].isupper, parameter[]] begin[:]
return[call[name[correction].upper, parameter[]]]
if <ast.BoolOp object at 0x7da20c76dae0> begin[:]
return[name[word]]
if <ast.BoolOp object at 0x7da20c76c160> begin[:]
return[call[name[correction].title, parameter[]]]
if <ast.UnaryOp object at 0x7da20c7959f0> begin[:]
<ast.Try object at 0x7da20c794af0>
return[name[correction]] | keyword[def] identifier[get_case] ( identifier[word] , identifier[correction] ):
literal[string]
keyword[if] identifier[word] . identifier[istitle] ():
keyword[return] identifier[correction] . identifier[title] ()
keyword[if] identifier[word] . identifier[isupper] ():
keyword[return] identifier[correction] . identifier[upper] ()
keyword[if] identifier[correction] == identifier[word] keyword[and] keyword[not] identifier[word] . identifier[islower] ():
keyword[return] identifier[word]
keyword[if] identifier[len] ( identifier[word] )> literal[int] keyword[and] identifier[word] [: literal[int] ]. identifier[isupper] ():
keyword[return] identifier[correction] . identifier[title] ()
keyword[if] keyword[not] identifier[known_as_lower] ([ identifier[correction] ]):
keyword[try] :
keyword[return] identifier[CASE_MAPPED] [ identifier[correction] ]
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[return] identifier[correction] | def get_case(word, correction):
"""
Best guess of intended case.
manchester => manchester
chilton => Chilton
AAvTech => AAvTech
THe => The
imho => IMHO
"""
if word.istitle():
return correction.title() # depends on [control=['if'], data=[]]
if word.isupper():
return correction.upper() # depends on [control=['if'], data=[]]
if correction == word and (not word.islower()):
return word # depends on [control=['if'], data=[]]
if len(word) > 2 and word[:2].isupper():
return correction.title() # depends on [control=['if'], data=[]]
if not known_as_lower([correction]): #expensive
try:
return CASE_MAPPED[correction] # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return correction |
def expand(string, vars, local_vars={}):
"""Expand a string containing $vars as Ninja would.
Note: doesn't handle the full Ninja variable syntax, but it's enough
to make configure.py's use of it work.
"""
def exp(m):
var = m.group(1)
if var == '$':
return '$'
return local_vars.get(var, vars.get(var, ''))
return re.sub(r'\$(\$|\w*)', exp, string) | def function[expand, parameter[string, vars, local_vars]]:
constant[Expand a string containing $vars as Ninja would.
Note: doesn't handle the full Ninja variable syntax, but it's enough
to make configure.py's use of it work.
]
def function[exp, parameter[m]]:
variable[var] assign[=] call[name[m].group, parameter[constant[1]]]
if compare[name[var] equal[==] constant[$]] begin[:]
return[constant[$]]
return[call[name[local_vars].get, parameter[name[var], call[name[vars].get, parameter[name[var], constant[]]]]]]
return[call[name[re].sub, parameter[constant[\$(\$|\w*)], name[exp], name[string]]]] | keyword[def] identifier[expand] ( identifier[string] , identifier[vars] , identifier[local_vars] ={}):
literal[string]
keyword[def] identifier[exp] ( identifier[m] ):
identifier[var] = identifier[m] . identifier[group] ( literal[int] )
keyword[if] identifier[var] == literal[string] :
keyword[return] literal[string]
keyword[return] identifier[local_vars] . identifier[get] ( identifier[var] , identifier[vars] . identifier[get] ( identifier[var] , literal[string] ))
keyword[return] identifier[re] . identifier[sub] ( literal[string] , identifier[exp] , identifier[string] ) | def expand(string, vars, local_vars={}):
"""Expand a string containing $vars as Ninja would.
Note: doesn't handle the full Ninja variable syntax, but it's enough
to make configure.py's use of it work.
"""
def exp(m):
var = m.group(1)
if var == '$':
return '$' # depends on [control=['if'], data=[]]
return local_vars.get(var, vars.get(var, ''))
return re.sub('\\$(\\$|\\w*)', exp, string) |
def rm_raw(ctx, dataset, kwargs):
"removes the raw unprocessed data"
kwargs = parse_kwargs(kwargs)
data(dataset, **ctx.obj).rm_raw(**kwargs) | def function[rm_raw, parameter[ctx, dataset, kwargs]]:
constant[removes the raw unprocessed data]
variable[kwargs] assign[=] call[name[parse_kwargs], parameter[name[kwargs]]]
call[call[name[data], parameter[name[dataset]]].rm_raw, parameter[]] | keyword[def] identifier[rm_raw] ( identifier[ctx] , identifier[dataset] , identifier[kwargs] ):
literal[string]
identifier[kwargs] = identifier[parse_kwargs] ( identifier[kwargs] )
identifier[data] ( identifier[dataset] ,** identifier[ctx] . identifier[obj] ). identifier[rm_raw] (** identifier[kwargs] ) | def rm_raw(ctx, dataset, kwargs):
"""removes the raw unprocessed data"""
kwargs = parse_kwargs(kwargs)
data(dataset, **ctx.obj).rm_raw(**kwargs) |
def _patch_redirect(session):
# type: (requests.Session) -> None
"""Whether redirect policy should be applied based on status code.
HTTP spec says that on 301/302 not HEAD/GET, should NOT redirect.
But requests does, to follow browser more than spec
https://github.com/requests/requests/blob/f6e13ccfc4b50dc458ee374e5dba347205b9a2da/requests/sessions.py#L305-L314
This patches "requests" to be more HTTP compliant.
Note that this is super dangerous, since technically this is not public API.
"""
def enforce_http_spec(resp, request):
if resp.status_code in (301, 302) and \
request.method not in ['GET', 'HEAD']:
return False
return True
redirect_logic = session.resolve_redirects
def wrapped_redirect(resp, req, **kwargs):
attempt = enforce_http_spec(resp, req)
return redirect_logic(resp, req, **kwargs) if attempt else []
wrapped_redirect.is_msrest_patched = True # type: ignore
session.resolve_redirects = wrapped_redirect | def function[_patch_redirect, parameter[session]]:
constant[Whether redirect policy should be applied based on status code.
HTTP spec says that on 301/302 not HEAD/GET, should NOT redirect.
But requests does, to follow browser more than spec
https://github.com/requests/requests/blob/f6e13ccfc4b50dc458ee374e5dba347205b9a2da/requests/sessions.py#L305-L314
This patches "requests" to be more HTTP compliant.
Note that this is super dangerous, since technically this is not public API.
]
def function[enforce_http_spec, parameter[resp, request]]:
if <ast.BoolOp object at 0x7da18f58f0d0> begin[:]
return[constant[False]]
return[constant[True]]
variable[redirect_logic] assign[=] name[session].resolve_redirects
def function[wrapped_redirect, parameter[resp, req]]:
variable[attempt] assign[=] call[name[enforce_http_spec], parameter[name[resp], name[req]]]
return[<ast.IfExp object at 0x7da18c4cd7e0>]
name[wrapped_redirect].is_msrest_patched assign[=] constant[True]
name[session].resolve_redirects assign[=] name[wrapped_redirect] | keyword[def] identifier[_patch_redirect] ( identifier[session] ):
literal[string]
keyword[def] identifier[enforce_http_spec] ( identifier[resp] , identifier[request] ):
keyword[if] identifier[resp] . identifier[status_code] keyword[in] ( literal[int] , literal[int] ) keyword[and] identifier[request] . identifier[method] keyword[not] keyword[in] [ literal[string] , literal[string] ]:
keyword[return] keyword[False]
keyword[return] keyword[True]
identifier[redirect_logic] = identifier[session] . identifier[resolve_redirects]
keyword[def] identifier[wrapped_redirect] ( identifier[resp] , identifier[req] ,** identifier[kwargs] ):
identifier[attempt] = identifier[enforce_http_spec] ( identifier[resp] , identifier[req] )
keyword[return] identifier[redirect_logic] ( identifier[resp] , identifier[req] ,** identifier[kwargs] ) keyword[if] identifier[attempt] keyword[else] []
identifier[wrapped_redirect] . identifier[is_msrest_patched] = keyword[True]
identifier[session] . identifier[resolve_redirects] = identifier[wrapped_redirect] | def _patch_redirect(session):
# type: (requests.Session) -> None
'Whether redirect policy should be applied based on status code.\n\n HTTP spec says that on 301/302 not HEAD/GET, should NOT redirect.\n But requests does, to follow browser more than spec\n https://github.com/requests/requests/blob/f6e13ccfc4b50dc458ee374e5dba347205b9a2da/requests/sessions.py#L305-L314\n\n This patches "requests" to be more HTTP compliant.\n\n Note that this is super dangerous, since technically this is not public API.\n '
def enforce_http_spec(resp, request):
if resp.status_code in (301, 302) and request.method not in ['GET', 'HEAD']:
return False # depends on [control=['if'], data=[]]
return True
redirect_logic = session.resolve_redirects
def wrapped_redirect(resp, req, **kwargs):
attempt = enforce_http_spec(resp, req)
return redirect_logic(resp, req, **kwargs) if attempt else []
wrapped_redirect.is_msrest_patched = True # type: ignore
session.resolve_redirects = wrapped_redirect |
def on_uninstall(self):
"""Uninstalls the editor extension from the editor."""
self._on_close = True
self.enabled = False
self._editor = None | def function[on_uninstall, parameter[self]]:
constant[Uninstalls the editor extension from the editor.]
name[self]._on_close assign[=] constant[True]
name[self].enabled assign[=] constant[False]
name[self]._editor assign[=] constant[None] | keyword[def] identifier[on_uninstall] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_on_close] = keyword[True]
identifier[self] . identifier[enabled] = keyword[False]
identifier[self] . identifier[_editor] = keyword[None] | def on_uninstall(self):
"""Uninstalls the editor extension from the editor."""
self._on_close = True
self.enabled = False
self._editor = None |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.