code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def add_key(self, key, first=False):
"""Adds the given key to this row.
:param key: Key to be added to this row.
:param first: BOolean flag that indicates if key is added at the beginning or at the end.
"""
if first:
self.keys = [key] + self.keys
else:
self.keys.append(key)
if isinstance(key, VSpaceKey):
self.space = key | def function[add_key, parameter[self, key, first]]:
constant[Adds the given key to this row.
:param key: Key to be added to this row.
:param first: BOolean flag that indicates if key is added at the beginning or at the end.
]
if name[first] begin[:]
name[self].keys assign[=] binary_operation[list[[<ast.Name object at 0x7da20c7c88b0>]] + name[self].keys]
if call[name[isinstance], parameter[name[key], name[VSpaceKey]]] begin[:]
name[self].space assign[=] name[key] | keyword[def] identifier[add_key] ( identifier[self] , identifier[key] , identifier[first] = keyword[False] ):
literal[string]
keyword[if] identifier[first] :
identifier[self] . identifier[keys] =[ identifier[key] ]+ identifier[self] . identifier[keys]
keyword[else] :
identifier[self] . identifier[keys] . identifier[append] ( identifier[key] )
keyword[if] identifier[isinstance] ( identifier[key] , identifier[VSpaceKey] ):
identifier[self] . identifier[space] = identifier[key] | def add_key(self, key, first=False):
"""Adds the given key to this row.
:param key: Key to be added to this row.
:param first: BOolean flag that indicates if key is added at the beginning or at the end.
"""
if first:
self.keys = [key] + self.keys # depends on [control=['if'], data=[]]
else:
self.keys.append(key)
if isinstance(key, VSpaceKey):
self.space = key # depends on [control=['if'], data=[]] |
def get_applicable_options(self, options: Dict[str, Dict[str, Any]]):
"""
Returns the options that are applicable to this particular converter, from the full map of options.
It first uses 'get_id_for_options()' to know the id of this parser, and then simply extracts the contents of
the options corresponding to this id, or returns an empty dict().
:param options: a dictionary converter_id > options
:return:
"""
return get_options_for_id(options, self.get_id_for_options()) | def function[get_applicable_options, parameter[self, options]]:
constant[
Returns the options that are applicable to this particular converter, from the full map of options.
It first uses 'get_id_for_options()' to know the id of this parser, and then simply extracts the contents of
the options corresponding to this id, or returns an empty dict().
:param options: a dictionary converter_id > options
:return:
]
return[call[name[get_options_for_id], parameter[name[options], call[name[self].get_id_for_options, parameter[]]]]] | keyword[def] identifier[get_applicable_options] ( identifier[self] , identifier[options] : identifier[Dict] [ identifier[str] , identifier[Dict] [ identifier[str] , identifier[Any] ]]):
literal[string]
keyword[return] identifier[get_options_for_id] ( identifier[options] , identifier[self] . identifier[get_id_for_options] ()) | def get_applicable_options(self, options: Dict[str, Dict[str, Any]]):
"""
Returns the options that are applicable to this particular converter, from the full map of options.
It first uses 'get_id_for_options()' to know the id of this parser, and then simply extracts the contents of
the options corresponding to this id, or returns an empty dict().
:param options: a dictionary converter_id > options
:return:
"""
return get_options_for_id(options, self.get_id_for_options()) |
def consume(self, routingKey, msg):
"""
Consumer for this (CaptureData) class. Gets the data sent from yieldMetricsValue and
sends it to the storage backends.
"""
build_data = msg['build_data']
builder_info = yield self.master.data.get(("builders", build_data['builderid']))
if self._builder_name_matches(builder_info) and self._data_name == msg['data_name']:
try:
ret_val = self._callback(msg['post_data'])
except Exception as e:
raise CaptureCallbackError("CaptureData failed for build %s of builder %s."
" Exception generated: %s with message %s"
% (build_data['number'], builder_info['name'],
type(e).__name__, str(e)))
post_data = ret_val
series_name = '%s-%s' % (builder_info['name'], self._data_name)
context = self._defaultContext(build_data, builder_info['name'])
yield self._store(post_data, series_name, context) | def function[consume, parameter[self, routingKey, msg]]:
constant[
Consumer for this (CaptureData) class. Gets the data sent from yieldMetricsValue and
sends it to the storage backends.
]
variable[build_data] assign[=] call[name[msg]][constant[build_data]]
variable[builder_info] assign[=] <ast.Yield object at 0x7da18c4cd2a0>
if <ast.BoolOp object at 0x7da18c4cca30> begin[:]
<ast.Try object at 0x7da1b21e28f0>
variable[post_data] assign[=] name[ret_val]
variable[series_name] assign[=] binary_operation[constant[%s-%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da2044c1b10>, <ast.Attribute object at 0x7da2044c0820>]]]
variable[context] assign[=] call[name[self]._defaultContext, parameter[name[build_data], call[name[builder_info]][constant[name]]]]
<ast.Yield object at 0x7da2044c2230> | keyword[def] identifier[consume] ( identifier[self] , identifier[routingKey] , identifier[msg] ):
literal[string]
identifier[build_data] = identifier[msg] [ literal[string] ]
identifier[builder_info] = keyword[yield] identifier[self] . identifier[master] . identifier[data] . identifier[get] (( literal[string] , identifier[build_data] [ literal[string] ]))
keyword[if] identifier[self] . identifier[_builder_name_matches] ( identifier[builder_info] ) keyword[and] identifier[self] . identifier[_data_name] == identifier[msg] [ literal[string] ]:
keyword[try] :
identifier[ret_val] = identifier[self] . identifier[_callback] ( identifier[msg] [ literal[string] ])
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[CaptureCallbackError] ( literal[string]
literal[string]
%( identifier[build_data] [ literal[string] ], identifier[builder_info] [ literal[string] ],
identifier[type] ( identifier[e] ). identifier[__name__] , identifier[str] ( identifier[e] )))
identifier[post_data] = identifier[ret_val]
identifier[series_name] = literal[string] %( identifier[builder_info] [ literal[string] ], identifier[self] . identifier[_data_name] )
identifier[context] = identifier[self] . identifier[_defaultContext] ( identifier[build_data] , identifier[builder_info] [ literal[string] ])
keyword[yield] identifier[self] . identifier[_store] ( identifier[post_data] , identifier[series_name] , identifier[context] ) | def consume(self, routingKey, msg):
"""
Consumer for this (CaptureData) class. Gets the data sent from yieldMetricsValue and
sends it to the storage backends.
"""
build_data = msg['build_data']
builder_info = (yield self.master.data.get(('builders', build_data['builderid'])))
if self._builder_name_matches(builder_info) and self._data_name == msg['data_name']:
try:
ret_val = self._callback(msg['post_data']) # depends on [control=['try'], data=[]]
except Exception as e:
raise CaptureCallbackError('CaptureData failed for build %s of builder %s. Exception generated: %s with message %s' % (build_data['number'], builder_info['name'], type(e).__name__, str(e))) # depends on [control=['except'], data=['e']]
post_data = ret_val
series_name = '%s-%s' % (builder_info['name'], self._data_name)
context = self._defaultContext(build_data, builder_info['name'])
yield self._store(post_data, series_name, context) # depends on [control=['if'], data=[]] |
def schema(self):
"""
The generated budget data package schema for this resource.
If the resource has any fields that do not conform to the
provided specification this will raise a
NotABudgetDataPackageException.
"""
if self.headers is None:
raise exceptions.NoResourceLoadedException(
'Resource must be loaded to find schema')
try:
fields = self.specification.get('fields', {})
parsed = {
'primaryKey': 'id',
'fields': [{
'name': header,
'type': fields[header]['type'],
'description': fields[header]['description']
} for header in self.headers]
}
except KeyError:
raise exceptions.NotABudgetDataPackageException(
'Includes other fields than the Budget Data Package fields')
return parsed | def function[schema, parameter[self]]:
constant[
The generated budget data package schema for this resource.
If the resource has any fields that do not conform to the
provided specification this will raise a
NotABudgetDataPackageException.
]
if compare[name[self].headers is constant[None]] begin[:]
<ast.Raise object at 0x7da20c990070>
<ast.Try object at 0x7da20c76c5b0>
return[name[parsed]] | keyword[def] identifier[schema] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[headers] keyword[is] keyword[None] :
keyword[raise] identifier[exceptions] . identifier[NoResourceLoadedException] (
literal[string] )
keyword[try] :
identifier[fields] = identifier[self] . identifier[specification] . identifier[get] ( literal[string] ,{})
identifier[parsed] ={
literal[string] : literal[string] ,
literal[string] :[{
literal[string] : identifier[header] ,
literal[string] : identifier[fields] [ identifier[header] ][ literal[string] ],
literal[string] : identifier[fields] [ identifier[header] ][ literal[string] ]
} keyword[for] identifier[header] keyword[in] identifier[self] . identifier[headers] ]
}
keyword[except] identifier[KeyError] :
keyword[raise] identifier[exceptions] . identifier[NotABudgetDataPackageException] (
literal[string] )
keyword[return] identifier[parsed] | def schema(self):
"""
The generated budget data package schema for this resource.
If the resource has any fields that do not conform to the
provided specification this will raise a
NotABudgetDataPackageException.
"""
if self.headers is None:
raise exceptions.NoResourceLoadedException('Resource must be loaded to find schema') # depends on [control=['if'], data=[]]
try:
fields = self.specification.get('fields', {})
parsed = {'primaryKey': 'id', 'fields': [{'name': header, 'type': fields[header]['type'], 'description': fields[header]['description']} for header in self.headers]} # depends on [control=['try'], data=[]]
except KeyError:
raise exceptions.NotABudgetDataPackageException('Includes other fields than the Budget Data Package fields') # depends on [control=['except'], data=[]]
return parsed |
def create_customer(self, name, gender, address, phone, email, vat=None, fax=None,
company_name=None, additional_data=None, extension_additional_data=None):
"""Create a customer"""
response = self.request(E.createCustomerRequest(
E.companyName(company_name),
E.vat(vat),
E.name(
E.initials(name.initials),
E.firstName(name.first_name),
E.prefix(name.prefix or ''),
E.lastName(name.last_name),
),
E.gender(gender),
_get_phone_xml('phone', phone),
_get_phone_xml('fax', fax),
E.address(
E.street(address.street),
E.number(address.number),
E.suffix(address.suffix or ''),
E.zipcode(address.zipcode),
E.city(address.city),
E.state(address.state or ''),
E.country(address.country),
),
E.email(email),
_additional_data(additional_data),
_extension_additional_data(extension_additional_data),
))
return str(response.data.handle) | def function[create_customer, parameter[self, name, gender, address, phone, email, vat, fax, company_name, additional_data, extension_additional_data]]:
constant[Create a customer]
variable[response] assign[=] call[name[self].request, parameter[call[name[E].createCustomerRequest, parameter[call[name[E].companyName, parameter[name[company_name]]], call[name[E].vat, parameter[name[vat]]], call[name[E].name, parameter[call[name[E].initials, parameter[name[name].initials]], call[name[E].firstName, parameter[name[name].first_name]], call[name[E].prefix, parameter[<ast.BoolOp object at 0x7da204623e50>]], call[name[E].lastName, parameter[name[name].last_name]]]], call[name[E].gender, parameter[name[gender]]], call[name[_get_phone_xml], parameter[constant[phone], name[phone]]], call[name[_get_phone_xml], parameter[constant[fax], name[fax]]], call[name[E].address, parameter[call[name[E].street, parameter[name[address].street]], call[name[E].number, parameter[name[address].number]], call[name[E].suffix, parameter[<ast.BoolOp object at 0x7da204620a00>]], call[name[E].zipcode, parameter[name[address].zipcode]], call[name[E].city, parameter[name[address].city]], call[name[E].state, parameter[<ast.BoolOp object at 0x7da204621d50>]], call[name[E].country, parameter[name[address].country]]]], call[name[E].email, parameter[name[email]]], call[name[_additional_data], parameter[name[additional_data]]], call[name[_extension_additional_data], parameter[name[extension_additional_data]]]]]]]
return[call[name[str], parameter[name[response].data.handle]]] | keyword[def] identifier[create_customer] ( identifier[self] , identifier[name] , identifier[gender] , identifier[address] , identifier[phone] , identifier[email] , identifier[vat] = keyword[None] , identifier[fax] = keyword[None] ,
identifier[company_name] = keyword[None] , identifier[additional_data] = keyword[None] , identifier[extension_additional_data] = keyword[None] ):
literal[string]
identifier[response] = identifier[self] . identifier[request] ( identifier[E] . identifier[createCustomerRequest] (
identifier[E] . identifier[companyName] ( identifier[company_name] ),
identifier[E] . identifier[vat] ( identifier[vat] ),
identifier[E] . identifier[name] (
identifier[E] . identifier[initials] ( identifier[name] . identifier[initials] ),
identifier[E] . identifier[firstName] ( identifier[name] . identifier[first_name] ),
identifier[E] . identifier[prefix] ( identifier[name] . identifier[prefix] keyword[or] literal[string] ),
identifier[E] . identifier[lastName] ( identifier[name] . identifier[last_name] ),
),
identifier[E] . identifier[gender] ( identifier[gender] ),
identifier[_get_phone_xml] ( literal[string] , identifier[phone] ),
identifier[_get_phone_xml] ( literal[string] , identifier[fax] ),
identifier[E] . identifier[address] (
identifier[E] . identifier[street] ( identifier[address] . identifier[street] ),
identifier[E] . identifier[number] ( identifier[address] . identifier[number] ),
identifier[E] . identifier[suffix] ( identifier[address] . identifier[suffix] keyword[or] literal[string] ),
identifier[E] . identifier[zipcode] ( identifier[address] . identifier[zipcode] ),
identifier[E] . identifier[city] ( identifier[address] . identifier[city] ),
identifier[E] . identifier[state] ( identifier[address] . identifier[state] keyword[or] literal[string] ),
identifier[E] . identifier[country] ( identifier[address] . identifier[country] ),
),
identifier[E] . identifier[email] ( identifier[email] ),
identifier[_additional_data] ( identifier[additional_data] ),
identifier[_extension_additional_data] ( identifier[extension_additional_data] ),
))
keyword[return] identifier[str] ( identifier[response] . identifier[data] . identifier[handle] ) | def create_customer(self, name, gender, address, phone, email, vat=None, fax=None, company_name=None, additional_data=None, extension_additional_data=None):
"""Create a customer"""
response = self.request(E.createCustomerRequest(E.companyName(company_name), E.vat(vat), E.name(E.initials(name.initials), E.firstName(name.first_name), E.prefix(name.prefix or ''), E.lastName(name.last_name)), E.gender(gender), _get_phone_xml('phone', phone), _get_phone_xml('fax', fax), E.address(E.street(address.street), E.number(address.number), E.suffix(address.suffix or ''), E.zipcode(address.zipcode), E.city(address.city), E.state(address.state or ''), E.country(address.country)), E.email(email), _additional_data(additional_data), _extension_additional_data(extension_additional_data)))
return str(response.data.handle) |
def multi_request(self, reqs_and_resps, threads=20, **kwargs):
"""Use a threadpool to send multiple requests in parallel.
:param reqs_and_resps: iterable of req_and_resp tuples
:param raw_body_only: applied to every request call
:param opt: applies to every request call
:param threads: number of concurrent workers to use
:return: a list of [(pyswagger.io.Request, pyswagger.io.Response), ...]
"""
opt = kwargs.pop('opt', {})
raw_body_only = kwargs.pop('raw_body_only', self.raw_body_only)
# you shouldnt need more than 100, 20 is probably fine in most cases
threads = max(min(threads, 100), 1)
def _multi_shim(req_and_resp):
"""Shim self.request to also return the original request."""
return req_and_resp[0], self.request(
req_and_resp,
raw_body_only=raw_body_only,
opt=opt,
)
results = []
with ThreadPoolExecutor(max_workers=threads) as pool:
for result in pool.map(_multi_shim, reqs_and_resps):
results.append(result)
return results | def function[multi_request, parameter[self, reqs_and_resps, threads]]:
constant[Use a threadpool to send multiple requests in parallel.
:param reqs_and_resps: iterable of req_and_resp tuples
:param raw_body_only: applied to every request call
:param opt: applies to every request call
:param threads: number of concurrent workers to use
:return: a list of [(pyswagger.io.Request, pyswagger.io.Response), ...]
]
variable[opt] assign[=] call[name[kwargs].pop, parameter[constant[opt], dictionary[[], []]]]
variable[raw_body_only] assign[=] call[name[kwargs].pop, parameter[constant[raw_body_only], name[self].raw_body_only]]
variable[threads] assign[=] call[name[max], parameter[call[name[min], parameter[name[threads], constant[100]]], constant[1]]]
def function[_multi_shim, parameter[req_and_resp]]:
constant[Shim self.request to also return the original request.]
return[tuple[[<ast.Subscript object at 0x7da20c9901c0>, <ast.Call object at 0x7da20c992b30>]]]
variable[results] assign[=] list[[]]
with call[name[ThreadPoolExecutor], parameter[]] begin[:]
for taget[name[result]] in starred[call[name[pool].map, parameter[name[_multi_shim], name[reqs_and_resps]]]] begin[:]
call[name[results].append, parameter[name[result]]]
return[name[results]] | keyword[def] identifier[multi_request] ( identifier[self] , identifier[reqs_and_resps] , identifier[threads] = literal[int] ,** identifier[kwargs] ):
literal[string]
identifier[opt] = identifier[kwargs] . identifier[pop] ( literal[string] ,{})
identifier[raw_body_only] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[raw_body_only] )
identifier[threads] = identifier[max] ( identifier[min] ( identifier[threads] , literal[int] ), literal[int] )
keyword[def] identifier[_multi_shim] ( identifier[req_and_resp] ):
literal[string]
keyword[return] identifier[req_and_resp] [ literal[int] ], identifier[self] . identifier[request] (
identifier[req_and_resp] ,
identifier[raw_body_only] = identifier[raw_body_only] ,
identifier[opt] = identifier[opt] ,
)
identifier[results] =[]
keyword[with] identifier[ThreadPoolExecutor] ( identifier[max_workers] = identifier[threads] ) keyword[as] identifier[pool] :
keyword[for] identifier[result] keyword[in] identifier[pool] . identifier[map] ( identifier[_multi_shim] , identifier[reqs_and_resps] ):
identifier[results] . identifier[append] ( identifier[result] )
keyword[return] identifier[results] | def multi_request(self, reqs_and_resps, threads=20, **kwargs):
"""Use a threadpool to send multiple requests in parallel.
:param reqs_and_resps: iterable of req_and_resp tuples
:param raw_body_only: applied to every request call
:param opt: applies to every request call
:param threads: number of concurrent workers to use
:return: a list of [(pyswagger.io.Request, pyswagger.io.Response), ...]
"""
opt = kwargs.pop('opt', {})
raw_body_only = kwargs.pop('raw_body_only', self.raw_body_only) # you shouldnt need more than 100, 20 is probably fine in most cases
threads = max(min(threads, 100), 1)
def _multi_shim(req_and_resp):
"""Shim self.request to also return the original request."""
return (req_and_resp[0], self.request(req_and_resp, raw_body_only=raw_body_only, opt=opt))
results = []
with ThreadPoolExecutor(max_workers=threads) as pool:
for result in pool.map(_multi_shim, reqs_and_resps):
results.append(result) # depends on [control=['for'], data=['result']] # depends on [control=['with'], data=['pool']]
return results |
def uniontypes(type_: Type[Any]) -> Set[Type[Any]]:
'''
Returns the types of a Union.
Raises ValueError if the argument is not a Union
and AttributeError when running on an unsupported
Python version.
'''
if not is_union(type_):
raise ValueError('Not a Union: ' + str(type_))
if hasattr(type_, '__args__'):
return set(type_.__args__)
elif hasattr(type_, '__union_params__'):
return set(type_.__union_params__)
raise AttributeError('The typing API for this Python version is unknown') | def function[uniontypes, parameter[type_]]:
constant[
Returns the types of a Union.
Raises ValueError if the argument is not a Union
and AttributeError when running on an unsupported
Python version.
]
if <ast.UnaryOp object at 0x7da20c9921a0> begin[:]
<ast.Raise object at 0x7da20c9938b0>
if call[name[hasattr], parameter[name[type_], constant[__args__]]] begin[:]
return[call[name[set], parameter[name[type_].__args__]]]
<ast.Raise object at 0x7da20c992ce0> | keyword[def] identifier[uniontypes] ( identifier[type_] : identifier[Type] [ identifier[Any] ])-> identifier[Set] [ identifier[Type] [ identifier[Any] ]]:
literal[string]
keyword[if] keyword[not] identifier[is_union] ( identifier[type_] ):
keyword[raise] identifier[ValueError] ( literal[string] + identifier[str] ( identifier[type_] ))
keyword[if] identifier[hasattr] ( identifier[type_] , literal[string] ):
keyword[return] identifier[set] ( identifier[type_] . identifier[__args__] )
keyword[elif] identifier[hasattr] ( identifier[type_] , literal[string] ):
keyword[return] identifier[set] ( identifier[type_] . identifier[__union_params__] )
keyword[raise] identifier[AttributeError] ( literal[string] ) | def uniontypes(type_: Type[Any]) -> Set[Type[Any]]:
"""
Returns the types of a Union.
Raises ValueError if the argument is not a Union
and AttributeError when running on an unsupported
Python version.
"""
if not is_union(type_):
raise ValueError('Not a Union: ' + str(type_)) # depends on [control=['if'], data=[]]
if hasattr(type_, '__args__'):
return set(type_.__args__) # depends on [control=['if'], data=[]]
elif hasattr(type_, '__union_params__'):
return set(type_.__union_params__) # depends on [control=['if'], data=[]]
raise AttributeError('The typing API for this Python version is unknown') |
def account_setup(remote, token, resp):
"""Perform additional setup after user have been logged in.
:param remote: The remote application.
:param token: The token value.
:param resp: The response.
"""
with db.session.begin_nested():
# Retrieve ORCID from response.
orcid = resp.get('orcid')
full_name = resp.get('name')
# Set ORCID in extra_data.
token.remote_account.extra_data = {
'orcid': orcid,
'full_name': full_name,
}
user = token.remote_account.user
# Create user <-> external id link.
oauth_link_external_id(user, {'id': orcid, 'method': 'orcid'}) | def function[account_setup, parameter[remote, token, resp]]:
constant[Perform additional setup after user have been logged in.
:param remote: The remote application.
:param token: The token value.
:param resp: The response.
]
with call[name[db].session.begin_nested, parameter[]] begin[:]
variable[orcid] assign[=] call[name[resp].get, parameter[constant[orcid]]]
variable[full_name] assign[=] call[name[resp].get, parameter[constant[name]]]
name[token].remote_account.extra_data assign[=] dictionary[[<ast.Constant object at 0x7da18dc997e0>, <ast.Constant object at 0x7da18dc9bb50>], [<ast.Name object at 0x7da18dc9a950>, <ast.Name object at 0x7da18dc9af50>]]
variable[user] assign[=] name[token].remote_account.user
call[name[oauth_link_external_id], parameter[name[user], dictionary[[<ast.Constant object at 0x7da20c6aac80>, <ast.Constant object at 0x7da20c6aadd0>], [<ast.Name object at 0x7da20c6aa890>, <ast.Constant object at 0x7da20c6aa0b0>]]]] | keyword[def] identifier[account_setup] ( identifier[remote] , identifier[token] , identifier[resp] ):
literal[string]
keyword[with] identifier[db] . identifier[session] . identifier[begin_nested] ():
identifier[orcid] = identifier[resp] . identifier[get] ( literal[string] )
identifier[full_name] = identifier[resp] . identifier[get] ( literal[string] )
identifier[token] . identifier[remote_account] . identifier[extra_data] ={
literal[string] : identifier[orcid] ,
literal[string] : identifier[full_name] ,
}
identifier[user] = identifier[token] . identifier[remote_account] . identifier[user]
identifier[oauth_link_external_id] ( identifier[user] ,{ literal[string] : identifier[orcid] , literal[string] : literal[string] }) | def account_setup(remote, token, resp):
"""Perform additional setup after user have been logged in.
:param remote: The remote application.
:param token: The token value.
:param resp: The response.
"""
with db.session.begin_nested():
# Retrieve ORCID from response.
orcid = resp.get('orcid')
full_name = resp.get('name')
# Set ORCID in extra_data.
token.remote_account.extra_data = {'orcid': orcid, 'full_name': full_name}
user = token.remote_account.user
# Create user <-> external id link.
oauth_link_external_id(user, {'id': orcid, 'method': 'orcid'}) # depends on [control=['with'], data=[]] |
def get(name, rc_file='~/.odoorpcrc'):
"""Return the session configuration identified by `name`
from the `rc_file` file.
>>> import odoorpc
>>> from pprint import pprint as pp
>>> pp(odoorpc.session.get('foo')) # doctest: +SKIP
{'database': 'db_name',
'host': 'localhost',
'passwd': 'password',
'port': 8069,
'protocol': 'jsonrpc',
'timeout': 120,
'type': 'ODOO',
'user': 'admin'}
.. doctest::
:hide:
>>> import odoorpc
>>> session = '%s_session' % DB
>>> odoo.save(session)
>>> data = odoorpc.session.get(session)
>>> data['host'] == HOST
True
>>> data['protocol'] == PROTOCOL
True
>>> data['port'] == int(PORT)
True
>>> data['database'] == DB
True
>>> data['user'] == USER
True
>>> data['passwd'] == PWD
True
>>> data['type'] == 'ODOO'
True
:raise: `ValueError` (wrong session name)
"""
conf = ConfigParser()
conf.read([os.path.expanduser(rc_file)])
if not conf.has_section(name):
raise ValueError(
"'%s' session does not exist in %s" % (name, rc_file))
return {
'type': conf.get(name, 'type'),
'host': conf.get(name, 'host'),
'protocol': conf.get(name, 'protocol'),
'port': conf.getint(name, 'port'),
'timeout': conf.getfloat(name, 'timeout'),
'user': conf.get(name, 'user'),
'passwd': conf.get(name, 'passwd'),
'database': conf.get(name, 'database'),
} | def function[get, parameter[name, rc_file]]:
constant[Return the session configuration identified by `name`
from the `rc_file` file.
>>> import odoorpc
>>> from pprint import pprint as pp
>>> pp(odoorpc.session.get('foo')) # doctest: +SKIP
{'database': 'db_name',
'host': 'localhost',
'passwd': 'password',
'port': 8069,
'protocol': 'jsonrpc',
'timeout': 120,
'type': 'ODOO',
'user': 'admin'}
.. doctest::
:hide:
>>> import odoorpc
>>> session = '%s_session' % DB
>>> odoo.save(session)
>>> data = odoorpc.session.get(session)
>>> data['host'] == HOST
True
>>> data['protocol'] == PROTOCOL
True
>>> data['port'] == int(PORT)
True
>>> data['database'] == DB
True
>>> data['user'] == USER
True
>>> data['passwd'] == PWD
True
>>> data['type'] == 'ODOO'
True
:raise: `ValueError` (wrong session name)
]
variable[conf] assign[=] call[name[ConfigParser], parameter[]]
call[name[conf].read, parameter[list[[<ast.Call object at 0x7da18ede4b20>]]]]
if <ast.UnaryOp object at 0x7da18ede4160> begin[:]
<ast.Raise object at 0x7da18ede68f0>
return[dictionary[[<ast.Constant object at 0x7da18ede6620>, <ast.Constant object at 0x7da18ede67a0>, <ast.Constant object at 0x7da18ede7af0>, <ast.Constant object at 0x7da18ede56c0>, <ast.Constant object at 0x7da18ede6d10>, <ast.Constant object at 0x7da18ede5150>, <ast.Constant object at 0x7da18ede4940>, <ast.Constant object at 0x7da18ede5ff0>], [<ast.Call object at 0x7da18ede7f10>, <ast.Call object at 0x7da18ede4f40>, <ast.Call object at 0x7da18ede4a00>, <ast.Call object at 0x7da18ede58d0>, <ast.Call object at 0x7da18ede6b90>, <ast.Call object at 0x7da18ede6b00>, <ast.Call object at 0x7da18ede64d0>, <ast.Call object at 0x7da18ede6ec0>]]] | keyword[def] identifier[get] ( identifier[name] , identifier[rc_file] = literal[string] ):
literal[string]
identifier[conf] = identifier[ConfigParser] ()
identifier[conf] . identifier[read] ([ identifier[os] . identifier[path] . identifier[expanduser] ( identifier[rc_file] )])
keyword[if] keyword[not] identifier[conf] . identifier[has_section] ( identifier[name] ):
keyword[raise] identifier[ValueError] (
literal[string] %( identifier[name] , identifier[rc_file] ))
keyword[return] {
literal[string] : identifier[conf] . identifier[get] ( identifier[name] , literal[string] ),
literal[string] : identifier[conf] . identifier[get] ( identifier[name] , literal[string] ),
literal[string] : identifier[conf] . identifier[get] ( identifier[name] , literal[string] ),
literal[string] : identifier[conf] . identifier[getint] ( identifier[name] , literal[string] ),
literal[string] : identifier[conf] . identifier[getfloat] ( identifier[name] , literal[string] ),
literal[string] : identifier[conf] . identifier[get] ( identifier[name] , literal[string] ),
literal[string] : identifier[conf] . identifier[get] ( identifier[name] , literal[string] ),
literal[string] : identifier[conf] . identifier[get] ( identifier[name] , literal[string] ),
} | def get(name, rc_file='~/.odoorpcrc'):
"""Return the session configuration identified by `name`
from the `rc_file` file.
>>> import odoorpc
>>> from pprint import pprint as pp
>>> pp(odoorpc.session.get('foo')) # doctest: +SKIP
{'database': 'db_name',
'host': 'localhost',
'passwd': 'password',
'port': 8069,
'protocol': 'jsonrpc',
'timeout': 120,
'type': 'ODOO',
'user': 'admin'}
.. doctest::
:hide:
>>> import odoorpc
>>> session = '%s_session' % DB
>>> odoo.save(session)
>>> data = odoorpc.session.get(session)
>>> data['host'] == HOST
True
>>> data['protocol'] == PROTOCOL
True
>>> data['port'] == int(PORT)
True
>>> data['database'] == DB
True
>>> data['user'] == USER
True
>>> data['passwd'] == PWD
True
>>> data['type'] == 'ODOO'
True
:raise: `ValueError` (wrong session name)
"""
conf = ConfigParser()
conf.read([os.path.expanduser(rc_file)])
if not conf.has_section(name):
raise ValueError("'%s' session does not exist in %s" % (name, rc_file)) # depends on [control=['if'], data=[]]
return {'type': conf.get(name, 'type'), 'host': conf.get(name, 'host'), 'protocol': conf.get(name, 'protocol'), 'port': conf.getint(name, 'port'), 'timeout': conf.getfloat(name, 'timeout'), 'user': conf.get(name, 'user'), 'passwd': conf.get(name, 'passwd'), 'database': conf.get(name, 'database')} |
def set_contrast_levels(self, contrast_level=0):
    """enhance contrast levels, or use full data range
    according to value of self.panel.conf.contrast_level

    Applies the same percentile-based contrast clipping to both image
    panels (img1_panel / img2_panel) and updates their colormap panels.
    """
    for cmap_panel, img_panel in zip((self.cmap_panels[0], self.cmap_panels[1]),
                                     (self.img1_panel, self.img2_panel)):
        conf = img_panel.conf
        img = img_panel.conf.data
        # None means "no clipping": treat it as the 0th/100th percentile.
        if contrast_level is None:
            contrast_level = 0
        conf.contrast_level = contrast_level
        # Symmetric percentile window, e.g. level=1 -> [1, 99].
        clevels = [contrast_level, 100.0-contrast_level]
        jmin = imin = img.min()
        jmax = imax = img.max()
        # Display the full data range in the entry widgets.
        cmap_panel.imin_val.SetValue('%.4g' % imin)
        cmap_panel.imax_val.SetValue('%.4g' % imax)
        # Clipped display range from the percentile window.
        jmin, jmax = np.percentile(img, clevels)
        conf.int_lo[0] = imin
        conf.int_hi[0] = imax
        # Map the clipped range into colormap units ([0, cmap_range]).
        # NOTE(review): if the image is flat (imax == imin) this divides
        # by zero -- confirm callers never pass constant images.
        conf.cmap_lo[0] = xlo = (jmin-imin)*conf.cmap_range/(imax-imin)
        conf.cmap_hi[0] = xhi = (jmax-imin)*conf.cmap_range/(imax-imin)
        cmap_panel.cmap_hi.SetValue(xhi)
        cmap_panel.cmap_lo.SetValue(xlo)
        cmap_panel.islider_range.SetLabel('Shown: [ %.4g : %.4g ]' % (jmin, jmax))
        cmap_panel.redraw_cmap()
        img_panel.redraw()
constant[enhance contrast levels, or use full data range
according to value of self.panel.conf.contrast_level
]
for taget[tuple[[<ast.Name object at 0x7da20c6a92d0>, <ast.Name object at 0x7da20c6ab190>]]] in starred[call[name[zip], parameter[tuple[[<ast.Subscript object at 0x7da20c6a87f0>, <ast.Subscript object at 0x7da20c6a88b0>]], tuple[[<ast.Attribute object at 0x7da20c6ab730>, <ast.Attribute object at 0x7da20c6a9e10>]]]]] begin[:]
variable[conf] assign[=] name[img_panel].conf
variable[img] assign[=] name[img_panel].conf.data
if compare[name[contrast_level] is constant[None]] begin[:]
variable[contrast_level] assign[=] constant[0]
name[conf].contrast_level assign[=] name[contrast_level]
variable[clevels] assign[=] list[[<ast.Name object at 0x7da20c6a83d0>, <ast.BinOp object at 0x7da20c6a84f0>]]
variable[jmin] assign[=] call[name[img].min, parameter[]]
variable[jmax] assign[=] call[name[img].max, parameter[]]
call[name[cmap_panel].imin_val.SetValue, parameter[binary_operation[constant[%.4g] <ast.Mod object at 0x7da2590d6920> name[imin]]]]
call[name[cmap_panel].imax_val.SetValue, parameter[binary_operation[constant[%.4g] <ast.Mod object at 0x7da2590d6920> name[imax]]]]
<ast.Tuple object at 0x7da20c6ab250> assign[=] call[name[np].percentile, parameter[name[img], name[clevels]]]
call[name[conf].int_lo][constant[0]] assign[=] name[imin]
call[name[conf].int_hi][constant[0]] assign[=] name[imax]
call[name[conf].cmap_lo][constant[0]] assign[=] binary_operation[binary_operation[binary_operation[name[jmin] - name[imin]] * name[conf].cmap_range] / binary_operation[name[imax] - name[imin]]]
call[name[conf].cmap_hi][constant[0]] assign[=] binary_operation[binary_operation[binary_operation[name[jmax] - name[imin]] * name[conf].cmap_range] / binary_operation[name[imax] - name[imin]]]
call[name[cmap_panel].cmap_hi.SetValue, parameter[name[xhi]]]
call[name[cmap_panel].cmap_lo.SetValue, parameter[name[xlo]]]
call[name[cmap_panel].islider_range.SetLabel, parameter[binary_operation[constant[Shown: [ %.4g : %.4g ]] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c6a87c0>, <ast.Name object at 0x7da20c6aa3b0>]]]]]
call[name[cmap_panel].redraw_cmap, parameter[]]
call[name[img_panel].redraw, parameter[]] | keyword[def] identifier[set_contrast_levels] ( identifier[self] , identifier[contrast_level] = literal[int] ):
literal[string]
keyword[for] identifier[cmap_panel] , identifier[img_panel] keyword[in] identifier[zip] (( identifier[self] . identifier[cmap_panels] [ literal[int] ], identifier[self] . identifier[cmap_panels] [ literal[int] ]),
( identifier[self] . identifier[img1_panel] , identifier[self] . identifier[img2_panel] )):
identifier[conf] = identifier[img_panel] . identifier[conf]
identifier[img] = identifier[img_panel] . identifier[conf] . identifier[data]
keyword[if] identifier[contrast_level] keyword[is] keyword[None] :
identifier[contrast_level] = literal[int]
identifier[conf] . identifier[contrast_level] = identifier[contrast_level]
identifier[clevels] =[ identifier[contrast_level] , literal[int] - identifier[contrast_level] ]
identifier[jmin] = identifier[imin] = identifier[img] . identifier[min] ()
identifier[jmax] = identifier[imax] = identifier[img] . identifier[max] ()
identifier[cmap_panel] . identifier[imin_val] . identifier[SetValue] ( literal[string] % identifier[imin] )
identifier[cmap_panel] . identifier[imax_val] . identifier[SetValue] ( literal[string] % identifier[imax] )
identifier[jmin] , identifier[jmax] = identifier[np] . identifier[percentile] ( identifier[img] , identifier[clevels] )
identifier[conf] . identifier[int_lo] [ literal[int] ]= identifier[imin]
identifier[conf] . identifier[int_hi] [ literal[int] ]= identifier[imax]
identifier[conf] . identifier[cmap_lo] [ literal[int] ]= identifier[xlo] =( identifier[jmin] - identifier[imin] )* identifier[conf] . identifier[cmap_range] /( identifier[imax] - identifier[imin] )
identifier[conf] . identifier[cmap_hi] [ literal[int] ]= identifier[xhi] =( identifier[jmax] - identifier[imin] )* identifier[conf] . identifier[cmap_range] /( identifier[imax] - identifier[imin] )
identifier[cmap_panel] . identifier[cmap_hi] . identifier[SetValue] ( identifier[xhi] )
identifier[cmap_panel] . identifier[cmap_lo] . identifier[SetValue] ( identifier[xlo] )
identifier[cmap_panel] . identifier[islider_range] . identifier[SetLabel] ( literal[string] %( identifier[jmin] , identifier[jmax] ))
identifier[cmap_panel] . identifier[redraw_cmap] ()
identifier[img_panel] . identifier[redraw] () | def set_contrast_levels(self, contrast_level=0):
"""enhance contrast levels, or use full data range
according to value of self.panel.conf.contrast_level
"""
for (cmap_panel, img_panel) in zip((self.cmap_panels[0], self.cmap_panels[1]), (self.img1_panel, self.img2_panel)):
conf = img_panel.conf
img = img_panel.conf.data
if contrast_level is None:
contrast_level = 0 # depends on [control=['if'], data=['contrast_level']]
conf.contrast_level = contrast_level
clevels = [contrast_level, 100.0 - contrast_level]
jmin = imin = img.min()
jmax = imax = img.max()
cmap_panel.imin_val.SetValue('%.4g' % imin)
cmap_panel.imax_val.SetValue('%.4g' % imax)
(jmin, jmax) = np.percentile(img, clevels)
conf.int_lo[0] = imin
conf.int_hi[0] = imax
conf.cmap_lo[0] = xlo = (jmin - imin) * conf.cmap_range / (imax - imin)
conf.cmap_hi[0] = xhi = (jmax - imin) * conf.cmap_range / (imax - imin)
cmap_panel.cmap_hi.SetValue(xhi)
cmap_panel.cmap_lo.SetValue(xlo)
cmap_panel.islider_range.SetLabel('Shown: [ %.4g : %.4g ]' % (jmin, jmax))
cmap_panel.redraw_cmap()
img_panel.redraw() # depends on [control=['for'], data=[]] |
def _build_index(maf_strm, ref_spec):
  """Index a MAF genome alignment stream; return the index as a rewound StringIO."""
  # Bind the reference species so IndexedFile can iterate blocks directly.
  block_iter = functools.partial(genome_alignment_iterator,
                                 reference_species=ref_spec)
  indexed = IndexedFile(maf_strm, block_iter,
                        JustInTimeGenomeAlignmentBlock.build_hash)
  out_strm = StringIO.StringIO()
  indexed.write_index(out_strm)
  # Rewind so callers can read the index from the beginning.
  out_strm.seek(0)
  return out_strm
constant[Build an index for a MAF genome alig file and return StringIO of it.]
variable[idx_strm] assign[=] call[name[StringIO].StringIO, parameter[]]
variable[bound_iter] assign[=] call[name[functools].partial, parameter[name[genome_alignment_iterator]]]
variable[hash_func] assign[=] name[JustInTimeGenomeAlignmentBlock].build_hash
variable[idx] assign[=] call[name[IndexedFile], parameter[name[maf_strm], name[bound_iter], name[hash_func]]]
call[name[idx].write_index, parameter[name[idx_strm]]]
call[name[idx_strm].seek, parameter[constant[0]]]
return[name[idx_strm]] | keyword[def] identifier[_build_index] ( identifier[maf_strm] , identifier[ref_spec] ):
literal[string]
identifier[idx_strm] = identifier[StringIO] . identifier[StringIO] ()
identifier[bound_iter] = identifier[functools] . identifier[partial] ( identifier[genome_alignment_iterator] ,
identifier[reference_species] = identifier[ref_spec] )
identifier[hash_func] = identifier[JustInTimeGenomeAlignmentBlock] . identifier[build_hash]
identifier[idx] = identifier[IndexedFile] ( identifier[maf_strm] , identifier[bound_iter] , identifier[hash_func] )
identifier[idx] . identifier[write_index] ( identifier[idx_strm] )
identifier[idx_strm] . identifier[seek] ( literal[int] )
keyword[return] identifier[idx_strm] | def _build_index(maf_strm, ref_spec):
"""Build an index for a MAF genome alig file and return StringIO of it."""
idx_strm = StringIO.StringIO()
bound_iter = functools.partial(genome_alignment_iterator, reference_species=ref_spec)
hash_func = JustInTimeGenomeAlignmentBlock.build_hash
idx = IndexedFile(maf_strm, bound_iter, hash_func)
idx.write_index(idx_strm)
idx_strm.seek(0) # seek to the start
return idx_strm |
def set_examples(self, examples):
    """Set the example protos displayed in WIT.

    Args:
      examples: List of example protos.

    Returns:
      self, to enable method chaining.
    """
    self.store('examples', examples)
    if examples:
        first_is_sequence = isinstance(examples[0], tf.train.SequenceExample)
        self.store('are_sequence_examples', first_is_sequence)
    return self
constant[Sets the examples to be displayed in WIT.
Args:
examples: List of example protos.
Returns:
self, in order to enabled method chaining.
]
call[name[self].store, parameter[constant[examples], name[examples]]]
if compare[call[name[len], parameter[name[examples]]] greater[>] constant[0]] begin[:]
call[name[self].store, parameter[constant[are_sequence_examples], call[name[isinstance], parameter[call[name[examples]][constant[0]], name[tf].train.SequenceExample]]]]
return[name[self]] | keyword[def] identifier[set_examples] ( identifier[self] , identifier[examples] ):
literal[string]
identifier[self] . identifier[store] ( literal[string] , identifier[examples] )
keyword[if] identifier[len] ( identifier[examples] )> literal[int] :
identifier[self] . identifier[store] ( literal[string] ,
identifier[isinstance] ( identifier[examples] [ literal[int] ], identifier[tf] . identifier[train] . identifier[SequenceExample] ))
keyword[return] identifier[self] | def set_examples(self, examples):
"""Sets the examples to be displayed in WIT.
Args:
examples: List of example protos.
Returns:
self, in order to enabled method chaining.
"""
self.store('examples', examples)
if len(examples) > 0:
self.store('are_sequence_examples', isinstance(examples[0], tf.train.SequenceExample)) # depends on [control=['if'], data=[]]
return self |
def check_result(state):
    """High level SCT wrapper for checking query results.

    Pipeline: ``lowercase()`` -> ``check_all_columns()`` ->
    ``has_equal_value()``; returns the state produced by
    ``check_all_columns()``.
    """
    checked_state = check_all_columns(lowercase(state))
    has_equal_value(checked_state)
    return checked_state
constant[High level function which wraps other SCTs for checking results.
``check_result()``
* uses ``lowercase()``, then
* runs ``check_all_columns()`` on the state produced by ``lowercase()``, then
* runs ``has_equal_value`` on the state produced by ``check_all_columns()``.
]
variable[state1] assign[=] call[name[lowercase], parameter[name[state]]]
variable[state2] assign[=] call[name[check_all_columns], parameter[name[state1]]]
call[name[has_equal_value], parameter[name[state2]]]
return[name[state2]] | keyword[def] identifier[check_result] ( identifier[state] ):
literal[string]
identifier[state1] = identifier[lowercase] ( identifier[state] )
identifier[state2] = identifier[check_all_columns] ( identifier[state1] )
identifier[has_equal_value] ( identifier[state2] )
keyword[return] identifier[state2] | def check_result(state):
"""High level function which wraps other SCTs for checking results.
``check_result()``
* uses ``lowercase()``, then
* runs ``check_all_columns()`` on the state produced by ``lowercase()``, then
* runs ``has_equal_value`` on the state produced by ``check_all_columns()``.
"""
state1 = lowercase(state)
state2 = check_all_columns(state1)
has_equal_value(state2)
return state2 |
def erase_down (self): # <ESC>[0J -or- <ESC>[J
    '''Erases the screen from the current line down to the bottom of the
    screen.'''
    # First clear from the cursor to the end of the current line...
    self.erase_end_of_line ()
    # ...then blank every row below the cursor (coordinates appear to be
    # 1-based, given the column argument of 1 -- confirm against fill_region).
    self.fill_region (self.cur_r + 1, 1, self.rows, self.cols)
constant[Erases the screen from the current line down to the bottom of the
screen.]
call[name[self].erase_end_of_line, parameter[]]
call[name[self].fill_region, parameter[binary_operation[name[self].cur_r + constant[1]], constant[1], name[self].rows, name[self].cols]] | keyword[def] identifier[erase_down] ( identifier[self] ):
literal[string]
identifier[self] . identifier[erase_end_of_line] ()
identifier[self] . identifier[fill_region] ( identifier[self] . identifier[cur_r] + literal[int] , literal[int] , identifier[self] . identifier[rows] , identifier[self] . identifier[cols] ) | def erase_down(self): # <ESC>[0J -or- <ESC>[J
'Erases the screen from the current line down to the bottom of the\n screen.'
self.erase_end_of_line()
self.fill_region(self.cur_r + 1, 1, self.rows, self.cols) |
def _is_image(self, var):
"""Return True if variable is a PIL.Image image"""
try:
from PIL import Image
return isinstance(var, Image.Image)
except:
return False | def function[_is_image, parameter[self, var]]:
constant[Return True if variable is a PIL.Image image]
<ast.Try object at 0x7da20e9569b0> | keyword[def] identifier[_is_image] ( identifier[self] , identifier[var] ):
literal[string]
keyword[try] :
keyword[from] identifier[PIL] keyword[import] identifier[Image]
keyword[return] identifier[isinstance] ( identifier[var] , identifier[Image] . identifier[Image] )
keyword[except] :
keyword[return] keyword[False] | def _is_image(self, var):
"""Return True if variable is a PIL.Image image"""
try:
from PIL import Image
return isinstance(var, Image.Image) # depends on [control=['try'], data=[]]
except:
return False # depends on [control=['except'], data=[]] |
def _make_x_title(self):
    """Render the X-axis title, one <text> node per title line."""
    if not self._x_title:
        return
    # Baseline sits below the plot area and the x-axis labels.
    base_y = self.height - self.margin_box.bottom + self._x_labels_height
    center_x = self.margin_box.left + self.view.width / 2
    line_step = self.style.title_font_size + self.spacing
    for line_no, title_line in enumerate(self._x_title, 1):
        node = self.svg.node(
            self.nodes['title'], 'text', class_='title',
            x=center_x,
            y=base_y + line_no * line_step)
        node.text = title_line
constant[Make the X-Axis title]
variable[y] assign[=] binary_operation[binary_operation[name[self].height - name[self].margin_box.bottom] + name[self]._x_labels_height]
if name[self]._x_title begin[:]
for taget[tuple[[<ast.Name object at 0x7da20c991420>, <ast.Name object at 0x7da20c990a60>]]] in starred[call[name[enumerate], parameter[name[self]._x_title, constant[1]]]] begin[:]
variable[text] assign[=] call[name[self].svg.node, parameter[call[name[self].nodes][constant[title]], constant[text]]]
name[text].text assign[=] name[title_line] | keyword[def] identifier[_make_x_title] ( identifier[self] ):
literal[string]
identifier[y] =( identifier[self] . identifier[height] - identifier[self] . identifier[margin_box] . identifier[bottom] + identifier[self] . identifier[_x_labels_height] )
keyword[if] identifier[self] . identifier[_x_title] :
keyword[for] identifier[i] , identifier[title_line] keyword[in] identifier[enumerate] ( identifier[self] . identifier[_x_title] , literal[int] ):
identifier[text] = identifier[self] . identifier[svg] . identifier[node] (
identifier[self] . identifier[nodes] [ literal[string] ],
literal[string] ,
identifier[class_] = literal[string] ,
identifier[x] = identifier[self] . identifier[margin_box] . identifier[left] + identifier[self] . identifier[view] . identifier[width] / literal[int] ,
identifier[y] = identifier[y] + identifier[i] *( identifier[self] . identifier[style] . identifier[title_font_size] + identifier[self] . identifier[spacing] )
)
identifier[text] . identifier[text] = identifier[title_line] | def _make_x_title(self):
"""Make the X-Axis title"""
y = self.height - self.margin_box.bottom + self._x_labels_height
if self._x_title:
for (i, title_line) in enumerate(self._x_title, 1):
text = self.svg.node(self.nodes['title'], 'text', class_='title', x=self.margin_box.left + self.view.width / 2, y=y + i * (self.style.title_font_size + self.spacing))
text.text = title_line # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] |
def discrete_max_likelihood(data, xmin, alpharange=(1.5,3.5), n_alpha=201):
    """
    Return the maximum *value* of the likelihood of the data given an
    input xmin.

    NOTE(review): the original docstring claimed this returns the
    *argument* of the max, but ``np.max`` returns the maximum likelihood
    itself, not the argmax over alpha.
    """
    # Likelihood evaluated on a grid of n_alpha alpha values.
    likelihoods = discrete_likelihood_vector(data, xmin, alpharange=alpharange, n_alpha=n_alpha)
    Lmax = np.max(likelihoods)
    return Lmax
constant[
Returns the *argument* of the max of the likelihood of the data given an input xmin
]
variable[likelihoods] assign[=] call[name[discrete_likelihood_vector], parameter[name[data], name[xmin]]]
variable[Lmax] assign[=] call[name[np].max, parameter[name[likelihoods]]]
return[name[Lmax]] | keyword[def] identifier[discrete_max_likelihood] ( identifier[data] , identifier[xmin] , identifier[alpharange] =( literal[int] , literal[int] ), identifier[n_alpha] = literal[int] ):
literal[string]
identifier[likelihoods] = identifier[discrete_likelihood_vector] ( identifier[data] , identifier[xmin] , identifier[alpharange] = identifier[alpharange] , identifier[n_alpha] = identifier[n_alpha] )
identifier[Lmax] = identifier[np] . identifier[max] ( identifier[likelihoods] )
keyword[return] identifier[Lmax] | def discrete_max_likelihood(data, xmin, alpharange=(1.5, 3.5), n_alpha=201):
"""
Returns the *argument* of the max of the likelihood of the data given an input xmin
"""
likelihoods = discrete_likelihood_vector(data, xmin, alpharange=alpharange, n_alpha=n_alpha)
Lmax = np.max(likelihoods)
return Lmax |
def regex(self, *patterns, **kwargs):
    """
    Search the editor for lines matching the regular expression.

    re.MULTILINE is not currently supported.

    Args:
        \*patterns: Regular expressions to search each line for
        start (int): Index of the first line to search (default 0)
        stop (int): Exclusive index to stop searching at; None (default)
            searches through the last line
        keys_only (bool): Only return keys (line indices)
        flags (re.FLAG): flags passed to re.search

    Returns:
        results (dict): Dictionary of pattern keys, line values (or groups
            - default); a bare list when a single pattern is given
    """
    start = kwargs.pop("start", 0)
    stop = kwargs.pop("stop", None)
    keys_only = kwargs.pop("keys_only", False)
    flags = kwargs.pop("flags", 0)
    results = {pattern: [] for pattern in patterns}
    # BUGFIX: the previous code translated stop=None into -1, so the
    # slice self[start:-1] silently excluded the final line from every
    # search.  Slicing with None runs through the last line, matching
    # the documented default.
    for i, line in enumerate(self[start:stop]):
        for pattern in patterns:
            grps = re.search(pattern, line, flags=flags)
            if grps and keys_only:
                results[pattern].append(i)
            elif grps and grps.groups():
                # Capture groups present: one (index, group) per group.
                for group in grps.groups():
                    results[pattern].append((i, group))
            elif grps:
                # No capture groups: return the whole matching line.
                results[pattern].append((i, line))
    if len(patterns) == 1:
        return results[patterns[0]]
    return results
constant[
Search the editor for lines matching the regular expression.
re.MULTILINE is not currently supported.
Args:
\*patterns: Regular expressions to search each line for
keys_only (bool): Only return keys
flags (re.FLAG): flags passed to re.search
Returns:
results (dict): Dictionary of pattern keys, line values (or groups - default)
]
variable[start] assign[=] call[name[kwargs].pop, parameter[constant[start], constant[0]]]
variable[stop] assign[=] call[name[kwargs].pop, parameter[constant[stop], constant[None]]]
variable[keys_only] assign[=] call[name[kwargs].pop, parameter[constant[keys_only], constant[False]]]
variable[flags] assign[=] call[name[kwargs].pop, parameter[constant[flags], constant[0]]]
variable[results] assign[=] <ast.DictComp object at 0x7da2041da860>
variable[stop] assign[=] <ast.IfExp object at 0x7da2041d95d0>
for taget[tuple[[<ast.Name object at 0x7da2041db790>, <ast.Name object at 0x7da2041d9d20>]]] in starred[call[name[enumerate], parameter[call[name[self]][<ast.Slice object at 0x7da2041db370>]]]] begin[:]
for taget[name[pattern]] in starred[name[patterns]] begin[:]
variable[grps] assign[=] call[name[re].search, parameter[name[pattern], name[line]]]
if <ast.BoolOp object at 0x7da2041d9ba0> begin[:]
call[call[name[results]][name[pattern]].append, parameter[name[i]]]
if compare[call[name[len], parameter[name[patterns]]] equal[==] constant[1]] begin[:]
return[call[name[results]][call[name[patterns]][constant[0]]]]
return[name[results]] | keyword[def] identifier[regex] ( identifier[self] ,* identifier[patterns] ,** identifier[kwargs] ):
literal[string]
identifier[start] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[int] )
identifier[stop] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
identifier[keys_only] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] )
identifier[flags] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[int] )
identifier[results] ={ identifier[pattern] :[] keyword[for] identifier[pattern] keyword[in] identifier[patterns] }
identifier[stop] = identifier[stop] keyword[if] identifier[stop] keyword[is] keyword[not] keyword[None] keyword[else] - literal[int]
keyword[for] identifier[i] , identifier[line] keyword[in] identifier[enumerate] ( identifier[self] [ identifier[start] : identifier[stop] ]):
keyword[for] identifier[pattern] keyword[in] identifier[patterns] :
identifier[grps] = identifier[re] . identifier[search] ( identifier[pattern] , identifier[line] , identifier[flags] = identifier[flags] )
keyword[if] identifier[grps] keyword[and] identifier[keys_only] :
identifier[results] [ identifier[pattern] ]. identifier[append] ( identifier[i] )
keyword[elif] identifier[grps] keyword[and] identifier[grps] . identifier[groups] ():
keyword[for] identifier[group] keyword[in] identifier[grps] . identifier[groups] ():
identifier[results] [ identifier[pattern] ]. identifier[append] (( identifier[i] , identifier[group] ))
keyword[elif] identifier[grps] :
identifier[results] [ identifier[pattern] ]. identifier[append] (( identifier[i] , identifier[line] ))
keyword[if] identifier[len] ( identifier[patterns] )== literal[int] :
keyword[return] identifier[results] [ identifier[patterns] [ literal[int] ]]
keyword[return] identifier[results] | def regex(self, *patterns, **kwargs):
"""
Search the editor for lines matching the regular expression.
re.MULTILINE is not currently supported.
Args:
\\*patterns: Regular expressions to search each line for
keys_only (bool): Only return keys
flags (re.FLAG): flags passed to re.search
Returns:
results (dict): Dictionary of pattern keys, line values (or groups - default)
"""
start = kwargs.pop('start', 0)
stop = kwargs.pop('stop', None)
keys_only = kwargs.pop('keys_only', False)
flags = kwargs.pop('flags', 0)
results = {pattern: [] for pattern in patterns}
stop = stop if stop is not None else -1
for (i, line) in enumerate(self[start:stop]):
for pattern in patterns:
grps = re.search(pattern, line, flags=flags)
if grps and keys_only:
results[pattern].append(i) # depends on [control=['if'], data=[]]
elif grps and grps.groups():
for group in grps.groups():
results[pattern].append((i, group)) # depends on [control=['for'], data=['group']] # depends on [control=['if'], data=[]]
elif grps:
results[pattern].append((i, line)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pattern']] # depends on [control=['for'], data=[]]
if len(patterns) == 1:
return results[patterns[0]] # depends on [control=['if'], data=[]]
return results |
def validate_signature_fragments(
        fragments,
        hash_,
        public_key,
        sponge_type=Kerl,
):
    # type: (Sequence[TryteString], Hash, TryteString, type) -> bool
    """
    Returns whether a sequence of signature fragments is valid.

    :param fragments:
        Sequence of signature fragments (usually
        :py:class:`iota.transaction.Fragment` instances).

    :param hash_:
        Hash used to generate the signature fragments (usually a
        :py:class:`iota.transaction.BundleHash` instance).

    :param public_key:
        The public key value used to verify the signature digest (usually a
        :py:class:`iota.types.Address` instance).

    :param sponge_type:
        The class used to create the cryptographic sponge (i.e., Curl or Kerl).
    """
    # One HASH_LENGTH-sized slot of trits per fragment.
    checksum = [0] * (HASH_LENGTH * len(fragments))
    normalized_hash = normalize(hash_)
    for i, fragment in enumerate(fragments):
        outer_sponge = sponge_type()
        # If there are more than 3 iterations, loop back around to the
        # start.
        normalized_chunk = normalized_hash[i % len(normalized_hash)]
        buffer = []
        for j, hash_trytes in enumerate(fragment.iter_chunks(Hash.LEN)):
            buffer = hash_trytes.as_trits()  # type: List[int]
            inner_sponge = sponge_type()
            # Note the sign flip compared to
            # :py;class:`SignatureFragmentGenerator`: hash each chunk
            # 13 + normalized_chunk[j] times to undo the signing rounds.
            for _ in range(13 + normalized_chunk[j]):
                inner_sponge.reset()
                inner_sponge.absorb(buffer)
                inner_sponge.squeeze(buffer)
            outer_sponge.absorb(buffer)
        # Squeeze this fragment's digest into its checksum slot.
        outer_sponge.squeeze(buffer)
        checksum[i * HASH_LENGTH:(i + 1) * HASH_LENGTH] = buffer
    # Recompute the address/public key from the digests and compare it
    # trit-for-trit with the claimed public key.
    actual_public_key = [0] * HASH_LENGTH
    addy_sponge = sponge_type()
    addy_sponge.absorb(checksum)
    addy_sponge.squeeze(actual_public_key)
    return actual_public_key == public_key.as_trits()
constant[
Returns whether a sequence of signature fragments is valid.
:param fragments:
Sequence of signature fragments (usually
:py:class:`iota.transaction.Fragment` instances).
:param hash_:
Hash used to generate the signature fragments (usually a
:py:class:`iota.transaction.BundleHash` instance).
:param public_key:
The public key value used to verify the signature digest (usually a
:py:class:`iota.types.Address` instance).
:param sponge_type:
The class used to create the cryptographic sponge (i.e., Curl or Kerl).
]
variable[checksum] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b024e290>]] * binary_operation[name[HASH_LENGTH] * call[name[len], parameter[name[fragments]]]]]
variable[normalized_hash] assign[=] call[name[normalize], parameter[name[hash_]]]
for taget[tuple[[<ast.Name object at 0x7da1b024df90>, <ast.Name object at 0x7da1b024c790>]]] in starred[call[name[enumerate], parameter[name[fragments]]]] begin[:]
variable[outer_sponge] assign[=] call[name[sponge_type], parameter[]]
variable[normalized_chunk] assign[=] call[name[normalized_hash]][binary_operation[name[i] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[normalized_hash]]]]]
variable[buffer] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20c7c9090>, <ast.Name object at 0x7da20c7c9750>]]] in starred[call[name[enumerate], parameter[call[name[fragment].iter_chunks, parameter[name[Hash].LEN]]]]] begin[:]
variable[buffer] assign[=] call[name[hash_trytes].as_trits, parameter[]]
variable[inner_sponge] assign[=] call[name[sponge_type], parameter[]]
for taget[name[_]] in starred[call[name[range], parameter[binary_operation[constant[13] + call[name[normalized_chunk]][name[j]]]]]] begin[:]
call[name[inner_sponge].reset, parameter[]]
call[name[inner_sponge].absorb, parameter[name[buffer]]]
call[name[inner_sponge].squeeze, parameter[name[buffer]]]
call[name[outer_sponge].absorb, parameter[name[buffer]]]
call[name[outer_sponge].squeeze, parameter[name[buffer]]]
call[name[checksum]][<ast.Slice object at 0x7da20cabf6a0>] assign[=] name[buffer]
variable[actual_public_key] assign[=] binary_operation[list[[<ast.Constant object at 0x7da20cabc790>]] * name[HASH_LENGTH]]
variable[addy_sponge] assign[=] call[name[sponge_type], parameter[]]
call[name[addy_sponge].absorb, parameter[name[checksum]]]
call[name[addy_sponge].squeeze, parameter[name[actual_public_key]]]
return[compare[name[actual_public_key] equal[==] call[name[public_key].as_trits, parameter[]]]] | keyword[def] identifier[validate_signature_fragments] (
identifier[fragments] ,
identifier[hash_] ,
identifier[public_key] ,
identifier[sponge_type] = identifier[Kerl] ,
):
literal[string]
identifier[checksum] =[ literal[int] ]*( identifier[HASH_LENGTH] * identifier[len] ( identifier[fragments] ))
identifier[normalized_hash] = identifier[normalize] ( identifier[hash_] )
keyword[for] identifier[i] , identifier[fragment] keyword[in] identifier[enumerate] ( identifier[fragments] ):
identifier[outer_sponge] = identifier[sponge_type] ()
identifier[normalized_chunk] = identifier[normalized_hash] [ identifier[i] % identifier[len] ( identifier[normalized_hash] )]
identifier[buffer] =[]
keyword[for] identifier[j] , identifier[hash_trytes] keyword[in] identifier[enumerate] ( identifier[fragment] . identifier[iter_chunks] ( identifier[Hash] . identifier[LEN] )):
identifier[buffer] = identifier[hash_trytes] . identifier[as_trits] ()
identifier[inner_sponge] = identifier[sponge_type] ()
keyword[for] identifier[_] keyword[in] identifier[range] ( literal[int] + identifier[normalized_chunk] [ identifier[j] ]):
identifier[inner_sponge] . identifier[reset] ()
identifier[inner_sponge] . identifier[absorb] ( identifier[buffer] )
identifier[inner_sponge] . identifier[squeeze] ( identifier[buffer] )
identifier[outer_sponge] . identifier[absorb] ( identifier[buffer] )
identifier[outer_sponge] . identifier[squeeze] ( identifier[buffer] )
identifier[checksum] [ identifier[i] * identifier[HASH_LENGTH] :( identifier[i] + literal[int] )* identifier[HASH_LENGTH] ]= identifier[buffer]
identifier[actual_public_key] =[ literal[int] ]* identifier[HASH_LENGTH]
identifier[addy_sponge] = identifier[sponge_type] ()
identifier[addy_sponge] . identifier[absorb] ( identifier[checksum] )
identifier[addy_sponge] . identifier[squeeze] ( identifier[actual_public_key] )
keyword[return] identifier[actual_public_key] == identifier[public_key] . identifier[as_trits] () | def validate_signature_fragments(fragments, hash_, public_key, sponge_type=Kerl):
# type: (Sequence[TryteString], Hash, TryteString, type) -> bool
'\n Returns whether a sequence of signature fragments is valid.\n\n :param fragments:\n Sequence of signature fragments (usually\n :py:class:`iota.transaction.Fragment` instances).\n\n :param hash_:\n Hash used to generate the signature fragments (usually a\n :py:class:`iota.transaction.BundleHash` instance).\n\n :param public_key:\n The public key value used to verify the signature digest (usually a\n :py:class:`iota.types.Address` instance).\n\n :param sponge_type:\n The class used to create the cryptographic sponge (i.e., Curl or Kerl).\n '
checksum = [0] * (HASH_LENGTH * len(fragments))
normalized_hash = normalize(hash_)
for (i, fragment) in enumerate(fragments):
outer_sponge = sponge_type()
# If there are more than 3 iterations, loop back around to the
# start.
normalized_chunk = normalized_hash[i % len(normalized_hash)]
buffer = []
for (j, hash_trytes) in enumerate(fragment.iter_chunks(Hash.LEN)):
buffer = hash_trytes.as_trits() # type: List[int]
inner_sponge = sponge_type()
# Note the sign flip compared to
# :py;class:`SignatureFragmentGenerator`.
for _ in range(13 + normalized_chunk[j]):
inner_sponge.reset()
inner_sponge.absorb(buffer)
inner_sponge.squeeze(buffer) # depends on [control=['for'], data=[]]
outer_sponge.absorb(buffer) # depends on [control=['for'], data=[]]
outer_sponge.squeeze(buffer)
checksum[i * HASH_LENGTH:(i + 1) * HASH_LENGTH] = buffer # depends on [control=['for'], data=[]]
actual_public_key = [0] * HASH_LENGTH
addy_sponge = sponge_type()
addy_sponge.absorb(checksum)
addy_sponge.squeeze(actual_public_key)
return actual_public_key == public_key.as_trits() |
def delete_trigger(self, trigger):
    """Remove from the Alert API the trigger record whose ID matches the one
    of the supplied trigger object, together with all of its related alerts.

    :param trigger: the `pyowm.alertapi30.trigger.Trigger` object to be deleted
    :type trigger: `pyowm.alertapi30.trigger.Trigger`
    :returns: `None` if deletion is successful, an exception otherwise
    """
    # Guard against missing input and non-string IDs before hitting the API.
    assert trigger is not None
    assert isinstance(trigger.id, str), "Value must be a string"
    target_uri = NAMED_TRIGGER_URI % trigger.id
    status, _ = self.http_client.delete(
        target_uri,
        params=dict(appid=self.API_key),
        headers={'Content-Type': 'application/json'})
constant[
Deletes from the Alert API the trigger record identified by the ID of the provided
`pyowm.alertapi30.trigger.Trigger`, along with all related alerts
:param trigger: the `pyowm.alertapi30.trigger.Trigger` object to be deleted
:type trigger: `pyowm.alertapi30.trigger.Trigger`
:returns: `None` if deletion is successful, an exception otherwise
]
assert[compare[name[trigger] is_not constant[None]]]
assert[call[name[isinstance], parameter[name[trigger].id, name[str]]]]
<ast.Tuple object at 0x7da20c6e6860> assign[=] call[name[self].http_client.delete, parameter[binary_operation[name[NAMED_TRIGGER_URI] <ast.Mod object at 0x7da2590d6920> name[trigger].id]]] | keyword[def] identifier[delete_trigger] ( identifier[self] , identifier[trigger] ):
literal[string]
keyword[assert] identifier[trigger] keyword[is] keyword[not] keyword[None]
keyword[assert] identifier[isinstance] ( identifier[trigger] . identifier[id] , identifier[str] ), literal[string]
identifier[status] , identifier[_] = identifier[self] . identifier[http_client] . identifier[delete] (
identifier[NAMED_TRIGGER_URI] % identifier[trigger] . identifier[id] ,
identifier[params] ={ literal[string] : identifier[self] . identifier[API_key] },
identifier[headers] ={ literal[string] : literal[string] }) | def delete_trigger(self, trigger):
"""
Deletes from the Alert API the trigger record identified by the ID of the provided
`pyowm.alertapi30.trigger.Trigger`, along with all related alerts
:param trigger: the `pyowm.alertapi30.trigger.Trigger` object to be deleted
:type trigger: `pyowm.alertapi30.trigger.Trigger`
:returns: `None` if deletion is successful, an exception otherwise
"""
assert trigger is not None
assert isinstance(trigger.id, str), 'Value must be a string'
(status, _) = self.http_client.delete(NAMED_TRIGGER_URI % trigger.id, params={'appid': self.API_key}, headers={'Content-Type': 'application/json'}) |
def einsum_sequence(self, other_arrays, einsum_string=None):
    """
    Evaluate an Einstein summation of this tensor with ``other_arrays``.

    When ``einsum_string`` is not supplied, a subscript string is built so
    that the indices of each successive array are contracted against the
    trailing indices of this tensor.
    """
    if not isinstance(other_arrays, list):
        raise ValueError("other tensors must be list of "
                         "tensors or tensor input")
    arrays = [np.array(entry) for entry in other_arrays]
    if not einsum_string:
        letters = string.ascii_lowercase
        # One lowercase letter per index of this tensor.
        einsum_string = letters[:self.rank]
        ranks = [len(entry.shape) for entry in arrays]
        # Each array reuses the trailing letters of this tensor's subscript,
        # which is what makes those axes contract.
        cursor = self.rank - sum(ranks)
        for rank in ranks:
            einsum_string = einsum_string + ',' + letters[cursor:cursor + rank]
            cursor = cursor + rank
    return np.einsum(einsum_string, self, *arrays)
constant[
Calculates the result of an einstein summation expression
]
if <ast.UnaryOp object at 0x7da18ede6ef0> begin[:]
<ast.Raise object at 0x7da18ede5a20>
variable[other_arrays] assign[=] <ast.ListComp object at 0x7da18ede5c00>
if <ast.UnaryOp object at 0x7da18ede6500> begin[:]
variable[lc] assign[=] name[string].ascii_lowercase
variable[einsum_string] assign[=] call[name[lc]][<ast.Slice object at 0x7da18f723b80>]
variable[other_ranks] assign[=] <ast.ListComp object at 0x7da18f720d00>
variable[idx] assign[=] binary_operation[name[self].rank - call[name[sum], parameter[name[other_ranks]]]]
for taget[name[length]] in starred[name[other_ranks]] begin[:]
<ast.AugAssign object at 0x7da18f720970>
<ast.AugAssign object at 0x7da18f721720>
variable[einsum_args] assign[=] binary_operation[list[[<ast.Name object at 0x7da18f721480>]] + call[name[list], parameter[name[other_arrays]]]]
return[call[name[np].einsum, parameter[name[einsum_string], <ast.Starred object at 0x7da18f722e90>]]] | keyword[def] identifier[einsum_sequence] ( identifier[self] , identifier[other_arrays] , identifier[einsum_string] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[other_arrays] , identifier[list] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[other_arrays] =[ identifier[np] . identifier[array] ( identifier[a] ) keyword[for] identifier[a] keyword[in] identifier[other_arrays] ]
keyword[if] keyword[not] identifier[einsum_string] :
identifier[lc] = identifier[string] . identifier[ascii_lowercase]
identifier[einsum_string] = identifier[lc] [: identifier[self] . identifier[rank] ]
identifier[other_ranks] =[ identifier[len] ( identifier[a] . identifier[shape] ) keyword[for] identifier[a] keyword[in] identifier[other_arrays] ]
identifier[idx] = identifier[self] . identifier[rank] - identifier[sum] ( identifier[other_ranks] )
keyword[for] identifier[length] keyword[in] identifier[other_ranks] :
identifier[einsum_string] += literal[string] + identifier[lc] [ identifier[idx] : identifier[idx] + identifier[length] ]
identifier[idx] += identifier[length]
identifier[einsum_args] =[ identifier[self] ]+ identifier[list] ( identifier[other_arrays] )
keyword[return] identifier[np] . identifier[einsum] ( identifier[einsum_string] ,* identifier[einsum_args] ) | def einsum_sequence(self, other_arrays, einsum_string=None):
"""
Calculates the result of an einstein summation expression
"""
if not isinstance(other_arrays, list):
raise ValueError('other tensors must be list of tensors or tensor input') # depends on [control=['if'], data=[]]
other_arrays = [np.array(a) for a in other_arrays]
if not einsum_string:
lc = string.ascii_lowercase
einsum_string = lc[:self.rank]
other_ranks = [len(a.shape) for a in other_arrays]
idx = self.rank - sum(other_ranks)
for length in other_ranks:
einsum_string += ',' + lc[idx:idx + length]
idx += length # depends on [control=['for'], data=['length']] # depends on [control=['if'], data=[]]
einsum_args = [self] + list(other_arrays)
return np.einsum(einsum_string, *einsum_args) |
def createCompoundFromChecked(self):
    """
    Creates a new compound query from the checked entry list.

    Merges the queries of all checked entries into a single query (joined
    with the currently selected AND/OR joiner), stores it on the first
    checked entry, and removes the other checked entry widgets from the UI.

    :return <orb.QueryCompound>
    """
    checked_entries = self.checkedEntries()
    # Nothing to combine with fewer than two checked entries.
    if len(checked_entries) <= 1:
        return QueryCompound()
    # Suspend repaints while the entry widgets are being rearranged.
    self.setUpdatesEnabled(False)
    joiner = self.currentJoiner()
    query = Query()
    # Fold every checked entry's query into one compound query.
    for entry in checked_entries:
        if joiner == QueryCompound.Op.And:
            query &= entry.query()
        else:
            query |= entry.query()
    # clear out the existing containers
    # The first checked entry absorbs the compound query and is unchecked.
    first = checked_entries[0]
    first.setQuery(query)
    first.setChecked(False)
    layout = self._entryWidget.layout()
    # Remove the remaining checked widgets in reverse order so earlier
    # removals do not shift the layout indices of later ones.
    for i in range(len(checked_entries) - 1, 0, -1):
        w = checked_entries[i]
        layout.takeAt(layout.indexOf(w))
        w.close()
    self.refreshEntries()
    self.setUpdatesEnabled(True)
    # Notify listeners unless signals are suppressed.
    if not self.signalsBlocked():
        self.enterCompound(first, query)
constant[
Creates a new compound query from the checked entry list.
:return <orb.QueryCompound>
]
variable[checked_entries] assign[=] call[name[self].checkedEntries, parameter[]]
if compare[call[name[len], parameter[name[checked_entries]]] less_or_equal[<=] constant[1]] begin[:]
return[call[name[QueryCompound], parameter[]]]
call[name[self].setUpdatesEnabled, parameter[constant[False]]]
variable[joiner] assign[=] call[name[self].currentJoiner, parameter[]]
variable[query] assign[=] call[name[Query], parameter[]]
for taget[name[entry]] in starred[name[checked_entries]] begin[:]
if compare[name[joiner] equal[==] name[QueryCompound].Op.And] begin[:]
<ast.AugAssign object at 0x7da18f09dff0>
variable[first] assign[=] call[name[checked_entries]][constant[0]]
call[name[first].setQuery, parameter[name[query]]]
call[name[first].setChecked, parameter[constant[False]]]
variable[layout] assign[=] call[name[self]._entryWidget.layout, parameter[]]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[call[name[len], parameter[name[checked_entries]]] - constant[1]], constant[0], <ast.UnaryOp object at 0x7da18f09ee90>]]] begin[:]
variable[w] assign[=] call[name[checked_entries]][name[i]]
call[name[layout].takeAt, parameter[call[name[layout].indexOf, parameter[name[w]]]]]
call[name[w].close, parameter[]]
call[name[self].refreshEntries, parameter[]]
call[name[self].setUpdatesEnabled, parameter[constant[True]]]
if <ast.UnaryOp object at 0x7da1b246ace0> begin[:]
call[name[self].enterCompound, parameter[name[first], name[query]]] | keyword[def] identifier[createCompoundFromChecked] ( identifier[self] ):
literal[string]
identifier[checked_entries] = identifier[self] . identifier[checkedEntries] ()
keyword[if] identifier[len] ( identifier[checked_entries] )<= literal[int] :
keyword[return] identifier[QueryCompound] ()
identifier[self] . identifier[setUpdatesEnabled] ( keyword[False] )
identifier[joiner] = identifier[self] . identifier[currentJoiner] ()
identifier[query] = identifier[Query] ()
keyword[for] identifier[entry] keyword[in] identifier[checked_entries] :
keyword[if] identifier[joiner] == identifier[QueryCompound] . identifier[Op] . identifier[And] :
identifier[query] &= identifier[entry] . identifier[query] ()
keyword[else] :
identifier[query] |= identifier[entry] . identifier[query] ()
identifier[first] = identifier[checked_entries] [ literal[int] ]
identifier[first] . identifier[setQuery] ( identifier[query] )
identifier[first] . identifier[setChecked] ( keyword[False] )
identifier[layout] = identifier[self] . identifier[_entryWidget] . identifier[layout] ()
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[checked_entries] )- literal[int] , literal[int] ,- literal[int] ):
identifier[w] = identifier[checked_entries] [ identifier[i] ]
identifier[layout] . identifier[takeAt] ( identifier[layout] . identifier[indexOf] ( identifier[w] ))
identifier[w] . identifier[close] ()
identifier[self] . identifier[refreshEntries] ()
identifier[self] . identifier[setUpdatesEnabled] ( keyword[True] )
keyword[if] keyword[not] identifier[self] . identifier[signalsBlocked] ():
identifier[self] . identifier[enterCompound] ( identifier[first] , identifier[query] ) | def createCompoundFromChecked(self):
"""
Creates a new compound query from the checked entry list.
:return <orb.QueryCompound>
"""
checked_entries = self.checkedEntries()
if len(checked_entries) <= 1:
return QueryCompound() # depends on [control=['if'], data=[]]
self.setUpdatesEnabled(False)
joiner = self.currentJoiner()
query = Query()
for entry in checked_entries:
if joiner == QueryCompound.Op.And:
query &= entry.query() # depends on [control=['if'], data=[]]
else:
query |= entry.query() # depends on [control=['for'], data=['entry']] # clear out the existing containers
first = checked_entries[0]
first.setQuery(query)
first.setChecked(False)
layout = self._entryWidget.layout()
for i in range(len(checked_entries) - 1, 0, -1):
w = checked_entries[i]
layout.takeAt(layout.indexOf(w))
w.close() # depends on [control=['for'], data=['i']]
self.refreshEntries()
self.setUpdatesEnabled(True)
if not self.signalsBlocked():
self.enterCompound(first, query) # depends on [control=['if'], data=[]] |
def _set_drop_monitor(self, v, load=False):
  """
  Setter method for drop_monitor, mapped from YANG variable /interface/ethernet/qos/drop_monitor (container)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_drop_monitor is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_drop_monitor() directly.
  """
  # NOTE: auto-generated pyangbind setter - keep edits to comments only so
  # it stays consistent with the rest of the generated bindings.
  if hasattr(v, "_utype"):
    # Unwrap values that carry their own YANG type-conversion helper.
    v = v._utype(v)
  try:
    # Coerce the incoming value into the YANG-typed container wrapper.
    t = YANGDynClass(v,base=drop_monitor.drop_monitor, is_container='container', presence=False, yang_name="drop-monitor", rest_name="drop-monitor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure QoS drop monitor polling', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='container', is_config=True)
  except (TypeError, ValueError):
    # Re-raise with a structured error payload describing the expected type.
    raise ValueError({
      'error-string': """drop_monitor must be of a type compatible with container""",
      'defined-type': "container",
      'generated-type': """YANGDynClass(base=drop_monitor.drop_monitor, is_container='container', presence=False, yang_name="drop-monitor", rest_name="drop-monitor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure QoS drop monitor polling', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='container', is_config=True)""",
    })
  self.__drop_monitor = t
  if hasattr(self, '_set'):
    self._set()
constant[
Setter method for drop_monitor, mapped from YANG variable /interface/ethernet/qos/drop_monitor (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_drop_monitor is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_drop_monitor() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da20e9b28f0>
name[self].__drop_monitor assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_drop_monitor] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[drop_monitor] . identifier[drop_monitor] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : keyword[None] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__drop_monitor] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_drop_monitor(self, v, load=False):
"""
Setter method for drop_monitor, mapped from YANG variable /interface/ethernet/qos/drop_monitor (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_drop_monitor is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_drop_monitor() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=drop_monitor.drop_monitor, is_container='container', presence=False, yang_name='drop-monitor', rest_name='drop-monitor', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure QoS drop monitor polling', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='container', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'drop_monitor must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=drop_monitor.drop_monitor, is_container=\'container\', presence=False, yang_name="drop-monitor", rest_name="drop-monitor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Configure QoS drop monitor polling\', u\'cli-incomplete-no\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-qos-mls\', defining_module=\'brocade-qos-mls\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__drop_monitor = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def resource_policies(ctx):
    '''
    List and manage resource policies.
    (admin privilege required)
    '''
    # When a subcommand was invoked, this group callback should do nothing
    # and let the subcommand handle the request.
    if ctx.invoked_subcommand is not None:
        return
    # (display header, API field name) pairs used for the listing table.
    fields = [
        ('Name', 'name'),
        ('Created At', 'created_at'),
        ('Default for Unspecified', 'default_for_unspecified'),
        ('Total Resource Slot', 'total_resource_slots'),
        ('Max Concurrent Sessions', 'max_concurrent_sessions'),
        ('Max Containers per Session', 'max_containers_per_session'),
        ('Max vFolder Count', 'max_vfolder_count'),
        ('Max vFolder Size', 'max_vfolder_size'),
        # Fixed user-facing typo: was 'Idle Timeeout'.
        ('Idle Timeout', 'idle_timeout'),
        ('Allowed vFolder Hosts', 'allowed_vfolder_hosts'),
    ]
    with Session() as session:
        try:
            items = session.ResourcePolicy.list(fields=(item[1] for item in fields))
        except Exception as e:
            # CLI boundary: report the error and exit with a non-zero status.
            print_error(e)
            sys.exit(1)
        if len(items) == 0:
            print('There are no keypair resource policies.')
            return
        print(tabulate((item.values() for item in items),
                       headers=(item[0] for item in fields)))
constant[
List and manage resource policies.
(admin privilege required)
]
if compare[name[ctx].invoked_subcommand is_not constant[None]] begin[:]
return[None]
variable[fields] assign[=] list[[<ast.Tuple object at 0x7da18eb566b0>, <ast.Tuple object at 0x7da18eb57550>, <ast.Tuple object at 0x7da18eb573d0>, <ast.Tuple object at 0x7da18eb54460>, <ast.Tuple object at 0x7da18eb54e50>, <ast.Tuple object at 0x7da18eb570a0>, <ast.Tuple object at 0x7da18eb540d0>, <ast.Tuple object at 0x7da18eb55510>, <ast.Tuple object at 0x7da18eb56b30>, <ast.Tuple object at 0x7da18eb56020>]]
with call[name[Session], parameter[]] begin[:]
<ast.Try object at 0x7da18eb56470>
if compare[call[name[len], parameter[name[items]]] equal[==] constant[0]] begin[:]
call[name[print], parameter[constant[There are no keypair resource policies.]]]
return[None]
call[name[print], parameter[call[name[tabulate], parameter[<ast.GeneratorExp object at 0x7da18eb54b80>]]]] | keyword[def] identifier[resource_policies] ( identifier[ctx] ):
literal[string]
keyword[if] identifier[ctx] . identifier[invoked_subcommand] keyword[is] keyword[not] keyword[None] :
keyword[return]
identifier[fields] =[
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
]
keyword[with] identifier[Session] () keyword[as] identifier[session] :
keyword[try] :
identifier[items] = identifier[session] . identifier[ResourcePolicy] . identifier[list] ( identifier[fields] =( identifier[item] [ literal[int] ] keyword[for] identifier[item] keyword[in] identifier[fields] ))
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[print_error] ( identifier[e] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[if] identifier[len] ( identifier[items] )== literal[int] :
identifier[print] ( literal[string] )
keyword[return]
identifier[print] ( identifier[tabulate] (( identifier[item] . identifier[values] () keyword[for] identifier[item] keyword[in] identifier[items] ),
identifier[headers] =( identifier[item] [ literal[int] ] keyword[for] identifier[item] keyword[in] identifier[fields] ))) | def resource_policies(ctx):
"""
List and manage resource policies.
(admin privilege required)
"""
if ctx.invoked_subcommand is not None:
return # depends on [control=['if'], data=[]]
fields = [('Name', 'name'), ('Created At', 'created_at'), ('Default for Unspecified', 'default_for_unspecified'), ('Total Resource Slot', 'total_resource_slots'), ('Max Concurrent Sessions', 'max_concurrent_sessions'), ('Max Containers per Session', 'max_containers_per_session'), ('Max vFolder Count', 'max_vfolder_count'), ('Max vFolder Size', 'max_vfolder_size'), ('Idle Timeeout', 'idle_timeout'), ('Allowed vFolder Hosts', 'allowed_vfolder_hosts')]
with Session() as session:
try:
items = session.ResourcePolicy.list(fields=(item[1] for item in fields)) # depends on [control=['try'], data=[]]
except Exception as e:
print_error(e)
sys.exit(1) # depends on [control=['except'], data=['e']]
if len(items) == 0:
print('There are no keypair resource policies.')
return # depends on [control=['if'], data=[]]
print(tabulate((item.values() for item in items), headers=(item[0] for item in fields))) # depends on [control=['with'], data=['session']] |
def shuffled_batches(self, batch_size):
    """ Generate randomized batches of data - only sample whole trajectories """
    if batch_size >= self.num_envs * self.num_steps:
        # One batch already covers all data - no shuffling required.
        yield self
        return
    rollouts_per_batch = batch_size // self.num_steps
    num_batches = math_util.divide_ceiling(self.num_envs, rollouts_per_batch)
    # Shuffle whole environments (trajectories), never individual steps.
    env_order = list(range(self.num_envs))
    np.random.shuffle(env_order)
    for chosen in np.array_split(env_order, num_batches):
        yield Trajectories(
            num_steps=self.num_steps,
            num_envs=len(chosen),
            # environment_information and extra data are intentionally not
            # carried into batches.
            environment_information=None,
            transition_tensors={key: tensor[:, chosen] for key, tensor in self.transition_tensors.items()},
            rollout_tensors={key: tensor[chosen] for key, tensor in self.rollout_tensors.items()},
        )
constant[ Generate randomized batches of data - only sample whole trajectories ]
if compare[name[batch_size] greater_or_equal[>=] binary_operation[name[self].num_envs * name[self].num_steps]] begin[:]
<ast.Yield object at 0x7da1b17fa3e0> | keyword[def] identifier[shuffled_batches] ( identifier[self] , identifier[batch_size] ):
literal[string]
keyword[if] identifier[batch_size] >= identifier[self] . identifier[num_envs] * identifier[self] . identifier[num_steps] :
keyword[yield] identifier[self]
keyword[else] :
identifier[rollouts_in_batch] = identifier[batch_size] // identifier[self] . identifier[num_steps]
identifier[batch_splits] = identifier[math_util] . identifier[divide_ceiling] ( identifier[self] . identifier[num_envs] , identifier[rollouts_in_batch] )
identifier[indices] = identifier[list] ( identifier[range] ( identifier[self] . identifier[num_envs] ))
identifier[np] . identifier[random] . identifier[shuffle] ( identifier[indices] )
keyword[for] identifier[sub_indices] keyword[in] identifier[np] . identifier[array_split] ( identifier[indices] , identifier[batch_splits] ):
keyword[yield] identifier[Trajectories] (
identifier[num_steps] = identifier[self] . identifier[num_steps] ,
identifier[num_envs] = identifier[len] ( identifier[sub_indices] ),
identifier[environment_information] = keyword[None] ,
identifier[transition_tensors] ={ identifier[k] : identifier[x] [:, identifier[sub_indices] ] keyword[for] identifier[k] , identifier[x] keyword[in] identifier[self] . identifier[transition_tensors] . identifier[items] ()},
identifier[rollout_tensors] ={ identifier[k] : identifier[x] [ identifier[sub_indices] ] keyword[for] identifier[k] , identifier[x] keyword[in] identifier[self] . identifier[rollout_tensors] . identifier[items] ()},
) | def shuffled_batches(self, batch_size):
""" Generate randomized batches of data - only sample whole trajectories """
if batch_size >= self.num_envs * self.num_steps:
yield self # depends on [control=['if'], data=[]]
else:
rollouts_in_batch = batch_size // self.num_steps
batch_splits = math_util.divide_ceiling(self.num_envs, rollouts_in_batch)
indices = list(range(self.num_envs))
np.random.shuffle(indices)
for sub_indices in np.array_split(indices, batch_splits):
# Dont use it in batches for a moment, can be uncommented later if needed
# environment_information=[x[sub_indices.tolist()] for x in self.environment_information],
# extra_data does not go into batches
yield Trajectories(num_steps=self.num_steps, num_envs=len(sub_indices), environment_information=None, transition_tensors={k: x[:, sub_indices] for (k, x) in self.transition_tensors.items()}, rollout_tensors={k: x[sub_indices] for (k, x) in self.rollout_tensors.items()}) # depends on [control=['for'], data=['sub_indices']] |
def calc_bin(self, _bin=None):
    """
    Calculate the smallest UCSC genomic bin that will contain this feature.
    """
    # An explicitly provided bin wins; otherwise compute it from the
    # feature coordinates, falling back to None for invalid coordinates.
    if _bin is not None:
        return _bin
    try:
        return bins.bins(self.start, self.end, one=True)
    except TypeError:
        return None
constant[
Calculate the smallest UCSC genomic bin that will contain this feature.
]
if compare[name[_bin] is constant[None]] begin[:]
<ast.Try object at 0x7da18f00cd90>
return[name[_bin]] | keyword[def] identifier[calc_bin] ( identifier[self] , identifier[_bin] = keyword[None] ):
literal[string]
keyword[if] identifier[_bin] keyword[is] keyword[None] :
keyword[try] :
identifier[_bin] = identifier[bins] . identifier[bins] ( identifier[self] . identifier[start] , identifier[self] . identifier[end] , identifier[one] = keyword[True] )
keyword[except] identifier[TypeError] :
identifier[_bin] = keyword[None]
keyword[return] identifier[_bin] | def calc_bin(self, _bin=None):
"""
Calculate the smallest UCSC genomic bin that will contain this feature.
"""
if _bin is None:
try:
_bin = bins.bins(self.start, self.end, one=True) # depends on [control=['try'], data=[]]
except TypeError:
_bin = None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['_bin']]
return _bin |
def _premis_version_from_data(data):
"""Given tuple ``data`` encoding a PREMIS element, attempt to return the
PREMIS version it is using. If none can be found, return the default PREMIS
version.
"""
for child in data:
if isinstance(child, dict):
version = child.get("version")
if version:
return version
return utils.PREMIS_VERSION | def function[_premis_version_from_data, parameter[data]]:
constant[Given tuple ``data`` encoding a PREMIS element, attempt to return the
PREMIS version it is using. If none can be found, return the default PREMIS
version.
]
for taget[name[child]] in starred[name[data]] begin[:]
if call[name[isinstance], parameter[name[child], name[dict]]] begin[:]
variable[version] assign[=] call[name[child].get, parameter[constant[version]]]
if name[version] begin[:]
return[name[version]]
return[name[utils].PREMIS_VERSION] | keyword[def] identifier[_premis_version_from_data] ( identifier[data] ):
literal[string]
keyword[for] identifier[child] keyword[in] identifier[data] :
keyword[if] identifier[isinstance] ( identifier[child] , identifier[dict] ):
identifier[version] = identifier[child] . identifier[get] ( literal[string] )
keyword[if] identifier[version] :
keyword[return] identifier[version]
keyword[return] identifier[utils] . identifier[PREMIS_VERSION] | def _premis_version_from_data(data):
"""Given tuple ``data`` encoding a PREMIS element, attempt to return the
PREMIS version it is using. If none can be found, return the default PREMIS
version.
"""
for child in data:
if isinstance(child, dict):
version = child.get('version')
if version:
return version # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['child']]
return utils.PREMIS_VERSION |
def queue(users, label, extra_context=None, sender=None):
    """
    Queue the notification in NoticeQueueBatch. This allows for large amounts
    of user notifications to be deferred to a seperate process running outside
    the webserver.
    """
    context = extra_context if extra_context is not None else {}
    # Normalize to a list of primary keys regardless of input kind.
    if isinstance(users, QuerySet):
        user_pks = [row["pk"] for row in users.values("pk")]
    else:
        user_pks = [member.pk for member in users]
    notices = [(pk, label, context, sender) for pk in user_pks]
    # Persist the batch as base64-encoded pickled data for the worker process.
    payload = base64.b64encode(pickle.dumps(notices))
    NoticeQueueBatch(pickled_data=payload).save()
constant[
Queue the notification in NoticeQueueBatch. This allows for large amounts
of user notifications to be deferred to a seperate process running outside
the webserver.
]
if compare[name[extra_context] is constant[None]] begin[:]
variable[extra_context] assign[=] dictionary[[], []]
if call[name[isinstance], parameter[name[users], name[QuerySet]]] begin[:]
variable[users] assign[=] <ast.ListComp object at 0x7da1afe0d780>
variable[notices] assign[=] list[[]]
for taget[name[user]] in starred[name[users]] begin[:]
call[name[notices].append, parameter[tuple[[<ast.Name object at 0x7da1afe0d750>, <ast.Name object at 0x7da1afe0ecb0>, <ast.Name object at 0x7da1afe0d690>, <ast.Name object at 0x7da1afe0f160>]]]]
call[call[name[NoticeQueueBatch], parameter[]].save, parameter[]] | keyword[def] identifier[queue] ( identifier[users] , identifier[label] , identifier[extra_context] = keyword[None] , identifier[sender] = keyword[None] ):
literal[string]
keyword[if] identifier[extra_context] keyword[is] keyword[None] :
identifier[extra_context] ={}
keyword[if] identifier[isinstance] ( identifier[users] , identifier[QuerySet] ):
identifier[users] =[ identifier[row] [ literal[string] ] keyword[for] identifier[row] keyword[in] identifier[users] . identifier[values] ( literal[string] )]
keyword[else] :
identifier[users] =[ identifier[user] . identifier[pk] keyword[for] identifier[user] keyword[in] identifier[users] ]
identifier[notices] =[]
keyword[for] identifier[user] keyword[in] identifier[users] :
identifier[notices] . identifier[append] (( identifier[user] , identifier[label] , identifier[extra_context] , identifier[sender] ))
identifier[NoticeQueueBatch] ( identifier[pickled_data] = identifier[base64] . identifier[b64encode] ( identifier[pickle] . identifier[dumps] ( identifier[notices] ))). identifier[save] () | def queue(users, label, extra_context=None, sender=None):
"""
Queue the notification in NoticeQueueBatch. This allows for large amounts
of user notifications to be deferred to a seperate process running outside
the webserver.
"""
if extra_context is None:
extra_context = {} # depends on [control=['if'], data=['extra_context']]
if isinstance(users, QuerySet):
users = [row['pk'] for row in users.values('pk')] # depends on [control=['if'], data=[]]
else:
users = [user.pk for user in users]
notices = []
for user in users:
notices.append((user, label, extra_context, sender)) # depends on [control=['for'], data=['user']]
NoticeQueueBatch(pickled_data=base64.b64encode(pickle.dumps(notices))).save() |
def matches(self, request):
    """
    :param request: a :class:`pyramid.request.Request`
    :returns: True if this matcher matches the request, False otherwise
    """
    # Same short-circuit as ``a and b``: the HTTP method is compared only
    # once the path prefix has matched.
    path_ok = partial_path_match(request.path_info, self.path)
    return path_ok and request.method == self.method
constant[
:param request: a :class:`pyramid.request.Request`
:returns: True if this matcher matches the request, False otherwise
]
return[<ast.BoolOp object at 0x7da1b287c520>] | keyword[def] identifier[matches] ( identifier[self] , identifier[request] ):
literal[string]
keyword[return] (
identifier[partial_path_match] ( identifier[request] . identifier[path_info] , identifier[self] . identifier[path] ) keyword[and]
identifier[request] . identifier[method] == identifier[self] . identifier[method]
) | def matches(self, request):
"""
:param request: a :class:`pyramid.request.Request`
:returns: True if this matcher matches the request, False otherwise
"""
return partial_path_match(request.path_info, self.path) and request.method == self.method |
def keras_dropout(layer, rate):
    '''keras dropout layer.

    Chooses the dropout variant matching the rank of ``layer``'s input
    tensor; ranks outside 2-4 fall back to plain element-wise Dropout.
    '''
    from keras import layers

    # NOTE(review): rank 2 -> SpatialDropout1D (and so on) assumes the
    # stored input shape excludes the batch dimension -- confirm against
    # callers, since keras' SpatialDropout1D itself expects 3D input.
    spatial_by_rank = {
        2: layers.SpatialDropout1D,
        3: layers.SpatialDropout2D,
        4: layers.SpatialDropout3D,
    }
    rank = len(layer.input.shape)
    dropout_cls = spatial_by_rank.get(rank, layers.Dropout)
    return dropout_cls(rate)
constant[keras dropout layer.
]
from relative_module[keras] import module[layers]
variable[input_dim] assign[=] call[name[len], parameter[name[layer].input.shape]]
if compare[name[input_dim] equal[==] constant[2]] begin[:]
return[call[name[layers].SpatialDropout1D, parameter[name[rate]]]] | keyword[def] identifier[keras_dropout] ( identifier[layer] , identifier[rate] ):
literal[string]
keyword[from] identifier[keras] keyword[import] identifier[layers]
identifier[input_dim] = identifier[len] ( identifier[layer] . identifier[input] . identifier[shape] )
keyword[if] identifier[input_dim] == literal[int] :
keyword[return] identifier[layers] . identifier[SpatialDropout1D] ( identifier[rate] )
keyword[elif] identifier[input_dim] == literal[int] :
keyword[return] identifier[layers] . identifier[SpatialDropout2D] ( identifier[rate] )
keyword[elif] identifier[input_dim] == literal[int] :
keyword[return] identifier[layers] . identifier[SpatialDropout3D] ( identifier[rate] )
keyword[else] :
keyword[return] identifier[layers] . identifier[Dropout] ( identifier[rate] ) | def keras_dropout(layer, rate):
"""keras dropout layer.
"""
from keras import layers
input_dim = len(layer.input.shape)
if input_dim == 2:
return layers.SpatialDropout1D(rate) # depends on [control=['if'], data=[]]
elif input_dim == 3:
return layers.SpatialDropout2D(rate) # depends on [control=['if'], data=[]]
elif input_dim == 4:
return layers.SpatialDropout3D(rate) # depends on [control=['if'], data=[]]
else:
return layers.Dropout(rate) |
def _get_result_paths(self, output_dir):
    """Return a dict of output files.
    """
    # Only the properties file is registered here.  The remaining result
    # paths are added in the __call__ method so that errors from output
    # files that were never written can be caught there.
    self._write_properties_file()
    properties_path = os.path.join(self.ModelDir, self.PropertiesFile)
    return {'properties': ResultPath(properties_path, IsWritten=True)}
constant[Return a dict of output files.
]
call[name[self]._write_properties_file, parameter[]]
variable[properties_fp] assign[=] call[name[os].path.join, parameter[name[self].ModelDir, name[self].PropertiesFile]]
variable[result_paths] assign[=] dictionary[[<ast.Constant object at 0x7da1b0bdb280>], [<ast.Call object at 0x7da1b0bd9cf0>]]
return[name[result_paths]] | keyword[def] identifier[_get_result_paths] ( identifier[self] , identifier[output_dir] ):
literal[string]
identifier[self] . identifier[_write_properties_file] ()
identifier[properties_fp] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[ModelDir] , identifier[self] . identifier[PropertiesFile] )
identifier[result_paths] ={
literal[string] : identifier[ResultPath] ( identifier[properties_fp] , identifier[IsWritten] = keyword[True] ,)
}
keyword[return] identifier[result_paths] | def _get_result_paths(self, output_dir):
"""Return a dict of output files.
"""
# Only include the properties file here. Add the other result
# paths in the __call__ method, so we can catch errors if an
# output file is not written.
self._write_properties_file()
properties_fp = os.path.join(self.ModelDir, self.PropertiesFile)
result_paths = {'properties': ResultPath(properties_fp, IsWritten=True)}
return result_paths |
def filter_belief():
    """Filter to beliefs above a given threshold."""
    # CORS preflight requests get an empty reply.
    if request.method == 'OPTIONS':
        return {}
    payload = json.loads(request.body.read().decode('utf-8'))
    cutoff = payload.get('belief_cutoff')
    if cutoff is not None:
        cutoff = float(cutoff)
    stmts_in = stmts_from_json(payload.get('statements'))
    filtered = ac.filter_belief(stmts_in, cutoff)
    return _return_stmts(filtered)
constant[Filter to beliefs above a given threshold.]
if compare[name[request].method equal[==] constant[OPTIONS]] begin[:]
return[dictionary[[], []]]
variable[response] assign[=] call[call[name[request].body.read, parameter[]].decode, parameter[constant[utf-8]]]
variable[body] assign[=] call[name[json].loads, parameter[name[response]]]
variable[stmts_json] assign[=] call[name[body].get, parameter[constant[statements]]]
variable[belief_cutoff] assign[=] call[name[body].get, parameter[constant[belief_cutoff]]]
if compare[name[belief_cutoff] is_not constant[None]] begin[:]
variable[belief_cutoff] assign[=] call[name[float], parameter[name[belief_cutoff]]]
variable[stmts] assign[=] call[name[stmts_from_json], parameter[name[stmts_json]]]
variable[stmts_out] assign[=] call[name[ac].filter_belief, parameter[name[stmts], name[belief_cutoff]]]
return[call[name[_return_stmts], parameter[name[stmts_out]]]] | keyword[def] identifier[filter_belief] ():
literal[string]
keyword[if] identifier[request] . identifier[method] == literal[string] :
keyword[return] {}
identifier[response] = identifier[request] . identifier[body] . identifier[read] (). identifier[decode] ( literal[string] )
identifier[body] = identifier[json] . identifier[loads] ( identifier[response] )
identifier[stmts_json] = identifier[body] . identifier[get] ( literal[string] )
identifier[belief_cutoff] = identifier[body] . identifier[get] ( literal[string] )
keyword[if] identifier[belief_cutoff] keyword[is] keyword[not] keyword[None] :
identifier[belief_cutoff] = identifier[float] ( identifier[belief_cutoff] )
identifier[stmts] = identifier[stmts_from_json] ( identifier[stmts_json] )
identifier[stmts_out] = identifier[ac] . identifier[filter_belief] ( identifier[stmts] , identifier[belief_cutoff] )
keyword[return] identifier[_return_stmts] ( identifier[stmts_out] ) | def filter_belief():
"""Filter to beliefs above a given threshold."""
if request.method == 'OPTIONS':
return {} # depends on [control=['if'], data=[]]
response = request.body.read().decode('utf-8')
body = json.loads(response)
stmts_json = body.get('statements')
belief_cutoff = body.get('belief_cutoff')
if belief_cutoff is not None:
belief_cutoff = float(belief_cutoff) # depends on [control=['if'], data=['belief_cutoff']]
stmts = stmts_from_json(stmts_json)
stmts_out = ac.filter_belief(stmts, belief_cutoff)
return _return_stmts(stmts_out) |
def packet2chain(packet):
    """Fetch Scapy packet protocol chain."""
    if scapy_all is None:
        raise ModuleNotFound("No module named 'scapy'", name='scapy')

    def _payload_names(pkt):
        # Walk the payload chain until the NoPayload sentinel ends it.
        layer = pkt.payload
        while not isinstance(layer, scapy_all.packet.NoPayload):
            yield layer.name
            layer = layer.payload

    return ':'.join([packet.name, *_payload_names(packet)])
constant[Fetch Scapy packet protocol chain.]
if compare[name[scapy_all] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b060af80>
variable[chain] assign[=] list[[<ast.Attribute object at 0x7da1b0609f00>]]
variable[payload] assign[=] name[packet].payload
while <ast.UnaryOp object at 0x7da1b060b670> begin[:]
call[name[chain].append, parameter[name[payload].name]]
variable[payload] assign[=] name[payload].payload
return[call[constant[:].join, parameter[name[chain]]]] | keyword[def] identifier[packet2chain] ( identifier[packet] ):
literal[string]
keyword[if] identifier[scapy_all] keyword[is] keyword[None] :
keyword[raise] identifier[ModuleNotFound] ( literal[string] , identifier[name] = literal[string] )
identifier[chain] =[ identifier[packet] . identifier[name] ]
identifier[payload] = identifier[packet] . identifier[payload]
keyword[while] keyword[not] identifier[isinstance] ( identifier[payload] , identifier[scapy_all] . identifier[packet] . identifier[NoPayload] ):
identifier[chain] . identifier[append] ( identifier[payload] . identifier[name] )
identifier[payload] = identifier[payload] . identifier[payload]
keyword[return] literal[string] . identifier[join] ( identifier[chain] ) | def packet2chain(packet):
"""Fetch Scapy packet protocol chain."""
if scapy_all is None:
raise ModuleNotFound("No module named 'scapy'", name='scapy') # depends on [control=['if'], data=[]]
chain = [packet.name]
payload = packet.payload
while not isinstance(payload, scapy_all.packet.NoPayload):
chain.append(payload.name)
payload = payload.payload # depends on [control=['while'], data=[]]
return ':'.join(chain) |
def debug_query_result(rows: Sequence[Any]) -> None:
    """Writes a query result to the log.

    Args:
        rows: the rows returned by the query; each row is logged with its
            zero-based index using the logger's brace-style lazy formatting.
    """
    log.info("Retrieved {} rows", len(rows))
    # enumerate() instead of range(len(...)): idiomatic and avoids repeated
    # indexing into the sequence.
    for i, row in enumerate(rows):
        log.info("Row {}: {}", i, row)
log.info("Row {}: {}", i, rows[i]) | def function[debug_query_result, parameter[rows]]:
constant[Writes a query result to the log.]
call[name[log].info, parameter[constant[Retrieved {} rows], call[name[len], parameter[name[rows]]]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[rows]]]]]] begin[:]
call[name[log].info, parameter[constant[Row {}: {}], name[i], call[name[rows]][name[i]]]] | keyword[def] identifier[debug_query_result] ( identifier[rows] : identifier[Sequence] [ identifier[Any] ])-> keyword[None] :
literal[string]
identifier[log] . identifier[info] ( literal[string] , identifier[len] ( identifier[rows] ))
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[rows] )):
identifier[log] . identifier[info] ( literal[string] , identifier[i] , identifier[rows] [ identifier[i] ]) | def debug_query_result(rows: Sequence[Any]) -> None:
"""Writes a query result to the log."""
log.info('Retrieved {} rows', len(rows))
for i in range(len(rows)):
log.info('Row {}: {}', i, rows[i]) # depends on [control=['for'], data=['i']] |
def rotate_orth(self, quads):
    "Orthographic rotation, quads: 0-3, number of clockwise rotations"
    # The Pix wrapper is built inside the trap so any leptonica error
    # raised while constructing the result is still converted.
    with _LeptonicaErrorTrap():
        rotated = lept.pixRotateOrth(self._cdata, quads)
        return Pix(rotated)
constant[Orthographic rotation, quads: 0-3, number of clockwise rotations]
with call[name[_LeptonicaErrorTrap], parameter[]] begin[:]
return[call[name[Pix], parameter[call[name[lept].pixRotateOrth, parameter[name[self]._cdata, name[quads]]]]]] | keyword[def] identifier[rotate_orth] ( identifier[self] , identifier[quads] ):
literal[string]
keyword[with] identifier[_LeptonicaErrorTrap] ():
keyword[return] identifier[Pix] ( identifier[lept] . identifier[pixRotateOrth] ( identifier[self] . identifier[_cdata] , identifier[quads] )) | def rotate_orth(self, quads):
"""Orthographic rotation, quads: 0-3, number of clockwise rotations"""
with _LeptonicaErrorTrap():
return Pix(lept.pixRotateOrth(self._cdata, quads)) # depends on [control=['with'], data=[]] |
def sort_dict(self, data, key):
    '''Sort a list of dictionaries by dictionary key'''
    # Falsy input (None or an empty list) yields an empty list, as before.
    if not data:
        return []
    return sorted(data, key=itemgetter(key))
constant[Sort a list of dictionaries by dictionary key]
return[<ast.IfExp object at 0x7da1b25d5ae0>] | keyword[def] identifier[sort_dict] ( identifier[self] , identifier[data] , identifier[key] ):
literal[string]
keyword[return] identifier[sorted] ( identifier[data] , identifier[key] = identifier[itemgetter] ( identifier[key] )) keyword[if] identifier[data] keyword[else] [] | def sort_dict(self, data, key):
"""Sort a list of dictionaries by dictionary key"""
return sorted(data, key=itemgetter(key)) if data else [] |
def find_features(feats, sequ, annotated, start_pos, cutoff):
    """
    find_features - Finds the reference sequence features in the alignments and records the positions

    Walks the aligned sequence ``sequ`` column by column.  Gap columns
    ('-') accumulate coordinate offsets; at non-gap columns the current
    feature is rebuilt as a new ``SeqFeature`` with shifted coordinates,
    and every downstream feature is shifted by the same amount.

    :param feats: Dictionary of sequence features (name -> ``SeqFeature``)
    :type feats: ``dict``
    :param sequ: The sequence alignment for the input sequence
    :type sequ: ``List``
    :param annotated: Annotation indicator, compared against 0 below
    :type annotated: ``int``
    :param start_pos: Where the reference sequence starts
    :type start_pos: ``int``
    :param cutoff: The alignment cutoff
    :type cutoff: ``float``
    :returns: ``feats`` with relocated feature coordinates
    :rtype: ``dict``
    """
    # Feature names in insertion order; j tracks the feature whose span
    # currently contains column i.
    feats_a = list(feats.keys())
    j = 0
    # s == 1 while we are inside a gap run that began at column 0
    # ("leading gap" mode); start/en are the gap offsets applied to the
    # feature start/end coordinates.
    s = 0
    en = 0
    start = 0
    for i in range(0, len(sequ)):
        if j <= len(feats_a)-1:
            # Advance to the next feature once we walk past the current
            # feature's end coordinate.
            if i > int(feats[feats_a[j]].location.end):
                j += 1
            if(sequ[i] == '-'):
                # Gap column: grow the offsets; a gap in the very first
                # column switches on leading-gap mode.
                if i == 0:
                    start += 1
                    en += 1
                    s = 1
                else:
                    start += 1
                    en += 1
            if s == 0:
                start_val = feats[feats_a[j]].location.start
                #if feats_a[j] == "five_prime_UTR":
                # start_val = 0
                # NOTE(review): ``st`` is only assigned in the s == 1
                # branch below; if this condition is reached before that
                # branch has ever run (and the first clause is false),
                # ``st < 6`` raises UnboundLocalError -- confirm the
                # intended evaluation order.
                if((annotated == 0 and start_pos == 0
                    and cutoff < 0.9) or
                   (annotated == 0 and start_pos == 0
                    and st < 6)
                   or (start_pos == 0 and
                       len(feats) == 1 and cutoff < .9)):
                    start_val = 0
                else:
                    if feats_a[j] == 'five_prime_UTR':
                        start_val = 0
                # Rebuild the current feature with its end shifted by one,
                # then shift every downstream feature by one as well.
                feats[feats_a[j]] = SeqFeature(FeatureLocation(ExactPosition(start_val), ExactPosition(int(feats[feats_a[j]].location.end + 1)), strand=1), type=feats[feats_a[j]].type)
                if j != len(feats_a):
                    for l in range(j+1, len(feats_a)):
                        feats[feats_a[l]] = SeqFeature(FeatureLocation(ExactPosition(feats[feats_a[l]].location.start+1), ExactPosition(int(feats[feats_a[l]].location.end + 1)), strand=1), type=feats[feats_a[l]].type)
            else:
                if s == 1:
                    # First non-gap column after a leading-gap run: shift
                    # the current feature by the accumulated offsets ...
                    st = feats[feats_a[j]].location.start + start
                    end = feats[feats_a[j]].location.end + en
                    start_val = st
                    if feats_a[j] != 'five_prime_UTR' and start_pos == 0:
                        if((annotated == 0 and start_pos == 0
                            and cutoff < 0.9) or
                           (annotated == 0 and start_pos == 0
                            and st < 6)
                           or (start_pos == 0 and
                               len(feats) == 1 and cutoff < .9)):
                            start_val = 0
                    else:
                        if feats_a[j] == 'five_prime_UTR':
                            start_val = 0
                    feats[feats_a[j]] = SeqFeature(FeatureLocation(ExactPosition(start_val), ExactPosition(end), strand=1), type=feats[feats_a[j]].type)
                    if j != len(feats_a):
                        # ... and shift every downstream feature by ``st``.
                        for l in range(j+1, len(feats_a)):
                            feats[feats_a[l]] = SeqFeature(FeatureLocation(ExactPosition(feats[feats_a[l]].location.start+st), ExactPosition(int(feats[feats_a[l]].location.end + st)), strand=1), type=feats[feats_a[l]].type)
                    s = 0
    return feats
constant[
find_features - Finds the reference sequence features in the alignments and records the positions
:param feats: Dictonary of sequence features
:type feats: ``dict``
:param sequ: The sequence alignment for the input sequence
:type sequ: ``List``
:param annotated: dictonary of the annotated features
:type annotated: ``dict``
:param start_pos: Where the reference sequence starts
:type start_pos: ``int``
:param missing: List of the unmapped features
:type missing: ``List``
:param cutoff: The alignment cutoff
:type cutoff: ``float``
:param verbose: Flag for running in verbose mode.
:type verbose: ``bool``
:param verbosity: Numerical value to indicate how verbose the output will be in verbose mode.
:type verbosity: ``int``
:rtype: ``List``
]
variable[feats_a] assign[=] call[name[list], parameter[call[name[feats].keys, parameter[]]]]
variable[j] assign[=] constant[0]
variable[s] assign[=] constant[0]
variable[en] assign[=] constant[0]
variable[start] assign[=] constant[0]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[sequ]]]]]] begin[:]
if compare[name[j] less_or_equal[<=] binary_operation[call[name[len], parameter[name[feats_a]]] - constant[1]]] begin[:]
if compare[name[i] greater[>] call[name[int], parameter[call[name[feats]][call[name[feats_a]][name[j]]].location.end]]] begin[:]
<ast.AugAssign object at 0x7da1b24b1a80>
if compare[call[name[sequ]][name[i]] equal[==] constant[-]] begin[:]
if compare[name[i] equal[==] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b24b1b40>
<ast.AugAssign object at 0x7da204346800>
variable[s] assign[=] constant[1]
return[name[feats]] | keyword[def] identifier[find_features] ( identifier[feats] , identifier[sequ] , identifier[annotated] , identifier[start_pos] , identifier[cutoff] ):
literal[string]
identifier[feats_a] = identifier[list] ( identifier[feats] . identifier[keys] ())
identifier[j] = literal[int]
identifier[s] = literal[int]
identifier[en] = literal[int]
identifier[start] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[sequ] )):
keyword[if] identifier[j] <= identifier[len] ( identifier[feats_a] )- literal[int] :
keyword[if] identifier[i] > identifier[int] ( identifier[feats] [ identifier[feats_a] [ identifier[j] ]]. identifier[location] . identifier[end] ):
identifier[j] += literal[int]
keyword[if] ( identifier[sequ] [ identifier[i] ]== literal[string] ):
keyword[if] identifier[i] == literal[int] :
identifier[start] += literal[int]
identifier[en] += literal[int]
identifier[s] = literal[int]
keyword[else] :
identifier[start] += literal[int]
identifier[en] += literal[int]
keyword[if] identifier[s] == literal[int] :
identifier[start_val] = identifier[feats] [ identifier[feats_a] [ identifier[j] ]]. identifier[location] . identifier[start]
keyword[if] (( identifier[annotated] == literal[int] keyword[and] identifier[start_pos] == literal[int]
keyword[and] identifier[cutoff] < literal[int] ) keyword[or]
( identifier[annotated] == literal[int] keyword[and] identifier[start_pos] == literal[int]
keyword[and] identifier[st] < literal[int] )
keyword[or] ( identifier[start_pos] == literal[int] keyword[and]
identifier[len] ( identifier[feats] )== literal[int] keyword[and] identifier[cutoff] < literal[int] )):
identifier[start_val] = literal[int]
keyword[else] :
keyword[if] identifier[feats_a] [ identifier[j] ]== literal[string] :
identifier[start_val] = literal[int]
identifier[feats] [ identifier[feats_a] [ identifier[j] ]]= identifier[SeqFeature] ( identifier[FeatureLocation] ( identifier[ExactPosition] ( identifier[start_val] ), identifier[ExactPosition] ( identifier[int] ( identifier[feats] [ identifier[feats_a] [ identifier[j] ]]. identifier[location] . identifier[end] + literal[int] )), identifier[strand] = literal[int] ), identifier[type] = identifier[feats] [ identifier[feats_a] [ identifier[j] ]]. identifier[type] )
keyword[if] identifier[j] != identifier[len] ( identifier[feats_a] ):
keyword[for] identifier[l] keyword[in] identifier[range] ( identifier[j] + literal[int] , identifier[len] ( identifier[feats_a] )):
identifier[feats] [ identifier[feats_a] [ identifier[l] ]]= identifier[SeqFeature] ( identifier[FeatureLocation] ( identifier[ExactPosition] ( identifier[feats] [ identifier[feats_a] [ identifier[l] ]]. identifier[location] . identifier[start] + literal[int] ), identifier[ExactPosition] ( identifier[int] ( identifier[feats] [ identifier[feats_a] [ identifier[l] ]]. identifier[location] . identifier[end] + literal[int] )), identifier[strand] = literal[int] ), identifier[type] = identifier[feats] [ identifier[feats_a] [ identifier[l] ]]. identifier[type] )
keyword[else] :
keyword[if] identifier[s] == literal[int] :
identifier[st] = identifier[feats] [ identifier[feats_a] [ identifier[j] ]]. identifier[location] . identifier[start] + identifier[start]
identifier[end] = identifier[feats] [ identifier[feats_a] [ identifier[j] ]]. identifier[location] . identifier[end] + identifier[en]
identifier[start_val] = identifier[st]
keyword[if] identifier[feats_a] [ identifier[j] ]!= literal[string] keyword[and] identifier[start_pos] == literal[int] :
keyword[if] (( identifier[annotated] == literal[int] keyword[and] identifier[start_pos] == literal[int]
keyword[and] identifier[cutoff] < literal[int] ) keyword[or]
( identifier[annotated] == literal[int] keyword[and] identifier[start_pos] == literal[int]
keyword[and] identifier[st] < literal[int] )
keyword[or] ( identifier[start_pos] == literal[int] keyword[and]
identifier[len] ( identifier[feats] )== literal[int] keyword[and] identifier[cutoff] < literal[int] )):
identifier[start_val] = literal[int]
keyword[else] :
keyword[if] identifier[feats_a] [ identifier[j] ]== literal[string] :
identifier[start_val] = literal[int]
identifier[feats] [ identifier[feats_a] [ identifier[j] ]]= identifier[SeqFeature] ( identifier[FeatureLocation] ( identifier[ExactPosition] ( identifier[start_val] ), identifier[ExactPosition] ( identifier[end] ), identifier[strand] = literal[int] ), identifier[type] = identifier[feats] [ identifier[feats_a] [ identifier[j] ]]. identifier[type] )
keyword[if] identifier[j] != identifier[len] ( identifier[feats_a] ):
keyword[for] identifier[l] keyword[in] identifier[range] ( identifier[j] + literal[int] , identifier[len] ( identifier[feats_a] )):
identifier[feats] [ identifier[feats_a] [ identifier[l] ]]= identifier[SeqFeature] ( identifier[FeatureLocation] ( identifier[ExactPosition] ( identifier[feats] [ identifier[feats_a] [ identifier[l] ]]. identifier[location] . identifier[start] + identifier[st] ), identifier[ExactPosition] ( identifier[int] ( identifier[feats] [ identifier[feats_a] [ identifier[l] ]]. identifier[location] . identifier[end] + identifier[st] )), identifier[strand] = literal[int] ), identifier[type] = identifier[feats] [ identifier[feats_a] [ identifier[l] ]]. identifier[type] )
identifier[s] = literal[int]
keyword[return] identifier[feats] | def find_features(feats, sequ, annotated, start_pos, cutoff):
"""
find_features - Finds the reference sequence features in the alignments and records the positions
:param feats: Dictonary of sequence features
:type feats: ``dict``
:param sequ: The sequence alignment for the input sequence
:type sequ: ``List``
:param annotated: dictonary of the annotated features
:type annotated: ``dict``
:param start_pos: Where the reference sequence starts
:type start_pos: ``int``
:param missing: List of the unmapped features
:type missing: ``List``
:param cutoff: The alignment cutoff
:type cutoff: ``float``
:param verbose: Flag for running in verbose mode.
:type verbose: ``bool``
:param verbosity: Numerical value to indicate how verbose the output will be in verbose mode.
:type verbosity: ``int``
:rtype: ``List``
"""
feats_a = list(feats.keys())
j = 0
s = 0
en = 0
start = 0
for i in range(0, len(sequ)):
if j <= len(feats_a) - 1:
if i > int(feats[feats_a[j]].location.end):
j += 1 # depends on [control=['if'], data=[]]
if sequ[i] == '-':
if i == 0:
start += 1
en += 1
s = 1 # depends on [control=['if'], data=[]]
else:
start += 1
en += 1
if s == 0:
start_val = feats[feats_a[j]].location.start
#if feats_a[j] == "five_prime_UTR":
# start_val = 0
if annotated == 0 and start_pos == 0 and (cutoff < 0.9) or (annotated == 0 and start_pos == 0 and (st < 6)) or (start_pos == 0 and len(feats) == 1 and (cutoff < 0.9)):
start_val = 0 # depends on [control=['if'], data=[]]
elif feats_a[j] == 'five_prime_UTR':
start_val = 0 # depends on [control=['if'], data=[]]
feats[feats_a[j]] = SeqFeature(FeatureLocation(ExactPosition(start_val), ExactPosition(int(feats[feats_a[j]].location.end + 1)), strand=1), type=feats[feats_a[j]].type)
if j != len(feats_a):
for l in range(j + 1, len(feats_a)):
feats[feats_a[l]] = SeqFeature(FeatureLocation(ExactPosition(feats[feats_a[l]].location.start + 1), ExactPosition(int(feats[feats_a[l]].location.end + 1)), strand=1), type=feats[feats_a[l]].type) # depends on [control=['for'], data=['l']] # depends on [control=['if'], data=['j']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif s == 1:
st = feats[feats_a[j]].location.start + start
end = feats[feats_a[j]].location.end + en
start_val = st
if feats_a[j] != 'five_prime_UTR' and start_pos == 0:
if annotated == 0 and start_pos == 0 and (cutoff < 0.9) or (annotated == 0 and start_pos == 0 and (st < 6)) or (start_pos == 0 and len(feats) == 1 and (cutoff < 0.9)):
start_val = 0 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif feats_a[j] == 'five_prime_UTR':
start_val = 0 # depends on [control=['if'], data=[]]
feats[feats_a[j]] = SeqFeature(FeatureLocation(ExactPosition(start_val), ExactPosition(end), strand=1), type=feats[feats_a[j]].type)
if j != len(feats_a):
for l in range(j + 1, len(feats_a)):
feats[feats_a[l]] = SeqFeature(FeatureLocation(ExactPosition(feats[feats_a[l]].location.start + st), ExactPosition(int(feats[feats_a[l]].location.end + st)), strand=1), type=feats[feats_a[l]].type) # depends on [control=['for'], data=['l']] # depends on [control=['if'], data=['j']]
s = 0 # depends on [control=['if'], data=['s']] # depends on [control=['if'], data=['j']] # depends on [control=['for'], data=['i']]
return feats |
def all_to_annot(self, annot, names=['TPd', 'TPs', 'FP', 'FN']):
    """Convenience function to write all events to XML by category, showing
    overlapping TP detection and TP standard."""
    # Categories are written in a fixed order; indexing names[idx] keeps
    # the original IndexError behaviour when too few names are supplied.
    for idx, event_type in enumerate(('tp_det', 'tp_std', 'fp', 'fn')):
        self.to_annot(annot, event_type, names[idx])
constant[Convenience function to write all events to XML by category, showing
overlapping TP detection and TP standard.]
call[name[self].to_annot, parameter[name[annot], constant[tp_det], call[name[names]][constant[0]]]]
call[name[self].to_annot, parameter[name[annot], constant[tp_std], call[name[names]][constant[1]]]]
call[name[self].to_annot, parameter[name[annot], constant[fp], call[name[names]][constant[2]]]]
call[name[self].to_annot, parameter[name[annot], constant[fn], call[name[names]][constant[3]]]] | keyword[def] identifier[all_to_annot] ( identifier[self] , identifier[annot] , identifier[names] =[ literal[string] , literal[string] , literal[string] , literal[string] ]):
literal[string]
identifier[self] . identifier[to_annot] ( identifier[annot] , literal[string] , identifier[names] [ literal[int] ])
identifier[self] . identifier[to_annot] ( identifier[annot] , literal[string] , identifier[names] [ literal[int] ])
identifier[self] . identifier[to_annot] ( identifier[annot] , literal[string] , identifier[names] [ literal[int] ])
identifier[self] . identifier[to_annot] ( identifier[annot] , literal[string] , identifier[names] [ literal[int] ]) | def all_to_annot(self, annot, names=['TPd', 'TPs', 'FP', 'FN']):
"""Convenience function to write all events to XML by category, showing
overlapping TP detection and TP standard."""
self.to_annot(annot, 'tp_det', names[0])
self.to_annot(annot, 'tp_std', names[1])
self.to_annot(annot, 'fp', names[2])
self.to_annot(annot, 'fn', names[3]) |
def get_detail_intro(self, content_id):
    """
    Inquire detail introduction.

    Fetches the detail-intro record for ``content_id`` from the service
    behind ``self.detail_intro_url`` and normalises the raw field names
    (via ``_dict_key_changer`` keychains) per content type.

    :param content_id: Content ID to inquire
    :type content_id: str
    :rtype: dict
    """
    # Get content type id (needed as the second URL parameter below).
    content_type_id = self.get_detail_common(content_id)['content_type_id']
    resp = json.loads(urlopen(self.detail_intro_url.format(content_id, content_type_id)).read().decode('utf-8'))
    data = resp['response']['body']['items']['item']
    # Extract data: drop the identifiers that were only used for the query.
    del data['contentid']
    del data['contenttypeid']
    if content_type_id == 12:
        # Tourist attraction (관광지)
        keychain = {
            'accomcount': ('capacity', None),
            'chkbabycarriage': ('baby_carriage', None),
            'chkcreditcard': ('credit_card', None),
            'chkpet': ('pet', None),
            'expagerange': ('age_range', None),
            'expguide': ('guide', None),
            'infocenter': ('info_center', None),
            'opendate': ('open_date', None),
            'parking': ('parking', None),
            'restdate': ('rest_date', None),
            'useseason': ('season', None),
            'usetime': ('use_time', None)
        }
        _dict_key_changer(data, keychain)
        # Flag fields: True only when the raw value equals 1 (missing -> False).
        data['cultural_heritage'] = data.pop('heritage1', None) == 1
        data['natural_heritage'] = data.pop('heritage2', None) == 1
        data['archival_heritage'] = data.pop('heritage3', None) == 1
    elif content_type_id == 14:
        # Cultural facility (문화시설)
        keychain = {
            'accomcountculture': ('capacity', None),
            'chkbabycarriageculture': ('baby_carriage', None),
            'chkcreditcardculture': ('credit_card', None),
            'chkpetculture': ('pet', None),
            'discountinfo': ('discount_info', None),
            'infocenterculture': ('info_center', None),
            'parkingculture': ('parking', None),
            'parkingfee': ('parking_fee', None),
            'restdateculture': ('rest_date', None),
            'usefee': ('use_fee', None),
            'usetimeculture': ('use_time', None),
            # Hours of use (이용시간)
            'scale': ('scale', None),
            'spendtime': ('spend_time', None)
            # Time required for viewing (관람 소요시간)
        }
        _dict_key_changer(data, keychain)
    elif content_type_id == 15:
        # Festival / performance / event (축제/공연/행사)
        keychain = {
            'agelimit': ('age_limit', None),
            'bookingplace': ('reservation_place', None),
            'eventstartdate': ('start_date', None),
            'eventenddate': ('end_date', None),
            'eventplace': ('place', None),
            'festivalgrade': ('festival_grade', None),
            'placeinfo': ('place_guide', None),
            'spendtimefestival': ('spend_time', None),
            'sponsor1': ('organizer', None),
            'sponsor2': ('host', None),
            'subevent': ('sub_event', None),
            'usetimefestival': ('use_fee', None)
        }
        _dict_key_changer(data, keychain)
        # The event homepage field is deliberately discarded.
        data.pop('eventhomepage', None)
    elif content_type_id == 25:
        # Travel course (여행코스)
        keychain = {
            'distance': ('distance', None),
            'infocentertourcourse': ('info_center', None),
            'schedule': ('schedule', None),
            'taketime': ('spend_time', None),
            'theme': ('theme', None)
        }
        _dict_key_changer(data, keychain)
    elif content_type_id == 28:
        # Leisure sports (레포츠)
        keychain = {
            'accomcountleports': ('capacity', None),
            'chkbabycarriageleports': ('baby_carriage', None),
            'chkcreditcardleports': ('credit_card', None),
            'chkpetleports': ('pet', None),
            'expagerangeleports': ('age_range', None),
            'infocenterleports': ('info_center', None),
            'openperiod': ('open_period', None),
            'parkingleports': ('parking', None),
            'parkingfeeleports': ('parking_fee', None),
            'reservation': ('reservation_info', None),
            'restdateleports': ('rest_date', None),
            'scaleleports': ('scale', None),
            'usetimeleports': ('use_time', None),
            'usefeeleports': ('use_fee', None),
        }
        _dict_key_changer(data, keychain)
    elif content_type_id == 32:
        # Lodging (숙박)
        keychain = {
            'accomcountlodging': ('capacity', None),
            'checkintime': ('checkin_time', None),
            'checkouttime': ('checkout_time', None),
            'foodplace': ('food_field', None),
            'infocenterlodging': ('info_center', None),
            'parkinglodging': ('parking', None),
            'pickup': ('pickup_service', None),
            'reservationlodging': ('reservation_info', None),
            'roomtype': ('room_type', None),
            'scalelodging': ('scale', None),
            'subfacility': ('sub_facility', None)
        }
        _dict_key_changer(data, keychain)
        # Amenity flags: True only when the raw value equals 1
        # (missing -> False).
        data['benikia'] = data.pop('benikia', False) == 1
        data['cooking'] = data.pop('chkcooking', False) == 1
        data['goodstay'] = data.pop('goodstay', False) == 1
        data['korean_house'] = data.pop('hanok', False) == 1
        data['barbecue'] = data.pop('barbecue', False) == 1
        data['beauty'] = data.pop('beauty', False) == 1
        data['beverage'] = data.pop('beverage', False) == 1
        data['bicycle'] = data.pop('bicycle', False) == 1
        data['campfire'] = data.pop('campfire', False) == 1
        data['fitness'] = data.pop('fitness', False) == 1
        data['karaoke'] = data.pop('karaoke', False) == 1
        data['public_bath'] = data.pop('publicbath', False) == 1
        data['public_pc'] = data.pop('publicpc', False) == 1
        data['sauna'] = data.pop('sauna', False) == 1
        data['seminar'] = data.pop('seminar', False) == 1
        data['sports'] = data.pop('sports', False) == 1
    elif content_type_id == 38:
        # Shopping (쇼핑)
        keychain = {
            'chkbabycarriageshopping': ('baby_carriage', None),
            'chkcreditcardshopping': ('credit_card', None),
            'chkpetshopping': ('pet', None),
            'fairday': ('fair_day', None),
            'infocentershopping': ('info_center', None),
            'opendateshopping': ('open_date', None),
            'opentime': ('use_time', None),
            'parkingshopping': ('parking', None),
            'restdateshopping': ('rest_date', None),
            'restroom': ('restroom_info', None),
            'saleitem': ('sale_item', None),
            'saleitemcost': ('sale_item_cost', None),
            'scaleshopping': ('scale', None),
            'shopguide': ('guide', None)
        }
        _dict_key_changer(data, keychain)
    elif content_type_id == 39:
        # Food (음식)
        keychain = {
            'chkcreditcardfood': ('credit_card', None),
            # NOTE(review): 'discountinfofodd' looks like a typo for the
            # API field 'discountinfofood' -- verify against the actual
            # service response.
            'discountinfofodd': ('discount_info', None),
            'firstmenu': ('rep_menu', None),
            'infocenterfood': ('info_center', None),
            'kidsfacility': ('kids_facility', None),
            'opendatefood': ('open_date', None),
            'opentimefood': ('open_time', None),
            'packing': ('packing', None),
            'parkingfood': ('parking', None),
            'reservationfood': ('reservation_info', None),
            'restdatefood': ('rest_date', None),
            'scalefood': ('scale', None),
            'seat': ('seat', None),
            'smoking': ('smoking', None),
            'treatmenu': ('treat_menus', None)
        }
        _dict_key_changer(data, keychain)
        # NOTE(review): the keychain above already maps 'kidsfacility' to
        # 'kids_facility'; if _dict_key_changer renames keys in place this
        # membership test is always False and kids_facility is forced to
        # False here -- confirm _dict_key_changer's semantics.
        data['kids_facility'] = data.pop('kidsfacility') == 1 if 'kidsfacility' in data else False
    return data
constant[
Inquire detail introduction
:param content_id: Content ID to inquire
:type content_id: str
:rtype: dict
]
variable[content_type_id] assign[=] call[call[name[self].get_detail_common, parameter[name[content_id]]]][constant[content_type_id]]
variable[resp] assign[=] call[name[json].loads, parameter[call[call[call[name[urlopen], parameter[call[name[self].detail_intro_url.format, parameter[name[content_id], name[content_type_id]]]]].read, parameter[]].decode, parameter[constant[utf-8]]]]]
variable[data] assign[=] call[call[call[call[name[resp]][constant[response]]][constant[body]]][constant[items]]][constant[item]]
<ast.Delete object at 0x7da1b222db10>
<ast.Delete object at 0x7da1b222c4f0>
if compare[name[content_type_id] equal[==] constant[12]] begin[:]
variable[keychain] assign[=] dictionary[[<ast.Constant object at 0x7da1b222ca90>, <ast.Constant object at 0x7da1b222e080>, <ast.Constant object at 0x7da1b222f100>, <ast.Constant object at 0x7da1b222ca30>, <ast.Constant object at 0x7da1b222e170>, <ast.Constant object at 0x7da1b222f280>, <ast.Constant object at 0x7da1b222d060>, <ast.Constant object at 0x7da1b222f2b0>, <ast.Constant object at 0x7da1b222e440>, <ast.Constant object at 0x7da1b222d300>, <ast.Constant object at 0x7da1b222ce20>, <ast.Constant object at 0x7da1b222eaa0>], [<ast.Tuple object at 0x7da1b222e6b0>, <ast.Tuple object at 0x7da1b222d3c0>, <ast.Tuple object at 0x7da1b222e5c0>, <ast.Tuple object at 0x7da1b222e350>, <ast.Tuple object at 0x7da1b222c5e0>, <ast.Tuple object at 0x7da1b222ee30>, <ast.Tuple object at 0x7da1b222caf0>, <ast.Tuple object at 0x7da1b222d9f0>, <ast.Tuple object at 0x7da1b23440d0>, <ast.Tuple object at 0x7da1b2345ab0>, <ast.Tuple object at 0x7da1b2344130>, <ast.Tuple object at 0x7da1b2346710>]]
call[name[_dict_key_changer], parameter[name[data], name[keychain]]]
call[name[data]][constant[cultural_heritage]] assign[=] compare[call[name[data].pop, parameter[constant[heritage1], constant[None]]] equal[==] constant[1]]
call[name[data]][constant[natural_heritage]] assign[=] compare[call[name[data].pop, parameter[constant[heritage2], constant[None]]] equal[==] constant[1]]
call[name[data]][constant[archival_heritage]] assign[=] compare[call[name[data].pop, parameter[constant[heritage3], constant[None]]] equal[==] constant[1]]
return[name[data]] | keyword[def] identifier[get_detail_intro] ( identifier[self] , identifier[content_id] ):
literal[string]
identifier[content_type_id] = identifier[self] . identifier[get_detail_common] ( identifier[content_id] )[ literal[string] ]
identifier[resp] = identifier[json] . identifier[loads] ( identifier[urlopen] ( identifier[self] . identifier[detail_intro_url] . identifier[format] ( identifier[content_id] , identifier[content_type_id] )). identifier[read] (). identifier[decode] ( literal[string] ))
identifier[data] = identifier[resp] [ literal[string] ][ literal[string] ][ literal[string] ][ literal[string] ]
keyword[del] identifier[data] [ literal[string] ]
keyword[del] identifier[data] [ literal[string] ]
keyword[if] identifier[content_type_id] == literal[int] :
identifier[keychain] ={
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] )
}
identifier[_dict_key_changer] ( identifier[data] , identifier[keychain] )
identifier[data] [ literal[string] ]= identifier[data] . identifier[pop] ( literal[string] , keyword[None] )== literal[int]
identifier[data] [ literal[string] ]= identifier[data] . identifier[pop] ( literal[string] , keyword[None] )== literal[int]
identifier[data] [ literal[string] ]= identifier[data] . identifier[pop] ( literal[string] , keyword[None] )== literal[int]
keyword[elif] identifier[content_type_id] == literal[int] :
identifier[keychain] ={
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] )
}
identifier[_dict_key_changer] ( identifier[data] , identifier[keychain] )
keyword[elif] identifier[content_type_id] == literal[int] :
identifier[keychain] ={
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] )
}
identifier[_dict_key_changer] ( identifier[data] , identifier[keychain] )
identifier[data] . identifier[pop] ( literal[string] , keyword[None] )
keyword[elif] identifier[content_type_id] == literal[int] :
identifier[keychain] ={
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] )
}
identifier[_dict_key_changer] ( identifier[data] , identifier[keychain] )
keyword[elif] identifier[content_type_id] == literal[int] :
identifier[keychain] ={
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
}
identifier[_dict_key_changer] ( identifier[data] , identifier[keychain] )
keyword[elif] identifier[content_type_id] == literal[int] :
identifier[keychain] ={
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] )
}
identifier[_dict_key_changer] ( identifier[data] , identifier[keychain] )
identifier[data] [ literal[string] ]= identifier[data] . identifier[pop] ( literal[string] , keyword[False] )== literal[int]
identifier[data] [ literal[string] ]= identifier[data] . identifier[pop] ( literal[string] , keyword[False] )== literal[int]
identifier[data] [ literal[string] ]= identifier[data] . identifier[pop] ( literal[string] , keyword[False] )== literal[int]
identifier[data] [ literal[string] ]= identifier[data] . identifier[pop] ( literal[string] , keyword[False] )== literal[int]
identifier[data] [ literal[string] ]= identifier[data] . identifier[pop] ( literal[string] , keyword[False] )== literal[int]
identifier[data] [ literal[string] ]= identifier[data] . identifier[pop] ( literal[string] , keyword[False] )== literal[int]
identifier[data] [ literal[string] ]= identifier[data] . identifier[pop] ( literal[string] , keyword[False] )== literal[int]
identifier[data] [ literal[string] ]= identifier[data] . identifier[pop] ( literal[string] , keyword[False] )== literal[int]
identifier[data] [ literal[string] ]= identifier[data] . identifier[pop] ( literal[string] , keyword[False] )== literal[int]
identifier[data] [ literal[string] ]= identifier[data] . identifier[pop] ( literal[string] , keyword[False] )== literal[int]
identifier[data] [ literal[string] ]= identifier[data] . identifier[pop] ( literal[string] , keyword[False] )== literal[int]
identifier[data] [ literal[string] ]= identifier[data] . identifier[pop] ( literal[string] , keyword[False] )== literal[int]
identifier[data] [ literal[string] ]= identifier[data] . identifier[pop] ( literal[string] , keyword[False] )== literal[int]
identifier[data] [ literal[string] ]= identifier[data] . identifier[pop] ( literal[string] , keyword[False] )== literal[int]
identifier[data] [ literal[string] ]= identifier[data] . identifier[pop] ( literal[string] , keyword[False] )== literal[int]
identifier[data] [ literal[string] ]= identifier[data] . identifier[pop] ( literal[string] , keyword[False] )== literal[int]
keyword[elif] identifier[content_type_id] == literal[int] :
identifier[keychain] ={
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] )
}
identifier[_dict_key_changer] ( identifier[data] , identifier[keychain] )
keyword[elif] identifier[content_type_id] == literal[int] :
identifier[keychain] ={
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] ),
literal[string] :( literal[string] , keyword[None] )
}
identifier[_dict_key_changer] ( identifier[data] , identifier[keychain] )
identifier[data] [ literal[string] ]= identifier[data] . identifier[pop] ( literal[string] )== literal[int] keyword[if] literal[string] keyword[in] identifier[data] keyword[else] keyword[False]
keyword[return] identifier[data] | def get_detail_intro(self, content_id):
"""
Inquire detail introduction
:param content_id: Content ID to inquire
:type content_id: str
:rtype: dict
"""
content_type_id = self.get_detail_common(content_id)['content_type_id']
# Get content type id
resp = json.loads(urlopen(self.detail_intro_url.format(content_id, content_type_id)).read().decode('utf-8'))
data = resp['response']['body']['items']['item']
# Extract data
del data['contentid']
del data['contenttypeid']
if content_type_id == 12:
# 관광지
keychain = {'accomcount': ('capacity', None), 'chkbabycarriage': ('baby_carriage', None), 'chkcreditcard': ('credit_card', None), 'chkpet': ('pet', None), 'expagerange': ('age_range', None), 'expguide': ('guide', None), 'infocenter': ('info_center', None), 'opendate': ('open_date', None), 'parking': ('parking', None), 'restdate': ('rest_date', None), 'useseason': ('season', None), 'usetime': ('use_time', None)}
_dict_key_changer(data, keychain)
data['cultural_heritage'] = data.pop('heritage1', None) == 1
data['natural_heritage'] = data.pop('heritage2', None) == 1
data['archival_heritage'] = data.pop('heritage3', None) == 1 # depends on [control=['if'], data=[]]
elif content_type_id == 14:
# 문화시설
# 이용시간
# 관람 소요시간
keychain = {'accomcountculture': ('capacity', None), 'chkbabycarriageculture': ('baby_carriage', None), 'chkcreditcardculture': ('credit_card', None), 'chkpetculture': ('pet', None), 'discountinfo': ('discount_info', None), 'infocenterculture': ('info_center', None), 'parkingculture': ('parking', None), 'parkingfee': ('parking_fee', None), 'restdateculture': ('rest_date', None), 'usefee': ('use_fee', None), 'usetimeculture': ('use_time', None), 'scale': ('scale', None), 'spendtime': ('spend_time', None)}
_dict_key_changer(data, keychain) # depends on [control=['if'], data=[]]
elif content_type_id == 15:
# 축제/공연/행사
keychain = {'agelimit': ('age_limit', None), 'bookingplace': ('reservation_place', None), 'eventstartdate': ('start_date', None), 'eventenddate': ('end_date', None), 'eventplace': ('place', None), 'festivalgrade': ('festival_grade', None), 'placeinfo': ('place_guide', None), 'spendtimefestival': ('spend_time', None), 'sponsor1': ('organizer', None), 'sponsor2': ('host', None), 'subevent': ('sub_event', None), 'usetimefestival': ('use_fee', None)}
_dict_key_changer(data, keychain)
data.pop('eventhomepage', None) # depends on [control=['if'], data=[]]
elif content_type_id == 25:
# 여행코스
keychain = {'distance': ('distance', None), 'infocentertourcourse': ('info_center', None), 'schedule': ('schedule', None), 'taketime': ('spend_time', None), 'theme': ('theme', None)}
_dict_key_changer(data, keychain) # depends on [control=['if'], data=[]]
elif content_type_id == 28:
# 레포츠
keychain = {'accomcountleports': ('capacity', None), 'chkbabycarriageleports': ('baby_carriage', None), 'chkcreditcardleports': ('credit_card', None), 'chkpetleports': ('pet', None), 'expagerangeleports': ('age_range', None), 'infocenterleports': ('info_center', None), 'openperiod': ('open_period', None), 'parkingleports': ('parking', None), 'parkingfeeleports': ('parking_fee', None), 'reservation': ('reservation_info', None), 'restdateleports': ('rest_date', None), 'scaleleports': ('scale', None), 'usetimeleports': ('use_time', None), 'usefeeleports': ('use_fee', None)}
_dict_key_changer(data, keychain) # depends on [control=['if'], data=[]]
elif content_type_id == 32:
# 숙박
keychain = {'accomcountlodging': ('capacity', None), 'checkintime': ('checkin_time', None), 'checkouttime': ('checkout_time', None), 'foodplace': ('food_field', None), 'infocenterlodging': ('info_center', None), 'parkinglodging': ('parking', None), 'pickup': ('pickup_service', None), 'reservationlodging': ('reservation_info', None), 'roomtype': ('room_type', None), 'scalelodging': ('scale', None), 'subfacility': ('sub_facility', None)}
_dict_key_changer(data, keychain)
data['benikia'] = data.pop('benikia', False) == 1
data['cooking'] = data.pop('chkcooking', False) == 1
data['goodstay'] = data.pop('goodstay', False) == 1
data['korean_house'] = data.pop('hanok', False) == 1
data['barbecue'] = data.pop('barbecue', False) == 1
data['beauty'] = data.pop('beauty', False) == 1
data['beverage'] = data.pop('beverage', False) == 1
data['bicycle'] = data.pop('bicycle', False) == 1
data['campfire'] = data.pop('campfire', False) == 1
data['fitness'] = data.pop('fitness', False) == 1
data['karaoke'] = data.pop('karaoke', False) == 1
data['public_bath'] = data.pop('publicbath', False) == 1
data['public_pc'] = data.pop('publicpc', False) == 1
data['sauna'] = data.pop('sauna', False) == 1
data['seminar'] = data.pop('seminar', False) == 1
data['sports'] = data.pop('sports', False) == 1 # depends on [control=['if'], data=[]]
elif content_type_id == 38:
# 쇼핑
keychain = {'chkbabycarriageshopping': ('baby_carriage', None), 'chkcreditcardshopping': ('credit_card', None), 'chkpetshopping': ('pet', None), 'fairday': ('fair_day', None), 'infocentershopping': ('info_center', None), 'opendateshopping': ('open_date', None), 'opentime': ('use_time', None), 'parkingshopping': ('parking', None), 'restdateshopping': ('rest_date', None), 'restroom': ('restroom_info', None), 'saleitem': ('sale_item', None), 'saleitemcost': ('sale_item_cost', None), 'scaleshopping': ('scale', None), 'shopguide': ('guide', None)}
_dict_key_changer(data, keychain) # depends on [control=['if'], data=[]]
elif content_type_id == 39:
# 음식
keychain = {'chkcreditcardfood': ('credit_card', None), 'discountinfofodd': ('discount_info', None), 'firstmenu': ('rep_menu', None), 'infocenterfood': ('info_center', None), 'kidsfacility': ('kids_facility', None), 'opendatefood': ('open_date', None), 'opentimefood': ('open_time', None), 'packing': ('packing', None), 'parkingfood': ('parking', None), 'reservationfood': ('reservation_info', None), 'restdatefood': ('rest_date', None), 'scalefood': ('scale', None), 'seat': ('seat', None), 'smoking': ('smoking', None), 'treatmenu': ('treat_menus', None)}
_dict_key_changer(data, keychain)
data['kids_facility'] = data.pop('kidsfacility') == 1 if 'kidsfacility' in data else False # depends on [control=['if'], data=[]]
return data |
def with_indices(*args):
    '''
    Create indices for an event class. Every event class must be decorated with this decorator.
    '''
    def decorator(cls):
        # Find the first base class that already carries index metadata.
        indexed_base = None
        for base in cls.__bases__:
            if hasattr(base, '_indicesNames'):
                indexed_base = base
                break
        if indexed_base is None:
            # Root of the hierarchy: no inherited class-name slots.
            cls._classnameIndex = -1
            cls._indicesNames = args
        else:
            depth = indexed_base._classnameIndex + 1
            cls._classnameIndex = depth
            # Copy every inherited class-name slot, then register our own.
            for level in range(depth):
                slot = '_classname' + str(level)
                setattr(cls, slot, getattr(indexed_base, slot))
            own_slot = '_classname' + str(depth)
            setattr(cls, own_slot, cls._getTypename())
            cls._indicesNames = indexed_base._indicesNames + (own_slot,) + args
        cls._generateTemplate()
        return cls
    return decorator
constant[
Create indices for an event class. Every event class must be decorated with this decorator.
]
def function[decorator, parameter[cls]]:
for taget[name[c]] in starred[name[cls].__bases__] begin[:]
if call[name[hasattr], parameter[name[c], constant[_indicesNames]]] begin[:]
name[cls]._classnameIndex assign[=] binary_operation[name[c]._classnameIndex + constant[1]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], name[cls]._classnameIndex]]] begin[:]
call[name[setattr], parameter[name[cls], binary_operation[constant[_classname] + call[name[str], parameter[name[i]]]], call[name[getattr], parameter[name[c], binary_operation[constant[_classname] + call[name[str], parameter[name[i]]]]]]]]
call[name[setattr], parameter[name[cls], binary_operation[constant[_classname] + call[name[str], parameter[name[cls]._classnameIndex]]], call[name[cls]._getTypename, parameter[]]]]
name[cls]._indicesNames assign[=] binary_operation[binary_operation[name[c]._indicesNames + tuple[[<ast.BinOp object at 0x7da204565510>]]] + name[args]]
call[name[cls]._generateTemplate, parameter[]]
return[name[cls]]
name[cls]._classnameIndex assign[=] <ast.UnaryOp object at 0x7da204566e00>
name[cls]._indicesNames assign[=] name[args]
call[name[cls]._generateTemplate, parameter[]]
return[name[cls]]
return[name[decorator]] | keyword[def] identifier[with_indices] (* identifier[args] ):
literal[string]
keyword[def] identifier[decorator] ( identifier[cls] ):
keyword[for] identifier[c] keyword[in] identifier[cls] . identifier[__bases__] :
keyword[if] identifier[hasattr] ( identifier[c] , literal[string] ):
identifier[cls] . identifier[_classnameIndex] = identifier[c] . identifier[_classnameIndex] + literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[cls] . identifier[_classnameIndex] ):
identifier[setattr] ( identifier[cls] , literal[string] + identifier[str] ( identifier[i] ), identifier[getattr] ( identifier[c] , literal[string] + identifier[str] ( identifier[i] )))
identifier[setattr] ( identifier[cls] , literal[string] + identifier[str] ( identifier[cls] . identifier[_classnameIndex] ), identifier[cls] . identifier[_getTypename] ())
identifier[cls] . identifier[_indicesNames] = identifier[c] . identifier[_indicesNames] +( literal[string] + identifier[str] ( identifier[cls] . identifier[_classnameIndex] ),)+ identifier[args]
identifier[cls] . identifier[_generateTemplate] ()
keyword[return] identifier[cls]
identifier[cls] . identifier[_classnameIndex] =- literal[int]
identifier[cls] . identifier[_indicesNames] = identifier[args]
identifier[cls] . identifier[_generateTemplate] ()
keyword[return] identifier[cls]
keyword[return] identifier[decorator] | def with_indices(*args):
"""
Create indices for an event class. Every event class must be decorated with this decorator.
"""
def decorator(cls):
for c in cls.__bases__:
if hasattr(c, '_indicesNames'):
cls._classnameIndex = c._classnameIndex + 1
for i in range(0, cls._classnameIndex):
setattr(cls, '_classname' + str(i), getattr(c, '_classname' + str(i))) # depends on [control=['for'], data=['i']]
setattr(cls, '_classname' + str(cls._classnameIndex), cls._getTypename())
cls._indicesNames = c._indicesNames + ('_classname' + str(cls._classnameIndex),) + args
cls._generateTemplate()
return cls # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c']]
cls._classnameIndex = -1
cls._indicesNames = args
cls._generateTemplate()
return cls
return decorator |
def as_wfn(self):
        """
        Returns the CPE Name as WFN string of version 2.3.
        Only shows the first seven components.

        :return: CPE Name as WFN string
        :rtype: string
        :exception: TypeError - incompatible version
        """
        parts = [CPE2_3_WFN.CPE_PREFIX]
        for att in CPEComponent.CPE_COMP_KEYS:
            comp = self._get_attribute_components(att)[0]
            if isinstance(comp, (CPEComponentUndefined, CPEComponentEmpty)):
                # Undefined/empty attributes are not emitted in the WFN.
                continue
            # Emit an attribute="value" pair using the component's WFN form,
            # followed by the component separator.
            parts.append('{0}="{1}"'.format(att, comp.as_wfn()))
            parts.append(CPEComponent2_3_WFN.SEPARATOR_COMP)
        # Drop the last element (normally the separator after the final
        # pair), mirroring the unconditional trailing-element removal.
        parts = parts[:-1]
        parts.append(CPE2_3_WFN.CPE_SUFFIX)
        return "".join(parts)
constant[
Returns the CPE Name as WFN string of version 2.3.
Only shows the first seven components.
:return: CPE Name as WFN string
:rtype: string
:exception: TypeError - incompatible version
]
variable[wfn] assign[=] list[[]]
call[name[wfn].append, parameter[name[CPE2_3_WFN].CPE_PREFIX]]
for taget[name[ck]] in starred[name[CPEComponent].CPE_COMP_KEYS] begin[:]
variable[lc] assign[=] call[name[self]._get_attribute_components, parameter[name[ck]]]
variable[comp] assign[=] call[name[lc]][constant[0]]
if <ast.BoolOp object at 0x7da204622f80> begin[:]
continue
variable[wfn] assign[=] call[name[wfn]][<ast.Slice object at 0x7da204621bd0>]
call[name[wfn].append, parameter[name[CPE2_3_WFN].CPE_SUFFIX]]
return[call[constant[].join, parameter[name[wfn]]]] | keyword[def] identifier[as_wfn] ( identifier[self] ):
literal[string]
identifier[wfn] =[]
identifier[wfn] . identifier[append] ( identifier[CPE2_3_WFN] . identifier[CPE_PREFIX] )
keyword[for] identifier[ck] keyword[in] identifier[CPEComponent] . identifier[CPE_COMP_KEYS] :
identifier[lc] = identifier[self] . identifier[_get_attribute_components] ( identifier[ck] )
identifier[comp] = identifier[lc] [ literal[int] ]
keyword[if] ( identifier[isinstance] ( identifier[comp] , identifier[CPEComponentUndefined] ) keyword[or]
identifier[isinstance] ( identifier[comp] , identifier[CPEComponentEmpty] )):
keyword[continue]
keyword[else] :
identifier[v] =[]
identifier[v] . identifier[append] ( identifier[ck] )
identifier[v] . identifier[append] ( literal[string] )
identifier[v] . identifier[append] ( literal[string] )
identifier[v] . identifier[append] ( identifier[comp] . identifier[as_wfn] ())
identifier[v] . identifier[append] ( literal[string] )
identifier[wfn] . identifier[append] ( literal[string] . identifier[join] ( identifier[v] ))
identifier[wfn] . identifier[append] ( identifier[CPEComponent2_3_WFN] . identifier[SEPARATOR_COMP] )
identifier[wfn] = identifier[wfn] [:- literal[int] ]
identifier[wfn] . identifier[append] ( identifier[CPE2_3_WFN] . identifier[CPE_SUFFIX] )
keyword[return] literal[string] . identifier[join] ( identifier[wfn] ) | def as_wfn(self):
"""
Returns the CPE Name as WFN string of version 2.3.
Only shows the first seven components.
:return: CPE Name as WFN string
:rtype: string
:exception: TypeError - incompatible version
"""
wfn = []
wfn.append(CPE2_3_WFN.CPE_PREFIX)
for ck in CPEComponent.CPE_COMP_KEYS:
lc = self._get_attribute_components(ck)
comp = lc[0]
if isinstance(comp, CPEComponentUndefined) or isinstance(comp, CPEComponentEmpty):
# Do not set the attribute
continue # depends on [control=['if'], data=[]]
else:
v = []
v.append(ck)
v.append('=')
# Get the value of WFN of component
v.append('"')
v.append(comp.as_wfn())
v.append('"')
# Append v to the WFN and add a separator
wfn.append(''.join(v))
wfn.append(CPEComponent2_3_WFN.SEPARATOR_COMP) # depends on [control=['for'], data=['ck']]
# Del the last separator
wfn = wfn[:-1]
# Return the WFN string
wfn.append(CPE2_3_WFN.CPE_SUFFIX)
return ''.join(wfn) |
def removeHtmlTags(self, text):
        """convert bad tags into HTML identities

        Scans ``text`` for HTML tags, keeping a stack of currently open
        elements.  Tags that are not whitelisted, are mis-nested, or
        close nothing are escaped to ``&lt;``/``&gt;`` entities instead
        of being emitted as markup.  Any elements still open at the end
        of the input are closed explicitly.

        :param text: input text possibly containing raw HTML tags
        :return: text with invalid HTML tags converted to entities
        """
        sb = []
        text = self.removeHtmlComments(text)
        bits = text.split(u'<')
        # Everything before the first '<' is passed through untouched.
        sb.append(bits.pop(0))
        tagstack = []
        tablestack = tagstack
        for x in bits:
            m = _tagPattern.match(x)
            if not m:
                continue
            slash, t, params, brace, rest = m.groups()
            t = t.lower()
            badtag = False
            if t in _htmlelements:
                # Check our stack
                if slash:
                    # Closing a tag...
                    if t in _htmlsingleonly or len(tagstack) == 0:
                        badtag = True
                    else:
                        ot = tagstack.pop()
                        if ot != t:
                            if ot in _htmlsingleallowed:
                                # Pop all elements with an optional close tag
                                # and see if we find a match below them
                                optstack = []
                                optstack.append(ot)
                                while True:
                                    if len(tagstack) == 0:
                                        break
                                    ot = tagstack.pop()
                                    if ot == t or ot not in _htmlsingleallowed:
                                        break
                                    optstack.append(ot)
                                if t != ot:
                                    # No match. Push the optional elements back again
                                    badtag = True
                                    tagstack += reversed(optstack)
                            else:
                                tagstack.append(ot)
                                # <li> can be nested in <ul> or <ol>, skip those cases:
                                if ot not in _htmllist and t in _listtags:
                                    badtag = True
                        elif t == u'table':
                            if len(tablestack) == 0:
                                # BUGFIX: was 'bagtag = True' (typo), so a
                                # stray </table> was never flagged invalid.
                                badtag = True
                            else:
                                tagstack = tablestack.pop()
                    newparams = u''
                else:
                    # Keep track for later
                    if t in _tabletags and u'table' not in tagstack:
                        badtag = True
                    elif t in tagstack and t not in _htmlnest:
                        badtag = True
                    # Is it a self-closed htmlpair? (bug 5487)
                    elif brace == u'/>' and t in _htmlpairs:
                        # BUGFIX: was 'badTag = True' (typo), so self-closed
                        # pair elements (e.g. <div/>) slipped through.
                        badtag = True
                    elif t in _htmlsingleonly:
                        # Hack to force empty tag for uncloseable elements
                        brace = u'/>'
                    elif t in _htmlsingle:
                        # Hack to not close $htmlsingle tags
                        brace = None
                    else:
                        if t == u'table':
                            # Entering a table: save the outer tag context
                            # and start a fresh stack for the table body.
                            tablestack.append(tagstack)
                            tagstack = []
                        tagstack.append(t)
                    newparams = self.fixTagAttributes(params, t)
                if not badtag:
                    # BUGFIX: restored the '&gt;' entity — the escape had
                    # degraded to a no-op replace(u'>', u'>').
                    rest = rest.replace(u'>', u'&gt;')
                    if brace == u'/>':
                        close = u' /'
                    else:
                        close = u''
                    sb.append(u'<')
                    sb.append(slash)
                    sb.append(t)
                    sb.append(newparams)
                    sb.append(close)
                    sb.append(u'>')
                    sb.append(rest)
                    continue
            # Disallowed or malformed tag: escape it as entities.
            # BUGFIX: restored '&lt;'/'&gt;' — raw '<'/'>' were being
            # emitted, defeating the escaping this method documents.
            sb.append(u'&lt;')
            sb.append(x.replace(u'>', u'&gt;'))
        # Close off any remaining tags
        while tagstack:
            t = tagstack.pop()
            sb.append(u'</')
            sb.append(t)
            sb.append(u'>\n')
            if t == u'table':
                if not tablestack:
                    break
                tagstack = tablestack.pop()
        return u''.join(sb)
constant[convert bad tags into HTML identities]
variable[sb] assign[=] list[[]]
variable[text] assign[=] call[name[self].removeHtmlComments, parameter[name[text]]]
variable[bits] assign[=] call[name[text].split, parameter[constant[<]]]
call[name[sb].append, parameter[call[name[bits].pop, parameter[constant[0]]]]]
variable[tagstack] assign[=] list[[]]
variable[tablestack] assign[=] name[tagstack]
for taget[name[x]] in starred[name[bits]] begin[:]
variable[m] assign[=] call[name[_tagPattern].match, parameter[name[x]]]
if <ast.UnaryOp object at 0x7da18c4cc880> begin[:]
continue
<ast.Tuple object at 0x7da18c4cdb40> assign[=] call[name[m].groups, parameter[]]
variable[t] assign[=] call[name[t].lower, parameter[]]
variable[badtag] assign[=] constant[False]
if compare[name[t] in name[_htmlelements]] begin[:]
if name[slash] begin[:]
if <ast.BoolOp object at 0x7da18c4ce860> begin[:]
variable[badtag] assign[=] constant[True]
variable[newparams] assign[=] constant[]
if <ast.UnaryOp object at 0x7da18ede6170> begin[:]
variable[rest] assign[=] call[name[rest].replace, parameter[constant[>], constant[>]]]
if compare[name[brace] equal[==] constant[/>]] begin[:]
variable[close] assign[=] constant[ /]
call[name[sb].append, parameter[constant[<]]]
call[name[sb].append, parameter[name[slash]]]
call[name[sb].append, parameter[name[t]]]
call[name[sb].append, parameter[name[newparams]]]
call[name[sb].append, parameter[name[close]]]
call[name[sb].append, parameter[constant[>]]]
call[name[sb].append, parameter[name[rest]]]
continue
call[name[sb].append, parameter[constant[<]]]
call[name[sb].append, parameter[call[name[x].replace, parameter[constant[>], constant[>]]]]]
while name[tagstack] begin[:]
variable[t] assign[=] call[name[tagstack].pop, parameter[]]
call[name[sb].append, parameter[constant[</]]]
call[name[sb].append, parameter[name[t]]]
call[name[sb].append, parameter[constant[>
]]]
if compare[name[t] equal[==] constant[table]] begin[:]
if <ast.UnaryOp object at 0x7da20c6e4e20> begin[:]
break
variable[tagstack] assign[=] call[name[tablestack].pop, parameter[]]
return[call[constant[].join, parameter[name[sb]]]] | keyword[def] identifier[removeHtmlTags] ( identifier[self] , identifier[text] ):
literal[string]
identifier[sb] =[]
identifier[text] = identifier[self] . identifier[removeHtmlComments] ( identifier[text] )
identifier[bits] = identifier[text] . identifier[split] ( literal[string] )
identifier[sb] . identifier[append] ( identifier[bits] . identifier[pop] ( literal[int] ))
identifier[tagstack] =[]
identifier[tablestack] = identifier[tagstack]
keyword[for] identifier[x] keyword[in] identifier[bits] :
identifier[m] = identifier[_tagPattern] . identifier[match] ( identifier[x] )
keyword[if] keyword[not] identifier[m] :
keyword[continue]
identifier[slash] , identifier[t] , identifier[params] , identifier[brace] , identifier[rest] = identifier[m] . identifier[groups] ()
identifier[t] = identifier[t] . identifier[lower] ()
identifier[badtag] = keyword[False]
keyword[if] identifier[t] keyword[in] identifier[_htmlelements] :
keyword[if] identifier[slash] :
keyword[if] identifier[t] keyword[in] identifier[_htmlsingleonly] keyword[or] identifier[len] ( identifier[tagstack] )== literal[int] :
identifier[badtag] = keyword[True]
keyword[else] :
identifier[ot] = identifier[tagstack] . identifier[pop] ()
keyword[if] identifier[ot] != identifier[t] :
keyword[if] identifier[ot] keyword[in] identifier[_htmlsingleallowed] :
identifier[optstack] =[]
identifier[optstack] . identifier[append] ( identifier[ot] )
keyword[while] keyword[True] :
keyword[if] identifier[len] ( identifier[tagstack] )== literal[int] :
keyword[break]
identifier[ot] = identifier[tagstack] . identifier[pop] ()
keyword[if] identifier[ot] == identifier[t] keyword[or] identifier[ot] keyword[not] keyword[in] identifier[_htmlsingleallowed] :
keyword[break]
identifier[optstack] . identifier[append] ( identifier[ot] )
keyword[if] identifier[t] != identifier[ot] :
identifier[badtag] = keyword[True]
identifier[tagstack] += identifier[reversed] ( identifier[optstack] )
keyword[else] :
identifier[tagstack] . identifier[append] ( identifier[ot] )
keyword[if] identifier[ot] keyword[not] keyword[in] identifier[_htmllist] keyword[and] identifier[t] keyword[in] identifier[_listtags] :
identifier[badtag] = keyword[True]
keyword[elif] identifier[t] == literal[string] :
keyword[if] identifier[len] ( identifier[tablestack] )== literal[int] :
identifier[bagtag] = keyword[True]
keyword[else] :
identifier[tagstack] = identifier[tablestack] . identifier[pop] ()
identifier[newparams] = literal[string]
keyword[else] :
keyword[if] identifier[t] keyword[in] identifier[_tabletags] keyword[and] literal[string] keyword[not] keyword[in] identifier[tagstack] :
identifier[badtag] = keyword[True]
keyword[elif] identifier[t] keyword[in] identifier[tagstack] keyword[and] identifier[t] keyword[not] keyword[in] identifier[_htmlnest] :
identifier[badtag] = keyword[True]
keyword[elif] identifier[brace] == literal[string] keyword[and] identifier[t] keyword[in] identifier[_htmlpairs] :
identifier[badTag] = keyword[True]
keyword[elif] identifier[t] keyword[in] identifier[_htmlsingleonly] :
identifier[brace] = literal[string]
keyword[elif] identifier[t] keyword[in] identifier[_htmlsingle] :
identifier[brace] = keyword[None]
keyword[else] :
keyword[if] identifier[t] == literal[string] :
identifier[tablestack] . identifier[append] ( identifier[tagstack] )
identifier[tagstack] =[]
identifier[tagstack] . identifier[append] ( identifier[t] )
identifier[newparams] = identifier[self] . identifier[fixTagAttributes] ( identifier[params] , identifier[t] )
keyword[if] keyword[not] identifier[badtag] :
identifier[rest] = identifier[rest] . identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[brace] == literal[string] :
identifier[close] = literal[string]
keyword[else] :
identifier[close] = literal[string]
identifier[sb] . identifier[append] ( literal[string] )
identifier[sb] . identifier[append] ( identifier[slash] )
identifier[sb] . identifier[append] ( identifier[t] )
identifier[sb] . identifier[append] ( identifier[newparams] )
identifier[sb] . identifier[append] ( identifier[close] )
identifier[sb] . identifier[append] ( literal[string] )
identifier[sb] . identifier[append] ( identifier[rest] )
keyword[continue]
identifier[sb] . identifier[append] ( literal[string] )
identifier[sb] . identifier[append] ( identifier[x] . identifier[replace] ( literal[string] , literal[string] ))
keyword[while] identifier[tagstack] :
identifier[t] = identifier[tagstack] . identifier[pop] ()
identifier[sb] . identifier[append] ( literal[string] )
identifier[sb] . identifier[append] ( identifier[t] )
identifier[sb] . identifier[append] ( literal[string] )
keyword[if] identifier[t] == literal[string] :
keyword[if] keyword[not] identifier[tablestack] :
keyword[break]
identifier[tagstack] = identifier[tablestack] . identifier[pop] ()
keyword[return] literal[string] . identifier[join] ( identifier[sb] ) | def removeHtmlTags(self, text):
"""convert bad tags into HTML identities"""
sb = []
text = self.removeHtmlComments(text)
bits = text.split(u'<')
sb.append(bits.pop(0))
tagstack = []
tablestack = tagstack
for x in bits:
m = _tagPattern.match(x)
if not m:
continue # depends on [control=['if'], data=[]]
(slash, t, params, brace, rest) = m.groups()
t = t.lower()
badtag = False
if t in _htmlelements: # Check our stack
if slash: # Closing a tag...
if t in _htmlsingleonly or len(tagstack) == 0:
badtag = True # depends on [control=['if'], data=[]]
else:
ot = tagstack.pop()
if ot != t:
if ot in _htmlsingleallowed: # Pop all elements with an optional close tag
# and see if we find a match below them
optstack = []
optstack.append(ot)
while True:
if len(tagstack) == 0:
break # depends on [control=['if'], data=[]]
ot = tagstack.pop()
if ot == t or ot not in _htmlsingleallowed:
break # depends on [control=['if'], data=[]]
optstack.append(ot) # depends on [control=['while'], data=[]]
if t != ot: # No match. Push the optinal elements back again
badtag = True
tagstack += reversed(optstack) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['ot', '_htmlsingleallowed']]
else:
tagstack.append(ot) # <li> can be nested in <ul> or <ol>, skip those cases:
if ot not in _htmllist and t in _listtags:
badtag = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['ot', 't']]
elif t == u'table':
if len(tablestack) == 0:
bagtag = True # depends on [control=['if'], data=[]]
else:
tagstack = tablestack.pop() # depends on [control=['if'], data=[]]
newparams = u'' # depends on [control=['if'], data=[]]
else: # Keep track for later
if t in _tabletags and u'table' not in tagstack:
badtag = True # depends on [control=['if'], data=[]]
elif t in tagstack and t not in _htmlnest:
badtag = True # depends on [control=['if'], data=[]] # Is it a self-closed htmlpair? (bug 5487)
elif brace == u'/>' and t in _htmlpairs:
badTag = True # depends on [control=['if'], data=[]]
elif t in _htmlsingleonly: # Hack to force empty tag for uncloseable elements
brace = u'/>' # depends on [control=['if'], data=[]]
elif t in _htmlsingle: # Hack to not close $htmlsingle tags
brace = None # depends on [control=['if'], data=[]]
else:
if t == u'table':
tablestack.append(tagstack)
tagstack = [] # depends on [control=['if'], data=[]]
tagstack.append(t)
newparams = self.fixTagAttributes(params, t)
if not badtag:
rest = rest.replace(u'>', u'>')
if brace == u'/>':
close = u' /' # depends on [control=['if'], data=[]]
else:
close = u''
sb.append(u'<')
sb.append(slash)
sb.append(t)
sb.append(newparams)
sb.append(close)
sb.append(u'>')
sb.append(rest)
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['t']]
sb.append(u'<')
sb.append(x.replace(u'>', u'>')) # depends on [control=['for'], data=['x']] # Close off any remaining tags
while tagstack:
t = tagstack.pop()
sb.append(u'</')
sb.append(t)
sb.append(u'>\n')
if t == u'table':
if not tablestack:
break # depends on [control=['if'], data=[]]
tagstack = tablestack.pop() # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
return u''.join(sb) |
def envdict2listdict(envdict):
    """Dict --> Dict of lists.

    Split every value that contains ``os.path.pathsep`` into a list of
    its stripped components; values without a separator are left as-is.
    The dictionary is modified in place and also returned for chaining.
    """
    separator = os.path.pathsep
    for name, value in envdict.items():
        if separator not in value:
            continue
        envdict[name] = [part.strip() for part in value.split(separator)]
    return envdict
constant[Dict --> Dict of lists]
variable[sep] assign[=] name[os].path.pathsep
for taget[name[key]] in starred[name[envdict]] begin[:]
if compare[name[sep] in call[name[envdict]][name[key]]] begin[:]
call[name[envdict]][name[key]] assign[=] <ast.ListComp object at 0x7da18ede5a80>
return[name[envdict]] | keyword[def] identifier[envdict2listdict] ( identifier[envdict] ):
literal[string]
identifier[sep] = identifier[os] . identifier[path] . identifier[pathsep]
keyword[for] identifier[key] keyword[in] identifier[envdict] :
keyword[if] identifier[sep] keyword[in] identifier[envdict] [ identifier[key] ]:
identifier[envdict] [ identifier[key] ]=[ identifier[path] . identifier[strip] () keyword[for] identifier[path] keyword[in] identifier[envdict] [ identifier[key] ]. identifier[split] ( identifier[sep] )]
keyword[return] identifier[envdict] | def envdict2listdict(envdict):
"""Dict --> Dict of lists"""
sep = os.path.pathsep
for key in envdict:
if sep in envdict[key]:
envdict[key] = [path.strip() for path in envdict[key].split(sep)] # depends on [control=['if'], data=['sep']] # depends on [control=['for'], data=['key']]
return envdict |
def listing_searchable_text(instance):
    """Build the full-text search string for the audit metadata.

    Collects every value from every snapshot of *instance*, recursively
    unpacks nested lists/tuples/dicts, drops non-meaningful values
    (booleans, very short strings, ISO dates) and resolves UID strings
    to the title of the referenced object.  The surviving values are
    joined into one whitespace-separated string for catalog indexing.

    NOTE(review): this is Python 2 code -- it relies on the
    ``basestring``/``unicode`` builtins and on ``map()`` being eager;
    under Python 3 the inner ``map(append, ...)`` calls would be lazy
    and never execute.
    """
    # get all snapshots
    snapshots = get_snapshots(instance)
    # extract all snapshot values, because we are not interested in the
    # fieldnames (keys)
    values = map(lambda s: s.values(), snapshots)
    # prepare a set of unified catalog data (set: deduplicates values
    # that appear in several snapshots)
    catalog_data = set()
    # values to skip (serialized booleans / None carry no search value)
    skip_values = ["None", "true", "True", "false", "False"]
    # internal uid -> title cache so each UID is resolved only once
    uid_title_cache = {}
    # helper function to recursively unpack the snapshot values
    def append(value):
        if isinstance(value, (list, tuple)):
            map(append, value)
        elif isinstance(value, (dict)):
            map(append, value.items())
        elif isinstance(value, basestring):
            # convert unicode to UTF8
            if isinstance(value, unicode):
                value = api.safe_unicode(value).encode("utf8")
            # skip single short values
            if len(value) < 2:
                return
            # flush non meaningful values
            if value in skip_values:
                return
            # flush ISO dates (DATE_RX is defined at module level)
            if re.match(DATE_RX, value):
                return
            # fetch the title for UID-looking values
            if re.match(UID_RX, value):
                if value in uid_title_cache:
                    value = uid_title_cache[value]
                else:
                    title_or_id = get_title_or_id_from_uid(value)
                    uid_title_cache[value] = title_or_id
                    value = title_or_id
            catalog_data.add(value)
    # extract all meaningful values; chain() is called with a single
    # iterable here, so it simply iterates the per-snapshot value lists
    # (append() unpacks each nested list itself)
    for value in itertools.chain(values):
        append(value)
    return " ".join(catalog_data)
constant[Fulltext search for the audit metadata
]
variable[snapshots] assign[=] call[name[get_snapshots], parameter[name[instance]]]
variable[values] assign[=] call[name[map], parameter[<ast.Lambda object at 0x7da2049617b0>, name[snapshots]]]
variable[catalog_data] assign[=] call[name[set], parameter[]]
variable[skip_values] assign[=] list[[<ast.Constant object at 0x7da2049637f0>, <ast.Constant object at 0x7da204961870>, <ast.Constant object at 0x7da204960040>, <ast.Constant object at 0x7da2049607f0>, <ast.Constant object at 0x7da204961e40>]]
variable[uid_title_cache] assign[=] dictionary[[], []]
def function[append, parameter[value]]:
if call[name[isinstance], parameter[name[value], tuple[[<ast.Name object at 0x7da204960760>, <ast.Name object at 0x7da204963550>]]]] begin[:]
call[name[map], parameter[name[append], name[value]]]
for taget[name[value]] in starred[call[name[itertools].chain, parameter[name[values]]]] begin[:]
call[name[append], parameter[name[value]]]
return[call[constant[ ].join, parameter[name[catalog_data]]]] | keyword[def] identifier[listing_searchable_text] ( identifier[instance] ):
literal[string]
identifier[snapshots] = identifier[get_snapshots] ( identifier[instance] )
identifier[values] = identifier[map] ( keyword[lambda] identifier[s] : identifier[s] . identifier[values] (), identifier[snapshots] )
identifier[catalog_data] = identifier[set] ()
identifier[skip_values] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[uid_title_cache] ={}
keyword[def] identifier[append] ( identifier[value] ):
keyword[if] identifier[isinstance] ( identifier[value] ,( identifier[list] , identifier[tuple] )):
identifier[map] ( identifier[append] , identifier[value] )
keyword[elif] identifier[isinstance] ( identifier[value] ,( identifier[dict] )):
identifier[map] ( identifier[append] , identifier[value] . identifier[items] ())
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[basestring] ):
keyword[if] identifier[isinstance] ( identifier[value] , identifier[unicode] ):
identifier[value] = identifier[api] . identifier[safe_unicode] ( identifier[value] ). identifier[encode] ( literal[string] )
keyword[if] identifier[len] ( identifier[value] )< literal[int] :
keyword[return]
keyword[if] identifier[value] keyword[in] identifier[skip_values] :
keyword[return]
keyword[if] identifier[re] . identifier[match] ( identifier[DATE_RX] , identifier[value] ):
keyword[return]
keyword[if] identifier[re] . identifier[match] ( identifier[UID_RX] , identifier[value] ):
keyword[if] identifier[value] keyword[in] identifier[uid_title_cache] :
identifier[value] = identifier[uid_title_cache] [ identifier[value] ]
keyword[else] :
identifier[title_or_id] = identifier[get_title_or_id_from_uid] ( identifier[value] )
identifier[uid_title_cache] [ identifier[value] ]= identifier[title_or_id]
identifier[value] = identifier[title_or_id]
identifier[catalog_data] . identifier[add] ( identifier[value] )
keyword[for] identifier[value] keyword[in] identifier[itertools] . identifier[chain] ( identifier[values] ):
identifier[append] ( identifier[value] )
keyword[return] literal[string] . identifier[join] ( identifier[catalog_data] ) | def listing_searchable_text(instance):
"""Fulltext search for the audit metadata
"""
# get all snapshots
snapshots = get_snapshots(instance)
# extract all snapshot values, because we are not interested in the
# fieldnames (keys)
values = map(lambda s: s.values(), snapshots)
# prepare a set of unified catalog data
catalog_data = set()
# values to skip
skip_values = ['None', 'true', 'True', 'false', 'False']
# internal uid -> title cache
uid_title_cache = {}
# helper function to recursively unpack the snapshot values
def append(value):
if isinstance(value, (list, tuple)):
map(append, value) # depends on [control=['if'], data=[]]
elif isinstance(value, dict):
map(append, value.items()) # depends on [control=['if'], data=[]]
elif isinstance(value, basestring):
# convert unicode to UTF8
if isinstance(value, unicode):
value = api.safe_unicode(value).encode('utf8') # depends on [control=['if'], data=[]]
# skip single short values
if len(value) < 2:
return # depends on [control=['if'], data=[]]
# flush non meaningful values
if value in skip_values:
return # depends on [control=['if'], data=[]]
# flush ISO dates
if re.match(DATE_RX, value):
return # depends on [control=['if'], data=[]]
# fetch the title
if re.match(UID_RX, value):
if value in uid_title_cache:
value = uid_title_cache[value] # depends on [control=['if'], data=['value', 'uid_title_cache']]
else:
title_or_id = get_title_or_id_from_uid(value)
uid_title_cache[value] = title_or_id
value = title_or_id # depends on [control=['if'], data=[]]
catalog_data.add(value) # depends on [control=['if'], data=[]]
# extract all meaningful values
for value in itertools.chain(values):
append(value) # depends on [control=['for'], data=['value']]
return ' '.join(catalog_data) |
def change_logger_levels(logger=None, level=logging.DEBUG):
    """
    Go through the logger and handlers and update their levels to the
    one specified.

    :param logger: logging name or object to modify, defaults to root logger
    :param level: logging level to set at (10=Debug, 20=Info, 30=Warn,
        40=Error), or a level name such as ``"DEBUG"`` -- anything
        ``Logger.setLevel`` accepts
    """
    if not isinstance(logger, logging.Logger):
        # accept a logger name (or None for the root logger)
        logger = logging.getLogger(logger)
    logger.setLevel(level)
    for handler in logger.handlers:
        # Use the official setter rather than assigning ``handler.level``
        # directly: setLevel() normalizes string level names to ints, so
        # handlers stay consistent with the logger when a name is passed.
        handler.setLevel(level)
constant[
Go through the logger and handlers and update their levels to the
one specified.
:param logger: logging name or object to modify, defaults to root logger
:param level: logging level to set at (10=Debug, 20=Info, 30=Warn, 40=Error)
]
if <ast.UnaryOp object at 0x7da1b0fde9b0> begin[:]
variable[logger] assign[=] call[name[logging].getLogger, parameter[name[logger]]]
call[name[logger].setLevel, parameter[name[level]]]
for taget[name[handler]] in starred[name[logger].handlers] begin[:]
name[handler].level assign[=] name[level] | keyword[def] identifier[change_logger_levels] ( identifier[logger] = keyword[None] , identifier[level] = identifier[logging] . identifier[DEBUG] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[logger] , identifier[logging] . identifier[Logger] ):
identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[logger] )
identifier[logger] . identifier[setLevel] ( identifier[level] )
keyword[for] identifier[handler] keyword[in] identifier[logger] . identifier[handlers] :
identifier[handler] . identifier[level] = identifier[level] | def change_logger_levels(logger=None, level=logging.DEBUG):
"""
Go through the logger and handlers and update their levels to the
one specified.
:param logger: logging name or object to modify, defaults to root logger
:param level: logging level to set at (10=Debug, 20=Info, 30=Warn, 40=Error)
"""
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger) # depends on [control=['if'], data=[]]
logger.setLevel(level)
for handler in logger.handlers:
handler.level = level # depends on [control=['for'], data=['handler']] |
def conda_prefix(user=None):
    """
    Get the conda prefix for a particular user (~/anaconda)
    If user is None it defaults to /opt/anaconda
    """
    if user == 'root':
        # the root install lives at a grain-configurable system prefix
        return __salt__['grains.get']('conda:prefix', default='/opt/anaconda')
    if user is None:
        user = __salt__['pillar.get']('system:user', 'ubuntu')
    # anchor anaconda inside the user's home directory; implicitly
    # returns None when the user is not in the password database
    for account in pwd.getpwall():
        if account.pw_name == user:
            return os.path.join(account.pw_dir, 'anaconda')
constant[
Get the conda prefix for a particular user (~/anaconda)
If user is None it defaults to /opt/anaconda
]
if compare[name[user] equal[==] constant[root]] begin[:]
return[call[call[name[__salt__]][constant[grains.get]], parameter[constant[conda:prefix]]]] | keyword[def] identifier[conda_prefix] ( identifier[user] = keyword[None] ):
literal[string]
keyword[if] identifier[user] == literal[string] :
keyword[return] identifier[__salt__] [ literal[string] ]( literal[string] , identifier[default] = literal[string] )
keyword[else] :
keyword[if] identifier[user] keyword[is] keyword[None] :
identifier[user] = identifier[__salt__] [ literal[string] ]( literal[string] , literal[string] )
keyword[for] identifier[u] keyword[in] identifier[pwd] . identifier[getpwall] ():
keyword[if] identifier[u] . identifier[pw_name] == identifier[user] :
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[u] . identifier[pw_dir] , literal[string] ) | def conda_prefix(user=None):
"""
Get the conda prefix for a particular user (~/anaconda)
If user is None it defaults to /opt/anaconda
"""
if user == 'root':
return __salt__['grains.get']('conda:prefix', default='/opt/anaconda') # depends on [control=['if'], data=[]]
else:
if user is None:
user = __salt__['pillar.get']('system:user', 'ubuntu') # depends on [control=['if'], data=['user']]
for u in pwd.getpwall():
if u.pw_name == user:
return os.path.join(u.pw_dir, 'anaconda') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['u']] |
def _find_templates(self, name):
"""Yields all :class:`Template`\ s in the template hierarchy:
- template matching `name` in this TemplateConfiguration
- templates in base TemplateConfigurations (recursively)
- the default template defined in the DocumentTemplate class
"""
for template in self.configuration._find_templates_recursive(name):
yield template
yield self.get_default_template(name) | def function[_find_templates, parameter[self, name]]:
constant[Yields all :class:`Template`\ s in the template hierarchy:
- template matching `name` in this TemplateConfiguration
- templates in base TemplateConfigurations (recursively)
- the default template defined in the DocumentTemplate class
]
for taget[name[template]] in starred[call[name[self].configuration._find_templates_recursive, parameter[name[name]]]] begin[:]
<ast.Yield object at 0x7da2043440a0>
<ast.Yield object at 0x7da204346ce0> | keyword[def] identifier[_find_templates] ( identifier[self] , identifier[name] ):
literal[string]
keyword[for] identifier[template] keyword[in] identifier[self] . identifier[configuration] . identifier[_find_templates_recursive] ( identifier[name] ):
keyword[yield] identifier[template]
keyword[yield] identifier[self] . identifier[get_default_template] ( identifier[name] ) | def _find_templates(self, name):
"""Yields all :class:`Template`\\ s in the template hierarchy:
- template matching `name` in this TemplateConfiguration
- templates in base TemplateConfigurations (recursively)
- the default template defined in the DocumentTemplate class
"""
for template in self.configuration._find_templates_recursive(name):
yield template # depends on [control=['for'], data=['template']]
yield self.get_default_template(name) |
def obfuscate_builtins(module, tokens, name_generator, table=None):
    """
    Inserts an assignment, '<obfuscated identifier> = <builtin function>' at
    the beginning of *tokens* (after the shebang and encoding if present) for
    every Python built-in function that is used inside *tokens*. Also, replaces
    all of said built-in functions in *tokens* with each respective obfuscated
    identifier.

    Obfuscated identifier names are pulled out of name_generator via next().
    If *table* is provided, replacements will be looked up there before
    generating a new unique name, and ``table[0]`` is updated with the
    builtin -> replacement mapping produced here.

    :param module: module name forwarded to replace_obfuscatables().
    :param tokens: tokenize-style token list; modified in place.
    :param name_generator: iterator yielding fresh obfuscated identifiers.
    :param table: optional replacement lookup table (list whose first
        element is a dict).
    """
    used_builtins = analyze.enumerate_builtins(tokens)
    # remap_name() returns newline-separated "<new> = <builtin>" assignments
    obfuscated_assignments = remap_name(name_generator, used_builtins, table)
    replacements = []
    for assignment in obfuscated_assignments.split('\n'):
        # keep only the left-hand side: the obfuscated identifier
        replacements.append(assignment.split('=')[0])
    replacement_dict = dict(zip(used_builtins, replacements))
    if table:
        table[0].update(replacement_dict)
    # a single shared iterator: replace_obfuscatables() consumes one
    # replacement per builtin, in the same order they were generated
    iter_replacements = iter(replacements)
    for builtin in used_builtins:
        replace_obfuscatables(
            module, tokens, obfuscate_unique, builtin, iter_replacements)
    # Check for shebangs and encodings before we do anything else
    skip_tokens = 0
    matched_shebang = False
    matched_encoding = False
    for tok in tokens[0:4]: # Will always be in the first four tokens
        line = tok[4]
        if analyze.shebang.match(line): # (e.g. '#!/usr/bin/env python')
            if not matched_shebang:
                matched_shebang = True
                skip_tokens += 1
        elif analyze.encoding.match(line): # (e.g. '# -*- coding: utf-8 -*-')
            if not matched_encoding:
                matched_encoding = True
                skip_tokens += 1
    # insert the assignments right after the shebang/encoding lines so
    # they execute before any obfuscated builtin is referenced
    insert_in_next_line(tokens, skip_tokens, obfuscated_assignments)
constant[
Inserts an assignment, '<obfuscated identifier> = <builtin function>' at
the beginning of *tokens* (after the shebang and encoding if present) for
every Python built-in function that is used inside *tokens*. Also, replaces
all of said builti-in functions in *tokens* with each respective obfuscated
identifer.
Obfuscated identifier names are pulled out of name_generator via next().
If *table* is provided, replacements will be looked up there before
generating a new unique name.
]
variable[used_builtins] assign[=] call[name[analyze].enumerate_builtins, parameter[name[tokens]]]
variable[obfuscated_assignments] assign[=] call[name[remap_name], parameter[name[name_generator], name[used_builtins], name[table]]]
variable[replacements] assign[=] list[[]]
for taget[name[assignment]] in starred[call[name[obfuscated_assignments].split, parameter[constant[
]]]] begin[:]
call[name[replacements].append, parameter[call[call[name[assignment].split, parameter[constant[=]]]][constant[0]]]]
variable[replacement_dict] assign[=] call[name[dict], parameter[call[name[zip], parameter[name[used_builtins], name[replacements]]]]]
if name[table] begin[:]
call[call[name[table]][constant[0]].update, parameter[name[replacement_dict]]]
variable[iter_replacements] assign[=] call[name[iter], parameter[name[replacements]]]
for taget[name[builtin]] in starred[name[used_builtins]] begin[:]
call[name[replace_obfuscatables], parameter[name[module], name[tokens], name[obfuscate_unique], name[builtin], name[iter_replacements]]]
variable[skip_tokens] assign[=] constant[0]
variable[matched_shebang] assign[=] constant[False]
variable[matched_encoding] assign[=] constant[False]
for taget[name[tok]] in starred[call[name[tokens]][<ast.Slice object at 0x7da18c4cf5e0>]] begin[:]
variable[line] assign[=] call[name[tok]][constant[4]]
if call[name[analyze].shebang.match, parameter[name[line]]] begin[:]
if <ast.UnaryOp object at 0x7da18c4cd330> begin[:]
variable[matched_shebang] assign[=] constant[True]
<ast.AugAssign object at 0x7da18c4cf610>
call[name[insert_in_next_line], parameter[name[tokens], name[skip_tokens], name[obfuscated_assignments]]] | keyword[def] identifier[obfuscate_builtins] ( identifier[module] , identifier[tokens] , identifier[name_generator] , identifier[table] = keyword[None] ):
literal[string]
identifier[used_builtins] = identifier[analyze] . identifier[enumerate_builtins] ( identifier[tokens] )
identifier[obfuscated_assignments] = identifier[remap_name] ( identifier[name_generator] , identifier[used_builtins] , identifier[table] )
identifier[replacements] =[]
keyword[for] identifier[assignment] keyword[in] identifier[obfuscated_assignments] . identifier[split] ( literal[string] ):
identifier[replacements] . identifier[append] ( identifier[assignment] . identifier[split] ( literal[string] )[ literal[int] ])
identifier[replacement_dict] = identifier[dict] ( identifier[zip] ( identifier[used_builtins] , identifier[replacements] ))
keyword[if] identifier[table] :
identifier[table] [ literal[int] ]. identifier[update] ( identifier[replacement_dict] )
identifier[iter_replacements] = identifier[iter] ( identifier[replacements] )
keyword[for] identifier[builtin] keyword[in] identifier[used_builtins] :
identifier[replace_obfuscatables] (
identifier[module] , identifier[tokens] , identifier[obfuscate_unique] , identifier[builtin] , identifier[iter_replacements] )
identifier[skip_tokens] = literal[int]
identifier[matched_shebang] = keyword[False]
identifier[matched_encoding] = keyword[False]
keyword[for] identifier[tok] keyword[in] identifier[tokens] [ literal[int] : literal[int] ]:
identifier[line] = identifier[tok] [ literal[int] ]
keyword[if] identifier[analyze] . identifier[shebang] . identifier[match] ( identifier[line] ):
keyword[if] keyword[not] identifier[matched_shebang] :
identifier[matched_shebang] = keyword[True]
identifier[skip_tokens] += literal[int]
keyword[elif] identifier[analyze] . identifier[encoding] . identifier[match] ( identifier[line] ):
keyword[if] keyword[not] identifier[matched_encoding] :
identifier[matched_encoding] = keyword[True]
identifier[skip_tokens] += literal[int]
identifier[insert_in_next_line] ( identifier[tokens] , identifier[skip_tokens] , identifier[obfuscated_assignments] ) | def obfuscate_builtins(module, tokens, name_generator, table=None):
"""
Inserts an assignment, '<obfuscated identifier> = <builtin function>' at
the beginning of *tokens* (after the shebang and encoding if present) for
every Python built-in function that is used inside *tokens*. Also, replaces
all of said builti-in functions in *tokens* with each respective obfuscated
identifer.
Obfuscated identifier names are pulled out of name_generator via next().
If *table* is provided, replacements will be looked up there before
generating a new unique name.
"""
used_builtins = analyze.enumerate_builtins(tokens)
obfuscated_assignments = remap_name(name_generator, used_builtins, table)
replacements = []
for assignment in obfuscated_assignments.split('\n'):
replacements.append(assignment.split('=')[0]) # depends on [control=['for'], data=['assignment']]
replacement_dict = dict(zip(used_builtins, replacements))
if table:
table[0].update(replacement_dict) # depends on [control=['if'], data=[]]
iter_replacements = iter(replacements)
for builtin in used_builtins:
replace_obfuscatables(module, tokens, obfuscate_unique, builtin, iter_replacements) # depends on [control=['for'], data=['builtin']]
# Check for shebangs and encodings before we do anything else
skip_tokens = 0
matched_shebang = False
matched_encoding = False
for tok in tokens[0:4]: # Will always be in the first four tokens
line = tok[4]
if analyze.shebang.match(line): # (e.g. '#!/usr/bin/env python')
if not matched_shebang:
matched_shebang = True
skip_tokens += 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif analyze.encoding.match(line): # (e.g. '# -*- coding: utf-8 -*-')
if not matched_encoding:
matched_encoding = True
skip_tokens += 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tok']]
insert_in_next_line(tokens, skip_tokens, obfuscated_assignments) |
def libvlc_video_set_key_input(p_mi, on):
    '''Enable or disable key press events handling, according to the LibVLC hotkeys
    configuration. By default and for historical reasons, keyboard events are
    handled by the LibVLC video widget.
    @note: On X11, there can be only one subscriber for key press and mouse
    click events per window. If your application has subscribed to those events
    for the X window ID of the video widget, then LibVLC will not be able to
    handle key presses and mouse clicks in any case.
    @warning: This function is only implemented for X11 and Win32 at the moment.
    @param p_mi: the media player.
    @param on: true to handle key press events, false to ignore them.
    '''
    # Resolve the C entry point lazily: reuse the wrapper from _Cfunctions
    # if present, otherwise build a ctypes binding taking two by-value
    # input args (MediaPlayer, unsigned int) and returning nothing.
    # NOTE(review): _Cfunction presumably registers the new wrapper in
    # _Cfunctions so later calls hit the cache -- confirm in its definition.
    f = _Cfunctions.get('libvlc_video_set_key_input', None) or \
        _Cfunction('libvlc_video_set_key_input', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_uint)
    return f(p_mi, on)
constant[Enable or disable key press events handling, according to the LibVLC hotkeys
configuration. By default and for historical reasons, keyboard events are
handled by the LibVLC video widget.
@note: On X11, there can be only one subscriber for key press and mouse
click events per window. If your application has subscribed to those events
for the X window ID of the video widget, then LibVLC will not be able to
handle key presses and mouse clicks in any case.
@warning: This function is only implemented for X11 and Win32 at the moment.
@param p_mi: the media player.
@param on: true to handle key press events, false to ignore them.
]
variable[f] assign[=] <ast.BoolOp object at 0x7da1b2345990>
return[call[name[f], parameter[name[p_mi], name[on]]]] | keyword[def] identifier[libvlc_video_set_key_input] ( identifier[p_mi] , identifier[on] ):
literal[string]
identifier[f] = identifier[_Cfunctions] . identifier[get] ( literal[string] , keyword[None] ) keyword[or] identifier[_Cfunction] ( literal[string] ,(( literal[int] ,),( literal[int] ,),), keyword[None] ,
keyword[None] , identifier[MediaPlayer] , identifier[ctypes] . identifier[c_uint] )
keyword[return] identifier[f] ( identifier[p_mi] , identifier[on] ) | def libvlc_video_set_key_input(p_mi, on):
"""Enable or disable key press events handling, according to the LibVLC hotkeys
configuration. By default and for historical reasons, keyboard events are
handled by the LibVLC video widget.
@note: On X11, there can be only one subscriber for key press and mouse
click events per window. If your application has subscribed to those events
for the X window ID of the video widget, then LibVLC will not be able to
handle key presses and mouse clicks in any case.
@warning: This function is only implemented for X11 and Win32 at the moment.
@param p_mi: the media player.
@param on: true to handle key press events, false to ignore them.
"""
f = _Cfunctions.get('libvlc_video_set_key_input', None) or _Cfunction('libvlc_video_set_key_input', ((1,), (1,)), None, None, MediaPlayer, ctypes.c_uint)
return f(p_mi, on) |
def getPluginsList(self, enable=True):
"""Return the plugins list.
if enable is True, only return the active plugins (default)
if enable is False, return all the plugins
Return: list of plugin name
"""
if enable:
return [p for p in self._plugins if self._plugins[p].is_enable()]
else:
return [p for p in self._plugins] | def function[getPluginsList, parameter[self, enable]]:
constant[Return the plugins list.
if enable is True, only return the active plugins (default)
if enable is False, return all the plugins
Return: list of plugin name
]
if name[enable] begin[:]
return[<ast.ListComp object at 0x7da1b21e2380>] | keyword[def] identifier[getPluginsList] ( identifier[self] , identifier[enable] = keyword[True] ):
literal[string]
keyword[if] identifier[enable] :
keyword[return] [ identifier[p] keyword[for] identifier[p] keyword[in] identifier[self] . identifier[_plugins] keyword[if] identifier[self] . identifier[_plugins] [ identifier[p] ]. identifier[is_enable] ()]
keyword[else] :
keyword[return] [ identifier[p] keyword[for] identifier[p] keyword[in] identifier[self] . identifier[_plugins] ] | def getPluginsList(self, enable=True):
"""Return the plugins list.
if enable is True, only return the active plugins (default)
if enable is False, return all the plugins
Return: list of plugin name
"""
if enable:
return [p for p in self._plugins if self._plugins[p].is_enable()] # depends on [control=['if'], data=[]]
else:
return [p for p in self._plugins] |
def convert_to_numpy_bytes(data, length=None):
""" Decodes data to Numpy UTF-8 econded string (bytes\_).
Decodes `data` to a Numpy UTF-8 encoded string, which is
``numpy.bytes_``, or an array of them in which case it will be ASCII
encoded instead. If it can't be decoded, it is returned as
is. Unsigned integers, Python string types (``str``, ``bytes``), and
``numpy.unicode_`` (UTF-32) are supported.
For an array of unsigned integers, it may be desirable to make an
array with strings of some specified length as opposed to an array
of the same size with each element being a one element string. This
naturally arises when converting strings to unsigned integer types
in the first place, so it needs to be reversible. The `length`
parameter specifies how many to group together into a string
(desired string length). For 1d arrays, this is along its only
dimension. For higher dimensional arrays, it is done along each row
(across columns). So, for a 3x10x5 input array of uints and a
`length` of 5, the output array would be a 3x2x5 of 5 element
strings.
Parameters
----------
data : some type
Data decode into a Numpy UTF-8 encoded string/s.
length : int or None, optional
The number of consecutive elements (in the case of unsigned
integer `data`) to compose each string in the output array from.
``None`` indicates the full amount for a 1d array or the number
of columns (full length of row) for a higher dimension array.
Returns
-------
b : numpy.bytes\_ or numpy.ndarray of numpy.bytes\_ or data
If `data` can be decoded into a ``numpy.bytes_`` or a
``numpy.ndarray`` of them, the decoded version is returned.
Otherwise, `data` is returned unchanged.
See Also
--------
convert_to_str
convert_to_numpy_str
numpy.bytes\_
"""
# The method of conversion depends on its type.
if isinstance(data, np.bytes_) or (isinstance(data, np.ndarray) \
and data.dtype.char == 'S'):
# It is already an np.bytes_ or array of them, so nothing needs
# to be done.
return data
elif isinstance(data, (bytes, bytearray)):
# Easily converted through constructor.
return np.bytes_(data)
elif (sys.hexversion >= 0x03000000 and isinstance(data, str)) \
or (sys.hexversion < 0x03000000 \
and isinstance(data, unicode)):
return np.bytes_(data.encode('UTF-8'))
elif isinstance(data, (np.uint16, np.uint32)):
# They are single UTF-16 or UTF-32 scalars, and are easily
# converted to a UTF-8 string and then passed through the
# constructor.
return np.bytes_(convert_to_str(data).encode('UTF-8'))
elif isinstance(data, np.uint8):
# It is just the uint8 version of the character, so it just
# needs to be have the dtype essentially changed by having its
# bytes read into ndarray.
return np.ndarray(shape=tuple(), dtype='S1',
buffer=data.flatten().tostring())[()]
elif isinstance(data, np.ndarray) and data.dtype.char == 'U':
# We just need to convert it elementwise.
new_data = np.zeros(shape=data.shape,
dtype='S' + str(data.dtype.itemsize))
for index, x in np.ndenumerate(data):
new_data[index] = np.bytes_(x.encode('UTF-8'))
return new_data
elif isinstance(data, np.ndarray) \
and data.dtype.name in ('uint8', 'uint16', 'uint32'):
# It is an ndarray of some uint type. How it is converted
# depends on its shape. If its shape is just (), then it is just
# a scalar wrapped in an array, which can be converted by
# recursing the scalar value back into this function.
shape = list(data.shape)
if len(shape) == 0:
return convert_to_numpy_bytes(data[()])
# As there are more than one element, it gets a bit more
# complicated. We need to take the subarrays of the specified
# length along columns (1D arrays will be treated as row arrays
# here), each of those converted to an str_ scalar (normal
# string) and stuffed into a new array.
#
# If the length was not given, it needs to be set to full. Then
# the shape of the new array needs to be calculated (divide the
# appropriate dimension, which depends on the number of
# dimentions).
if len(shape) == 1:
if length is None:
length2 = shape[0]
new_shape = (shape[0],)
else:
length2 = length
new_shape = (shape[0]//length2,)
else:
if length is None:
length2 = shape[-1]
else:
length2 = length
new_shape = copy.deepcopy(shape)
new_shape[-1] //= length2
# The new array can be made as all zeros (nulls) with enough
# padding to hold everything (dtype='UL' where 'L' is the
# length). It will start out as a 1d array and be reshaped into
# the proper shape later (makes indexing easier).
new_data = np.zeros(shape=(np.prod(new_shape),),
dtype='S'+str(length2))
# With data flattened into a 1d array, we just need to take
# length sized chunks, convert them (if they are uint8 or 16,
# then decode to str first, if they are uint32, put them as an
# input buffer for an ndarray of type 'U').
data = data.flatten()
for i in range(0, new_data.shape[0]):
chunk = data[(i*length2):((i+1)*length2)]
if data.dtype.name == 'uint8':
new_data[i] = np.ndarray(shape=tuple(),
dtype=new_data.dtype,
buffer=chunk.tostring())[()]
else:
new_data[i] = np.bytes_( \
convert_to_str(chunk).encode('UTF-8'))
# Only thing is left is to reshape it.
return new_data.reshape(tuple(new_shape))
else:
# Couldn't figure out what it is, so nothing can be done but
# return it as is.
return data | def function[convert_to_numpy_bytes, parameter[data, length]]:
constant[ Decodes data to Numpy UTF-8 econded string (bytes\_).
Decodes `data` to a Numpy UTF-8 encoded string, which is
``numpy.bytes_``, or an array of them in which case it will be ASCII
encoded instead. If it can't be decoded, it is returned as
is. Unsigned integers, Python string types (``str``, ``bytes``), and
``numpy.unicode_`` (UTF-32) are supported.
For an array of unsigned integers, it may be desirable to make an
array with strings of some specified length as opposed to an array
of the same size with each element being a one element string. This
naturally arises when converting strings to unsigned integer types
in the first place, so it needs to be reversible. The `length`
parameter specifies how many to group together into a string
(desired string length). For 1d arrays, this is along its only
dimension. For higher dimensional arrays, it is done along each row
(across columns). So, for a 3x10x5 input array of uints and a
`length` of 5, the output array would be a 3x2x5 of 5 element
strings.
Parameters
----------
data : some type
Data decode into a Numpy UTF-8 encoded string/s.
length : int or None, optional
The number of consecutive elements (in the case of unsigned
integer `data`) to compose each string in the output array from.
``None`` indicates the full amount for a 1d array or the number
of columns (full length of row) for a higher dimension array.
Returns
-------
b : numpy.bytes\_ or numpy.ndarray of numpy.bytes\_ or data
If `data` can be decoded into a ``numpy.bytes_`` or a
``numpy.ndarray`` of them, the decoded version is returned.
Otherwise, `data` is returned unchanged.
See Also
--------
convert_to_str
convert_to_numpy_str
numpy.bytes\_
]
if <ast.BoolOp object at 0x7da1b2864640> begin[:]
return[name[data]] | keyword[def] identifier[convert_to_numpy_bytes] ( identifier[data] , identifier[length] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[data] , identifier[np] . identifier[bytes_] ) keyword[or] ( identifier[isinstance] ( identifier[data] , identifier[np] . identifier[ndarray] ) keyword[and] identifier[data] . identifier[dtype] . identifier[char] == literal[string] ):
keyword[return] identifier[data]
keyword[elif] identifier[isinstance] ( identifier[data] ,( identifier[bytes] , identifier[bytearray] )):
keyword[return] identifier[np] . identifier[bytes_] ( identifier[data] )
keyword[elif] ( identifier[sys] . identifier[hexversion] >= literal[int] keyword[and] identifier[isinstance] ( identifier[data] , identifier[str] )) keyword[or] ( identifier[sys] . identifier[hexversion] < literal[int] keyword[and] identifier[isinstance] ( identifier[data] , identifier[unicode] )):
keyword[return] identifier[np] . identifier[bytes_] ( identifier[data] . identifier[encode] ( literal[string] ))
keyword[elif] identifier[isinstance] ( identifier[data] ,( identifier[np] . identifier[uint16] , identifier[np] . identifier[uint32] )):
keyword[return] identifier[np] . identifier[bytes_] ( identifier[convert_to_str] ( identifier[data] ). identifier[encode] ( literal[string] ))
keyword[elif] identifier[isinstance] ( identifier[data] , identifier[np] . identifier[uint8] ):
keyword[return] identifier[np] . identifier[ndarray] ( identifier[shape] = identifier[tuple] (), identifier[dtype] = literal[string] ,
identifier[buffer] = identifier[data] . identifier[flatten] (). identifier[tostring] ())[()]
keyword[elif] identifier[isinstance] ( identifier[data] , identifier[np] . identifier[ndarray] ) keyword[and] identifier[data] . identifier[dtype] . identifier[char] == literal[string] :
identifier[new_data] = identifier[np] . identifier[zeros] ( identifier[shape] = identifier[data] . identifier[shape] ,
identifier[dtype] = literal[string] + identifier[str] ( identifier[data] . identifier[dtype] . identifier[itemsize] ))
keyword[for] identifier[index] , identifier[x] keyword[in] identifier[np] . identifier[ndenumerate] ( identifier[data] ):
identifier[new_data] [ identifier[index] ]= identifier[np] . identifier[bytes_] ( identifier[x] . identifier[encode] ( literal[string] ))
keyword[return] identifier[new_data]
keyword[elif] identifier[isinstance] ( identifier[data] , identifier[np] . identifier[ndarray] ) keyword[and] identifier[data] . identifier[dtype] . identifier[name] keyword[in] ( literal[string] , literal[string] , literal[string] ):
identifier[shape] = identifier[list] ( identifier[data] . identifier[shape] )
keyword[if] identifier[len] ( identifier[shape] )== literal[int] :
keyword[return] identifier[convert_to_numpy_bytes] ( identifier[data] [()])
keyword[if] identifier[len] ( identifier[shape] )== literal[int] :
keyword[if] identifier[length] keyword[is] keyword[None] :
identifier[length2] = identifier[shape] [ literal[int] ]
identifier[new_shape] =( identifier[shape] [ literal[int] ],)
keyword[else] :
identifier[length2] = identifier[length]
identifier[new_shape] =( identifier[shape] [ literal[int] ]// identifier[length2] ,)
keyword[else] :
keyword[if] identifier[length] keyword[is] keyword[None] :
identifier[length2] = identifier[shape] [- literal[int] ]
keyword[else] :
identifier[length2] = identifier[length]
identifier[new_shape] = identifier[copy] . identifier[deepcopy] ( identifier[shape] )
identifier[new_shape] [- literal[int] ]//= identifier[length2]
identifier[new_data] = identifier[np] . identifier[zeros] ( identifier[shape] =( identifier[np] . identifier[prod] ( identifier[new_shape] ),),
identifier[dtype] = literal[string] + identifier[str] ( identifier[length2] ))
identifier[data] = identifier[data] . identifier[flatten] ()
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[new_data] . identifier[shape] [ literal[int] ]):
identifier[chunk] = identifier[data] [( identifier[i] * identifier[length2] ):(( identifier[i] + literal[int] )* identifier[length2] )]
keyword[if] identifier[data] . identifier[dtype] . identifier[name] == literal[string] :
identifier[new_data] [ identifier[i] ]= identifier[np] . identifier[ndarray] ( identifier[shape] = identifier[tuple] (),
identifier[dtype] = identifier[new_data] . identifier[dtype] ,
identifier[buffer] = identifier[chunk] . identifier[tostring] ())[()]
keyword[else] :
identifier[new_data] [ identifier[i] ]= identifier[np] . identifier[bytes_] ( identifier[convert_to_str] ( identifier[chunk] ). identifier[encode] ( literal[string] ))
keyword[return] identifier[new_data] . identifier[reshape] ( identifier[tuple] ( identifier[new_shape] ))
keyword[else] :
keyword[return] identifier[data] | def convert_to_numpy_bytes(data, length=None):
""" Decodes data to Numpy UTF-8 econded string (bytes\\_).
Decodes `data` to a Numpy UTF-8 encoded string, which is
``numpy.bytes_``, or an array of them in which case it will be ASCII
encoded instead. If it can't be decoded, it is returned as
is. Unsigned integers, Python string types (``str``, ``bytes``), and
``numpy.unicode_`` (UTF-32) are supported.
For an array of unsigned integers, it may be desirable to make an
array with strings of some specified length as opposed to an array
of the same size with each element being a one element string. This
naturally arises when converting strings to unsigned integer types
in the first place, so it needs to be reversible. The `length`
parameter specifies how many to group together into a string
(desired string length). For 1d arrays, this is along its only
dimension. For higher dimensional arrays, it is done along each row
(across columns). So, for a 3x10x5 input array of uints and a
`length` of 5, the output array would be a 3x2x5 of 5 element
strings.
Parameters
----------
data : some type
Data decode into a Numpy UTF-8 encoded string/s.
length : int or None, optional
The number of consecutive elements (in the case of unsigned
integer `data`) to compose each string in the output array from.
``None`` indicates the full amount for a 1d array or the number
of columns (full length of row) for a higher dimension array.
Returns
-------
b : numpy.bytes\\_ or numpy.ndarray of numpy.bytes\\_ or data
If `data` can be decoded into a ``numpy.bytes_`` or a
``numpy.ndarray`` of them, the decoded version is returned.
Otherwise, `data` is returned unchanged.
See Also
--------
convert_to_str
convert_to_numpy_str
numpy.bytes\\_
"""
# The method of conversion depends on its type.
if isinstance(data, np.bytes_) or (isinstance(data, np.ndarray) and data.dtype.char == 'S'):
# It is already an np.bytes_ or array of them, so nothing needs
# to be done.
return data # depends on [control=['if'], data=[]]
elif isinstance(data, (bytes, bytearray)):
# Easily converted through constructor.
return np.bytes_(data) # depends on [control=['if'], data=[]]
elif sys.hexversion >= 50331648 and isinstance(data, str) or (sys.hexversion < 50331648 and isinstance(data, unicode)):
return np.bytes_(data.encode('UTF-8')) # depends on [control=['if'], data=[]]
elif isinstance(data, (np.uint16, np.uint32)):
# They are single UTF-16 or UTF-32 scalars, and are easily
# converted to a UTF-8 string and then passed through the
# constructor.
return np.bytes_(convert_to_str(data).encode('UTF-8')) # depends on [control=['if'], data=[]]
elif isinstance(data, np.uint8):
# It is just the uint8 version of the character, so it just
# needs to be have the dtype essentially changed by having its
# bytes read into ndarray.
return np.ndarray(shape=tuple(), dtype='S1', buffer=data.flatten().tostring())[()] # depends on [control=['if'], data=[]]
elif isinstance(data, np.ndarray) and data.dtype.char == 'U':
# We just need to convert it elementwise.
new_data = np.zeros(shape=data.shape, dtype='S' + str(data.dtype.itemsize))
for (index, x) in np.ndenumerate(data):
new_data[index] = np.bytes_(x.encode('UTF-8')) # depends on [control=['for'], data=[]]
return new_data # depends on [control=['if'], data=[]]
elif isinstance(data, np.ndarray) and data.dtype.name in ('uint8', 'uint16', 'uint32'):
# It is an ndarray of some uint type. How it is converted
# depends on its shape. If its shape is just (), then it is just
# a scalar wrapped in an array, which can be converted by
# recursing the scalar value back into this function.
shape = list(data.shape)
if len(shape) == 0:
return convert_to_numpy_bytes(data[()]) # depends on [control=['if'], data=[]]
# As there are more than one element, it gets a bit more
# complicated. We need to take the subarrays of the specified
# length along columns (1D arrays will be treated as row arrays
# here), each of those converted to an str_ scalar (normal
# string) and stuffed into a new array.
#
# If the length was not given, it needs to be set to full. Then
# the shape of the new array needs to be calculated (divide the
# appropriate dimension, which depends on the number of
# dimentions).
if len(shape) == 1:
if length is None:
length2 = shape[0]
new_shape = (shape[0],) # depends on [control=['if'], data=[]]
else:
length2 = length
new_shape = (shape[0] // length2,) # depends on [control=['if'], data=[]]
else:
if length is None:
length2 = shape[-1] # depends on [control=['if'], data=[]]
else:
length2 = length
new_shape = copy.deepcopy(shape)
new_shape[-1] //= length2
# The new array can be made as all zeros (nulls) with enough
# padding to hold everything (dtype='UL' where 'L' is the
# length). It will start out as a 1d array and be reshaped into
# the proper shape later (makes indexing easier).
new_data = np.zeros(shape=(np.prod(new_shape),), dtype='S' + str(length2))
# With data flattened into a 1d array, we just need to take
# length sized chunks, convert them (if they are uint8 or 16,
# then decode to str first, if they are uint32, put them as an
# input buffer for an ndarray of type 'U').
data = data.flatten()
for i in range(0, new_data.shape[0]):
chunk = data[i * length2:(i + 1) * length2]
if data.dtype.name == 'uint8':
new_data[i] = np.ndarray(shape=tuple(), dtype=new_data.dtype, buffer=chunk.tostring())[()] # depends on [control=['if'], data=[]]
else:
new_data[i] = np.bytes_(convert_to_str(chunk).encode('UTF-8')) # depends on [control=['for'], data=['i']]
# Only thing is left is to reshape it.
return new_data.reshape(tuple(new_shape)) # depends on [control=['if'], data=[]]
else:
# Couldn't figure out what it is, so nothing can be done but
# return it as is.
return data |
def _compute_experiment_from_runs(self):
"""Computes a minimal Experiment protocol buffer by scanning the runs."""
hparam_infos = self._compute_hparam_infos()
if not hparam_infos:
return None
metric_infos = self._compute_metric_infos()
return api_pb2.Experiment(hparam_infos=hparam_infos,
metric_infos=metric_infos) | def function[_compute_experiment_from_runs, parameter[self]]:
constant[Computes a minimal Experiment protocol buffer by scanning the runs.]
variable[hparam_infos] assign[=] call[name[self]._compute_hparam_infos, parameter[]]
if <ast.UnaryOp object at 0x7da1b1ea0160> begin[:]
return[constant[None]]
variable[metric_infos] assign[=] call[name[self]._compute_metric_infos, parameter[]]
return[call[name[api_pb2].Experiment, parameter[]]] | keyword[def] identifier[_compute_experiment_from_runs] ( identifier[self] ):
literal[string]
identifier[hparam_infos] = identifier[self] . identifier[_compute_hparam_infos] ()
keyword[if] keyword[not] identifier[hparam_infos] :
keyword[return] keyword[None]
identifier[metric_infos] = identifier[self] . identifier[_compute_metric_infos] ()
keyword[return] identifier[api_pb2] . identifier[Experiment] ( identifier[hparam_infos] = identifier[hparam_infos] ,
identifier[metric_infos] = identifier[metric_infos] ) | def _compute_experiment_from_runs(self):
"""Computes a minimal Experiment protocol buffer by scanning the runs."""
hparam_infos = self._compute_hparam_infos()
if not hparam_infos:
return None # depends on [control=['if'], data=[]]
metric_infos = self._compute_metric_infos()
return api_pb2.Experiment(hparam_infos=hparam_infos, metric_infos=metric_infos) |
def all_settings(self, uppercase_keys=False):
"""Return all settings as a `dict`."""
d = {}
for k in self.all_keys(uppercase_keys):
d[k] = self.get(k)
return d | def function[all_settings, parameter[self, uppercase_keys]]:
constant[Return all settings as a `dict`.]
variable[d] assign[=] dictionary[[], []]
for taget[name[k]] in starred[call[name[self].all_keys, parameter[name[uppercase_keys]]]] begin[:]
call[name[d]][name[k]] assign[=] call[name[self].get, parameter[name[k]]]
return[name[d]] | keyword[def] identifier[all_settings] ( identifier[self] , identifier[uppercase_keys] = keyword[False] ):
literal[string]
identifier[d] ={}
keyword[for] identifier[k] keyword[in] identifier[self] . identifier[all_keys] ( identifier[uppercase_keys] ):
identifier[d] [ identifier[k] ]= identifier[self] . identifier[get] ( identifier[k] )
keyword[return] identifier[d] | def all_settings(self, uppercase_keys=False):
"""Return all settings as a `dict`."""
d = {}
for k in self.all_keys(uppercase_keys):
d[k] = self.get(k) # depends on [control=['for'], data=['k']]
return d |
def create_bokeh_server(io_loop, files, argvs, host, port):
'''Start bokeh server with applications paths'''
from bokeh.server.server import Server
from bokeh.command.util import build_single_handler_applications
# Turn file paths into bokeh apps
apps = build_single_handler_applications(files, argvs)
# kwargs lifted from bokeh serve call to Server, with created io_loop
kwargs = {
'io_loop':io_loop,
'generate_session_ids':True,
'redirect_root':True,
'use_x_headers':False,
'secret_key':None,
'num_procs':1,
'host': host,
'sign_sessions':False,
'develop':False,
'port':port,
'use_index':True
}
server = Server(apps,**kwargs)
return server | def function[create_bokeh_server, parameter[io_loop, files, argvs, host, port]]:
constant[Start bokeh server with applications paths]
from relative_module[bokeh.server.server] import module[Server]
from relative_module[bokeh.command.util] import module[build_single_handler_applications]
variable[apps] assign[=] call[name[build_single_handler_applications], parameter[name[files], name[argvs]]]
variable[kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da18ede5810>, <ast.Constant object at 0x7da18ede5150>, <ast.Constant object at 0x7da18ede75e0>, <ast.Constant object at 0x7da18ede6950>, <ast.Constant object at 0x7da18ede6650>, <ast.Constant object at 0x7da18ede58d0>, <ast.Constant object at 0x7da18ede5f60>, <ast.Constant object at 0x7da18ede6a40>, <ast.Constant object at 0x7da18ede4280>, <ast.Constant object at 0x7da18ede6860>, <ast.Constant object at 0x7da18ede4670>], [<ast.Name object at 0x7da18ede6560>, <ast.Constant object at 0x7da18ede46a0>, <ast.Constant object at 0x7da18ede61a0>, <ast.Constant object at 0x7da18ede53c0>, <ast.Constant object at 0x7da18ede76d0>, <ast.Constant object at 0x7da18ede4e80>, <ast.Name object at 0x7da18ede6f20>, <ast.Constant object at 0x7da18ede4fa0>, <ast.Constant object at 0x7da18ede6350>, <ast.Name object at 0x7da18ede6d40>, <ast.Constant object at 0x7da18ede5330>]]
variable[server] assign[=] call[name[Server], parameter[name[apps]]]
return[name[server]] | keyword[def] identifier[create_bokeh_server] ( identifier[io_loop] , identifier[files] , identifier[argvs] , identifier[host] , identifier[port] ):
literal[string]
keyword[from] identifier[bokeh] . identifier[server] . identifier[server] keyword[import] identifier[Server]
keyword[from] identifier[bokeh] . identifier[command] . identifier[util] keyword[import] identifier[build_single_handler_applications]
identifier[apps] = identifier[build_single_handler_applications] ( identifier[files] , identifier[argvs] )
identifier[kwargs] ={
literal[string] : identifier[io_loop] ,
literal[string] : keyword[True] ,
literal[string] : keyword[True] ,
literal[string] : keyword[False] ,
literal[string] : keyword[None] ,
literal[string] : literal[int] ,
literal[string] : identifier[host] ,
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] : identifier[port] ,
literal[string] : keyword[True]
}
identifier[server] = identifier[Server] ( identifier[apps] ,** identifier[kwargs] )
keyword[return] identifier[server] | def create_bokeh_server(io_loop, files, argvs, host, port):
"""Start bokeh server with applications paths"""
from bokeh.server.server import Server
from bokeh.command.util import build_single_handler_applications
# Turn file paths into bokeh apps
apps = build_single_handler_applications(files, argvs)
# kwargs lifted from bokeh serve call to Server, with created io_loop
kwargs = {'io_loop': io_loop, 'generate_session_ids': True, 'redirect_root': True, 'use_x_headers': False, 'secret_key': None, 'num_procs': 1, 'host': host, 'sign_sessions': False, 'develop': False, 'port': port, 'use_index': True}
server = Server(apps, **kwargs)
return server |
def get_default_object_parsers(parser_finder: ParserFinder, conversion_finder: ConversionFinder) -> List[AnyParser]:
"""
Utility method to return the default parsers able to parse an object from a file.
Note that MultifileObjectParser is not provided in this list, as it is already added in a hardcoded way in
RootParser
:return:
"""
return [SingleFileParserFunction(parser_function=read_object_from_pickle,
streaming_mode=False,
supported_exts={'.pyc'},
supported_types={AnyObject}),
MultifileObjectParser(parser_finder, conversion_finder)
] | def function[get_default_object_parsers, parameter[parser_finder, conversion_finder]]:
constant[
Utility method to return the default parsers able to parse an object from a file.
Note that MultifileObjectParser is not provided in this list, as it is already added in a hardcoded way in
RootParser
:return:
]
return[list[[<ast.Call object at 0x7da18ede7d30>, <ast.Call object at 0x7da18ede6140>]]] | keyword[def] identifier[get_default_object_parsers] ( identifier[parser_finder] : identifier[ParserFinder] , identifier[conversion_finder] : identifier[ConversionFinder] )-> identifier[List] [ identifier[AnyParser] ]:
literal[string]
keyword[return] [ identifier[SingleFileParserFunction] ( identifier[parser_function] = identifier[read_object_from_pickle] ,
identifier[streaming_mode] = keyword[False] ,
identifier[supported_exts] ={ literal[string] },
identifier[supported_types] ={ identifier[AnyObject] }),
identifier[MultifileObjectParser] ( identifier[parser_finder] , identifier[conversion_finder] )
] | def get_default_object_parsers(parser_finder: ParserFinder, conversion_finder: ConversionFinder) -> List[AnyParser]:
"""
Utility method to return the default parsers able to parse an object from a file.
Note that MultifileObjectParser is not provided in this list, as it is already added in a hardcoded way in
RootParser
:return:
"""
return [SingleFileParserFunction(parser_function=read_object_from_pickle, streaming_mode=False, supported_exts={'.pyc'}, supported_types={AnyObject}), MultifileObjectParser(parser_finder, conversion_finder)] |
def get_first_element_index(root, tag_name):
"""
In order to use Element.insert() in a convenient way,
this function will find the first child tag with tag_name
and return its index position
The index can then be used to insert an element before or after the
found tag using Element.insert()
"""
tag_index = 1
for tag in root:
if tag.tag == tag_name:
# Return the first one found if there is a match
return tag_index
tag_index = tag_index + 1
# Default
return None | def function[get_first_element_index, parameter[root, tag_name]]:
constant[
In order to use Element.insert() in a convenient way,
this function will find the first child tag with tag_name
and return its index position
The index can then be used to insert an element before or after the
found tag using Element.insert()
]
variable[tag_index] assign[=] constant[1]
for taget[name[tag]] in starred[name[root]] begin[:]
if compare[name[tag].tag equal[==] name[tag_name]] begin[:]
return[name[tag_index]]
variable[tag_index] assign[=] binary_operation[name[tag_index] + constant[1]]
return[constant[None]] | keyword[def] identifier[get_first_element_index] ( identifier[root] , identifier[tag_name] ):
literal[string]
identifier[tag_index] = literal[int]
keyword[for] identifier[tag] keyword[in] identifier[root] :
keyword[if] identifier[tag] . identifier[tag] == identifier[tag_name] :
keyword[return] identifier[tag_index]
identifier[tag_index] = identifier[tag_index] + literal[int]
keyword[return] keyword[None] | def get_first_element_index(root, tag_name):
"""
In order to use Element.insert() in a convenient way,
this function will find the first child tag with tag_name
and return its index position
The index can then be used to insert an element before or after the
found tag using Element.insert()
"""
tag_index = 1
for tag in root:
if tag.tag == tag_name:
# Return the first one found if there is a match
return tag_index # depends on [control=['if'], data=[]]
tag_index = tag_index + 1 # depends on [control=['for'], data=['tag']]
# Default
return None |
def create_finding(
    self,
    parent,
    finding_id,
    finding,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """
    Create a finding; the corresponding parent source must already exist.

    Example:
        >>> from google.cloud import securitycenter_v1
        >>> client = securitycenter_v1.SecurityCenterClient()
        >>> parent = client.source_path('[ORGANIZATION]', '[SOURCE]')
        >>> response = client.create_finding(parent, '', {})

    Args:
        parent (str): Resource name of the new finding's parent, in the form
            "organizations/[organization\\_id]/sources/[source\\_id]".
        finding_id (str): Client-chosen identifier, unique within the parent
            scope; alphanumeric and 1-32 characters long.
        finding (Union[dict, ~google.cloud.securitycenter_v1.types.Finding]):
            The Finding being created. Its name and security\\_marks fields
            are output-only and therefore ignored. A dict must match the
            protobuf message :class:`~google.cloud.securitycenter_v1.types.Finding`.
        retry (Optional[google.api_core.retry.Retry]): Retry policy for the
            request; ``None`` disables retries.
        timeout (Optional[float]): Time in seconds to wait per attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            sent with the request.

    Returns:
        A :class:`~google.cloud.securitycenter_v1.types.Finding` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request failed
            for any reason.
        google.api_core.exceptions.RetryError: If retry attempts were
            exhausted.
        ValueError: If the parameters are invalid.
    """
    # Lazily build and cache the retry/timeout-aware wrapper around the raw
    # transport method the first time this RPC is invoked.
    if "create_finding" not in self._inner_api_calls:
        method_config = self._method_configs["CreateFinding"]
        self._inner_api_calls[
            "create_finding"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.create_finding,
            default_retry=method_config.retry,
            default_timeout=method_config.timeout,
            client_info=self._client_info,
        )

    request = securitycenter_service_pb2.CreateFindingRequest(
        parent=parent, finding_id=finding_id, finding=finding
    )

    # Normalize caller-supplied metadata into a mutable list.
    all_metadata = list(metadata) if metadata is not None else []
    try:
        routing_header = [("parent", parent)]
    except AttributeError:
        pass
    else:
        # Attach the routing header so the backend can route by parent.
        all_metadata.append(
            google.api_core.gapic_v1.routing_header.to_grpc_metadata(
                routing_header
            )
        )

    return self._inner_api_calls["create_finding"](
        request, retry=retry, timeout=timeout, metadata=all_metadata
    )
constant[
Creates a finding. The corresponding source must exist for finding creation
to succeed.
Example:
>>> from google.cloud import securitycenter_v1
>>>
>>> client = securitycenter_v1.SecurityCenterClient()
>>>
>>> parent = client.source_path('[ORGANIZATION]', '[SOURCE]')
>>>
>>> # TODO: Initialize `finding_id`:
>>> finding_id = ''
>>>
>>> # TODO: Initialize `finding`:
>>> finding = {}
>>>
>>> response = client.create_finding(parent, finding_id, finding)
Args:
parent (str): Resource name of the new finding's parent. Its format should be
"organizations/[organization\_id]/sources/[source\_id]".
finding_id (str): Unique identifier provided by the client within the parent scope.
It must be alphanumeric and less than or equal to 32 characters and
greater than 0 characters in length.
finding (Union[dict, ~google.cloud.securitycenter_v1.types.Finding]): The Finding being created. The name and security\_marks will be ignored
as they are both output only fields on this resource.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.securitycenter_v1.types.Finding`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.securitycenter_v1.types.Finding` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
]
if compare[constant[create_finding] <ast.NotIn object at 0x7da2590d7190> name[self]._inner_api_calls] begin[:]
call[name[self]._inner_api_calls][constant[create_finding]] assign[=] call[name[google].api_core.gapic_v1.method.wrap_method, parameter[name[self].transport.create_finding]]
variable[request] assign[=] call[name[securitycenter_service_pb2].CreateFindingRequest, parameter[]]
if compare[name[metadata] is constant[None]] begin[:]
variable[metadata] assign[=] list[[]]
variable[metadata] assign[=] call[name[list], parameter[name[metadata]]]
<ast.Try object at 0x7da2046225c0>
return[call[call[name[self]._inner_api_calls][constant[create_finding]], parameter[name[request]]]] | keyword[def] identifier[create_finding] (
identifier[self] ,
identifier[parent] ,
identifier[finding_id] ,
identifier[finding] ,
identifier[retry] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] ,
identifier[timeout] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] ,
identifier[metadata] = keyword[None] ,
):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_inner_api_calls] :
identifier[self] . identifier[_inner_api_calls] [
literal[string]
]= identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[wrap_method] (
identifier[self] . identifier[transport] . identifier[create_finding] ,
identifier[default_retry] = identifier[self] . identifier[_method_configs] [ literal[string] ]. identifier[retry] ,
identifier[default_timeout] = identifier[self] . identifier[_method_configs] [ literal[string] ]. identifier[timeout] ,
identifier[client_info] = identifier[self] . identifier[_client_info] ,
)
identifier[request] = identifier[securitycenter_service_pb2] . identifier[CreateFindingRequest] (
identifier[parent] = identifier[parent] , identifier[finding_id] = identifier[finding_id] , identifier[finding] = identifier[finding]
)
keyword[if] identifier[metadata] keyword[is] keyword[None] :
identifier[metadata] =[]
identifier[metadata] = identifier[list] ( identifier[metadata] )
keyword[try] :
identifier[routing_header] =[( literal[string] , identifier[parent] )]
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[else] :
identifier[routing_metadata] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[routing_header] . identifier[to_grpc_metadata] (
identifier[routing_header]
)
identifier[metadata] . identifier[append] ( identifier[routing_metadata] )
keyword[return] identifier[self] . identifier[_inner_api_calls] [ literal[string] ](
identifier[request] , identifier[retry] = identifier[retry] , identifier[timeout] = identifier[timeout] , identifier[metadata] = identifier[metadata]
) | def create_finding(self, parent, finding_id, finding, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None):
"""
Creates a finding. The corresponding source must exist for finding creation
to succeed.
Example:
>>> from google.cloud import securitycenter_v1
>>>
>>> client = securitycenter_v1.SecurityCenterClient()
>>>
>>> parent = client.source_path('[ORGANIZATION]', '[SOURCE]')
>>>
>>> # TODO: Initialize `finding_id`:
>>> finding_id = ''
>>>
>>> # TODO: Initialize `finding`:
>>> finding = {}
>>>
>>> response = client.create_finding(parent, finding_id, finding)
Args:
parent (str): Resource name of the new finding's parent. Its format should be
"organizations/[organization\\_id]/sources/[source\\_id]".
finding_id (str): Unique identifier provided by the client within the parent scope.
It must be alphanumeric and less than or equal to 32 characters and
greater than 0 characters in length.
finding (Union[dict, ~google.cloud.securitycenter_v1.types.Finding]): The Finding being created. The name and security\\_marks will be ignored
as they are both output only fields on this resource.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.securitycenter_v1.types.Finding`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.securitycenter_v1.types.Finding` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'create_finding' not in self._inner_api_calls:
self._inner_api_calls['create_finding'] = google.api_core.gapic_v1.method.wrap_method(self.transport.create_finding, default_retry=self._method_configs['CreateFinding'].retry, default_timeout=self._method_configs['CreateFinding'].timeout, client_info=self._client_info) # depends on [control=['if'], data=[]]
request = securitycenter_service_pb2.CreateFindingRequest(parent=parent, finding_id=finding_id, finding=finding)
if metadata is None:
metadata = [] # depends on [control=['if'], data=['metadata']]
metadata = list(metadata)
try:
routing_header = [('parent', parent)] # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(routing_header)
metadata.append(routing_metadata)
return self._inner_api_calls['create_finding'](request, retry=retry, timeout=timeout, metadata=metadata) |
def rank(self, item):
    '''Return the rank (index) of ``item`` in this :class:`zset`,
    or ``None`` when the item is not a member.'''
    score = self._dict.get(item)
    if score is None:
        # Unknown member: no rank to report.
        return None
    return self._sl.rank(score)
constant[Return the rank (index) of ``item`` in this :class:`zset`.]
variable[score] assign[=] call[name[self]._dict.get, parameter[name[item]]]
if compare[name[score] is_not constant[None]] begin[:]
return[call[name[self]._sl.rank, parameter[name[score]]]] | keyword[def] identifier[rank] ( identifier[self] , identifier[item] ):
literal[string]
identifier[score] = identifier[self] . identifier[_dict] . identifier[get] ( identifier[item] )
keyword[if] identifier[score] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[_sl] . identifier[rank] ( identifier[score] ) | def rank(self, item):
"""Return the rank (index) of ``item`` in this :class:`zset`."""
score = self._dict.get(item)
if score is not None:
return self._sl.rank(score) # depends on [control=['if'], data=['score']] |
def update_annotation_version(xml_file):
    """Update the fields that have changed over different versions.

    Parameters
    ----------
    xml_file : path to file
        xml file with the sleep scoring

    Notes
    -----
    new in version 4: use 'marker_name' instead of simply 'name' etc
    new in version 5: use 'bookmark' instead of 'marker'
    """
    with open(xml_file, 'r') as f:
        s = f.read()

    # Read the on-disk annotation version from the root element.
    m = search('<annotations version="([0-9]*)">', s)
    current = int(m.groups()[0])

    if current < 4:
        # v4 split the single <time> into explicit start/end (both set to
        # the old time) and added an empty channel element.  Raw strings
        # keep the \g<N> group references from being mangled as (invalid)
        # string escapes.
        s = sub(r'<marker><name>(.*?)</name><time>(.*?)</time></marker>',
                r'<marker><marker_name>\g<1></marker_name><marker_start>\g<2></marker_start><marker_end>\g<2></marker_end><marker_chan/></marker>',
                s)

    if current < 5:
        # v5 renamed "marker" to "bookmark" throughout.
        s = s.replace('marker', 'bookmark')

    # Stamp the file with the latest version number.
    # note indentation
    s = sub('<annotations version="[0-9]*">',
            '<annotations version="5">', s)

    with open(xml_file, 'w') as f:
        f.write(s)
constant[Update the fields that have changed over different versions.
Parameters
----------
xml_file : path to file
xml file with the sleep scoring
Notes
-----
new in version 4: use 'marker_name' instead of simply 'name' etc
new in version 5: use 'bookmark' instead of 'marker'
]
with call[name[open], parameter[name[xml_file], constant[r]]] begin[:]
variable[s] assign[=] call[name[f].read, parameter[]]
variable[m] assign[=] call[name[search], parameter[constant[<annotations version="([0-9]*)">], name[s]]]
variable[current] assign[=] call[name[int], parameter[call[call[name[m].groups, parameter[]]][constant[0]]]]
if compare[name[current] less[<] constant[4]] begin[:]
variable[s] assign[=] call[name[sub], parameter[constant[<marker><name>(.*?)</name><time>(.*?)</time></marker>], constant[<marker><marker_name>\g<1></marker_name><marker_start>\g<2></marker_start><marker_end>\g<2></marker_end><marker_chan/></marker>], name[s]]]
if compare[name[current] less[<] constant[5]] begin[:]
variable[s] assign[=] call[name[s].replace, parameter[constant[marker], constant[bookmark]]]
variable[s] assign[=] call[name[sub], parameter[constant[<annotations version="[0-9]*">], constant[<annotations version="5">], name[s]]]
with call[name[open], parameter[name[xml_file], constant[w]]] begin[:]
call[name[f].write, parameter[name[s]]] | keyword[def] identifier[update_annotation_version] ( identifier[xml_file] ):
literal[string]
keyword[with] identifier[open] ( identifier[xml_file] , literal[string] ) keyword[as] identifier[f] :
identifier[s] = identifier[f] . identifier[read] ()
identifier[m] = identifier[search] ( literal[string] , identifier[s] )
identifier[current] = identifier[int] ( identifier[m] . identifier[groups] ()[ literal[int] ])
keyword[if] identifier[current] < literal[int] :
identifier[s] = identifier[sub] ( literal[string] ,
literal[string] ,
identifier[s] )
keyword[if] identifier[current] < literal[int] :
identifier[s] = identifier[s] . identifier[replace] ( literal[string] , literal[string] )
identifier[s] = identifier[sub] ( literal[string] ,
literal[string] , identifier[s] )
keyword[with] identifier[open] ( identifier[xml_file] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[s] ) | def update_annotation_version(xml_file):
"""Update the fields that have changed over different versions.
Parameters
----------
xml_file : path to file
xml file with the sleep scoring
Notes
-----
new in version 4: use 'marker_name' instead of simply 'name' etc
new in version 5: use 'bookmark' instead of 'marker'
"""
with open(xml_file, 'r') as f:
s = f.read() # depends on [control=['with'], data=['f']]
m = search('<annotations version="([0-9]*)">', s)
current = int(m.groups()[0])
if current < 4:
s = sub('<marker><name>(.*?)</name><time>(.*?)</time></marker>', '<marker><marker_name>\\g<1></marker_name><marker_start>\\g<2></marker_start><marker_end>\\g<2></marker_end><marker_chan/></marker>', s) # depends on [control=['if'], data=[]]
if current < 5:
s = s.replace('marker', 'bookmark')
# note indentation
s = sub('<annotations version="[0-9]*">', '<annotations version="5">', s)
with open(xml_file, 'w') as f:
f.write(s) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]] |
def on_batch_end(self, last_loss, epoch, num_batch, **kwargs: Any) -> Any:
    """Test if `last_loss` is NaN and interrupt training.

    Returns ``True`` when a stop was already requested (so the caller skips
    validation), a dict of stop flags when `last_loss` is NaN, and ``None``
    otherwise.  (The original annotation claimed ``-> None``, which
    contradicted the actual return values.)
    """
    # A stop was already requested: returning True skips validation after
    # stopping during training.
    if self.stop:
        return True
    if torch.isnan(last_loss):
        print(f'Epoch/Batch ({epoch}/{num_batch}): Invalid loss, terminating training.')
        return {'stop_epoch': True, 'stop_training': True, 'skip_validate': True}
    # Loss is finite: keep training.
    return None
def add_row(self, data: list):
    """
    Append one row of buttons to the widget; each entry supplies a
    button label together with the callback wired to that button.

    :param data: a list of tuples of the form ('label', <callback>)
    :return: None
    """
    # When headers exist, a row must provide exactly one entry per column.
    if self.headers and data:
        if len(self.headers) != len(data):
            raise ValueError

    # Header row (if any) occupies grid row 0, so data rows start below it.
    offset = 1 if self.headers else 0
    buttons = list()
    for col, entry in enumerate(data):
        if not isinstance(entry, tuple):
            raise ValueError('all elements must be a tuple '
                             'consisting of ("label", <command>)')
        label, command = entry
        btn = tk.Button(self, text=str(label), relief=tk.RAISED,
                        command=command,
                        padx=self.padding,
                        pady=self.padding)
        btn.grid(row=len(self._rows) + offset, column=col, sticky='ew')
        buttons.append(btn)
    self._rows.append(buttons)
"""
Add a row of buttons each with their own callbacks to the
current widget. Each element in `data` will consist of a
label and a command.
:param data: a list of tuples of the form ('label', <callback>)
:return: None
"""
# validation
if self.headers and data:
if len(self.headers) != len(data):
raise ValueError # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
offset = 0 if not self.headers else 1
row = list()
for (i, e) in enumerate(data):
if not isinstance(e, tuple):
raise ValueError('all elements must be a tuple consisting of ("label", <command>)') # depends on [control=['if'], data=[]]
(label, command) = e
button = tk.Button(self, text=str(label), relief=tk.RAISED, command=command, padx=self.padding, pady=self.padding)
button.grid(row=len(self._rows) + offset, column=i, sticky='ew')
row.append(button) # depends on [control=['for'], data=[]]
self._rows.append(row) |
def register_plugin(self):
    """Register plugin in Spyder's main window"""
    # Dock this plugin's widget into the main window layout.
    self.main.add_dockwidget(self)
    # Let the main window track when this plugin gains keyboard focus.
    self.focus_changed.connect(self.main.plugin_focus_changed)
    # Basic "go to" requests open the target file in the editor.
    self.edit_goto.connect(self.main.editor.load)
    # Overloaded signal variant that also forwards whether Qt events
    # should be processed while the file is being loaded.
    self.edit_goto[str, int, str, bool].connect(
        lambda fname, lineno, word, processevents:
        self.main.editor.load(fname, lineno, word,
                              processevents=processevents))
    # Keep this plugin's breakpoints in sync with the editor's saved ones.
    self.main.editor.breakpoints_saved.connect(self.set_spyder_breakpoints)
    # Editor-triggered runs (whole scripts and single cells) execute here.
    self.main.editor.run_in_current_ipyclient.connect(self.run_script)
    self.main.editor.run_cell_in_ipyclient.connect(self.run_cell)
    # Working-directory changes from the workingdirectory plugin are
    # applied to the current client ...
    self.main.workingdirectory.set_current_console_wd.connect(
        self.set_current_client_working_directory)
    # ... and switching tabs updates the working directory in return.
    self.tabwidget.currentChanged.connect(self.update_working_directory)
    # Drop stderr files left over from previous sessions.
    self._remove_old_stderr_files()
constant[Register plugin in Spyder's main window]
call[name[self].main.add_dockwidget, parameter[name[self]]]
call[name[self].focus_changed.connect, parameter[name[self].main.plugin_focus_changed]]
call[name[self].edit_goto.connect, parameter[name[self].main.editor.load]]
call[call[name[self].edit_goto][tuple[[<ast.Name object at 0x7da18f09e1a0>, <ast.Name object at 0x7da18f09fc40>, <ast.Name object at 0x7da18f09cc10>, <ast.Name object at 0x7da18f09da80>]]].connect, parameter[<ast.Lambda object at 0x7da18f09f940>]]
call[name[self].main.editor.breakpoints_saved.connect, parameter[name[self].set_spyder_breakpoints]]
call[name[self].main.editor.run_in_current_ipyclient.connect, parameter[name[self].run_script]]
call[name[self].main.editor.run_cell_in_ipyclient.connect, parameter[name[self].run_cell]]
call[name[self].main.workingdirectory.set_current_console_wd.connect, parameter[name[self].set_current_client_working_directory]]
call[name[self].tabwidget.currentChanged.connect, parameter[name[self].update_working_directory]]
call[name[self]._remove_old_stderr_files, parameter[]] | keyword[def] identifier[register_plugin] ( identifier[self] ):
literal[string]
identifier[self] . identifier[main] . identifier[add_dockwidget] ( identifier[self] )
identifier[self] . identifier[focus_changed] . identifier[connect] ( identifier[self] . identifier[main] . identifier[plugin_focus_changed] )
identifier[self] . identifier[edit_goto] . identifier[connect] ( identifier[self] . identifier[main] . identifier[editor] . identifier[load] )
identifier[self] . identifier[edit_goto] [ identifier[str] , identifier[int] , identifier[str] , identifier[bool] ]. identifier[connect] (
keyword[lambda] identifier[fname] , identifier[lineno] , identifier[word] , identifier[processevents] :
identifier[self] . identifier[main] . identifier[editor] . identifier[load] ( identifier[fname] , identifier[lineno] , identifier[word] ,
identifier[processevents] = identifier[processevents] ))
identifier[self] . identifier[main] . identifier[editor] . identifier[breakpoints_saved] . identifier[connect] ( identifier[self] . identifier[set_spyder_breakpoints] )
identifier[self] . identifier[main] . identifier[editor] . identifier[run_in_current_ipyclient] . identifier[connect] ( identifier[self] . identifier[run_script] )
identifier[self] . identifier[main] . identifier[editor] . identifier[run_cell_in_ipyclient] . identifier[connect] ( identifier[self] . identifier[run_cell] )
identifier[self] . identifier[main] . identifier[workingdirectory] . identifier[set_current_console_wd] . identifier[connect] (
identifier[self] . identifier[set_current_client_working_directory] )
identifier[self] . identifier[tabwidget] . identifier[currentChanged] . identifier[connect] ( identifier[self] . identifier[update_working_directory] )
identifier[self] . identifier[_remove_old_stderr_files] () | def register_plugin(self):
"""Register plugin in Spyder's main window"""
self.main.add_dockwidget(self)
self.focus_changed.connect(self.main.plugin_focus_changed)
self.edit_goto.connect(self.main.editor.load)
self.edit_goto[str, int, str, bool].connect(lambda fname, lineno, word, processevents: self.main.editor.load(fname, lineno, word, processevents=processevents))
self.main.editor.breakpoints_saved.connect(self.set_spyder_breakpoints)
self.main.editor.run_in_current_ipyclient.connect(self.run_script)
self.main.editor.run_cell_in_ipyclient.connect(self.run_cell)
self.main.workingdirectory.set_current_console_wd.connect(self.set_current_client_working_directory)
self.tabwidget.currentChanged.connect(self.update_working_directory)
self._remove_old_stderr_files() |
def marshal(self, values):
    """
    Serialize a list of entities into a list of attribute dictionaries.

    :param values: The entities to serialize.
    :type values: List[stravalib.model.BaseEntity]
    :return: List of dictionaries of attributes, or ``None`` when
        ``values`` is ``None``.
    :rtype: List[Dict[str, Any]]
    """
    if values is None:
        return None
    # Delegate per-entity serialization to the parent class.
    marshal_one = super(EntityCollection, self).marshal
    return [marshal_one(entity) for entity in values]
constant[
Turn a list of entities into a list of dictionaries.
:param values: The entities to serialize.
:type values: List[stravalib.model.BaseEntity]
:return: List of dictionaries of attributes
:rtype: List[Dict[str, Any]]
]
if compare[name[values] is_not constant[None]] begin[:]
return[<ast.ListComp object at 0x7da1b0796710>] | keyword[def] identifier[marshal] ( identifier[self] , identifier[values] ):
literal[string]
keyword[if] identifier[values] keyword[is] keyword[not] keyword[None] :
keyword[return] [ identifier[super] ( identifier[EntityCollection] , identifier[self] ). identifier[marshal] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[values] ] | def marshal(self, values):
"""
Turn a list of entities into a list of dictionaries.
:param values: The entities to serialize.
:type values: List[stravalib.model.BaseEntity]
:return: List of dictionaries of attributes
:rtype: List[Dict[str, Any]]
"""
if values is not None:
return [super(EntityCollection, self).marshal(v) for v in values] # depends on [control=['if'], data=['values']] |
def get_common_password_hash(self, salt):
    """x = H(s | H(I | ":" | P))

    :param int salt:
    :rtype: int
    """
    secret = self._password
    if secret is None:
        # This flow requires the plain password to be available.
        raise SRPException('User password should be in context for this scenario.')
    # Inner hash combines identity and password with ':' as separator.
    identity_hash = self.hash(self._user, secret, joiner=':')
    return self.hash(salt, identity_hash)
constant[x = H(s | H(I | ":" | P))
:param int salt:
:rtype: int
]
variable[password] assign[=] name[self]._password
if compare[name[password] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b0bd9780>
return[call[name[self].hash, parameter[name[salt], call[name[self].hash, parameter[name[self]._user, name[password]]]]]] | keyword[def] identifier[get_common_password_hash] ( identifier[self] , identifier[salt] ):
literal[string]
identifier[password] = identifier[self] . identifier[_password]
keyword[if] identifier[password] keyword[is] keyword[None] :
keyword[raise] identifier[SRPException] ( literal[string] )
keyword[return] identifier[self] . identifier[hash] ( identifier[salt] , identifier[self] . identifier[hash] ( identifier[self] . identifier[_user] , identifier[password] , identifier[joiner] = literal[string] )) | def get_common_password_hash(self, salt):
"""x = H(s | H(I | ":" | P))
:param int salt:
:rtype: int
"""
password = self._password
if password is None:
raise SRPException('User password should be in context for this scenario.') # depends on [control=['if'], data=[]]
return self.hash(salt, self.hash(self._user, password, joiner=':')) |
def _training_stats(self):
"""
Return a dictionary of statistics collected during creation of the
model. These statistics are also available with the ``get`` method and
are described in more detail in that method's documentation.
Returns
-------
out : dict
Dictionary of statistics compiled during creation of the
TopicModel.
See Also
--------
summary
Examples
--------
>>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text')
>>> m = turicreate.topic_model.create(docs)
>>> m._training_stats()
{'training_iterations': 20,
'training_time': 20.5034}
"""
fields = self._list_fields()
stat_fields = ['training_time',
'training_iterations']
if 'validation_perplexity' in fields:
stat_fields.append('validation_perplexity')
ret = {k : self._get(k) for k in stat_fields}
return ret | def function[_training_stats, parameter[self]]:
constant[
Return a dictionary of statistics collected during creation of the
model. These statistics are also available with the ``get`` method and
are described in more detail in that method's documentation.
Returns
-------
out : dict
Dictionary of statistics compiled during creation of the
TopicModel.
See Also
--------
summary
Examples
--------
>>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text')
>>> m = turicreate.topic_model.create(docs)
>>> m._training_stats()
{'training_iterations': 20,
'training_time': 20.5034}
]
variable[fields] assign[=] call[name[self]._list_fields, parameter[]]
variable[stat_fields] assign[=] list[[<ast.Constant object at 0x7da1b1f0a3e0>, <ast.Constant object at 0x7da1b1f08fd0>]]
if compare[constant[validation_perplexity] in name[fields]] begin[:]
call[name[stat_fields].append, parameter[constant[validation_perplexity]]]
variable[ret] assign[=] <ast.DictComp object at 0x7da1b1f0ace0>
return[name[ret]] | keyword[def] identifier[_training_stats] ( identifier[self] ):
literal[string]
identifier[fields] = identifier[self] . identifier[_list_fields] ()
identifier[stat_fields] =[ literal[string] ,
literal[string] ]
keyword[if] literal[string] keyword[in] identifier[fields] :
identifier[stat_fields] . identifier[append] ( literal[string] )
identifier[ret] ={ identifier[k] : identifier[self] . identifier[_get] ( identifier[k] ) keyword[for] identifier[k] keyword[in] identifier[stat_fields] }
keyword[return] identifier[ret] | def _training_stats(self):
"""
Return a dictionary of statistics collected during creation of the
model. These statistics are also available with the ``get`` method and
are described in more detail in that method's documentation.
Returns
-------
out : dict
Dictionary of statistics compiled during creation of the
TopicModel.
See Also
--------
summary
Examples
--------
>>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text')
>>> m = turicreate.topic_model.create(docs)
>>> m._training_stats()
{'training_iterations': 20,
'training_time': 20.5034}
"""
fields = self._list_fields()
stat_fields = ['training_time', 'training_iterations']
if 'validation_perplexity' in fields:
stat_fields.append('validation_perplexity') # depends on [control=['if'], data=[]]
ret = {k: self._get(k) for k in stat_fields}
return ret |
def exec_before_request_actions(actions, **kwargs):
    """Execute actions in the "before" and "before_METHOD" groups
    """
    method_group = "before_" + flask.request.method.lower()
    return execute_actions(actions, limit_groups=("before", method_group), **kwargs)
constant[Execute actions in the "before" and "before_METHOD" groups
]
variable[groups] assign[=] tuple[[<ast.Constant object at 0x7da18bc72b90>, <ast.BinOp object at 0x7da18bc732b0>]]
return[call[name[execute_actions], parameter[name[actions]]]] | keyword[def] identifier[exec_before_request_actions] ( identifier[actions] ,** identifier[kwargs] ):
literal[string]
identifier[groups] =( literal[string] , literal[string] + identifier[flask] . identifier[request] . identifier[method] . identifier[lower] ())
keyword[return] identifier[execute_actions] ( identifier[actions] , identifier[limit_groups] = identifier[groups] ,** identifier[kwargs] ) | def exec_before_request_actions(actions, **kwargs):
"""Execute actions in the "before" and "before_METHOD" groups
"""
groups = ('before', 'before_' + flask.request.method.lower())
return execute_actions(actions, limit_groups=groups, **kwargs) |
def setup(self):
"""Setup."""
self.comments = self.config['comments']
self.docstrings = self.config['docstrings']
self.strings = self.config['strings']
self.group_comments = self.config['group_comments']
self.string_types, self.wild_string_types = self.eval_string_type(self.config['string_types'])
self.decode_escapes = self.config['decode_escapes'] | def function[setup, parameter[self]]:
constant[Setup.]
name[self].comments assign[=] call[name[self].config][constant[comments]]
name[self].docstrings assign[=] call[name[self].config][constant[docstrings]]
name[self].strings assign[=] call[name[self].config][constant[strings]]
name[self].group_comments assign[=] call[name[self].config][constant[group_comments]]
<ast.Tuple object at 0x7da18bcc8d60> assign[=] call[name[self].eval_string_type, parameter[call[name[self].config][constant[string_types]]]]
name[self].decode_escapes assign[=] call[name[self].config][constant[decode_escapes]] | keyword[def] identifier[setup] ( identifier[self] ):
literal[string]
identifier[self] . identifier[comments] = identifier[self] . identifier[config] [ literal[string] ]
identifier[self] . identifier[docstrings] = identifier[self] . identifier[config] [ literal[string] ]
identifier[self] . identifier[strings] = identifier[self] . identifier[config] [ literal[string] ]
identifier[self] . identifier[group_comments] = identifier[self] . identifier[config] [ literal[string] ]
identifier[self] . identifier[string_types] , identifier[self] . identifier[wild_string_types] = identifier[self] . identifier[eval_string_type] ( identifier[self] . identifier[config] [ literal[string] ])
identifier[self] . identifier[decode_escapes] = identifier[self] . identifier[config] [ literal[string] ] | def setup(self):
"""Setup."""
self.comments = self.config['comments']
self.docstrings = self.config['docstrings']
self.strings = self.config['strings']
self.group_comments = self.config['group_comments']
(self.string_types, self.wild_string_types) = self.eval_string_type(self.config['string_types'])
self.decode_escapes = self.config['decode_escapes'] |
def _download_raw(self, url=None):
    """Download content from URL directly.
    :param url: Address to fetch; defaults to ``self.url`` when omitted.
    :returns: Response body decoded as UTF-8 text.
    """
    if url is None:
        url = self.url
    req = request.Request(url, headers=self.HEADERS_PLAIN)
    # Close the HTTP response deterministically instead of leaking the
    # connection until garbage collection.
    with request.urlopen(req) as resp:
        return resp.read().decode("utf8")
constant[Download content from URL directly.]
if compare[name[url] is constant[None]] begin[:]
variable[url] assign[=] name[self].url
variable[req] assign[=] call[name[request].Request, parameter[name[url]]]
return[call[call[call[name[request].urlopen, parameter[name[req]]].read, parameter[]].decode, parameter[constant[utf8]]]] | keyword[def] identifier[_download_raw] ( identifier[self] , identifier[url] = keyword[None] ):
literal[string]
keyword[if] identifier[url] keyword[is] keyword[None] :
identifier[url] = identifier[self] . identifier[url]
identifier[req] = identifier[request] . identifier[Request] ( identifier[url] , identifier[headers] = identifier[self] . identifier[HEADERS_PLAIN] )
keyword[return] identifier[request] . identifier[urlopen] ( identifier[req] ). identifier[read] (). identifier[decode] ( literal[string] ) | def _download_raw(self, url=None):
"""Download content from URL directly."""
if url is None:
url = self.url # depends on [control=['if'], data=['url']]
req = request.Request(url, headers=self.HEADERS_PLAIN)
return request.urlopen(req).read().decode('utf8') |
def delete(self, url=None, post_data=None, parse_data=False, key=None, parameters=None):
    """ Issue a DELETE request.
    Kwargs:
        url (str): Destination URL
        post_data (dict): Dictionary of parameter and values
        parse_data (bool): If true, parse response data
        key (string): If parse_data==True, look for this key when parsing data
        parameters (dict): Additional GET parameters to append to the URL
    Returns:
        dict. Response (a dict with keys: success, data, info, body)
    Raises:
        AuthenticationError, ConnectionError, urllib2.HTTPError, ValueError, Exception
    """
    # Avoid the shared-mutable-default pitfall: build a fresh dict per call.
    if post_data is None:
        post_data = {}
    return self._fetch("DELETE", url, post_data=post_data, parse_data=parse_data,
                       key=key, parameters=parameters, full_return=True)
constant[ Issue a PUT request.
Kwargs:
url (str): Destination URL
post_data (dict): Dictionary of parameter and values
parse_data (bool): If true, parse response data
key (string): If parse_data==True, look for this key when parsing data
parameters (dict): Additional GET parameters to append to the URL
Returns:
dict. Response (a dict with keys: success, data, info, body)
Raises:
AuthenticationError, ConnectionError, urllib2.HTTPError, ValueError, Exception
]
return[call[name[self]._fetch, parameter[constant[DELETE], name[url]]]] | keyword[def] identifier[delete] ( identifier[self] , identifier[url] = keyword[None] , identifier[post_data] ={}, identifier[parse_data] = keyword[False] , identifier[key] = keyword[None] , identifier[parameters] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_fetch] ( literal[string] , identifier[url] , identifier[post_data] = identifier[post_data] , identifier[parse_data] = identifier[parse_data] , identifier[key] = identifier[key] , identifier[parameters] = identifier[parameters] , identifier[full_return] = keyword[True] ) | def delete(self, url=None, post_data={}, parse_data=False, key=None, parameters=None):
""" Issue a PUT request.
Kwargs:
url (str): Destination URL
post_data (dict): Dictionary of parameter and values
parse_data (bool): If true, parse response data
key (string): If parse_data==True, look for this key when parsing data
parameters (dict): Additional GET parameters to append to the URL
Returns:
dict. Response (a dict with keys: success, data, info, body)
Raises:
AuthenticationError, ConnectionError, urllib2.HTTPError, ValueError, Exception
"""
return self._fetch('DELETE', url, post_data=post_data, parse_data=parse_data, key=key, parameters=parameters, full_return=True) |
def dict_fields(obj, parent=None):
    """
    reads a dictionary and returns a flat dict of fields cojoined with a dot
    notation
    args:
        obj: the dictionary to parse
        parent: list of ancestor key names. used with a recursive call
    """
    # Default built per call: a module-level `parent=[]` default would be
    # shared across every invocation.
    if parent is None:
        parent = []
    rtn_obj = {}
    for key, value in obj.items():
        path = ".".join(parent + [key])
        # A non-empty list is represented by its first element only.
        if isinstance(value, list) and value:
            value = value[0]
        if isinstance(value, dict):
            rtn_obj.update(dict_fields(value, [path]))
        else:
            rtn_obj[path] = value
    return rtn_obj
constant[
reads a dictionary and returns a list of fields cojoined with a dot
notation
args:
obj: the dictionary to parse
parent: name for a parent key. used with a recursive call
]
variable[rtn_obj] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b13403a0>, <ast.Name object at 0x7da1b1343d30>]]] in starred[call[name[obj].items, parameter[]]] begin[:]
variable[new_key] assign[=] binary_operation[name[parent] + list[[<ast.Name object at 0x7da1b1341750>]]]
variable[new_key] assign[=] call[constant[.].join, parameter[name[new_key]]]
if call[name[isinstance], parameter[name[value], name[list]]] begin[:]
if name[value] begin[:]
variable[value] assign[=] call[name[value]][constant[0]]
if call[name[isinstance], parameter[name[value], name[dict]]] begin[:]
call[name[rtn_obj].update, parameter[call[name[dict_fields], parameter[name[value], list[[<ast.Name object at 0x7da1b1342080>]]]]]]
return[name[rtn_obj]] | keyword[def] identifier[dict_fields] ( identifier[obj] , identifier[parent] =[]):
literal[string]
identifier[rtn_obj] ={}
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[obj] . identifier[items] ():
identifier[new_key] = identifier[parent] +[ identifier[key] ]
identifier[new_key] = literal[string] . identifier[join] ( identifier[new_key] )
keyword[if] identifier[isinstance] ( identifier[value] , identifier[list] ):
keyword[if] identifier[value] :
identifier[value] = identifier[value] [ literal[int] ]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[dict] ):
identifier[rtn_obj] . identifier[update] ( identifier[dict_fields] ( identifier[value] ,[ identifier[new_key] ]))
keyword[else] :
identifier[rtn_obj] . identifier[update] ({ identifier[new_key] : identifier[value] })
keyword[return] identifier[rtn_obj] | def dict_fields(obj, parent=[]):
"""
reads a dictionary and returns a list of fields cojoined with a dot
notation
args:
obj: the dictionary to parse
parent: name for a parent key. used with a recursive call
"""
rtn_obj = {}
for (key, value) in obj.items():
new_key = parent + [key]
new_key = '.'.join(new_key)
if isinstance(value, list):
if value:
value = value[0] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if isinstance(value, dict):
rtn_obj.update(dict_fields(value, [new_key])) # depends on [control=['if'], data=[]]
else:
rtn_obj.update({new_key: value}) # depends on [control=['for'], data=[]]
return rtn_obj |
def latex_visit_inheritance_diagram(
    self: NodeVisitor, node: inheritance_diagram
) -> None:
    """
    Builds LaTeX output from an :py:class:`~uqbar.sphinx.inheritance.inheritance_diagram` node.
    """
    graphviz_graph = node["graph"].build_graph()
    graphviz_graph.attributes["size"] = 6.0
    dot_code = format(graphviz_graph, "graphviz")
    render_dot_latex(self, node, dot_code, {}, "inheritance")
    # Children were rendered as part of the diagram; skip normal traversal.
    raise SkipNode
constant[
Builds LaTeX output from an :py:class:`~uqbar.sphinx.inheritance.inheritance_diagram` node.
]
variable[inheritance_graph] assign[=] call[name[node]][constant[graph]]
variable[graphviz_graph] assign[=] call[name[inheritance_graph].build_graph, parameter[]]
call[name[graphviz_graph].attributes][constant[size]] assign[=] constant[6.0]
variable[dot_code] assign[=] call[name[format], parameter[name[graphviz_graph], constant[graphviz]]]
call[name[render_dot_latex], parameter[name[self], name[node], name[dot_code], dictionary[[], []], constant[inheritance]]]
<ast.Raise object at 0x7da20c76f6a0> | keyword[def] identifier[latex_visit_inheritance_diagram] (
identifier[self] : identifier[NodeVisitor] , identifier[node] : identifier[inheritance_diagram]
)-> keyword[None] :
literal[string]
identifier[inheritance_graph] = identifier[node] [ literal[string] ]
identifier[graphviz_graph] = identifier[inheritance_graph] . identifier[build_graph] ()
identifier[graphviz_graph] . identifier[attributes] [ literal[string] ]= literal[int]
identifier[dot_code] = identifier[format] ( identifier[graphviz_graph] , literal[string] )
identifier[render_dot_latex] ( identifier[self] , identifier[node] , identifier[dot_code] ,{}, literal[string] )
keyword[raise] identifier[SkipNode] | def latex_visit_inheritance_diagram(self: NodeVisitor, node: inheritance_diagram) -> None:
"""
Builds LaTeX output from an :py:class:`~uqbar.sphinx.inheritance.inheritance_diagram` node.
"""
inheritance_graph = node['graph']
graphviz_graph = inheritance_graph.build_graph()
graphviz_graph.attributes['size'] = 6.0
dot_code = format(graphviz_graph, 'graphviz')
render_dot_latex(self, node, dot_code, {}, 'inheritance')
raise SkipNode |
def disasters(value=disasters_array,
              early_mean=early_mean,
              late_mean=late_mean,
              switchpoint=switchpoint):
    """Annual occurrences of coal mining disasters."""
    # Poisson log-likelihood of the years before and after the switchpoint.
    log_like_early = pm.poisson_like(value[:switchpoint], early_mean)
    log_like_late = pm.poisson_like(value[switchpoint:], late_mean)
    return log_like_early + log_like_late
constant[Annual occurences of coal mining disasters.]
return[binary_operation[call[name[pm].poisson_like, parameter[call[name[value]][<ast.Slice object at 0x7da2041db190>], name[early_mean]]] + call[name[pm].poisson_like, parameter[call[name[value]][<ast.Slice object at 0x7da2041d8b80>], name[late_mean]]]]] | keyword[def] identifier[disasters] ( identifier[value] = identifier[disasters_array] ,
identifier[early_mean] = identifier[early_mean] ,
identifier[late_mean] = identifier[late_mean] ,
identifier[switchpoint] = identifier[switchpoint] ):
literal[string]
keyword[return] identifier[pm] . identifier[poisson_like] (
identifier[value] [: identifier[switchpoint] ], identifier[early_mean] )+ identifier[pm] . identifier[poisson_like] ( identifier[value] [ identifier[switchpoint] :], identifier[late_mean] ) | def disasters(value=disasters_array, early_mean=early_mean, late_mean=late_mean, switchpoint=switchpoint):
"""Annual occurences of coal mining disasters."""
return pm.poisson_like(value[:switchpoint], early_mean) + pm.poisson_like(value[switchpoint:], late_mean) |
def _policy_loss(
    self, old_policy, policy, action, advantage, length):
  """Compute the policy loss composed of multiple components.
  1. The policy gradient loss is importance sampled from the data-collecting
  policy at the beginning of training.
  2. The second term is a KL penalty between the policy at the beginning of
  training and the current policy.
  3. Additionally, if this KL already changed more than twice the target
  amount, we activate a strong penalty discouraging further divergence.
  Args:
    old_policy: Action distribution of the behavioral policy.
    policy: Sequences of distribution params of the current policy.
    action: Sequences of actions.
    advantage: Sequences of advantages.
    length: Batch of sequence lengths.
  Returns:
    Tuple of loss tensor and summary tensor.
  """
  with tf.name_scope('policy_loss'):
    # Per-timestep KL between the behavioral policy and the current policy,
    # masked to valid frames and averaged over time (axis 1).
    kl = tf.contrib.distributions.kl_divergence(old_policy, policy)
    # Infinite values in the KL, even for padding frames that we mask out,
    # cause NaN gradients since TensorFlow computes gradients with respect to
    # the whole input tensor.
    kl = tf.check_numerics(kl, 'kl')
    kl = tf.reduce_mean(self._mask(kl, length), 1)
    # Importance ratio pi(a|s) / pi_old(a|s), computed in log space for
    # numerical stability.
    policy_gradient = tf.exp(
        policy.log_prob(action) - old_policy.log_prob(action))
    # Advantage is treated as a constant (stop_gradient) so gradients flow
    # only through the current policy's probabilities.
    surrogate_loss = -tf.reduce_mean(self._mask(
        policy_gradient * tf.stop_gradient(advantage), length), 1)
    surrogate_loss = tf.check_numerics(surrogate_loss, 'surrogate_loss')
    # Adaptive KL penalty; self._penalty is presumably adjusted elsewhere to
    # keep the observed KL near self._config.kl_target -- confirm in caller.
    kl_penalty = self._penalty * kl
    cutoff_threshold = self._config.kl_target * self._config.kl_cutoff_factor
    cutoff_count = tf.reduce_sum(
        tf.cast(kl > cutoff_threshold, tf.int32))
    # Log to stdout whenever any sequence exceeds the cutoff threshold; the
    # control dependency forces the tf.Print side effect to run.
    with tf.control_dependencies([tf.cond(
        cutoff_count > 0,
        lambda: tf.Print(0, [cutoff_count], 'kl cutoff! '), int)]):
      # Quadratic penalty that activates only past the cutoff threshold,
      # strongly discouraging further divergence.
      kl_cutoff = (
          self._config.kl_cutoff_coef *
          tf.cast(kl > cutoff_threshold, tf.float32) *
          (kl - cutoff_threshold) ** 2)
    policy_loss = surrogate_loss + kl_penalty + kl_cutoff
    entropy = tf.reduce_mean(policy.entropy(), axis=1)
    if self._config.entropy_regularization:
      # Optional entropy bonus to encourage exploration.
      policy_loss -= self._config.entropy_regularization * entropy
    # Summaries of every loss component for TensorBoard diagnostics.
    summary = tf.summary.merge([
        tf.summary.histogram('entropy', entropy),
        tf.summary.histogram('kl', kl),
        tf.summary.histogram('surrogate_loss', surrogate_loss),
        tf.summary.histogram('kl_penalty', kl_penalty),
        tf.summary.histogram('kl_cutoff', kl_cutoff),
        tf.summary.histogram('kl_penalty_combined', kl_penalty + kl_cutoff),
        tf.summary.histogram('policy_loss', policy_loss),
        tf.summary.scalar('avg_surr_loss', tf.reduce_mean(surrogate_loss)),
        tf.summary.scalar('avg_kl_penalty', tf.reduce_mean(kl_penalty)),
        tf.summary.scalar('avg_policy_loss', tf.reduce_mean(policy_loss))])
    # Average over the batch dimension to get a scalar loss.
    policy_loss = tf.reduce_mean(policy_loss, 0)
    return tf.check_numerics(policy_loss, 'policy_loss'), summary
constant[Compute the policy loss composed of multiple components.
1. The policy gradient loss is importance sampled from the data-collecting
policy at the beginning of training.
2. The second term is a KL penalty between the policy at the beginning of
training and the current policy.
3. Additionally, if this KL already changed more than twice the target
amount, we activate a strong penalty discouraging further divergence.
Args:
old_policy: Action distribution of the behavioral policy.
policy: Sequences of distribution params of the current policy.
action: Sequences of actions.
advantage: Sequences of advantages.
length: Batch of sequence lengths.
Returns:
Tuple of loss tensor and summary tensor.
]
with call[name[tf].name_scope, parameter[constant[policy_loss]]] begin[:]
variable[kl] assign[=] call[name[tf].contrib.distributions.kl_divergence, parameter[name[old_policy], name[policy]]]
variable[kl] assign[=] call[name[tf].check_numerics, parameter[name[kl], constant[kl]]]
variable[kl] assign[=] call[name[tf].reduce_mean, parameter[call[name[self]._mask, parameter[name[kl], name[length]]], constant[1]]]
variable[policy_gradient] assign[=] call[name[tf].exp, parameter[binary_operation[call[name[policy].log_prob, parameter[name[action]]] - call[name[old_policy].log_prob, parameter[name[action]]]]]]
variable[surrogate_loss] assign[=] <ast.UnaryOp object at 0x7da1b26ac190>
variable[surrogate_loss] assign[=] call[name[tf].check_numerics, parameter[name[surrogate_loss], constant[surrogate_loss]]]
variable[kl_penalty] assign[=] binary_operation[name[self]._penalty * name[kl]]
variable[cutoff_threshold] assign[=] binary_operation[name[self]._config.kl_target * name[self]._config.kl_cutoff_factor]
variable[cutoff_count] assign[=] call[name[tf].reduce_sum, parameter[call[name[tf].cast, parameter[compare[name[kl] greater[>] name[cutoff_threshold]], name[tf].int32]]]]
with call[name[tf].control_dependencies, parameter[list[[<ast.Call object at 0x7da1b26adcf0>]]]] begin[:]
variable[kl_cutoff] assign[=] binary_operation[binary_operation[name[self]._config.kl_cutoff_coef * call[name[tf].cast, parameter[compare[name[kl] greater[>] name[cutoff_threshold]], name[tf].float32]]] * binary_operation[binary_operation[name[kl] - name[cutoff_threshold]] ** constant[2]]]
variable[policy_loss] assign[=] binary_operation[binary_operation[name[surrogate_loss] + name[kl_penalty]] + name[kl_cutoff]]
variable[entropy] assign[=] call[name[tf].reduce_mean, parameter[call[name[policy].entropy, parameter[]]]]
if name[self]._config.entropy_regularization begin[:]
<ast.AugAssign object at 0x7da1b26af7f0>
variable[summary] assign[=] call[name[tf].summary.merge, parameter[list[[<ast.Call object at 0x7da1b26af850>, <ast.Call object at 0x7da1b26ae920>, <ast.Call object at 0x7da1b26aeb90>, <ast.Call object at 0x7da1b26ac2b0>, <ast.Call object at 0x7da1b26acd90>, <ast.Call object at 0x7da1b26ad810>, <ast.Call object at 0x7da1b26acd00>, <ast.Call object at 0x7da2041da800>, <ast.Call object at 0x7da2041dbaf0>, <ast.Call object at 0x7da2041da2c0>]]]]
variable[policy_loss] assign[=] call[name[tf].reduce_mean, parameter[name[policy_loss], constant[0]]]
return[tuple[[<ast.Call object at 0x7da2041d9ba0>, <ast.Name object at 0x7da2041dba60>]]] | keyword[def] identifier[_policy_loss] (
identifier[self] , identifier[old_policy] , identifier[policy] , identifier[action] , identifier[advantage] , identifier[length] ):
literal[string]
keyword[with] identifier[tf] . identifier[name_scope] ( literal[string] ):
identifier[kl] = identifier[tf] . identifier[contrib] . identifier[distributions] . identifier[kl_divergence] ( identifier[old_policy] , identifier[policy] )
identifier[kl] = identifier[tf] . identifier[check_numerics] ( identifier[kl] , literal[string] )
identifier[kl] = identifier[tf] . identifier[reduce_mean] ( identifier[self] . identifier[_mask] ( identifier[kl] , identifier[length] ), literal[int] )
identifier[policy_gradient] = identifier[tf] . identifier[exp] (
identifier[policy] . identifier[log_prob] ( identifier[action] )- identifier[old_policy] . identifier[log_prob] ( identifier[action] ))
identifier[surrogate_loss] =- identifier[tf] . identifier[reduce_mean] ( identifier[self] . identifier[_mask] (
identifier[policy_gradient] * identifier[tf] . identifier[stop_gradient] ( identifier[advantage] ), identifier[length] ), literal[int] )
identifier[surrogate_loss] = identifier[tf] . identifier[check_numerics] ( identifier[surrogate_loss] , literal[string] )
identifier[kl_penalty] = identifier[self] . identifier[_penalty] * identifier[kl]
identifier[cutoff_threshold] = identifier[self] . identifier[_config] . identifier[kl_target] * identifier[self] . identifier[_config] . identifier[kl_cutoff_factor]
identifier[cutoff_count] = identifier[tf] . identifier[reduce_sum] (
identifier[tf] . identifier[cast] ( identifier[kl] > identifier[cutoff_threshold] , identifier[tf] . identifier[int32] ))
keyword[with] identifier[tf] . identifier[control_dependencies] ([ identifier[tf] . identifier[cond] (
identifier[cutoff_count] > literal[int] ,
keyword[lambda] : identifier[tf] . identifier[Print] ( literal[int] ,[ identifier[cutoff_count] ], literal[string] ), identifier[int] )]):
identifier[kl_cutoff] =(
identifier[self] . identifier[_config] . identifier[kl_cutoff_coef] *
identifier[tf] . identifier[cast] ( identifier[kl] > identifier[cutoff_threshold] , identifier[tf] . identifier[float32] )*
( identifier[kl] - identifier[cutoff_threshold] )** literal[int] )
identifier[policy_loss] = identifier[surrogate_loss] + identifier[kl_penalty] + identifier[kl_cutoff]
identifier[entropy] = identifier[tf] . identifier[reduce_mean] ( identifier[policy] . identifier[entropy] (), identifier[axis] = literal[int] )
keyword[if] identifier[self] . identifier[_config] . identifier[entropy_regularization] :
identifier[policy_loss] -= identifier[self] . identifier[_config] . identifier[entropy_regularization] * identifier[entropy]
identifier[summary] = identifier[tf] . identifier[summary] . identifier[merge] ([
identifier[tf] . identifier[summary] . identifier[histogram] ( literal[string] , identifier[entropy] ),
identifier[tf] . identifier[summary] . identifier[histogram] ( literal[string] , identifier[kl] ),
identifier[tf] . identifier[summary] . identifier[histogram] ( literal[string] , identifier[surrogate_loss] ),
identifier[tf] . identifier[summary] . identifier[histogram] ( literal[string] , identifier[kl_penalty] ),
identifier[tf] . identifier[summary] . identifier[histogram] ( literal[string] , identifier[kl_cutoff] ),
identifier[tf] . identifier[summary] . identifier[histogram] ( literal[string] , identifier[kl_penalty] + identifier[kl_cutoff] ),
identifier[tf] . identifier[summary] . identifier[histogram] ( literal[string] , identifier[policy_loss] ),
identifier[tf] . identifier[summary] . identifier[scalar] ( literal[string] , identifier[tf] . identifier[reduce_mean] ( identifier[surrogate_loss] )),
identifier[tf] . identifier[summary] . identifier[scalar] ( literal[string] , identifier[tf] . identifier[reduce_mean] ( identifier[kl_penalty] )),
identifier[tf] . identifier[summary] . identifier[scalar] ( literal[string] , identifier[tf] . identifier[reduce_mean] ( identifier[policy_loss] ))])
identifier[policy_loss] = identifier[tf] . identifier[reduce_mean] ( identifier[policy_loss] , literal[int] )
keyword[return] identifier[tf] . identifier[check_numerics] ( identifier[policy_loss] , literal[string] ), identifier[summary] | def _policy_loss(self, old_policy, policy, action, advantage, length):
"""Compute the policy loss composed of multiple components.
1. The policy gradient loss is importance sampled from the data-collecting
policy at the beginning of training.
2. The second term is a KL penalty between the policy at the beginning of
training and the current policy.
3. Additionally, if this KL already changed more than twice the target
amount, we activate a strong penalty discouraging further divergence.
Args:
old_policy: Action distribution of the behavioral policy.
policy: Sequences of distribution params of the current policy.
action: Sequences of actions.
advantage: Sequences of advantages.
length: Batch of sequence lengths.
Returns:
Tuple of loss tensor and summary tensor.
"""
with tf.name_scope('policy_loss'):
kl = tf.contrib.distributions.kl_divergence(old_policy, policy)
# Infinite values in the KL, even for padding frames that we mask out,
# cause NaN gradients since TensorFlow computes gradients with respect to
# the whole input tensor.
kl = tf.check_numerics(kl, 'kl')
kl = tf.reduce_mean(self._mask(kl, length), 1)
policy_gradient = tf.exp(policy.log_prob(action) - old_policy.log_prob(action))
surrogate_loss = -tf.reduce_mean(self._mask(policy_gradient * tf.stop_gradient(advantage), length), 1)
surrogate_loss = tf.check_numerics(surrogate_loss, 'surrogate_loss')
kl_penalty = self._penalty * kl
cutoff_threshold = self._config.kl_target * self._config.kl_cutoff_factor
cutoff_count = tf.reduce_sum(tf.cast(kl > cutoff_threshold, tf.int32))
with tf.control_dependencies([tf.cond(cutoff_count > 0, lambda : tf.Print(0, [cutoff_count], 'kl cutoff! '), int)]):
kl_cutoff = self._config.kl_cutoff_coef * tf.cast(kl > cutoff_threshold, tf.float32) * (kl - cutoff_threshold) ** 2 # depends on [control=['with'], data=[]]
policy_loss = surrogate_loss + kl_penalty + kl_cutoff
entropy = tf.reduce_mean(policy.entropy(), axis=1)
if self._config.entropy_regularization:
policy_loss -= self._config.entropy_regularization * entropy # depends on [control=['if'], data=[]]
summary = tf.summary.merge([tf.summary.histogram('entropy', entropy), tf.summary.histogram('kl', kl), tf.summary.histogram('surrogate_loss', surrogate_loss), tf.summary.histogram('kl_penalty', kl_penalty), tf.summary.histogram('kl_cutoff', kl_cutoff), tf.summary.histogram('kl_penalty_combined', kl_penalty + kl_cutoff), tf.summary.histogram('policy_loss', policy_loss), tf.summary.scalar('avg_surr_loss', tf.reduce_mean(surrogate_loss)), tf.summary.scalar('avg_kl_penalty', tf.reduce_mean(kl_penalty)), tf.summary.scalar('avg_policy_loss', tf.reduce_mean(policy_loss))])
policy_loss = tf.reduce_mean(policy_loss, 0)
return (tf.check_numerics(policy_loss, 'policy_loss'), summary) # depends on [control=['with'], data=[]] |
def write_enum(fo, datum, schema):
    """An enum is encoded by a int, representing the zero-based position of
    the symbol in the schema."""
    write_enum_index = schema['symbols'].index(datum)
    write_int(fo, write_enum_index)
constant[An enum is encoded by a int, representing the zero-based position of
the symbol in the schema.]
variable[index] assign[=] call[call[name[schema]][constant[symbols]].index, parameter[name[datum]]]
call[name[write_int], parameter[name[fo], name[index]]] | keyword[def] identifier[write_enum] ( identifier[fo] , identifier[datum] , identifier[schema] ):
literal[string]
identifier[index] = identifier[schema] [ literal[string] ]. identifier[index] ( identifier[datum] )
identifier[write_int] ( identifier[fo] , identifier[index] ) | def write_enum(fo, datum, schema):
"""An enum is encoded by a int, representing the zero-based position of
the symbol in the schema."""
index = schema['symbols'].index(datum)
write_int(fo, index) |
def dict_contents(self, use_dict=None, as_class=dict):
    """Return the contents of an object as a dict.
    :param use_dict: existing mapping to extend; a new ``as_class()`` is
        created when omitted
    :param as_class: mapping type instantiated when ``use_dict`` is None
    """
    if _debug: Choice._debug("dict_contents use_dict=%r as_class=%r", use_dict, as_class)
    # make/extend the dictionary of content
    if use_dict is None:
        use_dict = as_class()
    # look for the chosen element
    for element in self.choiceElements:
        # An unset element is None; keep scanning until the chosen one.
        value = getattr(self, element.name, None)
        if value is None:
            continue
        if issubclass(element.klass, Atomic):
            mapped_value = value ### ambiguous
        elif issubclass(element.klass, AnyAtomic):
            mapped_value = value.value ### ambiguous
        elif isinstance(value, element.klass):
            # Nested constructed value: serialize recursively.
            mapped_value = value.dict_contents(as_class=as_class)
        # NOTE(review): if none of the branches above match, mapped_value is
        # unbound here and the next line raises NameError -- confirm whether
        # a chosen element's value can ever fail all three checks.
        use_dict.__setitem__(element.name, mapped_value)
        # A Choice holds at most one chosen element, so stop at the first.
        break
    # return what we built/updated
    return use_dict
constant[Return the contents of an object as a dict.]
if name[_debug] begin[:]
call[name[Choice]._debug, parameter[constant[dict_contents use_dict=%r as_class=%r], name[use_dict], name[as_class]]]
if compare[name[use_dict] is constant[None]] begin[:]
variable[use_dict] assign[=] call[name[as_class], parameter[]]
for taget[name[element]] in starred[name[self].choiceElements] begin[:]
variable[value] assign[=] call[name[getattr], parameter[name[self], name[element].name, constant[None]]]
if compare[name[value] is constant[None]] begin[:]
continue
if call[name[issubclass], parameter[name[element].klass, name[Atomic]]] begin[:]
variable[mapped_value] assign[=] name[value]
call[name[use_dict].__setitem__, parameter[name[element].name, name[mapped_value]]]
break
return[name[use_dict]] | keyword[def] identifier[dict_contents] ( identifier[self] , identifier[use_dict] = keyword[None] , identifier[as_class] = identifier[dict] ):
literal[string]
keyword[if] identifier[_debug] : identifier[Choice] . identifier[_debug] ( literal[string] , identifier[use_dict] , identifier[as_class] )
keyword[if] identifier[use_dict] keyword[is] keyword[None] :
identifier[use_dict] = identifier[as_class] ()
keyword[for] identifier[element] keyword[in] identifier[self] . identifier[choiceElements] :
identifier[value] = identifier[getattr] ( identifier[self] , identifier[element] . identifier[name] , keyword[None] )
keyword[if] identifier[value] keyword[is] keyword[None] :
keyword[continue]
keyword[if] identifier[issubclass] ( identifier[element] . identifier[klass] , identifier[Atomic] ):
identifier[mapped_value] = identifier[value]
keyword[elif] identifier[issubclass] ( identifier[element] . identifier[klass] , identifier[AnyAtomic] ):
identifier[mapped_value] = identifier[value] . identifier[value]
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[element] . identifier[klass] ):
identifier[mapped_value] = identifier[value] . identifier[dict_contents] ( identifier[as_class] = identifier[as_class] )
identifier[use_dict] . identifier[__setitem__] ( identifier[element] . identifier[name] , identifier[mapped_value] )
keyword[break]
keyword[return] identifier[use_dict] | def dict_contents(self, use_dict=None, as_class=dict):
"""Return the contents of an object as a dict."""
if _debug:
Choice._debug('dict_contents use_dict=%r as_class=%r', use_dict, as_class) # depends on [control=['if'], data=[]]
# make/extend the dictionary of content
if use_dict is None:
use_dict = as_class() # depends on [control=['if'], data=['use_dict']]
# look for the chosen element
for element in self.choiceElements:
value = getattr(self, element.name, None)
if value is None:
continue # depends on [control=['if'], data=[]]
if issubclass(element.klass, Atomic):
mapped_value = value ### ambiguous # depends on [control=['if'], data=[]]
elif issubclass(element.klass, AnyAtomic):
mapped_value = value.value ### ambiguous # depends on [control=['if'], data=[]]
elif isinstance(value, element.klass):
mapped_value = value.dict_contents(as_class=as_class) # depends on [control=['if'], data=[]]
use_dict.__setitem__(element.name, mapped_value)
break # depends on [control=['for'], data=['element']]
# return what we built/updated
return use_dict |
def dimension_values(self, dim, expanded=True, flat=True):
    """Return the values along the requested dimension.

    Args:
        dim: The dimension to return values for
        expanded (bool, optional): Whether to expand values
            Whether to return the expanded values, behavior depends
            on the type of data:
              * Columnar: If false returns unique values
              * Geometry: If false returns scalar values per geometry
              * Gridded: If false returns 1D coordinates
        flat (bool, optional): Whether to flatten array

    Returns:
        NumPy array of values along the requested dimension
    """
    dim = self.get_dimension(dim, strict=True)
    if dim in self.vdims:
        # Value dimensions carry no data on this element type, so they
        # are exposed as an all-NaN column of matching length.
        # np.nan replaces np.NaN: the capitalized alias was removed in
        # NumPy 2.0; behavior is identical on older versions.
        return np.full(len(self), np.nan)
    # Key dimensions are resolved by the data interface backend.
    return self.interface.values(self, dim, expanded, flat)
constant[Return the values along the requested dimension.
Args:
dimension: The dimension to return values for
expanded (bool, optional): Whether to expand values
Whether to return the expanded values, behavior depends
on the type of data:
* Columnar: If false returns unique values
* Geometry: If false returns scalar values per geometry
* Gridded: If false returns 1D coordinates
flat (bool, optional): Whether to flatten array
Returns:
NumPy array of values along the requested dimension
]
variable[dim] assign[=] call[name[self].get_dimension, parameter[name[dim]]]
if compare[name[dim] in name[self].vdims] begin[:]
return[call[name[np].full, parameter[call[name[len], parameter[name[self]]], name[np].NaN]]]
return[call[name[self].interface.values, parameter[name[self], name[dim], name[expanded], name[flat]]]] | keyword[def] identifier[dimension_values] ( identifier[self] , identifier[dim] , identifier[expanded] = keyword[True] , identifier[flat] = keyword[True] ):
literal[string]
identifier[dim] = identifier[self] . identifier[get_dimension] ( identifier[dim] , identifier[strict] = keyword[True] )
keyword[if] identifier[dim] keyword[in] identifier[self] . identifier[vdims] :
keyword[return] identifier[np] . identifier[full] ( identifier[len] ( identifier[self] ), identifier[np] . identifier[NaN] )
keyword[return] identifier[self] . identifier[interface] . identifier[values] ( identifier[self] , identifier[dim] , identifier[expanded] , identifier[flat] ) | def dimension_values(self, dim, expanded=True, flat=True):
"""Return the values along the requested dimension.
Args:
dimension: The dimension to return values for
expanded (bool, optional): Whether to expand values
Whether to return the expanded values, behavior depends
on the type of data:
* Columnar: If false returns unique values
* Geometry: If false returns scalar values per geometry
* Gridded: If false returns 1D coordinates
flat (bool, optional): Whether to flatten array
Returns:
NumPy array of values along the requested dimension
"""
dim = self.get_dimension(dim, strict=True)
if dim in self.vdims:
return np.full(len(self), np.NaN) # depends on [control=['if'], data=[]]
return self.interface.values(self, dim, expanded, flat) |
def to_plain_text(str):
    """Reduce the given string to plain text.

    Tags are stripped first, then entity markers, using the module-level
    regexes. This crude approach is adequate for BioCyc content, where
    entities such as &beta; degrade into readable ASCII ('beta'), which
    is preferable to unicode.
    """
    # NOTE(review): parameter shadows the builtin ``str``; kept as-is for
    # API compatibility with existing keyword callers.
    without_tags = strip_tags_re.sub('', str)
    return strip_entities_re.sub('', without_tags)
constant[
Return a plain-text version of a given string
This is a dumb approach that tags and then removing entity markers
but this is fine for the content from biocyc where entities are β etc.
Stripping in this way turns these into plaintext 'beta' which is preferable
to unicode
]
variable[str] assign[=] call[name[strip_tags_re].sub, parameter[constant[], name[str]]]
variable[str] assign[=] call[name[strip_entities_re].sub, parameter[constant[], name[str]]]
return[name[str]] | keyword[def] identifier[to_plain_text] ( identifier[str] ):
literal[string]
identifier[str] = identifier[strip_tags_re] . identifier[sub] ( literal[string] , identifier[str] )
identifier[str] = identifier[strip_entities_re] . identifier[sub] ( literal[string] , identifier[str] )
keyword[return] identifier[str] | def to_plain_text(str):
"""
Return a plain-text version of a given string
This is a dumb approach that tags and then removing entity markers
but this is fine for the content from biocyc where entities are β etc.
Stripping in this way turns these into plaintext 'beta' which is preferable
to unicode
"""
str = strip_tags_re.sub('', str)
str = strip_entities_re.sub('', str)
return str |
def qnwequi(n, a, b, kind="N", equidist_pp=None, random_state=None):
    """Compute nodes and weights for equidistributed-sequence quadrature.

    Averages of an integrable function evaluated over the returned
    sequence converge to its integral over the box [a, b] as n grows.

    Parameters
    ----------
    n : int
        Number of sequence points
    a : scalar or array_like(float)
        Length-d lower endpoints; a scalar is broadcast to all d
        dimensions
    b : scalar or array_like(float)
        Length-d upper endpoints; a scalar is broadcast to all d
        dimensions
    kind : string, optional(default="N")
        One of "N" (Neiderreiter), "W" (Weyl), "H" (Haber),
        "R" (pseudo random)
    equidist_pp : array_like, optional(default=None)
        Irrational seeds used by the Weyl and Haber sequences; defaults
        to the square roots of the primes below 7920
    random_state : int or np.random.RandomState, optional
        Seed or RandomState instance; only consulted for kind="R".
        If None, a randomly initialized RandomState is used.

    Returns
    -------
    nodes : np.ndarray(dtype=float)
        Quadrature nodes
    weights : np.ndarray(dtype=float)
        Weights for quadrature nodes

    Notes
    -----
    Based on the original function ``qnwequi`` in the CompEcon toolbox
    by Miranda and Fackler.

    References
    ----------
    Miranda, Mario J, and Paul L Fackler. Applied Computational
    Economics and Finance, MIT Press, 2002.
    """
    random_state = check_random_state(random_state)

    if equidist_pp is None:
        # Imported lazily so sympy is only required when defaults are used.
        import sympy as sym
        equidist_pp = np.sqrt(np.array(list(sym.primerange(0, 7920))))

    n, a, b = [np.atleast_1d(np.asarray(arg)) for arg in (n, a, b)]
    d = max(len(n), len(a), len(b))
    n = np.prod(n)

    # Broadcast scalar endpoints across all d dimensions.
    if a.size == 1:
        a = np.repeat(a, d)
    if b.size == 1:
        b = np.repeat(b, d)

    i = np.arange(1, n + 1)
    sequence = kind.upper()
    if sequence == "N":    # Neiderreiter
        j = 2.0 ** (np.arange(1, d + 1) / (d + 1))
        nodes = np.outer(i, j)
        nodes = (nodes - fix(nodes)).squeeze()
    elif sequence == "W":  # Weyl
        j = equidist_pp[:d]
        nodes = np.outer(i, j)
        nodes = (nodes - fix(nodes)).squeeze()
    elif sequence == "H":  # Haber
        j = equidist_pp[:d]
        nodes = np.outer(i * (i + 1) / 2, j)
        nodes = (nodes - fix(nodes)).squeeze()
    elif sequence == "R":  # pseudo-random
        nodes = random_state.rand(n, d).squeeze()
    else:
        raise ValueError("Unknown sequence requested")

    # Rescale unit-cube nodes into [a, b]; each node gets equal weight
    # so the weighted sum estimates the integral over the box volume.
    r = b - a
    nodes = a + nodes * r
    weights = (np.prod(r) / n) * np.ones(n)
    return nodes, weights
constant[
Generates equidistributed sequences with property that averages
value of integrable function evaluated over the sequence converges
to the integral as n goes to infinity.
Parameters
----------
n : int
Number of sequence points
a : scalar or array_like(float)
A length-d iterable of lower endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
b : scalar or array_like(float)
A length-d iterable of upper endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
kind : string, optional(default="N")
One of the following:
- N - Neiderreiter (default)
- W - Weyl
- H - Haber
- R - pseudo Random
equidist_pp : array_like, optional(default=None)
TODO: I don't know what this does
random_state : int or np.random.RandomState, optional
Random seed (integer) or np.random.RandomState instance to set
the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState is
used.
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
Based of original function ``qnwequi`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
]
variable[random_state] assign[=] call[name[check_random_state], parameter[name[random_state]]]
if compare[name[equidist_pp] is constant[None]] begin[:]
import module[sympy] as alias[sym]
variable[equidist_pp] assign[=] call[name[np].sqrt, parameter[call[name[np].array, parameter[call[name[list], parameter[call[name[sym].primerange, parameter[constant[0], constant[7920]]]]]]]]]
<ast.Tuple object at 0x7da1b1cb3220> assign[=] call[name[list], parameter[call[name[map], parameter[name[np].atleast_1d, call[name[list], parameter[call[name[map], parameter[name[np].asarray, list[[<ast.Name object at 0x7da1b1cb27d0>, <ast.Name object at 0x7da1b1cb0dc0>, <ast.Name object at 0x7da1b1cb0c70>]]]]]]]]]]
variable[d] assign[=] call[name[max], parameter[call[name[list], parameter[call[name[map], parameter[name[len], list[[<ast.Name object at 0x7da1b1cb0ee0>, <ast.Name object at 0x7da1b1cb2080>, <ast.Name object at 0x7da1b1cb06a0>]]]]]]]]
variable[n] assign[=] call[name[np].prod, parameter[name[n]]]
if compare[name[a].size equal[==] constant[1]] begin[:]
variable[a] assign[=] call[name[np].repeat, parameter[name[a], name[d]]]
if compare[name[b].size equal[==] constant[1]] begin[:]
variable[b] assign[=] call[name[np].repeat, parameter[name[b], name[d]]]
variable[i] assign[=] call[name[np].arange, parameter[constant[1], binary_operation[name[n] + constant[1]]]]
if compare[call[name[kind].upper, parameter[]] equal[==] constant[N]] begin[:]
variable[j] assign[=] binary_operation[constant[2.0] ** binary_operation[call[name[np].arange, parameter[constant[1], binary_operation[name[d] + constant[1]]]] / binary_operation[name[d] + constant[1]]]]
variable[nodes] assign[=] call[name[np].outer, parameter[name[i], name[j]]]
variable[nodes] assign[=] call[binary_operation[name[nodes] - call[name[fix], parameter[name[nodes]]]].squeeze, parameter[]]
variable[r] assign[=] binary_operation[name[b] - name[a]]
variable[nodes] assign[=] binary_operation[name[a] + binary_operation[name[nodes] * name[r]]]
variable[weights] assign[=] binary_operation[binary_operation[call[name[np].prod, parameter[name[r]]] / name[n]] * call[name[np].ones, parameter[name[n]]]]
return[tuple[[<ast.Name object at 0x7da204567dc0>, <ast.Name object at 0x7da2045658a0>]]] | keyword[def] identifier[qnwequi] ( identifier[n] , identifier[a] , identifier[b] , identifier[kind] = literal[string] , identifier[equidist_pp] = keyword[None] , identifier[random_state] = keyword[None] ):
literal[string]
identifier[random_state] = identifier[check_random_state] ( identifier[random_state] )
keyword[if] identifier[equidist_pp] keyword[is] keyword[None] :
keyword[import] identifier[sympy] keyword[as] identifier[sym]
identifier[equidist_pp] = identifier[np] . identifier[sqrt] ( identifier[np] . identifier[array] ( identifier[list] ( identifier[sym] . identifier[primerange] ( literal[int] , literal[int] ))))
identifier[n] , identifier[a] , identifier[b] = identifier[list] ( identifier[map] ( identifier[np] . identifier[atleast_1d] , identifier[list] ( identifier[map] ( identifier[np] . identifier[asarray] ,[ identifier[n] , identifier[a] , identifier[b] ]))))
identifier[d] = identifier[max] ( identifier[list] ( identifier[map] ( identifier[len] ,[ identifier[n] , identifier[a] , identifier[b] ])))
identifier[n] = identifier[np] . identifier[prod] ( identifier[n] )
keyword[if] identifier[a] . identifier[size] == literal[int] :
identifier[a] = identifier[np] . identifier[repeat] ( identifier[a] , identifier[d] )
keyword[if] identifier[b] . identifier[size] == literal[int] :
identifier[b] = identifier[np] . identifier[repeat] ( identifier[b] , identifier[d] )
identifier[i] = identifier[np] . identifier[arange] ( literal[int] , identifier[n] + literal[int] )
keyword[if] identifier[kind] . identifier[upper] ()== literal[string] :
identifier[j] = literal[int] **( identifier[np] . identifier[arange] ( literal[int] , identifier[d] + literal[int] )/( identifier[d] + literal[int] ))
identifier[nodes] = identifier[np] . identifier[outer] ( identifier[i] , identifier[j] )
identifier[nodes] =( identifier[nodes] - identifier[fix] ( identifier[nodes] )). identifier[squeeze] ()
keyword[elif] identifier[kind] . identifier[upper] ()== literal[string] :
identifier[j] = identifier[equidist_pp] [: identifier[d] ]
identifier[nodes] = identifier[np] . identifier[outer] ( identifier[i] , identifier[j] )
identifier[nodes] =( identifier[nodes] - identifier[fix] ( identifier[nodes] )). identifier[squeeze] ()
keyword[elif] identifier[kind] . identifier[upper] ()== literal[string] :
identifier[j] = identifier[equidist_pp] [: identifier[d] ]
identifier[nodes] = identifier[np] . identifier[outer] ( identifier[i] *( identifier[i] + literal[int] )/ literal[int] , identifier[j] )
identifier[nodes] =( identifier[nodes] - identifier[fix] ( identifier[nodes] )). identifier[squeeze] ()
keyword[elif] identifier[kind] . identifier[upper] ()== literal[string] :
identifier[nodes] = identifier[random_state] . identifier[rand] ( identifier[n] , identifier[d] ). identifier[squeeze] ()
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[r] = identifier[b] - identifier[a]
identifier[nodes] = identifier[a] + identifier[nodes] * identifier[r]
identifier[weights] =( identifier[np] . identifier[prod] ( identifier[r] )/ identifier[n] )* identifier[np] . identifier[ones] ( identifier[n] )
keyword[return] identifier[nodes] , identifier[weights] | def qnwequi(n, a, b, kind='N', equidist_pp=None, random_state=None):
"""
Generates equidistributed sequences with property that averages
value of integrable function evaluated over the sequence converges
to the integral as n goes to infinity.
Parameters
----------
n : int
Number of sequence points
a : scalar or array_like(float)
A length-d iterable of lower endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
b : scalar or array_like(float)
A length-d iterable of upper endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
kind : string, optional(default="N")
One of the following:
- N - Neiderreiter (default)
- W - Weyl
- H - Haber
- R - pseudo Random
equidist_pp : array_like, optional(default=None)
TODO: I don't know what this does
random_state : int or np.random.RandomState, optional
Random seed (integer) or np.random.RandomState instance to set
the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState is
used.
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
Based of original function ``qnwequi`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
random_state = check_random_state(random_state)
if equidist_pp is None:
import sympy as sym
equidist_pp = np.sqrt(np.array(list(sym.primerange(0, 7920)))) # depends on [control=['if'], data=['equidist_pp']]
(n, a, b) = list(map(np.atleast_1d, list(map(np.asarray, [n, a, b]))))
d = max(list(map(len, [n, a, b])))
n = np.prod(n)
if a.size == 1:
a = np.repeat(a, d) # depends on [control=['if'], data=[]]
if b.size == 1:
b = np.repeat(b, d) # depends on [control=['if'], data=[]]
i = np.arange(1, n + 1)
if kind.upper() == 'N': # Neiderreiter
j = 2.0 ** (np.arange(1, d + 1) / (d + 1))
nodes = np.outer(i, j)
nodes = (nodes - fix(nodes)).squeeze() # depends on [control=['if'], data=[]]
elif kind.upper() == 'W': # Weyl
j = equidist_pp[:d]
nodes = np.outer(i, j)
nodes = (nodes - fix(nodes)).squeeze() # depends on [control=['if'], data=[]]
elif kind.upper() == 'H': # Haber
j = equidist_pp[:d]
nodes = np.outer(i * (i + 1) / 2, j)
nodes = (nodes - fix(nodes)).squeeze() # depends on [control=['if'], data=[]]
elif kind.upper() == 'R': # pseudo-random
nodes = random_state.rand(n, d).squeeze() # depends on [control=['if'], data=[]]
else:
raise ValueError('Unknown sequence requested')
# compute nodes and weights
r = b - a
nodes = a + nodes * r
weights = np.prod(r) / n * np.ones(n)
return (nodes, weights) |
def liste_stations(self, station=None, detail=False):
    """
    List the measurement stations.

    Parameters:
    station : a valid station name (if empty, lists all stations)
    detail : if True, include extra detail columns for the station(s).
    """
    condition = ""
    if station:
        # NOTE(review): station is interpolated into the SQL text after
        # passing through _format — presumably it escapes/normalizes the
        # value; confirm, since parameterized queries would be safer.
        station = _format(station)
        condition = "WHERE IDENTIFIANT IN ('%s')" % station
    select = ""
    if detail:
        # Extra columns appended to the SELECT list in detail mode.
        select = """,
        ISIT AS DESCRIPTION,
        NO_TELEPHONE AS TELEPHONE,
        ADRESSE_IP,
        LONGI AS LONGITUDE,
        LATI AS LATITUDE,
        ALTI AS ALTITUDE,
        AXE AS ADR,
        CODE_POSTAL AS CP,
        FLAG_VALID AS VALID"""
    _sql = """SELECT
    NSIT AS NUMERO,
    IDENTIFIANT AS STATION %s
    FROM STATION
    %s
    ORDER BY NSIT""" % (select, condition)
    # Returns a pandas DataFrame built from the query result.
    return psql.read_sql(_sql, self.conn)
constant[
Liste des stations
Paramètres:
station : un nom de station valide (si vide, liste toutes les stations)
detail : si True, affiche plus de détail sur la (les) station(s).
]
variable[condition] assign[=] constant[]
if name[station] begin[:]
variable[station] assign[=] call[name[_format], parameter[name[station]]]
variable[condition] assign[=] binary_operation[constant[WHERE IDENTIFIANT IN ('%s')] <ast.Mod object at 0x7da2590d6920> name[station]]
variable[select] assign[=] constant[]
if name[detail] begin[:]
variable[select] assign[=] constant[,
ISIT AS DESCRIPTION,
NO_TELEPHONE AS TELEPHONE,
ADRESSE_IP,
LONGI AS LONGITUDE,
LATI AS LATITUDE,
ALTI AS ALTITUDE,
AXE AS ADR,
CODE_POSTAL AS CP,
FLAG_VALID AS VALID]
variable[_sql] assign[=] binary_operation[constant[SELECT
NSIT AS NUMERO,
IDENTIFIANT AS STATION %s
FROM STATION
%s
ORDER BY NSIT] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1aff6ef80>, <ast.Name object at 0x7da1aff6d210>]]]
return[call[name[psql].read_sql, parameter[name[_sql], name[self].conn]]] | keyword[def] identifier[liste_stations] ( identifier[self] , identifier[station] = keyword[None] , identifier[detail] = keyword[False] ):
literal[string]
identifier[condition] = literal[string]
keyword[if] identifier[station] :
identifier[station] = identifier[_format] ( identifier[station] )
identifier[condition] = literal[string] % identifier[station]
identifier[select] = literal[string]
keyword[if] identifier[detail] :
identifier[select] = literal[string]
identifier[_sql] = literal[string] %( identifier[select] , identifier[condition] )
keyword[return] identifier[psql] . identifier[read_sql] ( identifier[_sql] , identifier[self] . identifier[conn] ) | def liste_stations(self, station=None, detail=False):
"""
Liste des stations
Paramètres:
station : un nom de station valide (si vide, liste toutes les stations)
detail : si True, affiche plus de détail sur la (les) station(s).
"""
condition = ''
if station:
station = _format(station)
condition = "WHERE IDENTIFIANT IN ('%s')" % station # depends on [control=['if'], data=[]]
select = ''
if detail:
select = ',\n ISIT AS DESCRIPTION,\n NO_TELEPHONE AS TELEPHONE,\n ADRESSE_IP,\n LONGI AS LONGITUDE,\n LATI AS LATITUDE,\n ALTI AS ALTITUDE,\n AXE AS ADR,\n CODE_POSTAL AS CP,\n FLAG_VALID AS VALID' # depends on [control=['if'], data=[]]
_sql = 'SELECT\n NSIT AS NUMERO,\n IDENTIFIANT AS STATION %s\n FROM STATION\n %s\n ORDER BY NSIT' % (select, condition)
return psql.read_sql(_sql, self.conn) |
def pause(self, message: Optional[Message_T] = None, **kwargs) -> None:
    """Suspend the session so the user can be prompted again.

    If *message* is truthy it is dispatched asynchronously before
    pausing. Always raises _PauseException to unwind the handler.
    """
    if message:
        send_coro = self.send(message, **kwargs)
        asyncio.ensure_future(send_coro)
    raise _PauseException
constant[Pause the session for further interaction.]
if name[message] begin[:]
call[name[asyncio].ensure_future, parameter[call[name[self].send, parameter[name[message]]]]]
<ast.Raise object at 0x7da2041db5b0> | keyword[def] identifier[pause] ( identifier[self] , identifier[message] : identifier[Optional] [ identifier[Message_T] ]= keyword[None] ,** identifier[kwargs] )-> keyword[None] :
literal[string]
keyword[if] identifier[message] :
identifier[asyncio] . identifier[ensure_future] ( identifier[self] . identifier[send] ( identifier[message] ,** identifier[kwargs] ))
keyword[raise] identifier[_PauseException] | def pause(self, message: Optional[Message_T]=None, **kwargs) -> None:
"""Pause the session for further interaction."""
if message:
asyncio.ensure_future(self.send(message, **kwargs)) # depends on [control=['if'], data=[]]
raise _PauseException |
def _scaled_square_dist(self, X, X2):
    """
    Returns ((X - X2ᵀ)/lengthscales)².

    Due to the implementation and floating-point imprecision, the
    result may actually be very slightly negative for entries very
    close to each other.

    This function can deal with leading dimensions in X and X2.
    In the sample case, where X and X2 are both 2 dimensional,
    for example, X is [N, D] and X2 is [M, D], then a tensor of shape
    [N, M] is returned. If X is [N1, S1, D] and X2 is [N2, S2, D]
    then the output will be [N1, S1, N2, S2].
    """
    # Work in lengthscale-normalized coordinates throughout.
    X = X / self.lengthscales
    if X2 is None:
        # Self-distance: expand ||x_i - x_j||^2 as
        # x_i.x_i - 2 x_i.x_j + x_j.x_j, avoiding an [N, N, D]
        # difference tensor.
        Xs = tf.reduce_sum(tf.square(X), axis=-1, keepdims=True)
        dist = -2 * tf.matmul(X, X, transpose_b=True)
        dist += Xs + tf.matrix_transpose(Xs)
        return dist

    Xs = tf.reduce_sum(tf.square(X), axis=-1)
    X2 = X2 / self.lengthscales
    X2s = tf.reduce_sum(tf.square(X2), axis=-1)
    # tensordot contracts the trailing feature axis of both inputs,
    # giving the cross terms for arbitrary leading batch dimensions.
    dist = -2 * tf.tensordot(X, X2, [[-1], [-1]])
    # Broadcast-add the squared norms across the two leading shapes.
    dist += _broadcasting_elementwise_op(tf.add, Xs, X2s)
    return dist
constant[
Returns ((X - X2ᵀ)/lengthscales)².
Due to the implementation and floating-point imprecision, the
result may actually be very slightly negative for entries very
close to each other.
This function can deal with leading dimensions in X and X2.
In the sample case, where X and X2 are both 2 dimensional,
for example, X is [N, D] and X2 is [M, D], then a tensor of shape
[N, M] is returned. If X is [N1, S1, D] and X2 is [N2, S2, D]
then the output will be [N1, S1, N2, S2].
]
variable[X] assign[=] binary_operation[name[X] / name[self].lengthscales]
if compare[name[X2] is constant[None]] begin[:]
variable[Xs] assign[=] call[name[tf].reduce_sum, parameter[call[name[tf].square, parameter[name[X]]]]]
variable[dist] assign[=] binary_operation[<ast.UnaryOp object at 0x7da1b21a1930> * call[name[tf].matmul, parameter[name[X], name[X]]]]
<ast.AugAssign object at 0x7da1b21a1720>
return[name[dist]]
variable[Xs] assign[=] call[name[tf].reduce_sum, parameter[call[name[tf].square, parameter[name[X]]]]]
variable[X2] assign[=] binary_operation[name[X2] / name[self].lengthscales]
variable[X2s] assign[=] call[name[tf].reduce_sum, parameter[call[name[tf].square, parameter[name[X2]]]]]
variable[dist] assign[=] binary_operation[<ast.UnaryOp object at 0x7da1b21a2350> * call[name[tf].tensordot, parameter[name[X], name[X2], list[[<ast.List object at 0x7da1b21a0100>, <ast.List object at 0x7da1b21a3820>]]]]]
<ast.AugAssign object at 0x7da1b21a3d60>
return[name[dist]] | keyword[def] identifier[_scaled_square_dist] ( identifier[self] , identifier[X] , identifier[X2] ):
literal[string]
identifier[X] = identifier[X] / identifier[self] . identifier[lengthscales]
keyword[if] identifier[X2] keyword[is] keyword[None] :
identifier[Xs] = identifier[tf] . identifier[reduce_sum] ( identifier[tf] . identifier[square] ( identifier[X] ), identifier[axis] =- literal[int] , identifier[keepdims] = keyword[True] )
identifier[dist] =- literal[int] * identifier[tf] . identifier[matmul] ( identifier[X] , identifier[X] , identifier[transpose_b] = keyword[True] )
identifier[dist] += identifier[Xs] + identifier[tf] . identifier[matrix_transpose] ( identifier[Xs] )
keyword[return] identifier[dist]
identifier[Xs] = identifier[tf] . identifier[reduce_sum] ( identifier[tf] . identifier[square] ( identifier[X] ), identifier[axis] =- literal[int] )
identifier[X2] = identifier[X2] / identifier[self] . identifier[lengthscales]
identifier[X2s] = identifier[tf] . identifier[reduce_sum] ( identifier[tf] . identifier[square] ( identifier[X2] ), identifier[axis] =- literal[int] )
identifier[dist] =- literal[int] * identifier[tf] . identifier[tensordot] ( identifier[X] , identifier[X2] ,[[- literal[int] ],[- literal[int] ]])
identifier[dist] += identifier[_broadcasting_elementwise_op] ( identifier[tf] . identifier[add] , identifier[Xs] , identifier[X2s] )
keyword[return] identifier[dist] | def _scaled_square_dist(self, X, X2):
"""
Returns ((X - X2ᵀ)/lengthscales)².
Due to the implementation and floating-point imprecision, the
result may actually be very slightly negative for entries very
close to each other.
This function can deal with leading dimensions in X and X2.
In the sample case, where X and X2 are both 2 dimensional,
for example, X is [N, D] and X2 is [M, D], then a tensor of shape
[N, M] is returned. If X is [N1, S1, D] and X2 is [N2, S2, D]
then the output will be [N1, S1, N2, S2].
"""
X = X / self.lengthscales
if X2 is None:
Xs = tf.reduce_sum(tf.square(X), axis=-1, keepdims=True)
dist = -2 * tf.matmul(X, X, transpose_b=True)
dist += Xs + tf.matrix_transpose(Xs)
return dist # depends on [control=['if'], data=[]]
Xs = tf.reduce_sum(tf.square(X), axis=-1)
X2 = X2 / self.lengthscales
X2s = tf.reduce_sum(tf.square(X2), axis=-1)
dist = -2 * tf.tensordot(X, X2, [[-1], [-1]])
dist += _broadcasting_elementwise_op(tf.add, Xs, X2s)
return dist |
def child(self, number):
    """
    Return the child item at the given position.

    :type number: int
    :rtype: ProtocolTreeItem

    Returns False (not None) when *number* is out of range, preserving
    the original API contract for callers that test the result.
    """
    # Reject negative indices explicitly: plain list indexing would
    # otherwise silently wrap around and return a child from the end.
    if 0 <= number < self.childCount():
        return self.__childItems[number]
    return False
return False | def function[child, parameter[self, number]]:
constant[
:type number: int
:rtype: ProtocolTreeItem
]
if compare[name[number] less[<] call[name[self].childCount, parameter[]]] begin[:]
return[call[name[self].__childItems][name[number]]] | keyword[def] identifier[child] ( identifier[self] , identifier[number] ):
literal[string]
keyword[if] identifier[number] < identifier[self] . identifier[childCount] ():
keyword[return] identifier[self] . identifier[__childItems] [ identifier[number] ]
keyword[else] :
keyword[return] keyword[False] | def child(self, number):
"""
:type number: int
:rtype: ProtocolTreeItem
"""
if number < self.childCount():
return self.__childItems[number] # depends on [control=['if'], data=['number']]
else:
return False |
def emitTriggered(self, action):
    """
    Emits the triggered action for this widget.

    Emission order matters: selection-change signals fire first so
    listeners see a consistent current action/index before the
    user-facing triggered signal.

    :param action | <QAction>
    """
    self.currentActionChanged.emit(action)
    self.currentIndexChanged.emit(self.indexOf(action))

    # Only the triggered signal honors the explicit blocked check;
    # NOTE(review): the two signals above bypass this guard — confirm
    # that asymmetry is intentional.
    if not self.signalsBlocked():
        self.triggered.emit(action)
constant[
Emits the triggered action for this widget.
:param action | <QAction>
]
call[name[self].currentActionChanged.emit, parameter[name[action]]]
call[name[self].currentIndexChanged.emit, parameter[call[name[self].indexOf, parameter[name[action]]]]]
if <ast.UnaryOp object at 0x7da18f09c760> begin[:]
call[name[self].triggered.emit, parameter[name[action]]] | keyword[def] identifier[emitTriggered] ( identifier[self] , identifier[action] ):
literal[string]
identifier[self] . identifier[currentActionChanged] . identifier[emit] ( identifier[action] )
identifier[self] . identifier[currentIndexChanged] . identifier[emit] ( identifier[self] . identifier[indexOf] ( identifier[action] ))
keyword[if] keyword[not] identifier[self] . identifier[signalsBlocked] ():
identifier[self] . identifier[triggered] . identifier[emit] ( identifier[action] ) | def emitTriggered(self, action):
"""
Emits the triggered action for this widget.
:param action | <QAction>
"""
self.currentActionChanged.emit(action)
self.currentIndexChanged.emit(self.indexOf(action))
if not self.signalsBlocked():
self.triggered.emit(action) # depends on [control=['if'], data=[]] |
def get_default_org(self):
    """Look up the org flagged as the default.

    Returns a ``(name, config)`` pair for the first org whose config
    has ``default`` set, or ``(None, None)`` when no org is marked.
    """
    for org_name in self.list_orgs():
        org_cfg = self.get_org(org_name)
        if org_cfg.default:
            return org_name, org_cfg
    return None, None
constant[ retrieve the name and configuration of the default org ]
for taget[name[org]] in starred[call[name[self].list_orgs, parameter[]]] begin[:]
variable[org_config] assign[=] call[name[self].get_org, parameter[name[org]]]
if name[org_config].default begin[:]
return[tuple[[<ast.Name object at 0x7da1b162a890>, <ast.Name object at 0x7da1b16297e0>]]]
return[tuple[[<ast.Constant object at 0x7da1b162b880>, <ast.Constant object at 0x7da1b162ab00>]]] | keyword[def] identifier[get_default_org] ( identifier[self] ):
literal[string]
keyword[for] identifier[org] keyword[in] identifier[self] . identifier[list_orgs] ():
identifier[org_config] = identifier[self] . identifier[get_org] ( identifier[org] )
keyword[if] identifier[org_config] . identifier[default] :
keyword[return] identifier[org] , identifier[org_config]
keyword[return] keyword[None] , keyword[None] | def get_default_org(self):
""" retrieve the name and configuration of the default org """
for org in self.list_orgs():
org_config = self.get_org(org)
if org_config.default:
return (org, org_config) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['org']]
return (None, None) |
def pauli_string(self, qubits=None):
    """Render this PauliTerm as a bare operator string, sans coefficient.

    When *qubits* is given, character k of the result is the Pauli
    operator acting on qubits[k] (identity included for untouched
    qubits). When omitted — which is deprecated — only the non-identity
    operators of this term are listed, which is rarely what you want,
    so please pass an explicit qubit list.

    >>> p = PauliTerm("X", 0) * PauliTerm("Y", 1, 1.j)
    >>> p.pauli_string()
    "XY"
    >>> p.pauli_string(qubits=[0])
    "X"
    >>> p.pauli_string(qubits=[0, 2])
    "XI"

    :param list qubits: Qubit indices (ints) to represent; defaults to
        all qubits present in this PauliTerm.
    :return: The string representation of this PauliTerm, sans coefficient
    """
    if qubits is None:
        warnings.warn("Please provide a list of qubits when using PauliTerm.pauli_string",
                      DeprecationWarning)
        qubits = self.get_qubits()
    return ''.join([self[q] for q in qubits])
constant[
Return a string representation of this PauliTerm without its coefficient and with
implicit qubit indices.
If a list of qubits is provided, each character in the resulting string represents
a Pauli operator on the corresponding qubit. If qubit indices are not provided as input,
the returned string will be all non-identity operators in the order. This doesn't make
much sense, so please provide a list of qubits. Not providing a list of qubits is
deprecated.
>>> p = PauliTerm("X", 0) * PauliTerm("Y", 1, 1.j)
>>> p.pauli_string()
"XY"
>>> p.pauli_string(qubits=[0])
"X"
>>> p.pauli_string(qubits=[0, 2])
"XI"
:param list qubits: The list of qubits to represent, given as ints. If None, defaults to
all qubits in this PauliTerm.
:return: The string representation of this PauliTerm, sans coefficient
]
if compare[name[qubits] is constant[None]] begin[:]
call[name[warnings].warn, parameter[constant[Please provide a list of qubits when using PauliTerm.pauli_string], name[DeprecationWarning]]]
variable[qubits] assign[=] call[name[self].get_qubits, parameter[]]
return[call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da1b1b60640>]]] | keyword[def] identifier[pauli_string] ( identifier[self] , identifier[qubits] = keyword[None] ):
literal[string]
keyword[if] identifier[qubits] keyword[is] keyword[None] :
identifier[warnings] . identifier[warn] ( literal[string] ,
identifier[DeprecationWarning] )
identifier[qubits] = identifier[self] . identifier[get_qubits] ()
keyword[return] literal[string] . identifier[join] ( identifier[self] [ identifier[q] ] keyword[for] identifier[q] keyword[in] identifier[qubits] ) | def pauli_string(self, qubits=None):
"""
Return a string representation of this PauliTerm without its coefficient and with
implicit qubit indices.
If a list of qubits is provided, each character in the resulting string represents
a Pauli operator on the corresponding qubit. If qubit indices are not provided as input,
the returned string will be all non-identity operators in the order. This doesn't make
much sense, so please provide a list of qubits. Not providing a list of qubits is
deprecated.
>>> p = PauliTerm("X", 0) * PauliTerm("Y", 1, 1.j)
>>> p.pauli_string()
"XY"
>>> p.pauli_string(qubits=[0])
"X"
>>> p.pauli_string(qubits=[0, 2])
"XI"
:param list qubits: The list of qubits to represent, given as ints. If None, defaults to
all qubits in this PauliTerm.
:return: The string representation of this PauliTerm, sans coefficient
"""
if qubits is None:
warnings.warn('Please provide a list of qubits when using PauliTerm.pauli_string', DeprecationWarning)
qubits = self.get_qubits() # depends on [control=['if'], data=['qubits']]
return ''.join((self[q] for q in qubits)) |
def rows(self):
    """Yield (term, value) pairs for the section."""
    for term_obj in self.terms:
        for entry in term_obj.rows:
            name, value = entry  # value is either a string or a dict
            if not isinstance(value, dict):
                yield entry
                continue
            # A dict value holds properties, which might be arg-children.
            name, args, leftover = self._args(name, value)
            yield name, args
            # 'leftover' holds the children that had no arg-child column --
            # the section had no column header for them.
            base = name.split('.')[-1]
            for child_key, child_value in leftover.items():
                yield base + '.' + child_key, child_value
constant[Yield rows for the section]
for taget[name[t]] in starred[name[self].terms] begin[:]
for taget[name[row]] in starred[name[t].rows] begin[:]
<ast.Tuple object at 0x7da2041dbc40> assign[=] name[row]
if call[name[isinstance], parameter[name[value], name[dict]]] begin[:]
<ast.Tuple object at 0x7da2041d97b0> assign[=] call[name[self]._args, parameter[name[term], name[value]]]
<ast.Yield object at 0x7da2041d91b0>
for taget[tuple[[<ast.Name object at 0x7da2041dab30>, <ast.Name object at 0x7da2041d8040>]]] in starred[call[name[remain].items, parameter[]]] begin[:]
<ast.Yield object at 0x7da2041daf80> | keyword[def] identifier[rows] ( identifier[self] ):
literal[string]
keyword[for] identifier[t] keyword[in] identifier[self] . identifier[terms] :
keyword[for] identifier[row] keyword[in] identifier[t] . identifier[rows] :
identifier[term] , identifier[value] = identifier[row]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[dict] ):
identifier[term] , identifier[args] , identifier[remain] = identifier[self] . identifier[_args] ( identifier[term] , identifier[value] )
keyword[yield] identifier[term] , identifier[args]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[remain] . identifier[items] ():
keyword[yield] identifier[term] . identifier[split] ( literal[string] )[- literal[int] ]+ literal[string] + identifier[k] , identifier[v]
keyword[else] :
keyword[yield] identifier[row] | def rows(self):
"""Yield rows for the section"""
for t in self.terms:
for row in t.rows:
(term, value) = row # Value can either be a string, or a dict
if isinstance(value, dict): # Dict is for properties, which might be arg-children
(term, args, remain) = self._args(term, value)
yield (term, args)
# 'remain' is all of the children that didn't have an arg-child column -- the
# section didn't have a column heder for that ther.
for (k, v) in remain.items():
yield (term.split('.')[-1] + '.' + k, v) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
yield row # depends on [control=['for'], data=['row']] # depends on [control=['for'], data=['t']] |
def title_to_name(title, decode=True, max_len=None, use_complete_words=True):
    """Turn a title into a normalized, URL-safe name.

    :param title: source title.
    :param decode: transliterate to ASCII (via ``unidecode``) before
        normalizing.
    :param max_len: maximum length of the result; ``None`` means unlimited.
    :param use_complete_words: when truncating, cut at the last whole word
        instead of at an arbitrary character.
    :return: lowercase, dash-separated name with stop words removed.
    """
    # Transliterate (optionally) and lowercase.
    if decode:
        title = unidecode(title)
    title = title.lower()
    # Drop any character that is not allowed in the slug.
    filtered_title = re.sub(r'[^a-z0-9- ]+', '', title)
    # Remove stop words and join the remaining words with single dashes.
    normalized_title = '-'.join(word for word in filtered_title.split()
                                if word not in STOP_WORDS)
    # Truncate if the normalized title exceeds the maximum length.
    if max_len and len(normalized_title) > max_len:
        if use_complete_words:
            # Cut at the last complete word. When there is no word boundary
            # inside the limit, fall back to a hard cut (the previous
            # implementation raised ValueError from str.rindex here).
            try:
                last_word_index = normalized_title.rindex("-", 0, max_len)
                normalized_title = normalized_title[:last_word_index]
            except ValueError:
                normalized_title = normalized_title[:max_len]
        # Otherwise cut at the last allowed character.
        else:
            normalized_title = normalized_title[:max_len]
    return normalized_title
constant[Convierte un título en un nombre normalizado para generar urls.]
if name[decode] begin[:]
variable[title] assign[=] call[name[unidecode], parameter[name[title]]]
variable[title] assign[=] call[name[title].lower, parameter[]]
variable[filtered_title] assign[=] call[name[re].sub, parameter[constant[[^a-z0-9- ]+], constant[], name[title]]]
variable[normalized_title] assign[=] call[constant[-].join, parameter[<ast.ListComp object at 0x7da1b04415d0>]]
if <ast.BoolOp object at 0x7da1b0440100> begin[:]
if name[use_complete_words] begin[:]
variable[last_word_index] assign[=] call[name[normalized_title].rindex, parameter[constant[-], constant[0], name[max_len]]]
variable[normalized_title] assign[=] call[name[normalized_title]][<ast.Slice object at 0x7da1b0441e10>]
return[name[normalized_title]] | keyword[def] identifier[title_to_name] ( identifier[title] , identifier[decode] = keyword[True] , identifier[max_len] = keyword[None] , identifier[use_complete_words] = keyword[True] ):
literal[string]
keyword[if] identifier[decode] :
identifier[title] = identifier[unidecode] ( identifier[title] )
identifier[title] = identifier[title] . identifier[lower] ()
identifier[filtered_title] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[title] )
identifier[normalized_title] = literal[string] . identifier[join] ([ identifier[word] keyword[for] identifier[word] keyword[in] identifier[filtered_title] . identifier[split] ()
keyword[if] identifier[word] keyword[not] keyword[in] identifier[STOP_WORDS] ])
keyword[if] identifier[max_len] keyword[and] identifier[len] ( identifier[normalized_title] )> identifier[max_len] :
keyword[if] identifier[use_complete_words] :
identifier[last_word_index] = identifier[normalized_title] . identifier[rindex] ( literal[string] , literal[int] , identifier[max_len] )
identifier[normalized_title] = identifier[normalized_title] [: identifier[last_word_index] ]
keyword[else] :
identifier[normalized_title] = identifier[normalized_title] [: identifier[max_len] ]
keyword[return] identifier[normalized_title] | def title_to_name(title, decode=True, max_len=None, use_complete_words=True):
"""Convierte un título en un nombre normalizado para generar urls."""
# decodifica y pasa a minúsculas
if decode:
title = unidecode(title) # depends on [control=['if'], data=[]]
title = title.lower()
# remueve caracteres no permitidos
filtered_title = re.sub('[^a-z0-9- ]+', '', title)
# remueve stop words y espacios y une palabras sólo con un "-"
normalized_title = '-'.join([word for word in filtered_title.split() if word not in STOP_WORDS])
# recorto el titulo normalizado si excede la longitud máxima
if max_len and len(normalized_title) > max_len:
# busco la última palabra completa
if use_complete_words:
last_word_index = normalized_title.rindex('-', 0, max_len)
normalized_title = normalized_title[:last_word_index] # depends on [control=['if'], data=[]]
else:
# corto en el último caracter
normalized_title = normalized_title[:max_len] # depends on [control=['if'], data=[]]
return normalized_title |
def get_current_container_id(read_from='/proc/self/cgroup'):
    """
    Get the ID of the container the application is currently running in,
    otherwise return `None` if not running in a container.
    This is a best-effort guess, based on cgroups: a cgroup path whose last
    component is a 64-character lowercase hex string is assumed to be a
    container ID.
    :param read_from: the cgroups file to read from (default: `/proc/self/cgroup`)
    :return: the 64-character hex container ID, or `None`
    """
    if not os.path.exists(read_from):
        return None
    # Compile once and capture the ID directly, instead of re-matching and
    # then re-substituting with a second regex pass on every line.
    pattern = re.compile(r'.*/([0-9a-f]{64})$')
    with open(read_from, 'r') as cgroup:
        for line in cgroup:
            match = pattern.match(line.strip())
            if match:
                return match.group(1)
    return None
constant[
Get the ID of the container the application is currently running in,
otherwise return `None` if not running in a container.
This is a best-effort guess, based on cgroups.
:param read_from: the cgroups file to read from (default: `/proc/self/cgroup`)
]
if <ast.UnaryOp object at 0x7da207f9bca0> begin[:]
return[None]
with call[name[open], parameter[name[read_from], constant[r]]] begin[:]
for taget[name[line]] in starred[name[cgroup]] begin[:]
if call[name[re].match, parameter[constant[.*/[0-9a-f]{64}$], call[name[line].strip, parameter[]]]] begin[:]
return[call[name[re].sub, parameter[constant[.*/([0-9a-f]{64})$], constant[\1], call[name[line].strip, parameter[]]]]] | keyword[def] identifier[get_current_container_id] ( identifier[read_from] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[read_from] ):
keyword[return]
keyword[with] identifier[open] ( identifier[read_from] , literal[string] ) keyword[as] identifier[cgroup] :
keyword[for] identifier[line] keyword[in] identifier[cgroup] :
keyword[if] identifier[re] . identifier[match] ( literal[string] , identifier[line] . identifier[strip] ()):
keyword[return] identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[line] . identifier[strip] ()) | def get_current_container_id(read_from='/proc/self/cgroup'):
"""
Get the ID of the container the application is currently running in,
otherwise return `None` if not running in a container.
This is a best-effort guess, based on cgroups.
:param read_from: the cgroups file to read from (default: `/proc/self/cgroup`)
"""
if not os.path.exists(read_from):
return # depends on [control=['if'], data=[]]
with open(read_from, 'r') as cgroup:
for line in cgroup:
if re.match('.*/[0-9a-f]{64}$', line.strip()):
return re.sub('.*/([0-9a-f]{64})$', '\\1', line.strip()) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['cgroup']] |
async def _handle_exception(self, e, sock):
    """
    Handle an exception raised while talking to the server.

    Protocol-level failures (``RemoteProtocolError``, ``AssertionError``)
    are shadowed by an asks ``BadHttpResponse``; any other ``Exception``
    is re-raised as-is. In both cases the underlying socket is closed
    first.
    """
    shadow = isinstance(e, (RemoteProtocolError, AssertionError))
    if shadow or isinstance(e, Exception):
        await sock.close()
        if shadow:
            raise BadHttpResponse('Invalid HTTP response from server.') from e
        raise e
literal[string]
keyword[if] identifier[isinstance] ( identifier[e] ,( identifier[RemoteProtocolError] , identifier[AssertionError] )):
keyword[await] identifier[sock] . identifier[close] ()
keyword[raise] identifier[BadHttpResponse] ( literal[string] ) keyword[from] identifier[e]
keyword[if] identifier[isinstance] ( identifier[e] , identifier[Exception] ):
keyword[await] identifier[sock] . identifier[close] ()
keyword[raise] identifier[e] | async def _handle_exception(self, e, sock):
"""
Given an exception, we want to handle it appropriately. Some exceptions we
prefer to shadow with an asks exception, and some we want to raise directly.
In all cases we clean up the underlying socket.
"""
if isinstance(e, (RemoteProtocolError, AssertionError)):
await sock.close()
raise BadHttpResponse('Invalid HTTP response from server.') from e # depends on [control=['if'], data=[]]
if isinstance(e, Exception):
await sock.close()
raise e # depends on [control=['if'], data=[]] |
def time_slice(self, timerange, surf=False):
    """
    Slice this object over a time range.

    :parameter timerange: time range to be used; string dates are first
        converted with ``cnes_convert``.
    """
    trange = timerange
    if isinstance(trange[0], str):
        # Convert calendar-date strings to the internal representation.
        trange = cnes_convert(trange)[0]
    return self.slice('date', trange, surf=surf)
constant[
slice object given a time range
:parameter timerange: rime range to be used.
]
if call[name[isinstance], parameter[call[name[timerange]][constant[0]], name[str]]] begin[:]
variable[trange] assign[=] call[call[name[cnes_convert], parameter[name[timerange]]]][constant[0]]
return[call[name[self].slice, parameter[constant[date], name[trange]]]] | keyword[def] identifier[time_slice] ( identifier[self] , identifier[timerange] , identifier[surf] = keyword[False] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[timerange] [ literal[int] ], identifier[str] ): identifier[trange] = identifier[cnes_convert] ( identifier[timerange] )[ literal[int] ]
keyword[else] : identifier[trange] = identifier[timerange]
keyword[return] identifier[self] . identifier[slice] ( literal[string] , identifier[trange] , identifier[surf] = identifier[surf] ) | def time_slice(self, timerange, surf=False):
"""
slice object given a time range
:parameter timerange: rime range to be used.
"""
if isinstance(timerange[0], str):
trange = cnes_convert(timerange)[0] # depends on [control=['if'], data=[]]
else:
trange = timerange
return self.slice('date', trange, surf=surf) |
def install_completion(
        shell: arg(choices=('bash', 'fish'), help='Shell to install completion for'),
        to: arg(help='~/.bashrc.d/runcommands.rc or ~/.config/fish/runcommands.fish') = None,
        overwrite: 'Overwrite if exists' = False):
    """Install command line completion script.
    Currently, bash and fish are supported. The corresponding script
    will be copied to an appropriate directory. If a file already exists
    at that location, confirmation is requested before overwriting it,
    unless ``overwrite`` is set.
    """
    if shell == 'bash':
        source = 'runcommands:completion/bash/runcommands.rc'
        to = to or '~/.bashrc.d'
    elif shell == 'fish':
        source = 'runcommands:completion/fish/runcommands.fish'
        to = to or '~/.config/fish/runcommands.fish'
    source = asset_path(source)
    destination = os.path.expanduser(to)
    # A directory destination means "keep the script's own file name".
    if os.path.isdir(destination):
        destination = os.path.join(destination, os.path.basename(source))
    printer.info('Installing', shell, 'completion script to:\n  ', destination)
    if os.path.exists(destination):
        if overwrite:
            printer.info('Overwriting:\n  {destination}'.format_map(locals()))
        else:
            # confirm() aborts when the user declines, so its return value
            # does not need to be kept. (The previous format_map(locals())
            # on this message was a no-op: the string has no fields.)
            confirm('File exists. Overwrite?', abort_on_unconfirmed=True)
    copy_file(source, destination)
    printer.info('Installed; remember to:\n  source {destination}'.format_map(locals()))
constant[Install command line completion script.
Currently, bash and fish are supported. The corresponding script
will be copied to an appropriate directory. If the script already
exists at that location, it will be overwritten by default.
]
if compare[name[shell] equal[==] constant[bash]] begin[:]
variable[source] assign[=] constant[runcommands:completion/bash/runcommands.rc]
variable[to] assign[=] <ast.BoolOp object at 0x7da1b1d64640>
variable[source] assign[=] call[name[asset_path], parameter[name[source]]]
variable[destination] assign[=] call[name[os].path.expanduser, parameter[name[to]]]
if call[name[os].path.isdir, parameter[name[destination]]] begin[:]
variable[destination] assign[=] call[name[os].path.join, parameter[name[destination], call[name[os].path.basename, parameter[name[source]]]]]
call[name[printer].info, parameter[constant[Installing], name[shell], constant[completion script to:
], name[destination]]]
if call[name[os].path.exists, parameter[name[destination]]] begin[:]
if name[overwrite] begin[:]
call[name[printer].info, parameter[call[constant[Overwriting:
{destination}].format_map, parameter[call[name[locals], parameter[]]]]]]
call[name[copy_file], parameter[name[source], name[destination]]]
call[name[printer].info, parameter[call[constant[Installed; remember to:
source {destination}].format_map, parameter[call[name[locals], parameter[]]]]]] | keyword[def] identifier[install_completion] (
identifier[shell] : identifier[arg] ( identifier[choices] =( literal[string] , literal[string] ), identifier[help] = literal[string] ),
identifier[to] : identifier[arg] ( identifier[help] = literal[string] )= keyword[None] ,
identifier[overwrite] : literal[string] = keyword[False] ):
literal[string]
keyword[if] identifier[shell] == literal[string] :
identifier[source] = literal[string]
identifier[to] = identifier[to] keyword[or] literal[string]
keyword[elif] identifier[shell] == literal[string] :
identifier[source] = literal[string]
identifier[to] = identifier[to] keyword[or] literal[string]
identifier[source] = identifier[asset_path] ( identifier[source] )
identifier[destination] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[to] )
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[destination] ):
identifier[destination] = identifier[os] . identifier[path] . identifier[join] ( identifier[destination] , identifier[os] . identifier[path] . identifier[basename] ( identifier[source] ))
identifier[printer] . identifier[info] ( literal[string] , identifier[shell] , literal[string] , identifier[destination] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[destination] ):
keyword[if] identifier[overwrite] :
identifier[printer] . identifier[info] ( literal[string] . identifier[format_map] ( identifier[locals] ()))
keyword[else] :
identifier[message] = literal[string] . identifier[format_map] ( identifier[locals] ())
identifier[overwrite] = identifier[confirm] ( identifier[message] , identifier[abort_on_unconfirmed] = keyword[True] )
identifier[copy_file] ( identifier[source] , identifier[destination] )
identifier[printer] . identifier[info] ( literal[string] . identifier[format_map] ( identifier[locals] ())) | def install_completion(shell: arg(choices=('bash', 'fish'), help='Shell to install completion for'), to: arg(help='~/.bashrc.d/runcommands.rc or ~/.config/fish/runcommands.fish')=None, overwrite: 'Overwrite if exists'=False):
"""Install command line completion script.
Currently, bash and fish are supported. The corresponding script
will be copied to an appropriate directory. If the script already
exists at that location, it will be overwritten by default.
"""
if shell == 'bash':
source = 'runcommands:completion/bash/runcommands.rc'
to = to or '~/.bashrc.d' # depends on [control=['if'], data=[]]
elif shell == 'fish':
source = 'runcommands:completion/fish/runcommands.fish'
to = to or '~/.config/fish/runcommands.fish' # depends on [control=['if'], data=[]]
source = asset_path(source)
destination = os.path.expanduser(to)
if os.path.isdir(destination):
destination = os.path.join(destination, os.path.basename(source)) # depends on [control=['if'], data=[]]
printer.info('Installing', shell, 'completion script to:\n ', destination)
if os.path.exists(destination):
if overwrite:
printer.info('Overwriting:\n {destination}'.format_map(locals())) # depends on [control=['if'], data=[]]
else:
message = 'File exists. Overwrite?'.format_map(locals())
overwrite = confirm(message, abort_on_unconfirmed=True) # depends on [control=['if'], data=[]]
copy_file(source, destination)
printer.info('Installed; remember to:\n source {destination}'.format_map(locals())) |
def find_gene_knockout_reactions(cobra_model, gene_list,
                                 compiled_gene_reaction_rules=None):
    """identify reactions which will be disabled when the genes are knocked out
    cobra_model: :class:`~cobra.core.Model.Model`
    gene_list: iterable of :class:`~cobra.core.Gene.Gene`
    compiled_gene_reaction_rules: dict of {reaction_id: compiled_string}
        If provided, this gives pre-compiled gene_reaction_rule strings.
        The compiled rule strings can be evaluated much faster. If a rule
        is not provided, the regular expression evaluation will be used.
        Because not all gene_reaction_rule strings can be evaluated, this
        dict must exclude any rules which can not be used with eval.
    """
    # Collect every reaction touched by at least one knocked-out gene.
    candidate_reactions = set()
    for entry in gene_list:
        gene = (cobra_model.genes.get_by_id(entry)
                if isinstance(entry, string_types) else entry)
        candidate_reactions.update(gene._reaction)
    knocked_out = {str(g) for g in gene_list}
    if compiled_gene_reaction_rules is None:
        compiled_gene_reaction_rules = {
            rxn: parse_gpr(rxn.gene_reaction_rule)[0]
            for rxn in candidate_reactions}
    # A reaction is disabled when its GPR evaluates False with the
    # knocked-out genes removed.
    disabled = []
    for rxn in candidate_reactions:
        if not eval_gpr(compiled_gene_reaction_rules[rxn], knocked_out):
            disabled.append(rxn)
    return disabled
constant[identify reactions which will be disabled when the genes are knocked out
cobra_model: :class:`~cobra.core.Model.Model`
gene_list: iterable of :class:`~cobra.core.Gene.Gene`
compiled_gene_reaction_rules: dict of {reaction_id: compiled_string}
If provided, this gives pre-compiled gene_reaction_rule strings.
The compiled rule strings can be evaluated much faster. If a rule
is not provided, the regular expression evaluation will be used.
Because not all gene_reaction_rule strings can be evaluated, this
dict must exclude any rules which can not be used with eval.
]
variable[potential_reactions] assign[=] call[name[set], parameter[]]
for taget[name[gene]] in starred[name[gene_list]] begin[:]
if call[name[isinstance], parameter[name[gene], name[string_types]]] begin[:]
variable[gene] assign[=] call[name[cobra_model].genes.get_by_id, parameter[name[gene]]]
call[name[potential_reactions].update, parameter[name[gene]._reaction]]
variable[gene_set] assign[=] <ast.SetComp object at 0x7da1b01f85e0>
if compare[name[compiled_gene_reaction_rules] is constant[None]] begin[:]
variable[compiled_gene_reaction_rules] assign[=] <ast.DictComp object at 0x7da20e9562f0>
return[<ast.ListComp object at 0x7da1b01c3e50>] | keyword[def] identifier[find_gene_knockout_reactions] ( identifier[cobra_model] , identifier[gene_list] ,
identifier[compiled_gene_reaction_rules] = keyword[None] ):
literal[string]
identifier[potential_reactions] = identifier[set] ()
keyword[for] identifier[gene] keyword[in] identifier[gene_list] :
keyword[if] identifier[isinstance] ( identifier[gene] , identifier[string_types] ):
identifier[gene] = identifier[cobra_model] . identifier[genes] . identifier[get_by_id] ( identifier[gene] )
identifier[potential_reactions] . identifier[update] ( identifier[gene] . identifier[_reaction] )
identifier[gene_set] ={ identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[gene_list] }
keyword[if] identifier[compiled_gene_reaction_rules] keyword[is] keyword[None] :
identifier[compiled_gene_reaction_rules] ={ identifier[r] : identifier[parse_gpr] ( identifier[r] . identifier[gene_reaction_rule] )[ literal[int] ]
keyword[for] identifier[r] keyword[in] identifier[potential_reactions] }
keyword[return] [ identifier[r] keyword[for] identifier[r] keyword[in] identifier[potential_reactions]
keyword[if] keyword[not] identifier[eval_gpr] ( identifier[compiled_gene_reaction_rules] [ identifier[r] ], identifier[gene_set] )] | def find_gene_knockout_reactions(cobra_model, gene_list, compiled_gene_reaction_rules=None):
"""identify reactions which will be disabled when the genes are knocked out
cobra_model: :class:`~cobra.core.Model.Model`
gene_list: iterable of :class:`~cobra.core.Gene.Gene`
compiled_gene_reaction_rules: dict of {reaction_id: compiled_string}
If provided, this gives pre-compiled gene_reaction_rule strings.
The compiled rule strings can be evaluated much faster. If a rule
is not provided, the regular expression evaluation will be used.
Because not all gene_reaction_rule strings can be evaluated, this
dict must exclude any rules which can not be used with eval.
"""
potential_reactions = set()
for gene in gene_list:
if isinstance(gene, string_types):
gene = cobra_model.genes.get_by_id(gene) # depends on [control=['if'], data=[]]
potential_reactions.update(gene._reaction) # depends on [control=['for'], data=['gene']]
gene_set = {str(i) for i in gene_list}
if compiled_gene_reaction_rules is None:
compiled_gene_reaction_rules = {r: parse_gpr(r.gene_reaction_rule)[0] for r in potential_reactions} # depends on [control=['if'], data=['compiled_gene_reaction_rules']]
return [r for r in potential_reactions if not eval_gpr(compiled_gene_reaction_rules[r], gene_set)] |
def export_envars(self, env):
    """Export every environment variable contained in the mapping ``env``."""
    for name in env:
        self.export_envar(name, env[name])
constant[Export the environment variables contained in the dict env.]
for taget[tuple[[<ast.Name object at 0x7da20c992a40>, <ast.Name object at 0x7da20c991840>]]] in starred[call[name[env].items, parameter[]]] begin[:]
call[name[self].export_envar, parameter[name[k], name[v]]] | keyword[def] identifier[export_envars] ( identifier[self] , identifier[env] ):
literal[string]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[env] . identifier[items] ():
identifier[self] . identifier[export_envar] ( identifier[k] , identifier[v] ) | def export_envars(self, env):
"""Export the environment variables contained in the dict env."""
for (k, v) in env.items():
self.export_envar(k, v) # depends on [control=['for'], data=[]] |
def calcSphereCoordinates(pos, radius, rot):
    """
    Calculate Cartesian coordinates from spherical coordinates.

    ``pos`` is an (x, y, z) offset added to the result and ``radius`` is the
    sphere radius. ``rot`` is a 2-tuple of angles in degrees: the first
    element (offset internally by 90 degrees, which is essential for the
    game's convention) drives the vertical/polar direction, ranging from
    -90 (straight down) to 90 (straight up); the second element is the
    azimuth, 0-360 degrees with 0 pointing along the x-axis.

    NOTE(review): the original docstring labelled the tuple
    ``(azimuth, polar)``, which is the reverse of how the two values are
    actually consumed below -- confirm against callers.

    Based on the standard spherical-to-Cartesian conversion, see
    https://en.wikipedia.org/wiki/Spherical_coordinate_system
    """
    # Shift the polar input by 90 degrees so 0 means "horizontal", then
    # convert both angles to radians.
    inclination = math.radians(rot[0] + 90)
    azimuth = math.radians(rot[1])
    sin_inc = math.sin(inclination)
    x = pos[0] + radius * sin_inc * math.cos(azimuth)
    y = pos[1] + radius * sin_inc * math.sin(azimuth)
    z = pos[2] + radius * math.cos(inclination)
    return x, y, z
constant[
Calculates the Cartesian coordinates from spherical coordinates.
``pos`` is a simple offset to offset the result with.
``radius`` is the radius of the input.
``rot`` is a 2-tuple of ``(azimuth,polar)`` angles.
Angles are given in degrees. Most directions in this game use the same convention.
The azimuth ranges from 0 to 360 degrees with 0 degrees pointing directly to the x-axis.
The polar angle ranges from -90 to 90 with -90 degrees pointing straight down and 90 degrees straight up.
A visualization of the angles required is given in the source code of this function.
]
<ast.Tuple object at 0x7da1b016f910> assign[=] name[rot]
<ast.AugAssign object at 0x7da1b016f460>
<ast.Tuple object at 0x7da1b016fd60> assign[=] tuple[[<ast.Call object at 0x7da1b016d270>, <ast.Call object at 0x7da1b016d780>]]
variable[x] assign[=] binary_operation[call[name[pos]][constant[0]] + binary_operation[binary_operation[name[radius] * call[name[math].sin, parameter[name[phi]]]] * call[name[math].cos, parameter[name[theta]]]]]
variable[y] assign[=] binary_operation[call[name[pos]][constant[1]] + binary_operation[binary_operation[name[radius] * call[name[math].sin, parameter[name[phi]]]] * call[name[math].sin, parameter[name[theta]]]]]
variable[z] assign[=] binary_operation[call[name[pos]][constant[2]] + binary_operation[name[radius] * call[name[math].cos, parameter[name[phi]]]]]
return[tuple[[<ast.Name object at 0x7da1b016fd00>, <ast.Name object at 0x7da1b016c880>, <ast.Name object at 0x7da1b016e770>]]] | keyword[def] identifier[calcSphereCoordinates] ( identifier[pos] , identifier[radius] , identifier[rot] ):
literal[string]
identifier[phi] , identifier[theta] = identifier[rot]
identifier[phi] += literal[int]
identifier[phi] , identifier[theta] = identifier[math] . identifier[radians] ( identifier[phi] ), identifier[math] . identifier[radians] ( identifier[theta] )
identifier[x] = identifier[pos] [ literal[int] ]+ identifier[radius] * identifier[math] . identifier[sin] ( identifier[phi] )* identifier[math] . identifier[cos] ( identifier[theta] )
identifier[y] = identifier[pos] [ literal[int] ]+ identifier[radius] * identifier[math] . identifier[sin] ( identifier[phi] )* identifier[math] . identifier[sin] ( identifier[theta] )
identifier[z] = identifier[pos] [ literal[int] ]+ identifier[radius] * identifier[math] . identifier[cos] ( identifier[phi] )
keyword[return] identifier[x] , identifier[y] , identifier[z] | def calcSphereCoordinates(pos, radius, rot):
"""
Calculates the Cartesian coordinates from spherical coordinates.
``pos`` is a simple offset to offset the result with.
``radius`` is the radius of the input.
``rot`` is a 2-tuple of ``(azimuth,polar)`` angles.
Angles are given in degrees. Most directions in this game use the same convention.
The azimuth ranges from 0 to 360 degrees with 0 degrees pointing directly to the x-axis.
The polar angle ranges from -90 to 90 with -90 degrees pointing straight down and 90 degrees straight up.
A visualization of the angles required is given in the source code of this function.
"""
# Input angles should be in degrees, as in the rest of the game
# E.g. phi=inclination and theta=azimuth
# phi is yrad
# Look from above
#(Z goes positive towards you)
#
# Y- Z-
# | /
# | / "far"
# |/
# X- ------+-------> X+
# /| yrad |
# "near" / |<-----+
# / | "polar angle"
# Z+ Y+
# theta is xrad
# Look from above
#(Z goes positive towards you)
#
# Y- Z-
# | /
# | / "far"
# |/
# X- ------+-------> X+
# /| xrad |
# "near" /<-------+
# / | "azimuth angle"
# Z+ Y+
# Based on http://stackoverflow.com/questions/39647735/calculation-of-spherical-coordinates
# https://en.wikipedia.org/wiki/Spherical_coordinate_system
# http://stackoverflow.com/questions/25404613/converting-spherical-coordinates-to-cartesian?rq=1
(phi, theta) = rot
phi += 90 # very important, took me four days of head-scratching to figure out
(phi, theta) = (math.radians(phi), math.radians(theta))
x = pos[0] + radius * math.sin(phi) * math.cos(theta)
y = pos[1] + radius * math.sin(phi) * math.sin(theta)
z = pos[2] + radius * math.cos(phi)
return (x, y, z) |
def get_events(self):
    """Return a copy of the satellite's pending events and clear the queue.

    :return: A copy of the events list
    :rtype: list
    """
    snapshot = copy.copy(self.events)
    # Drain the shared list in place so any other holder of the same
    # reference sees the queue emptied.
    del self.events[:]
    return snapshot
constant[Get event list from satellite
:return: A copy of the events list
:rtype: list
]
variable[res] assign[=] call[name[copy].copy, parameter[name[self].events]]
<ast.Delete object at 0x7da207f00460>
return[name[res]] | keyword[def] identifier[get_events] ( identifier[self] ):
literal[string]
identifier[res] = identifier[copy] . identifier[copy] ( identifier[self] . identifier[events] )
keyword[del] identifier[self] . identifier[events] [:]
keyword[return] identifier[res] | def get_events(self):
"""Get event list from satellite
:return: A copy of the events list
:rtype: list
"""
res = copy.copy(self.events)
del self.events[:]
return res |
def fit(self, data, method='kmeans', **kwargs):
"""
fit classifiers from large dataset.
Parameters
----------
data : dict
A dict of data for clustering. Must contain
items with the same name as analytes used for
clustering.
method : str
A string defining the clustering method used. Can be:
* 'kmeans' : K-Means clustering algorithm
* 'meanshift' : Meanshift algorithm
n_clusters : int
*K-Means only*. The numebr of clusters to identify
bandwidth : float
*Meanshift only.*
The bandwidth value used during clustering.
If none, determined automatically. Note:
the data are scaled before clutering, so
this is not in the same units as the data.
bin_seeding : bool
*Meanshift only.*
Whether or not to use 'bin_seeding'. See
documentation for `sklearn.cluster.MeanShift`.
**kwargs :
passed to `sklearn.cluster.MeanShift`.
Returns
-------
list
"""
self.method = method
ds_fit = self.fitting_data(data)
mdict = {'kmeans': self.fit_kmeans,
'meanshift': self.fit_meanshift}
clust = mdict[method]
self.classifier = clust(data=ds_fit, **kwargs)
# sort cluster centers by value of first column, to avoid random variation.
c0 = self.classifier.cluster_centers_.T[self.sort_by]
self.classifier.cluster_centers_ = self.classifier.cluster_centers_[np.argsort(c0)]
# recalculate the labels, so it's consistent with cluster centers
self.classifier.labels_ = self.classifier.predict(ds_fit)
self.classifier.ulabels_ = np.unique(self.classifier.labels_)
return | def function[fit, parameter[self, data, method]]:
constant[
fit classifiers from large dataset.
Parameters
----------
data : dict
A dict of data for clustering. Must contain
items with the same name as analytes used for
clustering.
method : str
A string defining the clustering method used. Can be:
* 'kmeans' : K-Means clustering algorithm
* 'meanshift' : Meanshift algorithm
n_clusters : int
*K-Means only*. The numebr of clusters to identify
bandwidth : float
*Meanshift only.*
The bandwidth value used during clustering.
If none, determined automatically. Note:
the data are scaled before clutering, so
this is not in the same units as the data.
bin_seeding : bool
*Meanshift only.*
Whether or not to use 'bin_seeding'. See
documentation for `sklearn.cluster.MeanShift`.
**kwargs :
passed to `sklearn.cluster.MeanShift`.
Returns
-------
list
]
name[self].method assign[=] name[method]
variable[ds_fit] assign[=] call[name[self].fitting_data, parameter[name[data]]]
variable[mdict] assign[=] dictionary[[<ast.Constant object at 0x7da1b0211210>, <ast.Constant object at 0x7da1b0211000>], [<ast.Attribute object at 0x7da1b0210250>, <ast.Attribute object at 0x7da1b0211ab0>]]
variable[clust] assign[=] call[name[mdict]][name[method]]
name[self].classifier assign[=] call[name[clust], parameter[]]
variable[c0] assign[=] call[name[self].classifier.cluster_centers_.T][name[self].sort_by]
name[self].classifier.cluster_centers_ assign[=] call[name[self].classifier.cluster_centers_][call[name[np].argsort, parameter[name[c0]]]]
name[self].classifier.labels_ assign[=] call[name[self].classifier.predict, parameter[name[ds_fit]]]
name[self].classifier.ulabels_ assign[=] call[name[np].unique, parameter[name[self].classifier.labels_]]
return[None] | keyword[def] identifier[fit] ( identifier[self] , identifier[data] , identifier[method] = literal[string] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[method] = identifier[method]
identifier[ds_fit] = identifier[self] . identifier[fitting_data] ( identifier[data] )
identifier[mdict] ={ literal[string] : identifier[self] . identifier[fit_kmeans] ,
literal[string] : identifier[self] . identifier[fit_meanshift] }
identifier[clust] = identifier[mdict] [ identifier[method] ]
identifier[self] . identifier[classifier] = identifier[clust] ( identifier[data] = identifier[ds_fit] ,** identifier[kwargs] )
identifier[c0] = identifier[self] . identifier[classifier] . identifier[cluster_centers_] . identifier[T] [ identifier[self] . identifier[sort_by] ]
identifier[self] . identifier[classifier] . identifier[cluster_centers_] = identifier[self] . identifier[classifier] . identifier[cluster_centers_] [ identifier[np] . identifier[argsort] ( identifier[c0] )]
identifier[self] . identifier[classifier] . identifier[labels_] = identifier[self] . identifier[classifier] . identifier[predict] ( identifier[ds_fit] )
identifier[self] . identifier[classifier] . identifier[ulabels_] = identifier[np] . identifier[unique] ( identifier[self] . identifier[classifier] . identifier[labels_] )
keyword[return] | def fit(self, data, method='kmeans', **kwargs):
"""
fit classifiers from large dataset.
Parameters
----------
data : dict
A dict of data for clustering. Must contain
items with the same name as analytes used for
clustering.
method : str
A string defining the clustering method used. Can be:
* 'kmeans' : K-Means clustering algorithm
* 'meanshift' : Meanshift algorithm
n_clusters : int
*K-Means only*. The numebr of clusters to identify
bandwidth : float
*Meanshift only.*
The bandwidth value used during clustering.
If none, determined automatically. Note:
the data are scaled before clutering, so
this is not in the same units as the data.
bin_seeding : bool
*Meanshift only.*
Whether or not to use 'bin_seeding'. See
documentation for `sklearn.cluster.MeanShift`.
**kwargs :
passed to `sklearn.cluster.MeanShift`.
Returns
-------
list
"""
self.method = method
ds_fit = self.fitting_data(data)
mdict = {'kmeans': self.fit_kmeans, 'meanshift': self.fit_meanshift}
clust = mdict[method]
self.classifier = clust(data=ds_fit, **kwargs)
# sort cluster centers by value of first column, to avoid random variation.
c0 = self.classifier.cluster_centers_.T[self.sort_by]
self.classifier.cluster_centers_ = self.classifier.cluster_centers_[np.argsort(c0)]
# recalculate the labels, so it's consistent with cluster centers
self.classifier.labels_ = self.classifier.predict(ds_fit)
self.classifier.ulabels_ = np.unique(self.classifier.labels_)
return |
def set_azure_web_info(self, resource_group_name, website_name, credentials,
subscription_id, subscription_name, tenant_id, webapp_location):
"""
Call this method before attempting to setup continuous delivery to setup the azure settings
:param resource_group_name:
:param website_name:
:param credentials:
:param subscription_id:
:param subscription_name:
:param tenant_id:
:param webapp_location:
:return:
"""
self._azure_info.resource_group_name = resource_group_name
self._azure_info.website_name = website_name
self._azure_info.credentials = credentials
self._azure_info.subscription_id = subscription_id
self._azure_info.subscription_name = subscription_name
self._azure_info.tenant_id = tenant_id
self._azure_info.webapp_location = webapp_location | def function[set_azure_web_info, parameter[self, resource_group_name, website_name, credentials, subscription_id, subscription_name, tenant_id, webapp_location]]:
constant[
Call this method before attempting to setup continuous delivery to setup the azure settings
:param resource_group_name:
:param website_name:
:param credentials:
:param subscription_id:
:param subscription_name:
:param tenant_id:
:param webapp_location:
:return:
]
name[self]._azure_info.resource_group_name assign[=] name[resource_group_name]
name[self]._azure_info.website_name assign[=] name[website_name]
name[self]._azure_info.credentials assign[=] name[credentials]
name[self]._azure_info.subscription_id assign[=] name[subscription_id]
name[self]._azure_info.subscription_name assign[=] name[subscription_name]
name[self]._azure_info.tenant_id assign[=] name[tenant_id]
name[self]._azure_info.webapp_location assign[=] name[webapp_location] | keyword[def] identifier[set_azure_web_info] ( identifier[self] , identifier[resource_group_name] , identifier[website_name] , identifier[credentials] ,
identifier[subscription_id] , identifier[subscription_name] , identifier[tenant_id] , identifier[webapp_location] ):
literal[string]
identifier[self] . identifier[_azure_info] . identifier[resource_group_name] = identifier[resource_group_name]
identifier[self] . identifier[_azure_info] . identifier[website_name] = identifier[website_name]
identifier[self] . identifier[_azure_info] . identifier[credentials] = identifier[credentials]
identifier[self] . identifier[_azure_info] . identifier[subscription_id] = identifier[subscription_id]
identifier[self] . identifier[_azure_info] . identifier[subscription_name] = identifier[subscription_name]
identifier[self] . identifier[_azure_info] . identifier[tenant_id] = identifier[tenant_id]
identifier[self] . identifier[_azure_info] . identifier[webapp_location] = identifier[webapp_location] | def set_azure_web_info(self, resource_group_name, website_name, credentials, subscription_id, subscription_name, tenant_id, webapp_location):
"""
Call this method before attempting to setup continuous delivery to setup the azure settings
:param resource_group_name:
:param website_name:
:param credentials:
:param subscription_id:
:param subscription_name:
:param tenant_id:
:param webapp_location:
:return:
"""
self._azure_info.resource_group_name = resource_group_name
self._azure_info.website_name = website_name
self._azure_info.credentials = credentials
self._azure_info.subscription_id = subscription_id
self._azure_info.subscription_name = subscription_name
self._azure_info.tenant_id = tenant_id
self._azure_info.webapp_location = webapp_location |
def validate(self):
'''Check if ofiles and ifiles match signatures recorded in md5file'''
if 'TARGET' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file('TARGET', f'Validating {self.sig_id}')
#
# file not exist?
sig_files = self.input_files._targets + self.output_files._targets + \
self.dependent_files._targets
for x in sig_files:
if not x.target_exists('any'):
return f'Missing target {x}'
#
sig = request_answer_from_controller(['step_sig', 'get', self.sig_id])
if not sig:
return f"No signature found for {self.sig_id}"
return super(RuntimeInfo, self).validate(sig) | def function[validate, parameter[self]]:
constant[Check if ofiles and ifiles match signatures recorded in md5file]
if <ast.BoolOp object at 0x7da1b1237970> begin[:]
call[name[env].log_to_file, parameter[constant[TARGET], <ast.JoinedStr object at 0x7da1b1237d90>]]
variable[sig_files] assign[=] binary_operation[binary_operation[name[self].input_files._targets + name[self].output_files._targets] + name[self].dependent_files._targets]
for taget[name[x]] in starred[name[sig_files]] begin[:]
if <ast.UnaryOp object at 0x7da1b1236cb0> begin[:]
return[<ast.JoinedStr object at 0x7da1b1236e60>]
variable[sig] assign[=] call[name[request_answer_from_controller], parameter[list[[<ast.Constant object at 0x7da1b2345210>, <ast.Constant object at 0x7da1b2344b20>, <ast.Attribute object at 0x7da1b2344ee0>]]]]
if <ast.UnaryOp object at 0x7da1b2346b60> begin[:]
return[<ast.JoinedStr object at 0x7da1b2347340>]
return[call[call[name[super], parameter[name[RuntimeInfo], name[self]]].validate, parameter[name[sig]]]] | keyword[def] identifier[validate] ( identifier[self] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[env] . identifier[config] [ literal[string] ] keyword[or] literal[string] keyword[in] identifier[env] . identifier[config] [ literal[string] ]:
identifier[env] . identifier[log_to_file] ( literal[string] , literal[string] )
identifier[sig_files] = identifier[self] . identifier[input_files] . identifier[_targets] + identifier[self] . identifier[output_files] . identifier[_targets] + identifier[self] . identifier[dependent_files] . identifier[_targets]
keyword[for] identifier[x] keyword[in] identifier[sig_files] :
keyword[if] keyword[not] identifier[x] . identifier[target_exists] ( literal[string] ):
keyword[return] literal[string]
identifier[sig] = identifier[request_answer_from_controller] ([ literal[string] , literal[string] , identifier[self] . identifier[sig_id] ])
keyword[if] keyword[not] identifier[sig] :
keyword[return] literal[string]
keyword[return] identifier[super] ( identifier[RuntimeInfo] , identifier[self] ). identifier[validate] ( identifier[sig] ) | def validate(self):
"""Check if ofiles and ifiles match signatures recorded in md5file"""
if 'TARGET' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file('TARGET', f'Validating {self.sig_id}') # depends on [control=['if'], data=[]]
#
# file not exist?
sig_files = self.input_files._targets + self.output_files._targets + self.dependent_files._targets
for x in sig_files:
if not x.target_exists('any'):
return f'Missing target {x}' # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['x']]
#
sig = request_answer_from_controller(['step_sig', 'get', self.sig_id])
if not sig:
return f'No signature found for {self.sig_id}' # depends on [control=['if'], data=[]]
return super(RuntimeInfo, self).validate(sig) |
def _prettify_response(self, response):
"""
Prettify the HTML response.
:param response: A Flask Response object.
"""
if response.content_type == 'text/html; charset=utf-8':
ugly = response.get_data(as_text=True)
soup = BeautifulSoup(ugly, 'html.parser')
pretty = soup.prettify(formatter='html')
response.direct_passthrough = False
response.set_data(pretty)
return response | def function[_prettify_response, parameter[self, response]]:
constant[
Prettify the HTML response.
:param response: A Flask Response object.
]
if compare[name[response].content_type equal[==] constant[text/html; charset=utf-8]] begin[:]
variable[ugly] assign[=] call[name[response].get_data, parameter[]]
variable[soup] assign[=] call[name[BeautifulSoup], parameter[name[ugly], constant[html.parser]]]
variable[pretty] assign[=] call[name[soup].prettify, parameter[]]
name[response].direct_passthrough assign[=] constant[False]
call[name[response].set_data, parameter[name[pretty]]]
return[name[response]] | keyword[def] identifier[_prettify_response] ( identifier[self] , identifier[response] ):
literal[string]
keyword[if] identifier[response] . identifier[content_type] == literal[string] :
identifier[ugly] = identifier[response] . identifier[get_data] ( identifier[as_text] = keyword[True] )
identifier[soup] = identifier[BeautifulSoup] ( identifier[ugly] , literal[string] )
identifier[pretty] = identifier[soup] . identifier[prettify] ( identifier[formatter] = literal[string] )
identifier[response] . identifier[direct_passthrough] = keyword[False]
identifier[response] . identifier[set_data] ( identifier[pretty] )
keyword[return] identifier[response] | def _prettify_response(self, response):
"""
Prettify the HTML response.
:param response: A Flask Response object.
"""
if response.content_type == 'text/html; charset=utf-8':
ugly = response.get_data(as_text=True)
soup = BeautifulSoup(ugly, 'html.parser')
pretty = soup.prettify(formatter='html')
response.direct_passthrough = False
response.set_data(pretty) # depends on [control=['if'], data=[]]
return response |
def refresh(self, app, client_id, ttl):
"""Query the API to update this instance.
:raise APIError: When Imgur responds with errors or unexpected data.
:param sphinx.application.Sphinx app: Sphinx application object.
:param str client_id: Imgur API client ID to use. https://api.imgur.com/oauth2
:param int ttl: Number of seconds before this is considered out of date.
:return: self._parse() return value.
:rtype: list
"""
return super(Album, self).refresh(app, client_id, ttl) or list() | def function[refresh, parameter[self, app, client_id, ttl]]:
constant[Query the API to update this instance.
:raise APIError: When Imgur responds with errors or unexpected data.
:param sphinx.application.Sphinx app: Sphinx application object.
:param str client_id: Imgur API client ID to use. https://api.imgur.com/oauth2
:param int ttl: Number of seconds before this is considered out of date.
:return: self._parse() return value.
:rtype: list
]
return[<ast.BoolOp object at 0x7da1b23463e0>] | keyword[def] identifier[refresh] ( identifier[self] , identifier[app] , identifier[client_id] , identifier[ttl] ):
literal[string]
keyword[return] identifier[super] ( identifier[Album] , identifier[self] ). identifier[refresh] ( identifier[app] , identifier[client_id] , identifier[ttl] ) keyword[or] identifier[list] () | def refresh(self, app, client_id, ttl):
"""Query the API to update this instance.
:raise APIError: When Imgur responds with errors or unexpected data.
:param sphinx.application.Sphinx app: Sphinx application object.
:param str client_id: Imgur API client ID to use. https://api.imgur.com/oauth2
:param int ttl: Number of seconds before this is considered out of date.
:return: self._parse() return value.
:rtype: list
"""
return super(Album, self).refresh(app, client_id, ttl) or list() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.