code
stringlengths 75
104k
| code_sememe
stringlengths 47
309k
| token_type
stringlengths 215
214k
| code_dependency
stringlengths 75
155k
|
|---|---|---|---|
def get_data_home(path=None):
    """
    Return the path of the Yellowbrick data directory. This folder is used by
    dataset loaders to avoid downloading data several times.

    By default, this folder is colocated with the code in the install directory
    so that data shipped with the package can be easily located. Alternatively
    it can be set by the YELLOWBRICK_DATA environment variable, or
    programmatically by giving a folder path. Note that the '~' symbol is
    expanded to the user home directory, and environment variables are also
    expanded when resolving the path.

    Parameters
    ----------
    path : str, optional
        Explicit data directory. When None, the YELLOWBRICK_DATA environment
        variable is consulted, falling back to the FIXTURES default.

    Returns
    -------
    str
        The resolved data directory path; created on disk if missing.
    """
    if path is None:
        path = os.environ.get('YELLOWBRICK_DATA', FIXTURES)

    path = os.path.expanduser(path)
    path = os.path.expandvars(path)

    if not os.path.exists(path):
        try:
            os.makedirs(path)
        except OSError:
            # Another process may have created the directory between the
            # exists() check and makedirs() (TOCTOU race); only re-raise
            # if the directory genuinely could not be created.
            if not os.path.isdir(path):
                raise

    return path
|
def function[get_data_home, parameter[path]]:
constant[
Return the path of the Yellowbrick data directory. This folder is used by
dataset loaders to avoid downloading data several times.
By default, this folder is colocated with the code in the install directory
so that data shipped with the package can be easily located. Alternatively
it can be set by the YELLOWBRICK_DATA environment variable, or
programmatically by giving a folder path. Note that the '~' symbol is
expanded to the user home directory, and environment variables are also
expanded when resolving the path.
]
if compare[name[path] is constant[None]] begin[:]
variable[path] assign[=] call[name[os].environ.get, parameter[constant[YELLOWBRICK_DATA], name[FIXTURES]]]
variable[path] assign[=] call[name[os].path.expanduser, parameter[name[path]]]
variable[path] assign[=] call[name[os].path.expandvars, parameter[name[path]]]
if <ast.UnaryOp object at 0x7da20c7cba90> begin[:]
call[name[os].makedirs, parameter[name[path]]]
return[name[path]]
|
keyword[def] identifier[get_data_home] ( identifier[path] = keyword[None] ):
literal[string]
keyword[if] identifier[path] keyword[is] keyword[None] :
identifier[path] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] , identifier[FIXTURES] )
identifier[path] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[path] )
identifier[path] = identifier[os] . identifier[path] . identifier[expandvars] ( identifier[path] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ):
identifier[os] . identifier[makedirs] ( identifier[path] )
keyword[return] identifier[path]
|
def get_data_home(path=None):
"""
Return the path of the Yellowbrick data directory. This folder is used by
dataset loaders to avoid downloading data several times.
By default, this folder is colocated with the code in the install directory
so that data shipped with the package can be easily located. Alternatively
it can be set by the YELLOWBRICK_DATA environment variable, or
programmatically by giving a folder path. Note that the '~' symbol is
expanded to the user home directory, and environment variables are also
expanded when resolving the path.
"""
if path is None:
path = os.environ.get('YELLOWBRICK_DATA', FIXTURES) # depends on [control=['if'], data=['path']]
path = os.path.expanduser(path)
path = os.path.expandvars(path)
if not os.path.exists(path):
os.makedirs(path) # depends on [control=['if'], data=[]]
return path
|
def addSkip(self, test, reason):
    """Catch skipped tests in Python 2.7 and above.

    Though ``addSkip()`` is deprecated in the nose plugin API, it is very
    much not deprecated as a Python 2.7 ``TestResult`` method. In Python
    2.7, this will get called instead of ``addError()`` for skips.

    :arg reason: Text describing why the test was skipped
    """
    self._recordAndPrintHeadline(test, SkipTest, reason)

    # Python 2.7 users get a little bonus: the reason the test was skipped.
    if isinstance(reason, Exception):
        message = getattr(reason, 'message', None)
        if message:
            reason = message
        else:
            reason = getattr(reason, 'args')[0]

    if reason and self._options.show_advisories:
        with self.bar.dodging():
            self.stream.writeln(reason)
|
def function[addSkip, parameter[self, test, reason]]:
constant[Catch skipped tests in Python 2.7 and above.
Though ``addSkip()`` is deprecated in the nose plugin API, it is very
much not deprecated as a Python 2.7 ``TestResult`` method. In Python
2.7, this will get called instead of ``addError()`` for skips.
:arg reason: Text describing why the test was skipped
]
call[name[self]._recordAndPrintHeadline, parameter[name[test], name[SkipTest], name[reason]]]
if call[name[isinstance], parameter[name[reason], name[Exception]]] begin[:]
variable[reason] assign[=] <ast.BoolOp object at 0x7da1b282be50>
if <ast.BoolOp object at 0x7da18eb57b80> begin[:]
with call[name[self].bar.dodging, parameter[]] begin[:]
call[name[self].stream.writeln, parameter[name[reason]]]
|
keyword[def] identifier[addSkip] ( identifier[self] , identifier[test] , identifier[reason] ):
literal[string]
identifier[self] . identifier[_recordAndPrintHeadline] ( identifier[test] , identifier[SkipTest] , identifier[reason] )
keyword[if] identifier[isinstance] ( identifier[reason] , identifier[Exception] ):
identifier[reason] = identifier[getattr] ( identifier[reason] , literal[string] , keyword[None] ) keyword[or] identifier[getattr] (
identifier[reason] , literal[string] )[ literal[int] ]
keyword[if] identifier[reason] keyword[and] identifier[self] . identifier[_options] . identifier[show_advisories] :
keyword[with] identifier[self] . identifier[bar] . identifier[dodging] ():
identifier[self] . identifier[stream] . identifier[writeln] ( identifier[reason] )
|
def addSkip(self, test, reason):
"""Catch skipped tests in Python 2.7 and above.
Though ``addSkip()`` is deprecated in the nose plugin API, it is very
much not deprecated as a Python 2.7 ``TestResult`` method. In Python
2.7, this will get called instead of ``addError()`` for skips.
:arg reason: Text describing why the test was skipped
"""
self._recordAndPrintHeadline(test, SkipTest, reason)
# Python 2.7 users get a little bonus: the reason the test was skipped.
if isinstance(reason, Exception):
reason = getattr(reason, 'message', None) or getattr(reason, 'args')[0] # depends on [control=['if'], data=[]]
if reason and self._options.show_advisories:
with self.bar.dodging():
self.stream.writeln(reason) # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
|
def load(self, dump_fn='', prep_only=0, force_upload=0, from_local=0, name=None, site=None, dest_dir=None, force_host=None):
    """
    Restores a database snapshot onto the target database server.

    If prep_only=1, commands for preparing the load will be generated,
    but not the command to finally load the snapshot.

    :param dump_fn: Filename template for the snapshot to restore.
    :param prep_only: When truthy, drop/recreate the database, roles and
        extensions but skip the final load command.
    :param force_upload: Accepted for interface compatibility; not read in
        this method body (presumably used by overrides -- TODO confirm).
    :param from_local: Coerced to int but not otherwise read in this method
        body (presumably used by overrides -- TODO confirm).
    :param name: Database settings name forwarded to the renderer.
    :param site: Site name forwarded to the renderer.
    :param dest_dir: Directory used when deriving the default dump filename.
    :param force_host: When set, overrides the database host used by every
        subsequent dropdb/psql/createlang command.
    """
    r = self.database_renderer(name=name, site=site)
    # Render the snapshot filename.
    r.env.dump_fn = self.get_default_db_fn(fn_template=dump_fn, dest_dir=dest_dir)
    from_local = int(from_local)
    prep_only = int(prep_only)
    missing_local_dump_error = r.format(
        "Database dump file {dump_fn} does not exist."
    )
    # Copy snapshot file to target.
    if self.is_local:
        # Local target: the snapshot is already reachable at the given path.
        r.env.remote_dump_fn = dump_fn
    else:
        # Remote target: stage the snapshot under /tmp on the remote host.
        r.env.remote_dump_fn = '/tmp/' + os.path.split(r.env.dump_fn)[-1]
    if not prep_only and not self.is_local:
        if not self.dryrun:
            # Fail fast if the local snapshot is missing before uploading.
            assert os.path.isfile(r.env.dump_fn), missing_local_dump_error
        #r.pc('Uploading PostgreSQL database snapshot...')
        # r.put(
        #     local_path=r.env.dump_fn,
        #     remote_path=r.env.remote_dump_fn)
        #r.local('rsync -rvz --progress --no-p --no-g '
        #'--rsh "ssh -o StrictHostKeyChecking=no -i {key_filename}" '
        #'{dump_fn} {user}@{host_string}:{remote_dump_fn}')
        self.upload_snapshot(name=name, site=site, local_dump_fn=r.env.dump_fn, remote_dump_fn=r.env.remote_dump_fn)
    if self.is_local and not prep_only and not self.dryrun:
        assert os.path.isfile(r.env.dump_fn), missing_local_dump_error
    if force_host:
        r.env.db_host = force_host
    # Drop and recreate the target database. warn_only tolerates the drop
    # failing (e.g. the database does not exist yet on a first run).
    with settings(warn_only=True):
        r.sudo('dropdb --if-exists --no-password --user={db_root_username} --host={db_host} {db_name}', user=r.env.postgres_user)
    r.sudo('psql --no-password --user={db_root_username} --host={db_host} -c "CREATE DATABASE {db_name};"', user=r.env.postgres_user)
    with settings(warn_only=True):
        if r.env.engine == POSTGIS:
            # PostGIS databases need the spatial extensions recreated before
            # the snapshot can be loaded.
            r.sudo('psql --user={db_root_username} --no-password --dbname={db_name} --host={db_host} --command="CREATE EXTENSION postgis;"',
                user=r.env.postgres_user)
            r.sudo('psql --user={db_root_username} --no-password --dbname={db_name} --host={db_host} --command="CREATE EXTENSION postgis_topology;"',
                user=r.env.postgres_user)
    # Reassign and drop everything owned by the application user so that the
    # user itself can be dropped and recreated with a fresh password below.
    with settings(warn_only=True):
        r.sudo('psql --user={db_root_username} --host={db_host} -c "REASSIGN OWNED BY {db_user} TO {db_root_username};"', user=r.env.postgres_user)
    with settings(warn_only=True):
        r.sudo('psql --user={db_root_username} --host={db_host} -c "DROP OWNED BY {db_user} CASCADE;"', user=r.env.postgres_user)
    r.sudo('psql --user={db_root_username} --host={db_host} -c "DROP USER IF EXISTS {db_user}; '
        'CREATE USER {db_user} WITH PASSWORD \'{db_password}\'; '
        'GRANT ALL PRIVILEGES ON DATABASE {db_name} to {db_user};"', user=r.env.postgres_user)
    for createlang in r.env.createlangs:
        r.env.createlang = createlang
        # `|| true` because createlang fails harmlessly when the language is
        # already installed (or the binary is absent on newer PostgreSQL).
        r.sudo('createlang -U {db_root_username} --host={db_host} {createlang} {db_name} || true', user=r.env.postgres_user)
    if not prep_only:
        # Ignore errors needed to work around bug "ERROR: schema "public" already exists", which is thrown in 9.6 even if we use --clean and --if-exists?
        with settings(warn_only=True):
            r.sudo(r.env.load_command, user=r.env.postgres_user)
|
def function[load, parameter[self, dump_fn, prep_only, force_upload, from_local, name, site, dest_dir, force_host]]:
constant[
Restores a database snapshot onto the target database server.
If prep_only=1, commands for preparing the load will be generated,
but not the command to finally load the snapshot.
]
variable[r] assign[=] call[name[self].database_renderer, parameter[]]
name[r].env.dump_fn assign[=] call[name[self].get_default_db_fn, parameter[]]
variable[from_local] assign[=] call[name[int], parameter[name[from_local]]]
variable[prep_only] assign[=] call[name[int], parameter[name[prep_only]]]
variable[missing_local_dump_error] assign[=] call[name[r].format, parameter[constant[Database dump file {dump_fn} does not exist.]]]
if name[self].is_local begin[:]
name[r].env.remote_dump_fn assign[=] name[dump_fn]
if <ast.BoolOp object at 0x7da1b00b6530> begin[:]
if <ast.UnaryOp object at 0x7da1b00b5cf0> begin[:]
assert[call[name[os].path.isfile, parameter[name[r].env.dump_fn]]]
call[name[self].upload_snapshot, parameter[]]
if <ast.BoolOp object at 0x7da1b008ab00> begin[:]
assert[call[name[os].path.isfile, parameter[name[r].env.dump_fn]]]
if name[force_host] begin[:]
name[r].env.db_host assign[=] name[force_host]
with call[name[settings], parameter[]] begin[:]
call[name[r].sudo, parameter[constant[dropdb --if-exists --no-password --user={db_root_username} --host={db_host} {db_name}]]]
call[name[r].sudo, parameter[constant[psql --no-password --user={db_root_username} --host={db_host} -c "CREATE DATABASE {db_name};"]]]
with call[name[settings], parameter[]] begin[:]
if compare[name[r].env.engine equal[==] name[POSTGIS]] begin[:]
call[name[r].sudo, parameter[constant[psql --user={db_root_username} --no-password --dbname={db_name} --host={db_host} --command="CREATE EXTENSION postgis;"]]]
call[name[r].sudo, parameter[constant[psql --user={db_root_username} --no-password --dbname={db_name} --host={db_host} --command="CREATE EXTENSION postgis_topology;"]]]
with call[name[settings], parameter[]] begin[:]
call[name[r].sudo, parameter[constant[psql --user={db_root_username} --host={db_host} -c "REASSIGN OWNED BY {db_user} TO {db_root_username};"]]]
with call[name[settings], parameter[]] begin[:]
call[name[r].sudo, parameter[constant[psql --user={db_root_username} --host={db_host} -c "DROP OWNED BY {db_user} CASCADE;"]]]
call[name[r].sudo, parameter[constant[psql --user={db_root_username} --host={db_host} -c "DROP USER IF EXISTS {db_user}; CREATE USER {db_user} WITH PASSWORD '{db_password}'; GRANT ALL PRIVILEGES ON DATABASE {db_name} to {db_user};"]]]
for taget[name[createlang]] in starred[name[r].env.createlangs] begin[:]
name[r].env.createlang assign[=] name[createlang]
call[name[r].sudo, parameter[constant[createlang -U {db_root_username} --host={db_host} {createlang} {db_name} || true]]]
if <ast.UnaryOp object at 0x7da1b0089660> begin[:]
with call[name[settings], parameter[]] begin[:]
call[name[r].sudo, parameter[name[r].env.load_command]]
|
keyword[def] identifier[load] ( identifier[self] , identifier[dump_fn] = literal[string] , identifier[prep_only] = literal[int] , identifier[force_upload] = literal[int] , identifier[from_local] = literal[int] , identifier[name] = keyword[None] , identifier[site] = keyword[None] , identifier[dest_dir] = keyword[None] , identifier[force_host] = keyword[None] ):
literal[string]
identifier[r] = identifier[self] . identifier[database_renderer] ( identifier[name] = identifier[name] , identifier[site] = identifier[site] )
identifier[r] . identifier[env] . identifier[dump_fn] = identifier[self] . identifier[get_default_db_fn] ( identifier[fn_template] = identifier[dump_fn] , identifier[dest_dir] = identifier[dest_dir] )
identifier[from_local] = identifier[int] ( identifier[from_local] )
identifier[prep_only] = identifier[int] ( identifier[prep_only] )
identifier[missing_local_dump_error] = identifier[r] . identifier[format] (
literal[string]
)
keyword[if] identifier[self] . identifier[is_local] :
identifier[r] . identifier[env] . identifier[remote_dump_fn] = identifier[dump_fn]
keyword[else] :
identifier[r] . identifier[env] . identifier[remote_dump_fn] = literal[string] + identifier[os] . identifier[path] . identifier[split] ( identifier[r] . identifier[env] . identifier[dump_fn] )[- literal[int] ]
keyword[if] keyword[not] identifier[prep_only] keyword[and] keyword[not] identifier[self] . identifier[is_local] :
keyword[if] keyword[not] identifier[self] . identifier[dryrun] :
keyword[assert] identifier[os] . identifier[path] . identifier[isfile] ( identifier[r] . identifier[env] . identifier[dump_fn] ), identifier[missing_local_dump_error]
identifier[self] . identifier[upload_snapshot] ( identifier[name] = identifier[name] , identifier[site] = identifier[site] , identifier[local_dump_fn] = identifier[r] . identifier[env] . identifier[dump_fn] , identifier[remote_dump_fn] = identifier[r] . identifier[env] . identifier[remote_dump_fn] )
keyword[if] identifier[self] . identifier[is_local] keyword[and] keyword[not] identifier[prep_only] keyword[and] keyword[not] identifier[self] . identifier[dryrun] :
keyword[assert] identifier[os] . identifier[path] . identifier[isfile] ( identifier[r] . identifier[env] . identifier[dump_fn] ), identifier[missing_local_dump_error]
keyword[if] identifier[force_host] :
identifier[r] . identifier[env] . identifier[db_host] = identifier[force_host]
keyword[with] identifier[settings] ( identifier[warn_only] = keyword[True] ):
identifier[r] . identifier[sudo] ( literal[string] , identifier[user] = identifier[r] . identifier[env] . identifier[postgres_user] )
identifier[r] . identifier[sudo] ( literal[string] , identifier[user] = identifier[r] . identifier[env] . identifier[postgres_user] )
keyword[with] identifier[settings] ( identifier[warn_only] = keyword[True] ):
keyword[if] identifier[r] . identifier[env] . identifier[engine] == identifier[POSTGIS] :
identifier[r] . identifier[sudo] ( literal[string] ,
identifier[user] = identifier[r] . identifier[env] . identifier[postgres_user] )
identifier[r] . identifier[sudo] ( literal[string] ,
identifier[user] = identifier[r] . identifier[env] . identifier[postgres_user] )
keyword[with] identifier[settings] ( identifier[warn_only] = keyword[True] ):
identifier[r] . identifier[sudo] ( literal[string] , identifier[user] = identifier[r] . identifier[env] . identifier[postgres_user] )
keyword[with] identifier[settings] ( identifier[warn_only] = keyword[True] ):
identifier[r] . identifier[sudo] ( literal[string] , identifier[user] = identifier[r] . identifier[env] . identifier[postgres_user] )
identifier[r] . identifier[sudo] ( literal[string]
literal[string]
literal[string] , identifier[user] = identifier[r] . identifier[env] . identifier[postgres_user] )
keyword[for] identifier[createlang] keyword[in] identifier[r] . identifier[env] . identifier[createlangs] :
identifier[r] . identifier[env] . identifier[createlang] = identifier[createlang]
identifier[r] . identifier[sudo] ( literal[string] , identifier[user] = identifier[r] . identifier[env] . identifier[postgres_user] )
keyword[if] keyword[not] identifier[prep_only] :
keyword[with] identifier[settings] ( identifier[warn_only] = keyword[True] ):
identifier[r] . identifier[sudo] ( identifier[r] . identifier[env] . identifier[load_command] , identifier[user] = identifier[r] . identifier[env] . identifier[postgres_user] )
|
def load(self, dump_fn='', prep_only=0, force_upload=0, from_local=0, name=None, site=None, dest_dir=None, force_host=None):
"""
Restores a database snapshot onto the target database server.
If prep_only=1, commands for preparing the load will be generated,
but not the command to finally load the snapshot.
"""
r = self.database_renderer(name=name, site=site)
# Render the snapshot filename.
r.env.dump_fn = self.get_default_db_fn(fn_template=dump_fn, dest_dir=dest_dir)
from_local = int(from_local)
prep_only = int(prep_only)
missing_local_dump_error = r.format('Database dump file {dump_fn} does not exist.')
# Copy snapshot file to target.
if self.is_local:
r.env.remote_dump_fn = dump_fn # depends on [control=['if'], data=[]]
else:
r.env.remote_dump_fn = '/tmp/' + os.path.split(r.env.dump_fn)[-1]
if not prep_only and (not self.is_local):
if not self.dryrun:
assert os.path.isfile(r.env.dump_fn), missing_local_dump_error # depends on [control=['if'], data=[]]
#r.pc('Uploading PostgreSQL database snapshot...')
# r.put(
# local_path=r.env.dump_fn,
# remote_path=r.env.remote_dump_fn)
#r.local('rsync -rvz --progress --no-p --no-g '
#'--rsh "ssh -o StrictHostKeyChecking=no -i {key_filename}" '
#'{dump_fn} {user}@{host_string}:{remote_dump_fn}')
self.upload_snapshot(name=name, site=site, local_dump_fn=r.env.dump_fn, remote_dump_fn=r.env.remote_dump_fn) # depends on [control=['if'], data=[]]
if self.is_local and (not prep_only) and (not self.dryrun):
assert os.path.isfile(r.env.dump_fn), missing_local_dump_error # depends on [control=['if'], data=[]]
if force_host:
r.env.db_host = force_host # depends on [control=['if'], data=[]]
with settings(warn_only=True):
r.sudo('dropdb --if-exists --no-password --user={db_root_username} --host={db_host} {db_name}', user=r.env.postgres_user) # depends on [control=['with'], data=[]]
r.sudo('psql --no-password --user={db_root_username} --host={db_host} -c "CREATE DATABASE {db_name};"', user=r.env.postgres_user)
with settings(warn_only=True):
if r.env.engine == POSTGIS:
r.sudo('psql --user={db_root_username} --no-password --dbname={db_name} --host={db_host} --command="CREATE EXTENSION postgis;"', user=r.env.postgres_user)
r.sudo('psql --user={db_root_username} --no-password --dbname={db_name} --host={db_host} --command="CREATE EXTENSION postgis_topology;"', user=r.env.postgres_user) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]]
with settings(warn_only=True):
r.sudo('psql --user={db_root_username} --host={db_host} -c "REASSIGN OWNED BY {db_user} TO {db_root_username};"', user=r.env.postgres_user) # depends on [control=['with'], data=[]]
with settings(warn_only=True):
r.sudo('psql --user={db_root_username} --host={db_host} -c "DROP OWNED BY {db_user} CASCADE;"', user=r.env.postgres_user) # depends on [control=['with'], data=[]]
r.sudo('psql --user={db_root_username} --host={db_host} -c "DROP USER IF EXISTS {db_user}; CREATE USER {db_user} WITH PASSWORD \'{db_password}\'; GRANT ALL PRIVILEGES ON DATABASE {db_name} to {db_user};"', user=r.env.postgres_user)
for createlang in r.env.createlangs:
r.env.createlang = createlang
r.sudo('createlang -U {db_root_username} --host={db_host} {createlang} {db_name} || true', user=r.env.postgres_user) # depends on [control=['for'], data=['createlang']]
if not prep_only:
# Ignore errors needed to work around bug "ERROR: schema "public" already exists", which is thrown in 9.6 even if we use --clean and --if-exists?
with settings(warn_only=True):
r.sudo(r.env.load_command, user=r.env.postgres_user) # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
|
def DICOMfile_read(self, *args, **kwargs):
    """
    Read a DICOM file and perform some initial parsing of tags.

    NB!
    For thread safety, class member variables should not be assigned
    since other threads might override/change these variables in
    mid-flight!

    :kwarg file: path of the DICOM file to read.
    :kwarg l_tagsToUse: optional explicit list of tag names to parse; when
        omitted, every tag reported by the dataset is used (PixelData is
        always excluded).
    :arg args: optionally a sequence whose first element is itself a
        sequence containing the file path (overrides the ``file`` kwarg).
    :return: dict with keys 'status', 'inputPath', 'inputFilename',
        'outputFileStem', 'd_DICOM', 'l_tagsToUse'. On a failed read,
        'status' is False and the parse-derived fields are left empty.
    """
    b_status        = False
    l_tags          = []
    l_tagsToUse     = []
    d_tagsInString  = {}
    str_file        = ""
    str_outputFile  = ""
    d_DICOM = {
        'dcm':              None,
        'd_dcm':            {},
        'strRaw':           '',
        'l_tagRaw':         [],
        'd_json':           {},
        'd_dicom':          {},
        'd_dicomSimple':    {}
    }

    for k, v in kwargs.items():
        if k == 'file':         str_file    = v
        if k == 'l_tagsToUse':  l_tags      = v

    if len(args):
        l_file      = args[0]
        str_file    = l_file[0]

    str_localFile   = os.path.basename(str_file)
    str_path        = os.path.dirname(str_file)

    try:
        d_DICOM['dcm']  = dicom.read_file(str_file)
        b_status        = True
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.
        self.dp.qprint('In directory: %s' % os.getcwd(),    comms = 'error')
        self.dp.qprint('Failed to read %s' % str_file,      comms = 'error')
        b_status        = False

    # Only parse when the read succeeded: the original code fell through
    # and crashed on dict(None) when the DICOM file could not be read.
    if b_status:
        d_DICOM['d_dcm']    = dict(d_DICOM['dcm'])
        d_DICOM['strRaw']   = str(d_DICOM['dcm'])
        d_DICOM['l_tagRaw'] = d_DICOM['dcm'].dir()
        if len(l_tags):
            l_tagsToUse     = l_tags
        else:
            l_tagsToUse     = d_DICOM['l_tagRaw']

        if 'PixelData' in l_tagsToUse:
            # The raw pixel payload is large binary data, not a text tag.
            l_tagsToUse.remove('PixelData')

        for key in l_tagsToUse:
            d_DICOM['d_dicom'][key] = d_DICOM['dcm'].data_element(key)
            try:
                d_DICOM['d_dicomSimple'][key] = getattr(d_DICOM['dcm'], key)
            except Exception:
                d_DICOM['d_dicomSimple'][key] = "no attribute"
            d_DICOM['d_json'][key]  = str(d_DICOM['d_dicomSimple'][key])

        d_tagsInString  = self.tagsInString_process(d_DICOM, self.str_outputFileStem)
        str_outputFile  = d_tagsInString['str_result']

    return {
        'status':           b_status,
        'inputPath':        str_path,
        'inputFilename':    str_localFile,
        'outputFileStem':   str_outputFile,
        'd_DICOM':          d_DICOM,
        'l_tagsToUse':      l_tagsToUse
    }
|
def function[DICOMfile_read, parameter[self]]:
constant[
Read a DICOM file and perform some initial
parsing of tags.
NB!
For thread safety, class member variables
should not be assigned since other threads
might override/change these variables in mid-
flight!
]
variable[b_status] assign[=] constant[False]
variable[l_tags] assign[=] list[[]]
variable[l_tagsToUse] assign[=] list[[]]
variable[d_tagsInString] assign[=] dictionary[[], []]
variable[str_file] assign[=] constant[]
variable[d_DICOM] assign[=] dictionary[[<ast.Constant object at 0x7da20c9911e0>, <ast.Constant object at 0x7da20c990b20>, <ast.Constant object at 0x7da20c993460>, <ast.Constant object at 0x7da20c991630>, <ast.Constant object at 0x7da20c991180>, <ast.Constant object at 0x7da20c993610>, <ast.Constant object at 0x7da20c992f80>], [<ast.Constant object at 0x7da20c991990>, <ast.Dict object at 0x7da20c990670>, <ast.Constant object at 0x7da20c993c40>, <ast.List object at 0x7da20c9917b0>, <ast.Dict object at 0x7da20c990c70>, <ast.Dict object at 0x7da20c990490>, <ast.Dict object at 0x7da20c993ee0>]]
for taget[tuple[[<ast.Name object at 0x7da20c992fb0>, <ast.Name object at 0x7da20c9927a0>]]] in starred[call[name[kwargs].items, parameter[]]] begin[:]
if compare[name[k] equal[==] constant[file]] begin[:]
variable[str_file] assign[=] name[v]
if compare[name[k] equal[==] constant[l_tagsToUse]] begin[:]
variable[l_tags] assign[=] name[v]
if call[name[len], parameter[name[args]]] begin[:]
variable[l_file] assign[=] call[name[args]][constant[0]]
variable[str_file] assign[=] call[name[l_file]][constant[0]]
variable[str_localFile] assign[=] call[name[os].path.basename, parameter[name[str_file]]]
variable[str_path] assign[=] call[name[os].path.dirname, parameter[name[str_file]]]
<ast.Try object at 0x7da20c990460>
call[name[d_DICOM]][constant[d_dcm]] assign[=] call[name[dict], parameter[call[name[d_DICOM]][constant[dcm]]]]
call[name[d_DICOM]][constant[strRaw]] assign[=] call[name[str], parameter[call[name[d_DICOM]][constant[dcm]]]]
call[name[d_DICOM]][constant[l_tagRaw]] assign[=] call[call[name[d_DICOM]][constant[dcm]].dir, parameter[]]
if call[name[len], parameter[name[l_tags]]] begin[:]
variable[l_tagsToUse] assign[=] name[l_tags]
if compare[constant[PixelData] in name[l_tagsToUse]] begin[:]
call[name[l_tagsToUse].remove, parameter[constant[PixelData]]]
for taget[name[key]] in starred[name[l_tagsToUse]] begin[:]
call[call[name[d_DICOM]][constant[d_dicom]]][name[key]] assign[=] call[call[name[d_DICOM]][constant[dcm]].data_element, parameter[name[key]]]
<ast.Try object at 0x7da20c992320>
call[call[name[d_DICOM]][constant[d_json]]][name[key]] assign[=] call[name[str], parameter[call[call[name[d_DICOM]][constant[d_dicomSimple]]][name[key]]]]
variable[d_tagsInString] assign[=] call[name[self].tagsInString_process, parameter[name[d_DICOM], name[self].str_outputFileStem]]
variable[str_outputFile] assign[=] call[name[d_tagsInString]][constant[str_result]]
return[dictionary[[<ast.Constant object at 0x7da204622fb0>, <ast.Constant object at 0x7da204622410>, <ast.Constant object at 0x7da204621c60>, <ast.Constant object at 0x7da204621cc0>, <ast.Constant object at 0x7da204622590>, <ast.Constant object at 0x7da204622aa0>], [<ast.Name object at 0x7da204620b50>, <ast.Name object at 0x7da2046211e0>, <ast.Name object at 0x7da204623640>, <ast.Name object at 0x7da204623a30>, <ast.Name object at 0x7da204623d60>, <ast.Name object at 0x7da2046213f0>]]]
|
keyword[def] identifier[DICOMfile_read] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[b_status] = keyword[False]
identifier[l_tags] =[]
identifier[l_tagsToUse] =[]
identifier[d_tagsInString] ={}
identifier[str_file] = literal[string]
identifier[d_DICOM] ={
literal[string] : keyword[None] ,
literal[string] :{},
literal[string] : literal[string] ,
literal[string] :[],
literal[string] :{},
literal[string] :{},
literal[string] :{}
}
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[kwargs] . identifier[items] ():
keyword[if] identifier[k] == literal[string] : identifier[str_file] = identifier[v]
keyword[if] identifier[k] == literal[string] : identifier[l_tags] = identifier[v]
keyword[if] identifier[len] ( identifier[args] ):
identifier[l_file] = identifier[args] [ literal[int] ]
identifier[str_file] = identifier[l_file] [ literal[int] ]
identifier[str_localFile] = identifier[os] . identifier[path] . identifier[basename] ( identifier[str_file] )
identifier[str_path] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[str_file] )
keyword[try] :
identifier[d_DICOM] [ literal[string] ]= identifier[dicom] . identifier[read_file] ( identifier[str_file] )
identifier[b_status] = keyword[True]
keyword[except] :
identifier[self] . identifier[dp] . identifier[qprint] ( literal[string] % identifier[os] . identifier[getcwd] (), identifier[comms] = literal[string] )
identifier[self] . identifier[dp] . identifier[qprint] ( literal[string] % identifier[str_file] , identifier[comms] = literal[string] )
identifier[b_status] = keyword[False]
identifier[d_DICOM] [ literal[string] ]= identifier[dict] ( identifier[d_DICOM] [ literal[string] ])
identifier[d_DICOM] [ literal[string] ]= identifier[str] ( identifier[d_DICOM] [ literal[string] ])
identifier[d_DICOM] [ literal[string] ]= identifier[d_DICOM] [ literal[string] ]. identifier[dir] ()
keyword[if] identifier[len] ( identifier[l_tags] ):
identifier[l_tagsToUse] = identifier[l_tags]
keyword[else] :
identifier[l_tagsToUse] = identifier[d_DICOM] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[l_tagsToUse] :
identifier[l_tagsToUse] . identifier[remove] ( literal[string] )
keyword[for] identifier[key] keyword[in] identifier[l_tagsToUse] :
identifier[d_DICOM] [ literal[string] ][ identifier[key] ]= identifier[d_DICOM] [ literal[string] ]. identifier[data_element] ( identifier[key] )
keyword[try] :
identifier[d_DICOM] [ literal[string] ][ identifier[key] ]= identifier[getattr] ( identifier[d_DICOM] [ literal[string] ], identifier[key] )
keyword[except] :
identifier[d_DICOM] [ literal[string] ][ identifier[key] ]= literal[string]
identifier[d_DICOM] [ literal[string] ][ identifier[key] ]= identifier[str] ( identifier[d_DICOM] [ literal[string] ][ identifier[key] ])
identifier[d_tagsInString] = identifier[self] . identifier[tagsInString_process] ( identifier[d_DICOM] , identifier[self] . identifier[str_outputFileStem] )
identifier[str_outputFile] = identifier[d_tagsInString] [ literal[string] ]
keyword[return] {
literal[string] : identifier[b_status] ,
literal[string] : identifier[str_path] ,
literal[string] : identifier[str_localFile] ,
literal[string] : identifier[str_outputFile] ,
literal[string] : identifier[d_DICOM] ,
literal[string] : identifier[l_tagsToUse]
}
|
def DICOMfile_read(self, *args, **kwargs):
"""
Read a DICOM file and perform some initial
parsing of tags.
NB!
For thread safety, class member variables
should not be assigned since other threads
might override/change these variables in mid-
flight!
"""
b_status = False
l_tags = []
l_tagsToUse = []
d_tagsInString = {}
str_file = ''
d_DICOM = {'dcm': None, 'd_dcm': {}, 'strRaw': '', 'l_tagRaw': [], 'd_json': {}, 'd_dicom': {}, 'd_dicomSimple': {}}
for (k, v) in kwargs.items():
if k == 'file':
str_file = v # depends on [control=['if'], data=[]]
if k == 'l_tagsToUse':
l_tags = v # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if len(args):
l_file = args[0]
str_file = l_file[0] # depends on [control=['if'], data=[]]
str_localFile = os.path.basename(str_file)
str_path = os.path.dirname(str_file)
# self.dp.qprint("%s: In input base directory: %s" % (threading.currentThread().getName(), self.str_inputDir))
# self.dp.qprint("%s: Reading DICOM file in path: %s" % (threading.currentThread().getName(),str_path))
# self.dp.qprint("%s: Analysing tags on DICOM file: %s" % (threading.currentThread().getName(),str_localFile))
# self.dp.qprint("%s: Loading: %s" % (threading.currentThread().getName(),str_file))
try:
# self.dcm = dicom.read_file(str_file)
d_DICOM['dcm'] = dicom.read_file(str_file)
b_status = True # depends on [control=['try'], data=[]]
except:
self.dp.qprint('In directory: %s' % os.getcwd(), comms='error')
self.dp.qprint('Failed to read %s' % str_file, comms='error')
b_status = False # depends on [control=['except'], data=[]]
d_DICOM['d_dcm'] = dict(d_DICOM['dcm'])
d_DICOM['strRaw'] = str(d_DICOM['dcm'])
d_DICOM['l_tagRaw'] = d_DICOM['dcm'].dir()
if len(l_tags):
l_tagsToUse = l_tags # depends on [control=['if'], data=[]]
else:
l_tagsToUse = d_DICOM['l_tagRaw']
if 'PixelData' in l_tagsToUse:
l_tagsToUse.remove('PixelData') # depends on [control=['if'], data=['l_tagsToUse']]
for key in l_tagsToUse:
d_DICOM['d_dicom'][key] = d_DICOM['dcm'].data_element(key)
try:
d_DICOM['d_dicomSimple'][key] = getattr(d_DICOM['dcm'], key) # depends on [control=['try'], data=[]]
except:
d_DICOM['d_dicomSimple'][key] = 'no attribute' # depends on [control=['except'], data=[]]
d_DICOM['d_json'][key] = str(d_DICOM['d_dicomSimple'][key]) # depends on [control=['for'], data=['key']]
# pudb.set_trace()
d_tagsInString = self.tagsInString_process(d_DICOM, self.str_outputFileStem)
str_outputFile = d_tagsInString['str_result']
return {'status': b_status, 'inputPath': str_path, 'inputFilename': str_localFile, 'outputFileStem': str_outputFile, 'd_DICOM': d_DICOM, 'l_tagsToUse': l_tagsToUse}
|
def _update(self, data):
    '''Refresh this step's fields from the blob of json-parsed data
    returned directly by the API.
    '''
    self.orderby = data['orderby']
    self.revision = data['revisionid']
    self.title = data['title']
    self.lines = []
    for entry in data['lines']:
        self.lines.append(
            Line(self.guideid, self.stepid, entry['lineid'], data=entry))
    # TODO: Support video.
    media = data['media']
    if media['type'] == 'image':
        self.media = [Image(item['id']) for item in media['data']]
self.media.append(Image(image['id']))
|
def function[_update, parameter[self, data]]:
constant[Update the step using the blob of json-parsed data directly from the
API.
]
name[self].orderby assign[=] call[name[data]][constant[orderby]]
name[self].revision assign[=] call[name[data]][constant[revisionid]]
name[self].title assign[=] call[name[data]][constant[title]]
name[self].lines assign[=] <ast.ListComp object at 0x7da18dc07b80>
if compare[call[call[name[data]][constant[media]]][constant[type]] equal[==] constant[image]] begin[:]
name[self].media assign[=] list[[]]
for taget[name[image]] in starred[call[call[name[data]][constant[media]]][constant[data]]] begin[:]
call[name[self].media.append, parameter[call[name[Image], parameter[call[name[image]][constant[id]]]]]]
|
keyword[def] identifier[_update] ( identifier[self] , identifier[data] ):
literal[string]
identifier[self] . identifier[orderby] = identifier[data] [ literal[string] ]
identifier[self] . identifier[revision] = identifier[data] [ literal[string] ]
identifier[self] . identifier[title] = identifier[data] [ literal[string] ]
identifier[self] . identifier[lines] =[ identifier[Line] ( identifier[self] . identifier[guideid] , identifier[self] . identifier[stepid] , identifier[line] [ literal[string] ], identifier[data] = identifier[line] )
keyword[for] identifier[line] keyword[in] identifier[data] [ literal[string] ]]
keyword[if] identifier[data] [ literal[string] ][ literal[string] ]== literal[string] :
identifier[self] . identifier[media] =[]
keyword[for] identifier[image] keyword[in] identifier[data] [ literal[string] ][ literal[string] ]:
identifier[self] . identifier[media] . identifier[append] ( identifier[Image] ( identifier[image] [ literal[string] ]))
|
def _update(self, data):
"""Update the step using the blob of json-parsed data directly from the
API.
"""
self.orderby = data['orderby']
self.revision = data['revisionid']
self.title = data['title']
self.lines = [Line(self.guideid, self.stepid, line['lineid'], data=line) for line in data['lines']]
# TODO: Support video.
if data['media']['type'] == 'image':
self.media = []
for image in data['media']['data']:
self.media.append(Image(image['id'])) # depends on [control=['for'], data=['image']] # depends on [control=['if'], data=[]]
|
def get_default(name, value):
    """Look up ``EXAMPLE_<NAME>`` in the environment, falling back to *value*."""
    env_key = 'EXAMPLE_{}'.format(name.upper())
    return os.environ.get(env_key, value)
|
def function[get_default, parameter[name, value]]:
constant[return value from environment variables with name EXAMPLE_<name>
or value]
return[call[name[os].environ.get, parameter[call[constant[EXAMPLE_{}].format, parameter[call[name[name].upper, parameter[]]]], name[value]]]]
|
keyword[def] identifier[get_default] ( identifier[name] , identifier[value] ):
literal[string]
keyword[return] identifier[os] . identifier[environ] . identifier[get] ( literal[string] . identifier[format] ( identifier[name] . identifier[upper] ()), identifier[value] )
|
def get_default(name, value):
"""return value from environment variables with name EXAMPLE_<name>
or value"""
return os.environ.get('EXAMPLE_{}'.format(name.upper()), value)
|
def allocate_seg_vlan(self, net_id, is_fw_virt, direc, tenant_id):
    """Allocate a segmentation ID and, for physical firewalls, a VLAN ID.

    Vlan and seg are allocated together with the NetID stored atomically,
    which avoids a separate DB update for the NetID after allocation and
    avoids a dangling allocation if the process restarts between the two
    steps: a Vlan/Seg is never allocated without its NetID.
    """
    segmentation_id = self.alloc_seg(net_id)
    # Virtual firewalls need no VLAN; keep 0 as the "unallocated" marker.
    vlan_id = self.alloc_vlan(net_id) if not is_fw_virt else 0
    # Refresh the local cache with the new allocation.
    self.update_net_info(tenant_id, direc, vlan_id, segmentation_id)
|
def function[allocate_seg_vlan, parameter[self, net_id, is_fw_virt, direc, tenant_id]]:
constant[allocate segmentation ID and VLAN ID.
Allocate vlan, seg thereby storing NetID atomically.
This saves an extra step to update DB with NetID after allocation.
Also may save an extra step after restart, if process crashed
after allocation but before updating DB with NetID. Now, since
both steps are combined, Vlan/Seg won't be allocated w/o NetID.
]
variable[seg] assign[=] call[name[self].alloc_seg, parameter[name[net_id]]]
variable[vlan] assign[=] constant[0]
if <ast.UnaryOp object at 0x7da1b1b85570> begin[:]
variable[vlan] assign[=] call[name[self].alloc_vlan, parameter[name[net_id]]]
call[name[self].update_net_info, parameter[name[tenant_id], name[direc], name[vlan], name[seg]]]
|
keyword[def] identifier[allocate_seg_vlan] ( identifier[self] , identifier[net_id] , identifier[is_fw_virt] , identifier[direc] , identifier[tenant_id] ):
literal[string]
identifier[seg] = identifier[self] . identifier[alloc_seg] ( identifier[net_id] )
identifier[vlan] = literal[int]
keyword[if] keyword[not] identifier[is_fw_virt] :
identifier[vlan] = identifier[self] . identifier[alloc_vlan] ( identifier[net_id] )
identifier[self] . identifier[update_net_info] ( identifier[tenant_id] , identifier[direc] , identifier[vlan] , identifier[seg] )
|
def allocate_seg_vlan(self, net_id, is_fw_virt, direc, tenant_id):
"""allocate segmentation ID and VLAN ID.
Allocate vlan, seg thereby storing NetID atomically.
This saves an extra step to update DB with NetID after allocation.
Also may save an extra step after restart, if process crashed
after allocation but before updating DB with NetID. Now, since
both steps are combined, Vlan/Seg won't be allocated w/o NetID.
"""
seg = self.alloc_seg(net_id)
vlan = 0
# VLAN allocation is only needed for physical firewall case
if not is_fw_virt:
vlan = self.alloc_vlan(net_id) # depends on [control=['if'], data=[]]
# Updating the local cache
self.update_net_info(tenant_id, direc, vlan, seg)
|
def train(self):
    '''
    Fit the linear regression weights.

    Given f(x) = WX, find the best hypothesis h(x) = WX by the
    pseudo-inverse solution and return W.  Requires the object to be in
    the 'init' state (train data loaded, W initialized); otherwise the
    current W is returned unchanged.
    '''
    if self.status == 'init':
        self.status = 'train'
        self.xpsedo = self.calculate_psedo_X(self.train_X)
        self.W = np.dot(self.xpsedo, self.train_Y)
    else:
        print("Please load train data and init W first.")
    return self.W
|
def function[train, parameter[self]]:
constant[
Train Linear Regression Algorithm
From f(x) = WX
Find best h(x) = WX similar to f(x)
Output W
]
if compare[name[self].status not_equal[!=] constant[init]] begin[:]
call[name[print], parameter[constant[Please load train data and init W first.]]]
return[name[self].W]
name[self].status assign[=] constant[train]
name[self].xpsedo assign[=] call[name[self].calculate_psedo_X, parameter[name[self].train_X]]
name[self].W assign[=] call[name[np].dot, parameter[name[self].xpsedo, name[self].train_Y]]
return[name[self].W]
|
keyword[def] identifier[train] ( identifier[self] ):
literal[string]
keyword[if] ( identifier[self] . identifier[status] != literal[string] ):
identifier[print] ( literal[string] )
keyword[return] identifier[self] . identifier[W]
identifier[self] . identifier[status] = literal[string]
identifier[self] . identifier[xpsedo] = identifier[self] . identifier[calculate_psedo_X] ( identifier[self] . identifier[train_X] )
identifier[self] . identifier[W] = identifier[np] . identifier[dot] ( identifier[self] . identifier[xpsedo] , identifier[self] . identifier[train_Y] )
keyword[return] identifier[self] . identifier[W]
|
def train(self):
"""
Train Linear Regression Algorithm
From f(x) = WX
Find best h(x) = WX similar to f(x)
Output W
"""
if self.status != 'init':
print('Please load train data and init W first.')
return self.W # depends on [control=['if'], data=[]]
self.status = 'train'
self.xpsedo = self.calculate_psedo_X(self.train_X)
self.W = np.dot(self.xpsedo, self.train_Y)
return self.W
|
def calculate_gradient(self, batch_info, device, model, rollout):
    """ Calculate loss of the supplied rollout.

    Deterministic actor-critic update: the critic is regressed towards a
    bootstrapped target computed with ``self.target_model``, and the actor
    is updated to maximize the critic's value of its own actions.
    Gradients are accumulated on the model parameters via ``backward()``;
    the returned dict only carries scalar diagnostics
    ('policy_loss', 'value_loss').

    NOTE(review): ``batch_info`` and ``device`` are unused here —
    presumably kept for interface compatibility; confirm against callers.
    """
    rollout = rollout.to_transitions()
    dones = rollout.batch_tensor('dones')
    rewards = rollout.batch_tensor('rewards')
    observations_next = rollout.batch_tensor('observations_next')
    actions = rollout.batch_tensor('actions')
    observations = rollout.batch_tensor('observations')
    # Calculate value loss - or critic loss.  The bootstrap target is built
    # under no_grad so no gradient flows into the target network.
    with torch.no_grad():
        target_next_value = self.target_model.value(observations_next)
        # TD(0) target: r + gamma * V'(s'), masked out at episode ends.
        target_value = rewards + (1.0 - dones) * self.discount_factor * target_next_value
    # Value estimation error vs the target network
    model_value = model.value(observations, actions)
    value_loss = F.mse_loss(model_value, target_value)
    # It may seem a bit tricky what I'm doing here, but the underlying idea is simple
    # All other implementations I found keep two separate optimizers for actor and critic
    # and update them separately
    # What I'm trying to do is to optimize them both with a single optimizer
    # but I need to make sure gradients flow correctly
    # From critic loss to critic network only and from actor loss to actor network only
    # Backpropagate value loss to critic only
    value_loss.backward()
    model_action = model.action(observations)
    model_action_value = model.value(observations, model_action)
    policy_loss = -model_action_value.mean()
    # torch.autograd.grad computes d(policy_loss)/d(model_action) without
    # accumulating into any parameter .grad; pushing that gradient through
    # model_action.backward(...) then reaches the actor's parameters only.
    model_action_grad = torch.autograd.grad(policy_loss, model_action)[0]
    # Backpropagate actor loss to actor only
    model_action.backward(gradient=model_action_grad)
    return {
        'policy_loss': policy_loss.item(),
        'value_loss': value_loss.item(),
    }
|
def function[calculate_gradient, parameter[self, batch_info, device, model, rollout]]:
constant[ Calculate loss of the supplied rollout ]
variable[rollout] assign[=] call[name[rollout].to_transitions, parameter[]]
variable[dones] assign[=] call[name[rollout].batch_tensor, parameter[constant[dones]]]
variable[rewards] assign[=] call[name[rollout].batch_tensor, parameter[constant[rewards]]]
variable[observations_next] assign[=] call[name[rollout].batch_tensor, parameter[constant[observations_next]]]
variable[actions] assign[=] call[name[rollout].batch_tensor, parameter[constant[actions]]]
variable[observations] assign[=] call[name[rollout].batch_tensor, parameter[constant[observations]]]
with call[name[torch].no_grad, parameter[]] begin[:]
variable[target_next_value] assign[=] call[name[self].target_model.value, parameter[name[observations_next]]]
variable[target_value] assign[=] binary_operation[name[rewards] + binary_operation[binary_operation[binary_operation[constant[1.0] - name[dones]] * name[self].discount_factor] * name[target_next_value]]]
variable[model_value] assign[=] call[name[model].value, parameter[name[observations], name[actions]]]
variable[value_loss] assign[=] call[name[F].mse_loss, parameter[name[model_value], name[target_value]]]
call[name[value_loss].backward, parameter[]]
variable[model_action] assign[=] call[name[model].action, parameter[name[observations]]]
variable[model_action_value] assign[=] call[name[model].value, parameter[name[observations], name[model_action]]]
variable[policy_loss] assign[=] <ast.UnaryOp object at 0x7da18bccb880>
variable[model_action_grad] assign[=] call[call[name[torch].autograd.grad, parameter[name[policy_loss], name[model_action]]]][constant[0]]
call[name[model_action].backward, parameter[]]
return[dictionary[[<ast.Constant object at 0x7da18bcc9a50>, <ast.Constant object at 0x7da18bccafe0>], [<ast.Call object at 0x7da18bccb6d0>, <ast.Call object at 0x7da18bcc9450>]]]
|
keyword[def] identifier[calculate_gradient] ( identifier[self] , identifier[batch_info] , identifier[device] , identifier[model] , identifier[rollout] ):
literal[string]
identifier[rollout] = identifier[rollout] . identifier[to_transitions] ()
identifier[dones] = identifier[rollout] . identifier[batch_tensor] ( literal[string] )
identifier[rewards] = identifier[rollout] . identifier[batch_tensor] ( literal[string] )
identifier[observations_next] = identifier[rollout] . identifier[batch_tensor] ( literal[string] )
identifier[actions] = identifier[rollout] . identifier[batch_tensor] ( literal[string] )
identifier[observations] = identifier[rollout] . identifier[batch_tensor] ( literal[string] )
keyword[with] identifier[torch] . identifier[no_grad] ():
identifier[target_next_value] = identifier[self] . identifier[target_model] . identifier[value] ( identifier[observations_next] )
identifier[target_value] = identifier[rewards] +( literal[int] - identifier[dones] )* identifier[self] . identifier[discount_factor] * identifier[target_next_value]
identifier[model_value] = identifier[model] . identifier[value] ( identifier[observations] , identifier[actions] )
identifier[value_loss] = identifier[F] . identifier[mse_loss] ( identifier[model_value] , identifier[target_value] )
identifier[value_loss] . identifier[backward] ()
identifier[model_action] = identifier[model] . identifier[action] ( identifier[observations] )
identifier[model_action_value] = identifier[model] . identifier[value] ( identifier[observations] , identifier[model_action] )
identifier[policy_loss] =- identifier[model_action_value] . identifier[mean] ()
identifier[model_action_grad] = identifier[torch] . identifier[autograd] . identifier[grad] ( identifier[policy_loss] , identifier[model_action] )[ literal[int] ]
identifier[model_action] . identifier[backward] ( identifier[gradient] = identifier[model_action_grad] )
keyword[return] {
literal[string] : identifier[policy_loss] . identifier[item] (),
literal[string] : identifier[value_loss] . identifier[item] (),
}
|
def calculate_gradient(self, batch_info, device, model, rollout):
""" Calculate loss of the supplied rollout """
rollout = rollout.to_transitions()
dones = rollout.batch_tensor('dones')
rewards = rollout.batch_tensor('rewards')
observations_next = rollout.batch_tensor('observations_next')
actions = rollout.batch_tensor('actions')
observations = rollout.batch_tensor('observations')
# Calculate value loss - or critic loss
with torch.no_grad():
target_next_value = self.target_model.value(observations_next)
target_value = rewards + (1.0 - dones) * self.discount_factor * target_next_value # depends on [control=['with'], data=[]]
# Value estimation error vs the target network
model_value = model.value(observations, actions)
value_loss = F.mse_loss(model_value, target_value)
# It may seem a bit tricky what I'm doing here, but the underlying idea is simple
# All other implementations I found keep two separate optimizers for actor and critic
# and update them separately
# What I'm trying to do is to optimize them both with a single optimizer
# but I need to make sure gradients flow correctly
# From critic loss to critic network only and from actor loss to actor network only
# Backpropagate value loss to critic only
value_loss.backward()
model_action = model.action(observations)
model_action_value = model.value(observations, model_action)
policy_loss = -model_action_value.mean()
model_action_grad = torch.autograd.grad(policy_loss, model_action)[0]
# Backpropagate actor loss to actor only
model_action.backward(gradient=model_action_grad)
return {'policy_loss': policy_loss.item(), 'value_loss': value_loss.item()}
|
def sharey(axes):
    """
    Link axis limits across *axes* without sharing locators, ticks, etc.
    By Joe Kington
    """
    shared_linker = Linker(axes)
    for axis in axes:
        axis._linker = shared_linker
|
def function[sharey, parameter[axes]]:
constant[
Shared axes limits without shared locators, ticks, etc.
By Joe Kington
]
variable[linker] assign[=] call[name[Linker], parameter[name[axes]]]
for taget[name[ax]] in starred[name[axes]] begin[:]
name[ax]._linker assign[=] name[linker]
|
keyword[def] identifier[sharey] ( identifier[axes] ):
literal[string]
identifier[linker] = identifier[Linker] ( identifier[axes] )
keyword[for] identifier[ax] keyword[in] identifier[axes] :
identifier[ax] . identifier[_linker] = identifier[linker]
|
def sharey(axes):
"""
Shared axes limits without shared locators, ticks, etc.
By Joe Kington
"""
linker = Linker(axes)
for ax in axes:
ax._linker = linker # depends on [control=['for'], data=['ax']]
|
def vectorize(self, docs):
    """
    Vectorizes a list of documents using their DCS representations.

    Each document becomes a row; each concept found across all documents
    becomes a column holding that concept's weight for the document.
    The matrix is normalized by its global maximum weight.
    Returns an array of shape (len(docs), number of concepts).

    NOTE(review): if every weight is zero, the division by np.max(vecs)
    yields NaN/inf, same as the original implementation.
    """
    doc_core_sems, all_concepts = self._extract_core_semantics(docs)
    # Build a concept -> column map once instead of calling list.index
    # (O(m)) for every (concept, weight) pair.  setdefault keeps the
    # FIRST occurrence, matching list.index semantics on duplicates.
    concept_index = {}
    for j, con in enumerate(all_concepts):
        concept_index.setdefault(con, j)
    vecs = np.zeros((len(docs), len(all_concepts)))
    for i, core_sems in enumerate(doc_core_sems):
        for con, weight in core_sems:
            vecs[i, concept_index[con]] = weight
    # Normalize
    return vecs / np.max(vecs)
|
def function[vectorize, parameter[self, docs]]:
constant[
Vectorizes a list of documents using their DCS representations.
]
<ast.Tuple object at 0x7da207f03fa0> assign[=] call[name[self]._extract_core_semantics, parameter[name[docs]]]
variable[shape] assign[=] tuple[[<ast.Call object at 0x7da18dc9ba60>, <ast.Call object at 0x7da18dc9a3b0>]]
variable[vecs] assign[=] call[name[np].zeros, parameter[name[shape]]]
for taget[tuple[[<ast.Name object at 0x7da18f09fca0>, <ast.Name object at 0x7da18f09e6e0>]]] in starred[call[name[enumerate], parameter[name[doc_core_sems]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18f09c580>, <ast.Name object at 0x7da18f09eda0>]]] in starred[name[core_sems]] begin[:]
variable[j] assign[=] call[name[all_concepts].index, parameter[name[con]]]
call[name[vecs]][tuple[[<ast.Name object at 0x7da18f09f820>, <ast.Name object at 0x7da18f09fd30>]]] assign[=] name[weight]
return[binary_operation[name[vecs] / call[name[np].max, parameter[name[vecs]]]]]
|
keyword[def] identifier[vectorize] ( identifier[self] , identifier[docs] ):
literal[string]
identifier[doc_core_sems] , identifier[all_concepts] = identifier[self] . identifier[_extract_core_semantics] ( identifier[docs] )
identifier[shape] =( identifier[len] ( identifier[docs] ), identifier[len] ( identifier[all_concepts] ))
identifier[vecs] = identifier[np] . identifier[zeros] ( identifier[shape] )
keyword[for] identifier[i] , identifier[core_sems] keyword[in] identifier[enumerate] ( identifier[doc_core_sems] ):
keyword[for] identifier[con] , identifier[weight] keyword[in] identifier[core_sems] :
identifier[j] = identifier[all_concepts] . identifier[index] ( identifier[con] )
identifier[vecs] [ identifier[i] , identifier[j] ]= identifier[weight]
keyword[return] identifier[vecs] / identifier[np] . identifier[max] ( identifier[vecs] )
|
def vectorize(self, docs):
"""
Vectorizes a list of documents using their DCS representations.
"""
(doc_core_sems, all_concepts) = self._extract_core_semantics(docs)
shape = (len(docs), len(all_concepts))
vecs = np.zeros(shape)
for (i, core_sems) in enumerate(doc_core_sems):
for (con, weight) in core_sems:
j = all_concepts.index(con)
vecs[i, j] = weight # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
# Normalize
return vecs / np.max(vecs)
|
def atlasdb_renew_peer( peer_hostport, now, con=None, path=None ):
    """
    Refresh the discovery_time of a known peer.

    If *now* is None, the current wall-clock time is used.
    Always returns True.
    """
    with AtlasDBOpen(con=con, path=path) as db:
        timestamp = time.time() if now is None else now
        cursor = db.cursor()
        atlasdb_query_execute(
            cursor,
            "UPDATE peers SET discovery_time = ? WHERE peer_hostport = ?;",
            (timestamp, peer_hostport))
        db.commit()
        return True
|
def function[atlasdb_renew_peer, parameter[peer_hostport, now, con, path]]:
constant[
Renew a peer's discovery time
]
with call[name[AtlasDBOpen], parameter[]] begin[:]
if compare[name[now] is constant[None]] begin[:]
variable[now] assign[=] call[name[time].time, parameter[]]
variable[sql] assign[=] constant[UPDATE peers SET discovery_time = ? WHERE peer_hostport = ?;]
variable[args] assign[=] tuple[[<ast.Name object at 0x7da1b26ae830>, <ast.Name object at 0x7da1b26acdc0>]]
variable[cur] assign[=] call[name[dbcon].cursor, parameter[]]
variable[res] assign[=] call[name[atlasdb_query_execute], parameter[name[cur], name[sql], name[args]]]
call[name[dbcon].commit, parameter[]]
return[constant[True]]
|
keyword[def] identifier[atlasdb_renew_peer] ( identifier[peer_hostport] , identifier[now] , identifier[con] = keyword[None] , identifier[path] = keyword[None] ):
literal[string]
keyword[with] identifier[AtlasDBOpen] ( identifier[con] = identifier[con] , identifier[path] = identifier[path] ) keyword[as] identifier[dbcon] :
keyword[if] identifier[now] keyword[is] keyword[None] :
identifier[now] = identifier[time] . identifier[time] ()
identifier[sql] = literal[string]
identifier[args] =( identifier[now] , identifier[peer_hostport] )
identifier[cur] = identifier[dbcon] . identifier[cursor] ()
identifier[res] = identifier[atlasdb_query_execute] ( identifier[cur] , identifier[sql] , identifier[args] )
identifier[dbcon] . identifier[commit] ()
keyword[return] keyword[True]
|
def atlasdb_renew_peer(peer_hostport, now, con=None, path=None):
"""
Renew a peer's discovery time
"""
with AtlasDBOpen(con=con, path=path) as dbcon:
if now is None:
now = time.time() # depends on [control=['if'], data=['now']]
sql = 'UPDATE peers SET discovery_time = ? WHERE peer_hostport = ?;'
args = (now, peer_hostport)
cur = dbcon.cursor()
res = atlasdb_query_execute(cur, sql, args)
dbcon.commit() # depends on [control=['with'], data=['dbcon']]
return True
|
def EWFGlobPathSpec(file_system, path_spec):
  """Globs for path specifications according to the EWF naming schema.

  EWF images are split into segment files whose extensions count up:
  E01..E99 then three base-26 letters (EAA..), with lowercase e01/s01 and
  4-character Ex01 variants handled analogously.  Each successive segment
  is probed on the file system until one is missing.

  Args:
    file_system (FileSystem): file system.
    path_spec (PathSpec): path specification.

  Returns:
    list[PathSpec]: path specifications that match the glob.

  Raises:
    PathSpecError: if the path specification is invalid.
    RuntimeError: if the maximum number of supported segment files is
        reached.
  """
  if not path_spec.HasParent():
    raise errors.PathSpecError(
        'Unsupported path specification without parent.')
  parent_path_spec = path_spec.parent
  parent_location = getattr(parent_path_spec, 'location', None)
  if not parent_location:
    raise errors.PathSpecError(
        'Unsupported parent path specification without location.')
  # Split e.g. "image.E01" into "image" and "E01".
  parent_location, _, segment_extension = parent_location.rpartition('.')
  segment_extension_start = segment_extension[0]
  segment_extension_length = len(segment_extension)
  # The supplied extension must name the FIRST segment: "E01"/"e01"/"s01"
  # (3 characters) or "Ex01" (4 characters).
  if (segment_extension_length not in [3, 4] or
      not segment_extension.endswith('01') or (
          segment_extension_length == 3 and
          segment_extension_start not in ['E', 'e', 's']) or (
              segment_extension_length == 4 and
              not segment_extension.startswith('Ex'))):
    raise errors.PathSpecError((
        'Unsupported parent path specification invalid segment file '
        'extension: {0:s}').format(segment_extension))
  segment_number = 1
  segment_files = []
  while True:
    segment_location = '{0:s}.{1:s}'.format(parent_location, segment_extension)
    # Note that we don't want to set the keyword arguments when not used
    # because the path specification base class will check for unused
    # keyword arguments and raise.
    kwargs = path_spec_factory.Factory.GetProperties(parent_path_spec)
    kwargs['location'] = segment_location
    if parent_path_spec.parent is not None:
      kwargs['parent'] = parent_path_spec.parent
    segment_path_spec = path_spec_factory.Factory.NewPathSpec(
        parent_path_spec.type_indicator, **kwargs)
    # Stop at the first segment file that does not exist on disk.
    if not file_system.FileEntryExistsByPathSpec(segment_path_spec):
      break
    segment_files.append(segment_path_spec)
    segment_number += 1
    if segment_number <= 99:
      # Two-digit numeric extensions: E01..E99 / Ex01..Ex99.
      if segment_extension_length == 3:
        segment_extension = '{0:s}{1:02d}'.format(
            segment_extension_start, segment_number)
      elif segment_extension_length == 4:
        segment_extension = '{0:s}x{1:02d}'.format(
            segment_extension_start, segment_number)
    else:
      # Past 99 the counter becomes three base-26 letters; segment 100 maps
      # to index 0, i.e. "EAA" (or "eaa"/"saa" for lowercase schemas).
      segment_index = segment_number - 100
      if segment_extension_start in ['e', 's']:
        letter_offset = ord('a')
      else:
        letter_offset = ord('A')
      segment_index, remainder = divmod(segment_index, 26)
      third_letter = chr(letter_offset + remainder)
      segment_index, remainder = divmod(segment_index, 26)
      second_letter = chr(letter_offset + remainder)
      # The first letter advances from the starting letter as the two lower
      # base-26 positions wrap around.
      first_letter = chr(ord(segment_extension_start) + segment_index)
      # '[' follows 'Z' and '{' follows 'z' in ASCII: the letter space for
      # the first position is exhausted.
      if first_letter in ['[', '{']:
        raise RuntimeError('Unsupported number of segment files.')
      if segment_extension_length == 3:
        segment_extension = '{0:s}{1:s}{2:s}'.format(
            first_letter, second_letter, third_letter)
      elif segment_extension_length == 4:
        segment_extension = '{0:s}x{1:s}{2:s}'.format(
            first_letter, second_letter, third_letter)
  return segment_files
|
def function[EWFGlobPathSpec, parameter[file_system, path_spec]]:
constant[Globs for path specifications according to the EWF naming schema.
Args:
file_system (FileSystem): file system.
path_spec (PathSpec): path specification.
Returns:
list[PathSpec]: path specifications that match the glob.
Raises:
PathSpecError: if the path specification is invalid.
RuntimeError: if the maximum number of supported segment files is
reached.
]
if <ast.UnaryOp object at 0x7da1b07bae00> begin[:]
<ast.Raise object at 0x7da1b07b96c0>
variable[parent_path_spec] assign[=] name[path_spec].parent
variable[parent_location] assign[=] call[name[getattr], parameter[name[parent_path_spec], constant[location], constant[None]]]
if <ast.UnaryOp object at 0x7da1b07bb970> begin[:]
<ast.Raise object at 0x7da1b07b9ab0>
<ast.Tuple object at 0x7da1b07ba020> assign[=] call[name[parent_location].rpartition, parameter[constant[.]]]
variable[segment_extension_start] assign[=] call[name[segment_extension]][constant[0]]
variable[segment_extension_length] assign[=] call[name[len], parameter[name[segment_extension]]]
if <ast.BoolOp object at 0x7da1b07b9990> begin[:]
<ast.Raise object at 0x7da1b07ba290>
variable[segment_number] assign[=] constant[1]
variable[segment_files] assign[=] list[[]]
while constant[True] begin[:]
variable[segment_location] assign[=] call[constant[{0:s}.{1:s}].format, parameter[name[parent_location], name[segment_extension]]]
variable[kwargs] assign[=] call[name[path_spec_factory].Factory.GetProperties, parameter[name[parent_path_spec]]]
call[name[kwargs]][constant[location]] assign[=] name[segment_location]
if compare[name[parent_path_spec].parent is_not constant[None]] begin[:]
call[name[kwargs]][constant[parent]] assign[=] name[parent_path_spec].parent
variable[segment_path_spec] assign[=] call[name[path_spec_factory].Factory.NewPathSpec, parameter[name[parent_path_spec].type_indicator]]
if <ast.UnaryOp object at 0x7da1b0654550> begin[:]
break
call[name[segment_files].append, parameter[name[segment_path_spec]]]
<ast.AugAssign object at 0x7da1b06543a0>
if compare[name[segment_number] less_or_equal[<=] constant[99]] begin[:]
if compare[name[segment_extension_length] equal[==] constant[3]] begin[:]
variable[segment_extension] assign[=] call[constant[{0:s}{1:02d}].format, parameter[name[segment_extension_start], name[segment_number]]]
return[name[segment_files]]
|
keyword[def] identifier[EWFGlobPathSpec] ( identifier[file_system] , identifier[path_spec] ):
literal[string]
keyword[if] keyword[not] identifier[path_spec] . identifier[HasParent] ():
keyword[raise] identifier[errors] . identifier[PathSpecError] (
literal[string] )
identifier[parent_path_spec] = identifier[path_spec] . identifier[parent]
identifier[parent_location] = identifier[getattr] ( identifier[parent_path_spec] , literal[string] , keyword[None] )
keyword[if] keyword[not] identifier[parent_location] :
keyword[raise] identifier[errors] . identifier[PathSpecError] (
literal[string] )
identifier[parent_location] , identifier[_] , identifier[segment_extension] = identifier[parent_location] . identifier[rpartition] ( literal[string] )
identifier[segment_extension_start] = identifier[segment_extension] [ literal[int] ]
identifier[segment_extension_length] = identifier[len] ( identifier[segment_extension] )
keyword[if] ( identifier[segment_extension_length] keyword[not] keyword[in] [ literal[int] , literal[int] ] keyword[or]
keyword[not] identifier[segment_extension] . identifier[endswith] ( literal[string] ) keyword[or] (
identifier[segment_extension_length] == literal[int] keyword[and]
identifier[segment_extension_start] keyword[not] keyword[in] [ literal[string] , literal[string] , literal[string] ]) keyword[or] (
identifier[segment_extension_length] == literal[int] keyword[and]
keyword[not] identifier[segment_extension] . identifier[startswith] ( literal[string] ))):
keyword[raise] identifier[errors] . identifier[PathSpecError] ((
literal[string]
literal[string] ). identifier[format] ( identifier[segment_extension] ))
identifier[segment_number] = literal[int]
identifier[segment_files] =[]
keyword[while] keyword[True] :
identifier[segment_location] = literal[string] . identifier[format] ( identifier[parent_location] , identifier[segment_extension] )
identifier[kwargs] = identifier[path_spec_factory] . identifier[Factory] . identifier[GetProperties] ( identifier[parent_path_spec] )
identifier[kwargs] [ literal[string] ]= identifier[segment_location]
keyword[if] identifier[parent_path_spec] . identifier[parent] keyword[is] keyword[not] keyword[None] :
identifier[kwargs] [ literal[string] ]= identifier[parent_path_spec] . identifier[parent]
identifier[segment_path_spec] = identifier[path_spec_factory] . identifier[Factory] . identifier[NewPathSpec] (
identifier[parent_path_spec] . identifier[type_indicator] ,** identifier[kwargs] )
keyword[if] keyword[not] identifier[file_system] . identifier[FileEntryExistsByPathSpec] ( identifier[segment_path_spec] ):
keyword[break]
identifier[segment_files] . identifier[append] ( identifier[segment_path_spec] )
identifier[segment_number] += literal[int]
keyword[if] identifier[segment_number] <= literal[int] :
keyword[if] identifier[segment_extension_length] == literal[int] :
identifier[segment_extension] = literal[string] . identifier[format] (
identifier[segment_extension_start] , identifier[segment_number] )
keyword[elif] identifier[segment_extension_length] == literal[int] :
identifier[segment_extension] = literal[string] . identifier[format] (
identifier[segment_extension_start] , identifier[segment_number] )
keyword[else] :
identifier[segment_index] = identifier[segment_number] - literal[int]
keyword[if] identifier[segment_extension_start] keyword[in] [ literal[string] , literal[string] ]:
identifier[letter_offset] = identifier[ord] ( literal[string] )
keyword[else] :
identifier[letter_offset] = identifier[ord] ( literal[string] )
identifier[segment_index] , identifier[remainder] = identifier[divmod] ( identifier[segment_index] , literal[int] )
identifier[third_letter] = identifier[chr] ( identifier[letter_offset] + identifier[remainder] )
identifier[segment_index] , identifier[remainder] = identifier[divmod] ( identifier[segment_index] , literal[int] )
identifier[second_letter] = identifier[chr] ( identifier[letter_offset] + identifier[remainder] )
identifier[first_letter] = identifier[chr] ( identifier[ord] ( identifier[segment_extension_start] )+ identifier[segment_index] )
keyword[if] identifier[first_letter] keyword[in] [ literal[string] , literal[string] ]:
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[if] identifier[segment_extension_length] == literal[int] :
identifier[segment_extension] = literal[string] . identifier[format] (
identifier[first_letter] , identifier[second_letter] , identifier[third_letter] )
keyword[elif] identifier[segment_extension_length] == literal[int] :
identifier[segment_extension] = literal[string] . identifier[format] (
identifier[first_letter] , identifier[second_letter] , identifier[third_letter] )
keyword[return] identifier[segment_files]
|
def EWFGlobPathSpec(file_system, path_spec):
"""Globs for path specifications according to the EWF naming schema.
Args:
file_system (FileSystem): file system.
path_spec (PathSpec): path specification.
Returns:
list[PathSpec]: path specifications that match the glob.
Raises:
PathSpecError: if the path specification is invalid.
RuntimeError: if the maximum number of supported segment files is
reached.
"""
if not path_spec.HasParent():
raise errors.PathSpecError('Unsupported path specification without parent.') # depends on [control=['if'], data=[]]
parent_path_spec = path_spec.parent
parent_location = getattr(parent_path_spec, 'location', None)
if not parent_location:
raise errors.PathSpecError('Unsupported parent path specification without location.') # depends on [control=['if'], data=[]]
(parent_location, _, segment_extension) = parent_location.rpartition('.')
segment_extension_start = segment_extension[0]
segment_extension_length = len(segment_extension)
if segment_extension_length not in [3, 4] or not segment_extension.endswith('01') or (segment_extension_length == 3 and segment_extension_start not in ['E', 'e', 's']) or (segment_extension_length == 4 and (not segment_extension.startswith('Ex'))):
raise errors.PathSpecError('Unsupported parent path specification invalid segment file extension: {0:s}'.format(segment_extension)) # depends on [control=['if'], data=[]]
segment_number = 1
segment_files = []
while True:
segment_location = '{0:s}.{1:s}'.format(parent_location, segment_extension)
# Note that we don't want to set the keyword arguments when not used
# because the path specification base class will check for unused
# keyword arguments and raise.
kwargs = path_spec_factory.Factory.GetProperties(parent_path_spec)
kwargs['location'] = segment_location
if parent_path_spec.parent is not None:
kwargs['parent'] = parent_path_spec.parent # depends on [control=['if'], data=[]]
segment_path_spec = path_spec_factory.Factory.NewPathSpec(parent_path_spec.type_indicator, **kwargs)
if not file_system.FileEntryExistsByPathSpec(segment_path_spec):
break # depends on [control=['if'], data=[]]
segment_files.append(segment_path_spec)
segment_number += 1
if segment_number <= 99:
if segment_extension_length == 3:
segment_extension = '{0:s}{1:02d}'.format(segment_extension_start, segment_number) # depends on [control=['if'], data=[]]
elif segment_extension_length == 4:
segment_extension = '{0:s}x{1:02d}'.format(segment_extension_start, segment_number) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['segment_number']]
else:
segment_index = segment_number - 100
if segment_extension_start in ['e', 's']:
letter_offset = ord('a') # depends on [control=['if'], data=[]]
else:
letter_offset = ord('A')
(segment_index, remainder) = divmod(segment_index, 26)
third_letter = chr(letter_offset + remainder)
(segment_index, remainder) = divmod(segment_index, 26)
second_letter = chr(letter_offset + remainder)
first_letter = chr(ord(segment_extension_start) + segment_index)
if first_letter in ['[', '{']:
raise RuntimeError('Unsupported number of segment files.') # depends on [control=['if'], data=[]]
if segment_extension_length == 3:
segment_extension = '{0:s}{1:s}{2:s}'.format(first_letter, second_letter, third_letter) # depends on [control=['if'], data=[]]
elif segment_extension_length == 4:
segment_extension = '{0:s}x{1:s}{2:s}'.format(first_letter, second_letter, third_letter) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
return segment_files
|
def getChargeURL(self, CorpNum, UserID):
    """ Popbill member point-charge page URL
    args
        CorpNum : member company registration number
        UserID : member Popbill user ID
    return
        URL containing a 30-second security token
    raise
        PopbillException
    """
    response = self._httpget('/?TG=CHRG', CorpNum, UserID)
    return response.url
|
def function[getChargeURL, parameter[self, CorpNum, UserID]]:
constant[ 팝빌 연동회원 포인트 충전 URL
args
CorpNum : 회원 사업자번호
UserID : 회원 팝빌아이디
return
30초 보안 토큰을 포함한 url
raise
PopbillException
]
variable[result] assign[=] call[name[self]._httpget, parameter[constant[/?TG=CHRG], name[CorpNum], name[UserID]]]
return[name[result].url]
|
keyword[def] identifier[getChargeURL] ( identifier[self] , identifier[CorpNum] , identifier[UserID] ):
literal[string]
identifier[result] = identifier[self] . identifier[_httpget] ( literal[string] , identifier[CorpNum] , identifier[UserID] )
keyword[return] identifier[result] . identifier[url]
|
def getChargeURL(self, CorpNum, UserID):
""" 팝빌 연동회원 포인트 충전 URL
args
CorpNum : 회원 사업자번호
UserID : 회원 팝빌아이디
return
30초 보안 토큰을 포함한 url
raise
PopbillException
"""
result = self._httpget('/?TG=CHRG', CorpNum, UserID)
return result.url
|
def _alloc(self):
    """
    Allocate empty memory for dae variable indices.
    Called in device setup phase.
    :return: None
    """
    # Every state and algebraic variable gets its own independent
    # zero-filled index list of length ``self.n``.
    for var_name in list(self._states) + list(self._algebs):
        self.__dict__[var_name] = [0] * self.n
|
def function[_alloc, parameter[self]]:
constant[
Allocate empty memory for dae variable indices.
Called in device setup phase.
:return: None
]
variable[nzeros] assign[=] binary_operation[list[[<ast.Constant object at 0x7da18f09de40>]] * name[self].n]
for taget[name[var]] in starred[name[self]._states] begin[:]
call[name[self].__dict__][name[var]] assign[=] call[name[nzeros]][<ast.Slice object at 0x7da18f09efe0>]
for taget[name[var]] in starred[name[self]._algebs] begin[:]
call[name[self].__dict__][name[var]] assign[=] call[name[nzeros]][<ast.Slice object at 0x7da18f09f130>]
|
keyword[def] identifier[_alloc] ( identifier[self] ):
literal[string]
identifier[nzeros] =[ literal[int] ]* identifier[self] . identifier[n]
keyword[for] identifier[var] keyword[in] identifier[self] . identifier[_states] :
identifier[self] . identifier[__dict__] [ identifier[var] ]= identifier[nzeros] [:]
keyword[for] identifier[var] keyword[in] identifier[self] . identifier[_algebs] :
identifier[self] . identifier[__dict__] [ identifier[var] ]= identifier[nzeros] [:]
|
def _alloc(self):
"""
Allocate empty memory for dae variable indices.
Called in device setup phase.
:return: None
"""
nzeros = [0] * self.n
for var in self._states:
self.__dict__[var] = nzeros[:] # depends on [control=['for'], data=['var']]
for var in self._algebs:
self.__dict__[var] = nzeros[:] # depends on [control=['for'], data=['var']]
|
def add_unique_postfix(fn):
    """Return *fn* unchanged if no file exists at that path; otherwise
    append a numeric postfix ``(i)`` before the extension until the name
    is unique.

    __source__ = 'http://code.activestate.com/recipes/577200-make-unique-file-name/'
    """
    if not os.path.exists(fn):
        return fn
    path, name = os.path.split(fn)
    name, ext = os.path.splitext(name)
    # Probe name(2), name(3), ... until an unused file name is found.
    # A plain while loop replaces the original xrange(2, sys.maxint),
    # which only worked on Python 2 (xrange and sys.maxint are gone in 3).
    i = 2
    while True:
        uni_fn = os.path.join(path, '%s(%d)%s' % (name, i, ext))
        if not os.path.exists(uni_fn):
            return uni_fn
        i += 1
|
def function[add_unique_postfix, parameter[fn]]:
constant[__source__ = 'http://code.activestate.com/recipes/577200-make-unique-file-name/']
if <ast.UnaryOp object at 0x7da18f720550> begin[:]
return[name[fn]]
<ast.Tuple object at 0x7da18f723670> assign[=] call[name[os].path.split, parameter[name[fn]]]
<ast.Tuple object at 0x7da1b0b720e0> assign[=] call[name[os].path.splitext, parameter[name[name]]]
variable[make_fn] assign[=] <ast.Lambda object at 0x7da1b0b73640>
for taget[name[i]] in starred[call[name[xrange], parameter[constant[2], name[sys].maxint]]] begin[:]
variable[uni_fn] assign[=] call[name[make_fn], parameter[name[i]]]
if <ast.UnaryOp object at 0x7da207f00c40> begin[:]
return[name[uni_fn]]
|
keyword[def] identifier[add_unique_postfix] ( identifier[fn] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[fn] ):
keyword[return] identifier[fn]
identifier[path] , identifier[name] = identifier[os] . identifier[path] . identifier[split] ( identifier[fn] )
identifier[name] , identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[name] )
identifier[make_fn] = keyword[lambda] identifier[i] : identifier[os] . identifier[path] . identifier[join] ( identifier[path] , literal[string] %( identifier[name] , identifier[i] , identifier[ext] ))
keyword[for] identifier[i] keyword[in] identifier[xrange] ( literal[int] , identifier[sys] . identifier[maxint] ):
identifier[uni_fn] = identifier[make_fn] ( identifier[i] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[uni_fn] ):
keyword[return] identifier[uni_fn]
|
def add_unique_postfix(fn):
"""__source__ = 'http://code.activestate.com/recipes/577200-make-unique-file-name/'"""
if not os.path.exists(fn):
return fn # depends on [control=['if'], data=[]]
(path, name) = os.path.split(fn)
(name, ext) = os.path.splitext(name)
make_fn = lambda i: os.path.join(path, '%s(%d)%s' % (name, i, ext))
for i in xrange(2, sys.maxint):
uni_fn = make_fn(i)
if not os.path.exists(uni_fn):
return uni_fn # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
|
def get_suggestion(self, random_search=False):
    """get suggestion from hyperopt
    Parameters
    ----------
    random_search : bool
        flag to indicate random search or not (default: {False})
    Returns
    ----------
    total_params : dict
        parameter suggestion
    """
    rval = self.rval
    trials = rval.trials
    new_ids = trials.new_trial_ids(1)
    trials.refresh()
    random_state = rval.rstate.randint(2 ** 31 - 1)
    # Random search uses hyperopt's uniform sampler; otherwise defer to
    # the algorithm configured on the hyperopt run object.
    suggest = hp.rand.suggest if random_search else rval.algo
    new_trials = suggest(new_ids, rval.domain, trials, random_state)
    trials.refresh()
    vals = new_trials[0]['misc']['vals']
    parameter = dict()
    for key, value in vals.items():
        try:
            parameter[key] = value[0].item()
        except (KeyError, IndexError):
            # hyperopt leaves unsampled (conditional) parameters empty
            parameter[key] = None
    # remove '_index' from json2parameter and save params-id
    return json2parameter(self.json, parameter)
|
def function[get_suggestion, parameter[self, random_search]]:
constant[get suggestion from hyperopt
Parameters
----------
random_search : bool
flag to indicate random search or not (default: {False})
Returns
----------
total_params : dict
parameter suggestion
]
variable[rval] assign[=] name[self].rval
variable[trials] assign[=] name[rval].trials
variable[algorithm] assign[=] name[rval].algo
variable[new_ids] assign[=] call[name[rval].trials.new_trial_ids, parameter[constant[1]]]
call[name[rval].trials.refresh, parameter[]]
variable[random_state] assign[=] call[name[rval].rstate.randint, parameter[binary_operation[binary_operation[constant[2] ** constant[31]] - constant[1]]]]
if name[random_search] begin[:]
variable[new_trials] assign[=] call[name[hp].rand.suggest, parameter[name[new_ids], name[rval].domain, name[trials], name[random_state]]]
call[name[rval].trials.refresh, parameter[]]
variable[vals] assign[=] call[call[call[name[new_trials]][constant[0]]][constant[misc]]][constant[vals]]
variable[parameter] assign[=] call[name[dict], parameter[]]
for taget[name[key]] in starred[name[vals]] begin[:]
<ast.Try object at 0x7da1b216c640>
variable[total_params] assign[=] call[name[json2parameter], parameter[name[self].json, name[parameter]]]
return[name[total_params]]
|
keyword[def] identifier[get_suggestion] ( identifier[self] , identifier[random_search] = keyword[False] ):
literal[string]
identifier[rval] = identifier[self] . identifier[rval]
identifier[trials] = identifier[rval] . identifier[trials]
identifier[algorithm] = identifier[rval] . identifier[algo]
identifier[new_ids] = identifier[rval] . identifier[trials] . identifier[new_trial_ids] ( literal[int] )
identifier[rval] . identifier[trials] . identifier[refresh] ()
identifier[random_state] = identifier[rval] . identifier[rstate] . identifier[randint] ( literal[int] ** literal[int] - literal[int] )
keyword[if] identifier[random_search] :
identifier[new_trials] = identifier[hp] . identifier[rand] . identifier[suggest] ( identifier[new_ids] , identifier[rval] . identifier[domain] , identifier[trials] , identifier[random_state] )
keyword[else] :
identifier[new_trials] = identifier[algorithm] ( identifier[new_ids] , identifier[rval] . identifier[domain] , identifier[trials] , identifier[random_state] )
identifier[rval] . identifier[trials] . identifier[refresh] ()
identifier[vals] = identifier[new_trials] [ literal[int] ][ literal[string] ][ literal[string] ]
identifier[parameter] = identifier[dict] ()
keyword[for] identifier[key] keyword[in] identifier[vals] :
keyword[try] :
identifier[parameter] [ identifier[key] ]= identifier[vals] [ identifier[key] ][ literal[int] ]. identifier[item] ()
keyword[except] ( identifier[KeyError] , identifier[IndexError] ):
identifier[parameter] [ identifier[key] ]= keyword[None]
identifier[total_params] = identifier[json2parameter] ( identifier[self] . identifier[json] , identifier[parameter] )
keyword[return] identifier[total_params]
|
def get_suggestion(self, random_search=False):
"""get suggestion from hyperopt
Parameters
----------
random_search : bool
flag to indicate random search or not (default: {False})
Returns
----------
total_params : dict
parameter suggestion
"""
rval = self.rval
trials = rval.trials
algorithm = rval.algo
new_ids = rval.trials.new_trial_ids(1)
rval.trials.refresh()
random_state = rval.rstate.randint(2 ** 31 - 1)
if random_search:
new_trials = hp.rand.suggest(new_ids, rval.domain, trials, random_state) # depends on [control=['if'], data=[]]
else:
new_trials = algorithm(new_ids, rval.domain, trials, random_state)
rval.trials.refresh()
vals = new_trials[0]['misc']['vals']
parameter = dict()
for key in vals:
try:
parameter[key] = vals[key][0].item() # depends on [control=['try'], data=[]]
except (KeyError, IndexError):
parameter[key] = None # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['key']]
# remove '_index' from json2parameter and save params-id
total_params = json2parameter(self.json, parameter)
return total_params
|
def idle_task(self):
    '''handle missing waypoints'''
    if self.wp_period.trigger():
        # cope with packet loss fetching mission
        expected = getattr(self.wploader, 'expected_count', 0)
        if (self.master is not None
                and self.master.time_since('MISSION_ITEM') >= 2
                and self.wploader.count() < expected):
            wps = self.missing_wps_to_request()
            print("re-requesting WPs %s" % str(wps))
            self.send_wp_requests(wps)
    # Lazily register our menu with the console/map modules once they load.
    console = self.module('console')
    if console is not None and not self.menu_added_console:
        self.menu_added_console = True
        console.add_menu(self.menu)
    map_module = self.module('map')
    if map_module is not None and not self.menu_added_map:
        self.menu_added_map = True
        map_module.add_menu(self.menu)
|
def function[idle_task, parameter[self]]:
constant[handle missing waypoints]
if call[name[self].wp_period.trigger, parameter[]] begin[:]
if <ast.BoolOp object at 0x7da18f722e90> begin[:]
variable[wps] assign[=] call[name[self].missing_wps_to_request, parameter[]]
call[name[print], parameter[binary_operation[constant[re-requesting WPs %s] <ast.Mod object at 0x7da2590d6920> call[name[str], parameter[name[wps]]]]]]
call[name[self].send_wp_requests, parameter[name[wps]]]
if <ast.BoolOp object at 0x7da20c991690> begin[:]
name[self].menu_added_console assign[=] constant[True]
call[call[name[self].module, parameter[constant[console]]].add_menu, parameter[name[self].menu]]
if <ast.BoolOp object at 0x7da20c990dc0> begin[:]
name[self].menu_added_map assign[=] constant[True]
call[call[name[self].module, parameter[constant[map]]].add_menu, parameter[name[self].menu]]
|
keyword[def] identifier[idle_task] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[wp_period] . identifier[trigger] ():
keyword[if] identifier[self] . identifier[master] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[master] . identifier[time_since] ( literal[string] )>= literal[int] keyword[and] identifier[self] . identifier[wploader] . identifier[count] ()< identifier[getattr] ( identifier[self] . identifier[wploader] , literal[string] , literal[int] ):
identifier[wps] = identifier[self] . identifier[missing_wps_to_request] ();
identifier[print] ( literal[string] % identifier[str] ( identifier[wps] ))
identifier[self] . identifier[send_wp_requests] ( identifier[wps] )
keyword[if] identifier[self] . identifier[module] ( literal[string] ) keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[self] . identifier[menu_added_console] :
identifier[self] . identifier[menu_added_console] = keyword[True]
identifier[self] . identifier[module] ( literal[string] ). identifier[add_menu] ( identifier[self] . identifier[menu] )
keyword[if] identifier[self] . identifier[module] ( literal[string] ) keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[self] . identifier[menu_added_map] :
identifier[self] . identifier[menu_added_map] = keyword[True]
identifier[self] . identifier[module] ( literal[string] ). identifier[add_menu] ( identifier[self] . identifier[menu] )
|
def idle_task(self):
"""handle missing waypoints"""
if self.wp_period.trigger():
# cope with packet loss fetching mission
if self.master is not None and self.master.time_since('MISSION_ITEM') >= 2 and (self.wploader.count() < getattr(self.wploader, 'expected_count', 0)):
wps = self.missing_wps_to_request()
print('re-requesting WPs %s' % str(wps))
self.send_wp_requests(wps) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self.module('console') is not None and (not self.menu_added_console):
self.menu_added_console = True
self.module('console').add_menu(self.menu) # depends on [control=['if'], data=[]]
if self.module('map') is not None and (not self.menu_added_map):
self.menu_added_map = True
self.module('map').add_menu(self.menu) # depends on [control=['if'], data=[]]
|
def _get_conda_version(stdout, stderr):
    """Callback for get_conda_version."""
    # argparse outputs version to stderr in Python < 3.4, so check
    # stderr first and fall back to stdout.
    # http://bugs.python.org/issue18920
    pat = re.compile(r'conda:?\s+(\d+\.\d\S+|unknown)')
    m = pat.match(stderr.decode().strip()) or pat.match(stdout.decode().strip())
    if m is None:
        raise Exception('output did not match: {0}'.format(stderr))
    return m.group(1)
|
def function[_get_conda_version, parameter[stdout, stderr]]:
constant[Callback for get_conda_version.]
variable[pat] assign[=] call[name[re].compile, parameter[constant[conda:?\s+(\d+\.\d\S+|unknown)]]]
variable[m] assign[=] call[name[pat].match, parameter[call[call[name[stderr].decode, parameter[]].strip, parameter[]]]]
if compare[name[m] is constant[None]] begin[:]
variable[m] assign[=] call[name[pat].match, parameter[call[call[name[stdout].decode, parameter[]].strip, parameter[]]]]
if compare[name[m] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b2766380>
return[call[name[m].group, parameter[constant[1]]]]
|
keyword[def] identifier[_get_conda_version] ( identifier[stdout] , identifier[stderr] ):
literal[string]
identifier[pat] = identifier[re] . identifier[compile] ( literal[string] )
identifier[m] = identifier[pat] . identifier[match] ( identifier[stderr] . identifier[decode] (). identifier[strip] ())
keyword[if] identifier[m] keyword[is] keyword[None] :
identifier[m] = identifier[pat] . identifier[match] ( identifier[stdout] . identifier[decode] (). identifier[strip] ())
keyword[if] identifier[m] keyword[is] keyword[None] :
keyword[raise] identifier[Exception] ( literal[string] . identifier[format] ( identifier[stderr] ))
keyword[return] identifier[m] . identifier[group] ( literal[int] )
|
def _get_conda_version(stdout, stderr):
"""Callback for get_conda_version."""
# argparse outputs version to stderr in Python < 3.4.
# http://bugs.python.org/issue18920
pat = re.compile('conda:?\\s+(\\d+\\.\\d\\S+|unknown)')
m = pat.match(stderr.decode().strip())
if m is None:
m = pat.match(stdout.decode().strip()) # depends on [control=['if'], data=['m']]
if m is None:
raise Exception('output did not match: {0}'.format(stderr)) # depends on [control=['if'], data=[]]
return m.group(1)
|
def verifypartialtablecell(self, window_name, object_name, row_index,
                           column_index, row_text):
    """
    Verify partial table cell value
    @param window_name: Window name to type in, either full name,
    LDTP's name convention, or a Unix glob.
    @type window_name: string
    @param object_name: Object name to type in, either full name,
    LDTP's name convention, or a Unix glob.
    @type object_name: string
    @param row_index: Row index to get
    @type row_index: integer
    @param column_index: Column index to get, default value 0
    @type column_index: integer
    @param row_text: Row text to match
    @type string
    @return: 1 on success 0 on failure.
    @rtype: integer
    """
    try:
        value = getcellvalue(window_name, object_name, row_index, column_index)
        # re.search performs the partial (substring/regex) match; the
        # stdlib re module has no 'searchmatch' function, so the original
        # call raised AttributeError instead of comparing.
        if re.search(row_text, value):
            return 1
    except LdtpServerException:
        pass
    return 0
|
def function[verifypartialtablecell, parameter[self, window_name, object_name, row_index, column_index, row_text]]:
constant[
Verify partial table cell value
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param row_index: Row index to get
@type row_index: integer
@param column_index: Column index to get, default value 0
@type column_index: integer
@param row_text: Row text to match
@type string
@return: 1 on success 0 on failure.
@rtype: integer
]
<ast.Try object at 0x7da20c7cb1c0>
return[constant[0]]
|
keyword[def] identifier[verifypartialtablecell] ( identifier[self] , identifier[window_name] , identifier[object_name] , identifier[row_index] ,
identifier[column_index] , identifier[row_text] ):
literal[string]
keyword[try] :
identifier[value] = identifier[getcellvalue] ( identifier[window_name] , identifier[object_name] , identifier[row_index] , identifier[column_index] )
keyword[if] identifier[re] . identifier[searchmatch] ( identifier[row_text] , identifier[value] ):
keyword[return] literal[int]
keyword[except] identifier[LdtpServerException] :
keyword[pass]
keyword[return] literal[int]
|
def verifypartialtablecell(self, window_name, object_name, row_index, column_index, row_text):
"""
Verify partial table cell value
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param row_index: Row index to get
@type row_index: integer
@param column_index: Column index to get, default value 0
@type column_index: integer
@param row_text: Row text to match
@type string
@return: 1 on success 0 on failure.
@rtype: integer
"""
try:
value = getcellvalue(window_name, object_name, row_index, column_index)
if re.searchmatch(row_text, value):
return 1 # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except LdtpServerException:
pass # depends on [control=['except'], data=[]]
return 0
|
def get_registered_models(self, abstract=True):
    """
    Returns a list of all registered models, or just concrete
    registered models.
    """
    models = []
    for model, opts in self._registry.items():
        if not opts.registered:
            continue
        # Abstract models are included only when requested.
        if model._meta.abstract and not abstract:
            continue
        models.append(model)
    return models
|
def function[get_registered_models, parameter[self, abstract]]:
constant[
Returns a list of all registered models, or just concrete
registered models.
]
return[<ast.ListComp object at 0x7da18f58ce80>]
|
keyword[def] identifier[get_registered_models] ( identifier[self] , identifier[abstract] = keyword[True] ):
literal[string]
keyword[return] [ identifier[model] keyword[for] ( identifier[model] , identifier[opts] ) keyword[in] identifier[self] . identifier[_registry] . identifier[items] ()
keyword[if] identifier[opts] . identifier[registered] keyword[and] ( keyword[not] identifier[model] . identifier[_meta] . identifier[abstract] keyword[or] identifier[abstract] )]
|
def get_registered_models(self, abstract=True):
"""
Returns a list of all registered models, or just concrete
registered models.
"""
return [model for (model, opts) in self._registry.items() if opts.registered and (not model._meta.abstract or abstract)]
|
def patch_connection_file_paths(connection: str) -> str:
    """
    Patch any paths in a connection to remove the balena host paths
    Undoes the changes applied by
    :py:meth:`opentrons.system.nmcli._rewrite_key_path_to_host_path`
    :param connection: The contents of a NetworkManager connection file
    :return: The patched contents, suitable for writing somewhere
    """
    new_conn_lines = []
    for line in connection.split('\n'):
        if '=' in line:
            # Split on the first '=' only so values that themselves
            # contain '=' (e.g. base64-encoded keys, cert blobs) are
            # not truncated when the line is rebuilt.
            key, value = line.split('=', 1)
            path_matches = re.search(
                '/mnt/data/resin-data/[0-9]+/(.*)', value)
            if path_matches:
                new_path = f'/data/{path_matches.group(1)}'
                new_conn_lines.append(
                    '='.join([key, new_path]))
                LOG.info(
                    f"migrate_connection_file: {key}: "
                    f"{value}->{new_path}")
                continue
        new_conn_lines.append(line)
    return '\n'.join(new_conn_lines)
|
def function[patch_connection_file_paths, parameter[connection]]:
constant[
Patch any paths in a connection to remove the balena host paths
Undoes the changes applied by
:py:meth:`opentrons.system.nmcli._rewrite_key_path_to_host_path`
:param connection: The contents of a NetworkManager connection file
:return: The patches contents, suitable for writing somewher
]
variable[new_conn_lines] assign[=] list[[]]
for taget[name[line]] in starred[call[name[connection].split, parameter[constant[
]]]] begin[:]
if compare[constant[=] in name[line]] begin[:]
variable[parts] assign[=] call[name[line].split, parameter[constant[=]]]
variable[path_matches] assign[=] call[name[re].search, parameter[constant[/mnt/data/resin-data/[0-9]+/(.*)], call[name[parts]][constant[1]]]]
if name[path_matches] begin[:]
variable[new_path] assign[=] <ast.JoinedStr object at 0x7da18ede4310>
call[name[new_conn_lines].append, parameter[call[constant[=].join, parameter[list[[<ast.Subscript object at 0x7da1b08a0970>, <ast.Name object at 0x7da1b08a04c0>]]]]]]
call[name[LOG].info, parameter[<ast.JoinedStr object at 0x7da1b08a3280>]]
continue
call[name[new_conn_lines].append, parameter[name[line]]]
return[call[constant[
].join, parameter[name[new_conn_lines]]]]
|
keyword[def] identifier[patch_connection_file_paths] ( identifier[connection] : identifier[str] )-> identifier[str] :
literal[string]
identifier[new_conn_lines] =[]
keyword[for] identifier[line] keyword[in] identifier[connection] . identifier[split] ( literal[string] ):
keyword[if] literal[string] keyword[in] identifier[line] :
identifier[parts] = identifier[line] . identifier[split] ( literal[string] )
identifier[path_matches] = identifier[re] . identifier[search] (
literal[string] , identifier[parts] [ literal[int] ])
keyword[if] identifier[path_matches] :
identifier[new_path] = literal[string]
identifier[new_conn_lines] . identifier[append] (
literal[string] . identifier[join] ([ identifier[parts] [ literal[int] ], identifier[new_path] ]))
identifier[LOG] . identifier[info] (
literal[string]
literal[string] )
keyword[continue]
identifier[new_conn_lines] . identifier[append] ( identifier[line] )
keyword[return] literal[string] . identifier[join] ( identifier[new_conn_lines] )
|
def patch_connection_file_paths(connection: str) -> str:
"""
Patch any paths in a connection to remove the balena host paths
Undoes the changes applied by
:py:meth:`opentrons.system.nmcli._rewrite_key_path_to_host_path`
:param connection: The contents of a NetworkManager connection file
:return: The patches contents, suitable for writing somewher
"""
new_conn_lines = []
for line in connection.split('\n'):
if '=' in line:
parts = line.split('=')
path_matches = re.search('/mnt/data/resin-data/[0-9]+/(.*)', parts[1])
if path_matches:
new_path = f'/data/{path_matches.group(1)}'
new_conn_lines.append('='.join([parts[0], new_path]))
LOG.info(f'migrate_connection_file: {parts[0]}: {parts[1]}->{new_path}')
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['line']]
new_conn_lines.append(line) # depends on [control=['for'], data=['line']]
return '\n'.join(new_conn_lines)
|
def triangle_normal(tri):
    """ Computes the (approximate) normal vector of the input triangle.
    :param tri: triangle object
    :type tri: elements.Triangle
    :return: normal vector of the triangle
    :rtype: tuple
    """
    v0, v1, v2 = tri.vertices[0], tri.vertices[1], tri.vertices[2]
    # Normal is the cross product of two consecutive edge vectors.
    edge_a = vector_generate(v0.data, v1.data)
    edge_b = vector_generate(v1.data, v2.data)
    return vector_cross(edge_a, edge_b)
|
def function[triangle_normal, parameter[tri]]:
constant[ Computes the (approximate) normal vector of the input triangle.
:param tri: triangle object
:type tri: elements.Triangle
:return: normal vector of the triangle
:rtype: tuple
]
variable[vec1] assign[=] call[name[vector_generate], parameter[call[name[tri].vertices][constant[0]].data, call[name[tri].vertices][constant[1]].data]]
variable[vec2] assign[=] call[name[vector_generate], parameter[call[name[tri].vertices][constant[1]].data, call[name[tri].vertices][constant[2]].data]]
return[call[name[vector_cross], parameter[name[vec1], name[vec2]]]]
|
keyword[def] identifier[triangle_normal] ( identifier[tri] ):
literal[string]
identifier[vec1] = identifier[vector_generate] ( identifier[tri] . identifier[vertices] [ literal[int] ]. identifier[data] , identifier[tri] . identifier[vertices] [ literal[int] ]. identifier[data] )
identifier[vec2] = identifier[vector_generate] ( identifier[tri] . identifier[vertices] [ literal[int] ]. identifier[data] , identifier[tri] . identifier[vertices] [ literal[int] ]. identifier[data] )
keyword[return] identifier[vector_cross] ( identifier[vec1] , identifier[vec2] )
|
def triangle_normal(tri):
""" Computes the (approximate) normal vector of the input triangle.
:param tri: triangle object
:type tri: elements.Triangle
:return: normal vector of the triangle
:rtype: tuple
"""
vec1 = vector_generate(tri.vertices[0].data, tri.vertices[1].data)
vec2 = vector_generate(tri.vertices[1].data, tri.vertices[2].data)
return vector_cross(vec1, vec2)
|
def has(self, name):
    """
    Returns True if there is at least one annotation by a given name, otherwise False.
    """
    # any() short-circuits on the first match, same as the original loop.
    return any(a.name == name for a in self.all_annotations)
|
def function[has, parameter[self, name]]:
constant[
Returns True if there is atleast one annotation by a given name, otherwise False.
]
for taget[name[a]] in starred[name[self].all_annotations] begin[:]
if compare[name[a].name equal[==] name[name]] begin[:]
return[constant[True]]
return[constant[False]]
|
keyword[def] identifier[has] ( identifier[self] , identifier[name] ):
literal[string]
keyword[for] identifier[a] keyword[in] identifier[self] . identifier[all_annotations] :
keyword[if] identifier[a] . identifier[name] == identifier[name] :
keyword[return] keyword[True]
keyword[return] keyword[False]
|
def has(self, name):
"""
Returns True if there is atleast one annotation by a given name, otherwise False.
"""
for a in self.all_annotations:
if a.name == name:
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['a']]
return False
|
def recurse(self):
    """Recurse into the tree.
    :returns: :class:`Tree <Tree>`
    """
    response = self._get(self._api, params={'recursive': '1'})
    json = self._json(response, 200)
    if not json:
        return None
    return Tree(json, self._session)
|
def function[recurse, parameter[self]]:
constant[Recurse into the tree.
:returns: :class:`Tree <Tree>`
]
variable[json] assign[=] call[name[self]._json, parameter[call[name[self]._get, parameter[name[self]._api]], constant[200]]]
return[<ast.IfExp object at 0x7da1b0fbb5b0>]
|
keyword[def] identifier[recurse] ( identifier[self] ):
literal[string]
identifier[json] = identifier[self] . identifier[_json] ( identifier[self] . identifier[_get] ( identifier[self] . identifier[_api] , identifier[params] ={ literal[string] : literal[string] }),
literal[int] )
keyword[return] identifier[Tree] ( identifier[json] , identifier[self] . identifier[_session] ) keyword[if] identifier[json] keyword[else] keyword[None]
|
def recurse(self):
"""Recurse into the tree.
:returns: :class:`Tree <Tree>`
"""
json = self._json(self._get(self._api, params={'recursive': '1'}), 200)
return Tree(json, self._session) if json else None
|
def copy_with_additional_key(self, key: Any) -> 'RandomnessStream':
    """Creates a copy of this stream that combines this streams key with a new one.
    Parameters
    ----------
    key :
        The additional key to describe the new stream with.
    Returns
    -------
    RandomnessStream
        A new RandomnessStream with a combined key.
    """
    # streams reserved for initialization must never be duplicated
    if self._for_initialization:
        raise RandomnessError('Initialization streams cannot be copied.')
    if self._manager:
        # managed streams are created through the manager so bookkeeping stays consistent
        combined_key = '_'.join([self.key, key])
        return self._manager.get_randomness_stream(combined_key)
    # unmanaged stream: build the copy directly from this stream's state
    return RandomnessStream(self.key, self.clock, self.seed, self.index_map)
|
def function[copy_with_additional_key, parameter[self, key]]:
constant[Creates a copy of this stream that combines this streams key with a new one.
Parameters
----------
key :
The additional key to describe the new stream with.
Returns
-------
RandomnessStream
A new RandomnessStream with a combined key.
]
if name[self]._for_initialization begin[:]
<ast.Raise object at 0x7da18f7219f0>
|
keyword[def] identifier[copy_with_additional_key] ( identifier[self] , identifier[key] : identifier[Any] )-> literal[string] :
literal[string]
keyword[if] identifier[self] . identifier[_for_initialization] :
keyword[raise] identifier[RandomnessError] ( literal[string] )
keyword[elif] identifier[self] . identifier[_manager] :
keyword[return] identifier[self] . identifier[_manager] . identifier[get_randomness_stream] ( literal[string] . identifier[join] ([ identifier[self] . identifier[key] , identifier[key] ]))
keyword[else] :
keyword[return] identifier[RandomnessStream] ( identifier[self] . identifier[key] , identifier[self] . identifier[clock] , identifier[self] . identifier[seed] , identifier[self] . identifier[index_map] )
|
def copy_with_additional_key(self, key: Any) -> 'RandomnessStream':
"""Creates a copy of this stream that combines this streams key with a new one.
Parameters
----------
key :
The additional key to describe the new stream with.
Returns
-------
RandomnessStream
A new RandomnessStream with a combined key.
"""
if self._for_initialization:
raise RandomnessError('Initialization streams cannot be copied.') # depends on [control=['if'], data=[]]
elif self._manager:
return self._manager.get_randomness_stream('_'.join([self.key, key])) # depends on [control=['if'], data=[]]
else:
return RandomnessStream(self.key, self.clock, self.seed, self.index_map)
|
def get_device_state(self, device, id_override=None, type_override=None):
    """
    Get device state via online API.
    Args:
        device (WinkDevice): The device the change is being requested for.
        id_override (String, optional): A device ID used to override the
            passed in device's ID. Used to make changes on sub-devices.
            i.e. Outlet in a Powerstrip. The Parent device's ID.
        type_override (String, optional): Used to override the device type
            when a device inherits from a device other than WinkDevice.
    Returns:
        response_json (Dict): The API's response in dictionary format
    """
    _LOGGER.info("Getting state via online API")
    # fall back to the device's own identity unless an override is supplied
    object_id = id_override or device.object_id()
    object_type = type_override or device.object_type()
    url_string = "/".join([self.BASE_URL, object_type + "s", object_id])
    response = requests.get(url_string, headers=API_HEADERS)
    response_json = response.json()
    _LOGGER.debug('%s', response_json)
    return response_json
|
def function[get_device_state, parameter[self, device, id_override, type_override]]:
constant[
Get device state via online API.
Args:
device (WinkDevice): The device the change is being requested for.
id_override (String, optional): A device ID used to override the
passed in device's ID. Used to make changes on sub-devices.
i.e. Outlet in a Powerstrip. The Parent device's ID.
type_override (String, optional): Used to override the device type
when a device inherits from a device other than WinkDevice.
Returns:
response_json (Dict): The API's response in dictionary format
]
call[name[_LOGGER].info, parameter[constant[Getting state via online API]]]
variable[object_id] assign[=] <ast.BoolOp object at 0x7da1b2630e50>
variable[object_type] assign[=] <ast.BoolOp object at 0x7da1b2630160>
variable[url_string] assign[=] call[constant[{}/{}s/{}].format, parameter[name[self].BASE_URL, name[object_type], name[object_id]]]
variable[arequest] assign[=] call[name[requests].get, parameter[name[url_string]]]
variable[response_json] assign[=] call[name[arequest].json, parameter[]]
call[name[_LOGGER].debug, parameter[constant[%s], name[response_json]]]
return[name[response_json]]
|
keyword[def] identifier[get_device_state] ( identifier[self] , identifier[device] , identifier[id_override] = keyword[None] , identifier[type_override] = keyword[None] ):
literal[string]
identifier[_LOGGER] . identifier[info] ( literal[string] )
identifier[object_id] = identifier[id_override] keyword[or] identifier[device] . identifier[object_id] ()
identifier[object_type] = identifier[type_override] keyword[or] identifier[device] . identifier[object_type] ()
identifier[url_string] = literal[string] . identifier[format] ( identifier[self] . identifier[BASE_URL] ,
identifier[object_type] , identifier[object_id] )
identifier[arequest] = identifier[requests] . identifier[get] ( identifier[url_string] , identifier[headers] = identifier[API_HEADERS] )
identifier[response_json] = identifier[arequest] . identifier[json] ()
identifier[_LOGGER] . identifier[debug] ( literal[string] , identifier[response_json] )
keyword[return] identifier[response_json]
|
def get_device_state(self, device, id_override=None, type_override=None):
"""
Get device state via online API.
Args:
device (WinkDevice): The device the change is being requested for.
id_override (String, optional): A device ID used to override the
passed in device's ID. Used to make changes on sub-devices.
i.e. Outlet in a Powerstrip. The Parent device's ID.
type_override (String, optional): Used to override the device type
when a device inherits from a device other than WinkDevice.
Returns:
response_json (Dict): The API's response in dictionary format
"""
_LOGGER.info('Getting state via online API')
object_id = id_override or device.object_id()
object_type = type_override or device.object_type()
url_string = '{}/{}s/{}'.format(self.BASE_URL, object_type, object_id)
arequest = requests.get(url_string, headers=API_HEADERS)
response_json = arequest.json()
_LOGGER.debug('%s', response_json)
return response_json
|
def get_context_data(self, **kwargs):
    """ Returns the context data to provide to the template. """
    # extend the parent context with the poster being displayed
    context = super().get_context_data(**kwargs)
    context.update(poster=self.poster)
    return context
|
def function[get_context_data, parameter[self]]:
constant[ Returns the context data to provide to the template. ]
variable[context] assign[=] call[call[name[super], parameter[]].get_context_data, parameter[]]
call[name[context]][constant[poster]] assign[=] name[self].poster
return[name[context]]
|
keyword[def] identifier[get_context_data] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[context] = identifier[super] (). identifier[get_context_data] (** identifier[kwargs] )
identifier[context] [ literal[string] ]= identifier[self] . identifier[poster]
keyword[return] identifier[context]
|
def get_context_data(self, **kwargs):
""" Returns the context data to provide to the template. """
context = super().get_context_data(**kwargs)
context['poster'] = self.poster
return context
|
def start(self, activity, action):
    '''
    Mark an action as started
    :param activity: The virtualenv activity name
    :type activity: ``str``
    :param action: The virtualenv action
    :type action: :class:`tox.session.Action`
    '''
    try:
        self._start_action(activity, action)
    except ValueError:
        # the action may not be registered for this env; log and carry on
        message = "Could not find action %s in env %s" % (activity, self.name)
        retox_log.debug(message)
    # always repaint the screen, whether or not the action was found
    self.refresh()
|
def function[start, parameter[self, activity, action]]:
constant[
Mark an action as started
:param activity: The virtualenv activity name
:type activity: ``str``
:param action: The virtualenv action
:type action: :class:`tox.session.Action`
]
<ast.Try object at 0x7da2047eb190>
call[name[self].refresh, parameter[]]
|
keyword[def] identifier[start] ( identifier[self] , identifier[activity] , identifier[action] ):
literal[string]
keyword[try] :
identifier[self] . identifier[_start_action] ( identifier[activity] , identifier[action] )
keyword[except] identifier[ValueError] :
identifier[retox_log] . identifier[debug] ( literal[string] %( identifier[activity] , identifier[self] . identifier[name] ))
identifier[self] . identifier[refresh] ()
|
def start(self, activity, action):
"""
Mark an action as started
:param activity: The virtualenv activity name
:type activity: ``str``
:param action: The virtualenv action
:type action: :class:`tox.session.Action`
"""
try:
self._start_action(activity, action) # depends on [control=['try'], data=[]]
except ValueError:
retox_log.debug('Could not find action %s in env %s' % (activity, self.name)) # depends on [control=['except'], data=[]]
self.refresh()
|
def get_group_id(self, user_id):
    """
    Get the ID of the group a user belongs to.

    Reference:
    http://mp.weixin.qq.com/wiki/0/56d992c605a97245eb7e617854b169fc.html

    :param user_id: user ID (openid)
    :return: ID of the group the user belongs to

    Usage example::

        from wechatpy import WeChatClient

        client = WeChatClient('appid', 'secret')
        group_id = client.user.get_group_id('openid')
    """
    payload = {'openid': user_id}
    # the API wraps the ID in a JSON object; unwrap it before returning
    return self._post(
        'groups/getid',
        data=payload,
        result_processor=lambda resp: resp['groupid']
    )
|
def function[get_group_id, parameter[self, user_id]]:
constant[
获取用户所在分组 ID
详情请参考
http://mp.weixin.qq.com/wiki/0/56d992c605a97245eb7e617854b169fc.html
:param user_id: 用户 ID
:return: 用户所在分组 ID
使用示例::
from wechatpy import WeChatClient
client = WeChatClient('appid', 'secret')
group_id = client.user.get_group_id('openid')
]
variable[res] assign[=] call[name[self]._post, parameter[constant[groups/getid]]]
return[name[res]]
|
keyword[def] identifier[get_group_id] ( identifier[self] , identifier[user_id] ):
literal[string]
identifier[res] = identifier[self] . identifier[_post] (
literal[string] ,
identifier[data] ={ literal[string] : identifier[user_id] },
identifier[result_processor] = keyword[lambda] identifier[x] : identifier[x] [ literal[string] ]
)
keyword[return] identifier[res]
|
def get_group_id(self, user_id):
"""
获取用户所在分组 ID
详情请参考
http://mp.weixin.qq.com/wiki/0/56d992c605a97245eb7e617854b169fc.html
:param user_id: 用户 ID
:return: 用户所在分组 ID
使用示例::
from wechatpy import WeChatClient
client = WeChatClient('appid', 'secret')
group_id = client.user.get_group_id('openid')
"""
res = self._post('groups/getid', data={'openid': user_id}, result_processor=lambda x: x['groupid'])
return res
|
def to_normal_cloud(self):
    """Convert the image to a NormalCloud object.
    Returns
    -------
    :obj:`autolab_core.NormalCloud`
        The corresponding NormalCloud.
    """
    # flatten the pixel grid to (num_pixels, 3), then transpose to 3 x N
    num_points = self.height * self.width
    flat = self._data.reshape(num_points, 3)
    return NormalCloud(data=flat.T, frame=self._frame)
|
def function[to_normal_cloud, parameter[self]]:
constant[Convert the image to a NormalCloud object.
Returns
-------
:obj:`autolab_core.NormalCloud`
The corresponding NormalCloud.
]
return[call[name[NormalCloud], parameter[]]]
|
keyword[def] identifier[to_normal_cloud] ( identifier[self] ):
literal[string]
keyword[return] identifier[NormalCloud] (
identifier[data] = identifier[self] . identifier[_data] . identifier[reshape] (
identifier[self] . identifier[height] *
identifier[self] . identifier[width] ,
literal[int] ). identifier[T] ,
identifier[frame] = identifier[self] . identifier[_frame] )
|
def to_normal_cloud(self):
"""Convert the image to a NormalCloud object.
Returns
-------
:obj:`autolab_core.NormalCloud`
The corresponding NormalCloud.
"""
return NormalCloud(data=self._data.reshape(self.height * self.width, 3).T, frame=self._frame)
|
def descending(self, name):
    ''' Add a descending index for ``name`` to this index.
    :param name: Name to be used in the index
    '''
    entry = (name, Index.DESCENDING)
    self.components.append(entry)
    # return self so calls can be chained fluently
    return self
|
def function[descending, parameter[self, name]]:
constant[ Add a descending index for ``name`` to this index.
:param name: Name to be used in the index
]
call[name[self].components.append, parameter[tuple[[<ast.Name object at 0x7da207f9b3d0>, <ast.Attribute object at 0x7da207f99e10>]]]]
return[name[self]]
|
keyword[def] identifier[descending] ( identifier[self] , identifier[name] ):
literal[string]
identifier[self] . identifier[components] . identifier[append] (( identifier[name] , identifier[Index] . identifier[DESCENDING] ))
keyword[return] identifier[self]
|
def descending(self, name):
""" Add a descending index for ``name`` to this index.
:param name: Name to be used in the index
"""
self.components.append((name, Index.DESCENDING))
return self
|
def get_barcode_stats(self, barcode):
    """
    Loads the JSON in a ${barcode}_stats.json file in the DNAnexus project (usually in the qc
    folder).
    """
    stats_name = barcode + "_stats.json"
    # NOTE: more_ok is deliberately left at its default here - passing
    # more_ok=False raised a RuntimeError under Python 3.7 with dxpy,
    # which is still primarily a Python 2.7 library.
    match = dxpy.find_one_data_object(
        zero_ok=False,
        project=self.dx_project_id,
        name=stats_name)
    handle = dxpy.open_dxfile(match["id"])
    return json.loads(handle.read())
|
def function[get_barcode_stats, parameter[self, barcode]]:
constant[
Loads the JSON in a ${barcode}_stats.json file in the DNAnexus project (usually in the qc
folder).
]
variable[filename] assign[=] binary_operation[name[barcode] + constant[_stats.json]]
variable[file_id] assign[=] call[call[name[dxpy].find_one_data_object, parameter[]]][constant[id]]
variable[json_data] assign[=] call[name[json].loads, parameter[call[call[name[dxpy].open_dxfile, parameter[name[file_id]]].read, parameter[]]]]
return[name[json_data]]
|
keyword[def] identifier[get_barcode_stats] ( identifier[self] , identifier[barcode] ):
literal[string]
identifier[filename] = identifier[barcode] + literal[string]
identifier[file_id] = identifier[dxpy] . identifier[find_one_data_object] (
identifier[zero_ok] = keyword[False] ,
identifier[project] = identifier[self] . identifier[dx_project_id] ,
identifier[name] = identifier[filename] )[ literal[string] ]
identifier[json_data] = identifier[json] . identifier[loads] ( identifier[dxpy] . identifier[open_dxfile] ( identifier[file_id] ). identifier[read] ())
keyword[return] identifier[json_data]
|
def get_barcode_stats(self, barcode):
"""
Loads the JSON in a ${barcode}_stats.json file in the DNAnexus project (usually in the qc
folder).
"""
filename = barcode + '_stats.json' # In the call to dxpy.find_one_data_object() below, I'd normally set the
# more_ok parameter to False, but this blows-up in Python 3.7 - giving me a RuntimeError.
# So, I just won't set it for now. I think dxpy is still mainly a Python 2.7 library and
# can break in later version of Python3.
file_id = dxpy.find_one_data_object(zero_ok=False, project=self.dx_project_id, name=filename)['id']
json_data = json.loads(dxpy.open_dxfile(file_id).read())
return json_data
|
def momentum_core(params, gradients, momentum=0.9, learning_rate=0.01):
    """
    Momentum SGD optimization core.
    """
    velocities = []
    updates = []
    for param, grad in zip(params, gradients):
        # one shared velocity accumulator per parameter
        vel = theano.shared(np.zeros_like(param.get_value()), name=param.name + '_vel')
        step = learning_rate * grad
        # decay the velocity and subtract the current gradient step,
        # then move the parameter along the (old) velocity
        updates.append((vel, momentum * vel - step))
        updates.append((param, param + vel))
        velocities.append(vel)
    return updates, velocities
|
def function[momentum_core, parameter[params, gradients, momentum, learning_rate]]:
constant[
Momentum SGD optimization core.
]
variable[free_parameters] assign[=] list[[]]
variable[updates] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b05c7910>, <ast.Name object at 0x7da1b05c6320>]]] in starred[call[name[zip], parameter[name[params], name[gradients]]]] begin[:]
variable[delta] assign[=] binary_operation[name[learning_rate] * name[grad]]
variable[velocity] assign[=] call[name[theano].shared, parameter[call[name[np].zeros_like, parameter[call[name[param].get_value, parameter[]]]]]]
call[name[updates].append, parameter[tuple[[<ast.Name object at 0x7da1b05c7a60>, <ast.BinOp object at 0x7da1b05c4580>]]]]
call[name[updates].append, parameter[tuple[[<ast.Name object at 0x7da1b05c4a60>, <ast.BinOp object at 0x7da1b05c7b20>]]]]
call[name[free_parameters].append, parameter[name[velocity]]]
return[tuple[[<ast.Name object at 0x7da1b05c4af0>, <ast.Name object at 0x7da1b05c68c0>]]]
|
keyword[def] identifier[momentum_core] ( identifier[params] , identifier[gradients] , identifier[momentum] = literal[int] , identifier[learning_rate] = literal[int] ):
literal[string]
identifier[free_parameters] =[]
identifier[updates] =[]
keyword[for] identifier[param] , identifier[grad] keyword[in] identifier[zip] ( identifier[params] , identifier[gradients] ):
identifier[delta] = identifier[learning_rate] * identifier[grad]
identifier[velocity] = identifier[theano] . identifier[shared] ( identifier[np] . identifier[zeros_like] ( identifier[param] . identifier[get_value] ()), identifier[name] = identifier[param] . identifier[name] + literal[string] )
identifier[updates] . identifier[append] (( identifier[velocity] , identifier[momentum] * identifier[velocity] - identifier[delta] ))
identifier[updates] . identifier[append] (( identifier[param] , identifier[param] + identifier[velocity] ))
identifier[free_parameters] . identifier[append] ( identifier[velocity] )
keyword[return] identifier[updates] , identifier[free_parameters]
|
def momentum_core(params, gradients, momentum=0.9, learning_rate=0.01):
"""
Momentum SGD optimization core.
"""
free_parameters = []
updates = []
for (param, grad) in zip(params, gradients):
delta = learning_rate * grad
velocity = theano.shared(np.zeros_like(param.get_value()), name=param.name + '_vel')
updates.append((velocity, momentum * velocity - delta))
updates.append((param, param + velocity))
free_parameters.append(velocity) # depends on [control=['for'], data=[]]
return (updates, free_parameters)
|
def write_obs_summary_table(self, filename=None, group_names=None):
    """write a stand alone observation summary latex table
    Parameters
    ----------
    filename : str
        latex filename.  If None, use <case>.obs.tex.  If "none", no file
        is written.  Default is None
    group_names: dict
        obs group names : table names, for example {"w0":"well stress period 1"}.
        The mapping is read-only (the passed dict is not modified).
        Default is None
    Returns
    -------
    pandas.DataFrame
        the summary table, one row per observation group
    """
    def ffmt(x):
        # fixed-width general-format float used in the range strings
        return "{0:5G}".format(x)
    obs = self.observation_data.copy()
    obsgp = obs.groupby(obs.obgnme).groups
    cols = ["obgnme", "obsval", "nzcount", "zcount", "weight", "stdev", "pe"]
    labels = {"obgnme": "group", "obsval": "value", "nzcount": "non-zero weight",
              "zcount": "zero weight", "weight": "weight",
              "stdev": "standard deviation", "pe": "percent error"}
    # derived stats: stdev is the inverse of weight; percent error is
    # stdev relative to the absolute observed value
    obs.loc[:, "stdev"] = 1.0 / obs.weight
    obs.loc[:, "pe"] = 100.0 * (obs.stdev / obs.obsval.apply(np.abs))
    # zero weights / zero obsvals produce infs above - treat as missing
    # (np.nan instead of the np.NaN alias, which NumPy 2.0 removed)
    obs = obs.replace([np.inf, -np.inf], np.nan)
    data = {c: [] for c in cols}
    for og, onames in obsgp.items():
        obs_g = obs.loc[onames, :]
        data["obgnme"].append(og)
        data["nzcount"].append(obs_g.loc[obs_g.weight > 0.0, :].shape[0])
        data["zcount"].append(obs_g.loc[obs_g.weight == 0.0, :].shape[0])
        for col in cols:
            if col in ["obgnme", "nzcount", "zcount"]:
                continue
            # summarize each numeric column as a single value or a range
            mn = obs_g.loc[:, col].min()
            mx = obs_g.loc[:, col].max()
            if np.isnan(mn) or np.isnan(mx):
                data[col].append("NA")
            elif mn == mx:
                data[col].append(ffmt(mn))
            else:
                data[col].append("{0} to {1}".format(ffmt(mn), ffmt(mx)))
    obsg_df = pd.DataFrame(data=data, index=list(obsgp.keys()))
    obsg_df = obsg_df.loc[:, cols]
    if group_names is not None:
        # use .get() so the caller's mapping is not mutated (previously
        # .pop() silently emptied the passed-in dict as a side effect)
        obsg_df.loc[:, "obgnme"] = obsg_df.obgnme.apply(lambda x: group_names.get(x, x))
    obsg_df.sort_values(by="obgnme", inplace=True, ascending=True)
    obsg_df.columns = obsg_df.columns.map(lambda x: labels[x])
    preamble = '\\documentclass{article}\n\\usepackage{booktabs}\n' + \
               '\\usepackage{pdflscape}\n\\usepackage{longtable}\n' + \
               '\\usepackage{booktabs}\n\\usepackage{nopageno}\n\\begin{document}\n'
    if filename == "none":
        return obsg_df
    if filename is None:
        filename = self.filename.replace(".pst", ".obs.tex")
    with open(filename, 'w') as f:
        f.write(preamble)
        f.write("\\begin{center}\nObservation Summary\n\\end{center}\n")
        f.write("\\begin{center}\n\\begin{landscape}\n")
        f.write("\\setlength{\\LTleft}{-4.0cm}\n")
        obsg_df.to_latex(f, index=False, longtable=True)
        f.write("\\end{landscape}\n")
        f.write("\\end{center}\n")
        f.write("\\end{document}\n")
    return obsg_df
|
def function[write_obs_summary_table, parameter[self, filename, group_names]]:
constant[write a stand alone observation summary latex table
Parameters
----------
filename : str
latex filename. If None, use <case>.par.tex. Default is None
group_names: dict
par group names : table names for example {"w0":"well stress period 1"}.
Default is None
Returns
-------
None
]
variable[ffmt] assign[=] <ast.Lambda object at 0x7da1b1d6f370>
variable[obs] assign[=] call[name[self].observation_data.copy, parameter[]]
variable[obsgp] assign[=] call[name[obs].groupby, parameter[name[obs].obgnme]].groups
variable[cols] assign[=] list[[<ast.Constant object at 0x7da1b1d6dc00>, <ast.Constant object at 0x7da1b1d6c400>, <ast.Constant object at 0x7da1b1d6e2c0>, <ast.Constant object at 0x7da1b1d6d180>, <ast.Constant object at 0x7da1b1d6dff0>, <ast.Constant object at 0x7da1b1d6edd0>, <ast.Constant object at 0x7da1b1d6e290>]]
variable[labels] assign[=] dictionary[[<ast.Constant object at 0x7da1b1d6f010>, <ast.Constant object at 0x7da1b1d6d900>, <ast.Constant object at 0x7da1b1d6dc30>, <ast.Constant object at 0x7da1b1d6d990>, <ast.Constant object at 0x7da1b1d6ff40>, <ast.Constant object at 0x7da1b1d6d870>, <ast.Constant object at 0x7da1b1d6dcf0>], [<ast.Constant object at 0x7da1b1d6f850>, <ast.Constant object at 0x7da1b1d6e860>, <ast.Constant object at 0x7da1b1d6e950>, <ast.Constant object at 0x7da1b1d6c9a0>, <ast.Constant object at 0x7da1b1d6c220>, <ast.Constant object at 0x7da1b1d6e260>, <ast.Constant object at 0x7da1b1d6f820>]]
call[name[obs].loc][tuple[[<ast.Slice object at 0x7da1b1d6e380>, <ast.Constant object at 0x7da1b1d6e5c0>]]] assign[=] binary_operation[constant[1.0] / name[obs].weight]
call[name[obs].loc][tuple[[<ast.Slice object at 0x7da1b1d6d240>, <ast.Constant object at 0x7da1b1d6da20>]]] assign[=] binary_operation[constant[100.0] * binary_operation[name[obs].stdev / call[name[obs].obsval.apply, parameter[name[np].abs]]]]
variable[obs] assign[=] call[name[obs].replace, parameter[list[[<ast.Attribute object at 0x7da1b1d6dfc0>, <ast.UnaryOp object at 0x7da1b1d6d420>]], name[np].NaN]]
variable[data] assign[=] <ast.DictComp object at 0x7da1b1d6cb50>
for taget[tuple[[<ast.Name object at 0x7da1b1d6d330>, <ast.Name object at 0x7da1b1d6fbe0>]]] in starred[call[name[obsgp].items, parameter[]]] begin[:]
variable[obs_g] assign[=] call[name[obs].loc][tuple[[<ast.Name object at 0x7da1b1d6ffa0>, <ast.Slice object at 0x7da1b1d6e230>]]]
call[call[name[data]][constant[obgnme]].append, parameter[name[og]]]
call[call[name[data]][constant[nzcount]].append, parameter[call[call[name[obs_g].loc][tuple[[<ast.Compare object at 0x7da1b1d6dc60>, <ast.Slice object at 0x7da1b1d6e650>]]].shape][constant[0]]]]
call[call[name[data]][constant[zcount]].append, parameter[call[call[name[obs_g].loc][tuple[[<ast.Compare object at 0x7da1b1d6d5d0>, <ast.Slice object at 0x7da1b1d6f250>]]].shape][constant[0]]]]
for taget[name[col]] in starred[name[cols]] begin[:]
if compare[name[col] in list[[<ast.Constant object at 0x7da1b1d6c0d0>, <ast.Constant object at 0x7da1b1d6ff70>, <ast.Constant object at 0x7da1b1d6c1c0>]]] begin[:]
continue
variable[mn] assign[=] call[call[name[obs_g].loc][tuple[[<ast.Slice object at 0x7da1b1d6eb00>, <ast.Name object at 0x7da1b1d6fdf0>]]].min, parameter[]]
variable[mx] assign[=] call[call[name[obs_g].loc][tuple[[<ast.Slice object at 0x7da1b1d6d690>, <ast.Name object at 0x7da1b1d6d150>]]].max, parameter[]]
if <ast.BoolOp object at 0x7da1b1d6d750> begin[:]
call[call[name[data]][name[col]].append, parameter[constant[NA]]]
variable[obsg_df] assign[=] call[name[pd].DataFrame, parameter[]]
variable[obsg_df] assign[=] call[name[obsg_df].loc][tuple[[<ast.Slice object at 0x7da1b1d50220>, <ast.Name object at 0x7da1b1d51300>]]]
if compare[name[group_names] is_not constant[None]] begin[:]
call[name[obsg_df].loc][tuple[[<ast.Slice object at 0x7da1b2281de0>, <ast.Constant object at 0x7da1b2281db0>]]] assign[=] call[name[obsg_df].obgnme.apply, parameter[<ast.Lambda object at 0x7da1b2281cc0>]]
call[name[obsg_df].sort_values, parameter[]]
name[obsg_df].columns assign[=] call[name[obsg_df].columns.map, parameter[<ast.Lambda object at 0x7da1b2281780>]]
variable[preamble] assign[=] binary_operation[binary_operation[constant[\documentclass{article}
\usepackage{booktabs}
] + constant[\usepackage{pdflscape}
\usepackage{longtable}
]] + constant[\usepackage{booktabs}
\usepackage{nopageno}
\begin{document}
]]
if compare[name[filename] equal[==] constant[none]] begin[:]
return[name[obsg_df]]
if compare[name[filename] is constant[None]] begin[:]
variable[filename] assign[=] call[name[self].filename.replace, parameter[constant[.pst], constant[.obs.tex]]]
with call[name[open], parameter[name[filename], constant[w]]] begin[:]
call[name[f].write, parameter[name[preamble]]]
call[name[f].write, parameter[constant[\begin{center}
Observation Summary
\end{center}
]]]
call[name[f].write, parameter[constant[\begin{center}
\begin{landscape}
]]]
call[name[f].write, parameter[constant[\setlength{\LTleft}{-4.0cm}
]]]
call[name[obsg_df].to_latex, parameter[name[f]]]
call[name[f].write, parameter[constant[\end{landscape}
]]]
call[name[f].write, parameter[constant[\end{center}
]]]
call[name[f].write, parameter[constant[\end{document}
]]]
return[name[obsg_df]]
|
keyword[def] identifier[write_obs_summary_table] ( identifier[self] , identifier[filename] = keyword[None] , identifier[group_names] = keyword[None] ):
literal[string]
identifier[ffmt] = keyword[lambda] identifier[x] : literal[string] . identifier[format] ( identifier[x] )
identifier[obs] = identifier[self] . identifier[observation_data] . identifier[copy] ()
identifier[obsgp] = identifier[obs] . identifier[groupby] ( identifier[obs] . identifier[obgnme] ). identifier[groups]
identifier[cols] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[labels] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] ,
literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] ,
literal[string] : literal[string] }
identifier[obs] . identifier[loc] [:, literal[string] ]= literal[int] / identifier[obs] . identifier[weight]
identifier[obs] . identifier[loc] [:, literal[string] ]= literal[int] *( identifier[obs] . identifier[stdev] / identifier[obs] . identifier[obsval] . identifier[apply] ( identifier[np] . identifier[abs] ))
identifier[obs] = identifier[obs] . identifier[replace] ([ identifier[np] . identifier[inf] ,- identifier[np] . identifier[inf] ], identifier[np] . identifier[NaN] )
identifier[data] ={ identifier[c] :[] keyword[for] identifier[c] keyword[in] identifier[cols] }
keyword[for] identifier[og] , identifier[onames] keyword[in] identifier[obsgp] . identifier[items] ():
identifier[obs_g] = identifier[obs] . identifier[loc] [ identifier[onames] ,:]
identifier[data] [ literal[string] ]. identifier[append] ( identifier[og] )
identifier[data] [ literal[string] ]. identifier[append] ( identifier[obs_g] . identifier[loc] [ identifier[obs_g] . identifier[weight] > literal[int] ,:]. identifier[shape] [ literal[int] ])
identifier[data] [ literal[string] ]. identifier[append] ( identifier[obs_g] . identifier[loc] [ identifier[obs_g] . identifier[weight] == literal[int] ,:]. identifier[shape] [ literal[int] ])
keyword[for] identifier[col] keyword[in] identifier[cols] :
keyword[if] identifier[col] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
keyword[continue]
identifier[mn] = identifier[obs_g] . identifier[loc] [:, identifier[col] ]. identifier[min] ()
identifier[mx] = identifier[obs_g] . identifier[loc] [:, identifier[col] ]. identifier[max] ()
keyword[if] identifier[np] . identifier[isnan] ( identifier[mn] ) keyword[or] identifier[np] . identifier[isnan] ( identifier[mx] ):
identifier[data] [ identifier[col] ]. identifier[append] ( literal[string] )
keyword[elif] identifier[mn] == identifier[mx] :
identifier[data] [ identifier[col] ]. identifier[append] ( identifier[ffmt] ( identifier[mn] ))
keyword[else] :
identifier[data] [ identifier[col] ]. identifier[append] ( literal[string] . identifier[format] ( identifier[ffmt] ( identifier[mn] ), identifier[ffmt] ( identifier[mx] )))
identifier[obsg_df] = identifier[pd] . identifier[DataFrame] ( identifier[data] = identifier[data] , identifier[index] = identifier[list] ( identifier[obsgp] . identifier[keys] ()))
identifier[obsg_df] = identifier[obsg_df] . identifier[loc] [:, identifier[cols] ]
keyword[if] identifier[group_names] keyword[is] keyword[not] keyword[None] :
identifier[obsg_df] . identifier[loc] [:, literal[string] ]= identifier[obsg_df] . identifier[obgnme] . identifier[apply] ( keyword[lambda] identifier[x] : identifier[group_names] . identifier[pop] ( identifier[x] , identifier[x] ))
identifier[obsg_df] . identifier[sort_values] ( identifier[by] = literal[string] , identifier[inplace] = keyword[True] , identifier[ascending] = keyword[True] )
identifier[obsg_df] . identifier[columns] = identifier[obsg_df] . identifier[columns] . identifier[map] ( keyword[lambda] identifier[x] : identifier[labels] [ identifier[x] ])
identifier[preamble] = literal[string] + literal[string] + literal[string]
keyword[if] identifier[filename] == literal[string] :
keyword[return] identifier[obsg_df]
keyword[if] identifier[filename] keyword[is] keyword[None] :
identifier[filename] = identifier[self] . identifier[filename] . identifier[replace] ( literal[string] , literal[string] )
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[preamble] )
identifier[f] . identifier[write] ( literal[string] )
identifier[f] . identifier[write] ( literal[string] )
identifier[f] . identifier[write] ( literal[string] )
identifier[obsg_df] . identifier[to_latex] ( identifier[f] , identifier[index] = keyword[False] , identifier[longtable] = keyword[True] )
identifier[f] . identifier[write] ( literal[string] )
identifier[f] . identifier[write] ( literal[string] )
identifier[f] . identifier[write] ( literal[string] )
keyword[return] identifier[obsg_df]
|
def write_obs_summary_table(self, filename=None, group_names=None):
"""write a stand alone observation summary latex table
Parameters
----------
filename : str
latex filename. If None, use <case>.par.tex. Default is None
group_names: dict
par group names : table names for example {"w0":"well stress period 1"}.
Default is None
Returns
-------
None
"""
ffmt = lambda x: '{0:5G}'.format(x)
obs = self.observation_data.copy()
obsgp = obs.groupby(obs.obgnme).groups
cols = ['obgnme', 'obsval', 'nzcount', 'zcount', 'weight', 'stdev', 'pe']
labels = {'obgnme': 'group', 'obsval': 'value', 'nzcount': 'non-zero weight', 'zcount': 'zero weight', 'weight': 'weight', 'stdev': 'standard deviation', 'pe': 'percent error'}
obs.loc[:, 'stdev'] = 1.0 / obs.weight
obs.loc[:, 'pe'] = 100.0 * (obs.stdev / obs.obsval.apply(np.abs))
obs = obs.replace([np.inf, -np.inf], np.NaN)
data = {c: [] for c in cols}
for (og, onames) in obsgp.items():
obs_g = obs.loc[onames, :]
data['obgnme'].append(og)
data['nzcount'].append(obs_g.loc[obs_g.weight > 0.0, :].shape[0])
data['zcount'].append(obs_g.loc[obs_g.weight == 0.0, :].shape[0])
for col in cols:
if col in ['obgnme', 'nzcount', 'zcount']:
continue # depends on [control=['if'], data=[]]
#print(col)
mn = obs_g.loc[:, col].min()
mx = obs_g.loc[:, col].max()
if np.isnan(mn) or np.isnan(mx):
data[col].append('NA') # depends on [control=['if'], data=[]]
elif mn == mx:
data[col].append(ffmt(mn)) # depends on [control=['if'], data=['mn']]
else:
data[col].append('{0} to {1}'.format(ffmt(mn), ffmt(mx))) # depends on [control=['for'], data=['col']] # depends on [control=['for'], data=[]]
obsg_df = pd.DataFrame(data=data, index=list(obsgp.keys()))
obsg_df = obsg_df.loc[:, cols]
if group_names is not None:
obsg_df.loc[:, 'obgnme'] = obsg_df.obgnme.apply(lambda x: group_names.pop(x, x)) # depends on [control=['if'], data=['group_names']]
obsg_df.sort_values(by='obgnme', inplace=True, ascending=True)
obsg_df.columns = obsg_df.columns.map(lambda x: labels[x])
preamble = '\\documentclass{article}\n\\usepackage{booktabs}\n' + '\\usepackage{pdflscape}\n\\usepackage{longtable}\n' + '\\usepackage{booktabs}\n\\usepackage{nopageno}\n\\begin{document}\n'
if filename == 'none':
return obsg_df # depends on [control=['if'], data=[]]
if filename is None:
filename = self.filename.replace('.pst', '.obs.tex') # depends on [control=['if'], data=['filename']]
with open(filename, 'w') as f:
f.write(preamble)
f.write('\\begin{center}\nObservation Summary\n\\end{center}\n')
f.write('\\begin{center}\n\\begin{landscape}\n')
f.write('\\setlength{\\LTleft}{-4.0cm}\n')
obsg_df.to_latex(f, index=False, longtable=True)
f.write('\\end{landscape}\n')
f.write('\\end{center}\n')
f.write('\\end{document}\n') # depends on [control=['with'], data=['f']]
return obsg_df
|
def max_event_offset(event_list):
    """Find the offset (end-time) of last event

    Parameters
    ----------
    event_list : list or dcase_util.containers.MetaDataContainer
        A list containing event dicts

    Returns
    -------
    float > 0
        maximum offset
    """
    # MetaDataContainer already knows its own maximum offset.
    if isinstance(event_list, dcase_util.containers.MetaDataContainer):
        return event_list.max_offset
    # Plain list of dicts: scan for the largest end-time, preferring
    # the 'event_offset' key over the legacy 'offset' key per event.
    latest = 0
    for item in event_list:
        if 'event_offset' in item:
            latest = max(latest, item['event_offset'])
        elif 'offset' in item:
            latest = max(latest, item['offset'])
    return latest
|
def function[max_event_offset, parameter[event_list]]:
constant[Find the offset (end-time) of last event
Parameters
----------
event_list : list or dcase_util.containers.MetaDataContainer
A list containing event dicts
Returns
-------
float > 0
maximum offset
]
if call[name[isinstance], parameter[name[event_list], name[dcase_util].containers.MetaDataContainer]] begin[:]
return[name[event_list].max_offset]
|
keyword[def] identifier[max_event_offset] ( identifier[event_list] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[event_list] , identifier[dcase_util] . identifier[containers] . identifier[MetaDataContainer] ):
keyword[return] identifier[event_list] . identifier[max_offset]
keyword[else] :
identifier[max_offset] = literal[int]
keyword[for] identifier[event] keyword[in] identifier[event_list] :
keyword[if] literal[string] keyword[in] identifier[event] :
keyword[if] identifier[event] [ literal[string] ]> identifier[max_offset] :
identifier[max_offset] = identifier[event] [ literal[string] ]
keyword[elif] literal[string] keyword[in] identifier[event] :
keyword[if] identifier[event] [ literal[string] ]> identifier[max_offset] :
identifier[max_offset] = identifier[event] [ literal[string] ]
keyword[return] identifier[max_offset]
|
def max_event_offset(event_list):
"""Find the offset (end-time) of last event
Parameters
----------
event_list : list or dcase_util.containers.MetaDataContainer
A list containing event dicts
Returns
-------
float > 0
maximum offset
"""
if isinstance(event_list, dcase_util.containers.MetaDataContainer):
return event_list.max_offset # depends on [control=['if'], data=[]]
else:
max_offset = 0
for event in event_list:
if 'event_offset' in event:
if event['event_offset'] > max_offset:
max_offset = event['event_offset'] # depends on [control=['if'], data=['max_offset']] # depends on [control=['if'], data=['event']]
elif 'offset' in event:
if event['offset'] > max_offset:
max_offset = event['offset'] # depends on [control=['if'], data=['max_offset']] # depends on [control=['if'], data=['event']] # depends on [control=['for'], data=['event']]
return max_offset
|
def track_statistic(self, name, description='', max_rows=None):
    """
    Create a Statistic object in the Tracker.
    """
    # Refuse to clobber an existing table of the same name.
    if name in self._tables:
        raise TableConflictError(name)
    # Fall back to the tracker-wide row cap when none is given.
    row_cap = AnonymousUsageTracker.MAX_ROWS_PER_TABLE if max_rows is None else max_rows
    self.register_table(name, self.uuid, 'Statistic', description)
    self._tables[name] = Statistic(name, self, max_rows=row_cap)
|
def function[track_statistic, parameter[self, name, description, max_rows]]:
constant[
Create a Statistic object in the Tracker.
]
if compare[name[name] in name[self]._tables] begin[:]
<ast.Raise object at 0x7da1b09e9ae0>
if compare[name[max_rows] is constant[None]] begin[:]
variable[max_rows] assign[=] name[AnonymousUsageTracker].MAX_ROWS_PER_TABLE
call[name[self].register_table, parameter[name[name], name[self].uuid, constant[Statistic], name[description]]]
call[name[self]._tables][name[name]] assign[=] call[name[Statistic], parameter[name[name], name[self]]]
|
keyword[def] identifier[track_statistic] ( identifier[self] , identifier[name] , identifier[description] = literal[string] , identifier[max_rows] = keyword[None] ):
literal[string]
keyword[if] identifier[name] keyword[in] identifier[self] . identifier[_tables] :
keyword[raise] identifier[TableConflictError] ( identifier[name] )
keyword[if] identifier[max_rows] keyword[is] keyword[None] :
identifier[max_rows] = identifier[AnonymousUsageTracker] . identifier[MAX_ROWS_PER_TABLE]
identifier[self] . identifier[register_table] ( identifier[name] , identifier[self] . identifier[uuid] , literal[string] , identifier[description] )
identifier[self] . identifier[_tables] [ identifier[name] ]= identifier[Statistic] ( identifier[name] , identifier[self] , identifier[max_rows] = identifier[max_rows] )
|
def track_statistic(self, name, description='', max_rows=None):
"""
Create a Statistic object in the Tracker.
"""
if name in self._tables:
raise TableConflictError(name) # depends on [control=['if'], data=['name']]
if max_rows is None:
max_rows = AnonymousUsageTracker.MAX_ROWS_PER_TABLE # depends on [control=['if'], data=['max_rows']]
self.register_table(name, self.uuid, 'Statistic', description)
self._tables[name] = Statistic(name, self, max_rows=max_rows)
|
def masked_rec_array_to_mgr(data, index, columns, dtype, copy):
    """
    Extract from a masked rec array and create the manager.

    Parameters
    ----------
    data : numpy.ma masked record array
        Source data; its ``fill_value`` supplies the per-field replacement
        used where entries are masked.
    index : index-like or None
        Target index. If None, it is derived from the record array, or a
        default integer index of ``len(data)`` is generated.
    columns : index-like or None
        Target columns. If None, the columns produced by ``to_arrays`` are
        used as-is.
    dtype : dtype or None
        Passed through to the manager construction.
    copy : bool
        If True, return a copy of the assembled manager.

    Returns
    -------
    manager object as produced by ``arrays_to_mgr``
    """
    # essentially process a record array then fill it
    fill_value = data.fill_value
    fdata = ma.getdata(data)  # raw (unmasked) values
    if index is None:
        index = get_names_from_index(fdata)
        if index is None:
            index = ibase.default_index(len(data))
    index = ensure_index(index)
    if columns is not None:
        columns = ensure_index(columns)
    arrays, arr_columns = to_arrays(fdata, columns)

    # fill if needed: replace masked slots with each field's fill value,
    # upcasting the column dtype first so the fill value fits.
    new_arrays = []
    for fv, arr, col in zip(fill_value, arrays, arr_columns):
        mask = ma.getmaskarray(data[col])
        if mask.any():
            arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
            arr[mask] = fv
        new_arrays.append(arr)

    # create the manager
    arrays, arr_columns = reorder_arrays(new_arrays, arr_columns, columns)
    if columns is None:
        columns = arr_columns
    mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype)
    if copy:
        mgr = mgr.copy()
    return mgr
|
def function[masked_rec_array_to_mgr, parameter[data, index, columns, dtype, copy]]:
constant[
Extract from a masked rec array and create the manager.
]
variable[fill_value] assign[=] name[data].fill_value
variable[fdata] assign[=] call[name[ma].getdata, parameter[name[data]]]
if compare[name[index] is constant[None]] begin[:]
variable[index] assign[=] call[name[get_names_from_index], parameter[name[fdata]]]
if compare[name[index] is constant[None]] begin[:]
variable[index] assign[=] call[name[ibase].default_index, parameter[call[name[len], parameter[name[data]]]]]
variable[index] assign[=] call[name[ensure_index], parameter[name[index]]]
if compare[name[columns] is_not constant[None]] begin[:]
variable[columns] assign[=] call[name[ensure_index], parameter[name[columns]]]
<ast.Tuple object at 0x7da207f005e0> assign[=] call[name[to_arrays], parameter[name[fdata], name[columns]]]
variable[new_arrays] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da207f021d0>, <ast.Name object at 0x7da207f02680>, <ast.Name object at 0x7da207f00cd0>]]] in starred[call[name[zip], parameter[name[fill_value], name[arrays], name[arr_columns]]]] begin[:]
variable[mask] assign[=] call[name[ma].getmaskarray, parameter[call[name[data]][name[col]]]]
if call[name[mask].any, parameter[]] begin[:]
<ast.Tuple object at 0x7da20e9b2860> assign[=] call[name[maybe_upcast], parameter[name[arr]]]
call[name[arr]][name[mask]] assign[=] name[fv]
call[name[new_arrays].append, parameter[name[arr]]]
<ast.Tuple object at 0x7da20e9b3490> assign[=] call[name[reorder_arrays], parameter[name[new_arrays], name[arr_columns], name[columns]]]
if compare[name[columns] is constant[None]] begin[:]
variable[columns] assign[=] name[arr_columns]
variable[mgr] assign[=] call[name[arrays_to_mgr], parameter[name[arrays], name[arr_columns], name[index], name[columns], name[dtype]]]
if name[copy] begin[:]
variable[mgr] assign[=] call[name[mgr].copy, parameter[]]
return[name[mgr]]
|
keyword[def] identifier[masked_rec_array_to_mgr] ( identifier[data] , identifier[index] , identifier[columns] , identifier[dtype] , identifier[copy] ):
literal[string]
identifier[fill_value] = identifier[data] . identifier[fill_value]
identifier[fdata] = identifier[ma] . identifier[getdata] ( identifier[data] )
keyword[if] identifier[index] keyword[is] keyword[None] :
identifier[index] = identifier[get_names_from_index] ( identifier[fdata] )
keyword[if] identifier[index] keyword[is] keyword[None] :
identifier[index] = identifier[ibase] . identifier[default_index] ( identifier[len] ( identifier[data] ))
identifier[index] = identifier[ensure_index] ( identifier[index] )
keyword[if] identifier[columns] keyword[is] keyword[not] keyword[None] :
identifier[columns] = identifier[ensure_index] ( identifier[columns] )
identifier[arrays] , identifier[arr_columns] = identifier[to_arrays] ( identifier[fdata] , identifier[columns] )
identifier[new_arrays] =[]
keyword[for] identifier[fv] , identifier[arr] , identifier[col] keyword[in] identifier[zip] ( identifier[fill_value] , identifier[arrays] , identifier[arr_columns] ):
identifier[mask] = identifier[ma] . identifier[getmaskarray] ( identifier[data] [ identifier[col] ])
keyword[if] identifier[mask] . identifier[any] ():
identifier[arr] , identifier[fv] = identifier[maybe_upcast] ( identifier[arr] , identifier[fill_value] = identifier[fv] , identifier[copy] = keyword[True] )
identifier[arr] [ identifier[mask] ]= identifier[fv]
identifier[new_arrays] . identifier[append] ( identifier[arr] )
identifier[arrays] , identifier[arr_columns] = identifier[reorder_arrays] ( identifier[new_arrays] , identifier[arr_columns] , identifier[columns] )
keyword[if] identifier[columns] keyword[is] keyword[None] :
identifier[columns] = identifier[arr_columns]
identifier[mgr] = identifier[arrays_to_mgr] ( identifier[arrays] , identifier[arr_columns] , identifier[index] , identifier[columns] , identifier[dtype] )
keyword[if] identifier[copy] :
identifier[mgr] = identifier[mgr] . identifier[copy] ()
keyword[return] identifier[mgr]
|
def masked_rec_array_to_mgr(data, index, columns, dtype, copy):
"""
Extract from a masked rec array and create the manager.
"""
# essentially process a record array then fill it
fill_value = data.fill_value
fdata = ma.getdata(data)
if index is None:
index = get_names_from_index(fdata)
if index is None:
index = ibase.default_index(len(data)) # depends on [control=['if'], data=['index']] # depends on [control=['if'], data=['index']]
index = ensure_index(index)
if columns is not None:
columns = ensure_index(columns) # depends on [control=['if'], data=['columns']]
(arrays, arr_columns) = to_arrays(fdata, columns)
# fill if needed
new_arrays = []
for (fv, arr, col) in zip(fill_value, arrays, arr_columns):
mask = ma.getmaskarray(data[col])
if mask.any():
(arr, fv) = maybe_upcast(arr, fill_value=fv, copy=True)
arr[mask] = fv # depends on [control=['if'], data=[]]
new_arrays.append(arr) # depends on [control=['for'], data=[]]
# create the manager
(arrays, arr_columns) = reorder_arrays(new_arrays, arr_columns, columns)
if columns is None:
columns = arr_columns # depends on [control=['if'], data=['columns']]
mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype)
if copy:
mgr = mgr.copy() # depends on [control=['if'], data=[]]
return mgr
|
def get_exe_prefixes(exe_filename):
    """Get exe->egg path translations for a given .exe file"""
    # Default mapping from installer layout to egg layout.
    prefixes = [
        ('PURELIB/', ''),
        ('PLATLIB/pywin32_system32', ''),
        ('PLATLIB/', ''),
        ('SCRIPTS/', 'EGG-INFO/scripts/'),
        ('DATA/lib/site-packages', ''),
    ]
    archive = zipfile.ZipFile(exe_filename)
    try:
        for member in archive.infolist():
            path = member.filename
            segments = path.split('/')
            # An <name>.egg-info/PKG-INFO entry pins the EGG-INFO prefix
            # and makes any further scanning unnecessary.
            if len(segments) == 3 and segments[2] == 'PKG-INFO':
                if segments[1].endswith('.egg-info'):
                    prefixes.insert(0, ('/'.join(segments[:2]), 'EGG-INFO/'))
                    break
            # Only top-level .pth files contribute additional prefixes;
            # namespace-package stubs are ignored.
            if len(segments) != 2 or not path.endswith('.pth'):
                continue
            if path.endswith('-nspkg.pth'):
                continue
            if segments[0].upper() in ('PURELIB', 'PLATLIB'):
                contents = archive.read(path)
                if six.PY3:
                    contents = contents.decode()
                for pth_line in yield_lines(contents):
                    pth_line = pth_line.strip().replace('\\', '/')
                    if not pth_line.startswith('import'):
                        prefixes.append(('%s/%s/' % (segments[0], pth_line), ''))
    finally:
        archive.close()
    # Lowercase keys, then order longest/most specific first.
    prefixes = [(old.lower(), new) for old, new in prefixes]
    prefixes.sort()
    prefixes.reverse()
    return prefixes
|
def function[get_exe_prefixes, parameter[exe_filename]]:
constant[Get exe->egg path translations for a given .exe file]
variable[prefixes] assign[=] list[[<ast.Tuple object at 0x7da1b1a07d90>, <ast.Tuple object at 0x7da1b1a07d00>, <ast.Tuple object at 0x7da1b1a07c70>, <ast.Tuple object at 0x7da1b1a07be0>, <ast.Tuple object at 0x7da1b1a07b50>]]
variable[z] assign[=] call[name[zipfile].ZipFile, parameter[name[exe_filename]]]
<ast.Try object at 0x7da1b1a079a0>
variable[prefixes] assign[=] <ast.ListComp object at 0x7da1b1a056c0>
call[name[prefixes].sort, parameter[]]
call[name[prefixes].reverse, parameter[]]
return[name[prefixes]]
|
keyword[def] identifier[get_exe_prefixes] ( identifier[exe_filename] ):
literal[string]
identifier[prefixes] =[
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
]
identifier[z] = identifier[zipfile] . identifier[ZipFile] ( identifier[exe_filename] )
keyword[try] :
keyword[for] identifier[info] keyword[in] identifier[z] . identifier[infolist] ():
identifier[name] = identifier[info] . identifier[filename]
identifier[parts] = identifier[name] . identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[parts] )== literal[int] keyword[and] identifier[parts] [ literal[int] ]== literal[string] :
keyword[if] identifier[parts] [ literal[int] ]. identifier[endswith] ( literal[string] ):
identifier[prefixes] . identifier[insert] ( literal[int] ,( literal[string] . identifier[join] ( identifier[parts] [: literal[int] ]), literal[string] ))
keyword[break]
keyword[if] identifier[len] ( identifier[parts] )!= literal[int] keyword[or] keyword[not] identifier[name] . identifier[endswith] ( literal[string] ):
keyword[continue]
keyword[if] identifier[name] . identifier[endswith] ( literal[string] ):
keyword[continue]
keyword[if] identifier[parts] [ literal[int] ]. identifier[upper] () keyword[in] ( literal[string] , literal[string] ):
identifier[contents] = identifier[z] . identifier[read] ( identifier[name] )
keyword[if] identifier[six] . identifier[PY3] :
identifier[contents] = identifier[contents] . identifier[decode] ()
keyword[for] identifier[pth] keyword[in] identifier[yield_lines] ( identifier[contents] ):
identifier[pth] = identifier[pth] . identifier[strip] (). identifier[replace] ( literal[string] , literal[string] )
keyword[if] keyword[not] identifier[pth] . identifier[startswith] ( literal[string] ):
identifier[prefixes] . identifier[append] ((( literal[string] %( identifier[parts] [ literal[int] ], identifier[pth] )), literal[string] ))
keyword[finally] :
identifier[z] . identifier[close] ()
identifier[prefixes] =[( identifier[x] . identifier[lower] (), identifier[y] ) keyword[for] identifier[x] , identifier[y] keyword[in] identifier[prefixes] ]
identifier[prefixes] . identifier[sort] ()
identifier[prefixes] . identifier[reverse] ()
keyword[return] identifier[prefixes]
|
def get_exe_prefixes(exe_filename):
"""Get exe->egg path translations for a given .exe file"""
prefixes = [('PURELIB/', ''), ('PLATLIB/pywin32_system32', ''), ('PLATLIB/', ''), ('SCRIPTS/', 'EGG-INFO/scripts/'), ('DATA/lib/site-packages', '')]
z = zipfile.ZipFile(exe_filename)
try:
for info in z.infolist():
name = info.filename
parts = name.split('/')
if len(parts) == 3 and parts[2] == 'PKG-INFO':
if parts[1].endswith('.egg-info'):
prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if len(parts) != 2 or not name.endswith('.pth'):
continue # depends on [control=['if'], data=[]]
if name.endswith('-nspkg.pth'):
continue # depends on [control=['if'], data=[]]
if parts[0].upper() in ('PURELIB', 'PLATLIB'):
contents = z.read(name)
if six.PY3:
contents = contents.decode() # depends on [control=['if'], data=[]]
for pth in yield_lines(contents):
pth = pth.strip().replace('\\', '/')
if not pth.startswith('import'):
prefixes.append(('%s/%s/' % (parts[0], pth), '')) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pth']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['info']] # depends on [control=['try'], data=[]]
finally:
z.close()
prefixes = [(x.lower(), y) for (x, y) in prefixes]
prefixes.sort()
prefixes.reverse()
return prefixes
|
def list(self, svc_rec=None, hostfilter=None, compromised=False):
    """
    List user accounts

    :param svc_rec: db.t_services.id
    :param hostfilter:
    :param compromised: Show only compromised accounts
    :return: [acct.t_accounts.f_services_id, acct.t_hosts.f_ipaddr,
              acct.t_hosts.f_hostname,
              acct.t_accounts.id, acct.t_accounts.f_username,
              acct.t_accounts.f_fullname, acct.t_accounts.f_password,
              acct.t_accounts.f_compromised, acct.t_accounts.f_hash1,
              acct.t_accounts.f_hash1_type, acct.t_accounts.f_hash2,
              acct.t_accounts.f_hash2_type, acct.t_accounts.f_source,
              acct.t_accounts.f_uid, acct.t_accounts.f_gid,
              acct.t_accounts.f_level, acct.t_accounts.f_domain,
              acct.t_accounts.f_message, acct.t_accounts.f_lockout,
              acct.t_accounts.f_duration, acct.t_accounts.f_active,
              acct.t_accounts.f_description,
              acct.t_services.f_proto, acct.t_services.f_number,
             ]
    """
    # Thin wrapper: the transport layer performs the actual query.
    rows = self.send.accounts_list(svc_rec, hostfilter, compromised)
    return rows
|
def function[list, parameter[self, svc_rec, hostfilter, compromised]]:
constant[
List user accounts
:param svc_rec: db.t_services.id
:param hostfilter:
:param compromised: Show only compromised accounts
:return: [acct.t_accounts.f_services_id, acct.t_hosts.f_ipaddr,
acct.t_hosts.f_hostname,
acct.t_accounts.id, acct.t_accounts.f_username,
acct.t_accounts.f_fullname, acct.t_accounts.f_password,
acct.t_accounts.f_compromised, acct.t_accounts.f_hash1,
acct.t_accounts.f_hash1_type, acct.t_accounts.f_hash2,
acct.t_accounts.f_hash2_type, acct.t_accounts.f_source,
acct.t_accounts.f_uid, acct.t_accounts.f_gid,
acct.t_accounts.f_level, acct.t_accounts.f_domain,
acct.t_accounts.f_message, acct.t_accounts.f_lockout,
acct.t_accounts.f_duration, acct.t_accounts.f_active,
acct.t_accounts.f_description,
acct.t_services.f_proto, acct.t_services.f_number,
]
]
return[call[name[self].send.accounts_list, parameter[name[svc_rec], name[hostfilter], name[compromised]]]]
|
keyword[def] identifier[list] ( identifier[self] , identifier[svc_rec] = keyword[None] , identifier[hostfilter] = keyword[None] , identifier[compromised] = keyword[False] ):
literal[string]
keyword[return] identifier[self] . identifier[send] . identifier[accounts_list] ( identifier[svc_rec] , identifier[hostfilter] , identifier[compromised] )
|
def list(self, svc_rec=None, hostfilter=None, compromised=False):
"""
List user accounts
:param svc_rec: db.t_services.id
:param hostfilter:
:param compromised: Show only compromised accounts
:return: [acct.t_accounts.f_services_id, acct.t_hosts.f_ipaddr,
acct.t_hosts.f_hostname,
acct.t_accounts.id, acct.t_accounts.f_username,
acct.t_accounts.f_fullname, acct.t_accounts.f_password,
acct.t_accounts.f_compromised, acct.t_accounts.f_hash1,
acct.t_accounts.f_hash1_type, acct.t_accounts.f_hash2,
acct.t_accounts.f_hash2_type, acct.t_accounts.f_source,
acct.t_accounts.f_uid, acct.t_accounts.f_gid,
acct.t_accounts.f_level, acct.t_accounts.f_domain,
acct.t_accounts.f_message, acct.t_accounts.f_lockout,
acct.t_accounts.f_duration, acct.t_accounts.f_active,
acct.t_accounts.f_description,
acct.t_services.f_proto, acct.t_services.f_number,
]
"""
return self.send.accounts_list(svc_rec, hostfilter, compromised)
|
def handle_input(self, code):
    """Compile Coconut interpreter input."""
    if not self.prompt.multiline:
        # A line that does not open a new indentation level may already
        # be a complete statement; try compiling it as-is first.
        if not should_indent(code):
            try:
                return self.comp.parse_block(code)
            except CoconutException:
                pass
        # Collect continuation lines until a blank line closes the block
        # or input is aborted (None).
        while True:
            more = self.get_input(more=True)
            if more is None:
                return None
            if not more:
                break
            code += "\n" + more
    try:
        return self.comp.parse_block(code)
    except CoconutException:
        logger.display_exc()
        return None
|
def function[handle_input, parameter[self, code]]:
constant[Compile Coconut interpreter input.]
if <ast.UnaryOp object at 0x7da20c7cae00> begin[:]
if <ast.UnaryOp object at 0x7da20c7cbaf0> begin[:]
<ast.Try object at 0x7da20c7ca590>
while constant[True] begin[:]
variable[line] assign[=] call[name[self].get_input, parameter[]]
if compare[name[line] is constant[None]] begin[:]
return[constant[None]]
<ast.Try object at 0x7da20c7c8b80>
return[constant[None]]
|
keyword[def] identifier[handle_input] ( identifier[self] , identifier[code] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[prompt] . identifier[multiline] :
keyword[if] keyword[not] identifier[should_indent] ( identifier[code] ):
keyword[try] :
keyword[return] identifier[self] . identifier[comp] . identifier[parse_block] ( identifier[code] )
keyword[except] identifier[CoconutException] :
keyword[pass]
keyword[while] keyword[True] :
identifier[line] = identifier[self] . identifier[get_input] ( identifier[more] = keyword[True] )
keyword[if] identifier[line] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[elif] identifier[line] :
identifier[code] += literal[string] + identifier[line]
keyword[else] :
keyword[break]
keyword[try] :
keyword[return] identifier[self] . identifier[comp] . identifier[parse_block] ( identifier[code] )
keyword[except] identifier[CoconutException] :
identifier[logger] . identifier[display_exc] ()
keyword[return] keyword[None]
|
def handle_input(self, code):
"""Compile Coconut interpreter input."""
if not self.prompt.multiline:
if not should_indent(code):
try:
return self.comp.parse_block(code) # depends on [control=['try'], data=[]]
except CoconutException:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
while True:
line = self.get_input(more=True)
if line is None:
return None # depends on [control=['if'], data=[]]
elif line:
code += '\n' + line # depends on [control=['if'], data=[]]
else:
break # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
try:
return self.comp.parse_block(code) # depends on [control=['try'], data=[]]
except CoconutException:
logger.display_exc() # depends on [control=['except'], data=[]]
return None
|
def copy_opts_for_single_ifo(opt, ifo):
    """
    Takes the namespace object (opt) from the multi-detector interface and
    returns a namespace object for a single ifo that can be used with
    functions expecting output from the single-detector interface.
    """
    single = copy.deepcopy(opt)
    # Collapse every per-detector mapping down to the entry for this ifo.
    for attr_name, attr_value in vars(single).items():
        if isinstance(attr_value, DictWithDefaultReturn):
            setattr(single, attr_name, attr_value[ifo])
    return single
|
def function[copy_opts_for_single_ifo, parameter[opt, ifo]]:
constant[
Takes the namespace object (opt) from the multi-detector interface and
returns a namespace object for a single ifo that can be used with
functions expecting output from the single-detector interface.
]
variable[opt] assign[=] call[name[copy].deepcopy, parameter[name[opt]]]
for taget[tuple[[<ast.Name object at 0x7da20c7ca5f0>, <ast.Name object at 0x7da20c7cbd90>]]] in starred[call[call[name[vars], parameter[name[opt]]].items, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[val], name[DictWithDefaultReturn]]] begin[:]
call[name[setattr], parameter[name[opt], name[arg], call[call[name[getattr], parameter[name[opt], name[arg]]]][name[ifo]]]]
return[name[opt]]
|
keyword[def] identifier[copy_opts_for_single_ifo] ( identifier[opt] , identifier[ifo] ):
literal[string]
identifier[opt] = identifier[copy] . identifier[deepcopy] ( identifier[opt] )
keyword[for] identifier[arg] , identifier[val] keyword[in] identifier[vars] ( identifier[opt] ). identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[val] , identifier[DictWithDefaultReturn] ):
identifier[setattr] ( identifier[opt] , identifier[arg] , identifier[getattr] ( identifier[opt] , identifier[arg] )[ identifier[ifo] ])
keyword[return] identifier[opt]
|
def copy_opts_for_single_ifo(opt, ifo):
"""
Takes the namespace object (opt) from the multi-detector interface and
returns a namespace object for a single ifo that can be used with
functions expecting output from the single-detector interface.
"""
opt = copy.deepcopy(opt)
for (arg, val) in vars(opt).items():
if isinstance(val, DictWithDefaultReturn):
setattr(opt, arg, getattr(opt, arg)[ifo]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return opt
|
def leave_scope(self):
    """ Ends a function body and pops current scope out of the symbol table.

    Assigns stack offsets to the scope's local variables and arrays and
    returns the total number of bytes they occupy.
    """
    def entry_size(entry):
        """ For local variables and params, returns the real variable or
        local array size in bytes
        """
        if entry.scope == SCOPE.global_ or \
                entry.is_aliased:  # aliases or global variables = 0
            return 0
        if entry.class_ != CLASS.array:
            return entry.size
        return entry.memsize

    # Warn about parameters that were never accessed; they are marked
    # accessed anyway because parameters must remain in the frame.
    for v in self.table[self.current_scope].values(filter_by_opt=False):
        if not v.accessed:
            if v.scope == SCOPE.parameter:
                kind = 'Parameter'
                v.accessed = True  # HINT: Parameters must always be present even if not used!
                warning_not_used(v.lineno, v.name, kind=kind)

    # Lay out entries ordered by size so offsets accumulate deterministically.
    entries = sorted(self.table[self.current_scope].values(filter_by_opt=True), key=entry_size)
    offset = 0

    for entry in entries:  # Symbols of the current level
        if entry.class_ is CLASS.unknown:
            self.move_to_global_scope(entry.name)
        if entry.class_ in (CLASS.function, CLASS.label, CLASS.type_):
            continue

        # Local variables offset
        if entry.class_ == CLASS.var and entry.scope == SCOPE.local:
            if entry.alias is not None:  # alias of another variable?
                if entry.offset is None:
                    entry.offset = entry.alias.offset
                else:
                    # Alias with its own offset: position relative to target.
                    entry.offset = entry.alias.offset - entry.offset
            else:
                offset += entry_size(entry)
                entry.offset = offset

        # Local arrays: reserve the array's bytes past the running offset.
        if entry.class_ == CLASS.array and entry.scope == SCOPE.local:
            entry.offset = entry_size(entry) + offset
            offset = entry.offset

    # Restore the parent scope's name mangling, drop this scope's table,
    # and restore the loop-nesting state saved when the scope was entered.
    self.mangle = self[self.current_scope].parent_mangle
    self.table.pop()
    global_.LOOPS = global_.META_LOOPS.pop()
    return offset
|
def function[leave_scope, parameter[self]]:
constant[ Ends a function body and pops current scope out of the symbol table.
]
def function[entry_size, parameter[entry]]:
constant[ For local variables and params, returns the real variable or
local array size in bytes
]
if <ast.BoolOp object at 0x7da18bc70e20> begin[:]
return[constant[0]]
if compare[name[entry].class_ not_equal[!=] name[CLASS].array] begin[:]
return[name[entry].size]
return[name[entry].memsize]
for taget[name[v]] in starred[call[call[name[self].table][name[self].current_scope].values, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da18bc70250> begin[:]
if compare[name[v].scope equal[==] name[SCOPE].parameter] begin[:]
variable[kind] assign[=] constant[Parameter]
name[v].accessed assign[=] constant[True]
call[name[warning_not_used], parameter[name[v].lineno, name[v].name]]
variable[entries] assign[=] call[name[sorted], parameter[call[call[name[self].table][name[self].current_scope].values, parameter[]]]]
variable[offset] assign[=] constant[0]
for taget[name[entry]] in starred[name[entries]] begin[:]
if compare[name[entry].class_ is name[CLASS].unknown] begin[:]
call[name[self].move_to_global_scope, parameter[name[entry].name]]
if compare[name[entry].class_ in tuple[[<ast.Attribute object at 0x7da18bc714b0>, <ast.Attribute object at 0x7da18bc71120>, <ast.Attribute object at 0x7da18bc719c0>]]] begin[:]
continue
if <ast.BoolOp object at 0x7da18bc708e0> begin[:]
if compare[name[entry].alias is_not constant[None]] begin[:]
if compare[name[entry].offset is constant[None]] begin[:]
name[entry].offset assign[=] name[entry].alias.offset
if <ast.BoolOp object at 0x7da18f811510> begin[:]
name[entry].offset assign[=] binary_operation[call[name[entry_size], parameter[name[entry]]] + name[offset]]
variable[offset] assign[=] name[entry].offset
name[self].mangle assign[=] call[name[self]][name[self].current_scope].parent_mangle
call[name[self].table.pop, parameter[]]
name[global_].LOOPS assign[=] call[name[global_].META_LOOPS.pop, parameter[]]
return[name[offset]]
|
keyword[def] identifier[leave_scope] ( identifier[self] ):
literal[string]
keyword[def] identifier[entry_size] ( identifier[entry] ):
literal[string]
keyword[if] identifier[entry] . identifier[scope] == identifier[SCOPE] . identifier[global_] keyword[or] identifier[entry] . identifier[is_aliased] :
keyword[return] literal[int]
keyword[if] identifier[entry] . identifier[class_] != identifier[CLASS] . identifier[array] :
keyword[return] identifier[entry] . identifier[size]
keyword[return] identifier[entry] . identifier[memsize]
keyword[for] identifier[v] keyword[in] identifier[self] . identifier[table] [ identifier[self] . identifier[current_scope] ]. identifier[values] ( identifier[filter_by_opt] = keyword[False] ):
keyword[if] keyword[not] identifier[v] . identifier[accessed] :
keyword[if] identifier[v] . identifier[scope] == identifier[SCOPE] . identifier[parameter] :
identifier[kind] = literal[string]
identifier[v] . identifier[accessed] = keyword[True]
identifier[warning_not_used] ( identifier[v] . identifier[lineno] , identifier[v] . identifier[name] , identifier[kind] = identifier[kind] )
identifier[entries] = identifier[sorted] ( identifier[self] . identifier[table] [ identifier[self] . identifier[current_scope] ]. identifier[values] ( identifier[filter_by_opt] = keyword[True] ), identifier[key] = identifier[entry_size] )
identifier[offset] = literal[int]
keyword[for] identifier[entry] keyword[in] identifier[entries] :
keyword[if] identifier[entry] . identifier[class_] keyword[is] identifier[CLASS] . identifier[unknown] :
identifier[self] . identifier[move_to_global_scope] ( identifier[entry] . identifier[name] )
keyword[if] identifier[entry] . identifier[class_] keyword[in] ( identifier[CLASS] . identifier[function] , identifier[CLASS] . identifier[label] , identifier[CLASS] . identifier[type_] ):
keyword[continue]
keyword[if] identifier[entry] . identifier[class_] == identifier[CLASS] . identifier[var] keyword[and] identifier[entry] . identifier[scope] == identifier[SCOPE] . identifier[local] :
keyword[if] identifier[entry] . identifier[alias] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[entry] . identifier[offset] keyword[is] keyword[None] :
identifier[entry] . identifier[offset] = identifier[entry] . identifier[alias] . identifier[offset]
keyword[else] :
identifier[entry] . identifier[offset] = identifier[entry] . identifier[alias] . identifier[offset] - identifier[entry] . identifier[offset]
keyword[else] :
identifier[offset] += identifier[entry_size] ( identifier[entry] )
identifier[entry] . identifier[offset] = identifier[offset]
keyword[if] identifier[entry] . identifier[class_] == identifier[CLASS] . identifier[array] keyword[and] identifier[entry] . identifier[scope] == identifier[SCOPE] . identifier[local] :
identifier[entry] . identifier[offset] = identifier[entry_size] ( identifier[entry] )+ identifier[offset]
identifier[offset] = identifier[entry] . identifier[offset]
identifier[self] . identifier[mangle] = identifier[self] [ identifier[self] . identifier[current_scope] ]. identifier[parent_mangle]
identifier[self] . identifier[table] . identifier[pop] ()
identifier[global_] . identifier[LOOPS] = identifier[global_] . identifier[META_LOOPS] . identifier[pop] ()
keyword[return] identifier[offset]
|
def leave_scope(self):
    """ Ends a function body and pops current scope out of the symbol table.

    Warns about symbols that were declared but never accessed, assigns
    stack offsets (in bytes) to local variables and arrays, restores the
    parent scope's name mangling and the saved loop context, and returns
    the total stack space required by the scope being closed.
    """
    def entry_size(entry):
        """ For local variables and params, returns the real variable or
        local array size in bytes
        """
        if entry.scope == SCOPE.global_ or entry.is_aliased:  # aliases or global variables = 0
            return 0
        if entry.class_ != CLASS.array:
            return entry.size
        # Arrays occupy their full memory footprint, not just one element.
        return entry.memsize
    # Emit "not used" warnings for symbols never accessed in this scope.
    for v in self.table[self.current_scope].values(filter_by_opt=False):
        if not v.accessed:
            if v.scope == SCOPE.parameter:
                kind = 'Parameter'
                v.accessed = True  # HINT: Parameters must always be present even if not used!
                warning_not_used(v.lineno, v.name, kind=kind)
    # Sort by size so offsets are assigned smallest-entry-first.
    entries = sorted(self.table[self.current_scope].values(filter_by_opt=True), key=entry_size)
    offset = 0
    for entry in entries:  # Symbols of the current level
        if entry.class_ is CLASS.unknown:
            # Still-unresolved symbols are promoted to the global scope.
            self.move_to_global_scope(entry.name)
        if entry.class_ in (CLASS.function, CLASS.label, CLASS.type_):
            continue
        # Local variables offset
        if entry.class_ == CLASS.var and entry.scope == SCOPE.local:
            if entry.alias is not None:  # alias of another variable?
                if entry.offset is None:
                    entry.offset = entry.alias.offset
                else:
                    # Offset is relative to the aliased variable's slot.
                    entry.offset = entry.alias.offset - entry.offset
            else:
                offset += entry_size(entry)
                entry.offset = offset
        if entry.class_ == CLASS.array and entry.scope == SCOPE.local:
            # Arrays are placed after the running offset; advance it past them.
            entry.offset = entry_size(entry) + offset
            offset = entry.offset
    # Restore the enclosing scope's mangling prefix and pop this scope.
    self.mangle = self[self.current_scope].parent_mangle
    self.table.pop()
    global_.LOOPS = global_.META_LOOPS.pop()  # restore saved loop-nesting context
    return offset
|
def load_data(self, key=None, **kwargs):
    """Read the stored information from disk and return / refresh it.

    :param key: optional HDF store key. When given, only that table is
        read and returned directly (no integrity check is run). When
        omitted, every table in the store is loaded into ``self.data``
        (keyed by table name without the leading ``'/'``) and the data
        integrity check is executed afterwards.
    :param kwargs: extra arguments forwarded to :func:`pandas.read_hdf`,
        e.g. ``where=['index > 2009', 'index < 2010']``, ``columns=['ordinal']``.
    """
    self.store.open()
    try:
        if key:
            # Single-table read: return it directly.
            return pd.read_hdf(self.PATH_DATABASE, key, **kwargs)
        # Full load: read every table in the store into a plain dict.
        data_load = dict()
        for k in self.store.keys():
            k = k.replace('/', '')  # store keys come back as '/name'
            data_load[k] = pd.read_hdf(self.PATH_DATABASE, k, **kwargs)
        self.data = data_load
    finally:
        # BUGFIX: previously the early `return` on the `key` path left the
        # store open; close it on every exit path.
        self.store.close()
    self.integridad_data()
|
def function[load_data, parameter[self, key]]:
constant[ Lee de disco la información y la devuelve
:param key:
]
call[name[self].store.open, parameter[]]
if name[key] begin[:]
return[call[name[pd].read_hdf, parameter[name[self].PATH_DATABASE, name[key]]]]
call[name[self].store.close, parameter[]]
call[name[self].integridad_data, parameter[]]
|
keyword[def] identifier[load_data] ( identifier[self] , identifier[key] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[store] . identifier[open] ()
keyword[if] identifier[key] :
keyword[return] identifier[pd] . identifier[read_hdf] ( identifier[self] . identifier[PATH_DATABASE] , identifier[key] ,** identifier[kwargs] )
keyword[else] :
identifier[data_load] = identifier[dict] ()
keyword[for] identifier[k] keyword[in] identifier[self] . identifier[store] . identifier[keys] ():
identifier[k] = identifier[k] . identifier[replace] ( literal[string] , literal[string] )
identifier[data_load] [ identifier[k] ]= identifier[pd] . identifier[read_hdf] ( identifier[self] . identifier[PATH_DATABASE] , identifier[k] ,** identifier[kwargs] )
identifier[self] . identifier[data] = identifier[data_load]
identifier[self] . identifier[store] . identifier[close] ()
identifier[self] . identifier[integridad_data] ()
|
def load_data(self, key=None, **kwargs):
    """ Reads the stored information from disk and returns it.

    :param key: optional HDF store key. When given, only that table is
        read and returned; otherwise every table is loaded into
        ``self.data`` and the integrity check is run.
    :param kwargs: extra arguments forwarded to ``pd.read_hdf``.
    """
    self.store.open()
    if key:
        # NOTE(review): the store is left open on this early-return path —
        # confirm whether the caller is expected to close it.
        return pd.read_hdf(self.PATH_DATABASE, key, **kwargs)
    else:
        data_load = dict()
        for k in self.store.keys():
            k = k.replace('/', '')  # store keys come back as '/name'
            # **kwargs e.g.: where=['index > 2009','index < 2010'], columns=['ordinal']
            data_load[k] = pd.read_hdf(self.PATH_DATABASE, k, **kwargs)
        self.data = data_load
    self.store.close()
    self.integridad_data()
|
def _get_data_raw(self, time, site_id, pressure=None):
r"""Download data from the Iowa State's upper air archive.
Parameters
----------
time : datetime
Date and time for which data should be downloaded
site_id : str
Site id for which data should be downloaded
pressure : float, optional
Mandatory pressure level at which to request data (in hPa).
Returns
-------
list of json data
"""
query = {'ts': time.strftime('%Y%m%d%H00')}
if site_id is not None:
query['station'] = site_id
if pressure is not None:
query['pressure'] = pressure
resp = self.get_path('raob.py', query)
json_data = json.loads(resp.text)
# See if the return is valid, but has no data
if not (json_data['profiles'] and json_data['profiles'][0]['profile']):
message = 'No data available '
if time is not None:
message += 'for {time:%Y-%m-%d %HZ} '.format(time=time)
if site_id is not None:
message += 'for station {stid}'.format(stid=site_id)
if pressure is not None:
message += 'for pressure {pres}'.format(pres=pressure)
message = message[:-1] + '.'
raise ValueError(message)
return json_data
|
def function[_get_data_raw, parameter[self, time, site_id, pressure]]:
constant[Download data from the Iowa State's upper air archive.
Parameters
----------
time : datetime
Date and time for which data should be downloaded
site_id : str
Site id for which data should be downloaded
pressure : float, optional
Mandatory pressure level at which to request data (in hPa).
Returns
-------
list of json data
]
variable[query] assign[=] dictionary[[<ast.Constant object at 0x7da1b107a890>], [<ast.Call object at 0x7da1b107a4d0>]]
if compare[name[site_id] is_not constant[None]] begin[:]
call[name[query]][constant[station]] assign[=] name[site_id]
if compare[name[pressure] is_not constant[None]] begin[:]
call[name[query]][constant[pressure]] assign[=] name[pressure]
variable[resp] assign[=] call[name[self].get_path, parameter[constant[raob.py], name[query]]]
variable[json_data] assign[=] call[name[json].loads, parameter[name[resp].text]]
if <ast.UnaryOp object at 0x7da1b107b100> begin[:]
variable[message] assign[=] constant[No data available ]
if compare[name[time] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b1078490>
if compare[name[site_id] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b107a5c0>
if compare[name[pressure] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b10c04c0>
variable[message] assign[=] binary_operation[call[name[message]][<ast.Slice object at 0x7da1b10c1630>] + constant[.]]
<ast.Raise object at 0x7da1b10c1270>
return[name[json_data]]
|
keyword[def] identifier[_get_data_raw] ( identifier[self] , identifier[time] , identifier[site_id] , identifier[pressure] = keyword[None] ):
literal[string]
identifier[query] ={ literal[string] : identifier[time] . identifier[strftime] ( literal[string] )}
keyword[if] identifier[site_id] keyword[is] keyword[not] keyword[None] :
identifier[query] [ literal[string] ]= identifier[site_id]
keyword[if] identifier[pressure] keyword[is] keyword[not] keyword[None] :
identifier[query] [ literal[string] ]= identifier[pressure]
identifier[resp] = identifier[self] . identifier[get_path] ( literal[string] , identifier[query] )
identifier[json_data] = identifier[json] . identifier[loads] ( identifier[resp] . identifier[text] )
keyword[if] keyword[not] ( identifier[json_data] [ literal[string] ] keyword[and] identifier[json_data] [ literal[string] ][ literal[int] ][ literal[string] ]):
identifier[message] = literal[string]
keyword[if] identifier[time] keyword[is] keyword[not] keyword[None] :
identifier[message] += literal[string] . identifier[format] ( identifier[time] = identifier[time] )
keyword[if] identifier[site_id] keyword[is] keyword[not] keyword[None] :
identifier[message] += literal[string] . identifier[format] ( identifier[stid] = identifier[site_id] )
keyword[if] identifier[pressure] keyword[is] keyword[not] keyword[None] :
identifier[message] += literal[string] . identifier[format] ( identifier[pres] = identifier[pressure] )
identifier[message] = identifier[message] [:- literal[int] ]+ literal[string]
keyword[raise] identifier[ValueError] ( identifier[message] )
keyword[return] identifier[json_data]
|
def _get_data_raw(self, time, site_id, pressure=None):
    """Download data from the Iowa State's upper air archive.

    Parameters
    ----------
    time : datetime
        Date and time for which data should be downloaded
    site_id : str
        Site id for which data should be downloaded
    pressure : float, optional
        Mandatory pressure level at which to request data (in hPa).

    Returns
    -------
    Parsed JSON response (dict with a 'profiles' list).
    """
    query = {'ts': time.strftime('%Y%m%d%H00')}
    if site_id is not None:
        query['station'] = site_id
    if pressure is not None:
        query['pressure'] = pressure
    resp = self.get_path('raob.py', query)
    json_data = json.loads(resp.text)
    # See if the return is valid, but has no data
    if not (json_data['profiles'] and json_data['profiles'][0]['profile']):
        message = 'No data available '
        if time is not None:
            message += 'for {time:%Y-%m-%d %HZ} '.format(time=time)
        if site_id is not None:
            message += 'for station {stid}'.format(stid=site_id)
        if pressure is not None:
            message += 'for pressure {pres}'.format(pres=pressure)
        # NOTE(review): [:-1] assumes the last segment ends with a space,
        # but the station/pressure segments above do not — this chops the
        # final character of the id/value. Confirm and add trailing spaces.
        message = message[:-1] + '.'
        raise ValueError(message)
    return json_data
|
def curve_fit(self):
    """
    Fit `scipy_data_fitting.Fit.function` to the data and return the
    result of the configured curve fitting routine.

    The result is computed once and memoized on the instance as
    ``self._curve_fit``. See `scipy_data_fitting.Fit.options` for how to
    control or override the curve fitting algorithm.
    """
    if hasattr(self, '_curve_fit'):
        return self._curve_fit

    opts = self.options.copy()
    fitter = opts.pop('fit_function')
    xdata = self.data.array[0]
    ydata = self.data.array[1]

    if fitter == 'lmfit':
        # lmfit path: minimize the residual function over the parameters.
        result = lmfit.minimize(
            self.lmfit_fcn2min, self.lmfit_parameters,
            args=(xdata, ydata, self.data.error), **opts)
    else:
        # Generic path: build prefix-scaled initial guesses and delegate.
        guesses = [prefix_factor(p) * p['guess'] for p in self.fitting_parameters]
        result = fitter(self.function, xdata, ydata, guesses, **opts)

    self._curve_fit = result
    return self._curve_fit
|
def function[curve_fit, parameter[self]]:
constant[
Fits `scipy_data_fitting.Fit.function` to the data and returns
the output from the specified curve fit function.
See `scipy_data_fitting.Fit.options` for details on how to control
or override the the curve fitting algorithm.
]
if <ast.UnaryOp object at 0x7da18eb548b0> begin[:]
variable[options] assign[=] call[name[self].options.copy, parameter[]]
variable[fit_function] assign[=] call[name[options].pop, parameter[constant[fit_function]]]
variable[independent_values] assign[=] call[name[self].data.array][constant[0]]
variable[dependent_values] assign[=] call[name[self].data.array][constant[1]]
if compare[name[fit_function] equal[==] constant[lmfit]] begin[:]
name[self]._curve_fit assign[=] call[name[lmfit].minimize, parameter[name[self].lmfit_fcn2min, name[self].lmfit_parameters]]
return[name[self]._curve_fit]
|
keyword[def] identifier[curve_fit] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[options] = identifier[self] . identifier[options] . identifier[copy] ()
identifier[fit_function] = identifier[options] . identifier[pop] ( literal[string] )
identifier[independent_values] = identifier[self] . identifier[data] . identifier[array] [ literal[int] ]
identifier[dependent_values] = identifier[self] . identifier[data] . identifier[array] [ literal[int] ]
keyword[if] identifier[fit_function] == literal[string] :
identifier[self] . identifier[_curve_fit] = identifier[lmfit] . identifier[minimize] (
identifier[self] . identifier[lmfit_fcn2min] , identifier[self] . identifier[lmfit_parameters] ,
identifier[args] =( identifier[independent_values] , identifier[dependent_values] , identifier[self] . identifier[data] . identifier[error] ),** identifier[options] )
keyword[else] :
identifier[p0] =[ identifier[prefix_factor] ( identifier[param] )* identifier[param] [ literal[string] ] keyword[for] identifier[param] keyword[in] identifier[self] . identifier[fitting_parameters] ]
identifier[self] . identifier[_curve_fit] = identifier[fit_function] (
identifier[self] . identifier[function] , identifier[independent_values] , identifier[dependent_values] , identifier[p0] ,** identifier[options] )
keyword[return] identifier[self] . identifier[_curve_fit]
|
def curve_fit(self):
    """
    Fits `scipy_data_fitting.Fit.function` to the data and returns
    the output from the specified curve fit function.

    The result is memoized on the instance as ``self._curve_fit``.
    See `scipy_data_fitting.Fit.options` for details on how to control
    or override the curve fitting algorithm.
    """
    if not hasattr(self, '_curve_fit'):
        options = self.options.copy()
        # Remaining options are forwarded to the fitting routine.
        fit_function = options.pop('fit_function')
        independent_values = self.data.array[0]
        dependent_values = self.data.array[1]
        if fit_function == 'lmfit':
            # lmfit path: minimize the residual function over the parameters.
            self._curve_fit = lmfit.minimize(self.lmfit_fcn2min, self.lmfit_parameters, args=(independent_values, dependent_values, self.data.error), **options)
        else:
            # Initial guesses scaled by each parameter's prefix factor.
            p0 = [prefix_factor(param) * param['guess'] for param in self.fitting_parameters]
            self._curve_fit = fit_function(self.function, independent_values, dependent_values, p0, **options)
    return self._curve_fit
|
def get_niggli_reduced_lattice(self, tol: float = 1e-5) -> "Lattice":
    """
    Get the Niggli reduced lattice using the numerically stable algo
    proposed by R. W. Grosse-Kunstleve, N. K. Sauter, & P. D. Adams,
    Acta Crystallographica Section A Foundations of Crystallography, 2003,
    60(1), 1-6. doi:10.1107/S010876730302186X

    Args:
        tol (float): The numerical tolerance. The default of 1e-5 should
            result in stable behavior for most cases.

    Returns:
        Niggli-reduced lattice.

    Raises:
        ValueError: if no mapping from the reduced parameters back to
            this lattice can be found within the tolerance.
    """
    # lll reduction is more stable for skewed cells
    matrix = self.lll_matrix
    a = matrix[0]
    b = matrix[1]
    c = matrix[2]
    # Scale-invariant tolerance: tol times the cube root of the volume.
    e = tol * self.volume ** (1 / 3)
    # Define metric tensor
    G = [
        [dot(a, a), dot(a, b), dot(a, c)],
        [dot(a, b), dot(b, b), dot(b, c)],
        [dot(a, c), dot(b, c), dot(c, c)],
    ]
    G = np.array(G)
    # This sets an upper limit on the number of iterations.
    for count in range(100):
        # The steps are labelled as Ax as per the labelling scheme in the
        # paper. (A, B, C) are squared lengths; (E, N, Y) are the
        # xi/eta/zeta off-diagonal terms, i.e. 2*(b.c), 2*(a.c), 2*(a.b).
        (A, B, C, E, N, Y) = (
            G[0, 0],
            G[1, 1],
            G[2, 2],
            2 * G[1, 2],
            2 * G[0, 2],
            2 * G[0, 1],
        )
        if A > B + e or (abs(A - B) < e and abs(E) > abs(N) + e):
            # A1: exchange the first two basis vectors (sign kept via -1).
            M = [[0, -1, 0], [-1, 0, 0], [0, 0, -1]]
            G = dot(transpose(M), dot(G, M))
        if (B > C + e) or (abs(B - C) < e and abs(N) > abs(Y) + e):
            # A2: exchange the last two basis vectors, then restart the loop.
            M = [[-1, 0, 0], [0, 0, -1], [0, -1, 0]]
            G = dot(transpose(M), dot(G, M))
            continue
        # Signs of the off-diagonal terms (0 when within tolerance of zero).
        l = 0 if abs(E) < e else E / abs(E)
        m = 0 if abs(N) < e else N / abs(N)
        n = 0 if abs(Y) < e else Y / abs(Y)
        if l * m * n == 1:
            # A3: flip axis signs so all off-diagonal terms become positive.
            i = -1 if l == -1 else 1
            j = -1 if m == -1 else 1
            k = -1 if n == -1 else 1
            M = [[i, 0, 0], [0, j, 0], [0, 0, k]]
            G = dot(transpose(M), dot(G, M))
        elif l * m * n == 0 or l * m * n == -1:
            # A4: flip axis signs so all off-diagonal terms become
            # non-positive; a zero term absorbs any leftover sign flip.
            i = -1 if l == 1 else 1
            j = -1 if m == 1 else 1
            k = -1 if n == 1 else 1
            if i * j * k == -1:
                if n == 0:
                    k = -1
                elif m == 0:
                    j = -1
                elif l == 0:
                    i = -1
            M = [[i, 0, 0], [0, j, 0], [0, 0, k]]
            G = dot(transpose(M), dot(G, M))
        # Refresh the scalars after the sign normalization above.
        (A, B, C, E, N, Y) = (
            G[0, 0],
            G[1, 1],
            G[2, 2],
            2 * G[1, 2],
            2 * G[0, 2],
            2 * G[0, 1],
        )
        # A5: reduce |xi| when it exceeds B, then restart.
        if (
            abs(E) > B + e
            or (abs(E - B) < e and 2 * N < Y - e)
            or (abs(E + B) < e and Y < -e)
        ):
            M = [[1, 0, 0], [0, 1, -E / abs(E)], [0, 0, 1]]
            G = dot(transpose(M), dot(G, M))
            continue
        # A6: reduce |eta| when it exceeds A, then restart.
        if (
            abs(N) > A + e
            or (abs(A - N) < e and 2 * E < Y - e)
            or (abs(A + N) < e and Y < -e)
        ):
            M = [[1, 0, -N / abs(N)], [0, 1, 0], [0, 0, 1]]
            G = dot(transpose(M), dot(G, M))
            continue
        # A7: reduce |zeta| when it exceeds A, then restart.
        if (
            abs(Y) > A + e
            or (abs(A - Y) < e and 2 * E < N - e)
            or (abs(A + Y) < e and N < -e)
        ):
            M = [[1, -Y / abs(Y), 0], [0, 1, 0], [0, 0, 1]]
            G = dot(transpose(M), dot(G, M))
            continue
        # A8: final reduction step on the combined sum, then restart.
        if E + N + Y + A + B < -e or (abs(E + N + Y + A + B) < e < Y + (A + N) * 2):
            M = [[1, 0, 1], [0, 1, 1], [0, 0, 1]]
            G = dot(transpose(M), dot(G, M))
            continue
        # No step applied this pass: the metric tensor is Niggli-reduced.
        break
    # Recover cell parameters from the reduced metric tensor.
    A = G[0, 0]
    B = G[1, 1]
    C = G[2, 2]
    E = 2 * G[1, 2]
    N = 2 * G[0, 2]
    Y = 2 * G[0, 1]
    a = math.sqrt(A)
    b = math.sqrt(B)
    c = math.sqrt(C)
    alpha = math.acos(E / 2 / b / c) / math.pi * 180
    beta = math.acos(N / 2 / a / c) / math.pi * 180
    gamma = math.acos(Y / 2 / a / b) / math.pi * 180
    latt = Lattice.from_parameters(a, b, c, alpha, beta, gamma)
    # Map the reduced parameters back onto an actual lattice matrix,
    # forcing a right-handed (positive-determinant) result.
    mapped = self.find_mapping(latt, e, skip_rotation_matrix=True)
    if mapped is not None:
        if np.linalg.det(mapped[0].matrix) > 0:
            return mapped[0]
        else:
            return Lattice(-mapped[0].matrix)
    raise ValueError("can't find niggli")
|
def function[get_niggli_reduced_lattice, parameter[self, tol]]:
constant[
Get the Niggli reduced lattice using the numerically stable algo
proposed by R. W. Grosse-Kunstleve, N. K. Sauter, & P. D. Adams,
Acta Crystallographica Section A Foundations of Crystallography, 2003,
60(1), 1-6. doi:10.1107/S010876730302186X
Args:
tol (float): The numerical tolerance. The default of 1e-5 should
result in stable behavior for most cases.
Returns:
Niggli-reduced lattice.
]
variable[matrix] assign[=] name[self].lll_matrix
variable[a] assign[=] call[name[matrix]][constant[0]]
variable[b] assign[=] call[name[matrix]][constant[1]]
variable[c] assign[=] call[name[matrix]][constant[2]]
variable[e] assign[=] binary_operation[name[tol] * binary_operation[name[self].volume ** binary_operation[constant[1] / constant[3]]]]
variable[G] assign[=] list[[<ast.List object at 0x7da1b1c200a0>, <ast.List object at 0x7da1b1cb70d0>, <ast.List object at 0x7da1b1c20130>]]
variable[G] assign[=] call[name[np].array, parameter[name[G]]]
for taget[name[count]] in starred[call[name[range], parameter[constant[100]]]] begin[:]
<ast.Tuple object at 0x7da1b1c20790> assign[=] tuple[[<ast.Subscript object at 0x7da1b1c207f0>, <ast.Subscript object at 0x7da1b1c20970>, <ast.Subscript object at 0x7da1b1c20b50>, <ast.BinOp object at 0x7da1b1c20af0>, <ast.BinOp object at 0x7da1b1c20c10>, <ast.BinOp object at 0x7da1b1c21090>]]
if <ast.BoolOp object at 0x7da1b1c20e20> begin[:]
variable[M] assign[=] list[[<ast.List object at 0x7da1b1c213c0>, <ast.List object at 0x7da1b1c21960>, <ast.List object at 0x7da1b1c21780>]]
variable[G] assign[=] call[name[dot], parameter[call[name[transpose], parameter[name[M]]], call[name[dot], parameter[name[G], name[M]]]]]
if <ast.BoolOp object at 0x7da1b1c219c0> begin[:]
variable[M] assign[=] list[[<ast.List object at 0x7da1b1c21ea0>, <ast.List object at 0x7da1b1c21f90>, <ast.List object at 0x7da1b1c21ff0>]]
variable[G] assign[=] call[name[dot], parameter[call[name[transpose], parameter[name[M]]], call[name[dot], parameter[name[G], name[M]]]]]
continue
variable[l] assign[=] <ast.IfExp object at 0x7da1b1c224a0>
variable[m] assign[=] <ast.IfExp object at 0x7da1b1c22650>
variable[n] assign[=] <ast.IfExp object at 0x7da1b1c22cb0>
if compare[binary_operation[binary_operation[name[l] * name[m]] * name[n]] equal[==] constant[1]] begin[:]
variable[i] assign[=] <ast.IfExp object at 0x7da18dc07fa0>
variable[j] assign[=] <ast.IfExp object at 0x7da18dc04e80>
variable[k] assign[=] <ast.IfExp object at 0x7da18dc079a0>
variable[M] assign[=] list[[<ast.List object at 0x7da18dc06bf0>, <ast.List object at 0x7da18dc07520>, <ast.List object at 0x7da18dc07850>]]
variable[G] assign[=] call[name[dot], parameter[call[name[transpose], parameter[name[M]]], call[name[dot], parameter[name[G], name[M]]]]]
<ast.Tuple object at 0x7da1b1c10f70> assign[=] tuple[[<ast.Subscript object at 0x7da1b1c10b50>, <ast.Subscript object at 0x7da1b1c12020>, <ast.Subscript object at 0x7da1b1c12260>, <ast.BinOp object at 0x7da1b1c13bb0>, <ast.BinOp object at 0x7da1b1c13be0>, <ast.BinOp object at 0x7da1b1c10520>]]
if <ast.BoolOp object at 0x7da1b1c10d90> begin[:]
variable[M] assign[=] list[[<ast.List object at 0x7da1b1c132b0>, <ast.List object at 0x7da1b1c134f0>, <ast.List object at 0x7da1b1c12860>]]
variable[G] assign[=] call[name[dot], parameter[call[name[transpose], parameter[name[M]]], call[name[dot], parameter[name[G], name[M]]]]]
continue
if <ast.BoolOp object at 0x7da1b1c13100> begin[:]
variable[M] assign[=] list[[<ast.List object at 0x7da1b1c11510>, <ast.List object at 0x7da1b1c11660>, <ast.List object at 0x7da1b1c11180>]]
variable[G] assign[=] call[name[dot], parameter[call[name[transpose], parameter[name[M]]], call[name[dot], parameter[name[G], name[M]]]]]
continue
if <ast.BoolOp object at 0x7da1b1c10a60> begin[:]
variable[M] assign[=] list[[<ast.List object at 0x7da1b1c12cb0>, <ast.List object at 0x7da1b1c10490>, <ast.List object at 0x7da1b1c12620>]]
variable[G] assign[=] call[name[dot], parameter[call[name[transpose], parameter[name[M]]], call[name[dot], parameter[name[G], name[M]]]]]
continue
if <ast.BoolOp object at 0x7da1b1cb4610> begin[:]
variable[M] assign[=] list[[<ast.List object at 0x7da1b1cb69b0>, <ast.List object at 0x7da1b1cb6e60>, <ast.List object at 0x7da1b1cb65f0>]]
variable[G] assign[=] call[name[dot], parameter[call[name[transpose], parameter[name[M]]], call[name[dot], parameter[name[G], name[M]]]]]
continue
break
variable[A] assign[=] call[name[G]][tuple[[<ast.Constant object at 0x7da1b1cb5c90>, <ast.Constant object at 0x7da1b1cb5e70>]]]
variable[B] assign[=] call[name[G]][tuple[[<ast.Constant object at 0x7da1b1cb6710>, <ast.Constant object at 0x7da1b1cb6bc0>]]]
variable[C] assign[=] call[name[G]][tuple[[<ast.Constant object at 0x7da1b1cb6d10>, <ast.Constant object at 0x7da1b1cb7430>]]]
variable[E] assign[=] binary_operation[constant[2] * call[name[G]][tuple[[<ast.Constant object at 0x7da1b1cb6c80>, <ast.Constant object at 0x7da1b1cb4460>]]]]
variable[N] assign[=] binary_operation[constant[2] * call[name[G]][tuple[[<ast.Constant object at 0x7da1b1cb4820>, <ast.Constant object at 0x7da1b1cb7310>]]]]
variable[Y] assign[=] binary_operation[constant[2] * call[name[G]][tuple[[<ast.Constant object at 0x7da1b1cb6230>, <ast.Constant object at 0x7da1b1cb6fe0>]]]]
variable[a] assign[=] call[name[math].sqrt, parameter[name[A]]]
variable[b] assign[=] call[name[math].sqrt, parameter[name[B]]]
variable[c] assign[=] call[name[math].sqrt, parameter[name[C]]]
variable[alpha] assign[=] binary_operation[binary_operation[call[name[math].acos, parameter[binary_operation[binary_operation[binary_operation[name[E] / constant[2]] / name[b]] / name[c]]]] / name[math].pi] * constant[180]]
variable[beta] assign[=] binary_operation[binary_operation[call[name[math].acos, parameter[binary_operation[binary_operation[binary_operation[name[N] / constant[2]] / name[a]] / name[c]]]] / name[math].pi] * constant[180]]
variable[gamma] assign[=] binary_operation[binary_operation[call[name[math].acos, parameter[binary_operation[binary_operation[binary_operation[name[Y] / constant[2]] / name[a]] / name[b]]]] / name[math].pi] * constant[180]]
variable[latt] assign[=] call[name[Lattice].from_parameters, parameter[name[a], name[b], name[c], name[alpha], name[beta], name[gamma]]]
variable[mapped] assign[=] call[name[self].find_mapping, parameter[name[latt], name[e]]]
if compare[name[mapped] is_not constant[None]] begin[:]
if compare[call[name[np].linalg.det, parameter[call[name[mapped]][constant[0]].matrix]] greater[>] constant[0]] begin[:]
return[call[name[mapped]][constant[0]]]
<ast.Raise object at 0x7da18fe91120>
|
keyword[def] identifier[get_niggli_reduced_lattice] ( identifier[self] , identifier[tol] : identifier[float] = literal[int] )-> literal[string] :
literal[string]
identifier[matrix] = identifier[self] . identifier[lll_matrix]
identifier[a] = identifier[matrix] [ literal[int] ]
identifier[b] = identifier[matrix] [ literal[int] ]
identifier[c] = identifier[matrix] [ literal[int] ]
identifier[e] = identifier[tol] * identifier[self] . identifier[volume] **( literal[int] / literal[int] )
identifier[G] =[
[ identifier[dot] ( identifier[a] , identifier[a] ), identifier[dot] ( identifier[a] , identifier[b] ), identifier[dot] ( identifier[a] , identifier[c] )],
[ identifier[dot] ( identifier[a] , identifier[b] ), identifier[dot] ( identifier[b] , identifier[b] ), identifier[dot] ( identifier[b] , identifier[c] )],
[ identifier[dot] ( identifier[a] , identifier[c] ), identifier[dot] ( identifier[b] , identifier[c] ), identifier[dot] ( identifier[c] , identifier[c] )],
]
identifier[G] = identifier[np] . identifier[array] ( identifier[G] )
keyword[for] identifier[count] keyword[in] identifier[range] ( literal[int] ):
( identifier[A] , identifier[B] , identifier[C] , identifier[E] , identifier[N] , identifier[Y] )=(
identifier[G] [ literal[int] , literal[int] ],
identifier[G] [ literal[int] , literal[int] ],
identifier[G] [ literal[int] , literal[int] ],
literal[int] * identifier[G] [ literal[int] , literal[int] ],
literal[int] * identifier[G] [ literal[int] , literal[int] ],
literal[int] * identifier[G] [ literal[int] , literal[int] ],
)
keyword[if] identifier[A] > identifier[B] + identifier[e] keyword[or] ( identifier[abs] ( identifier[A] - identifier[B] )< identifier[e] keyword[and] identifier[abs] ( identifier[E] )> identifier[abs] ( identifier[N] )+ identifier[e] ):
identifier[M] =[[ literal[int] ,- literal[int] , literal[int] ],[- literal[int] , literal[int] , literal[int] ],[ literal[int] , literal[int] ,- literal[int] ]]
identifier[G] = identifier[dot] ( identifier[transpose] ( identifier[M] ), identifier[dot] ( identifier[G] , identifier[M] ))
keyword[if] ( identifier[B] > identifier[C] + identifier[e] ) keyword[or] ( identifier[abs] ( identifier[B] - identifier[C] )< identifier[e] keyword[and] identifier[abs] ( identifier[N] )> identifier[abs] ( identifier[Y] )+ identifier[e] ):
identifier[M] =[[- literal[int] , literal[int] , literal[int] ],[ literal[int] , literal[int] ,- literal[int] ],[ literal[int] ,- literal[int] , literal[int] ]]
identifier[G] = identifier[dot] ( identifier[transpose] ( identifier[M] ), identifier[dot] ( identifier[G] , identifier[M] ))
keyword[continue]
identifier[l] = literal[int] keyword[if] identifier[abs] ( identifier[E] )< identifier[e] keyword[else] identifier[E] / identifier[abs] ( identifier[E] )
identifier[m] = literal[int] keyword[if] identifier[abs] ( identifier[N] )< identifier[e] keyword[else] identifier[N] / identifier[abs] ( identifier[N] )
identifier[n] = literal[int] keyword[if] identifier[abs] ( identifier[Y] )< identifier[e] keyword[else] identifier[Y] / identifier[abs] ( identifier[Y] )
keyword[if] identifier[l] * identifier[m] * identifier[n] == literal[int] :
identifier[i] =- literal[int] keyword[if] identifier[l] ==- literal[int] keyword[else] literal[int]
identifier[j] =- literal[int] keyword[if] identifier[m] ==- literal[int] keyword[else] literal[int]
identifier[k] =- literal[int] keyword[if] identifier[n] ==- literal[int] keyword[else] literal[int]
identifier[M] =[[ identifier[i] , literal[int] , literal[int] ],[ literal[int] , identifier[j] , literal[int] ],[ literal[int] , literal[int] , identifier[k] ]]
identifier[G] = identifier[dot] ( identifier[transpose] ( identifier[M] ), identifier[dot] ( identifier[G] , identifier[M] ))
keyword[elif] identifier[l] * identifier[m] * identifier[n] == literal[int] keyword[or] identifier[l] * identifier[m] * identifier[n] ==- literal[int] :
identifier[i] =- literal[int] keyword[if] identifier[l] == literal[int] keyword[else] literal[int]
identifier[j] =- literal[int] keyword[if] identifier[m] == literal[int] keyword[else] literal[int]
identifier[k] =- literal[int] keyword[if] identifier[n] == literal[int] keyword[else] literal[int]
keyword[if] identifier[i] * identifier[j] * identifier[k] ==- literal[int] :
keyword[if] identifier[n] == literal[int] :
identifier[k] =- literal[int]
keyword[elif] identifier[m] == literal[int] :
identifier[j] =- literal[int]
keyword[elif] identifier[l] == literal[int] :
identifier[i] =- literal[int]
identifier[M] =[[ identifier[i] , literal[int] , literal[int] ],[ literal[int] , identifier[j] , literal[int] ],[ literal[int] , literal[int] , identifier[k] ]]
identifier[G] = identifier[dot] ( identifier[transpose] ( identifier[M] ), identifier[dot] ( identifier[G] , identifier[M] ))
( identifier[A] , identifier[B] , identifier[C] , identifier[E] , identifier[N] , identifier[Y] )=(
identifier[G] [ literal[int] , literal[int] ],
identifier[G] [ literal[int] , literal[int] ],
identifier[G] [ literal[int] , literal[int] ],
literal[int] * identifier[G] [ literal[int] , literal[int] ],
literal[int] * identifier[G] [ literal[int] , literal[int] ],
literal[int] * identifier[G] [ literal[int] , literal[int] ],
)
keyword[if] (
identifier[abs] ( identifier[E] )> identifier[B] + identifier[e]
keyword[or] ( identifier[abs] ( identifier[E] - identifier[B] )< identifier[e] keyword[and] literal[int] * identifier[N] < identifier[Y] - identifier[e] )
keyword[or] ( identifier[abs] ( identifier[E] + identifier[B] )< identifier[e] keyword[and] identifier[Y] <- identifier[e] )
):
identifier[M] =[[ literal[int] , literal[int] , literal[int] ],[ literal[int] , literal[int] ,- identifier[E] / identifier[abs] ( identifier[E] )],[ literal[int] , literal[int] , literal[int] ]]
identifier[G] = identifier[dot] ( identifier[transpose] ( identifier[M] ), identifier[dot] ( identifier[G] , identifier[M] ))
keyword[continue]
keyword[if] (
identifier[abs] ( identifier[N] )> identifier[A] + identifier[e]
keyword[or] ( identifier[abs] ( identifier[A] - identifier[N] )< identifier[e] keyword[and] literal[int] * identifier[E] < identifier[Y] - identifier[e] )
keyword[or] ( identifier[abs] ( identifier[A] + identifier[N] )< identifier[e] keyword[and] identifier[Y] <- identifier[e] )
):
identifier[M] =[[ literal[int] , literal[int] ,- identifier[N] / identifier[abs] ( identifier[N] )],[ literal[int] , literal[int] , literal[int] ],[ literal[int] , literal[int] , literal[int] ]]
identifier[G] = identifier[dot] ( identifier[transpose] ( identifier[M] ), identifier[dot] ( identifier[G] , identifier[M] ))
keyword[continue]
keyword[if] (
identifier[abs] ( identifier[Y] )> identifier[A] + identifier[e]
keyword[or] ( identifier[abs] ( identifier[A] - identifier[Y] )< identifier[e] keyword[and] literal[int] * identifier[E] < identifier[N] - identifier[e] )
keyword[or] ( identifier[abs] ( identifier[A] + identifier[Y] )< identifier[e] keyword[and] identifier[N] <- identifier[e] )
):
identifier[M] =[[ literal[int] ,- identifier[Y] / identifier[abs] ( identifier[Y] ), literal[int] ],[ literal[int] , literal[int] , literal[int] ],[ literal[int] , literal[int] , literal[int] ]]
identifier[G] = identifier[dot] ( identifier[transpose] ( identifier[M] ), identifier[dot] ( identifier[G] , identifier[M] ))
keyword[continue]
keyword[if] identifier[E] + identifier[N] + identifier[Y] + identifier[A] + identifier[B] <- identifier[e] keyword[or] ( identifier[abs] ( identifier[E] + identifier[N] + identifier[Y] + identifier[A] + identifier[B] )< identifier[e] < identifier[Y] +( identifier[A] + identifier[N] )* literal[int] ):
identifier[M] =[[ literal[int] , literal[int] , literal[int] ],[ literal[int] , literal[int] , literal[int] ],[ literal[int] , literal[int] , literal[int] ]]
identifier[G] = identifier[dot] ( identifier[transpose] ( identifier[M] ), identifier[dot] ( identifier[G] , identifier[M] ))
keyword[continue]
keyword[break]
identifier[A] = identifier[G] [ literal[int] , literal[int] ]
identifier[B] = identifier[G] [ literal[int] , literal[int] ]
identifier[C] = identifier[G] [ literal[int] , literal[int] ]
identifier[E] = literal[int] * identifier[G] [ literal[int] , literal[int] ]
identifier[N] = literal[int] * identifier[G] [ literal[int] , literal[int] ]
identifier[Y] = literal[int] * identifier[G] [ literal[int] , literal[int] ]
identifier[a] = identifier[math] . identifier[sqrt] ( identifier[A] )
identifier[b] = identifier[math] . identifier[sqrt] ( identifier[B] )
identifier[c] = identifier[math] . identifier[sqrt] ( identifier[C] )
identifier[alpha] = identifier[math] . identifier[acos] ( identifier[E] / literal[int] / identifier[b] / identifier[c] )/ identifier[math] . identifier[pi] * literal[int]
identifier[beta] = identifier[math] . identifier[acos] ( identifier[N] / literal[int] / identifier[a] / identifier[c] )/ identifier[math] . identifier[pi] * literal[int]
identifier[gamma] = identifier[math] . identifier[acos] ( identifier[Y] / literal[int] / identifier[a] / identifier[b] )/ identifier[math] . identifier[pi] * literal[int]
identifier[latt] = identifier[Lattice] . identifier[from_parameters] ( identifier[a] , identifier[b] , identifier[c] , identifier[alpha] , identifier[beta] , identifier[gamma] )
identifier[mapped] = identifier[self] . identifier[find_mapping] ( identifier[latt] , identifier[e] , identifier[skip_rotation_matrix] = keyword[True] )
keyword[if] identifier[mapped] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[np] . identifier[linalg] . identifier[det] ( identifier[mapped] [ literal[int] ]. identifier[matrix] )> literal[int] :
keyword[return] identifier[mapped] [ literal[int] ]
keyword[else] :
keyword[return] identifier[Lattice] (- identifier[mapped] [ literal[int] ]. identifier[matrix] )
keyword[raise] identifier[ValueError] ( literal[string] )
|
def get_niggli_reduced_lattice(self, tol: float=1e-05) -> 'Lattice':
    """
    Get the Niggli reduced lattice using the numerically stable algo
    proposed by R. W. Grosse-Kunstleve, N. K. Sauter, & P. D. Adams,
    Acta Crystallographica Section A Foundations of Crystallography, 2003,
    60(1), 1-6. doi:10.1107/S010876730302186X
    Args:
        tol (float): The numerical tolerance. The default of 1e-5 should
            result in stable behavior for most cases.
    Returns:
        Niggli-reduced lattice.
    Raises:
        ValueError: If the reduced cell cannot be mapped back onto this
            lattice (reduction failed within the iteration limit).
    """
    # lll reduction is more stable for skewed cells
    matrix = self.lll_matrix
    a = matrix[0]
    b = matrix[1]
    c = matrix[2]
    # Tolerance is scaled by the cell size (volume ** 1/3) so the same
    # relative tolerance applies to cells of different sizes.
    e = tol * self.volume ** (1 / 3)
    # Define metric tensor
    G = [[dot(a, a), dot(a, b), dot(a, c)], [dot(a, b), dot(b, b), dot(b, c)], [dot(a, c), dot(b, c), dot(c, c)]]
    G = np.array(G)
    # This sets an upper limit on the number of iterations.
    for count in range(100):
        # The steps are labelled as Ax as per the labelling scheme in the
        # paper.
        (A, B, C, E, N, Y) = (G[0, 0], G[1, 1], G[2, 2], 2 * G[1, 2], 2 * G[0, 2], 2 * G[0, 1])
        if A > B + e or (abs(A - B) < e and abs(E) > abs(N) + e):
            # A1
            M = [[0, -1, 0], [-1, 0, 0], [0, 0, -1]]
            G = dot(transpose(M), dot(G, M)) # depends on [control=['if'], data=[]]
        if B > C + e or (abs(B - C) < e and abs(N) > abs(Y) + e):
            # A2
            M = [[-1, 0, 0], [0, 0, -1], [0, -1, 0]]
            G = dot(transpose(M), dot(G, M))
            continue # depends on [control=['if'], data=[]]
        # l, m, n hold the signs (-1, 0 or +1) of E, N, Y respectively,
        # with values within tolerance of zero treated as zero.
        l = 0 if abs(E) < e else E / abs(E)
        m = 0 if abs(N) < e else N / abs(N)
        n = 0 if abs(Y) < e else Y / abs(Y)
        if l * m * n == 1:
            # A3
            i = -1 if l == -1 else 1
            j = -1 if m == -1 else 1
            k = -1 if n == -1 else 1
            M = [[i, 0, 0], [0, j, 0], [0, 0, k]]
            G = dot(transpose(M), dot(G, M)) # depends on [control=['if'], data=[]]
        elif l * m * n == 0 or l * m * n == -1:
            # A4
            i = -1 if l == 1 else 1
            j = -1 if m == 1 else 1
            k = -1 if n == 1 else 1
            if i * j * k == -1:
                if n == 0:
                    k = -1 # depends on [control=['if'], data=[]]
                elif m == 0:
                    j = -1 # depends on [control=['if'], data=[]]
                elif l == 0:
                    i = -1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
            M = [[i, 0, 0], [0, j, 0], [0, 0, k]]
            G = dot(transpose(M), dot(G, M)) # depends on [control=['if'], data=[]]
        # Re-read the (possibly transformed) metric tensor entries before
        # the remaining reduction steps.
        (A, B, C, E, N, Y) = (G[0, 0], G[1, 1], G[2, 2], 2 * G[1, 2], 2 * G[0, 2], 2 * G[0, 1])
        # A5
        if abs(E) > B + e or (abs(E - B) < e and 2 * N < Y - e) or (abs(E + B) < e and Y < -e):
            M = [[1, 0, 0], [0, 1, -E / abs(E)], [0, 0, 1]]
            G = dot(transpose(M), dot(G, M))
            continue # depends on [control=['if'], data=[]]
        # A6
        if abs(N) > A + e or (abs(A - N) < e and 2 * E < Y - e) or (abs(A + N) < e and Y < -e):
            M = [[1, 0, -N / abs(N)], [0, 1, 0], [0, 0, 1]]
            G = dot(transpose(M), dot(G, M))
            continue # depends on [control=['if'], data=[]]
        # A7
        if abs(Y) > A + e or (abs(A - Y) < e and 2 * E < N - e) or (abs(A + Y) < e and N < -e):
            M = [[1, -Y / abs(Y), 0], [0, 1, 0], [0, 0, 1]]
            G = dot(transpose(M), dot(G, M))
            continue # depends on [control=['if'], data=[]]
        # A8
        if E + N + Y + A + B < -e or abs(E + N + Y + A + B) < e < Y + (A + N) * 2:
            M = [[1, 0, 1], [0, 1, 1], [0, 0, 1]]
            G = dot(transpose(M), dot(G, M))
            continue # depends on [control=['if'], data=[]]
        # No step applied: the metric tensor is Niggli-reduced.
        break # depends on [control=['for'], data=[]]
    # Recover the cell parameters (lengths and angles) from the reduced
    # metric tensor G.
    A = G[0, 0]
    B = G[1, 1]
    C = G[2, 2]
    E = 2 * G[1, 2]
    N = 2 * G[0, 2]
    Y = 2 * G[0, 1]
    a = math.sqrt(A)
    b = math.sqrt(B)
    c = math.sqrt(C)
    alpha = math.acos(E / 2 / b / c) / math.pi * 180
    beta = math.acos(N / 2 / a / c) / math.pi * 180
    gamma = math.acos(Y / 2 / a / b) / math.pi * 180
    latt = Lattice.from_parameters(a, b, c, alpha, beta, gamma)
    # Map the reduced cell back onto this lattice; flip the basis if the
    # determinant is negative so a right-handed lattice is returned.
    mapped = self.find_mapping(latt, e, skip_rotation_matrix=True)
    if mapped is not None:
        if np.linalg.det(mapped[0].matrix) > 0:
            return mapped[0] # depends on [control=['if'], data=[]]
        else:
            return Lattice(-mapped[0].matrix) # depends on [control=['if'], data=['mapped']]
    raise ValueError("can't find niggli")
|
def print_dict(d, show_missing=True):
    """Prints a shallow dict to console.
    Args:
        d: Dict to print.
        show_missing: Whether to show keys with empty values.
    """
    for key, value in sorted(d.items()):
        if not value and show_missing:
            # Empty value: show the key with a missing-value marker.
            print('{} -'.format(key))
        elif isinstance(value, list):
            # List value: print the key, then each element indented.
            print(key)
            for element in value:
                print('  {}'.format(element))
        elif isinstance(value, dict):
            # Dict value: print the key, then each nested pair indented,
            # with the nested key left-padded to a 20-character column.
            print(key)
            for inner_key, inner_value in sorted(value.items()):
                print('  {:<20} {}'.format(inner_key, inner_value))
|
def function[print_dict, parameter[d, show_missing]]:
constant[Prints a shallow dict to console.
Args:
d: Dict to print.
show_missing: Whether to show keys with empty values.
]
for taget[tuple[[<ast.Name object at 0x7da2041dbc40>, <ast.Name object at 0x7da2041db2e0>]]] in starred[call[name[sorted], parameter[call[name[d].items, parameter[]]]]] begin[:]
if <ast.BoolOp object at 0x7da2041db6d0> begin[:]
call[name[print], parameter[call[constant[{} -].format, parameter[name[k]]]]]
|
keyword[def] identifier[print_dict] ( identifier[d] , identifier[show_missing] = keyword[True] ):
literal[string]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[sorted] ( identifier[d] . identifier[items] ()):
keyword[if] ( keyword[not] identifier[v] ) keyword[and] identifier[show_missing] :
identifier[print] ( literal[string] . identifier[format] ( identifier[k] ))
keyword[elif] identifier[isinstance] ( identifier[v] , identifier[list] ):
identifier[print] ( identifier[k] )
keyword[for] identifier[item] keyword[in] identifier[v] :
identifier[print] ( literal[string] . identifier[format] ( identifier[item] ))
keyword[elif] identifier[isinstance] ( identifier[v] , identifier[dict] ):
identifier[print] ( identifier[k] )
keyword[for] identifier[kk] , identifier[vv] keyword[in] identifier[sorted] ( identifier[v] . identifier[items] ()):
identifier[print] ( literal[string] . identifier[format] ( identifier[kk] , identifier[vv] ))
|
def print_dict(d, show_missing=True):
"""Prints a shallow dict to console.
Args:
d: Dict to print.
show_missing: Whether to show keys with empty values.
"""
for (k, v) in sorted(d.items()):
if not v and show_missing:
# No instances of the key, so print missing symbol.
print('{} -'.format(k)) # depends on [control=['if'], data=[]]
elif isinstance(v, list):
# Value is a list, so print each item of the list.
print(k)
for item in v:
print(' {}'.format(item)) # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=[]]
elif isinstance(v, dict):
# Value is a dict, so print each (key, value) pair of the dict.
print(k)
for (kk, vv) in sorted(v.items()):
print(' {:<20} {}'.format(kk, vv)) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
|
def to_bool(value):
    # type: (Any) -> bool
    """
    Convert a value into a bool but handle "truthy" strings eg, yes, true, ok, y
    """
    if not isinstance(value, _compat.string_types):
        # Non-strings fall back to plain truthiness.
        return bool(value)
    # Case-insensitive match against the accepted "truthy" spellings.
    return value.upper() in ('Y', 'YES', 'T', 'TRUE', '1', 'OK')
|
def function[to_bool, parameter[value]]:
constant[
Convert a value into a bool but handle "truthy" strings eg, yes, true, ok, y
]
if call[name[isinstance], parameter[name[value], name[_compat].string_types]] begin[:]
return[compare[call[name[value].upper, parameter[]] in tuple[[<ast.Constant object at 0x7da1b0a643d0>, <ast.Constant object at 0x7da1b0a653c0>, <ast.Constant object at 0x7da1b0a65510>, <ast.Constant object at 0x7da1b0a65c00>, <ast.Constant object at 0x7da1b0a65750>, <ast.Constant object at 0x7da1b0a64c40>]]]]
return[call[name[bool], parameter[name[value]]]]
|
keyword[def] identifier[to_bool] ( identifier[value] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[_compat] . identifier[string_types] ):
keyword[return] identifier[value] . identifier[upper] () keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] )
keyword[return] identifier[bool] ( identifier[value] )
|
def to_bool(value):
# type: (Any) -> bool
'\n Convert a value into a bool but handle "truthy" strings eg, yes, true, ok, y\n '
if isinstance(value, _compat.string_types):
return value.upper() in ('Y', 'YES', 'T', 'TRUE', '1', 'OK') # depends on [control=['if'], data=[]]
return bool(value)
|
def mysql_aes_encrypt(val, key):
    """Mysql AES encrypt value with secret key.
    :param val: Plain text value.
    :param key: The AES key.
    :returns: The encrypted AES value.
    """
    # Both the value and the key may be given as bytes or text.
    assert isinstance(val, (binary_type, text_type))
    assert isinstance(key, (binary_type, text_type))
    aes_key = _mysql_aes_key(_to_binary(key))
    padded_val = _mysql_aes_pad(_to_binary(val))
    encryptor = _mysql_aes_engine(aes_key).encryptor()
    return encryptor.update(padded_val) + encryptor.finalize()
|
def function[mysql_aes_encrypt, parameter[val, key]]:
constant[Mysql AES encrypt value with secret key.
:param val: Plain text value.
:param key: The AES key.
:returns: The encrypted AES value.
]
assert[<ast.BoolOp object at 0x7da2044c29b0>]
assert[<ast.BoolOp object at 0x7da2044c3790>]
variable[k] assign[=] call[name[_mysql_aes_key], parameter[call[name[_to_binary], parameter[name[key]]]]]
variable[v] assign[=] call[name[_mysql_aes_pad], parameter[call[name[_to_binary], parameter[name[val]]]]]
variable[e] assign[=] call[call[name[_mysql_aes_engine], parameter[name[k]]].encryptor, parameter[]]
return[binary_operation[call[name[e].update, parameter[name[v]]] + call[name[e].finalize, parameter[]]]]
|
keyword[def] identifier[mysql_aes_encrypt] ( identifier[val] , identifier[key] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[val] , identifier[binary_type] ) keyword[or] identifier[isinstance] ( identifier[val] , identifier[text_type] )
keyword[assert] identifier[isinstance] ( identifier[key] , identifier[binary_type] ) keyword[or] identifier[isinstance] ( identifier[key] , identifier[text_type] )
identifier[k] = identifier[_mysql_aes_key] ( identifier[_to_binary] ( identifier[key] ))
identifier[v] = identifier[_mysql_aes_pad] ( identifier[_to_binary] ( identifier[val] ))
identifier[e] = identifier[_mysql_aes_engine] ( identifier[k] ). identifier[encryptor] ()
keyword[return] identifier[e] . identifier[update] ( identifier[v] )+ identifier[e] . identifier[finalize] ()
|
def mysql_aes_encrypt(val, key):
"""Mysql AES encrypt value with secret key.
:param val: Plain text value.
:param key: The AES key.
:returns: The encrypted AES value.
"""
assert isinstance(val, binary_type) or isinstance(val, text_type)
assert isinstance(key, binary_type) or isinstance(key, text_type)
k = _mysql_aes_key(_to_binary(key))
v = _mysql_aes_pad(_to_binary(val))
e = _mysql_aes_engine(k).encryptor()
return e.update(v) + e.finalize()
|
def verify_any(df, check, *args, **kwargs):
    """
    Verify that any of the entries in ``check(df, *args, **kwargs)``
    is true.

    Args:
        df: The object under validation; returned unchanged on success so
            the call can be chained (e.g. in a ``pipe``).
        check: Callable invoked as ``check(df, *args, **kwargs)``; its
            result is tested for any truthy entry via ``np.any``.
        *args: Extra positional arguments forwarded to ``check``.
        **kwargs: Extra keyword arguments forwarded to ``check``.

    Returns:
        ``df`` unchanged.

    Raises:
        AssertionError: If no entry of the check result is truthy. The
            exception args are ``(message, df)``, matching the previous
            contract.
    """
    result = check(df, *args, **kwargs)
    # Raise explicitly rather than via `assert`, which is silently
    # stripped when Python runs with the -O flag.
    if not np.any(result):
        raise AssertionError('{} not true for any'.format(check.__name__), df)
    return df
|
def function[verify_any, parameter[df, check]]:
constant[
Verify that any of the entries in ``check(df, *args, **kwargs)``
is true
]
variable[result] assign[=] call[name[check], parameter[name[df], <ast.Starred object at 0x7da1b077a140>]]
<ast.Try object at 0x7da1b077b100>
return[name[df]]
|
keyword[def] identifier[verify_any] ( identifier[df] , identifier[check] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[result] = identifier[check] ( identifier[df] ,* identifier[args] ,** identifier[kwargs] )
keyword[try] :
keyword[assert] identifier[np] . identifier[any] ( identifier[result] )
keyword[except] identifier[AssertionError] keyword[as] identifier[e] :
identifier[msg] = literal[string] . identifier[format] ( identifier[check] . identifier[__name__] )
identifier[e] . identifier[args] =( identifier[msg] , identifier[df] )
keyword[raise]
keyword[return] identifier[df]
|
def verify_any(df, check, *args, **kwargs):
"""
Verify that any of the entries in ``check(df, *args, **kwargs)``
is true
"""
result = check(df, *args, **kwargs)
try:
assert np.any(result) # depends on [control=['try'], data=[]]
except AssertionError as e:
msg = '{} not true for any'.format(check.__name__)
e.args = (msg, df)
raise # depends on [control=['except'], data=['e']]
return df
|
def ToJson(self):
    """
    Convert object members to a dictionary that can be parsed as JSON.
    Returns:
        dict: the base transaction JSON extended with the ``nonce`` field.
    """
    json_repr = super(MinerTransaction, self).ToJson()
    json_repr['nonce'] = self.Nonce
    return json_repr
|
def function[ToJson, parameter[self]]:
constant[
Convert object members to a dictionary that can be parsed as JSON.
Returns:
dict:
]
variable[jsn] assign[=] call[call[name[super], parameter[name[MinerTransaction], name[self]]].ToJson, parameter[]]
call[name[jsn]][constant[nonce]] assign[=] name[self].Nonce
return[name[jsn]]
|
keyword[def] identifier[ToJson] ( identifier[self] ):
literal[string]
identifier[jsn] = identifier[super] ( identifier[MinerTransaction] , identifier[self] ). identifier[ToJson] ()
identifier[jsn] [ literal[string] ]= identifier[self] . identifier[Nonce]
keyword[return] identifier[jsn]
|
def ToJson(self):
"""
Convert object members to a dictionary that can be parsed as JSON.
Returns:
dict:
"""
jsn = super(MinerTransaction, self).ToJson()
jsn['nonce'] = self.Nonce
return jsn
|
def create_processors_from_settings(self):
    """
    Expects the Django setting "EVENT_TRACKING_PROCESSORS" to be defined and
    point to a list of backend engine configurations.
    Example::
        EVENT_TRACKING_PROCESSORS = [
            {
                'ENGINE': 'some.arbitrary.Processor'
            },
            {
                'ENGINE': 'some.arbitrary.OtherProcessor',
                'OPTIONS': {
                    'user': 'foo'
                }
            },
        ]
    """
    # Missing setting is treated as "no processors configured".
    processor_config = getattr(settings, DJANGO_PROCESSOR_SETTING_NAME, [])
    return self.instantiate_objects(processor_config)
|
def function[create_processors_from_settings, parameter[self]]:
constant[
Expects the Django setting "EVENT_TRACKING_PROCESSORS" to be defined and
point to a list of backend engine configurations.
Example::
EVENT_TRACKING_PROCESSORS = [
{
'ENGINE': 'some.arbitrary.Processor'
},
{
'ENGINE': 'some.arbitrary.OtherProcessor',
'OPTIONS': {
'user': 'foo'
}
},
]
]
variable[config] assign[=] call[name[getattr], parameter[name[settings], name[DJANGO_PROCESSOR_SETTING_NAME], list[[]]]]
variable[processors] assign[=] call[name[self].instantiate_objects, parameter[name[config]]]
return[name[processors]]
|
keyword[def] identifier[create_processors_from_settings] ( identifier[self] ):
literal[string]
identifier[config] = identifier[getattr] ( identifier[settings] , identifier[DJANGO_PROCESSOR_SETTING_NAME] ,[])
identifier[processors] = identifier[self] . identifier[instantiate_objects] ( identifier[config] )
keyword[return] identifier[processors]
|
def create_processors_from_settings(self):
"""
Expects the Django setting "EVENT_TRACKING_PROCESSORS" to be defined and
point to a list of backend engine configurations.
Example::
EVENT_TRACKING_PROCESSORS = [
{
'ENGINE': 'some.arbitrary.Processor'
},
{
'ENGINE': 'some.arbitrary.OtherProcessor',
'OPTIONS': {
'user': 'foo'
}
},
]
"""
config = getattr(settings, DJANGO_PROCESSOR_SETTING_NAME, [])
processors = self.instantiate_objects(config)
return processors
|
def execute_async_script(self, script, *args):
    """Execute JavaScript Asynchronously in current context.
    Support:
        Web(WebView)
    Args:
        script: The JavaScript to execute.
        *args: Arguments for your JavaScript.
    Returns:
        Returns the return value of the function.
    """
    payload = {'script': script, 'args': list(args)}
    return self._execute(Command.EXECUTE_ASYNC_SCRIPT, payload)
|
def function[execute_async_script, parameter[self, script]]:
constant[Execute JavaScript Asynchronously in current context.
Support:
Web(WebView)
Args:
script: The JavaScript to execute.
*args: Arguments for your JavaScript.
Returns:
Returns the return value of the function.
]
return[call[name[self]._execute, parameter[name[Command].EXECUTE_ASYNC_SCRIPT, dictionary[[<ast.Constant object at 0x7da1aff8ed40>, <ast.Constant object at 0x7da1aff8cee0>], [<ast.Name object at 0x7da1aff8dbd0>, <ast.Call object at 0x7da1aff8df00>]]]]]
|
keyword[def] identifier[execute_async_script] ( identifier[self] , identifier[script] ,* identifier[args] ):
literal[string]
keyword[return] identifier[self] . identifier[_execute] ( identifier[Command] . identifier[EXECUTE_ASYNC_SCRIPT] ,{
literal[string] : identifier[script] ,
literal[string] : identifier[list] ( identifier[args] )})
|
def execute_async_script(self, script, *args):
"""Execute JavaScript Asynchronously in current context.
Support:
Web(WebView)
Args:
script: The JavaScript to execute.
*args: Arguments for your JavaScript.
Returns:
Returns the return value of the function.
"""
return self._execute(Command.EXECUTE_ASYNC_SCRIPT, {'script': script, 'args': list(args)})
|
def readImages(path, sc=None, minParitions=1, bigdl_type="float"):
    """
    Read the directory of images into DataFrame from the local or remote source.
    :param path Directory to the input data files, the path can be comma separated paths as the
            list of inputs. Wildcards path are supported similarly to sc.binaryFiles(path).
    :param min_partitions A suggestion value of the minimal splitting number for input data.
    :return DataFrame with a single column "image"; Each record in the column represents one image
            record: Row (uri, height, width, channels, CvType, bytes)
    """
    # NOTE(review): parameter name `minParitions` is misspelled
    # ("Paritions") but is kept for keyword-argument compatibility.
    image_df = callBigDlFunc(bigdl_type, "dlReadImage", path, sc, minParitions)
    # Rebind the Java context on the result's SparkContext (presumably so
    # the returned DataFrame stays usable from this context -- TODO confirm).
    image_df._sc._jsc = sc._jsc
    return image_df
|
def function[readImages, parameter[path, sc, minParitions, bigdl_type]]:
constant[
Read the directory of images into DataFrame from the local or remote source.
:param path Directory to the input data files, the path can be comma separated paths as the
list of inputs. Wildcards path are supported similarly to sc.binaryFiles(path).
:param min_partitions A suggestion value of the minimal splitting number for input data.
:return DataFrame with a single column "image"; Each record in the column represents one image
record: Row (uri, height, width, channels, CvType, bytes)
]
variable[df] assign[=] call[name[callBigDlFunc], parameter[name[bigdl_type], constant[dlReadImage], name[path], name[sc], name[minParitions]]]
name[df]._sc._jsc assign[=] name[sc]._jsc
return[name[df]]
|
keyword[def] identifier[readImages] ( identifier[path] , identifier[sc] = keyword[None] , identifier[minParitions] = literal[int] , identifier[bigdl_type] = literal[string] ):
literal[string]
identifier[df] = identifier[callBigDlFunc] ( identifier[bigdl_type] , literal[string] , identifier[path] , identifier[sc] , identifier[minParitions] )
identifier[df] . identifier[_sc] . identifier[_jsc] = identifier[sc] . identifier[_jsc]
keyword[return] identifier[df]
|
def readImages(path, sc=None, minParitions=1, bigdl_type='float'):
"""
Read the directory of images into DataFrame from the local or remote source.
:param path Directory to the input data files, the path can be comma separated paths as the
list of inputs. Wildcards path are supported similarly to sc.binaryFiles(path).
:param min_partitions A suggestion value of the minimal splitting number for input data.
:return DataFrame with a single column "image"; Each record in the column represents one image
record: Row (uri, height, width, channels, CvType, bytes)
"""
df = callBigDlFunc(bigdl_type, 'dlReadImage', path, sc, minParitions)
df._sc._jsc = sc._jsc
return df
|
def available(self):
    """ True if any of the supported modules from ``packages`` is available for use.

    Each name in ``self.packages`` is checked against the import machinery
    with :func:`importlib.util.find_spec`, which does not actually import
    the module.

    :return: True if any modules from ``packages`` exist
    :rtype: bool
    """
    # any() short-circuits on the first module whose spec is found,
    # matching the original early-return loop.
    return any(importlib.util.find_spec(module_name) for module_name in self.packages)
|
def function[available, parameter[self]]:
constant[ True if any of the supported modules from ``packages`` is available for use.
:return: True if any modules from ``packages`` exist
:rtype: bool
]
for taget[name[module_name]] in starred[name[self].packages] begin[:]
if call[name[importlib].util.find_spec, parameter[name[module_name]]] begin[:]
return[constant[True]]
return[constant[False]]
|
keyword[def] identifier[available] ( identifier[self] ):
literal[string]
keyword[for] identifier[module_name] keyword[in] identifier[self] . identifier[packages] :
keyword[if] identifier[importlib] . identifier[util] . identifier[find_spec] ( identifier[module_name] ):
keyword[return] keyword[True]
keyword[return] keyword[False]
|
def available(self):
""" True if any of the supported modules from ``packages`` is available for use.
:return: True if any modules from ``packages`` exist
:rtype: bool
"""
for module_name in self.packages:
if importlib.util.find_spec(module_name):
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['module_name']]
return False
|
def update_jsonb(uid, extinfo):
    '''
    Update the json.

    Merges the keys of ``extinfo`` into the record's stored extinfo and
    persists the result; existing keys are overwritten.
    '''
    merged_extinfo = MPost.get_by_uid(uid).extinfo
    merged_extinfo.update(extinfo)
    query = TabPost.update(extinfo=merged_extinfo).where(TabPost.uid == uid)
    query.execute()
    return uid
|
def function[update_jsonb, parameter[uid, extinfo]]:
constant[
Update the json.
]
variable[cur_extinfo] assign[=] call[name[MPost].get_by_uid, parameter[name[uid]]].extinfo
for taget[name[key]] in starred[name[extinfo]] begin[:]
call[name[cur_extinfo]][name[key]] assign[=] call[name[extinfo]][name[key]]
variable[entry] assign[=] call[call[name[TabPost].update, parameter[]].where, parameter[compare[name[TabPost].uid equal[==] name[uid]]]]
call[name[entry].execute, parameter[]]
return[name[uid]]
|
keyword[def] identifier[update_jsonb] ( identifier[uid] , identifier[extinfo] ):
literal[string]
identifier[cur_extinfo] = identifier[MPost] . identifier[get_by_uid] ( identifier[uid] ). identifier[extinfo]
keyword[for] identifier[key] keyword[in] identifier[extinfo] :
identifier[cur_extinfo] [ identifier[key] ]= identifier[extinfo] [ identifier[key] ]
identifier[entry] = identifier[TabPost] . identifier[update] (
identifier[extinfo] = identifier[cur_extinfo] ,
). identifier[where] ( identifier[TabPost] . identifier[uid] == identifier[uid] )
identifier[entry] . identifier[execute] ()
keyword[return] identifier[uid]
|
def update_jsonb(uid, extinfo):
"""
Update the json.
"""
cur_extinfo = MPost.get_by_uid(uid).extinfo
for key in extinfo:
cur_extinfo[key] = extinfo[key] # depends on [control=['for'], data=['key']]
entry = TabPost.update(extinfo=cur_extinfo).where(TabPost.uid == uid)
entry.execute()
return uid
|
def setValue(self, value):
    """
    Moves the line to the given value and rebuilds it
    :param value | <variant>
    """
    chart_scene = self.scene()
    # Move horizontally to the chart position for `value`, keeping the
    # current vertical position.
    target = chart_scene.mapFromChart(value, None)
    self.setPos(target.x(), self.pos().y())
    self.rebuild(chart_scene.gridRect())
|
def function[setValue, parameter[self, value]]:
constant[
Moves the line to the given value and rebuilds it
:param value | <variant>
]
variable[scene] assign[=] call[name[self].scene, parameter[]]
variable[point] assign[=] call[name[scene].mapFromChart, parameter[name[value], constant[None]]]
call[name[self].setPos, parameter[call[name[point].x, parameter[]], call[call[name[self].pos, parameter[]].y, parameter[]]]]
call[name[self].rebuild, parameter[call[name[scene].gridRect, parameter[]]]]
|
keyword[def] identifier[setValue] ( identifier[self] , identifier[value] ):
literal[string]
identifier[scene] = identifier[self] . identifier[scene] ()
identifier[point] = identifier[scene] . identifier[mapFromChart] ( identifier[value] , keyword[None] )
identifier[self] . identifier[setPos] ( identifier[point] . identifier[x] (), identifier[self] . identifier[pos] (). identifier[y] ())
identifier[self] . identifier[rebuild] ( identifier[scene] . identifier[gridRect] ())
|
def setValue(self, value):
"""
Moves the line to the given value and rebuilds it
:param value | <variant>
"""
scene = self.scene()
point = scene.mapFromChart(value, None)
self.setPos(point.x(), self.pos().y())
self.rebuild(scene.gridRect())
|
def meta_request(func):
    """ Handles parsing response structure and translating API Exceptions.

    The returned wrapper pops an optional ``serialize`` keyword argument
    (default ``True``); when truthy, the raw response is passed through
    ``serialize(resource, resp)`` before being returned. ``ApiException``
    raised by the wrapped call is translated via ``api_exception``.

    :param func: the API method being wrapped, invoked as
        ``func(self, resource, *args, **kwargs)``.
    :returns: the wrapping function.
    """
    import functools

    @functools.wraps(func)  # preserve the wrapped function's metadata
    def inner(self, resource, *args, **kwargs):
        serialize_response = kwargs.pop('serialize', True)
        try:
            resp = func(self, resource, *args, **kwargs)
        except ApiException as e:
            raise api_exception(e)
        if serialize_response:
            return serialize(resource, resp)
        return resp
    return inner
|
def function[meta_request, parameter[func]]:
constant[ Handles parsing response structure and translating API Exceptions ]
def function[inner, parameter[self, resource]]:
variable[serialize_response] assign[=] call[name[kwargs].pop, parameter[constant[serialize], constant[True]]]
<ast.Try object at 0x7da2047eba30>
if name[serialize_response] begin[:]
return[call[name[serialize], parameter[name[resource], name[resp]]]]
return[name[resp]]
return[name[inner]]
|
keyword[def] identifier[meta_request] ( identifier[func] ):
literal[string]
keyword[def] identifier[inner] ( identifier[self] , identifier[resource] ,* identifier[args] ,** identifier[kwargs] ):
identifier[serialize_response] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[True] )
keyword[try] :
identifier[resp] = identifier[func] ( identifier[self] , identifier[resource] ,* identifier[args] ,** identifier[kwargs] )
keyword[except] identifier[ApiException] keyword[as] identifier[e] :
keyword[raise] identifier[api_exception] ( identifier[e] )
keyword[if] identifier[serialize_response] :
keyword[return] identifier[serialize] ( identifier[resource] , identifier[resp] )
keyword[return] identifier[resp]
keyword[return] identifier[inner]
|
def meta_request(func):
""" Handles parsing response structure and translating API Exceptions """
def inner(self, resource, *args, **kwargs):
serialize_response = kwargs.pop('serialize', True)
try:
resp = func(self, resource, *args, **kwargs) # depends on [control=['try'], data=[]]
except ApiException as e:
raise api_exception(e) # depends on [control=['except'], data=['e']]
if serialize_response:
return serialize(resource, resp) # depends on [control=['if'], data=[]]
return resp
return inner
|
def password(self, password):
    """
    Sets the password of this WebCredentials.
    The secret password. This field is write-only. It is omitted by read operations. If authorization is required, the `password` value must be provided whenever a File Source is created or modified. An update to a dataset that does not change the File Source may omit the `password` field--the update will preserve the previous value.
    :param password: The password of this WebCredentials.
    :type: str
    """
    # ``None`` is accepted without validation (no password supplied).
    if password is not None:
        if len(password) > 1024:
            raise ValueError("Invalid value for `password`, length must be less than or equal to `1024`")
    self._password = password
|
def function[password, parameter[self, password]]:
constant[
Sets the password of this WebCredentials.
The secret password. This field is write-only. It is omitted by read operations. If authorization is required, the `password` value must be provided whenever a File Source is created or modified. An update to a dataset that does not change the File Source may omit the `password` field--the update will preserve the previous value.
:param password: The password of this WebCredentials.
:type: str
]
if <ast.BoolOp object at 0x7da204962830> begin[:]
<ast.Raise object at 0x7da204961c90>
name[self]._password assign[=] name[password]
|
keyword[def] identifier[password] ( identifier[self] , identifier[password] ):
literal[string]
keyword[if] identifier[password] keyword[is] keyword[not] keyword[None] keyword[and] identifier[len] ( identifier[password] )> literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[_password] = identifier[password]
|
def password(self, password):
"""
Sets the password of this WebCredentials.
The secret password. This field is write-only. It is omitted by read operations. If authorization is required, the `password` value must be provided whenever a File Source is created or modified. An update to a dataset that does not change the File Source may omit the `password` field--the update will preserve the previous value.
:param password: The password of this WebCredentials.
:type: str
"""
if password is not None and len(password) > 1024:
raise ValueError('Invalid value for `password`, length must be less than or equal to `1024`') # depends on [control=['if'], data=[]]
self._password = password
|
def allows(self, user, permission, obj=_nothing):
    """Checks that a user has permission. Returns True or False.
    :param user: a user.
    :param permission: permission to check; a single permission name or an
        iterable of names (all must be granted).
    :param obj: (optional) an object to check permission for.
    """
    rule = self._get_rule(obj)
    if isinstance(permission, basestring):
        # Single permission name: check it directly.
        return self._use_rule(rule, user, permission, obj)
    # Iterable of permission names: every one must pass.
    return all(self._use_rule(rule, user, perm, obj) for perm in permission)
|
def function[allows, parameter[self, user, permission, obj]]:
constant[Checks that a user has permission. Returns True or False.
:param user: a user.
:param permission: permission to check.
:param obj: (optional) an object to check permission for.
]
variable[rule] assign[=] call[name[self]._get_rule, parameter[name[obj]]]
if <ast.UnaryOp object at 0x7da18f00ce20> begin[:]
return[call[name[all], parameter[<ast.GeneratorExp object at 0x7da18f00db40>]]]
return[call[name[self]._use_rule, parameter[name[rule], name[user], name[permission], name[obj]]]]
|
keyword[def] identifier[allows] ( identifier[self] , identifier[user] , identifier[permission] , identifier[obj] = identifier[_nothing] ):
literal[string]
identifier[rule] = identifier[self] . identifier[_get_rule] ( identifier[obj] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[permission] , identifier[basestring] ):
keyword[return] identifier[all] (
identifier[self] . identifier[_use_rule] ( identifier[rule] , identifier[user] , identifier[perm] , identifier[obj] )
keyword[for] identifier[perm] keyword[in] identifier[permission]
)
keyword[return] identifier[self] . identifier[_use_rule] ( identifier[rule] , identifier[user] , identifier[permission] , identifier[obj] )
|
def allows(self, user, permission, obj=_nothing):
"""Checks that a user has permission. Returns True or False.
:param user: a user.
:param permission: permission to check.
:param obj: (optional) an object to check permission for.
"""
rule = self._get_rule(obj)
if not isinstance(permission, basestring):
return all((self._use_rule(rule, user, perm, obj) for perm in permission)) # depends on [control=['if'], data=[]]
return self._use_rule(rule, user, permission, obj)
|
def _container_candidates(self):
    """Generate container candidate list

    Candidate widths are the prefix sums of the sorted rectangle sides
    (accumulated from both the smallest and the largest side), clamped to
    the feasible [min_width, max_width] range, de-duplicated, and filtered
    by total area. Every candidate is paired with the maximal height.

    Returns:
        tuple list: [(width1, height1), (width2, height2), ...]
    """
    if not self._rectangles:
        return []
    if self._rotation:
        # With rotation allowed, either side of a rectangle may lie along
        # the container width.
        sides = sorted(side for rect in self._rectangles for side in rect)
        max_height = sum(max(r[0], r[1]) for r in self._rectangles)
        min_width = max(min(r[0], r[1]) for r in self._rectangles)
        max_width = max_height
    else:
        # Without rotation, widths come from r[0] and heights from r[1].
        sides = sorted(r[0] for r in self._rectangles)
        max_height = sum(r[1] for r in self._rectangles)
        min_width = max(r[0] for r in self._rectangles)
        max_width = sum(sides)
    # Honor explicit caps when they are tighter than the derived bounds.
    if self._max_width and self._max_width < max_width:
        max_width = self._max_width
    if self._max_height and self._max_height < max_height:
        max_height = self._max_height
    # NOTE(review): `assert` is stripped under -O, and the strict `>` also
    # rejects max_width == min_width -- confirm both are intended.
    assert(max_width>min_width)
    # Generate initial container widths
    candidates = [max_width, min_width]
    # Prefix sums of the sides from the largest side downwards...
    width = 0
    for s in reversed(sides):
        width += s
        candidates.append(width)
    # ...and from the smallest side upwards.
    width = 0
    for s in sides:
        width += s
        candidates.append(width)
    candidates.append(max_width)
    candidates.append(min_width)
    # Remove duplicates and widths too big or small
    # (order-preserving de-duplication; seen_add is hoisted for speed)
    seen = set()
    seen_add = seen.add
    candidates = [x for x in candidates if not(x in seen or seen_add(x))]
    candidates = [x for x in candidates if not(x>max_width or x<min_width)]
    # Remove candidates too small to fit all the rectangles
    min_area = sum(r[0]*r[1] for r in self._rectangles)
    return [(c, max_height) for c in candidates if c*max_height>=min_area]
|
def function[_container_candidates, parameter[self]]:
constant[Generate container candidate list
Returns:
tuple list: [(width1, height1), (width2, height2), ...]
]
if <ast.UnaryOp object at 0x7da18ede7a90> begin[:]
return[list[[]]]
if name[self]._rotation begin[:]
variable[sides] assign[=] call[name[sorted], parameter[<ast.GeneratorExp object at 0x7da18ede41f0>]]
variable[max_height] assign[=] call[name[sum], parameter[<ast.GeneratorExp object at 0x7da18ede6740>]]
variable[min_width] assign[=] call[name[max], parameter[<ast.GeneratorExp object at 0x7da18ede5360>]]
variable[max_width] assign[=] name[max_height]
if <ast.BoolOp object at 0x7da18ede7670> begin[:]
variable[max_width] assign[=] name[self]._max_width
if <ast.BoolOp object at 0x7da18ede5ba0> begin[:]
variable[max_height] assign[=] name[self]._max_height
assert[compare[name[max_width] greater[>] name[min_width]]]
variable[candidates] assign[=] list[[<ast.Name object at 0x7da18ede7940>, <ast.Name object at 0x7da18ede4070>]]
variable[width] assign[=] constant[0]
for taget[name[s]] in starred[call[name[reversed], parameter[name[sides]]]] begin[:]
<ast.AugAssign object at 0x7da18ede5840>
call[name[candidates].append, parameter[name[width]]]
variable[width] assign[=] constant[0]
for taget[name[s]] in starred[name[sides]] begin[:]
<ast.AugAssign object at 0x7da18ede5990>
call[name[candidates].append, parameter[name[width]]]
call[name[candidates].append, parameter[name[max_width]]]
call[name[candidates].append, parameter[name[min_width]]]
variable[seen] assign[=] call[name[set], parameter[]]
variable[seen_add] assign[=] name[seen].add
variable[candidates] assign[=] <ast.ListComp object at 0x7da18f810130>
variable[candidates] assign[=] <ast.ListComp object at 0x7da18f813730>
variable[min_area] assign[=] call[name[sum], parameter[<ast.GeneratorExp object at 0x7da18f811b40>]]
return[<ast.ListComp object at 0x7da18f8125f0>]
|
keyword[def] identifier[_container_candidates] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_rectangles] :
keyword[return] []
keyword[if] identifier[self] . identifier[_rotation] :
identifier[sides] = identifier[sorted] ( identifier[side] keyword[for] identifier[rect] keyword[in] identifier[self] . identifier[_rectangles] keyword[for] identifier[side] keyword[in] identifier[rect] )
identifier[max_height] = identifier[sum] ( identifier[max] ( identifier[r] [ literal[int] ], identifier[r] [ literal[int] ]) keyword[for] identifier[r] keyword[in] identifier[self] . identifier[_rectangles] )
identifier[min_width] = identifier[max] ( identifier[min] ( identifier[r] [ literal[int] ], identifier[r] [ literal[int] ]) keyword[for] identifier[r] keyword[in] identifier[self] . identifier[_rectangles] )
identifier[max_width] = identifier[max_height]
keyword[else] :
identifier[sides] = identifier[sorted] ( identifier[r] [ literal[int] ] keyword[for] identifier[r] keyword[in] identifier[self] . identifier[_rectangles] )
identifier[max_height] = identifier[sum] ( identifier[r] [ literal[int] ] keyword[for] identifier[r] keyword[in] identifier[self] . identifier[_rectangles] )
identifier[min_width] = identifier[max] ( identifier[r] [ literal[int] ] keyword[for] identifier[r] keyword[in] identifier[self] . identifier[_rectangles] )
identifier[max_width] = identifier[sum] ( identifier[sides] )
keyword[if] identifier[self] . identifier[_max_width] keyword[and] identifier[self] . identifier[_max_width] < identifier[max_width] :
identifier[max_width] = identifier[self] . identifier[_max_width]
keyword[if] identifier[self] . identifier[_max_height] keyword[and] identifier[self] . identifier[_max_height] < identifier[max_height] :
identifier[max_height] = identifier[self] . identifier[_max_height]
keyword[assert] ( identifier[max_width] > identifier[min_width] )
identifier[candidates] =[ identifier[max_width] , identifier[min_width] ]
identifier[width] = literal[int]
keyword[for] identifier[s] keyword[in] identifier[reversed] ( identifier[sides] ):
identifier[width] += identifier[s]
identifier[candidates] . identifier[append] ( identifier[width] )
identifier[width] = literal[int]
keyword[for] identifier[s] keyword[in] identifier[sides] :
identifier[width] += identifier[s]
identifier[candidates] . identifier[append] ( identifier[width] )
identifier[candidates] . identifier[append] ( identifier[max_width] )
identifier[candidates] . identifier[append] ( identifier[min_width] )
identifier[seen] = identifier[set] ()
identifier[seen_add] = identifier[seen] . identifier[add]
identifier[candidates] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[candidates] keyword[if] keyword[not] ( identifier[x] keyword[in] identifier[seen] keyword[or] identifier[seen_add] ( identifier[x] ))]
identifier[candidates] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[candidates] keyword[if] keyword[not] ( identifier[x] > identifier[max_width] keyword[or] identifier[x] < identifier[min_width] )]
identifier[min_area] = identifier[sum] ( identifier[r] [ literal[int] ]* identifier[r] [ literal[int] ] keyword[for] identifier[r] keyword[in] identifier[self] . identifier[_rectangles] )
keyword[return] [( identifier[c] , identifier[max_height] ) keyword[for] identifier[c] keyword[in] identifier[candidates] keyword[if] identifier[c] * identifier[max_height] >= identifier[min_area] ]
|
def _container_candidates(self):
"""Generate container candidate list
Returns:
tuple list: [(width1, height1), (width2, height2), ...]
"""
if not self._rectangles:
return [] # depends on [control=['if'], data=[]]
if self._rotation:
sides = sorted((side for rect in self._rectangles for side in rect))
max_height = sum((max(r[0], r[1]) for r in self._rectangles))
min_width = max((min(r[0], r[1]) for r in self._rectangles))
max_width = max_height # depends on [control=['if'], data=[]]
else:
sides = sorted((r[0] for r in self._rectangles))
max_height = sum((r[1] for r in self._rectangles))
min_width = max((r[0] for r in self._rectangles))
max_width = sum(sides)
if self._max_width and self._max_width < max_width:
max_width = self._max_width # depends on [control=['if'], data=[]]
if self._max_height and self._max_height < max_height:
max_height = self._max_height # depends on [control=['if'], data=[]]
assert max_width > min_width
# Generate initial container widths
candidates = [max_width, min_width]
width = 0
for s in reversed(sides):
width += s
candidates.append(width) # depends on [control=['for'], data=['s']]
width = 0
for s in sides:
width += s
candidates.append(width) # depends on [control=['for'], data=['s']]
candidates.append(max_width)
candidates.append(min_width)
# Remove duplicates and widths too big or small
seen = set()
seen_add = seen.add
candidates = [x for x in candidates if not (x in seen or seen_add(x))]
candidates = [x for x in candidates if not (x > max_width or x < min_width)]
# Remove candidates too small to fit all the rectangles
min_area = sum((r[0] * r[1] for r in self._rectangles))
return [(c, max_height) for c in candidates if c * max_height >= min_area]
|
def abort(err):
    """Abort everything, everywhere.

    Args:
        err: error/reason forwarded to every local controller.
    """
    if _debug: abort._debug("abort %r", err)
    global local_controllers

    # fan the abort out to every registered local controller
    for ctlr in local_controllers.values():
        ctlr.abort(err)
|
def function[abort, parameter[err]]:
constant[Abort everything, everywhere.]
if name[_debug] begin[:]
call[name[abort]._debug, parameter[constant[abort %r], name[err]]]
<ast.Global object at 0x7da20e9b27d0>
for taget[name[controller]] in starred[call[name[local_controllers].values, parameter[]]] begin[:]
call[name[controller].abort, parameter[name[err]]]
|
keyword[def] identifier[abort] ( identifier[err] ):
literal[string]
keyword[if] identifier[_debug] : identifier[abort] . identifier[_debug] ( literal[string] , identifier[err] )
keyword[global] identifier[local_controllers]
keyword[for] identifier[controller] keyword[in] identifier[local_controllers] . identifier[values] ():
identifier[controller] . identifier[abort] ( identifier[err] )
|
def abort(err):
"""Abort everything, everywhere."""
if _debug:
abort._debug('abort %r', err) # depends on [control=['if'], data=[]]
global local_controllers
# tell all the local controllers to abort
for controller in local_controllers.values():
controller.abort(err) # depends on [control=['for'], data=['controller']]
|
def change_password(self, user, password):
    """ Changes user password

    Raises UserNotExists when *user* is unknown; otherwise stores the
    encrypted password (newline-terminated) in ``new_users``.
    """
    # refuse to touch accounts that are not present
    exists = self.__contains__(user)
    if not exists:
        raise UserNotExists
    encrypted = self._encrypt_password(password)
    self.new_users[user] = encrypted + "\n"
|
def function[change_password, parameter[self, user, password]]:
constant[ Changes user password ]
if <ast.UnaryOp object at 0x7da20c6ab850> begin[:]
<ast.Raise object at 0x7da20c6a9960>
call[name[self].new_users][name[user]] assign[=] binary_operation[call[name[self]._encrypt_password, parameter[name[password]]] + constant[
]]
|
keyword[def] identifier[change_password] ( identifier[self] , identifier[user] , identifier[password] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[__contains__] ( identifier[user] ):
keyword[raise] identifier[UserNotExists]
identifier[self] . identifier[new_users] [ identifier[user] ]= identifier[self] . identifier[_encrypt_password] ( identifier[password] )+ literal[string]
|
def change_password(self, user, password):
""" Changes user password """
if not self.__contains__(user):
raise UserNotExists # depends on [control=['if'], data=[]]
self.new_users[user] = self._encrypt_password(password) + '\n'
|
def vert_dpi(self):
    """
    Integer dots per inch for the height of this image. Defaults to 72
    when not present in the file, as is often the case.
    """
    physical = self._chunks.pHYs
    # A missing pHYs chunk means no resolution info: fall back to 72 dpi.
    return 72 if physical is None else self._dpi(
        physical.units_specifier, physical.vert_px_per_unit
    )
|
def function[vert_dpi, parameter[self]]:
constant[
Integer dots per inch for the height of this image. Defaults to 72
when not present in the file, as is often the case.
]
variable[pHYs] assign[=] name[self]._chunks.pHYs
if compare[name[pHYs] is constant[None]] begin[:]
return[constant[72]]
return[call[name[self]._dpi, parameter[name[pHYs].units_specifier, name[pHYs].vert_px_per_unit]]]
|
keyword[def] identifier[vert_dpi] ( identifier[self] ):
literal[string]
identifier[pHYs] = identifier[self] . identifier[_chunks] . identifier[pHYs]
keyword[if] identifier[pHYs] keyword[is] keyword[None] :
keyword[return] literal[int]
keyword[return] identifier[self] . identifier[_dpi] ( identifier[pHYs] . identifier[units_specifier] , identifier[pHYs] . identifier[vert_px_per_unit] )
|
def vert_dpi(self):
"""
Integer dots per inch for the height of this image. Defaults to 72
when not present in the file, as is often the case.
"""
pHYs = self._chunks.pHYs
if pHYs is None:
return 72 # depends on [control=['if'], data=[]]
return self._dpi(pHYs.units_specifier, pHYs.vert_px_per_unit)
|
def short(cls, path):
    """
    Example:
        short("examined /Users/joe/foo") => "examined ~/foo"

    Args:
        path: Path to represent in its short form

    Returns:
        (str): Short form, using '~' if applicable
    """
    if not path:
        return path
    result = str(path)
    # Strip each known anchor path (with its trailing slash) entirely.
    for prefix in (cls.paths or ()):
        if prefix:
            result = result.replace(prefix + "/", "")
    # Whatever remains of the home directory is shown as '~'.
    return result.replace(cls.home, "~")
|
def function[short, parameter[cls, path]]:
constant[
Example:
short("examined /Users/joe/foo") => "examined ~/foo"
Args:
path: Path to represent in its short form
Returns:
(str): Short form, using '~' if applicable
]
if <ast.UnaryOp object at 0x7da1b24e2c80> begin[:]
return[name[path]]
variable[path] assign[=] call[name[str], parameter[name[path]]]
if name[cls].paths begin[:]
for taget[name[p]] in starred[name[cls].paths] begin[:]
if name[p] begin[:]
variable[path] assign[=] call[name[path].replace, parameter[binary_operation[name[p] + constant[/]], constant[]]]
variable[path] assign[=] call[name[path].replace, parameter[name[cls].home, constant[~]]]
return[name[path]]
|
keyword[def] identifier[short] ( identifier[cls] , identifier[path] ):
literal[string]
keyword[if] keyword[not] identifier[path] :
keyword[return] identifier[path]
identifier[path] = identifier[str] ( identifier[path] )
keyword[if] identifier[cls] . identifier[paths] :
keyword[for] identifier[p] keyword[in] identifier[cls] . identifier[paths] :
keyword[if] identifier[p] :
identifier[path] = identifier[path] . identifier[replace] ( identifier[p] + literal[string] , literal[string] )
identifier[path] = identifier[path] . identifier[replace] ( identifier[cls] . identifier[home] , literal[string] )
keyword[return] identifier[path]
|
def short(cls, path):
"""
Example:
short("examined /Users/joe/foo") => "examined ~/foo"
Args:
path: Path to represent in its short form
Returns:
(str): Short form, using '~' if applicable
"""
if not path:
return path # depends on [control=['if'], data=[]]
path = str(path)
if cls.paths:
for p in cls.paths:
if p:
path = path.replace(p + '/', '') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['p']] # depends on [control=['if'], data=[]]
path = path.replace(cls.home, '~')
return path
|
def sort_snps(snps):
    """ Sort SNPs based on ordered chromosome list and position. """
    chrom_order = sorted(snps["chrom"].unique(), key=_natural_sort_key)

    # PAR and MT always belong at the very end, in that order.
    for special in ("PAR", "MT"):
        if special in chrom_order:
            chrom_order.remove(special)
            chrom_order.append(special)

    # Sorting on an ordered categorical honours chrom_order
    # (https://stackoverflow.com/a/26707444); position breaks ties.
    snps["chrom"] = snps["chrom"].astype(
        CategoricalDtype(categories=chrom_order, ordered=True)
    )
    snps = snps.sort_values(["chrom", "pos"])

    # restore the chrom column to a plain object dtype
    snps["chrom"] = snps["chrom"].astype(object)
    return snps
|
def function[sort_snps, parameter[snps]]:
constant[ Sort SNPs based on ordered chromosome list and position. ]
variable[sorted_list] assign[=] call[name[sorted], parameter[call[call[name[snps]][constant[chrom]].unique, parameter[]]]]
if compare[constant[PAR] in name[sorted_list]] begin[:]
call[name[sorted_list].remove, parameter[constant[PAR]]]
call[name[sorted_list].append, parameter[constant[PAR]]]
if compare[constant[MT] in name[sorted_list]] begin[:]
call[name[sorted_list].remove, parameter[constant[MT]]]
call[name[sorted_list].append, parameter[constant[MT]]]
call[name[snps]][constant[chrom]] assign[=] call[call[name[snps]][constant[chrom]].astype, parameter[call[name[CategoricalDtype], parameter[]]]]
variable[snps] assign[=] call[name[snps].sort_values, parameter[list[[<ast.Constant object at 0x7da207f03ac0>, <ast.Constant object at 0x7da207f02b00>]]]]
call[name[snps]][constant[chrom]] assign[=] call[call[name[snps]][constant[chrom]].astype, parameter[name[object]]]
return[name[snps]]
|
keyword[def] identifier[sort_snps] ( identifier[snps] ):
literal[string]
identifier[sorted_list] = identifier[sorted] ( identifier[snps] [ literal[string] ]. identifier[unique] (), identifier[key] = identifier[_natural_sort_key] )
keyword[if] literal[string] keyword[in] identifier[sorted_list] :
identifier[sorted_list] . identifier[remove] ( literal[string] )
identifier[sorted_list] . identifier[append] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[sorted_list] :
identifier[sorted_list] . identifier[remove] ( literal[string] )
identifier[sorted_list] . identifier[append] ( literal[string] )
identifier[snps] [ literal[string] ]= identifier[snps] [ literal[string] ]. identifier[astype] (
identifier[CategoricalDtype] ( identifier[categories] = identifier[sorted_list] , identifier[ordered] = keyword[True] )
)
identifier[snps] = identifier[snps] . identifier[sort_values] ([ literal[string] , literal[string] ])
identifier[snps] [ literal[string] ]= identifier[snps] [ literal[string] ]. identifier[astype] ( identifier[object] )
keyword[return] identifier[snps]
|
def sort_snps(snps):
""" Sort SNPs based on ordered chromosome list and position. """
sorted_list = sorted(snps['chrom'].unique(), key=_natural_sort_key)
# move PAR and MT to the end of the dataframe
if 'PAR' in sorted_list:
sorted_list.remove('PAR')
sorted_list.append('PAR') # depends on [control=['if'], data=['sorted_list']]
if 'MT' in sorted_list:
sorted_list.remove('MT')
sorted_list.append('MT') # depends on [control=['if'], data=['sorted_list']]
# convert chrom column to category for sorting
# https://stackoverflow.com/a/26707444
snps['chrom'] = snps['chrom'].astype(CategoricalDtype(categories=sorted_list, ordered=True))
# sort based on ordered chromosome list and position
snps = snps.sort_values(['chrom', 'pos'])
# convert chromosome back to object
snps['chrom'] = snps['chrom'].astype(object)
return snps
|
def __gridconnections(self):
    """Level-2 parser for gridconnections.

    pattern:
    object 2 class gridconnections counts 97 93 99
    """
    try:
        tok = self.__consume()
    except DXParserNoTokens:
        return

    # Guard clause: anything other than a 'counts' clause is an error.
    if not tok.equals('counts'):
        raise DXParseError('gridconnections: '+str(tok)+' not recognized.')

    shape = []
    try:
        while True:
            # value('INTEGER') raises ValueError for non-integer tokens,
            # which terminates the counts list.
            self.__peek().value('INTEGER')
            shape.append(self.__consume().value('INTEGER'))
    except (DXParserNoTokens, ValueError):
        pass
    if len(shape) == 0:
        raise DXParseError('gridconnections: no shape parameters')
    self.currentobject['shape'] = shape
|
def function[__gridconnections, parameter[self]]:
constant[Level-2 parser for gridconnections.
pattern:
object 2 class gridconnections counts 97 93 99
]
<ast.Try object at 0x7da1aff01660>
if call[name[tok].equals, parameter[constant[counts]]] begin[:]
variable[shape] assign[=] list[[]]
<ast.Try object at 0x7da1affc00a0>
if compare[call[name[len], parameter[name[shape]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da1affc3ee0>
call[name[self].currentobject][constant[shape]] assign[=] name[shape]
|
keyword[def] identifier[__gridconnections] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[tok] = identifier[self] . identifier[__consume] ()
keyword[except] identifier[DXParserNoTokens] :
keyword[return]
keyword[if] identifier[tok] . identifier[equals] ( literal[string] ):
identifier[shape] =[]
keyword[try] :
keyword[while] keyword[True] :
identifier[self] . identifier[__peek] (). identifier[value] ( literal[string] )
identifier[tok] = identifier[self] . identifier[__consume] ()
identifier[shape] . identifier[append] ( identifier[tok] . identifier[value] ( literal[string] ))
keyword[except] ( identifier[DXParserNoTokens] , identifier[ValueError] ):
keyword[pass]
keyword[if] identifier[len] ( identifier[shape] )== literal[int] :
keyword[raise] identifier[DXParseError] ( literal[string] )
identifier[self] . identifier[currentobject] [ literal[string] ]= identifier[shape]
keyword[else] :
keyword[raise] identifier[DXParseError] ( literal[string] + identifier[str] ( identifier[tok] )+ literal[string] )
|
def __gridconnections(self):
"""Level-2 parser for gridconnections.
pattern:
object 2 class gridconnections counts 97 93 99
"""
try:
tok = self.__consume() # depends on [control=['try'], data=[]]
except DXParserNoTokens:
return # depends on [control=['except'], data=[]]
if tok.equals('counts'):
shape = []
try:
while True:
# raises exception if not an int
self.__peek().value('INTEGER')
tok = self.__consume()
shape.append(tok.value('INTEGER')) # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]]
except (DXParserNoTokens, ValueError):
pass # depends on [control=['except'], data=[]]
if len(shape) == 0:
raise DXParseError('gridconnections: no shape parameters') # depends on [control=['if'], data=[]]
self.currentobject['shape'] = shape # depends on [control=['if'], data=[]]
else:
raise DXParseError('gridconnections: ' + str(tok) + ' not recognized.')
|
def stable_u_range_dict(self, chempot_range, ref_delu, no_doped=True,
                        no_clean=False, delu_dict={}, miller_index=(),
                        dmu_at_0=False, return_se_dict=False):
    """
    Creates a dictionary where each entry is a key pointing to a
    chemical potential range where the surface of that entry is stable.
    Does so by enumerating through all possible solutions (intersect)
    for surface energies of a specific facet.

    Args:
        chempot_range ([max_chempot, min_chempot]): Range to consider the
            stability of the slabs.
        ref_delu (sympy Symbol): The range stability of each slab is based
            on the chempot range of this chempot. Should be a sympy Symbol
            object of the format: Symbol("delu_el") where el is the name of
            the element
        no_doped (bool): Consider stability of clean slabs only.
        no_clean (bool): Consider stability of doped slabs only.
        delu_dict (Dict): Dictionary of the chemical potentials to be set as
            constant. Note the key should be a sympy Symbol object of the
            format: Symbol("delu_el") where el is the name of the element.
        miller_index (list): Miller index for a specific facet to get a
            dictionary for.
        dmu_at_0 (bool): If True, if the surface energies corresponding to
            the chemical potential range is between a negative and positive
            value, the value is a list of three chemical potentials with the
            one in the center corresponding a surface energy of 0. Uselful
            in identifying unphysical ranges of surface energies and their
            chemical potential range.
        return_se_dict (bool): Whether or not to return the corresponding
            dictionary of surface energies

    Returns:
        dict: {slab_entry: sorted list of chempot values bounding the range
        where that entry is the most stable surface}. If return_se_dict is
        True, returns a (stable_urange_dict, se_dict) tuple where se_dict
        maps each entry to the surface energies at those chempot values.
    """
    # NOTE(review): delu_dict={} is a mutable default argument; it is only
    # copied (never mutated) below, so it is harmless here, but a None
    # sentinel would be the safer convention.
    chempot_range = sorted(chempot_range)
    stable_urange_dict, se_dict = {}, {}

    # Get all entries for a specific facet
    for hkl in self.all_slab_entries.keys():
        entries_in_hkl = []
        # Skip this facet if this is not the facet we want
        if miller_index and hkl != tuple(miller_index):
            continue
        if not no_clean:
            entries_in_hkl.extend([clean for clean in self.all_slab_entries[hkl]])
        if not no_doped:
            for entry in self.all_slab_entries[hkl]:
                entries_in_hkl.extend([ads_entry for ads_entry in
                                       self.all_slab_entries[hkl][entry]])

        # Every candidate entry starts with an empty stability range.
        for entry in entries_in_hkl:
            stable_urange_dict[entry] = []
            se_dict[entry] = []

        # if there is only one entry for this facet, then just give it the
        # default urange, you can't make combinations with just 1 item
        if len(entries_in_hkl) == 1:
            stable_urange_dict[entries_in_hkl[0]] = chempot_range
            u1, u2 = delu_dict.copy(), delu_dict.copy()
            u1[ref_delu], u2[ref_delu] = chempot_range[0], chempot_range[1]
            se = self.as_coeffs_dict[entries_in_hkl[0]]
            # Surface energy evaluated at both ends of the chempot range.
            se_dict[entries_in_hkl[0]] = [sub_chempots(se, u1), sub_chempots(se, u2)]
            continue

        # Pairwise intersections of the surface-energy expressions mark the
        # chempots where the most stable surface can switch entries.
        for pair in itertools.combinations(entries_in_hkl, 2):
            # I'm assuming ref_delu was not set in delu_dict,
            # so the solution should be for ref_delu
            solution = self.get_surface_equilibrium(pair, delu_dict=delu_dict)

            # Check if this solution is stable
            if not solution:
                continue
            new_delu_dict = delu_dict.copy()
            new_delu_dict[ref_delu] = solution[ref_delu]
            stable_entry, gamma = self.get_stable_entry_at_u(hkl, new_delu_dict,
                                                             no_doped=no_doped,
                                                             no_clean=no_clean)
            # The intersection only matters if one of the two intersecting
            # entries is actually the most stable surface at that point.
            if stable_entry not in pair:
                continue

            # Now check if the solution is within the chempot range
            if not (chempot_range[0] <= solution[ref_delu] <= chempot_range[1]):
                continue

            for entry in pair:
                stable_urange_dict[entry].append(solution[ref_delu])
                se_dict[entry].append(gamma)

        # Now check if all entries have 2 chempot values. If only
        # one, we need to set the other value as either the upper
        # limit or lower limit of the user provided chempot_range
        new_delu_dict = delu_dict.copy()
        for u in chempot_range:
            new_delu_dict[ref_delu] = u
            entry, gamma = self.get_stable_entry_at_u(hkl, delu_dict=new_delu_dict,
                                                      no_doped=no_doped,
                                                      no_clean=no_clean)
            stable_urange_dict[entry].append(u)
            se_dict[entry].append(gamma)

    if dmu_at_0:
        for entry in se_dict.keys():
            # if se are of opposite sign, determine chempot when se=0.
            # Useful for finding a chempot range where se is unphysical
            if not stable_urange_dict[entry]:
                continue
            if se_dict[entry][0] * se_dict[entry][1] < 0:
                # solve for gamma=0
                se = self.as_coeffs_dict[entry]
                se_dict[entry].append(0)
                stable_urange_dict[entry].append(solve(sub_chempots(se, delu_dict),
                                                       ref_delu)[0])

    # sort the chempot ranges for each facet
    for entry in stable_urange_dict.keys():
        # Keep each surface energy aligned with its (sorted) chempot value.
        se_dict[entry] = [se for i, se in sorted(zip(stable_urange_dict[entry],
                                                     se_dict[entry]))]
        stable_urange_dict[entry] = sorted(stable_urange_dict[entry])

    if return_se_dict:
        return stable_urange_dict, se_dict
    else:
        return stable_urange_dict
|
def function[stable_u_range_dict, parameter[self, chempot_range, ref_delu, no_doped, no_clean, delu_dict, miller_index, dmu_at_0, return_se_dict]]:
constant[
Creates a dictionary where each entry is a key pointing to a
chemical potential range where the surface of that entry is stable.
Does so by enumerating through all possible solutions (intersect)
for surface energies of a specific facet.
Args:
chempot_range ([max_chempot, min_chempot]): Range to consider the
stability of the slabs.
ref_delu (sympy Symbol): The range stability of each slab is based
on the chempot range of this chempot. Should be a sympy Symbol
object of the format: Symbol("delu_el") where el is the name of
the element
no_doped (bool): Consider stability of clean slabs only.
no_clean (bool): Consider stability of doped slabs only.
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
miller_index (list): Miller index for a specific facet to get a
dictionary for.
dmu_at_0 (bool): If True, if the surface energies corresponding to
the chemical potential range is between a negative and positive
value, the value is a list of three chemical potentials with the
one in the center corresponding a surface energy of 0. Uselful
in identifying unphysical ranges of surface energies and their
chemical potential range.
return_se_dict (bool): Whether or not to return the corresponding
dictionary of surface energies
]
variable[chempot_range] assign[=] call[name[sorted], parameter[name[chempot_range]]]
<ast.Tuple object at 0x7da1b1cd7a90> assign[=] tuple[[<ast.Dict object at 0x7da1b1cd79d0>, <ast.Dict object at 0x7da1b1cd79a0>]]
for taget[name[hkl]] in starred[call[name[self].all_slab_entries.keys, parameter[]]] begin[:]
variable[entries_in_hkl] assign[=] list[[]]
if <ast.BoolOp object at 0x7da1b1cd7790> begin[:]
continue
if <ast.UnaryOp object at 0x7da1b1cd75b0> begin[:]
call[name[entries_in_hkl].extend, parameter[<ast.ListComp object at 0x7da1b1cd7490>]]
if <ast.UnaryOp object at 0x7da1b1cd7280> begin[:]
for taget[name[entry]] in starred[call[name[self].all_slab_entries][name[hkl]]] begin[:]
call[name[entries_in_hkl].extend, parameter[<ast.ListComp object at 0x7da1b1cd7040>]]
for taget[name[entry]] in starred[name[entries_in_hkl]] begin[:]
call[name[stable_urange_dict]][name[entry]] assign[=] list[[]]
call[name[se_dict]][name[entry]] assign[=] list[[]]
if compare[call[name[len], parameter[name[entries_in_hkl]]] equal[==] constant[1]] begin[:]
call[name[stable_urange_dict]][call[name[entries_in_hkl]][constant[0]]] assign[=] name[chempot_range]
<ast.Tuple object at 0x7da1b1cd68c0> assign[=] tuple[[<ast.Call object at 0x7da1b1cd6800>, <ast.Call object at 0x7da1b1cd6770>]]
<ast.Tuple object at 0x7da1b1cd66b0> assign[=] tuple[[<ast.Subscript object at 0x7da1b1cd6530>, <ast.Subscript object at 0x7da1b1cd64a0>]]
variable[se] assign[=] call[name[self].as_coeffs_dict][call[name[entries_in_hkl]][constant[0]]]
call[name[se_dict]][call[name[entries_in_hkl]][constant[0]]] assign[=] list[[<ast.Call object at 0x7da1b1cd6140>, <ast.Call object at 0x7da1b1cd6080>]]
continue
for taget[name[pair]] in starred[call[name[itertools].combinations, parameter[name[entries_in_hkl], constant[2]]]] begin[:]
variable[solution] assign[=] call[name[self].get_surface_equilibrium, parameter[name[pair]]]
if <ast.UnaryOp object at 0x7da1b1cd5c60> begin[:]
continue
variable[new_delu_dict] assign[=] call[name[delu_dict].copy, parameter[]]
call[name[new_delu_dict]][name[ref_delu]] assign[=] call[name[solution]][name[ref_delu]]
<ast.Tuple object at 0x7da1b1cd5930> assign[=] call[name[self].get_stable_entry_at_u, parameter[name[hkl], name[new_delu_dict]]]
if compare[name[stable_entry] <ast.NotIn object at 0x7da2590d7190> name[pair]] begin[:]
continue
if <ast.UnaryOp object at 0x7da1b1cd55a0> begin[:]
continue
for taget[name[entry]] in starred[name[pair]] begin[:]
call[call[name[stable_urange_dict]][name[entry]].append, parameter[call[name[solution]][name[ref_delu]]]]
call[call[name[se_dict]][name[entry]].append, parameter[name[gamma]]]
variable[new_delu_dict] assign[=] call[name[delu_dict].copy, parameter[]]
for taget[name[u]] in starred[name[chempot_range]] begin[:]
call[name[new_delu_dict]][name[ref_delu]] assign[=] name[u]
<ast.Tuple object at 0x7da1b1cd4ca0> assign[=] call[name[self].get_stable_entry_at_u, parameter[name[hkl]]]
call[call[name[stable_urange_dict]][name[entry]].append, parameter[name[u]]]
call[call[name[se_dict]][name[entry]].append, parameter[name[gamma]]]
if name[dmu_at_0] begin[:]
for taget[name[entry]] in starred[call[name[se_dict].keys, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da1b1cb72b0> begin[:]
continue
if compare[binary_operation[call[call[name[se_dict]][name[entry]]][constant[0]] * call[call[name[se_dict]][name[entry]]][constant[1]]] less[<] constant[0]] begin[:]
variable[se] assign[=] call[name[self].as_coeffs_dict][name[entry]]
call[call[name[se_dict]][name[entry]].append, parameter[constant[0]]]
call[call[name[stable_urange_dict]][name[entry]].append, parameter[call[call[name[solve], parameter[call[name[sub_chempots], parameter[name[se], name[delu_dict]]], name[ref_delu]]]][constant[0]]]]
for taget[name[entry]] in starred[call[name[stable_urange_dict].keys, parameter[]]] begin[:]
call[name[se_dict]][name[entry]] assign[=] <ast.ListComp object at 0x7da1b1cb67a0>
call[name[stable_urange_dict]][name[entry]] assign[=] call[name[sorted], parameter[call[name[stable_urange_dict]][name[entry]]]]
if name[return_se_dict] begin[:]
return[tuple[[<ast.Name object at 0x7da1b1cb6200>, <ast.Name object at 0x7da1b1cb61d0>]]]
|
keyword[def] identifier[stable_u_range_dict] ( identifier[self] , identifier[chempot_range] , identifier[ref_delu] , identifier[no_doped] = keyword[True] ,
identifier[no_clean] = keyword[False] , identifier[delu_dict] ={}, identifier[miller_index] =(),
identifier[dmu_at_0] = keyword[False] , identifier[return_se_dict] = keyword[False] ):
literal[string]
identifier[chempot_range] = identifier[sorted] ( identifier[chempot_range] )
identifier[stable_urange_dict] , identifier[se_dict] ={},{}
keyword[for] identifier[hkl] keyword[in] identifier[self] . identifier[all_slab_entries] . identifier[keys] ():
identifier[entries_in_hkl] =[]
keyword[if] identifier[miller_index] keyword[and] identifier[hkl] != identifier[tuple] ( identifier[miller_index] ):
keyword[continue]
keyword[if] keyword[not] identifier[no_clean] :
identifier[entries_in_hkl] . identifier[extend] ([ identifier[clean] keyword[for] identifier[clean] keyword[in] identifier[self] . identifier[all_slab_entries] [ identifier[hkl] ]])
keyword[if] keyword[not] identifier[no_doped] :
keyword[for] identifier[entry] keyword[in] identifier[self] . identifier[all_slab_entries] [ identifier[hkl] ]:
identifier[entries_in_hkl] . identifier[extend] ([ identifier[ads_entry] keyword[for] identifier[ads_entry] keyword[in]
identifier[self] . identifier[all_slab_entries] [ identifier[hkl] ][ identifier[entry] ]])
keyword[for] identifier[entry] keyword[in] identifier[entries_in_hkl] :
identifier[stable_urange_dict] [ identifier[entry] ]=[]
identifier[se_dict] [ identifier[entry] ]=[]
keyword[if] identifier[len] ( identifier[entries_in_hkl] )== literal[int] :
identifier[stable_urange_dict] [ identifier[entries_in_hkl] [ literal[int] ]]= identifier[chempot_range]
identifier[u1] , identifier[u2] = identifier[delu_dict] . identifier[copy] (), identifier[delu_dict] . identifier[copy] ()
identifier[u1] [ identifier[ref_delu] ], identifier[u2] [ identifier[ref_delu] ]= identifier[chempot_range] [ literal[int] ], identifier[chempot_range] [ literal[int] ]
identifier[se] = identifier[self] . identifier[as_coeffs_dict] [ identifier[entries_in_hkl] [ literal[int] ]]
identifier[se_dict] [ identifier[entries_in_hkl] [ literal[int] ]]=[ identifier[sub_chempots] ( identifier[se] , identifier[u1] ), identifier[sub_chempots] ( identifier[se] , identifier[u2] )]
keyword[continue]
keyword[for] identifier[pair] keyword[in] identifier[itertools] . identifier[combinations] ( identifier[entries_in_hkl] , literal[int] ):
identifier[solution] = identifier[self] . identifier[get_surface_equilibrium] ( identifier[pair] , identifier[delu_dict] = identifier[delu_dict] )
keyword[if] keyword[not] identifier[solution] :
keyword[continue]
identifier[new_delu_dict] = identifier[delu_dict] . identifier[copy] ()
identifier[new_delu_dict] [ identifier[ref_delu] ]= identifier[solution] [ identifier[ref_delu] ]
identifier[stable_entry] , identifier[gamma] = identifier[self] . identifier[get_stable_entry_at_u] ( identifier[hkl] , identifier[new_delu_dict] ,
identifier[no_doped] = identifier[no_doped] ,
identifier[no_clean] = identifier[no_clean] )
keyword[if] identifier[stable_entry] keyword[not] keyword[in] identifier[pair] :
keyword[continue]
keyword[if] keyword[not] ( identifier[chempot_range] [ literal[int] ]<= identifier[solution] [ identifier[ref_delu] ]<= identifier[chempot_range] [ literal[int] ]):
keyword[continue]
keyword[for] identifier[entry] keyword[in] identifier[pair] :
identifier[stable_urange_dict] [ identifier[entry] ]. identifier[append] ( identifier[solution] [ identifier[ref_delu] ])
identifier[se_dict] [ identifier[entry] ]. identifier[append] ( identifier[gamma] )
identifier[new_delu_dict] = identifier[delu_dict] . identifier[copy] ()
keyword[for] identifier[u] keyword[in] identifier[chempot_range] :
identifier[new_delu_dict] [ identifier[ref_delu] ]= identifier[u]
identifier[entry] , identifier[gamma] = identifier[self] . identifier[get_stable_entry_at_u] ( identifier[hkl] , identifier[delu_dict] = identifier[new_delu_dict] ,
identifier[no_doped] = identifier[no_doped] ,
identifier[no_clean] = identifier[no_clean] )
identifier[stable_urange_dict] [ identifier[entry] ]. identifier[append] ( identifier[u] )
identifier[se_dict] [ identifier[entry] ]. identifier[append] ( identifier[gamma] )
keyword[if] identifier[dmu_at_0] :
keyword[for] identifier[entry] keyword[in] identifier[se_dict] . identifier[keys] ():
keyword[if] keyword[not] identifier[stable_urange_dict] [ identifier[entry] ]:
keyword[continue]
keyword[if] identifier[se_dict] [ identifier[entry] ][ literal[int] ]* identifier[se_dict] [ identifier[entry] ][ literal[int] ]< literal[int] :
identifier[se] = identifier[self] . identifier[as_coeffs_dict] [ identifier[entry] ]
identifier[se_dict] [ identifier[entry] ]. identifier[append] ( literal[int] )
identifier[stable_urange_dict] [ identifier[entry] ]. identifier[append] ( identifier[solve] ( identifier[sub_chempots] ( identifier[se] , identifier[delu_dict] ),
identifier[ref_delu] )[ literal[int] ])
keyword[for] identifier[entry] keyword[in] identifier[stable_urange_dict] . identifier[keys] ():
identifier[se_dict] [ identifier[entry] ]=[ identifier[se] keyword[for] identifier[i] , identifier[se] keyword[in] identifier[sorted] ( identifier[zip] ( identifier[stable_urange_dict] [ identifier[entry] ],
identifier[se_dict] [ identifier[entry] ]))]
identifier[stable_urange_dict] [ identifier[entry] ]= identifier[sorted] ( identifier[stable_urange_dict] [ identifier[entry] ])
keyword[if] identifier[return_se_dict] :
keyword[return] identifier[stable_urange_dict] , identifier[se_dict]
keyword[else] :
keyword[return] identifier[stable_urange_dict]
|
def stable_u_range_dict(self, chempot_range, ref_delu, no_doped=True,
                        no_clean=False, delu_dict=None, miller_index=(),
                        dmu_at_0=False, return_se_dict=False):
    """
    Creates a dictionary where each entry is a key pointing to a
    chemical potential range where the surface of that entry is stable.
    Does so by enumerating through all possible solutions (intersect)
    for surface energies of a specific facet.

    Args:
        chempot_range ([max_chempot, min_chempot]): Range to consider the
            stability of the slabs.
        ref_delu (sympy Symbol): The range stability of each slab is based
            on the chempot range of this chempot. Should be a sympy Symbol
            object of the format: Symbol("delu_el") where el is the name of
            the element.
        no_doped (bool): Consider stability of clean slabs only.
        no_clean (bool): Consider stability of doped slabs only.
        delu_dict (dict): Dictionary of the chemical potentials to be set as
            constant. Note the key should be a sympy Symbol object of the
            format: Symbol("delu_el") where el is the name of the element.
            Defaults to None, which is treated as an empty dict.
        miller_index (list): Miller index for a specific facet to get a
            dictionary for.
        dmu_at_0 (bool): If True, if the surface energies corresponding to
            the chemical potential range is between a negative and positive
            value, the value is a list of three chemical potentials with the
            one in the center corresponding a surface energy of 0. Useful
            in identifying unphysical ranges of surface energies and their
            chemical potential range.
        return_se_dict (bool): Whether or not to return the corresponding
            dictionary of surface energies.

    Returns:
        dict (or (dict, dict) when return_se_dict is True) mapping each
        stable entry to its sorted list of chemical potentials (and the
        matching surface energies).
    """
    # Fix for the former mutable default argument (delu_dict={}): use a
    # None sentinel and build a fresh dict per call instead.
    if delu_dict is None:
        delu_dict = {}
    chempot_range = sorted(chempot_range)
    stable_urange_dict, se_dict = {}, {}

    # Get all entries for a specific facet
    for hkl in self.all_slab_entries.keys():
        entries_in_hkl = []
        # Skip this facet if this is not the facet we want
        if miller_index and hkl != tuple(miller_index):
            continue
        if not no_clean:
            entries_in_hkl.extend([clean for clean in self.all_slab_entries[hkl]])
        if not no_doped:
            for entry in self.all_slab_entries[hkl]:
                entries_in_hkl.extend(
                    [ads_entry for ads_entry in self.all_slab_entries[hkl][entry]])

        for entry in entries_in_hkl:
            stable_urange_dict[entry] = []
            se_dict[entry] = []

        # if there is only one entry for this facet, then just give it the
        # default urange, you can't make combinations with just 1 item
        if len(entries_in_hkl) == 1:
            stable_urange_dict[entries_in_hkl[0]] = chempot_range
            u1, u2 = delu_dict.copy(), delu_dict.copy()
            u1[ref_delu], u2[ref_delu] = chempot_range[0], chempot_range[1]
            se = self.as_coeffs_dict[entries_in_hkl[0]]
            se_dict[entries_in_hkl[0]] = [sub_chempots(se, u1),
                                          sub_chempots(se, u2)]
            continue

        for pair in itertools.combinations(entries_in_hkl, 2):
            # I'm assuming ref_delu was not set in delu_dict,
            # so the solution should be for ref_delu
            solution = self.get_surface_equilibrium(pair, delu_dict=delu_dict)

            # Check if this solution is stable
            if not solution:
                continue
            new_delu_dict = delu_dict.copy()
            new_delu_dict[ref_delu] = solution[ref_delu]
            stable_entry, gamma = self.get_stable_entry_at_u(
                hkl, new_delu_dict, no_doped=no_doped, no_clean=no_clean)
            if stable_entry not in pair:
                continue

            # Now check if the solution is within the chempot range
            if not chempot_range[0] <= solution[ref_delu] <= chempot_range[1]:
                continue

            for entry in pair:
                stable_urange_dict[entry].append(solution[ref_delu])
                se_dict[entry].append(gamma)

        # Now check if all entries have 2 chempot values. If only
        # one, we need to set the other value as either the upper
        # limit or lower limit of the user provided chempot_range
        new_delu_dict = delu_dict.copy()
        for u in chempot_range:
            new_delu_dict[ref_delu] = u
            entry, gamma = self.get_stable_entry_at_u(
                hkl, delu_dict=new_delu_dict, no_doped=no_doped,
                no_clean=no_clean)
            stable_urange_dict[entry].append(u)
            se_dict[entry].append(gamma)

    if dmu_at_0:
        for entry in se_dict.keys():
            # if se are of opposite sign, determine chempot when se=0.
            # Useful for finding a chempot range where se is unphysical
            if not stable_urange_dict[entry]:
                continue
            if se_dict[entry][0] * se_dict[entry][1] < 0:
                # solve for gamma=0
                se = self.as_coeffs_dict[entry]
                se_dict[entry].append(0)
                stable_urange_dict[entry].append(
                    solve(sub_chempots(se, delu_dict), ref_delu)[0])

    # sort the chempot ranges for each facet; surface energies are
    # reordered in lockstep with their chempots
    for entry in stable_urange_dict.keys():
        se_dict[entry] = [se for i, se in
                          sorted(zip(stable_urange_dict[entry], se_dict[entry]))]
        stable_urange_dict[entry] = sorted(stable_urange_dict[entry])

    if return_se_dict:
        return stable_urange_dict, se_dict
    return stable_urange_dict
|
def main():
    """Entry point, kept as a function so a root-level build.py can import it."""
    usage = """
    pydocs

    Usage:
      pydocs SOURCE OUTPUT_DIR
      pydocs SOURCE OUTPUT_DIR [--json] [--index NAME] [--ignore FILE,NAMES]
      pydocs --help

    Options:
      SOURCE               Source module, or . for current directory.
      OUTPUT_DIR           The location to output the generated markdown.
      --json               Dump meta in JSON format upon completion.
      --index NAME         Name of the index file (default index.md) to generate.
      --ignore FILE,NAMES  Comma separated modules to ignore/skip.
      -h --help            Show this screen.
      --version            Show version.
    """
    args = docopt(usage)
    # Fall back to "index" when no explicit index name was supplied.
    index_name = args['--index'] or 'index'
    build(
        getcwd(),
        args['SOURCE'],
        args['OUTPUT_DIR'],
        json_dump=args['--json'],
        ignore_modules=args['--ignore'],
        index_filename=index_name,
    )
|
def function[main, parameter[]]:
constant[Main in a function in case you place a build.py for pydocs inside the root directory.]
variable[options] assign[=] constant[
pydocs
Usage:
pydocs SOURCE OUTPUT_DIR
pydocs SOURCE OUTPUT_DIR [--json] [--index NAME] [--ignore FILE,NAMES]
pydocs --help
Options:
SOURCE Source module, or . for current directory.
OUTPUT_DIR The location to output the generated markdown.
--json Dump meta in JSON format upon completion.
--index NAME Name of the index file (default index.md) to generate.
--ignore FILE,NAMES Comma separated modules to ignore/skip.
-h --help Show this screen.
--version Show version.
]
variable[args] assign[=] call[name[docopt], parameter[name[options]]]
call[name[build], parameter[call[name[getcwd], parameter[]], call[name[args]][constant[SOURCE]], call[name[args]][constant[OUTPUT_DIR]]]]
|
keyword[def] identifier[main] ():
literal[string]
identifier[options] = literal[string]
identifier[args] = identifier[docopt] ( identifier[options] )
identifier[build] (
identifier[getcwd] (), identifier[args] [ literal[string] ], identifier[args] [ literal[string] ],
identifier[json_dump] = identifier[args] [ literal[string] ], identifier[ignore_modules] = identifier[args] [ literal[string] ], identifier[index_filename] = identifier[args] [ literal[string] ] keyword[or] literal[string]
)
|
def main():
"""Main in a function in case you place a build.py for pydocs inside the root directory."""
options = '\n pydocs\n\n Usage:\n pydocs SOURCE OUTPUT_DIR\n pydocs SOURCE OUTPUT_DIR [--json] [--index NAME] [--ignore FILE,NAMES]\n pydocs --help\n\n Options:\n SOURCE Source module, or . for current directory.\n OUTPUT_DIR The location to output the generated markdown.\n --json Dump meta in JSON format upon completion.\n --index NAME Name of the index file (default index.md) to generate.\n --ignore FILE,NAMES Comma separated modules to ignore/skip.\n -h --help Show this screen.\n --version Show version.\n '
args = docopt(options)
build(getcwd(), args['SOURCE'], args['OUTPUT_DIR'], json_dump=args['--json'], ignore_modules=args['--ignore'], index_filename=args['--index'] or 'index')
|
async def stdout(self) -> AsyncGenerator[str, None]:
    """Yield lines from the wrapped subprocess's stdout as they arrive."""
    # Block until the subprocess is actually up before touching its pipes.
    await self.wait_running()
    stream = self._subprocess.stdout  # type: ignore
    async for chunk in stream:
        yield chunk
|
<ast.AsyncFunctionDef object at 0x7da1b0a07070>
|
keyword[async] keyword[def] identifier[stdout] ( identifier[self] )-> identifier[AsyncGenerator] [ identifier[str] , keyword[None] ]:
literal[string]
keyword[await] identifier[self] . identifier[wait_running] ()
keyword[async] keyword[for] identifier[line] keyword[in] identifier[self] . identifier[_subprocess] . identifier[stdout] :
keyword[yield] identifier[line]
|
async def stdout(self) -> AsyncGenerator[str, None]:
"""Asynchronous generator for lines from subprocess stdout."""
await self.wait_running()
async for line in self._subprocess.stdout: # type: ignore
yield line
|
def normalize_shape(shape):
    """
    Normalize a shape tuple or array to a shape tuple.

    Parameters
    ----------
    shape : tuple of int or ndarray
        The input to normalize. May optionally be an array.

    Returns
    -------
    tuple of int
        Shape tuple.
    """
    if not isinstance(shape, tuple):
        # Anything that is not already a tuple must be an ndarray, whose
        # .shape attribute is itself the desired tuple.
        assert ia.is_np_array(shape), (
            "Expected tuple of ints or array, got %s." % (type(shape),))
        return shape.shape
    return shape
|
def function[normalize_shape, parameter[shape]]:
constant[
Normalize a shape tuple or array to a shape tuple.
Parameters
----------
shape : tuple of int or ndarray
The input to normalize. May optionally be an array.
Returns
-------
tuple of int
Shape tuple.
]
if call[name[isinstance], parameter[name[shape], name[tuple]]] begin[:]
return[name[shape]]
assert[call[name[ia].is_np_array, parameter[name[shape]]]]
return[name[shape].shape]
|
keyword[def] identifier[normalize_shape] ( identifier[shape] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[shape] , identifier[tuple] ):
keyword[return] identifier[shape]
keyword[assert] identifier[ia] . identifier[is_np_array] ( identifier[shape] ),(
literal[string] %( identifier[type] ( identifier[shape] ),))
keyword[return] identifier[shape] . identifier[shape]
|
def normalize_shape(shape):
"""
Normalize a shape tuple or array to a shape tuple.
Parameters
----------
shape : tuple of int or ndarray
The input to normalize. May optionally be an array.
Returns
-------
tuple of int
Shape tuple.
"""
if isinstance(shape, tuple):
return shape # depends on [control=['if'], data=[]]
assert ia.is_np_array(shape), 'Expected tuple of ints or array, got %s.' % (type(shape),)
return shape.shape
|
def to_ufo_font_attributes(self, family_name):
    """Generate a list of UFOs with metadata loaded from .glyphs data.

    Modifies the list of UFOs in the UFOBuilder (self) in-place: for each
    master in the Glyphs font, a new designspace source with an attached
    UFO font is created and registered in ``self._sources``.

    Args:
        family_name: family name forwarded to ``to_ufo_names`` when naming
            each generated UFO.
    """
    font = self.font
    # "date" can be missing; Glyphs.app removes it on saving if it's empty:
    # https://github.com/googlei18n/glyphsLib/issues/134
    date_created = getattr(font, "date", None)
    if date_created is not None:
        date_created = to_ufo_time(date_created)
    units_per_em = font.upm
    version_major = font.versionMajor
    version_minor = font.versionMinor
    copyright = font.copyright
    designer = font.designer
    designer_url = font.designerURL
    manufacturer = font.manufacturer
    manufacturer_url = font.manufacturerURL
    # XXX note is unused?
    # note = font.note
    # The same glyph order is shared by every master's UFO.
    glyph_order = list(glyph.name for glyph in font.glyphs)
    for index, master in enumerate(font.masters):
        source = self._designspace.newSourceDescriptor()
        ufo = self.ufo_module.Font()
        source.font = ufo
        ufo.lib[APP_VERSION_LIB_KEY] = font.appVersion
        ufo.lib[KEYBOARD_INCREMENT_KEY] = font.keyboardIncrement
        if date_created is not None:
            ufo.info.openTypeHeadCreated = date_created
        ufo.info.unitsPerEm = units_per_em
        ufo.info.versionMajor = version_major
        ufo.info.versionMinor = version_minor
        # Optional name-table fields are only copied when actually set.
        if copyright:
            ufo.info.copyright = copyright
        if designer:
            ufo.info.openTypeNameDesigner = designer
        if designer_url:
            ufo.info.openTypeNameDesignerURL = designer_url
        if manufacturer:
            ufo.info.openTypeNameManufacturer = manufacturer
        if manufacturer_url:
            ufo.info.openTypeNameManufacturerURL = manufacturer_url
        ufo.glyphOrder = glyph_order
        self.to_ufo_names(ufo, master, family_name)
        self.to_ufo_family_user_data(ufo)
        self.to_ufo_custom_params(ufo, font)
        self.to_ufo_master_attributes(source, master)
        # Record the master's original position so ordering can be restored.
        ufo.lib[MASTER_ORDER_LIB_KEY] = index
        # FIXME: (jany) in the future, yield this UFO (for memory, lazy iter)
        self._designspace.addSource(source)
        self._sources[master.id] = source
|
def function[to_ufo_font_attributes, parameter[self, family_name]]:
constant[Generate a list of UFOs with metadata loaded from .glyphs data.
Modifies the list of UFOs in the UFOBuilder (self) in-place.
]
variable[font] assign[=] name[self].font
variable[date_created] assign[=] call[name[getattr], parameter[name[font], constant[date], constant[None]]]
if compare[name[date_created] is_not constant[None]] begin[:]
variable[date_created] assign[=] call[name[to_ufo_time], parameter[name[date_created]]]
variable[units_per_em] assign[=] name[font].upm
variable[version_major] assign[=] name[font].versionMajor
variable[version_minor] assign[=] name[font].versionMinor
variable[copyright] assign[=] name[font].copyright
variable[designer] assign[=] name[font].designer
variable[designer_url] assign[=] name[font].designerURL
variable[manufacturer] assign[=] name[font].manufacturer
variable[manufacturer_url] assign[=] name[font].manufacturerURL
variable[glyph_order] assign[=] call[name[list], parameter[<ast.GeneratorExp object at 0x7da1b03c82e0>]]
for taget[tuple[[<ast.Name object at 0x7da1b03c9ba0>, <ast.Name object at 0x7da1b03c9b40>]]] in starred[call[name[enumerate], parameter[name[font].masters]]] begin[:]
variable[source] assign[=] call[name[self]._designspace.newSourceDescriptor, parameter[]]
variable[ufo] assign[=] call[name[self].ufo_module.Font, parameter[]]
name[source].font assign[=] name[ufo]
call[name[ufo].lib][name[APP_VERSION_LIB_KEY]] assign[=] name[font].appVersion
call[name[ufo].lib][name[KEYBOARD_INCREMENT_KEY]] assign[=] name[font].keyboardIncrement
if compare[name[date_created] is_not constant[None]] begin[:]
name[ufo].info.openTypeHeadCreated assign[=] name[date_created]
name[ufo].info.unitsPerEm assign[=] name[units_per_em]
name[ufo].info.versionMajor assign[=] name[version_major]
name[ufo].info.versionMinor assign[=] name[version_minor]
if name[copyright] begin[:]
name[ufo].info.copyright assign[=] name[copyright]
if name[designer] begin[:]
name[ufo].info.openTypeNameDesigner assign[=] name[designer]
if name[designer_url] begin[:]
name[ufo].info.openTypeNameDesignerURL assign[=] name[designer_url]
if name[manufacturer] begin[:]
name[ufo].info.openTypeNameManufacturer assign[=] name[manufacturer]
if name[manufacturer_url] begin[:]
name[ufo].info.openTypeNameManufacturerURL assign[=] name[manufacturer_url]
name[ufo].glyphOrder assign[=] name[glyph_order]
call[name[self].to_ufo_names, parameter[name[ufo], name[master], name[family_name]]]
call[name[self].to_ufo_family_user_data, parameter[name[ufo]]]
call[name[self].to_ufo_custom_params, parameter[name[ufo], name[font]]]
call[name[self].to_ufo_master_attributes, parameter[name[source], name[master]]]
call[name[ufo].lib][name[MASTER_ORDER_LIB_KEY]] assign[=] name[index]
call[name[self]._designspace.addSource, parameter[name[source]]]
call[name[self]._sources][name[master].id] assign[=] name[source]
|
keyword[def] identifier[to_ufo_font_attributes] ( identifier[self] , identifier[family_name] ):
literal[string]
identifier[font] = identifier[self] . identifier[font]
identifier[date_created] = identifier[getattr] ( identifier[font] , literal[string] , keyword[None] )
keyword[if] identifier[date_created] keyword[is] keyword[not] keyword[None] :
identifier[date_created] = identifier[to_ufo_time] ( identifier[date_created] )
identifier[units_per_em] = identifier[font] . identifier[upm]
identifier[version_major] = identifier[font] . identifier[versionMajor]
identifier[version_minor] = identifier[font] . identifier[versionMinor]
identifier[copyright] = identifier[font] . identifier[copyright]
identifier[designer] = identifier[font] . identifier[designer]
identifier[designer_url] = identifier[font] . identifier[designerURL]
identifier[manufacturer] = identifier[font] . identifier[manufacturer]
identifier[manufacturer_url] = identifier[font] . identifier[manufacturerURL]
identifier[glyph_order] = identifier[list] ( identifier[glyph] . identifier[name] keyword[for] identifier[glyph] keyword[in] identifier[font] . identifier[glyphs] )
keyword[for] identifier[index] , identifier[master] keyword[in] identifier[enumerate] ( identifier[font] . identifier[masters] ):
identifier[source] = identifier[self] . identifier[_designspace] . identifier[newSourceDescriptor] ()
identifier[ufo] = identifier[self] . identifier[ufo_module] . identifier[Font] ()
identifier[source] . identifier[font] = identifier[ufo]
identifier[ufo] . identifier[lib] [ identifier[APP_VERSION_LIB_KEY] ]= identifier[font] . identifier[appVersion]
identifier[ufo] . identifier[lib] [ identifier[KEYBOARD_INCREMENT_KEY] ]= identifier[font] . identifier[keyboardIncrement]
keyword[if] identifier[date_created] keyword[is] keyword[not] keyword[None] :
identifier[ufo] . identifier[info] . identifier[openTypeHeadCreated] = identifier[date_created]
identifier[ufo] . identifier[info] . identifier[unitsPerEm] = identifier[units_per_em]
identifier[ufo] . identifier[info] . identifier[versionMajor] = identifier[version_major]
identifier[ufo] . identifier[info] . identifier[versionMinor] = identifier[version_minor]
keyword[if] identifier[copyright] :
identifier[ufo] . identifier[info] . identifier[copyright] = identifier[copyright]
keyword[if] identifier[designer] :
identifier[ufo] . identifier[info] . identifier[openTypeNameDesigner] = identifier[designer]
keyword[if] identifier[designer_url] :
identifier[ufo] . identifier[info] . identifier[openTypeNameDesignerURL] = identifier[designer_url]
keyword[if] identifier[manufacturer] :
identifier[ufo] . identifier[info] . identifier[openTypeNameManufacturer] = identifier[manufacturer]
keyword[if] identifier[manufacturer_url] :
identifier[ufo] . identifier[info] . identifier[openTypeNameManufacturerURL] = identifier[manufacturer_url]
identifier[ufo] . identifier[glyphOrder] = identifier[glyph_order]
identifier[self] . identifier[to_ufo_names] ( identifier[ufo] , identifier[master] , identifier[family_name] )
identifier[self] . identifier[to_ufo_family_user_data] ( identifier[ufo] )
identifier[self] . identifier[to_ufo_custom_params] ( identifier[ufo] , identifier[font] )
identifier[self] . identifier[to_ufo_master_attributes] ( identifier[source] , identifier[master] )
identifier[ufo] . identifier[lib] [ identifier[MASTER_ORDER_LIB_KEY] ]= identifier[index]
identifier[self] . identifier[_designspace] . identifier[addSource] ( identifier[source] )
identifier[self] . identifier[_sources] [ identifier[master] . identifier[id] ]= identifier[source]
|
def to_ufo_font_attributes(self, family_name):
"""Generate a list of UFOs with metadata loaded from .glyphs data.
Modifies the list of UFOs in the UFOBuilder (self) in-place.
"""
font = self.font
# "date" can be missing; Glyphs.app removes it on saving if it's empty:
# https://github.com/googlei18n/glyphsLib/issues/134
date_created = getattr(font, 'date', None)
if date_created is not None:
date_created = to_ufo_time(date_created) # depends on [control=['if'], data=['date_created']]
units_per_em = font.upm
version_major = font.versionMajor
version_minor = font.versionMinor
copyright = font.copyright
designer = font.designer
designer_url = font.designerURL
manufacturer = font.manufacturer
manufacturer_url = font.manufacturerURL
# XXX note is unused?
# note = font.note
glyph_order = list((glyph.name for glyph in font.glyphs))
for (index, master) in enumerate(font.masters):
source = self._designspace.newSourceDescriptor()
ufo = self.ufo_module.Font()
source.font = ufo
ufo.lib[APP_VERSION_LIB_KEY] = font.appVersion
ufo.lib[KEYBOARD_INCREMENT_KEY] = font.keyboardIncrement
if date_created is not None:
ufo.info.openTypeHeadCreated = date_created # depends on [control=['if'], data=['date_created']]
ufo.info.unitsPerEm = units_per_em
ufo.info.versionMajor = version_major
ufo.info.versionMinor = version_minor
if copyright:
ufo.info.copyright = copyright # depends on [control=['if'], data=[]]
if designer:
ufo.info.openTypeNameDesigner = designer # depends on [control=['if'], data=[]]
if designer_url:
ufo.info.openTypeNameDesignerURL = designer_url # depends on [control=['if'], data=[]]
if manufacturer:
ufo.info.openTypeNameManufacturer = manufacturer # depends on [control=['if'], data=[]]
if manufacturer_url:
ufo.info.openTypeNameManufacturerURL = manufacturer_url # depends on [control=['if'], data=[]]
ufo.glyphOrder = glyph_order
self.to_ufo_names(ufo, master, family_name)
self.to_ufo_family_user_data(ufo)
self.to_ufo_custom_params(ufo, font)
self.to_ufo_master_attributes(source, master)
ufo.lib[MASTER_ORDER_LIB_KEY] = index
# FIXME: (jany) in the future, yield this UFO (for memory, lazy iter)
self._designspace.addSource(source)
self._sources[master.id] = source # depends on [control=['for'], data=[]]
|
def send_keysequence_window(self, window, keysequence, delay=12000):
    """Send a keysequence to the specified window.

    Keysequences are specified by X11 KeySym name; any combination of
    KeySym names joined by '+' is valid, as is a single KeySym name.
    Examples: "l", "semicolon", "alt+Return", "Alt_L+Tab".
    To type a literal string such as "Hello world." use
    xdo_enter_text_window instead.

    :param window: The window you want to send the keysequence to or
        CURRENTWINDOW
    :param keysequence: The string keysequence to send.
    :param delay: The delay between keystrokes in microseconds.
    """
    # Thin pass-through to the libxdo C function (arguments positional:
    # ctypes foreign functions do not accept keywords).
    _libxdo.xdo_send_keysequence_window(self._xdo, window, keysequence, delay)
|
def function[send_keysequence_window, parameter[self, window, keysequence, delay]]:
constant[
Send a keysequence to the specified window.
This allows you to send keysequences by symbol name. Any combination
of X11 KeySym names separated by '+' are valid. Single KeySym names
are valid, too.
Examples:
"l"
"semicolon"
"alt+Return"
"Alt_L+Tab"
If you want to type a string, such as "Hello world." you want to
instead use xdo_enter_text_window.
:param window: The window you want to send the keysequence to or
CURRENTWINDOW
:param keysequence: The string keysequence to send.
:param delay: The delay between keystrokes in microseconds.
]
call[name[_libxdo].xdo_send_keysequence_window, parameter[name[self]._xdo, name[window], name[keysequence], name[delay]]]
|
keyword[def] identifier[send_keysequence_window] ( identifier[self] , identifier[window] , identifier[keysequence] , identifier[delay] = literal[int] ):
literal[string]
identifier[_libxdo] . identifier[xdo_send_keysequence_window] (
identifier[self] . identifier[_xdo] , identifier[window] , identifier[keysequence] , identifier[delay] )
|
def send_keysequence_window(self, window, keysequence, delay=12000):
"""
Send a keysequence to the specified window.
This allows you to send keysequences by symbol name. Any combination
of X11 KeySym names separated by '+' are valid. Single KeySym names
are valid, too.
Examples:
"l"
"semicolon"
"alt+Return"
"Alt_L+Tab"
If you want to type a string, such as "Hello world." you want to
instead use xdo_enter_text_window.
:param window: The window you want to send the keysequence to or
CURRENTWINDOW
:param keysequence: The string keysequence to send.
:param delay: The delay between keystrokes in microseconds.
"""
_libxdo.xdo_send_keysequence_window(self._xdo, window, keysequence, delay)
|
def run(self):
    """Pump audio chunks from the queue into the output stream.

    Loops until ``self.keep_listening`` is cleared, then stops and closes
    the stream. Raises RuntimeError when no queue has been attached.
    """
    if not hasattr(self, 'queue'):
        raise RuntimeError("Audio queue is not intialized.")
    self.keep_listening = True
    self.stream.start()
    while self.keep_listening:
        try:
            data = self.queue.get(timeout=queue_timeout)
        except Empty:
            # No chunk arrived within the timeout; re-check keep_listening.
            continue
        if self.stream.write(data):
            logger.debug("Buffer underrun")
    self.stream.stop()
    self.stream.close()
|
def function[run, parameter[self]]:
constant[ Initializes the stream. ]
if <ast.UnaryOp object at 0x7da1b2617c10> begin[:]
<ast.Raise object at 0x7da1b2593a30>
name[self].keep_listening assign[=] constant[True]
call[name[self].stream.start, parameter[]]
while name[self].keep_listening begin[:]
<ast.Try object at 0x7da1b2591150>
call[name[self].stream.stop, parameter[]]
call[name[self].stream.close, parameter[]]
|
keyword[def] identifier[run] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[self] . identifier[keep_listening] = keyword[True]
identifier[self] . identifier[stream] . identifier[start] ()
keyword[while] identifier[self] . identifier[keep_listening] :
keyword[try] :
identifier[chunk] = identifier[self] . identifier[queue] . identifier[get] ( identifier[timeout] = identifier[queue_timeout] )
identifier[underflowed] = identifier[self] . identifier[stream] . identifier[write] ( identifier[chunk] )
keyword[if] identifier[underflowed] :
identifier[logger] . identifier[debug] ( literal[string] )
keyword[except] identifier[Empty] :
keyword[pass]
identifier[self] . identifier[stream] . identifier[stop] ()
identifier[self] . identifier[stream] . identifier[close] ()
|
def run(self):
""" Initializes the stream. """
if not hasattr(self, 'queue'):
raise RuntimeError('Audio queue is not intialized.') # depends on [control=['if'], data=[]]
self.keep_listening = True
self.stream.start()
while self.keep_listening:
try:
chunk = self.queue.get(timeout=queue_timeout)
underflowed = self.stream.write(chunk)
if underflowed:
logger.debug('Buffer underrun') # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Empty:
pass # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]]
self.stream.stop()
self.stream.close()
|
def _remove_wire_nets(block):
    """ Remove all wire nodes from the block.

    A 'w' (wire) net is a pure pass-through; this rewrites every consumer
    of a wire's destination to use the original producing wirevector and
    then deletes the intermediate wirevector. Wires driving Output vectors
    are kept, since outputs must stay visible at the block boundary.
    """
    wire_src_dict = _ProducerList()
    wire_removal_set = set()  # set of all wirevectors to be removed
    # one pass to build the map of value producers and
    # all of the nets and wires to be removed
    for net in block.logic:
        if net.op == 'w':
            wire_src_dict[net.dests[0]] = net.args[0]
            if not isinstance(net.dests[0], Output):
                wire_removal_set.add(net.dests[0])
    # second full pass to create the new logic without the wire nets
    new_logic = set()
    for net in block.logic:
        if net.op != 'w' or isinstance(net.dests[0], Output):
            # remap each argument through the producer map
            # (resolution semantics live in _ProducerList.find_producer)
            new_args = tuple(wire_src_dict.find_producer(x) for x in net.args)
            new_net = LogicNet(net.op, net.op_param, new_args, net.dests)
            new_logic.add(new_net)
    # now update the block with the new logic and remove wirevectors
    block.logic = new_logic
    for dead_wirevector in wire_removal_set:
        del block.wirevector_by_name[dead_wirevector.name]
        block.wirevector_set.remove(dead_wirevector)
    # verify the rewritten block is still internally consistent
    block.sanity_check()
|
def function[_remove_wire_nets, parameter[block]]:
constant[ Remove all wire nodes from the block. ]
variable[wire_src_dict] assign[=] call[name[_ProducerList], parameter[]]
variable[wire_removal_set] assign[=] call[name[set], parameter[]]
for taget[name[net]] in starred[name[block].logic] begin[:]
if compare[name[net].op equal[==] constant[w]] begin[:]
call[name[wire_src_dict]][call[name[net].dests][constant[0]]] assign[=] call[name[net].args][constant[0]]
if <ast.UnaryOp object at 0x7da20e961ff0> begin[:]
call[name[wire_removal_set].add, parameter[call[name[net].dests][constant[0]]]]
variable[new_logic] assign[=] call[name[set], parameter[]]
for taget[name[net]] in starred[name[block].logic] begin[:]
if <ast.BoolOp object at 0x7da20e960bb0> begin[:]
variable[new_args] assign[=] call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da20e9b3730>]]
variable[new_net] assign[=] call[name[LogicNet], parameter[name[net].op, name[net].op_param, name[new_args], name[net].dests]]
call[name[new_logic].add, parameter[name[new_net]]]
name[block].logic assign[=] name[new_logic]
for taget[name[dead_wirevector]] in starred[name[wire_removal_set]] begin[:]
<ast.Delete object at 0x7da2044c2260>
call[name[block].wirevector_set.remove, parameter[name[dead_wirevector]]]
call[name[block].sanity_check, parameter[]]
|
keyword[def] identifier[_remove_wire_nets] ( identifier[block] ):
literal[string]
identifier[wire_src_dict] = identifier[_ProducerList] ()
identifier[wire_removal_set] = identifier[set] ()
keyword[for] identifier[net] keyword[in] identifier[block] . identifier[logic] :
keyword[if] identifier[net] . identifier[op] == literal[string] :
identifier[wire_src_dict] [ identifier[net] . identifier[dests] [ literal[int] ]]= identifier[net] . identifier[args] [ literal[int] ]
keyword[if] keyword[not] identifier[isinstance] ( identifier[net] . identifier[dests] [ literal[int] ], identifier[Output] ):
identifier[wire_removal_set] . identifier[add] ( identifier[net] . identifier[dests] [ literal[int] ])
identifier[new_logic] = identifier[set] ()
keyword[for] identifier[net] keyword[in] identifier[block] . identifier[logic] :
keyword[if] identifier[net] . identifier[op] != literal[string] keyword[or] identifier[isinstance] ( identifier[net] . identifier[dests] [ literal[int] ], identifier[Output] ):
identifier[new_args] = identifier[tuple] ( identifier[wire_src_dict] . identifier[find_producer] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[net] . identifier[args] )
identifier[new_net] = identifier[LogicNet] ( identifier[net] . identifier[op] , identifier[net] . identifier[op_param] , identifier[new_args] , identifier[net] . identifier[dests] )
identifier[new_logic] . identifier[add] ( identifier[new_net] )
identifier[block] . identifier[logic] = identifier[new_logic]
keyword[for] identifier[dead_wirevector] keyword[in] identifier[wire_removal_set] :
keyword[del] identifier[block] . identifier[wirevector_by_name] [ identifier[dead_wirevector] . identifier[name] ]
identifier[block] . identifier[wirevector_set] . identifier[remove] ( identifier[dead_wirevector] )
identifier[block] . identifier[sanity_check] ()
|
def _remove_wire_nets(block):
""" Remove all wire nodes from the block. """
wire_src_dict = _ProducerList()
wire_removal_set = set() # set of all wirevectors to be removed
# one pass to build the map of value producers and
# all of the nets and wires to be removed
for net in block.logic:
if net.op == 'w':
wire_src_dict[net.dests[0]] = net.args[0]
if not isinstance(net.dests[0], Output):
wire_removal_set.add(net.dests[0]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['net']]
# second full pass to create the new logic without the wire nets
new_logic = set()
for net in block.logic:
if net.op != 'w' or isinstance(net.dests[0], Output):
new_args = tuple((wire_src_dict.find_producer(x) for x in net.args))
new_net = LogicNet(net.op, net.op_param, new_args, net.dests)
new_logic.add(new_net) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['net']]
# now update the block with the new logic and remove wirevectors
block.logic = new_logic
for dead_wirevector in wire_removal_set:
del block.wirevector_by_name[dead_wirevector.name]
block.wirevector_set.remove(dead_wirevector) # depends on [control=['for'], data=['dead_wirevector']]
block.sanity_check()
|
def get_available_FIELD_transitions(instance, field):
    """
    List of transitions available in current model state
    with all conditions met
    """
    state = field.get_state(instance)
    # Transition names are irrelevant here, so iterate values directly.
    for transition in field.transitions[instance.__class__].values():
        meta = transition._django_fsm
        if meta.has_transition(state) and meta.conditions_met(instance, state):
            yield meta.get_transition(state)
|
def function[get_available_FIELD_transitions, parameter[instance, field]]:
constant[
List of transitions available in current model state
with all conditions met
]
variable[curr_state] assign[=] call[name[field].get_state, parameter[name[instance]]]
variable[transitions] assign[=] call[name[field].transitions][name[instance].__class__]
for taget[tuple[[<ast.Name object at 0x7da1b1da3970>, <ast.Name object at 0x7da1b1da2b90>]]] in starred[call[name[transitions].items, parameter[]]] begin[:]
variable[meta] assign[=] name[transition]._django_fsm
if <ast.BoolOp object at 0x7da1b1da0ac0> begin[:]
<ast.Yield object at 0x7da1b1da23e0>
|
keyword[def] identifier[get_available_FIELD_transitions] ( identifier[instance] , identifier[field] ):
literal[string]
identifier[curr_state] = identifier[field] . identifier[get_state] ( identifier[instance] )
identifier[transitions] = identifier[field] . identifier[transitions] [ identifier[instance] . identifier[__class__] ]
keyword[for] identifier[name] , identifier[transition] keyword[in] identifier[transitions] . identifier[items] ():
identifier[meta] = identifier[transition] . identifier[_django_fsm]
keyword[if] identifier[meta] . identifier[has_transition] ( identifier[curr_state] ) keyword[and] identifier[meta] . identifier[conditions_met] ( identifier[instance] , identifier[curr_state] ):
keyword[yield] identifier[meta] . identifier[get_transition] ( identifier[curr_state] )
|
def get_available_FIELD_transitions(instance, field):
"""
List of transitions available in current model state
with all conditions met
"""
curr_state = field.get_state(instance)
transitions = field.transitions[instance.__class__]
for (name, transition) in transitions.items():
meta = transition._django_fsm
if meta.has_transition(curr_state) and meta.conditions_met(instance, curr_state):
yield meta.get_transition(curr_state) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
|
def get_release_date(pdb_id):
    """Quick way to get the release date of a PDB ID using the table of results from the REST service

    Returns None if the release date is not available.

    Args:
        pdb_id (str): PDB identifier (case-insensitive; upper-cased internally).

    Returns:
        str: Release date of the PDB entry, or None if unavailable.

    Raises:
        ValueError: If the PDB ID is not present in the property table.
    """
    pdb_id = pdb_id.upper()
    if pdb_id not in _property_table().index:
        raise ValueError('PDB ID not in property table')
    # NOTE(review): `.ix` is removed in modern pandas — presumably this code
    # targets an older pandas version; `.loc` would be the replacement.
    release_date = _property_table().ix[pdb_id, 'releaseDate']
    if pd.isnull(release_date):
        # Bug fix: the original logged a bare '{}' placeholder without ever
        # substituting the PDB ID into the message.
        log.debug('{}: no release date available'.format(pdb_id))
        release_date = None
    return release_date
|
def function[get_release_date, parameter[pdb_id]]:
constant[Quick way to get the release date of a PDB ID using the table of results from the REST service
Returns None if the release date is not available.
Returns:
str: Organism of a PDB ID
]
variable[pdb_id] assign[=] call[name[pdb_id].upper, parameter[]]
if compare[name[pdb_id] <ast.NotIn object at 0x7da2590d7190> call[name[_property_table], parameter[]].index] begin[:]
<ast.Raise object at 0x7da20c9920b0>
return[name[release_date]]
|
keyword[def] identifier[get_release_date] ( identifier[pdb_id] ):
literal[string]
identifier[pdb_id] = identifier[pdb_id] . identifier[upper] ()
keyword[if] identifier[pdb_id] keyword[not] keyword[in] identifier[_property_table] (). identifier[index] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[else] :
identifier[release_date] = identifier[_property_table] (). identifier[ix] [ identifier[pdb_id] , literal[string] ]
keyword[if] identifier[pd] . identifier[isnull] ( identifier[release_date] ):
identifier[log] . identifier[debug] ( literal[string] )
identifier[release_date] = keyword[None]
keyword[return] identifier[release_date]
|
def get_release_date(pdb_id):
"""Quick way to get the release date of a PDB ID using the table of results from the REST service
Returns None if the release date is not available.
Returns:
str: Organism of a PDB ID
"""
pdb_id = pdb_id.upper()
if pdb_id not in _property_table().index:
raise ValueError('PDB ID not in property table') # depends on [control=['if'], data=[]]
else:
release_date = _property_table().ix[pdb_id, 'releaseDate']
if pd.isnull(release_date):
log.debug('{}: no release date available')
release_date = None # depends on [control=['if'], data=[]]
return release_date
|
def search_shell(self):
    """
    Looks for a shell service and binds it if one is registered.

    Does nothing when a shell is already set.
    """
    with self._lock:
        if self._shell is None:
            ref = self._context.get_service_reference(SERVICE_SHELL)
            if ref is not None:
                self.set_shell(ref)
|
def function[search_shell, parameter[self]]:
constant[
Looks for a shell service
]
with name[self]._lock begin[:]
if compare[name[self]._shell is_not constant[None]] begin[:]
return[None]
variable[reference] assign[=] call[name[self]._context.get_service_reference, parameter[name[SERVICE_SHELL]]]
if compare[name[reference] is_not constant[None]] begin[:]
call[name[self].set_shell, parameter[name[reference]]]
|
keyword[def] identifier[search_shell] ( identifier[self] ):
literal[string]
keyword[with] identifier[self] . identifier[_lock] :
keyword[if] identifier[self] . identifier[_shell] keyword[is] keyword[not] keyword[None] :
keyword[return]
identifier[reference] = identifier[self] . identifier[_context] . identifier[get_service_reference] ( identifier[SERVICE_SHELL] )
keyword[if] identifier[reference] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[set_shell] ( identifier[reference] )
|
def search_shell(self):
"""
Looks for a shell service
"""
with self._lock:
if self._shell is not None:
# A shell is already there
return # depends on [control=['if'], data=[]]
reference = self._context.get_service_reference(SERVICE_SHELL)
if reference is not None:
self.set_shell(reference) # depends on [control=['if'], data=['reference']] # depends on [control=['with'], data=[]]
|
def PrintMessage(self, message):
    """Convert protobuf message to text format.

    Delegates each populated field to ``PrintField``; map fields are printed
    one synthesized entry message per key, and ``Any`` messages may first be
    printed in expanded form when a descriptor pool is available.

    Args:
      message: The protocol buffers message.
    """
    # Special-case google.protobuf.Any: try to print the packed payload in
    # expanded form; when that succeeds there is nothing more to print.
    if (message.DESCRIPTOR.full_name == _ANY_FULL_TYPE_NAME and
        self.descriptor_pool and self._TryPrintAsAnyMessage(message)):
      return
    fields = message.ListFields()
    if self.use_index_order:
      # Print fields in .proto declaration order instead of field-number order.
      fields.sort(key=lambda x: x[0].index)
    for field, value in fields:
      if _IsMapEntry(field):
        # Map fields: iterate keys in sorted order for deterministic output.
        for key in sorted(value):
          # This is slow for maps with submessage entries because it copies the
          # entire tree. Unfortunately this would take significant refactoring
          # of this file to work around.
          #
          # TODO(haberman): refactor and optimize if this becomes an issue.
          entry_submsg = value.GetEntryClass()(key=key, value=value[key])
          self.PrintField(field, entry_submsg)
      elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
        # Repeated field: one printed line per element.
        for element in value:
          self.PrintField(field, element)
      else:
        # Singular field.
        self.PrintField(field, value)
|
def function[PrintMessage, parameter[self, message]]:
constant[Convert protobuf message to text format.
Args:
message: The protocol buffers message.
]
if <ast.BoolOp object at 0x7da1b20352d0> begin[:]
return[None]
variable[fields] assign[=] call[name[message].ListFields, parameter[]]
if name[self].use_index_order begin[:]
call[name[fields].sort, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b2037bb0>, <ast.Name object at 0x7da1b2035d80>]]] in starred[name[fields]] begin[:]
if call[name[_IsMapEntry], parameter[name[field]]] begin[:]
for taget[name[key]] in starred[call[name[sorted], parameter[name[value]]]] begin[:]
variable[entry_submsg] assign[=] call[call[name[value].GetEntryClass, parameter[]], parameter[]]
call[name[self].PrintField, parameter[name[field], name[entry_submsg]]]
|
keyword[def] identifier[PrintMessage] ( identifier[self] , identifier[message] ):
literal[string]
keyword[if] ( identifier[message] . identifier[DESCRIPTOR] . identifier[full_name] == identifier[_ANY_FULL_TYPE_NAME] keyword[and]
identifier[self] . identifier[descriptor_pool] keyword[and] identifier[self] . identifier[_TryPrintAsAnyMessage] ( identifier[message] )):
keyword[return]
identifier[fields] = identifier[message] . identifier[ListFields] ()
keyword[if] identifier[self] . identifier[use_index_order] :
identifier[fields] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ]. identifier[index] )
keyword[for] identifier[field] , identifier[value] keyword[in] identifier[fields] :
keyword[if] identifier[_IsMapEntry] ( identifier[field] ):
keyword[for] identifier[key] keyword[in] identifier[sorted] ( identifier[value] ):
identifier[entry_submsg] = identifier[value] . identifier[GetEntryClass] ()( identifier[key] = identifier[key] , identifier[value] = identifier[value] [ identifier[key] ])
identifier[self] . identifier[PrintField] ( identifier[field] , identifier[entry_submsg] )
keyword[elif] identifier[field] . identifier[label] == identifier[descriptor] . identifier[FieldDescriptor] . identifier[LABEL_REPEATED] :
keyword[for] identifier[element] keyword[in] identifier[value] :
identifier[self] . identifier[PrintField] ( identifier[field] , identifier[element] )
keyword[else] :
identifier[self] . identifier[PrintField] ( identifier[field] , identifier[value] )
|
def PrintMessage(self, message):
"""Convert protobuf message to text format.
Args:
message: The protocol buffers message.
"""
if message.DESCRIPTOR.full_name == _ANY_FULL_TYPE_NAME and self.descriptor_pool and self._TryPrintAsAnyMessage(message):
return # depends on [control=['if'], data=[]]
fields = message.ListFields()
if self.use_index_order:
fields.sort(key=lambda x: x[0].index) # depends on [control=['if'], data=[]]
for (field, value) in fields:
if _IsMapEntry(field):
for key in sorted(value):
# This is slow for maps with submessage entires because it copies the
# entire tree. Unfortunately this would take significant refactoring
# of this file to work around.
#
# TODO(haberman): refactor and optimize if this becomes an issue.
entry_submsg = value.GetEntryClass()(key=key, value=value[key])
self.PrintField(field, entry_submsg) # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=[]]
elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
for element in value:
self.PrintField(field, element) # depends on [control=['for'], data=['element']] # depends on [control=['if'], data=[]]
else:
self.PrintField(field, value) # depends on [control=['for'], data=[]]
|
def _abbreviate(self, rid):
'''
Abbreviate a relationship or resource ID target for efficient storage
in the DB. Works only with a prefix/suffix split of hierarchical HTTP-like IRIs,
e.g. 'http://example.org/spam/eggs' becomes something like '{a23}eggs'
and afterward there will be an entry in the prefix map from 'a23' to 'http://example.org/spam/'
The map can then easily be used with str.format
'''
if not isinstance(rid, str) or not iri.matches_uri_syntax(rid): return rid
head, tail = rid.rsplit('/', 1)
head += '/'
abbrev_obj = self._db_coll.find_one({'origin': '@_abbreviations'})
assert abbrev_obj is not None
pmap = abbrev_obj['map']
#FIXME: probably called too often to do this every time
inv_pmap = {v: k for k, v in pmap.items()}
if head in inv_pmap:
prefix = inv_pmap[head]
else:
prefix = f'a{self._abbr_index}'
pmap[prefix] = head
self._abbr_index += 1
self._db_coll.replace_one(
{'origin': '@_abbreviations'},
{'origin': '@_abbreviations', 'map': pmap}
)
post_rid = '{' + prefix + '}' + tail
return post_rid
|
def function[_abbreviate, parameter[self, rid]]:
constant[
Abbreviate a relationship or resource ID target for efficient storage
in the DB. Works only with a prefix/suffix split of hierarchical HTTP-like IRIs,
e.g. 'http://example.org/spam/eggs' becomes something like '{a23}eggs'
and afterward there will be an entry in the prefix map from 'a23' to 'http://example.org/spam/'
The map can then easily be used with str.format
]
if <ast.BoolOp object at 0x7da18ede75e0> begin[:]
return[name[rid]]
<ast.Tuple object at 0x7da18ede5ae0> assign[=] call[name[rid].rsplit, parameter[constant[/], constant[1]]]
<ast.AugAssign object at 0x7da18ede42b0>
variable[abbrev_obj] assign[=] call[name[self]._db_coll.find_one, parameter[dictionary[[<ast.Constant object at 0x7da18ede7d00>], [<ast.Constant object at 0x7da18ede43d0>]]]]
assert[compare[name[abbrev_obj] is_not constant[None]]]
variable[pmap] assign[=] call[name[abbrev_obj]][constant[map]]
variable[inv_pmap] assign[=] <ast.DictComp object at 0x7da18ede5f60>
if compare[name[head] in name[inv_pmap]] begin[:]
variable[prefix] assign[=] call[name[inv_pmap]][name[head]]
variable[post_rid] assign[=] binary_operation[binary_operation[binary_operation[constant[{] + name[prefix]] + constant[}]] + name[tail]]
return[name[post_rid]]
|
keyword[def] identifier[_abbreviate] ( identifier[self] , identifier[rid] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[rid] , identifier[str] ) keyword[or] keyword[not] identifier[iri] . identifier[matches_uri_syntax] ( identifier[rid] ): keyword[return] identifier[rid]
identifier[head] , identifier[tail] = identifier[rid] . identifier[rsplit] ( literal[string] , literal[int] )
identifier[head] += literal[string]
identifier[abbrev_obj] = identifier[self] . identifier[_db_coll] . identifier[find_one] ({ literal[string] : literal[string] })
keyword[assert] identifier[abbrev_obj] keyword[is] keyword[not] keyword[None]
identifier[pmap] = identifier[abbrev_obj] [ literal[string] ]
identifier[inv_pmap] ={ identifier[v] : identifier[k] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[pmap] . identifier[items] ()}
keyword[if] identifier[head] keyword[in] identifier[inv_pmap] :
identifier[prefix] = identifier[inv_pmap] [ identifier[head] ]
keyword[else] :
identifier[prefix] = literal[string]
identifier[pmap] [ identifier[prefix] ]= identifier[head]
identifier[self] . identifier[_abbr_index] += literal[int]
identifier[self] . identifier[_db_coll] . identifier[replace_one] (
{ literal[string] : literal[string] },
{ literal[string] : literal[string] , literal[string] : identifier[pmap] }
)
identifier[post_rid] = literal[string] + identifier[prefix] + literal[string] + identifier[tail]
keyword[return] identifier[post_rid]
|
def _abbreviate(self, rid):
"""
Abbreviate a relationship or resource ID target for efficient storage
in the DB. Works only with a prefix/suffix split of hierarchical HTTP-like IRIs,
e.g. 'http://example.org/spam/eggs' becomes something like '{a23}eggs'
and afterward there will be an entry in the prefix map from 'a23' to 'http://example.org/spam/'
The map can then easily be used with str.format
"""
if not isinstance(rid, str) or not iri.matches_uri_syntax(rid):
return rid # depends on [control=['if'], data=[]]
(head, tail) = rid.rsplit('/', 1)
head += '/'
abbrev_obj = self._db_coll.find_one({'origin': '@_abbreviations'})
assert abbrev_obj is not None
pmap = abbrev_obj['map']
#FIXME: probably called too often to do this every time
inv_pmap = {v: k for (k, v) in pmap.items()}
if head in inv_pmap:
prefix = inv_pmap[head] # depends on [control=['if'], data=['head', 'inv_pmap']]
else:
prefix = f'a{self._abbr_index}'
pmap[prefix] = head
self._abbr_index += 1
self._db_coll.replace_one({'origin': '@_abbreviations'}, {'origin': '@_abbreviations', 'map': pmap})
post_rid = '{' + prefix + '}' + tail
return post_rid
|
def load_metagraph(model_path_prefix: Union[str, Path]) -> tf.train.Saver:
    """Create a Saver from a model checkpoint prefix on disk.

    Such prefixes are typically found in directories like
    exp/<exp_num>/model/model_best.*. The returned object can then be used
    to restore the graph inside a tf.Session.
    """
    meta_path = '{}.meta'.format(model_path_prefix)
    return tf.train.import_meta_graph(meta_path)
|
def function[load_metagraph, parameter[model_path_prefix]]:
constant[ Given the path to a model on disk (these will typically be found in
directories such as exp/<exp_num>/model/model_best.*) creates a Saver
object that can then be used to restore the graph inside a tf.Session.
]
variable[model_path_prefix] assign[=] call[name[str], parameter[name[model_path_prefix]]]
variable[metagraph] assign[=] call[name[tf].train.import_meta_graph, parameter[binary_operation[name[model_path_prefix] + constant[.meta]]]]
return[name[metagraph]]
|
keyword[def] identifier[load_metagraph] ( identifier[model_path_prefix] : identifier[Union] [ identifier[str] , identifier[Path] ])-> identifier[tf] . identifier[train] . identifier[Saver] :
literal[string]
identifier[model_path_prefix] = identifier[str] ( identifier[model_path_prefix] )
identifier[metagraph] = identifier[tf] . identifier[train] . identifier[import_meta_graph] ( identifier[model_path_prefix] + literal[string] )
keyword[return] identifier[metagraph]
|
def load_metagraph(model_path_prefix: Union[str, Path]) -> tf.train.Saver:
""" Given the path to a model on disk (these will typically be found in
directories such as exp/<exp_num>/model/model_best.*) creates a Saver
object that can then be used to restore the graph inside a tf.Session.
"""
model_path_prefix = str(model_path_prefix)
metagraph = tf.train.import_meta_graph(model_path_prefix + '.meta')
return metagraph
|
def GetArtifactCollectorArgs(flow_args, knowledge_base):
  """Prepare bundle of artifacts and their dependencies for the client.

  Copies the collection options from `flow_args`, optionally re-derives the
  artifact list needed to recollect the knowledge base, then expands each
  artifact (skipping those whose conditions are not met for this client or
  that were already processed) into the returned argument object.

  Args:
    flow_args: An `ArtifactCollectorFlowArgs` instance.
    knowledge_base: contains information about the client

  Returns:
    rdf value object containing a list of extended artifacts and the
    knowledge base
  """
  args = rdf_artifacts.ClientArtifactCollectorArgs()
  args.knowledge_base = knowledge_base
  args.apply_parsers = flow_args.apply_parsers
  args.ignore_interpolation_errors = flow_args.ignore_interpolation_errors
  args.max_file_size = flow_args.max_file_size
  args.use_tsk = flow_args.use_tsk
  # When recollecting the knowledge base, derive the artifact list for this
  # client OS instead of using the user-requested list directly.
  if not flow_args.recollect_knowledge_base:
    artifact_names = flow_args.artifact_list
  else:
    artifact_names = GetArtifactsForCollection(knowledge_base.os,
                                               flow_args.artifact_list)
  expander = ArtifactExpander(knowledge_base, flow_args.path_type,
                              flow_args.max_file_size)
  for artifact_name in artifact_names:
    rdf_artifact = artifact_registry.REGISTRY.GetArtifact(artifact_name)
    # Skip artifacts whose conditions do not hold for this client.
    if not MeetsConditions(knowledge_base, rdf_artifact):
      continue
    # Skip artifacts the expander has already handled (e.g. as a dependency).
    if artifact_name in expander.processed_artifacts:
      continue
    # Whether this artifact was explicitly requested rather than derived.
    requested_by_user = artifact_name in flow_args.artifact_list
    for expanded_artifact in expander.Expand(rdf_artifact, requested_by_user):
      args.artifacts.append(expanded_artifact)
  return args
|
def function[GetArtifactCollectorArgs, parameter[flow_args, knowledge_base]]:
constant[Prepare bundle of artifacts and their dependencies for the client.
Args:
flow_args: An `ArtifactCollectorFlowArgs` instance.
knowledge_base: contains information about the client
Returns:
rdf value object containing a list of extended artifacts and the
knowledge base
]
variable[args] assign[=] call[name[rdf_artifacts].ClientArtifactCollectorArgs, parameter[]]
name[args].knowledge_base assign[=] name[knowledge_base]
name[args].apply_parsers assign[=] name[flow_args].apply_parsers
name[args].ignore_interpolation_errors assign[=] name[flow_args].ignore_interpolation_errors
name[args].max_file_size assign[=] name[flow_args].max_file_size
name[args].use_tsk assign[=] name[flow_args].use_tsk
if <ast.UnaryOp object at 0x7da1b1b0d060> begin[:]
variable[artifact_names] assign[=] name[flow_args].artifact_list
variable[expander] assign[=] call[name[ArtifactExpander], parameter[name[knowledge_base], name[flow_args].path_type, name[flow_args].max_file_size]]
for taget[name[artifact_name]] in starred[name[artifact_names]] begin[:]
variable[rdf_artifact] assign[=] call[name[artifact_registry].REGISTRY.GetArtifact, parameter[name[artifact_name]]]
if <ast.UnaryOp object at 0x7da1b1b4b5e0> begin[:]
continue
if compare[name[artifact_name] in name[expander].processed_artifacts] begin[:]
continue
variable[requested_by_user] assign[=] compare[name[artifact_name] in name[flow_args].artifact_list]
for taget[name[expanded_artifact]] in starred[call[name[expander].Expand, parameter[name[rdf_artifact], name[requested_by_user]]]] begin[:]
call[name[args].artifacts.append, parameter[name[expanded_artifact]]]
return[name[args]]
|
keyword[def] identifier[GetArtifactCollectorArgs] ( identifier[flow_args] , identifier[knowledge_base] ):
literal[string]
identifier[args] = identifier[rdf_artifacts] . identifier[ClientArtifactCollectorArgs] ()
identifier[args] . identifier[knowledge_base] = identifier[knowledge_base]
identifier[args] . identifier[apply_parsers] = identifier[flow_args] . identifier[apply_parsers]
identifier[args] . identifier[ignore_interpolation_errors] = identifier[flow_args] . identifier[ignore_interpolation_errors]
identifier[args] . identifier[max_file_size] = identifier[flow_args] . identifier[max_file_size]
identifier[args] . identifier[use_tsk] = identifier[flow_args] . identifier[use_tsk]
keyword[if] keyword[not] identifier[flow_args] . identifier[recollect_knowledge_base] :
identifier[artifact_names] = identifier[flow_args] . identifier[artifact_list]
keyword[else] :
identifier[artifact_names] = identifier[GetArtifactsForCollection] ( identifier[knowledge_base] . identifier[os] ,
identifier[flow_args] . identifier[artifact_list] )
identifier[expander] = identifier[ArtifactExpander] ( identifier[knowledge_base] , identifier[flow_args] . identifier[path_type] ,
identifier[flow_args] . identifier[max_file_size] )
keyword[for] identifier[artifact_name] keyword[in] identifier[artifact_names] :
identifier[rdf_artifact] = identifier[artifact_registry] . identifier[REGISTRY] . identifier[GetArtifact] ( identifier[artifact_name] )
keyword[if] keyword[not] identifier[MeetsConditions] ( identifier[knowledge_base] , identifier[rdf_artifact] ):
keyword[continue]
keyword[if] identifier[artifact_name] keyword[in] identifier[expander] . identifier[processed_artifacts] :
keyword[continue]
identifier[requested_by_user] = identifier[artifact_name] keyword[in] identifier[flow_args] . identifier[artifact_list]
keyword[for] identifier[expanded_artifact] keyword[in] identifier[expander] . identifier[Expand] ( identifier[rdf_artifact] , identifier[requested_by_user] ):
identifier[args] . identifier[artifacts] . identifier[append] ( identifier[expanded_artifact] )
keyword[return] identifier[args]
|
def GetArtifactCollectorArgs(flow_args, knowledge_base):
"""Prepare bundle of artifacts and their dependencies for the client.
Args:
flow_args: An `ArtifactCollectorFlowArgs` instance.
knowledge_base: contains information about the client
Returns:
rdf value object containing a list of extended artifacts and the
knowledge base
"""
args = rdf_artifacts.ClientArtifactCollectorArgs()
args.knowledge_base = knowledge_base
args.apply_parsers = flow_args.apply_parsers
args.ignore_interpolation_errors = flow_args.ignore_interpolation_errors
args.max_file_size = flow_args.max_file_size
args.use_tsk = flow_args.use_tsk
if not flow_args.recollect_knowledge_base:
artifact_names = flow_args.artifact_list # depends on [control=['if'], data=[]]
else:
artifact_names = GetArtifactsForCollection(knowledge_base.os, flow_args.artifact_list)
expander = ArtifactExpander(knowledge_base, flow_args.path_type, flow_args.max_file_size)
for artifact_name in artifact_names:
rdf_artifact = artifact_registry.REGISTRY.GetArtifact(artifact_name)
if not MeetsConditions(knowledge_base, rdf_artifact):
continue # depends on [control=['if'], data=[]]
if artifact_name in expander.processed_artifacts:
continue # depends on [control=['if'], data=[]]
requested_by_user = artifact_name in flow_args.artifact_list
for expanded_artifact in expander.Expand(rdf_artifact, requested_by_user):
args.artifacts.append(expanded_artifact) # depends on [control=['for'], data=['expanded_artifact']] # depends on [control=['for'], data=['artifact_name']]
return args
|
def sync(self, browser):
    """
    Enable cookie synchronization with the specified browser.

    Returns:
        bool: True if the browser is known and sync was enabled,
        False otherwise.
    """
    BrowserCookies.loadBrowsers()
    if browser in BrowserCookies.browsers:
        self.browserSync = True
        self.browser = browser
        return True
    return False
|
def function[sync, parameter[self, browser]]:
constant[ Enables cookie synchronization with specified browser, returns result
Returns
bool - True if successful, false otherwise
]
call[name[BrowserCookies].loadBrowsers, parameter[]]
if <ast.UnaryOp object at 0x7da20c6c7790> begin[:]
return[constant[False]]
name[self].browserSync assign[=] constant[True]
name[self].browser assign[=] name[browser]
return[constant[True]]
|
keyword[def] identifier[sync] ( identifier[self] , identifier[browser] ):
literal[string]
identifier[BrowserCookies] . identifier[loadBrowsers] ()
keyword[if] keyword[not] identifier[browser] keyword[in] identifier[BrowserCookies] . identifier[browsers] :
keyword[return] keyword[False]
identifier[self] . identifier[browserSync] = keyword[True]
identifier[self] . identifier[browser] = identifier[browser]
keyword[return] keyword[True]
|
def sync(self, browser):
""" Enables cookie synchronization with specified browser, returns result
Returns
bool - True if successful, false otherwise
"""
BrowserCookies.loadBrowsers()
if not browser in BrowserCookies.browsers:
return False # depends on [control=['if'], data=[]]
self.browserSync = True
self.browser = browser
return True
|
def _cond_bb(self, word, suffix_len):
"""Return Lovins' condition BB.
Parameters
----------
word : str
Word to check
suffix_len : int
Suffix length
Returns
-------
bool
True if condition is met
"""
return (
len(word) - suffix_len >= 3
and word[-suffix_len - 3 : -suffix_len] != 'met'
and word[-suffix_len - 4 : -suffix_len] != 'ryst'
)
|
def function[_cond_bb, parameter[self, word, suffix_len]]:
constant[Return Lovins' condition BB.
Parameters
----------
word : str
Word to check
suffix_len : int
Suffix length
Returns
-------
bool
True if condition is met
]
return[<ast.BoolOp object at 0x7da1b00da5c0>]
|
keyword[def] identifier[_cond_bb] ( identifier[self] , identifier[word] , identifier[suffix_len] ):
literal[string]
keyword[return] (
identifier[len] ( identifier[word] )- identifier[suffix_len] >= literal[int]
keyword[and] identifier[word] [- identifier[suffix_len] - literal[int] :- identifier[suffix_len] ]!= literal[string]
keyword[and] identifier[word] [- identifier[suffix_len] - literal[int] :- identifier[suffix_len] ]!= literal[string]
)
|
def _cond_bb(self, word, suffix_len):
"""Return Lovins' condition BB.
Parameters
----------
word : str
Word to check
suffix_len : int
Suffix length
Returns
-------
bool
True if condition is met
"""
return len(word) - suffix_len >= 3 and word[-suffix_len - 3:-suffix_len] != 'met' and (word[-suffix_len - 4:-suffix_len] != 'ryst')
|
def _pastore32(ins):
    ''' Stores 2º operand content into address of 1st operand.
    store16 a, x => *(&a) = x

    A value operand prefixed with '*' is an indirect immediate: the 32-bit
    value is fetched through __ILOAD32 before the store. A plain immediate
    is loaded directly into DE:BC; any non-immediate value is popped from
    the stack instead.
    '''
    output = _paddr(ins.quad[1])

    value = ins.quad[2]
    if value[0] == '*':
        value = value[1:]
        indirect = True
    else:
        indirect = False

    try:
        # BUGFIX: convert the stripped operand, not the raw quad. For
        # indirect operands ins.quad[2] still carries the leading '*', so
        # int(ins.quad[2]) always raised ValueError and the indirect
        # immediate (__ILOAD32) branch below was unreachable.
        value = int(value) & 0xFFFFFFFF  # Immediate?
        if indirect:
            output.append('push hl')
            output.append('ld hl, %i' % (value & 0xFFFF))
            output.append('call __ILOAD32')
            output.append('ld b, h')
            output.append('ld c, l')  # BC = Lower 16 bits
            output.append('pop hl')
            REQUIRES.add('iload32.asm')
        else:
            output.append('ld de, %i' % (value >> 16))
            output.append('ld bc, %i' % (value & 0xFFFF))
    except ValueError:
        # Not an immediate: the 32-bit value was previously pushed on the stack.
        output.append('pop bc')
        output.append('pop de')

    output.append('call __STORE32')
    REQUIRES.add('store32.asm')
    return output
|
def function[_pastore32, parameter[ins]]:
constant[ Stores 2º operand content into address of 1st operand.
store16 a, x => *(&a) = x
]
variable[output] assign[=] call[name[_paddr], parameter[call[name[ins].quad][constant[1]]]]
variable[value] assign[=] call[name[ins].quad][constant[2]]
if compare[call[name[value]][constant[0]] equal[==] constant[*]] begin[:]
variable[value] assign[=] call[name[value]][<ast.Slice object at 0x7da204566410>]
variable[indirect] assign[=] constant[True]
<ast.Try object at 0x7da2054a5f90>
call[name[output].append, parameter[constant[call __STORE32]]]
call[name[REQUIRES].add, parameter[constant[store32.asm]]]
return[name[output]]
|
keyword[def] identifier[_pastore32] ( identifier[ins] ):
literal[string]
identifier[output] = identifier[_paddr] ( identifier[ins] . identifier[quad] [ literal[int] ])
identifier[value] = identifier[ins] . identifier[quad] [ literal[int] ]
keyword[if] identifier[value] [ literal[int] ]== literal[string] :
identifier[value] = identifier[value] [ literal[int] :]
identifier[indirect] = keyword[True]
keyword[else] :
identifier[indirect] = keyword[False]
keyword[try] :
identifier[value] = identifier[int] ( identifier[ins] . identifier[quad] [ literal[int] ])& literal[int]
keyword[if] identifier[indirect] :
identifier[output] . identifier[append] ( literal[string] )
identifier[output] . identifier[append] ( literal[string] %( identifier[value] & literal[int] ))
identifier[output] . identifier[append] ( literal[string] )
identifier[output] . identifier[append] ( literal[string] )
identifier[output] . identifier[append] ( literal[string] )
identifier[output] . identifier[append] ( literal[string] )
identifier[REQUIRES] . identifier[add] ( literal[string] )
keyword[else] :
identifier[output] . identifier[append] ( literal[string] %( identifier[value] >> literal[int] ))
identifier[output] . identifier[append] ( literal[string] %( identifier[value] & literal[int] ))
keyword[except] identifier[ValueError] :
identifier[output] . identifier[append] ( literal[string] )
identifier[output] . identifier[append] ( literal[string] )
identifier[output] . identifier[append] ( literal[string] )
identifier[REQUIRES] . identifier[add] ( literal[string] )
keyword[return] identifier[output]
|
def _pastore32(ins):
""" Stores 2º operand content into address of 1st operand.
store16 a, x => *(&a) = x
"""
output = _paddr(ins.quad[1])
value = ins.quad[2]
if value[0] == '*':
value = value[1:]
indirect = True # depends on [control=['if'], data=[]]
else:
indirect = False
try:
value = int(ins.quad[2]) & 4294967295 # Immediate?
if indirect:
output.append('push hl')
output.append('ld hl, %i' % (value & 65535))
output.append('call __ILOAD32')
output.append('ld b, h')
output.append('ld c, l') # BC = Lower 16 bits
output.append('pop hl')
REQUIRES.add('iload32.asm') # depends on [control=['if'], data=[]]
else:
output.append('ld de, %i' % (value >> 16))
output.append('ld bc, %i' % (value & 65535)) # depends on [control=['try'], data=[]]
except ValueError:
output.append('pop bc')
output.append('pop de') # depends on [control=['except'], data=[]]
output.append('call __STORE32')
REQUIRES.add('store32.asm')
return output
|
def after_init_app(self, app: FlaskUnchained):
    """
    Post-initialization hook: installs the custom JSON encoder (able to
    serialize Enums, LocalProxy objects, and SQLAlchemy models) and
    schedules model-resource registration before the first request.
    """
    self.set_json_encoder(app)
    register_hook = self.register_model_resources
    app.before_first_request(register_hook)
|
def function[after_init_app, parameter[self, app]]:
constant[
Configure the JSON encoder for Flask to be able to serialize Enums,
LocalProxy objects, and SQLAlchemy models.
]
call[name[self].set_json_encoder, parameter[name[app]]]
call[name[app].before_first_request, parameter[name[self].register_model_resources]]
|
keyword[def] identifier[after_init_app] ( identifier[self] , identifier[app] : identifier[FlaskUnchained] ):
literal[string]
identifier[self] . identifier[set_json_encoder] ( identifier[app] )
identifier[app] . identifier[before_first_request] ( identifier[self] . identifier[register_model_resources] )
|
def after_init_app(self, app: FlaskUnchained):
"""
Configure the JSON encoder for Flask to be able to serialize Enums,
LocalProxy objects, and SQLAlchemy models.
"""
self.set_json_encoder(app)
app.before_first_request(self.register_model_resources)
|
def can_convert(strict: bool, from_type: Type[S], to_type: Type[T]):
    """
    Tell whether conversion to `to_type` is supported.

    None should be treated as a Joker here (but we know that never from_type
    and to_type will be None at the same time): a None `to_type` is always
    accepted; otherwise it must be one of the primitive types.

    :param strict:
    :param from_type:
    :param to_type:
    :return:
    """
    supported = all_primitive_types + all_np_primitive_types
    return to_type is None or to_type in supported
|
def function[can_convert, parameter[strict, from_type, to_type]]:
constant[
None should be treated as a Joker here (but we know that never from_type and to_type will be None at the same time)
:param strict:
:param from_type:
:param to_type:
:return:
]
if <ast.BoolOp object at 0x7da18ede6860> begin[:]
return[constant[False]]
|
keyword[def] identifier[can_convert] ( identifier[strict] : identifier[bool] , identifier[from_type] : identifier[Type] [ identifier[S] ], identifier[to_type] : identifier[Type] [ identifier[T] ]):
literal[string]
keyword[if] ( identifier[to_type] keyword[is] keyword[not] keyword[None] ) keyword[and] ( identifier[to_type] keyword[not] keyword[in] ( identifier[all_primitive_types] + identifier[all_np_primitive_types] )):
keyword[return] keyword[False]
keyword[else] :
keyword[return] keyword[True]
|
def can_convert(strict: bool, from_type: Type[S], to_type: Type[T]):
"""
None should be treated as a Joker here (but we know that never from_type and to_type will be None at the same time)
:param strict:
:param from_type:
:param to_type:
:return:
"""
if to_type is not None and to_type not in all_primitive_types + all_np_primitive_types:
return False # depends on [control=['if'], data=[]]
else:
return True
|
def _setup(self, name=None):
    """
    Load the settings module referenced by env_var. This environment-
    defined configuration process is called during the settings
    configuration process.

    :param name: unused; kept for interface compatibility with callers that
        pass the name of the attribute being resolved.
    :raises OSError: if no settings file path can be determined from either
        the environment variable or the stored ``file_path``.
    """
    envvar = self.__dict__['env_var']
    if envvar:
        # NOTE: when env_var is set, there is deliberately no fallback to
        # file_path -- an unset environment variable is treated as an error.
        settings_file = os.environ.get(envvar)
    else:
        settings_file = self.__dict__['file_path']
    if not settings_file:
        # The previous message wrongly claimed "a default config will be
        # used" even though an OSError is raised and no default exists
        # (and the implicit string concatenation dropped a space). Report
        # the actual failure instead.
        msg = ("Requested settings, but no settings file path could be "
               "determined from the environment variable or file_path; "
               "unable to load configuration.")
        logger.error(msg)
        raise OSError(msg)
    self._wrapped = Settings(settings_file)
|
def function[_setup, parameter[self, name]]:
constant[
Load the settings module referenced by env_var. This environment-
defined configuration process is called during the settings
configuration process.
]
variable[envvar] assign[=] call[name[self].__dict__][constant[env_var]]
if name[envvar] begin[:]
variable[settings_file] assign[=] call[name[os].environ.get, parameter[name[envvar]]]
if <ast.UnaryOp object at 0x7da18dc052a0> begin[:]
variable[msg] assign[=] constant[Requested settings, but none can be obtained for the envvar.Since no config filepath can be obtained, a default config will be used.]
call[name[logger].error, parameter[name[msg]]]
<ast.Raise object at 0x7da18dc05e10>
name[self]._wrapped assign[=] call[name[Settings], parameter[name[settings_file]]]
|
keyword[def] identifier[_setup] ( identifier[self] , identifier[name] = keyword[None] ):
literal[string]
identifier[envvar] = identifier[self] . identifier[__dict__] [ literal[string] ]
keyword[if] identifier[envvar] :
identifier[settings_file] = identifier[os] . identifier[environ] . identifier[get] ( identifier[envvar] )
keyword[else] :
identifier[settings_file] = identifier[self] . identifier[__dict__] [ literal[string] ]
keyword[if] keyword[not] identifier[settings_file] :
identifier[msg] =( literal[string]
literal[string]
literal[string] )
identifier[logger] . identifier[error] ( identifier[msg] )
keyword[raise] identifier[OSError] ( identifier[msg] )
identifier[self] . identifier[_wrapped] = identifier[Settings] ( identifier[settings_file] )
|
def _setup(self, name=None):
"""
Load the settings module referenced by env_var. This environment-
defined configuration process is called during the settings
configuration process.
"""
envvar = self.__dict__['env_var']
if envvar:
settings_file = os.environ.get(envvar) # depends on [control=['if'], data=[]]
else:
settings_file = self.__dict__['file_path']
if not settings_file:
msg = 'Requested settings, but none can be obtained for the envvar.Since no config filepath can be obtained, a default config will be used.'
logger.error(msg)
raise OSError(msg) # depends on [control=['if'], data=[]]
self._wrapped = Settings(settings_file)
|
def get_crystal_field_spin(self, coordination: str = "oct",
                           spin_config: str = "high"):
    """
    Calculate the crystal field spin based on coordination and spin
    configuration. Only works for transition metal species.
    Args:
        coordination (str): Only oct and tet are supported at the moment.
        spin_config (str): Supported keywords are "high" or "low".
    Returns:
        Crystal field spin in Bohr magneton.
    Raises:
        AttributeError if species is not a valid transition metal or has
        an invalid oxidation state.
        ValueError if invalid coordination or spin_config.
    """
    if coordination not in ("oct", "tet") or \
            spin_config not in ("high", "low"):
        raise ValueError("Invalid coordination or spin config.")
    elec = self.full_electronic_structure
    # Require a (n)s / (n-1)d tail, i.e. a d-block transition metal.
    if len(elec) < 4 or elec[-1][1] != "s" or elec[-2][1] != "d":
        raise AttributeError(
            "Invalid element {} for crystal field calculation.".format(
                self.symbol))
    nelectrons = elec[-1][2] + elec[-2][2] - self.oxi_state
    if not 0 <= nelectrons <= 10:
        raise AttributeError(
            "Invalid oxidation state {} for element {}"
            .format(self.oxi_state, self.symbol))
    if spin_config == "high":
        # High spin: all five d orbitals are singly occupied before any
        # pairing occurs.
        return nelectrons if nelectrons <= 5 else 10 - nelectrons
    # Low spin: the lower crystal-field level fills completely first.
    # In "oct" the lower level (t2g) has 3 orbitals; in "tet" the lower
    # level (e) has 2 orbitals; the upper level holds the remaining
    # 5 - lower orbitals.
    lower = 3 if coordination == "oct" else 2
    if nelectrons <= lower:
        return nelectrons
    if nelectrons <= 2 * lower:
        return 2 * lower - nelectrons
    if nelectrons <= lower + 5:
        return nelectrons - 2 * lower
    return 10 - nelectrons
|
def function[get_crystal_field_spin, parameter[self, coordination, spin_config]]:
constant[
Calculate the crystal field spin based on coordination and spin
configuration. Only works for transition metal species.
Args:
coordination (str): Only oct and tet are supported at the moment.
spin_config (str): Supported keywords are "high" or "low".
Returns:
Crystal field spin in Bohr magneton.
Raises:
AttributeError if species is not a valid transition metal or has
an invalid oxidation state.
ValueError if invalid coordination or spin_config.
]
if <ast.BoolOp object at 0x7da20c992680> begin[:]
<ast.Raise object at 0x7da20c990fa0>
variable[elec] assign[=] name[self].full_electronic_structure
if <ast.BoolOp object at 0x7da20c9917e0> begin[:]
<ast.Raise object at 0x7da20c993a60>
variable[nelectrons] assign[=] binary_operation[binary_operation[call[call[name[elec]][<ast.UnaryOp object at 0x7da20c992da0>]][constant[2]] + call[call[name[elec]][<ast.UnaryOp object at 0x7da20c993490>]][constant[2]]] - name[self].oxi_state]
if <ast.BoolOp object at 0x7da20c9915d0> begin[:]
<ast.Raise object at 0x7da20c993460>
if compare[name[spin_config] equal[==] constant[high]] begin[:]
return[<ast.IfExp object at 0x7da20c9912d0>]
|
keyword[def] identifier[get_crystal_field_spin] ( identifier[self] , identifier[coordination] : identifier[str] = literal[string] ,
identifier[spin_config] : identifier[str] = literal[string] ):
literal[string]
keyword[if] identifier[coordination] keyword[not] keyword[in] ( literal[string] , literal[string] ) keyword[or] identifier[spin_config] keyword[not] keyword[in] ( literal[string] , literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[elec] = identifier[self] . identifier[full_electronic_structure]
keyword[if] identifier[len] ( identifier[elec] )< literal[int] keyword[or] identifier[elec] [- literal[int] ][ literal[int] ]!= literal[string] keyword[or] identifier[elec] [- literal[int] ][ literal[int] ]!= literal[string] :
keyword[raise] identifier[AttributeError] (
literal[string] . identifier[format] (
identifier[self] . identifier[symbol] ))
identifier[nelectrons] = identifier[elec] [- literal[int] ][ literal[int] ]+ identifier[elec] [- literal[int] ][ literal[int] ]- identifier[self] . identifier[oxi_state]
keyword[if] identifier[nelectrons] < literal[int] keyword[or] identifier[nelectrons] > literal[int] :
keyword[raise] identifier[AttributeError] (
literal[string]
. identifier[format] ( identifier[self] . identifier[oxi_state] , identifier[self] . identifier[symbol] ))
keyword[if] identifier[spin_config] == literal[string] :
keyword[return] identifier[nelectrons] keyword[if] identifier[nelectrons] <= literal[int] keyword[else] literal[int] - identifier[nelectrons]
keyword[elif] identifier[spin_config] == literal[string] :
keyword[if] identifier[coordination] == literal[string] :
keyword[if] identifier[nelectrons] <= literal[int] :
keyword[return] identifier[nelectrons]
keyword[elif] identifier[nelectrons] <= literal[int] :
keyword[return] literal[int] - identifier[nelectrons]
keyword[elif] identifier[nelectrons] <= literal[int] :
keyword[return] identifier[nelectrons] - literal[int]
keyword[else] :
keyword[return] literal[int] - identifier[nelectrons]
keyword[elif] identifier[coordination] == literal[string] :
keyword[if] identifier[nelectrons] <= literal[int] :
keyword[return] identifier[nelectrons]
keyword[elif] identifier[nelectrons] <= literal[int] :
keyword[return] literal[int] - identifier[nelectrons]
keyword[elif] identifier[nelectrons] <= literal[int] :
keyword[return] identifier[nelectrons] - literal[int]
keyword[else] :
keyword[return] literal[int] - identifier[nelectrons]
|
def get_crystal_field_spin(self, coordination: str='oct', spin_config: str='high'):
"""
Calculate the crystal field spin based on coordination and spin
configuration. Only works for transition metal species.
Args:
coordination (str): Only oct and tet are supported at the moment.
spin_config (str): Supported keywords are "high" or "low".
Returns:
Crystal field spin in Bohr magneton.
Raises:
AttributeError if species is not a valid transition metal or has
an invalid oxidation state.
ValueError if invalid coordination or spin_config.
"""
if coordination not in ('oct', 'tet') or spin_config not in ('high', 'low'):
raise ValueError('Invalid coordination or spin config.') # depends on [control=['if'], data=[]]
elec = self.full_electronic_structure
if len(elec) < 4 or elec[-1][1] != 's' or elec[-2][1] != 'd':
raise AttributeError('Invalid element {} for crystal field calculation.'.format(self.symbol)) # depends on [control=['if'], data=[]]
nelectrons = elec[-1][2] + elec[-2][2] - self.oxi_state
if nelectrons < 0 or nelectrons > 10:
raise AttributeError('Invalid oxidation state {} for element {}'.format(self.oxi_state, self.symbol)) # depends on [control=['if'], data=[]]
if spin_config == 'high':
return nelectrons if nelectrons <= 5 else 10 - nelectrons # depends on [control=['if'], data=[]]
elif spin_config == 'low':
if coordination == 'oct':
if nelectrons <= 3:
return nelectrons # depends on [control=['if'], data=['nelectrons']]
elif nelectrons <= 6:
return 6 - nelectrons # depends on [control=['if'], data=['nelectrons']]
elif nelectrons <= 8:
return nelectrons - 6 # depends on [control=['if'], data=['nelectrons']]
else:
return 10 - nelectrons # depends on [control=['if'], data=[]]
elif coordination == 'tet':
if nelectrons <= 2:
return nelectrons # depends on [control=['if'], data=['nelectrons']]
elif nelectrons <= 4:
return 4 - nelectrons # depends on [control=['if'], data=['nelectrons']]
elif nelectrons <= 7:
return nelectrons - 4 # depends on [control=['if'], data=['nelectrons']]
else:
return 10 - nelectrons # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
|
def publish_properties(self):
    """Publish the Homie device attributes and every node's properties."""
    pub = self.publish
    # Device-level attributes required by the Homie 3.0 convention.
    pub(b"$homie", b"3.0.1")
    pub(b"$name", self.settings.DEVICE_NAME)
    pub(b"$state", b"init")
    pub(b"$fw/name", b"Microhomie")
    pub(b"$fw/version", __version__)
    pub(b"$implementation", bytes(sys.platform, "utf-8"))
    pub(b"$localip", utils.get_local_ip())
    pub(b"$mac", utils.get_local_mac())
    pub(b"$stats", b"interval,uptime,freeheap")
    pub(b"$stats/interval", self.stats_interval)
    pub(b"$nodes", b",".join(self.node_ids))
    # Per-node properties: a faulty node is reported via node_error without
    # stopping publication of the remaining nodes; NotImplementedError is
    # deliberately propagated to the caller.
    for node in self.nodes:
        try:
            for prop in node.get_properties():
                if prop:
                    pub(*prop)
        except NotImplementedError:
            raise
        except Exception as error:
            self.node_error(node, error)
|
def function[publish_properties, parameter[self]]:
constant[publish device and node properties]
variable[publish] assign[=] name[self].publish
call[name[publish], parameter[constant[b'$homie'], constant[b'3.0.1']]]
call[name[publish], parameter[constant[b'$name'], name[self].settings.DEVICE_NAME]]
call[name[publish], parameter[constant[b'$state'], constant[b'init']]]
call[name[publish], parameter[constant[b'$fw/name'], constant[b'Microhomie']]]
call[name[publish], parameter[constant[b'$fw/version'], name[__version__]]]
call[name[publish], parameter[constant[b'$implementation'], call[name[bytes], parameter[name[sys].platform, constant[utf-8]]]]]
call[name[publish], parameter[constant[b'$localip'], call[name[utils].get_local_ip, parameter[]]]]
call[name[publish], parameter[constant[b'$mac'], call[name[utils].get_local_mac, parameter[]]]]
call[name[publish], parameter[constant[b'$stats'], constant[b'interval,uptime,freeheap']]]
call[name[publish], parameter[constant[b'$stats/interval'], name[self].stats_interval]]
call[name[publish], parameter[constant[b'$nodes'], call[constant[b','].join, parameter[name[self].node_ids]]]]
for taget[name[node]] in starred[name[self].nodes] begin[:]
<ast.Try object at 0x7da1b1c192a0>
|
keyword[def] identifier[publish_properties] ( identifier[self] ):
literal[string]
identifier[publish] = identifier[self] . identifier[publish]
identifier[publish] ( literal[string] , literal[string] )
identifier[publish] ( literal[string] , identifier[self] . identifier[settings] . identifier[DEVICE_NAME] )
identifier[publish] ( literal[string] , literal[string] )
identifier[publish] ( literal[string] , literal[string] )
identifier[publish] ( literal[string] , identifier[__version__] )
identifier[publish] ( literal[string] , identifier[bytes] ( identifier[sys] . identifier[platform] , literal[string] ))
identifier[publish] ( literal[string] , identifier[utils] . identifier[get_local_ip] ())
identifier[publish] ( literal[string] , identifier[utils] . identifier[get_local_mac] ())
identifier[publish] ( literal[string] , literal[string] )
identifier[publish] ( literal[string] , identifier[self] . identifier[stats_interval] )
identifier[publish] ( literal[string] , literal[string] . identifier[join] ( identifier[self] . identifier[node_ids] ))
keyword[for] identifier[node] keyword[in] identifier[self] . identifier[nodes] :
keyword[try] :
keyword[for] identifier[propertie] keyword[in] identifier[node] . identifier[get_properties] ():
keyword[if] identifier[propertie] :
identifier[publish] (* identifier[propertie] )
keyword[except] identifier[NotImplementedError] :
keyword[raise]
keyword[except] identifier[Exception] keyword[as] identifier[error] :
identifier[self] . identifier[node_error] ( identifier[node] , identifier[error] )
|
def publish_properties(self):
"""publish device and node properties"""
publish = self.publish
# device properties
publish(b'$homie', b'3.0.1')
publish(b'$name', self.settings.DEVICE_NAME)
publish(b'$state', b'init')
publish(b'$fw/name', b'Microhomie')
publish(b'$fw/version', __version__)
publish(b'$implementation', bytes(sys.platform, 'utf-8'))
publish(b'$localip', utils.get_local_ip())
publish(b'$mac', utils.get_local_mac())
publish(b'$stats', b'interval,uptime,freeheap')
publish(b'$stats/interval', self.stats_interval)
publish(b'$nodes', b','.join(self.node_ids))
# node properties
for node in self.nodes:
try:
for propertie in node.get_properties():
if propertie:
publish(*propertie) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['propertie']] # depends on [control=['try'], data=[]]
except NotImplementedError:
raise # depends on [control=['except'], data=[]]
except Exception as error:
self.node_error(node, error) # depends on [control=['except'], data=['error']] # depends on [control=['for'], data=['node']]
|
def __ComputeEndByte(self, start, end=None, use_chunks=True):
    """Compute the last byte to fetch for this request.
    This is all based on the HTTP spec for Range and
    Content-Range.
    Note that this is potentially confusing in several ways:
      * the value for the last byte is 0-based, eg "fetch 10 bytes
        from the beginning" would return 9 here.
      * if we have no information about size, and don't want to
        use the chunksize, we'll return None.
    See the tests for more examples.
    Args:
      start: byte to start at.
      end: (int or None, default: None) Suggested last byte.
      use_chunks: (bool, default: True) If False, ignore self.chunksize.
    Returns:
      Last byte to use in a Range header, or None.
    """
    end_byte = end
    # A negative start with unknown total size: nothing to clamp against.
    if start < 0 and not self.total_size:
        return end_byte
    if use_chunks:
        # Never fetch more than one chunk in a single request.
        candidate = start + self.chunksize - 1
        end_byte = candidate if end_byte is None else min(end_byte, candidate)
    if self.total_size:
        # Never ask for bytes past the end of the object.
        candidate = self.total_size - 1
        end_byte = candidate if end_byte is None else min(end_byte, candidate)
    return end_byte
|
def function[__ComputeEndByte, parameter[self, start, end, use_chunks]]:
constant[Compute the last byte to fetch for this request.
This is all based on the HTTP spec for Range and
Content-Range.
Note that this is potentially confusing in several ways:
* the value for the last byte is 0-based, eg "fetch 10 bytes
from the beginning" would return 9 here.
* if we have no information about size, and don't want to
use the chunksize, we'll return None.
See the tests for more examples.
Args:
start: byte to start at.
end: (int or None, default: None) Suggested last byte.
use_chunks: (bool, default: True) If False, ignore self.chunksize.
Returns:
Last byte to use in a Range header, or None.
]
variable[end_byte] assign[=] name[end]
if <ast.BoolOp object at 0x7da1b07d53c0> begin[:]
return[name[end_byte]]
if name[use_chunks] begin[:]
variable[alternate] assign[=] binary_operation[binary_operation[name[start] + name[self].chunksize] - constant[1]]
if compare[name[end_byte] is_not constant[None]] begin[:]
variable[end_byte] assign[=] call[name[min], parameter[name[end_byte], name[alternate]]]
if name[self].total_size begin[:]
variable[alternate] assign[=] binary_operation[name[self].total_size - constant[1]]
if compare[name[end_byte] is_not constant[None]] begin[:]
variable[end_byte] assign[=] call[name[min], parameter[name[end_byte], name[alternate]]]
return[name[end_byte]]
|
keyword[def] identifier[__ComputeEndByte] ( identifier[self] , identifier[start] , identifier[end] = keyword[None] , identifier[use_chunks] = keyword[True] ):
literal[string]
identifier[end_byte] = identifier[end]
keyword[if] identifier[start] < literal[int] keyword[and] keyword[not] identifier[self] . identifier[total_size] :
keyword[return] identifier[end_byte]
keyword[if] identifier[use_chunks] :
identifier[alternate] = identifier[start] + identifier[self] . identifier[chunksize] - literal[int]
keyword[if] identifier[end_byte] keyword[is] keyword[not] keyword[None] :
identifier[end_byte] = identifier[min] ( identifier[end_byte] , identifier[alternate] )
keyword[else] :
identifier[end_byte] = identifier[alternate]
keyword[if] identifier[self] . identifier[total_size] :
identifier[alternate] = identifier[self] . identifier[total_size] - literal[int]
keyword[if] identifier[end_byte] keyword[is] keyword[not] keyword[None] :
identifier[end_byte] = identifier[min] ( identifier[end_byte] , identifier[alternate] )
keyword[else] :
identifier[end_byte] = identifier[alternate]
keyword[return] identifier[end_byte]
|
def __ComputeEndByte(self, start, end=None, use_chunks=True):
"""Compute the last byte to fetch for this request.
This is all based on the HTTP spec for Range and
Content-Range.
Note that this is potentially confusing in several ways:
* the value for the last byte is 0-based, eg "fetch 10 bytes
from the beginning" would return 9 here.
* if we have no information about size, and don't want to
use the chunksize, we'll return None.
See the tests for more examples.
Args:
start: byte to start at.
end: (int or None, default: None) Suggested last byte.
use_chunks: (bool, default: True) If False, ignore self.chunksize.
Returns:
Last byte to use in a Range header, or None.
"""
end_byte = end
if start < 0 and (not self.total_size):
return end_byte # depends on [control=['if'], data=[]]
if use_chunks:
alternate = start + self.chunksize - 1
if end_byte is not None:
end_byte = min(end_byte, alternate) # depends on [control=['if'], data=['end_byte']]
else:
end_byte = alternate # depends on [control=['if'], data=[]]
if self.total_size:
alternate = self.total_size - 1
if end_byte is not None:
end_byte = min(end_byte, alternate) # depends on [control=['if'], data=['end_byte']]
else:
end_byte = alternate # depends on [control=['if'], data=[]]
return end_byte
|
def update_team_days_off(self, days_off_patch, team_context, iteration_id):
    """UpdateTeamDaysOff.
    Set a team's days off for an iteration
    :param :class:`<TeamSettingsDaysOffPatch> <azure.devops.v5_0.work.models.TeamSettingsDaysOffPatch>` days_off_patch: Team's days off patch containting a list of start and end dates
    :param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
    :param str iteration_id: ID of the iteration
    :rtype: :class:`<TeamSettingsDaysOff> <azure.devops.v5_0.work.models.TeamSettingsDaysOff>`
    """
    project = None
    team = None
    if team_context is not None:
        # Prefer the stable ids; fall back to the human-readable names.
        project = team_context.project_id or team_context.project
        team = team_context.team_id or team_context.team
    route_values = {}
    # Serialize each present route component into the URL template values.
    for key, arg_name, value, kind in (
            ('project', 'project', project, 'string'),
            ('team', 'team', team, 'string'),
            ('iterationId', 'iteration_id', iteration_id, 'str')):
        if value is not None:
            route_values[key] = self._serialize.url(arg_name, value, kind)
    content = self._serialize.body(days_off_patch, 'TeamSettingsDaysOffPatch')
    response = self._send(http_method='PATCH',
                          location_id='2d4faa2e-9150-4cbf-a47a-932b1b4a0773',
                          version='5.0',
                          route_values=route_values,
                          content=content)
    return self._deserialize('TeamSettingsDaysOff', response)
|
def function[update_team_days_off, parameter[self, days_off_patch, team_context, iteration_id]]:
constant[UpdateTeamDaysOff.
Set a team's days off for an iteration
:param :class:`<TeamSettingsDaysOffPatch> <azure.devops.v5_0.work.models.TeamSettingsDaysOffPatch>` days_off_patch: Team's days off patch containting a list of start and end dates
:param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
:param str iteration_id: ID of the iteration
:rtype: :class:`<TeamSettingsDaysOff> <azure.devops.v5_0.work.models.TeamSettingsDaysOff>`
]
variable[project] assign[=] constant[None]
variable[team] assign[=] constant[None]
if compare[name[team_context] is_not constant[None]] begin[:]
if name[team_context].project_id begin[:]
variable[project] assign[=] name[team_context].project_id
if name[team_context].team_id begin[:]
variable[team] assign[=] name[team_context].team_id
variable[route_values] assign[=] dictionary[[], []]
if compare[name[project] is_not constant[None]] begin[:]
call[name[route_values]][constant[project]] assign[=] call[name[self]._serialize.url, parameter[constant[project], name[project], constant[string]]]
if compare[name[team] is_not constant[None]] begin[:]
call[name[route_values]][constant[team]] assign[=] call[name[self]._serialize.url, parameter[constant[team], name[team], constant[string]]]
if compare[name[iteration_id] is_not constant[None]] begin[:]
call[name[route_values]][constant[iterationId]] assign[=] call[name[self]._serialize.url, parameter[constant[iteration_id], name[iteration_id], constant[str]]]
variable[content] assign[=] call[name[self]._serialize.body, parameter[name[days_off_patch], constant[TeamSettingsDaysOffPatch]]]
variable[response] assign[=] call[name[self]._send, parameter[]]
return[call[name[self]._deserialize, parameter[constant[TeamSettingsDaysOff], name[response]]]]
|
keyword[def] identifier[update_team_days_off] ( identifier[self] , identifier[days_off_patch] , identifier[team_context] , identifier[iteration_id] ):
literal[string]
identifier[project] = keyword[None]
identifier[team] = keyword[None]
keyword[if] identifier[team_context] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[team_context] . identifier[project_id] :
identifier[project] = identifier[team_context] . identifier[project_id]
keyword[else] :
identifier[project] = identifier[team_context] . identifier[project]
keyword[if] identifier[team_context] . identifier[team_id] :
identifier[team] = identifier[team_context] . identifier[team_id]
keyword[else] :
identifier[team] = identifier[team_context] . identifier[team]
identifier[route_values] ={}
keyword[if] identifier[project] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[project] , literal[string] )
keyword[if] identifier[team] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[team] , literal[string] )
keyword[if] identifier[iteration_id] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[iteration_id] , literal[string] )
identifier[content] = identifier[self] . identifier[_serialize] . identifier[body] ( identifier[days_off_patch] , literal[string] )
identifier[response] = identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] ,
identifier[location_id] = literal[string] ,
identifier[version] = literal[string] ,
identifier[route_values] = identifier[route_values] ,
identifier[content] = identifier[content] )
keyword[return] identifier[self] . identifier[_deserialize] ( literal[string] , identifier[response] )
|
def update_team_days_off(self, days_off_patch, team_context, iteration_id):
"""UpdateTeamDaysOff.
Set a team's days off for an iteration
:param :class:`<TeamSettingsDaysOffPatch> <azure.devops.v5_0.work.models.TeamSettingsDaysOffPatch>` days_off_patch: Team's days off patch containting a list of start and end dates
:param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
:param str iteration_id: ID of the iteration
:rtype: :class:`<TeamSettingsDaysOff> <azure.devops.v5_0.work.models.TeamSettingsDaysOff>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id # depends on [control=['if'], data=[]]
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id # depends on [control=['if'], data=[]]
else:
team = team_context.team # depends on [control=['if'], data=['team_context']]
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string') # depends on [control=['if'], data=['project']]
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string') # depends on [control=['if'], data=['team']]
if iteration_id is not None:
route_values['iterationId'] = self._serialize.url('iteration_id', iteration_id, 'str') # depends on [control=['if'], data=['iteration_id']]
content = self._serialize.body(days_off_patch, 'TeamSettingsDaysOffPatch')
response = self._send(http_method='PATCH', location_id='2d4faa2e-9150-4cbf-a47a-932b1b4a0773', version='5.0', route_values=route_values, content=content)
return self._deserialize('TeamSettingsDaysOff', response)
|
def main():
    """Run chartpress"""
    argparser = argparse.ArgumentParser(description=__doc__)
    argparser.add_argument('--commit-range',
                           help='Range of commits to consider when building images')
    argparser.add_argument('--push', action='store_true',
                           help='push built images to docker hub')
    argparser.add_argument('--publish-chart', action='store_true',
                           help='publish updated chart to gh-pages')
    argparser.add_argument('--tag', default=None,
                           help='Use this tag for images & charts')
    argparser.add_argument('--extra-message', default='',
                           help='extra message to add to the commit message when publishing charts')
    argparser.add_argument('--image-prefix', default=None,
                           help='override image prefix with this value')
    args = argparser.parse_args()
    with open('chartpress.yaml') as f:
        # Use safe_load: the config is plain data, and yaml.load without an
        # explicit Loader is deprecated and unsafe on untrusted input.
        config = yaml.safe_load(f)
    for chart in config['charts']:
        chart_paths = ['.'] + list(chart.get('paths', []))
        version = args.tag
        if version:
            # version of the chart shouldn't have leading 'v' prefix
            # if tag is of the form 'v1.2.3'
            version = version.lstrip('v')
        chart_version = build_chart(chart['name'], paths=chart_paths, version=version)
        if 'images' in chart:
            # CLI override takes precedence over the per-chart imagePrefix.
            image_prefix = args.image_prefix if args.image_prefix is not None else chart['imagePrefix']
            value_mods = build_images(
                prefix=image_prefix,
                images=chart['images'],
                tag=args.tag,
                commit_range=args.commit_range,
                push=args.push,
                # exclude `-<hash>` from chart_version prefix for images
                chart_version=chart_version.split('-', 1)[0],
            )
            build_values(chart['name'], value_mods)
        if args.publish_chart:
            publish_pages(chart['name'],
                          paths=chart_paths,
                          git_repo=chart['repo']['git'],
                          published_repo=chart['repo']['published'],
                          extra_message=args.extra_message,
                          )
|
def function[main, parameter[]]:
constant[Run chartpress]
variable[argparser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[argparser].add_argument, parameter[constant[--commit-range]]]
call[name[argparser].add_argument, parameter[constant[--push]]]
call[name[argparser].add_argument, parameter[constant[--publish-chart]]]
call[name[argparser].add_argument, parameter[constant[--tag]]]
call[name[argparser].add_argument, parameter[constant[--extra-message]]]
call[name[argparser].add_argument, parameter[constant[--image-prefix]]]
variable[args] assign[=] call[name[argparser].parse_args, parameter[]]
with call[name[open], parameter[constant[chartpress.yaml]]] begin[:]
variable[config] assign[=] call[name[yaml].load, parameter[name[f]]]
for taget[name[chart]] in starred[call[name[config]][constant[charts]]] begin[:]
variable[chart_paths] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b1913fa0>]] + call[name[list], parameter[call[name[chart].get, parameter[constant[paths], list[[]]]]]]]
variable[version] assign[=] name[args].tag
if name[version] begin[:]
variable[version] assign[=] call[name[version].lstrip, parameter[constant[v]]]
variable[chart_version] assign[=] call[name[build_chart], parameter[call[name[chart]][constant[name]]]]
if compare[constant[images] in name[chart]] begin[:]
variable[image_prefix] assign[=] <ast.IfExp object at 0x7da1b1912f80>
variable[value_mods] assign[=] call[name[build_images], parameter[]]
call[name[build_values], parameter[call[name[chart]][constant[name]], name[value_mods]]]
if name[args].publish_chart begin[:]
call[name[publish_pages], parameter[call[name[chart]][constant[name]]]]
|
keyword[def] identifier[main] ():
literal[string]
identifier[argparser] = identifier[argparse] . identifier[ArgumentParser] ( identifier[description] = identifier[__doc__] )
identifier[argparser] . identifier[add_argument] ( literal[string] ,
identifier[help] = literal[string] )
identifier[argparser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[argparser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[argparser] . identifier[add_argument] ( literal[string] , identifier[default] = keyword[None] ,
identifier[help] = literal[string] )
identifier[argparser] . identifier[add_argument] ( literal[string] , identifier[default] = literal[string] ,
identifier[help] = literal[string] )
identifier[argparser] . identifier[add_argument] ( literal[string] , identifier[default] = keyword[None] ,
identifier[help] = literal[string] )
identifier[args] = identifier[argparser] . identifier[parse_args] ()
keyword[with] identifier[open] ( literal[string] ) keyword[as] identifier[f] :
identifier[config] = identifier[yaml] . identifier[load] ( identifier[f] )
keyword[for] identifier[chart] keyword[in] identifier[config] [ literal[string] ]:
identifier[chart_paths] =[ literal[string] ]+ identifier[list] ( identifier[chart] . identifier[get] ( literal[string] ,[]))
identifier[version] = identifier[args] . identifier[tag]
keyword[if] identifier[version] :
identifier[version] = identifier[version] . identifier[lstrip] ( literal[string] )
identifier[chart_version] = identifier[build_chart] ( identifier[chart] [ literal[string] ], identifier[paths] = identifier[chart_paths] , identifier[version] = identifier[version] )
keyword[if] literal[string] keyword[in] identifier[chart] :
identifier[image_prefix] = identifier[args] . identifier[image_prefix] keyword[if] identifier[args] . identifier[image_prefix] keyword[is] keyword[not] keyword[None] keyword[else] identifier[chart] [ literal[string] ]
identifier[value_mods] = identifier[build_images] (
identifier[prefix] = identifier[image_prefix] ,
identifier[images] = identifier[chart] [ literal[string] ],
identifier[tag] = identifier[args] . identifier[tag] ,
identifier[commit_range] = identifier[args] . identifier[commit_range] ,
identifier[push] = identifier[args] . identifier[push] ,
identifier[chart_version] = identifier[chart_version] . identifier[split] ( literal[string] , literal[int] )[ literal[int] ],
)
identifier[build_values] ( identifier[chart] [ literal[string] ], identifier[value_mods] )
keyword[if] identifier[args] . identifier[publish_chart] :
identifier[publish_pages] ( identifier[chart] [ literal[string] ],
identifier[paths] = identifier[chart_paths] ,
identifier[git_repo] = identifier[chart] [ literal[string] ][ literal[string] ],
identifier[published_repo] = identifier[chart] [ literal[string] ][ literal[string] ],
identifier[extra_message] = identifier[args] . identifier[extra_message] ,
)
|
def main():
    """Run chartpress.

    Reads ``chartpress.yaml`` and, for each chart entry:
    - builds the chart (version taken from --tag, stripped of a leading 'v'),
    - builds (and optionally pushes) the docker images listed under ``images``,
    - optionally publishes the updated chart to gh-pages.
    """
    argparser = argparse.ArgumentParser(description=__doc__)
    argparser.add_argument('--commit-range', help='Range of commits to consider when building images')
    argparser.add_argument('--push', action='store_true', help='push built images to docker hub')
    argparser.add_argument('--publish-chart', action='store_true', help='publish updated chart to gh-pages')
    argparser.add_argument('--tag', default=None, help='Use this tag for images & charts')
    argparser.add_argument('--extra-message', default='', help='extra message to add to the commit message when publishing charts')
    argparser.add_argument('--image-prefix', default=None, help='override image prefix with this value')
    args = argparser.parse_args()
    with open('chartpress.yaml') as f:
        # safe_load: yaml.load without an explicit Loader is deprecated and can
        # construct arbitrary Python objects from the config file.
        config = yaml.safe_load(f)
    for chart in config['charts']:
        chart_paths = ['.'] + list(chart.get('paths', []))
        version = args.tag
        if version:
            # version of the chart shouldn't have leading 'v' prefix
            # if tag is of the form 'v1.2.3'
            version = version.lstrip('v')
        chart_version = build_chart(chart['name'], paths=chart_paths, version=version)
        if 'images' in chart:
            image_prefix = args.image_prefix if args.image_prefix is not None else chart['imagePrefix']
            value_mods = build_images(
                prefix=image_prefix,
                images=chart['images'],
                tag=args.tag,
                commit_range=args.commit_range,
                push=args.push,
                # exclude the `-<hash>` suffix from chart_version for image tags
                chart_version=chart_version.split('-', 1)[0],
            )
            build_values(chart['name'], value_mods)
        if args.publish_chart:
            publish_pages(chart['name'],
                          paths=chart_paths,
                          git_repo=chart['repo']['git'],
                          published_repo=chart['repo']['published'],
                          extra_message=args.extra_message,
                          )
|
def is_micropython_usb_device(port):
    """Checks a USB device to see if it looks like a MicroPython device.
    """
    if type(port).__name__ == 'Device':
        # Assume it's a pyudev.device.Device: it must be a tty device on
        # the USB bus, otherwise it cannot be a MicroPython board.
        is_usb_tty = (
            'ID_BUS' in port and port['ID_BUS'] == 'usb' and
            'SUBSYSTEM' in port and port['SUBSYSTEM'] == 'tty'
        )
        if not is_usb_tty:
            return False
        usb_id = 'usb vid:pid={}:{}'.format(port['ID_VENDOR_ID'], port['ID_MODEL_ID'])
    else:
        # Assume it's a serial.tools.list_ports.comports() entry:
        # the hwid string lives at index 2.
        usb_id = port[2].lower()
    # The last digit of the MicroPython PID is ignored since there are
    # 3 possible values.
    known_prefixes = ('usb vid:pid=f055:980',   # MicroPython
                      'usb vid:pid=16c0:0483')  # Teensy
    return usb_id.startswith(known_prefixes)
|
def function[is_micropython_usb_device, parameter[port]]:
constant[Checks a USB device to see if it looks like a MicroPython device.
]
if compare[call[name[type], parameter[name[port]]].__name__ equal[==] constant[Device]] begin[:]
if <ast.BoolOp object at 0x7da1b170e860> begin[:]
return[constant[False]]
variable[usb_id] assign[=] call[constant[usb vid:pid={}:{}].format, parameter[call[name[port]][constant[ID_VENDOR_ID]], call[name[port]][constant[ID_MODEL_ID]]]]
if call[name[usb_id].startswith, parameter[constant[usb vid:pid=f055:980]]] begin[:]
return[constant[True]]
if call[name[usb_id].startswith, parameter[constant[usb vid:pid=16c0:0483]]] begin[:]
return[constant[True]]
return[constant[False]]
|
keyword[def] identifier[is_micropython_usb_device] ( identifier[port] ):
literal[string]
keyword[if] identifier[type] ( identifier[port] ). identifier[__name__] == literal[string] :
keyword[if] ( literal[string] keyword[not] keyword[in] identifier[port] keyword[or] identifier[port] [ literal[string] ]!= literal[string] keyword[or]
literal[string] keyword[not] keyword[in] identifier[port] keyword[or] identifier[port] [ literal[string] ]!= literal[string] ):
keyword[return] keyword[False]
identifier[usb_id] = literal[string] . identifier[format] ( identifier[port] [ literal[string] ], identifier[port] [ literal[string] ])
keyword[else] :
identifier[usb_id] = identifier[port] [ literal[int] ]. identifier[lower] ()
keyword[if] identifier[usb_id] . identifier[startswith] ( literal[string] ):
keyword[return] keyword[True]
keyword[if] identifier[usb_id] . identifier[startswith] ( literal[string] ):
keyword[return] keyword[True]
keyword[return] keyword[False]
|
def is_micropython_usb_device(port):
"""Checks a USB device to see if it looks like a MicroPython device.
"""
if type(port).__name__ == 'Device':
# Assume its a pyudev.device.Device
if 'ID_BUS' not in port or port['ID_BUS'] != 'usb' or 'SUBSYSTEM' not in port or (port['SUBSYSTEM'] != 'tty'):
return False # depends on [control=['if'], data=[]]
usb_id = 'usb vid:pid={}:{}'.format(port['ID_VENDOR_ID'], port['ID_MODEL_ID']) # depends on [control=['if'], data=[]]
else:
# Assume its a port from serial.tools.list_ports.comports()
usb_id = port[2].lower()
# We don't check the last digit of the PID since there are 3 possible
# values.
if usb_id.startswith('usb vid:pid=f055:980'):
return True # depends on [control=['if'], data=[]]
# Check for Teensy VID:PID
if usb_id.startswith('usb vid:pid=16c0:0483'):
return True # depends on [control=['if'], data=[]]
return False
|
def routingAreaUpdateComplete(ReceiveNpduNumbersList_presence=0):
    """ROUTING AREA UPDATE COMPLETE Section 9.4.16

    Builds the message as protocol-discriminator / message-type layers;
    the optional Receive N-PDU Numbers List IE is appended when its
    presence flag is 1.
    """
    a = TpPd(pd=0x3)
    b = MessageType(mesType=0xa)  # 00001010
    packet = a / b
    # Use ==, not `is`: identity comparison against an int literal is
    # implementation-dependent and raises SyntaxWarning on modern CPython.
    if ReceiveNpduNumbersList_presence == 1:
        c = ReceiveNpduNumbersList(ieiRNNL=0x26)
        packet = packet / c
    return packet
|
def function[routingAreaUpdateComplete, parameter[ReceiveNpduNumbersList_presence]]:
constant[ROUTING AREA UPDATE COMPLETE Section 9.4.16]
variable[a] assign[=] call[name[TpPd], parameter[]]
variable[b] assign[=] call[name[MessageType], parameter[]]
variable[packet] assign[=] binary_operation[name[a] / name[b]]
if compare[name[ReceiveNpduNumbersList_presence] is constant[1]] begin[:]
variable[c] assign[=] call[name[ReceiveNpduNumbersList], parameter[]]
variable[packet] assign[=] binary_operation[name[packet] / name[c]]
return[name[packet]]
|
keyword[def] identifier[routingAreaUpdateComplete] ( identifier[ReceiveNpduNumbersList_presence] = literal[int] ):
literal[string]
identifier[a] = identifier[TpPd] ( identifier[pd] = literal[int] )
identifier[b] = identifier[MessageType] ( identifier[mesType] = literal[int] )
identifier[packet] = identifier[a] / identifier[b]
keyword[if] identifier[ReceiveNpduNumbersList_presence] keyword[is] literal[int] :
identifier[c] = identifier[ReceiveNpduNumbersList] ( identifier[ieiRNNL] = literal[int] )
identifier[packet] = identifier[packet] / identifier[c]
keyword[return] identifier[packet]
|
def routingAreaUpdateComplete(ReceiveNpduNumbersList_presence=0):
"""ROUTING AREA UPDATE COMPLETE Section 9.4.16"""
a = TpPd(pd=3)
b = MessageType(mesType=10) # 00001010
packet = a / b
if ReceiveNpduNumbersList_presence is 1:
c = ReceiveNpduNumbersList(ieiRNNL=38)
packet = packet / c # depends on [control=['if'], data=[]]
return packet
|
def wait_for_visible(self, locator):
    """
    Synchronization to deal with elements that are present, but are disabled until some action
    triggers their visibility.

    Polls ``self.driver.is_visible(locator)`` once per second for up to
    ``timeout_seconds`` seconds (module-level constant).

    :returns: True once the element becomes visible.
    :raises: ElementVisiblityTimeout if the element never becomes visible.
    """
    for _ in range(timeout_seconds):
        try:
            if self.driver.is_visible(locator):
                break
        except Exception:
            # is_visible may raise while the page is still settling; treat
            # any driver error as "not visible yet" and keep polling.
            # (Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.)
            pass
        time.sleep(1)
    else:
        # Loop finished without `break` -> element never became visible.
        raise ElementVisiblityTimeout("%s visibility timed out" % locator)
    return True
|
def function[wait_for_visible, parameter[self, locator]]:
constant[
Synchronization to deal with elements that are present, but are disabled until some action
triggers their visibility.
:raises: ElementVisiblityTimeout
]
for taget[name[i]] in starred[call[name[range], parameter[name[timeout_seconds]]]] begin[:]
<ast.Try object at 0x7da2041d9ab0>
call[name[time].sleep, parameter[constant[1]]]
return[constant[True]]
|
keyword[def] identifier[wait_for_visible] ( identifier[self] , identifier[locator] ):
literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[timeout_seconds] ):
keyword[try] :
keyword[if] identifier[self] . identifier[driver] . identifier[is_visible] ( identifier[locator] ):
keyword[break]
keyword[except] :
keyword[pass]
identifier[time] . identifier[sleep] ( literal[int] )
keyword[else] :
keyword[raise] identifier[ElementVisiblityTimeout] ( literal[string] % identifier[locator] )
keyword[return] keyword[True]
|
def wait_for_visible(self, locator):
"""
Synchronization to deal with elements that are present, but are disabled until some action
triggers their visibility.
:raises: ElementVisiblityTimeout
"""
for i in range(timeout_seconds):
try:
if self.driver.is_visible(locator):
break # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
time.sleep(1) # depends on [control=['for'], data=[]]
else:
raise ElementVisiblityTimeout('%s visibility timed out' % locator)
return True
|
def read_route_line(route):
    """
    read route line in gaussian input/output and return functional basis_set
    and a dictionary of other route parameters

    Args:
        route (str) : the route line

    Returns:
        functional (str) : the method (HF, PBE ...)
        basis_set (str) : the basis set
        route_params (dict) : dictionary of route parameters
        dieze_tag (str) : the "#N"/"#P"/"#T" tag (a bare "#" normalizes to "#N")
    """
    scrf_patt = re.compile(r"^([sS][cC][rR][fF])\s*=\s*(.+)")
    # NOTE: [A-Za-z], not [A-z] -- the latter also matches the ASCII
    # punctuation between 'Z' and 'a' ([ \ ] ^ _ `), which is a bug.
    multi_params_patt = re.compile(r"^([A-Za-z]+[0-9]*)[\s=]+\((.*)\)$")
    functional = None
    basis_set = None
    route_params = {}
    dieze_tag = None
    if route:
        if "/" in route:
            # "functional/basis" pair; strip both (and the slash) from the
            # route before parsing the remaining keywords.
            tok = route.split("/")
            functional = tok[0].split()[-1]
            basis_set = tok[1].split()[0]
            for tok in [functional, basis_set, "/"]:
                route = route.replace(tok, "")
        for tok in route.split():
            if scrf_patt.match(tok):
                m = scrf_patt.match(tok)
                route_params[m.group(1)] = m.group(2)
            elif tok.upper() in ["#", "#N", "#P", "#T"]:
                # does not store # in route to avoid error in input
                if tok == "#":
                    dieze_tag = "#N"
                else:
                    dieze_tag = tok
                continue
            else:
                m = re.match(multi_params_patt, tok.strip("#"))
                if m:
                    # keyword with parenthesized sub-options,
                    # e.g. opt=(maxcycles=100)
                    pars = {}
                    for par in m.group(2).split(","):
                        p = par.split("=")
                        pars[p[0]] = None if len(p) == 1 else p[1]
                    route_params[m.group(1)] = pars
                else:
                    # plain keyword or keyword=value
                    d = tok.strip("#").split("=")
                    route_params[d[0]] = None if len(d) == 1 else d[1]
    return functional, basis_set, route_params, dieze_tag
|
def function[read_route_line, parameter[route]]:
constant[
read route line in gaussian input/output and return functional basis_set
and a dictionary of other route parameters
Args:
route (str) : the route line
return
functional (str) : the method (HF, PBE ...)
basis_set (str) : the basis set
route (dict) : dictionary of parameters
]
variable[scrf_patt] assign[=] call[name[re].compile, parameter[constant[^([sS][cC][rR][fF])\s*=\s*(.+)]]]
variable[multi_params_patt] assign[=] call[name[re].compile, parameter[constant[^([A-z]+[0-9]*)[\s=]+\((.*)\)$]]]
variable[functional] assign[=] constant[None]
variable[basis_set] assign[=] constant[None]
variable[route_params] assign[=] dictionary[[], []]
variable[dieze_tag] assign[=] constant[None]
if name[route] begin[:]
if compare[constant[/] in name[route]] begin[:]
variable[tok] assign[=] call[name[route].split, parameter[constant[/]]]
variable[functional] assign[=] call[call[call[name[tok]][constant[0]].split, parameter[]]][<ast.UnaryOp object at 0x7da1b1cec040>]
variable[basis_set] assign[=] call[call[call[name[tok]][constant[1]].split, parameter[]]][constant[0]]
for taget[name[tok]] in starred[list[[<ast.Name object at 0x7da1b1cef0d0>, <ast.Name object at 0x7da1b1cedba0>, <ast.Constant object at 0x7da1b1cecaf0>]]] begin[:]
variable[route] assign[=] call[name[route].replace, parameter[name[tok], constant[]]]
for taget[name[tok]] in starred[call[name[route].split, parameter[]]] begin[:]
if call[name[scrf_patt].match, parameter[name[tok]]] begin[:]
variable[m] assign[=] call[name[scrf_patt].match, parameter[name[tok]]]
call[name[route_params]][call[name[m].group, parameter[constant[1]]]] assign[=] call[name[m].group, parameter[constant[2]]]
return[tuple[[<ast.Name object at 0x7da1b1cad420>, <ast.Name object at 0x7da1b1cad480>, <ast.Name object at 0x7da1b1cad270>, <ast.Name object at 0x7da1b1cae0e0>]]]
|
keyword[def] identifier[read_route_line] ( identifier[route] ):
literal[string]
identifier[scrf_patt] = identifier[re] . identifier[compile] ( literal[string] )
identifier[multi_params_patt] = identifier[re] . identifier[compile] ( literal[string] )
identifier[functional] = keyword[None]
identifier[basis_set] = keyword[None]
identifier[route_params] ={}
identifier[dieze_tag] = keyword[None]
keyword[if] identifier[route] :
keyword[if] literal[string] keyword[in] identifier[route] :
identifier[tok] = identifier[route] . identifier[split] ( literal[string] )
identifier[functional] = identifier[tok] [ literal[int] ]. identifier[split] ()[- literal[int] ]
identifier[basis_set] = identifier[tok] [ literal[int] ]. identifier[split] ()[ literal[int] ]
keyword[for] identifier[tok] keyword[in] [ identifier[functional] , identifier[basis_set] , literal[string] ]:
identifier[route] = identifier[route] . identifier[replace] ( identifier[tok] , literal[string] )
keyword[for] identifier[tok] keyword[in] identifier[route] . identifier[split] ():
keyword[if] identifier[scrf_patt] . identifier[match] ( identifier[tok] ):
identifier[m] = identifier[scrf_patt] . identifier[match] ( identifier[tok] )
identifier[route_params] [ identifier[m] . identifier[group] ( literal[int] )]= identifier[m] . identifier[group] ( literal[int] )
keyword[elif] identifier[tok] . identifier[upper] () keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]:
keyword[if] identifier[tok] == literal[string] :
identifier[dieze_tag] = literal[string]
keyword[else] :
identifier[dieze_tag] = identifier[tok]
keyword[continue]
keyword[else] :
identifier[m] = identifier[re] . identifier[match] ( identifier[multi_params_patt] , identifier[tok] . identifier[strip] ( literal[string] ))
keyword[if] identifier[m] :
identifier[pars] ={}
keyword[for] identifier[par] keyword[in] identifier[m] . identifier[group] ( literal[int] ). identifier[split] ( literal[string] ):
identifier[p] = identifier[par] . identifier[split] ( literal[string] )
identifier[pars] [ identifier[p] [ literal[int] ]]= keyword[None] keyword[if] identifier[len] ( identifier[p] )== literal[int] keyword[else] identifier[p] [ literal[int] ]
identifier[route_params] [ identifier[m] . identifier[group] ( literal[int] )]= identifier[pars]
keyword[else] :
identifier[d] = identifier[tok] . identifier[strip] ( literal[string] ). identifier[split] ( literal[string] )
identifier[route_params] [ identifier[d] [ literal[int] ]]= keyword[None] keyword[if] identifier[len] ( identifier[d] )== literal[int] keyword[else] identifier[d] [ literal[int] ]
keyword[return] identifier[functional] , identifier[basis_set] , identifier[route_params] , identifier[dieze_tag]
|
def read_route_line(route):
"""
read route line in gaussian input/output and return functional basis_set
and a dictionary of other route parameters
Args:
route (str) : the route line
return
functional (str) : the method (HF, PBE ...)
basis_set (str) : the basis set
route (dict) : dictionary of parameters
"""
scrf_patt = re.compile('^([sS][cC][rR][fF])\\s*=\\s*(.+)')
multi_params_patt = re.compile('^([A-z]+[0-9]*)[\\s=]+\\((.*)\\)$')
functional = None
basis_set = None
route_params = {}
dieze_tag = None
if route:
if '/' in route:
tok = route.split('/')
functional = tok[0].split()[-1]
basis_set = tok[1].split()[0]
for tok in [functional, basis_set, '/']:
route = route.replace(tok, '') # depends on [control=['for'], data=['tok']] # depends on [control=['if'], data=['route']]
for tok in route.split():
if scrf_patt.match(tok):
m = scrf_patt.match(tok)
route_params[m.group(1)] = m.group(2) # depends on [control=['if'], data=[]]
elif tok.upper() in ['#', '#N', '#P', '#T']:
# does not store # in route to avoid error in input
if tok == '#':
dieze_tag = '#N' # depends on [control=['if'], data=[]]
else:
dieze_tag = tok
continue # depends on [control=['if'], data=[]]
else:
m = re.match(multi_params_patt, tok.strip('#'))
if m:
pars = {}
for par in m.group(2).split(','):
p = par.split('=')
pars[p[0]] = None if len(p) == 1 else p[1] # depends on [control=['for'], data=['par']]
route_params[m.group(1)] = pars # depends on [control=['if'], data=[]]
else:
d = tok.strip('#').split('=')
route_params[d[0]] = None if len(d) == 1 else d[1] # depends on [control=['for'], data=['tok']] # depends on [control=['if'], data=[]]
return (functional, basis_set, route_params, dieze_tag)
|
def gru_cell(input_layer,
             state,
             num_units,
             bias=tf.zeros_initializer(),
             weights=None,
             phase=prettytensor.Phase.train,
             parameter_modifier=parameters.identity):
  """Gated recurrent unit memory cell (GRU).

  Computes reset/update gates from input and previous state, a tanh
  candidate state, and blends old state with the candidate via the
  update gate.

  Args:
    input_layer: The input layer.
    state: The current state of the network. For GRUs, this is a list with
      one element (tensor) of shape [batch, num_units].
    num_units: How big is the hidden state.
    bias: An initializer for the bias or a Tensor. No bias if set to None.
    weights: An initializer for weights or a Tensor.
    phase: The phase of graph construction. See `pt.Phase`.
    parameter_modifier: A function to modify parameters that is applied after
      creation and before use.
  Returns:
    A RecurrentResult.
  """
  # As a compound op, it needs to respect whether or not this is a sequential
  # builder.
  if input_layer.is_sequential_builder():
    layer = input_layer.as_layer()
  else:
    layer = input_layer
  # We start with bias of 1.0 to not reset and not update.
  # NB We compute activation_input and activation_state in two different ops,
  # instead of concatenating them, followed by one matrix multiplication. The
  # reason is that input has size [batch_size x input_size], while state has
  # [ ? x state_size ], where the first dimension is 1 initially and will be
  # batch_size only after the first RNN computation. We thus cannot concatenate
  # input and state, and instead add the results of two fully connected ops,
  # which works thanks to broadcasting, independent of state's batch size.
  state = state[0]  # GRU state is a single-element list; unwrap the tensor.
  state_pt = prettytensor.wrap(state, layer.bookkeeper)
  # Both gates (reset and update) are produced by one 2*num_units projection.
  activation_input = layer.fully_connected(
      2 * num_units,
      bias=None if bias is None else tf.constant_initializer(1.0),
      activation_fn=None,
      weights=weights,
      phase=phase,
      parameter_modifier=parameter_modifier)
  activation_state = state_pt.fully_connected(
      2 * num_units,
      bias=None,
      activation_fn=None,
      weights=weights,
      phase=phase,
      parameter_modifier=parameter_modifier)
  # adds batch_size x (2 * num_units) + ? x (2 * num_inputs)
  activation = activation_input + activation_state
  activation = activation.sigmoid()
  split = activation.split(1, 2)
  r = split[0]  # reset gate
  u = split[1]  # update gate
  # Candidate state: tanh of a projection of the reset-gated previous state.
  c = layer.concat(1, [r * state]).fully_connected(
      num_units,
      bias=bias,
      activation_fn=None,
      weights=weights,
      phase=phase,
      parameter_modifier=parameter_modifier).apply(tf.tanh)
  # Convex blend of old state and candidate, weighted by the update gate.
  new_h = u * state + (1 - u) * c
  if input_layer.is_sequential_builder():
    # NOTE(review): set_head is passed input_layer rather than new_h here,
    # which looks suspicious -- confirm against the sequential-builder API.
    new_h = input_layer.set_head(input_layer)
  return RecurrentResult(new_h, [new_h])
|
def function[gru_cell, parameter[input_layer, state, num_units, bias, weights, phase, parameter_modifier]]:
constant[Gated recurrent unit memory cell (GRU).
Args:
input_layer: The input layer.
state: The current state of the network. For GRUs, this is a list with
one element (tensor) of shape [batch, num_units].
num_units: How big is the hidden state.
bias: An initializer for the bias or a Tensor. No bias if set to None.
weights: An initializer for weights or a Tensor.
phase: The phase of graph construction. See `pt.Phase`.
parameter_modifier: A function to modify parameters that is applied after
creation and before use.
Returns:
A RecurrentResult.
]
if call[name[input_layer].is_sequential_builder, parameter[]] begin[:]
variable[layer] assign[=] call[name[input_layer].as_layer, parameter[]]
variable[state] assign[=] call[name[state]][constant[0]]
variable[state_pt] assign[=] call[name[prettytensor].wrap, parameter[name[state], name[layer].bookkeeper]]
variable[activation_input] assign[=] call[name[layer].fully_connected, parameter[binary_operation[constant[2] * name[num_units]]]]
variable[activation_state] assign[=] call[name[state_pt].fully_connected, parameter[binary_operation[constant[2] * name[num_units]]]]
variable[activation] assign[=] binary_operation[name[activation_input] + name[activation_state]]
variable[activation] assign[=] call[name[activation].sigmoid, parameter[]]
variable[split] assign[=] call[name[activation].split, parameter[constant[1], constant[2]]]
variable[r] assign[=] call[name[split]][constant[0]]
variable[u] assign[=] call[name[split]][constant[1]]
variable[c] assign[=] call[call[call[name[layer].concat, parameter[constant[1], list[[<ast.BinOp object at 0x7da1b23445e0>]]]].fully_connected, parameter[name[num_units]]].apply, parameter[name[tf].tanh]]
variable[new_h] assign[=] binary_operation[binary_operation[name[u] * name[state]] + binary_operation[binary_operation[constant[1] - name[u]] * name[c]]]
if call[name[input_layer].is_sequential_builder, parameter[]] begin[:]
variable[new_h] assign[=] call[name[input_layer].set_head, parameter[name[input_layer]]]
return[call[name[RecurrentResult], parameter[name[new_h], list[[<ast.Name object at 0x7da1b2344130>]]]]]
|
keyword[def] identifier[gru_cell] ( identifier[input_layer] ,
identifier[state] ,
identifier[num_units] ,
identifier[bias] = identifier[tf] . identifier[zeros_initializer] (),
identifier[weights] = keyword[None] ,
identifier[phase] = identifier[prettytensor] . identifier[Phase] . identifier[train] ,
identifier[parameter_modifier] = identifier[parameters] . identifier[identity] ):
literal[string]
keyword[if] identifier[input_layer] . identifier[is_sequential_builder] ():
identifier[layer] = identifier[input_layer] . identifier[as_layer] ()
keyword[else] :
identifier[layer] = identifier[input_layer]
identifier[state] = identifier[state] [ literal[int] ]
identifier[state_pt] = identifier[prettytensor] . identifier[wrap] ( identifier[state] , identifier[layer] . identifier[bookkeeper] )
identifier[activation_input] = identifier[layer] . identifier[fully_connected] (
literal[int] * identifier[num_units] ,
identifier[bias] = keyword[None] keyword[if] identifier[bias] keyword[is] keyword[None] keyword[else] identifier[tf] . identifier[constant_initializer] ( literal[int] ),
identifier[activation_fn] = keyword[None] ,
identifier[weights] = identifier[weights] ,
identifier[phase] = identifier[phase] ,
identifier[parameter_modifier] = identifier[parameter_modifier] )
identifier[activation_state] = identifier[state_pt] . identifier[fully_connected] (
literal[int] * identifier[num_units] ,
identifier[bias] = keyword[None] ,
identifier[activation_fn] = keyword[None] ,
identifier[weights] = identifier[weights] ,
identifier[phase] = identifier[phase] ,
identifier[parameter_modifier] = identifier[parameter_modifier] )
identifier[activation] = identifier[activation_input] + identifier[activation_state]
identifier[activation] = identifier[activation] . identifier[sigmoid] ()
identifier[split] = identifier[activation] . identifier[split] ( literal[int] , literal[int] )
identifier[r] = identifier[split] [ literal[int] ]
identifier[u] = identifier[split] [ literal[int] ]
identifier[c] = identifier[layer] . identifier[concat] ( literal[int] ,[ identifier[r] * identifier[state] ]). identifier[fully_connected] (
identifier[num_units] ,
identifier[bias] = identifier[bias] ,
identifier[activation_fn] = keyword[None] ,
identifier[weights] = identifier[weights] ,
identifier[phase] = identifier[phase] ,
identifier[parameter_modifier] = identifier[parameter_modifier] ). identifier[apply] ( identifier[tf] . identifier[tanh] )
identifier[new_h] = identifier[u] * identifier[state] +( literal[int] - identifier[u] )* identifier[c]
keyword[if] identifier[input_layer] . identifier[is_sequential_builder] ():
identifier[new_h] = identifier[input_layer] . identifier[set_head] ( identifier[input_layer] )
keyword[return] identifier[RecurrentResult] ( identifier[new_h] ,[ identifier[new_h] ])
|
def gru_cell(input_layer, state, num_units, bias=tf.zeros_initializer(), weights=None, phase=prettytensor.Phase.train, parameter_modifier=parameters.identity):
"""Gated recurrent unit memory cell (GRU).
Args:
input_layer: The input layer.
state: The current state of the network. For GRUs, this is a list with
one element (tensor) of shape [batch, num_units].
num_units: How big is the hidden state.
bias: An initializer for the bias or a Tensor. No bias if set to None.
weights: An initializer for weights or a Tensor.
phase: The phase of graph construction. See `pt.Phase`.
parameter_modifier: A function to modify parameters that is applied after
creation and before use.
Returns:
A RecurrentResult.
"""
# As a compound op, it needs to respect whether or not this is a sequential
# builder.
if input_layer.is_sequential_builder():
layer = input_layer.as_layer() # depends on [control=['if'], data=[]]
else:
layer = input_layer
# We start with bias of 1.0 to not reset and not udpate.
# NB We compute activation_input and activation_state in two different ops,
# instead of concatenating them, followed by one matrix multiplication. The
# reason is that input has size [batch_size x input_size], while state has
# [ ? x state_size ], where the first dimension is 1 initially and will be
# batch_size only after the first RNN computation. We thus cannot concatenate
# input and state, and instead add the results of two fully connected ops,
# which works thanks to broadcasting, independent of state's batch size.
state = state[0]
state_pt = prettytensor.wrap(state, layer.bookkeeper)
activation_input = layer.fully_connected(2 * num_units, bias=None if bias is None else tf.constant_initializer(1.0), activation_fn=None, weights=weights, phase=phase, parameter_modifier=parameter_modifier)
activation_state = state_pt.fully_connected(2 * num_units, bias=None, activation_fn=None, weights=weights, phase=phase, parameter_modifier=parameter_modifier)
# adds batch_size x (2 * num_units) + ? x (2 * num_inputs)
activation = activation_input + activation_state
activation = activation.sigmoid()
split = activation.split(1, 2)
r = split[0]
u = split[1]
c = layer.concat(1, [r * state]).fully_connected(num_units, bias=bias, activation_fn=None, weights=weights, phase=phase, parameter_modifier=parameter_modifier).apply(tf.tanh)
new_h = u * state + (1 - u) * c
if input_layer.is_sequential_builder():
new_h = input_layer.set_head(input_layer) # depends on [control=['if'], data=[]]
return RecurrentResult(new_h, [new_h])
|
def info(self, params=None):
    """Get the basic info from the current cluster.

    Tornado generator-coroutine: ``yield`` suspends until the HTTP request
    completes, and the value is delivered to the caller via
    ``raise gen.Return(...)`` (the pre-Python-3.3 convention, since
    ``return value`` was not allowed inside generators).

    :param params: optional query-string parameters for the request
    :rtype: dict
    """
    # perform_request yields a (status, body) pair; the status is discarded.
    _, data = yield self.transport.perform_request('GET', '/',
                                                   params=params)
    raise gen.Return(data)
|
def function[info, parameter[self, params]]:
constant[Get the basic info from the current cluster.
:rtype: dict
]
<ast.Tuple object at 0x7da1b033df90> assign[=] <ast.Yield object at 0x7da1b033dcf0>
<ast.Raise object at 0x7da1b033c730>
|
keyword[def] identifier[info] ( identifier[self] , identifier[params] = keyword[None] ):
literal[string]
identifier[_] , identifier[data] = keyword[yield] identifier[self] . identifier[transport] . identifier[perform_request] ( literal[string] , literal[string] ,
identifier[params] = identifier[params] )
keyword[raise] identifier[gen] . identifier[Return] ( identifier[data] )
|
def info(self, params=None):
"""Get the basic info from the current cluster.
:rtype: dict
"""
(_, data) = (yield self.transport.perform_request('GET', '/', params=params))
raise gen.Return(data)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.