code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def _strip_stray_atoms(self):
"""Remove stray atoms and surface pieces. """
components = self.bond_graph.connected_components()
major_component = max(components, key=len)
for atom in list(self.particles()):
if atom not in major_component:
self.remove(atom) | def function[_strip_stray_atoms, parameter[self]]:
constant[Remove stray atoms and surface pieces. ]
variable[components] assign[=] call[name[self].bond_graph.connected_components, parameter[]]
variable[major_component] assign[=] call[name[max], parameter[name[components]]]
for taget[name[atom]] in starred[call[name[list], parameter[call[name[self].particles, parameter[]]]]] begin[:]
if compare[name[atom] <ast.NotIn object at 0x7da2590d7190> name[major_component]] begin[:]
call[name[self].remove, parameter[name[atom]]] | keyword[def] identifier[_strip_stray_atoms] ( identifier[self] ):
literal[string]
identifier[components] = identifier[self] . identifier[bond_graph] . identifier[connected_components] ()
identifier[major_component] = identifier[max] ( identifier[components] , identifier[key] = identifier[len] )
keyword[for] identifier[atom] keyword[in] identifier[list] ( identifier[self] . identifier[particles] ()):
keyword[if] identifier[atom] keyword[not] keyword[in] identifier[major_component] :
identifier[self] . identifier[remove] ( identifier[atom] ) | def _strip_stray_atoms(self):
"""Remove stray atoms and surface pieces. """
components = self.bond_graph.connected_components()
major_component = max(components, key=len)
for atom in list(self.particles()):
if atom not in major_component:
self.remove(atom) # depends on [control=['if'], data=['atom']] # depends on [control=['for'], data=['atom']] |
def _expander(namepath):
""" expand ./ ~ and ../ designators in location names """
if "~" in namepath:
namepath = os.path.expanduser(namepath)
else:
namepath = os.path.abspath(namepath)
return namepath | def function[_expander, parameter[namepath]]:
constant[ expand ./ ~ and ../ designators in location names ]
if compare[constant[~] in name[namepath]] begin[:]
variable[namepath] assign[=] call[name[os].path.expanduser, parameter[name[namepath]]]
return[name[namepath]] | keyword[def] identifier[_expander] ( identifier[namepath] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[namepath] :
identifier[namepath] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[namepath] )
keyword[else] :
identifier[namepath] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[namepath] )
keyword[return] identifier[namepath] | def _expander(namepath):
""" expand ./ ~ and ../ designators in location names """
if '~' in namepath:
namepath = os.path.expanduser(namepath) # depends on [control=['if'], data=['namepath']]
else:
namepath = os.path.abspath(namepath)
return namepath |
def _insert_paragraph_before(self):
"""
Return a newly created paragraph, inserted directly before this
paragraph.
"""
p = self._p.add_p_before()
return Paragraph(p, self._parent) | def function[_insert_paragraph_before, parameter[self]]:
constant[
Return a newly created paragraph, inserted directly before this
paragraph.
]
variable[p] assign[=] call[name[self]._p.add_p_before, parameter[]]
return[call[name[Paragraph], parameter[name[p], name[self]._parent]]] | keyword[def] identifier[_insert_paragraph_before] ( identifier[self] ):
literal[string]
identifier[p] = identifier[self] . identifier[_p] . identifier[add_p_before] ()
keyword[return] identifier[Paragraph] ( identifier[p] , identifier[self] . identifier[_parent] ) | def _insert_paragraph_before(self):
"""
Return a newly created paragraph, inserted directly before this
paragraph.
"""
p = self._p.add_p_before()
return Paragraph(p, self._parent) |
def start_polling(dispatcher, *, loop=None, skip_updates=False, reset_webhook=True,
on_startup=None, on_shutdown=None, timeout=20, fast=True):
"""
Start bot in long-polling mode
:param dispatcher:
:param loop:
:param skip_updates:
:param reset_webhook:
:param on_startup:
:param on_shutdown:
:param timeout:
"""
executor = Executor(dispatcher, skip_updates=skip_updates, loop=loop)
_setup_callbacks(executor, on_startup, on_shutdown)
executor.start_polling(reset_webhook=reset_webhook, timeout=timeout, fast=fast) | def function[start_polling, parameter[dispatcher]]:
constant[
Start bot in long-polling mode
:param dispatcher:
:param loop:
:param skip_updates:
:param reset_webhook:
:param on_startup:
:param on_shutdown:
:param timeout:
]
variable[executor] assign[=] call[name[Executor], parameter[name[dispatcher]]]
call[name[_setup_callbacks], parameter[name[executor], name[on_startup], name[on_shutdown]]]
call[name[executor].start_polling, parameter[]] | keyword[def] identifier[start_polling] ( identifier[dispatcher] ,*, identifier[loop] = keyword[None] , identifier[skip_updates] = keyword[False] , identifier[reset_webhook] = keyword[True] ,
identifier[on_startup] = keyword[None] , identifier[on_shutdown] = keyword[None] , identifier[timeout] = literal[int] , identifier[fast] = keyword[True] ):
literal[string]
identifier[executor] = identifier[Executor] ( identifier[dispatcher] , identifier[skip_updates] = identifier[skip_updates] , identifier[loop] = identifier[loop] )
identifier[_setup_callbacks] ( identifier[executor] , identifier[on_startup] , identifier[on_shutdown] )
identifier[executor] . identifier[start_polling] ( identifier[reset_webhook] = identifier[reset_webhook] , identifier[timeout] = identifier[timeout] , identifier[fast] = identifier[fast] ) | def start_polling(dispatcher, *, loop=None, skip_updates=False, reset_webhook=True, on_startup=None, on_shutdown=None, timeout=20, fast=True):
"""
Start bot in long-polling mode
:param dispatcher:
:param loop:
:param skip_updates:
:param reset_webhook:
:param on_startup:
:param on_shutdown:
:param timeout:
"""
executor = Executor(dispatcher, skip_updates=skip_updates, loop=loop)
_setup_callbacks(executor, on_startup, on_shutdown)
executor.start_polling(reset_webhook=reset_webhook, timeout=timeout, fast=fast) |
def FrameworkDir32(self):
"""
Microsoft .NET Framework 32bit directory.
"""
# Default path
guess_fw = os.path.join(self.WinDir, r'Microsoft.NET\Framework')
# Try to get path from registry, if fail use default path
return self.ri.lookup(self.ri.vc, 'frameworkdir32') or guess_fw | def function[FrameworkDir32, parameter[self]]:
constant[
Microsoft .NET Framework 32bit directory.
]
variable[guess_fw] assign[=] call[name[os].path.join, parameter[name[self].WinDir, constant[Microsoft.NET\Framework]]]
return[<ast.BoolOp object at 0x7da1b1b86620>] | keyword[def] identifier[FrameworkDir32] ( identifier[self] ):
literal[string]
identifier[guess_fw] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[WinDir] , literal[string] )
keyword[return] identifier[self] . identifier[ri] . identifier[lookup] ( identifier[self] . identifier[ri] . identifier[vc] , literal[string] ) keyword[or] identifier[guess_fw] | def FrameworkDir32(self):
"""
Microsoft .NET Framework 32bit directory.
"""
# Default path
guess_fw = os.path.join(self.WinDir, 'Microsoft.NET\\Framework')
# Try to get path from registry, if fail use default path
return self.ri.lookup(self.ri.vc, 'frameworkdir32') or guess_fw |
def find_formatter(name, path):
"""
Returns the formatter class whose name attribute is *name* when *name* is not *AUTO_FORMATTER*.
Otherwise, the first formatter that accepts *path* is returned. Internally, this method simply
uses :py:func:`get_formatter` or :py:func:`find_formatters` depending on the value of *name*.
"""
if name == AUTO_FORMATTER:
return find_formatters(path, silent=False)[0]
else:
return get_formatter(name, silent=False) | def function[find_formatter, parameter[name, path]]:
constant[
Returns the formatter class whose name attribute is *name* when *name* is not *AUTO_FORMATTER*.
Otherwise, the first formatter that accepts *path* is returned. Internally, this method simply
uses :py:func:`get_formatter` or :py:func:`find_formatters` depending on the value of *name*.
]
if compare[name[name] equal[==] name[AUTO_FORMATTER]] begin[:]
return[call[call[name[find_formatters], parameter[name[path]]]][constant[0]]] | keyword[def] identifier[find_formatter] ( identifier[name] , identifier[path] ):
literal[string]
keyword[if] identifier[name] == identifier[AUTO_FORMATTER] :
keyword[return] identifier[find_formatters] ( identifier[path] , identifier[silent] = keyword[False] )[ literal[int] ]
keyword[else] :
keyword[return] identifier[get_formatter] ( identifier[name] , identifier[silent] = keyword[False] ) | def find_formatter(name, path):
"""
Returns the formatter class whose name attribute is *name* when *name* is not *AUTO_FORMATTER*.
Otherwise, the first formatter that accepts *path* is returned. Internally, this method simply
uses :py:func:`get_formatter` or :py:func:`find_formatters` depending on the value of *name*.
"""
if name == AUTO_FORMATTER:
return find_formatters(path, silent=False)[0] # depends on [control=['if'], data=[]]
else:
return get_formatter(name, silent=False) |
def from_file(filename, check_for_POTCAR=True, read_velocities=True):
"""
Reads a Poscar from a file.
The code will try its best to determine the elements in the POSCAR in
the following order:
1. If check_for_POTCAR is True, the code will try to check if a POTCAR
is in the same directory as the POSCAR and use elements from that by
default. (This is the VASP default sequence of priority).
2. If the input file is Vasp5-like and contains element symbols in the
6th line, the code will use that if check_for_POTCAR is False or there
is no POTCAR found.
3. Failing (2), the code will check if a symbol is provided at the end
of each coordinate.
If all else fails, the code will just assign the first n elements in
increasing atomic number, where n is the number of species, to the
Poscar. For example, H, He, Li, .... This will ensure at least a
unique element is assigned to each site and any analysis that does not
require specific elemental properties should work fine.
Args:
filename (str): File name containing Poscar data.
check_for_POTCAR (bool): Whether to check if a POTCAR is present
in the same directory as the POSCAR. Defaults to True.
read_velocities (bool): Whether to read or not velocities if they
are present in the POSCAR. Default is True.
Returns:
Poscar object.
"""
dirname = os.path.dirname(os.path.abspath(filename))
names = None
if check_for_POTCAR:
potcars = glob.glob(os.path.join(dirname, "*POTCAR*"))
if potcars:
try:
potcar = Potcar.from_file(sorted(potcars)[0])
names = [sym.split("_")[0] for sym in potcar.symbols]
[get_el_sp(n) for n in names] # ensure valid names
except:
names = None
with zopen(filename, "rt") as f:
return Poscar.from_string(f.read(), names,
read_velocities=read_velocities) | def function[from_file, parameter[filename, check_for_POTCAR, read_velocities]]:
constant[
Reads a Poscar from a file.
The code will try its best to determine the elements in the POSCAR in
the following order:
1. If check_for_POTCAR is True, the code will try to check if a POTCAR
is in the same directory as the POSCAR and use elements from that by
default. (This is the VASP default sequence of priority).
2. If the input file is Vasp5-like and contains element symbols in the
6th line, the code will use that if check_for_POTCAR is False or there
is no POTCAR found.
3. Failing (2), the code will check if a symbol is provided at the end
of each coordinate.
If all else fails, the code will just assign the first n elements in
increasing atomic number, where n is the number of species, to the
Poscar. For example, H, He, Li, .... This will ensure at least a
unique element is assigned to each site and any analysis that does not
require specific elemental properties should work fine.
Args:
filename (str): File name containing Poscar data.
check_for_POTCAR (bool): Whether to check if a POTCAR is present
in the same directory as the POSCAR. Defaults to True.
read_velocities (bool): Whether to read or not velocities if they
are present in the POSCAR. Default is True.
Returns:
Poscar object.
]
variable[dirname] assign[=] call[name[os].path.dirname, parameter[call[name[os].path.abspath, parameter[name[filename]]]]]
variable[names] assign[=] constant[None]
if name[check_for_POTCAR] begin[:]
variable[potcars] assign[=] call[name[glob].glob, parameter[call[name[os].path.join, parameter[name[dirname], constant[*POTCAR*]]]]]
if name[potcars] begin[:]
<ast.Try object at 0x7da1b26ac910>
with call[name[zopen], parameter[name[filename], constant[rt]]] begin[:]
return[call[name[Poscar].from_string, parameter[call[name[f].read, parameter[]], name[names]]]] | keyword[def] identifier[from_file] ( identifier[filename] , identifier[check_for_POTCAR] = keyword[True] , identifier[read_velocities] = keyword[True] ):
literal[string]
identifier[dirname] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[filename] ))
identifier[names] = keyword[None]
keyword[if] identifier[check_for_POTCAR] :
identifier[potcars] = identifier[glob] . identifier[glob] ( identifier[os] . identifier[path] . identifier[join] ( identifier[dirname] , literal[string] ))
keyword[if] identifier[potcars] :
keyword[try] :
identifier[potcar] = identifier[Potcar] . identifier[from_file] ( identifier[sorted] ( identifier[potcars] )[ literal[int] ])
identifier[names] =[ identifier[sym] . identifier[split] ( literal[string] )[ literal[int] ] keyword[for] identifier[sym] keyword[in] identifier[potcar] . identifier[symbols] ]
[ identifier[get_el_sp] ( identifier[n] ) keyword[for] identifier[n] keyword[in] identifier[names] ]
keyword[except] :
identifier[names] = keyword[None]
keyword[with] identifier[zopen] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] :
keyword[return] identifier[Poscar] . identifier[from_string] ( identifier[f] . identifier[read] (), identifier[names] ,
identifier[read_velocities] = identifier[read_velocities] ) | def from_file(filename, check_for_POTCAR=True, read_velocities=True):
"""
Reads a Poscar from a file.
The code will try its best to determine the elements in the POSCAR in
the following order:
1. If check_for_POTCAR is True, the code will try to check if a POTCAR
is in the same directory as the POSCAR and use elements from that by
default. (This is the VASP default sequence of priority).
2. If the input file is Vasp5-like and contains element symbols in the
6th line, the code will use that if check_for_POTCAR is False or there
is no POTCAR found.
3. Failing (2), the code will check if a symbol is provided at the end
of each coordinate.
If all else fails, the code will just assign the first n elements in
increasing atomic number, where n is the number of species, to the
Poscar. For example, H, He, Li, .... This will ensure at least a
unique element is assigned to each site and any analysis that does not
require specific elemental properties should work fine.
Args:
filename (str): File name containing Poscar data.
check_for_POTCAR (bool): Whether to check if a POTCAR is present
in the same directory as the POSCAR. Defaults to True.
read_velocities (bool): Whether to read or not velocities if they
are present in the POSCAR. Default is True.
Returns:
Poscar object.
"""
dirname = os.path.dirname(os.path.abspath(filename))
names = None
if check_for_POTCAR:
potcars = glob.glob(os.path.join(dirname, '*POTCAR*'))
if potcars:
try:
potcar = Potcar.from_file(sorted(potcars)[0])
names = [sym.split('_')[0] for sym in potcar.symbols]
[get_el_sp(n) for n in names] # ensure valid names # depends on [control=['try'], data=[]]
except:
names = None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
with zopen(filename, 'rt') as f:
return Poscar.from_string(f.read(), names, read_velocities=read_velocities) # depends on [control=['with'], data=['f']] |
def annotation(args):
"""
%prog annotation blastfile > annotations
Create simple two column files from the first two coluns in blastfile. Use
--queryids and --subjectids to switch IDs or descriptions.
"""
from jcvi.formats.base import DictFile
p = OptionParser(annotation.__doc__)
p.add_option("--queryids", help="Query IDS file to switch [default: %default]")
p.add_option("--subjectids", help="Subject IDS file to switch [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
blastfile, = args
d = "\t"
qids = DictFile(opts.queryids, delimiter=d) if opts.queryids else None
sids = DictFile(opts.subjectids, delimiter=d) if opts.subjectids else None
blast = Blast(blastfile)
for b in blast:
query, subject = b.query, b.subject
if qids:
query = qids[query]
if sids:
subject = sids[subject]
print("\t".join((query, subject))) | def function[annotation, parameter[args]]:
constant[
%prog annotation blastfile > annotations
Create simple two column files from the first two coluns in blastfile. Use
--queryids and --subjectids to switch IDs or descriptions.
]
from relative_module[jcvi.formats.base] import module[DictFile]
variable[p] assign[=] call[name[OptionParser], parameter[name[annotation].__doc__]]
call[name[p].add_option, parameter[constant[--queryids]]]
call[name[p].add_option, parameter[constant[--subjectids]]]
<ast.Tuple object at 0x7da1b09033d0> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[1]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b09017e0>]]
<ast.Tuple object at 0x7da1b0854460> assign[=] name[args]
variable[d] assign[=] constant[ ]
variable[qids] assign[=] <ast.IfExp object at 0x7da1b0862ce0>
variable[sids] assign[=] <ast.IfExp object at 0x7da1b086bbe0>
variable[blast] assign[=] call[name[Blast], parameter[name[blastfile]]]
for taget[name[b]] in starred[name[blast]] begin[:]
<ast.Tuple object at 0x7da1b086b880> assign[=] tuple[[<ast.Attribute object at 0x7da1b086ba30>, <ast.Attribute object at 0x7da1b086b7f0>]]
if name[qids] begin[:]
variable[query] assign[=] call[name[qids]][name[query]]
if name[sids] begin[:]
variable[subject] assign[=] call[name[sids]][name[subject]]
call[name[print], parameter[call[constant[ ].join, parameter[tuple[[<ast.Name object at 0x7da1b086b2b0>, <ast.Name object at 0x7da1b086bfa0>]]]]]] | keyword[def] identifier[annotation] ( identifier[args] ):
literal[string]
keyword[from] identifier[jcvi] . identifier[formats] . identifier[base] keyword[import] identifier[DictFile]
identifier[p] = identifier[OptionParser] ( identifier[annotation] . identifier[__doc__] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[help] = literal[string] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[blastfile] ,= identifier[args]
identifier[d] = literal[string]
identifier[qids] = identifier[DictFile] ( identifier[opts] . identifier[queryids] , identifier[delimiter] = identifier[d] ) keyword[if] identifier[opts] . identifier[queryids] keyword[else] keyword[None]
identifier[sids] = identifier[DictFile] ( identifier[opts] . identifier[subjectids] , identifier[delimiter] = identifier[d] ) keyword[if] identifier[opts] . identifier[subjectids] keyword[else] keyword[None]
identifier[blast] = identifier[Blast] ( identifier[blastfile] )
keyword[for] identifier[b] keyword[in] identifier[blast] :
identifier[query] , identifier[subject] = identifier[b] . identifier[query] , identifier[b] . identifier[subject]
keyword[if] identifier[qids] :
identifier[query] = identifier[qids] [ identifier[query] ]
keyword[if] identifier[sids] :
identifier[subject] = identifier[sids] [ identifier[subject] ]
identifier[print] ( literal[string] . identifier[join] (( identifier[query] , identifier[subject] ))) | def annotation(args):
"""
%prog annotation blastfile > annotations
Create simple two column files from the first two coluns in blastfile. Use
--queryids and --subjectids to switch IDs or descriptions.
"""
from jcvi.formats.base import DictFile
p = OptionParser(annotation.__doc__)
p.add_option('--queryids', help='Query IDS file to switch [default: %default]')
p.add_option('--subjectids', help='Subject IDS file to switch [default: %default]')
(opts, args) = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(blastfile,) = args
d = '\t'
qids = DictFile(opts.queryids, delimiter=d) if opts.queryids else None
sids = DictFile(opts.subjectids, delimiter=d) if opts.subjectids else None
blast = Blast(blastfile)
for b in blast:
(query, subject) = (b.query, b.subject)
if qids:
query = qids[query] # depends on [control=['if'], data=[]]
if sids:
subject = sids[subject] # depends on [control=['if'], data=[]]
print('\t'.join((query, subject))) # depends on [control=['for'], data=['b']] |
def new(cls, *args, **kwargs):
"""Create a new instance of this model based on its spec and either
a map or the provided kwargs."""
new = cls(make_default(getattr(cls, 'spec', {})))
new.update(args[0] if args and not kwargs else kwargs)
return new | def function[new, parameter[cls]]:
constant[Create a new instance of this model based on its spec and either
a map or the provided kwargs.]
variable[new] assign[=] call[name[cls], parameter[call[name[make_default], parameter[call[name[getattr], parameter[name[cls], constant[spec], dictionary[[], []]]]]]]]
call[name[new].update, parameter[<ast.IfExp object at 0x7da18f00d240>]]
return[name[new]] | keyword[def] identifier[new] ( identifier[cls] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[new] = identifier[cls] ( identifier[make_default] ( identifier[getattr] ( identifier[cls] , literal[string] ,{})))
identifier[new] . identifier[update] ( identifier[args] [ literal[int] ] keyword[if] identifier[args] keyword[and] keyword[not] identifier[kwargs] keyword[else] identifier[kwargs] )
keyword[return] identifier[new] | def new(cls, *args, **kwargs):
"""Create a new instance of this model based on its spec and either
a map or the provided kwargs."""
new = cls(make_default(getattr(cls, 'spec', {})))
new.update(args[0] if args and (not kwargs) else kwargs)
return new |
def find_count_label(d):
"""Find the member of a set that means "count" or "frequency" or "probability" or "number of occurrences".
"""
for name in COUNT_NAMES:
if name in d:
return name
for name in COUNT_NAMES:
if str(name).lower() in d:
return name | def function[find_count_label, parameter[d]]:
constant[Find the member of a set that means "count" or "frequency" or "probability" or "number of occurrences".
]
for taget[name[name]] in starred[name[COUNT_NAMES]] begin[:]
if compare[name[name] in name[d]] begin[:]
return[name[name]]
for taget[name[name]] in starred[name[COUNT_NAMES]] begin[:]
if compare[call[call[name[str], parameter[name[name]]].lower, parameter[]] in name[d]] begin[:]
return[name[name]] | keyword[def] identifier[find_count_label] ( identifier[d] ):
literal[string]
keyword[for] identifier[name] keyword[in] identifier[COUNT_NAMES] :
keyword[if] identifier[name] keyword[in] identifier[d] :
keyword[return] identifier[name]
keyword[for] identifier[name] keyword[in] identifier[COUNT_NAMES] :
keyword[if] identifier[str] ( identifier[name] ). identifier[lower] () keyword[in] identifier[d] :
keyword[return] identifier[name] | def find_count_label(d):
"""Find the member of a set that means "count" or "frequency" or "probability" or "number of occurrences".
"""
for name in COUNT_NAMES:
if name in d:
return name # depends on [control=['if'], data=['name']] # depends on [control=['for'], data=['name']]
for name in COUNT_NAMES:
if str(name).lower() in d:
return name # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['name']] |
def _build_preconditions_table(self):
'''Builds the local action precondition expressions.'''
self.local_action_preconditions = dict()
self.global_action_preconditions = []
action_fluents = self.action_fluents
for precond in self.preconds:
scope = precond.scope
action_scope = [action for action in scope if action in action_fluents]
if len(action_scope) == 1:
name = action_scope[0]
self.local_action_preconditions[name] = self.local_action_preconditions.get(name, [])
self.local_action_preconditions[name].append(precond)
else:
self.global_action_preconditions.append(precond) | def function[_build_preconditions_table, parameter[self]]:
constant[Builds the local action precondition expressions.]
name[self].local_action_preconditions assign[=] call[name[dict], parameter[]]
name[self].global_action_preconditions assign[=] list[[]]
variable[action_fluents] assign[=] name[self].action_fluents
for taget[name[precond]] in starred[name[self].preconds] begin[:]
variable[scope] assign[=] name[precond].scope
variable[action_scope] assign[=] <ast.ListComp object at 0x7da1b09b65f0>
if compare[call[name[len], parameter[name[action_scope]]] equal[==] constant[1]] begin[:]
variable[name] assign[=] call[name[action_scope]][constant[0]]
call[name[self].local_action_preconditions][name[name]] assign[=] call[name[self].local_action_preconditions.get, parameter[name[name], list[[]]]]
call[call[name[self].local_action_preconditions][name[name]].append, parameter[name[precond]]] | keyword[def] identifier[_build_preconditions_table] ( identifier[self] ):
literal[string]
identifier[self] . identifier[local_action_preconditions] = identifier[dict] ()
identifier[self] . identifier[global_action_preconditions] =[]
identifier[action_fluents] = identifier[self] . identifier[action_fluents]
keyword[for] identifier[precond] keyword[in] identifier[self] . identifier[preconds] :
identifier[scope] = identifier[precond] . identifier[scope]
identifier[action_scope] =[ identifier[action] keyword[for] identifier[action] keyword[in] identifier[scope] keyword[if] identifier[action] keyword[in] identifier[action_fluents] ]
keyword[if] identifier[len] ( identifier[action_scope] )== literal[int] :
identifier[name] = identifier[action_scope] [ literal[int] ]
identifier[self] . identifier[local_action_preconditions] [ identifier[name] ]= identifier[self] . identifier[local_action_preconditions] . identifier[get] ( identifier[name] ,[])
identifier[self] . identifier[local_action_preconditions] [ identifier[name] ]. identifier[append] ( identifier[precond] )
keyword[else] :
identifier[self] . identifier[global_action_preconditions] . identifier[append] ( identifier[precond] ) | def _build_preconditions_table(self):
"""Builds the local action precondition expressions."""
self.local_action_preconditions = dict()
self.global_action_preconditions = []
action_fluents = self.action_fluents
for precond in self.preconds:
scope = precond.scope
action_scope = [action for action in scope if action in action_fluents]
if len(action_scope) == 1:
name = action_scope[0]
self.local_action_preconditions[name] = self.local_action_preconditions.get(name, [])
self.local_action_preconditions[name].append(precond) # depends on [control=['if'], data=[]]
else:
self.global_action_preconditions.append(precond) # depends on [control=['for'], data=['precond']] |
def equals(self, other):
"""
Determine if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if is_object_dtype(self) and not is_object_dtype(other):
# if other is not object, use other's logic for coercion
return other.equals(self)
try:
return array_equivalent(com.values_from_object(self),
com.values_from_object(other))
except Exception:
return False | def function[equals, parameter[self, other]]:
constant[
Determine if two Index objects contain the same elements.
]
if call[name[self].is_, parameter[name[other]]] begin[:]
return[constant[True]]
if <ast.UnaryOp object at 0x7da18fe91de0> begin[:]
return[constant[False]]
if <ast.BoolOp object at 0x7da18fe90e50> begin[:]
return[call[name[other].equals, parameter[name[self]]]]
<ast.Try object at 0x7da18dc9bfa0> | keyword[def] identifier[equals] ( identifier[self] , identifier[other] ):
literal[string]
keyword[if] identifier[self] . identifier[is_] ( identifier[other] ):
keyword[return] keyword[True]
keyword[if] keyword[not] identifier[isinstance] ( identifier[other] , identifier[Index] ):
keyword[return] keyword[False]
keyword[if] identifier[is_object_dtype] ( identifier[self] ) keyword[and] keyword[not] identifier[is_object_dtype] ( identifier[other] ):
keyword[return] identifier[other] . identifier[equals] ( identifier[self] )
keyword[try] :
keyword[return] identifier[array_equivalent] ( identifier[com] . identifier[values_from_object] ( identifier[self] ),
identifier[com] . identifier[values_from_object] ( identifier[other] ))
keyword[except] identifier[Exception] :
keyword[return] keyword[False] | def equals(self, other):
"""
Determine if two Index objects contain the same elements.
"""
if self.is_(other):
return True # depends on [control=['if'], data=[]]
if not isinstance(other, Index):
return False # depends on [control=['if'], data=[]]
if is_object_dtype(self) and (not is_object_dtype(other)):
# if other is not object, use other's logic for coercion
return other.equals(self) # depends on [control=['if'], data=[]]
try:
return array_equivalent(com.values_from_object(self), com.values_from_object(other)) # depends on [control=['try'], data=[]]
except Exception:
return False # depends on [control=['except'], data=[]] |
def read(self, uri):
""" Retrieve the contents of the resource
:param uri: the URI of the resource to be retrieved
:type uri: str
:return: the contents of the resource
:rtype: str
"""
uri = self.__absolute__(uri)
mime, _ = guess_type(uri)
if "image" in mime:
return send_file(uri), mime
else:
with open(uri, "r") as f:
file = f.read()
return file, mime | def function[read, parameter[self, uri]]:
constant[ Retrieve the contents of the resource
:param uri: the URI of the resource to be retrieved
:type uri: str
:return: the contents of the resource
:rtype: str
]
variable[uri] assign[=] call[name[self].__absolute__, parameter[name[uri]]]
<ast.Tuple object at 0x7da1b004b550> assign[=] call[name[guess_type], parameter[name[uri]]]
if compare[constant[image] in name[mime]] begin[:]
return[tuple[[<ast.Call object at 0x7da1b00492d0>, <ast.Name object at 0x7da1b0049780>]]]
return[tuple[[<ast.Name object at 0x7da1b009e290>, <ast.Name object at 0x7da1b009fbb0>]]] | keyword[def] identifier[read] ( identifier[self] , identifier[uri] ):
literal[string]
identifier[uri] = identifier[self] . identifier[__absolute__] ( identifier[uri] )
identifier[mime] , identifier[_] = identifier[guess_type] ( identifier[uri] )
keyword[if] literal[string] keyword[in] identifier[mime] :
keyword[return] identifier[send_file] ( identifier[uri] ), identifier[mime]
keyword[else] :
keyword[with] identifier[open] ( identifier[uri] , literal[string] ) keyword[as] identifier[f] :
identifier[file] = identifier[f] . identifier[read] ()
keyword[return] identifier[file] , identifier[mime] | def read(self, uri):
""" Retrieve the contents of the resource
:param uri: the URI of the resource to be retrieved
:type uri: str
:return: the contents of the resource
:rtype: str
"""
uri = self.__absolute__(uri)
(mime, _) = guess_type(uri)
if 'image' in mime:
return (send_file(uri), mime) # depends on [control=['if'], data=['mime']]
else:
with open(uri, 'r') as f:
file = f.read() # depends on [control=['with'], data=['f']]
return (file, mime) |
def Harrison_Brunner_Hecker(dp, voidage, vs, rho, mu, L=1, Dt=None):
r'''Calculates pressure drop across a packed bed of spheres using a
correlation developed in [1]_, also shown in [2]_. Fourth most accurate
correlation overall in the review of [2]_.
Applies a wall correction if diameter of tube is provided.
.. math::
f_p = \left(119.8A + 4.63B\left(\frac{Re}{1-\epsilon}\right)^{5/6}
\right)\frac{(1-\epsilon)^2}{\epsilon^3 Re}
.. math::
A = \left(1 + \pi \frac{d_p}{6(1-\epsilon)D_t}\right)^2
.. math::
B = 1 - \frac{\pi^2 d_p}{24D_t}\left(1 - \frac{0.5d_p}{D_t}\right)
.. math::
f_p = \frac{\Delta P d_p}{\rho v_s^2 L}
.. math::
Re = \frac{\rho v_s d_p}{\mu}
Parameters
----------
dp : float
Particle diameter of spheres [m]
voidage : float
Void fraction of bed packing [-]
vs : float
Superficial velocity of the fluid (volumetric flow rate/cross-sectional
area)[m/s]
rho : float
Density of the fluid [kg/m^3]
mu : float
Viscosity of the fluid, [Pa*s]
L : float, optional
Length the fluid flows in the packed bed [m]
Dt : float, optional
Diameter of the tube, [m]
Returns
-------
dP : float
Pressure drop across the bed [Pa]
Notes
-----
Uses data from other sources only. Correlation will underestimate pressure
drop if tube diameter is not provided. Limits are specified in [1]_ as:
.. math::
0.72 < Re < 7700 \\
8.3 < d_t/d_p < 50 \\
0.33 < \epsilon < 0.88
Examples
--------
>>> Harrison_Brunner_Hecker(dp=8E-4, voidage=0.4, vs=1E-3, rho=1E3, mu=1E-3, Dt=1E-2)
1255.1625662548427
References
----------
.. [1] KTA. KTA 3102.3 Reactor Core Design of High-Temperature Gas-Cooled
Reactors Part 3: Loss of Pressure through Friction in Pebble Bed Cores.
Germany, 1981.
.. [2] Erdim, Esra, Ömer Akgiray, and İbrahim Demir. "A Revisit of Pressure
Drop-Flow Rate Correlations for Packed Beds of Spheres." Powder
Technology 283 (October 2015): 488-504. doi:10.1016/j.powtec.2015.06.017.
'''
Re = dp*rho*vs/mu
if not Dt:
A, B = 1, 1
else:
A = (1 + pi*dp/(6*(1-voidage)*Dt))**2
B = 1 - pi**2*dp/24/Dt*(1 - dp/(2*Dt))
fp = (119.8*A + 4.63*B*(Re/(1-voidage))**(5/6.))*(1-voidage)**2/(voidage**3*Re)
return fp*rho*vs**2*L/dp | def function[Harrison_Brunner_Hecker, parameter[dp, voidage, vs, rho, mu, L, Dt]]:
constant[Calculates pressure drop across a packed bed of spheres using a
correlation developed in [1]_, also shown in [2]_. Fourth most accurate
correlation overall in the review of [2]_.
Applies a wall correction if diameter of tube is provided.
.. math::
f_p = \left(119.8A + 4.63B\left(\frac{Re}{1-\epsilon}\right)^{5/6}
\right)\frac{(1-\epsilon)^2}{\epsilon^3 Re}
.. math::
A = \left(1 + \pi \frac{d_p}{6(1-\epsilon)D_t}\right)^2
.. math::
B = 1 - \frac{\pi^2 d_p}{24D_t}\left(1 - \frac{0.5d_p}{D_t}\right)
.. math::
f_p = \frac{\Delta P d_p}{\rho v_s^2 L}
.. math::
Re = \frac{\rho v_s d_p}{\mu}
Parameters
----------
dp : float
Particle diameter of spheres [m]
voidage : float
Void fraction of bed packing [-]
vs : float
Superficial velocity of the fluid (volumetric flow rate/cross-sectional
area)[m/s]
rho : float
Density of the fluid [kg/m^3]
mu : float
Viscosity of the fluid, [Pa*s]
L : float, optional
Length the fluid flows in the packed bed [m]
Dt : float, optional
Diameter of the tube, [m]
Returns
-------
dP : float
Pressure drop across the bed [Pa]
Notes
-----
Uses data from other sources only. Correlation will underestimate pressure
drop if tube diameter is not provided. Limits are specified in [1]_ as:
.. math::
0.72 < Re < 7700 \\
8.3 < d_t/d_p < 50 \\
0.33 < \epsilon < 0.88
Examples
--------
>>> Harrison_Brunner_Hecker(dp=8E-4, voidage=0.4, vs=1E-3, rho=1E3, mu=1E-3, Dt=1E-2)
1255.1625662548427
References
----------
.. [1] KTA. KTA 3102.3 Reactor Core Design of High-Temperature Gas-Cooled
Reactors Part 3: Loss of Pressure through Friction in Pebble Bed Cores.
Germany, 1981.
.. [2] Erdim, Esra, Ömer Akgiray, and İbrahim Demir. "A Revisit of Pressure
Drop-Flow Rate Correlations for Packed Beds of Spheres." Powder
Technology 283 (October 2015): 488-504. doi:10.1016/j.powtec.2015.06.017.
]
variable[Re] assign[=] binary_operation[binary_operation[binary_operation[name[dp] * name[rho]] * name[vs]] / name[mu]]
if <ast.UnaryOp object at 0x7da20c6abe80> begin[:]
<ast.Tuple object at 0x7da20c6aa8f0> assign[=] tuple[[<ast.Constant object at 0x7da20c6a8160>, <ast.Constant object at 0x7da20c6a8280>]]
variable[fp] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[constant[119.8] * name[A]] + binary_operation[binary_operation[constant[4.63] * name[B]] * binary_operation[binary_operation[name[Re] / binary_operation[constant[1] - name[voidage]]] ** binary_operation[constant[5] / constant[6.0]]]]] * binary_operation[binary_operation[constant[1] - name[voidage]] ** constant[2]]] / binary_operation[binary_operation[name[voidage] ** constant[3]] * name[Re]]]
return[binary_operation[binary_operation[binary_operation[binary_operation[name[fp] * name[rho]] * binary_operation[name[vs] ** constant[2]]] * name[L]] / name[dp]]] | keyword[def] identifier[Harrison_Brunner_Hecker] ( identifier[dp] , identifier[voidage] , identifier[vs] , identifier[rho] , identifier[mu] , identifier[L] = literal[int] , identifier[Dt] = keyword[None] ):
literal[string]
identifier[Re] = identifier[dp] * identifier[rho] * identifier[vs] / identifier[mu]
keyword[if] keyword[not] identifier[Dt] :
identifier[A] , identifier[B] = literal[int] , literal[int]
keyword[else] :
identifier[A] =( literal[int] + identifier[pi] * identifier[dp] /( literal[int] *( literal[int] - identifier[voidage] )* identifier[Dt] ))** literal[int]
identifier[B] = literal[int] - identifier[pi] ** literal[int] * identifier[dp] / literal[int] / identifier[Dt] *( literal[int] - identifier[dp] /( literal[int] * identifier[Dt] ))
identifier[fp] =( literal[int] * identifier[A] + literal[int] * identifier[B] *( identifier[Re] /( literal[int] - identifier[voidage] ))**( literal[int] / literal[int] ))*( literal[int] - identifier[voidage] )** literal[int] /( identifier[voidage] ** literal[int] * identifier[Re] )
keyword[return] identifier[fp] * identifier[rho] * identifier[vs] ** literal[int] * identifier[L] / identifier[dp] | def Harrison_Brunner_Hecker(dp, voidage, vs, rho, mu, L=1, Dt=None):
"""Calculates pressure drop across a packed bed of spheres using a
correlation developed in [1]_, also shown in [2]_. Fourth most accurate
correlation overall in the review of [2]_.
Applies a wall correction if diameter of tube is provided.
.. math::
f_p = \\left(119.8A + 4.63B\\left(\\frac{Re}{1-\\epsilon}\\right)^{5/6}
\\right)\\frac{(1-\\epsilon)^2}{\\epsilon^3 Re}
.. math::
A = \\left(1 + \\pi \\frac{d_p}{6(1-\\epsilon)D_t}\\right)^2
.. math::
B = 1 - \\frac{\\pi^2 d_p}{24D_t}\\left(1 - \\frac{0.5d_p}{D_t}\\right)
.. math::
f_p = \\frac{\\Delta P d_p}{\\rho v_s^2 L}
.. math::
Re = \\frac{\\rho v_s d_p}{\\mu}
Parameters
----------
dp : float
Particle diameter of spheres [m]
voidage : float
Void fraction of bed packing [-]
vs : float
Superficial velocity of the fluid (volumetric flow rate/cross-sectional
area)[m/s]
rho : float
Density of the fluid [kg/m^3]
mu : float
Viscosity of the fluid, [Pa*s]
L : float, optional
Length the fluid flows in the packed bed [m]
Dt : float, optional
Diameter of the tube, [m]
Returns
-------
dP : float
Pressure drop across the bed [Pa]
Notes
-----
Uses data from other sources only. Correlation will underestimate pressure
drop if tube diameter is not provided. Limits are specified in [1]_ as:
.. math::
0.72 < Re < 7700 \\\\
8.3 < d_t/d_p < 50 \\\\
0.33 < \\epsilon < 0.88
Examples
--------
>>> Harrison_Brunner_Hecker(dp=8E-4, voidage=0.4, vs=1E-3, rho=1E3, mu=1E-3, Dt=1E-2)
1255.1625662548427
References
----------
.. [1] KTA. KTA 3102.3 Reactor Core Design of High-Temperature Gas-Cooled
Reactors Part 3: Loss of Pressure through Friction in Pebble Bed Cores.
Germany, 1981.
.. [2] Erdim, Esra, Ömer Akgiray, and İbrahim Demir. "A Revisit of Pressure
Drop-Flow Rate Correlations for Packed Beds of Spheres." Powder
Technology 283 (October 2015): 488-504. doi:10.1016/j.powtec.2015.06.017.
"""
Re = dp * rho * vs / mu
if not Dt:
(A, B) = (1, 1) # depends on [control=['if'], data=[]]
else:
A = (1 + pi * dp / (6 * (1 - voidage) * Dt)) ** 2
B = 1 - pi ** 2 * dp / 24 / Dt * (1 - dp / (2 * Dt))
fp = (119.8 * A + 4.63 * B * (Re / (1 - voidage)) ** (5 / 6.0)) * (1 - voidage) ** 2 / (voidage ** 3 * Re)
return fp * rho * vs ** 2 * L / dp |
def command(results_dir, result_id):
"""
Print the command that needs to be used to reproduce a result.
"""
campaign = sem.CampaignManager.load(results_dir)
result = campaign.db.get_results(result_id=result_id)[0]
click.echo("Simulation command:")
click.echo(sem.utils.get_command_from_result(campaign.db.get_script(),
result))
click.echo("Debug command:")
click.echo(sem.utils.get_command_from_result(campaign.db.get_script(),
result,
debug=True)) | def function[command, parameter[results_dir, result_id]]:
constant[
Print the command that needs to be used to reproduce a result.
]
variable[campaign] assign[=] call[name[sem].CampaignManager.load, parameter[name[results_dir]]]
variable[result] assign[=] call[call[name[campaign].db.get_results, parameter[]]][constant[0]]
call[name[click].echo, parameter[constant[Simulation command:]]]
call[name[click].echo, parameter[call[name[sem].utils.get_command_from_result, parameter[call[name[campaign].db.get_script, parameter[]], name[result]]]]]
call[name[click].echo, parameter[constant[Debug command:]]]
call[name[click].echo, parameter[call[name[sem].utils.get_command_from_result, parameter[call[name[campaign].db.get_script, parameter[]], name[result]]]]] | keyword[def] identifier[command] ( identifier[results_dir] , identifier[result_id] ):
literal[string]
identifier[campaign] = identifier[sem] . identifier[CampaignManager] . identifier[load] ( identifier[results_dir] )
identifier[result] = identifier[campaign] . identifier[db] . identifier[get_results] ( identifier[result_id] = identifier[result_id] )[ literal[int] ]
identifier[click] . identifier[echo] ( literal[string] )
identifier[click] . identifier[echo] ( identifier[sem] . identifier[utils] . identifier[get_command_from_result] ( identifier[campaign] . identifier[db] . identifier[get_script] (),
identifier[result] ))
identifier[click] . identifier[echo] ( literal[string] )
identifier[click] . identifier[echo] ( identifier[sem] . identifier[utils] . identifier[get_command_from_result] ( identifier[campaign] . identifier[db] . identifier[get_script] (),
identifier[result] ,
identifier[debug] = keyword[True] )) | def command(results_dir, result_id):
"""
Print the command that needs to be used to reproduce a result.
"""
campaign = sem.CampaignManager.load(results_dir)
result = campaign.db.get_results(result_id=result_id)[0]
click.echo('Simulation command:')
click.echo(sem.utils.get_command_from_result(campaign.db.get_script(), result))
click.echo('Debug command:')
click.echo(sem.utils.get_command_from_result(campaign.db.get_script(), result, debug=True)) |
def as_fs(self):
"""
Returns the value of component encoded as formatted string.
Inspect each character in value of component.
Certain nonalpha characters pass thru without escaping
into the result, but most retain escaping.
:returns: Formatted string associated with component
:rtype: string
"""
s = self._standard_value
result = []
idx = 0
while (idx < len(s)):
c = s[idx] # get the idx'th character of s
if c != "\\":
# unquoted characters pass thru unharmed
result.append(c)
else:
# Escaped characters are examined
nextchr = s[idx + 1]
if (nextchr == ".") or (nextchr == "-") or (nextchr == "_"):
# the period, hyphen and underscore pass unharmed
result.append(nextchr)
idx += 1
else:
# all others retain escaping
result.append("\\")
result.append(nextchr)
idx += 2
continue
idx += 1
return "".join(result) | def function[as_fs, parameter[self]]:
constant[
Returns the value of component encoded as formatted string.
Inspect each character in value of component.
Certain nonalpha characters pass thru without escaping
into the result, but most retain escaping.
:returns: Formatted string associated with component
:rtype: string
]
variable[s] assign[=] name[self]._standard_value
variable[result] assign[=] list[[]]
variable[idx] assign[=] constant[0]
while compare[name[idx] less[<] call[name[len], parameter[name[s]]]] begin[:]
variable[c] assign[=] call[name[s]][name[idx]]
if compare[name[c] not_equal[!=] constant[\]] begin[:]
call[name[result].append, parameter[name[c]]]
<ast.AugAssign object at 0x7da1b0f39570>
return[call[constant[].join, parameter[name[result]]]] | keyword[def] identifier[as_fs] ( identifier[self] ):
literal[string]
identifier[s] = identifier[self] . identifier[_standard_value]
identifier[result] =[]
identifier[idx] = literal[int]
keyword[while] ( identifier[idx] < identifier[len] ( identifier[s] )):
identifier[c] = identifier[s] [ identifier[idx] ]
keyword[if] identifier[c] != literal[string] :
identifier[result] . identifier[append] ( identifier[c] )
keyword[else] :
identifier[nextchr] = identifier[s] [ identifier[idx] + literal[int] ]
keyword[if] ( identifier[nextchr] == literal[string] ) keyword[or] ( identifier[nextchr] == literal[string] ) keyword[or] ( identifier[nextchr] == literal[string] ):
identifier[result] . identifier[append] ( identifier[nextchr] )
identifier[idx] += literal[int]
keyword[else] :
identifier[result] . identifier[append] ( literal[string] )
identifier[result] . identifier[append] ( identifier[nextchr] )
identifier[idx] += literal[int]
keyword[continue]
identifier[idx] += literal[int]
keyword[return] literal[string] . identifier[join] ( identifier[result] ) | def as_fs(self):
"""
Returns the value of component encoded as formatted string.
Inspect each character in value of component.
Certain nonalpha characters pass thru without escaping
into the result, but most retain escaping.
:returns: Formatted string associated with component
:rtype: string
"""
s = self._standard_value
result = []
idx = 0
while idx < len(s):
c = s[idx] # get the idx'th character of s
if c != '\\':
# unquoted characters pass thru unharmed
result.append(c) # depends on [control=['if'], data=['c']]
else:
# Escaped characters are examined
nextchr = s[idx + 1]
if nextchr == '.' or nextchr == '-' or nextchr == '_':
# the period, hyphen and underscore pass unharmed
result.append(nextchr)
idx += 1 # depends on [control=['if'], data=[]]
else:
# all others retain escaping
result.append('\\')
result.append(nextchr)
idx += 2
continue
idx += 1 # depends on [control=['while'], data=['idx']]
return ''.join(result) |
def _le_from_ge(self, other):
"""Return a <= b. Computed by @total_ordering from (not a >= b) or (a == b)."""
op_result = self.__ge__(other)
if op_result is NotImplemented:
return NotImplemented
return not op_result or self == other | def function[_le_from_ge, parameter[self, other]]:
constant[Return a <= b. Computed by @total_ordering from (not a >= b) or (a == b).]
variable[op_result] assign[=] call[name[self].__ge__, parameter[name[other]]]
if compare[name[op_result] is name[NotImplemented]] begin[:]
return[name[NotImplemented]]
return[<ast.BoolOp object at 0x7da1b25182e0>] | keyword[def] identifier[_le_from_ge] ( identifier[self] , identifier[other] ):
literal[string]
identifier[op_result] = identifier[self] . identifier[__ge__] ( identifier[other] )
keyword[if] identifier[op_result] keyword[is] identifier[NotImplemented] :
keyword[return] identifier[NotImplemented]
keyword[return] keyword[not] identifier[op_result] keyword[or] identifier[self] == identifier[other] | def _le_from_ge(self, other):
"""Return a <= b. Computed by @total_ordering from (not a >= b) or (a == b)."""
op_result = self.__ge__(other)
if op_result is NotImplemented:
return NotImplemented # depends on [control=['if'], data=['NotImplemented']]
return not op_result or self == other |
def get_absolute_url(self, domain=False):
" Get object's URL. "
category = self.category
kwargs = {
'slug': self.slug,
}
if self.static:
kwargs['id'] = self.pk
if category.tree_parent_id:
kwargs['category'] = category.tree_path
url = reverse('static_detail', kwargs=kwargs)
else:
url = reverse('home_static_detail', kwargs=kwargs)
else:
publish_from = localize(self.publish_from)
kwargs.update({
'year': publish_from.year,
'month': publish_from.month,
'day': publish_from.day,
})
if category.tree_parent_id:
kwargs['category'] = category.tree_path
url = reverse('object_detail', kwargs=kwargs)
else:
url = reverse('home_object_detail', kwargs=kwargs)
if category.site_id != settings.SITE_ID or domain:
return 'http://' + category.site.domain + url
return url | def function[get_absolute_url, parameter[self, domain]]:
constant[ Get object's URL. ]
variable[category] assign[=] name[self].category
variable[kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da2054a4d00>], [<ast.Attribute object at 0x7da2054a7d30>]]
if name[self].static begin[:]
call[name[kwargs]][constant[id]] assign[=] name[self].pk
if name[category].tree_parent_id begin[:]
call[name[kwargs]][constant[category]] assign[=] name[category].tree_path
variable[url] assign[=] call[name[reverse], parameter[constant[static_detail]]]
if <ast.BoolOp object at 0x7da2054a6e60> begin[:]
return[binary_operation[binary_operation[constant[http://] + name[category].site.domain] + name[url]]]
return[name[url]] | keyword[def] identifier[get_absolute_url] ( identifier[self] , identifier[domain] = keyword[False] ):
literal[string]
identifier[category] = identifier[self] . identifier[category]
identifier[kwargs] ={
literal[string] : identifier[self] . identifier[slug] ,
}
keyword[if] identifier[self] . identifier[static] :
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[pk]
keyword[if] identifier[category] . identifier[tree_parent_id] :
identifier[kwargs] [ literal[string] ]= identifier[category] . identifier[tree_path]
identifier[url] = identifier[reverse] ( literal[string] , identifier[kwargs] = identifier[kwargs] )
keyword[else] :
identifier[url] = identifier[reverse] ( literal[string] , identifier[kwargs] = identifier[kwargs] )
keyword[else] :
identifier[publish_from] = identifier[localize] ( identifier[self] . identifier[publish_from] )
identifier[kwargs] . identifier[update] ({
literal[string] : identifier[publish_from] . identifier[year] ,
literal[string] : identifier[publish_from] . identifier[month] ,
literal[string] : identifier[publish_from] . identifier[day] ,
})
keyword[if] identifier[category] . identifier[tree_parent_id] :
identifier[kwargs] [ literal[string] ]= identifier[category] . identifier[tree_path]
identifier[url] = identifier[reverse] ( literal[string] , identifier[kwargs] = identifier[kwargs] )
keyword[else] :
identifier[url] = identifier[reverse] ( literal[string] , identifier[kwargs] = identifier[kwargs] )
keyword[if] identifier[category] . identifier[site_id] != identifier[settings] . identifier[SITE_ID] keyword[or] identifier[domain] :
keyword[return] literal[string] + identifier[category] . identifier[site] . identifier[domain] + identifier[url]
keyword[return] identifier[url] | def get_absolute_url(self, domain=False):
""" Get object's URL. """
category = self.category
kwargs = {'slug': self.slug}
if self.static:
kwargs['id'] = self.pk
if category.tree_parent_id:
kwargs['category'] = category.tree_path
url = reverse('static_detail', kwargs=kwargs) # depends on [control=['if'], data=[]]
else:
url = reverse('home_static_detail', kwargs=kwargs) # depends on [control=['if'], data=[]]
else:
publish_from = localize(self.publish_from)
kwargs.update({'year': publish_from.year, 'month': publish_from.month, 'day': publish_from.day})
if category.tree_parent_id:
kwargs['category'] = category.tree_path
url = reverse('object_detail', kwargs=kwargs) # depends on [control=['if'], data=[]]
else:
url = reverse('home_object_detail', kwargs=kwargs)
if category.site_id != settings.SITE_ID or domain:
return 'http://' + category.site.domain + url # depends on [control=['if'], data=[]]
return url |
def create_param_info(task_params, parameter_map):
"""
Builds the code block for the GPTool GetParameterInfo method based on the input task_params.
:param task_params: A list of task parameters to map to GPTool parameters.
:return: A string representing the code block to the GPTool GetParameterInfo method.
"""
gp_params = []
gp_param_list = []
gp_param_idx_list = []
gp_param_idx = 0
for task_param in task_params:
# Setup to gp_param dictionary used to substitute against the parameter info template.
gp_param = {}
# Convert DataType
data_type = task_param['type'].upper()
if 'dimensions' in task_param:
if len(task_param['dimensions'].split(',')) > 1:
raise UnknownDataTypeError('Only one-dimensional arrays are supported.')
data_type += 'ARRAY'
if data_type in parameter_map:
gp_param['dataType'] = parameter_map[data_type].data_type
else:
# No Mapping exists for this data type!
raise UnknownDataTypeError('Unable to map task datatype: ' +
data_type +
'. A template must be created.')
gp_param['name'] = task_param['name']
gp_param['displayName'] = task_param['display_name']
gp_param['direction'] = _DIRECTION_MAP[task_param['direction']]
gp_param['paramType'] = 'Required' if task_param['required'] else 'Optional'
# ENVI/IDL output type translates to a derived output type in Arc
if gp_param['direction'] is 'Output':
gp_param['paramType'] = 'Derived'
gp_param['multiValue'] = True if 'dimensions' in task_param else False
# Substitute values into the template
gp_params.append(parameter_map[data_type].get_parameter(task_param).substitute(gp_param))
# Convert the default value
if 'default_value' in task_param:
gp_param['defaultValue'] = task_param['default_value']
gp_params.append(parameter_map[data_type].default_value().substitute(gp_param))
# Convert any choicelist
if 'choice_list' in task_param:
gp_param['choiceList'] = task_param['choice_list']
gp_params.append(_CHOICELIST_TEMPLATE.substitute(gp_param))
# Construct the parameter list and indicies for future reference
for param_name in parameter_map[data_type].parameter_names(task_param):
gp_param_list.append(param_name.substitute(gp_param))
gp_param_idx_list.append(_PARAM_INDEX_TEMPLATE.substitute(
{'name': param_name.substitute(gp_param),
'idx': gp_param_idx}))
gp_param_idx += 1
# Construct the final parameter string
gp_params.append(_PARAM_RETURN_TEMPLATE.substitute({'paramList': convert_list(gp_param_list)}))
return ''.join((''.join(gp_params), ''.join(gp_param_idx_list))) | def function[create_param_info, parameter[task_params, parameter_map]]:
constant[
Builds the code block for the GPTool GetParameterInfo method based on the input task_params.
:param task_params: A list of task parameters to map to GPTool parameters.
:return: A string representing the code block to the GPTool GetParameterInfo method.
]
variable[gp_params] assign[=] list[[]]
variable[gp_param_list] assign[=] list[[]]
variable[gp_param_idx_list] assign[=] list[[]]
variable[gp_param_idx] assign[=] constant[0]
for taget[name[task_param]] in starred[name[task_params]] begin[:]
variable[gp_param] assign[=] dictionary[[], []]
variable[data_type] assign[=] call[call[name[task_param]][constant[type]].upper, parameter[]]
if compare[constant[dimensions] in name[task_param]] begin[:]
if compare[call[name[len], parameter[call[call[name[task_param]][constant[dimensions]].split, parameter[constant[,]]]]] greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da207f01d20>
<ast.AugAssign object at 0x7da207f03fa0>
if compare[name[data_type] in name[parameter_map]] begin[:]
call[name[gp_param]][constant[dataType]] assign[=] call[name[parameter_map]][name[data_type]].data_type
call[name[gp_param]][constant[name]] assign[=] call[name[task_param]][constant[name]]
call[name[gp_param]][constant[displayName]] assign[=] call[name[task_param]][constant[display_name]]
call[name[gp_param]][constant[direction]] assign[=] call[name[_DIRECTION_MAP]][call[name[task_param]][constant[direction]]]
call[name[gp_param]][constant[paramType]] assign[=] <ast.IfExp object at 0x7da207f01330>
if compare[call[name[gp_param]][constant[direction]] is constant[Output]] begin[:]
call[name[gp_param]][constant[paramType]] assign[=] constant[Derived]
call[name[gp_param]][constant[multiValue]] assign[=] <ast.IfExp object at 0x7da20c6c7f10>
call[name[gp_params].append, parameter[call[call[call[name[parameter_map]][name[data_type]].get_parameter, parameter[name[task_param]]].substitute, parameter[name[gp_param]]]]]
if compare[constant[default_value] in name[task_param]] begin[:]
call[name[gp_param]][constant[defaultValue]] assign[=] call[name[task_param]][constant[default_value]]
call[name[gp_params].append, parameter[call[call[call[name[parameter_map]][name[data_type]].default_value, parameter[]].substitute, parameter[name[gp_param]]]]]
if compare[constant[choice_list] in name[task_param]] begin[:]
call[name[gp_param]][constant[choiceList]] assign[=] call[name[task_param]][constant[choice_list]]
call[name[gp_params].append, parameter[call[name[_CHOICELIST_TEMPLATE].substitute, parameter[name[gp_param]]]]]
for taget[name[param_name]] in starred[call[call[name[parameter_map]][name[data_type]].parameter_names, parameter[name[task_param]]]] begin[:]
call[name[gp_param_list].append, parameter[call[name[param_name].substitute, parameter[name[gp_param]]]]]
call[name[gp_param_idx_list].append, parameter[call[name[_PARAM_INDEX_TEMPLATE].substitute, parameter[dictionary[[<ast.Constant object at 0x7da20c6c4df0>, <ast.Constant object at 0x7da20c6c5fc0>], [<ast.Call object at 0x7da20c6c6320>, <ast.Name object at 0x7da20c6c4b20>]]]]]]
<ast.AugAssign object at 0x7da20c6c5450>
call[name[gp_params].append, parameter[call[name[_PARAM_RETURN_TEMPLATE].substitute, parameter[dictionary[[<ast.Constant object at 0x7da20c6c4550>], [<ast.Call object at 0x7da20c6c5e10>]]]]]]
return[call[constant[].join, parameter[tuple[[<ast.Call object at 0x7da20c6c7760>, <ast.Call object at 0x7da20c6c6a40>]]]]] | keyword[def] identifier[create_param_info] ( identifier[task_params] , identifier[parameter_map] ):
literal[string]
identifier[gp_params] =[]
identifier[gp_param_list] =[]
identifier[gp_param_idx_list] =[]
identifier[gp_param_idx] = literal[int]
keyword[for] identifier[task_param] keyword[in] identifier[task_params] :
identifier[gp_param] ={}
identifier[data_type] = identifier[task_param] [ literal[string] ]. identifier[upper] ()
keyword[if] literal[string] keyword[in] identifier[task_param] :
keyword[if] identifier[len] ( identifier[task_param] [ literal[string] ]. identifier[split] ( literal[string] ))> literal[int] :
keyword[raise] identifier[UnknownDataTypeError] ( literal[string] )
identifier[data_type] += literal[string]
keyword[if] identifier[data_type] keyword[in] identifier[parameter_map] :
identifier[gp_param] [ literal[string] ]= identifier[parameter_map] [ identifier[data_type] ]. identifier[data_type]
keyword[else] :
keyword[raise] identifier[UnknownDataTypeError] ( literal[string] +
identifier[data_type] +
literal[string] )
identifier[gp_param] [ literal[string] ]= identifier[task_param] [ literal[string] ]
identifier[gp_param] [ literal[string] ]= identifier[task_param] [ literal[string] ]
identifier[gp_param] [ literal[string] ]= identifier[_DIRECTION_MAP] [ identifier[task_param] [ literal[string] ]]
identifier[gp_param] [ literal[string] ]= literal[string] keyword[if] identifier[task_param] [ literal[string] ] keyword[else] literal[string]
keyword[if] identifier[gp_param] [ literal[string] ] keyword[is] literal[string] :
identifier[gp_param] [ literal[string] ]= literal[string]
identifier[gp_param] [ literal[string] ]= keyword[True] keyword[if] literal[string] keyword[in] identifier[task_param] keyword[else] keyword[False]
identifier[gp_params] . identifier[append] ( identifier[parameter_map] [ identifier[data_type] ]. identifier[get_parameter] ( identifier[task_param] ). identifier[substitute] ( identifier[gp_param] ))
keyword[if] literal[string] keyword[in] identifier[task_param] :
identifier[gp_param] [ literal[string] ]= identifier[task_param] [ literal[string] ]
identifier[gp_params] . identifier[append] ( identifier[parameter_map] [ identifier[data_type] ]. identifier[default_value] (). identifier[substitute] ( identifier[gp_param] ))
keyword[if] literal[string] keyword[in] identifier[task_param] :
identifier[gp_param] [ literal[string] ]= identifier[task_param] [ literal[string] ]
identifier[gp_params] . identifier[append] ( identifier[_CHOICELIST_TEMPLATE] . identifier[substitute] ( identifier[gp_param] ))
keyword[for] identifier[param_name] keyword[in] identifier[parameter_map] [ identifier[data_type] ]. identifier[parameter_names] ( identifier[task_param] ):
identifier[gp_param_list] . identifier[append] ( identifier[param_name] . identifier[substitute] ( identifier[gp_param] ))
identifier[gp_param_idx_list] . identifier[append] ( identifier[_PARAM_INDEX_TEMPLATE] . identifier[substitute] (
{ literal[string] : identifier[param_name] . identifier[substitute] ( identifier[gp_param] ),
literal[string] : identifier[gp_param_idx] }))
identifier[gp_param_idx] += literal[int]
identifier[gp_params] . identifier[append] ( identifier[_PARAM_RETURN_TEMPLATE] . identifier[substitute] ({ literal[string] : identifier[convert_list] ( identifier[gp_param_list] )}))
keyword[return] literal[string] . identifier[join] (( literal[string] . identifier[join] ( identifier[gp_params] ), literal[string] . identifier[join] ( identifier[gp_param_idx_list] ))) | def create_param_info(task_params, parameter_map):
"""
Builds the code block for the GPTool GetParameterInfo method based on the input task_params.
:param task_params: A list of task parameters to map to GPTool parameters.
:return: A string representing the code block to the GPTool GetParameterInfo method.
"""
gp_params = []
gp_param_list = []
gp_param_idx_list = []
gp_param_idx = 0
for task_param in task_params:
# Setup to gp_param dictionary used to substitute against the parameter info template.
gp_param = {}
# Convert DataType
data_type = task_param['type'].upper()
if 'dimensions' in task_param:
if len(task_param['dimensions'].split(',')) > 1:
raise UnknownDataTypeError('Only one-dimensional arrays are supported.') # depends on [control=['if'], data=[]]
data_type += 'ARRAY' # depends on [control=['if'], data=['task_param']]
if data_type in parameter_map:
gp_param['dataType'] = parameter_map[data_type].data_type # depends on [control=['if'], data=['data_type', 'parameter_map']]
else:
# No Mapping exists for this data type!
raise UnknownDataTypeError('Unable to map task datatype: ' + data_type + '. A template must be created.')
gp_param['name'] = task_param['name']
gp_param['displayName'] = task_param['display_name']
gp_param['direction'] = _DIRECTION_MAP[task_param['direction']]
gp_param['paramType'] = 'Required' if task_param['required'] else 'Optional'
# ENVI/IDL output type translates to a derived output type in Arc
if gp_param['direction'] is 'Output':
gp_param['paramType'] = 'Derived' # depends on [control=['if'], data=[]]
gp_param['multiValue'] = True if 'dimensions' in task_param else False
# Substitute values into the template
gp_params.append(parameter_map[data_type].get_parameter(task_param).substitute(gp_param))
# Convert the default value
if 'default_value' in task_param:
gp_param['defaultValue'] = task_param['default_value']
gp_params.append(parameter_map[data_type].default_value().substitute(gp_param)) # depends on [control=['if'], data=['task_param']]
# Convert any choicelist
if 'choice_list' in task_param:
gp_param['choiceList'] = task_param['choice_list']
gp_params.append(_CHOICELIST_TEMPLATE.substitute(gp_param)) # depends on [control=['if'], data=['task_param']]
# Construct the parameter list and indicies for future reference
for param_name in parameter_map[data_type].parameter_names(task_param):
gp_param_list.append(param_name.substitute(gp_param))
gp_param_idx_list.append(_PARAM_INDEX_TEMPLATE.substitute({'name': param_name.substitute(gp_param), 'idx': gp_param_idx}))
gp_param_idx += 1 # depends on [control=['for'], data=['param_name']] # depends on [control=['for'], data=['task_param']]
# Construct the final parameter string
gp_params.append(_PARAM_RETURN_TEMPLATE.substitute({'paramList': convert_list(gp_param_list)}))
return ''.join((''.join(gp_params), ''.join(gp_param_idx_list))) |
def _clone(self):
"""Make a (shallow) copy of the set.
There is a 'clone protocol' that subclasses of this class
should use. To make a copy, first call your super's _clone()
method, and use the object returned as the new instance. Then
make shallow copies of the attributes defined in the subclass.
This protocol allows us to write the set algorithms that
return new instances (e.g. union) once, and keep using them in
subclasses.
"""
cls = self.__class__
obj = cls.__new__(cls)
obj.items = list(self.items)
return obj | def function[_clone, parameter[self]]:
constant[Make a (shallow) copy of the set.
There is a 'clone protocol' that subclasses of this class
should use. To make a copy, first call your super's _clone()
method, and use the object returned as the new instance. Then
make shallow copies of the attributes defined in the subclass.
This protocol allows us to write the set algorithms that
return new instances (e.g. union) once, and keep using them in
subclasses.
]
variable[cls] assign[=] name[self].__class__
variable[obj] assign[=] call[name[cls].__new__, parameter[name[cls]]]
name[obj].items assign[=] call[name[list], parameter[name[self].items]]
return[name[obj]] | keyword[def] identifier[_clone] ( identifier[self] ):
literal[string]
identifier[cls] = identifier[self] . identifier[__class__]
identifier[obj] = identifier[cls] . identifier[__new__] ( identifier[cls] )
identifier[obj] . identifier[items] = identifier[list] ( identifier[self] . identifier[items] )
keyword[return] identifier[obj] | def _clone(self):
"""Make a (shallow) copy of the set.
There is a 'clone protocol' that subclasses of this class
should use. To make a copy, first call your super's _clone()
method, and use the object returned as the new instance. Then
make shallow copies of the attributes defined in the subclass.
This protocol allows us to write the set algorithms that
return new instances (e.g. union) once, and keep using them in
subclasses.
"""
cls = self.__class__
obj = cls.__new__(cls)
obj.items = list(self.items)
return obj |
def default_instance(cls):
"""
For use like a singleton, return the existing instance of the object
or a new instance
"""
if cls._instance is None:
with cls._instance_lock:
if cls._instance is None:
cls._instance = MessageDispatcher()
return cls._instance | def function[default_instance, parameter[cls]]:
constant[
For use like a singleton, return the existing instance of the object
or a new instance
]
if compare[name[cls]._instance is constant[None]] begin[:]
with name[cls]._instance_lock begin[:]
if compare[name[cls]._instance is constant[None]] begin[:]
name[cls]._instance assign[=] call[name[MessageDispatcher], parameter[]]
return[name[cls]._instance] | keyword[def] identifier[default_instance] ( identifier[cls] ):
literal[string]
keyword[if] identifier[cls] . identifier[_instance] keyword[is] keyword[None] :
keyword[with] identifier[cls] . identifier[_instance_lock] :
keyword[if] identifier[cls] . identifier[_instance] keyword[is] keyword[None] :
identifier[cls] . identifier[_instance] = identifier[MessageDispatcher] ()
keyword[return] identifier[cls] . identifier[_instance] | def default_instance(cls):
"""
For use like a singleton, return the existing instance of the object
or a new instance
"""
if cls._instance is None:
with cls._instance_lock:
if cls._instance is None:
cls._instance = MessageDispatcher() # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
return cls._instance |
def clipped_zoom(img, zoom_factor):
"""Zoom image with clipping.
Zoom the central part of the image and clip extra pixels.
Args:
img: numpy array, uncorrupted image.
zoom_factor: numpy array, a sequence of float numbers for zoom factor.
Returns:
numpy array, zoomed image after clipping.
"""
h = img.shape[0]
ch = int(np.ceil(h / float(zoom_factor)))
top_h = (h - ch) // 2
w = img.shape[1]
cw = int(np.ceil(w / float(zoom_factor)))
top_w = (w - cw) // 2
img = tfds.core.lazy_imports.scipy.ndimage.zoom(
img[top_h:top_h + ch, top_w:top_w + cw], (zoom_factor, zoom_factor, 1),
order=1)
# trim off any extra pixels
trim_top_h = (img.shape[0] - h) // 2
trim_top_w = (img.shape[1] - w) // 2
return img[trim_top_h:trim_top_h + h, trim_top_w:trim_top_w + w] | def function[clipped_zoom, parameter[img, zoom_factor]]:
constant[Zoom image with clipping.
Zoom the central part of the image and clip extra pixels.
Args:
img: numpy array, uncorrupted image.
zoom_factor: numpy array, a sequence of float numbers for zoom factor.
Returns:
numpy array, zoomed image after clipping.
]
variable[h] assign[=] call[name[img].shape][constant[0]]
variable[ch] assign[=] call[name[int], parameter[call[name[np].ceil, parameter[binary_operation[name[h] / call[name[float], parameter[name[zoom_factor]]]]]]]]
variable[top_h] assign[=] binary_operation[binary_operation[name[h] - name[ch]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]
variable[w] assign[=] call[name[img].shape][constant[1]]
variable[cw] assign[=] call[name[int], parameter[call[name[np].ceil, parameter[binary_operation[name[w] / call[name[float], parameter[name[zoom_factor]]]]]]]]
variable[top_w] assign[=] binary_operation[binary_operation[name[w] - name[cw]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]
variable[img] assign[=] call[name[tfds].core.lazy_imports.scipy.ndimage.zoom, parameter[call[name[img]][tuple[[<ast.Slice object at 0x7da1b2063e20>, <ast.Slice object at 0x7da1b20606a0>]]], tuple[[<ast.Name object at 0x7da1b2061330>, <ast.Name object at 0x7da1b2061090>, <ast.Constant object at 0x7da1b20610f0>]]]]
variable[trim_top_h] assign[=] binary_operation[binary_operation[call[name[img].shape][constant[0]] - name[h]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]
variable[trim_top_w] assign[=] binary_operation[binary_operation[call[name[img].shape][constant[1]] - name[w]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]
return[call[name[img]][tuple[[<ast.Slice object at 0x7da1b20614e0>, <ast.Slice object at 0x7da1b2060f40>]]]] | keyword[def] identifier[clipped_zoom] ( identifier[img] , identifier[zoom_factor] ):
literal[string]
identifier[h] = identifier[img] . identifier[shape] [ literal[int] ]
identifier[ch] = identifier[int] ( identifier[np] . identifier[ceil] ( identifier[h] / identifier[float] ( identifier[zoom_factor] )))
identifier[top_h] =( identifier[h] - identifier[ch] )// literal[int]
identifier[w] = identifier[img] . identifier[shape] [ literal[int] ]
identifier[cw] = identifier[int] ( identifier[np] . identifier[ceil] ( identifier[w] / identifier[float] ( identifier[zoom_factor] )))
identifier[top_w] =( identifier[w] - identifier[cw] )// literal[int]
identifier[img] = identifier[tfds] . identifier[core] . identifier[lazy_imports] . identifier[scipy] . identifier[ndimage] . identifier[zoom] (
identifier[img] [ identifier[top_h] : identifier[top_h] + identifier[ch] , identifier[top_w] : identifier[top_w] + identifier[cw] ],( identifier[zoom_factor] , identifier[zoom_factor] , literal[int] ),
identifier[order] = literal[int] )
identifier[trim_top_h] =( identifier[img] . identifier[shape] [ literal[int] ]- identifier[h] )// literal[int]
identifier[trim_top_w] =( identifier[img] . identifier[shape] [ literal[int] ]- identifier[w] )// literal[int]
keyword[return] identifier[img] [ identifier[trim_top_h] : identifier[trim_top_h] + identifier[h] , identifier[trim_top_w] : identifier[trim_top_w] + identifier[w] ] | def clipped_zoom(img, zoom_factor):
"""Zoom image with clipping.
Zoom the central part of the image and clip extra pixels.
Args:
img: numpy array, uncorrupted image.
zoom_factor: numpy array, a sequence of float numbers for zoom factor.
Returns:
numpy array, zoomed image after clipping.
"""
h = img.shape[0]
ch = int(np.ceil(h / float(zoom_factor)))
top_h = (h - ch) // 2
w = img.shape[1]
cw = int(np.ceil(w / float(zoom_factor)))
top_w = (w - cw) // 2
img = tfds.core.lazy_imports.scipy.ndimage.zoom(img[top_h:top_h + ch, top_w:top_w + cw], (zoom_factor, zoom_factor, 1), order=1)
# trim off any extra pixels
trim_top_h = (img.shape[0] - h) // 2
trim_top_w = (img.shape[1] - w) // 2
return img[trim_top_h:trim_top_h + h, trim_top_w:trim_top_w + w] |
def get_valid_indentations(self, idx):
"""Returns the valid offsets for the token at the given position."""
# The closing brace on a dict or the 'for' in a dict comprehension may
# reset two indent levels because the dict value is ended implicitly
stack_top = -1
if (
self._tokens.token(idx) in ("}", "for")
and self._cont_stack[-1].token == ":"
):
stack_top = -2
indent = self._cont_stack[stack_top]
if self._tokens.token(idx) in _CLOSING_BRACKETS:
valid_indentations = indent.valid_outdent_strings
else:
valid_indentations = indent.valid_continuation_strings
return indent, valid_indentations.copy() | def function[get_valid_indentations, parameter[self, idx]]:
constant[Returns the valid offsets for the token at the given position.]
variable[stack_top] assign[=] <ast.UnaryOp object at 0x7da1b024ecb0>
if <ast.BoolOp object at 0x7da1b024e170> begin[:]
variable[stack_top] assign[=] <ast.UnaryOp object at 0x7da1b024f910>
variable[indent] assign[=] call[name[self]._cont_stack][name[stack_top]]
if compare[call[name[self]._tokens.token, parameter[name[idx]]] in name[_CLOSING_BRACKETS]] begin[:]
variable[valid_indentations] assign[=] name[indent].valid_outdent_strings
return[tuple[[<ast.Name object at 0x7da1b024f880>, <ast.Call object at 0x7da1b024f8b0>]]] | keyword[def] identifier[get_valid_indentations] ( identifier[self] , identifier[idx] ):
literal[string]
identifier[stack_top] =- literal[int]
keyword[if] (
identifier[self] . identifier[_tokens] . identifier[token] ( identifier[idx] ) keyword[in] ( literal[string] , literal[string] )
keyword[and] identifier[self] . identifier[_cont_stack] [- literal[int] ]. identifier[token] == literal[string]
):
identifier[stack_top] =- literal[int]
identifier[indent] = identifier[self] . identifier[_cont_stack] [ identifier[stack_top] ]
keyword[if] identifier[self] . identifier[_tokens] . identifier[token] ( identifier[idx] ) keyword[in] identifier[_CLOSING_BRACKETS] :
identifier[valid_indentations] = identifier[indent] . identifier[valid_outdent_strings]
keyword[else] :
identifier[valid_indentations] = identifier[indent] . identifier[valid_continuation_strings]
keyword[return] identifier[indent] , identifier[valid_indentations] . identifier[copy] () | def get_valid_indentations(self, idx):
"""Returns the valid offsets for the token at the given position."""
# The closing brace on a dict or the 'for' in a dict comprehension may
# reset two indent levels because the dict value is ended implicitly
stack_top = -1
if self._tokens.token(idx) in ('}', 'for') and self._cont_stack[-1].token == ':':
stack_top = -2 # depends on [control=['if'], data=[]]
indent = self._cont_stack[stack_top]
if self._tokens.token(idx) in _CLOSING_BRACKETS:
valid_indentations = indent.valid_outdent_strings # depends on [control=['if'], data=[]]
else:
valid_indentations = indent.valid_continuation_strings
return (indent, valid_indentations.copy()) |
def add(self, pk, quantity=1, **kwargs):
"""Add an item to the cart.
If the item is already in the cart, then its quantity will be
increased by `quantity` units.
Parameters
----------
pk : str or int
The primary key of the item.
quantity : int-convertible
A number of units of to add.
**kwargs
Extra keyword arguments to pass to the item class
constructor.
Raises
------
ItemNotInDatabase
NegativeItemQuantity
NonConvertibleItemQuantity
TooLargeItemQuantity
ZeroItemQuantity
"""
pk = str(pk)
if pk in self.items:
existing_item = self.items[pk]
existing_item.quantity += _clean_quantity(quantity)
else:
queryset = self.get_queryset([pk])
try:
obj = queryset[0]
except IndexError:
raise ItemNotInDatabase(pk=pk)
obj = self.process_object(obj)
self.items[pk] = self.item_class(obj, quantity, **kwargs)
self.update() | def function[add, parameter[self, pk, quantity]]:
constant[Add an item to the cart.
If the item is already in the cart, then its quantity will be
increased by `quantity` units.
Parameters
----------
pk : str or int
The primary key of the item.
quantity : int-convertible
A number of units of to add.
**kwargs
Extra keyword arguments to pass to the item class
constructor.
Raises
------
ItemNotInDatabase
NegativeItemQuantity
NonConvertibleItemQuantity
TooLargeItemQuantity
ZeroItemQuantity
]
variable[pk] assign[=] call[name[str], parameter[name[pk]]]
if compare[name[pk] in name[self].items] begin[:]
variable[existing_item] assign[=] call[name[self].items][name[pk]]
<ast.AugAssign object at 0x7da18f58cca0>
call[name[self].update, parameter[]] | keyword[def] identifier[add] ( identifier[self] , identifier[pk] , identifier[quantity] = literal[int] ,** identifier[kwargs] ):
literal[string]
identifier[pk] = identifier[str] ( identifier[pk] )
keyword[if] identifier[pk] keyword[in] identifier[self] . identifier[items] :
identifier[existing_item] = identifier[self] . identifier[items] [ identifier[pk] ]
identifier[existing_item] . identifier[quantity] += identifier[_clean_quantity] ( identifier[quantity] )
keyword[else] :
identifier[queryset] = identifier[self] . identifier[get_queryset] ([ identifier[pk] ])
keyword[try] :
identifier[obj] = identifier[queryset] [ literal[int] ]
keyword[except] identifier[IndexError] :
keyword[raise] identifier[ItemNotInDatabase] ( identifier[pk] = identifier[pk] )
identifier[obj] = identifier[self] . identifier[process_object] ( identifier[obj] )
identifier[self] . identifier[items] [ identifier[pk] ]= identifier[self] . identifier[item_class] ( identifier[obj] , identifier[quantity] ,** identifier[kwargs] )
identifier[self] . identifier[update] () | def add(self, pk, quantity=1, **kwargs):
"""Add an item to the cart.
If the item is already in the cart, then its quantity will be
increased by `quantity` units.
Parameters
----------
pk : str or int
The primary key of the item.
quantity : int-convertible
A number of units of to add.
**kwargs
Extra keyword arguments to pass to the item class
constructor.
Raises
------
ItemNotInDatabase
NegativeItemQuantity
NonConvertibleItemQuantity
TooLargeItemQuantity
ZeroItemQuantity
"""
pk = str(pk)
if pk in self.items:
existing_item = self.items[pk]
existing_item.quantity += _clean_quantity(quantity) # depends on [control=['if'], data=['pk']]
else:
queryset = self.get_queryset([pk])
try:
obj = queryset[0] # depends on [control=['try'], data=[]]
except IndexError:
raise ItemNotInDatabase(pk=pk) # depends on [control=['except'], data=[]]
obj = self.process_object(obj)
self.items[pk] = self.item_class(obj, quantity, **kwargs)
self.update() |
def statistical_distances(samples1, samples2, earth_mover_dist=True,
energy_dist=True):
"""Compute measures of the statistical distance between samples.
Parameters
----------
samples1: 1d array
samples2: 1d array
earth_mover_dist: bool, optional
Whether or not to compute the Earth mover's distance between the
samples.
energy_dist: bool, optional
Whether or not to compute the energy distance between the samples.
Returns
-------
1d array
"""
out = []
temp = scipy.stats.ks_2samp(samples1, samples2)
out.append(temp.pvalue)
out.append(temp.statistic)
if earth_mover_dist:
out.append(scipy.stats.wasserstein_distance(samples1, samples2))
if energy_dist:
out.append(scipy.stats.energy_distance(samples1, samples2))
return np.asarray(out) | def function[statistical_distances, parameter[samples1, samples2, earth_mover_dist, energy_dist]]:
constant[Compute measures of the statistical distance between samples.
Parameters
----------
samples1: 1d array
samples2: 1d array
earth_mover_dist: bool, optional
Whether or not to compute the Earth mover's distance between the
samples.
energy_dist: bool, optional
Whether or not to compute the energy distance between the samples.
Returns
-------
1d array
]
variable[out] assign[=] list[[]]
variable[temp] assign[=] call[name[scipy].stats.ks_2samp, parameter[name[samples1], name[samples2]]]
call[name[out].append, parameter[name[temp].pvalue]]
call[name[out].append, parameter[name[temp].statistic]]
if name[earth_mover_dist] begin[:]
call[name[out].append, parameter[call[name[scipy].stats.wasserstein_distance, parameter[name[samples1], name[samples2]]]]]
if name[energy_dist] begin[:]
call[name[out].append, parameter[call[name[scipy].stats.energy_distance, parameter[name[samples1], name[samples2]]]]]
return[call[name[np].asarray, parameter[name[out]]]] | keyword[def] identifier[statistical_distances] ( identifier[samples1] , identifier[samples2] , identifier[earth_mover_dist] = keyword[True] ,
identifier[energy_dist] = keyword[True] ):
literal[string]
identifier[out] =[]
identifier[temp] = identifier[scipy] . identifier[stats] . identifier[ks_2samp] ( identifier[samples1] , identifier[samples2] )
identifier[out] . identifier[append] ( identifier[temp] . identifier[pvalue] )
identifier[out] . identifier[append] ( identifier[temp] . identifier[statistic] )
keyword[if] identifier[earth_mover_dist] :
identifier[out] . identifier[append] ( identifier[scipy] . identifier[stats] . identifier[wasserstein_distance] ( identifier[samples1] , identifier[samples2] ))
keyword[if] identifier[energy_dist] :
identifier[out] . identifier[append] ( identifier[scipy] . identifier[stats] . identifier[energy_distance] ( identifier[samples1] , identifier[samples2] ))
keyword[return] identifier[np] . identifier[asarray] ( identifier[out] ) | def statistical_distances(samples1, samples2, earth_mover_dist=True, energy_dist=True):
"""Compute measures of the statistical distance between samples.
Parameters
----------
samples1: 1d array
samples2: 1d array
earth_mover_dist: bool, optional
Whether or not to compute the Earth mover's distance between the
samples.
energy_dist: bool, optional
Whether or not to compute the energy distance between the samples.
Returns
-------
1d array
"""
out = []
temp = scipy.stats.ks_2samp(samples1, samples2)
out.append(temp.pvalue)
out.append(temp.statistic)
if earth_mover_dist:
out.append(scipy.stats.wasserstein_distance(samples1, samples2)) # depends on [control=['if'], data=[]]
if energy_dist:
out.append(scipy.stats.energy_distance(samples1, samples2)) # depends on [control=['if'], data=[]]
return np.asarray(out) |
def to_python(self, value):
"""
Validates that the value is in self.choices and can be coerced to the
right type.
"""
value = '' if value in validators.EMPTY_VALUES else smart_text(value)
if value == self.empty_value or value in validators.EMPTY_VALUES:
return self.empty_value
try:
value = self.coerce(value)
except (ValueError, TypeError, ValidationError):
self._on_invalid_value(value)
return value | def function[to_python, parameter[self, value]]:
constant[
Validates that the value is in self.choices and can be coerced to the
right type.
]
variable[value] assign[=] <ast.IfExp object at 0x7da1b0b72290>
if <ast.BoolOp object at 0x7da1b0b72bc0> begin[:]
return[name[self].empty_value]
<ast.Try object at 0x7da1b0b702b0>
return[name[value]] | keyword[def] identifier[to_python] ( identifier[self] , identifier[value] ):
literal[string]
identifier[value] = literal[string] keyword[if] identifier[value] keyword[in] identifier[validators] . identifier[EMPTY_VALUES] keyword[else] identifier[smart_text] ( identifier[value] )
keyword[if] identifier[value] == identifier[self] . identifier[empty_value] keyword[or] identifier[value] keyword[in] identifier[validators] . identifier[EMPTY_VALUES] :
keyword[return] identifier[self] . identifier[empty_value]
keyword[try] :
identifier[value] = identifier[self] . identifier[coerce] ( identifier[value] )
keyword[except] ( identifier[ValueError] , identifier[TypeError] , identifier[ValidationError] ):
identifier[self] . identifier[_on_invalid_value] ( identifier[value] )
keyword[return] identifier[value] | def to_python(self, value):
"""
Validates that the value is in self.choices and can be coerced to the
right type.
"""
value = '' if value in validators.EMPTY_VALUES else smart_text(value)
if value == self.empty_value or value in validators.EMPTY_VALUES:
return self.empty_value # depends on [control=['if'], data=[]]
try:
value = self.coerce(value) # depends on [control=['try'], data=[]]
except (ValueError, TypeError, ValidationError):
self._on_invalid_value(value) # depends on [control=['except'], data=[]]
return value |
def artist_commentary_create_update(self, post_id, original_title,
original_description, translated_title,
translated_description):
"""Create or update artist commentary (Requires login) (UNTESTED).
Parameters:
post_id (int): Post id.
original_title (str): Original title.
original_description (str): Original description.
translated_title (str): Translated title.
translated_description (str): Translated description.
"""
params = {
'artist_commentary[post_id]': post_id,
'artist_commentary[original_title]': original_title,
'artist_commentary[original_description]': original_description,
'artist_commentary[translated_title]': translated_title,
'artist_commentary[translated_description]': translated_description
}
return self._get('artist_commentaries/create_or_update.json', params,
method='POST', auth=True) | def function[artist_commentary_create_update, parameter[self, post_id, original_title, original_description, translated_title, translated_description]]:
constant[Create or update artist commentary (Requires login) (UNTESTED).
Parameters:
post_id (int): Post id.
original_title (str): Original title.
original_description (str): Original description.
translated_title (str): Translated title.
translated_description (str): Translated description.
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b0d18b20>, <ast.Constant object at 0x7da1b0d19570>, <ast.Constant object at 0x7da1b0d1ab00>, <ast.Constant object at 0x7da1b0d1a890>, <ast.Constant object at 0x7da1b0d182b0>], [<ast.Name object at 0x7da1b0f38310>, <ast.Name object at 0x7da1b0f39bd0>, <ast.Name object at 0x7da1b0f3bd60>, <ast.Name object at 0x7da1b0f38910>, <ast.Name object at 0x7da1b0f39f30>]]
return[call[name[self]._get, parameter[constant[artist_commentaries/create_or_update.json], name[params]]]] | keyword[def] identifier[artist_commentary_create_update] ( identifier[self] , identifier[post_id] , identifier[original_title] ,
identifier[original_description] , identifier[translated_title] ,
identifier[translated_description] ):
literal[string]
identifier[params] ={
literal[string] : identifier[post_id] ,
literal[string] : identifier[original_title] ,
literal[string] : identifier[original_description] ,
literal[string] : identifier[translated_title] ,
literal[string] : identifier[translated_description]
}
keyword[return] identifier[self] . identifier[_get] ( literal[string] , identifier[params] ,
identifier[method] = literal[string] , identifier[auth] = keyword[True] ) | def artist_commentary_create_update(self, post_id, original_title, original_description, translated_title, translated_description):
"""Create or update artist commentary (Requires login) (UNTESTED).
Parameters:
post_id (int): Post id.
original_title (str): Original title.
original_description (str): Original description.
translated_title (str): Translated title.
translated_description (str): Translated description.
"""
params = {'artist_commentary[post_id]': post_id, 'artist_commentary[original_title]': original_title, 'artist_commentary[original_description]': original_description, 'artist_commentary[translated_title]': translated_title, 'artist_commentary[translated_description]': translated_description}
return self._get('artist_commentaries/create_or_update.json', params, method='POST', auth=True) |
def plot1D_mat(a, b, M, title=''):
""" Plot matrix M with the source and target 1D distribution
Creates a subplot with the source distribution a on the left and
target distribution b on the tot. The matrix M is shown in between.
Parameters
----------
a : np.array, shape (na,)
Source distribution
b : np.array, shape (nb,)
Target distribution
M : np.array, shape (na,nb)
Matrix to plot
"""
na, nb = M.shape
gs = gridspec.GridSpec(3, 3)
xa = np.arange(na)
xb = np.arange(nb)
ax1 = pl.subplot(gs[0, 1:])
pl.plot(xb, b, 'r', label='Target distribution')
pl.yticks(())
pl.title(title)
ax2 = pl.subplot(gs[1:, 0])
pl.plot(a, xa, 'b', label='Source distribution')
pl.gca().invert_xaxis()
pl.gca().invert_yaxis()
pl.xticks(())
pl.subplot(gs[1:, 1:], sharex=ax1, sharey=ax2)
pl.imshow(M, interpolation='nearest')
pl.axis('off')
pl.xlim((0, nb))
pl.tight_layout()
pl.subplots_adjust(wspace=0., hspace=0.2) | def function[plot1D_mat, parameter[a, b, M, title]]:
constant[ Plot matrix M with the source and target 1D distribution
Creates a subplot with the source distribution a on the left and
target distribution b on the tot. The matrix M is shown in between.
Parameters
----------
a : np.array, shape (na,)
Source distribution
b : np.array, shape (nb,)
Target distribution
M : np.array, shape (na,nb)
Matrix to plot
]
<ast.Tuple object at 0x7da1b1600400> assign[=] name[M].shape
variable[gs] assign[=] call[name[gridspec].GridSpec, parameter[constant[3], constant[3]]]
variable[xa] assign[=] call[name[np].arange, parameter[name[na]]]
variable[xb] assign[=] call[name[np].arange, parameter[name[nb]]]
variable[ax1] assign[=] call[name[pl].subplot, parameter[call[name[gs]][tuple[[<ast.Constant object at 0x7da1b18a9270>, <ast.Slice object at 0x7da1b18a8250>]]]]]
call[name[pl].plot, parameter[name[xb], name[b], constant[r]]]
call[name[pl].yticks, parameter[tuple[[]]]]
call[name[pl].title, parameter[name[title]]]
variable[ax2] assign[=] call[name[pl].subplot, parameter[call[name[gs]][tuple[[<ast.Slice object at 0x7da1b18aa7a0>, <ast.Constant object at 0x7da1b18a93c0>]]]]]
call[name[pl].plot, parameter[name[a], name[xa], constant[b]]]
call[call[name[pl].gca, parameter[]].invert_xaxis, parameter[]]
call[call[name[pl].gca, parameter[]].invert_yaxis, parameter[]]
call[name[pl].xticks, parameter[tuple[[]]]]
call[name[pl].subplot, parameter[call[name[gs]][tuple[[<ast.Slice object at 0x7da1b18a99c0>, <ast.Slice object at 0x7da1b18a80a0>]]]]]
call[name[pl].imshow, parameter[name[M]]]
call[name[pl].axis, parameter[constant[off]]]
call[name[pl].xlim, parameter[tuple[[<ast.Constant object at 0x7da1b18df7c0>, <ast.Name object at 0x7da1b18ddf00>]]]]
call[name[pl].tight_layout, parameter[]]
call[name[pl].subplots_adjust, parameter[]] | keyword[def] identifier[plot1D_mat] ( identifier[a] , identifier[b] , identifier[M] , identifier[title] = literal[string] ):
literal[string]
identifier[na] , identifier[nb] = identifier[M] . identifier[shape]
identifier[gs] = identifier[gridspec] . identifier[GridSpec] ( literal[int] , literal[int] )
identifier[xa] = identifier[np] . identifier[arange] ( identifier[na] )
identifier[xb] = identifier[np] . identifier[arange] ( identifier[nb] )
identifier[ax1] = identifier[pl] . identifier[subplot] ( identifier[gs] [ literal[int] , literal[int] :])
identifier[pl] . identifier[plot] ( identifier[xb] , identifier[b] , literal[string] , identifier[label] = literal[string] )
identifier[pl] . identifier[yticks] (())
identifier[pl] . identifier[title] ( identifier[title] )
identifier[ax2] = identifier[pl] . identifier[subplot] ( identifier[gs] [ literal[int] :, literal[int] ])
identifier[pl] . identifier[plot] ( identifier[a] , identifier[xa] , literal[string] , identifier[label] = literal[string] )
identifier[pl] . identifier[gca] (). identifier[invert_xaxis] ()
identifier[pl] . identifier[gca] (). identifier[invert_yaxis] ()
identifier[pl] . identifier[xticks] (())
identifier[pl] . identifier[subplot] ( identifier[gs] [ literal[int] :, literal[int] :], identifier[sharex] = identifier[ax1] , identifier[sharey] = identifier[ax2] )
identifier[pl] . identifier[imshow] ( identifier[M] , identifier[interpolation] = literal[string] )
identifier[pl] . identifier[axis] ( literal[string] )
identifier[pl] . identifier[xlim] (( literal[int] , identifier[nb] ))
identifier[pl] . identifier[tight_layout] ()
identifier[pl] . identifier[subplots_adjust] ( identifier[wspace] = literal[int] , identifier[hspace] = literal[int] ) | def plot1D_mat(a, b, M, title=''):
""" Plot matrix M with the source and target 1D distribution
Creates a subplot with the source distribution a on the left and
target distribution b on the tot. The matrix M is shown in between.
Parameters
----------
a : np.array, shape (na,)
Source distribution
b : np.array, shape (nb,)
Target distribution
M : np.array, shape (na,nb)
Matrix to plot
"""
(na, nb) = M.shape
gs = gridspec.GridSpec(3, 3)
xa = np.arange(na)
xb = np.arange(nb)
ax1 = pl.subplot(gs[0, 1:])
pl.plot(xb, b, 'r', label='Target distribution')
pl.yticks(())
pl.title(title)
ax2 = pl.subplot(gs[1:, 0])
pl.plot(a, xa, 'b', label='Source distribution')
pl.gca().invert_xaxis()
pl.gca().invert_yaxis()
pl.xticks(())
pl.subplot(gs[1:, 1:], sharex=ax1, sharey=ax2)
pl.imshow(M, interpolation='nearest')
pl.axis('off')
pl.xlim((0, nb))
pl.tight_layout()
pl.subplots_adjust(wspace=0.0, hspace=0.2) |
def read_padding(fp, size, divisor=2):
"""
Read padding bytes for the given byte size.
:param fp: file-like object
:param divisor: divisor of the byte alignment
:return: read byte size
"""
remainder = size % divisor
if remainder:
return fp.read(divisor - remainder)
return b'' | def function[read_padding, parameter[fp, size, divisor]]:
constant[
Read padding bytes for the given byte size.
:param fp: file-like object
:param divisor: divisor of the byte alignment
:return: read byte size
]
variable[remainder] assign[=] binary_operation[name[size] <ast.Mod object at 0x7da2590d6920> name[divisor]]
if name[remainder] begin[:]
return[call[name[fp].read, parameter[binary_operation[name[divisor] - name[remainder]]]]]
return[constant[b'']] | keyword[def] identifier[read_padding] ( identifier[fp] , identifier[size] , identifier[divisor] = literal[int] ):
literal[string]
identifier[remainder] = identifier[size] % identifier[divisor]
keyword[if] identifier[remainder] :
keyword[return] identifier[fp] . identifier[read] ( identifier[divisor] - identifier[remainder] )
keyword[return] literal[string] | def read_padding(fp, size, divisor=2):
"""
Read padding bytes for the given byte size.
:param fp: file-like object
:param divisor: divisor of the byte alignment
:return: read byte size
"""
remainder = size % divisor
if remainder:
return fp.read(divisor - remainder) # depends on [control=['if'], data=[]]
return b'' |
def get_server(self, datacenter_id, server_id, depth=1):
"""
Retrieves a server by its ID.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str``
:param depth: The depth of the response data.
:type depth: ``int``
"""
response = self._perform_request(
'/datacenters/%s/servers/%s?depth=%s' % (
datacenter_id,
server_id,
str(depth)))
return response | def function[get_server, parameter[self, datacenter_id, server_id, depth]]:
constant[
Retrieves a server by its ID.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str``
:param depth: The depth of the response data.
:type depth: ``int``
]
variable[response] assign[=] call[name[self]._perform_request, parameter[binary_operation[constant[/datacenters/%s/servers/%s?depth=%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b23464a0>, <ast.Name object at 0x7da1b2347a30>, <ast.Call object at 0x7da1b2344100>]]]]]
return[name[response]] | keyword[def] identifier[get_server] ( identifier[self] , identifier[datacenter_id] , identifier[server_id] , identifier[depth] = literal[int] ):
literal[string]
identifier[response] = identifier[self] . identifier[_perform_request] (
literal[string] %(
identifier[datacenter_id] ,
identifier[server_id] ,
identifier[str] ( identifier[depth] )))
keyword[return] identifier[response] | def get_server(self, datacenter_id, server_id, depth=1):
"""
Retrieves a server by its ID.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str``
:param depth: The depth of the response data.
:type depth: ``int``
"""
response = self._perform_request('/datacenters/%s/servers/%s?depth=%s' % (datacenter_id, server_id, str(depth)))
return response |
def get_assessment_part_mdata():
"""Return default mdata map for AssessmentPart"""
return {
'assessment_part': {
'element_label': {
'text': 'assessment part',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'instructions': {
'text': 'accepts an osid.id.Id object',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_id_values': [''],
'syntax': 'ID',
'id_set': [],
},
'assessment': {
'element_label': {
'text': 'assessment',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'instructions': {
'text': 'accepts an osid.id.Id object',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_id_values': [''],
'syntax': 'ID',
'id_set': [],
},
'weight': {
'element_label': {
'text': 'weight',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'instructions': {
'text': 'enter a cardinal value',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_cardinal_values': [None],
'syntax': 'CARDINAL',
'minimum_cardinal': None,
'maximum_cardinal': None,
'cardinal_set': []
},
'allocated_time': {
'element_label': {
'text': 'allocated time',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'instructions': {
'text': 'enter a valid duration object.',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_duration_values': [None],
'syntax': 'DURATION',
'date_time_set': [],
},
} | def function[get_assessment_part_mdata, parameter[]]:
constant[Return default mdata map for AssessmentPart]
return[dictionary[[<ast.Constant object at 0x7da204347bb0>, <ast.Constant object at 0x7da204347d00>, <ast.Constant object at 0x7da204347010>, <ast.Constant object at 0x7da2043478b0>], [<ast.Dict object at 0x7da204346b30>, <ast.Dict object at 0x7da1b0a70520>, <ast.Dict object at 0x7da1b0971ed0>, <ast.Dict object at 0x7da1b0973790>]]] | keyword[def] identifier[get_assessment_part_mdata] ():
literal[string]
keyword[return] {
literal[string] :{
literal[string] :{
literal[string] : literal[string] ,
literal[string] : identifier[str] ( identifier[DEFAULT_LANGUAGE_TYPE] ),
literal[string] : identifier[str] ( identifier[DEFAULT_SCRIPT_TYPE] ),
literal[string] : identifier[str] ( identifier[DEFAULT_FORMAT_TYPE] ),
},
literal[string] :{
literal[string] : literal[string] ,
literal[string] : identifier[str] ( identifier[DEFAULT_LANGUAGE_TYPE] ),
literal[string] : identifier[str] ( identifier[DEFAULT_SCRIPT_TYPE] ),
literal[string] : identifier[str] ( identifier[DEFAULT_FORMAT_TYPE] ),
},
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] :[ literal[string] ],
literal[string] : literal[string] ,
literal[string] :[],
},
literal[string] :{
literal[string] :{
literal[string] : literal[string] ,
literal[string] : identifier[str] ( identifier[DEFAULT_LANGUAGE_TYPE] ),
literal[string] : identifier[str] ( identifier[DEFAULT_SCRIPT_TYPE] ),
literal[string] : identifier[str] ( identifier[DEFAULT_FORMAT_TYPE] ),
},
literal[string] :{
literal[string] : literal[string] ,
literal[string] : identifier[str] ( identifier[DEFAULT_LANGUAGE_TYPE] ),
literal[string] : identifier[str] ( identifier[DEFAULT_SCRIPT_TYPE] ),
literal[string] : identifier[str] ( identifier[DEFAULT_FORMAT_TYPE] ),
},
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] :[ literal[string] ],
literal[string] : literal[string] ,
literal[string] :[],
},
literal[string] :{
literal[string] :{
literal[string] : literal[string] ,
literal[string] : identifier[str] ( identifier[DEFAULT_LANGUAGE_TYPE] ),
literal[string] : identifier[str] ( identifier[DEFAULT_SCRIPT_TYPE] ),
literal[string] : identifier[str] ( identifier[DEFAULT_FORMAT_TYPE] ),
},
literal[string] :{
literal[string] : literal[string] ,
literal[string] : identifier[str] ( identifier[DEFAULT_LANGUAGE_TYPE] ),
literal[string] : identifier[str] ( identifier[DEFAULT_SCRIPT_TYPE] ),
literal[string] : identifier[str] ( identifier[DEFAULT_FORMAT_TYPE] ),
},
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] :[ keyword[None] ],
literal[string] : literal[string] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] :[]
},
literal[string] :{
literal[string] :{
literal[string] : literal[string] ,
literal[string] : identifier[str] ( identifier[DEFAULT_LANGUAGE_TYPE] ),
literal[string] : identifier[str] ( identifier[DEFAULT_SCRIPT_TYPE] ),
literal[string] : identifier[str] ( identifier[DEFAULT_FORMAT_TYPE] ),
},
literal[string] :{
literal[string] : literal[string] ,
literal[string] : identifier[str] ( identifier[DEFAULT_LANGUAGE_TYPE] ),
literal[string] : identifier[str] ( identifier[DEFAULT_SCRIPT_TYPE] ),
literal[string] : identifier[str] ( identifier[DEFAULT_FORMAT_TYPE] ),
},
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] :[ keyword[None] ],
literal[string] : literal[string] ,
literal[string] :[],
},
} | def get_assessment_part_mdata():
"""Return default mdata map for AssessmentPart"""
return {'assessment_part': {'element_label': {'text': 'assessment part', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE)}, 'instructions': {'text': 'accepts an osid.id.Id object', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE)}, 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_id_values': [''], 'syntax': 'ID', 'id_set': []}, 'assessment': {'element_label': {'text': 'assessment', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE)}, 'instructions': {'text': 'accepts an osid.id.Id object', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE)}, 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_id_values': [''], 'syntax': 'ID', 'id_set': []}, 'weight': {'element_label': {'text': 'weight', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE)}, 'instructions': {'text': 'enter a cardinal value', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE)}, 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_cardinal_values': [None], 'syntax': 'CARDINAL', 'minimum_cardinal': None, 'maximum_cardinal': None, 'cardinal_set': []}, 'allocated_time': {'element_label': {'text': 'allocated time', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE)}, 'instructions': {'text': 'enter a valid duration object.', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE)}, 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_duration_values': [None], 'syntax': 'DURATION', 'date_time_set': []}} |
async def download_file(
self, input_location, file=None, *, part_size_kb=None,
file_size=None, progress_callback=None, dc_id=None):
"""
Downloads the given input location to a file.
Args:
input_location (:tl:`InputFileLocation`):
The file location from which the file will be downloaded.
See `telethon.utils.get_input_location` source for a complete
list of supported types.
file (`str` | `file`, optional):
The output file path, directory, or stream-like object.
If the path exists and is a file, it will be overwritten.
If the file path is ``None`` or ``bytes``, then the result
will be saved in memory and returned as `bytes`.
part_size_kb (`int`, optional):
Chunk size when downloading files. The larger, the less
requests will be made (up to 512KB maximum).
file_size (`int`, optional):
The file size that is about to be downloaded, if known.
Only used if ``progress_callback`` is specified.
progress_callback (`callable`, optional):
A callback function accepting two parameters:
``(downloaded bytes, total)``. Note that the
``total`` is the provided ``file_size``.
dc_id (`int`, optional):
The data center the library should connect to in order
to download the file. You shouldn't worry about this.
"""
if not part_size_kb:
if not file_size:
part_size_kb = 64 # Reasonable default
else:
part_size_kb = utils.get_appropriated_part_size(file_size)
part_size = int(part_size_kb * 1024)
# https://core.telegram.org/api/files says:
# > part_size % 1024 = 0 (divisible by 1KB)
#
# But https://core.telegram.org/cdn (more recent) says:
# > limit must be divisible by 4096 bytes
# So we just stick to the 4096 limit.
if part_size % 4096 != 0:
raise ValueError(
'The part size must be evenly divisible by 4096.')
in_memory = file is None or file is bytes
if in_memory:
f = io.BytesIO()
elif isinstance(file, str):
# Ensure that we'll be able to download the media
helpers.ensure_parent_dir_exists(file)
f = open(file, 'wb')
else:
f = file
old_dc = dc_id
dc_id, input_location = utils.get_input_location(input_location)
if dc_id is None:
dc_id = old_dc
exported = dc_id and self.session.dc_id != dc_id
if exported:
try:
sender = await self._borrow_exported_sender(dc_id)
except errors.DcIdInvalidError:
# Can't export a sender for the ID we are currently in
config = await self(functions.help.GetConfigRequest())
for option in config.dc_options:
if option.ip_address == self.session.server_address:
self.session.set_dc(
option.id, option.ip_address, option.port)
self.session.save()
break
# TODO Figure out why the session may have the wrong DC ID
sender = self._sender
exported = False
else:
# The used sender will also change if ``FileMigrateError`` occurs
sender = self._sender
self._log[__name__].info('Downloading file in chunks of %d bytes',
part_size)
try:
offset = 0
while True:
try:
result = await sender.send(functions.upload.GetFileRequest(
input_location, offset, part_size
))
if isinstance(result, types.upload.FileCdnRedirect):
# TODO Implement
raise NotImplementedError
except errors.FileMigrateError as e:
self._log[__name__].info('File lives in another DC')
sender = await self._borrow_exported_sender(e.new_dc)
exported = True
continue
offset += part_size
if not result.bytes:
if in_memory:
f.flush()
return f.getvalue()
else:
return getattr(result, 'type', '')
self._log[__name__].debug('Saving %d more bytes',
len(result.bytes))
f.write(result.bytes)
if progress_callback:
progress_callback(f.tell(), file_size)
finally:
if exported:
await self._return_exported_sender(sender)
elif sender != self._sender:
await sender.disconnect()
if isinstance(file, str) or in_memory:
f.close() | <ast.AsyncFunctionDef object at 0x7da1b21e1ba0> | keyword[async] keyword[def] identifier[download_file] (
identifier[self] , identifier[input_location] , identifier[file] = keyword[None] ,*, identifier[part_size_kb] = keyword[None] ,
identifier[file_size] = keyword[None] , identifier[progress_callback] = keyword[None] , identifier[dc_id] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[part_size_kb] :
keyword[if] keyword[not] identifier[file_size] :
identifier[part_size_kb] = literal[int]
keyword[else] :
identifier[part_size_kb] = identifier[utils] . identifier[get_appropriated_part_size] ( identifier[file_size] )
identifier[part_size] = identifier[int] ( identifier[part_size_kb] * literal[int] )
keyword[if] identifier[part_size] % literal[int] != literal[int] :
keyword[raise] identifier[ValueError] (
literal[string] )
identifier[in_memory] = identifier[file] keyword[is] keyword[None] keyword[or] identifier[file] keyword[is] identifier[bytes]
keyword[if] identifier[in_memory] :
identifier[f] = identifier[io] . identifier[BytesIO] ()
keyword[elif] identifier[isinstance] ( identifier[file] , identifier[str] ):
identifier[helpers] . identifier[ensure_parent_dir_exists] ( identifier[file] )
identifier[f] = identifier[open] ( identifier[file] , literal[string] )
keyword[else] :
identifier[f] = identifier[file]
identifier[old_dc] = identifier[dc_id]
identifier[dc_id] , identifier[input_location] = identifier[utils] . identifier[get_input_location] ( identifier[input_location] )
keyword[if] identifier[dc_id] keyword[is] keyword[None] :
identifier[dc_id] = identifier[old_dc]
identifier[exported] = identifier[dc_id] keyword[and] identifier[self] . identifier[session] . identifier[dc_id] != identifier[dc_id]
keyword[if] identifier[exported] :
keyword[try] :
identifier[sender] = keyword[await] identifier[self] . identifier[_borrow_exported_sender] ( identifier[dc_id] )
keyword[except] identifier[errors] . identifier[DcIdInvalidError] :
identifier[config] = keyword[await] identifier[self] ( identifier[functions] . identifier[help] . identifier[GetConfigRequest] ())
keyword[for] identifier[option] keyword[in] identifier[config] . identifier[dc_options] :
keyword[if] identifier[option] . identifier[ip_address] == identifier[self] . identifier[session] . identifier[server_address] :
identifier[self] . identifier[session] . identifier[set_dc] (
identifier[option] . identifier[id] , identifier[option] . identifier[ip_address] , identifier[option] . identifier[port] )
identifier[self] . identifier[session] . identifier[save] ()
keyword[break]
identifier[sender] = identifier[self] . identifier[_sender]
identifier[exported] = keyword[False]
keyword[else] :
identifier[sender] = identifier[self] . identifier[_sender]
identifier[self] . identifier[_log] [ identifier[__name__] ]. identifier[info] ( literal[string] ,
identifier[part_size] )
keyword[try] :
identifier[offset] = literal[int]
keyword[while] keyword[True] :
keyword[try] :
identifier[result] = keyword[await] identifier[sender] . identifier[send] ( identifier[functions] . identifier[upload] . identifier[GetFileRequest] (
identifier[input_location] , identifier[offset] , identifier[part_size]
))
keyword[if] identifier[isinstance] ( identifier[result] , identifier[types] . identifier[upload] . identifier[FileCdnRedirect] ):
keyword[raise] identifier[NotImplementedError]
keyword[except] identifier[errors] . identifier[FileMigrateError] keyword[as] identifier[e] :
identifier[self] . identifier[_log] [ identifier[__name__] ]. identifier[info] ( literal[string] )
identifier[sender] = keyword[await] identifier[self] . identifier[_borrow_exported_sender] ( identifier[e] . identifier[new_dc] )
identifier[exported] = keyword[True]
keyword[continue]
identifier[offset] += identifier[part_size]
keyword[if] keyword[not] identifier[result] . identifier[bytes] :
keyword[if] identifier[in_memory] :
identifier[f] . identifier[flush] ()
keyword[return] identifier[f] . identifier[getvalue] ()
keyword[else] :
keyword[return] identifier[getattr] ( identifier[result] , literal[string] , literal[string] )
identifier[self] . identifier[_log] [ identifier[__name__] ]. identifier[debug] ( literal[string] ,
identifier[len] ( identifier[result] . identifier[bytes] ))
identifier[f] . identifier[write] ( identifier[result] . identifier[bytes] )
keyword[if] identifier[progress_callback] :
identifier[progress_callback] ( identifier[f] . identifier[tell] (), identifier[file_size] )
keyword[finally] :
keyword[if] identifier[exported] :
keyword[await] identifier[self] . identifier[_return_exported_sender] ( identifier[sender] )
keyword[elif] identifier[sender] != identifier[self] . identifier[_sender] :
keyword[await] identifier[sender] . identifier[disconnect] ()
keyword[if] identifier[isinstance] ( identifier[file] , identifier[str] ) keyword[or] identifier[in_memory] :
identifier[f] . identifier[close] () | async def download_file(self, input_location, file=None, *, part_size_kb=None, file_size=None, progress_callback=None, dc_id=None):
"""
Downloads the given input location to a file.
Args:
input_location (:tl:`InputFileLocation`):
The file location from which the file will be downloaded.
See `telethon.utils.get_input_location` source for a complete
list of supported types.
file (`str` | `file`, optional):
The output file path, directory, or stream-like object.
If the path exists and is a file, it will be overwritten.
If the file path is ``None`` or ``bytes``, then the result
will be saved in memory and returned as `bytes`.
part_size_kb (`int`, optional):
Chunk size when downloading files. The larger, the less
requests will be made (up to 512KB maximum).
file_size (`int`, optional):
The file size that is about to be downloaded, if known.
Only used if ``progress_callback`` is specified.
progress_callback (`callable`, optional):
A callback function accepting two parameters:
``(downloaded bytes, total)``. Note that the
``total`` is the provided ``file_size``.
dc_id (`int`, optional):
The data center the library should connect to in order
to download the file. You shouldn't worry about this.
"""
if not part_size_kb:
if not file_size:
part_size_kb = 64 # Reasonable default # depends on [control=['if'], data=[]]
else:
part_size_kb = utils.get_appropriated_part_size(file_size) # depends on [control=['if'], data=[]]
part_size = int(part_size_kb * 1024)
# https://core.telegram.org/api/files says:
# > part_size % 1024 = 0 (divisible by 1KB)
#
# But https://core.telegram.org/cdn (more recent) says:
# > limit must be divisible by 4096 bytes
# So we just stick to the 4096 limit.
if part_size % 4096 != 0:
raise ValueError('The part size must be evenly divisible by 4096.') # depends on [control=['if'], data=[]]
in_memory = file is None or file is bytes
if in_memory:
f = io.BytesIO() # depends on [control=['if'], data=[]]
elif isinstance(file, str):
# Ensure that we'll be able to download the media
helpers.ensure_parent_dir_exists(file)
f = open(file, 'wb') # depends on [control=['if'], data=[]]
else:
f = file
old_dc = dc_id
(dc_id, input_location) = utils.get_input_location(input_location)
if dc_id is None:
dc_id = old_dc # depends on [control=['if'], data=['dc_id']]
exported = dc_id and self.session.dc_id != dc_id
if exported:
try:
sender = await self._borrow_exported_sender(dc_id) # depends on [control=['try'], data=[]]
except errors.DcIdInvalidError:
# Can't export a sender for the ID we are currently in
config = await self(functions.help.GetConfigRequest())
for option in config.dc_options:
if option.ip_address == self.session.server_address:
self.session.set_dc(option.id, option.ip_address, option.port)
self.session.save()
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['option']]
# TODO Figure out why the session may have the wrong DC ID
sender = self._sender
exported = False # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
# The used sender will also change if ``FileMigrateError`` occurs
sender = self._sender
self._log[__name__].info('Downloading file in chunks of %d bytes', part_size)
try:
offset = 0
while True:
try:
result = await sender.send(functions.upload.GetFileRequest(input_location, offset, part_size))
if isinstance(result, types.upload.FileCdnRedirect):
# TODO Implement
raise NotImplementedError # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except errors.FileMigrateError as e:
self._log[__name__].info('File lives in another DC')
sender = await self._borrow_exported_sender(e.new_dc)
exported = True
continue # depends on [control=['except'], data=['e']]
offset += part_size
if not result.bytes:
if in_memory:
f.flush()
return f.getvalue() # depends on [control=['if'], data=[]]
else:
return getattr(result, 'type', '') # depends on [control=['if'], data=[]]
self._log[__name__].debug('Saving %d more bytes', len(result.bytes))
f.write(result.bytes)
if progress_callback:
progress_callback(f.tell(), file_size) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]]
finally:
if exported:
await self._return_exported_sender(sender) # depends on [control=['if'], data=[]]
elif sender != self._sender:
await sender.disconnect() # depends on [control=['if'], data=['sender']]
if isinstance(file, str) or in_memory:
f.close() # depends on [control=['if'], data=[]] |
def merge_plugin_from_baseline(baseline_plugins, args):
"""
:type baseline_plugins: tuple of BasePlugin
:param baseline_plugins: BasePlugin instances from baseline file
:type args: dict
:param args: diction of arguments parsed from usage
param priority: input param > baseline param > default
:Returns tuple of initialized plugins
"""
def _remove_key(d, key):
r = dict(d)
r.pop(key)
return r
baseline_plugins_dict = {
vars(plugin)["name"]: _remove_key(vars(plugin), "name")
for plugin in baseline_plugins
}
# Use input plugin as starting point
if args.use_all_plugins:
# input param and default param are used
plugins_dict = dict(args.plugins)
# baseline param priority > default
for plugin_name, param_name, param_value in _get_prioritized_parameters(
baseline_plugins_dict,
args.is_using_default_value,
prefer_default=True,
):
try:
plugins_dict[plugin_name][param_name] = param_value
except KeyError:
log.warning(
'Baseline contain plugin %s which is not in all plugins! Ignoring...'
% (plugin_name),
)
return from_parser_builder(
plugins_dict,
exclude_lines_regex=args.exclude_lines,
)
# Use baseline plugin as starting point
disabled_plugins = PluginOptions.get_disabled_plugins(args)
plugins_dict = {
plugin_name: plugin_params
for plugin_name, plugin_params in baseline_plugins_dict.items()
if plugin_name not in disabled_plugins
}
# input param priority > baseline
input_plugins_dict = dict(args.plugins)
for plugin_name, param_name, param_value in _get_prioritized_parameters(
input_plugins_dict,
args.is_using_default_value,
prefer_default=False,
):
try:
plugins_dict[plugin_name][param_name] = param_value
except KeyError:
log.warning(
'%s specified, but %s not configured! Ignoring...'
% ("".join(["--", param_name.replace("_", "-")]), plugin_name),
)
return from_parser_builder(
plugins_dict,
exclude_lines_regex=args.exclude_lines,
) | def function[merge_plugin_from_baseline, parameter[baseline_plugins, args]]:
constant[
:type baseline_plugins: tuple of BasePlugin
:param baseline_plugins: BasePlugin instances from baseline file
:type args: dict
:param args: diction of arguments parsed from usage
param priority: input param > baseline param > default
:Returns tuple of initialized plugins
]
def function[_remove_key, parameter[d, key]]:
variable[r] assign[=] call[name[dict], parameter[name[d]]]
call[name[r].pop, parameter[name[key]]]
return[name[r]]
variable[baseline_plugins_dict] assign[=] <ast.DictComp object at 0x7da20c794be0>
if name[args].use_all_plugins begin[:]
variable[plugins_dict] assign[=] call[name[dict], parameter[name[args].plugins]]
for taget[tuple[[<ast.Name object at 0x7da20c794af0>, <ast.Name object at 0x7da20c794e20>, <ast.Name object at 0x7da20c7959f0>]]] in starred[call[name[_get_prioritized_parameters], parameter[name[baseline_plugins_dict], name[args].is_using_default_value]]] begin[:]
<ast.Try object at 0x7da20c795570>
return[call[name[from_parser_builder], parameter[name[plugins_dict]]]]
variable[disabled_plugins] assign[=] call[name[PluginOptions].get_disabled_plugins, parameter[name[args]]]
variable[plugins_dict] assign[=] <ast.DictComp object at 0x7da20c795030>
variable[input_plugins_dict] assign[=] call[name[dict], parameter[name[args].plugins]]
for taget[tuple[[<ast.Name object at 0x7da20c795870>, <ast.Name object at 0x7da20c794c70>, <ast.Name object at 0x7da20c7949d0>]]] in starred[call[name[_get_prioritized_parameters], parameter[name[input_plugins_dict], name[args].is_using_default_value]]] begin[:]
<ast.Try object at 0x7da20c993a30>
return[call[name[from_parser_builder], parameter[name[plugins_dict]]]] | keyword[def] identifier[merge_plugin_from_baseline] ( identifier[baseline_plugins] , identifier[args] ):
literal[string]
keyword[def] identifier[_remove_key] ( identifier[d] , identifier[key] ):
identifier[r] = identifier[dict] ( identifier[d] )
identifier[r] . identifier[pop] ( identifier[key] )
keyword[return] identifier[r]
identifier[baseline_plugins_dict] ={
identifier[vars] ( identifier[plugin] )[ literal[string] ]: identifier[_remove_key] ( identifier[vars] ( identifier[plugin] ), literal[string] )
keyword[for] identifier[plugin] keyword[in] identifier[baseline_plugins]
}
keyword[if] identifier[args] . identifier[use_all_plugins] :
identifier[plugins_dict] = identifier[dict] ( identifier[args] . identifier[plugins] )
keyword[for] identifier[plugin_name] , identifier[param_name] , identifier[param_value] keyword[in] identifier[_get_prioritized_parameters] (
identifier[baseline_plugins_dict] ,
identifier[args] . identifier[is_using_default_value] ,
identifier[prefer_default] = keyword[True] ,
):
keyword[try] :
identifier[plugins_dict] [ identifier[plugin_name] ][ identifier[param_name] ]= identifier[param_value]
keyword[except] identifier[KeyError] :
identifier[log] . identifier[warning] (
literal[string]
%( identifier[plugin_name] ),
)
keyword[return] identifier[from_parser_builder] (
identifier[plugins_dict] ,
identifier[exclude_lines_regex] = identifier[args] . identifier[exclude_lines] ,
)
identifier[disabled_plugins] = identifier[PluginOptions] . identifier[get_disabled_plugins] ( identifier[args] )
identifier[plugins_dict] ={
identifier[plugin_name] : identifier[plugin_params]
keyword[for] identifier[plugin_name] , identifier[plugin_params] keyword[in] identifier[baseline_plugins_dict] . identifier[items] ()
keyword[if] identifier[plugin_name] keyword[not] keyword[in] identifier[disabled_plugins]
}
identifier[input_plugins_dict] = identifier[dict] ( identifier[args] . identifier[plugins] )
keyword[for] identifier[plugin_name] , identifier[param_name] , identifier[param_value] keyword[in] identifier[_get_prioritized_parameters] (
identifier[input_plugins_dict] ,
identifier[args] . identifier[is_using_default_value] ,
identifier[prefer_default] = keyword[False] ,
):
keyword[try] :
identifier[plugins_dict] [ identifier[plugin_name] ][ identifier[param_name] ]= identifier[param_value]
keyword[except] identifier[KeyError] :
identifier[log] . identifier[warning] (
literal[string]
%( literal[string] . identifier[join] ([ literal[string] , identifier[param_name] . identifier[replace] ( literal[string] , literal[string] )]), identifier[plugin_name] ),
)
keyword[return] identifier[from_parser_builder] (
identifier[plugins_dict] ,
identifier[exclude_lines_regex] = identifier[args] . identifier[exclude_lines] ,
) | def merge_plugin_from_baseline(baseline_plugins, args):
"""
:type baseline_plugins: tuple of BasePlugin
:param baseline_plugins: BasePlugin instances from baseline file
:type args: dict
:param args: diction of arguments parsed from usage
param priority: input param > baseline param > default
:Returns tuple of initialized plugins
"""
def _remove_key(d, key):
r = dict(d)
r.pop(key)
return r
baseline_plugins_dict = {vars(plugin)['name']: _remove_key(vars(plugin), 'name') for plugin in baseline_plugins}
# Use input plugin as starting point
if args.use_all_plugins:
# input param and default param are used
plugins_dict = dict(args.plugins)
# baseline param priority > default
for (plugin_name, param_name, param_value) in _get_prioritized_parameters(baseline_plugins_dict, args.is_using_default_value, prefer_default=True):
try:
plugins_dict[plugin_name][param_name] = param_value # depends on [control=['try'], data=[]]
except KeyError:
log.warning('Baseline contain plugin %s which is not in all plugins! Ignoring...' % plugin_name) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
return from_parser_builder(plugins_dict, exclude_lines_regex=args.exclude_lines) # depends on [control=['if'], data=[]]
# Use baseline plugin as starting point
disabled_plugins = PluginOptions.get_disabled_plugins(args)
plugins_dict = {plugin_name: plugin_params for (plugin_name, plugin_params) in baseline_plugins_dict.items() if plugin_name not in disabled_plugins}
# input param priority > baseline
input_plugins_dict = dict(args.plugins)
for (plugin_name, param_name, param_value) in _get_prioritized_parameters(input_plugins_dict, args.is_using_default_value, prefer_default=False):
try:
plugins_dict[plugin_name][param_name] = param_value # depends on [control=['try'], data=[]]
except KeyError:
log.warning('%s specified, but %s not configured! Ignoring...' % (''.join(['--', param_name.replace('_', '-')]), plugin_name)) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
return from_parser_builder(plugins_dict, exclude_lines_regex=args.exclude_lines) |
def getSDSSImage(ra,dec,radius=1.0,xsize=800,opt='GML',**kwargs):
"""
Download Sloan Digital Sky Survey images
http://skyserver.sdss3.org/dr9/en/tools/chart/chart.asp
radius (degrees)
opts: (G) Grid, (L) Label, P (PhotoObj), S (SpecObj), O (Outline), (B) Bounding Box,
(F) Fields, (M) Mask, (Q) Plates, (I) Invert
"""
import subprocess
import tempfile
url="http://skyservice.pha.jhu.edu/DR10/ImgCutout/getjpeg.aspx?"
scale = 2. * radius * 3600. / xsize
params=dict(ra=ra,dec=dec,
width=xsize,height=xsize,
scale=scale,opt=opt)
query='&'.join("%s=%s"%(k,v) for k,v in params.items())
tmp = tempfile.NamedTemporaryFile(suffix='.jpeg')
cmd='wget --progress=dot:mega -O %s "%s"'%(tmp.name,url+query)
subprocess.call(cmd,shell=True)
im = plt.imread(tmp.name)
tmp.close()
return im | def function[getSDSSImage, parameter[ra, dec, radius, xsize, opt]]:
constant[
Download Sloan Digital Sky Survey images
http://skyserver.sdss3.org/dr9/en/tools/chart/chart.asp
radius (degrees)
opts: (G) Grid, (L) Label, P (PhotoObj), S (SpecObj), O (Outline), (B) Bounding Box,
(F) Fields, (M) Mask, (Q) Plates, (I) Invert
]
import module[subprocess]
import module[tempfile]
variable[url] assign[=] constant[http://skyservice.pha.jhu.edu/DR10/ImgCutout/getjpeg.aspx?]
variable[scale] assign[=] binary_operation[binary_operation[binary_operation[constant[2.0] * name[radius]] * constant[3600.0]] / name[xsize]]
variable[params] assign[=] call[name[dict], parameter[]]
variable[query] assign[=] call[constant[&].join, parameter[<ast.GeneratorExp object at 0x7da18bccab00>]]
variable[tmp] assign[=] call[name[tempfile].NamedTemporaryFile, parameter[]]
variable[cmd] assign[=] binary_operation[constant[wget --progress=dot:mega -O %s "%s"] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18bccab30>, <ast.BinOp object at 0x7da18bcc88e0>]]]
call[name[subprocess].call, parameter[name[cmd]]]
variable[im] assign[=] call[name[plt].imread, parameter[name[tmp].name]]
call[name[tmp].close, parameter[]]
return[name[im]] | keyword[def] identifier[getSDSSImage] ( identifier[ra] , identifier[dec] , identifier[radius] = literal[int] , identifier[xsize] = literal[int] , identifier[opt] = literal[string] ,** identifier[kwargs] ):
literal[string]
keyword[import] identifier[subprocess]
keyword[import] identifier[tempfile]
identifier[url] = literal[string]
identifier[scale] = literal[int] * identifier[radius] * literal[int] / identifier[xsize]
identifier[params] = identifier[dict] ( identifier[ra] = identifier[ra] , identifier[dec] = identifier[dec] ,
identifier[width] = identifier[xsize] , identifier[height] = identifier[xsize] ,
identifier[scale] = identifier[scale] , identifier[opt] = identifier[opt] )
identifier[query] = literal[string] . identifier[join] ( literal[string] %( identifier[k] , identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[params] . identifier[items] ())
identifier[tmp] = identifier[tempfile] . identifier[NamedTemporaryFile] ( identifier[suffix] = literal[string] )
identifier[cmd] = literal[string] %( identifier[tmp] . identifier[name] , identifier[url] + identifier[query] )
identifier[subprocess] . identifier[call] ( identifier[cmd] , identifier[shell] = keyword[True] )
identifier[im] = identifier[plt] . identifier[imread] ( identifier[tmp] . identifier[name] )
identifier[tmp] . identifier[close] ()
keyword[return] identifier[im] | def getSDSSImage(ra, dec, radius=1.0, xsize=800, opt='GML', **kwargs):
"""
Download Sloan Digital Sky Survey images
http://skyserver.sdss3.org/dr9/en/tools/chart/chart.asp
radius (degrees)
opts: (G) Grid, (L) Label, P (PhotoObj), S (SpecObj), O (Outline), (B) Bounding Box,
(F) Fields, (M) Mask, (Q) Plates, (I) Invert
"""
import subprocess
import tempfile
url = 'http://skyservice.pha.jhu.edu/DR10/ImgCutout/getjpeg.aspx?'
scale = 2.0 * radius * 3600.0 / xsize
params = dict(ra=ra, dec=dec, width=xsize, height=xsize, scale=scale, opt=opt)
query = '&'.join(('%s=%s' % (k, v) for (k, v) in params.items()))
tmp = tempfile.NamedTemporaryFile(suffix='.jpeg')
cmd = 'wget --progress=dot:mega -O %s "%s"' % (tmp.name, url + query)
subprocess.call(cmd, shell=True)
im = plt.imread(tmp.name)
tmp.close()
return im |
def runner(self):
"""
Run the necessary methods in the correct order
"""
logging.info('Starting {} analysis pipeline'.format(self.analysistype))
# Initialise the GenObject
for sample in self.runmetadata.samples:
setattr(sample, self.analysistype, GenObject())
try:
sample[self.analysistype].pointfindergenus = self.pointfinder_org_dict[sample.general.referencegenus]
except KeyError:
sample[self.analysistype].pointfindergenus = 'ND'
# Run the raw read mapping
PointSipping(inputobject=self,
cutoff=self.cutoff)
# Create FASTA files from the raw read matcves
self.fasta()
# Run PointFinder on the FASTA files
self.run_pointfinder()
# Create summary reports of the PointFinder outputs
self.parse_pointfinder() | def function[runner, parameter[self]]:
constant[
Run the necessary methods in the correct order
]
call[name[logging].info, parameter[call[constant[Starting {} analysis pipeline].format, parameter[name[self].analysistype]]]]
for taget[name[sample]] in starred[name[self].runmetadata.samples] begin[:]
call[name[setattr], parameter[name[sample], name[self].analysistype, call[name[GenObject], parameter[]]]]
<ast.Try object at 0x7da1b11e3e20>
call[name[PointSipping], parameter[]]
call[name[self].fasta, parameter[]]
call[name[self].run_pointfinder, parameter[]]
call[name[self].parse_pointfinder, parameter[]] | keyword[def] identifier[runner] ( identifier[self] ):
literal[string]
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[self] . identifier[analysistype] ))
keyword[for] identifier[sample] keyword[in] identifier[self] . identifier[runmetadata] . identifier[samples] :
identifier[setattr] ( identifier[sample] , identifier[self] . identifier[analysistype] , identifier[GenObject] ())
keyword[try] :
identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[pointfindergenus] = identifier[self] . identifier[pointfinder_org_dict] [ identifier[sample] . identifier[general] . identifier[referencegenus] ]
keyword[except] identifier[KeyError] :
identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[pointfindergenus] = literal[string]
identifier[PointSipping] ( identifier[inputobject] = identifier[self] ,
identifier[cutoff] = identifier[self] . identifier[cutoff] )
identifier[self] . identifier[fasta] ()
identifier[self] . identifier[run_pointfinder] ()
identifier[self] . identifier[parse_pointfinder] () | def runner(self):
"""
Run the necessary methods in the correct order
"""
logging.info('Starting {} analysis pipeline'.format(self.analysistype))
# Initialise the GenObject
for sample in self.runmetadata.samples:
setattr(sample, self.analysistype, GenObject())
try:
sample[self.analysistype].pointfindergenus = self.pointfinder_org_dict[sample.general.referencegenus] # depends on [control=['try'], data=[]]
except KeyError:
sample[self.analysistype].pointfindergenus = 'ND' # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['sample']]
# Run the raw read mapping
PointSipping(inputobject=self, cutoff=self.cutoff)
# Create FASTA files from the raw read matcves
self.fasta()
# Run PointFinder on the FASTA files
self.run_pointfinder()
# Create summary reports of the PointFinder outputs
self.parse_pointfinder() |
def range(self):
"""A tuple containing the numeric range for this Slot.
The Python equivalent of the CLIPS deftemplate-slot-range function.
"""
data = clips.data.DataObject(self._env)
lib.EnvDeftemplateSlotRange(
self._env, self._tpl, self._name, data.byref)
return tuple(data.value) if isinstance(data.value, list) else () | def function[range, parameter[self]]:
constant[A tuple containing the numeric range for this Slot.
The Python equivalent of the CLIPS deftemplate-slot-range function.
]
variable[data] assign[=] call[name[clips].data.DataObject, parameter[name[self]._env]]
call[name[lib].EnvDeftemplateSlotRange, parameter[name[self]._env, name[self]._tpl, name[self]._name, name[data].byref]]
return[<ast.IfExp object at 0x7da18bcc8250>] | keyword[def] identifier[range] ( identifier[self] ):
literal[string]
identifier[data] = identifier[clips] . identifier[data] . identifier[DataObject] ( identifier[self] . identifier[_env] )
identifier[lib] . identifier[EnvDeftemplateSlotRange] (
identifier[self] . identifier[_env] , identifier[self] . identifier[_tpl] , identifier[self] . identifier[_name] , identifier[data] . identifier[byref] )
keyword[return] identifier[tuple] ( identifier[data] . identifier[value] ) keyword[if] identifier[isinstance] ( identifier[data] . identifier[value] , identifier[list] ) keyword[else] () | def range(self):
"""A tuple containing the numeric range for this Slot.
The Python equivalent of the CLIPS deftemplate-slot-range function.
"""
data = clips.data.DataObject(self._env)
lib.EnvDeftemplateSlotRange(self._env, self._tpl, self._name, data.byref)
return tuple(data.value) if isinstance(data.value, list) else () |
def from_variant_sequence_and_reference_context(
cls, variant_sequence, reference_context):
"""
Combines a VariantSequence with the reading frame implied by a
ReferenceContext, reverse complementing if necessary and finding the
offset to the first complete codon in the cDNA sequence.
Parameters
----------
variant_sequence : VariantSequence
reference_context : ReferenceContext
Returns a VariantSequenceInReadingFrame object
"""
(cdna_prefix, cdna_alt, cdna_suffix,
reference_prefix, reference_suffix, n_trimmed_from_reference) = trim_sequences(
variant_sequence, reference_context)
logger.info(
("cdna_predix='%s', cdna_alt='%s', cdna_suffix='%s', "
"reference_prefix='%s', reference_suffix='%s', n_trimmed=%d"),
cdna_prefix,
cdna_alt,
cdna_suffix,
reference_prefix,
reference_suffix,
n_trimmed_from_reference)
n_mismatch_before_variant = count_mismatches_before_variant(reference_prefix, cdna_prefix)
n_mismatch_after_variant = count_mismatches_after_variant(reference_suffix, cdna_suffix)
ref_codon_offset = reference_context.offset_to_first_complete_codon
# ReferenceContext carries with an offset to the first complete codon
# in the reference sequence. This may need to be adjusted if the reference
# sequence is longer than the variant sequence (and thus needs to be trimmed)
offset_to_first_complete_codon = compute_offset_to_first_complete_codon(
offset_to_first_complete_reference_codon=ref_codon_offset,
n_trimmed_from_reference_sequence=n_trimmed_from_reference)
cdna_sequence = cdna_prefix + cdna_alt + cdna_suffix
variant_interval_start = len(cdna_prefix)
variant_interval_end = variant_interval_start + len(cdna_alt)
return VariantSequenceInReadingFrame(
cdna_sequence=cdna_sequence,
offset_to_first_complete_codon=offset_to_first_complete_codon,
variant_cdna_interval_start=variant_interval_start,
variant_cdna_interval_end=variant_interval_end,
reference_cdna_sequence_before_variant=reference_prefix,
reference_cdna_sequence_after_variant=reference_suffix,
number_mismatches_before_variant=n_mismatch_before_variant,
number_mismatches_after_variant=n_mismatch_after_variant) | def function[from_variant_sequence_and_reference_context, parameter[cls, variant_sequence, reference_context]]:
constant[
Combines a VariantSequence with the reading frame implied by a
ReferenceContext, reverse complementing if necessary and finding the
offset to the first complete codon in the cDNA sequence.
Parameters
----------
variant_sequence : VariantSequence
reference_context : ReferenceContext
Returns a VariantSequenceInReadingFrame object
]
<ast.Tuple object at 0x7da1b25d9630> assign[=] call[name[trim_sequences], parameter[name[variant_sequence], name[reference_context]]]
call[name[logger].info, parameter[constant[cdna_predix='%s', cdna_alt='%s', cdna_suffix='%s', reference_prefix='%s', reference_suffix='%s', n_trimmed=%d], name[cdna_prefix], name[cdna_alt], name[cdna_suffix], name[reference_prefix], name[reference_suffix], name[n_trimmed_from_reference]]]
variable[n_mismatch_before_variant] assign[=] call[name[count_mismatches_before_variant], parameter[name[reference_prefix], name[cdna_prefix]]]
variable[n_mismatch_after_variant] assign[=] call[name[count_mismatches_after_variant], parameter[name[reference_suffix], name[cdna_suffix]]]
variable[ref_codon_offset] assign[=] name[reference_context].offset_to_first_complete_codon
variable[offset_to_first_complete_codon] assign[=] call[name[compute_offset_to_first_complete_codon], parameter[]]
variable[cdna_sequence] assign[=] binary_operation[binary_operation[name[cdna_prefix] + name[cdna_alt]] + name[cdna_suffix]]
variable[variant_interval_start] assign[=] call[name[len], parameter[name[cdna_prefix]]]
variable[variant_interval_end] assign[=] binary_operation[name[variant_interval_start] + call[name[len], parameter[name[cdna_alt]]]]
return[call[name[VariantSequenceInReadingFrame], parameter[]]] | keyword[def] identifier[from_variant_sequence_and_reference_context] (
identifier[cls] , identifier[variant_sequence] , identifier[reference_context] ):
literal[string]
( identifier[cdna_prefix] , identifier[cdna_alt] , identifier[cdna_suffix] ,
identifier[reference_prefix] , identifier[reference_suffix] , identifier[n_trimmed_from_reference] )= identifier[trim_sequences] (
identifier[variant_sequence] , identifier[reference_context] )
identifier[logger] . identifier[info] (
( literal[string]
literal[string] ),
identifier[cdna_prefix] ,
identifier[cdna_alt] ,
identifier[cdna_suffix] ,
identifier[reference_prefix] ,
identifier[reference_suffix] ,
identifier[n_trimmed_from_reference] )
identifier[n_mismatch_before_variant] = identifier[count_mismatches_before_variant] ( identifier[reference_prefix] , identifier[cdna_prefix] )
identifier[n_mismatch_after_variant] = identifier[count_mismatches_after_variant] ( identifier[reference_suffix] , identifier[cdna_suffix] )
identifier[ref_codon_offset] = identifier[reference_context] . identifier[offset_to_first_complete_codon]
identifier[offset_to_first_complete_codon] = identifier[compute_offset_to_first_complete_codon] (
identifier[offset_to_first_complete_reference_codon] = identifier[ref_codon_offset] ,
identifier[n_trimmed_from_reference_sequence] = identifier[n_trimmed_from_reference] )
identifier[cdna_sequence] = identifier[cdna_prefix] + identifier[cdna_alt] + identifier[cdna_suffix]
identifier[variant_interval_start] = identifier[len] ( identifier[cdna_prefix] )
identifier[variant_interval_end] = identifier[variant_interval_start] + identifier[len] ( identifier[cdna_alt] )
keyword[return] identifier[VariantSequenceInReadingFrame] (
identifier[cdna_sequence] = identifier[cdna_sequence] ,
identifier[offset_to_first_complete_codon] = identifier[offset_to_first_complete_codon] ,
identifier[variant_cdna_interval_start] = identifier[variant_interval_start] ,
identifier[variant_cdna_interval_end] = identifier[variant_interval_end] ,
identifier[reference_cdna_sequence_before_variant] = identifier[reference_prefix] ,
identifier[reference_cdna_sequence_after_variant] = identifier[reference_suffix] ,
identifier[number_mismatches_before_variant] = identifier[n_mismatch_before_variant] ,
identifier[number_mismatches_after_variant] = identifier[n_mismatch_after_variant] ) | def from_variant_sequence_and_reference_context(cls, variant_sequence, reference_context):
"""
Combines a VariantSequence with the reading frame implied by a
ReferenceContext, reverse complementing if necessary and finding the
offset to the first complete codon in the cDNA sequence.
Parameters
----------
variant_sequence : VariantSequence
reference_context : ReferenceContext
Returns a VariantSequenceInReadingFrame object
"""
(cdna_prefix, cdna_alt, cdna_suffix, reference_prefix, reference_suffix, n_trimmed_from_reference) = trim_sequences(variant_sequence, reference_context)
logger.info("cdna_predix='%s', cdna_alt='%s', cdna_suffix='%s', reference_prefix='%s', reference_suffix='%s', n_trimmed=%d", cdna_prefix, cdna_alt, cdna_suffix, reference_prefix, reference_suffix, n_trimmed_from_reference)
n_mismatch_before_variant = count_mismatches_before_variant(reference_prefix, cdna_prefix)
n_mismatch_after_variant = count_mismatches_after_variant(reference_suffix, cdna_suffix)
ref_codon_offset = reference_context.offset_to_first_complete_codon
# ReferenceContext carries with an offset to the first complete codon
# in the reference sequence. This may need to be adjusted if the reference
# sequence is longer than the variant sequence (and thus needs to be trimmed)
offset_to_first_complete_codon = compute_offset_to_first_complete_codon(offset_to_first_complete_reference_codon=ref_codon_offset, n_trimmed_from_reference_sequence=n_trimmed_from_reference)
cdna_sequence = cdna_prefix + cdna_alt + cdna_suffix
variant_interval_start = len(cdna_prefix)
variant_interval_end = variant_interval_start + len(cdna_alt)
return VariantSequenceInReadingFrame(cdna_sequence=cdna_sequence, offset_to_first_complete_codon=offset_to_first_complete_codon, variant_cdna_interval_start=variant_interval_start, variant_cdna_interval_end=variant_interval_end, reference_cdna_sequence_before_variant=reference_prefix, reference_cdna_sequence_after_variant=reference_suffix, number_mismatches_before_variant=n_mismatch_before_variant, number_mismatches_after_variant=n_mismatch_after_variant) |
def api(endpoint, data=None, json=None, filename=None, save_to=None):
"""
Perform a REST API request to a previously connected server.
This function is mostly for internal purposes, but may occasionally be useful for direct access to
the backend H2O server. It has same parameters as :meth:`H2OConnection.request <h2o.backend.H2OConnection.request>`.
"""
# type checks are performed in H2OConnection class
_check_connection()
return h2oconn.request(endpoint, data=data, json=json, filename=filename, save_to=save_to) | def function[api, parameter[endpoint, data, json, filename, save_to]]:
constant[
Perform a REST API request to a previously connected server.
This function is mostly for internal purposes, but may occasionally be useful for direct access to
the backend H2O server. It has same parameters as :meth:`H2OConnection.request <h2o.backend.H2OConnection.request>`.
]
call[name[_check_connection], parameter[]]
return[call[name[h2oconn].request, parameter[name[endpoint]]]] | keyword[def] identifier[api] ( identifier[endpoint] , identifier[data] = keyword[None] , identifier[json] = keyword[None] , identifier[filename] = keyword[None] , identifier[save_to] = keyword[None] ):
literal[string]
identifier[_check_connection] ()
keyword[return] identifier[h2oconn] . identifier[request] ( identifier[endpoint] , identifier[data] = identifier[data] , identifier[json] = identifier[json] , identifier[filename] = identifier[filename] , identifier[save_to] = identifier[save_to] ) | def api(endpoint, data=None, json=None, filename=None, save_to=None):
"""
Perform a REST API request to a previously connected server.
This function is mostly for internal purposes, but may occasionally be useful for direct access to
the backend H2O server. It has same parameters as :meth:`H2OConnection.request <h2o.backend.H2OConnection.request>`.
"""
# type checks are performed in H2OConnection class
_check_connection()
return h2oconn.request(endpoint, data=data, json=json, filename=filename, save_to=save_to) |
async def get_albums(self, *, limit=20, offset=0) -> List[Album]:
"""Get a list of the albums saved in the current Spotify user’s ‘Your Music’ library.
Parameters
----------
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[int]
The index of the first item to return. Default: 0
"""
data = await self.user.http.saved_albums(limit=limit, offset=offset)
return [Album(self.__client, item['album']) for item in data['items']] | <ast.AsyncFunctionDef object at 0x7da20c6e6da0> | keyword[async] keyword[def] identifier[get_albums] ( identifier[self] ,*, identifier[limit] = literal[int] , identifier[offset] = literal[int] )-> identifier[List] [ identifier[Album] ]:
literal[string]
identifier[data] = keyword[await] identifier[self] . identifier[user] . identifier[http] . identifier[saved_albums] ( identifier[limit] = identifier[limit] , identifier[offset] = identifier[offset] )
keyword[return] [ identifier[Album] ( identifier[self] . identifier[__client] , identifier[item] [ literal[string] ]) keyword[for] identifier[item] keyword[in] identifier[data] [ literal[string] ]] | async def get_albums(self, *, limit=20, offset=0) -> List[Album]:
"""Get a list of the albums saved in the current Spotify user’s ‘Your Music’ library.
Parameters
----------
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[int]
The index of the first item to return. Default: 0
"""
data = await self.user.http.saved_albums(limit=limit, offset=offset)
return [Album(self.__client, item['album']) for item in data['items']] |
def _ExtractJQuery(self, jquery_raw):
"""Extracts values from a JQuery string.
Args:
jquery_raw (str): JQuery string.
Returns:
dict[str, str]: extracted values.
"""
data_part = ''
if not jquery_raw:
return {}
if '[' in jquery_raw:
_, _, first_part = jquery_raw.partition('[')
data_part, _, _ = first_part.partition(']')
elif jquery_raw.startswith('//'):
_, _, first_part = jquery_raw.partition('{')
data_part = '{{{0:s}'.format(first_part)
elif '({' in jquery_raw:
_, _, first_part = jquery_raw.partition('(')
data_part, _, _ = first_part.rpartition(')')
if not data_part:
return {}
try:
data_dict = json.loads(data_part)
except ValueError:
return {}
return data_dict | def function[_ExtractJQuery, parameter[self, jquery_raw]]:
constant[Extracts values from a JQuery string.
Args:
jquery_raw (str): JQuery string.
Returns:
dict[str, str]: extracted values.
]
variable[data_part] assign[=] constant[]
if <ast.UnaryOp object at 0x7da20cabe8f0> begin[:]
return[dictionary[[], []]]
if compare[constant[[] in name[jquery_raw]] begin[:]
<ast.Tuple object at 0x7da20cabf2b0> assign[=] call[name[jquery_raw].partition, parameter[constant[[]]]
<ast.Tuple object at 0x7da20cabd2a0> assign[=] call[name[first_part].partition, parameter[constant[]]]]
if <ast.UnaryOp object at 0x7da20cabce80> begin[:]
return[dictionary[[], []]]
<ast.Try object at 0x7da18dc98340>
return[name[data_dict]] | keyword[def] identifier[_ExtractJQuery] ( identifier[self] , identifier[jquery_raw] ):
literal[string]
identifier[data_part] = literal[string]
keyword[if] keyword[not] identifier[jquery_raw] :
keyword[return] {}
keyword[if] literal[string] keyword[in] identifier[jquery_raw] :
identifier[_] , identifier[_] , identifier[first_part] = identifier[jquery_raw] . identifier[partition] ( literal[string] )
identifier[data_part] , identifier[_] , identifier[_] = identifier[first_part] . identifier[partition] ( literal[string] )
keyword[elif] identifier[jquery_raw] . identifier[startswith] ( literal[string] ):
identifier[_] , identifier[_] , identifier[first_part] = identifier[jquery_raw] . identifier[partition] ( literal[string] )
identifier[data_part] = literal[string] . identifier[format] ( identifier[first_part] )
keyword[elif] literal[string] keyword[in] identifier[jquery_raw] :
identifier[_] , identifier[_] , identifier[first_part] = identifier[jquery_raw] . identifier[partition] ( literal[string] )
identifier[data_part] , identifier[_] , identifier[_] = identifier[first_part] . identifier[rpartition] ( literal[string] )
keyword[if] keyword[not] identifier[data_part] :
keyword[return] {}
keyword[try] :
identifier[data_dict] = identifier[json] . identifier[loads] ( identifier[data_part] )
keyword[except] identifier[ValueError] :
keyword[return] {}
keyword[return] identifier[data_dict] | def _ExtractJQuery(self, jquery_raw):
"""Extracts values from a JQuery string.
Args:
jquery_raw (str): JQuery string.
Returns:
dict[str, str]: extracted values.
"""
data_part = ''
if not jquery_raw:
return {} # depends on [control=['if'], data=[]]
if '[' in jquery_raw:
(_, _, first_part) = jquery_raw.partition('[')
(data_part, _, _) = first_part.partition(']') # depends on [control=['if'], data=['jquery_raw']]
elif jquery_raw.startswith('//'):
(_, _, first_part) = jquery_raw.partition('{')
data_part = '{{{0:s}'.format(first_part) # depends on [control=['if'], data=[]]
elif '({' in jquery_raw:
(_, _, first_part) = jquery_raw.partition('(')
(data_part, _, _) = first_part.rpartition(')') # depends on [control=['if'], data=['jquery_raw']]
if not data_part:
return {} # depends on [control=['if'], data=[]]
try:
data_dict = json.loads(data_part) # depends on [control=['try'], data=[]]
except ValueError:
return {} # depends on [control=['except'], data=[]]
return data_dict |
def rst_preprocess(file):
"""
Preprocess reST file to support Sphinx like include directive. Includes are
relative to the current working directory.
"""
with open(file) as fp:
return re.sub(
"^\.\.\s+include:: (.*?)$",
lambda x: (rst_preprocess(x.group(1)) or "").rstrip(),
fp.read(),
flags=re.MULTILINE) | def function[rst_preprocess, parameter[file]]:
constant[
Preprocess reST file to support Sphinx like include directive. Includes are
relative to the current working directory.
]
with call[name[open], parameter[name[file]]] begin[:]
return[call[name[re].sub, parameter[constant[^\.\.\s+include:: (.*?)$], <ast.Lambda object at 0x7da18bccbee0>, call[name[fp].read, parameter[]]]]] | keyword[def] identifier[rst_preprocess] ( identifier[file] ):
literal[string]
keyword[with] identifier[open] ( identifier[file] ) keyword[as] identifier[fp] :
keyword[return] identifier[re] . identifier[sub] (
literal[string] ,
keyword[lambda] identifier[x] :( identifier[rst_preprocess] ( identifier[x] . identifier[group] ( literal[int] )) keyword[or] literal[string] ). identifier[rstrip] (),
identifier[fp] . identifier[read] (),
identifier[flags] = identifier[re] . identifier[MULTILINE] ) | def rst_preprocess(file):
"""
Preprocess reST file to support Sphinx like include directive. Includes are
relative to the current working directory.
"""
with open(file) as fp:
return re.sub('^\\.\\.\\s+include:: (.*?)$', lambda x: (rst_preprocess(x.group(1)) or '').rstrip(), fp.read(), flags=re.MULTILINE) # depends on [control=['with'], data=['fp']] |
def walk(self, head=None):
"""Do a breadth-first walk of the graph, yielding on each node,
starting at `head`."""
head = head or self._root_node
queue = []
queue.insert(0, head)
while queue:
node = queue.pop()
yield node.num, node.previous, node.siblings
for child in node.siblings:
if child in self._graph:
queue.insert(0, self._graph[child]) | def function[walk, parameter[self, head]]:
constant[Do a breadth-first walk of the graph, yielding on each node,
starting at `head`.]
variable[head] assign[=] <ast.BoolOp object at 0x7da20c6e7550>
variable[queue] assign[=] list[[]]
call[name[queue].insert, parameter[constant[0], name[head]]]
while name[queue] begin[:]
variable[node] assign[=] call[name[queue].pop, parameter[]]
<ast.Yield object at 0x7da2045672b0>
for taget[name[child]] in starred[name[node].siblings] begin[:]
if compare[name[child] in name[self]._graph] begin[:]
call[name[queue].insert, parameter[constant[0], call[name[self]._graph][name[child]]]] | keyword[def] identifier[walk] ( identifier[self] , identifier[head] = keyword[None] ):
literal[string]
identifier[head] = identifier[head] keyword[or] identifier[self] . identifier[_root_node]
identifier[queue] =[]
identifier[queue] . identifier[insert] ( literal[int] , identifier[head] )
keyword[while] identifier[queue] :
identifier[node] = identifier[queue] . identifier[pop] ()
keyword[yield] identifier[node] . identifier[num] , identifier[node] . identifier[previous] , identifier[node] . identifier[siblings]
keyword[for] identifier[child] keyword[in] identifier[node] . identifier[siblings] :
keyword[if] identifier[child] keyword[in] identifier[self] . identifier[_graph] :
identifier[queue] . identifier[insert] ( literal[int] , identifier[self] . identifier[_graph] [ identifier[child] ]) | def walk(self, head=None):
"""Do a breadth-first walk of the graph, yielding on each node,
starting at `head`."""
head = head or self._root_node
queue = []
queue.insert(0, head)
while queue:
node = queue.pop()
yield (node.num, node.previous, node.siblings)
for child in node.siblings:
if child in self._graph:
queue.insert(0, self._graph[child]) # depends on [control=['if'], data=['child']] # depends on [control=['for'], data=['child']] # depends on [control=['while'], data=[]] |
def uniquenessRatio(self, value):
"""Set private ``_uniqueness`` and reset ``_block_matcher``."""
if value >= 5 and value <= 15:
self._uniqueness = value
else:
raise InvalidUniquenessRatioError("Uniqueness ratio must be "
"between 5 and 15.")
self._replace_bm() | def function[uniquenessRatio, parameter[self, value]]:
constant[Set private ``_uniqueness`` and reset ``_block_matcher``.]
if <ast.BoolOp object at 0x7da20c990430> begin[:]
name[self]._uniqueness assign[=] name[value]
call[name[self]._replace_bm, parameter[]] | keyword[def] identifier[uniquenessRatio] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[value] >= literal[int] keyword[and] identifier[value] <= literal[int] :
identifier[self] . identifier[_uniqueness] = identifier[value]
keyword[else] :
keyword[raise] identifier[InvalidUniquenessRatioError] ( literal[string]
literal[string] )
identifier[self] . identifier[_replace_bm] () | def uniquenessRatio(self, value):
"""Set private ``_uniqueness`` and reset ``_block_matcher``."""
if value >= 5 and value <= 15:
self._uniqueness = value # depends on [control=['if'], data=[]]
else:
raise InvalidUniquenessRatioError('Uniqueness ratio must be between 5 and 15.')
self._replace_bm() |
def dump_objects(self):
"""Dump scheduler objects into a dump (temp) file
:return: None
"""
path = os.path.join(tempfile.gettempdir(),
'dump-obj-scheduler-%s-%d.json' % (self.name, int(time.time())))
logger.info('Dumping scheduler objects to: %s', path)
try:
fd = open(path, 'wb')
output = 'type:uuid:status:t_to_go:poller_tag:worker:command\n'
fd.write(output.encode('utf-8'))
for check in list(self.checks.values()):
output = 'check:%s:%s:%s:%s:%s:%s\n' \
% (check.uuid, check.status, check.t_to_go, check.poller_tag,
check.command, check.my_worker)
fd.write(output.encode('utf-8'))
logger.info('- dumped checks')
for action in list(self.actions.values()):
output = '%s: %s:%s:%s:%s:%s:%s\n'\
% (action.__class__.my_type, action.uuid, action.status,
action.t_to_go, action.reactionner_tag, action.command,
action.my_worker)
fd.write(output.encode('utf-8'))
logger.info('- dumped actions')
broks = []
for broker in list(self.my_daemon.brokers.values()):
for brok in broker.broks:
broks.append(brok)
for brok in broks:
output = 'BROK: %s:%s\n' % (brok.uuid, brok.type)
fd.write(output.encode('utf-8'))
logger.info('- dumped broks')
fd.close()
logger.info('Dumped')
except OSError as exp: # pragma: no cover, should never happen...
logger.critical("Error when writing the objects dump file %s : %s", path, str(exp)) | def function[dump_objects, parameter[self]]:
constant[Dump scheduler objects into a dump (temp) file
:return: None
]
variable[path] assign[=] call[name[os].path.join, parameter[call[name[tempfile].gettempdir, parameter[]], binary_operation[constant[dump-obj-scheduler-%s-%d.json] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18bcc9210>, <ast.Call object at 0x7da18bcc8550>]]]]]
call[name[logger].info, parameter[constant[Dumping scheduler objects to: %s], name[path]]]
<ast.Try object at 0x7da18bcca830> | keyword[def] identifier[dump_objects] ( identifier[self] ):
literal[string]
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[tempfile] . identifier[gettempdir] (),
literal[string] %( identifier[self] . identifier[name] , identifier[int] ( identifier[time] . identifier[time] ())))
identifier[logger] . identifier[info] ( literal[string] , identifier[path] )
keyword[try] :
identifier[fd] = identifier[open] ( identifier[path] , literal[string] )
identifier[output] = literal[string]
identifier[fd] . identifier[write] ( identifier[output] . identifier[encode] ( literal[string] ))
keyword[for] identifier[check] keyword[in] identifier[list] ( identifier[self] . identifier[checks] . identifier[values] ()):
identifier[output] = literal[string] %( identifier[check] . identifier[uuid] , identifier[check] . identifier[status] , identifier[check] . identifier[t_to_go] , identifier[check] . identifier[poller_tag] ,
identifier[check] . identifier[command] , identifier[check] . identifier[my_worker] )
identifier[fd] . identifier[write] ( identifier[output] . identifier[encode] ( literal[string] ))
identifier[logger] . identifier[info] ( literal[string] )
keyword[for] identifier[action] keyword[in] identifier[list] ( identifier[self] . identifier[actions] . identifier[values] ()):
identifier[output] = literal[string] %( identifier[action] . identifier[__class__] . identifier[my_type] , identifier[action] . identifier[uuid] , identifier[action] . identifier[status] ,
identifier[action] . identifier[t_to_go] , identifier[action] . identifier[reactionner_tag] , identifier[action] . identifier[command] ,
identifier[action] . identifier[my_worker] )
identifier[fd] . identifier[write] ( identifier[output] . identifier[encode] ( literal[string] ))
identifier[logger] . identifier[info] ( literal[string] )
identifier[broks] =[]
keyword[for] identifier[broker] keyword[in] identifier[list] ( identifier[self] . identifier[my_daemon] . identifier[brokers] . identifier[values] ()):
keyword[for] identifier[brok] keyword[in] identifier[broker] . identifier[broks] :
identifier[broks] . identifier[append] ( identifier[brok] )
keyword[for] identifier[brok] keyword[in] identifier[broks] :
identifier[output] = literal[string] %( identifier[brok] . identifier[uuid] , identifier[brok] . identifier[type] )
identifier[fd] . identifier[write] ( identifier[output] . identifier[encode] ( literal[string] ))
identifier[logger] . identifier[info] ( literal[string] )
identifier[fd] . identifier[close] ()
identifier[logger] . identifier[info] ( literal[string] )
keyword[except] identifier[OSError] keyword[as] identifier[exp] :
identifier[logger] . identifier[critical] ( literal[string] , identifier[path] , identifier[str] ( identifier[exp] )) | def dump_objects(self):
"""Dump scheduler objects into a dump (temp) file
:return: None
"""
path = os.path.join(tempfile.gettempdir(), 'dump-obj-scheduler-%s-%d.json' % (self.name, int(time.time())))
logger.info('Dumping scheduler objects to: %s', path)
try:
fd = open(path, 'wb')
output = 'type:uuid:status:t_to_go:poller_tag:worker:command\n'
fd.write(output.encode('utf-8'))
for check in list(self.checks.values()):
output = 'check:%s:%s:%s:%s:%s:%s\n' % (check.uuid, check.status, check.t_to_go, check.poller_tag, check.command, check.my_worker)
fd.write(output.encode('utf-8')) # depends on [control=['for'], data=['check']]
logger.info('- dumped checks')
for action in list(self.actions.values()):
output = '%s: %s:%s:%s:%s:%s:%s\n' % (action.__class__.my_type, action.uuid, action.status, action.t_to_go, action.reactionner_tag, action.command, action.my_worker)
fd.write(output.encode('utf-8')) # depends on [control=['for'], data=['action']]
logger.info('- dumped actions')
broks = []
for broker in list(self.my_daemon.brokers.values()):
for brok in broker.broks:
broks.append(brok) # depends on [control=['for'], data=['brok']] # depends on [control=['for'], data=['broker']]
for brok in broks:
output = 'BROK: %s:%s\n' % (brok.uuid, brok.type)
fd.write(output.encode('utf-8')) # depends on [control=['for'], data=['brok']]
logger.info('- dumped broks')
fd.close()
logger.info('Dumped') # depends on [control=['try'], data=[]]
except OSError as exp: # pragma: no cover, should never happen...
logger.critical('Error when writing the objects dump file %s : %s', path, str(exp)) # depends on [control=['except'], data=['exp']] |
def field_get_subfields(field):
""" Given a field, will place all subfields into a dictionary
Parameters:
* field - tuple: The field to get subfields for
Returns: a dictionary, codes as keys and a list of values as the value """
pairs = {}
for key, value in field[0]:
if key in pairs and pairs[key] != value:
pairs[key].append(value)
else:
pairs[key] = [value]
return pairs | def function[field_get_subfields, parameter[field]]:
constant[ Given a field, will place all subfields into a dictionary
Parameters:
* field - tuple: The field to get subfields for
Returns: a dictionary, codes as keys and a list of values as the value ]
variable[pairs] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da2047ea350>, <ast.Name object at 0x7da2047e9fc0>]]] in starred[call[name[field]][constant[0]]] begin[:]
if <ast.BoolOp object at 0x7da2047ea3e0> begin[:]
call[call[name[pairs]][name[key]].append, parameter[name[value]]]
return[name[pairs]] | keyword[def] identifier[field_get_subfields] ( identifier[field] ):
literal[string]
identifier[pairs] ={}
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[field] [ literal[int] ]:
keyword[if] identifier[key] keyword[in] identifier[pairs] keyword[and] identifier[pairs] [ identifier[key] ]!= identifier[value] :
identifier[pairs] [ identifier[key] ]. identifier[append] ( identifier[value] )
keyword[else] :
identifier[pairs] [ identifier[key] ]=[ identifier[value] ]
keyword[return] identifier[pairs] | def field_get_subfields(field):
""" Given a field, will place all subfields into a dictionary
Parameters:
* field - tuple: The field to get subfields for
Returns: a dictionary, codes as keys and a list of values as the value """
pairs = {}
for (key, value) in field[0]:
if key in pairs and pairs[key] != value:
pairs[key].append(value) # depends on [control=['if'], data=[]]
else:
pairs[key] = [value] # depends on [control=['for'], data=[]]
return pairs |
def _parse_game_date_and_location(self, boxscore):
"""
Retrieve the game's date and location.
The date and location of the game follow a more complicated parsing
scheme and should be handled differently from other tags. Both fields
are separated by a newline character ('\n') with the first line being
the date and the second being the location.
Parameters
----------
boxscore : PyQuery object
A PyQuery object containing all of the HTML data from the boxscore.
"""
scheme = BOXSCORE_SCHEME['time']
items = [i.text() for i in boxscore(scheme).items()]
game_info = items[0].split('\n')
time = ''
date = ''
stadium = ''
for line in game_info:
time_match = re.findall(r'(\d:\d\d|\d\d:\d\d)', line.lower())
if len(time_match) > 0:
time = line
for day in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']:
if day in line.lower():
date = line
# In general, locations are in the format 'Stadium Name - City,
# State'. Since the ' - ' characters seem to be unique to the
# location line, it should be safe to use this as a matcher.
if ' - ' in line:
stadium = line
setattr(self, '_time', time)
setattr(self, '_date', date)
setattr(self, '_stadium', stadium) | def function[_parse_game_date_and_location, parameter[self, boxscore]]:
constant[
Retrieve the game's date and location.
The date and location of the game follow a more complicated parsing
scheme and should be handled differently from other tags. Both fields
are separated by a newline character ('
') with the first line being
the date and the second being the location.
Parameters
----------
boxscore : PyQuery object
A PyQuery object containing all of the HTML data from the boxscore.
]
variable[scheme] assign[=] call[name[BOXSCORE_SCHEME]][constant[time]]
variable[items] assign[=] <ast.ListComp object at 0x7da1b0bd9690>
variable[game_info] assign[=] call[call[name[items]][constant[0]].split, parameter[constant[
]]]
variable[time] assign[=] constant[]
variable[date] assign[=] constant[]
variable[stadium] assign[=] constant[]
for taget[name[line]] in starred[name[game_info]] begin[:]
variable[time_match] assign[=] call[name[re].findall, parameter[constant[(\d:\d\d|\d\d:\d\d)], call[name[line].lower, parameter[]]]]
if compare[call[name[len], parameter[name[time_match]]] greater[>] constant[0]] begin[:]
variable[time] assign[=] name[line]
for taget[name[day]] in starred[list[[<ast.Constant object at 0x7da1b0b67730>, <ast.Constant object at 0x7da1b0b667d0>, <ast.Constant object at 0x7da1b0b645b0>, <ast.Constant object at 0x7da1b0b65a80>, <ast.Constant object at 0x7da1b0b64580>, <ast.Constant object at 0x7da1b0b661d0>, <ast.Constant object at 0x7da1b0b649d0>]]] begin[:]
if compare[name[day] in call[name[line].lower, parameter[]]] begin[:]
variable[date] assign[=] name[line]
if compare[constant[ - ] in name[line]] begin[:]
variable[stadium] assign[=] name[line]
call[name[setattr], parameter[name[self], constant[_time], name[time]]]
call[name[setattr], parameter[name[self], constant[_date], name[date]]]
call[name[setattr], parameter[name[self], constant[_stadium], name[stadium]]] | keyword[def] identifier[_parse_game_date_and_location] ( identifier[self] , identifier[boxscore] ):
literal[string]
identifier[scheme] = identifier[BOXSCORE_SCHEME] [ literal[string] ]
identifier[items] =[ identifier[i] . identifier[text] () keyword[for] identifier[i] keyword[in] identifier[boxscore] ( identifier[scheme] ). identifier[items] ()]
identifier[game_info] = identifier[items] [ literal[int] ]. identifier[split] ( literal[string] )
identifier[time] = literal[string]
identifier[date] = literal[string]
identifier[stadium] = literal[string]
keyword[for] identifier[line] keyword[in] identifier[game_info] :
identifier[time_match] = identifier[re] . identifier[findall] ( literal[string] , identifier[line] . identifier[lower] ())
keyword[if] identifier[len] ( identifier[time_match] )> literal[int] :
identifier[time] = identifier[line]
keyword[for] identifier[day] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] ]:
keyword[if] identifier[day] keyword[in] identifier[line] . identifier[lower] ():
identifier[date] = identifier[line]
keyword[if] literal[string] keyword[in] identifier[line] :
identifier[stadium] = identifier[line]
identifier[setattr] ( identifier[self] , literal[string] , identifier[time] )
identifier[setattr] ( identifier[self] , literal[string] , identifier[date] )
identifier[setattr] ( identifier[self] , literal[string] , identifier[stadium] ) | def _parse_game_date_and_location(self, boxscore):
"""
Retrieve the game's date and location.
The date and location of the game follow a more complicated parsing
scheme and should be handled differently from other tags. Both fields
are separated by a newline character ('
') with the first line being
the date and the second being the location.
Parameters
----------
boxscore : PyQuery object
A PyQuery object containing all of the HTML data from the boxscore.
"""
scheme = BOXSCORE_SCHEME['time']
items = [i.text() for i in boxscore(scheme).items()]
game_info = items[0].split('\n')
time = ''
date = ''
stadium = ''
for line in game_info:
time_match = re.findall('(\\d:\\d\\d|\\d\\d:\\d\\d)', line.lower())
if len(time_match) > 0:
time = line # depends on [control=['if'], data=[]]
for day in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']:
if day in line.lower():
date = line # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['day']]
# In general, locations are in the format 'Stadium Name - City,
# State'. Since the ' - ' characters seem to be unique to the
# location line, it should be safe to use this as a matcher.
if ' - ' in line:
stadium = line # depends on [control=['if'], data=['line']] # depends on [control=['for'], data=['line']]
setattr(self, '_time', time)
setattr(self, '_date', date)
setattr(self, '_stadium', stadium) |
def send_audio_packet(self, data, *, encode=True):
"""Sends an audio packet composed of the data.
You must be connected to play audio.
Parameters
----------
data: bytes
The :term:`py:bytes-like object` denoting PCM or Opus voice data.
encode: bool
Indicates if ``data`` should be encoded into Opus.
Raises
-------
ClientException
You are not connected.
OpusError
Encoding the data failed.
"""
self.checked_add('sequence', 1, 65535)
if encode:
encoded_data = self.encoder.encode(data, self.encoder.SAMPLES_PER_FRAME)
else:
encoded_data = data
packet = self._get_voice_packet(encoded_data)
try:
self.socket.sendto(packet, (self.endpoint_ip, self.voice_port))
except BlockingIOError:
log.warning('A packet has been dropped (seq: %s, timestamp: %s)', self.sequence, self.timestamp)
self.checked_add('timestamp', self.encoder.SAMPLES_PER_FRAME, 4294967295) | def function[send_audio_packet, parameter[self, data]]:
constant[Sends an audio packet composed of the data.
You must be connected to play audio.
Parameters
----------
data: bytes
The :term:`py:bytes-like object` denoting PCM or Opus voice data.
encode: bool
Indicates if ``data`` should be encoded into Opus.
Raises
-------
ClientException
You are not connected.
OpusError
Encoding the data failed.
]
call[name[self].checked_add, parameter[constant[sequence], constant[1], constant[65535]]]
if name[encode] begin[:]
variable[encoded_data] assign[=] call[name[self].encoder.encode, parameter[name[data], name[self].encoder.SAMPLES_PER_FRAME]]
variable[packet] assign[=] call[name[self]._get_voice_packet, parameter[name[encoded_data]]]
<ast.Try object at 0x7da1b1faab90>
call[name[self].checked_add, parameter[constant[timestamp], name[self].encoder.SAMPLES_PER_FRAME, constant[4294967295]]] | keyword[def] identifier[send_audio_packet] ( identifier[self] , identifier[data] ,*, identifier[encode] = keyword[True] ):
literal[string]
identifier[self] . identifier[checked_add] ( literal[string] , literal[int] , literal[int] )
keyword[if] identifier[encode] :
identifier[encoded_data] = identifier[self] . identifier[encoder] . identifier[encode] ( identifier[data] , identifier[self] . identifier[encoder] . identifier[SAMPLES_PER_FRAME] )
keyword[else] :
identifier[encoded_data] = identifier[data]
identifier[packet] = identifier[self] . identifier[_get_voice_packet] ( identifier[encoded_data] )
keyword[try] :
identifier[self] . identifier[socket] . identifier[sendto] ( identifier[packet] ,( identifier[self] . identifier[endpoint_ip] , identifier[self] . identifier[voice_port] ))
keyword[except] identifier[BlockingIOError] :
identifier[log] . identifier[warning] ( literal[string] , identifier[self] . identifier[sequence] , identifier[self] . identifier[timestamp] )
identifier[self] . identifier[checked_add] ( literal[string] , identifier[self] . identifier[encoder] . identifier[SAMPLES_PER_FRAME] , literal[int] ) | def send_audio_packet(self, data, *, encode=True):
"""Sends an audio packet composed of the data.
You must be connected to play audio.
Parameters
----------
data: bytes
The :term:`py:bytes-like object` denoting PCM or Opus voice data.
encode: bool
Indicates if ``data`` should be encoded into Opus.
Raises
-------
ClientException
You are not connected.
OpusError
Encoding the data failed.
"""
self.checked_add('sequence', 1, 65535)
if encode:
encoded_data = self.encoder.encode(data, self.encoder.SAMPLES_PER_FRAME) # depends on [control=['if'], data=[]]
else:
encoded_data = data
packet = self._get_voice_packet(encoded_data)
try:
self.socket.sendto(packet, (self.endpoint_ip, self.voice_port)) # depends on [control=['try'], data=[]]
except BlockingIOError:
log.warning('A packet has been dropped (seq: %s, timestamp: %s)', self.sequence, self.timestamp) # depends on [control=['except'], data=[]]
self.checked_add('timestamp', self.encoder.SAMPLES_PER_FRAME, 4294967295) |
def set_resolution(self, resolution=None):
"""
Add physical pixel dimensions
`resolution` supposed two be tuple of two parameterts: pixels per unit
and unit type; unit type may be omitted
pixels per unit could be simple integer or tuple of (ppu_x, ppu_y)
Also possible to use all three parameters im row
* resolution = ((1, 4), ) # wide pixels (4:1) without unit specifier
* resolution = (300, 'inch') # 300dpi in both dimensions
* resolution = (4, 1, 0) # tall pixels (1:4) without unit specifier
"""
if resolution is None:
self.resolution = None
return
# All in row
if len(resolution) == 3:
resolution = ((resolution[0], resolution[1]), resolution[2])
# Ensure length and convert all false to 0 (no unit)
if len(resolution) == 1 or not resolution[1]:
resolution = (resolution[0], 0)
# Single dimension
if isinstance(resolution[0], float) or isinteger(resolution[0]):
resolution = ((resolution[0], resolution[0]), resolution[1])
# Unit conversion
if resolution[1] in (1, 'm', 'meter'):
resolution = (resolution[0], 1)
elif resolution[1] in ('i', 'in', 'inch'):
resolution = ((int(resolution[0][0] / 0.0254 + 0.5),
int(resolution[0][1] / 0.0254 + 0.5)), 1)
elif resolution[1] in ('cm', 'centimeter'):
resolution = ((resolution[0][0] * 100,
resolution[0][1] * 100), 1)
self.resolution = resolution | def function[set_resolution, parameter[self, resolution]]:
constant[
Add physical pixel dimensions
`resolution` supposed two be tuple of two parameterts: pixels per unit
and unit type; unit type may be omitted
pixels per unit could be simple integer or tuple of (ppu_x, ppu_y)
Also possible to use all three parameters im row
* resolution = ((1, 4), ) # wide pixels (4:1) without unit specifier
* resolution = (300, 'inch') # 300dpi in both dimensions
* resolution = (4, 1, 0) # tall pixels (1:4) without unit specifier
]
if compare[name[resolution] is constant[None]] begin[:]
name[self].resolution assign[=] constant[None]
return[None]
if compare[call[name[len], parameter[name[resolution]]] equal[==] constant[3]] begin[:]
variable[resolution] assign[=] tuple[[<ast.Tuple object at 0x7da18dc9bc70>, <ast.Subscript object at 0x7da18dc98df0>]]
if <ast.BoolOp object at 0x7da18dc99b10> begin[:]
variable[resolution] assign[=] tuple[[<ast.Subscript object at 0x7da18dc98fd0>, <ast.Constant object at 0x7da18dc99120>]]
if <ast.BoolOp object at 0x7da18dc983d0> begin[:]
variable[resolution] assign[=] tuple[[<ast.Tuple object at 0x7da18dc9a6b0>, <ast.Subscript object at 0x7da18dc99c00>]]
if compare[call[name[resolution]][constant[1]] in tuple[[<ast.Constant object at 0x7da18dc98400>, <ast.Constant object at 0x7da18dc9b730>, <ast.Constant object at 0x7da18dc99270>]]] begin[:]
variable[resolution] assign[=] tuple[[<ast.Subscript object at 0x7da18dc99db0>, <ast.Constant object at 0x7da18dc9a650>]]
name[self].resolution assign[=] name[resolution] | keyword[def] identifier[set_resolution] ( identifier[self] , identifier[resolution] = keyword[None] ):
literal[string]
keyword[if] identifier[resolution] keyword[is] keyword[None] :
identifier[self] . identifier[resolution] = keyword[None]
keyword[return]
keyword[if] identifier[len] ( identifier[resolution] )== literal[int] :
identifier[resolution] =(( identifier[resolution] [ literal[int] ], identifier[resolution] [ literal[int] ]), identifier[resolution] [ literal[int] ])
keyword[if] identifier[len] ( identifier[resolution] )== literal[int] keyword[or] keyword[not] identifier[resolution] [ literal[int] ]:
identifier[resolution] =( identifier[resolution] [ literal[int] ], literal[int] )
keyword[if] identifier[isinstance] ( identifier[resolution] [ literal[int] ], identifier[float] ) keyword[or] identifier[isinteger] ( identifier[resolution] [ literal[int] ]):
identifier[resolution] =(( identifier[resolution] [ literal[int] ], identifier[resolution] [ literal[int] ]), identifier[resolution] [ literal[int] ])
keyword[if] identifier[resolution] [ literal[int] ] keyword[in] ( literal[int] , literal[string] , literal[string] ):
identifier[resolution] =( identifier[resolution] [ literal[int] ], literal[int] )
keyword[elif] identifier[resolution] [ literal[int] ] keyword[in] ( literal[string] , literal[string] , literal[string] ):
identifier[resolution] =(( identifier[int] ( identifier[resolution] [ literal[int] ][ literal[int] ]/ literal[int] + literal[int] ),
identifier[int] ( identifier[resolution] [ literal[int] ][ literal[int] ]/ literal[int] + literal[int] )), literal[int] )
keyword[elif] identifier[resolution] [ literal[int] ] keyword[in] ( literal[string] , literal[string] ):
identifier[resolution] =(( identifier[resolution] [ literal[int] ][ literal[int] ]* literal[int] ,
identifier[resolution] [ literal[int] ][ literal[int] ]* literal[int] ), literal[int] )
identifier[self] . identifier[resolution] = identifier[resolution] | def set_resolution(self, resolution=None):
"""
Add physical pixel dimensions
`resolution` supposed two be tuple of two parameterts: pixels per unit
and unit type; unit type may be omitted
pixels per unit could be simple integer or tuple of (ppu_x, ppu_y)
Also possible to use all three parameters im row
* resolution = ((1, 4), ) # wide pixels (4:1) without unit specifier
* resolution = (300, 'inch') # 300dpi in both dimensions
* resolution = (4, 1, 0) # tall pixels (1:4) without unit specifier
"""
if resolution is None:
self.resolution = None
return # depends on [control=['if'], data=[]]
# All in row
if len(resolution) == 3:
resolution = ((resolution[0], resolution[1]), resolution[2]) # depends on [control=['if'], data=[]]
# Ensure length and convert all false to 0 (no unit)
if len(resolution) == 1 or not resolution[1]:
resolution = (resolution[0], 0) # depends on [control=['if'], data=[]]
# Single dimension
if isinstance(resolution[0], float) or isinteger(resolution[0]):
resolution = ((resolution[0], resolution[0]), resolution[1]) # depends on [control=['if'], data=[]]
# Unit conversion
if resolution[1] in (1, 'm', 'meter'):
resolution = (resolution[0], 1) # depends on [control=['if'], data=[]]
elif resolution[1] in ('i', 'in', 'inch'):
resolution = ((int(resolution[0][0] / 0.0254 + 0.5), int(resolution[0][1] / 0.0254 + 0.5)), 1) # depends on [control=['if'], data=[]]
elif resolution[1] in ('cm', 'centimeter'):
resolution = ((resolution[0][0] * 100, resolution[0][1] * 100), 1) # depends on [control=['if'], data=[]]
self.resolution = resolution |
def search_meta(self, attr, value=None, stronly=False):
""" Get a list of Symbols by searching a specific meta
attribute, and optionally the value.
Parameters
----------
attr : str
The meta attribute to query.
value : None, str or list
The meta attribute to query. If you pass a float, or an int,
it'll be converted to a string, prior to searching.
stronly : bool, optional, default True
Return only a list of symbol names, as opposed
to the (entire) Symbol objects.
Returns
-------
List of Symbols or empty list
"""
if stronly:
qry = self.ses.query(Symbol.name).join(SymbolMeta)
else:
qry = self.ses.query(Symbol).join(SymbolMeta)
crits = []
if value is None:
crits.append(SymbolMeta.attr == attr)
else:
if isinstance(value, str):
values = [value]
elif isinstance(value, (tuple, list)):
values = value
for v in values:
crits.append(and_(SymbolMeta.attr == attr, SymbolMeta.value.like(value)))
if len(crits):
qry = qry.filter(or_(*crits))
qry = qry.order_by(Symbol.name)
if stronly:
return [sym[0] for sym in qry.distinct()]
else:
return [sym for sym in qry.distinct()] | def function[search_meta, parameter[self, attr, value, stronly]]:
constant[ Get a list of Symbols by searching a specific meta
attribute, and optionally the value.
Parameters
----------
attr : str
The meta attribute to query.
value : None, str or list
The meta attribute to query. If you pass a float, or an int,
it'll be converted to a string, prior to searching.
stronly : bool, optional, default True
Return only a list of symbol names, as opposed
to the (entire) Symbol objects.
Returns
-------
List of Symbols or empty list
]
if name[stronly] begin[:]
variable[qry] assign[=] call[call[name[self].ses.query, parameter[name[Symbol].name]].join, parameter[name[SymbolMeta]]]
variable[crits] assign[=] list[[]]
if compare[name[value] is constant[None]] begin[:]
call[name[crits].append, parameter[compare[name[SymbolMeta].attr equal[==] name[attr]]]]
if call[name[len], parameter[name[crits]]] begin[:]
variable[qry] assign[=] call[name[qry].filter, parameter[call[name[or_], parameter[<ast.Starred object at 0x7da18ede66e0>]]]]
variable[qry] assign[=] call[name[qry].order_by, parameter[name[Symbol].name]]
if name[stronly] begin[:]
return[<ast.ListComp object at 0x7da18ede5570>] | keyword[def] identifier[search_meta] ( identifier[self] , identifier[attr] , identifier[value] = keyword[None] , identifier[stronly] = keyword[False] ):
literal[string]
keyword[if] identifier[stronly] :
identifier[qry] = identifier[self] . identifier[ses] . identifier[query] ( identifier[Symbol] . identifier[name] ). identifier[join] ( identifier[SymbolMeta] )
keyword[else] :
identifier[qry] = identifier[self] . identifier[ses] . identifier[query] ( identifier[Symbol] ). identifier[join] ( identifier[SymbolMeta] )
identifier[crits] =[]
keyword[if] identifier[value] keyword[is] keyword[None] :
identifier[crits] . identifier[append] ( identifier[SymbolMeta] . identifier[attr] == identifier[attr] )
keyword[else] :
keyword[if] identifier[isinstance] ( identifier[value] , identifier[str] ):
identifier[values] =[ identifier[value] ]
keyword[elif] identifier[isinstance] ( identifier[value] ,( identifier[tuple] , identifier[list] )):
identifier[values] = identifier[value]
keyword[for] identifier[v] keyword[in] identifier[values] :
identifier[crits] . identifier[append] ( identifier[and_] ( identifier[SymbolMeta] . identifier[attr] == identifier[attr] , identifier[SymbolMeta] . identifier[value] . identifier[like] ( identifier[value] )))
keyword[if] identifier[len] ( identifier[crits] ):
identifier[qry] = identifier[qry] . identifier[filter] ( identifier[or_] (* identifier[crits] ))
identifier[qry] = identifier[qry] . identifier[order_by] ( identifier[Symbol] . identifier[name] )
keyword[if] identifier[stronly] :
keyword[return] [ identifier[sym] [ literal[int] ] keyword[for] identifier[sym] keyword[in] identifier[qry] . identifier[distinct] ()]
keyword[else] :
keyword[return] [ identifier[sym] keyword[for] identifier[sym] keyword[in] identifier[qry] . identifier[distinct] ()] | def search_meta(self, attr, value=None, stronly=False):
""" Get a list of Symbols by searching a specific meta
attribute, and optionally the value.
Parameters
----------
attr : str
The meta attribute to query.
value : None, str or list
The meta attribute to query. If you pass a float, or an int,
it'll be converted to a string, prior to searching.
stronly : bool, optional, default True
Return only a list of symbol names, as opposed
to the (entire) Symbol objects.
Returns
-------
List of Symbols or empty list
"""
if stronly:
qry = self.ses.query(Symbol.name).join(SymbolMeta) # depends on [control=['if'], data=[]]
else:
qry = self.ses.query(Symbol).join(SymbolMeta)
crits = []
if value is None:
crits.append(SymbolMeta.attr == attr) # depends on [control=['if'], data=[]]
else:
if isinstance(value, str):
values = [value] # depends on [control=['if'], data=[]]
elif isinstance(value, (tuple, list)):
values = value # depends on [control=['if'], data=[]]
for v in values:
crits.append(and_(SymbolMeta.attr == attr, SymbolMeta.value.like(value))) # depends on [control=['for'], data=[]]
if len(crits):
qry = qry.filter(or_(*crits)) # depends on [control=['if'], data=[]]
qry = qry.order_by(Symbol.name)
if stronly:
return [sym[0] for sym in qry.distinct()] # depends on [control=['if'], data=[]]
else:
return [sym for sym in qry.distinct()] |
def legacy_approximant_name(apx):
"""Convert the old style xml approximant name to a name
and phase_order. Alex: I hate this function. Please delete this when we
use Collin's new tables.
"""
apx = str(apx)
try:
order = sim.GetOrderFromString(apx)
except:
print("Warning: Could not read phase order from string, using default")
order = -1
name = sim.GetStringFromApproximant(sim.GetApproximantFromString(apx))
return name, order | def function[legacy_approximant_name, parameter[apx]]:
constant[Convert the old style xml approximant name to a name
and phase_order. Alex: I hate this function. Please delete this when we
use Collin's new tables.
]
variable[apx] assign[=] call[name[str], parameter[name[apx]]]
<ast.Try object at 0x7da20e9b2320>
variable[name] assign[=] call[name[sim].GetStringFromApproximant, parameter[call[name[sim].GetApproximantFromString, parameter[name[apx]]]]]
return[tuple[[<ast.Name object at 0x7da20e9b3f70>, <ast.Name object at 0x7da20e9b0ee0>]]] | keyword[def] identifier[legacy_approximant_name] ( identifier[apx] ):
literal[string]
identifier[apx] = identifier[str] ( identifier[apx] )
keyword[try] :
identifier[order] = identifier[sim] . identifier[GetOrderFromString] ( identifier[apx] )
keyword[except] :
identifier[print] ( literal[string] )
identifier[order] =- literal[int]
identifier[name] = identifier[sim] . identifier[GetStringFromApproximant] ( identifier[sim] . identifier[GetApproximantFromString] ( identifier[apx] ))
keyword[return] identifier[name] , identifier[order] | def legacy_approximant_name(apx):
"""Convert the old style xml approximant name to a name
and phase_order. Alex: I hate this function. Please delete this when we
use Collin's new tables.
"""
apx = str(apx)
try:
order = sim.GetOrderFromString(apx) # depends on [control=['try'], data=[]]
except:
print('Warning: Could not read phase order from string, using default')
order = -1 # depends on [control=['except'], data=[]]
name = sim.GetStringFromApproximant(sim.GetApproximantFromString(apx))
return (name, order) |
def get_pypi_auth(configfile='~/.pypirc'):
"""Read auth from pip config."""
pypi_cfg = ConfigParser()
if pypi_cfg.read(os.path.expanduser(configfile)):
try:
user = pypi_cfg.get('pypi', 'username')
pwd = pypi_cfg.get('pypi', 'password')
return user, pwd
except ConfigError:
notify.warning("No PyPI credentials in '{}',"
" will fall back to '~/.netrc'...".format(configfile))
return None | def function[get_pypi_auth, parameter[configfile]]:
constant[Read auth from pip config.]
variable[pypi_cfg] assign[=] call[name[ConfigParser], parameter[]]
if call[name[pypi_cfg].read, parameter[call[name[os].path.expanduser, parameter[name[configfile]]]]] begin[:]
<ast.Try object at 0x7da1b008a500>
return[constant[None]] | keyword[def] identifier[get_pypi_auth] ( identifier[configfile] = literal[string] ):
literal[string]
identifier[pypi_cfg] = identifier[ConfigParser] ()
keyword[if] identifier[pypi_cfg] . identifier[read] ( identifier[os] . identifier[path] . identifier[expanduser] ( identifier[configfile] )):
keyword[try] :
identifier[user] = identifier[pypi_cfg] . identifier[get] ( literal[string] , literal[string] )
identifier[pwd] = identifier[pypi_cfg] . identifier[get] ( literal[string] , literal[string] )
keyword[return] identifier[user] , identifier[pwd]
keyword[except] identifier[ConfigError] :
identifier[notify] . identifier[warning] ( literal[string]
literal[string] . identifier[format] ( identifier[configfile] ))
keyword[return] keyword[None] | def get_pypi_auth(configfile='~/.pypirc'):
"""Read auth from pip config."""
pypi_cfg = ConfigParser()
if pypi_cfg.read(os.path.expanduser(configfile)):
try:
user = pypi_cfg.get('pypi', 'username')
pwd = pypi_cfg.get('pypi', 'password')
return (user, pwd) # depends on [control=['try'], data=[]]
except ConfigError:
notify.warning("No PyPI credentials in '{}', will fall back to '~/.netrc'...".format(configfile)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return None |
def is_empty(self):
"""Determines if the priority queue has any elements.
Performs removal of any elements that were "marked-as-invalid".
:returns: true iff the priority queue has no elements.
"""
while self.pq:
if self.pq[0][1] != self.INVALID:
return False
else:
_, _, element = heapq.heappop(self.pq)
if element in self.element_finder:
del self.element_finder[element]
return True | def function[is_empty, parameter[self]]:
constant[Determines if the priority queue has any elements.
Performs removal of any elements that were "marked-as-invalid".
:returns: true iff the priority queue has no elements.
]
while name[self].pq begin[:]
if compare[call[call[name[self].pq][constant[0]]][constant[1]] not_equal[!=] name[self].INVALID] begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[is_empty] ( identifier[self] ):
literal[string]
keyword[while] identifier[self] . identifier[pq] :
keyword[if] identifier[self] . identifier[pq] [ literal[int] ][ literal[int] ]!= identifier[self] . identifier[INVALID] :
keyword[return] keyword[False]
keyword[else] :
identifier[_] , identifier[_] , identifier[element] = identifier[heapq] . identifier[heappop] ( identifier[self] . identifier[pq] )
keyword[if] identifier[element] keyword[in] identifier[self] . identifier[element_finder] :
keyword[del] identifier[self] . identifier[element_finder] [ identifier[element] ]
keyword[return] keyword[True] | def is_empty(self):
"""Determines if the priority queue has any elements.
Performs removal of any elements that were "marked-as-invalid".
:returns: true iff the priority queue has no elements.
"""
while self.pq:
if self.pq[0][1] != self.INVALID:
return False # depends on [control=['if'], data=[]]
else:
(_, _, element) = heapq.heappop(self.pq)
if element in self.element_finder:
del self.element_finder[element] # depends on [control=['if'], data=['element']] # depends on [control=['while'], data=[]]
return True |
def saveSettings(self, settings):
"""
Saves the data for this tree to the inputed xml entry.
:param xml | <xml.etree.ElementTree.Element>
:return <bool> success
"""
# save order data
settings.setValue('headerState', wrapVariant(str(self.header().saveState().toBase64())))
settings.setValue('sortColumn', wrapVariant(str(self.sortColumn())))
settings.setValue('sortOrder', wrapVariant(str(int(self.sortOrder()))))
settings.setValue('sortingEnabled', wrapVariant(str(self.isSortingEnabled()))) | def function[saveSettings, parameter[self, settings]]:
constant[
Saves the data for this tree to the inputed xml entry.
:param xml | <xml.etree.ElementTree.Element>
:return <bool> success
]
call[name[settings].setValue, parameter[constant[headerState], call[name[wrapVariant], parameter[call[name[str], parameter[call[call[call[name[self].header, parameter[]].saveState, parameter[]].toBase64, parameter[]]]]]]]]
call[name[settings].setValue, parameter[constant[sortColumn], call[name[wrapVariant], parameter[call[name[str], parameter[call[name[self].sortColumn, parameter[]]]]]]]]
call[name[settings].setValue, parameter[constant[sortOrder], call[name[wrapVariant], parameter[call[name[str], parameter[call[name[int], parameter[call[name[self].sortOrder, parameter[]]]]]]]]]]
call[name[settings].setValue, parameter[constant[sortingEnabled], call[name[wrapVariant], parameter[call[name[str], parameter[call[name[self].isSortingEnabled, parameter[]]]]]]]] | keyword[def] identifier[saveSettings] ( identifier[self] , identifier[settings] ):
literal[string]
identifier[settings] . identifier[setValue] ( literal[string] , identifier[wrapVariant] ( identifier[str] ( identifier[self] . identifier[header] (). identifier[saveState] (). identifier[toBase64] ())))
identifier[settings] . identifier[setValue] ( literal[string] , identifier[wrapVariant] ( identifier[str] ( identifier[self] . identifier[sortColumn] ())))
identifier[settings] . identifier[setValue] ( literal[string] , identifier[wrapVariant] ( identifier[str] ( identifier[int] ( identifier[self] . identifier[sortOrder] ()))))
identifier[settings] . identifier[setValue] ( literal[string] , identifier[wrapVariant] ( identifier[str] ( identifier[self] . identifier[isSortingEnabled] ()))) | def saveSettings(self, settings):
"""
Saves the data for this tree to the inputed xml entry.
:param xml | <xml.etree.ElementTree.Element>
:return <bool> success
""" # save order data
settings.setValue('headerState', wrapVariant(str(self.header().saveState().toBase64())))
settings.setValue('sortColumn', wrapVariant(str(self.sortColumn())))
settings.setValue('sortOrder', wrapVariant(str(int(self.sortOrder()))))
settings.setValue('sortingEnabled', wrapVariant(str(self.isSortingEnabled()))) |
def bracket(value_and_gradients_function,
search_interval,
f_lim,
max_iterations,
expansion_param=5.0):
"""Brackets the minimum given an initial starting point.
Applies the Hager Zhang bracketing algorithm to find an interval containing
a region with points satisfying Wolfe conditions. Uses the supplied initial
step size 'c', the right end point of the provided search interval, to find
such an interval. The only condition on 'c' is that it should be positive.
For more details see steps B0-B3 in [Hager and Zhang (2006)][2].
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
tensor and returns a namedtuple containing the value filed `f` of the
function and its derivative value field `df` at that point.
Alternatively, the function may representthe batching of `n` such line
functions (e.g. projecting a single multivariate objective function along
`n` distinct directions at once) accepting n points as input, i.e. a
tensor of shape [n], and return a tuple of two tensors of shape [n], the
function values and the corresponding derivatives at the input points.
search_interval: A namedtuple describing the current search interval,
must include the fields:
- converged: Boolean `Tensor` of shape [n], indicating batch members
where search has already converged. Interval for these batch members
wont be modified.
- failed: Boolean `Tensor` of shape [n], indicating batch members
where search has already failed. Interval for these batch members
wont be modified.
- iterations: Scalar int32 `Tensor`. Number of line search iterations
so far.
- func_evals: Scalar int32 `Tensor`. Number of function evaluations
so far.
- left: A namedtuple, as returned by value_and_gradients_function
evaluated at 0, the left end point of the current interval.
- right: A namedtuple, as returned by value_and_gradients_function,
of the right end point of the current interval (labelled 'c' above).
f_lim: real `Tensor` of shape [n]. The function value threshold for
the approximate Wolfe conditions to be checked for each batch member.
max_iterations: Int32 scalar `Tensor`. The maximum number of iterations
permitted. The limit applies equally to all batch members.
expansion_param: Scalar positive `Tensor` of real dtype. Must be greater
than `1.`. Used to expand the initial interval in case it does not bracket
a minimum.
Returns:
A namedtuple with the following fields.
iteration: An int32 scalar `Tensor`. The number of iterations performed.
Bounded above by `max_iterations` parameter.
stopped: A boolean `Tensor` of shape [n]. True for those batch members
where the algorithm terminated before reaching `max_iterations`.
failed: A boolean `Tensor` of shape [n]. True for those batch members
where an error was encountered during bracketing.
num_evals: An int32 scalar `Tensor`. The number of times the objective
function was evaluated.
left: Return value of value_and_gradients_function at the updated left
end point of the interval found.
right: Return value of value_and_gradients_function at the updated right
end point of the interval found.
"""
already_stopped = search_interval.failed | search_interval.converged
# If the slope at right end point is positive, step B1 in [2], then the given
# initial points already bracket a minimum.
bracketed = search_interval.right.df >= 0
# Bisection is needed, step B2, if right end point almost works as a new left
# end point but the objective value is too high.
needs_bisect = (
search_interval.right.df < 0) & (search_interval.right.f > f_lim)
# In these three cases bracketing is already `stopped` and there is no need
# to perform further evaluations. Otherwise the bracketing loop is needed to
# expand the interval, step B3, until the conditions are met.
initial_args = _IntermediateResult(
iteration=search_interval.iterations,
stopped=already_stopped | bracketed | needs_bisect,
failed=search_interval.failed,
num_evals=search_interval.func_evals,
left=search_interval.left,
right=search_interval.right)
def _loop_cond(curr):
return (curr.iteration <
max_iterations) & ~tf.reduce_all(input_tensor=curr.stopped)
def _loop_body(curr):
"""Main body of bracketing loop."""
# The loop maintains the invariant that curr.stopped is true if we have
# either: failed, successfully bracketed, or not yet bracketed but needs
# bisect. On the only remaining case, step B3 in [2]. case we need to
# expand and update the left/right values appropriately.
new_right = value_and_gradients_function(expansion_param * curr.right.x)
left = val_where(curr.stopped, curr.left, curr.right)
right = val_where(curr.stopped, curr.right, new_right)
# Updated the failed, bracketed, and needs_bisect conditions.
failed = curr.failed | ~is_finite(right)
bracketed = right.df >= 0
needs_bisect = (right.df < 0) & (right.f > f_lim)
return [_IntermediateResult(
iteration=curr.iteration + 1,
stopped=curr.stopped | failed | bracketed | needs_bisect,
failed=failed,
num_evals=curr.num_evals + 1,
left=left,
right=right)]
bracket_result = tf.while_loop(
cond=_loop_cond, body=_loop_body, loop_vars=[initial_args])[0]
# For entries where bisect is still needed, mark them as not yet stopped,
# reset the left end point, and run `_bisect` on them.
needs_bisect = (
(bracket_result.right.df < 0) & (bracket_result.right.f > f_lim))
stopped = already_stopped | bracket_result.failed | ~needs_bisect
left = val_where(stopped, bracket_result.left, search_interval.left)
bisect_args = bracket_result._replace(stopped=stopped, left=left)
return _bisect(value_and_gradients_function, bisect_args, f_lim) | def function[bracket, parameter[value_and_gradients_function, search_interval, f_lim, max_iterations, expansion_param]]:
constant[Brackets the minimum given an initial starting point.
Applies the Hager Zhang bracketing algorithm to find an interval containing
a region with points satisfying Wolfe conditions. Uses the supplied initial
step size 'c', the right end point of the provided search interval, to find
such an interval. The only condition on 'c' is that it should be positive.
For more details see steps B0-B3 in [Hager and Zhang (2006)][2].
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
tensor and returns a namedtuple containing the value filed `f` of the
function and its derivative value field `df` at that point.
Alternatively, the function may representthe batching of `n` such line
functions (e.g. projecting a single multivariate objective function along
`n` distinct directions at once) accepting n points as input, i.e. a
tensor of shape [n], and return a tuple of two tensors of shape [n], the
function values and the corresponding derivatives at the input points.
search_interval: A namedtuple describing the current search interval,
must include the fields:
- converged: Boolean `Tensor` of shape [n], indicating batch members
where search has already converged. Interval for these batch members
wont be modified.
- failed: Boolean `Tensor` of shape [n], indicating batch members
where search has already failed. Interval for these batch members
wont be modified.
- iterations: Scalar int32 `Tensor`. Number of line search iterations
so far.
- func_evals: Scalar int32 `Tensor`. Number of function evaluations
so far.
- left: A namedtuple, as returned by value_and_gradients_function
evaluated at 0, the left end point of the current interval.
- right: A namedtuple, as returned by value_and_gradients_function,
of the right end point of the current interval (labelled 'c' above).
f_lim: real `Tensor` of shape [n]. The function value threshold for
the approximate Wolfe conditions to be checked for each batch member.
max_iterations: Int32 scalar `Tensor`. The maximum number of iterations
permitted. The limit applies equally to all batch members.
expansion_param: Scalar positive `Tensor` of real dtype. Must be greater
than `1.`. Used to expand the initial interval in case it does not bracket
a minimum.
Returns:
A namedtuple with the following fields.
iteration: An int32 scalar `Tensor`. The number of iterations performed.
Bounded above by `max_iterations` parameter.
stopped: A boolean `Tensor` of shape [n]. True for those batch members
where the algorithm terminated before reaching `max_iterations`.
failed: A boolean `Tensor` of shape [n]. True for those batch members
where an error was encountered during bracketing.
num_evals: An int32 scalar `Tensor`. The number of times the objective
function was evaluated.
left: Return value of value_and_gradients_function at the updated left
end point of the interval found.
right: Return value of value_and_gradients_function at the updated right
end point of the interval found.
]
variable[already_stopped] assign[=] binary_operation[name[search_interval].failed <ast.BitOr object at 0x7da2590d6aa0> name[search_interval].converged]
variable[bracketed] assign[=] compare[name[search_interval].right.df greater_or_equal[>=] constant[0]]
variable[needs_bisect] assign[=] binary_operation[compare[name[search_interval].right.df less[<] constant[0]] <ast.BitAnd object at 0x7da2590d6b60> compare[name[search_interval].right.f greater[>] name[f_lim]]]
variable[initial_args] assign[=] call[name[_IntermediateResult], parameter[]]
def function[_loop_cond, parameter[curr]]:
return[binary_operation[compare[name[curr].iteration less[<] name[max_iterations]] <ast.BitAnd object at 0x7da2590d6b60> <ast.UnaryOp object at 0x7da1b02356f0>]]
def function[_loop_body, parameter[curr]]:
constant[Main body of bracketing loop.]
variable[new_right] assign[=] call[name[value_and_gradients_function], parameter[binary_operation[name[expansion_param] * name[curr].right.x]]]
variable[left] assign[=] call[name[val_where], parameter[name[curr].stopped, name[curr].left, name[curr].right]]
variable[right] assign[=] call[name[val_where], parameter[name[curr].stopped, name[curr].right, name[new_right]]]
variable[failed] assign[=] binary_operation[name[curr].failed <ast.BitOr object at 0x7da2590d6aa0> <ast.UnaryOp object at 0x7da1b02340a0>]
variable[bracketed] assign[=] compare[name[right].df greater_or_equal[>=] constant[0]]
variable[needs_bisect] assign[=] binary_operation[compare[name[right].df less[<] constant[0]] <ast.BitAnd object at 0x7da2590d6b60> compare[name[right].f greater[>] name[f_lim]]]
return[list[[<ast.Call object at 0x7da1b02108b0>]]]
variable[bracket_result] assign[=] call[call[name[tf].while_loop, parameter[]]][constant[0]]
variable[needs_bisect] assign[=] binary_operation[compare[name[bracket_result].right.df less[<] constant[0]] <ast.BitAnd object at 0x7da2590d6b60> compare[name[bracket_result].right.f greater[>] name[f_lim]]]
variable[stopped] assign[=] binary_operation[binary_operation[name[already_stopped] <ast.BitOr object at 0x7da2590d6aa0> name[bracket_result].failed] <ast.BitOr object at 0x7da2590d6aa0> <ast.UnaryOp object at 0x7da1b03e1b70>]
variable[left] assign[=] call[name[val_where], parameter[name[stopped], name[bracket_result].left, name[search_interval].left]]
variable[bisect_args] assign[=] call[name[bracket_result]._replace, parameter[]]
return[call[name[_bisect], parameter[name[value_and_gradients_function], name[bisect_args], name[f_lim]]]] | keyword[def] identifier[bracket] ( identifier[value_and_gradients_function] ,
identifier[search_interval] ,
identifier[f_lim] ,
identifier[max_iterations] ,
identifier[expansion_param] = literal[int] ):
literal[string]
identifier[already_stopped] = identifier[search_interval] . identifier[failed] | identifier[search_interval] . identifier[converged]
identifier[bracketed] = identifier[search_interval] . identifier[right] . identifier[df] >= literal[int]
identifier[needs_bisect] =(
identifier[search_interval] . identifier[right] . identifier[df] < literal[int] )&( identifier[search_interval] . identifier[right] . identifier[f] > identifier[f_lim] )
identifier[initial_args] = identifier[_IntermediateResult] (
identifier[iteration] = identifier[search_interval] . identifier[iterations] ,
identifier[stopped] = identifier[already_stopped] | identifier[bracketed] | identifier[needs_bisect] ,
identifier[failed] = identifier[search_interval] . identifier[failed] ,
identifier[num_evals] = identifier[search_interval] . identifier[func_evals] ,
identifier[left] = identifier[search_interval] . identifier[left] ,
identifier[right] = identifier[search_interval] . identifier[right] )
keyword[def] identifier[_loop_cond] ( identifier[curr] ):
keyword[return] ( identifier[curr] . identifier[iteration] <
identifier[max_iterations] )&~ identifier[tf] . identifier[reduce_all] ( identifier[input_tensor] = identifier[curr] . identifier[stopped] )
keyword[def] identifier[_loop_body] ( identifier[curr] ):
literal[string]
identifier[new_right] = identifier[value_and_gradients_function] ( identifier[expansion_param] * identifier[curr] . identifier[right] . identifier[x] )
identifier[left] = identifier[val_where] ( identifier[curr] . identifier[stopped] , identifier[curr] . identifier[left] , identifier[curr] . identifier[right] )
identifier[right] = identifier[val_where] ( identifier[curr] . identifier[stopped] , identifier[curr] . identifier[right] , identifier[new_right] )
identifier[failed] = identifier[curr] . identifier[failed] |~ identifier[is_finite] ( identifier[right] )
identifier[bracketed] = identifier[right] . identifier[df] >= literal[int]
identifier[needs_bisect] =( identifier[right] . identifier[df] < literal[int] )&( identifier[right] . identifier[f] > identifier[f_lim] )
keyword[return] [ identifier[_IntermediateResult] (
identifier[iteration] = identifier[curr] . identifier[iteration] + literal[int] ,
identifier[stopped] = identifier[curr] . identifier[stopped] | identifier[failed] | identifier[bracketed] | identifier[needs_bisect] ,
identifier[failed] = identifier[failed] ,
identifier[num_evals] = identifier[curr] . identifier[num_evals] + literal[int] ,
identifier[left] = identifier[left] ,
identifier[right] = identifier[right] )]
identifier[bracket_result] = identifier[tf] . identifier[while_loop] (
identifier[cond] = identifier[_loop_cond] , identifier[body] = identifier[_loop_body] , identifier[loop_vars] =[ identifier[initial_args] ])[ literal[int] ]
identifier[needs_bisect] =(
( identifier[bracket_result] . identifier[right] . identifier[df] < literal[int] )&( identifier[bracket_result] . identifier[right] . identifier[f] > identifier[f_lim] ))
identifier[stopped] = identifier[already_stopped] | identifier[bracket_result] . identifier[failed] |~ identifier[needs_bisect]
identifier[left] = identifier[val_where] ( identifier[stopped] , identifier[bracket_result] . identifier[left] , identifier[search_interval] . identifier[left] )
identifier[bisect_args] = identifier[bracket_result] . identifier[_replace] ( identifier[stopped] = identifier[stopped] , identifier[left] = identifier[left] )
keyword[return] identifier[_bisect] ( identifier[value_and_gradients_function] , identifier[bisect_args] , identifier[f_lim] ) | def bracket(value_and_gradients_function, search_interval, f_lim, max_iterations, expansion_param=5.0):
"""Brackets the minimum given an initial starting point.
Applies the Hager Zhang bracketing algorithm to find an interval containing
a region with points satisfying Wolfe conditions. Uses the supplied initial
step size 'c', the right end point of the provided search interval, to find
such an interval. The only condition on 'c' is that it should be positive.
For more details see steps B0-B3 in [Hager and Zhang (2006)][2].
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
tensor and returns a namedtuple containing the value filed `f` of the
function and its derivative value field `df` at that point.
Alternatively, the function may representthe batching of `n` such line
functions (e.g. projecting a single multivariate objective function along
`n` distinct directions at once) accepting n points as input, i.e. a
tensor of shape [n], and return a tuple of two tensors of shape [n], the
function values and the corresponding derivatives at the input points.
search_interval: A namedtuple describing the current search interval,
must include the fields:
- converged: Boolean `Tensor` of shape [n], indicating batch members
where search has already converged. Interval for these batch members
wont be modified.
- failed: Boolean `Tensor` of shape [n], indicating batch members
where search has already failed. Interval for these batch members
wont be modified.
- iterations: Scalar int32 `Tensor`. Number of line search iterations
so far.
- func_evals: Scalar int32 `Tensor`. Number of function evaluations
so far.
- left: A namedtuple, as returned by value_and_gradients_function
evaluated at 0, the left end point of the current interval.
- right: A namedtuple, as returned by value_and_gradients_function,
of the right end point of the current interval (labelled 'c' above).
f_lim: real `Tensor` of shape [n]. The function value threshold for
the approximate Wolfe conditions to be checked for each batch member.
max_iterations: Int32 scalar `Tensor`. The maximum number of iterations
permitted. The limit applies equally to all batch members.
expansion_param: Scalar positive `Tensor` of real dtype. Must be greater
than `1.`. Used to expand the initial interval in case it does not bracket
a minimum.
Returns:
A namedtuple with the following fields.
iteration: An int32 scalar `Tensor`. The number of iterations performed.
Bounded above by `max_iterations` parameter.
stopped: A boolean `Tensor` of shape [n]. True for those batch members
where the algorithm terminated before reaching `max_iterations`.
failed: A boolean `Tensor` of shape [n]. True for those batch members
where an error was encountered during bracketing.
num_evals: An int32 scalar `Tensor`. The number of times the objective
function was evaluated.
left: Return value of value_and_gradients_function at the updated left
end point of the interval found.
right: Return value of value_and_gradients_function at the updated right
end point of the interval found.
"""
already_stopped = search_interval.failed | search_interval.converged
# If the slope at right end point is positive, step B1 in [2], then the given
# initial points already bracket a minimum.
bracketed = search_interval.right.df >= 0
# Bisection is needed, step B2, if right end point almost works as a new left
# end point but the objective value is too high.
needs_bisect = (search_interval.right.df < 0) & (search_interval.right.f > f_lim)
# In these three cases bracketing is already `stopped` and there is no need
# to perform further evaluations. Otherwise the bracketing loop is needed to
# expand the interval, step B3, until the conditions are met.
initial_args = _IntermediateResult(iteration=search_interval.iterations, stopped=already_stopped | bracketed | needs_bisect, failed=search_interval.failed, num_evals=search_interval.func_evals, left=search_interval.left, right=search_interval.right)
def _loop_cond(curr):
return (curr.iteration < max_iterations) & ~tf.reduce_all(input_tensor=curr.stopped)
def _loop_body(curr):
"""Main body of bracketing loop."""
# The loop maintains the invariant that curr.stopped is true if we have
# either: failed, successfully bracketed, or not yet bracketed but needs
# bisect. On the only remaining case, step B3 in [2]. case we need to
# expand and update the left/right values appropriately.
new_right = value_and_gradients_function(expansion_param * curr.right.x)
left = val_where(curr.stopped, curr.left, curr.right)
right = val_where(curr.stopped, curr.right, new_right)
# Updated the failed, bracketed, and needs_bisect conditions.
failed = curr.failed | ~is_finite(right)
bracketed = right.df >= 0
needs_bisect = (right.df < 0) & (right.f > f_lim)
return [_IntermediateResult(iteration=curr.iteration + 1, stopped=curr.stopped | failed | bracketed | needs_bisect, failed=failed, num_evals=curr.num_evals + 1, left=left, right=right)]
bracket_result = tf.while_loop(cond=_loop_cond, body=_loop_body, loop_vars=[initial_args])[0]
# For entries where bisect is still needed, mark them as not yet stopped,
# reset the left end point, and run `_bisect` on them.
needs_bisect = (bracket_result.right.df < 0) & (bracket_result.right.f > f_lim)
stopped = already_stopped | bracket_result.failed | ~needs_bisect
left = val_where(stopped, bracket_result.left, search_interval.left)
bisect_args = bracket_result._replace(stopped=stopped, left=left)
return _bisect(value_and_gradients_function, bisect_args, f_lim) |
def authors(self, *usernames):
"""
Return the entries written by the given usernames
When multiple tags are provided, they operate as "OR" query.
"""
if len(usernames) == 1:
return self.filter(**{"author__{}".format(User.USERNAME_FIELD): usernames[0]})
else:
return self.filter(**{"author__{}__in".format(User.USERNAME_FIELD): usernames}) | def function[authors, parameter[self]]:
constant[
Return the entries written by the given usernames
When multiple tags are provided, they operate as "OR" query.
]
if compare[call[name[len], parameter[name[usernames]]] equal[==] constant[1]] begin[:]
return[call[name[self].filter, parameter[]]] | keyword[def] identifier[authors] ( identifier[self] ,* identifier[usernames] ):
literal[string]
keyword[if] identifier[len] ( identifier[usernames] )== literal[int] :
keyword[return] identifier[self] . identifier[filter] (**{ literal[string] . identifier[format] ( identifier[User] . identifier[USERNAME_FIELD] ): identifier[usernames] [ literal[int] ]})
keyword[else] :
keyword[return] identifier[self] . identifier[filter] (**{ literal[string] . identifier[format] ( identifier[User] . identifier[USERNAME_FIELD] ): identifier[usernames] }) | def authors(self, *usernames):
"""
Return the entries written by the given usernames
When multiple tags are provided, they operate as "OR" query.
"""
if len(usernames) == 1:
return self.filter(**{'author__{}'.format(User.USERNAME_FIELD): usernames[0]}) # depends on [control=['if'], data=[]]
else:
return self.filter(**{'author__{}__in'.format(User.USERNAME_FIELD): usernames}) |
def parse_hpo_phenotypes(hpo_lines):
"""Parse hpo phenotypes
Group the genes that a phenotype is associated to in 'genes'
Args:
hpo_lines(iterable(str)): A file handle to the hpo phenotypes file
Returns:
hpo_terms(dict): A dictionary with hpo_ids as keys and terms as values
{
<hpo_id>: {
'hpo_id':str,
'description': str,
'hgnc_symbols': list(str), # [<hgnc_symbol>, ...]
}
}
"""
hpo_terms = {}
LOG.info("Parsing hpo phenotypes...")
for index, line in enumerate(hpo_lines):
if index > 0 and len(line) > 0:
hpo_info = parse_hpo_phenotype(line)
hpo_term = hpo_info['hpo_id']
hgnc_symbol = hpo_info['hgnc_symbol']
if hpo_term in hpo_terms:
hpo_terms[hpo_term]['hgnc_symbols'].append(hgnc_symbol)
else:
hpo_terms[hpo_term] = {
'hpo_id':hpo_term,
'description': hpo_info['description'],
'hgnc_symbols': [hgnc_symbol]
}
LOG.info("Parsing done.")
return hpo_terms | def function[parse_hpo_phenotypes, parameter[hpo_lines]]:
constant[Parse hpo phenotypes
Group the genes that a phenotype is associated to in 'genes'
Args:
hpo_lines(iterable(str)): A file handle to the hpo phenotypes file
Returns:
hpo_terms(dict): A dictionary with hpo_ids as keys and terms as values
{
<hpo_id>: {
'hpo_id':str,
'description': str,
'hgnc_symbols': list(str), # [<hgnc_symbol>, ...]
}
}
]
variable[hpo_terms] assign[=] dictionary[[], []]
call[name[LOG].info, parameter[constant[Parsing hpo phenotypes...]]]
for taget[tuple[[<ast.Name object at 0x7da20c7c8e50>, <ast.Name object at 0x7da20c7c8c40>]]] in starred[call[name[enumerate], parameter[name[hpo_lines]]]] begin[:]
if <ast.BoolOp object at 0x7da20e9b1870> begin[:]
variable[hpo_info] assign[=] call[name[parse_hpo_phenotype], parameter[name[line]]]
variable[hpo_term] assign[=] call[name[hpo_info]][constant[hpo_id]]
variable[hgnc_symbol] assign[=] call[name[hpo_info]][constant[hgnc_symbol]]
if compare[name[hpo_term] in name[hpo_terms]] begin[:]
call[call[call[name[hpo_terms]][name[hpo_term]]][constant[hgnc_symbols]].append, parameter[name[hgnc_symbol]]]
call[name[LOG].info, parameter[constant[Parsing done.]]]
return[name[hpo_terms]] | keyword[def] identifier[parse_hpo_phenotypes] ( identifier[hpo_lines] ):
literal[string]
identifier[hpo_terms] ={}
identifier[LOG] . identifier[info] ( literal[string] )
keyword[for] identifier[index] , identifier[line] keyword[in] identifier[enumerate] ( identifier[hpo_lines] ):
keyword[if] identifier[index] > literal[int] keyword[and] identifier[len] ( identifier[line] )> literal[int] :
identifier[hpo_info] = identifier[parse_hpo_phenotype] ( identifier[line] )
identifier[hpo_term] = identifier[hpo_info] [ literal[string] ]
identifier[hgnc_symbol] = identifier[hpo_info] [ literal[string] ]
keyword[if] identifier[hpo_term] keyword[in] identifier[hpo_terms] :
identifier[hpo_terms] [ identifier[hpo_term] ][ literal[string] ]. identifier[append] ( identifier[hgnc_symbol] )
keyword[else] :
identifier[hpo_terms] [ identifier[hpo_term] ]={
literal[string] : identifier[hpo_term] ,
literal[string] : identifier[hpo_info] [ literal[string] ],
literal[string] :[ identifier[hgnc_symbol] ]
}
identifier[LOG] . identifier[info] ( literal[string] )
keyword[return] identifier[hpo_terms] | def parse_hpo_phenotypes(hpo_lines):
"""Parse hpo phenotypes
Group the genes that a phenotype is associated to in 'genes'
Args:
hpo_lines(iterable(str)): A file handle to the hpo phenotypes file
Returns:
hpo_terms(dict): A dictionary with hpo_ids as keys and terms as values
{
<hpo_id>: {
'hpo_id':str,
'description': str,
'hgnc_symbols': list(str), # [<hgnc_symbol>, ...]
}
}
"""
hpo_terms = {}
LOG.info('Parsing hpo phenotypes...')
for (index, line) in enumerate(hpo_lines):
if index > 0 and len(line) > 0:
hpo_info = parse_hpo_phenotype(line)
hpo_term = hpo_info['hpo_id']
hgnc_symbol = hpo_info['hgnc_symbol']
if hpo_term in hpo_terms:
hpo_terms[hpo_term]['hgnc_symbols'].append(hgnc_symbol) # depends on [control=['if'], data=['hpo_term', 'hpo_terms']]
else:
hpo_terms[hpo_term] = {'hpo_id': hpo_term, 'description': hpo_info['description'], 'hgnc_symbols': [hgnc_symbol]} # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
LOG.info('Parsing done.')
return hpo_terms |
def from_handle(fh, stream_default='fasta'):
"""
Look up the BioPython file type corresponding to a file-like object.
For stdin, stdout, and stderr, ``stream_default`` is used.
"""
if fh in (sys.stdin, sys.stdout, sys.stderr):
return stream_default
return from_filename(fh.name) | def function[from_handle, parameter[fh, stream_default]]:
constant[
Look up the BioPython file type corresponding to a file-like object.
For stdin, stdout, and stderr, ``stream_default`` is used.
]
if compare[name[fh] in tuple[[<ast.Attribute object at 0x7da1b1a2b460>, <ast.Attribute object at 0x7da1b1a2b220>, <ast.Attribute object at 0x7da1b1a2aec0>]]] begin[:]
return[name[stream_default]]
return[call[name[from_filename], parameter[name[fh].name]]] | keyword[def] identifier[from_handle] ( identifier[fh] , identifier[stream_default] = literal[string] ):
literal[string]
keyword[if] identifier[fh] keyword[in] ( identifier[sys] . identifier[stdin] , identifier[sys] . identifier[stdout] , identifier[sys] . identifier[stderr] ):
keyword[return] identifier[stream_default]
keyword[return] identifier[from_filename] ( identifier[fh] . identifier[name] ) | def from_handle(fh, stream_default='fasta'):
"""
Look up the BioPython file type corresponding to a file-like object.
For stdin, stdout, and stderr, ``stream_default`` is used.
"""
if fh in (sys.stdin, sys.stdout, sys.stderr):
return stream_default # depends on [control=['if'], data=[]]
return from_filename(fh.name) |
def check(self, key, value):
"""
Check whether `key` matchs the :attr:`keyword`. If so, set the
:attr:`value` to `value`.
Args:
key (str): Key which will be matched with :attr:`keyword`.
value (str): Value which will be assigned to :attr:`value` if keys
matches.
Returns:
True/False: Whether the key matched :attr:`keyword`.
"""
key = key.lower().strip()
# try unicode conversion
try:
key = key.decode("utf-8")
except UnicodeEncodeError:
pass
key = self._remove_accents(key)
if self.keyword in key.split():
self.value = value
return True
return False | def function[check, parameter[self, key, value]]:
constant[
Check whether `key` matchs the :attr:`keyword`. If so, set the
:attr:`value` to `value`.
Args:
key (str): Key which will be matched with :attr:`keyword`.
value (str): Value which will be assigned to :attr:`value` if keys
matches.
Returns:
True/False: Whether the key matched :attr:`keyword`.
]
variable[key] assign[=] call[call[name[key].lower, parameter[]].strip, parameter[]]
<ast.Try object at 0x7da1b14db070>
variable[key] assign[=] call[name[self]._remove_accents, parameter[name[key]]]
if compare[name[self].keyword in call[name[key].split, parameter[]]] begin[:]
name[self].value assign[=] name[value]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[check] ( identifier[self] , identifier[key] , identifier[value] ):
literal[string]
identifier[key] = identifier[key] . identifier[lower] (). identifier[strip] ()
keyword[try] :
identifier[key] = identifier[key] . identifier[decode] ( literal[string] )
keyword[except] identifier[UnicodeEncodeError] :
keyword[pass]
identifier[key] = identifier[self] . identifier[_remove_accents] ( identifier[key] )
keyword[if] identifier[self] . identifier[keyword] keyword[in] identifier[key] . identifier[split] ():
identifier[self] . identifier[value] = identifier[value]
keyword[return] keyword[True]
keyword[return] keyword[False] | def check(self, key, value):
"""
Check whether `key` matchs the :attr:`keyword`. If so, set the
:attr:`value` to `value`.
Args:
key (str): Key which will be matched with :attr:`keyword`.
value (str): Value which will be assigned to :attr:`value` if keys
matches.
Returns:
True/False: Whether the key matched :attr:`keyword`.
"""
key = key.lower().strip()
# try unicode conversion
try:
key = key.decode('utf-8') # depends on [control=['try'], data=[]]
except UnicodeEncodeError:
pass # depends on [control=['except'], data=[]]
key = self._remove_accents(key)
if self.keyword in key.split():
self.value = value
return True # depends on [control=['if'], data=[]]
return False |
def _get_site_amplification(self, sites, C):
"""
Compute the fourth term of the equation 1 described on paragraph :
The functional form Fs in Eq. (1) represents the site amplification and
it is given by FS = sj Cj , for j = 1,...,5, where sj are the
coefficients to be determined through the regression analysis,
while Cj are dummy variables used to denote the five different EC8
site classes
"""
ssa, ssb, ssc, ssd, sse = self._get_site_type_dummy_variables(sites)
return (C['sA'] * ssa) + (C['sB'] * ssb) + (C['sC'] * ssc) + \
(C['sD'] * ssd) + (C['sE'] * sse) | def function[_get_site_amplification, parameter[self, sites, C]]:
constant[
Compute the fourth term of the equation 1 described on paragraph :
The functional form Fs in Eq. (1) represents the site amplification and
it is given by FS = sj Cj , for j = 1,...,5, where sj are the
coefficients to be determined through the regression analysis,
while Cj are dummy variables used to denote the five different EC8
site classes
]
<ast.Tuple object at 0x7da204963fd0> assign[=] call[name[self]._get_site_type_dummy_variables, parameter[name[sites]]]
return[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[call[name[C]][constant[sA]] * name[ssa]] + binary_operation[call[name[C]][constant[sB]] * name[ssb]]] + binary_operation[call[name[C]][constant[sC]] * name[ssc]]] + binary_operation[call[name[C]][constant[sD]] * name[ssd]]] + binary_operation[call[name[C]][constant[sE]] * name[sse]]]] | keyword[def] identifier[_get_site_amplification] ( identifier[self] , identifier[sites] , identifier[C] ):
literal[string]
identifier[ssa] , identifier[ssb] , identifier[ssc] , identifier[ssd] , identifier[sse] = identifier[self] . identifier[_get_site_type_dummy_variables] ( identifier[sites] )
keyword[return] ( identifier[C] [ literal[string] ]* identifier[ssa] )+( identifier[C] [ literal[string] ]* identifier[ssb] )+( identifier[C] [ literal[string] ]* identifier[ssc] )+( identifier[C] [ literal[string] ]* identifier[ssd] )+( identifier[C] [ literal[string] ]* identifier[sse] ) | def _get_site_amplification(self, sites, C):
"""
Compute the fourth term of the equation 1 described on paragraph :
The functional form Fs in Eq. (1) represents the site amplification and
it is given by FS = sj Cj , for j = 1,...,5, where sj are the
coefficients to be determined through the regression analysis,
while Cj are dummy variables used to denote the five different EC8
site classes
"""
(ssa, ssb, ssc, ssd, sse) = self._get_site_type_dummy_variables(sites)
return C['sA'] * ssa + C['sB'] * ssb + C['sC'] * ssc + C['sD'] * ssd + C['sE'] * sse |
def from_phase(self, phase_name):
"""
Returns the result of a previous phase by its name
Parameters
----------
phase_name: str
The name of a previous phase
Returns
-------
result: Result
The result of that phase
Raises
------
exc.PipelineException
If no phase with the expected result is found
"""
try:
return self.__result_dict[phase_name]
except KeyError:
raise exc.PipelineException("No previous phase named {} found in results ({})".format(phase_name, ", ".join(
self.__result_dict.keys()))) | def function[from_phase, parameter[self, phase_name]]:
constant[
Returns the result of a previous phase by its name
Parameters
----------
phase_name: str
The name of a previous phase
Returns
-------
result: Result
The result of that phase
Raises
------
exc.PipelineException
If no phase with the expected result is found
]
<ast.Try object at 0x7da1b24e13c0> | keyword[def] identifier[from_phase] ( identifier[self] , identifier[phase_name] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[__result_dict] [ identifier[phase_name] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[exc] . identifier[PipelineException] ( literal[string] . identifier[format] ( identifier[phase_name] , literal[string] . identifier[join] (
identifier[self] . identifier[__result_dict] . identifier[keys] ()))) | def from_phase(self, phase_name):
"""
Returns the result of a previous phase by its name
Parameters
----------
phase_name: str
The name of a previous phase
Returns
-------
result: Result
The result of that phase
Raises
------
exc.PipelineException
If no phase with the expected result is found
"""
try:
return self.__result_dict[phase_name] # depends on [control=['try'], data=[]]
except KeyError:
raise exc.PipelineException('No previous phase named {} found in results ({})'.format(phase_name, ', '.join(self.__result_dict.keys()))) # depends on [control=['except'], data=[]] |
def setTimer(self, timeout, description=None):
"""
Sets a timer.
:param description:
:param timeout: timeout in seconds
:return: the timerId
"""
self.timerId += 1
timer = Timer(timeout, self.__timeoutHandler, (self.timerId, description))
timer.start()
self.timers[self.timerId] = timer
return self.timerId | def function[setTimer, parameter[self, timeout, description]]:
constant[
Sets a timer.
:param description:
:param timeout: timeout in seconds
:return: the timerId
]
<ast.AugAssign object at 0x7da1b1ecdcf0>
variable[timer] assign[=] call[name[Timer], parameter[name[timeout], name[self].__timeoutHandler, tuple[[<ast.Attribute object at 0x7da1b1ecda50>, <ast.Name object at 0x7da1b1ecee60>]]]]
call[name[timer].start, parameter[]]
call[name[self].timers][name[self].timerId] assign[=] name[timer]
return[name[self].timerId] | keyword[def] identifier[setTimer] ( identifier[self] , identifier[timeout] , identifier[description] = keyword[None] ):
literal[string]
identifier[self] . identifier[timerId] += literal[int]
identifier[timer] = identifier[Timer] ( identifier[timeout] , identifier[self] . identifier[__timeoutHandler] ,( identifier[self] . identifier[timerId] , identifier[description] ))
identifier[timer] . identifier[start] ()
identifier[self] . identifier[timers] [ identifier[self] . identifier[timerId] ]= identifier[timer]
keyword[return] identifier[self] . identifier[timerId] | def setTimer(self, timeout, description=None):
"""
Sets a timer.
:param description:
:param timeout: timeout in seconds
:return: the timerId
"""
self.timerId += 1
timer = Timer(timeout, self.__timeoutHandler, (self.timerId, description))
timer.start()
self.timers[self.timerId] = timer
return self.timerId |
def from_iter(cls, data, name=None):
"""Convenience method for loading data from an iterable.
Defaults to numerical indexing for x-axis.
Parameters
----------
data: iterable
An iterable of data (list, tuple, dict of key/val pairs)
name: string, default None
Name of the data set. If None (default), the name will be set to
``'table'``.
"""
if not name:
name = 'table'
if isinstance(data, (list, tuple)):
data = {x: y for x, y in enumerate(data)}
values = [{'idx': k, 'col': 'data', 'val': v}
for k, v in sorted(data.items())]
return cls(name, values=values) | def function[from_iter, parameter[cls, data, name]]:
constant[Convenience method for loading data from an iterable.
Defaults to numerical indexing for x-axis.
Parameters
----------
data: iterable
An iterable of data (list, tuple, dict of key/val pairs)
name: string, default None
Name of the data set. If None (default), the name will be set to
``'table'``.
]
if <ast.UnaryOp object at 0x7da18f09f400> begin[:]
variable[name] assign[=] constant[table]
if call[name[isinstance], parameter[name[data], tuple[[<ast.Name object at 0x7da18f09c280>, <ast.Name object at 0x7da18f09ed40>]]]] begin[:]
variable[data] assign[=] <ast.DictComp object at 0x7da18f09f490>
variable[values] assign[=] <ast.ListComp object at 0x7da18f58ded0>
return[call[name[cls], parameter[name[name]]]] | keyword[def] identifier[from_iter] ( identifier[cls] , identifier[data] , identifier[name] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[name] :
identifier[name] = literal[string]
keyword[if] identifier[isinstance] ( identifier[data] ,( identifier[list] , identifier[tuple] )):
identifier[data] ={ identifier[x] : identifier[y] keyword[for] identifier[x] , identifier[y] keyword[in] identifier[enumerate] ( identifier[data] )}
identifier[values] =[{ literal[string] : identifier[k] , literal[string] : literal[string] , literal[string] : identifier[v] }
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[sorted] ( identifier[data] . identifier[items] ())]
keyword[return] identifier[cls] ( identifier[name] , identifier[values] = identifier[values] ) | def from_iter(cls, data, name=None):
"""Convenience method for loading data from an iterable.
Defaults to numerical indexing for x-axis.
Parameters
----------
data: iterable
An iterable of data (list, tuple, dict of key/val pairs)
name: string, default None
Name of the data set. If None (default), the name will be set to
``'table'``.
"""
if not name:
name = 'table' # depends on [control=['if'], data=[]]
if isinstance(data, (list, tuple)):
data = {x: y for (x, y) in enumerate(data)} # depends on [control=['if'], data=[]]
values = [{'idx': k, 'col': 'data', 'val': v} for (k, v) in sorted(data.items())]
return cls(name, values=values) |
def list_zones():
'''
Displays a list of available time zones. Use this list when setting a
time zone using ``timezone.set_zone``
:return: a list of time zones
:rtype: list
CLI Example:
.. code-block:: bash
salt '*' timezone.list_zones
'''
ret = salt.utils.mac_utils.execute_return_result(
'systemsetup -listtimezones')
zones = salt.utils.mac_utils.parse_return(ret)
return [x.strip() for x in zones.splitlines()] | def function[list_zones, parameter[]]:
constant[
Displays a list of available time zones. Use this list when setting a
time zone using ``timezone.set_zone``
:return: a list of time zones
:rtype: list
CLI Example:
.. code-block:: bash
salt '*' timezone.list_zones
]
variable[ret] assign[=] call[name[salt].utils.mac_utils.execute_return_result, parameter[constant[systemsetup -listtimezones]]]
variable[zones] assign[=] call[name[salt].utils.mac_utils.parse_return, parameter[name[ret]]]
return[<ast.ListComp object at 0x7da1b1c67760>] | keyword[def] identifier[list_zones] ():
literal[string]
identifier[ret] = identifier[salt] . identifier[utils] . identifier[mac_utils] . identifier[execute_return_result] (
literal[string] )
identifier[zones] = identifier[salt] . identifier[utils] . identifier[mac_utils] . identifier[parse_return] ( identifier[ret] )
keyword[return] [ identifier[x] . identifier[strip] () keyword[for] identifier[x] keyword[in] identifier[zones] . identifier[splitlines] ()] | def list_zones():
"""
Displays a list of available time zones. Use this list when setting a
time zone using ``timezone.set_zone``
:return: a list of time zones
:rtype: list
CLI Example:
.. code-block:: bash
salt '*' timezone.list_zones
"""
ret = salt.utils.mac_utils.execute_return_result('systemsetup -listtimezones')
zones = salt.utils.mac_utils.parse_return(ret)
return [x.strip() for x in zones.splitlines()] |
def _is_peer(self, keys, node_self, node_other):
'''_is_peer
Low-level api: Return True if node_self and node_other are considered
as peer with regards to a set of keys.
Parameters
----------
keys : `list`
A list of keys in `{url}tagname` notation.
node_self : `Element`
An Element node on this side.
node_other : `Element`
An Element node on the other side.
Returns
-------
list
True if node_self is a peer of node_other, otherwise, return False.
'''
for key in keys:
s = list(node_self.iterchildren(tag=key))
o = list(node_other.iterchildren(tag=key))
if len(s) < 1 or len(o) < 1:
raise ConfigError("cannot find key '{}' in node {}" \
.format(key,
self.device.get_xpath(node_self)))
if len(s) > 1 or len(o) > 1:
raise ConfigError("not unique key '{}' in node {}" \
.format(key,
self.device.get_xpath(node_self)))
if s[0].text != o[0].text:
return False
return True | def function[_is_peer, parameter[self, keys, node_self, node_other]]:
constant[_is_peer
Low-level api: Return True if node_self and node_other are considered
as peer with regards to a set of keys.
Parameters
----------
keys : `list`
A list of keys in `{url}tagname` notation.
node_self : `Element`
An Element node on this side.
node_other : `Element`
An Element node on the other side.
Returns
-------
list
True if node_self is a peer of node_other, otherwise, return False.
]
for taget[name[key]] in starred[name[keys]] begin[:]
variable[s] assign[=] call[name[list], parameter[call[name[node_self].iterchildren, parameter[]]]]
variable[o] assign[=] call[name[list], parameter[call[name[node_other].iterchildren, parameter[]]]]
if <ast.BoolOp object at 0x7da1b2526c20> begin[:]
<ast.Raise object at 0x7da1b2525ed0>
if <ast.BoolOp object at 0x7da1b25276d0> begin[:]
<ast.Raise object at 0x7da1b2525300>
if compare[call[name[s]][constant[0]].text not_equal[!=] call[name[o]][constant[0]].text] begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[_is_peer] ( identifier[self] , identifier[keys] , identifier[node_self] , identifier[node_other] ):
literal[string]
keyword[for] identifier[key] keyword[in] identifier[keys] :
identifier[s] = identifier[list] ( identifier[node_self] . identifier[iterchildren] ( identifier[tag] = identifier[key] ))
identifier[o] = identifier[list] ( identifier[node_other] . identifier[iterchildren] ( identifier[tag] = identifier[key] ))
keyword[if] identifier[len] ( identifier[s] )< literal[int] keyword[or] identifier[len] ( identifier[o] )< literal[int] :
keyword[raise] identifier[ConfigError] ( literal[string] . identifier[format] ( identifier[key] ,
identifier[self] . identifier[device] . identifier[get_xpath] ( identifier[node_self] )))
keyword[if] identifier[len] ( identifier[s] )> literal[int] keyword[or] identifier[len] ( identifier[o] )> literal[int] :
keyword[raise] identifier[ConfigError] ( literal[string] . identifier[format] ( identifier[key] ,
identifier[self] . identifier[device] . identifier[get_xpath] ( identifier[node_self] )))
keyword[if] identifier[s] [ literal[int] ]. identifier[text] != identifier[o] [ literal[int] ]. identifier[text] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def _is_peer(self, keys, node_self, node_other):
"""_is_peer
Low-level api: Return True if node_self and node_other are considered
as peer with regards to a set of keys.
Parameters
----------
keys : `list`
A list of keys in `{url}tagname` notation.
node_self : `Element`
An Element node on this side.
node_other : `Element`
An Element node on the other side.
Returns
-------
list
True if node_self is a peer of node_other, otherwise, return False.
"""
for key in keys:
s = list(node_self.iterchildren(tag=key))
o = list(node_other.iterchildren(tag=key))
if len(s) < 1 or len(o) < 1:
raise ConfigError("cannot find key '{}' in node {}".format(key, self.device.get_xpath(node_self))) # depends on [control=['if'], data=[]]
if len(s) > 1 or len(o) > 1:
raise ConfigError("not unique key '{}' in node {}".format(key, self.device.get_xpath(node_self))) # depends on [control=['if'], data=[]]
if s[0].text != o[0].text:
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']]
return True |
def _run_blocking(self, args, input=None):
"""Internal API: run a blocking command with subprocess.
This closes any open non-blocking dialog before running the command.
Parameters
----------
args: Popen constructor arguments
Command to run.
input: string
Value to feed to the stdin of the process.
Returns
-------
(returncode, stdout)
The exit code (integer) and stdout value (string) from the process.
"""
# Close any existing dialog.
if self._process:
self.close()
# Make sure we grab stdout as text (not bytes).
kwargs = {}
kwargs['stdout'] = subprocess.PIPE
kwargs['universal_newlines'] = True
# Use the run() method if available (Python 3.5+).
if hasattr(subprocess, 'run'):
result = subprocess.run(args, input=input, **kwargs)
return result.returncode, result.stdout
# Have to do our own. If we need to feed stdin, we must open a pipe.
if input is not None:
kwargs['stdin'] = subprocess.PIPE
# Start the process.
with Popen(args, **kwargs) as proc:
# Talk to it (no timeout). This will wait until termination.
stdout, stderr = proc.communicate(input)
# Find out the return code.
returncode = proc.poll()
# Done.
return returncode, stdout | def function[_run_blocking, parameter[self, args, input]]:
constant[Internal API: run a blocking command with subprocess.
This closes any open non-blocking dialog before running the command.
Parameters
----------
args: Popen constructor arguments
Command to run.
input: string
Value to feed to the stdin of the process.
Returns
-------
(returncode, stdout)
The exit code (integer) and stdout value (string) from the process.
]
if name[self]._process begin[:]
call[name[self].close, parameter[]]
variable[kwargs] assign[=] dictionary[[], []]
call[name[kwargs]][constant[stdout]] assign[=] name[subprocess].PIPE
call[name[kwargs]][constant[universal_newlines]] assign[=] constant[True]
if call[name[hasattr], parameter[name[subprocess], constant[run]]] begin[:]
variable[result] assign[=] call[name[subprocess].run, parameter[name[args]]]
return[tuple[[<ast.Attribute object at 0x7da18fe93040>, <ast.Attribute object at 0x7da18fe92620>]]]
if compare[name[input] is_not constant[None]] begin[:]
call[name[kwargs]][constant[stdin]] assign[=] name[subprocess].PIPE
with call[name[Popen], parameter[name[args]]] begin[:]
<ast.Tuple object at 0x7da1b0fdf430> assign[=] call[name[proc].communicate, parameter[name[input]]]
variable[returncode] assign[=] call[name[proc].poll, parameter[]]
return[tuple[[<ast.Name object at 0x7da1b0fdfa30>, <ast.Name object at 0x7da1b0fdda50>]]] | keyword[def] identifier[_run_blocking] ( identifier[self] , identifier[args] , identifier[input] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[_process] :
identifier[self] . identifier[close] ()
identifier[kwargs] ={}
identifier[kwargs] [ literal[string] ]= identifier[subprocess] . identifier[PIPE]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[hasattr] ( identifier[subprocess] , literal[string] ):
identifier[result] = identifier[subprocess] . identifier[run] ( identifier[args] , identifier[input] = identifier[input] ,** identifier[kwargs] )
keyword[return] identifier[result] . identifier[returncode] , identifier[result] . identifier[stdout]
keyword[if] identifier[input] keyword[is] keyword[not] keyword[None] :
identifier[kwargs] [ literal[string] ]= identifier[subprocess] . identifier[PIPE]
keyword[with] identifier[Popen] ( identifier[args] ,** identifier[kwargs] ) keyword[as] identifier[proc] :
identifier[stdout] , identifier[stderr] = identifier[proc] . identifier[communicate] ( identifier[input] )
identifier[returncode] = identifier[proc] . identifier[poll] ()
keyword[return] identifier[returncode] , identifier[stdout] | def _run_blocking(self, args, input=None):
"""Internal API: run a blocking command with subprocess.
This closes any open non-blocking dialog before running the command.
Parameters
----------
args: Popen constructor arguments
Command to run.
input: string
Value to feed to the stdin of the process.
Returns
-------
(returncode, stdout)
The exit code (integer) and stdout value (string) from the process.
"""
# Close any existing dialog.
if self._process:
self.close() # depends on [control=['if'], data=[]]
# Make sure we grab stdout as text (not bytes).
kwargs = {}
kwargs['stdout'] = subprocess.PIPE
kwargs['universal_newlines'] = True
# Use the run() method if available (Python 3.5+).
if hasattr(subprocess, 'run'):
result = subprocess.run(args, input=input, **kwargs)
return (result.returncode, result.stdout) # depends on [control=['if'], data=[]]
# Have to do our own. If we need to feed stdin, we must open a pipe.
if input is not None:
kwargs['stdin'] = subprocess.PIPE # depends on [control=['if'], data=[]]
# Start the process.
with Popen(args, **kwargs) as proc:
# Talk to it (no timeout). This will wait until termination.
(stdout, stderr) = proc.communicate(input)
# Find out the return code.
returncode = proc.poll()
# Done.
return (returncode, stdout) # depends on [control=['with'], data=['proc']] |
async def scroll(self, value, mode='relative'):
"""Scroll the cursor in the result set to a new position
according to mode . Same as :meth:`Cursor.scroll`, but move cursor
on server side one by one row. If you want to move 20 rows forward
scroll will make 20 queries to move cursor. Currently only forward
scrolling is supported.
:param int value: move cursor to next position according to mode.
:param str mode: scroll mode, possible modes: `relative` and `absolute`
"""
self._check_executed()
if mode == 'relative':
if value < 0:
raise NotSupportedError("Backwards scrolling not supported "
"by this cursor")
for _ in range(value):
await self._read_next()
self._rownumber += value
elif mode == 'absolute':
if value < self._rownumber:
raise NotSupportedError(
"Backwards scrolling not supported by this cursor")
end = value - self._rownumber
for _ in range(end):
await self._read_next()
self._rownumber = value
else:
raise ProgrammingError("unknown scroll mode %s" % mode) | <ast.AsyncFunctionDef object at 0x7da1b1d8e920> | keyword[async] keyword[def] identifier[scroll] ( identifier[self] , identifier[value] , identifier[mode] = literal[string] ):
literal[string]
identifier[self] . identifier[_check_executed] ()
keyword[if] identifier[mode] == literal[string] :
keyword[if] identifier[value] < literal[int] :
keyword[raise] identifier[NotSupportedError] ( literal[string]
literal[string] )
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[value] ):
keyword[await] identifier[self] . identifier[_read_next] ()
identifier[self] . identifier[_rownumber] += identifier[value]
keyword[elif] identifier[mode] == literal[string] :
keyword[if] identifier[value] < identifier[self] . identifier[_rownumber] :
keyword[raise] identifier[NotSupportedError] (
literal[string] )
identifier[end] = identifier[value] - identifier[self] . identifier[_rownumber]
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[end] ):
keyword[await] identifier[self] . identifier[_read_next] ()
identifier[self] . identifier[_rownumber] = identifier[value]
keyword[else] :
keyword[raise] identifier[ProgrammingError] ( literal[string] % identifier[mode] ) | async def scroll(self, value, mode='relative'):
"""Scroll the cursor in the result set to a new position
according to mode . Same as :meth:`Cursor.scroll`, but move cursor
on server side one by one row. If you want to move 20 rows forward
scroll will make 20 queries to move cursor. Currently only forward
scrolling is supported.
:param int value: move cursor to next position according to mode.
:param str mode: scroll mode, possible modes: `relative` and `absolute`
"""
self._check_executed()
if mode == 'relative':
if value < 0:
raise NotSupportedError('Backwards scrolling not supported by this cursor') # depends on [control=['if'], data=[]]
for _ in range(value):
await self._read_next() # depends on [control=['for'], data=[]]
self._rownumber += value # depends on [control=['if'], data=[]]
elif mode == 'absolute':
if value < self._rownumber:
raise NotSupportedError('Backwards scrolling not supported by this cursor') # depends on [control=['if'], data=[]]
end = value - self._rownumber
for _ in range(end):
await self._read_next() # depends on [control=['for'], data=[]]
self._rownumber = value # depends on [control=['if'], data=[]]
else:
raise ProgrammingError('unknown scroll mode %s' % mode) |
def get_assessments_taken_for_assessment_offered(self, assessment_offered_id):
"""Gets an ``AssessmentTakenList`` by the given assessment offered.
In plenary mode, the returned list contains all known
assessments taken or an error results. Otherwise, the returned
list may contain only those assessments taken that are
accessible through this session.
arg: assessment_offered_id (osid.id.Id): ``Id`` of an
``AssessmentOffered``
return: (osid.assessment.AssessmentTakenList) - the returned
``AssessmentTaken`` list
raise: NullArgument - ``assessment_offered_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.learning.ActivityLookupSession.get_activities_for_objective_template
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('assessment',
collection='AssessmentTaken',
runtime=self._runtime)
result = collection.find(
dict({'assessmentOfferedId': str(assessment_offered_id)},
**self._view_filter()))
return objects.AssessmentTakenList(result, runtime=self._runtime) | def function[get_assessments_taken_for_assessment_offered, parameter[self, assessment_offered_id]]:
constant[Gets an ``AssessmentTakenList`` by the given assessment offered.
In plenary mode, the returned list contains all known
assessments taken or an error results. Otherwise, the returned
list may contain only those assessments taken that are
accessible through this session.
arg: assessment_offered_id (osid.id.Id): ``Id`` of an
``AssessmentOffered``
return: (osid.assessment.AssessmentTakenList) - the returned
``AssessmentTaken`` list
raise: NullArgument - ``assessment_offered_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
]
variable[collection] assign[=] call[name[JSONClientValidated], parameter[constant[assessment]]]
variable[result] assign[=] call[name[collection].find, parameter[call[name[dict], parameter[dictionary[[<ast.Constant object at 0x7da20c991240>], [<ast.Call object at 0x7da20c993f10>]]]]]]
return[call[name[objects].AssessmentTakenList, parameter[name[result]]]] | keyword[def] identifier[get_assessments_taken_for_assessment_offered] ( identifier[self] , identifier[assessment_offered_id] ):
literal[string]
identifier[collection] = identifier[JSONClientValidated] ( literal[string] ,
identifier[collection] = literal[string] ,
identifier[runtime] = identifier[self] . identifier[_runtime] )
identifier[result] = identifier[collection] . identifier[find] (
identifier[dict] ({ literal[string] : identifier[str] ( identifier[assessment_offered_id] )},
** identifier[self] . identifier[_view_filter] ()))
keyword[return] identifier[objects] . identifier[AssessmentTakenList] ( identifier[result] , identifier[runtime] = identifier[self] . identifier[_runtime] ) | def get_assessments_taken_for_assessment_offered(self, assessment_offered_id):
"""Gets an ``AssessmentTakenList`` by the given assessment offered.
In plenary mode, the returned list contains all known
assessments taken or an error results. Otherwise, the returned
list may contain only those assessments taken that are
accessible through this session.
arg: assessment_offered_id (osid.id.Id): ``Id`` of an
``AssessmentOffered``
return: (osid.assessment.AssessmentTakenList) - the returned
``AssessmentTaken`` list
raise: NullArgument - ``assessment_offered_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.learning.ActivityLookupSession.get_activities_for_objective_template
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('assessment', collection='AssessmentTaken', runtime=self._runtime)
result = collection.find(dict({'assessmentOfferedId': str(assessment_offered_id)}, **self._view_filter()))
return objects.AssessmentTakenList(result, runtime=self._runtime) |
def authenticate(self, login=None, password=None):
'''
Authenticated this client instance.
``login`` and ``password`` default to the environment
variables ``MS_LOGIN`` and ``MS_PASSWD`` respectively.
:param login: Email address associated with a microsoft account
:param password: Matching password
:raises: :class:`~xbox.exceptions.AuthenticationException`
:returns: Instance of :class:`~xbox.Client`
'''
if login is None:
login = os.environ.get('MS_LOGIN')
if password is None:
password = os.environ.get('MS_PASSWD')
if not login or not password:
msg = (
'Authentication credentials required. Please refer to '
'http://xbox.readthedocs.org/en/latest/authentication.html'
)
raise AuthenticationException(msg)
self.login = login
# firstly we have to GET the login page and extract
# certain data we need to include in our POST request.
# sadly the data is locked away in some javascript code
base_url = 'https://login.live.com/oauth20_authorize.srf?'
# if the query string is percent-encoded the server
# complains that client_id is missing
qs = unquote(urlencode({
'client_id': '0000000048093EE3',
'redirect_uri': 'https://login.live.com/oauth20_desktop.srf',
'response_type': 'token',
'display': 'touch',
'scope': 'service::user.auth.xboxlive.com::MBI_SSL',
'locale': 'en',
}))
resp = self.session.get(base_url + qs)
# python 3.x will error if this string is not a
# bytes-like object
url_re = b'urlPost:\\\'([A-Za-z0-9:\?_\-\.&/=]+)'
ppft_re = b'sFTTag:\\\'.*value="(.*)"/>'
login_post_url = re.search(url_re, resp.content).group(1)
post_data = {
'login': login,
'passwd': password,
'PPFT': re.search(ppft_re, resp.content).groups(1)[0],
'PPSX': 'Passpor',
'SI': 'Sign in',
'type': '11',
'NewUser': '1',
'LoginOptions': '1',
'i3': '36728',
'm1': '768',
'm2': '1184',
'm3': '0',
'i12': '1',
'i17': '0',
'i18': '__Login_Host|1',
}
resp = self.session.post(
login_post_url, data=post_data, allow_redirects=False,
)
if 'Location' not in resp.headers:
# we can only assume the login failed
msg = 'Could not log in with supplied credentials'
raise AuthenticationException(msg)
# the access token is included in fragment of the location header
location = resp.headers['Location']
parsed = urlparse(location)
fragment = parse_qs(parsed.fragment)
access_token = fragment['access_token'][0]
url = 'https://user.auth.xboxlive.com/user/authenticate'
resp = self.session.post(url, data=json.dumps({
"RelyingParty": "http://auth.xboxlive.com",
"TokenType": "JWT",
"Properties": {
"AuthMethod": "RPS",
"SiteName": "user.auth.xboxlive.com",
"RpsTicket": access_token,
}
}), headers={'Content-Type': 'application/json'})
json_data = resp.json()
user_token = json_data['Token']
uhs = json_data['DisplayClaims']['xui'][0]['uhs']
url = 'https://xsts.auth.xboxlive.com/xsts/authorize'
resp = self.session.post(url, data=json.dumps({
"RelyingParty": "http://xboxlive.com",
"TokenType": "JWT",
"Properties": {
"UserTokens": [user_token],
"SandboxId": "RETAIL",
}
}), headers={'Content-Type': 'application/json'})
response = resp.json()
self.AUTHORIZATION_HEADER = 'XBL3.0 x=%s;%s' % (uhs, response['Token'])
self.user_xid = response['DisplayClaims']['xui'][0]['xid']
self.authenticated = True
return self | def function[authenticate, parameter[self, login, password]]:
constant[
Authenticated this client instance.
``login`` and ``password`` default to the environment
variables ``MS_LOGIN`` and ``MS_PASSWD`` respectively.
:param login: Email address associated with a microsoft account
:param password: Matching password
:raises: :class:`~xbox.exceptions.AuthenticationException`
:returns: Instance of :class:`~xbox.Client`
]
if compare[name[login] is constant[None]] begin[:]
variable[login] assign[=] call[name[os].environ.get, parameter[constant[MS_LOGIN]]]
if compare[name[password] is constant[None]] begin[:]
variable[password] assign[=] call[name[os].environ.get, parameter[constant[MS_PASSWD]]]
if <ast.BoolOp object at 0x7da18bccab30> begin[:]
variable[msg] assign[=] constant[Authentication credentials required. Please refer to http://xbox.readthedocs.org/en/latest/authentication.html]
<ast.Raise object at 0x7da18bcc8250>
name[self].login assign[=] name[login]
variable[base_url] assign[=] constant[https://login.live.com/oauth20_authorize.srf?]
variable[qs] assign[=] call[name[unquote], parameter[call[name[urlencode], parameter[dictionary[[<ast.Constant object at 0x7da18bcca350>, <ast.Constant object at 0x7da18bcca980>, <ast.Constant object at 0x7da18bcca200>, <ast.Constant object at 0x7da18bcca620>, <ast.Constant object at 0x7da18bccb3d0>, <ast.Constant object at 0x7da18bccb1c0>], [<ast.Constant object at 0x7da18bccb7c0>, <ast.Constant object at 0x7da18bcca950>, <ast.Constant object at 0x7da18bcc9960>, <ast.Constant object at 0x7da18bcc9540>, <ast.Constant object at 0x7da18bccb940>, <ast.Constant object at 0x7da18bccada0>]]]]]]
variable[resp] assign[=] call[name[self].session.get, parameter[binary_operation[name[base_url] + name[qs]]]]
variable[url_re] assign[=] constant[b"urlPost:\\'([A-Za-z0-9:\\?_\\-\\.&/=]+)"]
variable[ppft_re] assign[=] constant[b'sFTTag:\\\'.*value="(.*)"/>']
variable[login_post_url] assign[=] call[call[name[re].search, parameter[name[url_re], name[resp].content]].group, parameter[constant[1]]]
variable[post_data] assign[=] dictionary[[<ast.Constant object at 0x7da18bcc96c0>, <ast.Constant object at 0x7da18bccb0a0>, <ast.Constant object at 0x7da18bcc8d30>, <ast.Constant object at 0x7da18bcca380>, <ast.Constant object at 0x7da18bcc82b0>, <ast.Constant object at 0x7da18bcc9450>, <ast.Constant object at 0x7da18bccae00>, <ast.Constant object at 0x7da18bccbd90>, <ast.Constant object at 0x7da18bcc8190>, <ast.Constant object at 0x7da18bcc9780>, <ast.Constant object at 0x7da18bcca4d0>, <ast.Constant object at 0x7da18bcca680>, <ast.Constant object at 0x7da18bcc8eb0>, <ast.Constant object at 0x7da18bcc9420>, <ast.Constant object at 0x7da18bcc8a30>], [<ast.Name object at 0x7da18bcc8dc0>, <ast.Name object at 0x7da18bccac50>, <ast.Subscript object at 0x7da18bccbe50>, <ast.Constant object at 0x7da18bcc90f0>, <ast.Constant object at 0x7da207f03eb0>, <ast.Constant object at 0x7da207f003d0>, <ast.Constant object at 0x7da207f001f0>, <ast.Constant object at 0x7da207f03460>, <ast.Constant object at 0x7da207f03130>, <ast.Constant object at 0x7da207f03190>, <ast.Constant object at 0x7da207f02ef0>, <ast.Constant object at 0x7da207f02980>, <ast.Constant object at 0x7da207f028f0>, <ast.Constant object at 0x7da207f022f0>, <ast.Constant object at 0x7da207f01fc0>]]
variable[resp] assign[=] call[name[self].session.post, parameter[name[login_post_url]]]
if compare[constant[Location] <ast.NotIn object at 0x7da2590d7190> name[resp].headers] begin[:]
variable[msg] assign[=] constant[Could not log in with supplied credentials]
<ast.Raise object at 0x7da18f58fac0>
variable[location] assign[=] call[name[resp].headers][constant[Location]]
variable[parsed] assign[=] call[name[urlparse], parameter[name[location]]]
variable[fragment] assign[=] call[name[parse_qs], parameter[name[parsed].fragment]]
variable[access_token] assign[=] call[call[name[fragment]][constant[access_token]]][constant[0]]
variable[url] assign[=] constant[https://user.auth.xboxlive.com/user/authenticate]
variable[resp] assign[=] call[name[self].session.post, parameter[name[url]]]
variable[json_data] assign[=] call[name[resp].json, parameter[]]
variable[user_token] assign[=] call[name[json_data]][constant[Token]]
variable[uhs] assign[=] call[call[call[call[name[json_data]][constant[DisplayClaims]]][constant[xui]]][constant[0]]][constant[uhs]]
variable[url] assign[=] constant[https://xsts.auth.xboxlive.com/xsts/authorize]
variable[resp] assign[=] call[name[self].session.post, parameter[name[url]]]
variable[response] assign[=] call[name[resp].json, parameter[]]
name[self].AUTHORIZATION_HEADER assign[=] binary_operation[constant[XBL3.0 x=%s;%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18bcc8100>, <ast.Subscript object at 0x7da18bcca260>]]]
name[self].user_xid assign[=] call[call[call[call[name[response]][constant[DisplayClaims]]][constant[xui]]][constant[0]]][constant[xid]]
name[self].authenticated assign[=] constant[True]
return[name[self]] | keyword[def] identifier[authenticate] ( identifier[self] , identifier[login] = keyword[None] , identifier[password] = keyword[None] ):
literal[string]
keyword[if] identifier[login] keyword[is] keyword[None] :
identifier[login] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] )
keyword[if] identifier[password] keyword[is] keyword[None] :
identifier[password] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] )
keyword[if] keyword[not] identifier[login] keyword[or] keyword[not] identifier[password] :
identifier[msg] =(
literal[string]
literal[string]
)
keyword[raise] identifier[AuthenticationException] ( identifier[msg] )
identifier[self] . identifier[login] = identifier[login]
identifier[base_url] = literal[string]
identifier[qs] = identifier[unquote] ( identifier[urlencode] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}))
identifier[resp] = identifier[self] . identifier[session] . identifier[get] ( identifier[base_url] + identifier[qs] )
identifier[url_re] = literal[string]
identifier[ppft_re] = literal[string]
identifier[login_post_url] = identifier[re] . identifier[search] ( identifier[url_re] , identifier[resp] . identifier[content] ). identifier[group] ( literal[int] )
identifier[post_data] ={
literal[string] : identifier[login] ,
literal[string] : identifier[password] ,
literal[string] : identifier[re] . identifier[search] ( identifier[ppft_re] , identifier[resp] . identifier[content] ). identifier[groups] ( literal[int] )[ literal[int] ],
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
identifier[resp] = identifier[self] . identifier[session] . identifier[post] (
identifier[login_post_url] , identifier[data] = identifier[post_data] , identifier[allow_redirects] = keyword[False] ,
)
keyword[if] literal[string] keyword[not] keyword[in] identifier[resp] . identifier[headers] :
identifier[msg] = literal[string]
keyword[raise] identifier[AuthenticationException] ( identifier[msg] )
identifier[location] = identifier[resp] . identifier[headers] [ literal[string] ]
identifier[parsed] = identifier[urlparse] ( identifier[location] )
identifier[fragment] = identifier[parse_qs] ( identifier[parsed] . identifier[fragment] )
identifier[access_token] = identifier[fragment] [ literal[string] ][ literal[int] ]
identifier[url] = literal[string]
identifier[resp] = identifier[self] . identifier[session] . identifier[post] ( identifier[url] , identifier[data] = identifier[json] . identifier[dumps] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] :{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : identifier[access_token] ,
}
}), identifier[headers] ={ literal[string] : literal[string] })
identifier[json_data] = identifier[resp] . identifier[json] ()
identifier[user_token] = identifier[json_data] [ literal[string] ]
identifier[uhs] = identifier[json_data] [ literal[string] ][ literal[string] ][ literal[int] ][ literal[string] ]
identifier[url] = literal[string]
identifier[resp] = identifier[self] . identifier[session] . identifier[post] ( identifier[url] , identifier[data] = identifier[json] . identifier[dumps] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] :{
literal[string] :[ identifier[user_token] ],
literal[string] : literal[string] ,
}
}), identifier[headers] ={ literal[string] : literal[string] })
identifier[response] = identifier[resp] . identifier[json] ()
identifier[self] . identifier[AUTHORIZATION_HEADER] = literal[string] %( identifier[uhs] , identifier[response] [ literal[string] ])
identifier[self] . identifier[user_xid] = identifier[response] [ literal[string] ][ literal[string] ][ literal[int] ][ literal[string] ]
identifier[self] . identifier[authenticated] = keyword[True]
keyword[return] identifier[self] | def authenticate(self, login=None, password=None):
"""
Authenticated this client instance.
``login`` and ``password`` default to the environment
variables ``MS_LOGIN`` and ``MS_PASSWD`` respectively.
:param login: Email address associated with a microsoft account
:param password: Matching password
:raises: :class:`~xbox.exceptions.AuthenticationException`
:returns: Instance of :class:`~xbox.Client`
"""
if login is None:
login = os.environ.get('MS_LOGIN') # depends on [control=['if'], data=['login']]
if password is None:
password = os.environ.get('MS_PASSWD') # depends on [control=['if'], data=['password']]
if not login or not password:
msg = 'Authentication credentials required. Please refer to http://xbox.readthedocs.org/en/latest/authentication.html'
raise AuthenticationException(msg) # depends on [control=['if'], data=[]]
self.login = login
# firstly we have to GET the login page and extract
# certain data we need to include in our POST request.
# sadly the data is locked away in some javascript code
base_url = 'https://login.live.com/oauth20_authorize.srf?'
# if the query string is percent-encoded the server
# complains that client_id is missing
qs = unquote(urlencode({'client_id': '0000000048093EE3', 'redirect_uri': 'https://login.live.com/oauth20_desktop.srf', 'response_type': 'token', 'display': 'touch', 'scope': 'service::user.auth.xboxlive.com::MBI_SSL', 'locale': 'en'}))
resp = self.session.get(base_url + qs)
# python 3.x will error if this string is not a
# bytes-like object
url_re = b"urlPost:\\'([A-Za-z0-9:\\?_\\-\\.&/=]+)"
ppft_re = b'sFTTag:\\\'.*value="(.*)"/>'
login_post_url = re.search(url_re, resp.content).group(1)
post_data = {'login': login, 'passwd': password, 'PPFT': re.search(ppft_re, resp.content).groups(1)[0], 'PPSX': 'Passpor', 'SI': 'Sign in', 'type': '11', 'NewUser': '1', 'LoginOptions': '1', 'i3': '36728', 'm1': '768', 'm2': '1184', 'm3': '0', 'i12': '1', 'i17': '0', 'i18': '__Login_Host|1'}
resp = self.session.post(login_post_url, data=post_data, allow_redirects=False)
if 'Location' not in resp.headers:
# we can only assume the login failed
msg = 'Could not log in with supplied credentials'
raise AuthenticationException(msg) # depends on [control=['if'], data=[]]
# the access token is included in fragment of the location header
location = resp.headers['Location']
parsed = urlparse(location)
fragment = parse_qs(parsed.fragment)
access_token = fragment['access_token'][0]
url = 'https://user.auth.xboxlive.com/user/authenticate'
resp = self.session.post(url, data=json.dumps({'RelyingParty': 'http://auth.xboxlive.com', 'TokenType': 'JWT', 'Properties': {'AuthMethod': 'RPS', 'SiteName': 'user.auth.xboxlive.com', 'RpsTicket': access_token}}), headers={'Content-Type': 'application/json'})
json_data = resp.json()
user_token = json_data['Token']
uhs = json_data['DisplayClaims']['xui'][0]['uhs']
url = 'https://xsts.auth.xboxlive.com/xsts/authorize'
resp = self.session.post(url, data=json.dumps({'RelyingParty': 'http://xboxlive.com', 'TokenType': 'JWT', 'Properties': {'UserTokens': [user_token], 'SandboxId': 'RETAIL'}}), headers={'Content-Type': 'application/json'})
response = resp.json()
self.AUTHORIZATION_HEADER = 'XBL3.0 x=%s;%s' % (uhs, response['Token'])
self.user_xid = response['DisplayClaims']['xui'][0]['xid']
self.authenticated = True
return self |
def refill_main_wallet(self, from_address, to_address, nfees, ntokens, password, min_confirmations=6, sync=False):
"""
Refill the Federation wallet with tokens and fees. This keeps the federation wallet clean.
Dealing with exact values simplifies the transactions. No need to calculate change. Easier to keep track of the
unspents and prevent double spends that would result in transactions being rejected by the bitcoin network.
Args:
from_address (Tuple[str]): Refill wallet address. Refills the federation wallet with tokens and fees
to_address (str): Federation wallet address
nfees (int): Number of fees to transfer. Each fee is 10000 satoshi. Used to pay for the transactions
ntokens (int): Number of tokens to transfer. Each token is 600 satoshi. Used to register hashes in the blockchain
password (str): Password for the Refill wallet. Used to sign the transaction
min_confirmations (int): Number of confirmations when chosing the inputs of the transaction. Defaults to 6
sync (bool): Perform the transaction in synchronous mode, the call to the function will block until there is at
least on confirmation on the blockchain. Defaults to False
Returns:
str: transaction id
"""
path, from_address = from_address
unsigned_tx = self._t.simple_transaction(from_address,
[(to_address, self.fee)] * nfees + [(to_address, self.token)] * ntokens,
min_confirmations=min_confirmations)
signed_tx = self._t.sign_transaction(unsigned_tx, password)
txid = self._t.push(signed_tx)
return txid | def function[refill_main_wallet, parameter[self, from_address, to_address, nfees, ntokens, password, min_confirmations, sync]]:
constant[
Refill the Federation wallet with tokens and fees. This keeps the federation wallet clean.
Dealing with exact values simplifies the transactions. No need to calculate change. Easier to keep track of the
unspents and prevent double spends that would result in transactions being rejected by the bitcoin network.
Args:
from_address (Tuple[str]): Refill wallet address. Refills the federation wallet with tokens and fees
to_address (str): Federation wallet address
nfees (int): Number of fees to transfer. Each fee is 10000 satoshi. Used to pay for the transactions
ntokens (int): Number of tokens to transfer. Each token is 600 satoshi. Used to register hashes in the blockchain
password (str): Password for the Refill wallet. Used to sign the transaction
min_confirmations (int): Number of confirmations when chosing the inputs of the transaction. Defaults to 6
sync (bool): Perform the transaction in synchronous mode, the call to the function will block until there is at
least on confirmation on the blockchain. Defaults to False
Returns:
str: transaction id
]
<ast.Tuple object at 0x7da1b092f130> assign[=] name[from_address]
variable[unsigned_tx] assign[=] call[name[self]._t.simple_transaction, parameter[name[from_address], binary_operation[binary_operation[list[[<ast.Tuple object at 0x7da1b092c7c0>]] * name[nfees]] + binary_operation[list[[<ast.Tuple object at 0x7da1b092dae0>]] * name[ntokens]]]]]
variable[signed_tx] assign[=] call[name[self]._t.sign_transaction, parameter[name[unsigned_tx], name[password]]]
variable[txid] assign[=] call[name[self]._t.push, parameter[name[signed_tx]]]
return[name[txid]] | keyword[def] identifier[refill_main_wallet] ( identifier[self] , identifier[from_address] , identifier[to_address] , identifier[nfees] , identifier[ntokens] , identifier[password] , identifier[min_confirmations] = literal[int] , identifier[sync] = keyword[False] ):
literal[string]
identifier[path] , identifier[from_address] = identifier[from_address]
identifier[unsigned_tx] = identifier[self] . identifier[_t] . identifier[simple_transaction] ( identifier[from_address] ,
[( identifier[to_address] , identifier[self] . identifier[fee] )]* identifier[nfees] +[( identifier[to_address] , identifier[self] . identifier[token] )]* identifier[ntokens] ,
identifier[min_confirmations] = identifier[min_confirmations] )
identifier[signed_tx] = identifier[self] . identifier[_t] . identifier[sign_transaction] ( identifier[unsigned_tx] , identifier[password] )
identifier[txid] = identifier[self] . identifier[_t] . identifier[push] ( identifier[signed_tx] )
keyword[return] identifier[txid] | def refill_main_wallet(self, from_address, to_address, nfees, ntokens, password, min_confirmations=6, sync=False):
"""
Refill the Federation wallet with tokens and fees. This keeps the federation wallet clean.
Dealing with exact values simplifies the transactions. No need to calculate change. Easier to keep track of the
unspents and prevent double spends that would result in transactions being rejected by the bitcoin network.
Args:
from_address (Tuple[str]): Refill wallet address. Refills the federation wallet with tokens and fees
to_address (str): Federation wallet address
nfees (int): Number of fees to transfer. Each fee is 10000 satoshi. Used to pay for the transactions
ntokens (int): Number of tokens to transfer. Each token is 600 satoshi. Used to register hashes in the blockchain
password (str): Password for the Refill wallet. Used to sign the transaction
min_confirmations (int): Number of confirmations when chosing the inputs of the transaction. Defaults to 6
sync (bool): Perform the transaction in synchronous mode, the call to the function will block until there is at
least on confirmation on the blockchain. Defaults to False
Returns:
str: transaction id
"""
(path, from_address) = from_address
unsigned_tx = self._t.simple_transaction(from_address, [(to_address, self.fee)] * nfees + [(to_address, self.token)] * ntokens, min_confirmations=min_confirmations)
signed_tx = self._t.sign_transaction(unsigned_tx, password)
txid = self._t.push(signed_tx)
return txid |
def pre_release(version):
"""Generates new docs, release announcements and creates a local tag."""
create_branch(version)
changelog(version, write_out=True)
check_call(["git", "commit", "-a", "-m", f"Preparing release {version}"])
print()
print(f"{Fore.GREEN}Please push your branch to your fork and open a PR.") | def function[pre_release, parameter[version]]:
constant[Generates new docs, release announcements and creates a local tag.]
call[name[create_branch], parameter[name[version]]]
call[name[changelog], parameter[name[version]]]
call[name[check_call], parameter[list[[<ast.Constant object at 0x7da1b080a0b0>, <ast.Constant object at 0x7da1b080ac20>, <ast.Constant object at 0x7da1b080a5f0>, <ast.Constant object at 0x7da1b080a410>, <ast.JoinedStr object at 0x7da1b080a200>]]]]
call[name[print], parameter[]]
call[name[print], parameter[<ast.JoinedStr object at 0x7da1b080a950>]] | keyword[def] identifier[pre_release] ( identifier[version] ):
literal[string]
identifier[create_branch] ( identifier[version] )
identifier[changelog] ( identifier[version] , identifier[write_out] = keyword[True] )
identifier[check_call] ([ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ])
identifier[print] ()
identifier[print] ( literal[string] ) | def pre_release(version):
"""Generates new docs, release announcements and creates a local tag."""
create_branch(version)
changelog(version, write_out=True)
check_call(['git', 'commit', '-a', '-m', f'Preparing release {version}'])
print()
print(f'{Fore.GREEN}Please push your branch to your fork and open a PR.') |
def validateDocumentFinal(self, ctxt):
"""Does the final step for the document validation once all
the incremental validation steps have been completed
basically it does the following checks described by the XML
Rec Check all the IDREF/IDREFS attributes definition for
validity """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlValidateDocumentFinal(ctxt__o, self._o)
return ret | def function[validateDocumentFinal, parameter[self, ctxt]]:
constant[Does the final step for the document validation once all
the incremental validation steps have been completed
basically it does the following checks described by the XML
Rec Check all the IDREF/IDREFS attributes definition for
validity ]
if compare[name[ctxt] is constant[None]] begin[:]
variable[ctxt__o] assign[=] constant[None]
variable[ret] assign[=] call[name[libxml2mod].xmlValidateDocumentFinal, parameter[name[ctxt__o], name[self]._o]]
return[name[ret]] | keyword[def] identifier[validateDocumentFinal] ( identifier[self] , identifier[ctxt] ):
literal[string]
keyword[if] identifier[ctxt] keyword[is] keyword[None] : identifier[ctxt__o] = keyword[None]
keyword[else] : identifier[ctxt__o] = identifier[ctxt] . identifier[_o]
identifier[ret] = identifier[libxml2mod] . identifier[xmlValidateDocumentFinal] ( identifier[ctxt__o] , identifier[self] . identifier[_o] )
keyword[return] identifier[ret] | def validateDocumentFinal(self, ctxt):
"""Does the final step for the document validation once all
the incremental validation steps have been completed
basically it does the following checks described by the XML
Rec Check all the IDREF/IDREFS attributes definition for
validity """
if ctxt is None:
ctxt__o = None # depends on [control=['if'], data=[]]
else:
ctxt__o = ctxt._o
ret = libxml2mod.xmlValidateDocumentFinal(ctxt__o, self._o)
return ret |
def is_by_step(raw):
"""If the password is alphabet step by step."""
# make sure it is unicode
delta = ord(raw[1]) - ord(raw[0])
for i in range(2, len(raw)):
if ord(raw[i]) - ord(raw[i-1]) != delta:
return False
return True | def function[is_by_step, parameter[raw]]:
constant[If the password is alphabet step by step.]
variable[delta] assign[=] binary_operation[call[name[ord], parameter[call[name[raw]][constant[1]]]] - call[name[ord], parameter[call[name[raw]][constant[0]]]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[2], call[name[len], parameter[name[raw]]]]]] begin[:]
if compare[binary_operation[call[name[ord], parameter[call[name[raw]][name[i]]]] - call[name[ord], parameter[call[name[raw]][binary_operation[name[i] - constant[1]]]]]] not_equal[!=] name[delta]] begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[is_by_step] ( identifier[raw] ):
literal[string]
identifier[delta] = identifier[ord] ( identifier[raw] [ literal[int] ])- identifier[ord] ( identifier[raw] [ literal[int] ])
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[raw] )):
keyword[if] identifier[ord] ( identifier[raw] [ identifier[i] ])- identifier[ord] ( identifier[raw] [ identifier[i] - literal[int] ])!= identifier[delta] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def is_by_step(raw):
"""If the password is alphabet step by step."""
# make sure it is unicode
delta = ord(raw[1]) - ord(raw[0])
for i in range(2, len(raw)):
if ord(raw[i]) - ord(raw[i - 1]) != delta:
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return True |
def filter_and_normalize(data, min_counts=None, min_counts_u=None, min_cells=None, min_cells_u=None,
min_shared_counts=None, min_shared_cells=None, n_top_genes=None, flavor='seurat', log=True,
copy=False):
"""Filtering, normalization and log transform
Expects non-logarithmized data. If using logarithmized data, pass `log=False`.
Runs the following steps
.. code:: python
scv.pp.filter_genes(adata)
scv.pp.normalize_per_cell(adata)
if n_top_genes is not None:
scv.pp.filter_genes_dispersion(adata)
if log:
scv.pp.log1p(adata)
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
min_counts: `int` (default: `None`)
Minimum number of counts required for a gene to pass filtering (spliced).
min_counts_u: `int` (default: `None`)
Minimum number of counts required for a gene to pass filtering (unspliced).
min_cells: `int` (default: `None`)
Minimum number of cells expressed required for a gene to pass filtering (spliced).
min_cells_u: `int` (default: `None`)
Minimum number of cells expressed required for a gene to pass filtering (unspliced).
min_shared_counts: `int`, optional (default: `None`)
Minimum number of counts (in cells expressed simultaneously in unspliced and spliced) required for a gene.
min_shared_cells: `int`, optional (default: `None`)
Minimum number of cells required for a gene to be expressed simultaneously in unspliced and spliced.
n_top_genes: `int` (default: `None`)
Number of genes to keep.
flavor: {'seurat', 'cell_ranger', 'svr'}, optional (default: 'seurat')
Choose the flavor for computing normalized dispersion. If choosing 'seurat', this expects non-logarithmized data.
log: `bool` (default: `True`)
Take logarithm.
copy: `bool` (default: `False`)
Return a copy of `adata` instead of updating it.
Returns
-------
Returns or updates `adata` depending on `copy`.
"""
adata = data.copy() if copy else data
if 'spliced' not in adata.layers.keys() or 'unspliced' not in adata.layers.keys():
raise ValueError('Could not find spliced / unspliced counts.')
filter_genes(adata, min_counts=min_counts, min_counts_u=min_counts_u, min_cells=min_cells, min_cells_u=min_cells_u,
min_shared_counts=min_shared_counts, min_shared_cells=min_shared_cells,)
normalize_per_cell(adata)
if n_top_genes is not None:
filter_genes_dispersion(adata, n_top_genes=n_top_genes, flavor=flavor)
log_advised = np.allclose(adata.X[:10].sum(), adata.layers['spliced'][:10].sum())
if log and log_advised:
log1p(adata)
logg.info('Logarithmized X.' if log and log_advised else
'Did not modify X as it looks preprocessed already.' if log else
'Consider logarithmizing X with `scv.pp.log1p` for better results.' if log_advised else '')
return adata if copy else None | def function[filter_and_normalize, parameter[data, min_counts, min_counts_u, min_cells, min_cells_u, min_shared_counts, min_shared_cells, n_top_genes, flavor, log, copy]]:
constant[Filtering, normalization and log transform
Expects non-logarithmized data. If using logarithmized data, pass `log=False`.
Runs the following steps
.. code:: python
scv.pp.filter_genes(adata)
scv.pp.normalize_per_cell(adata)
if n_top_genes is not None:
scv.pp.filter_genes_dispersion(adata)
if log:
scv.pp.log1p(adata)
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
min_counts: `int` (default: `None`)
Minimum number of counts required for a gene to pass filtering (spliced).
min_counts_u: `int` (default: `None`)
Minimum number of counts required for a gene to pass filtering (unspliced).
min_cells: `int` (default: `None`)
Minimum number of cells expressed required for a gene to pass filtering (spliced).
min_cells_u: `int` (default: `None`)
Minimum number of cells expressed required for a gene to pass filtering (unspliced).
min_shared_counts: `int`, optional (default: `None`)
Minimum number of counts (in cells expressed simultaneously in unspliced and spliced) required for a gene.
min_shared_cells: `int`, optional (default: `None`)
Minimum number of cells required for a gene to be expressed simultaneously in unspliced and spliced.
n_top_genes: `int` (default: `None`)
Number of genes to keep.
flavor: {'seurat', 'cell_ranger', 'svr'}, optional (default: 'seurat')
Choose the flavor for computing normalized dispersion. If choosing 'seurat', this expects non-logarithmized data.
log: `bool` (default: `True`)
Take logarithm.
copy: `bool` (default: `False`)
Return a copy of `adata` instead of updating it.
Returns
-------
Returns or updates `adata` depending on `copy`.
]
variable[adata] assign[=] <ast.IfExp object at 0x7da18dc05c00>
if <ast.BoolOp object at 0x7da18dc054b0> begin[:]
<ast.Raise object at 0x7da18dc07e50>
call[name[filter_genes], parameter[name[adata]]]
call[name[normalize_per_cell], parameter[name[adata]]]
if compare[name[n_top_genes] is_not constant[None]] begin[:]
call[name[filter_genes_dispersion], parameter[name[adata]]]
variable[log_advised] assign[=] call[name[np].allclose, parameter[call[call[name[adata].X][<ast.Slice object at 0x7da18dc07610>].sum, parameter[]], call[call[call[name[adata].layers][constant[spliced]]][<ast.Slice object at 0x7da18dc07c40>].sum, parameter[]]]]
if <ast.BoolOp object at 0x7da18dc04af0> begin[:]
call[name[log1p], parameter[name[adata]]]
call[name[logg].info, parameter[<ast.IfExp object at 0x7da20e9b1c60>]]
return[<ast.IfExp object at 0x7da18dc07be0>] | keyword[def] identifier[filter_and_normalize] ( identifier[data] , identifier[min_counts] = keyword[None] , identifier[min_counts_u] = keyword[None] , identifier[min_cells] = keyword[None] , identifier[min_cells_u] = keyword[None] ,
identifier[min_shared_counts] = keyword[None] , identifier[min_shared_cells] = keyword[None] , identifier[n_top_genes] = keyword[None] , identifier[flavor] = literal[string] , identifier[log] = keyword[True] ,
identifier[copy] = keyword[False] ):
literal[string]
identifier[adata] = identifier[data] . identifier[copy] () keyword[if] identifier[copy] keyword[else] identifier[data]
keyword[if] literal[string] keyword[not] keyword[in] identifier[adata] . identifier[layers] . identifier[keys] () keyword[or] literal[string] keyword[not] keyword[in] identifier[adata] . identifier[layers] . identifier[keys] ():
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[filter_genes] ( identifier[adata] , identifier[min_counts] = identifier[min_counts] , identifier[min_counts_u] = identifier[min_counts_u] , identifier[min_cells] = identifier[min_cells] , identifier[min_cells_u] = identifier[min_cells_u] ,
identifier[min_shared_counts] = identifier[min_shared_counts] , identifier[min_shared_cells] = identifier[min_shared_cells] ,)
identifier[normalize_per_cell] ( identifier[adata] )
keyword[if] identifier[n_top_genes] keyword[is] keyword[not] keyword[None] :
identifier[filter_genes_dispersion] ( identifier[adata] , identifier[n_top_genes] = identifier[n_top_genes] , identifier[flavor] = identifier[flavor] )
identifier[log_advised] = identifier[np] . identifier[allclose] ( identifier[adata] . identifier[X] [: literal[int] ]. identifier[sum] (), identifier[adata] . identifier[layers] [ literal[string] ][: literal[int] ]. identifier[sum] ())
keyword[if] identifier[log] keyword[and] identifier[log_advised] :
identifier[log1p] ( identifier[adata] )
identifier[logg] . identifier[info] ( literal[string] keyword[if] identifier[log] keyword[and] identifier[log_advised] keyword[else]
literal[string] keyword[if] identifier[log] keyword[else]
literal[string] keyword[if] identifier[log_advised] keyword[else] literal[string] )
keyword[return] identifier[adata] keyword[if] identifier[copy] keyword[else] keyword[None] | def filter_and_normalize(data, min_counts=None, min_counts_u=None, min_cells=None, min_cells_u=None, min_shared_counts=None, min_shared_cells=None, n_top_genes=None, flavor='seurat', log=True, copy=False):
"""Filtering, normalization and log transform
Expects non-logarithmized data. If using logarithmized data, pass `log=False`.
Runs the following steps
.. code:: python
scv.pp.filter_genes(adata)
scv.pp.normalize_per_cell(adata)
if n_top_genes is not None:
scv.pp.filter_genes_dispersion(adata)
if log:
scv.pp.log1p(adata)
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
min_counts: `int` (default: `None`)
Minimum number of counts required for a gene to pass filtering (spliced).
min_counts_u: `int` (default: `None`)
Minimum number of counts required for a gene to pass filtering (unspliced).
min_cells: `int` (default: `None`)
Minimum number of cells expressed required for a gene to pass filtering (spliced).
min_cells_u: `int` (default: `None`)
Minimum number of cells expressed required for a gene to pass filtering (unspliced).
min_shared_counts: `int`, optional (default: `None`)
Minimum number of counts (in cells expressed simultaneously in unspliced and spliced) required for a gene.
min_shared_cells: `int`, optional (default: `None`)
Minimum number of cells required for a gene to be expressed simultaneously in unspliced and spliced.
n_top_genes: `int` (default: `None`)
Number of genes to keep.
flavor: {'seurat', 'cell_ranger', 'svr'}, optional (default: 'seurat')
Choose the flavor for computing normalized dispersion. If choosing 'seurat', this expects non-logarithmized data.
log: `bool` (default: `True`)
Take logarithm.
copy: `bool` (default: `False`)
Return a copy of `adata` instead of updating it.
Returns
-------
Returns or updates `adata` depending on `copy`.
"""
adata = data.copy() if copy else data
if 'spliced' not in adata.layers.keys() or 'unspliced' not in adata.layers.keys():
raise ValueError('Could not find spliced / unspliced counts.') # depends on [control=['if'], data=[]]
filter_genes(adata, min_counts=min_counts, min_counts_u=min_counts_u, min_cells=min_cells, min_cells_u=min_cells_u, min_shared_counts=min_shared_counts, min_shared_cells=min_shared_cells)
normalize_per_cell(adata)
if n_top_genes is not None:
filter_genes_dispersion(adata, n_top_genes=n_top_genes, flavor=flavor) # depends on [control=['if'], data=['n_top_genes']]
log_advised = np.allclose(adata.X[:10].sum(), adata.layers['spliced'][:10].sum())
if log and log_advised:
log1p(adata) # depends on [control=['if'], data=[]]
logg.info('Logarithmized X.' if log and log_advised else 'Did not modify X as it looks preprocessed already.' if log else 'Consider logarithmizing X with `scv.pp.log1p` for better results.' if log_advised else '')
return adata if copy else None |
def dict(self):
"""The dict representation of this sentence."""
return {
'raw': self.raw,
'start_index': self.start_index,
'end_index': self.end_index,
'stripped': self.stripped,
'noun_phrases': self.noun_phrases,
'polarity': self.polarity,
'subjectivity': self.subjectivity,
} | def function[dict, parameter[self]]:
constant[The dict representation of this sentence.]
return[dictionary[[<ast.Constant object at 0x7da20c6e49d0>, <ast.Constant object at 0x7da20c6e6650>, <ast.Constant object at 0x7da20c6e6c20>, <ast.Constant object at 0x7da20c6e47c0>, <ast.Constant object at 0x7da20c6e6710>, <ast.Constant object at 0x7da20c6e4820>, <ast.Constant object at 0x7da20c6e6350>], [<ast.Attribute object at 0x7da20c6e53c0>, <ast.Attribute object at 0x7da20c6e6a10>, <ast.Attribute object at 0x7da20c6e5330>, <ast.Attribute object at 0x7da20c6e5e40>, <ast.Attribute object at 0x7da20c6e6950>, <ast.Attribute object at 0x7da20c6e5ae0>, <ast.Attribute object at 0x7da20c6e5c00>]]] | keyword[def] identifier[dict] ( identifier[self] ):
literal[string]
keyword[return] {
literal[string] : identifier[self] . identifier[raw] ,
literal[string] : identifier[self] . identifier[start_index] ,
literal[string] : identifier[self] . identifier[end_index] ,
literal[string] : identifier[self] . identifier[stripped] ,
literal[string] : identifier[self] . identifier[noun_phrases] ,
literal[string] : identifier[self] . identifier[polarity] ,
literal[string] : identifier[self] . identifier[subjectivity] ,
} | def dict(self):
"""The dict representation of this sentence."""
return {'raw': self.raw, 'start_index': self.start_index, 'end_index': self.end_index, 'stripped': self.stripped, 'noun_phrases': self.noun_phrases, 'polarity': self.polarity, 'subjectivity': self.subjectivity} |
def eq(a, b):
""" The great missing equivalence function: Guaranteed evaluation
to a single bool value.
"""
if a is b:
return True
if a is None or b is None:
return True if a is None and b is None else False
try:
e = a == b
except ValueError:
return False
except AttributeError:
return False
except Exception:
print("a:", str(type(a)), str(a))
print("b:", str(type(b)), str(b))
raise
t = type(e)
if t is bool:
return e
elif t is bool_:
return bool(e)
elif isinstance(e, ndarray):
try:
# disaster: if a is empty and b is not, then e.all() is True
if a.shape != b.shape:
return False
except Exception:
return False
if (hasattr(e, 'implements') and e.implements('MetaArray')):
return e.asarray().all()
else:
return e.all()
else:
raise Exception("== operator returned type %s" % str(type(e))) | def function[eq, parameter[a, b]]:
constant[ The great missing equivalence function: Guaranteed evaluation
to a single bool value.
]
if compare[name[a] is name[b]] begin[:]
return[constant[True]]
if <ast.BoolOp object at 0x7da1b0e49120> begin[:]
return[<ast.IfExp object at 0x7da1b0e48b50>]
<ast.Try object at 0x7da1b0e49b40>
variable[t] assign[=] call[name[type], parameter[name[e]]]
if compare[name[t] is name[bool]] begin[:]
return[name[e]] | keyword[def] identifier[eq] ( identifier[a] , identifier[b] ):
literal[string]
keyword[if] identifier[a] keyword[is] identifier[b] :
keyword[return] keyword[True]
keyword[if] identifier[a] keyword[is] keyword[None] keyword[or] identifier[b] keyword[is] keyword[None] :
keyword[return] keyword[True] keyword[if] identifier[a] keyword[is] keyword[None] keyword[and] identifier[b] keyword[is] keyword[None] keyword[else] keyword[False]
keyword[try] :
identifier[e] = identifier[a] == identifier[b]
keyword[except] identifier[ValueError] :
keyword[return] keyword[False]
keyword[except] identifier[AttributeError] :
keyword[return] keyword[False]
keyword[except] identifier[Exception] :
identifier[print] ( literal[string] , identifier[str] ( identifier[type] ( identifier[a] )), identifier[str] ( identifier[a] ))
identifier[print] ( literal[string] , identifier[str] ( identifier[type] ( identifier[b] )), identifier[str] ( identifier[b] ))
keyword[raise]
identifier[t] = identifier[type] ( identifier[e] )
keyword[if] identifier[t] keyword[is] identifier[bool] :
keyword[return] identifier[e]
keyword[elif] identifier[t] keyword[is] identifier[bool_] :
keyword[return] identifier[bool] ( identifier[e] )
keyword[elif] identifier[isinstance] ( identifier[e] , identifier[ndarray] ):
keyword[try] :
keyword[if] identifier[a] . identifier[shape] != identifier[b] . identifier[shape] :
keyword[return] keyword[False]
keyword[except] identifier[Exception] :
keyword[return] keyword[False]
keyword[if] ( identifier[hasattr] ( identifier[e] , literal[string] ) keyword[and] identifier[e] . identifier[implements] ( literal[string] )):
keyword[return] identifier[e] . identifier[asarray] (). identifier[all] ()
keyword[else] :
keyword[return] identifier[e] . identifier[all] ()
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] % identifier[str] ( identifier[type] ( identifier[e] ))) | def eq(a, b):
""" The great missing equivalence function: Guaranteed evaluation
to a single bool value.
"""
if a is b:
return True # depends on [control=['if'], data=[]]
if a is None or b is None:
return True if a is None and b is None else False # depends on [control=['if'], data=[]]
try:
e = a == b # depends on [control=['try'], data=[]]
except ValueError:
return False # depends on [control=['except'], data=[]]
except AttributeError:
return False # depends on [control=['except'], data=[]]
except Exception:
print('a:', str(type(a)), str(a))
print('b:', str(type(b)), str(b))
raise # depends on [control=['except'], data=[]]
t = type(e)
if t is bool:
return e # depends on [control=['if'], data=[]]
elif t is bool_:
return bool(e) # depends on [control=['if'], data=[]]
elif isinstance(e, ndarray):
try:
# disaster: if a is empty and b is not, then e.all() is True
if a.shape != b.shape:
return False # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception:
return False # depends on [control=['except'], data=[]]
if hasattr(e, 'implements') and e.implements('MetaArray'):
return e.asarray().all() # depends on [control=['if'], data=[]]
else:
return e.all() # depends on [control=['if'], data=[]]
else:
raise Exception('== operator returned type %s' % str(type(e))) |
def add_permissions(self, group_name, resource, permissions, url_prefix, auth, session, send_opts):
"""
Args:
group_name (string): Name of group.
resource (intern.resource.boss.BossResource): Identifies which data model object to operate on.
permissions (list): List of permissions to add to the given resource.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
"""
post_data = {"group": group_name,
"permissions": permissions,
}
post_data.update(resource.get_dict_route())
req = self.get_permission_request('POST', 'application/json',
url_prefix, auth, post_data=post_data)
prep = session.prepare_request(req)
resp = session.send(prep, **send_opts)
if resp.status_code != 201:
msg = ('Failed adding permissions to group {}, got HTTP response: ({}) - {}'.format(group_name,
resp.status_code,
resp.text))
raise HTTPError(msg, request=req, response=resp) | def function[add_permissions, parameter[self, group_name, resource, permissions, url_prefix, auth, session, send_opts]]:
constant[
Args:
group_name (string): Name of group.
resource (intern.resource.boss.BossResource): Identifies which data model object to operate on.
permissions (list): List of permissions to add to the given resource.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
]
variable[post_data] assign[=] dictionary[[<ast.Constant object at 0x7da2044c0a00>, <ast.Constant object at 0x7da2044c0be0>], [<ast.Name object at 0x7da204622e90>, <ast.Name object at 0x7da204620220>]]
call[name[post_data].update, parameter[call[name[resource].get_dict_route, parameter[]]]]
variable[req] assign[=] call[name[self].get_permission_request, parameter[constant[POST], constant[application/json], name[url_prefix], name[auth]]]
variable[prep] assign[=] call[name[session].prepare_request, parameter[name[req]]]
variable[resp] assign[=] call[name[session].send, parameter[name[prep]]]
if compare[name[resp].status_code not_equal[!=] constant[201]] begin[:]
variable[msg] assign[=] call[constant[Failed adding permissions to group {}, got HTTP response: ({}) - {}].format, parameter[name[group_name], name[resp].status_code, name[resp].text]]
<ast.Raise object at 0x7da204623250> | keyword[def] identifier[add_permissions] ( identifier[self] , identifier[group_name] , identifier[resource] , identifier[permissions] , identifier[url_prefix] , identifier[auth] , identifier[session] , identifier[send_opts] ):
literal[string]
identifier[post_data] ={ literal[string] : identifier[group_name] ,
literal[string] : identifier[permissions] ,
}
identifier[post_data] . identifier[update] ( identifier[resource] . identifier[get_dict_route] ())
identifier[req] = identifier[self] . identifier[get_permission_request] ( literal[string] , literal[string] ,
identifier[url_prefix] , identifier[auth] , identifier[post_data] = identifier[post_data] )
identifier[prep] = identifier[session] . identifier[prepare_request] ( identifier[req] )
identifier[resp] = identifier[session] . identifier[send] ( identifier[prep] ,** identifier[send_opts] )
keyword[if] identifier[resp] . identifier[status_code] != literal[int] :
identifier[msg] =( literal[string] . identifier[format] ( identifier[group_name] ,
identifier[resp] . identifier[status_code] ,
identifier[resp] . identifier[text] ))
keyword[raise] identifier[HTTPError] ( identifier[msg] , identifier[request] = identifier[req] , identifier[response] = identifier[resp] ) | def add_permissions(self, group_name, resource, permissions, url_prefix, auth, session, send_opts):
"""
Args:
group_name (string): Name of group.
resource (intern.resource.boss.BossResource): Identifies which data model object to operate on.
permissions (list): List of permissions to add to the given resource.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
"""
post_data = {'group': group_name, 'permissions': permissions}
post_data.update(resource.get_dict_route())
req = self.get_permission_request('POST', 'application/json', url_prefix, auth, post_data=post_data)
prep = session.prepare_request(req)
resp = session.send(prep, **send_opts)
if resp.status_code != 201:
msg = 'Failed adding permissions to group {}, got HTTP response: ({}) - {}'.format(group_name, resp.status_code, resp.text)
raise HTTPError(msg, request=req, response=resp) # depends on [control=['if'], data=[]] |
def set_keepalive(sock, after_idle_sec=1, interval_sec=3, max_fails=5):
"""Set TCP keepalive on an open socket.
It activates after 1 second (after_idle_sec) of idleness,
then sends a keepalive ping once every 3 seconds (interval_sec),
and closes the connection after 5 failed ping (max_fails), or 15 seconds
"""
if hasattr(socket, "SO_KEEPALIVE"):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if hasattr(socket, "TCP_KEEPIDLE"):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, after_idle_sec)
if hasattr(socket, "TCP_KEEPINTVL"):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, interval_sec)
if hasattr(socket, "TCP_KEEPCNT"):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, max_fails) | def function[set_keepalive, parameter[sock, after_idle_sec, interval_sec, max_fails]]:
constant[Set TCP keepalive on an open socket.
It activates after 1 second (after_idle_sec) of idleness,
then sends a keepalive ping once every 3 seconds (interval_sec),
and closes the connection after 5 failed ping (max_fails), or 15 seconds
]
if call[name[hasattr], parameter[name[socket], constant[SO_KEEPALIVE]]] begin[:]
call[name[sock].setsockopt, parameter[name[socket].SOL_SOCKET, name[socket].SO_KEEPALIVE, constant[1]]]
if call[name[hasattr], parameter[name[socket], constant[TCP_KEEPIDLE]]] begin[:]
call[name[sock].setsockopt, parameter[name[socket].IPPROTO_TCP, name[socket].TCP_KEEPIDLE, name[after_idle_sec]]]
if call[name[hasattr], parameter[name[socket], constant[TCP_KEEPINTVL]]] begin[:]
call[name[sock].setsockopt, parameter[name[socket].IPPROTO_TCP, name[socket].TCP_KEEPINTVL, name[interval_sec]]]
if call[name[hasattr], parameter[name[socket], constant[TCP_KEEPCNT]]] begin[:]
call[name[sock].setsockopt, parameter[name[socket].IPPROTO_TCP, name[socket].TCP_KEEPCNT, name[max_fails]]] | keyword[def] identifier[set_keepalive] ( identifier[sock] , identifier[after_idle_sec] = literal[int] , identifier[interval_sec] = literal[int] , identifier[max_fails] = literal[int] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[socket] , literal[string] ):
identifier[sock] . identifier[setsockopt] ( identifier[socket] . identifier[SOL_SOCKET] , identifier[socket] . identifier[SO_KEEPALIVE] , literal[int] )
keyword[if] identifier[hasattr] ( identifier[socket] , literal[string] ):
identifier[sock] . identifier[setsockopt] ( identifier[socket] . identifier[IPPROTO_TCP] , identifier[socket] . identifier[TCP_KEEPIDLE] , identifier[after_idle_sec] )
keyword[if] identifier[hasattr] ( identifier[socket] , literal[string] ):
identifier[sock] . identifier[setsockopt] ( identifier[socket] . identifier[IPPROTO_TCP] , identifier[socket] . identifier[TCP_KEEPINTVL] , identifier[interval_sec] )
keyword[if] identifier[hasattr] ( identifier[socket] , literal[string] ):
identifier[sock] . identifier[setsockopt] ( identifier[socket] . identifier[IPPROTO_TCP] , identifier[socket] . identifier[TCP_KEEPCNT] , identifier[max_fails] ) | def set_keepalive(sock, after_idle_sec=1, interval_sec=3, max_fails=5):
"""Set TCP keepalive on an open socket.
It activates after 1 second (after_idle_sec) of idleness,
then sends a keepalive ping once every 3 seconds (interval_sec),
and closes the connection after 5 failed ping (max_fails), or 15 seconds
"""
if hasattr(socket, 'SO_KEEPALIVE'):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) # depends on [control=['if'], data=[]]
if hasattr(socket, 'TCP_KEEPIDLE'):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, after_idle_sec) # depends on [control=['if'], data=[]]
if hasattr(socket, 'TCP_KEEPINTVL'):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, interval_sec) # depends on [control=['if'], data=[]]
if hasattr(socket, 'TCP_KEEPCNT'):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, max_fails) # depends on [control=['if'], data=[]] |
def regex_search(self, regex: str) -> List[HistoryItem]:
"""Find history items which match a given regular expression
:param regex: the regular expression to search for.
:return: a list of history items, or an empty list if the string was not found
"""
regex = regex.strip()
if regex.startswith(r'/') and regex.endswith(r'/'):
regex = regex[1:-1]
finder = re.compile(regex, re.DOTALL | re.MULTILINE)
def isin(hi):
"""filter function for doing a regular expression search of history"""
return finder.search(hi) or finder.search(hi.expanded)
return [itm for itm in self if isin(itm)] | def function[regex_search, parameter[self, regex]]:
constant[Find history items which match a given regular expression
:param regex: the regular expression to search for.
:return: a list of history items, or an empty list if the string was not found
]
variable[regex] assign[=] call[name[regex].strip, parameter[]]
if <ast.BoolOp object at 0x7da1b26af460> begin[:]
variable[regex] assign[=] call[name[regex]][<ast.Slice object at 0x7da1b26ace50>]
variable[finder] assign[=] call[name[re].compile, parameter[name[regex], binary_operation[name[re].DOTALL <ast.BitOr object at 0x7da2590d6aa0> name[re].MULTILINE]]]
def function[isin, parameter[hi]]:
constant[filter function for doing a regular expression search of history]
return[<ast.BoolOp object at 0x7da1b26afeb0>]
return[<ast.ListComp object at 0x7da20c7c9c90>] | keyword[def] identifier[regex_search] ( identifier[self] , identifier[regex] : identifier[str] )-> identifier[List] [ identifier[HistoryItem] ]:
literal[string]
identifier[regex] = identifier[regex] . identifier[strip] ()
keyword[if] identifier[regex] . identifier[startswith] ( literal[string] ) keyword[and] identifier[regex] . identifier[endswith] ( literal[string] ):
identifier[regex] = identifier[regex] [ literal[int] :- literal[int] ]
identifier[finder] = identifier[re] . identifier[compile] ( identifier[regex] , identifier[re] . identifier[DOTALL] | identifier[re] . identifier[MULTILINE] )
keyword[def] identifier[isin] ( identifier[hi] ):
literal[string]
keyword[return] identifier[finder] . identifier[search] ( identifier[hi] ) keyword[or] identifier[finder] . identifier[search] ( identifier[hi] . identifier[expanded] )
keyword[return] [ identifier[itm] keyword[for] identifier[itm] keyword[in] identifier[self] keyword[if] identifier[isin] ( identifier[itm] )] | def regex_search(self, regex: str) -> List[HistoryItem]:
"""Find history items which match a given regular expression
:param regex: the regular expression to search for.
:return: a list of history items, or an empty list if the string was not found
"""
regex = regex.strip()
if regex.startswith('/') and regex.endswith('/'):
regex = regex[1:-1] # depends on [control=['if'], data=[]]
finder = re.compile(regex, re.DOTALL | re.MULTILINE)
def isin(hi):
"""filter function for doing a regular expression search of history"""
return finder.search(hi) or finder.search(hi.expanded)
return [itm for itm in self if isin(itm)] |
def tensors_equal(tensor1: torch.Tensor, tensor2: torch.Tensor, tolerance: float = 1e-12) -> bool:
"""
A check for tensor equality (by value). We make sure that the tensors have the same shape,
then check all of the entries in the tensor for equality. We additionally allow the input
tensors to be lists or dictionaries, where we then do the above check on every position in the
list / item in the dictionary. If we find objects that aren't tensors as we're doing that, we
just defer to their equality check.
This is kind of a catch-all method that's designed to make implementing ``__eq__`` methods
easier, in a way that's really only intended to be useful for tests.
"""
# pylint: disable=too-many-return-statements
if isinstance(tensor1, (list, tuple)):
if not isinstance(tensor2, (list, tuple)) or len(tensor1) != len(tensor2):
return False
return all([tensors_equal(t1, t2, tolerance) for t1, t2 in zip(tensor1, tensor2)])
elif isinstance(tensor1, dict):
if not isinstance(tensor2, dict):
return False
if tensor1.keys() != tensor2.keys():
return False
return all([tensors_equal(tensor1[key], tensor2[key], tolerance) for key in tensor1])
elif isinstance(tensor1, torch.Tensor):
if not isinstance(tensor2, torch.Tensor):
return False
if tensor1.size() != tensor2.size():
return False
return ((tensor1 - tensor2).abs().float() < tolerance).all()
else:
try:
return tensor1 == tensor2
except RuntimeError:
print(type(tensor1), type(tensor2))
raise | def function[tensors_equal, parameter[tensor1, tensor2, tolerance]]:
constant[
A check for tensor equality (by value). We make sure that the tensors have the same shape,
then check all of the entries in the tensor for equality. We additionally allow the input
tensors to be lists or dictionaries, where we then do the above check on every position in the
list / item in the dictionary. If we find objects that aren't tensors as we're doing that, we
just defer to their equality check.
This is kind of a catch-all method that's designed to make implementing ``__eq__`` methods
easier, in a way that's really only intended to be useful for tests.
]
if call[name[isinstance], parameter[name[tensor1], tuple[[<ast.Name object at 0x7da20c795a80>, <ast.Name object at 0x7da20c7968c0>]]]] begin[:]
if <ast.BoolOp object at 0x7da20c7966b0> begin[:]
return[constant[False]]
return[call[name[all], parameter[<ast.ListComp object at 0x7da2054a4220>]]] | keyword[def] identifier[tensors_equal] ( identifier[tensor1] : identifier[torch] . identifier[Tensor] , identifier[tensor2] : identifier[torch] . identifier[Tensor] , identifier[tolerance] : identifier[float] = literal[int] )-> identifier[bool] :
literal[string]
keyword[if] identifier[isinstance] ( identifier[tensor1] ,( identifier[list] , identifier[tuple] )):
keyword[if] keyword[not] identifier[isinstance] ( identifier[tensor2] ,( identifier[list] , identifier[tuple] )) keyword[or] identifier[len] ( identifier[tensor1] )!= identifier[len] ( identifier[tensor2] ):
keyword[return] keyword[False]
keyword[return] identifier[all] ([ identifier[tensors_equal] ( identifier[t1] , identifier[t2] , identifier[tolerance] ) keyword[for] identifier[t1] , identifier[t2] keyword[in] identifier[zip] ( identifier[tensor1] , identifier[tensor2] )])
keyword[elif] identifier[isinstance] ( identifier[tensor1] , identifier[dict] ):
keyword[if] keyword[not] identifier[isinstance] ( identifier[tensor2] , identifier[dict] ):
keyword[return] keyword[False]
keyword[if] identifier[tensor1] . identifier[keys] ()!= identifier[tensor2] . identifier[keys] ():
keyword[return] keyword[False]
keyword[return] identifier[all] ([ identifier[tensors_equal] ( identifier[tensor1] [ identifier[key] ], identifier[tensor2] [ identifier[key] ], identifier[tolerance] ) keyword[for] identifier[key] keyword[in] identifier[tensor1] ])
keyword[elif] identifier[isinstance] ( identifier[tensor1] , identifier[torch] . identifier[Tensor] ):
keyword[if] keyword[not] identifier[isinstance] ( identifier[tensor2] , identifier[torch] . identifier[Tensor] ):
keyword[return] keyword[False]
keyword[if] identifier[tensor1] . identifier[size] ()!= identifier[tensor2] . identifier[size] ():
keyword[return] keyword[False]
keyword[return] (( identifier[tensor1] - identifier[tensor2] ). identifier[abs] (). identifier[float] ()< identifier[tolerance] ). identifier[all] ()
keyword[else] :
keyword[try] :
keyword[return] identifier[tensor1] == identifier[tensor2]
keyword[except] identifier[RuntimeError] :
identifier[print] ( identifier[type] ( identifier[tensor1] ), identifier[type] ( identifier[tensor2] ))
keyword[raise] | def tensors_equal(tensor1: torch.Tensor, tensor2: torch.Tensor, tolerance: float=1e-12) -> bool:
"""
A check for tensor equality (by value). We make sure that the tensors have the same shape,
then check all of the entries in the tensor for equality. We additionally allow the input
tensors to be lists or dictionaries, where we then do the above check on every position in the
list / item in the dictionary. If we find objects that aren't tensors as we're doing that, we
just defer to their equality check.
This is kind of a catch-all method that's designed to make implementing ``__eq__`` methods
easier, in a way that's really only intended to be useful for tests.
"""
# pylint: disable=too-many-return-statements
if isinstance(tensor1, (list, tuple)):
if not isinstance(tensor2, (list, tuple)) or len(tensor1) != len(tensor2):
return False # depends on [control=['if'], data=[]]
return all([tensors_equal(t1, t2, tolerance) for (t1, t2) in zip(tensor1, tensor2)]) # depends on [control=['if'], data=[]]
elif isinstance(tensor1, dict):
if not isinstance(tensor2, dict):
return False # depends on [control=['if'], data=[]]
if tensor1.keys() != tensor2.keys():
return False # depends on [control=['if'], data=[]]
return all([tensors_equal(tensor1[key], tensor2[key], tolerance) for key in tensor1]) # depends on [control=['if'], data=[]]
elif isinstance(tensor1, torch.Tensor):
if not isinstance(tensor2, torch.Tensor):
return False # depends on [control=['if'], data=[]]
if tensor1.size() != tensor2.size():
return False # depends on [control=['if'], data=[]]
return ((tensor1 - tensor2).abs().float() < tolerance).all() # depends on [control=['if'], data=[]]
else:
try:
return tensor1 == tensor2 # depends on [control=['try'], data=[]]
except RuntimeError:
print(type(tensor1), type(tensor2))
raise # depends on [control=['except'], data=[]] |
def include(module_name, *module_names):
"""Used by functions in package.py to have access to named modules.
See the 'package_definition_python_path' config setting for more info.
"""
def decorated(fn):
_add_decorator(fn, "include", nargs=[module_name] + list(module_names))
return fn
return decorated | def function[include, parameter[module_name]]:
constant[Used by functions in package.py to have access to named modules.
See the 'package_definition_python_path' config setting for more info.
]
def function[decorated, parameter[fn]]:
call[name[_add_decorator], parameter[name[fn], constant[include]]]
return[name[fn]]
return[name[decorated]] | keyword[def] identifier[include] ( identifier[module_name] ,* identifier[module_names] ):
literal[string]
keyword[def] identifier[decorated] ( identifier[fn] ):
identifier[_add_decorator] ( identifier[fn] , literal[string] , identifier[nargs] =[ identifier[module_name] ]+ identifier[list] ( identifier[module_names] ))
keyword[return] identifier[fn]
keyword[return] identifier[decorated] | def include(module_name, *module_names):
"""Used by functions in package.py to have access to named modules.
See the 'package_definition_python_path' config setting for more info.
"""
def decorated(fn):
_add_decorator(fn, 'include', nargs=[module_name] + list(module_names))
return fn
return decorated |
def save_draft(self, target_folder=OutlookWellKnowFolderNames.DRAFTS):
""" Save this message as a draft on the cloud
:param target_folder: name of the drafts folder
:return: Success / Failure
:rtype: bool
"""
if self.object_id:
# update message. Attachments are NOT included nor saved.
if not self.__is_draft:
raise RuntimeError('Only draft messages can be updated')
if not self._track_changes:
return True # there's nothing to update
url = self.build_url(
self._endpoints.get('get_message').format(id=self.object_id))
method = self.con.patch
data = self.to_api_data(restrict_keys=self._track_changes)
data.pop(self._cc('attachments'),
None) # attachments are handled by the next method call
# noinspection PyProtectedMember
self.attachments._update_attachments_to_cloud()
else:
# new message. Attachments are included and saved.
if not self.__is_draft:
raise RuntimeError('Only draft messages can be saved as drafts')
target_folder = target_folder or OutlookWellKnowFolderNames.DRAFTS
if isinstance(target_folder, OutlookWellKnowFolderNames):
target_folder = target_folder.value
elif not isinstance(target_folder, str):
# a Folder instance
target_folder = getattr(target_folder, 'folder_id',
OutlookWellKnowFolderNames.DRAFTS.value)
url = self.build_url(
self._endpoints.get('create_draft_folder').format(
id=target_folder))
method = self.con.post
data = self.to_api_data()
if not data:
return True
response = method(url, data=data)
if not response:
return False
self._track_changes.clear() # reset the tracked changes as they are all saved
if not self.object_id:
# new message
message = response.json()
self.object_id = message.get(self._cc('id'), None)
self.folder_id = message.get(self._cc('parentFolderId'), None)
# fallback to office365 v1.0
self.__created = message.get(self._cc('createdDateTime'),
message.get(
self._cc('dateTimeCreated'),
None))
# fallback to office365 v1.0
self.__modified = message.get(self._cc('lastModifiedDateTime'),
message.get(
self._cc('dateTimeModified'),
None))
self.__created = parse(self.__created).astimezone(
self.protocol.timezone) if self.__created else None
self.__modified = parse(self.__modified).astimezone(
self.protocol.timezone) if self.__modified else None
else:
self.__modified = self.protocol.timezone.localize(dt.datetime.now())
return True | def function[save_draft, parameter[self, target_folder]]:
constant[ Save this message as a draft on the cloud
:param target_folder: name of the drafts folder
:return: Success / Failure
:rtype: bool
]
if name[self].object_id begin[:]
if <ast.UnaryOp object at 0x7da1b1b0f520> begin[:]
<ast.Raise object at 0x7da1b1b0db70>
if <ast.UnaryOp object at 0x7da1b1b0e4d0> begin[:]
return[constant[True]]
variable[url] assign[=] call[name[self].build_url, parameter[call[call[name[self]._endpoints.get, parameter[constant[get_message]]].format, parameter[]]]]
variable[method] assign[=] name[self].con.patch
variable[data] assign[=] call[name[self].to_api_data, parameter[]]
call[name[data].pop, parameter[call[name[self]._cc, parameter[constant[attachments]]], constant[None]]]
call[name[self].attachments._update_attachments_to_cloud, parameter[]]
if <ast.UnaryOp object at 0x7da1b1b0c670> begin[:]
return[constant[True]]
variable[response] assign[=] call[name[method], parameter[name[url]]]
if <ast.UnaryOp object at 0x7da1b1b0dc90> begin[:]
return[constant[False]]
call[name[self]._track_changes.clear, parameter[]]
if <ast.UnaryOp object at 0x7da1b1b0f220> begin[:]
variable[message] assign[=] call[name[response].json, parameter[]]
name[self].object_id assign[=] call[name[message].get, parameter[call[name[self]._cc, parameter[constant[id]]], constant[None]]]
name[self].folder_id assign[=] call[name[message].get, parameter[call[name[self]._cc, parameter[constant[parentFolderId]]], constant[None]]]
name[self].__created assign[=] call[name[message].get, parameter[call[name[self]._cc, parameter[constant[createdDateTime]]], call[name[message].get, parameter[call[name[self]._cc, parameter[constant[dateTimeCreated]]], constant[None]]]]]
name[self].__modified assign[=] call[name[message].get, parameter[call[name[self]._cc, parameter[constant[lastModifiedDateTime]]], call[name[message].get, parameter[call[name[self]._cc, parameter[constant[dateTimeModified]]], constant[None]]]]]
name[self].__created assign[=] <ast.IfExp object at 0x7da1b1b2a0b0>
name[self].__modified assign[=] <ast.IfExp object at 0x7da1b1b29bd0>
return[constant[True]] | keyword[def] identifier[save_draft] ( identifier[self] , identifier[target_folder] = identifier[OutlookWellKnowFolderNames] . identifier[DRAFTS] ):
literal[string]
keyword[if] identifier[self] . identifier[object_id] :
keyword[if] keyword[not] identifier[self] . identifier[__is_draft] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[if] keyword[not] identifier[self] . identifier[_track_changes] :
keyword[return] keyword[True]
identifier[url] = identifier[self] . identifier[build_url] (
identifier[self] . identifier[_endpoints] . identifier[get] ( literal[string] ). identifier[format] ( identifier[id] = identifier[self] . identifier[object_id] ))
identifier[method] = identifier[self] . identifier[con] . identifier[patch]
identifier[data] = identifier[self] . identifier[to_api_data] ( identifier[restrict_keys] = identifier[self] . identifier[_track_changes] )
identifier[data] . identifier[pop] ( identifier[self] . identifier[_cc] ( literal[string] ),
keyword[None] )
identifier[self] . identifier[attachments] . identifier[_update_attachments_to_cloud] ()
keyword[else] :
keyword[if] keyword[not] identifier[self] . identifier[__is_draft] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[target_folder] = identifier[target_folder] keyword[or] identifier[OutlookWellKnowFolderNames] . identifier[DRAFTS]
keyword[if] identifier[isinstance] ( identifier[target_folder] , identifier[OutlookWellKnowFolderNames] ):
identifier[target_folder] = identifier[target_folder] . identifier[value]
keyword[elif] keyword[not] identifier[isinstance] ( identifier[target_folder] , identifier[str] ):
identifier[target_folder] = identifier[getattr] ( identifier[target_folder] , literal[string] ,
identifier[OutlookWellKnowFolderNames] . identifier[DRAFTS] . identifier[value] )
identifier[url] = identifier[self] . identifier[build_url] (
identifier[self] . identifier[_endpoints] . identifier[get] ( literal[string] ). identifier[format] (
identifier[id] = identifier[target_folder] ))
identifier[method] = identifier[self] . identifier[con] . identifier[post]
identifier[data] = identifier[self] . identifier[to_api_data] ()
keyword[if] keyword[not] identifier[data] :
keyword[return] keyword[True]
identifier[response] = identifier[method] ( identifier[url] , identifier[data] = identifier[data] )
keyword[if] keyword[not] identifier[response] :
keyword[return] keyword[False]
identifier[self] . identifier[_track_changes] . identifier[clear] ()
keyword[if] keyword[not] identifier[self] . identifier[object_id] :
identifier[message] = identifier[response] . identifier[json] ()
identifier[self] . identifier[object_id] = identifier[message] . identifier[get] ( identifier[self] . identifier[_cc] ( literal[string] ), keyword[None] )
identifier[self] . identifier[folder_id] = identifier[message] . identifier[get] ( identifier[self] . identifier[_cc] ( literal[string] ), keyword[None] )
identifier[self] . identifier[__created] = identifier[message] . identifier[get] ( identifier[self] . identifier[_cc] ( literal[string] ),
identifier[message] . identifier[get] (
identifier[self] . identifier[_cc] ( literal[string] ),
keyword[None] ))
identifier[self] . identifier[__modified] = identifier[message] . identifier[get] ( identifier[self] . identifier[_cc] ( literal[string] ),
identifier[message] . identifier[get] (
identifier[self] . identifier[_cc] ( literal[string] ),
keyword[None] ))
identifier[self] . identifier[__created] = identifier[parse] ( identifier[self] . identifier[__created] ). identifier[astimezone] (
identifier[self] . identifier[protocol] . identifier[timezone] ) keyword[if] identifier[self] . identifier[__created] keyword[else] keyword[None]
identifier[self] . identifier[__modified] = identifier[parse] ( identifier[self] . identifier[__modified] ). identifier[astimezone] (
identifier[self] . identifier[protocol] . identifier[timezone] ) keyword[if] identifier[self] . identifier[__modified] keyword[else] keyword[None]
keyword[else] :
identifier[self] . identifier[__modified] = identifier[self] . identifier[protocol] . identifier[timezone] . identifier[localize] ( identifier[dt] . identifier[datetime] . identifier[now] ())
keyword[return] keyword[True] | def save_draft(self, target_folder=OutlookWellKnowFolderNames.DRAFTS):
""" Save this message as a draft on the cloud
:param target_folder: name of the drafts folder
:return: Success / Failure
:rtype: bool
"""
if self.object_id:
# update message. Attachments are NOT included nor saved.
if not self.__is_draft:
raise RuntimeError('Only draft messages can be updated') # depends on [control=['if'], data=[]]
if not self._track_changes:
return True # there's nothing to update # depends on [control=['if'], data=[]]
url = self.build_url(self._endpoints.get('get_message').format(id=self.object_id))
method = self.con.patch
data = self.to_api_data(restrict_keys=self._track_changes)
data.pop(self._cc('attachments'), None) # attachments are handled by the next method call
# noinspection PyProtectedMember
self.attachments._update_attachments_to_cloud() # depends on [control=['if'], data=[]]
else:
# new message. Attachments are included and saved.
if not self.__is_draft:
raise RuntimeError('Only draft messages can be saved as drafts') # depends on [control=['if'], data=[]]
target_folder = target_folder or OutlookWellKnowFolderNames.DRAFTS
if isinstance(target_folder, OutlookWellKnowFolderNames):
target_folder = target_folder.value # depends on [control=['if'], data=[]]
elif not isinstance(target_folder, str):
# a Folder instance
target_folder = getattr(target_folder, 'folder_id', OutlookWellKnowFolderNames.DRAFTS.value) # depends on [control=['if'], data=[]]
url = self.build_url(self._endpoints.get('create_draft_folder').format(id=target_folder))
method = self.con.post
data = self.to_api_data()
if not data:
return True # depends on [control=['if'], data=[]]
response = method(url, data=data)
if not response:
return False # depends on [control=['if'], data=[]]
self._track_changes.clear() # reset the tracked changes as they are all saved
if not self.object_id:
# new message
message = response.json()
self.object_id = message.get(self._cc('id'), None)
self.folder_id = message.get(self._cc('parentFolderId'), None)
# fallback to office365 v1.0
self.__created = message.get(self._cc('createdDateTime'), message.get(self._cc('dateTimeCreated'), None))
# fallback to office365 v1.0
self.__modified = message.get(self._cc('lastModifiedDateTime'), message.get(self._cc('dateTimeModified'), None))
self.__created = parse(self.__created).astimezone(self.protocol.timezone) if self.__created else None
self.__modified = parse(self.__modified).astimezone(self.protocol.timezone) if self.__modified else None # depends on [control=['if'], data=[]]
else:
self.__modified = self.protocol.timezone.localize(dt.datetime.now())
return True |
def reset_logformat_timestamped(logger: logging.Logger,
extraname: str = "",
level: int = logging.INFO) -> None:
"""
Apply a simple time-stamped log format to an existing logger, and set
its loglevel to either ``logging.DEBUG`` or ``logging.INFO``.
Args:
logger: logger to modify
extraname: additional name to append to the logger's name
level: log level to set
"""
namebit = extraname + ":" if extraname else ""
fmt = ("%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s:" + namebit +
"%(message)s")
# logger.info(fmt)
reset_logformat(logger, fmt=fmt)
# logger.info(fmt)
logger.setLevel(level) | def function[reset_logformat_timestamped, parameter[logger, extraname, level]]:
constant[
Apply a simple time-stamped log format to an existing logger, and set
its loglevel to either ``logging.DEBUG`` or ``logging.INFO``.
Args:
logger: logger to modify
extraname: additional name to append to the logger's name
level: log level to set
]
variable[namebit] assign[=] <ast.IfExp object at 0x7da1b189ef50>
variable[fmt] assign[=] binary_operation[binary_operation[constant[%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s:] + name[namebit]] + constant[%(message)s]]
call[name[reset_logformat], parameter[name[logger]]]
call[name[logger].setLevel, parameter[name[level]]] | keyword[def] identifier[reset_logformat_timestamped] ( identifier[logger] : identifier[logging] . identifier[Logger] ,
identifier[extraname] : identifier[str] = literal[string] ,
identifier[level] : identifier[int] = identifier[logging] . identifier[INFO] )-> keyword[None] :
literal[string]
identifier[namebit] = identifier[extraname] + literal[string] keyword[if] identifier[extraname] keyword[else] literal[string]
identifier[fmt] =( literal[string] + identifier[namebit] +
literal[string] )
identifier[reset_logformat] ( identifier[logger] , identifier[fmt] = identifier[fmt] )
identifier[logger] . identifier[setLevel] ( identifier[level] ) | def reset_logformat_timestamped(logger: logging.Logger, extraname: str='', level: int=logging.INFO) -> None:
"""
Apply a simple time-stamped log format to an existing logger, and set
its loglevel to either ``logging.DEBUG`` or ``logging.INFO``.
Args:
logger: logger to modify
extraname: additional name to append to the logger's name
level: log level to set
"""
namebit = extraname + ':' if extraname else ''
fmt = '%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s:' + namebit + '%(message)s'
# logger.info(fmt)
reset_logformat(logger, fmt=fmt)
# logger.info(fmt)
logger.setLevel(level) |
def to_lines(s):
"""
Like `str.splitlines()`, except that an empty string results in a
one-element list and a trailing newline results in a trailing empty string
(and all without re-implementing Python's changing line-splitting
algorithm).
>>> to_lines('')
['']
>>> to_lines('\\n')
['', '']
>>> to_lines('foo')
['foo']
>>> to_lines('foo\\n')
['foo', '']
>>> to_lines('foo\\nbar')
['foo', 'bar']
>>> to_lines('foo\\fbar')
['foo', 'bar']
>>> to_lines('foo\\vbar')
['foo', 'bar']
"""
lines = s.splitlines(True)
if not lines:
return ['']
if lines[-1].splitlines() != [lines[-1]]:
lines.append('')
for i,l in enumerate(lines):
l2 = l.splitlines()
assert len(l2) in (0,1)
lines[i] = l2[0] if l2 else ''
if PY2 and isinstance(s, str):
# Manually split on \f and \v
lines = [
lf for l in lines
for lv in l.split('\v')
for lf in lv.split('\f')
]
return lines | def function[to_lines, parameter[s]]:
constant[
Like `str.splitlines()`, except that an empty string results in a
one-element list and a trailing newline results in a trailing empty string
(and all without re-implementing Python's changing line-splitting
algorithm).
>>> to_lines('')
['']
>>> to_lines('\n')
['', '']
>>> to_lines('foo')
['foo']
>>> to_lines('foo\n')
['foo', '']
>>> to_lines('foo\nbar')
['foo', 'bar']
>>> to_lines('foo\fbar')
['foo', 'bar']
>>> to_lines('foo\vbar')
['foo', 'bar']
]
variable[lines] assign[=] call[name[s].splitlines, parameter[constant[True]]]
if <ast.UnaryOp object at 0x7da18dc04820> begin[:]
return[list[[<ast.Constant object at 0x7da18dc07160>]]]
if compare[call[call[name[lines]][<ast.UnaryOp object at 0x7da18dc04d60>].splitlines, parameter[]] not_equal[!=] list[[<ast.Subscript object at 0x7da18dc06440>]]] begin[:]
call[name[lines].append, parameter[constant[]]]
for taget[tuple[[<ast.Name object at 0x7da18dc05060>, <ast.Name object at 0x7da18dc07010>]]] in starred[call[name[enumerate], parameter[name[lines]]]] begin[:]
variable[l2] assign[=] call[name[l].splitlines, parameter[]]
assert[compare[call[name[len], parameter[name[l2]]] in tuple[[<ast.Constant object at 0x7da18dc050c0>, <ast.Constant object at 0x7da18dc04eb0>]]]]
call[name[lines]][name[i]] assign[=] <ast.IfExp object at 0x7da18dc040d0>
if <ast.BoolOp object at 0x7da1b2345d80> begin[:]
variable[lines] assign[=] <ast.ListComp object at 0x7da1b2347400>
return[name[lines]] | keyword[def] identifier[to_lines] ( identifier[s] ):
literal[string]
identifier[lines] = identifier[s] . identifier[splitlines] ( keyword[True] )
keyword[if] keyword[not] identifier[lines] :
keyword[return] [ literal[string] ]
keyword[if] identifier[lines] [- literal[int] ]. identifier[splitlines] ()!=[ identifier[lines] [- literal[int] ]]:
identifier[lines] . identifier[append] ( literal[string] )
keyword[for] identifier[i] , identifier[l] keyword[in] identifier[enumerate] ( identifier[lines] ):
identifier[l2] = identifier[l] . identifier[splitlines] ()
keyword[assert] identifier[len] ( identifier[l2] ) keyword[in] ( literal[int] , literal[int] )
identifier[lines] [ identifier[i] ]= identifier[l2] [ literal[int] ] keyword[if] identifier[l2] keyword[else] literal[string]
keyword[if] identifier[PY2] keyword[and] identifier[isinstance] ( identifier[s] , identifier[str] ):
identifier[lines] =[
identifier[lf] keyword[for] identifier[l] keyword[in] identifier[lines]
keyword[for] identifier[lv] keyword[in] identifier[l] . identifier[split] ( literal[string] )
keyword[for] identifier[lf] keyword[in] identifier[lv] . identifier[split] ( literal[string] )
]
keyword[return] identifier[lines] | def to_lines(s):
"""
Like `str.splitlines()`, except that an empty string results in a
one-element list and a trailing newline results in a trailing empty string
(and all without re-implementing Python's changing line-splitting
algorithm).
>>> to_lines('')
['']
>>> to_lines('\\n')
['', '']
>>> to_lines('foo')
['foo']
>>> to_lines('foo\\n')
['foo', '']
>>> to_lines('foo\\nbar')
['foo', 'bar']
>>> to_lines('foo\\fbar')
['foo', 'bar']
>>> to_lines('foo\\vbar')
['foo', 'bar']
"""
lines = s.splitlines(True)
if not lines:
return [''] # depends on [control=['if'], data=[]]
if lines[-1].splitlines() != [lines[-1]]:
lines.append('') # depends on [control=['if'], data=[]]
for (i, l) in enumerate(lines):
l2 = l.splitlines()
assert len(l2) in (0, 1)
lines[i] = l2[0] if l2 else '' # depends on [control=['for'], data=[]]
if PY2 and isinstance(s, str):
# Manually split on \f and \v
lines = [lf for l in lines for lv in l.split('\x0b') for lf in lv.split('\x0c')] # depends on [control=['if'], data=[]]
return lines |
def build_remap_symbols(self, name_generator, children_only=True):
"""
This builds the replacement table for all the defined symbols
for all the children, and this scope, if the children_only
argument is False.
"""
if not children_only:
replacement = name_generator(skip=(self._reserved_symbols))
for symbol, c in reversed(sorted(
self.referenced_symbols.items(), key=itemgetter(1, 0))):
if symbol not in self.local_declared_symbols:
continue
self.remapped_symbols[symbol] = next(replacement)
for child in self.children:
child.build_remap_symbols(name_generator, False) | def function[build_remap_symbols, parameter[self, name_generator, children_only]]:
constant[
This builds the replacement table for all the defined symbols
for all the children, and this scope, if the children_only
argument is False.
]
if <ast.UnaryOp object at 0x7da20e9b06a0> begin[:]
variable[replacement] assign[=] call[name[name_generator], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20e9b02e0>, <ast.Name object at 0x7da20e9b05e0>]]] in starred[call[name[reversed], parameter[call[name[sorted], parameter[call[name[self].referenced_symbols.items, parameter[]]]]]]] begin[:]
if compare[name[symbol] <ast.NotIn object at 0x7da2590d7190> name[self].local_declared_symbols] begin[:]
continue
call[name[self].remapped_symbols][name[symbol]] assign[=] call[name[next], parameter[name[replacement]]]
for taget[name[child]] in starred[name[self].children] begin[:]
call[name[child].build_remap_symbols, parameter[name[name_generator], constant[False]]] | keyword[def] identifier[build_remap_symbols] ( identifier[self] , identifier[name_generator] , identifier[children_only] = keyword[True] ):
literal[string]
keyword[if] keyword[not] identifier[children_only] :
identifier[replacement] = identifier[name_generator] ( identifier[skip] =( identifier[self] . identifier[_reserved_symbols] ))
keyword[for] identifier[symbol] , identifier[c] keyword[in] identifier[reversed] ( identifier[sorted] (
identifier[self] . identifier[referenced_symbols] . identifier[items] (), identifier[key] = identifier[itemgetter] ( literal[int] , literal[int] ))):
keyword[if] identifier[symbol] keyword[not] keyword[in] identifier[self] . identifier[local_declared_symbols] :
keyword[continue]
identifier[self] . identifier[remapped_symbols] [ identifier[symbol] ]= identifier[next] ( identifier[replacement] )
keyword[for] identifier[child] keyword[in] identifier[self] . identifier[children] :
identifier[child] . identifier[build_remap_symbols] ( identifier[name_generator] , keyword[False] ) | def build_remap_symbols(self, name_generator, children_only=True):
"""
This builds the replacement table for all the defined symbols
for all the children, and this scope, if the children_only
argument is False.
"""
if not children_only:
replacement = name_generator(skip=self._reserved_symbols)
for (symbol, c) in reversed(sorted(self.referenced_symbols.items(), key=itemgetter(1, 0))):
if symbol not in self.local_declared_symbols:
continue # depends on [control=['if'], data=[]]
self.remapped_symbols[symbol] = next(replacement) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
for child in self.children:
child.build_remap_symbols(name_generator, False) # depends on [control=['for'], data=['child']] |
def refresh_token(self, client_id, client_secret, refresh_token, grant_type='refresh_token'):
"""Calls oauth/token endpoint with refresh token grant type
Use this endpoint to refresh an access token, using the refresh token you got during authorization.
Args:
grant_type (str): Denotes the flow you're using. For refresh token
use refresh_token
client_id (str): your application's client Id
client_secret (str): your application's client Secret
refresh_token (str): The refresh token returned from the initial token request.
Returns:
access_token, id_token
"""
return self.post(
'https://{}/oauth/token'.format(self.domain),
data={
'client_id': client_id,
'client_secret': client_secret,
'refresh_token': refresh_token,
'grant_type': grant_type
},
headers={'Content-Type': 'application/json'}
) | def function[refresh_token, parameter[self, client_id, client_secret, refresh_token, grant_type]]:
constant[Calls oauth/token endpoint with refresh token grant type
Use this endpoint to refresh an access token, using the refresh token you got during authorization.
Args:
grant_type (str): Denotes the flow you're using. For refresh token
use refresh_token
client_id (str): your application's client Id
client_secret (str): your application's client Secret
refresh_token (str): The refresh token returned from the initial token request.
Returns:
access_token, id_token
]
return[call[name[self].post, parameter[call[constant[https://{}/oauth/token].format, parameter[name[self].domain]]]]] | keyword[def] identifier[refresh_token] ( identifier[self] , identifier[client_id] , identifier[client_secret] , identifier[refresh_token] , identifier[grant_type] = literal[string] ):
literal[string]
keyword[return] identifier[self] . identifier[post] (
literal[string] . identifier[format] ( identifier[self] . identifier[domain] ),
identifier[data] ={
literal[string] : identifier[client_id] ,
literal[string] : identifier[client_secret] ,
literal[string] : identifier[refresh_token] ,
literal[string] : identifier[grant_type]
},
identifier[headers] ={ literal[string] : literal[string] }
) | def refresh_token(self, client_id, client_secret, refresh_token, grant_type='refresh_token'):
"""Calls oauth/token endpoint with refresh token grant type
Use this endpoint to refresh an access token, using the refresh token you got during authorization.
Args:
grant_type (str): Denotes the flow you're using. For refresh token
use refresh_token
client_id (str): your application's client Id
client_secret (str): your application's client Secret
refresh_token (str): The refresh token returned from the initial token request.
Returns:
access_token, id_token
"""
return self.post('https://{}/oauth/token'.format(self.domain), data={'client_id': client_id, 'client_secret': client_secret, 'refresh_token': refresh_token, 'grant_type': grant_type}, headers={'Content-Type': 'application/json'}) |
def _gen_3spec(op, path, xattr=False):
"""
Returns a Spec tuple suitable for passing to the underlying C extension.
This variant is called for operations that lack an input value.
:param str path: The path to fetch
:param bool xattr: Whether this is an extended attribute
:return: a spec suitable for passing to the underlying C extension
"""
flags = 0
if xattr:
flags |= _P.SDSPEC_F_XATTR
return Spec(op, path, flags) | def function[_gen_3spec, parameter[op, path, xattr]]:
constant[
Returns a Spec tuple suitable for passing to the underlying C extension.
This variant is called for operations that lack an input value.
:param str path: The path to fetch
:param bool xattr: Whether this is an extended attribute
:return: a spec suitable for passing to the underlying C extension
]
variable[flags] assign[=] constant[0]
if name[xattr] begin[:]
<ast.AugAssign object at 0x7da2054a7be0>
return[call[name[Spec], parameter[name[op], name[path], name[flags]]]] | keyword[def] identifier[_gen_3spec] ( identifier[op] , identifier[path] , identifier[xattr] = keyword[False] ):
literal[string]
identifier[flags] = literal[int]
keyword[if] identifier[xattr] :
identifier[flags] |= identifier[_P] . identifier[SDSPEC_F_XATTR]
keyword[return] identifier[Spec] ( identifier[op] , identifier[path] , identifier[flags] ) | def _gen_3spec(op, path, xattr=False):
"""
Returns a Spec tuple suitable for passing to the underlying C extension.
This variant is called for operations that lack an input value.
:param str path: The path to fetch
:param bool xattr: Whether this is an extended attribute
:return: a spec suitable for passing to the underlying C extension
"""
flags = 0
if xattr:
flags |= _P.SDSPEC_F_XATTR # depends on [control=['if'], data=[]]
return Spec(op, path, flags) |
def node_from_xml(xmlfile, nodefactory=Node):
"""
Convert a .xml file into a Node object.
:param xmlfile: a file name or file object open for reading
"""
root = parse(xmlfile).getroot()
return node_from_elem(root, nodefactory) | def function[node_from_xml, parameter[xmlfile, nodefactory]]:
constant[
Convert a .xml file into a Node object.
:param xmlfile: a file name or file object open for reading
]
variable[root] assign[=] call[call[name[parse], parameter[name[xmlfile]]].getroot, parameter[]]
return[call[name[node_from_elem], parameter[name[root], name[nodefactory]]]] | keyword[def] identifier[node_from_xml] ( identifier[xmlfile] , identifier[nodefactory] = identifier[Node] ):
literal[string]
identifier[root] = identifier[parse] ( identifier[xmlfile] ). identifier[getroot] ()
keyword[return] identifier[node_from_elem] ( identifier[root] , identifier[nodefactory] ) | def node_from_xml(xmlfile, nodefactory=Node):
"""
Convert a .xml file into a Node object.
:param xmlfile: a file name or file object open for reading
"""
root = parse(xmlfile).getroot()
return node_from_elem(root, nodefactory) |
def enable_reporting(self):
"""Set an input pin to report values."""
if self.mode is not INPUT:
raise IOError("{0} is not an input and can therefore not report".format(self))
if self.type == ANALOG:
self.reporting = True
msg = bytearray([REPORT_ANALOG + self.pin_number, 1])
self.board.sp.write(msg)
else:
self.port.enable_reporting() | def function[enable_reporting, parameter[self]]:
constant[Set an input pin to report values.]
if compare[name[self].mode is_not name[INPUT]] begin[:]
<ast.Raise object at 0x7da1b1e65e70>
if compare[name[self].type equal[==] name[ANALOG]] begin[:]
name[self].reporting assign[=] constant[True]
variable[msg] assign[=] call[name[bytearray], parameter[list[[<ast.BinOp object at 0x7da1b1e655d0>, <ast.Constant object at 0x7da1b1e64cd0>]]]]
call[name[self].board.sp.write, parameter[name[msg]]] | keyword[def] identifier[enable_reporting] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[mode] keyword[is] keyword[not] identifier[INPUT] :
keyword[raise] identifier[IOError] ( literal[string] . identifier[format] ( identifier[self] ))
keyword[if] identifier[self] . identifier[type] == identifier[ANALOG] :
identifier[self] . identifier[reporting] = keyword[True]
identifier[msg] = identifier[bytearray] ([ identifier[REPORT_ANALOG] + identifier[self] . identifier[pin_number] , literal[int] ])
identifier[self] . identifier[board] . identifier[sp] . identifier[write] ( identifier[msg] )
keyword[else] :
identifier[self] . identifier[port] . identifier[enable_reporting] () | def enable_reporting(self):
"""Set an input pin to report values."""
if self.mode is not INPUT:
raise IOError('{0} is not an input and can therefore not report'.format(self)) # depends on [control=['if'], data=[]]
if self.type == ANALOG:
self.reporting = True
msg = bytearray([REPORT_ANALOG + self.pin_number, 1])
self.board.sp.write(msg) # depends on [control=['if'], data=[]]
else:
self.port.enable_reporting() |
def brier_score(observations, forecasts):
"""
Calculate the Brier score (BS)
The Brier score (BS) scores binary forecasts $k \in \{0, 1\}$,
..math:
BS(p, k) = (p_1 - k)^2,
where $p_1$ is the forecast probability of $k=1$.
Parameters
----------
observations, forecasts : array_like
Broadcast compatible arrays of forecasts (probabilities between 0 and
1) and observations (0, 1 or NaN).
Returns
-------
out : np.ndarray
Brier score for each forecast/observation.
References
----------
Jochen Broecker. Chapter 7 in Forecast Verification: A Practitioner's Guide
in Atmospheric Science. John Wiley & Sons, Ltd, Chichester, UK, 2nd
edition, 2012.
https://drive.google.com/a/climate.com/file/d/0B8AfRcot4nsIYmc3alpTeTZpLWc
Tilmann Gneiting and Adrian E. Raftery. Strictly proper scoring rules,
prediction, and estimation, 2005. University of Washington Department of
Statistics Technical Report no. 463R.
https://www.stat.washington.edu/research/reports/2004/tr463R.pdf
"""
machine_eps = np.finfo(float).eps
forecasts = np.asarray(forecasts)
if (forecasts < 0.0).any() or (forecasts > (1.0 + machine_eps)).any():
raise ValueError('forecasts must not be outside of the unit interval '
'[0, 1]')
observations = np.asarray(observations)
if observations.ndim > 0:
valid_obs = observations[~np.isnan(observations)]
else:
valid_obs = observations if not np.isnan(observations) else []
if not set(np.unique(valid_obs)) <= {0, 1}:
raise ValueError('observations can only contain 0, 1, or NaN')
return (forecasts - observations) ** 2 | def function[brier_score, parameter[observations, forecasts]]:
constant[
Calculate the Brier score (BS)
The Brier score (BS) scores binary forecasts $k \in \{0, 1\}$,
..math:
BS(p, k) = (p_1 - k)^2,
where $p_1$ is the forecast probability of $k=1$.
Parameters
----------
observations, forecasts : array_like
Broadcast compatible arrays of forecasts (probabilities between 0 and
1) and observations (0, 1 or NaN).
Returns
-------
out : np.ndarray
Brier score for each forecast/observation.
References
----------
Jochen Broecker. Chapter 7 in Forecast Verification: A Practitioner's Guide
in Atmospheric Science. John Wiley & Sons, Ltd, Chichester, UK, 2nd
edition, 2012.
https://drive.google.com/a/climate.com/file/d/0B8AfRcot4nsIYmc3alpTeTZpLWc
Tilmann Gneiting and Adrian E. Raftery. Strictly proper scoring rules,
prediction, and estimation, 2005. University of Washington Department of
Statistics Technical Report no. 463R.
https://www.stat.washington.edu/research/reports/2004/tr463R.pdf
]
variable[machine_eps] assign[=] call[name[np].finfo, parameter[name[float]]].eps
variable[forecasts] assign[=] call[name[np].asarray, parameter[name[forecasts]]]
if <ast.BoolOp object at 0x7da207f9a590> begin[:]
<ast.Raise object at 0x7da207f994e0>
variable[observations] assign[=] call[name[np].asarray, parameter[name[observations]]]
if compare[name[observations].ndim greater[>] constant[0]] begin[:]
variable[valid_obs] assign[=] call[name[observations]][<ast.UnaryOp object at 0x7da207f9bdc0>]
if <ast.UnaryOp object at 0x7da207f99ab0> begin[:]
<ast.Raise object at 0x7da207f9a4d0>
return[binary_operation[binary_operation[name[forecasts] - name[observations]] ** constant[2]]] | keyword[def] identifier[brier_score] ( identifier[observations] , identifier[forecasts] ):
literal[string]
identifier[machine_eps] = identifier[np] . identifier[finfo] ( identifier[float] ). identifier[eps]
identifier[forecasts] = identifier[np] . identifier[asarray] ( identifier[forecasts] )
keyword[if] ( identifier[forecasts] < literal[int] ). identifier[any] () keyword[or] ( identifier[forecasts] >( literal[int] + identifier[machine_eps] )). identifier[any] ():
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[observations] = identifier[np] . identifier[asarray] ( identifier[observations] )
keyword[if] identifier[observations] . identifier[ndim] > literal[int] :
identifier[valid_obs] = identifier[observations] [~ identifier[np] . identifier[isnan] ( identifier[observations] )]
keyword[else] :
identifier[valid_obs] = identifier[observations] keyword[if] keyword[not] identifier[np] . identifier[isnan] ( identifier[observations] ) keyword[else] []
keyword[if] keyword[not] identifier[set] ( identifier[np] . identifier[unique] ( identifier[valid_obs] ))<={ literal[int] , literal[int] }:
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] ( identifier[forecasts] - identifier[observations] )** literal[int] | def brier_score(observations, forecasts):
"""
Calculate the Brier score (BS)
The Brier score (BS) scores binary forecasts $k \\in \\{0, 1\\}$,
..math:
BS(p, k) = (p_1 - k)^2,
where $p_1$ is the forecast probability of $k=1$.
Parameters
----------
observations, forecasts : array_like
Broadcast compatible arrays of forecasts (probabilities between 0 and
1) and observations (0, 1 or NaN).
Returns
-------
out : np.ndarray
Brier score for each forecast/observation.
References
----------
Jochen Broecker. Chapter 7 in Forecast Verification: A Practitioner's Guide
in Atmospheric Science. John Wiley & Sons, Ltd, Chichester, UK, 2nd
edition, 2012.
https://drive.google.com/a/climate.com/file/d/0B8AfRcot4nsIYmc3alpTeTZpLWc
Tilmann Gneiting and Adrian E. Raftery. Strictly proper scoring rules,
prediction, and estimation, 2005. University of Washington Department of
Statistics Technical Report no. 463R.
https://www.stat.washington.edu/research/reports/2004/tr463R.pdf
"""
machine_eps = np.finfo(float).eps
forecasts = np.asarray(forecasts)
if (forecasts < 0.0).any() or (forecasts > 1.0 + machine_eps).any():
raise ValueError('forecasts must not be outside of the unit interval [0, 1]') # depends on [control=['if'], data=[]]
observations = np.asarray(observations)
if observations.ndim > 0:
valid_obs = observations[~np.isnan(observations)] # depends on [control=['if'], data=[]]
else:
valid_obs = observations if not np.isnan(observations) else []
if not set(np.unique(valid_obs)) <= {0, 1}:
raise ValueError('observations can only contain 0, 1, or NaN') # depends on [control=['if'], data=[]]
return (forecasts - observations) ** 2 |
def _get_secedit_value(option):
'''
Helper function that looks for the passed option in the secedit data
'''
secedit_data = _get_secedit_data()
for _line in secedit_data:
if _line.startswith(option):
return _line.split('=')[1].strip()
return 'Not Defined' | def function[_get_secedit_value, parameter[option]]:
constant[
Helper function that looks for the passed option in the secedit data
]
variable[secedit_data] assign[=] call[name[_get_secedit_data], parameter[]]
for taget[name[_line]] in starred[name[secedit_data]] begin[:]
if call[name[_line].startswith, parameter[name[option]]] begin[:]
return[call[call[call[name[_line].split, parameter[constant[=]]]][constant[1]].strip, parameter[]]]
return[constant[Not Defined]] | keyword[def] identifier[_get_secedit_value] ( identifier[option] ):
literal[string]
identifier[secedit_data] = identifier[_get_secedit_data] ()
keyword[for] identifier[_line] keyword[in] identifier[secedit_data] :
keyword[if] identifier[_line] . identifier[startswith] ( identifier[option] ):
keyword[return] identifier[_line] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[strip] ()
keyword[return] literal[string] | def _get_secedit_value(option):
"""
Helper function that looks for the passed option in the secedit data
"""
secedit_data = _get_secedit_data()
for _line in secedit_data:
if _line.startswith(option):
return _line.split('=')[1].strip() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['_line']]
return 'Not Defined' |
def metric_path(cls, project, metric):
"""Return a fully-qualified metric string."""
return google.api_core.path_template.expand(
"projects/{project}/metrics/{metric}", project=project, metric=metric
) | def function[metric_path, parameter[cls, project, metric]]:
constant[Return a fully-qualified metric string.]
return[call[name[google].api_core.path_template.expand, parameter[constant[projects/{project}/metrics/{metric}]]]] | keyword[def] identifier[metric_path] ( identifier[cls] , identifier[project] , identifier[metric] ):
literal[string]
keyword[return] identifier[google] . identifier[api_core] . identifier[path_template] . identifier[expand] (
literal[string] , identifier[project] = identifier[project] , identifier[metric] = identifier[metric]
) | def metric_path(cls, project, metric):
"""Return a fully-qualified metric string."""
return google.api_core.path_template.expand('projects/{project}/metrics/{metric}', project=project, metric=metric) |
def make_eventlogitem_message(message, condition='contains', negate=False, preserve_case=False):
"""
Create a node for EventLogItem/message
:return: A IndicatorItem represented as an Element node
"""
document = 'EventLogItem'
search = 'EventLogItem/message'
content_type = 'string'
content = message
ii_node = ioc_api.make_indicatoritem_node(condition, document, search, content_type, content,
negate=negate, preserve_case=preserve_case)
return ii_node | def function[make_eventlogitem_message, parameter[message, condition, negate, preserve_case]]:
constant[
Create a node for EventLogItem/message
:return: A IndicatorItem represented as an Element node
]
variable[document] assign[=] constant[EventLogItem]
variable[search] assign[=] constant[EventLogItem/message]
variable[content_type] assign[=] constant[string]
variable[content] assign[=] name[message]
variable[ii_node] assign[=] call[name[ioc_api].make_indicatoritem_node, parameter[name[condition], name[document], name[search], name[content_type], name[content]]]
return[name[ii_node]] | keyword[def] identifier[make_eventlogitem_message] ( identifier[message] , identifier[condition] = literal[string] , identifier[negate] = keyword[False] , identifier[preserve_case] = keyword[False] ):
literal[string]
identifier[document] = literal[string]
identifier[search] = literal[string]
identifier[content_type] = literal[string]
identifier[content] = identifier[message]
identifier[ii_node] = identifier[ioc_api] . identifier[make_indicatoritem_node] ( identifier[condition] , identifier[document] , identifier[search] , identifier[content_type] , identifier[content] ,
identifier[negate] = identifier[negate] , identifier[preserve_case] = identifier[preserve_case] )
keyword[return] identifier[ii_node] | def make_eventlogitem_message(message, condition='contains', negate=False, preserve_case=False):
"""
Create a node for EventLogItem/message
:return: A IndicatorItem represented as an Element node
"""
document = 'EventLogItem'
search = 'EventLogItem/message'
content_type = 'string'
content = message
ii_node = ioc_api.make_indicatoritem_node(condition, document, search, content_type, content, negate=negate, preserve_case=preserve_case)
return ii_node |
def find_identifiers(src):
"""
Search for a valid identifier (DOI, ISBN, arXiv, HAL) in a given file.
.. note::
This function returns the first matching identifier, that is the most
likely to be relevant for this file. However, it may fail and return an
identifier taken from the references or another paper.
.. note::
You will need to have ``pdftotext`` and/or ``djvutxt`` installed \
system-wide before processing files with this function.
:params src: Path to the file to scan.
:returns: a tuple (type, identifier) or ``(None, None)`` if not found or \
an error occurred.
"""
if src.endswith(".pdf"):
totext = subprocess.Popen(["pdftotext", src, "-"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1)
elif src.endswith(".djvu"):
totext = subprocess.Popen(["djvutxt", src],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1)
else:
return (None, None)
while totext.poll() is None:
extract_full = ' '.join([i.decode("utf-8").strip()
for i in totext.stdout.readlines()])
# Loop over all the valid identifier types
for identifier in __valid_identifiers__:
# Dynamically call the ``extract_from_text`` method for the
# associated module.
module = sys.modules.get("libbmc.%s" % (identifier,), None)
if module is None:
continue
found_id = getattr(module, "extract_from_text")(extract_full)
if found_id:
totext.terminate()
# found_id is a list of found IDs
return (identifier, found_id[0])
return (None, None) | def function[find_identifiers, parameter[src]]:
constant[
Search for a valid identifier (DOI, ISBN, arXiv, HAL) in a given file.
.. note::
This function returns the first matching identifier, that is the most
likely to be relevant for this file. However, it may fail and return an
identifier taken from the references or another paper.
.. note::
You will need to have ``pdftotext`` and/or ``djvutxt`` installed system-wide before processing files with this function.
:params src: Path to the file to scan.
:returns: a tuple (type, identifier) or ``(None, None)`` if not found or an error occurred.
]
if call[name[src].endswith, parameter[constant[.pdf]]] begin[:]
variable[totext] assign[=] call[name[subprocess].Popen, parameter[list[[<ast.Constant object at 0x7da20e954130>, <ast.Name object at 0x7da20e956b30>, <ast.Constant object at 0x7da20e957670>]]]]
while compare[call[name[totext].poll, parameter[]] is constant[None]] begin[:]
variable[extract_full] assign[=] call[constant[ ].join, parameter[<ast.ListComp object at 0x7da20e957370>]]
for taget[name[identifier]] in starred[name[__valid_identifiers__]] begin[:]
variable[module] assign[=] call[name[sys].modules.get, parameter[binary_operation[constant[libbmc.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e957fa0>]]], constant[None]]]
if compare[name[module] is constant[None]] begin[:]
continue
variable[found_id] assign[=] call[call[name[getattr], parameter[name[module], constant[extract_from_text]]], parameter[name[extract_full]]]
if name[found_id] begin[:]
call[name[totext].terminate, parameter[]]
return[tuple[[<ast.Name object at 0x7da20e957250>, <ast.Subscript object at 0x7da20e956590>]]]
return[tuple[[<ast.Constant object at 0x7da20e957ca0>, <ast.Constant object at 0x7da20e957460>]]] | keyword[def] identifier[find_identifiers] ( identifier[src] ):
literal[string]
keyword[if] identifier[src] . identifier[endswith] ( literal[string] ):
identifier[totext] = identifier[subprocess] . identifier[Popen] ([ literal[string] , identifier[src] , literal[string] ],
identifier[stdout] = identifier[subprocess] . identifier[PIPE] ,
identifier[stderr] = identifier[subprocess] . identifier[PIPE] ,
identifier[bufsize] = literal[int] )
keyword[elif] identifier[src] . identifier[endswith] ( literal[string] ):
identifier[totext] = identifier[subprocess] . identifier[Popen] ([ literal[string] , identifier[src] ],
identifier[stdout] = identifier[subprocess] . identifier[PIPE] ,
identifier[stderr] = identifier[subprocess] . identifier[PIPE] ,
identifier[bufsize] = literal[int] )
keyword[else] :
keyword[return] ( keyword[None] , keyword[None] )
keyword[while] identifier[totext] . identifier[poll] () keyword[is] keyword[None] :
identifier[extract_full] = literal[string] . identifier[join] ([ identifier[i] . identifier[decode] ( literal[string] ). identifier[strip] ()
keyword[for] identifier[i] keyword[in] identifier[totext] . identifier[stdout] . identifier[readlines] ()])
keyword[for] identifier[identifier] keyword[in] identifier[__valid_identifiers__] :
identifier[module] = identifier[sys] . identifier[modules] . identifier[get] ( literal[string] %( identifier[identifier] ,), keyword[None] )
keyword[if] identifier[module] keyword[is] keyword[None] :
keyword[continue]
identifier[found_id] = identifier[getattr] ( identifier[module] , literal[string] )( identifier[extract_full] )
keyword[if] identifier[found_id] :
identifier[totext] . identifier[terminate] ()
keyword[return] ( identifier[identifier] , identifier[found_id] [ literal[int] ])
keyword[return] ( keyword[None] , keyword[None] ) | def find_identifiers(src):
"""
Search for a valid identifier (DOI, ISBN, arXiv, HAL) in a given file.
.. note::
This function returns the first matching identifier, that is the most
likely to be relevant for this file. However, it may fail and return an
identifier taken from the references or another paper.
.. note::
You will need to have ``pdftotext`` and/or ``djvutxt`` installed system-wide before processing files with this function.
:params src: Path to the file to scan.
:returns: a tuple (type, identifier) or ``(None, None)`` if not found or an error occurred.
"""
if src.endswith('.pdf'):
totext = subprocess.Popen(['pdftotext', src, '-'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1) # depends on [control=['if'], data=[]]
elif src.endswith('.djvu'):
totext = subprocess.Popen(['djvutxt', src], stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1) # depends on [control=['if'], data=[]]
else:
return (None, None)
while totext.poll() is None:
extract_full = ' '.join([i.decode('utf-8').strip() for i in totext.stdout.readlines()])
# Loop over all the valid identifier types
for identifier in __valid_identifiers__:
# Dynamically call the ``extract_from_text`` method for the
# associated module.
module = sys.modules.get('libbmc.%s' % (identifier,), None)
if module is None:
continue # depends on [control=['if'], data=[]]
found_id = getattr(module, 'extract_from_text')(extract_full)
if found_id:
totext.terminate()
# found_id is a list of found IDs
return (identifier, found_id[0]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['identifier']] # depends on [control=['while'], data=[]]
return (None, None) |
def get_sensors():
""" Detect and return a list of Sensor objects """
import sensors
found_sensors = list()
def get_subfeature_value(feature, subfeature_type):
subfeature = chip.get_subfeature(feature, subfeature_type)
if subfeature:
return chip.get_value(subfeature.number)
for chip in sensors.get_detected_chips():
for feature in chip.get_features():
if feature.type == sensors.FEATURE_TEMP:
try:
name = chip.get_label(feature)
max = get_subfeature_value(feature, sensors.SUBFEATURE_TEMP_MAX)
current = get_subfeature_value(feature, sensors.SUBFEATURE_TEMP_INPUT)
critical = get_subfeature_value(feature, sensors.SUBFEATURE_TEMP_CRIT)
if critical:
found_sensors.append(Sensor(name=name, current=current, maximum=max, critical=critical))
except sensors.SensorsException:
continue
return found_sensors | def function[get_sensors, parameter[]]:
constant[ Detect and return a list of Sensor objects ]
import module[sensors]
variable[found_sensors] assign[=] call[name[list], parameter[]]
def function[get_subfeature_value, parameter[feature, subfeature_type]]:
variable[subfeature] assign[=] call[name[chip].get_subfeature, parameter[name[feature], name[subfeature_type]]]
if name[subfeature] begin[:]
return[call[name[chip].get_value, parameter[name[subfeature].number]]]
for taget[name[chip]] in starred[call[name[sensors].get_detected_chips, parameter[]]] begin[:]
for taget[name[feature]] in starred[call[name[chip].get_features, parameter[]]] begin[:]
if compare[name[feature].type equal[==] name[sensors].FEATURE_TEMP] begin[:]
<ast.Try object at 0x7da1b2346920>
return[name[found_sensors]] | keyword[def] identifier[get_sensors] ():
literal[string]
keyword[import] identifier[sensors]
identifier[found_sensors] = identifier[list] ()
keyword[def] identifier[get_subfeature_value] ( identifier[feature] , identifier[subfeature_type] ):
identifier[subfeature] = identifier[chip] . identifier[get_subfeature] ( identifier[feature] , identifier[subfeature_type] )
keyword[if] identifier[subfeature] :
keyword[return] identifier[chip] . identifier[get_value] ( identifier[subfeature] . identifier[number] )
keyword[for] identifier[chip] keyword[in] identifier[sensors] . identifier[get_detected_chips] ():
keyword[for] identifier[feature] keyword[in] identifier[chip] . identifier[get_features] ():
keyword[if] identifier[feature] . identifier[type] == identifier[sensors] . identifier[FEATURE_TEMP] :
keyword[try] :
identifier[name] = identifier[chip] . identifier[get_label] ( identifier[feature] )
identifier[max] = identifier[get_subfeature_value] ( identifier[feature] , identifier[sensors] . identifier[SUBFEATURE_TEMP_MAX] )
identifier[current] = identifier[get_subfeature_value] ( identifier[feature] , identifier[sensors] . identifier[SUBFEATURE_TEMP_INPUT] )
identifier[critical] = identifier[get_subfeature_value] ( identifier[feature] , identifier[sensors] . identifier[SUBFEATURE_TEMP_CRIT] )
keyword[if] identifier[critical] :
identifier[found_sensors] . identifier[append] ( identifier[Sensor] ( identifier[name] = identifier[name] , identifier[current] = identifier[current] , identifier[maximum] = identifier[max] , identifier[critical] = identifier[critical] ))
keyword[except] identifier[sensors] . identifier[SensorsException] :
keyword[continue]
keyword[return] identifier[found_sensors] | def get_sensors():
""" Detect and return a list of Sensor objects """
import sensors
found_sensors = list()
def get_subfeature_value(feature, subfeature_type):
subfeature = chip.get_subfeature(feature, subfeature_type)
if subfeature:
return chip.get_value(subfeature.number) # depends on [control=['if'], data=[]]
for chip in sensors.get_detected_chips():
for feature in chip.get_features():
if feature.type == sensors.FEATURE_TEMP:
try:
name = chip.get_label(feature)
max = get_subfeature_value(feature, sensors.SUBFEATURE_TEMP_MAX)
current = get_subfeature_value(feature, sensors.SUBFEATURE_TEMP_INPUT)
critical = get_subfeature_value(feature, sensors.SUBFEATURE_TEMP_CRIT)
if critical:
found_sensors.append(Sensor(name=name, current=current, maximum=max, critical=critical)) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except sensors.SensorsException:
continue # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['feature']] # depends on [control=['for'], data=['chip']]
return found_sensors |
def staged_rewards(self):
"""
Returns staged rewards based on current physical states.
Stages consist of reaching, grasping, lifting, and hovering.
"""
reach_mult = 0.1
grasp_mult = 0.35
lift_mult = 0.5
hover_mult = 0.7
# filter out objects that are already on the correct pegs
names_to_reach = []
objs_to_reach = []
geoms_to_grasp = []
geoms_by_array = []
for i in range(len(self.ob_inits)):
if self.objects_on_pegs[i]:
continue
obj_str = str(self.item_names[i]) + "0"
names_to_reach.append(obj_str)
objs_to_reach.append(self.obj_body_id[obj_str])
geoms_to_grasp.extend(self.obj_geom_id[obj_str])
geoms_by_array.append(self.obj_geom_id[obj_str])
### reaching reward governed by distance to closest object ###
r_reach = 0.
if len(objs_to_reach):
# reaching reward via minimum distance to the handles of the objects (the last geom of each nut)
geom_ids = [elem[-1] for elem in geoms_by_array]
target_geom_pos = self.sim.data.geom_xpos[geom_ids]
gripper_site_pos = self.sim.data.site_xpos[self.eef_site_id]
dists = np.linalg.norm(
target_geom_pos - gripper_site_pos.reshape(1, -1), axis=1
)
r_reach = (1 - np.tanh(10.0 * min(dists))) * reach_mult
### grasping reward for touching any objects of interest ###
touch_left_finger = False
touch_right_finger = False
for i in range(self.sim.data.ncon):
c = self.sim.data.contact[i]
if c.geom1 in geoms_to_grasp:
if c.geom2 in self.l_finger_geom_ids:
touch_left_finger = True
if c.geom2 in self.r_finger_geom_ids:
touch_right_finger = True
elif c.geom2 in geoms_to_grasp:
if c.geom1 in self.l_finger_geom_ids:
touch_left_finger = True
if c.geom1 in self.r_finger_geom_ids:
touch_right_finger = True
has_grasp = touch_left_finger and touch_right_finger
r_grasp = int(has_grasp) * grasp_mult
### lifting reward for picking up an object ###
r_lift = 0.
if len(objs_to_reach) and r_grasp > 0.:
z_target = self.table_pos[2] + 0.2
object_z_locs = self.sim.data.body_xpos[objs_to_reach][:, 2]
z_dists = np.maximum(z_target - object_z_locs, 0.)
r_lift = grasp_mult + (1 - np.tanh(15.0 * min(z_dists))) * (
lift_mult - grasp_mult
)
### hover reward for getting object above peg ###
r_hover = 0.
if len(objs_to_reach):
r_hovers = np.zeros(len(objs_to_reach))
for i in range(len(objs_to_reach)):
if names_to_reach[i].startswith(self.item_names[0]):
peg_pos = self.peg1_pos[:2]
elif names_to_reach[i].startswith(self.item_names[1]):
peg_pos = self.peg2_pos[:2]
else:
raise Exception(
"Got invalid object to reach: {}".format(names_to_reach[i])
)
ob_xy = self.sim.data.body_xpos[objs_to_reach[i]][:2]
dist = np.linalg.norm(peg_pos - ob_xy)
r_hovers[i] = r_lift + (1 - np.tanh(10.0 * dist)) * (
hover_mult - lift_mult
)
r_hover = np.max(r_hovers)
return r_reach, r_grasp, r_lift, r_hover | def function[staged_rewards, parameter[self]]:
constant[
Returns staged rewards based on current physical states.
Stages consist of reaching, grasping, lifting, and hovering.
]
variable[reach_mult] assign[=] constant[0.1]
variable[grasp_mult] assign[=] constant[0.35]
variable[lift_mult] assign[=] constant[0.5]
variable[hover_mult] assign[=] constant[0.7]
variable[names_to_reach] assign[=] list[[]]
variable[objs_to_reach] assign[=] list[[]]
variable[geoms_to_grasp] assign[=] list[[]]
variable[geoms_by_array] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[self].ob_inits]]]]] begin[:]
if call[name[self].objects_on_pegs][name[i]] begin[:]
continue
variable[obj_str] assign[=] binary_operation[call[name[str], parameter[call[name[self].item_names][name[i]]]] + constant[0]]
call[name[names_to_reach].append, parameter[name[obj_str]]]
call[name[objs_to_reach].append, parameter[call[name[self].obj_body_id][name[obj_str]]]]
call[name[geoms_to_grasp].extend, parameter[call[name[self].obj_geom_id][name[obj_str]]]]
call[name[geoms_by_array].append, parameter[call[name[self].obj_geom_id][name[obj_str]]]]
variable[r_reach] assign[=] constant[0.0]
if call[name[len], parameter[name[objs_to_reach]]] begin[:]
variable[geom_ids] assign[=] <ast.ListComp object at 0x7da20c795270>
variable[target_geom_pos] assign[=] call[name[self].sim.data.geom_xpos][name[geom_ids]]
variable[gripper_site_pos] assign[=] call[name[self].sim.data.site_xpos][name[self].eef_site_id]
variable[dists] assign[=] call[name[np].linalg.norm, parameter[binary_operation[name[target_geom_pos] - call[name[gripper_site_pos].reshape, parameter[constant[1], <ast.UnaryOp object at 0x7da20c795ff0>]]]]]
variable[r_reach] assign[=] binary_operation[binary_operation[constant[1] - call[name[np].tanh, parameter[binary_operation[constant[10.0] * call[name[min], parameter[name[dists]]]]]]] * name[reach_mult]]
variable[touch_left_finger] assign[=] constant[False]
variable[touch_right_finger] assign[=] constant[False]
for taget[name[i]] in starred[call[name[range], parameter[name[self].sim.data.ncon]]] begin[:]
variable[c] assign[=] call[name[self].sim.data.contact][name[i]]
if compare[name[c].geom1 in name[geoms_to_grasp]] begin[:]
if compare[name[c].geom2 in name[self].l_finger_geom_ids] begin[:]
variable[touch_left_finger] assign[=] constant[True]
if compare[name[c].geom2 in name[self].r_finger_geom_ids] begin[:]
variable[touch_right_finger] assign[=] constant[True]
variable[has_grasp] assign[=] <ast.BoolOp object at 0x7da20c794a90>
variable[r_grasp] assign[=] binary_operation[call[name[int], parameter[name[has_grasp]]] * name[grasp_mult]]
variable[r_lift] assign[=] constant[0.0]
if <ast.BoolOp object at 0x7da20c795000> begin[:]
variable[z_target] assign[=] binary_operation[call[name[self].table_pos][constant[2]] + constant[0.2]]
variable[object_z_locs] assign[=] call[call[name[self].sim.data.body_xpos][name[objs_to_reach]]][tuple[[<ast.Slice object at 0x7da204566aa0>, <ast.Constant object at 0x7da204566980>]]]
variable[z_dists] assign[=] call[name[np].maximum, parameter[binary_operation[name[z_target] - name[object_z_locs]], constant[0.0]]]
variable[r_lift] assign[=] binary_operation[name[grasp_mult] + binary_operation[binary_operation[constant[1] - call[name[np].tanh, parameter[binary_operation[constant[15.0] * call[name[min], parameter[name[z_dists]]]]]]] * binary_operation[name[lift_mult] - name[grasp_mult]]]]
variable[r_hover] assign[=] constant[0.0]
if call[name[len], parameter[name[objs_to_reach]]] begin[:]
variable[r_hovers] assign[=] call[name[np].zeros, parameter[call[name[len], parameter[name[objs_to_reach]]]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[objs_to_reach]]]]]] begin[:]
if call[call[name[names_to_reach]][name[i]].startswith, parameter[call[name[self].item_names][constant[0]]]] begin[:]
variable[peg_pos] assign[=] call[name[self].peg1_pos][<ast.Slice object at 0x7da2045670a0>]
variable[ob_xy] assign[=] call[call[name[self].sim.data.body_xpos][call[name[objs_to_reach]][name[i]]]][<ast.Slice object at 0x7da204565480>]
variable[dist] assign[=] call[name[np].linalg.norm, parameter[binary_operation[name[peg_pos] - name[ob_xy]]]]
call[name[r_hovers]][name[i]] assign[=] binary_operation[name[r_lift] + binary_operation[binary_operation[constant[1] - call[name[np].tanh, parameter[binary_operation[constant[10.0] * name[dist]]]]] * binary_operation[name[hover_mult] - name[lift_mult]]]]
variable[r_hover] assign[=] call[name[np].max, parameter[name[r_hovers]]]
return[tuple[[<ast.Name object at 0x7da204566e60>, <ast.Name object at 0x7da2045672e0>, <ast.Name object at 0x7da204566800>, <ast.Name object at 0x7da2045653f0>]]] | keyword[def] identifier[staged_rewards] ( identifier[self] ):
literal[string]
identifier[reach_mult] = literal[int]
identifier[grasp_mult] = literal[int]
identifier[lift_mult] = literal[int]
identifier[hover_mult] = literal[int]
identifier[names_to_reach] =[]
identifier[objs_to_reach] =[]
identifier[geoms_to_grasp] =[]
identifier[geoms_by_array] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[ob_inits] )):
keyword[if] identifier[self] . identifier[objects_on_pegs] [ identifier[i] ]:
keyword[continue]
identifier[obj_str] = identifier[str] ( identifier[self] . identifier[item_names] [ identifier[i] ])+ literal[string]
identifier[names_to_reach] . identifier[append] ( identifier[obj_str] )
identifier[objs_to_reach] . identifier[append] ( identifier[self] . identifier[obj_body_id] [ identifier[obj_str] ])
identifier[geoms_to_grasp] . identifier[extend] ( identifier[self] . identifier[obj_geom_id] [ identifier[obj_str] ])
identifier[geoms_by_array] . identifier[append] ( identifier[self] . identifier[obj_geom_id] [ identifier[obj_str] ])
identifier[r_reach] = literal[int]
keyword[if] identifier[len] ( identifier[objs_to_reach] ):
identifier[geom_ids] =[ identifier[elem] [- literal[int] ] keyword[for] identifier[elem] keyword[in] identifier[geoms_by_array] ]
identifier[target_geom_pos] = identifier[self] . identifier[sim] . identifier[data] . identifier[geom_xpos] [ identifier[geom_ids] ]
identifier[gripper_site_pos] = identifier[self] . identifier[sim] . identifier[data] . identifier[site_xpos] [ identifier[self] . identifier[eef_site_id] ]
identifier[dists] = identifier[np] . identifier[linalg] . identifier[norm] (
identifier[target_geom_pos] - identifier[gripper_site_pos] . identifier[reshape] ( literal[int] ,- literal[int] ), identifier[axis] = literal[int]
)
identifier[r_reach] =( literal[int] - identifier[np] . identifier[tanh] ( literal[int] * identifier[min] ( identifier[dists] )))* identifier[reach_mult]
identifier[touch_left_finger] = keyword[False]
identifier[touch_right_finger] = keyword[False]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[sim] . identifier[data] . identifier[ncon] ):
identifier[c] = identifier[self] . identifier[sim] . identifier[data] . identifier[contact] [ identifier[i] ]
keyword[if] identifier[c] . identifier[geom1] keyword[in] identifier[geoms_to_grasp] :
keyword[if] identifier[c] . identifier[geom2] keyword[in] identifier[self] . identifier[l_finger_geom_ids] :
identifier[touch_left_finger] = keyword[True]
keyword[if] identifier[c] . identifier[geom2] keyword[in] identifier[self] . identifier[r_finger_geom_ids] :
identifier[touch_right_finger] = keyword[True]
keyword[elif] identifier[c] . identifier[geom2] keyword[in] identifier[geoms_to_grasp] :
keyword[if] identifier[c] . identifier[geom1] keyword[in] identifier[self] . identifier[l_finger_geom_ids] :
identifier[touch_left_finger] = keyword[True]
keyword[if] identifier[c] . identifier[geom1] keyword[in] identifier[self] . identifier[r_finger_geom_ids] :
identifier[touch_right_finger] = keyword[True]
identifier[has_grasp] = identifier[touch_left_finger] keyword[and] identifier[touch_right_finger]
identifier[r_grasp] = identifier[int] ( identifier[has_grasp] )* identifier[grasp_mult]
identifier[r_lift] = literal[int]
keyword[if] identifier[len] ( identifier[objs_to_reach] ) keyword[and] identifier[r_grasp] > literal[int] :
identifier[z_target] = identifier[self] . identifier[table_pos] [ literal[int] ]+ literal[int]
identifier[object_z_locs] = identifier[self] . identifier[sim] . identifier[data] . identifier[body_xpos] [ identifier[objs_to_reach] ][:, literal[int] ]
identifier[z_dists] = identifier[np] . identifier[maximum] ( identifier[z_target] - identifier[object_z_locs] , literal[int] )
identifier[r_lift] = identifier[grasp_mult] +( literal[int] - identifier[np] . identifier[tanh] ( literal[int] * identifier[min] ( identifier[z_dists] )))*(
identifier[lift_mult] - identifier[grasp_mult]
)
identifier[r_hover] = literal[int]
keyword[if] identifier[len] ( identifier[objs_to_reach] ):
identifier[r_hovers] = identifier[np] . identifier[zeros] ( identifier[len] ( identifier[objs_to_reach] ))
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[objs_to_reach] )):
keyword[if] identifier[names_to_reach] [ identifier[i] ]. identifier[startswith] ( identifier[self] . identifier[item_names] [ literal[int] ]):
identifier[peg_pos] = identifier[self] . identifier[peg1_pos] [: literal[int] ]
keyword[elif] identifier[names_to_reach] [ identifier[i] ]. identifier[startswith] ( identifier[self] . identifier[item_names] [ literal[int] ]):
identifier[peg_pos] = identifier[self] . identifier[peg2_pos] [: literal[int] ]
keyword[else] :
keyword[raise] identifier[Exception] (
literal[string] . identifier[format] ( identifier[names_to_reach] [ identifier[i] ])
)
identifier[ob_xy] = identifier[self] . identifier[sim] . identifier[data] . identifier[body_xpos] [ identifier[objs_to_reach] [ identifier[i] ]][: literal[int] ]
identifier[dist] = identifier[np] . identifier[linalg] . identifier[norm] ( identifier[peg_pos] - identifier[ob_xy] )
identifier[r_hovers] [ identifier[i] ]= identifier[r_lift] +( literal[int] - identifier[np] . identifier[tanh] ( literal[int] * identifier[dist] ))*(
identifier[hover_mult] - identifier[lift_mult]
)
identifier[r_hover] = identifier[np] . identifier[max] ( identifier[r_hovers] )
keyword[return] identifier[r_reach] , identifier[r_grasp] , identifier[r_lift] , identifier[r_hover] | def staged_rewards(self):
"""
Returns staged rewards based on current physical states.
Stages consist of reaching, grasping, lifting, and hovering.
"""
reach_mult = 0.1
grasp_mult = 0.35
lift_mult = 0.5
hover_mult = 0.7
# filter out objects that are already on the correct pegs
names_to_reach = []
objs_to_reach = []
geoms_to_grasp = []
geoms_by_array = []
for i in range(len(self.ob_inits)):
if self.objects_on_pegs[i]:
continue # depends on [control=['if'], data=[]]
obj_str = str(self.item_names[i]) + '0'
names_to_reach.append(obj_str)
objs_to_reach.append(self.obj_body_id[obj_str])
geoms_to_grasp.extend(self.obj_geom_id[obj_str])
geoms_by_array.append(self.obj_geom_id[obj_str]) # depends on [control=['for'], data=['i']]
### reaching reward governed by distance to closest object ###
r_reach = 0.0
if len(objs_to_reach):
# reaching reward via minimum distance to the handles of the objects (the last geom of each nut)
geom_ids = [elem[-1] for elem in geoms_by_array]
target_geom_pos = self.sim.data.geom_xpos[geom_ids]
gripper_site_pos = self.sim.data.site_xpos[self.eef_site_id]
dists = np.linalg.norm(target_geom_pos - gripper_site_pos.reshape(1, -1), axis=1)
r_reach = (1 - np.tanh(10.0 * min(dists))) * reach_mult # depends on [control=['if'], data=[]]
### grasping reward for touching any objects of interest ###
touch_left_finger = False
touch_right_finger = False
for i in range(self.sim.data.ncon):
c = self.sim.data.contact[i]
if c.geom1 in geoms_to_grasp:
if c.geom2 in self.l_finger_geom_ids:
touch_left_finger = True # depends on [control=['if'], data=[]]
if c.geom2 in self.r_finger_geom_ids:
touch_right_finger = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif c.geom2 in geoms_to_grasp:
if c.geom1 in self.l_finger_geom_ids:
touch_left_finger = True # depends on [control=['if'], data=[]]
if c.geom1 in self.r_finger_geom_ids:
touch_right_finger = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
has_grasp = touch_left_finger and touch_right_finger
r_grasp = int(has_grasp) * grasp_mult
### lifting reward for picking up an object ###
r_lift = 0.0
if len(objs_to_reach) and r_grasp > 0.0:
z_target = self.table_pos[2] + 0.2
object_z_locs = self.sim.data.body_xpos[objs_to_reach][:, 2]
z_dists = np.maximum(z_target - object_z_locs, 0.0)
r_lift = grasp_mult + (1 - np.tanh(15.0 * min(z_dists))) * (lift_mult - grasp_mult) # depends on [control=['if'], data=[]]
### hover reward for getting object above peg ###
r_hover = 0.0
if len(objs_to_reach):
r_hovers = np.zeros(len(objs_to_reach))
for i in range(len(objs_to_reach)):
if names_to_reach[i].startswith(self.item_names[0]):
peg_pos = self.peg1_pos[:2] # depends on [control=['if'], data=[]]
elif names_to_reach[i].startswith(self.item_names[1]):
peg_pos = self.peg2_pos[:2] # depends on [control=['if'], data=[]]
else:
raise Exception('Got invalid object to reach: {}'.format(names_to_reach[i]))
ob_xy = self.sim.data.body_xpos[objs_to_reach[i]][:2]
dist = np.linalg.norm(peg_pos - ob_xy)
r_hovers[i] = r_lift + (1 - np.tanh(10.0 * dist)) * (hover_mult - lift_mult) # depends on [control=['for'], data=['i']]
r_hover = np.max(r_hovers) # depends on [control=['if'], data=[]]
return (r_reach, r_grasp, r_lift, r_hover) |
def _igam(a, x):
"""Left tail of incomplete Gamma function:
inf. k
a -x - x
x e > ----------
- -
k=0 | (a+k+1)
"""
# Compute x**a * exp(-x) / Gamma(a)
ax = math.exp(a * math.log(x) - x - math.lgamma(a))
# Power series
r = a
c = 1.0
ans = 1.0
while True:
r += 1.0
c *= x/r
ans += c
if c / ans <= MACHEP:
return ans * ax / a | def function[_igam, parameter[a, x]]:
constant[Left tail of incomplete Gamma function:
inf. k
a -x - x
x e > ----------
- -
k=0 | (a+k+1)
]
variable[ax] assign[=] call[name[math].exp, parameter[binary_operation[binary_operation[binary_operation[name[a] * call[name[math].log, parameter[name[x]]]] - name[x]] - call[name[math].lgamma, parameter[name[a]]]]]]
variable[r] assign[=] name[a]
variable[c] assign[=] constant[1.0]
variable[ans] assign[=] constant[1.0]
while constant[True] begin[:]
<ast.AugAssign object at 0x7da1b244a290>
<ast.AugAssign object at 0x7da1b244a8f0>
<ast.AugAssign object at 0x7da1b244b730>
if compare[binary_operation[name[c] / name[ans]] less_or_equal[<=] name[MACHEP]] begin[:]
return[binary_operation[binary_operation[name[ans] * name[ax]] / name[a]]] | keyword[def] identifier[_igam] ( identifier[a] , identifier[x] ):
literal[string]
identifier[ax] = identifier[math] . identifier[exp] ( identifier[a] * identifier[math] . identifier[log] ( identifier[x] )- identifier[x] - identifier[math] . identifier[lgamma] ( identifier[a] ))
identifier[r] = identifier[a]
identifier[c] = literal[int]
identifier[ans] = literal[int]
keyword[while] keyword[True] :
identifier[r] += literal[int]
identifier[c] *= identifier[x] / identifier[r]
identifier[ans] += identifier[c]
keyword[if] identifier[c] / identifier[ans] <= identifier[MACHEP] :
keyword[return] identifier[ans] * identifier[ax] / identifier[a] | def _igam(a, x):
"""Left tail of incomplete Gamma function:
inf. k
a -x - x
x e > ----------
- -
k=0 | (a+k+1)
"""
# Compute x**a * exp(-x) / Gamma(a)
ax = math.exp(a * math.log(x) - x - math.lgamma(a))
# Power series
r = a
c = 1.0
ans = 1.0
while True:
r += 1.0
c *= x / r
ans += c
if c / ans <= MACHEP:
return ans * ax / a # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] |
def to_valueset(self, state):
"""
Convert to a ValueSet instance
:param state: A state
:return: The converted ValueSet instance
"""
return state.solver.VS(state.arch.bits, self.region, self.region_base_addr, self.address) | def function[to_valueset, parameter[self, state]]:
constant[
Convert to a ValueSet instance
:param state: A state
:return: The converted ValueSet instance
]
return[call[name[state].solver.VS, parameter[name[state].arch.bits, name[self].region, name[self].region_base_addr, name[self].address]]] | keyword[def] identifier[to_valueset] ( identifier[self] , identifier[state] ):
literal[string]
keyword[return] identifier[state] . identifier[solver] . identifier[VS] ( identifier[state] . identifier[arch] . identifier[bits] , identifier[self] . identifier[region] , identifier[self] . identifier[region_base_addr] , identifier[self] . identifier[address] ) | def to_valueset(self, state):
"""
Convert to a ValueSet instance
:param state: A state
:return: The converted ValueSet instance
"""
return state.solver.VS(state.arch.bits, self.region, self.region_base_addr, self.address) |
def send_unsigned_transaction(self, tx: Dict[str, any], private_key: Optional[str] = None,
public_key: Optional[str] = None, retry: bool = False,
block_identifier: Optional[str] = None) -> bytes:
"""
Send a tx using an unlocked public key in the node or a private key. Both `public_key` and
`private_key` cannot be `None`
:param tx:
:param private_key:
:param public_key:
:param retry: Retry if a problem with nonce is found
:param block_identifier:
:return: tx hash
"""
if private_key:
address = self.private_key_to_address(private_key)
elif public_key:
address = public_key
else:
logger.error('No ethereum account provided. Need a public_key or private_key')
raise ValueError("Ethereum account was not configured or unlocked in the node")
if tx.get('nonce') is None:
tx['nonce'] = self.get_nonce_for_account(address, block_identifier=block_identifier)
number_errors = 5
while number_errors >= 0:
try:
if private_key:
signed_tx = self.w3.eth.account.signTransaction(tx, private_key=private_key)
logger.debug('Sending %d wei from %s to %s', tx['value'], address, tx['to'])
try:
return self.send_raw_transaction(signed_tx.rawTransaction)
except TransactionAlreadyImported as e:
# Sometimes Parity 2.2.11 fails with Transaction already imported, even if it's not, but it's
# processed
tx_hash = signed_tx.hash
logger.error('Transaction with tx-hash=%s already imported: %s' % (tx_hash.hex(), str(e)))
return tx_hash
elif public_key:
tx['from'] = address
return self.send_transaction(tx)
except ReplacementTransactionUnderpriced as e:
if not retry or not number_errors:
raise e
logger.error('address=%s Tx with nonce=%d was already sent, retrying with nonce + 1',
address, tx['nonce'])
tx['nonce'] += 1
except InvalidNonce as e:
if not retry or not number_errors:
raise e
logger.error('address=%s Tx with invalid nonce=%d, retrying recovering nonce again',
address, tx['nonce'])
tx['nonce'] = self.get_nonce_for_account(address, block_identifier=block_identifier)
number_errors -= 1 | def function[send_unsigned_transaction, parameter[self, tx, private_key, public_key, retry, block_identifier]]:
constant[
Send a tx using an unlocked public key in the node or a private key. Both `public_key` and
`private_key` cannot be `None`
:param tx:
:param private_key:
:param public_key:
:param retry: Retry if a problem with nonce is found
:param block_identifier:
:return: tx hash
]
if name[private_key] begin[:]
variable[address] assign[=] call[name[self].private_key_to_address, parameter[name[private_key]]]
if compare[call[name[tx].get, parameter[constant[nonce]]] is constant[None]] begin[:]
call[name[tx]][constant[nonce]] assign[=] call[name[self].get_nonce_for_account, parameter[name[address]]]
variable[number_errors] assign[=] constant[5]
while compare[name[number_errors] greater_or_equal[>=] constant[0]] begin[:]
<ast.Try object at 0x7da207f9b4f0> | keyword[def] identifier[send_unsigned_transaction] ( identifier[self] , identifier[tx] : identifier[Dict] [ identifier[str] , identifier[any] ], identifier[private_key] : identifier[Optional] [ identifier[str] ]= keyword[None] ,
identifier[public_key] : identifier[Optional] [ identifier[str] ]= keyword[None] , identifier[retry] : identifier[bool] = keyword[False] ,
identifier[block_identifier] : identifier[Optional] [ identifier[str] ]= keyword[None] )-> identifier[bytes] :
literal[string]
keyword[if] identifier[private_key] :
identifier[address] = identifier[self] . identifier[private_key_to_address] ( identifier[private_key] )
keyword[elif] identifier[public_key] :
identifier[address] = identifier[public_key]
keyword[else] :
identifier[logger] . identifier[error] ( literal[string] )
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[tx] . identifier[get] ( literal[string] ) keyword[is] keyword[None] :
identifier[tx] [ literal[string] ]= identifier[self] . identifier[get_nonce_for_account] ( identifier[address] , identifier[block_identifier] = identifier[block_identifier] )
identifier[number_errors] = literal[int]
keyword[while] identifier[number_errors] >= literal[int] :
keyword[try] :
keyword[if] identifier[private_key] :
identifier[signed_tx] = identifier[self] . identifier[w3] . identifier[eth] . identifier[account] . identifier[signTransaction] ( identifier[tx] , identifier[private_key] = identifier[private_key] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[tx] [ literal[string] ], identifier[address] , identifier[tx] [ literal[string] ])
keyword[try] :
keyword[return] identifier[self] . identifier[send_raw_transaction] ( identifier[signed_tx] . identifier[rawTransaction] )
keyword[except] identifier[TransactionAlreadyImported] keyword[as] identifier[e] :
identifier[tx_hash] = identifier[signed_tx] . identifier[hash]
identifier[logger] . identifier[error] ( literal[string] %( identifier[tx_hash] . identifier[hex] (), identifier[str] ( identifier[e] )))
keyword[return] identifier[tx_hash]
keyword[elif] identifier[public_key] :
identifier[tx] [ literal[string] ]= identifier[address]
keyword[return] identifier[self] . identifier[send_transaction] ( identifier[tx] )
keyword[except] identifier[ReplacementTransactionUnderpriced] keyword[as] identifier[e] :
keyword[if] keyword[not] identifier[retry] keyword[or] keyword[not] identifier[number_errors] :
keyword[raise] identifier[e]
identifier[logger] . identifier[error] ( literal[string] ,
identifier[address] , identifier[tx] [ literal[string] ])
identifier[tx] [ literal[string] ]+= literal[int]
keyword[except] identifier[InvalidNonce] keyword[as] identifier[e] :
keyword[if] keyword[not] identifier[retry] keyword[or] keyword[not] identifier[number_errors] :
keyword[raise] identifier[e]
identifier[logger] . identifier[error] ( literal[string] ,
identifier[address] , identifier[tx] [ literal[string] ])
identifier[tx] [ literal[string] ]= identifier[self] . identifier[get_nonce_for_account] ( identifier[address] , identifier[block_identifier] = identifier[block_identifier] )
identifier[number_errors] -= literal[int] | def send_unsigned_transaction(self, tx: Dict[str, any], private_key: Optional[str]=None, public_key: Optional[str]=None, retry: bool=False, block_identifier: Optional[str]=None) -> bytes:
"""
Send a tx using an unlocked public key in the node or a private key. Both `public_key` and
`private_key` cannot be `None`
:param tx:
:param private_key:
:param public_key:
:param retry: Retry if a problem with nonce is found
:param block_identifier:
:return: tx hash
"""
if private_key:
address = self.private_key_to_address(private_key) # depends on [control=['if'], data=[]]
elif public_key:
address = public_key # depends on [control=['if'], data=[]]
else:
logger.error('No ethereum account provided. Need a public_key or private_key')
raise ValueError('Ethereum account was not configured or unlocked in the node')
if tx.get('nonce') is None:
tx['nonce'] = self.get_nonce_for_account(address, block_identifier=block_identifier) # depends on [control=['if'], data=[]]
number_errors = 5
while number_errors >= 0:
try:
if private_key:
signed_tx = self.w3.eth.account.signTransaction(tx, private_key=private_key)
logger.debug('Sending %d wei from %s to %s', tx['value'], address, tx['to'])
try:
return self.send_raw_transaction(signed_tx.rawTransaction) # depends on [control=['try'], data=[]]
except TransactionAlreadyImported as e:
# Sometimes Parity 2.2.11 fails with Transaction already imported, even if it's not, but it's
# processed
tx_hash = signed_tx.hash
logger.error('Transaction with tx-hash=%s already imported: %s' % (tx_hash.hex(), str(e)))
return tx_hash # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
elif public_key:
tx['from'] = address
return self.send_transaction(tx) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except ReplacementTransactionUnderpriced as e:
if not retry or not number_errors:
raise e # depends on [control=['if'], data=[]]
logger.error('address=%s Tx with nonce=%d was already sent, retrying with nonce + 1', address, tx['nonce'])
tx['nonce'] += 1 # depends on [control=['except'], data=['e']]
except InvalidNonce as e:
if not retry or not number_errors:
raise e # depends on [control=['if'], data=[]]
logger.error('address=%s Tx with invalid nonce=%d, retrying recovering nonce again', address, tx['nonce'])
tx['nonce'] = self.get_nonce_for_account(address, block_identifier=block_identifier)
number_errors -= 1 # depends on [control=['except'], data=['e']] # depends on [control=['while'], data=['number_errors']] |
def predict_distributed(self, data_rdd, batch_size = -1):
"""
Model inference base on the given data.
You need to invoke collect() to trigger those action \
as the returning result is an RDD.
:param data_rdd: the data to be predict.
:param batch_size: total batch size of prediction.
:return: An RDD represent the predict result.
"""
result = callBigDlFunc(self.bigdl_type,
"modelPredictRDD", self.value, data_rdd, batch_size)
return result.map(lambda data: data.to_ndarray()) | def function[predict_distributed, parameter[self, data_rdd, batch_size]]:
constant[
Model inference base on the given data.
You need to invoke collect() to trigger those action as the returning result is an RDD.
:param data_rdd: the data to be predict.
:param batch_size: total batch size of prediction.
:return: An RDD represent the predict result.
]
variable[result] assign[=] call[name[callBigDlFunc], parameter[name[self].bigdl_type, constant[modelPredictRDD], name[self].value, name[data_rdd], name[batch_size]]]
return[call[name[result].map, parameter[<ast.Lambda object at 0x7da1b03bb460>]]] | keyword[def] identifier[predict_distributed] ( identifier[self] , identifier[data_rdd] , identifier[batch_size] =- literal[int] ):
literal[string]
identifier[result] = identifier[callBigDlFunc] ( identifier[self] . identifier[bigdl_type] ,
literal[string] , identifier[self] . identifier[value] , identifier[data_rdd] , identifier[batch_size] )
keyword[return] identifier[result] . identifier[map] ( keyword[lambda] identifier[data] : identifier[data] . identifier[to_ndarray] ()) | def predict_distributed(self, data_rdd, batch_size=-1):
"""
Model inference base on the given data.
You need to invoke collect() to trigger those action as the returning result is an RDD.
:param data_rdd: the data to be predict.
:param batch_size: total batch size of prediction.
:return: An RDD represent the predict result.
"""
result = callBigDlFunc(self.bigdl_type, 'modelPredictRDD', self.value, data_rdd, batch_size)
return result.map(lambda data: data.to_ndarray()) |
def get_handler(id_or_link, project=None):
'''
:param id_or_link: String containing an object ID or dict containing a DXLink
:type id_or_link: string or dict
:param project: String project ID to use as the context if the the object is a data object
:type project: string
:rtype: :class:`~dxpy.bindings.DXObject`, :class:`~dxpy.bindings.DXApp`, or :class:`~dxpy.bindings.DXGlobalWorkflow`
Parses a string or DXLink dict. Creates and returns an object handler for it.
Example::
get_handler("file-1234")
'''
try:
cls = _guess_link_target_type(id_or_link)
except Exception as e:
raise DXError("Could not parse link {}: {}".format(id_or_link, e))
if cls in [dxpy.DXApp, dxpy.DXGlobalWorkflow]:
# This special case should translate identifiers of the form
# "app-name" or "app-name/version_or_tag" to the appropriate
# arguments
if dxpy.utils.resolver.is_hashid(id_or_link):
return cls(id_or_link)
else:
slash_pos = id_or_link.find('/')
dash_pos = id_or_link.find('-')
if slash_pos == -1:
return cls(name=id_or_link[dash_pos+1:])
else:
return cls(name=id_or_link[dash_pos+1:slash_pos],
alias=id_or_link[slash_pos + 1:])
elif project is None or cls in [dxpy.DXJob, dxpy.DXAnalysis, dxpy.DXProject, dxpy.DXContainer]:
# This case is important for the handlers which do not
# take a project field
return cls(id_or_link)
else:
return cls(id_or_link, project=project) | def function[get_handler, parameter[id_or_link, project]]:
constant[
:param id_or_link: String containing an object ID or dict containing a DXLink
:type id_or_link: string or dict
:param project: String project ID to use as the context if the the object is a data object
:type project: string
:rtype: :class:`~dxpy.bindings.DXObject`, :class:`~dxpy.bindings.DXApp`, or :class:`~dxpy.bindings.DXGlobalWorkflow`
Parses a string or DXLink dict. Creates and returns an object handler for it.
Example::
get_handler("file-1234")
]
<ast.Try object at 0x7da18bc73d90>
if compare[name[cls] in list[[<ast.Attribute object at 0x7da18bc707c0>, <ast.Attribute object at 0x7da18bc72f50>]]] begin[:]
if call[name[dxpy].utils.resolver.is_hashid, parameter[name[id_or_link]]] begin[:]
return[call[name[cls], parameter[name[id_or_link]]]] | keyword[def] identifier[get_handler] ( identifier[id_or_link] , identifier[project] = keyword[None] ):
literal[string]
keyword[try] :
identifier[cls] = identifier[_guess_link_target_type] ( identifier[id_or_link] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[DXError] ( literal[string] . identifier[format] ( identifier[id_or_link] , identifier[e] ))
keyword[if] identifier[cls] keyword[in] [ identifier[dxpy] . identifier[DXApp] , identifier[dxpy] . identifier[DXGlobalWorkflow] ]:
keyword[if] identifier[dxpy] . identifier[utils] . identifier[resolver] . identifier[is_hashid] ( identifier[id_or_link] ):
keyword[return] identifier[cls] ( identifier[id_or_link] )
keyword[else] :
identifier[slash_pos] = identifier[id_or_link] . identifier[find] ( literal[string] )
identifier[dash_pos] = identifier[id_or_link] . identifier[find] ( literal[string] )
keyword[if] identifier[slash_pos] ==- literal[int] :
keyword[return] identifier[cls] ( identifier[name] = identifier[id_or_link] [ identifier[dash_pos] + literal[int] :])
keyword[else] :
keyword[return] identifier[cls] ( identifier[name] = identifier[id_or_link] [ identifier[dash_pos] + literal[int] : identifier[slash_pos] ],
identifier[alias] = identifier[id_or_link] [ identifier[slash_pos] + literal[int] :])
keyword[elif] identifier[project] keyword[is] keyword[None] keyword[or] identifier[cls] keyword[in] [ identifier[dxpy] . identifier[DXJob] , identifier[dxpy] . identifier[DXAnalysis] , identifier[dxpy] . identifier[DXProject] , identifier[dxpy] . identifier[DXContainer] ]:
keyword[return] identifier[cls] ( identifier[id_or_link] )
keyword[else] :
keyword[return] identifier[cls] ( identifier[id_or_link] , identifier[project] = identifier[project] ) | def get_handler(id_or_link, project=None):
"""
:param id_or_link: String containing an object ID or dict containing a DXLink
:type id_or_link: string or dict
:param project: String project ID to use as the context if the the object is a data object
:type project: string
:rtype: :class:`~dxpy.bindings.DXObject`, :class:`~dxpy.bindings.DXApp`, or :class:`~dxpy.bindings.DXGlobalWorkflow`
Parses a string or DXLink dict. Creates and returns an object handler for it.
Example::
get_handler("file-1234")
"""
try:
cls = _guess_link_target_type(id_or_link) # depends on [control=['try'], data=[]]
except Exception as e:
raise DXError('Could not parse link {}: {}'.format(id_or_link, e)) # depends on [control=['except'], data=['e']]
if cls in [dxpy.DXApp, dxpy.DXGlobalWorkflow]:
# This special case should translate identifiers of the form
# "app-name" or "app-name/version_or_tag" to the appropriate
# arguments
if dxpy.utils.resolver.is_hashid(id_or_link):
return cls(id_or_link) # depends on [control=['if'], data=[]]
else:
slash_pos = id_or_link.find('/')
dash_pos = id_or_link.find('-')
if slash_pos == -1:
return cls(name=id_or_link[dash_pos + 1:]) # depends on [control=['if'], data=[]]
else:
return cls(name=id_or_link[dash_pos + 1:slash_pos], alias=id_or_link[slash_pos + 1:]) # depends on [control=['if'], data=['cls']]
elif project is None or cls in [dxpy.DXJob, dxpy.DXAnalysis, dxpy.DXProject, dxpy.DXContainer]:
# This case is important for the handlers which do not
# take a project field
return cls(id_or_link) # depends on [control=['if'], data=[]]
else:
return cls(id_or_link, project=project) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.