code
stringlengths 75
104k
| code_sememe
stringlengths 47
309k
| token_type
stringlengths 215
214k
| code_dependency
stringlengths 75
155k
|
|---|---|---|---|
def _GenerateAssertion(self):
    """Build the signed JWT assertion used to authenticate the request.

    Returns:
        string, signed Json Web Token (JWT) assertion.
    """
    issued_at = int(time.time())
    # Standard JWT claim set for a service-account grant; expiry is capped
    # at RpcHelper.MAX_TOKEN_LIFETIME_SECS past the issue time.
    claims = {
        'iss': self.service_account_email,
        'scope': 'https://www.googleapis.com/auth/identitytoolkit',
        'aud': RpcHelper.TOKEN_ENDPOINT,
        'iat': issued_at,
        'exp': issued_at + RpcHelper.MAX_TOKEN_LIFETIME_SECS,
    }
    signer = crypt.Signer.from_string(self.service_account_key)
    return crypt.make_signed_jwt(signer, claims)
|
def function[_GenerateAssertion, parameter[self]]:
constant[Generates the signed assertion that will be used in the request.
Returns:
string, signed Json Web Token (JWT) assertion.
]
variable[now] assign[=] call[name[int], parameter[call[name[time].time, parameter[]]]]
variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da1b1909f90>, <ast.Constant object at 0x7da1b190a4a0>, <ast.Constant object at 0x7da1b19092d0>, <ast.Constant object at 0x7da1b190b520>, <ast.Constant object at 0x7da1b19080a0>], [<ast.Attribute object at 0x7da1b190be80>, <ast.Constant object at 0x7da1b190a440>, <ast.Name object at 0x7da1b190ae00>, <ast.BinOp object at 0x7da1b190b8b0>, <ast.Attribute object at 0x7da1b1909840>]]
return[call[name[crypt].make_signed_jwt, parameter[call[name[crypt].Signer.from_string, parameter[name[self].service_account_key]], name[payload]]]]
|
keyword[def] identifier[_GenerateAssertion] ( identifier[self] ):
literal[string]
identifier[now] = identifier[int] ( identifier[time] . identifier[time] ())
identifier[payload] ={
literal[string] : identifier[RpcHelper] . identifier[TOKEN_ENDPOINT] ,
literal[string] : literal[string] ,
literal[string] : identifier[now] ,
literal[string] : identifier[now] + identifier[RpcHelper] . identifier[MAX_TOKEN_LIFETIME_SECS] ,
literal[string] : identifier[self] . identifier[service_account_email]
}
keyword[return] identifier[crypt] . identifier[make_signed_jwt] (
identifier[crypt] . identifier[Signer] . identifier[from_string] ( identifier[self] . identifier[service_account_key] ),
identifier[payload] )
|
def _GenerateAssertion(self):
"""Generates the signed assertion that will be used in the request.
Returns:
string, signed Json Web Token (JWT) assertion.
"""
now = int(time.time())
payload = {'aud': RpcHelper.TOKEN_ENDPOINT, 'scope': 'https://www.googleapis.com/auth/identitytoolkit', 'iat': now, 'exp': now + RpcHelper.MAX_TOKEN_LIFETIME_SECS, 'iss': self.service_account_email}
return crypt.make_signed_jwt(crypt.Signer.from_string(self.service_account_key), payload)
|
def get_class(kls):
    """
    :param kls - string of fully identified starter function or starter method path
    for instance:
    - workers.abstract_worker.AbstractWorker.start
    - workers.example_script_worker.main
    :return tuple (type, object, starter)
    for instance:
    - (FunctionType, <function_main>, None)
    - (type, <Class_...>, 'start')
    """
    parts = kls.split('.')
    try:
        # First, try to import module hosting starter function
        module = '.'.join(parts[:-1])
        m = __import__(module)
    except ImportError:
        # Alternatively, try to import module hosting Class with a starter method
        module = '.'.join(parts[:-2])
        m = __import__(module)
    # __import__ returns the *top-level* package, so the getattr walk below
    # starts at parts[1] and descends one dotted component at a time.
    t = None
    starter = None
    for i in range(1, len(parts)):
        comp = parts[i]
        # NOTE(review): if neither isinstance check below ever fires, this
        # leaves `starter` as a *list* of remaining path components rather
        # than None or a string — confirm callers tolerate that shape.
        starter = parts[i:]
        m = getattr(m, comp)
        if isinstance(m, class_types):
            # Found the class: any remaining components name the starter
            # method (joined back into a dotted string, e.g. 'start').
            t = type
            starter = None if len(parts[i:]) == 1 else '.'.join(parts[i + 1:])
            break
        if isinstance(m, types.FunctionType):
            # Found a plain starter function; no method name applies.
            t = types.FunctionType
            starter = None
            break
    return t, m, starter
|
def function[get_class, parameter[kls]]:
constant[
:param kls - string of fully identified starter function or starter method path
for instance:
- workers.abstract_worker.AbstractWorker.start
- workers.example_script_worker.main
:return tuple (type, object, starter)
for instance:
- (FunctionType, <function_main>, None)
- (type, <Class_...>, 'start')
]
variable[parts] assign[=] call[name[kls].split, parameter[constant[.]]]
<ast.Try object at 0x7da20c6c5600>
variable[t] assign[=] constant[None]
variable[starter] assign[=] constant[None]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], call[name[len], parameter[name[parts]]]]]] begin[:]
variable[comp] assign[=] call[name[parts]][name[i]]
variable[starter] assign[=] call[name[parts]][<ast.Slice object at 0x7da20c6c7250>]
variable[m] assign[=] call[name[getattr], parameter[name[m], name[comp]]]
if call[name[isinstance], parameter[name[m], name[class_types]]] begin[:]
variable[t] assign[=] name[type]
variable[starter] assign[=] <ast.IfExp object at 0x7da20c6c7d30>
break
if call[name[isinstance], parameter[name[m], name[types].FunctionType]] begin[:]
variable[t] assign[=] name[types].FunctionType
variable[starter] assign[=] constant[None]
break
return[tuple[[<ast.Name object at 0x7da20c6c6f20>, <ast.Name object at 0x7da20c6c4880>, <ast.Name object at 0x7da20c6c4e20>]]]
|
keyword[def] identifier[get_class] ( identifier[kls] ):
literal[string]
identifier[parts] = identifier[kls] . identifier[split] ( literal[string] )
keyword[try] :
identifier[module] = literal[string] . identifier[join] ( identifier[parts] [:- literal[int] ])
identifier[m] = identifier[__import__] ( identifier[module] )
keyword[except] identifier[ImportError] :
identifier[module] = literal[string] . identifier[join] ( identifier[parts] [:- literal[int] ])
identifier[m] = identifier[__import__] ( identifier[module] )
identifier[t] = keyword[None]
identifier[starter] = keyword[None]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[parts] )):
identifier[comp] = identifier[parts] [ identifier[i] ]
identifier[starter] = identifier[parts] [ identifier[i] :]
identifier[m] = identifier[getattr] ( identifier[m] , identifier[comp] )
keyword[if] identifier[isinstance] ( identifier[m] , identifier[class_types] ):
identifier[t] = identifier[type]
identifier[starter] = keyword[None] keyword[if] identifier[len] ( identifier[parts] [ identifier[i] :])== literal[int] keyword[else] literal[string] . identifier[join] ( identifier[parts] [ identifier[i] + literal[int] :])
keyword[break]
keyword[if] identifier[isinstance] ( identifier[m] , identifier[types] . identifier[FunctionType] ):
identifier[t] = identifier[types] . identifier[FunctionType]
identifier[starter] = keyword[None]
keyword[break]
keyword[return] identifier[t] , identifier[m] , identifier[starter]
|
def get_class(kls):
"""
:param kls - string of fully identified starter function or starter method path
for instance:
- workers.abstract_worker.AbstractWorker.start
- workers.example_script_worker.main
:return tuple (type, object, starter)
for instance:
- (FunctionType, <function_main>, None)
- (type, <Class_...>, 'start')
"""
parts = kls.split('.')
try:
# First, try to import module hosting starter function
module = '.'.join(parts[:-1])
m = __import__(module) # depends on [control=['try'], data=[]]
except ImportError:
# Alternatively, try to import module hosting Class with a starter method
module = '.'.join(parts[:-2])
m = __import__(module) # depends on [control=['except'], data=[]]
t = None
starter = None
for i in range(1, len(parts)):
comp = parts[i]
starter = parts[i:]
m = getattr(m, comp)
if isinstance(m, class_types):
t = type
starter = None if len(parts[i:]) == 1 else '.'.join(parts[i + 1:])
break # depends on [control=['if'], data=[]]
if isinstance(m, types.FunctionType):
t = types.FunctionType
starter = None
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return (t, m, starter)
|
def delete_stack(self, stack):
    """Delete a stack (service group).

    Deletes every service inside the stack and then destroys the stack
    itself.

    Args:
        - stack: name of the stack the services belong to.

    Returns:
        A tuple ``(<result>, <ResponseInfo>)``:
        - result: empty dict {} on success, {"error": "<errMsg string>"} on failure.
        - ResponseInfo: the Response information of the request.
    """
    # __delete is a private (name-mangled) HTTP helper on the enclosing client.
    url = '{0}/v3/stacks/{1}'.format(self.host, stack)
    return self.__delete(url)
|
def function[delete_stack, parameter[self, stack]]:
constant[删除服务组
删除服务组内所有服务并销毁服务组。
Args:
- stack: 服务所属的服务组名称
Returns:
返回一个tuple对象,其格式为(<result>, <ResponseInfo>)
- result 成功返回空dict{},失败返回{"error": "<errMsg string>"}
- ResponseInfo 请求的Response信息
]
variable[url] assign[=] call[constant[{0}/v3/stacks/{1}].format, parameter[name[self].host, name[stack]]]
return[call[name[self].__delete, parameter[name[url]]]]
|
keyword[def] identifier[delete_stack] ( identifier[self] , identifier[stack] ):
literal[string]
identifier[url] = literal[string] . identifier[format] ( identifier[self] . identifier[host] , identifier[stack] )
keyword[return] identifier[self] . identifier[__delete] ( identifier[url] )
|
def delete_stack(self, stack):
"""删除服务组
删除服务组内所有服务并销毁服务组。
Args:
- stack: 服务所属的服务组名称
Returns:
返回一个tuple对象,其格式为(<result>, <ResponseInfo>)
- result 成功返回空dict{},失败返回{"error": "<errMsg string>"}
- ResponseInfo 请求的Response信息
"""
url = '{0}/v3/stacks/{1}'.format(self.host, stack)
return self.__delete(url)
|
def addReader(self, reader):
    """
    Register a FileDescriptor to be notified when data is available to read.
    """
    notifier_kind = QtCore.QSocketNotifier.Read
    self._add(reader, self._reads, notifier_kind)
|
def function[addReader, parameter[self, reader]]:
constant[
Add a FileDescriptor for notification of data available to read.
]
call[name[self]._add, parameter[name[reader], name[self]._reads, name[QtCore].QSocketNotifier.Read]]
|
keyword[def] identifier[addReader] ( identifier[self] , identifier[reader] ):
literal[string]
identifier[self] . identifier[_add] ( identifier[reader] , identifier[self] . identifier[_reads] , identifier[QtCore] . identifier[QSocketNotifier] . identifier[Read] )
|
def addReader(self, reader):
"""
Add a FileDescriptor for notification of data available to read.
"""
self._add(reader, self._reads, QtCore.QSocketNotifier.Read)
|
def relieve_model(self, model):
    """Stop observing the given model.

    The model is also dropped from the internal set of tracked models.

    :param gtkmvc3.Model model: The model to be relieved
    """
    tracked_models = self.__registered_models
    tracked_models.remove(model)
    return super(ExtendedController, self).relieve_model(model)
|
def function[relieve_model, parameter[self, model]]:
constant[Do no longer observe the model
The model is also removed from the internal set of tracked models.
:param gtkmvc3.Model model: The model to be relieved
]
call[name[self].__registered_models.remove, parameter[name[model]]]
return[call[call[name[super], parameter[name[ExtendedController], name[self]]].relieve_model, parameter[name[model]]]]
|
keyword[def] identifier[relieve_model] ( identifier[self] , identifier[model] ):
literal[string]
identifier[self] . identifier[__registered_models] . identifier[remove] ( identifier[model] )
keyword[return] identifier[super] ( identifier[ExtendedController] , identifier[self] ). identifier[relieve_model] ( identifier[model] )
|
def relieve_model(self, model):
"""Do no longer observe the model
The model is also removed from the internal set of tracked models.
:param gtkmvc3.Model model: The model to be relieved
"""
self.__registered_models.remove(model)
return super(ExtendedController, self).relieve_model(model)
|
def align_generation(self, file_nm, padding=75):
    """
    Description : Align to lip position
    """
    # Alignment files live under _align_root with a fixed '.align' suffix.
    align_path = self._align_root + '/' + file_nm + '.align'
    sentence = Align(align_path).sentence(padding)
    return nd.array(sentence)
|
def function[align_generation, parameter[self, file_nm, padding]]:
constant[
Description : Align to lip position
]
variable[align] assign[=] call[name[Align], parameter[binary_operation[binary_operation[binary_operation[name[self]._align_root + constant[/]] + name[file_nm]] + constant[.align]]]]
return[call[name[nd].array, parameter[call[name[align].sentence, parameter[name[padding]]]]]]
|
keyword[def] identifier[align_generation] ( identifier[self] , identifier[file_nm] , identifier[padding] = literal[int] ):
literal[string]
identifier[align] = identifier[Align] ( identifier[self] . identifier[_align_root] + literal[string] + identifier[file_nm] + literal[string] )
keyword[return] identifier[nd] . identifier[array] ( identifier[align] . identifier[sentence] ( identifier[padding] ))
|
def align_generation(self, file_nm, padding=75):
"""
Description : Align to lip position
"""
align = Align(self._align_root + '/' + file_nm + '.align')
return nd.array(align.sentence(padding))
|
def add_children_to_node(self, node):
    """
    Add children to etree.Element `node`.
    """
    # Guard clause: leaf blocks have nothing to append.
    if not self.has_children:
        return
    for child_id in self.children:
        child_block = self.runtime.get_block(child_id)
        self.runtime.add_block_as_child_node(child_block, node)
|
def function[add_children_to_node, parameter[self, node]]:
constant[
Add children to etree.Element `node`.
]
if name[self].has_children begin[:]
for taget[name[child_id]] in starred[name[self].children] begin[:]
variable[child] assign[=] call[name[self].runtime.get_block, parameter[name[child_id]]]
call[name[self].runtime.add_block_as_child_node, parameter[name[child], name[node]]]
|
keyword[def] identifier[add_children_to_node] ( identifier[self] , identifier[node] ):
literal[string]
keyword[if] identifier[self] . identifier[has_children] :
keyword[for] identifier[child_id] keyword[in] identifier[self] . identifier[children] :
identifier[child] = identifier[self] . identifier[runtime] . identifier[get_block] ( identifier[child_id] )
identifier[self] . identifier[runtime] . identifier[add_block_as_child_node] ( identifier[child] , identifier[node] )
|
def add_children_to_node(self, node):
"""
Add children to etree.Element `node`.
"""
if self.has_children:
for child_id in self.children:
child = self.runtime.get_block(child_id)
self.runtime.add_block_as_child_node(child, node) # depends on [control=['for'], data=['child_id']] # depends on [control=['if'], data=[]]
|
def from_dict(cls, dictionary, root=False):
    """Build a ConfigTree from a dictionary (ordered dictionaries included).

    :param dictionary: dictionary to convert
    :type dictionary: dict
    :return: Config object
    :type return: Config
    """

    def _convert(node):
        # Mappings become ConfigTree instances, recursing into each value.
        if isinstance(node, dict):
            tree = ConfigTree(root=root)
            for child_key, child_value in node.items():
                tree.put(child_key, _convert(child_value))
            return tree
        # Lists are rebuilt element by element so nested dicts convert too.
        if isinstance(node, list):
            return [_convert(element) for element in node]
        # Scalars (and anything else) pass through untouched.
        return node

    return _convert(dictionary)
|
def function[from_dict, parameter[cls, dictionary, root]]:
constant[Convert dictionary (and ordered dictionary) into a ConfigTree
:param dictionary: dictionary to convert
:type dictionary: dict
:return: Config object
:type return: Config
]
def function[create_tree, parameter[value]]:
if call[name[isinstance], parameter[name[value], name[dict]]] begin[:]
variable[res] assign[=] call[name[ConfigTree], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20e9b38e0>, <ast.Name object at 0x7da20e9b3fa0>]]] in starred[call[name[value].items, parameter[]]] begin[:]
call[name[res].put, parameter[name[key], call[name[create_tree], parameter[name[child_value]]]]]
return[name[res]]
if call[name[isinstance], parameter[name[value], name[list]]] begin[:]
return[<ast.ListComp object at 0x7da1b0981f90>]
return[call[name[create_tree], parameter[name[dictionary]]]]
|
keyword[def] identifier[from_dict] ( identifier[cls] , identifier[dictionary] , identifier[root] = keyword[False] ):
literal[string]
keyword[def] identifier[create_tree] ( identifier[value] ):
keyword[if] identifier[isinstance] ( identifier[value] , identifier[dict] ):
identifier[res] = identifier[ConfigTree] ( identifier[root] = identifier[root] )
keyword[for] identifier[key] , identifier[child_value] keyword[in] identifier[value] . identifier[items] ():
identifier[res] . identifier[put] ( identifier[key] , identifier[create_tree] ( identifier[child_value] ))
keyword[return] identifier[res]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[list] ):
keyword[return] [ identifier[create_tree] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[value] ]
keyword[else] :
keyword[return] identifier[value]
keyword[return] identifier[create_tree] ( identifier[dictionary] )
|
def from_dict(cls, dictionary, root=False):
"""Convert dictionary (and ordered dictionary) into a ConfigTree
:param dictionary: dictionary to convert
:type dictionary: dict
:return: Config object
:type return: Config
"""
def create_tree(value):
if isinstance(value, dict):
res = ConfigTree(root=root)
for (key, child_value) in value.items():
res.put(key, create_tree(child_value)) # depends on [control=['for'], data=[]]
return res # depends on [control=['if'], data=[]]
if isinstance(value, list):
return [create_tree(v) for v in value] # depends on [control=['if'], data=[]]
else:
return value
return create_tree(dictionary)
|
def list_eids(self):
    """Return a sorted list of all known eids.

    Each eid returned by ``self.list()`` is coerced to ``int`` before
    sorting, so the ordering is numeric rather than lexicographic.
    """
    # Feed sorted() a generator directly — the intermediate list the
    # original built was unnecessary (flake8-comprehensions C413).
    return sorted(int(eid) for eid in self.list())
|
def function[list_eids, parameter[self]]:
constant[
Returns a list of all known eids
]
variable[entities] assign[=] call[name[self].list, parameter[]]
return[call[name[sorted], parameter[<ast.ListComp object at 0x7da1b14718a0>]]]
|
keyword[def] identifier[list_eids] ( identifier[self] ):
literal[string]
identifier[entities] = identifier[self] . identifier[list] ()
keyword[return] identifier[sorted] ([ identifier[int] ( identifier[eid] ) keyword[for] identifier[eid] keyword[in] identifier[entities] ])
|
def list_eids(self):
"""
Returns a list of all known eids
"""
entities = self.list()
return sorted([int(eid) for eid in entities])
|
def infer_paths(output_dir, **subdirs):
  """Infers standard paths to policy and model directories.

  Example:
    >>> infer_paths("/some/output/dir/", policy="", model="custom/path")
    {"policy": "/some/output/dir/policy", "model": "custom/path",
    "output_dir":"/some/output/dir/"}

  Args:
    output_dir: output directory.
    **subdirs: sub-directories.

  Returns:
    a dictionary with the directories.
  """
  # dict.items() behaves identically on Python 2 and 3 here, so the
  # six.iteritems compatibility shim is unnecessary. An empty/falsy path
  # means "derive it from output_dir".
  directories = {
      name: path if path else os.path.join(output_dir, name)
      for name, path in subdirs.items()
  }
  directories["output_dir"] = output_dir
  return directories
|
def function[infer_paths, parameter[output_dir]]:
constant[Infers standard paths to policy and model directories.
Example:
>>> infer_paths("/some/output/dir/", policy="", model="custom/path")
{"policy": "/some/output/dir/policy", "model": "custom/path",
"output_dir":"/some/output/dir/"}
Args:
output_dir: output directory.
**subdirs: sub-directories.
Returns:
a dictionary with the directories.
]
variable[directories] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da20e9b2a10>, <ast.Name object at 0x7da20e9b0070>]]] in starred[call[name[six].iteritems, parameter[name[subdirs]]]] begin[:]
call[name[directories]][name[name]] assign[=] <ast.IfExp object at 0x7da20e9b1cf0>
call[name[directories]][constant[output_dir]] assign[=] name[output_dir]
return[name[directories]]
|
keyword[def] identifier[infer_paths] ( identifier[output_dir] ,** identifier[subdirs] ):
literal[string]
identifier[directories] ={}
keyword[for] identifier[name] , identifier[path] keyword[in] identifier[six] . identifier[iteritems] ( identifier[subdirs] ):
identifier[directories] [ identifier[name] ]= identifier[path] keyword[if] identifier[path] keyword[else] identifier[os] . identifier[path] . identifier[join] ( identifier[output_dir] , identifier[name] )
identifier[directories] [ literal[string] ]= identifier[output_dir]
keyword[return] identifier[directories]
|
def infer_paths(output_dir, **subdirs):
"""Infers standard paths to policy and model directories.
Example:
>>> infer_paths("/some/output/dir/", policy="", model="custom/path")
{"policy": "/some/output/dir/policy", "model": "custom/path",
"output_dir":"/some/output/dir/"}
Args:
output_dir: output directory.
**subdirs: sub-directories.
Returns:
a dictionary with the directories.
"""
directories = {}
for (name, path) in six.iteritems(subdirs):
directories[name] = path if path else os.path.join(output_dir, name) # depends on [control=['for'], data=[]]
directories['output_dir'] = output_dir
return directories
|
def _generate_transformations(self, structure):
    """
    The central problem with trying to enumerate magnetic orderings is
    that we have to enumerate orderings that might plausibly be magnetic
    ground states, while not enumerating orderings that are physically
    implausible. The problem is that it is not always obvious by e.g.
    symmetry arguments alone which orderings to prefer. Here, we use a
    variety of strategies (heuristics) to enumerate plausible orderings,
    and later discard any duplicates that might be found by multiple
    strategies. This approach is not ideal, but has been found to be
    relatively robust over a wide range of magnetic structures.

    Args:
        structure: A sanitized input structure (_sanitize_input_structure)

    Returns: A dict of a transformation class instance (values) and name of
    enumeration strategy (keys)
    """
    # NOTE: the original also initialized `transformations = {}` here; that
    # assignment was dead code, unconditionally shadowed by the reassignment
    # just before the final loop, and has been removed.
    formula = structure.composition.reduced_formula

    # analyzer is used to obtain information on sanitized input
    analyzer = CollinearMagneticStructureAnalyzer(
        structure,
        default_magmoms=self.default_magmoms,
        overwrite_magmom_mode="replace_all",
    )

    if not analyzer.is_magnetic:
        raise ValueError(
            "Not detected as magnetic, add a new default magmom for the "
            "element you believe may be magnetic?"
        )

    # now we can begin to generate our magnetic orderings
    self.logger.info("Generating magnetic orderings for {}".format(formula))

    mag_species_spin = analyzer.magnetic_species_and_magmoms
    # strongest default magmom first
    types_mag_species = sorted(
        analyzer.types_of_magnetic_specie,
        key=lambda sp: analyzer.default_magmoms.get(str(sp), 0),
        reverse=True,
    )
    num_mag_sites = analyzer.number_of_magnetic_sites
    num_unique_sites = analyzer.number_of_unique_magnetic_sites()

    # enumerations become too slow as number of unique sites (and thus
    # permutations) increase, 8 is a soft limit, this can be increased
    # but do so with care
    if num_unique_sites > self.max_unique_sites:
        raise ValueError("Too many magnetic sites to sensibly perform enumeration.")

    # maximum cell size to consider: as a rule of thumb, if the primitive cell
    # contains a large number of magnetic sites, perhaps we only need to enumerate
    # within one cell, whereas on the other extreme if the primitive cell only
    # contains a single magnetic site, we have to create larger supercells
    if "max_cell_size" not in self.transformation_kwargs:
        # TODO: change to 8 / num_mag_sites ?
        self.transformation_kwargs["max_cell_size"] = max(1, int(4 / num_mag_sites))
    self.logger.info(
        "Max cell size set to {}".format(
            self.transformation_kwargs["max_cell_size"]
        )
    )

    # when enumerating ferrimagnetic structures, it's useful to detect
    # symmetrically distinct magnetic sites, since different
    # local environments can result in different magnetic order
    # (e.g. inverse spinels)
    # initially, this was done by co-ordination number, but is
    # now done by a full symmetry analysis
    sga = SpacegroupAnalyzer(structure)
    structure_sym = sga.get_symmetrized_structure()
    wyckoff = ["n/a"] * len(structure)
    for indices, symbol in zip(
        structure_sym.equivalent_indices, structure_sym.wyckoff_symbols
    ):
        for index in indices:
            wyckoff[index] = symbol
    is_magnetic_sites = [
        True if site.specie in types_mag_species else False for site in structure
    ]
    # we're not interested in sites that we don't think are magnetic,
    # set these symbols to None to filter them out later
    wyckoff = [
        symbol if is_magnetic_site else "n/a"
        for symbol, is_magnetic_site in zip(wyckoff, is_magnetic_sites)
    ]
    structure.add_site_property("wyckoff", wyckoff)
    wyckoff_symbols = set(wyckoff) - {"n/a"}

    # if user doesn't specifically request ferrimagnetic_Cr2NiO4 orderings,
    # we apply a heuristic as to whether to attempt them or not
    if self.automatic:
        if (
            "ferrimagnetic_by_motif" not in self.strategies
            and len(wyckoff_symbols) > 1
            and len(types_mag_species) == 1
        ):
            self.strategies += ("ferrimagnetic_by_motif",)

        if (
            "antiferromagnetic_by_motif" not in self.strategies
            and len(wyckoff_symbols) > 1
            and len(types_mag_species) == 1
        ):
            self.strategies += ("antiferromagnetic_by_motif",)

        if (
            "ferrimagnetic_by_species" not in self.strategies
            and len(types_mag_species) > 1
        ):
            self.strategies += ("ferrimagnetic_by_species",)

    # we start with a ferromagnetic ordering
    if "ferromagnetic" in self.strategies:
        # TODO: remove 0 spins !

        fm_structure = analyzer.get_ferromagnetic_structure()
        # store magmom as spin property, to be consistent with output from
        # other transformations
        fm_structure.add_spin_by_site(fm_structure.site_properties["magmom"])
        fm_structure.remove_site_property("magmom")

        # we now have our first magnetic ordering...
        self.ordered_structures.append(fm_structure)
        self.ordered_structure_origins.append("fm")

    # we store constraint(s) for each strategy first,
    # and then use each to perform a transformation later
    all_constraints = {}

    # ...to which we can add simple AFM cases first...
    if "antiferromagnetic" in self.strategies:

        constraint = MagOrderParameterConstraint(
            0.5,
            # TODO: update MagOrderParameterConstraint in pymatgen to take types_mag_species directly
            species_constraints=list(map(str, types_mag_species)),
        )
        all_constraints["afm"] = [constraint]

        # allows for non-magnetic sublattices
        if len(types_mag_species) > 1:
            for sp in types_mag_species:
                constraints = [
                    MagOrderParameterConstraint(0.5, species_constraints=str(sp))
                ]
                all_constraints["afm_by_{}".format(sp)] = constraints

    # ...and then we also try ferrimagnetic orderings by motif if a
    # single magnetic species is present...
    if "ferrimagnetic_by_motif" in self.strategies and len(wyckoff_symbols) > 1:

        # these orderings are AFM on one local environment, and FM on the rest
        for symbol in wyckoff_symbols:
            constraints = [
                MagOrderParameterConstraint(
                    0.5, site_constraint_name="wyckoff", site_constraints=symbol
                ),
                MagOrderParameterConstraint(
                    1.0,
                    site_constraint_name="wyckoff",
                    site_constraints=list(wyckoff_symbols - {symbol}),
                ),
            ]
            all_constraints["ferri_by_motif_{}".format(symbol)] = constraints

    # and also try ferrimagnetic when there are multiple magnetic species
    if "ferrimagnetic_by_species" in self.strategies:

        sp_list = [str(site.specie) for site in structure]
        num_sp = {sp: sp_list.count(str(sp)) for sp in types_mag_species}
        total_mag_sites = sum(num_sp.values())

        for sp in types_mag_species:
            # attempt via a global order parameter
            all_constraints["ferri_by_{}".format(sp)] = num_sp[sp] / total_mag_sites

            # attempt via afm on sp, fm on remaining species
            constraints = [
                MagOrderParameterConstraint(0.5, species_constraints=str(sp)),
                MagOrderParameterConstraint(
                    1.0,
                    species_constraints=list(
                        map(str, set(types_mag_species) - {sp})
                    ),
                ),
            ]
            all_constraints["ferri_by_{}_afm".format(sp)] = constraints

    # ...and finally, we can try orderings that are AFM on one local
    # environment, and non-magnetic on the rest -- this is less common
    # but unless explicitly attempted, these states are unlikely to be found
    if "antiferromagnetic_by_motif" in self.strategies:

        for symbol in wyckoff_symbols:
            constraints = [
                MagOrderParameterConstraint(
                    0.5, site_constraint_name="wyckoff", site_constraints=symbol
                )
            ]
            all_constraints["afm_by_motif_{}".format(symbol)] = constraints

    # and now construct all our transformations for each strategy
    transformations = {}
    for name, constraints in all_constraints.items():

        trans = MagOrderingTransformation(
            mag_species_spin,
            order_parameter=constraints,
            **self.transformation_kwargs
        )
        transformations[name] = trans

    return transformations
|
def function[_generate_transformations, parameter[self, structure]]:
constant[
The central problem with trying to enumerate magnetic orderings is
that we have to enumerate orderings that might plausibly be magnetic
ground states, while not enumerating orderings that are physically
implausible. The problem is that it is not always obvious by e.g.
symmetry arguments alone which orderings to prefer. Here, we use a
variety of strategies (heuristics) to enumerate plausible orderings,
and later discard any duplicates that might be found by multiple
strategies. This approach is not ideal, but has been found to be
relatively robust over a wide range of magnetic structures.
Args:
structure: A sanitized input structure (_sanitize_input_structure)
Returns: A dict of a transformation class instance (values) and name of
enumeration strategy (keys)
]
variable[formula] assign[=] name[structure].composition.reduced_formula
variable[transformations] assign[=] dictionary[[], []]
variable[analyzer] assign[=] call[name[CollinearMagneticStructureAnalyzer], parameter[name[structure]]]
if <ast.UnaryOp object at 0x7da18fe92470> begin[:]
<ast.Raise object at 0x7da18fe91570>
call[name[self].logger.info, parameter[call[constant[Generating magnetic orderings for {}].format, parameter[name[formula]]]]]
variable[mag_species_spin] assign[=] name[analyzer].magnetic_species_and_magmoms
variable[types_mag_species] assign[=] call[name[sorted], parameter[name[analyzer].types_of_magnetic_specie]]
variable[num_mag_sites] assign[=] name[analyzer].number_of_magnetic_sites
variable[num_unique_sites] assign[=] call[name[analyzer].number_of_unique_magnetic_sites, parameter[]]
if compare[name[num_unique_sites] greater[>] name[self].max_unique_sites] begin[:]
<ast.Raise object at 0x7da18fe93b80>
if compare[constant[max_cell_size] <ast.NotIn object at 0x7da2590d7190> name[self].transformation_kwargs] begin[:]
call[name[self].transformation_kwargs][constant[max_cell_size]] assign[=] call[name[max], parameter[constant[1], call[name[int], parameter[binary_operation[constant[4] / name[num_mag_sites]]]]]]
call[name[self].logger.info, parameter[call[constant[Max cell size set to {}].format, parameter[call[name[self].transformation_kwargs][constant[max_cell_size]]]]]]
variable[sga] assign[=] call[name[SpacegroupAnalyzer], parameter[name[structure]]]
variable[structure_sym] assign[=] call[name[sga].get_symmetrized_structure, parameter[]]
variable[wyckoff] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b1c7b6d0>]] * call[name[len], parameter[name[structure]]]]
for taget[tuple[[<ast.Name object at 0x7da1b1c7b820>, <ast.Name object at 0x7da1b1c7bac0>]]] in starred[call[name[zip], parameter[name[structure_sym].equivalent_indices, name[structure_sym].wyckoff_symbols]]] begin[:]
for taget[name[index]] in starred[name[indices]] begin[:]
call[name[wyckoff]][name[index]] assign[=] name[symbol]
variable[is_magnetic_sites] assign[=] <ast.ListComp object at 0x7da1b1c7b640>
variable[wyckoff] assign[=] <ast.ListComp object at 0x7da1b1c7af80>
call[name[structure].add_site_property, parameter[constant[wyckoff], name[wyckoff]]]
variable[wyckoff_symbols] assign[=] binary_operation[call[name[set], parameter[name[wyckoff]]] - <ast.Set object at 0x7da1b1c7ad10>]
if name[self].automatic begin[:]
if <ast.BoolOp object at 0x7da1b1c7aaa0> begin[:]
<ast.AugAssign object at 0x7da1b1c7a6b0>
if <ast.BoolOp object at 0x7da1b1c7a8c0> begin[:]
<ast.AugAssign object at 0x7da1b1c7a470>
if <ast.BoolOp object at 0x7da1b1c7a290> begin[:]
<ast.AugAssign object at 0x7da1b1c79ff0>
if compare[constant[ferromagnetic] in name[self].strategies] begin[:]
variable[fm_structure] assign[=] call[name[analyzer].get_ferromagnetic_structure, parameter[]]
call[name[fm_structure].add_spin_by_site, parameter[call[name[fm_structure].site_properties][constant[magmom]]]]
call[name[fm_structure].remove_site_property, parameter[constant[magmom]]]
call[name[self].ordered_structures.append, parameter[name[fm_structure]]]
call[name[self].ordered_structure_origins.append, parameter[constant[fm]]]
variable[all_constraints] assign[=] dictionary[[], []]
if compare[constant[antiferromagnetic] in name[self].strategies] begin[:]
variable[constraint] assign[=] call[name[MagOrderParameterConstraint], parameter[constant[0.5]]]
call[name[all_constraints]][constant[afm]] assign[=] list[[<ast.Name object at 0x7da2044c27a0>]]
if compare[call[name[len], parameter[name[types_mag_species]]] greater[>] constant[1]] begin[:]
for taget[name[sp]] in starred[name[types_mag_species]] begin[:]
variable[constraints] assign[=] list[[<ast.Call object at 0x7da2044c24a0>]]
call[name[all_constraints]][call[constant[afm_by_{}].format, parameter[name[sp]]]] assign[=] name[constraints]
if <ast.BoolOp object at 0x7da2044c28c0> begin[:]
for taget[name[symbol]] in starred[name[wyckoff_symbols]] begin[:]
variable[constraints] assign[=] list[[<ast.Call object at 0x7da2044c0730>, <ast.Call object at 0x7da2044c3760>]]
call[name[all_constraints]][call[constant[ferri_by_motif_{}].format, parameter[name[symbol]]]] assign[=] name[constraints]
if compare[constant[ferrimagnetic_by_species] in name[self].strategies] begin[:]
variable[sp_list] assign[=] <ast.ListComp object at 0x7da2044c2950>
variable[num_sp] assign[=] <ast.DictComp object at 0x7da2044c0b50>
variable[total_mag_sites] assign[=] call[name[sum], parameter[call[name[num_sp].values, parameter[]]]]
for taget[name[sp]] in starred[name[types_mag_species]] begin[:]
call[name[all_constraints]][call[constant[ferri_by_{}].format, parameter[name[sp]]]] assign[=] binary_operation[call[name[num_sp]][name[sp]] / name[total_mag_sites]]
variable[constraints] assign[=] list[[<ast.Call object at 0x7da2044c2fb0>, <ast.Call object at 0x7da2044c3340>]]
call[name[all_constraints]][call[constant[ferri_by_{}_afm].format, parameter[name[sp]]]] assign[=] name[constraints]
if compare[constant[antiferromagnetic_by_motif] in name[self].strategies] begin[:]
for taget[name[symbol]] in starred[name[wyckoff_symbols]] begin[:]
variable[constraints] assign[=] list[[<ast.Call object at 0x7da2044c1b40>]]
call[name[all_constraints]][call[constant[afm_by_motif_{}].format, parameter[name[symbol]]]] assign[=] name[constraints]
variable[transformations] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b1cec1f0>, <ast.Name object at 0x7da1b1cedc60>]]] in starred[call[name[all_constraints].items, parameter[]]] begin[:]
variable[trans] assign[=] call[name[MagOrderingTransformation], parameter[name[mag_species_spin]]]
call[name[transformations]][name[name]] assign[=] name[trans]
return[name[transformations]]
|
keyword[def] identifier[_generate_transformations] ( identifier[self] , identifier[structure] ):
literal[string]
identifier[formula] = identifier[structure] . identifier[composition] . identifier[reduced_formula]
identifier[transformations] ={}
identifier[analyzer] = identifier[CollinearMagneticStructureAnalyzer] (
identifier[structure] ,
identifier[default_magmoms] = identifier[self] . identifier[default_magmoms] ,
identifier[overwrite_magmom_mode] = literal[string] ,
)
keyword[if] keyword[not] identifier[analyzer] . identifier[is_magnetic] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string]
)
identifier[self] . identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[formula] ))
identifier[mag_species_spin] = identifier[analyzer] . identifier[magnetic_species_and_magmoms]
identifier[types_mag_species] = identifier[sorted] (
identifier[analyzer] . identifier[types_of_magnetic_specie] ,
identifier[key] = keyword[lambda] identifier[sp] : identifier[analyzer] . identifier[default_magmoms] . identifier[get] ( identifier[str] ( identifier[sp] ), literal[int] ),
identifier[reverse] = keyword[True] ,
)
identifier[num_mag_sites] = identifier[analyzer] . identifier[number_of_magnetic_sites]
identifier[num_unique_sites] = identifier[analyzer] . identifier[number_of_unique_magnetic_sites] ()
keyword[if] identifier[num_unique_sites] > identifier[self] . identifier[max_unique_sites] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[transformation_kwargs] :
identifier[self] . identifier[transformation_kwargs] [ literal[string] ]= identifier[max] ( literal[int] , identifier[int] ( literal[int] / identifier[num_mag_sites] ))
identifier[self] . identifier[logger] . identifier[info] (
literal[string] . identifier[format] (
identifier[self] . identifier[transformation_kwargs] [ literal[string] ]
)
)
identifier[sga] = identifier[SpacegroupAnalyzer] ( identifier[structure] )
identifier[structure_sym] = identifier[sga] . identifier[get_symmetrized_structure] ()
identifier[wyckoff] =[ literal[string] ]* identifier[len] ( identifier[structure] )
keyword[for] identifier[indices] , identifier[symbol] keyword[in] identifier[zip] (
identifier[structure_sym] . identifier[equivalent_indices] , identifier[structure_sym] . identifier[wyckoff_symbols]
):
keyword[for] identifier[index] keyword[in] identifier[indices] :
identifier[wyckoff] [ identifier[index] ]= identifier[symbol]
identifier[is_magnetic_sites] =[
keyword[True] keyword[if] identifier[site] . identifier[specie] keyword[in] identifier[types_mag_species] keyword[else] keyword[False] keyword[for] identifier[site] keyword[in] identifier[structure]
]
identifier[wyckoff] =[
identifier[symbol] keyword[if] identifier[is_magnetic_site] keyword[else] literal[string]
keyword[for] identifier[symbol] , identifier[is_magnetic_site] keyword[in] identifier[zip] ( identifier[wyckoff] , identifier[is_magnetic_sites] )
]
identifier[structure] . identifier[add_site_property] ( literal[string] , identifier[wyckoff] )
identifier[wyckoff_symbols] = identifier[set] ( identifier[wyckoff] )-{ literal[string] }
keyword[if] identifier[self] . identifier[automatic] :
keyword[if] (
literal[string] keyword[not] keyword[in] identifier[self] . identifier[strategies]
keyword[and] identifier[len] ( identifier[wyckoff_symbols] )> literal[int]
keyword[and] identifier[len] ( identifier[types_mag_species] )== literal[int]
):
identifier[self] . identifier[strategies] +=( literal[string] ,)
keyword[if] (
literal[string] keyword[not] keyword[in] identifier[self] . identifier[strategies]
keyword[and] identifier[len] ( identifier[wyckoff_symbols] )> literal[int]
keyword[and] identifier[len] ( identifier[types_mag_species] )== literal[int]
):
identifier[self] . identifier[strategies] +=( literal[string] ,)
keyword[if] (
literal[string] keyword[not] keyword[in] identifier[self] . identifier[strategies]
keyword[and] identifier[len] ( identifier[types_mag_species] )> literal[int]
):
identifier[self] . identifier[strategies] +=( literal[string] ,)
keyword[if] literal[string] keyword[in] identifier[self] . identifier[strategies] :
identifier[fm_structure] = identifier[analyzer] . identifier[get_ferromagnetic_structure] ()
identifier[fm_structure] . identifier[add_spin_by_site] ( identifier[fm_structure] . identifier[site_properties] [ literal[string] ])
identifier[fm_structure] . identifier[remove_site_property] ( literal[string] )
identifier[self] . identifier[ordered_structures] . identifier[append] ( identifier[fm_structure] )
identifier[self] . identifier[ordered_structure_origins] . identifier[append] ( literal[string] )
identifier[all_constraints] ={}
keyword[if] literal[string] keyword[in] identifier[self] . identifier[strategies] :
identifier[constraint] = identifier[MagOrderParameterConstraint] (
literal[int] ,
identifier[species_constraints] = identifier[list] ( identifier[map] ( identifier[str] , identifier[types_mag_species] )),
)
identifier[all_constraints] [ literal[string] ]=[ identifier[constraint] ]
keyword[if] identifier[len] ( identifier[types_mag_species] )> literal[int] :
keyword[for] identifier[sp] keyword[in] identifier[types_mag_species] :
identifier[constraints] =[
identifier[MagOrderParameterConstraint] ( literal[int] , identifier[species_constraints] = identifier[str] ( identifier[sp] ))
]
identifier[all_constraints] [ literal[string] . identifier[format] ( identifier[sp] )]= identifier[constraints]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[strategies] keyword[and] identifier[len] ( identifier[wyckoff_symbols] )> literal[int] :
keyword[for] identifier[symbol] keyword[in] identifier[wyckoff_symbols] :
identifier[constraints] =[
identifier[MagOrderParameterConstraint] (
literal[int] , identifier[site_constraint_name] = literal[string] , identifier[site_constraints] = identifier[symbol]
),
identifier[MagOrderParameterConstraint] (
literal[int] ,
identifier[site_constraint_name] = literal[string] ,
identifier[site_constraints] = identifier[list] ( identifier[wyckoff_symbols] -{ identifier[symbol] }),
),
]
identifier[all_constraints] [ literal[string] . identifier[format] ( identifier[symbol] )]= identifier[constraints]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[strategies] :
identifier[sp_list] =[ identifier[str] ( identifier[site] . identifier[specie] ) keyword[for] identifier[site] keyword[in] identifier[structure] ]
identifier[num_sp] ={ identifier[sp] : identifier[sp_list] . identifier[count] ( identifier[str] ( identifier[sp] )) keyword[for] identifier[sp] keyword[in] identifier[types_mag_species] }
identifier[total_mag_sites] = identifier[sum] ( identifier[num_sp] . identifier[values] ())
keyword[for] identifier[sp] keyword[in] identifier[types_mag_species] :
identifier[all_constraints] [ literal[string] . identifier[format] ( identifier[sp] )]= identifier[num_sp] [ identifier[sp] ]/ identifier[total_mag_sites]
identifier[constraints] =[
identifier[MagOrderParameterConstraint] ( literal[int] , identifier[species_constraints] = identifier[str] ( identifier[sp] )),
identifier[MagOrderParameterConstraint] (
literal[int] ,
identifier[species_constraints] = identifier[list] (
identifier[map] ( identifier[str] , identifier[set] ( identifier[types_mag_species] )-{ identifier[sp] })
),
),
]
identifier[all_constraints] [ literal[string] . identifier[format] ( identifier[sp] )]= identifier[constraints]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[strategies] :
keyword[for] identifier[symbol] keyword[in] identifier[wyckoff_symbols] :
identifier[constraints] =[
identifier[MagOrderParameterConstraint] (
literal[int] , identifier[site_constraint_name] = literal[string] , identifier[site_constraints] = identifier[symbol]
)
]
identifier[all_constraints] [ literal[string] . identifier[format] ( identifier[symbol] )]= identifier[constraints]
identifier[transformations] ={}
keyword[for] identifier[name] , identifier[constraints] keyword[in] identifier[all_constraints] . identifier[items] ():
identifier[trans] = identifier[MagOrderingTransformation] (
identifier[mag_species_spin] ,
identifier[order_parameter] = identifier[constraints] ,
** identifier[self] . identifier[transformation_kwargs]
)
identifier[transformations] [ identifier[name] ]= identifier[trans]
keyword[return] identifier[transformations]
|
def _generate_transformations(self, structure):
"""
The central problem with trying to enumerate magnetic orderings is
that we have to enumerate orderings that might plausibly be magnetic
ground states, while not enumerating orderings that are physically
implausible. The problem is that it is not always obvious by e.g.
symmetry arguments alone which orderings to prefer. Here, we use a
variety of strategies (heuristics) to enumerate plausible orderings,
and later discard any duplicates that might be found by multiple
strategies. This approach is not ideal, but has been found to be
relatively robust over a wide range of magnetic structures.
Args:
structure: A sanitized input structure (_sanitize_input_structure)
Returns: A dict of a transformation class instance (values) and name of
enumeration strategy (keys)
"""
formula = structure.composition.reduced_formula
transformations = {}
# analyzer is used to obtain information on sanitized input
analyzer = CollinearMagneticStructureAnalyzer(structure, default_magmoms=self.default_magmoms, overwrite_magmom_mode='replace_all')
if not analyzer.is_magnetic:
raise ValueError('Not detected as magnetic, add a new default magmom for the element you believe may be magnetic?') # depends on [control=['if'], data=[]]
# now we can begin to generate our magnetic orderings
self.logger.info('Generating magnetic orderings for {}'.format(formula))
mag_species_spin = analyzer.magnetic_species_and_magmoms
types_mag_species = sorted(analyzer.types_of_magnetic_specie, key=lambda sp: analyzer.default_magmoms.get(str(sp), 0), reverse=True)
num_mag_sites = analyzer.number_of_magnetic_sites
num_unique_sites = analyzer.number_of_unique_magnetic_sites()
# enumerations become too slow as number of unique sites (and thus
# permutations) increase, 8 is a soft limit, this can be increased
# but do so with care
if num_unique_sites > self.max_unique_sites:
raise ValueError('Too many magnetic sites to sensibly perform enumeration.') # depends on [control=['if'], data=[]]
# maximum cell size to consider: as a rule of thumb, if the primitive cell
# contains a large number of magnetic sites, perhaps we only need to enumerate
# within one cell, whereas on the other extreme if the primitive cell only
# contains a single magnetic site, we have to create larger supercells
if 'max_cell_size' not in self.transformation_kwargs:
# TODO: change to 8 / num_mag_sites ?
self.transformation_kwargs['max_cell_size'] = max(1, int(4 / num_mag_sites)) # depends on [control=['if'], data=[]]
self.logger.info('Max cell size set to {}'.format(self.transformation_kwargs['max_cell_size']))
# when enumerating ferrimagnetic structures, it's useful to detect
# symmetrically distinct magnetic sites, since different
# local environments can result in different magnetic order
# (e.g. inverse spinels)
# initially, this was done by co-ordination number, but is
# now done by a full symmetry analysis
sga = SpacegroupAnalyzer(structure)
structure_sym = sga.get_symmetrized_structure()
wyckoff = ['n/a'] * len(structure)
for (indices, symbol) in zip(structure_sym.equivalent_indices, structure_sym.wyckoff_symbols):
for index in indices:
wyckoff[index] = symbol # depends on [control=['for'], data=['index']] # depends on [control=['for'], data=[]]
is_magnetic_sites = [True if site.specie in types_mag_species else False for site in structure]
# we're not interested in sites that we don't think are magnetic,
# set these symbols to None to filter them out later
wyckoff = [symbol if is_magnetic_site else 'n/a' for (symbol, is_magnetic_site) in zip(wyckoff, is_magnetic_sites)]
structure.add_site_property('wyckoff', wyckoff)
wyckoff_symbols = set(wyckoff) - {'n/a'}
# if user doesn't specifically request ferrimagnetic_Cr2NiO4 orderings,
# we apply a heuristic as to whether to attempt them or not
if self.automatic:
if 'ferrimagnetic_by_motif' not in self.strategies and len(wyckoff_symbols) > 1 and (len(types_mag_species) == 1):
self.strategies += ('ferrimagnetic_by_motif',) # depends on [control=['if'], data=[]]
if 'antiferromagnetic_by_motif' not in self.strategies and len(wyckoff_symbols) > 1 and (len(types_mag_species) == 1):
self.strategies += ('antiferromagnetic_by_motif',) # depends on [control=['if'], data=[]]
if 'ferrimagnetic_by_species' not in self.strategies and len(types_mag_species) > 1:
self.strategies += ('ferrimagnetic_by_species',) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# we start with a ferromagnetic ordering
if 'ferromagnetic' in self.strategies:
# TODO: remove 0 spins !
fm_structure = analyzer.get_ferromagnetic_structure()
# store magmom as spin property, to be consistent with output from
# other transformations
fm_structure.add_spin_by_site(fm_structure.site_properties['magmom'])
fm_structure.remove_site_property('magmom')
# we now have our first magnetic ordering...
self.ordered_structures.append(fm_structure)
self.ordered_structure_origins.append('fm') # depends on [control=['if'], data=[]]
# we store constraint(s) for each strategy first,
# and then use each to perform a transformation later
all_constraints = {}
# ...to which we can add simple AFM cases first...
if 'antiferromagnetic' in self.strategies:
# TODO: update MagOrderParameterConstraint in pymatgen to take types_mag_species directly
constraint = MagOrderParameterConstraint(0.5, species_constraints=list(map(str, types_mag_species)))
all_constraints['afm'] = [constraint]
# allows for non-magnetic sublattices
if len(types_mag_species) > 1:
for sp in types_mag_species:
constraints = [MagOrderParameterConstraint(0.5, species_constraints=str(sp))]
all_constraints['afm_by_{}'.format(sp)] = constraints # depends on [control=['for'], data=['sp']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# ...and then we also try ferrimagnetic orderings by motif if a
# single magnetic species is present...
if 'ferrimagnetic_by_motif' in self.strategies and len(wyckoff_symbols) > 1:
# these orderings are AFM on one local environment, and FM on the rest
for symbol in wyckoff_symbols:
constraints = [MagOrderParameterConstraint(0.5, site_constraint_name='wyckoff', site_constraints=symbol), MagOrderParameterConstraint(1.0, site_constraint_name='wyckoff', site_constraints=list(wyckoff_symbols - {symbol}))]
all_constraints['ferri_by_motif_{}'.format(symbol)] = constraints # depends on [control=['for'], data=['symbol']] # depends on [control=['if'], data=[]]
# and also try ferrimagnetic when there are multiple magnetic species
if 'ferrimagnetic_by_species' in self.strategies:
sp_list = [str(site.specie) for site in structure]
num_sp = {sp: sp_list.count(str(sp)) for sp in types_mag_species}
total_mag_sites = sum(num_sp.values())
for sp in types_mag_species:
# attempt via a global order parameter
all_constraints['ferri_by_{}'.format(sp)] = num_sp[sp] / total_mag_sites
# attempt via afm on sp, fm on remaining species
constraints = [MagOrderParameterConstraint(0.5, species_constraints=str(sp)), MagOrderParameterConstraint(1.0, species_constraints=list(map(str, set(types_mag_species) - {sp})))]
all_constraints['ferri_by_{}_afm'.format(sp)] = constraints # depends on [control=['for'], data=['sp']] # depends on [control=['if'], data=[]]
# ...and finally, we can try orderings that are AFM on one local
# environment, and non-magnetic on the rest -- this is less common
# but unless explicitly attempted, these states are unlikely to be found
if 'antiferromagnetic_by_motif' in self.strategies:
for symbol in wyckoff_symbols:
constraints = [MagOrderParameterConstraint(0.5, site_constraint_name='wyckoff', site_constraints=symbol)]
all_constraints['afm_by_motif_{}'.format(symbol)] = constraints # depends on [control=['for'], data=['symbol']] # depends on [control=['if'], data=[]]
# and now construct all our transformations for each strategy
transformations = {}
for (name, constraints) in all_constraints.items():
trans = MagOrderingTransformation(mag_species_spin, order_parameter=constraints, **self.transformation_kwargs)
transformations[name] = trans # depends on [control=['for'], data=[]]
return transformations
|
def split_hosts(hosts, default_port=DEFAULT_PORT):
    """Takes a string of the form host1[:port],host2[:port]... and
    splits it into (host, port) tuples. If [:port] isn't present the
    default_port is used.

    Returns a list of 2-tuples containing the host name (or IP) followed
    by the port number.

    :Parameters:
        - `hosts`: A string of the form host1[:port],host2[:port],...
        - `default_port`: The port number to use when one wasn't specified
          for a host.

    :raises: ConfigurationError if the host list contains an empty entry
        (e.g. a trailing or doubled comma).
    """
    nodes = []
    for entity in hosts.split(','):
        if not entity:
            raise ConfigurationError("Empty host "
                                     "(or extra comma in host list).")
        port = default_port
        # Unix domain socket entities are addressed by filesystem path
        # alone, so they carry no port number.
        if entity.endswith('.sock'):
            port = None
        nodes.append(parse_host(entity, port))
    return nodes
|
def function[split_hosts, parameter[hosts, default_port]]:
constant[Takes a string of the form host1[:port],host2[:port]... and
splits it into (host, port) tuples. If [:port] isn't present the
default_port is used.
Returns a set of 2-tuples containing the host name (or IP) followed by
port number.
:Parameters:
- `hosts`: A string of the form host1[:port],host2[:port],...
- `default_port`: The port number to use when one wasn't specified
for a host.
]
variable[nodes] assign[=] list[[]]
for taget[name[entity]] in starred[call[name[hosts].split, parameter[constant[,]]]] begin[:]
if <ast.UnaryOp object at 0x7da18f8113f0> begin[:]
<ast.Raise object at 0x7da18f812e00>
variable[port] assign[=] name[default_port]
if call[name[entity].endswith, parameter[constant[.sock]]] begin[:]
variable[port] assign[=] constant[None]
call[name[nodes].append, parameter[call[name[parse_host], parameter[name[entity], name[port]]]]]
return[name[nodes]]
|
keyword[def] identifier[split_hosts] ( identifier[hosts] , identifier[default_port] = identifier[DEFAULT_PORT] ):
literal[string]
identifier[nodes] =[]
keyword[for] identifier[entity] keyword[in] identifier[hosts] . identifier[split] ( literal[string] ):
keyword[if] keyword[not] identifier[entity] :
keyword[raise] identifier[ConfigurationError] ( literal[string]
literal[string] )
identifier[port] = identifier[default_port]
keyword[if] identifier[entity] . identifier[endswith] ( literal[string] ):
identifier[port] = keyword[None]
identifier[nodes] . identifier[append] ( identifier[parse_host] ( identifier[entity] , identifier[port] ))
keyword[return] identifier[nodes]
|
def split_hosts(hosts, default_port=DEFAULT_PORT):
"""Takes a string of the form host1[:port],host2[:port]... and
splits it into (host, port) tuples. If [:port] isn't present the
default_port is used.
Returns a set of 2-tuples containing the host name (or IP) followed by
port number.
:Parameters:
- `hosts`: A string of the form host1[:port],host2[:port],...
- `default_port`: The port number to use when one wasn't specified
for a host.
"""
nodes = []
for entity in hosts.split(','):
if not entity:
raise ConfigurationError('Empty host (or extra comma in host list).') # depends on [control=['if'], data=[]]
port = default_port
# Unix socket entities don't have ports
if entity.endswith('.sock'):
port = None # depends on [control=['if'], data=[]]
nodes.append(parse_host(entity, port)) # depends on [control=['for'], data=['entity']]
return nodes
|
def _containerSetPath(container, folderpath, specfiles):
"""Helper function for :class:`MsrunContainer`, :class:`SiiContainer` and
:class:`FiContainer`. Changes the folderpath of the specified specfiles in
container.info: ``container.info[specfile]['path'] = folderpath``.
:param container: a container like class that has an attribute ``.info``
:param folderpath: a filedirectory
:param specfiles: a list of ms-run names
"""
if not os.path.exists(folderpath):
warntext = 'Error while calling "_containerSetPath()": The specified '\
'directory "%s" does not exist!' %(folderpath, )
warnings.warn(warntext)
for specfile in specfiles:
if specfile in container.info:
container.info[specfile]['path'] = folderpath
else:
warntext = 'Error while calling "_containerSetPath()": The '\
'specfile "%s" is not present in the container!'\
%(specfile, )
warnings.warn(warntext)
|
def function[_containerSetPath, parameter[container, folderpath, specfiles]]:
constant[Helper function for :class:`MsrunContainer`, :class:`SiiContainer` and
:class:`FiContainer`. Changes the folderpath of the specified specfiles in
container.info: ``container.info[specfile]['path'] = folderpath``.
:param container: a container like class that has an attribute ``.info``
:param folderpath: a filedirectory
:param specfiles: a list of ms-run names
]
if <ast.UnaryOp object at 0x7da18f09e860> begin[:]
variable[warntext] assign[=] binary_operation[constant[Error while calling "_containerSetPath()": The specified directory "%s" does not exist!] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da207f013c0>]]]
call[name[warnings].warn, parameter[name[warntext]]]
for taget[name[specfile]] in starred[name[specfiles]] begin[:]
if compare[name[specfile] in name[container].info] begin[:]
call[call[name[container].info][name[specfile]]][constant[path]] assign[=] name[folderpath]
|
keyword[def] identifier[_containerSetPath] ( identifier[container] , identifier[folderpath] , identifier[specfiles] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[folderpath] ):
identifier[warntext] = literal[string] literal[string] %( identifier[folderpath] ,)
identifier[warnings] . identifier[warn] ( identifier[warntext] )
keyword[for] identifier[specfile] keyword[in] identifier[specfiles] :
keyword[if] identifier[specfile] keyword[in] identifier[container] . identifier[info] :
identifier[container] . identifier[info] [ identifier[specfile] ][ literal[string] ]= identifier[folderpath]
keyword[else] :
identifier[warntext] = literal[string] literal[string] %( identifier[specfile] ,)
identifier[warnings] . identifier[warn] ( identifier[warntext] )
|
def _containerSetPath(container, folderpath, specfiles):
"""Helper function for :class:`MsrunContainer`, :class:`SiiContainer` and
:class:`FiContainer`. Changes the folderpath of the specified specfiles in
container.info: ``container.info[specfile]['path'] = folderpath``.
:param container: a container like class that has an attribute ``.info``
:param folderpath: a filedirectory
:param specfiles: a list of ms-run names
"""
if not os.path.exists(folderpath):
warntext = 'Error while calling "_containerSetPath()": The specified directory "%s" does not exist!' % (folderpath,)
warnings.warn(warntext) # depends on [control=['if'], data=[]]
for specfile in specfiles:
if specfile in container.info:
container.info[specfile]['path'] = folderpath # depends on [control=['if'], data=['specfile']]
else:
warntext = 'Error while calling "_containerSetPath()": The specfile "%s" is not present in the container!' % (specfile,)
warnings.warn(warntext) # depends on [control=['for'], data=['specfile']]
|
def indim(self):
    """ The number of action values that the environment accepts.
    """
    # One action value per offbid for every generator; an active
    # withholding limit doubles the action space.
    base = len(self.generators) * self.numOffbids
    return base * 2 if self.maxWithhold is not None else base
|
def function[indim, parameter[self]]:
constant[ The number of action values that the environment accepts.
]
variable[indim] assign[=] binary_operation[name[self].numOffbids * call[name[len], parameter[name[self].generators]]]
if compare[name[self].maxWithhold is_not constant[None]] begin[:]
return[binary_operation[name[indim] * constant[2]]]
|
keyword[def] identifier[indim] ( identifier[self] ):
literal[string]
identifier[indim] = identifier[self] . identifier[numOffbids] * identifier[len] ( identifier[self] . identifier[generators] )
keyword[if] identifier[self] . identifier[maxWithhold] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[indim] * literal[int]
keyword[else] :
keyword[return] identifier[indim]
|
def indim(self):
""" The number of action values that the environment accepts.
"""
indim = self.numOffbids * len(self.generators)
if self.maxWithhold is not None:
return indim * 2 # depends on [control=['if'], data=[]]
else:
return indim
|
def record(self, action, props=None, path=KISSmetrics.RECORD_PATH,
           resp=False):
    """Record event for identity with any properties.

    :param action: event performed
    :param props: any additional data to include
    :type props: dict
    :param resp: indicate whether to return response
    :type resp: boolean
    :returns: an HTTP response for request if `resp=True`
    :rtype: `urllib3.response.HTTPResponse`
    :raises: Exception if either `identity` or `key` not set
    """
    self.check_id_key()
    # Substitute a fresh dict per call instead of a mutable default.
    properties = props if props else {}
    response = self.client.record(
        person=self.identity,
        event=action,
        properties=properties,
        timestamp=None,
        path=path,
    )
    if resp:
        return response
|
def function[record, parameter[self, action, props, path, resp]]:
constant[Record event for identity with any properties.
:param action: event performed
:param props: any additional data to include
:type props: dict
:param resp: indicate whether to return response
:type resp: boolean
:returns: an HTTP response for request if `resp=True`
:rtype: `urllib3.response.HTTPResponse`
:raises: Exception if either `identity` or `key` not set
]
call[name[self].check_id_key, parameter[]]
variable[timestamp] assign[=] constant[None]
if <ast.UnaryOp object at 0x7da207f01ed0> begin[:]
variable[props] assign[=] dictionary[[], []]
variable[response] assign[=] call[name[self].client.record, parameter[]]
if name[resp] begin[:]
return[name[response]]
|
keyword[def] identifier[record] ( identifier[self] , identifier[action] , identifier[props] = keyword[None] , identifier[path] = identifier[KISSmetrics] . identifier[RECORD_PATH] ,
identifier[resp] = keyword[False] ):
literal[string]
identifier[self] . identifier[check_id_key] ()
identifier[timestamp] = keyword[None]
keyword[if] keyword[not] identifier[props] :
identifier[props] ={}
identifier[response] = identifier[self] . identifier[client] . identifier[record] ( identifier[person] = identifier[self] . identifier[identity] , identifier[event] = identifier[action] ,
identifier[properties] = identifier[props] , identifier[timestamp] = identifier[timestamp] ,
identifier[path] = identifier[path] )
keyword[if] identifier[resp] :
keyword[return] identifier[response]
|
def record(self, action, props=None, path=KISSmetrics.RECORD_PATH, resp=False):
"""Record event for identity with any properties.
:param action: event performed
:param props: any additional data to include
:type props: dict
:param resp: indicate whether to return response
:type resp: boolean
:returns: an HTTP response for request if `resp=True`
:rtype: `urllib3.response.HTTPResponse`
:raises: Exception if either `identity` or `key` not set
"""
self.check_id_key()
timestamp = None
if not props:
props = {} # depends on [control=['if'], data=[]]
response = self.client.record(person=self.identity, event=action, properties=props, timestamp=timestamp, path=path)
if resp:
return response # depends on [control=['if'], data=[]]
|
def from_json(cls, json_string):
    """
    Creates and return a DataFrame from a JSON of the type created by to_json

    :param json_string: JSON
    :return: DataFrame
    """
    payload = json.loads(json_string)
    index = payload['index']
    # Multi-index entries round-trip through JSON as lists; restore tuples.
    if index and isinstance(index[0], list):
        index = [tuple(entry) for entry in index]
        payload['index'] = index
    meta = payload['meta_data']
    # Same restoration for a multi-part index name.
    if isinstance(meta['index_name'], list):
        meta['index_name'] = tuple(meta['index_name'])
    data = payload['data'] or None
    return cls(data=data, index=index, **meta)
|
def function[from_json, parameter[cls, json_string]]:
constant[
Creates and return a DataFrame from a JSON of the type created by to_json
:param json_string: JSON
:return: DataFrame
]
variable[input_dict] assign[=] call[name[json].loads, parameter[name[json_string]]]
if <ast.BoolOp object at 0x7da20e954190> begin[:]
call[name[input_dict]][constant[index]] assign[=] <ast.ListComp object at 0x7da20e955e10>
if call[name[isinstance], parameter[call[call[name[input_dict]][constant[meta_data]]][constant[index_name]], name[list]]] begin[:]
call[call[name[input_dict]][constant[meta_data]]][constant[index_name]] assign[=] call[name[tuple], parameter[call[call[name[input_dict]][constant[meta_data]]][constant[index_name]]]]
variable[data] assign[=] <ast.IfExp object at 0x7da20e956dd0>
return[call[name[cls], parameter[]]]
|
keyword[def] identifier[from_json] ( identifier[cls] , identifier[json_string] ):
literal[string]
identifier[input_dict] = identifier[json] . identifier[loads] ( identifier[json_string] )
keyword[if] identifier[input_dict] [ literal[string] ] keyword[and] identifier[isinstance] ( identifier[input_dict] [ literal[string] ][ literal[int] ], identifier[list] ):
identifier[input_dict] [ literal[string] ]=[ identifier[tuple] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[input_dict] [ literal[string] ]]
keyword[if] identifier[isinstance] ( identifier[input_dict] [ literal[string] ][ literal[string] ], identifier[list] ):
identifier[input_dict] [ literal[string] ][ literal[string] ]= identifier[tuple] ( identifier[input_dict] [ literal[string] ][ literal[string] ])
identifier[data] = identifier[input_dict] [ literal[string] ] keyword[if] identifier[input_dict] [ literal[string] ] keyword[else] keyword[None]
keyword[return] identifier[cls] ( identifier[data] = identifier[data] , identifier[index] = identifier[input_dict] [ literal[string] ],** identifier[input_dict] [ literal[string] ])
|
def from_json(cls, json_string):
"""
Creates and return a DataFrame from a JSON of the type created by to_json
:param json_string: JSON
:return: DataFrame
"""
input_dict = json.loads(json_string)
# convert index to tuple if required
if input_dict['index'] and isinstance(input_dict['index'][0], list):
input_dict['index'] = [tuple(x) for x in input_dict['index']] # depends on [control=['if'], data=[]]
# convert index_name to tuple if required
if isinstance(input_dict['meta_data']['index_name'], list):
input_dict['meta_data']['index_name'] = tuple(input_dict['meta_data']['index_name']) # depends on [control=['if'], data=[]]
data = input_dict['data'] if input_dict['data'] else None
return cls(data=data, index=input_dict['index'], **input_dict['meta_data'])
|
def kill_current_session(ctx: Context_T) -> None:
    """
    Force kill current session of the given context,
    despite whether it is running or not.

    :param ctx: message context
    """
    # pop() with a default removes the entry when present and is a no-op
    # when no session exists for this context.
    _sessions.pop(context_id(ctx), None)
|
def function[kill_current_session, parameter[ctx]]:
constant[
Force kill current session of the given context,
despite whether it is running or not.
:param ctx: message context
]
variable[ctx_id] assign[=] call[name[context_id], parameter[name[ctx]]]
if compare[name[ctx_id] in name[_sessions]] begin[:]
<ast.Delete object at 0x7da18bc71bd0>
|
keyword[def] identifier[kill_current_session] ( identifier[ctx] : identifier[Context_T] )-> keyword[None] :
literal[string]
identifier[ctx_id] = identifier[context_id] ( identifier[ctx] )
keyword[if] identifier[ctx_id] keyword[in] identifier[_sessions] :
keyword[del] identifier[_sessions] [ identifier[ctx_id] ]
|
def kill_current_session(ctx: Context_T) -> None:
"""
Force kill current session of the given context,
despite whether it is running or not.
:param ctx: message context
"""
ctx_id = context_id(ctx)
if ctx_id in _sessions:
del _sessions[ctx_id] # depends on [control=['if'], data=['ctx_id', '_sessions']]
|
def path(self, which=None):
    """Extend ``nailgun.entity_mixins.Entity.path``.

    The format of the returned path depends on the value of ``which``:

    refresh
        /api/smart_proxies/:id/refresh

    Otherwise, call ``super``.
    """
    base_path = super(SmartProxy, self).path
    if which == 'refresh':
        # Action endpoints hang off the entity's own path.
        return '{0}/{1}'.format(base_path(which='self'), which)
    return base_path(which)
|
def function[path, parameter[self, which]]:
constant[Extend ``nailgun.entity_mixins.Entity.path``.
The format of the returned path depends on the value of ``which``:
refresh
/api/smart_proxies/:id/refresh
Otherwise, call ``super``.
]
if compare[name[which] in tuple[[<ast.Constant object at 0x7da2049608e0>]]] begin[:]
return[call[constant[{0}/{1}].format, parameter[call[call[name[super], parameter[name[SmartProxy], name[self]]].path, parameter[]], name[which]]]]
return[call[call[name[super], parameter[name[SmartProxy], name[self]]].path, parameter[name[which]]]]
|
keyword[def] identifier[path] ( identifier[self] , identifier[which] = keyword[None] ):
literal[string]
keyword[if] identifier[which] keyword[in] ( literal[string] ,):
keyword[return] literal[string] . identifier[format] (
identifier[super] ( identifier[SmartProxy] , identifier[self] ). identifier[path] ( identifier[which] = literal[string] ),
identifier[which]
)
keyword[return] identifier[super] ( identifier[SmartProxy] , identifier[self] ). identifier[path] ( identifier[which] )
|
def path(self, which=None):
"""Extend ``nailgun.entity_mixins.Entity.path``.
The format of the returned path depends on the value of ``which``:
refresh
/api/smart_proxies/:id/refresh
Otherwise, call ``super``.
"""
if which in ('refresh',):
return '{0}/{1}'.format(super(SmartProxy, self).path(which='self'), which) # depends on [control=['if'], data=['which']]
return super(SmartProxy, self).path(which)
|
def get_random_password():
    """Get a random password that complies with most of the requirements.

    Note:
        This random password is not strong and not "really" random, and should only be
        used for testing purposes.

    Returns:
        str: The random password.
    """
    # Fixed-size segments drawn from each required character class,
    # concatenated in a fixed order: lower, digits, symbols, upper.
    segments = (
        RandomInputHelper.get_random_value(4, [string.ascii_lowercase]),
        RandomInputHelper.get_random_value(2, [string.digits]),
        RandomInputHelper.get_random_value(2, ["$&*@!"]),
        RandomInputHelper.get_random_value(4, [string.ascii_uppercase]),
    )
    return "".join(segments)
|
def function[get_random_password, parameter[]]:
constant[Get a random password that complies with most of the requirements.
Note:
This random password is not strong and not "really" random, and should only be
used for testing purposes.
Returns:
str: The random password.
]
variable[password] assign[=] list[[]]
call[name[password].append, parameter[call[name[RandomInputHelper].get_random_value, parameter[constant[4], list[[<ast.Attribute object at 0x7da18f09f970>]]]]]]
call[name[password].append, parameter[call[name[RandomInputHelper].get_random_value, parameter[constant[2], list[[<ast.Attribute object at 0x7da18f09e9e0>]]]]]]
call[name[password].append, parameter[call[name[RandomInputHelper].get_random_value, parameter[constant[2], list[[<ast.Constant object at 0x7da18f09fe20>]]]]]]
call[name[password].append, parameter[call[name[RandomInputHelper].get_random_value, parameter[constant[4], list[[<ast.Attribute object at 0x7da18f09efb0>]]]]]]
return[call[constant[].join, parameter[name[password]]]]
|
keyword[def] identifier[get_random_password] ():
literal[string]
identifier[password] =[]
identifier[password] . identifier[append] ( identifier[RandomInputHelper] . identifier[get_random_value] ( literal[int] ,[ identifier[string] . identifier[ascii_lowercase] ]))
identifier[password] . identifier[append] ( identifier[RandomInputHelper] . identifier[get_random_value] ( literal[int] ,[ identifier[string] . identifier[digits] ]))
identifier[password] . identifier[append] ( identifier[RandomInputHelper] . identifier[get_random_value] ( literal[int] ,[ literal[string] ]))
identifier[password] . identifier[append] ( identifier[RandomInputHelper] . identifier[get_random_value] ( literal[int] ,[ identifier[string] . identifier[ascii_uppercase] ]))
keyword[return] literal[string] . identifier[join] ( identifier[password] )
|
def get_random_password():
"""Get a random password that complies with most of the requirements.
Note:
This random password is not strong and not "really" random, and should only be
used for testing purposes.
Returns:
str: The random password.
"""
password = []
password.append(RandomInputHelper.get_random_value(4, [string.ascii_lowercase]))
password.append(RandomInputHelper.get_random_value(2, [string.digits]))
password.append(RandomInputHelper.get_random_value(2, ['$&*@!']))
password.append(RandomInputHelper.get_random_value(4, [string.ascii_uppercase]))
return ''.join(password)
|
def add_publication(cursor, epub, epub_file, is_pre_publication=False):
    """Adds a publication entry and makes each item
    a pending document.

    :param cursor: database cursor used for all SQL statements
    :param epub: EPUB object; ``epub[0].metadata`` supplies ``publisher`` and
        ``publication_message``, and iterating it yields packages
    :param epub_file: open file object whose raw bytes are stored on the
        publication row
    :param is_pre_publication: flag stored as-is on the publication row
    :return: tuple of ``(publication_id, insert_mapping)``, where
        ``insert_mapping`` maps model ids to the ident-hash assigned by
        ``add_pending_model``
    """
    publisher = epub[0].metadata['publisher']
    publish_message = epub[0].metadata['publication_message']
    # The entire uploaded EPUB is persisted alongside the publication record.
    epub_binary = psycopg2.Binary(epub_file.read())
    args = (publisher, publish_message, epub_binary, is_pre_publication,)
    cursor.execute("""\
INSERT INTO publications
  ("publisher", "publication_message", "epub", "is_pre_publication")
VALUES (%s, %s, %s, %s)
RETURNING id
""", args)
    publication_id = cursor.fetchone()[0]
    insert_mapping = {}
    # Track models already registered so shared documents/binders across
    # packages are only made "pending" once.
    models = set([])
    for package in epub:
        binder = cnxepub.adapt_package(package)
        if binder in models:
            continue
        # Register every document reachable from this binder.
        for document in cnxepub.flatten_to_documents(binder):
            if document not in models:
                ident_hash = add_pending_model(
                    cursor, publication_id, document)
                insert_mapping[document.id] = ident_hash
                models.add(document)
        # The binding object could be translucent/see-through,
        # (case for a binder that only contains loose-documents).
        # Otherwise we should also publish the the binder.
        if not binder.is_translucent:
            ident_hash = add_pending_model(cursor, publication_id, binder)
            insert_mapping[binder.id] = ident_hash
            models.add(binder)
    for model in models:
        # Now that all models have been given an identifier
        # we can write the content to the database.
        try:
            add_pending_model_content(cursor, publication_id, model)
        except ResourceFileExceededLimitError as e:
            # Mark the whole publication failed but keep processing the
            # remaining models rather than aborting midway.
            e.publication_id = publication_id
            set_publication_failure(cursor, e)
    return publication_id, insert_mapping
|
def function[add_publication, parameter[cursor, epub, epub_file, is_pre_publication]]:
constant[Adds a publication entry and makes each item
a pending document.
]
variable[publisher] assign[=] call[call[name[epub]][constant[0]].metadata][constant[publisher]]
variable[publish_message] assign[=] call[call[name[epub]][constant[0]].metadata][constant[publication_message]]
variable[epub_binary] assign[=] call[name[psycopg2].Binary, parameter[call[name[epub_file].read, parameter[]]]]
variable[args] assign[=] tuple[[<ast.Name object at 0x7da1b003f2b0>, <ast.Name object at 0x7da1b003cf10>, <ast.Name object at 0x7da1b003f640>, <ast.Name object at 0x7da1b003f670>]]
call[name[cursor].execute, parameter[constant[INSERT INTO publications
("publisher", "publication_message", "epub", "is_pre_publication")
VALUES (%s, %s, %s, %s)
RETURNING id
], name[args]]]
variable[publication_id] assign[=] call[call[name[cursor].fetchone, parameter[]]][constant[0]]
variable[insert_mapping] assign[=] dictionary[[], []]
variable[models] assign[=] call[name[set], parameter[list[[]]]]
for taget[name[package]] in starred[name[epub]] begin[:]
variable[binder] assign[=] call[name[cnxepub].adapt_package, parameter[name[package]]]
if compare[name[binder] in name[models]] begin[:]
continue
for taget[name[document]] in starred[call[name[cnxepub].flatten_to_documents, parameter[name[binder]]]] begin[:]
if compare[name[document] <ast.NotIn object at 0x7da2590d7190> name[models]] begin[:]
variable[ident_hash] assign[=] call[name[add_pending_model], parameter[name[cursor], name[publication_id], name[document]]]
call[name[insert_mapping]][name[document].id] assign[=] name[ident_hash]
call[name[models].add, parameter[name[document]]]
if <ast.UnaryOp object at 0x7da1b003ff10> begin[:]
variable[ident_hash] assign[=] call[name[add_pending_model], parameter[name[cursor], name[publication_id], name[binder]]]
call[name[insert_mapping]][name[binder].id] assign[=] name[ident_hash]
call[name[models].add, parameter[name[binder]]]
for taget[name[model]] in starred[name[models]] begin[:]
<ast.Try object at 0x7da1b003e260>
return[tuple[[<ast.Name object at 0x7da1b00fa1d0>, <ast.Name object at 0x7da1b00fb0a0>]]]
|
keyword[def] identifier[add_publication] ( identifier[cursor] , identifier[epub] , identifier[epub_file] , identifier[is_pre_publication] = keyword[False] ):
literal[string]
identifier[publisher] = identifier[epub] [ literal[int] ]. identifier[metadata] [ literal[string] ]
identifier[publish_message] = identifier[epub] [ literal[int] ]. identifier[metadata] [ literal[string] ]
identifier[epub_binary] = identifier[psycopg2] . identifier[Binary] ( identifier[epub_file] . identifier[read] ())
identifier[args] =( identifier[publisher] , identifier[publish_message] , identifier[epub_binary] , identifier[is_pre_publication] ,)
identifier[cursor] . identifier[execute] ( literal[string] , identifier[args] )
identifier[publication_id] = identifier[cursor] . identifier[fetchone] ()[ literal[int] ]
identifier[insert_mapping] ={}
identifier[models] = identifier[set] ([])
keyword[for] identifier[package] keyword[in] identifier[epub] :
identifier[binder] = identifier[cnxepub] . identifier[adapt_package] ( identifier[package] )
keyword[if] identifier[binder] keyword[in] identifier[models] :
keyword[continue]
keyword[for] identifier[document] keyword[in] identifier[cnxepub] . identifier[flatten_to_documents] ( identifier[binder] ):
keyword[if] identifier[document] keyword[not] keyword[in] identifier[models] :
identifier[ident_hash] = identifier[add_pending_model] (
identifier[cursor] , identifier[publication_id] , identifier[document] )
identifier[insert_mapping] [ identifier[document] . identifier[id] ]= identifier[ident_hash]
identifier[models] . identifier[add] ( identifier[document] )
keyword[if] keyword[not] identifier[binder] . identifier[is_translucent] :
identifier[ident_hash] = identifier[add_pending_model] ( identifier[cursor] , identifier[publication_id] , identifier[binder] )
identifier[insert_mapping] [ identifier[binder] . identifier[id] ]= identifier[ident_hash]
identifier[models] . identifier[add] ( identifier[binder] )
keyword[for] identifier[model] keyword[in] identifier[models] :
keyword[try] :
identifier[add_pending_model_content] ( identifier[cursor] , identifier[publication_id] , identifier[model] )
keyword[except] identifier[ResourceFileExceededLimitError] keyword[as] identifier[e] :
identifier[e] . identifier[publication_id] = identifier[publication_id]
identifier[set_publication_failure] ( identifier[cursor] , identifier[e] )
keyword[return] identifier[publication_id] , identifier[insert_mapping]
|
def add_publication(cursor, epub, epub_file, is_pre_publication=False):
"""Adds a publication entry and makes each item
a pending document.
"""
publisher = epub[0].metadata['publisher']
publish_message = epub[0].metadata['publication_message']
epub_binary = psycopg2.Binary(epub_file.read())
args = (publisher, publish_message, epub_binary, is_pre_publication)
cursor.execute('INSERT INTO publications\n ("publisher", "publication_message", "epub", "is_pre_publication")\nVALUES (%s, %s, %s, %s)\nRETURNING id\n', args)
publication_id = cursor.fetchone()[0]
insert_mapping = {}
models = set([])
for package in epub:
binder = cnxepub.adapt_package(package)
if binder in models:
continue # depends on [control=['if'], data=[]]
for document in cnxepub.flatten_to_documents(binder):
if document not in models:
ident_hash = add_pending_model(cursor, publication_id, document)
insert_mapping[document.id] = ident_hash
models.add(document) # depends on [control=['if'], data=['document', 'models']] # depends on [control=['for'], data=['document']]
# The binding object could be translucent/see-through,
# (case for a binder that only contains loose-documents).
# Otherwise we should also publish the the binder.
if not binder.is_translucent:
ident_hash = add_pending_model(cursor, publication_id, binder)
insert_mapping[binder.id] = ident_hash
models.add(binder) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['package']]
for model in models:
# Now that all models have been given an identifier
# we can write the content to the database.
try:
add_pending_model_content(cursor, publication_id, model) # depends on [control=['try'], data=[]]
except ResourceFileExceededLimitError as e:
e.publication_id = publication_id
set_publication_failure(cursor, e) # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['model']]
return (publication_id, insert_mapping)
|
def get_all_items_of_invoice(self, invoice_id):
    """
    Get all items of invoice

    This will iterate over all pages until it gets all elements.
    So if the rate limit exceeded it will throw an Exception and you will get nothing

    :param invoice_id: the invoice id
    :return: list
    """
    # Delegate pagination to the shared iterator helper.
    return self._iterate_through_pages(
        get_function=self.get_items_of_invoice_per_page,
        resource=INVOICE_ITEMS,
        invoice_id=invoice_id,
    )
|
def function[get_all_items_of_invoice, parameter[self, invoice_id]]:
constant[
Get all items of invoice
This will iterate over all pages until it gets all elements.
So if the rate limit exceeded it will throw an Exception and you will get nothing
:param invoice_id: the invoice id
:return: list
]
return[call[name[self]._iterate_through_pages, parameter[]]]
|
keyword[def] identifier[get_all_items_of_invoice] ( identifier[self] , identifier[invoice_id] ):
literal[string]
keyword[return] identifier[self] . identifier[_iterate_through_pages] (
identifier[get_function] = identifier[self] . identifier[get_items_of_invoice_per_page] ,
identifier[resource] = identifier[INVOICE_ITEMS] ,
**{ literal[string] : identifier[invoice_id] }
)
|
def get_all_items_of_invoice(self, invoice_id):
"""
Get all items of invoice
This will iterate over all pages until it gets all elements.
So if the rate limit exceeded it will throw an Exception and you will get nothing
:param invoice_id: the invoice id
:return: list
"""
return self._iterate_through_pages(get_function=self.get_items_of_invoice_per_page, resource=INVOICE_ITEMS, **{'invoice_id': invoice_id})
|
def distributedConnectionMap(names: List[str]) -> OrderedDict:
    """
    Create a map where every node is connected every other node.
    Assume each key in the returned dictionary to be connected to each item in
    its value(list).

    :param names: a list of node names
    :return: a dictionary of name -> list(name).
    """
    # Sort a copy so the caller's list is not mutated in place
    # (the original implementation called names.sort()).
    ordered_names = sorted(names)
    combos = list(itertools.combinations(ordered_names, 2))
    # Cap on how many connections a single node may own so the pairs are
    # spread roughly evenly across all nodes.
    maxPer = math.ceil(len(combos) / len(ordered_names))
    connmap = OrderedDict((n, []) for n in ordered_names)
    for a, b in combos:
        # Assign each pair to its first endpoint until that node is full,
        # then fall back to the second endpoint.
        if len(connmap[a]) < maxPer:
            connmap[a].append(b)
        else:
            connmap[b].append(a)
    return connmap
|
def function[distributedConnectionMap, parameter[names]]:
constant[
Create a map where every node is connected every other node.
Assume each key in the returned dictionary to be connected to each item in
its value(list).
:param names: a list of node names
:return: a dictionary of name -> list(name).
]
call[name[names].sort, parameter[]]
variable[combos] assign[=] call[name[list], parameter[call[name[itertools].combinations, parameter[name[names], constant[2]]]]]
variable[maxPer] assign[=] call[name[math].ceil, parameter[binary_operation[call[name[len], parameter[call[name[list], parameter[name[combos]]]]] / call[name[len], parameter[name[names]]]]]]
variable[connmap] assign[=] call[name[OrderedDict], parameter[<ast.GeneratorExp object at 0x7da2044c2fe0>]]
for taget[tuple[[<ast.Name object at 0x7da2044c13c0>, <ast.Name object at 0x7da2044c1450>]]] in starred[name[combos]] begin[:]
if compare[call[name[len], parameter[call[name[connmap]][name[a]]]] less[<] name[maxPer]] begin[:]
call[call[name[connmap]][name[a]].append, parameter[name[b]]]
return[name[connmap]]
|
keyword[def] identifier[distributedConnectionMap] ( identifier[names] : identifier[List] [ identifier[str] ])-> identifier[OrderedDict] :
literal[string]
identifier[names] . identifier[sort] ()
identifier[combos] = identifier[list] ( identifier[itertools] . identifier[combinations] ( identifier[names] , literal[int] ))
identifier[maxPer] = identifier[math] . identifier[ceil] ( identifier[len] ( identifier[list] ( identifier[combos] ))/ identifier[len] ( identifier[names] ))
identifier[connmap] = identifier[OrderedDict] (( identifier[n] ,[]) keyword[for] identifier[n] keyword[in] identifier[names] )
keyword[for] identifier[a] , identifier[b] keyword[in] identifier[combos] :
keyword[if] identifier[len] ( identifier[connmap] [ identifier[a] ])< identifier[maxPer] :
identifier[connmap] [ identifier[a] ]. identifier[append] ( identifier[b] )
keyword[else] :
identifier[connmap] [ identifier[b] ]. identifier[append] ( identifier[a] )
keyword[return] identifier[connmap]
|
def distributedConnectionMap(names: List[str]) -> OrderedDict:
"""
Create a map where every node is connected every other node.
Assume each key in the returned dictionary to be connected to each item in
its value(list).
:param names: a list of node names
:return: a dictionary of name -> list(name).
"""
names.sort()
combos = list(itertools.combinations(names, 2))
maxPer = math.ceil(len(list(combos)) / len(names))
# maxconns = math.ceil(len(names) / 2)
connmap = OrderedDict(((n, []) for n in names))
for (a, b) in combos:
if len(connmap[a]) < maxPer:
connmap[a].append(b) # depends on [control=['if'], data=[]]
else:
connmap[b].append(a) # depends on [control=['for'], data=[]]
return connmap
|
def write(self, fh):
    """
    Write set to a GFF3 format file.

    :param file fh: file handle for file to write to
    """
    # Emit roots in a deterministic order; each record writes its subtree.
    roots_in_order = sorted(self.roots, key=self._recSortKey)
    fh.write(GFF3_HEADER + "\n")
    for rec in roots_in_order:
        self._writeRec(fh, rec)
|
def function[write, parameter[self, fh]]:
constant[
Write set to a GFF3 format file.
:param file fh: file handle for file to write to
]
call[name[fh].write, parameter[binary_operation[name[GFF3_HEADER] + constant[
]]]]
for taget[name[root]] in starred[call[name[sorted], parameter[name[self].roots]]] begin[:]
call[name[self]._writeRec, parameter[name[fh], name[root]]]
|
keyword[def] identifier[write] ( identifier[self] , identifier[fh] ):
literal[string]
identifier[fh] . identifier[write] ( identifier[GFF3_HEADER] + literal[string] )
keyword[for] identifier[root] keyword[in] identifier[sorted] ( identifier[self] . identifier[roots] , identifier[key] = identifier[self] . identifier[_recSortKey] ):
identifier[self] . identifier[_writeRec] ( identifier[fh] , identifier[root] )
|
def write(self, fh):
"""
Write set to a GFF3 format file.
:param file fh: file handle for file to write to
"""
fh.write(GFF3_HEADER + '\n')
for root in sorted(self.roots, key=self._recSortKey):
self._writeRec(fh, root) # depends on [control=['for'], data=['root']]
|
def query_google(point, max_distance, key):
    """ Queries google maps API for a location

    Args:
        point (:obj:`Point`): Point location to query
        max_distance (float): Search radius, in meters
        key (str): Valid google maps api key
    Returns:
        :obj:`list` of :obj:`dict`: List of locations with the following format:
            {
                'label': 'Coffee house',
                'types': 'Commerce',
                'suggestion_type': 'GOOGLE'
            }
    """
    if not key:
        return []
    # Bug fix: the cache was probed twice (once in the `if`, once in the
    # `return`); look it up a single time instead.
    cached = from_cache(GG_CACHE, point, max_distance)
    if cached:
        return cached
    req = requests.get(GOOGLE_PLACES_URL % (
        point.lat,
        point.lon,
        max_distance,
        key
    ))
    if req.status_code != 200:
        # Any non-OK answer is treated as "no suggestions".
        return []
    results = req.json()['results']
    final_results = []
    for local in results:
        location = local['geometry']['location']
        final_results.append({
            'label': local['name'],
            'distance': Point(location['lat'], location['lng'], None).distance(point),
            'types': local['types'],
            'suggestion_type': 'GOOGLE'
        })
    google_insert_cache(point, final_results)
    return final_results
|
def function[query_google, parameter[point, max_distance, key]]:
constant[ Queries google maps API for a location
Args:
point (:obj:`Point`): Point location to query
max_distance (float): Search radius, in meters
key (str): Valid google maps api key
Returns:
:obj:`list` of :obj:`dict`: List of locations with the following format:
{
'label': 'Coffee house',
'types': 'Commerce',
'suggestion_type': 'GOOGLE'
}
]
if <ast.UnaryOp object at 0x7da1b05f1000> begin[:]
return[list[[]]]
if call[name[from_cache], parameter[name[GG_CACHE], name[point], name[max_distance]]] begin[:]
return[call[name[from_cache], parameter[name[GG_CACHE], name[point], name[max_distance]]]]
variable[req] assign[=] call[name[requests].get, parameter[binary_operation[name[GOOGLE_PLACES_URL] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b05f36d0>, <ast.Attribute object at 0x7da1b05f1300>, <ast.Name object at 0x7da1b05f0970>, <ast.Name object at 0x7da1b05f34c0>]]]]]
if compare[name[req].status_code not_equal[!=] constant[200]] begin[:]
return[list[[]]]
variable[response] assign[=] call[name[req].json, parameter[]]
variable[results] assign[=] call[name[response]][constant[results]]
variable[final_results] assign[=] list[[]]
for taget[name[local]] in starred[name[results]] begin[:]
call[name[final_results].append, parameter[dictionary[[<ast.Constant object at 0x7da1b05f2e30>, <ast.Constant object at 0x7da1b05f2bf0>, <ast.Constant object at 0x7da1b05f0f40>, <ast.Constant object at 0x7da1b05f0e50>], [<ast.Subscript object at 0x7da1b05f3fd0>, <ast.Call object at 0x7da1b05f1900>, <ast.Subscript object at 0x7da1b05f2b90>, <ast.Constant object at 0x7da1b05f12a0>]]]]
call[name[google_insert_cache], parameter[name[point], name[final_results]]]
return[name[final_results]]
|
keyword[def] identifier[query_google] ( identifier[point] , identifier[max_distance] , identifier[key] ):
literal[string]
keyword[if] keyword[not] identifier[key] :
keyword[return] []
keyword[if] identifier[from_cache] ( identifier[GG_CACHE] , identifier[point] , identifier[max_distance] ):
keyword[return] identifier[from_cache] ( identifier[GG_CACHE] , identifier[point] , identifier[max_distance] )
identifier[req] = identifier[requests] . identifier[get] ( identifier[GOOGLE_PLACES_URL] %(
identifier[point] . identifier[lat] ,
identifier[point] . identifier[lon] ,
identifier[max_distance] ,
identifier[key]
))
keyword[if] identifier[req] . identifier[status_code] != literal[int] :
keyword[return] []
identifier[response] = identifier[req] . identifier[json] ()
identifier[results] = identifier[response] [ literal[string] ]
identifier[final_results] =[]
keyword[for] identifier[local] keyword[in] identifier[results] :
identifier[final_results] . identifier[append] ({
literal[string] : identifier[local] [ literal[string] ],
literal[string] : identifier[Point] ( identifier[local] [ literal[string] ][ literal[string] ][ literal[string] ], identifier[local] [ literal[string] ][ literal[string] ][ literal[string] ], keyword[None] ). identifier[distance] ( identifier[point] ),
literal[string] : identifier[local] [ literal[string] ],
literal[string] : literal[string]
})
identifier[google_insert_cache] ( identifier[point] , identifier[final_results] )
keyword[return] identifier[final_results]
|
def query_google(point, max_distance, key):
""" Queries google maps API for a location
Args:
point (:obj:`Point`): Point location to query
max_distance (float): Search radius, in meters
key (str): Valid google maps api key
Returns:
:obj:`list` of :obj:`dict`: List of locations with the following format:
{
'label': 'Coffee house',
'types': 'Commerce',
'suggestion_type': 'GOOGLE'
}
"""
if not key:
return [] # depends on [control=['if'], data=[]]
if from_cache(GG_CACHE, point, max_distance):
return from_cache(GG_CACHE, point, max_distance) # depends on [control=['if'], data=[]]
req = requests.get(GOOGLE_PLACES_URL % (point.lat, point.lon, max_distance, key))
if req.status_code != 200:
return [] # depends on [control=['if'], data=[]]
response = req.json()
results = response['results']
# l = len(results)
final_results = []
for local in results:
# 'rank': (l-i)/float(l),
final_results.append({'label': local['name'], 'distance': Point(local['geometry']['location']['lat'], local['geometry']['location']['lng'], None).distance(point), 'types': local['types'], 'suggestion_type': 'GOOGLE'}) # depends on [control=['for'], data=['local']]
google_insert_cache(point, final_results)
return final_results
|
def DualDBSystemCronJob(legacy_name=None, stateful=False):
  """Decorator that creates AFF4 and RELDB cronjobs from a given mixin.

  Args:
    legacy_name: Name under which the generated AFF4 (legacy) cron job class
      is registered in the mixin's module. Required; validated when the
      decorator is applied to a class.
    stateful: If True, the legacy class derives from StatefulSystemCronFlow
      instead of SystemCronFlow.

  Returns:
    A class decorator. Applied to a mixin class, it registers a legacy
    AFF4-style class in the mixin's module as a side effect and returns a
    RELDB-style class (built from the same mixin) that replaces the
    decorated class.

  Raises:
    ValueError: If legacy_name was not provided, or if the mixin already
      inherits from one of the generated base classes.
  """
  def Decorator(cls):
    """Decorator producing 2 classes: legacy style one and a new style one."""
    if not legacy_name:
      raise ValueError("legacy_name has to be provided")
    # Legacy cron jobs have different base classes depending on whether they're
    # stateful or not.
    if stateful:
      aff4_base_cls = StatefulSystemCronFlow
    else:
      aff4_base_cls = SystemCronFlow
    # Make sure that we're dealing with a true mixin to avoid subtle errors.
    if issubclass(cls, cronjobs.SystemCronJobBase):
      raise ValueError("Mixin class shouldn't inherit from SystemCronJobBase")
    if issubclass(cls, aff4_base_cls):
      raise ValueError("Mixin class shouldn't inherit from %s" %
                       aff4_base_cls.__name__)
    # Generate legacy class. Register it within the module as it's not going
    # to be returned from the decorator.
    aff4_cls = compatibility.MakeType(
        legacy_name, (cls, LegacyCronJobAdapterMixin, aff4_base_cls), {})
    # Binding the class in the defining module makes it importable and lets
    # registration mechanisms relying on module attributes find it.
    module = sys.modules[cls.__module__]
    setattr(module, legacy_name, aff4_cls)
    # Generate new class. No need to register it in the module (like the legacy
    # one) since it will replace the original decorated class.
    reldb_cls = compatibility.MakeType(
        compatibility.GetName(cls), (cls, cronjobs.SystemCronJobBase), {})
    return reldb_cls
  return Decorator
|
def function[DualDBSystemCronJob, parameter[legacy_name, stateful]]:
constant[Decorator that creates AFF4 and RELDB cronjobs from a given mixin.]
def function[Decorator, parameter[cls]]:
constant[Decorator producing 2 classes: legacy style one and a new style one.]
if <ast.UnaryOp object at 0x7da1b1b29e10> begin[:]
<ast.Raise object at 0x7da1b1b29bd0>
if name[stateful] begin[:]
variable[aff4_base_cls] assign[=] name[StatefulSystemCronFlow]
if call[name[issubclass], parameter[name[cls], name[cronjobs].SystemCronJobBase]] begin[:]
<ast.Raise object at 0x7da1b1b29720>
if call[name[issubclass], parameter[name[cls], name[aff4_base_cls]]] begin[:]
<ast.Raise object at 0x7da1b1b2a800>
variable[aff4_cls] assign[=] call[name[compatibility].MakeType, parameter[name[legacy_name], tuple[[<ast.Name object at 0x7da1b1b2a950>, <ast.Name object at 0x7da1b1b294b0>, <ast.Name object at 0x7da1b1b29450>]], dictionary[[], []]]]
variable[module] assign[=] call[name[sys].modules][name[cls].__module__]
call[name[setattr], parameter[name[module], name[legacy_name], name[aff4_cls]]]
variable[reldb_cls] assign[=] call[name[compatibility].MakeType, parameter[call[name[compatibility].GetName, parameter[name[cls]]], tuple[[<ast.Name object at 0x7da1b1b29810>, <ast.Attribute object at 0x7da1b1b2a110>]], dictionary[[], []]]]
return[name[reldb_cls]]
return[name[Decorator]]
|
keyword[def] identifier[DualDBSystemCronJob] ( identifier[legacy_name] = keyword[None] , identifier[stateful] = keyword[False] ):
literal[string]
keyword[def] identifier[Decorator] ( identifier[cls] ):
literal[string]
keyword[if] keyword[not] identifier[legacy_name] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[stateful] :
identifier[aff4_base_cls] = identifier[StatefulSystemCronFlow]
keyword[else] :
identifier[aff4_base_cls] = identifier[SystemCronFlow]
keyword[if] identifier[issubclass] ( identifier[cls] , identifier[cronjobs] . identifier[SystemCronJobBase] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[issubclass] ( identifier[cls] , identifier[aff4_base_cls] ):
keyword[raise] identifier[ValueError] ( literal[string] %
identifier[aff4_base_cls] . identifier[__name__] )
identifier[aff4_cls] = identifier[compatibility] . identifier[MakeType] (
identifier[legacy_name] ,( identifier[cls] , identifier[LegacyCronJobAdapterMixin] , identifier[aff4_base_cls] ),{})
identifier[module] = identifier[sys] . identifier[modules] [ identifier[cls] . identifier[__module__] ]
identifier[setattr] ( identifier[module] , identifier[legacy_name] , identifier[aff4_cls] )
identifier[reldb_cls] = identifier[compatibility] . identifier[MakeType] (
identifier[compatibility] . identifier[GetName] ( identifier[cls] ),( identifier[cls] , identifier[cronjobs] . identifier[SystemCronJobBase] ),{})
keyword[return] identifier[reldb_cls]
keyword[return] identifier[Decorator]
|
def DualDBSystemCronJob(legacy_name=None, stateful=False):
"""Decorator that creates AFF4 and RELDB cronjobs from a given mixin."""
def Decorator(cls):
"""Decorator producing 2 classes: legacy style one and a new style one."""
if not legacy_name:
raise ValueError('legacy_name has to be provided') # depends on [control=['if'], data=[]]
# Legacy cron jobs have different base classes depending on whether they're
# stateful or not.
if stateful:
aff4_base_cls = StatefulSystemCronFlow # depends on [control=['if'], data=[]]
else:
aff4_base_cls = SystemCronFlow
# Make sure that we're dealing with a true mixin to avoid subtle errors.
if issubclass(cls, cronjobs.SystemCronJobBase):
raise ValueError("Mixin class shouldn't inherit from SystemCronJobBase") # depends on [control=['if'], data=[]]
if issubclass(cls, aff4_base_cls):
raise ValueError("Mixin class shouldn't inherit from %s" % aff4_base_cls.__name__) # depends on [control=['if'], data=[]]
# Generate legacy class. Register it within the module as it's not going
# to be returned from the decorator.
aff4_cls = compatibility.MakeType(legacy_name, (cls, LegacyCronJobAdapterMixin, aff4_base_cls), {})
module = sys.modules[cls.__module__]
setattr(module, legacy_name, aff4_cls)
# Generate new class. No need to register it in the module (like the legacy
# one) since it will replace the original decorated class.
reldb_cls = compatibility.MakeType(compatibility.GetName(cls), (cls, cronjobs.SystemCronJobBase), {})
return reldb_cls
return Decorator
|
def process_paper(model_name, pmid):
    """Process a paper with the given pubmed identifier

    Parameters
    ----------
    model_name : str
        The directory for the INDRA machine
    pmid : str
        The PMID to process.

    Returns
    -------
    rp : ReachProcessor
        A ReachProcessor containing the extracted INDRA Statements
        in rp.statements, or None if the text format was unsupported.
    txt_format : str
        A string representing the format of the text, or None if the full
        text could not be retrieved.
    """
    json_directory = os.path.join(model_name, 'jsons')
    json_path = os.path.join(json_directory, 'PMID%s.json' % pmid)
    # Warn about IDs that look like API artifacts rather than real PMIDs;
    # processing still continues, matching the original best-effort behavior.
    if pmid.startswith('api') or pmid.startswith('PMID'):
        logger.warning('Invalid PMID: %s' % pmid)
    # If the paper has been read, use the json output file
    if os.path.exists(json_path):
        rp = reach.process_json_file(json_path, citation=pmid)
        txt_format = 'existing_json'
    # If the paper has not been read, download the text and read
    else:
        try:
            txt, txt_format = get_full_text(pmid, 'pmid')
        except Exception:
            # Retrieval failures are treated as "no text available".
            return None, None
        if txt_format == 'pmc_oa_xml':
            rp = reach.process_nxml_str(txt, citation=pmid, offline=True,
                                        output_fname=json_path)
        elif txt_format in ('elsevier_xml', 'abstract'):
            if txt_format == 'elsevier_xml':
                # Extract the raw text from the Elsevier XML; after that both
                # formats go through the same plain-text reading path.
                txt = elsevier_client.extract_text(txt)
            rp = reach.process_text(txt, citation=pmid, offline=True,
                                    output_fname=json_path)
        else:
            # Unknown format: nothing we can read.
            rp = None
    if rp is not None:
        check_pmids(rp.statements)
    return rp, txt_format
|
def function[process_paper, parameter[model_name, pmid]]:
constant[Process a paper with the given pubmed identifier
Parameters
----------
model_name : str
The directory for the INDRA machine
pmid : str
The PMID to process.
Returns
-------
rp : ReachProcessor
A ReachProcessor containing the extracted INDRA Statements
in rp.statements.
txt_format : str
A string representing the format of the text
]
variable[json_directory] assign[=] call[name[os].path.join, parameter[name[model_name], constant[jsons]]]
variable[json_path] assign[=] call[name[os].path.join, parameter[name[json_directory], binary_operation[constant[PMID%s.json] <ast.Mod object at 0x7da2590d6920> name[pmid]]]]
if <ast.BoolOp object at 0x7da20c990d60> begin[:]
call[name[logger].warning, parameter[binary_operation[constant[Invalid PMID: %s] <ast.Mod object at 0x7da2590d6920> name[pmid]]]]
if call[name[os].path.exists, parameter[name[json_path]]] begin[:]
variable[rp] assign[=] call[name[reach].process_json_file, parameter[name[json_path]]]
variable[txt_format] assign[=] constant[existing_json]
if compare[name[rp] is_not constant[None]] begin[:]
call[name[check_pmids], parameter[name[rp].statements]]
return[tuple[[<ast.Name object at 0x7da18ede4b20>, <ast.Name object at 0x7da18ede6650>]]]
|
keyword[def] identifier[process_paper] ( identifier[model_name] , identifier[pmid] ):
literal[string]
identifier[json_directory] = identifier[os] . identifier[path] . identifier[join] ( identifier[model_name] , literal[string] )
identifier[json_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[json_directory] , literal[string] % identifier[pmid] )
keyword[if] identifier[pmid] . identifier[startswith] ( literal[string] ) keyword[or] identifier[pmid] . identifier[startswith] ( literal[string] ):
identifier[logger] . identifier[warning] ( literal[string] % identifier[pmid] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[json_path] ):
identifier[rp] = identifier[reach] . identifier[process_json_file] ( identifier[json_path] , identifier[citation] = identifier[pmid] )
identifier[txt_format] = literal[string]
keyword[else] :
keyword[try] :
identifier[txt] , identifier[txt_format] = identifier[get_full_text] ( identifier[pmid] , literal[string] )
keyword[except] identifier[Exception] :
keyword[return] keyword[None] , keyword[None]
keyword[if] identifier[txt_format] == literal[string] :
identifier[rp] = identifier[reach] . identifier[process_nxml_str] ( identifier[txt] , identifier[citation] = identifier[pmid] , identifier[offline] = keyword[True] ,
identifier[output_fname] = identifier[json_path] )
keyword[elif] identifier[txt_format] == literal[string] :
identifier[txt] = identifier[elsevier_client] . identifier[extract_text] ( identifier[txt] )
identifier[rp] = identifier[reach] . identifier[process_text] ( identifier[txt] , identifier[citation] = identifier[pmid] , identifier[offline] = keyword[True] ,
identifier[output_fname] = identifier[json_path] )
keyword[elif] identifier[txt_format] == literal[string] :
identifier[rp] = identifier[reach] . identifier[process_text] ( identifier[txt] , identifier[citation] = identifier[pmid] , identifier[offline] = keyword[True] ,
identifier[output_fname] = identifier[json_path] )
keyword[else] :
identifier[rp] = keyword[None]
keyword[if] identifier[rp] keyword[is] keyword[not] keyword[None] :
identifier[check_pmids] ( identifier[rp] . identifier[statements] )
keyword[return] identifier[rp] , identifier[txt_format]
|
def process_paper(model_name, pmid):
"""Process a paper with the given pubmed identifier
Parameters
----------
model_name : str
The directory for the INDRA machine
pmid : str
The PMID to process.
Returns
-------
rp : ReachProcessor
A ReachProcessor containing the extracted INDRA Statements
in rp.statements.
txt_format : str
A string representing the format of the text
"""
json_directory = os.path.join(model_name, 'jsons')
json_path = os.path.join(json_directory, 'PMID%s.json' % pmid)
if pmid.startswith('api') or pmid.startswith('PMID'):
logger.warning('Invalid PMID: %s' % pmid) # depends on [control=['if'], data=[]]
# If the paper has been read, use the json output file
if os.path.exists(json_path):
rp = reach.process_json_file(json_path, citation=pmid)
txt_format = 'existing_json' # depends on [control=['if'], data=[]]
else:
# If the paper has not been read, download the text and read
try:
(txt, txt_format) = get_full_text(pmid, 'pmid') # depends on [control=['try'], data=[]]
except Exception:
return (None, None) # depends on [control=['except'], data=[]]
if txt_format == 'pmc_oa_xml':
rp = reach.process_nxml_str(txt, citation=pmid, offline=True, output_fname=json_path) # depends on [control=['if'], data=[]]
elif txt_format == 'elsevier_xml':
# Extract the raw text from the Elsevier XML
txt = elsevier_client.extract_text(txt)
rp = reach.process_text(txt, citation=pmid, offline=True, output_fname=json_path) # depends on [control=['if'], data=[]]
elif txt_format == 'abstract':
rp = reach.process_text(txt, citation=pmid, offline=True, output_fname=json_path) # depends on [control=['if'], data=[]]
else:
rp = None
if rp is not None:
check_pmids(rp.statements) # depends on [control=['if'], data=['rp']]
return (rp, txt_format)
|
def _convert(self, payload):
    """
    Converts payload to a string. Complex objects are dumped to json
    """
    if isinstance(payload, six.string_types):
        return str(payload)
    # Non-string payloads are serialized deterministically (sorted keys).
    serialized = json.dumps(payload, cls=DefaultJSONEncoder, sort_keys=True)
    return str(serialized)
|
def function[_convert, parameter[self, payload]]:
constant[
Converts payload to a string. Complex objects are dumped to json
]
if <ast.UnaryOp object at 0x7da1b15e72e0> begin[:]
variable[payload] assign[=] call[name[json].dumps, parameter[name[payload]]]
return[call[name[str], parameter[name[payload]]]]
|
keyword[def] identifier[_convert] ( identifier[self] , identifier[payload] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[payload] , identifier[six] . identifier[string_types] ):
identifier[payload] = identifier[json] . identifier[dumps] ( identifier[payload] , identifier[cls] = identifier[DefaultJSONEncoder] , identifier[sort_keys] = keyword[True] )
keyword[return] identifier[str] ( identifier[payload] )
|
def _convert(self, payload):
"""
Converts payload to a string. Complex objects are dumped to json
"""
if not isinstance(payload, six.string_types):
payload = json.dumps(payload, cls=DefaultJSONEncoder, sort_keys=True) # depends on [control=['if'], data=[]]
return str(payload)
|
def OSLibpath(self):
    """
    Microsoft Windows SDK Libraries Paths
    """
    ref = os.path.join(self.si.WindowsSdkDir, 'References')
    libpath = []
    # Older toolchains (<= VS 2008) take the plain OS library directories.
    if self.vc_ver <= 9.0:
        libpath.extend(self.OSLibraries)
    if self.vc_ver >= 11.0:
        libpath.append(os.path.join(ref, r'CommonConfiguration\Neutral'))
    # VS 2015+ additionally needs the universal API contract references.
    if self.vc_ver >= 14.0:
        sdk = self.si.WindowsSdkDir
        contracts = (
            ('Windows.Foundation.UniversalApiContract', '1.0.0.0'),
            ('Windows.Foundation.FoundationContract', '1.0.0.0'),
            ('Windows.Networking.Connectivity.WwanContract', '1.0.0.0'),
        )
        libpath.append(ref)
        libpath.append(os.path.join(sdk, 'UnionMetadata'))
        libpath.extend(
            os.path.join(ref, name, version) for name, version in contracts)
        libpath.append(os.path.join(
            sdk, 'ExtensionSDKs', 'Microsoft.VCLibs', '%0.1f' % self.vc_ver,
            'References', 'CommonConfiguration', 'neutral'))
    return libpath
|
def function[OSLibpath, parameter[self]]:
constant[
Microsoft Windows SDK Libraries Paths
]
variable[ref] assign[=] call[name[os].path.join, parameter[name[self].si.WindowsSdkDir, constant[References]]]
variable[libpath] assign[=] list[[]]
if compare[name[self].vc_ver less_or_equal[<=] constant[9.0]] begin[:]
<ast.AugAssign object at 0x7da1b1b86560>
if compare[name[self].vc_ver greater_or_equal[>=] constant[11.0]] begin[:]
<ast.AugAssign object at 0x7da1b1b86140>
if compare[name[self].vc_ver greater_or_equal[>=] constant[14.0]] begin[:]
<ast.AugAssign object at 0x7da1b1b842e0>
return[name[libpath]]
|
keyword[def] identifier[OSLibpath] ( identifier[self] ):
literal[string]
identifier[ref] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[si] . identifier[WindowsSdkDir] , literal[string] )
identifier[libpath] =[]
keyword[if] identifier[self] . identifier[vc_ver] <= literal[int] :
identifier[libpath] += identifier[self] . identifier[OSLibraries]
keyword[if] identifier[self] . identifier[vc_ver] >= literal[int] :
identifier[libpath] +=[ identifier[os] . identifier[path] . identifier[join] ( identifier[ref] , literal[string] )]
keyword[if] identifier[self] . identifier[vc_ver] >= literal[int] :
identifier[libpath] +=[
identifier[ref] ,
identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[si] . identifier[WindowsSdkDir] , literal[string] ),
identifier[os] . identifier[path] . identifier[join] (
identifier[ref] ,
literal[string] ,
literal[string] ,
),
identifier[os] . identifier[path] . identifier[join] (
identifier[ref] ,
literal[string] ,
literal[string] ,
),
identifier[os] . identifier[path] . identifier[join] (
identifier[ref] ,
literal[string] ,
literal[string] ,
),
identifier[os] . identifier[path] . identifier[join] (
identifier[self] . identifier[si] . identifier[WindowsSdkDir] ,
literal[string] ,
literal[string] ,
literal[string] % identifier[self] . identifier[vc_ver] ,
literal[string] ,
literal[string] ,
literal[string] ,
),
]
keyword[return] identifier[libpath]
|
def OSLibpath(self):
"""
Microsoft Windows SDK Libraries Paths
"""
ref = os.path.join(self.si.WindowsSdkDir, 'References')
libpath = []
if self.vc_ver <= 9.0:
libpath += self.OSLibraries # depends on [control=['if'], data=[]]
if self.vc_ver >= 11.0:
libpath += [os.path.join(ref, 'CommonConfiguration\\Neutral')] # depends on [control=['if'], data=[]]
if self.vc_ver >= 14.0:
libpath += [ref, os.path.join(self.si.WindowsSdkDir, 'UnionMetadata'), os.path.join(ref, 'Windows.Foundation.UniversalApiContract', '1.0.0.0'), os.path.join(ref, 'Windows.Foundation.FoundationContract', '1.0.0.0'), os.path.join(ref, 'Windows.Networking.Connectivity.WwanContract', '1.0.0.0'), os.path.join(self.si.WindowsSdkDir, 'ExtensionSDKs', 'Microsoft.VCLibs', '%0.1f' % self.vc_ver, 'References', 'CommonConfiguration', 'neutral')] # depends on [control=['if'], data=[]]
return libpath
|
def _load_results(self, container_id):
"""
load results from recent build
:return: BuildResults
"""
if self.temp_dir:
dt = DockerTasker()
# FIXME: load results only when requested
# results_path = os.path.join(self.temp_dir, RESULTS_JSON)
# df_path = os.path.join(self.temp_dir, 'Dockerfile')
# try:
# with open(results_path, 'r') as results_fp:
# results = json.load(results_fp, cls=BuildResultsJSONDecoder)
# except (IOError, OSError) as ex:
# logger.error("Can't open results: '%s'", repr(ex))
# for l in self.dt.logs(self.build_container_id, stream=False):
# logger.debug(l.strip())
# raise RuntimeError("Can't open results: '%s'" % repr(ex))
# results.dockerfile = open(df_path, 'r').read()
results = BuildResults()
results.build_logs = dt.logs(container_id, stream=False)
results.container_id = container_id
return results
|
def function[_load_results, parameter[self, container_id]]:
constant[
load results from recent build
:return: BuildResults
]
if name[self].temp_dir begin[:]
variable[dt] assign[=] call[name[DockerTasker], parameter[]]
variable[results] assign[=] call[name[BuildResults], parameter[]]
name[results].build_logs assign[=] call[name[dt].logs, parameter[name[container_id]]]
name[results].container_id assign[=] name[container_id]
return[name[results]]
|
keyword[def] identifier[_load_results] ( identifier[self] , identifier[container_id] ):
literal[string]
keyword[if] identifier[self] . identifier[temp_dir] :
identifier[dt] = identifier[DockerTasker] ()
identifier[results] = identifier[BuildResults] ()
identifier[results] . identifier[build_logs] = identifier[dt] . identifier[logs] ( identifier[container_id] , identifier[stream] = keyword[False] )
identifier[results] . identifier[container_id] = identifier[container_id]
keyword[return] identifier[results]
|
def _load_results(self, container_id):
"""
load results from recent build
:return: BuildResults
"""
if self.temp_dir:
dt = DockerTasker()
# FIXME: load results only when requested
# results_path = os.path.join(self.temp_dir, RESULTS_JSON)
# df_path = os.path.join(self.temp_dir, 'Dockerfile')
# try:
# with open(results_path, 'r') as results_fp:
# results = json.load(results_fp, cls=BuildResultsJSONDecoder)
# except (IOError, OSError) as ex:
# logger.error("Can't open results: '%s'", repr(ex))
# for l in self.dt.logs(self.build_container_id, stream=False):
# logger.debug(l.strip())
# raise RuntimeError("Can't open results: '%s'" % repr(ex))
# results.dockerfile = open(df_path, 'r').read()
results = BuildResults()
results.build_logs = dt.logs(container_id, stream=False)
results.container_id = container_id
return results # depends on [control=['if'], data=[]]
|
def dePrefixAndSuffixFasta(sequences):
    """
    sequences: an iterator producing Bio.Seq sequences.
    return: a generator of sequences with no duplicates and no fully contained
        subsequences.
    """
    # Longest first: every prefix/suffix of an emitted sequence is recorded
    # before any shorter sequence is examined.
    ordered = sorted(sequences, key=lambda rec: len(rec.seq), reverse=True)
    seen = set()
    for record in ordered:
        text = str(record.seq)
        digest = md5(text.encode('UTF-8')).digest()
        if digest in seen:
            continue
        # Record digests of every prefix, built incrementally (this includes
        # the full sequence itself) ...
        running = md5()
        for ch in text:
            running.update(ch.encode('UTF-8'))
            seen.add(running.digest())
        # ... and of every proper suffix.
        for offset in range(1, len(text)):
            seen.add(md5(text[offset:].encode('UTF-8')).digest())
        yield record
|
def function[dePrefixAndSuffixFasta, parameter[sequences]]:
constant[
sequences: an iterator producing Bio.Seq sequences.
return: a generator of sequences with no duplicates and no fully contained
subsequences.
]
variable[sequences] assign[=] call[name[sorted], parameter[name[sequences]]]
variable[seen] assign[=] call[name[set], parameter[]]
for taget[name[s]] in starred[name[sequences]] begin[:]
variable[thisSeq] assign[=] call[name[str], parameter[name[s].seq]]
variable[thisHash] assign[=] call[call[name[md5], parameter[call[name[thisSeq].encode, parameter[constant[UTF-8]]]]].digest, parameter[]]
if compare[name[thisHash] <ast.NotIn object at 0x7da2590d7190> name[seen]] begin[:]
variable[newHash] assign[=] call[name[md5], parameter[]]
for taget[name[nucl]] in starred[name[thisSeq]] begin[:]
call[name[newHash].update, parameter[call[name[nucl].encode, parameter[constant[UTF-8]]]]]
call[name[seen].add, parameter[call[name[newHash].digest, parameter[]]]]
for taget[name[start]] in starred[call[name[range], parameter[binary_operation[call[name[len], parameter[name[thisSeq]]] - constant[1]]]]] begin[:]
call[name[seen].add, parameter[call[call[name[md5], parameter[call[call[name[thisSeq]][<ast.Slice object at 0x7da20c794070>].encode, parameter[constant[UTF-8]]]]].digest, parameter[]]]]
<ast.Yield object at 0x7da20c794370>
|
keyword[def] identifier[dePrefixAndSuffixFasta] ( identifier[sequences] ):
literal[string]
identifier[sequences] = identifier[sorted] ( identifier[sequences] , identifier[key] = keyword[lambda] identifier[s] : identifier[len] ( identifier[s] . identifier[seq] ), identifier[reverse] = keyword[True] )
identifier[seen] = identifier[set] ()
keyword[for] identifier[s] keyword[in] identifier[sequences] :
identifier[thisSeq] = identifier[str] ( identifier[s] . identifier[seq] )
identifier[thisHash] = identifier[md5] ( identifier[thisSeq] . identifier[encode] ( literal[string] )). identifier[digest] ()
keyword[if] identifier[thisHash] keyword[not] keyword[in] identifier[seen] :
identifier[newHash] = identifier[md5] ()
keyword[for] identifier[nucl] keyword[in] identifier[thisSeq] :
identifier[newHash] . identifier[update] ( identifier[nucl] . identifier[encode] ( literal[string] ))
identifier[seen] . identifier[add] ( identifier[newHash] . identifier[digest] ())
keyword[for] identifier[start] keyword[in] identifier[range] ( identifier[len] ( identifier[thisSeq] )- literal[int] ):
identifier[seen] . identifier[add] ( identifier[md5] ( identifier[thisSeq] [ identifier[start] + literal[int] :]. identifier[encode] ( literal[string] )). identifier[digest] ())
keyword[yield] identifier[s]
|
def dePrefixAndSuffixFasta(sequences):
"""
sequences: an iterator producing Bio.Seq sequences.
return: a generator of sequences with no duplicates and no fully contained
subsequences.
"""
sequences = sorted(sequences, key=lambda s: len(s.seq), reverse=True)
seen = set()
for s in sequences:
thisSeq = str(s.seq)
thisHash = md5(thisSeq.encode('UTF-8')).digest()
if thisHash not in seen:
# Add prefixes.
newHash = md5()
for nucl in thisSeq:
newHash.update(nucl.encode('UTF-8'))
seen.add(newHash.digest()) # depends on [control=['for'], data=['nucl']]
# Add suffixes.
for start in range(len(thisSeq) - 1):
seen.add(md5(thisSeq[start + 1:].encode('UTF-8')).digest()) # depends on [control=['for'], data=['start']]
yield s # depends on [control=['if'], data=['seen']] # depends on [control=['for'], data=['s']]
|
def adapt_datetimefield_value(self, value):
    """
    Adapt a datetime value into the form the backend driver expects for
    datetime columns.
    """
    if value is None:
        return None
    tz_enabled = self.connection._DJANGO_VERSION >= 14 and settings.USE_TZ
    if tz_enabled and timezone.is_aware(value):
        # pyodbc doesn't support datetimeoffset, so normalize to UTC.
        value = value.astimezone(timezone.utc)
    if not self.connection.features.supports_microsecond_precision:
        # Backend stores whole seconds only; drop sub-second precision.
        value = value.replace(microsecond=0)
    return value
|
def function[adapt_datetimefield_value, parameter[self, value]]:
constant[
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
]
if compare[name[value] is constant[None]] begin[:]
return[constant[None]]
if <ast.BoolOp object at 0x7da1b0ca4190> begin[:]
if call[name[timezone].is_aware, parameter[name[value]]] begin[:]
variable[value] assign[=] call[name[value].astimezone, parameter[name[timezone].utc]]
if <ast.UnaryOp object at 0x7da1b0ca4310> begin[:]
variable[value] assign[=] call[name[value].replace, parameter[]]
return[name[value]]
|
keyword[def] identifier[adapt_datetimefield_value] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[if] identifier[self] . identifier[connection] . identifier[_DJANGO_VERSION] >= literal[int] keyword[and] identifier[settings] . identifier[USE_TZ] :
keyword[if] identifier[timezone] . identifier[is_aware] ( identifier[value] ):
identifier[value] = identifier[value] . identifier[astimezone] ( identifier[timezone] . identifier[utc] )
keyword[if] keyword[not] identifier[self] . identifier[connection] . identifier[features] . identifier[supports_microsecond_precision] :
identifier[value] = identifier[value] . identifier[replace] ( identifier[microsecond] = literal[int] )
keyword[return] identifier[value]
|
def adapt_datetimefield_value(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None # depends on [control=['if'], data=[]]
if self.connection._DJANGO_VERSION >= 14 and settings.USE_TZ:
if timezone.is_aware(value):
# pyodbc donesn't support datetimeoffset
value = value.astimezone(timezone.utc) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not self.connection.features.supports_microsecond_precision:
value = value.replace(microsecond=0) # depends on [control=['if'], data=[]]
return value
|
def func(self, f, state):
    '''Intended to be overridden by subclasses. Raises
    NotImplementedError.'''
    raise NotImplementedError(
        'Tried to use unimplemented lens {}.'.format(type(self)))
|
def function[func, parameter[self, f, state]]:
constant[Intended to be overridden by subclasses. Raises
NotImplementedError.]
variable[message] assign[=] constant[Tried to use unimplemented lens {}.]
<ast.Raise object at 0x7da20c6a8370>
|
keyword[def] identifier[func] ( identifier[self] , identifier[f] , identifier[state] ):
literal[string]
identifier[message] = literal[string]
keyword[raise] identifier[NotImplementedError] ( identifier[message] . identifier[format] ( identifier[type] ( identifier[self] )))
|
def func(self, f, state):
"""Intended to be overridden by subclasses. Raises
NotImplementedError."""
message = 'Tried to use unimplemented lens {}.'
raise NotImplementedError(message.format(type(self)))
|
def gantry_axes(cls) -> Tuple['Axis', 'Axis', 'Axis', 'Axis']:
    """ The axes which are tied to the gantry and require the deck
    calibration transform
    """
    return tuple(getattr(cls, axis) for axis in ('X', 'Y', 'Z', 'A'))
|
def function[gantry_axes, parameter[cls]]:
constant[ The axes which are tied to the gantry and require the deck
calibration transform
]
return[tuple[[<ast.Attribute object at 0x7da1b0924a00>, <ast.Attribute object at 0x7da1b09257b0>, <ast.Attribute object at 0x7da204345390>, <ast.Attribute object at 0x7da2043472b0>]]]
|
keyword[def] identifier[gantry_axes] ( identifier[cls] )-> identifier[Tuple] [ literal[string] , literal[string] , literal[string] , literal[string] ]:
literal[string]
keyword[return] ( identifier[cls] . identifier[X] , identifier[cls] . identifier[Y] , identifier[cls] . identifier[Z] , identifier[cls] . identifier[A] )
|
def gantry_axes(cls) -> Tuple['Axis', 'Axis', 'Axis', 'Axis']:
""" The axes which are tied to the gantry and require the deck
calibration transform
"""
return (cls.X, cls.Y, cls.Z, cls.A)
|
def after_request(self, region, endpoint_name, method_name, url, response):
    """
    Hook invoked once a response has arrived, before handing it back to the
    caller. Feeds the response to every registered rate limiter.

    :param string region: region the request was sent to
    :param string endpoint_name: name of the endpoint that was requested
    :param string method_name: name of the method that was requested
    :param url: the URL that was requested
    :param response: the received Requests-library response
    :return: the response, unchanged
    """
    for rate_limiter in self._limiters:
        rate_limiter.update_limiter(region, endpoint_name, method_name,
                                    response)
    return response
|
def function[after_request, parameter[self, region, endpoint_name, method_name, url, response]]:
constant[
Called after a response is received and before it is returned to the user.
:param string region: the region of this request
:param string endpoint_name: the name of the endpoint that was requested
:param string method_name: the name of the method that was requested
:param url: The url that was requested
:param response: the response received. This is a response from the Requests library
]
for taget[name[limiter]] in starred[name[self]._limiters] begin[:]
call[name[limiter].update_limiter, parameter[name[region], name[endpoint_name], name[method_name], name[response]]]
return[name[response]]
|
keyword[def] identifier[after_request] ( identifier[self] , identifier[region] , identifier[endpoint_name] , identifier[method_name] , identifier[url] , identifier[response] ):
literal[string]
keyword[for] identifier[limiter] keyword[in] identifier[self] . identifier[_limiters] :
identifier[limiter] . identifier[update_limiter] ( identifier[region] , identifier[endpoint_name] , identifier[method_name] , identifier[response] )
keyword[return] identifier[response]
|
def after_request(self, region, endpoint_name, method_name, url, response):
"""
Called after a response is received and before it is returned to the user.
:param string region: the region of this request
:param string endpoint_name: the name of the endpoint that was requested
:param string method_name: the name of the method that was requested
:param url: The url that was requested
:param response: the response received. This is a response from the Requests library
"""
for limiter in self._limiters:
limiter.update_limiter(region, endpoint_name, method_name, response) # depends on [control=['for'], data=['limiter']]
return response
|
def is_declared(self):
    """
    :return: True is the table is declared in the schema.
    """
    sql = 'SHOW TABLES in `{database}` LIKE "{table_name}"'.format(
        database=self.database, table_name=self.table_name)
    # A non-empty result set means the table exists.
    return self.connection.query(sql).rowcount > 0
|
def function[is_declared, parameter[self]]:
constant[
:return: True is the table is declared in the schema.
]
return[compare[call[name[self].connection.query, parameter[call[constant[SHOW TABLES in `{database}` LIKE "{table_name}"].format, parameter[]]]].rowcount greater[>] constant[0]]]
|
keyword[def] identifier[is_declared] ( identifier[self] ):
literal[string]
keyword[return] identifier[self] . identifier[connection] . identifier[query] (
literal[string] . identifier[format] (
identifier[database] = identifier[self] . identifier[database] , identifier[table_name] = identifier[self] . identifier[table_name] )). identifier[rowcount] > literal[int]
|
def is_declared(self):
"""
:return: True is the table is declared in the schema.
"""
return self.connection.query('SHOW TABLES in `{database}` LIKE "{table_name}"'.format(database=self.database, table_name=self.table_name)).rowcount > 0
|
def typeSort(self):
    '''
    Recursively orders this node's ``children`` list in place: sort the
    immediate children first, then ask each child to do the same for its
    own subtree.  Refer to :func:`~exhale.graph.ExhaleRoot.deepSortList`
    for more information on when this is necessary.
    '''
    self.children.sort()
    for child in self.children:
        child.typeSort()
|
def function[typeSort, parameter[self]]:
constant[
Sorts ``self.children`` in place, and has each child sort its own children.
Refer to :func:`~exhale.graph.ExhaleRoot.deepSortList` for more information on
when this is necessary.
]
call[name[self].children.sort, parameter[]]
for taget[name[c]] in starred[name[self].children] begin[:]
call[name[c].typeSort, parameter[]]
|
keyword[def] identifier[typeSort] ( identifier[self] ):
literal[string]
identifier[self] . identifier[children] . identifier[sort] ()
keyword[for] identifier[c] keyword[in] identifier[self] . identifier[children] :
identifier[c] . identifier[typeSort] ()
|
def typeSort(self):
"""
Sorts ``self.children`` in place, and has each child sort its own children.
Refer to :func:`~exhale.graph.ExhaleRoot.deepSortList` for more information on
when this is necessary.
"""
self.children.sort()
for c in self.children:
c.typeSort() # depends on [control=['for'], data=['c']]
|
def makefractalCIJ(mx_lvl, E, sz_cl, seed=None):
    '''
    This function generates a directed network with a hierarchical modular
    organization. All modules are fully connected and connection density
    decays as 1/(E^n), with n = index of hierarchical level.
    Parameters
    ----------
    mx_lvl : int
        number of hierarchical levels, N = 2^mx_lvl
    E : int
        connection density fall off per level
    sz_cl : int
        size of clusters (must be power of 2)
    seed : hashable, optional
        If None (default), use the np.random's global random state to generate random numbers.
        Otherwise, use a new np.random.RandomState instance seeded with the given value.
    Returns
    -------
    CIJ : NxN np.ndarray
        connection matrix
    K : int
        number of connections present in output CIJ
    '''
    rng = get_rng(seed)
    # 2x2 seed template; every entry starts at 2 (the deepest level marker)
    t = np.ones((2, 2)) * 2
    # compute N and cluster size
    n = 2**mx_lvl
    sz_cl -= 1  # convert cluster size to a 0-based level offset
    # Double the template once per level: the two diagonal blocks receive the
    # previous-level template, then every entry is incremented, so the final
    # cell values encode the hierarchical depth at which two nodes first meet.
    # NOTE(review): with mx_lvl == 1 this loop never runs and `s` below is
    # unbound -- the function assumes mx_lvl >= 2; confirm with callers.
    for lvl in range(1, mx_lvl):
        s = 2**(lvl + 1)  # side length of the template at this level
        CIJ = np.ones((s, s))
        grp1 = range(int(s / 2))       # rows/cols of the upper-left block
        grp2 = range(int(s / 2), s)    # rows/cols of the lower-right block
        # flat (row * s + col) indices of the two diagonal blocks
        ix1 = np.add.outer(np.array(grp1) * s, grp1).flatten()
        ix2 = np.add.outer(np.array(grp2) * s, grp2).flatten()
        CIJ.flat[ix1] = t  # flat assignment: copy template into both blocks
        CIJ.flat[ix2] = t
        CIJ += 1
        t = CIJ.copy()
    # Rescale entries into level indices; the mx_lvl * eye term pushes the
    # diagonal far negative so self-connections never survive below.
    CIJ -= (np.ones((s, s)) + mx_lvl * np.eye(s))
    # assign connection probabilities: pairs within the cluster size get
    # ee == 0 (probability 1); each level above divides the density by E
    ee = mx_lvl - CIJ - sz_cl
    ee = (ee > 0) * ee
    prob = (1 / E**ee) * (np.ones((s, s)) - np.eye(s))
    # sample the directed edges; at this point s == n == 2**mx_lvl
    CIJ = (prob > rng.random_sample((n, n)))
    # count connections
    k = np.sum(CIJ)
    return np.array(CIJ, dtype=int), k
|
def function[makefractalCIJ, parameter[mx_lvl, E, sz_cl, seed]]:
constant[
This function generates a directed network with a hierarchical modular
organization. All modules are fully connected and connection density
decays as 1/(E^n), with n = index of hierarchical level.
Parameters
----------
mx_lvl : int
number of hierarchical levels, N = 2^mx_lvl
E : int
connection density fall off per level
sz_cl : int
size of clusters (must be power of 2)
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
connection matrix
K : int
number of connections present in output CIJ
]
variable[rng] assign[=] call[name[get_rng], parameter[name[seed]]]
variable[t] assign[=] binary_operation[call[name[np].ones, parameter[tuple[[<ast.Constant object at 0x7da1b08c8430>, <ast.Constant object at 0x7da1b08ca0e0>]]]] * constant[2]]
variable[n] assign[=] binary_operation[constant[2] ** name[mx_lvl]]
<ast.AugAssign object at 0x7da2041d93c0>
for taget[name[lvl]] in starred[call[name[range], parameter[constant[1], name[mx_lvl]]]] begin[:]
variable[s] assign[=] binary_operation[constant[2] ** binary_operation[name[lvl] + constant[1]]]
variable[CIJ] assign[=] call[name[np].ones, parameter[tuple[[<ast.Name object at 0x7da2041db400>, <ast.Name object at 0x7da2041d8700>]]]]
variable[grp1] assign[=] call[name[range], parameter[call[name[int], parameter[binary_operation[name[s] / constant[2]]]]]]
variable[grp2] assign[=] call[name[range], parameter[call[name[int], parameter[binary_operation[name[s] / constant[2]]]], name[s]]]
variable[ix1] assign[=] call[call[name[np].add.outer, parameter[binary_operation[call[name[np].array, parameter[name[grp1]]] * name[s]], name[grp1]]].flatten, parameter[]]
variable[ix2] assign[=] call[call[name[np].add.outer, parameter[binary_operation[call[name[np].array, parameter[name[grp2]]] * name[s]], name[grp2]]].flatten, parameter[]]
call[name[CIJ].flat][name[ix1]] assign[=] name[t]
call[name[CIJ].flat][name[ix2]] assign[=] name[t]
<ast.AugAssign object at 0x7da1b088d810>
variable[t] assign[=] call[name[CIJ].copy, parameter[]]
<ast.AugAssign object at 0x7da1b088ee90>
variable[ee] assign[=] binary_operation[binary_operation[name[mx_lvl] - name[CIJ]] - name[sz_cl]]
variable[ee] assign[=] binary_operation[compare[name[ee] greater[>] constant[0]] * name[ee]]
variable[prob] assign[=] binary_operation[binary_operation[constant[1] / binary_operation[name[E] ** name[ee]]] * binary_operation[call[name[np].ones, parameter[tuple[[<ast.Name object at 0x7da1b084ec20>, <ast.Name object at 0x7da1b084c6a0>]]]] - call[name[np].eye, parameter[name[s]]]]]
variable[CIJ] assign[=] compare[name[prob] greater[>] call[name[rng].random_sample, parameter[tuple[[<ast.Name object at 0x7da1b084e620>, <ast.Name object at 0x7da1b084efb0>]]]]]
variable[k] assign[=] call[name[np].sum, parameter[name[CIJ]]]
return[tuple[[<ast.Call object at 0x7da1b084fd30>, <ast.Name object at 0x7da1b084f3d0>]]]
|
keyword[def] identifier[makefractalCIJ] ( identifier[mx_lvl] , identifier[E] , identifier[sz_cl] , identifier[seed] = keyword[None] ):
literal[string]
identifier[rng] = identifier[get_rng] ( identifier[seed] )
identifier[t] = identifier[np] . identifier[ones] (( literal[int] , literal[int] ))* literal[int]
identifier[n] = literal[int] ** identifier[mx_lvl]
identifier[sz_cl] -= literal[int]
keyword[for] identifier[lvl] keyword[in] identifier[range] ( literal[int] , identifier[mx_lvl] ):
identifier[s] = literal[int] **( identifier[lvl] + literal[int] )
identifier[CIJ] = identifier[np] . identifier[ones] (( identifier[s] , identifier[s] ))
identifier[grp1] = identifier[range] ( identifier[int] ( identifier[s] / literal[int] ))
identifier[grp2] = identifier[range] ( identifier[int] ( identifier[s] / literal[int] ), identifier[s] )
identifier[ix1] = identifier[np] . identifier[add] . identifier[outer] ( identifier[np] . identifier[array] ( identifier[grp1] )* identifier[s] , identifier[grp1] ). identifier[flatten] ()
identifier[ix2] = identifier[np] . identifier[add] . identifier[outer] ( identifier[np] . identifier[array] ( identifier[grp2] )* identifier[s] , identifier[grp2] ). identifier[flatten] ()
identifier[CIJ] . identifier[flat] [ identifier[ix1] ]= identifier[t]
identifier[CIJ] . identifier[flat] [ identifier[ix2] ]= identifier[t]
identifier[CIJ] += literal[int]
identifier[t] = identifier[CIJ] . identifier[copy] ()
identifier[CIJ] -=( identifier[np] . identifier[ones] (( identifier[s] , identifier[s] ))+ identifier[mx_lvl] * identifier[np] . identifier[eye] ( identifier[s] ))
identifier[ee] = identifier[mx_lvl] - identifier[CIJ] - identifier[sz_cl]
identifier[ee] =( identifier[ee] > literal[int] )* identifier[ee]
identifier[prob] =( literal[int] / identifier[E] ** identifier[ee] )*( identifier[np] . identifier[ones] (( identifier[s] , identifier[s] ))- identifier[np] . identifier[eye] ( identifier[s] ))
identifier[CIJ] =( identifier[prob] > identifier[rng] . identifier[random_sample] (( identifier[n] , identifier[n] )))
identifier[k] = identifier[np] . identifier[sum] ( identifier[CIJ] )
keyword[return] identifier[np] . identifier[array] ( identifier[CIJ] , identifier[dtype] = identifier[int] ), identifier[k]
|
def makefractalCIJ(mx_lvl, E, sz_cl, seed=None):
"""
This function generates a directed network with a hierarchical modular
organization. All modules are fully connected and connection density
decays as 1/(E^n), with n = index of hierarchical level.
Parameters
----------
mx_lvl : int
number of hierarchical levels, N = 2^mx_lvl
E : int
connection density fall off per level
sz_cl : int
size of clusters (must be power of 2)
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
connection matrix
K : int
number of connections present in output CIJ
"""
rng = get_rng(seed)
# make a stupid little template
t = np.ones((2, 2)) * 2
# compute N and cluster size
n = 2 ** mx_lvl
sz_cl -= 1
for lvl in range(1, mx_lvl):
s = 2 ** (lvl + 1)
CIJ = np.ones((s, s))
grp1 = range(int(s / 2))
grp2 = range(int(s / 2), s)
ix1 = np.add.outer(np.array(grp1) * s, grp1).flatten()
ix2 = np.add.outer(np.array(grp2) * s, grp2).flatten()
CIJ.flat[ix1] = t # numpy indexing is teh sucks :(
CIJ.flat[ix2] = t
CIJ += 1
t = CIJ.copy() # depends on [control=['for'], data=['lvl']]
CIJ -= np.ones((s, s)) + mx_lvl * np.eye(s)
# assign connection probabilities
ee = mx_lvl - CIJ - sz_cl
ee = (ee > 0) * ee
prob = 1 / E ** ee * (np.ones((s, s)) - np.eye(s))
CIJ = prob > rng.random_sample((n, n))
# count connections
k = np.sum(CIJ)
return (np.array(CIJ, dtype=int), k)
|
def request(self, url, parameters):
    """Perform a http(s) GET request for given parameters to given URL
    Keyword arguments:
    url -- API url
    parameters -- dict with payload; urlencoded into the query string.
    Returns the raw response body (whatever ``response.read()`` yields).
    Raises UnknownOutputFormatException (400), AuthorizationRequiredException
    (401), NotEnoughCreditsException (402), AccessDeniedException (403),
    InvalidMacOrOuiException (422), or ServerErrorException (5xx, any other
    HTTP error, or a non-2xx success code).
    """
    try:
        # Build an authenticated GET request: parameters travel in the query
        # string, the API key in the X-Authentication-Token header.
        request = Request(url + '?' + urlencode(parameters), None, {
            'X-Authentication-Token': self.api_key,
            'User-Agent': self.user_agent
        },
            None,
            False,
            "GET"
        )
        response = urlopen(request)
    except HTTPError as e:
        self.logger.error('Code: ' + str(e.code))
        self.logger.error('Response: ' + str(e.reason))
        # Map the API's documented status codes onto library exceptions.
        if e.code == 400:
            raise UnknownOutputFormatException()
        if e.code == 401:
            raise AuthorizationRequiredException()
        if e.code == 402:
            raise NotEnoughCreditsException()
        if e.code == 403:
            raise AccessDeniedException()
        if e.code == 422:
            raise InvalidMacOrOuiException()
        if e.code >= 500:
            raise ServerErrorException("Response code: {}".format(e.code))
        # Any other HTTP error code is treated as a server-side failure.
        raise ServerErrorException(e.reason)
    # urlopen raises HTTPError only for >= 400; still reject non-2xx codes.
    if response.code >= 300 or response.code < 200:
        raise ServerErrorException("Response code: {}".format(response.code))
    headers = dict(response.getheaders())
    # Surface any Warning header the server attached to the response.
    if "Warning" in headers.keys():
        self.logger.warning(headers["Warning"])
    self.logger.debug(response.info())
    return response.read()
|
def function[request, parameter[self, url, parameters]]:
constant[Perform a http(s) request for given parameters to given URL
Keyword arguments:
url -- API url
parameters -- dict with payload.
]
<ast.Try object at 0x7da18f720e80>
if <ast.BoolOp object at 0x7da18f813520> begin[:]
<ast.Raise object at 0x7da18f810040>
variable[headers] assign[=] call[name[dict], parameter[call[name[response].getheaders, parameter[]]]]
if compare[constant[Warning] in call[name[headers].keys, parameter[]]] begin[:]
call[name[self].logger.warning, parameter[call[name[headers]][constant[Warning]]]]
call[name[self].logger.debug, parameter[call[name[response].info, parameter[]]]]
return[call[name[response].read, parameter[]]]
|
keyword[def] identifier[request] ( identifier[self] , identifier[url] , identifier[parameters] ):
literal[string]
keyword[try] :
identifier[request] = identifier[Request] ( identifier[url] + literal[string] + identifier[urlencode] ( identifier[parameters] ), keyword[None] ,{
literal[string] : identifier[self] . identifier[api_key] ,
literal[string] : identifier[self] . identifier[user_agent]
},
keyword[None] ,
keyword[False] ,
literal[string]
)
identifier[response] = identifier[urlopen] ( identifier[request] )
keyword[except] identifier[HTTPError] keyword[as] identifier[e] :
identifier[self] . identifier[logger] . identifier[error] ( literal[string] + identifier[str] ( identifier[e] . identifier[code] ))
identifier[self] . identifier[logger] . identifier[error] ( literal[string] + identifier[str] ( identifier[e] . identifier[reason] ))
keyword[if] identifier[e] . identifier[code] == literal[int] :
keyword[raise] identifier[UnknownOutputFormatException] ()
keyword[if] identifier[e] . identifier[code] == literal[int] :
keyword[raise] identifier[AuthorizationRequiredException] ()
keyword[if] identifier[e] . identifier[code] == literal[int] :
keyword[raise] identifier[NotEnoughCreditsException] ()
keyword[if] identifier[e] . identifier[code] == literal[int] :
keyword[raise] identifier[AccessDeniedException] ()
keyword[if] identifier[e] . identifier[code] == literal[int] :
keyword[raise] identifier[InvalidMacOrOuiException] ()
keyword[if] identifier[e] . identifier[code] >= literal[int] :
keyword[raise] identifier[ServerErrorException] ( literal[string] . identifier[format] ( identifier[e] . identifier[code] ))
keyword[raise] identifier[ServerErrorException] ( identifier[e] . identifier[reason] )
keyword[if] identifier[response] . identifier[code] >= literal[int] keyword[or] identifier[response] . identifier[code] < literal[int] :
keyword[raise] identifier[ServerErrorException] ( literal[string] . identifier[format] ( identifier[response] . identifier[code] ))
identifier[headers] = identifier[dict] ( identifier[response] . identifier[getheaders] ())
keyword[if] literal[string] keyword[in] identifier[headers] . identifier[keys] ():
identifier[self] . identifier[logger] . identifier[warning] ( identifier[headers] [ literal[string] ])
identifier[self] . identifier[logger] . identifier[debug] ( identifier[response] . identifier[info] ())
keyword[return] identifier[response] . identifier[read] ()
|
def request(self, url, parameters):
"""Perform a http(s) request for given parameters to given URL
Keyword arguments:
url -- API url
parameters -- dict with payload.
"""
try:
request = Request(url + '?' + urlencode(parameters), None, {'X-Authentication-Token': self.api_key, 'User-Agent': self.user_agent}, None, False, 'GET')
response = urlopen(request) # depends on [control=['try'], data=[]]
except HTTPError as e:
self.logger.error('Code: ' + str(e.code))
self.logger.error('Response: ' + str(e.reason))
if e.code == 400:
raise UnknownOutputFormatException() # depends on [control=['if'], data=[]]
if e.code == 401:
raise AuthorizationRequiredException() # depends on [control=['if'], data=[]]
if e.code == 402:
raise NotEnoughCreditsException() # depends on [control=['if'], data=[]]
if e.code == 403:
raise AccessDeniedException() # depends on [control=['if'], data=[]]
if e.code == 422:
raise InvalidMacOrOuiException() # depends on [control=['if'], data=[]]
if e.code >= 500:
raise ServerErrorException('Response code: {}'.format(e.code)) # depends on [control=['if'], data=[]]
raise ServerErrorException(e.reason) # depends on [control=['except'], data=['e']]
if response.code >= 300 or response.code < 200:
raise ServerErrorException('Response code: {}'.format(response.code)) # depends on [control=['if'], data=[]]
headers = dict(response.getheaders())
if 'Warning' in headers.keys():
self.logger.warning(headers['Warning']) # depends on [control=['if'], data=[]]
self.logger.debug(response.info())
return response.read()
|
def connect(dsn=None, turbodbc_options=None, connection_string=None, **kwargs):
    """Open a connection to the database named by ``dsn`` or described by
    ``connection_string``.
    :param dsn: Data source name as configured in the (unix) odbc.ini file
        or the (Windows) ODBC Data Source Administrator tool.
    :param turbodbc_options: Options controlling how turbodbc talks to the
        database. Build one with `turbodbc.make_options()`, or pass None to
        accept the defaults.
    :param connection_string: A preformatted ODBC connection string. Mutually
        exclusive with ``dsn``/extra keyword options; combining them raises
        ParameterError.
    :param kwargs: Extra ODBC options (e.g. `user` and `password`, or `uid`
        and `pwd`, depending on your database) folded into the connection
        string that identifies the database.
    :return: A connection to your database
    """
    options = turbodbc_options if turbodbc_options is not None else make_options()
    if connection_string is not None:
        if dsn is not None or kwargs:
            raise ParameterError("Both connection_string and dsn or kwargs specified")
    else:
        connection_string = _make_connection_string(dsn, **kwargs)
    return Connection(intern_connect(connection_string, options))
|
def function[connect, parameter[dsn, turbodbc_options, connection_string]]:
constant[
Create a connection with the database identified by the ``dsn`` or the ``connection_string``.
:param dsn: Data source name as given in the (unix) odbc.ini file
or (Windows) ODBC Data Source Administrator tool.
:param turbodbc_options: Options that control how turbodbc interacts with the database.
Create such a struct with `turbodbc.make_options()` or leave this blank to take the defaults.
:param connection_string: Preformatted ODBC connection string.
Specifying this and dsn or kwargs at the same time raises ParameterError.
:param \**kwargs: You may specify additional options as you please. These options will go into
the connection string that identifies the database. Valid options depend on the specific database you
would like to connect with (e.g. `user` and `password`, or `uid` and `pwd`)
:return: A connection to your database
]
if compare[name[turbodbc_options] is constant[None]] begin[:]
variable[turbodbc_options] assign[=] call[name[make_options], parameter[]]
if <ast.BoolOp object at 0x7da18bc72740> begin[:]
<ast.Raise object at 0x7da20c990130>
if compare[name[connection_string] is constant[None]] begin[:]
variable[connection_string] assign[=] call[name[_make_connection_string], parameter[name[dsn]]]
variable[connection] assign[=] call[name[Connection], parameter[call[name[intern_connect], parameter[name[connection_string], name[turbodbc_options]]]]]
return[name[connection]]
|
keyword[def] identifier[connect] ( identifier[dsn] = keyword[None] , identifier[turbodbc_options] = keyword[None] , identifier[connection_string] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[turbodbc_options] keyword[is] keyword[None] :
identifier[turbodbc_options] = identifier[make_options] ()
keyword[if] identifier[connection_string] keyword[is] keyword[not] keyword[None] keyword[and] ( identifier[dsn] keyword[is] keyword[not] keyword[None] keyword[or] identifier[len] ( identifier[kwargs] )> literal[int] ):
keyword[raise] identifier[ParameterError] ( literal[string] )
keyword[if] identifier[connection_string] keyword[is] keyword[None] :
identifier[connection_string] = identifier[_make_connection_string] ( identifier[dsn] ,** identifier[kwargs] )
identifier[connection] = identifier[Connection] ( identifier[intern_connect] ( identifier[connection_string] ,
identifier[turbodbc_options] ))
keyword[return] identifier[connection]
|
def connect(dsn=None, turbodbc_options=None, connection_string=None, **kwargs):
"""
Create a connection with the database identified by the ``dsn`` or the ``connection_string``.
:param dsn: Data source name as given in the (unix) odbc.ini file
or (Windows) ODBC Data Source Administrator tool.
:param turbodbc_options: Options that control how turbodbc interacts with the database.
Create such a struct with `turbodbc.make_options()` or leave this blank to take the defaults.
:param connection_string: Preformatted ODBC connection string.
Specifying this and dsn or kwargs at the same time raises ParameterError.
:param \\**kwargs: You may specify additional options as you please. These options will go into
the connection string that identifies the database. Valid options depend on the specific database you
would like to connect with (e.g. `user` and `password`, or `uid` and `pwd`)
:return: A connection to your database
"""
if turbodbc_options is None:
turbodbc_options = make_options() # depends on [control=['if'], data=['turbodbc_options']]
if connection_string is not None and (dsn is not None or len(kwargs) > 0):
raise ParameterError('Both connection_string and dsn or kwargs specified') # depends on [control=['if'], data=[]]
if connection_string is None:
connection_string = _make_connection_string(dsn, **kwargs) # depends on [control=['if'], data=['connection_string']]
connection = Connection(intern_connect(connection_string, turbodbc_options))
return connection
|
def process_update(self, update):
    """Process an incoming update from a remote NetworkTables"""
    payload = json.loads(update)
    entry = NetworkTables.getEntry(payload["k"])
    entry.setValue(payload["v"])
|
def function[process_update, parameter[self, update]]:
constant[Process an incoming update from a remote NetworkTables]
variable[data] assign[=] call[name[json].loads, parameter[name[update]]]
call[call[name[NetworkTables].getEntry, parameter[call[name[data]][constant[k]]]].setValue, parameter[call[name[data]][constant[v]]]]
|
keyword[def] identifier[process_update] ( identifier[self] , identifier[update] ):
literal[string]
identifier[data] = identifier[json] . identifier[loads] ( identifier[update] )
identifier[NetworkTables] . identifier[getEntry] ( identifier[data] [ literal[string] ]). identifier[setValue] ( identifier[data] [ literal[string] ])
|
def process_update(self, update):
"""Process an incoming update from a remote NetworkTables"""
data = json.loads(update)
NetworkTables.getEntry(data['k']).setValue(data['v'])
|
def enqueue(self, s):
    """
    Append `s` to the queue.
    Behaves like ``queue += s`` on a plain string, while keeping the
    cached total length (`self._len`) in sync with the stored parts.
    """
    self._len += len(s)
    self._parts.append(s)
|
def function[enqueue, parameter[self, s]]:
constant[
Append `s` to the queue.
Equivalent to::
queue += s
if `queue` where a regular string.
]
call[name[self]._parts.append, parameter[name[s]]]
<ast.AugAssign object at 0x7da20e962bc0>
|
keyword[def] identifier[enqueue] ( identifier[self] , identifier[s] ):
literal[string]
identifier[self] . identifier[_parts] . identifier[append] ( identifier[s] )
identifier[self] . identifier[_len] += identifier[len] ( identifier[s] )
|
def enqueue(self, s):
"""
Append `s` to the queue.
Equivalent to::
queue += s
if `queue` where a regular string.
"""
self._parts.append(s)
self._len += len(s)
|
def find(self, rel, key):
    """
    Searches for a record with the specified relation to the key
    A cursor object is returned, the user can call getkey, getval on the cursor
    to retrieve the actual value.
    or call cursor.next() / cursor.prev() to enumerate values.
    'eq' -> record equal to the key, None when not found
    'le' -> last record with key <= to key
    'ge' -> first record with key >= to key
    'lt' -> last record with key < to key
    'gt' -> first record with key > to key
    """
    # descend tree to leaf nearest to the `key`, recording the (page, slot)
    # path so the cursor can later walk forwards/backwards across pages
    page = self.readpage(self.firstindex)
    stack = []
    while len(stack) < 256:
        # page.find returns ('recurse' | 'eq' | 'lt' | 'gt', slot index)
        act, ix = page.find(key)
        stack.append((page, ix))
        if act != 'recurse':
            break
        page = self.readpage(page.getpage(ix))
    # a depth of 256 is far beyond any sane b-tree: assume corruption/cycle
    if len(stack) == 256:
        raise Exception("b-tree corrupted")
    cursor = BTree.Cursor(self, stack)
    # now correct for what was actually asked: `act` says where the descent
    # landed relative to `key`; `rel` says what the caller wants.
    if act == rel:
        pass
    elif rel == 'eq' and act != 'eq':
        # exact match requested but not present
        return None
    elif rel in ('ge', 'le') and act == 'eq':
        # an exact match already satisfies both <= and >=
        pass
    elif rel in ('gt', 'ge') and act == 'lt':
        # landed just below `key`; step forward to the first record above
        cursor.next()
    elif rel == 'gt' and act == 'eq':
        # strict >: skip past the exact match
        cursor.next()
    elif rel in ('lt', 'le') and act == 'gt':
        # landed just above `key`; step back to the last record below
        cursor.prev()
    elif rel == 'lt' and act == 'eq':
        # strict <: skip back over the exact match
        cursor.prev()
    return cursor
|
def function[find, parameter[self, rel, key]]:
constant[
Searches for a record with the specified relation to the key
A cursor object is returned, the user can call getkey, getval on the cursor
to retrieve the actual value.
or call cursor.next() / cursor.prev() to enumerate values.
'eq' -> record equal to the key, None when not found
'le' -> last record with key <= to key
'ge' -> first record with key >= to key
'lt' -> last record with key < to key
'gt' -> first record with key > to key
]
variable[page] assign[=] call[name[self].readpage, parameter[name[self].firstindex]]
variable[stack] assign[=] list[[]]
while compare[call[name[len], parameter[name[stack]]] less[<] constant[256]] begin[:]
<ast.Tuple object at 0x7da20e9b30d0> assign[=] call[name[page].find, parameter[name[key]]]
call[name[stack].append, parameter[tuple[[<ast.Name object at 0x7da20e9b1930>, <ast.Name object at 0x7da20e9b1990>]]]]
if compare[name[act] not_equal[!=] constant[recurse]] begin[:]
break
variable[page] assign[=] call[name[self].readpage, parameter[call[name[page].getpage, parameter[name[ix]]]]]
if compare[call[name[len], parameter[name[stack]]] equal[==] constant[256]] begin[:]
<ast.Raise object at 0x7da20e9b0a60>
variable[cursor] assign[=] call[name[BTree].Cursor, parameter[name[self], name[stack]]]
if compare[name[act] equal[==] name[rel]] begin[:]
pass
return[name[cursor]]
|
keyword[def] identifier[find] ( identifier[self] , identifier[rel] , identifier[key] ):
literal[string]
identifier[page] = identifier[self] . identifier[readpage] ( identifier[self] . identifier[firstindex] )
identifier[stack] =[]
keyword[while] identifier[len] ( identifier[stack] )< literal[int] :
identifier[act] , identifier[ix] = identifier[page] . identifier[find] ( identifier[key] )
identifier[stack] . identifier[append] (( identifier[page] , identifier[ix] ))
keyword[if] identifier[act] != literal[string] :
keyword[break]
identifier[page] = identifier[self] . identifier[readpage] ( identifier[page] . identifier[getpage] ( identifier[ix] ))
keyword[if] identifier[len] ( identifier[stack] )== literal[int] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[cursor] = identifier[BTree] . identifier[Cursor] ( identifier[self] , identifier[stack] )
keyword[if] identifier[act] == identifier[rel] :
keyword[pass]
keyword[elif] identifier[rel] == literal[string] keyword[and] identifier[act] != literal[string] :
keyword[return] keyword[None]
keyword[elif] identifier[rel] keyword[in] ( literal[string] , literal[string] ) keyword[and] identifier[act] == literal[string] :
keyword[pass]
keyword[elif] identifier[rel] keyword[in] ( literal[string] , literal[string] ) keyword[and] identifier[act] == literal[string] :
identifier[cursor] . identifier[next] ()
keyword[elif] identifier[rel] == literal[string] keyword[and] identifier[act] == literal[string] :
identifier[cursor] . identifier[next] ()
keyword[elif] identifier[rel] keyword[in] ( literal[string] , literal[string] ) keyword[and] identifier[act] == literal[string] :
identifier[cursor] . identifier[prev] ()
keyword[elif] identifier[rel] == literal[string] keyword[and] identifier[act] == literal[string] :
identifier[cursor] . identifier[prev] ()
keyword[return] identifier[cursor]
|
def find(self, rel, key):
"""
Searches for a record with the specified relation to the key
A cursor object is returned, the user can call getkey, getval on the cursor
to retrieve the actual value.
or call cursor.next() / cursor.prev() to enumerate values.
'eq' -> record equal to the key, None when not found
'le' -> last record with key <= to key
'ge' -> first record with key >= to key
'lt' -> last record with key < to key
'gt' -> first record with key > to key
""" # descend tree to leaf nearest to the `key`
page = self.readpage(self.firstindex)
stack = []
while len(stack) < 256:
(act, ix) = page.find(key)
stack.append((page, ix))
if act != 'recurse':
break # depends on [control=['if'], data=[]]
page = self.readpage(page.getpage(ix)) # depends on [control=['while'], data=[]]
if len(stack) == 256:
raise Exception('b-tree corrupted') # depends on [control=['if'], data=[]]
cursor = BTree.Cursor(self, stack) # now correct for what was actually asked.
if act == rel:
pass # depends on [control=['if'], data=[]]
elif rel == 'eq' and act != 'eq':
return None # depends on [control=['if'], data=[]]
elif rel in ('ge', 'le') and act == 'eq':
pass # depends on [control=['if'], data=[]]
elif rel in ('gt', 'ge') and act == 'lt':
cursor.next() # depends on [control=['if'], data=[]]
elif rel == 'gt' and act == 'eq':
cursor.next() # depends on [control=['if'], data=[]]
elif rel in ('lt', 'le') and act == 'gt':
cursor.prev() # depends on [control=['if'], data=[]]
elif rel == 'lt' and act == 'eq':
cursor.prev() # depends on [control=['if'], data=[]]
return cursor
|
def _create_marks(fig, marks=None, options=None, params=None):
    """
    Initializes and returns marks for a figure as a list. Each mark is passed
    in as a class. The plot options should be passed into options.
    Any additional parameters to initialize plot components are passed into
    params as a dict of { 'mark': [{ trait: value, ... }, ...] }
    For example, when initializing two marks you can assign different colors to
    each one:
        params={
            'marks': [
                {'colors': [DARK_BLUE]},
                {'colors': [GOLDENROD]},
            ]
        }
    If the param value is a function, it will be called with the options dict
    augmented with all previously created plot elements. This permits
    dependencies on plot elements:
        params={ 'marks': {'scale': lambda opts: opts['x_sc'] } }
    `marks` defaults to a single `bq.Mark`; `options` and `params` default to
    empty dicts.
    """
    # Mutable defaults ([], {}) are shared across calls in Python, so the
    # intended defaults are resolved freshly on every invocation instead.
    if marks is None:
        marks = [bq.Mark]
    if options is None:
        options = {}
    if params is None:
        params = {}
    params = _merge_with_defaults(params)
    # Although fig provides scale_x and scale_y properties, the scales on the
    # axes are the only ones that are actually used.
    x_ax, y_ax = fig.axes
    x_sc, y_sc = x_ax.scale, y_ax.scale
    options = tz.merge(options, {'x_sc': x_sc, 'y_sc': y_sc})
    return [
        mark_cls(**_call_params(mark_params, options))
        for mark_cls, mark_params in zip(marks, params['marks'])
    ]
|
def function[_create_marks, parameter[fig, marks, options, params]]:
constant[
Initializes and returns marks for a figure as a list. Each mark is passed
in as a class. The plot options should be passed into options.
Any additional parameters to initialize plot components are passed into
params as a dict of { 'mark': [{ trait: value, ... }, ...] }
For example, when initializing two marks you can assign different colors to
each one:
params={
'marks': [
{'colors': [DARK_BLUE]},
{'colors': [GOLDENROD]},
]
}
If the param value is a function, it will be called with the options dict
augmented with all previously created plot elements. This permits
dependencies on plot elements:
params={ 'marks': {'scale': lambda opts: opts['x_sc'] } }
]
variable[params] assign[=] call[name[_merge_with_defaults], parameter[name[params]]]
<ast.Tuple object at 0x7da1b19d0550> assign[=] name[fig].axes
<ast.Tuple object at 0x7da1b19cef20> assign[=] tuple[[<ast.Attribute object at 0x7da1b19cc4c0>, <ast.Attribute object at 0x7da1b19cd2a0>]]
variable[options] assign[=] call[name[tz].merge, parameter[name[options], dictionary[[<ast.Constant object at 0x7da1b19cead0>, <ast.Constant object at 0x7da1b19cee90>], [<ast.Name object at 0x7da1b19cf3d0>, <ast.Name object at 0x7da1b19ce950>]]]]
variable[marks] assign[=] <ast.ListComp object at 0x7da1b19cc2e0>
return[name[marks]]
|
keyword[def] identifier[_create_marks] ( identifier[fig] , identifier[marks] =[ identifier[bq] . identifier[Mark] ], identifier[options] ={}, identifier[params] ={}):
literal[string]
identifier[params] = identifier[_merge_with_defaults] ( identifier[params] )
identifier[x_ax] , identifier[y_ax] = identifier[fig] . identifier[axes]
identifier[x_sc] , identifier[y_sc] = identifier[x_ax] . identifier[scale] , identifier[y_ax] . identifier[scale]
identifier[options] = identifier[tz] . identifier[merge] ( identifier[options] ,{ literal[string] : identifier[x_sc] , literal[string] : identifier[y_sc] })
identifier[marks] =[
identifier[mark_cls] (** identifier[_call_params] ( identifier[mark_params] , identifier[options] ))
keyword[for] identifier[mark_cls] , identifier[mark_params] keyword[in] identifier[zip] ( identifier[marks] , identifier[params] [ literal[string] ])
]
keyword[return] identifier[marks]
|
def _create_marks(fig, marks=[bq.Mark], options={}, params={}):
"""
Initializes and returns marks for a figure as a list. Each mark is passed
in as a class. The plot options should be passed into options.
Any additional parameters to initialize plot components are passed into
params as a dict of { 'mark': [{ trait: value, ... }, ...] }
For example, when initializing two marks you can assign different colors to
each one:
params={
'marks': [
{'colors': [DARK_BLUE]},
{'colors': [GOLDENROD]},
]
}
If the param value is a function, it will be called with the options dict
augmented with all previously created plot elements. This permits
dependencies on plot elements:
params={ 'marks': {'scale': lambda opts: opts['x_sc'] } }
"""
params = _merge_with_defaults(params)
# Although fig provides scale_x and scale_y properties, the scales on the
# axes are the only ones that are actually used.
(x_ax, y_ax) = fig.axes
(x_sc, y_sc) = (x_ax.scale, y_ax.scale)
options = tz.merge(options, {'x_sc': x_sc, 'y_sc': y_sc})
marks = [mark_cls(**_call_params(mark_params, options)) for (mark_cls, mark_params) in zip(marks, params['marks'])]
return marks
|
def del_interrupt_callback(self, gpio_id):
""" Delete all interrupt callbacks from a certain gpio """
debug("- removing interrupts on gpio %s" % gpio_id)
gpio_id = _GPIO.channel_to_gpio(gpio_id)
fileno = self._map_gpioid_to_fileno[gpio_id]
# 1. Remove from epoll
self._epoll.unregister(fileno)
# 2. Cache the file
f = self._map_fileno_to_file[fileno]
# 3. Remove from maps
del self._map_fileno_to_file[fileno]
del self._map_fileno_to_gpioid[fileno]
del self._map_fileno_to_options[fileno]
del self._map_gpioid_to_fileno[gpio_id]
del self._map_gpioid_to_callbacks[gpio_id]
# 4. Close file last in case of IOError
f.close()
|
def function[del_interrupt_callback, parameter[self, gpio_id]]:
constant[ Delete all interrupt callbacks from a certain gpio ]
call[name[debug], parameter[binary_operation[constant[- removing interrupts on gpio %s] <ast.Mod object at 0x7da2590d6920> name[gpio_id]]]]
variable[gpio_id] assign[=] call[name[_GPIO].channel_to_gpio, parameter[name[gpio_id]]]
variable[fileno] assign[=] call[name[self]._map_gpioid_to_fileno][name[gpio_id]]
call[name[self]._epoll.unregister, parameter[name[fileno]]]
variable[f] assign[=] call[name[self]._map_fileno_to_file][name[fileno]]
<ast.Delete object at 0x7da20e955e10>
<ast.Delete object at 0x7da20e956fe0>
<ast.Delete object at 0x7da20e956e60>
<ast.Delete object at 0x7da20e9571c0>
<ast.Delete object at 0x7da20e9b2320>
call[name[f].close, parameter[]]
|
keyword[def] identifier[del_interrupt_callback] ( identifier[self] , identifier[gpio_id] ):
literal[string]
identifier[debug] ( literal[string] % identifier[gpio_id] )
identifier[gpio_id] = identifier[_GPIO] . identifier[channel_to_gpio] ( identifier[gpio_id] )
identifier[fileno] = identifier[self] . identifier[_map_gpioid_to_fileno] [ identifier[gpio_id] ]
identifier[self] . identifier[_epoll] . identifier[unregister] ( identifier[fileno] )
identifier[f] = identifier[self] . identifier[_map_fileno_to_file] [ identifier[fileno] ]
keyword[del] identifier[self] . identifier[_map_fileno_to_file] [ identifier[fileno] ]
keyword[del] identifier[self] . identifier[_map_fileno_to_gpioid] [ identifier[fileno] ]
keyword[del] identifier[self] . identifier[_map_fileno_to_options] [ identifier[fileno] ]
keyword[del] identifier[self] . identifier[_map_gpioid_to_fileno] [ identifier[gpio_id] ]
keyword[del] identifier[self] . identifier[_map_gpioid_to_callbacks] [ identifier[gpio_id] ]
identifier[f] . identifier[close] ()
|
def del_interrupt_callback(self, gpio_id):
""" Delete all interrupt callbacks from a certain gpio """
debug('- removing interrupts on gpio %s' % gpio_id)
gpio_id = _GPIO.channel_to_gpio(gpio_id)
fileno = self._map_gpioid_to_fileno[gpio_id]
# 1. Remove from epoll
self._epoll.unregister(fileno)
# 2. Cache the file
f = self._map_fileno_to_file[fileno]
# 3. Remove from maps
del self._map_fileno_to_file[fileno]
del self._map_fileno_to_gpioid[fileno]
del self._map_fileno_to_options[fileno]
del self._map_gpioid_to_fileno[gpio_id]
del self._map_gpioid_to_callbacks[gpio_id]
# 4. Close file last in case of IOError
f.close()
|
def register(self, request, **cleaned_data):
"""
Given a username, email address and password, register a new
user account, which will initially be inactive.
Along with the new ``User`` object, a new
``registration.models.RegistrationProfile`` will be created,
tied to that ``User``, containing the activation key which
will be used for this account.
Two emails will be sent. First one to the admin; this email should
contain an activation link and a resume of the new user infos.
Second one, to the user, for inform him that his request is pending.
After the ``User`` and ``RegistrationProfile`` are created and
the activation email is sent, the signal
``registration.signals.user_registered`` will be sent, with
the new ``User`` as the keyword argument ``user`` and the
class of this backend as the sender.
"""
if Site._meta.installed:
site = Site.objects.get_current()
else:
site = RequestSite(request)
create_user = RegistrationProfile.objects.create_inactive_user
new_user = create_user(
cleaned_data['username'],
cleaned_data['email'],
cleaned_data['password1'],
site,
send_email=False
)
new_user.first_name = cleaned_data['first_name']
new_user.last_name = cleaned_data['last_name']
new_user.save()
user_info = UserInfo(
user=new_user,
company=cleaned_data['company'],
function=cleaned_data['function'],
address=cleaned_data['address'],
postal_code=cleaned_data['postal_code'],
city=cleaned_data['city'],
country=cleaned_data['country'],
phone=cleaned_data['phone'],
)
user_info.save()
send_activation_email(new_user, site, user_info)
send_activation_pending_email(new_user, site, user_info)
signals.user_registered.send(sender=self.__class__, user=new_user, request=request)
return new_user
|
def function[register, parameter[self, request]]:
constant[
Given a username, email address and password, register a new
user account, which will initially be inactive.
Along with the new ``User`` object, a new
``registration.models.RegistrationProfile`` will be created,
tied to that ``User``, containing the activation key which
will be used for this account.
Two emails will be sent. First one to the admin; this email should
contain an activation link and a resume of the new user infos.
Second one, to the user, for inform him that his request is pending.
After the ``User`` and ``RegistrationProfile`` are created and
the activation email is sent, the signal
``registration.signals.user_registered`` will be sent, with
the new ``User`` as the keyword argument ``user`` and the
class of this backend as the sender.
]
if name[Site]._meta.installed begin[:]
variable[site] assign[=] call[name[Site].objects.get_current, parameter[]]
variable[create_user] assign[=] name[RegistrationProfile].objects.create_inactive_user
variable[new_user] assign[=] call[name[create_user], parameter[call[name[cleaned_data]][constant[username]], call[name[cleaned_data]][constant[email]], call[name[cleaned_data]][constant[password1]], name[site]]]
name[new_user].first_name assign[=] call[name[cleaned_data]][constant[first_name]]
name[new_user].last_name assign[=] call[name[cleaned_data]][constant[last_name]]
call[name[new_user].save, parameter[]]
variable[user_info] assign[=] call[name[UserInfo], parameter[]]
call[name[user_info].save, parameter[]]
call[name[send_activation_email], parameter[name[new_user], name[site], name[user_info]]]
call[name[send_activation_pending_email], parameter[name[new_user], name[site], name[user_info]]]
call[name[signals].user_registered.send, parameter[]]
return[name[new_user]]
|
keyword[def] identifier[register] ( identifier[self] , identifier[request] ,** identifier[cleaned_data] ):
literal[string]
keyword[if] identifier[Site] . identifier[_meta] . identifier[installed] :
identifier[site] = identifier[Site] . identifier[objects] . identifier[get_current] ()
keyword[else] :
identifier[site] = identifier[RequestSite] ( identifier[request] )
identifier[create_user] = identifier[RegistrationProfile] . identifier[objects] . identifier[create_inactive_user]
identifier[new_user] = identifier[create_user] (
identifier[cleaned_data] [ literal[string] ],
identifier[cleaned_data] [ literal[string] ],
identifier[cleaned_data] [ literal[string] ],
identifier[site] ,
identifier[send_email] = keyword[False]
)
identifier[new_user] . identifier[first_name] = identifier[cleaned_data] [ literal[string] ]
identifier[new_user] . identifier[last_name] = identifier[cleaned_data] [ literal[string] ]
identifier[new_user] . identifier[save] ()
identifier[user_info] = identifier[UserInfo] (
identifier[user] = identifier[new_user] ,
identifier[company] = identifier[cleaned_data] [ literal[string] ],
identifier[function] = identifier[cleaned_data] [ literal[string] ],
identifier[address] = identifier[cleaned_data] [ literal[string] ],
identifier[postal_code] = identifier[cleaned_data] [ literal[string] ],
identifier[city] = identifier[cleaned_data] [ literal[string] ],
identifier[country] = identifier[cleaned_data] [ literal[string] ],
identifier[phone] = identifier[cleaned_data] [ literal[string] ],
)
identifier[user_info] . identifier[save] ()
identifier[send_activation_email] ( identifier[new_user] , identifier[site] , identifier[user_info] )
identifier[send_activation_pending_email] ( identifier[new_user] , identifier[site] , identifier[user_info] )
identifier[signals] . identifier[user_registered] . identifier[send] ( identifier[sender] = identifier[self] . identifier[__class__] , identifier[user] = identifier[new_user] , identifier[request] = identifier[request] )
keyword[return] identifier[new_user]
|
def register(self, request, **cleaned_data):
"""
Given a username, email address and password, register a new
user account, which will initially be inactive.
Along with the new ``User`` object, a new
``registration.models.RegistrationProfile`` will be created,
tied to that ``User``, containing the activation key which
will be used for this account.
Two emails will be sent. First one to the admin; this email should
contain an activation link and a resume of the new user infos.
Second one, to the user, for inform him that his request is pending.
After the ``User`` and ``RegistrationProfile`` are created and
the activation email is sent, the signal
``registration.signals.user_registered`` will be sent, with
the new ``User`` as the keyword argument ``user`` and the
class of this backend as the sender.
"""
if Site._meta.installed:
site = Site.objects.get_current() # depends on [control=['if'], data=[]]
else:
site = RequestSite(request)
create_user = RegistrationProfile.objects.create_inactive_user
new_user = create_user(cleaned_data['username'], cleaned_data['email'], cleaned_data['password1'], site, send_email=False)
new_user.first_name = cleaned_data['first_name']
new_user.last_name = cleaned_data['last_name']
new_user.save()
user_info = UserInfo(user=new_user, company=cleaned_data['company'], function=cleaned_data['function'], address=cleaned_data['address'], postal_code=cleaned_data['postal_code'], city=cleaned_data['city'], country=cleaned_data['country'], phone=cleaned_data['phone'])
user_info.save()
send_activation_email(new_user, site, user_info)
send_activation_pending_email(new_user, site, user_info)
signals.user_registered.send(sender=self.__class__, user=new_user, request=request)
return new_user
|
def index(ref_file, out_dir, data):
"""Create a STAR index in the defined reference directory.
"""
(ref_dir, local_file) = os.path.split(ref_file)
gtf_file = dd.get_gtf_file(data)
if not utils.file_exists(gtf_file):
raise ValueError("%s not found, could not create a star index." % (gtf_file))
if not utils.file_exists(out_dir):
with tx_tmpdir(data, os.path.dirname(out_dir)) as tx_out_dir:
num_cores = dd.get_cores(data)
cmd = ("STAR --genomeDir {tx_out_dir} --genomeFastaFiles {ref_file} "
"--runThreadN {num_cores} "
"--runMode genomeGenerate --sjdbOverhang 99 --sjdbGTFfile {gtf_file}")
do.run(cmd.format(**locals()), "Index STAR")
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
shutil.move(tx_out_dir, out_dir)
return out_dir
|
def function[index, parameter[ref_file, out_dir, data]]:
constant[Create a STAR index in the defined reference directory.
]
<ast.Tuple object at 0x7da1b1894a90> assign[=] call[name[os].path.split, parameter[name[ref_file]]]
variable[gtf_file] assign[=] call[name[dd].get_gtf_file, parameter[name[data]]]
if <ast.UnaryOp object at 0x7da1b1897310> begin[:]
<ast.Raise object at 0x7da1b1895570>
if <ast.UnaryOp object at 0x7da1b1896350> begin[:]
with call[name[tx_tmpdir], parameter[name[data], call[name[os].path.dirname, parameter[name[out_dir]]]]] begin[:]
variable[num_cores] assign[=] call[name[dd].get_cores, parameter[name[data]]]
variable[cmd] assign[=] constant[STAR --genomeDir {tx_out_dir} --genomeFastaFiles {ref_file} --runThreadN {num_cores} --runMode genomeGenerate --sjdbOverhang 99 --sjdbGTFfile {gtf_file}]
call[name[do].run, parameter[call[name[cmd].format, parameter[]], constant[Index STAR]]]
if call[name[os].path.exists, parameter[name[out_dir]]] begin[:]
call[name[shutil].rmtree, parameter[name[out_dir]]]
call[name[shutil].move, parameter[name[tx_out_dir], name[out_dir]]]
return[name[out_dir]]
|
keyword[def] identifier[index] ( identifier[ref_file] , identifier[out_dir] , identifier[data] ):
literal[string]
( identifier[ref_dir] , identifier[local_file] )= identifier[os] . identifier[path] . identifier[split] ( identifier[ref_file] )
identifier[gtf_file] = identifier[dd] . identifier[get_gtf_file] ( identifier[data] )
keyword[if] keyword[not] identifier[utils] . identifier[file_exists] ( identifier[gtf_file] ):
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[gtf_file] ))
keyword[if] keyword[not] identifier[utils] . identifier[file_exists] ( identifier[out_dir] ):
keyword[with] identifier[tx_tmpdir] ( identifier[data] , identifier[os] . identifier[path] . identifier[dirname] ( identifier[out_dir] )) keyword[as] identifier[tx_out_dir] :
identifier[num_cores] = identifier[dd] . identifier[get_cores] ( identifier[data] )
identifier[cmd] =( literal[string]
literal[string]
literal[string] )
identifier[do] . identifier[run] ( identifier[cmd] . identifier[format] (** identifier[locals] ()), literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[out_dir] ):
identifier[shutil] . identifier[rmtree] ( identifier[out_dir] )
identifier[shutil] . identifier[move] ( identifier[tx_out_dir] , identifier[out_dir] )
keyword[return] identifier[out_dir]
|
def index(ref_file, out_dir, data):
"""Create a STAR index in the defined reference directory.
"""
(ref_dir, local_file) = os.path.split(ref_file)
gtf_file = dd.get_gtf_file(data)
if not utils.file_exists(gtf_file):
raise ValueError('%s not found, could not create a star index.' % gtf_file) # depends on [control=['if'], data=[]]
if not utils.file_exists(out_dir):
with tx_tmpdir(data, os.path.dirname(out_dir)) as tx_out_dir:
num_cores = dd.get_cores(data)
cmd = 'STAR --genomeDir {tx_out_dir} --genomeFastaFiles {ref_file} --runThreadN {num_cores} --runMode genomeGenerate --sjdbOverhang 99 --sjdbGTFfile {gtf_file}'
do.run(cmd.format(**locals()), 'Index STAR')
if os.path.exists(out_dir):
shutil.rmtree(out_dir) # depends on [control=['if'], data=[]]
shutil.move(tx_out_dir, out_dir) # depends on [control=['with'], data=['tx_out_dir']] # depends on [control=['if'], data=[]]
return out_dir
|
def hll_count(expr, error_rate=0.01, splitter=None):
"""
Calculate HyperLogLog count
:param expr:
:param error_rate: error rate
:type error_rate: float
:param splitter: the splitter to split the column value
:return: sequence or scalar
:Example:
>>> df = DataFrame(pd.DataFrame({'a': np.random.randint(100000, size=100000)}))
>>> df.a.hll_count()
63270
>>> df.a.nunique()
63250
"""
# to make the class pickled right by the cloudpickle
with open(os.path.join(path, 'lib', 'hll.py')) as hll_file:
local = {}
six.exec_(hll_file.read(), local)
HyperLogLog = local['HyperLogLog']
return expr.agg(HyperLogLog, rtype=types.int64, args=(error_rate, splitter))
|
def function[hll_count, parameter[expr, error_rate, splitter]]:
constant[
Calculate HyperLogLog count
:param expr:
:param error_rate: error rate
:type error_rate: float
:param splitter: the splitter to split the column value
:return: sequence or scalar
:Example:
>>> df = DataFrame(pd.DataFrame({'a': np.random.randint(100000, size=100000)}))
>>> df.a.hll_count()
63270
>>> df.a.nunique()
63250
]
with call[name[open], parameter[call[name[os].path.join, parameter[name[path], constant[lib], constant[hll.py]]]]] begin[:]
variable[local] assign[=] dictionary[[], []]
call[name[six].exec_, parameter[call[name[hll_file].read, parameter[]], name[local]]]
variable[HyperLogLog] assign[=] call[name[local]][constant[HyperLogLog]]
return[call[name[expr].agg, parameter[name[HyperLogLog]]]]
|
keyword[def] identifier[hll_count] ( identifier[expr] , identifier[error_rate] = literal[int] , identifier[splitter] = keyword[None] ):
literal[string]
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , literal[string] , literal[string] )) keyword[as] identifier[hll_file] :
identifier[local] ={}
identifier[six] . identifier[exec_] ( identifier[hll_file] . identifier[read] (), identifier[local] )
identifier[HyperLogLog] = identifier[local] [ literal[string] ]
keyword[return] identifier[expr] . identifier[agg] ( identifier[HyperLogLog] , identifier[rtype] = identifier[types] . identifier[int64] , identifier[args] =( identifier[error_rate] , identifier[splitter] ))
|
def hll_count(expr, error_rate=0.01, splitter=None):
"""
Calculate HyperLogLog count
:param expr:
:param error_rate: error rate
:type error_rate: float
:param splitter: the splitter to split the column value
:return: sequence or scalar
:Example:
>>> df = DataFrame(pd.DataFrame({'a': np.random.randint(100000, size=100000)}))
>>> df.a.hll_count()
63270
>>> df.a.nunique()
63250
"""
# to make the class pickled right by the cloudpickle
with open(os.path.join(path, 'lib', 'hll.py')) as hll_file:
local = {}
six.exec_(hll_file.read(), local)
HyperLogLog = local['HyperLogLog']
return expr.agg(HyperLogLog, rtype=types.int64, args=(error_rate, splitter)) # depends on [control=['with'], data=['hll_file']]
|
def read_file(path, absolute=False, encoding='utf-8'):
"""
Read the file at `path`. If `absolute` is True, use absolute path,
otherwise path is assumed to be relative to Tarbell template root dir.
For example:
.. code-block:: html+jinja
<div class="chapter">
{{ read_file('_chapters/one.txt')|linebreaks }}
</div>
"""
site = g.current_site
if not absolute:
path = os.path.join(site.path, path)
try:
return codecs.open(path, 'r', encoding).read()
except IOError:
return None
|
def function[read_file, parameter[path, absolute, encoding]]:
constant[
Read the file at `path`. If `absolute` is True, use absolute path,
otherwise path is assumed to be relative to Tarbell template root dir.
For example:
.. code-block:: html+jinja
<div class="chapter">
{{ read_file('_chapters/one.txt')|linebreaks }}
</div>
]
variable[site] assign[=] name[g].current_site
if <ast.UnaryOp object at 0x7da1b198dc30> begin[:]
variable[path] assign[=] call[name[os].path.join, parameter[name[site].path, name[path]]]
<ast.Try object at 0x7da1b198fdc0>
|
keyword[def] identifier[read_file] ( identifier[path] , identifier[absolute] = keyword[False] , identifier[encoding] = literal[string] ):
literal[string]
identifier[site] = identifier[g] . identifier[current_site]
keyword[if] keyword[not] identifier[absolute] :
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[site] . identifier[path] , identifier[path] )
keyword[try] :
keyword[return] identifier[codecs] . identifier[open] ( identifier[path] , literal[string] , identifier[encoding] ). identifier[read] ()
keyword[except] identifier[IOError] :
keyword[return] keyword[None]
|
def read_file(path, absolute=False, encoding='utf-8'):
"""
Read the file at `path`. If `absolute` is True, use absolute path,
otherwise path is assumed to be relative to Tarbell template root dir.
For example:
.. code-block:: html+jinja
<div class="chapter">
{{ read_file('_chapters/one.txt')|linebreaks }}
</div>
"""
site = g.current_site
if not absolute:
path = os.path.join(site.path, path) # depends on [control=['if'], data=[]]
try:
return codecs.open(path, 'r', encoding).read() # depends on [control=['try'], data=[]]
except IOError:
return None # depends on [control=['except'], data=[]]
|
def _grouper(iterable, n, fillvalue=0):
"""Collect data into fixed-length chunks or blocks.
Args:
n (int): The size of the chunk.
fillvalue (int): The fill value.
Returns:
iterator: An iterator over the chunks.
"""
args = [iter(iterable)] * n
return zip_longest(fillvalue=fillvalue, *args)
|
def function[_grouper, parameter[iterable, n, fillvalue]]:
constant[Collect data into fixed-length chunks or blocks.
Args:
n (int): The size of the chunk.
fillvalue (int): The fill value.
Returns:
iterator: An iterator over the chunks.
]
variable[args] assign[=] binary_operation[list[[<ast.Call object at 0x7da204960310>]] * name[n]]
return[call[name[zip_longest], parameter[<ast.Starred object at 0x7da204961f30>]]]
|
keyword[def] identifier[_grouper] ( identifier[iterable] , identifier[n] , identifier[fillvalue] = literal[int] ):
literal[string]
identifier[args] =[ identifier[iter] ( identifier[iterable] )]* identifier[n]
keyword[return] identifier[zip_longest] ( identifier[fillvalue] = identifier[fillvalue] ,* identifier[args] )
|
def _grouper(iterable, n, fillvalue=0):
"""Collect data into fixed-length chunks or blocks.
Args:
n (int): The size of the chunk.
fillvalue (int): The fill value.
Returns:
iterator: An iterator over the chunks.
"""
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
|
def next_event(self, event_id, prev=False):
"""Get the event following another event in this conversation.
Args:
event_id (str): ID of the event.
prev (bool): If ``True``, return the previous event rather than the
next event. Defaults to ``False``.
Raises:
KeyError: If no such :class:`.ConversationEvent` is known.
Returns:
:class:`.ConversationEvent` or ``None`` if there is no following
event.
"""
i = self.events.index(self._events_dict[event_id])
if prev and i > 0:
return self.events[i - 1]
elif not prev and i + 1 < len(self.events):
return self.events[i + 1]
else:
return None
|
def function[next_event, parameter[self, event_id, prev]]:
constant[Get the event following another event in this conversation.
Args:
event_id (str): ID of the event.
prev (bool): If ``True``, return the previous event rather than the
next event. Defaults to ``False``.
Raises:
KeyError: If no such :class:`.ConversationEvent` is known.
Returns:
:class:`.ConversationEvent` or ``None`` if there is no following
event.
]
variable[i] assign[=] call[name[self].events.index, parameter[call[name[self]._events_dict][name[event_id]]]]
if <ast.BoolOp object at 0x7da2047e92d0> begin[:]
return[call[name[self].events][binary_operation[name[i] - constant[1]]]]
|
keyword[def] identifier[next_event] ( identifier[self] , identifier[event_id] , identifier[prev] = keyword[False] ):
literal[string]
identifier[i] = identifier[self] . identifier[events] . identifier[index] ( identifier[self] . identifier[_events_dict] [ identifier[event_id] ])
keyword[if] identifier[prev] keyword[and] identifier[i] > literal[int] :
keyword[return] identifier[self] . identifier[events] [ identifier[i] - literal[int] ]
keyword[elif] keyword[not] identifier[prev] keyword[and] identifier[i] + literal[int] < identifier[len] ( identifier[self] . identifier[events] ):
keyword[return] identifier[self] . identifier[events] [ identifier[i] + literal[int] ]
keyword[else] :
keyword[return] keyword[None]
|
def next_event(self, event_id, prev=False):
"""Get the event following another event in this conversation.
Args:
event_id (str): ID of the event.
prev (bool): If ``True``, return the previous event rather than the
next event. Defaults to ``False``.
Raises:
KeyError: If no such :class:`.ConversationEvent` is known.
Returns:
:class:`.ConversationEvent` or ``None`` if there is no following
event.
"""
i = self.events.index(self._events_dict[event_id])
if prev and i > 0:
return self.events[i - 1] # depends on [control=['if'], data=[]]
elif not prev and i + 1 < len(self.events):
return self.events[i + 1] # depends on [control=['if'], data=[]]
else:
return None
|
def _get_field_by_name(model_class, field_name):
"""
Compatible with old API of model_class._meta.get_field_by_name(field_name)
"""
field = model_class._meta.get_field(field_name)
return (
field, # field
field.model, # model
not field.auto_created or field.concrete, # direct
field.many_to_many # m2m
)
|
def function[_get_field_by_name, parameter[model_class, field_name]]:
constant[
Compatible with old API of model_class._meta.get_field_by_name(field_name)
]
variable[field] assign[=] call[name[model_class]._meta.get_field, parameter[name[field_name]]]
return[tuple[[<ast.Name object at 0x7da20e956d10>, <ast.Attribute object at 0x7da20e955570>, <ast.BoolOp object at 0x7da20e956f80>, <ast.Attribute object at 0x7da20e955bd0>]]]
|
keyword[def] identifier[_get_field_by_name] ( identifier[model_class] , identifier[field_name] ):
literal[string]
identifier[field] = identifier[model_class] . identifier[_meta] . identifier[get_field] ( identifier[field_name] )
keyword[return] (
identifier[field] ,
identifier[field] . identifier[model] ,
keyword[not] identifier[field] . identifier[auto_created] keyword[or] identifier[field] . identifier[concrete] ,
identifier[field] . identifier[many_to_many]
)
|
def _get_field_by_name(model_class, field_name):
"""
Compatible with old API of model_class._meta.get_field_by_name(field_name)
"""
field = model_class._meta.get_field(field_name) # field
# model
# direct
# m2m
return (field, field.model, not field.auto_created or field.concrete, field.many_to_many)
|
def import_from_storage_bucket(self, bucket, file, namespace=None, entity_filter=None, labels=None):
"""
Import a backup from Cloud Storage to Cloud Datastore.
.. note::
Keep in mind that this requests the Admin API not the Data API.
.. seealso::
https://cloud.google.com/datastore/docs/reference/admin/rest/v1/projects/import
:param bucket: The name of the Cloud Storage bucket.
:type bucket: str
:param file: the metadata file written by the projects.export operation.
:type file: str
:param namespace: The Cloud Storage namespace path.
:type namespace: str
:param entity_filter: specify which kinds/namespaces are to be imported.
:type entity_filter: dict
:param labels: Client-assigned labels.
:type labels: dict of str
:return: a resource operation instance.
:rtype: dict
"""
admin_conn = self.get_conn()
input_url = 'gs://' + '/'.join(filter(None, [bucket, namespace, file]))
if not entity_filter:
entity_filter = {}
if not labels:
labels = {}
body = {
'inputUrl': input_url,
'entityFilter': entity_filter,
'labels': labels,
}
resp = (admin_conn
.projects()
.import_(projectId=self.project_id, body=body)
.execute(num_retries=self.num_retries))
return resp
|
def function[import_from_storage_bucket, parameter[self, bucket, file, namespace, entity_filter, labels]]:
constant[
Import a backup from Cloud Storage to Cloud Datastore.
.. note::
Keep in mind that this requests the Admin API not the Data API.
.. seealso::
https://cloud.google.com/datastore/docs/reference/admin/rest/v1/projects/import
:param bucket: The name of the Cloud Storage bucket.
:type bucket: str
:param file: the metadata file written by the projects.export operation.
:type file: str
:param namespace: The Cloud Storage namespace path.
:type namespace: str
:param entity_filter: specify which kinds/namespaces are to be imported.
:type entity_filter: dict
:param labels: Client-assigned labels.
:type labels: dict of str
:return: a resource operation instance.
:rtype: dict
]
variable[admin_conn] assign[=] call[name[self].get_conn, parameter[]]
variable[input_url] assign[=] binary_operation[constant[gs://] + call[constant[/].join, parameter[call[name[filter], parameter[constant[None], list[[<ast.Name object at 0x7da1b052b100>, <ast.Name object at 0x7da1b0529e70>, <ast.Name object at 0x7da1b0529de0>]]]]]]]
if <ast.UnaryOp object at 0x7da1b0528cd0> begin[:]
variable[entity_filter] assign[=] dictionary[[], []]
if <ast.UnaryOp object at 0x7da1b05297b0> begin[:]
variable[labels] assign[=] dictionary[[], []]
variable[body] assign[=] dictionary[[<ast.Constant object at 0x7da1b0528ac0>, <ast.Constant object at 0x7da1b052b1f0>, <ast.Constant object at 0x7da1b052bd60>], [<ast.Name object at 0x7da1b0528880>, <ast.Name object at 0x7da1b052aa10>, <ast.Name object at 0x7da1b052be80>]]
variable[resp] assign[=] call[call[call[name[admin_conn].projects, parameter[]].import_, parameter[]].execute, parameter[]]
return[name[resp]]
|
keyword[def] identifier[import_from_storage_bucket] ( identifier[self] , identifier[bucket] , identifier[file] , identifier[namespace] = keyword[None] , identifier[entity_filter] = keyword[None] , identifier[labels] = keyword[None] ):
literal[string]
identifier[admin_conn] = identifier[self] . identifier[get_conn] ()
identifier[input_url] = literal[string] + literal[string] . identifier[join] ( identifier[filter] ( keyword[None] ,[ identifier[bucket] , identifier[namespace] , identifier[file] ]))
keyword[if] keyword[not] identifier[entity_filter] :
identifier[entity_filter] ={}
keyword[if] keyword[not] identifier[labels] :
identifier[labels] ={}
identifier[body] ={
literal[string] : identifier[input_url] ,
literal[string] : identifier[entity_filter] ,
literal[string] : identifier[labels] ,
}
identifier[resp] =( identifier[admin_conn]
. identifier[projects] ()
. identifier[import_] ( identifier[projectId] = identifier[self] . identifier[project_id] , identifier[body] = identifier[body] )
. identifier[execute] ( identifier[num_retries] = identifier[self] . identifier[num_retries] ))
keyword[return] identifier[resp]
|
def import_from_storage_bucket(self, bucket, file, namespace=None, entity_filter=None, labels=None):
"""
Import a backup from Cloud Storage to Cloud Datastore.
.. note::
Keep in mind that this requests the Admin API not the Data API.
.. seealso::
https://cloud.google.com/datastore/docs/reference/admin/rest/v1/projects/import
:param bucket: The name of the Cloud Storage bucket.
:type bucket: str
:param file: the metadata file written by the projects.export operation.
:type file: str
:param namespace: The Cloud Storage namespace path.
:type namespace: str
:param entity_filter: specify which kinds/namespaces are to be imported.
:type entity_filter: dict
:param labels: Client-assigned labels.
:type labels: dict of str
:return: a resource operation instance.
:rtype: dict
"""
admin_conn = self.get_conn()
input_url = 'gs://' + '/'.join(filter(None, [bucket, namespace, file]))
if not entity_filter:
entity_filter = {} # depends on [control=['if'], data=[]]
if not labels:
labels = {} # depends on [control=['if'], data=[]]
body = {'inputUrl': input_url, 'entityFilter': entity_filter, 'labels': labels}
resp = admin_conn.projects().import_(projectId=self.project_id, body=body).execute(num_retries=self.num_retries)
return resp
|
def clear_file_list_cache(self, load):
    '''
    Delete the cached file-list files for the requested backends/saltenvs.

    :param load: request payload; honors the ``saltenv`` and ``fsbackend``
        keys (the legacy ``env`` key is dropped). ``saltenv`` may be a
        list or a comma-separated string; ``None`` means all saltenvs.
    :return: dict mapping each backend name to the list of saltenvs whose
        file-list cache was removed.
    '''
    if 'env' in load:
        # "env" is not supported; Use "saltenv".
        load.pop('env')

    saltenv = load.get('saltenv', [])
    if saltenv is not None:
        if not isinstance(saltenv, list):
            # Accept a comma-separated string; fall back to coercing
            # non-string values (e.g. ints) to text first.
            try:
                saltenv = [x.strip() for x in saltenv.split(',')]
            except AttributeError:
                saltenv = [x.strip() for x in six.text_type(saltenv).split(',')]
        for idx, val in enumerate(saltenv):
            if not isinstance(val, six.string_types):
                saltenv[idx] = six.text_type(val)

    ret = {}
    fsb = self.backends(load.pop('fsbackend', None))
    list_cachedir = os.path.join(self.opts['cachedir'], 'file_lists')
    try:
        file_list_backends = os.listdir(list_cachedir)
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            log.debug('No file list caches found')
            return {}
        else:
            log.error(
                'Failed to get list of saltenvs for which the master has '
                'cached file lists: %s', exc
            )
            # BUG FIX: the original fell through here, then raised a
            # NameError on the unbound ``file_list_backends`` below.
            # Nothing was removed, so report an empty result instead.
            return ret

    for back in file_list_backends:
        # Account for the fact that the file_list cache directory for gitfs
        # is 'git', hgfs is 'hg', etc.
        back_virtualname = re.sub('fs$', '', back)
        try:
            cache_files = os.listdir(os.path.join(list_cachedir, back))
        except OSError as exc:
            log.error(
                'Failed to find file list caches for saltenv \'%s\': %s',
                back, exc
            )
            continue
        for cache_file in cache_files:
            try:
                cache_saltenv, extension = cache_file.rsplit('.', 1)
            except ValueError:
                # Filename has no dot in it. Not a cache file, ignore.
                continue
            if extension != 'p':
                # Filename does not end in ".p". Not a cache file, ignore.
                continue
            elif back_virtualname not in fsb or \
                    (saltenv is not None and cache_saltenv not in saltenv):
                log.debug(
                    'Skipping %s file list cache for saltenv \'%s\'',
                    back, cache_saltenv
                )
                continue
            try:
                os.remove(os.path.join(list_cachedir, back, cache_file))
            except OSError as exc:
                if exc.errno != errno.ENOENT:
                    log.error('Failed to remove %s: %s',
                              exc.filename, exc.strerror)
            else:
                ret.setdefault(back, []).append(cache_saltenv)
                log.debug(
                    'Removed %s file list cache for saltenv \'%s\'',
                    cache_saltenv, back
                )
    return ret
|
def function[clear_file_list_cache, parameter[self, load]]:
constant[
Deletes the file_lists cache files
]
if compare[constant[env] in name[load]] begin[:]
call[name[load].pop, parameter[constant[env]]]
variable[saltenv] assign[=] call[name[load].get, parameter[constant[saltenv], list[[]]]]
if compare[name[saltenv] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da204960610> begin[:]
<ast.Try object at 0x7da204962e30>
for taget[tuple[[<ast.Name object at 0x7da204961930>, <ast.Name object at 0x7da2049624d0>]]] in starred[call[name[enumerate], parameter[name[saltenv]]]] begin[:]
if <ast.UnaryOp object at 0x7da18f721180> begin[:]
call[name[saltenv]][name[idx]] assign[=] call[name[six].text_type, parameter[name[val]]]
variable[ret] assign[=] dictionary[[], []]
variable[fsb] assign[=] call[name[self].backends, parameter[call[name[load].pop, parameter[constant[fsbackend], constant[None]]]]]
variable[list_cachedir] assign[=] call[name[os].path.join, parameter[call[name[self].opts][constant[cachedir]], constant[file_lists]]]
<ast.Try object at 0x7da18f721b10>
for taget[name[back]] in starred[name[file_list_backends]] begin[:]
variable[back_virtualname] assign[=] call[name[re].sub, parameter[constant[fs$], constant[], name[back]]]
<ast.Try object at 0x7da204963430>
for taget[name[cache_file]] in starred[name[cache_files]] begin[:]
<ast.Try object at 0x7da204960400>
if compare[name[extension] not_equal[!=] constant[p]] begin[:]
continue
<ast.Try object at 0x7da2049635b0>
return[name[ret]]
|
keyword[def] identifier[clear_file_list_cache] ( identifier[self] , identifier[load] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[load] :
identifier[load] . identifier[pop] ( literal[string] )
identifier[saltenv] = identifier[load] . identifier[get] ( literal[string] ,[])
keyword[if] identifier[saltenv] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[saltenv] , identifier[list] ):
keyword[try] :
identifier[saltenv] =[ identifier[x] . identifier[strip] () keyword[for] identifier[x] keyword[in] identifier[saltenv] . identifier[split] ( literal[string] )]
keyword[except] identifier[AttributeError] :
identifier[saltenv] =[ identifier[x] . identifier[strip] () keyword[for] identifier[x] keyword[in] identifier[six] . identifier[text_type] ( identifier[saltenv] ). identifier[split] ( literal[string] )]
keyword[for] identifier[idx] , identifier[val] keyword[in] identifier[enumerate] ( identifier[saltenv] ):
keyword[if] keyword[not] identifier[isinstance] ( identifier[val] , identifier[six] . identifier[string_types] ):
identifier[saltenv] [ identifier[idx] ]= identifier[six] . identifier[text_type] ( identifier[val] )
identifier[ret] ={}
identifier[fsb] = identifier[self] . identifier[backends] ( identifier[load] . identifier[pop] ( literal[string] , keyword[None] ))
identifier[list_cachedir] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[opts] [ literal[string] ], literal[string] )
keyword[try] :
identifier[file_list_backends] = identifier[os] . identifier[listdir] ( identifier[list_cachedir] )
keyword[except] identifier[OSError] keyword[as] identifier[exc] :
keyword[if] identifier[exc] . identifier[errno] == identifier[errno] . identifier[ENOENT] :
identifier[log] . identifier[debug] ( literal[string] )
keyword[return] {}
keyword[else] :
identifier[log] . identifier[error] (
literal[string]
literal[string] , identifier[exc]
)
keyword[for] identifier[back] keyword[in] identifier[file_list_backends] :
identifier[back_virtualname] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[back] )
keyword[try] :
identifier[cache_files] = identifier[os] . identifier[listdir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[list_cachedir] , identifier[back] ))
keyword[except] identifier[OSError] keyword[as] identifier[exc] :
identifier[log] . identifier[error] (
literal[string] ,
identifier[back] , identifier[exc]
)
keyword[continue]
keyword[for] identifier[cache_file] keyword[in] identifier[cache_files] :
keyword[try] :
identifier[cache_saltenv] , identifier[extension] = identifier[cache_file] . identifier[rsplit] ( literal[string] , literal[int] )
keyword[except] identifier[ValueError] :
keyword[continue]
keyword[if] identifier[extension] != literal[string] :
keyword[continue]
keyword[elif] identifier[back_virtualname] keyword[not] keyword[in] identifier[fsb] keyword[or] ( identifier[saltenv] keyword[is] keyword[not] keyword[None] keyword[and] identifier[cache_saltenv] keyword[not] keyword[in] identifier[saltenv] ):
identifier[log] . identifier[debug] (
literal[string] ,
identifier[back] , identifier[cache_saltenv]
)
keyword[continue]
keyword[try] :
identifier[os] . identifier[remove] ( identifier[os] . identifier[path] . identifier[join] ( identifier[list_cachedir] , identifier[back] , identifier[cache_file] ))
keyword[except] identifier[OSError] keyword[as] identifier[exc] :
keyword[if] identifier[exc] . identifier[errno] != identifier[errno] . identifier[ENOENT] :
identifier[log] . identifier[error] ( literal[string] ,
identifier[exc] . identifier[filename] , identifier[exc] . identifier[strerror] )
keyword[else] :
identifier[ret] . identifier[setdefault] ( identifier[back] ,[]). identifier[append] ( identifier[cache_saltenv] )
identifier[log] . identifier[debug] (
literal[string] ,
identifier[cache_saltenv] , identifier[back]
)
keyword[return] identifier[ret]
|
def clear_file_list_cache(self, load):
"""
Deletes the file_lists cache files
"""
if 'env' in load:
# "env" is not supported; Use "saltenv".
load.pop('env') # depends on [control=['if'], data=['load']]
saltenv = load.get('saltenv', [])
if saltenv is not None:
if not isinstance(saltenv, list):
try:
saltenv = [x.strip() for x in saltenv.split(',')] # depends on [control=['try'], data=[]]
except AttributeError:
saltenv = [x.strip() for x in six.text_type(saltenv).split(',')] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
for (idx, val) in enumerate(saltenv):
if not isinstance(val, six.string_types):
saltenv[idx] = six.text_type(val) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['saltenv']]
ret = {}
fsb = self.backends(load.pop('fsbackend', None))
list_cachedir = os.path.join(self.opts['cachedir'], 'file_lists')
try:
file_list_backends = os.listdir(list_cachedir) # depends on [control=['try'], data=[]]
except OSError as exc:
if exc.errno == errno.ENOENT:
log.debug('No file list caches found')
return {} # depends on [control=['if'], data=[]]
else:
log.error('Failed to get list of saltenvs for which the master has cached file lists: %s', exc) # depends on [control=['except'], data=['exc']]
for back in file_list_backends:
# Account for the fact that the file_list cache directory for gitfs
# is 'git', hgfs is 'hg', etc.
back_virtualname = re.sub('fs$', '', back)
try:
cache_files = os.listdir(os.path.join(list_cachedir, back)) # depends on [control=['try'], data=[]]
except OSError as exc:
log.error("Failed to find file list caches for saltenv '%s': %s", back, exc)
continue # depends on [control=['except'], data=['exc']]
for cache_file in cache_files:
try:
(cache_saltenv, extension) = cache_file.rsplit('.', 1) # depends on [control=['try'], data=[]]
except ValueError:
# Filename has no dot in it. Not a cache file, ignore.
continue # depends on [control=['except'], data=[]]
if extension != 'p':
# Filename does not end in ".p". Not a cache file, ignore.
continue # depends on [control=['if'], data=[]]
elif back_virtualname not in fsb or (saltenv is not None and cache_saltenv not in saltenv):
log.debug("Skipping %s file list cache for saltenv '%s'", back, cache_saltenv)
continue # depends on [control=['if'], data=[]]
try:
os.remove(os.path.join(list_cachedir, back, cache_file)) # depends on [control=['try'], data=[]]
except OSError as exc:
if exc.errno != errno.ENOENT:
log.error('Failed to remove %s: %s', exc.filename, exc.strerror) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['exc']]
else:
ret.setdefault(back, []).append(cache_saltenv)
log.debug("Removed %s file list cache for saltenv '%s'", cache_saltenv, back) # depends on [control=['for'], data=['cache_file']] # depends on [control=['for'], data=['back']]
return ret
|
def get_ip_interface_output_interface_if_state(self, **kwargs):
    """Auto Generated Code

    Builds the ``get_ip_interface/output/interface/if-state`` XML payload
    from the ``interface_type``, ``interface_name`` and ``if_state``
    keyword arguments and dispatches it through ``callback`` (defaulting
    to ``self._callback``).
    """
    # Payload layout: get_ip_interface -> output -> interface -> leaves.
    get_ip_interface = ET.Element("get_ip_interface")
    config = get_ip_interface
    interface = ET.SubElement(ET.SubElement(get_ip_interface, "output"), "interface")
    for tag, arg in (("interface-type", "interface_type"),
                     ("interface-name", "interface_name"),
                     ("if-state", "if_state")):
        ET.SubElement(interface, tag).text = kwargs.pop(arg)
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def function[get_ip_interface_output_interface_if_state, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[get_ip_interface] assign[=] call[name[ET].Element, parameter[constant[get_ip_interface]]]
variable[config] assign[=] name[get_ip_interface]
variable[output] assign[=] call[name[ET].SubElement, parameter[name[get_ip_interface], constant[output]]]
variable[interface] assign[=] call[name[ET].SubElement, parameter[name[output], constant[interface]]]
variable[interface_type_key] assign[=] call[name[ET].SubElement, parameter[name[interface], constant[interface-type]]]
name[interface_type_key].text assign[=] call[name[kwargs].pop, parameter[constant[interface_type]]]
variable[interface_name_key] assign[=] call[name[ET].SubElement, parameter[name[interface], constant[interface-name]]]
name[interface_name_key].text assign[=] call[name[kwargs].pop, parameter[constant[interface_name]]]
variable[if_state] assign[=] call[name[ET].SubElement, parameter[name[interface], constant[if-state]]]
name[if_state].text assign[=] call[name[kwargs].pop, parameter[constant[if_state]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]]
|
keyword[def] identifier[get_ip_interface_output_interface_if_state] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[get_ip_interface] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[get_ip_interface]
identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[get_ip_interface] , literal[string] )
identifier[interface] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] )
identifier[interface_type_key] = identifier[ET] . identifier[SubElement] ( identifier[interface] , literal[string] )
identifier[interface_type_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[interface_name_key] = identifier[ET] . identifier[SubElement] ( identifier[interface] , literal[string] )
identifier[interface_name_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[if_state] = identifier[ET] . identifier[SubElement] ( identifier[interface] , literal[string] )
identifier[if_state] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] )
|
def get_ip_interface_output_interface_if_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
get_ip_interface = ET.Element('get_ip_interface')
config = get_ip_interface
output = ET.SubElement(get_ip_interface, 'output')
interface = ET.SubElement(output, 'interface')
interface_type_key = ET.SubElement(interface, 'interface-type')
interface_type_key.text = kwargs.pop('interface_type')
interface_name_key = ET.SubElement(interface, 'interface-name')
interface_name_key.text = kwargs.pop('interface_name')
if_state = ET.SubElement(interface, 'if-state')
if_state.text = kwargs.pop('if_state')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
def make_certifier():
    """
    Decorator that can wrap raw functions to create a certifier function.
    Certifier functions support partial application. If a function wrapped by
    `make_certifier` is called with a value as its first argument it will be
    certified immediately. If no value is passed, then it will return a
    function that can be called at a later time.
    Assuming that `certify_something` has been decorated by `make_certifier`:
    >>> certify_something(value, foo=1, bar=2)
    Is equivalent to:
    >>> certifier = certify_something(foo=1, bar=2)
    >>> certifier(value)
    """
    def decorator(func):
        @six.wraps(func)
        def wrapper(value=_undefined, **kwargs):
            def certify(val):
                # Run the certification body only while certification is
                # globally enabled; the value is always passed through.
                if is_enabled():
                    exec_func(func, val, **kwargs)
                return val
            # Partial application: with no value, hand back the certifier
            # so it can be applied later.
            return certify(value) if value is not _undefined else certify
        return wrapper
    return decorator
|
def function[make_certifier, parameter[]]:
constant[
Decorator that can wrap raw functions to create a certifier function.
Certifier functions support partial application. If a function wrapped by
`make_certifier` is called with a value as its first argument it will be
certified immediately. If no value is passed, then it will return a
function that can be called at a later time.
Assuming that `certify_something` has been decorated by `make_certifier`:
>>> certify_something(value, foo=1, bar=2)
Is equivalent to:
>>> certifier = certify_something(foo=1, bar=2)
>>> certifier(value)
]
def function[decorator, parameter[func]]:
def function[wrapper, parameter[value]]:
def function[certify, parameter[val]]:
if call[name[is_enabled], parameter[]] begin[:]
call[name[exec_func], parameter[name[func], name[val]]]
return[name[val]]
if compare[name[value] is_not name[_undefined]] begin[:]
return[call[name[certify], parameter[name[value]]]]
return[name[wrapper]]
return[name[decorator]]
|
keyword[def] identifier[make_certifier] ():
literal[string]
keyword[def] identifier[decorator] ( identifier[func] ):
@ identifier[six] . identifier[wraps] ( identifier[func] )
keyword[def] identifier[wrapper] ( identifier[value] = identifier[_undefined] ,** identifier[kwargs] ):
keyword[def] identifier[certify] ( identifier[val] ):
keyword[if] identifier[is_enabled] ():
identifier[exec_func] ( identifier[func] , identifier[val] ,** identifier[kwargs] )
keyword[return] identifier[val]
keyword[if] identifier[value] keyword[is] keyword[not] identifier[_undefined] :
keyword[return] identifier[certify] ( identifier[value] )
keyword[else] :
keyword[return] identifier[certify]
keyword[return] identifier[wrapper]
keyword[return] identifier[decorator]
|
def make_certifier():
"""
Decorator that can wrap raw functions to create a certifier function.
Certifier functions support partial application. If a function wrapped by
`make_certifier` is called with a value as its first argument it will be
certified immediately. If no value is passed, then it will return a
function that can be called at a later time.
Assuming that `certify_something` has been decorated by `make_certifier`:
>>> certify_something(value, foo=1, bar=2)
Is equivalent to:
>>> certifier = certify_something(foo=1, bar=2)
>>> certifier(value)
"""
def decorator(func):
@six.wraps(func)
def wrapper(value=_undefined, **kwargs):
def certify(val):
if is_enabled():
exec_func(func, val, **kwargs)
return val # depends on [control=['if'], data=[]]
if value is not _undefined:
return certify(value) # depends on [control=['if'], data=['value']]
else:
return certify
return wrapper
return decorator
|
def parse_tibia_datetime(datetime_str) -> Optional[datetime.datetime]:
    """Parses date and time from the format used in Tibia.com
    Accepted format:
    - ``MMM DD YYYY, HH:mm:ss ZZZ``, e.g. ``Dec 10 2018, 21:53:37 CET``.
    Parameters
    -------------
    datetime_str: :class:`str`
        The date and time as represented in Tibia.com
    Returns
    -----------
    :class:`datetime.datetime`, optional
        The represented datetime, in UTC.
    """
    try:
        # Normalize: drop commas, collapse double spaces.
        cleaned = datetime_str.replace(",", "").replace("  ", " ")
        # Trailing token is the timezone abbreviation (CET/CEST);
        # strptime cannot parse those names, so split it off first.
        tz_name = cleaned[-4:].strip()
        naive = datetime.datetime.strptime(cleaned[:-4].strip(),
                                           "%b %d %Y %H:%M:%S")
    except (ValueError, AttributeError):
        # Malformed string or non-string input.
        return None
    offsets = {"CET": 1, "CEST": 2}
    if tz_name not in offsets:
        return None
    # Shift the local wall-clock time back to UTC.
    utc_time = naive - datetime.timedelta(hours=offsets[tz_name])
    return utc_time.replace(tzinfo=datetime.timezone.utc)
|
def function[parse_tibia_datetime, parameter[datetime_str]]:
constant[Parses date and time from the format used in Tibia.com
Accepted format:
- ``MMM DD YYYY, HH:mm:ss ZZZ``, e.g. ``Dec 10 2018, 21:53:37 CET``.
Parameters
-------------
datetime_str: :class:`str`
The date and time as represented in Tibia.com
Returns
-----------
:class:`datetime.datetime`, optional
The represented datetime, in UTC.
]
<ast.Try object at 0x7da1b0b6c820>
|
keyword[def] identifier[parse_tibia_datetime] ( identifier[datetime_str] )-> identifier[Optional] [ identifier[datetime] . identifier[datetime] ]:
literal[string]
keyword[try] :
identifier[datetime_str] = identifier[datetime_str] . identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] )
identifier[tz] = identifier[datetime_str] [- literal[int] :]. identifier[strip] ()
identifier[t] = identifier[datetime] . identifier[datetime] . identifier[strptime] ( identifier[datetime_str] [:- literal[int] ]. identifier[strip] (), literal[string] )
keyword[if] identifier[tz] == literal[string] :
identifier[utc_offset] = literal[int]
keyword[elif] identifier[tz] == literal[string] :
identifier[utc_offset] = literal[int]
keyword[else] :
keyword[return] keyword[None]
identifier[t] = identifier[t] - identifier[datetime] . identifier[timedelta] ( identifier[hours] = identifier[utc_offset] )
keyword[return] identifier[t] . identifier[replace] ( identifier[tzinfo] = identifier[datetime] . identifier[timezone] . identifier[utc] )
keyword[except] ( identifier[ValueError] , identifier[AttributeError] ):
keyword[return] keyword[None]
|
def parse_tibia_datetime(datetime_str) -> Optional[datetime.datetime]:
"""Parses date and time from the format used in Tibia.com
Accepted format:
- ``MMM DD YYYY, HH:mm:ss ZZZ``, e.g. ``Dec 10 2018, 21:53:37 CET``.
Parameters
-------------
datetime_str: :class:`str`
The date and time as represented in Tibia.com
Returns
-----------
:class:`datetime.datetime`, optional
The represented datetime, in UTC.
"""
try:
datetime_str = datetime_str.replace(',', '').replace(' ', ' ')
# Extracting timezone
tz = datetime_str[-4:].strip()
# Convert time string to time object
# Removing timezone cause CEST and CET are not supported
t = datetime.datetime.strptime(datetime_str[:-4].strip(), '%b %d %Y %H:%M:%S')
# Getting the offset
if tz == 'CET':
utc_offset = 1 # depends on [control=['if'], data=[]]
elif tz == 'CEST':
utc_offset = 2 # depends on [control=['if'], data=[]]
else:
return None
# Add/subtract hours to get the real time
t = t - datetime.timedelta(hours=utc_offset)
return t.replace(tzinfo=datetime.timezone.utc) # depends on [control=['try'], data=[]]
except (ValueError, AttributeError):
return None # depends on [control=['except'], data=[]]
|
def get(self, attri):
    '''
    Return the data associated with an attribute of this file.

    Parameters
    ----------
    attri : string
        The attribute we are looking for; either a data-column name
        (``self.dcols``) or a header-attribute name (``self.hattrs``).

    Returns
    -------
    The column data via ``getColData`` when ``attri`` is a data column,
    the header attributes when it is a header attribute, and ``None``
    (after printing a notice) otherwise.
    '''
    if attri in self.dcols:
        return self.getColData(attri)
    if attri in self.hattrs:
        # BUG FIX: the original returned the bare name ``hattrs``, which
        # is unbound here and raised a NameError; the instance attribute
        # was clearly intended.
        return self.hattrs
    print("That attribute does not exist in this File")
    print('Returning None')
    return None
|
def function[get, parameter[self, attri]]:
constant[
Method that dynamically determines the type of attribute that is
passed into this method. Also it then returns that attribute's
associated data.
Parameters
----------
attri : string
The attribute we are looking for.
]
variable[isCol] assign[=] constant[False]
variable[isHead] assign[=] constant[False]
if compare[name[attri] in name[self].dcols] begin[:]
variable[isCol] assign[=] constant[True]
if name[isCol] begin[:]
return[call[name[self].getColData, parameter[name[attri]]]]
|
keyword[def] identifier[get] ( identifier[self] , identifier[attri] ):
literal[string]
identifier[isCol] = keyword[False]
identifier[isHead] = keyword[False]
keyword[if] identifier[attri] keyword[in] identifier[self] . identifier[dcols] :
identifier[isCol] = keyword[True]
keyword[elif] identifier[attri] keyword[in] identifier[self] . identifier[hattrs] :
identifier[isHead] = keyword[True]
keyword[else] :
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
keyword[if] identifier[isCol] :
keyword[return] identifier[self] . identifier[getColData] ( identifier[attri] )
keyword[elif] identifier[isHead] :
keyword[return] identifier[hattrs]
|
def get(self, attri):
"""
Method that dynamically determines the type of attribute that is
passed into this method. Also it then returns that attribute's
associated data.
Parameters
----------
attri : string
The attribute we are looking for.
"""
isCol = False
isHead = False
if attri in self.dcols:
isCol = True # depends on [control=['if'], data=[]]
elif attri in self.hattrs:
isHead = True # depends on [control=['if'], data=[]]
else:
print('That attribute does not exist in this File')
print('Returning None')
if isCol:
return self.getColData(attri) # depends on [control=['if'], data=[]]
elif isHead:
return hattrs # depends on [control=['if'], data=[]]
|
def update(self, fieldname, localValue, remoteValue):
    '''
    Returns the appropriate current value, based on the changes
    recorded by this ChangeTracker, the value stored by the server
    (`localValue`), and the value stored by the synchronizing client
    (`remoteValue`). If `remoteValue` conflicts with changes stored
    locally, then a `pysyncml.ConflictError` is raised.
    If a change needs to be applied because `remoteValue` has been
    updated, then the new value will be returned, and this
    ChangeTracker will be updated such that a call to
    :meth:`getChangeSpec` will incorporate the change.
    :param fieldname:
      The name of the fieldname being evaluated.
    :param localValue:
      The value of the field as stored by the server, usually the one that
      also stored the current change-spec. If `localValue` is ``None``,
      then it is assumed that the field was potentially added (this will
      first be verified against the stored change-spec).
    :param remoteValue:
      The new value being presented that may or may not be a source of
      conflict. If `remoteValue` is ``None``, then it is assumed that
      the field was potentially deleted (this will first be verified
      against the stored change-spec).
    '''
    # Identical values: nothing to record, keep what we have.
    if localValue == remoteValue:
        return localValue
    # Classify the proposed change. A missing local value means an add;
    # a missing remote value means a delete; otherwise a modification.
    if localValue is None:
        ct = constants.ITEM_ADDED
    elif remoteValue is None:
        ct = constants.ITEM_DELETED
    else:
        ct = constants.ITEM_MODIFIED
    # isChange() may raise ConflictError, or return None when the
    # change-spec says the local value should win.
    changed = self.isChange(fieldname, ct, remoteValue)
    if changed is None:
        return localValue
    # Record the change so getChangeSpec() reflects it, then apply it.
    self.append(changed, ct, initialValue=localValue, isMd5=False)
    return remoteValue
|
def function[update, parameter[self, fieldname, localValue, remoteValue]]:
constant[
Returns the appropriate current value, based on the changes
recorded by this ChangeTracker, the value stored by the server
(`localValue`), and the value stored by the synchronizing client
(`remoteValue`). If `remoteValue` conflicts with changes stored
locally, then a `pysyncml.ConflictError` is raised.
If a change needs to be applied because `remoteValue` has been
updated, then the new value will be returned, and this
ChangeTracker will be updated such that a call to
:meth:`getChangeSpec` will incorporate the change.
:param fieldname:
The name of the fieldname being evaluated.
:param localValue:
The value of the field as stored by the server, usually the one that
also stored the current change-spec. If `localValue` is ``None``,
then it is assumed that the field was potentially added (this will
first be verified against the stored change-spec).
:param remoteValue:
The new value being presented that may or may not be a source of
conflict. If `remoteValue` is ``None``, then it is assumed that
the field was potentially deleted (this will first be verified
against the stored change-spec).
]
if compare[name[localValue] equal[==] name[remoteValue]] begin[:]
return[name[localValue]]
variable[ct] assign[=] <ast.IfExp object at 0x7da1afe79720>
if compare[name[localValue] is constant[None]] begin[:]
variable[ct] assign[=] name[constants].ITEM_ADDED
variable[changed] assign[=] call[name[self].isChange, parameter[name[fieldname], name[ct], name[remoteValue]]]
if compare[name[changed] is constant[None]] begin[:]
return[name[localValue]]
call[name[self].append, parameter[name[changed], name[ct]]]
return[name[remoteValue]]
|
keyword[def] identifier[update] ( identifier[self] , identifier[fieldname] , identifier[localValue] , identifier[remoteValue] ):
literal[string]
keyword[if] identifier[localValue] == identifier[remoteValue] :
keyword[return] identifier[localValue]
identifier[ct] = identifier[constants] . identifier[ITEM_DELETED] keyword[if] identifier[remoteValue] keyword[is] keyword[None] keyword[else] identifier[constants] . identifier[ITEM_MODIFIED]
keyword[if] identifier[localValue] keyword[is] keyword[None] :
identifier[ct] = identifier[constants] . identifier[ITEM_ADDED]
identifier[changed] = identifier[self] . identifier[isChange] ( identifier[fieldname] , identifier[ct] , identifier[remoteValue] )
keyword[if] identifier[changed] keyword[is] keyword[None] :
keyword[return] identifier[localValue]
identifier[self] . identifier[append] ( identifier[changed] , identifier[ct] , identifier[initialValue] = identifier[localValue] , identifier[isMd5] = keyword[False] )
keyword[return] identifier[remoteValue]
|
def update(self, fieldname, localValue, remoteValue):
"""
Returns the appropriate current value, based on the changes
recorded by this ChangeTracker, the value stored by the server
(`localValue`), and the value stored by the synchronizing client
(`remoteValue`). If `remoteValue` conflicts with changes stored
locally, then a `pysyncml.ConflictError` is raised.
If a change needs to be applied because `remoteValue` has been
updated, then the new value will be returned, and this
ChangeTracker will be updated such that a call to
:meth:`getChangeSpec` will incorporate the change.
:param fieldname:
The name of the fieldname being evaluated.
:param localValue:
The value of the field as stored by the server, usually the one that
also stored the current change-spec. If `localValue` is ``None``,
then it is assumed that the field was potentially added (this will
first be verified against the stored change-spec).
:param remoteValue:
The new value being presented that may or may not be a source of
conflict. If `remoteValue` is ``None``, then it is assumed that
the field was potentially deleted (this will first be verified
against the stored change-spec).
"""
if localValue == remoteValue:
return localValue # depends on [control=['if'], data=['localValue']]
ct = constants.ITEM_DELETED if remoteValue is None else constants.ITEM_MODIFIED
if localValue is None:
ct = constants.ITEM_ADDED # depends on [control=['if'], data=[]]
# todo: i should probably trap irep errors. for example, if this
# cspec has a field "x" marked as deleted, then `localValue`
# must be None... etc.
# TODO: i think this kind of handling would break in ListChangeTracker!...
changed = self.isChange(fieldname, ct, remoteValue)
if changed is None:
return localValue # depends on [control=['if'], data=[]]
self.append(changed, ct, initialValue=localValue, isMd5=False)
return remoteValue
|
def calculate_sunrise_sunset(locator, calc_date=None):
    """calculates the next sunset and sunrise for a Maidenhead locator at a give date & time
    Args:
        locator1 (string): Maidenhead Locator, either 4 or 6 characters
        calc_date (datetime, optional): Starting datetime for the calculations
            (UTC); defaults to the current UTC time.
    Returns:
        dict: Containing datetimes for morning_dawn, sunrise, evening_dawn, sunset
            (each ``None`` when the sun never rises/sets, e.g. polar regions)
    Raises:
        ValueError: When called with wrong or invalid input arg
        AttributeError: When args are not a string
    Example:
        The following calculates the next sunrise & sunset for JN48QM on the 1./Jan/2014
        >>> from pyhamtools.locator import calculate_sunrise_sunset
        >>> from datetime import datetime
        >>> import pytz
        >>> UTC = pytz.UTC
        >>> myDate = datetime(year=2014, month=1, day=1, tzinfo=UTC)
        >>> calculate_sunrise_sunset("JN48QM", myDate)
        {
            'morning_dawn': datetime.datetime(2014, 1, 1, 6, 36, 51, 710524, tzinfo=<UTC>),
            'sunset': datetime.datetime(2014, 1, 1, 16, 15, 23, 31016, tzinfo=<UTC>),
            'evening_dawn': datetime.datetime(2014, 1, 1, 15, 38, 8, 355315, tzinfo=<UTC>),
            'sunrise': datetime.datetime(2014, 1, 1, 7, 14, 6, 162063, tzinfo=<UTC>)
        }
    """
    # BUG FIX: the original signature used ``calc_date=datetime.utcnow()``,
    # which is evaluated once at import time, so every defaulted call used
    # the module-load timestamp instead of the current time.
    if calc_date is None:
        calc_date = datetime.utcnow()

    latitude, longitude = locator_to_latlong(locator)

    # isinstance instead of an exact type() comparison, so datetime
    # subclasses are accepted too.
    if not isinstance(calc_date, datetime):
        raise ValueError

    sun = ephem.Sun()
    home = ephem.Observer()
    home.lat = str(latitude)
    home.long = str(longitude)
    home.date = calc_date
    sun.compute(home)

    morning_dawn = None
    sunrise = None
    evening_dawn = None
    sunset = None
    try:
        nextrise = home.next_rising(sun)
        nextset = home.next_setting(sun)

        # Civil twilight: center of the sun 6 degrees below the horizon.
        home.horizon = '-6'
        beg_twilight = home.next_rising(sun, use_center=True)
        end_twilight = home.next_setting(sun, use_center=True)

        morning_dawn = beg_twilight.datetime()
        sunrise = nextrise.datetime()
        evening_dawn = nextset.datetime()
        sunset = end_twilight.datetime()
    # if sun never sets or rises (e.g. at polar circles) all values stay None
    except (ephem.AlwaysUpError, ephem.NeverUpError):
        pass

    result = {}
    result['morning_dawn'] = morning_dawn
    result['sunrise'] = sunrise
    result['evening_dawn'] = evening_dawn
    result['sunset'] = sunset

    if morning_dawn:
        result['morning_dawn'] = morning_dawn.replace(tzinfo=UTC)
    if sunrise:
        result['sunrise'] = sunrise.replace(tzinfo=UTC)
    if evening_dawn:
        result['evening_dawn'] = evening_dawn.replace(tzinfo=UTC)
    if sunset:
        result['sunset'] = sunset.replace(tzinfo=UTC)
    return result
|
def function[calculate_sunrise_sunset, parameter[locator, calc_date]]:
constant[calculates the next sunset and sunrise for a Maidenhead locator at a give date & time
Args:
locator1 (string): Maidenhead Locator, either 4 or 6 characters
calc_date (datetime, optional): Starting datetime for the calculations (UTC)
Returns:
dict: Containing datetimes for morning_dawn, sunrise, evening_dawn, sunset
Raises:
ValueError: When called with wrong or invalid input arg
AttributeError: When args are not a string
Example:
The following calculates the next sunrise & sunset for JN48QM on the 1./Jan/2014
>>> from pyhamtools.locator import calculate_sunrise_sunset
>>> from datetime import datetime
>>> import pytz
>>> UTC = pytz.UTC
>>> myDate = datetime(year=2014, month=1, day=1, tzinfo=UTC)
>>> calculate_sunrise_sunset("JN48QM", myDate)
{
'morning_dawn': datetime.datetime(2014, 1, 1, 6, 36, 51, 710524, tzinfo=<UTC>),
'sunset': datetime.datetime(2014, 1, 1, 16, 15, 23, 31016, tzinfo=<UTC>),
'evening_dawn': datetime.datetime(2014, 1, 1, 15, 38, 8, 355315, tzinfo=<UTC>),
'sunrise': datetime.datetime(2014, 1, 1, 7, 14, 6, 162063, tzinfo=<UTC>)
}
]
variable[morning_dawn] assign[=] constant[None]
variable[sunrise] assign[=] constant[None]
variable[evening_dawn] assign[=] constant[None]
variable[sunset] assign[=] constant[None]
<ast.Tuple object at 0x7da1b0f717e0> assign[=] call[name[locator_to_latlong], parameter[name[locator]]]
if compare[call[name[type], parameter[name[calc_date]]] not_equal[!=] name[datetime]] begin[:]
<ast.Raise object at 0x7da1b0f71a20>
variable[sun] assign[=] call[name[ephem].Sun, parameter[]]
variable[home] assign[=] call[name[ephem].Observer, parameter[]]
name[home].lat assign[=] call[name[str], parameter[name[latitude]]]
name[home].long assign[=] call[name[str], parameter[name[longitude]]]
name[home].date assign[=] name[calc_date]
call[name[sun].compute, parameter[name[home]]]
<ast.Try object at 0x7da1b0f72080>
variable[result] assign[=] dictionary[[], []]
call[name[result]][constant[morning_dawn]] assign[=] name[morning_dawn]
call[name[result]][constant[sunrise]] assign[=] name[sunrise]
call[name[result]][constant[evening_dawn]] assign[=] name[evening_dawn]
call[name[result]][constant[sunset]] assign[=] name[sunset]
if name[morning_dawn] begin[:]
call[name[result]][constant[morning_dawn]] assign[=] call[name[morning_dawn].replace, parameter[]]
if name[sunrise] begin[:]
call[name[result]][constant[sunrise]] assign[=] call[name[sunrise].replace, parameter[]]
if name[evening_dawn] begin[:]
call[name[result]][constant[evening_dawn]] assign[=] call[name[evening_dawn].replace, parameter[]]
if name[sunset] begin[:]
call[name[result]][constant[sunset]] assign[=] call[name[sunset].replace, parameter[]]
return[name[result]]
|
keyword[def] identifier[calculate_sunrise_sunset] ( identifier[locator] , identifier[calc_date] = identifier[datetime] . identifier[utcnow] ()):
literal[string]
identifier[morning_dawn] = keyword[None]
identifier[sunrise] = keyword[None]
identifier[evening_dawn] = keyword[None]
identifier[sunset] = keyword[None]
identifier[latitude] , identifier[longitude] = identifier[locator_to_latlong] ( identifier[locator] )
keyword[if] identifier[type] ( identifier[calc_date] )!= identifier[datetime] :
keyword[raise] identifier[ValueError]
identifier[sun] = identifier[ephem] . identifier[Sun] ()
identifier[home] = identifier[ephem] . identifier[Observer] ()
identifier[home] . identifier[lat] = identifier[str] ( identifier[latitude] )
identifier[home] . identifier[long] = identifier[str] ( identifier[longitude] )
identifier[home] . identifier[date] = identifier[calc_date]
identifier[sun] . identifier[compute] ( identifier[home] )
keyword[try] :
identifier[nextrise] = identifier[home] . identifier[next_rising] ( identifier[sun] )
identifier[nextset] = identifier[home] . identifier[next_setting] ( identifier[sun] )
identifier[home] . identifier[horizon] = literal[string]
identifier[beg_twilight] = identifier[home] . identifier[next_rising] ( identifier[sun] , identifier[use_center] = keyword[True] )
identifier[end_twilight] = identifier[home] . identifier[next_setting] ( identifier[sun] , identifier[use_center] = keyword[True] )
identifier[morning_dawn] = identifier[beg_twilight] . identifier[datetime] ()
identifier[sunrise] = identifier[nextrise] . identifier[datetime] ()
identifier[evening_dawn] = identifier[nextset] . identifier[datetime] ()
identifier[sunset] = identifier[end_twilight] . identifier[datetime] ()
keyword[except] identifier[ephem] . identifier[AlwaysUpError] keyword[as] identifier[e] :
identifier[morning_dawn] = keyword[None]
identifier[sunrise] = keyword[None]
identifier[evening_dawn] = keyword[None]
identifier[sunset] = keyword[None]
keyword[except] identifier[ephem] . identifier[NeverUpError] keyword[as] identifier[e] :
identifier[morning_dawn] = keyword[None]
identifier[sunrise] = keyword[None]
identifier[evening_dawn] = keyword[None]
identifier[sunset] = keyword[None]
identifier[result] ={}
identifier[result] [ literal[string] ]= identifier[morning_dawn]
identifier[result] [ literal[string] ]= identifier[sunrise]
identifier[result] [ literal[string] ]= identifier[evening_dawn]
identifier[result] [ literal[string] ]= identifier[sunset]
keyword[if] identifier[morning_dawn] :
identifier[result] [ literal[string] ]= identifier[morning_dawn] . identifier[replace] ( identifier[tzinfo] = identifier[UTC] )
keyword[if] identifier[sunrise] :
identifier[result] [ literal[string] ]= identifier[sunrise] . identifier[replace] ( identifier[tzinfo] = identifier[UTC] )
keyword[if] identifier[evening_dawn] :
identifier[result] [ literal[string] ]= identifier[evening_dawn] . identifier[replace] ( identifier[tzinfo] = identifier[UTC] )
keyword[if] identifier[sunset] :
identifier[result] [ literal[string] ]= identifier[sunset] . identifier[replace] ( identifier[tzinfo] = identifier[UTC] )
keyword[return] identifier[result]
|
def calculate_sunrise_sunset(locator, calc_date=datetime.utcnow()):
    """calculates the next sunset and sunrise for a Maidenhead locator at a given date & time
    Args:
        locator (string): Maidenhead Locator, either 4 or 6 characters
        calc_date (datetime, optional): Starting datetime for the calculations (UTC)
    Returns:
        dict: Containing datetimes for morning_dawn, sunrise, evening_dawn, sunset
    Raises:
        ValueError: When called with wrong or invalid input arg
        AttributeError: When args are not a string
    Example:
        The following calculates the next sunrise & sunset for JN48QM on the 1./Jan/2014
        >>> from pyhamtools.locator import calculate_sunrise_sunset
        >>> from datetime import datetime
        >>> import pytz
        >>> UTC = pytz.UTC
        >>> myDate = datetime(year=2014, month=1, day=1, tzinfo=UTC)
        >>> calculate_sunrise_sunset("JN48QM", myDate)
        {
            'morning_dawn': datetime.datetime(2014, 1, 1, 6, 36, 51, 710524, tzinfo=<UTC>),
            'sunset': datetime.datetime(2014, 1, 1, 16, 15, 23, 31016, tzinfo=<UTC>),
            'evening_dawn': datetime.datetime(2014, 1, 1, 15, 38, 8, 355315, tzinfo=<UTC>),
            'sunrise': datetime.datetime(2014, 1, 1, 7, 14, 6, 162063, tzinfo=<UTC>)
        }
    """
    # NOTE(review): the trailing "# depends on [control=...]" markers are
    # machine-generated control-dependency annotations; they are preserved
    # line-for-line, which is why this copy is only documented, not restyled.
    # NOTE(review): calc_date=datetime.utcnow() is evaluated once at import
    # time, so defaulted calls reuse a stale timestamp — confirm intended.
    morning_dawn = None
    sunrise = None
    evening_dawn = None
    sunset = None
    (latitude, longitude) = locator_to_latlong(locator)
    if type(calc_date) != datetime:
        raise ValueError # depends on [control=['if'], data=[]]
    sun = ephem.Sun()
    home = ephem.Observer()
    home.lat = str(latitude)
    home.long = str(longitude)
    home.date = calc_date
    sun.compute(home)
    try:
        # True rise/set at horizon 0, then twilight crossings at -6 degrees.
        nextrise = home.next_rising(sun)
        nextset = home.next_setting(sun)
        home.horizon = '-6'
        beg_twilight = home.next_rising(sun, use_center=True)
        end_twilight = home.next_setting(sun, use_center=True)
        morning_dawn = beg_twilight.datetime()
        sunrise = nextrise.datetime()
        # NOTE(review): evening_dawn/sunset look swapped relative to the
        # morning pair (dawn = -6 degree crossing, sunrise = true rising) —
        # confirm which value each key is intended to carry.
        evening_dawn = nextset.datetime()
        sunset = end_twilight.datetime() # depends on [control=['try'], data=[]]
    #if sun never sets or rises (e.g. at polar circles)
    except ephem.AlwaysUpError as e:
        morning_dawn = None
        sunrise = None
        evening_dawn = None
        sunset = None # depends on [control=['except'], data=[]]
    except ephem.NeverUpError as e:
        morning_dawn = None
        sunrise = None
        evening_dawn = None
        sunset = None # depends on [control=['except'], data=[]]
    result = {}
    result['morning_dawn'] = morning_dawn
    result['sunrise'] = sunrise
    result['evening_dawn'] = evening_dawn
    result['sunset'] = sunset
    # Tag each computed (non-None) value as UTC.
    if morning_dawn:
        result['morning_dawn'] = morning_dawn.replace(tzinfo=UTC) # depends on [control=['if'], data=[]]
    if sunrise:
        result['sunrise'] = sunrise.replace(tzinfo=UTC) # depends on [control=['if'], data=[]]
    if evening_dawn:
        result['evening_dawn'] = evening_dawn.replace(tzinfo=UTC) # depends on [control=['if'], data=[]]
    if sunset:
        result['sunset'] = sunset.replace(tzinfo=UTC) # depends on [control=['if'], data=[]]
    return result
|
def main(**kwargs):
    """
    Entry point for dx-app-wizard.

    Interactively collects app metadata, input/output specifications, a
    timeout policy, template language/pattern, access permissions and a
    default instance type, then writes dxapp.json and template source
    files into a freshly created app directory.

    Note that this function is not meant to be used as a subroutine in your program:
    it reads the module-level ``args`` namespace, prompts on stdin, and calls
    sys.exit(1) on failure.
    """
    manifest = []  # paths of everything created, listed for the user at the end
    print_intro(API_VERSION)
    if args.json_file is not None:
        # Seed the app spec from an existing dxapp.json, re-confirming
        # name and version interactively.
        with open(args.json_file, 'r') as json_file:
            app_json = json.loads(json_file.read())
            # Re-confirm the name
            name = get_name(default=args.name or app_json.get('name'))
            app_json['name'] = name
            version = get_version(default=app_json.get('version'))
            app_json['version'] = version
        try:
            os.mkdir(app_json['name'])
        # BUGFIX: narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
        # are not swallowed; os.mkdir failures surface as OSError
        # (FileExistsError, PermissionError, ...).
        except OSError:
            sys.stderr.write(fill('''Unable to create a directory for %s, please check that it is a valid app name and the working directory exists and is writable.''' % app_json['name']) + '\n')
            sys.exit(1)
    else:
        ##################
        # BASIC METADATA #
        ##################
        name = get_name(default=args.name)
        try:
            os.mkdir(name)
        # BUGFIX: narrowed from a bare "except:" (see above).
        except OSError:
            sys.stderr.write(fill('''Unable to create a directory for %s, please check that it is a valid app name and the working directory exists and is writable.''' % name) + '\n')
            sys.exit(1)
        title, summary = get_metadata(API_VERSION)
        version = get_version()
        app_json = OrderedDict()
        app_json["name"] = name
        app_json["title"] = title or name
        app_json['summary'] = summary or name
        app_json["dxapi"] = API_VERSION
        app_json["version"] = version
    ############
    # IO SPECS #
    ############
    class_completer = Completer(['int', 'float', 'string', 'boolean', 'hash',
                                 'array:int', 'array:float', 'array:string', 'array:boolean',
                                 'record', 'file', 'applet',
                                 'array:record', 'array:file', 'array:applet'])
    bool_completer = Completer(['true', 'false'])
    print('')
    print(BOLD() + 'Input Specification' + ENDC())
    print('')
    input_spec = True
    input_names = []
    printed_classes = False
    if input_spec:
        app_json['inputSpec'] = []
        print(fill('You will now be prompted for each input parameter to your app. Each parameter should have a unique name that uses only the underscore "_" and alphanumeric characters, and does not start with a number.'))
        # Keep prompting for input parameters until the user enters a blank name.
        while True:
            print('')
            ordinal = get_ordinal_str(len(app_json['inputSpec']) + 1)
            input_name = prompt_for_var(ordinal + ' input name (<ENTER> to finish)', allow_empty=True)
            if input_name == '':
                break
            if input_name in input_names:
                print(fill('Error: Cannot use the same input parameter name twice. Please choose again.'))
                continue
            if not IO_NAME_PATTERN.match(input_name):
                print(fill('Error: Parameter names may use only underscore "_", ASCII letters, and digits; and may not start with a digit. Please choose again.'))
                continue
            input_names.append(input_name)
            input_label = prompt_for_var('Label (optional human-readable name)', '')
            use_completer(class_completer)
            if not printed_classes:
                print('Your input parameter must be of one of the following classes:')
                print('''applet         array:file     array:record   file           int
array:applet   array:float    array:string   float          record
array:boolean  array:int      boolean        hash           string
''')
                printed_classes = True
            while True:
                input_class = prompt_for_var('Choose a class (<TAB> twice for choices)')
                if input_class in class_completer.choices:
                    break
                else:
                    print(fill('Not a recognized class; please choose again.'))
            use_completer()
            optional = prompt_for_yn('This is an optional parameter')
            default_val = None
            # Only optional scalar classes may carry an inline default value.
            if optional and input_class in ['int', 'float', 'string', 'boolean']:
                default_val = prompt_for_yn('A default value should be provided')
                if default_val:
                    while True:
                        if input_class == 'boolean':
                            use_completer(bool_completer)
                            default_val = prompt_for_var(' Default value', choices=['true', 'false'])
                            use_completer()
                        elif input_class == 'string':
                            default_val = prompt_for_var(' Default value', allow_empty=True)
                        else:
                            default_val = prompt_for_var(' Default value')
                        try:
                            if input_class == 'boolean':
                                default_val = (default_val == 'true')
                            elif input_class == 'int':
                                default_val = int(default_val)
                            elif input_class == 'float':
                                default_val = float(default_val)
                            break
                        # BUGFIX: narrowed from a bare "except:" — int()/float()
                        # reject malformed input with ValueError; keep prompting.
                        except ValueError:
                            print('Not a valid default value for the given class ' + input_class)
                else:
                    default_val = None
            # Fill in the input parameter's JSON
            parameter_json = OrderedDict()
            parameter_json["name"] = input_name
            if input_label != '':
                parameter_json['label'] = input_label
            parameter_json["class"] = input_class
            parameter_json["optional"] = optional
            if default_val is not None:
                parameter_json['default'] = default_val
            # Fill in patterns and blank help string
            if input_class == 'file' or input_class == 'array:file':
                parameter_json["patterns"] = ["*"]
            parameter_json["help"] = ""
            app_json['inputSpec'].append(parameter_json)
    print('')
    print(BOLD() + 'Output Specification' + ENDC())
    print('')
    output_spec = True
    output_names = []
    if output_spec:
        app_json['outputSpec'] = []
        print(fill('You will now be prompted for each output parameter of your app. Each parameter should have a unique name that uses only the underscore "_" and alphanumeric characters, and does not start with a number.'))
        # Same prompt loop as above, for output parameters.
        while True:
            print('')
            ordinal = get_ordinal_str(len(app_json['outputSpec']) + 1)
            output_name = prompt_for_var(ordinal + ' output name (<ENTER> to finish)', allow_empty=True)
            if output_name == '':
                break
            if output_name in output_names:
                print(fill('Error: Cannot use the same output parameter name twice. Please choose again.'))
                continue
            if not IO_NAME_PATTERN.match(output_name):
                print(fill('Error: Parameter names may use only underscore "_", ASCII letters, and digits; and may not start with a digit. Please choose again.'))
                continue
            output_names.append(output_name)
            output_label = prompt_for_var('Label (optional human-readable name)', '')
            use_completer(class_completer)
            if not printed_classes:
                print('Your output parameter must be of one of the following classes:')
                print('''applet         array:file     array:record   file           int
array:applet   array:float    array:string   float          record
array:boolean  array:int      boolean        hash           string''')
                printed_classes = True
            while True:
                output_class = prompt_for_var('Choose a class (<TAB> twice for choices)')
                if output_class in class_completer.choices:
                    break
                else:
                    print(fill('Not a recognized class; please choose again.'))
            use_completer()
            # Fill in the output parameter's JSON
            parameter_json = OrderedDict()
            parameter_json["name"] = output_name
            if output_label != '':
                parameter_json['label'] = output_label
            parameter_json["class"] = output_class
            # Fill in patterns and blank help string
            if output_class == 'file' or output_class == 'array:file':
                parameter_json["patterns"] = ["*"]
            parameter_json["help"] = ""
            app_json['outputSpec'].append(parameter_json)
    # Partition file-typed inputs (required/optional, scalar/array) so the
    # templates can generate download code for each category separately.
    required_file_input_names = []
    optional_file_input_names = []
    required_file_array_input_names = []
    optional_file_array_input_names = []
    file_output_names = []
    if 'inputSpec' in app_json:
        for param in app_json['inputSpec']:
            # A parameter can be absent at runtime only if optional AND defaultless.
            may_be_missing = param['optional'] and "default" not in param
            if param['class'] == 'file':
                param_list = optional_file_input_names if may_be_missing else required_file_input_names
            elif param['class'] == 'array:file':
                param_list = optional_file_array_input_names if may_be_missing else required_file_array_input_names
            else:
                param_list = None
            if param_list is not None:
                param_list.append(param['name'])
    if 'outputSpec' in app_json:
        file_output_names = [param['name'] for param in app_json['outputSpec'] if param['class'] == 'file']
    ##################
    # TIMEOUT POLICY #
    ##################
    print('')
    print(BOLD() + 'Timeout Policy' + ENDC())
    # NOTE: this replaces any runSpec carried over from --json-file.
    app_json["runSpec"] = OrderedDict({})
    app_json['runSpec'].setdefault('timeoutPolicy', {})
    timeout, timeout_units = get_timeout(default=app_json['runSpec']['timeoutPolicy'].get('*'))
    app_json['runSpec']['timeoutPolicy'].setdefault('*', {})
    app_json['runSpec']['timeoutPolicy']['*'].setdefault(timeout_units, timeout)
    ########################
    # LANGUAGE AND PATTERN #
    ########################
    print('')
    print(BOLD() + 'Template Options' + ENDC())
    # Prompt for programming language if not specified
    language = args.language if args.language is not None else get_language()
    interpreter = language_options[language].get_interpreter()
    app_json["runSpec"]["interpreter"] = interpreter
    # Prompt the execution pattern iff the args.pattern is provided and invalid
    template_dir = os.path.join(os.path.dirname(dxpy.__file__), 'templating', 'templates', language_options[language].get_path())
    if not os.path.isdir(os.path.join(template_dir, args.template)):
        print(fill('The execution pattern "' + args.template + '" is not available for your programming language.'))
        pattern = get_pattern(template_dir)
    else:
        pattern = args.template
    template_dir = os.path.join(template_dir, pattern)
    # Merge the template's runSpec entries (e.g. interpreter-specific keys)
    # into the spec we are building.
    with open(os.path.join(template_dir, 'dxapp.json'), 'r') as template_app_json_file:
        file_text = fill_in_name_and_ver(template_app_json_file.read(), name, version)
        template_app_json = json.loads(file_text)
        for key in template_app_json['runSpec']:
            app_json['runSpec'][key] = template_app_json['runSpec'][key]
    if (language == args.language) and (pattern == args.template):
        print('All template options are supplied in the arguments.')
    ##########################
    # APP ACCESS PERMISSIONS #
    ##########################
    print('')
    print(BOLD('Access Permissions'))
    print(fill('''If you request these extra permissions for your app, users will see this fact when launching your app, and certain other restrictions will apply. For more information, see ''' +
               BOLD('https://wiki.dnanexus.com/App-Permissions') + '.'))
    print('')
    print(fill(UNDERLINE('Access to the Internet') + ' (other than accessing the DNAnexus API).'))
    if prompt_for_yn("Will this app need access to the Internet?", default=False):
        app_json.setdefault('access', {})
        app_json['access']['network'] = ['*']
        print(fill('App has full access to the Internet. To narrow access to specific sites, edit the ' +
                   UNDERLINE('access.network') + ' field of dxapp.json once we generate the app.'))
    print('')
    print(fill(UNDERLINE('Direct access to the parent project') + '''. This is not needed if your app specifies outputs,
which will be copied into the project after it's done running.'''))
    if prompt_for_yn("Will this app need access to the parent project?", default=False):
        app_json.setdefault('access', {})
        app_json['access']['project'] = 'CONTRIBUTE'
        print(fill('App has CONTRIBUTE access to the parent project. To change the access level or request access to ' +
                   'other projects, edit the ' + UNDERLINE('access.project') + ' and ' + UNDERLINE('access.allProjects') +
                   ' fields of dxapp.json once we generate the app.'))
    #######################
    # SYSTEM REQUIREMENTS #
    #######################
    print('')
    print(BOLD('System Requirements'))
    print('')
    print(BOLD('Common AWS instance types:'))
    print(format_table(InstanceTypesCompleter.aws_preferred_instance_types.values(),
                       column_names=list(InstanceTypesCompleter.instance_types.values())[0]._fields))
    print(BOLD('Common Azure instance types:'))
    print(format_table(InstanceTypesCompleter.azure_preferred_instance_types.values(),
                       column_names=list(InstanceTypesCompleter.instance_types.values())[0]._fields))
    print(fill(BOLD('Default instance type:') + ' The instance type you select here will apply to all entry points in ' +
               'your app unless you override it. See ' +
               BOLD('https://wiki.dnanexus.com/API-Specification-v1.0.0/Instance-Types') + ' for more information.'))
    use_completer(InstanceTypesCompleter())
    instance_type = prompt_for_var('Choose an instance type for your app',
                                   default=InstanceTypesCompleter.default_instance_type.Name,
                                   choices=list(InstanceTypesCompleter.instance_types))
    # Azure-preferred instance names imply an Azure region; anything else
    # defaults to the AWS region.
    target_region = DEFAULT_REGION_AWS
    if instance_type in InstanceTypesCompleter.azure_preferred_instance_types.keys():
        target_region = DEFAULT_REGION_AZURE
    app_json['regionalOptions'] = OrderedDict({})
    app_json['regionalOptions'][target_region] = OrderedDict({})
    app_json['regionalOptions'][target_region].setdefault('systemRequirements', {})
    app_json['regionalOptions'][target_region]['systemRequirements'].setdefault('*', {})
    app_json['regionalOptions'][target_region]['systemRequirements']['*']['instanceType'] = instance_type
    ######################
    # HARDCODED DEFAULTS #
    ######################
    app_json['runSpec']['distribution'] = 'Ubuntu'
    app_json['runSpec']['release'] = '16.04'
    #################
    # WRITING FILES #
    #################
    print('')
    print(BOLD() + '*** Generating ' + DNANEXUS_LOGO() + BOLD() + ' App Template... ***' + ENDC())
    with open(os.path.join(name, 'dxapp.json'), 'w') as prog_file:
        prog_file.write(clean(json.dumps(app_json, indent=2)) + '\n')
    manifest.append(os.path.join(name, 'dxapp.json'))
    print('')
    print(fill('''Your app specification has been written to the
dxapp.json file. You can specify more app options by editing this file
directly (see https://wiki.dnanexus.com/Developer-Portal for complete
documentation).''' + (''' Note that without an input and output specification,
your app can only be built as an APPLET on the system. To publish it to
the DNAnexus community, you must first specify your inputs and outputs.
''' if not ('inputSpec' in app_json and 'outputSpec' in app_json) else "")))
    print('')
    for subdir in 'src', 'test', 'resources':
        try:
            os.mkdir(os.path.join(name, subdir))
            manifest.append(os.path.join(name, subdir, ''))
        # BUGFIX: narrowed from a bare "except:" — directory creation fails
        # with OSError; anything else (e.g. Ctrl-C) should propagate.
        except OSError:
            sys.stderr.write("Unable to create subdirectory %s/%s" % (name, subdir))
            sys.exit(1)
    # Parallel execution patterns get extra entry points in the template.
    entry_points = ['main']
    if pattern == 'parallelized':
        entry_points = ['main', 'process', 'postprocess']
    elif pattern == 'scatter-process-gather':
        entry_points = ['main', 'scatter', 'map', 'process', 'postprocess']
    manifest += create_files_from_templates(template_dir, app_json, language,
                                            required_file_input_names, optional_file_input_names,
                                            required_file_array_input_names, optional_file_array_input_names,
                                            file_output_names, pattern,
                                            description='<!-- Insert a description of your app here -->',
                                            entry_points=entry_points)
    print("Created files:")
    for filename in sorted(manifest):
        print("\t", filename)
    print("\n" + fill('''App directory created! See
https://wiki.dnanexus.com/Developer-Portal for tutorials on how to modify these files,
or run "dx build {n}" or "dx build --create-app {n}" while logged in with dx.'''.format(n=name)) + "\n")
    print(fill('''Running the DNAnexus build utility will create an executable on the DNAnexus platform. Any files found in the ''' +
               BOLD() + 'resources' + ENDC() +
               ''' directory will be uploaded so that they will be present in the root directory when the executable is run.'''))
|
def function[main, parameter[]]:
constant[
Entry point for dx-app-wizard.
Note that this function is not meant to be used as a subroutine in your program.
]
variable[manifest] assign[=] list[[]]
call[name[print_intro], parameter[name[API_VERSION]]]
if compare[name[args].json_file is_not constant[None]] begin[:]
with call[name[open], parameter[name[args].json_file, constant[r]]] begin[:]
variable[app_json] assign[=] call[name[json].loads, parameter[call[name[json_file].read, parameter[]]]]
variable[name] assign[=] call[name[get_name], parameter[]]
call[name[app_json]][constant[name]] assign[=] name[name]
variable[version] assign[=] call[name[get_version], parameter[]]
call[name[app_json]][constant[version]] assign[=] name[version]
<ast.Try object at 0x7da1b05772e0>
variable[required_file_input_names] assign[=] list[[]]
variable[optional_file_input_names] assign[=] list[[]]
variable[required_file_array_input_names] assign[=] list[[]]
variable[optional_file_array_input_names] assign[=] list[[]]
variable[file_output_names] assign[=] list[[]]
if compare[constant[inputSpec] in name[app_json]] begin[:]
for taget[name[param]] in starred[call[name[app_json]][constant[inputSpec]]] begin[:]
variable[may_be_missing] assign[=] <ast.BoolOp object at 0x7da1b0510af0>
if compare[call[name[param]][constant[class]] equal[==] constant[file]] begin[:]
variable[param_list] assign[=] <ast.IfExp object at 0x7da1b05110c0>
if compare[name[param_list] is_not constant[None]] begin[:]
call[name[param_list].append, parameter[call[name[param]][constant[name]]]]
if compare[constant[outputSpec] in name[app_json]] begin[:]
variable[file_output_names] assign[=] <ast.ListComp object at 0x7da1b0511090>
call[name[print], parameter[constant[]]]
call[name[print], parameter[binary_operation[binary_operation[call[name[BOLD], parameter[]] + constant[Timeout Policy]] + call[name[ENDC], parameter[]]]]]
call[name[app_json]][constant[runSpec]] assign[=] call[name[OrderedDict], parameter[dictionary[[], []]]]
call[call[name[app_json]][constant[runSpec]].setdefault, parameter[constant[timeoutPolicy], dictionary[[], []]]]
<ast.Tuple object at 0x7da204961db0> assign[=] call[name[get_timeout], parameter[]]
call[call[call[name[app_json]][constant[runSpec]]][constant[timeoutPolicy]].setdefault, parameter[constant[*], dictionary[[], []]]]
call[call[call[call[name[app_json]][constant[runSpec]]][constant[timeoutPolicy]]][constant[*]].setdefault, parameter[name[timeout_units], name[timeout]]]
call[name[print], parameter[constant[]]]
call[name[print], parameter[binary_operation[binary_operation[call[name[BOLD], parameter[]] + constant[Template Options]] + call[name[ENDC], parameter[]]]]]
variable[language] assign[=] <ast.IfExp object at 0x7da20c76cc10>
variable[interpreter] assign[=] call[call[name[language_options]][name[language]].get_interpreter, parameter[]]
call[call[name[app_json]][constant[runSpec]]][constant[interpreter]] assign[=] name[interpreter]
variable[template_dir] assign[=] call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[dxpy].__file__]], constant[templating], constant[templates], call[call[name[language_options]][name[language]].get_path, parameter[]]]]
if <ast.UnaryOp object at 0x7da20c76e080> begin[:]
call[name[print], parameter[call[name[fill], parameter[binary_operation[binary_operation[constant[The execution pattern "] + name[args].template] + constant[" is not available for your programming language.]]]]]]
variable[pattern] assign[=] call[name[get_pattern], parameter[name[template_dir]]]
variable[template_dir] assign[=] call[name[os].path.join, parameter[name[template_dir], name[pattern]]]
with call[name[open], parameter[call[name[os].path.join, parameter[name[template_dir], constant[dxapp.json]]], constant[r]]] begin[:]
variable[file_text] assign[=] call[name[fill_in_name_and_ver], parameter[call[name[template_app_json_file].read, parameter[]], name[name], name[version]]]
variable[template_app_json] assign[=] call[name[json].loads, parameter[name[file_text]]]
for taget[name[key]] in starred[call[name[template_app_json]][constant[runSpec]]] begin[:]
call[call[name[app_json]][constant[runSpec]]][name[key]] assign[=] call[call[name[template_app_json]][constant[runSpec]]][name[key]]
if <ast.BoolOp object at 0x7da20c990be0> begin[:]
call[name[print], parameter[constant[All template options are supplied in the arguments.]]]
call[name[print], parameter[constant[]]]
call[name[print], parameter[call[name[BOLD], parameter[constant[Access Permissions]]]]]
call[name[print], parameter[call[name[fill], parameter[binary_operation[binary_operation[constant[If you request these extra permissions for your app, users will see this fact when launching your app, and certain other restrictions will apply. For more information, see ] + call[name[BOLD], parameter[constant[https://wiki.dnanexus.com/App-Permissions]]]] + constant[.]]]]]]
call[name[print], parameter[constant[]]]
call[name[print], parameter[call[name[fill], parameter[binary_operation[call[name[UNDERLINE], parameter[constant[Access to the Internet]]] + constant[ (other than accessing the DNAnexus API).]]]]]]
if call[name[prompt_for_yn], parameter[constant[Will this app need access to the Internet?]]] begin[:]
call[name[app_json].setdefault, parameter[constant[access], dictionary[[], []]]]
call[call[name[app_json]][constant[access]]][constant[network]] assign[=] list[[<ast.Constant object at 0x7da20c9905b0>]]
call[name[print], parameter[call[name[fill], parameter[binary_operation[binary_operation[constant[App has full access to the Internet. To narrow access to specific sites, edit the ] + call[name[UNDERLINE], parameter[constant[access.network]]]] + constant[ field of dxapp.json once we generate the app.]]]]]]
call[name[print], parameter[constant[]]]
call[name[print], parameter[call[name[fill], parameter[binary_operation[call[name[UNDERLINE], parameter[constant[Direct access to the parent project]]] + constant[. This is not needed if your app specifies outputs,
which will be copied into the project after it's done running.]]]]]]
if call[name[prompt_for_yn], parameter[constant[Will this app need access to the parent project?]]] begin[:]
call[name[app_json].setdefault, parameter[constant[access], dictionary[[], []]]]
call[call[name[app_json]][constant[access]]][constant[project]] assign[=] constant[CONTRIBUTE]
call[name[print], parameter[call[name[fill], parameter[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[App has CONTRIBUTE access to the parent project. To change the access level or request access to ] + constant[other projects, edit the ]] + call[name[UNDERLINE], parameter[constant[access.project]]]] + constant[ and ]] + call[name[UNDERLINE], parameter[constant[access.allProjects]]]] + constant[ fields of dxapp.json once we generate the app.]]]]]]
call[name[print], parameter[constant[]]]
call[name[print], parameter[call[name[BOLD], parameter[constant[System Requirements]]]]]
call[name[print], parameter[constant[]]]
call[name[print], parameter[call[name[BOLD], parameter[constant[Common AWS instance types:]]]]]
call[name[print], parameter[call[name[format_table], parameter[call[name[InstanceTypesCompleter].aws_preferred_instance_types.values, parameter[]]]]]]
call[name[print], parameter[call[name[BOLD], parameter[constant[Common Azure instance types:]]]]]
call[name[print], parameter[call[name[format_table], parameter[call[name[InstanceTypesCompleter].azure_preferred_instance_types.values, parameter[]]]]]]
call[name[print], parameter[call[name[fill], parameter[binary_operation[binary_operation[binary_operation[binary_operation[call[name[BOLD], parameter[constant[Default instance type:]]] + constant[ The instance type you select here will apply to all entry points in ]] + constant[your app unless you override it. See ]] + call[name[BOLD], parameter[constant[https://wiki.dnanexus.com/API-Specification-v1.0.0/Instance-Types]]]] + constant[ for more information.]]]]]]
call[name[use_completer], parameter[call[name[InstanceTypesCompleter], parameter[]]]]
variable[instance_type] assign[=] call[name[prompt_for_var], parameter[constant[Choose an instance type for your app]]]
variable[target_region] assign[=] name[DEFAULT_REGION_AWS]
if compare[name[instance_type] in call[name[InstanceTypesCompleter].azure_preferred_instance_types.keys, parameter[]]] begin[:]
variable[target_region] assign[=] name[DEFAULT_REGION_AZURE]
call[name[app_json]][constant[regionalOptions]] assign[=] call[name[OrderedDict], parameter[dictionary[[], []]]]
call[call[name[app_json]][constant[regionalOptions]]][name[target_region]] assign[=] call[name[OrderedDict], parameter[dictionary[[], []]]]
call[call[call[name[app_json]][constant[regionalOptions]]][name[target_region]].setdefault, parameter[constant[systemRequirements], dictionary[[], []]]]
call[call[call[call[name[app_json]][constant[regionalOptions]]][name[target_region]]][constant[systemRequirements]].setdefault, parameter[constant[*], dictionary[[], []]]]
call[call[call[call[call[name[app_json]][constant[regionalOptions]]][name[target_region]]][constant[systemRequirements]]][constant[*]]][constant[instanceType]] assign[=] name[instance_type]
call[call[name[app_json]][constant[runSpec]]][constant[distribution]] assign[=] constant[Ubuntu]
call[call[name[app_json]][constant[runSpec]]][constant[release]] assign[=] constant[16.04]
call[name[print], parameter[constant[]]]
call[name[print], parameter[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[call[name[BOLD], parameter[]] + constant[*** Generating ]] + call[name[DNANEXUS_LOGO], parameter[]]] + call[name[BOLD], parameter[]]] + constant[ App Template... ***]] + call[name[ENDC], parameter[]]]]]
with call[name[open], parameter[call[name[os].path.join, parameter[name[name], constant[dxapp.json]]], constant[w]]] begin[:]
call[name[prog_file].write, parameter[binary_operation[call[name[clean], parameter[call[name[json].dumps, parameter[name[app_json]]]]] + constant[
]]]]
call[name[manifest].append, parameter[call[name[os].path.join, parameter[name[name], constant[dxapp.json]]]]]
call[name[print], parameter[constant[]]]
call[name[print], parameter[call[name[fill], parameter[binary_operation[constant[Your app specification has been written to the
dxapp.json file. You can specify more app options by editing this file
directly (see https://wiki.dnanexus.com/Developer-Portal for complete
documentation).] + <ast.IfExp object at 0x7da18eb54940>]]]]]
call[name[print], parameter[constant[]]]
for taget[name[subdir]] in starred[tuple[[<ast.Constant object at 0x7da18eb570a0>, <ast.Constant object at 0x7da18eb576d0>, <ast.Constant object at 0x7da18eb577f0>]]] begin[:]
<ast.Try object at 0x7da18eb54d30>
variable[entry_points] assign[=] list[[<ast.Constant object at 0x7da18eb54fd0>]]
if compare[name[pattern] equal[==] constant[parallelized]] begin[:]
variable[entry_points] assign[=] list[[<ast.Constant object at 0x7da18eb55510>, <ast.Constant object at 0x7da18eb56650>, <ast.Constant object at 0x7da18eb55390>]]
<ast.AugAssign object at 0x7da18eb55f90>
call[name[print], parameter[constant[Created files:]]]
for taget[name[filename]] in starred[call[name[sorted], parameter[name[manifest]]]] begin[:]
call[name[print], parameter[constant[ ], name[filename]]]
call[name[print], parameter[binary_operation[binary_operation[constant[
] + call[name[fill], parameter[call[constant[App directory created! See
https://wiki.dnanexus.com/Developer-Portal for tutorials on how to modify these files,
or run "dx build {n}" or "dx build --create-app {n}" while logged in with dx.].format, parameter[]]]]] + constant[
]]]]
call[name[print], parameter[call[name[fill], parameter[binary_operation[binary_operation[binary_operation[binary_operation[constant[Running the DNAnexus build utility will create an executable on the DNAnexus platform. Any files found in the ] + call[name[BOLD], parameter[]]] + constant[resources]] + call[name[ENDC], parameter[]]] + constant[ directory will be uploaded so that they will be present in the root directory when the executable is run.]]]]]]
|
keyword[def] identifier[main] (** identifier[kwargs] ):
literal[string]
identifier[manifest] =[]
identifier[print_intro] ( identifier[API_VERSION] )
keyword[if] identifier[args] . identifier[json_file] keyword[is] keyword[not] keyword[None] :
keyword[with] identifier[open] ( identifier[args] . identifier[json_file] , literal[string] ) keyword[as] identifier[json_file] :
identifier[app_json] = identifier[json] . identifier[loads] ( identifier[json_file] . identifier[read] ())
identifier[name] = identifier[get_name] ( identifier[default] = identifier[args] . identifier[name] keyword[or] identifier[app_json] . identifier[get] ( literal[string] ))
identifier[app_json] [ literal[string] ]= identifier[name]
identifier[version] = identifier[get_version] ( identifier[default] = identifier[app_json] . identifier[get] ( literal[string] ))
identifier[app_json] [ literal[string] ]= identifier[version]
keyword[try] :
identifier[os] . identifier[mkdir] ( identifier[app_json] [ literal[string] ])
keyword[except] :
identifier[sys] . identifier[stderr] . identifier[write] ( identifier[fill] ( literal[string] % identifier[app_json] [ literal[string] ])+ literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[else] :
identifier[name] = identifier[get_name] ( identifier[default] = identifier[args] . identifier[name] )
keyword[try] :
identifier[os] . identifier[mkdir] ( identifier[name] )
keyword[except] :
identifier[sys] . identifier[stderr] . identifier[write] ( identifier[fill] ( literal[string] % identifier[name] )+ literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
identifier[title] , identifier[summary] = identifier[get_metadata] ( identifier[API_VERSION] )
identifier[version] = identifier[get_version] ()
identifier[app_json] = identifier[OrderedDict] ()
identifier[app_json] [ literal[string] ]= identifier[name]
identifier[app_json] [ literal[string] ]= identifier[title] keyword[or] identifier[name]
identifier[app_json] [ literal[string] ]= identifier[summary] keyword[or] identifier[name]
identifier[app_json] [ literal[string] ]= identifier[API_VERSION]
identifier[app_json] [ literal[string] ]= identifier[version]
identifier[class_completer] = identifier[Completer] ([ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ])
identifier[bool_completer] = identifier[Completer] ([ literal[string] , literal[string] ])
identifier[print] ( literal[string] )
identifier[print] ( identifier[BOLD] ()+ literal[string] + identifier[ENDC] ())
identifier[print] ( literal[string] )
identifier[input_spec] = keyword[True]
identifier[input_names] =[]
identifier[printed_classes] = keyword[False]
keyword[if] identifier[input_spec] :
identifier[app_json] [ literal[string] ]=[]
identifier[print] ( identifier[fill] ( literal[string] ))
keyword[while] keyword[True] :
identifier[print] ( literal[string] )
identifier[ordinal] = identifier[get_ordinal_str] ( identifier[len] ( identifier[app_json] [ literal[string] ])+ literal[int] )
identifier[input_name] = identifier[prompt_for_var] ( identifier[ordinal] + literal[string] , identifier[allow_empty] = keyword[True] )
keyword[if] identifier[input_name] == literal[string] :
keyword[break]
keyword[if] identifier[input_name] keyword[in] identifier[input_names] :
identifier[print] ( identifier[fill] ( literal[string] ))
keyword[continue]
keyword[if] keyword[not] identifier[IO_NAME_PATTERN] . identifier[match] ( identifier[input_name] ):
identifier[print] ( identifier[fill] ( literal[string] ))
keyword[continue]
identifier[input_names] . identifier[append] ( identifier[input_name] )
identifier[input_label] = identifier[prompt_for_var] ( literal[string] , literal[string] )
identifier[use_completer] ( identifier[class_completer] )
keyword[if] keyword[not] identifier[printed_classes] :
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[printed_classes] = keyword[True]
keyword[while] keyword[True] :
identifier[input_class] = identifier[prompt_for_var] ( literal[string] )
keyword[if] identifier[input_class] keyword[in] identifier[class_completer] . identifier[choices] :
keyword[break]
keyword[else] :
identifier[print] ( identifier[fill] ( literal[string] ))
identifier[use_completer] ()
identifier[optional] = identifier[prompt_for_yn] ( literal[string] )
identifier[default_val] = keyword[None]
keyword[if] identifier[optional] keyword[and] identifier[input_class] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[default_val] = identifier[prompt_for_yn] ( literal[string] )
keyword[if] identifier[default_val] :
keyword[while] keyword[True] :
keyword[if] identifier[input_class] == literal[string] :
identifier[use_completer] ( identifier[bool_completer] )
identifier[default_val] = identifier[prompt_for_var] ( literal[string] , identifier[choices] =[ literal[string] , literal[string] ])
identifier[use_completer] ()
keyword[elif] identifier[input_class] == literal[string] :
identifier[default_val] = identifier[prompt_for_var] ( literal[string] , identifier[allow_empty] = keyword[True] )
keyword[else] :
identifier[default_val] = identifier[prompt_for_var] ( literal[string] )
keyword[try] :
keyword[if] identifier[input_class] == literal[string] :
identifier[default_val] =( identifier[default_val] == literal[string] )
keyword[elif] identifier[input_class] == literal[string] :
identifier[default_val] = identifier[int] ( identifier[default_val] )
keyword[elif] identifier[input_class] == literal[string] :
identifier[default_val] = identifier[float] ( identifier[default_val] )
keyword[break]
keyword[except] :
identifier[print] ( literal[string] + identifier[input_class] )
keyword[else] :
identifier[default_val] = keyword[None]
identifier[parameter_json] = identifier[OrderedDict] ()
identifier[parameter_json] [ literal[string] ]= identifier[input_name]
keyword[if] identifier[input_label] != literal[string] :
identifier[parameter_json] [ literal[string] ]= identifier[input_label]
identifier[parameter_json] [ literal[string] ]= identifier[input_class]
identifier[parameter_json] [ literal[string] ]= identifier[optional]
keyword[if] identifier[default_val] keyword[is] keyword[not] keyword[None] :
identifier[parameter_json] [ literal[string] ]= identifier[default_val]
keyword[if] identifier[input_class] == literal[string] keyword[or] identifier[input_class] == literal[string] :
identifier[parameter_json] [ literal[string] ]=[ literal[string] ]
identifier[parameter_json] [ literal[string] ]= literal[string]
identifier[app_json] [ literal[string] ]. identifier[append] ( identifier[parameter_json] )
identifier[print] ( literal[string] )
identifier[print] ( identifier[BOLD] ()+ literal[string] + identifier[ENDC] ())
identifier[print] ( literal[string] )
identifier[output_spec] = keyword[True]
identifier[output_names] =[]
keyword[if] identifier[output_spec] :
identifier[app_json] [ literal[string] ]=[]
identifier[print] ( identifier[fill] ( literal[string] ))
keyword[while] keyword[True] :
identifier[print] ( literal[string] )
identifier[ordinal] = identifier[get_ordinal_str] ( identifier[len] ( identifier[app_json] [ literal[string] ])+ literal[int] )
identifier[output_name] = identifier[prompt_for_var] ( identifier[ordinal] + literal[string] , identifier[allow_empty] = keyword[True] )
keyword[if] identifier[output_name] == literal[string] :
keyword[break]
keyword[if] identifier[output_name] keyword[in] identifier[output_names] :
identifier[print] ( identifier[fill] ( literal[string] ))
keyword[continue]
keyword[if] keyword[not] identifier[IO_NAME_PATTERN] . identifier[match] ( identifier[output_name] ):
identifier[print] ( identifier[fill] ( literal[string] ))
keyword[continue]
identifier[output_names] . identifier[append] ( identifier[output_name] )
identifier[output_label] = identifier[prompt_for_var] ( literal[string] , literal[string] )
identifier[use_completer] ( identifier[class_completer] )
keyword[if] keyword[not] identifier[printed_classes] :
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[printed_classes] = keyword[True]
keyword[while] keyword[True] :
identifier[output_class] = identifier[prompt_for_var] ( literal[string] )
keyword[if] identifier[output_class] keyword[in] identifier[class_completer] . identifier[choices] :
keyword[break]
keyword[else] :
identifier[print] ( identifier[fill] ( literal[string] ))
identifier[use_completer] ()
identifier[parameter_json] = identifier[OrderedDict] ()
identifier[parameter_json] [ literal[string] ]= identifier[output_name]
keyword[if] identifier[output_label] != literal[string] :
identifier[parameter_json] [ literal[string] ]= identifier[output_label]
identifier[parameter_json] [ literal[string] ]= identifier[output_class]
keyword[if] identifier[output_class] == literal[string] keyword[or] identifier[output_class] == literal[string] :
identifier[parameter_json] [ literal[string] ]=[ literal[string] ]
identifier[parameter_json] [ literal[string] ]= literal[string]
identifier[app_json] [ literal[string] ]. identifier[append] ( identifier[parameter_json] )
identifier[required_file_input_names] =[]
identifier[optional_file_input_names] =[]
identifier[required_file_array_input_names] =[]
identifier[optional_file_array_input_names] =[]
identifier[file_output_names] =[]
keyword[if] literal[string] keyword[in] identifier[app_json] :
keyword[for] identifier[param] keyword[in] identifier[app_json] [ literal[string] ]:
identifier[may_be_missing] = identifier[param] [ literal[string] ] keyword[and] literal[string] keyword[not] keyword[in] identifier[param]
keyword[if] identifier[param] [ literal[string] ]== literal[string] :
identifier[param_list] = identifier[optional_file_input_names] keyword[if] identifier[may_be_missing] keyword[else] identifier[required_file_input_names]
keyword[elif] identifier[param] [ literal[string] ]== literal[string] :
identifier[param_list] = identifier[optional_file_array_input_names] keyword[if] identifier[may_be_missing] keyword[else] identifier[required_file_array_input_names]
keyword[else] :
identifier[param_list] = keyword[None]
keyword[if] identifier[param_list] keyword[is] keyword[not] keyword[None] :
identifier[param_list] . identifier[append] ( identifier[param] [ literal[string] ])
keyword[if] literal[string] keyword[in] identifier[app_json] :
identifier[file_output_names] =[ identifier[param] [ literal[string] ] keyword[for] identifier[param] keyword[in] identifier[app_json] [ literal[string] ] keyword[if] identifier[param] [ literal[string] ]== literal[string] ]
identifier[print] ( literal[string] )
identifier[print] ( identifier[BOLD] ()+ literal[string] + identifier[ENDC] ())
identifier[app_json] [ literal[string] ]= identifier[OrderedDict] ({})
identifier[app_json] [ literal[string] ]. identifier[setdefault] ( literal[string] ,{})
identifier[timeout] , identifier[timeout_units] = identifier[get_timeout] ( identifier[default] = identifier[app_json] [ literal[string] ][ literal[string] ]. identifier[get] ( literal[string] ))
identifier[app_json] [ literal[string] ][ literal[string] ]. identifier[setdefault] ( literal[string] ,{})
identifier[app_json] [ literal[string] ][ literal[string] ][ literal[string] ]. identifier[setdefault] ( identifier[timeout_units] , identifier[timeout] )
identifier[print] ( literal[string] )
identifier[print] ( identifier[BOLD] ()+ literal[string] + identifier[ENDC] ())
identifier[language] = identifier[args] . identifier[language] keyword[if] identifier[args] . identifier[language] keyword[is] keyword[not] keyword[None] keyword[else] identifier[get_language] ()
identifier[interpreter] = identifier[language_options] [ identifier[language] ]. identifier[get_interpreter] ()
identifier[app_json] [ literal[string] ][ literal[string] ]= identifier[interpreter]
identifier[template_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[dxpy] . identifier[__file__] ), literal[string] , literal[string] , identifier[language_options] [ identifier[language] ]. identifier[get_path] ())
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[template_dir] , identifier[args] . identifier[template] )):
identifier[print] ( identifier[fill] ( literal[string] + identifier[args] . identifier[template] + literal[string] ))
identifier[pattern] = identifier[get_pattern] ( identifier[template_dir] )
keyword[else] :
identifier[pattern] = identifier[args] . identifier[template]
identifier[template_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[template_dir] , identifier[pattern] )
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[template_dir] , literal[string] ), literal[string] ) keyword[as] identifier[template_app_json_file] :
identifier[file_text] = identifier[fill_in_name_and_ver] ( identifier[template_app_json_file] . identifier[read] (), identifier[name] , identifier[version] )
identifier[template_app_json] = identifier[json] . identifier[loads] ( identifier[file_text] )
keyword[for] identifier[key] keyword[in] identifier[template_app_json] [ literal[string] ]:
identifier[app_json] [ literal[string] ][ identifier[key] ]= identifier[template_app_json] [ literal[string] ][ identifier[key] ]
keyword[if] ( identifier[language] == identifier[args] . identifier[language] ) keyword[and] ( identifier[pattern] == identifier[args] . identifier[template] ):
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( identifier[BOLD] ( literal[string] ))
identifier[print] ( identifier[fill] ( literal[string] +
identifier[BOLD] ( literal[string] )+ literal[string] ))
identifier[print] ( literal[string] )
identifier[print] ( identifier[fill] ( identifier[UNDERLINE] ( literal[string] )+ literal[string] ))
keyword[if] identifier[prompt_for_yn] ( literal[string] , identifier[default] = keyword[False] ):
identifier[app_json] . identifier[setdefault] ( literal[string] ,{})
identifier[app_json] [ literal[string] ][ literal[string] ]=[ literal[string] ]
identifier[print] ( identifier[fill] ( literal[string] +
identifier[UNDERLINE] ( literal[string] )+ literal[string] ))
identifier[print] ( literal[string] )
identifier[print] ( identifier[fill] ( identifier[UNDERLINE] ( literal[string] )+ literal[string] ))
keyword[if] identifier[prompt_for_yn] ( literal[string] , identifier[default] = keyword[False] ):
identifier[app_json] . identifier[setdefault] ( literal[string] ,{})
identifier[app_json] [ literal[string] ][ literal[string] ]= literal[string]
identifier[print] ( identifier[fill] ( literal[string] +
literal[string] + identifier[UNDERLINE] ( literal[string] )+ literal[string] + identifier[UNDERLINE] ( literal[string] )+
literal[string] ))
identifier[print] ( literal[string] )
identifier[print] ( identifier[BOLD] ( literal[string] ))
identifier[print] ( literal[string] )
identifier[print] ( identifier[BOLD] ( literal[string] ))
identifier[print] ( identifier[format_table] ( identifier[InstanceTypesCompleter] . identifier[aws_preferred_instance_types] . identifier[values] (),
identifier[column_names] = identifier[list] ( identifier[InstanceTypesCompleter] . identifier[instance_types] . identifier[values] ())[ literal[int] ]. identifier[_fields] ))
identifier[print] ( identifier[BOLD] ( literal[string] ))
identifier[print] ( identifier[format_table] ( identifier[InstanceTypesCompleter] . identifier[azure_preferred_instance_types] . identifier[values] (),
identifier[column_names] = identifier[list] ( identifier[InstanceTypesCompleter] . identifier[instance_types] . identifier[values] ())[ literal[int] ]. identifier[_fields] ))
identifier[print] ( identifier[fill] ( identifier[BOLD] ( literal[string] )+ literal[string] +
literal[string] +
identifier[BOLD] ( literal[string] )+ literal[string] ))
identifier[use_completer] ( identifier[InstanceTypesCompleter] ())
identifier[instance_type] = identifier[prompt_for_var] ( literal[string] ,
identifier[default] = identifier[InstanceTypesCompleter] . identifier[default_instance_type] . identifier[Name] ,
identifier[choices] = identifier[list] ( identifier[InstanceTypesCompleter] . identifier[instance_types] ))
identifier[target_region] = identifier[DEFAULT_REGION_AWS]
keyword[if] identifier[instance_type] keyword[in] identifier[InstanceTypesCompleter] . identifier[azure_preferred_instance_types] . identifier[keys] ():
identifier[target_region] = identifier[DEFAULT_REGION_AZURE]
identifier[app_json] [ literal[string] ]= identifier[OrderedDict] ({})
identifier[app_json] [ literal[string] ][ identifier[target_region] ]= identifier[OrderedDict] ({})
identifier[app_json] [ literal[string] ][ identifier[target_region] ]. identifier[setdefault] ( literal[string] ,{})
identifier[app_json] [ literal[string] ][ identifier[target_region] ][ literal[string] ]. identifier[setdefault] ( literal[string] ,{})
identifier[app_json] [ literal[string] ][ identifier[target_region] ][ literal[string] ][ literal[string] ][ literal[string] ]= identifier[instance_type]
identifier[app_json] [ literal[string] ][ literal[string] ]= literal[string]
identifier[app_json] [ literal[string] ][ literal[string] ]= literal[string]
identifier[print] ( literal[string] )
identifier[print] ( identifier[BOLD] ()+ literal[string] + identifier[DNANEXUS_LOGO] ()+ identifier[BOLD] ()+ literal[string] + identifier[ENDC] ())
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[name] , literal[string] ), literal[string] ) keyword[as] identifier[prog_file] :
identifier[prog_file] . identifier[write] ( identifier[clean] ( identifier[json] . identifier[dumps] ( identifier[app_json] , identifier[indent] = literal[int] ))+ literal[string] )
identifier[manifest] . identifier[append] ( identifier[os] . identifier[path] . identifier[join] ( identifier[name] , literal[string] ))
identifier[print] ( literal[string] )
identifier[print] ( identifier[fill] ( literal[string] +( literal[string] keyword[if] keyword[not] ( literal[string] keyword[in] identifier[app_json] keyword[and] literal[string] keyword[in] identifier[app_json] ) keyword[else] literal[string] )))
identifier[print] ( literal[string] )
keyword[for] identifier[subdir] keyword[in] literal[string] , literal[string] , literal[string] :
keyword[try] :
identifier[os] . identifier[mkdir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[name] , identifier[subdir] ))
identifier[manifest] . identifier[append] ( identifier[os] . identifier[path] . identifier[join] ( identifier[name] , identifier[subdir] , literal[string] ))
keyword[except] :
identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] %( identifier[name] , identifier[subdir] ))
identifier[sys] . identifier[exit] ( literal[int] )
identifier[entry_points] =[ literal[string] ]
keyword[if] identifier[pattern] == literal[string] :
identifier[entry_points] =[ literal[string] , literal[string] , literal[string] ]
keyword[elif] identifier[pattern] == literal[string] :
identifier[entry_points] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[manifest] += identifier[create_files_from_templates] ( identifier[template_dir] , identifier[app_json] , identifier[language] ,
identifier[required_file_input_names] , identifier[optional_file_input_names] ,
identifier[required_file_array_input_names] , identifier[optional_file_array_input_names] ,
identifier[file_output_names] , identifier[pattern] ,
identifier[description] = literal[string] ,
identifier[entry_points] = identifier[entry_points] )
identifier[print] ( literal[string] )
keyword[for] identifier[filename] keyword[in] identifier[sorted] ( identifier[manifest] ):
identifier[print] ( literal[string] , identifier[filename] )
identifier[print] ( literal[string] + identifier[fill] ( literal[string] . identifier[format] ( identifier[n] = identifier[name] ))+ literal[string] )
identifier[print] ( identifier[fill] ( literal[string] +
identifier[BOLD] ()+ literal[string] + identifier[ENDC] ()+
literal[string] ))
|
def main(**kwargs):
"""
Entry point for dx-app-wizard.
Note that this function is not meant to be used as a subroutine in your program.
"""
manifest = []
print_intro(API_VERSION)
if args.json_file is not None:
with open(args.json_file, 'r') as json_file:
app_json = json.loads(json_file.read())
# Re-confirm the name
name = get_name(default=args.name or app_json.get('name'))
app_json['name'] = name
version = get_version(default=app_json.get('version'))
app_json['version'] = version # depends on [control=['with'], data=['json_file']]
try:
os.mkdir(app_json['name']) # depends on [control=['try'], data=[]]
except:
sys.stderr.write(fill('Unable to create a directory for %s, please check that it is a valid app name and the working directory exists and is writable.' % app_json['name']) + '\n')
sys.exit(1) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
##################
# BASIC METADATA #
##################
name = get_name(default=args.name)
try:
os.mkdir(name) # depends on [control=['try'], data=[]]
except:
sys.stderr.write(fill('Unable to create a directory for %s, please check that it is a valid app name and the working directory exists and is writable.' % name) + '\n')
sys.exit(1) # depends on [control=['except'], data=[]]
(title, summary) = get_metadata(API_VERSION)
version = get_version()
app_json = OrderedDict()
app_json['name'] = name
app_json['title'] = title or name
app_json['summary'] = summary or name
app_json['dxapi'] = API_VERSION
app_json['version'] = version
############
# IO SPECS #
############
class_completer = Completer(['int', 'float', 'string', 'boolean', 'hash', 'array:int', 'array:float', 'array:string', 'array:boolean', 'record', 'file', 'applet', 'array:record', 'array:file', 'array:applet'])
bool_completer = Completer(['true', 'false'])
print('')
print(BOLD() + 'Input Specification' + ENDC())
print('')
input_spec = True
input_names = []
printed_classes = False
if input_spec:
app_json['inputSpec'] = []
print(fill('You will now be prompted for each input parameter to your app. Each parameter should have a unique name that uses only the underscore "_" and alphanumeric characters, and does not start with a number.'))
while True:
print('')
ordinal = get_ordinal_str(len(app_json['inputSpec']) + 1)
input_name = prompt_for_var(ordinal + ' input name (<ENTER> to finish)', allow_empty=True)
if input_name == '':
break # depends on [control=['if'], data=[]]
if input_name in input_names:
print(fill('Error: Cannot use the same input parameter name twice. Please choose again.'))
continue # depends on [control=['if'], data=[]]
if not IO_NAME_PATTERN.match(input_name):
print(fill('Error: Parameter names may use only underscore "_", ASCII letters, and digits; and may not start with a digit. Please choose again.'))
continue # depends on [control=['if'], data=[]]
input_names.append(input_name)
input_label = prompt_for_var('Label (optional human-readable name)', '')
use_completer(class_completer)
if not printed_classes:
print('Your input parameter must be of one of the following classes:')
print('applet array:file array:record file int\narray:applet array:float array:string float record\narray:boolean array:int boolean hash string\n')
printed_classes = True # depends on [control=['if'], data=[]]
while True:
input_class = prompt_for_var('Choose a class (<TAB> twice for choices)')
if input_class in class_completer.choices:
break # depends on [control=['if'], data=[]]
else:
print(fill('Not a recognized class; please choose again.')) # depends on [control=['while'], data=[]]
use_completer()
optional = prompt_for_yn('This is an optional parameter')
default_val = None
if optional and input_class in ['int', 'float', 'string', 'boolean']:
default_val = prompt_for_yn('A default value should be provided')
if default_val:
while True:
if input_class == 'boolean':
use_completer(bool_completer)
default_val = prompt_for_var(' Default value', choices=['true', 'false'])
use_completer() # depends on [control=['if'], data=[]]
elif input_class == 'string':
default_val = prompt_for_var(' Default value', allow_empty=True) # depends on [control=['if'], data=[]]
else:
default_val = prompt_for_var(' Default value')
try:
if input_class == 'boolean':
default_val = default_val == 'true' # depends on [control=['if'], data=[]]
elif input_class == 'int':
default_val = int(default_val) # depends on [control=['if'], data=[]]
elif input_class == 'float':
default_val = float(default_val) # depends on [control=['if'], data=[]]
break # depends on [control=['try'], data=[]]
except:
print('Not a valid default value for the given class ' + input_class) # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
else:
default_val = None # depends on [control=['if'], data=[]]
# Fill in the input parameter's JSON
parameter_json = OrderedDict()
parameter_json['name'] = input_name
if input_label != '':
parameter_json['label'] = input_label # depends on [control=['if'], data=['input_label']]
parameter_json['class'] = input_class
parameter_json['optional'] = optional
if default_val is not None:
parameter_json['default'] = default_val # depends on [control=['if'], data=['default_val']]
# Fill in patterns and blank help string
if input_class == 'file' or input_class == 'array:file':
parameter_json['patterns'] = ['*'] # depends on [control=['if'], data=[]]
parameter_json['help'] = ''
app_json['inputSpec'].append(parameter_json) # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
print('')
print(BOLD() + 'Output Specification' + ENDC())
print('')
output_spec = True
output_names = []
if output_spec:
app_json['outputSpec'] = []
print(fill('You will now be prompted for each output parameter of your app. Each parameter should have a unique name that uses only the underscore "_" and alphanumeric characters, and does not start with a number.'))
while True:
print('')
ordinal = get_ordinal_str(len(app_json['outputSpec']) + 1)
output_name = prompt_for_var(ordinal + ' output name (<ENTER> to finish)', allow_empty=True)
if output_name == '':
break # depends on [control=['if'], data=[]]
if output_name in output_names:
print(fill('Error: Cannot use the same output parameter name twice. Please choose again.'))
continue # depends on [control=['if'], data=[]]
if not IO_NAME_PATTERN.match(output_name):
print(fill('Error: Parameter names may use only underscore "_", ASCII letters, and digits; and may not start with a digit. Please choose again.'))
continue # depends on [control=['if'], data=[]]
output_names.append(output_name)
output_label = prompt_for_var('Label (optional human-readable name)', '')
use_completer(class_completer)
if not printed_classes:
print('Your output parameter must be of one of the following classes:')
print('applet array:file array:record file int\narray:applet array:float array:string float record\narray:boolean array:int boolean hash string')
printed_classes = True # depends on [control=['if'], data=[]]
while True:
output_class = prompt_for_var('Choose a class (<TAB> twice for choices)')
if output_class in class_completer.choices:
break # depends on [control=['if'], data=[]]
else:
print(fill('Not a recognized class; please choose again.')) # depends on [control=['while'], data=[]]
use_completer()
# Fill in the output parameter's JSON
parameter_json = OrderedDict()
parameter_json['name'] = output_name
if output_label != '':
parameter_json['label'] = output_label # depends on [control=['if'], data=['output_label']]
parameter_json['class'] = output_class
# Fill in patterns and blank help string
if output_class == 'file' or output_class == 'array:file':
parameter_json['patterns'] = ['*'] # depends on [control=['if'], data=[]]
parameter_json['help'] = ''
app_json['outputSpec'].append(parameter_json) # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
required_file_input_names = []
optional_file_input_names = []
required_file_array_input_names = []
optional_file_array_input_names = []
file_output_names = []
if 'inputSpec' in app_json:
for param in app_json['inputSpec']:
may_be_missing = param['optional'] and 'default' not in param
if param['class'] == 'file':
param_list = optional_file_input_names if may_be_missing else required_file_input_names # depends on [control=['if'], data=[]]
elif param['class'] == 'array:file':
param_list = optional_file_array_input_names if may_be_missing else required_file_array_input_names # depends on [control=['if'], data=[]]
else:
param_list = None
if param_list is not None:
param_list.append(param['name']) # depends on [control=['if'], data=['param_list']] # depends on [control=['for'], data=['param']] # depends on [control=['if'], data=['app_json']]
if 'outputSpec' in app_json:
file_output_names = [param['name'] for param in app_json['outputSpec'] if param['class'] == 'file'] # depends on [control=['if'], data=['app_json']]
##################
# TIMEOUT POLICY #
##################
print('')
print(BOLD() + 'Timeout Policy' + ENDC())
app_json['runSpec'] = OrderedDict({})
app_json['runSpec'].setdefault('timeoutPolicy', {})
(timeout, timeout_units) = get_timeout(default=app_json['runSpec']['timeoutPolicy'].get('*'))
app_json['runSpec']['timeoutPolicy'].setdefault('*', {})
app_json['runSpec']['timeoutPolicy']['*'].setdefault(timeout_units, timeout)
########################
# LANGUAGE AND PATTERN #
########################
print('')
print(BOLD() + 'Template Options' + ENDC())
# Prompt for programming language if not specified
language = args.language if args.language is not None else get_language()
interpreter = language_options[language].get_interpreter()
app_json['runSpec']['interpreter'] = interpreter
# Prompt the execution pattern iff the args.pattern is provided and invalid
template_dir = os.path.join(os.path.dirname(dxpy.__file__), 'templating', 'templates', language_options[language].get_path())
if not os.path.isdir(os.path.join(template_dir, args.template)):
print(fill('The execution pattern "' + args.template + '" is not available for your programming language.'))
pattern = get_pattern(template_dir) # depends on [control=['if'], data=[]]
else:
pattern = args.template
template_dir = os.path.join(template_dir, pattern)
with open(os.path.join(template_dir, 'dxapp.json'), 'r') as template_app_json_file:
file_text = fill_in_name_and_ver(template_app_json_file.read(), name, version)
template_app_json = json.loads(file_text)
for key in template_app_json['runSpec']:
app_json['runSpec'][key] = template_app_json['runSpec'][key] # depends on [control=['for'], data=['key']] # depends on [control=['with'], data=['template_app_json_file']]
if language == args.language and pattern == args.template:
print('All template options are supplied in the arguments.') # depends on [control=['if'], data=[]]
##########################
# APP ACCESS PERMISSIONS #
##########################
print('')
print(BOLD('Access Permissions'))
print(fill('If you request these extra permissions for your app, users will see this fact when launching your app, and certain other restrictions will apply. For more information, see ' + BOLD('https://wiki.dnanexus.com/App-Permissions') + '.'))
print('')
print(fill(UNDERLINE('Access to the Internet') + ' (other than accessing the DNAnexus API).'))
if prompt_for_yn('Will this app need access to the Internet?', default=False):
app_json.setdefault('access', {})
app_json['access']['network'] = ['*']
print(fill('App has full access to the Internet. To narrow access to specific sites, edit the ' + UNDERLINE('access.network') + ' field of dxapp.json once we generate the app.')) # depends on [control=['if'], data=[]]
print('')
print(fill(UNDERLINE('Direct access to the parent project') + ". This is not needed if your app specifies outputs,\n which will be copied into the project after it's done running."))
if prompt_for_yn('Will this app need access to the parent project?', default=False):
app_json.setdefault('access', {})
app_json['access']['project'] = 'CONTRIBUTE'
print(fill('App has CONTRIBUTE access to the parent project. To change the access level or request access to ' + 'other projects, edit the ' + UNDERLINE('access.project') + ' and ' + UNDERLINE('access.allProjects') + ' fields of dxapp.json once we generate the app.')) # depends on [control=['if'], data=[]]
#######################
# SYSTEM REQUIREMENTS #
#######################
print('')
print(BOLD('System Requirements'))
print('')
print(BOLD('Common AWS instance types:'))
print(format_table(InstanceTypesCompleter.aws_preferred_instance_types.values(), column_names=list(InstanceTypesCompleter.instance_types.values())[0]._fields))
print(BOLD('Common Azure instance types:'))
print(format_table(InstanceTypesCompleter.azure_preferred_instance_types.values(), column_names=list(InstanceTypesCompleter.instance_types.values())[0]._fields))
print(fill(BOLD('Default instance type:') + ' The instance type you select here will apply to all entry points in ' + 'your app unless you override it. See ' + BOLD('https://wiki.dnanexus.com/API-Specification-v1.0.0/Instance-Types') + ' for more information.'))
use_completer(InstanceTypesCompleter())
instance_type = prompt_for_var('Choose an instance type for your app', default=InstanceTypesCompleter.default_instance_type.Name, choices=list(InstanceTypesCompleter.instance_types))
target_region = DEFAULT_REGION_AWS
if instance_type in InstanceTypesCompleter.azure_preferred_instance_types.keys():
target_region = DEFAULT_REGION_AZURE # depends on [control=['if'], data=[]]
app_json['regionalOptions'] = OrderedDict({})
app_json['regionalOptions'][target_region] = OrderedDict({})
app_json['regionalOptions'][target_region].setdefault('systemRequirements', {})
app_json['regionalOptions'][target_region]['systemRequirements'].setdefault('*', {})
app_json['regionalOptions'][target_region]['systemRequirements']['*']['instanceType'] = instance_type
######################
# HARDCODED DEFAULTS #
######################
app_json['runSpec']['distribution'] = 'Ubuntu'
app_json['runSpec']['release'] = '16.04'
#################
# WRITING FILES #
#################
print('')
print(BOLD() + '*** Generating ' + DNANEXUS_LOGO() + BOLD() + ' App Template... ***' + ENDC())
with open(os.path.join(name, 'dxapp.json'), 'w') as prog_file:
prog_file.write(clean(json.dumps(app_json, indent=2)) + '\n') # depends on [control=['with'], data=['prog_file']]
manifest.append(os.path.join(name, 'dxapp.json'))
print('')
print(fill('Your app specification has been written to the\ndxapp.json file. You can specify more app options by editing this file\ndirectly (see https://wiki.dnanexus.com/Developer-Portal for complete\ndocumentation).' + (' Note that without an input and output specification,\nyour app can only be built as an APPLET on the system. To publish it to\nthe DNAnexus community, you must first specify your inputs and outputs.\n' if not ('inputSpec' in app_json and 'outputSpec' in app_json) else '')))
print('')
for subdir in ('src', 'test', 'resources'):
try:
os.mkdir(os.path.join(name, subdir))
manifest.append(os.path.join(name, subdir, '')) # depends on [control=['try'], data=[]]
except:
sys.stderr.write('Unable to create subdirectory %s/%s' % (name, subdir))
sys.exit(1) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['subdir']]
entry_points = ['main']
if pattern == 'parallelized':
entry_points = ['main', 'process', 'postprocess'] # depends on [control=['if'], data=[]]
elif pattern == 'scatter-process-gather':
entry_points = ['main', 'scatter', 'map', 'process', 'postprocess'] # depends on [control=['if'], data=[]]
manifest += create_files_from_templates(template_dir, app_json, language, required_file_input_names, optional_file_input_names, required_file_array_input_names, optional_file_array_input_names, file_output_names, pattern, description='<!-- Insert a description of your app here -->', entry_points=entry_points)
print('Created files:')
for filename in sorted(manifest):
print('\t', filename) # depends on [control=['for'], data=['filename']]
print('\n' + fill('App directory created! See\nhttps://wiki.dnanexus.com/Developer-Portal for tutorials on how to modify these files,\nor run "dx build {n}" or "dx build --create-app {n}" while logged in with dx.'.format(n=name)) + '\n')
print(fill('Running the DNAnexus build utility will create an executable on the DNAnexus platform. Any files found in the ' + BOLD() + 'resources' + ENDC() + ' directory will be uploaded so that they will be present in the root directory when the executable is run.'))
|
def write_sub_files(self):
    """
    Write all the submit files used by the dag to disk.  Each submit
    file is written to the file name configured on its CondorJob.
    """
    # Run the node finalization pass first, unless it has already been
    # done for this DAG.
    already_finalized = self.__nodes_finalized
    if not already_finalized:
        for current_node in self.__nodes:
            current_node.finalize()
    # DAX workflows do not use Condor submit files, so there is nothing
    # further to write for them.
    if self.is_dax():
        return
    for current_job in self.__jobs:
        current_job.write_sub_file()
|
def function[write_sub_files, parameter[self]]:
constant[
Write all the submit files used by the dag to disk. Each submit file is
written to the file name set in the CondorJob.
]
if <ast.UnaryOp object at 0x7da1b0b13340> begin[:]
for taget[name[node]] in starred[name[self].__nodes] begin[:]
call[name[node].finalize, parameter[]]
if <ast.UnaryOp object at 0x7da1b0bdb910> begin[:]
for taget[name[job]] in starred[name[self].__jobs] begin[:]
call[name[job].write_sub_file, parameter[]]
|
keyword[def] identifier[write_sub_files] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[__nodes_finalized] :
keyword[for] identifier[node] keyword[in] identifier[self] . identifier[__nodes] :
identifier[node] . identifier[finalize] ()
keyword[if] keyword[not] identifier[self] . identifier[is_dax] ():
keyword[for] identifier[job] keyword[in] identifier[self] . identifier[__jobs] :
identifier[job] . identifier[write_sub_file] ()
|
def write_sub_files(self):
"""
Write all the submit files used by the dag to disk. Each submit file is
written to the file name set in the CondorJob.
"""
if not self.__nodes_finalized:
for node in self.__nodes:
node.finalize() # depends on [control=['for'], data=['node']] # depends on [control=['if'], data=[]]
if not self.is_dax():
for job in self.__jobs:
job.write_sub_file() # depends on [control=['for'], data=['job']] # depends on [control=['if'], data=[]]
|
def meter_calls_with_dims(**dims):
    """Decorator factory that tracks the rate at which a function is
    called, tagging the meter with the supplied dimensions.

    Each invocation of the wrapped function marks a meter named
    ``<qualified-name>_calls`` before delegating to the original
    function.
    """
    def decorate(fn):
        @functools.wraps(fn)
        def wrapped(*args, **kwargs):
            # Resolve the metric name per call, mark it, then delegate.
            metric_name = "%s_calls" % pyformance.registry.get_qualname(fn)
            meter(metric_name, **dims).mark()
            return fn(*args, **kwargs)
        return wrapped
    return decorate
|
def function[meter_calls_with_dims, parameter[]]:
constant[Decorator to track the rate at which a function is called
with dimensions.
]
def function[meter_wrapper, parameter[fn]]:
def function[fn_wrapper, parameter[]]:
call[call[name[meter], parameter[binary_operation[constant[%s_calls] <ast.Mod object at 0x7da2590d6920> call[name[pyformance].registry.get_qualname, parameter[name[fn]]]]]].mark, parameter[]]
return[call[name[fn], parameter[<ast.Starred object at 0x7da20c993760>]]]
return[name[fn_wrapper]]
return[name[meter_wrapper]]
|
keyword[def] identifier[meter_calls_with_dims] (** identifier[dims] ):
literal[string]
keyword[def] identifier[meter_wrapper] ( identifier[fn] ):
@ identifier[functools] . identifier[wraps] ( identifier[fn] )
keyword[def] identifier[fn_wrapper] (* identifier[args] ,** identifier[kwargs] ):
identifier[meter] ( literal[string] %
identifier[pyformance] . identifier[registry] . identifier[get_qualname] ( identifier[fn] ),** identifier[dims] ). identifier[mark] ()
keyword[return] identifier[fn] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[fn_wrapper]
keyword[return] identifier[meter_wrapper]
|
def meter_calls_with_dims(**dims):
"""Decorator to track the rate at which a function is called
with dimensions.
"""
def meter_wrapper(fn):
@functools.wraps(fn)
def fn_wrapper(*args, **kwargs):
meter('%s_calls' % pyformance.registry.get_qualname(fn), **dims).mark()
return fn(*args, **kwargs)
return fn_wrapper
return meter_wrapper
|
def get_plugins_info(self, limit=10, offset=10):
    """
    Provide plugins info
    :param limit: maximum number of plugins to return (omitted from the
        query string when falsy)
    :param offset: pagination offset (omitted from the query string when
        falsy)
    :return: the ``plugins`` entry of the REST response, or ``None``
    """
    # Build the query parameters, keeping only truthy values, in the
    # same order the keys were originally inserted (offset then limit).
    query = {key: value
             for key, value in (('offset', offset), ('limit', limit))
             if value}
    response = self.get('rest/1.0/plugins', params=query) or {}
    return response.get('plugins')
|
def function[get_plugins_info, parameter[self, limit, offset]]:
constant[
Provide plugins info
:param limit:
:param offset:
:return:
]
variable[params] assign[=] dictionary[[], []]
if name[offset] begin[:]
call[name[params]][constant[offset]] assign[=] name[offset]
if name[limit] begin[:]
call[name[params]][constant[limit]] assign[=] name[limit]
variable[url] assign[=] constant[rest/1.0/plugins]
return[call[<ast.BoolOp object at 0x7da18f09e410>.get, parameter[constant[plugins]]]]
|
keyword[def] identifier[get_plugins_info] ( identifier[self] , identifier[limit] = literal[int] , identifier[offset] = literal[int] ):
literal[string]
identifier[params] ={}
keyword[if] identifier[offset] :
identifier[params] [ literal[string] ]= identifier[offset]
keyword[if] identifier[limit] :
identifier[params] [ literal[string] ]= identifier[limit]
identifier[url] = literal[string]
keyword[return] ( identifier[self] . identifier[get] ( identifier[url] , identifier[params] = identifier[params] ) keyword[or] {}). identifier[get] ( literal[string] )
|
def get_plugins_info(self, limit=10, offset=10):
"""
Provide plugins info
:param limit:
:param offset:
:return:
"""
params = {}
if offset:
params['offset'] = offset # depends on [control=['if'], data=[]]
if limit:
params['limit'] = limit # depends on [control=['if'], data=[]]
url = 'rest/1.0/plugins'
return (self.get(url, params=params) or {}).get('plugins')
|
def preview(ident):
    '''Preview an harvesting for a given source'''
    # Resolve the source, then look up the backend class registered for
    # its configured backend name.
    source = get_source(ident)
    backend_cls = backends.get(current_app, source.backend)
    # Dry-run the harvest so no data is persisted; cap the number of
    # items via the application configuration.
    harvester = backend_cls(
        source,
        dryrun=True,
        max_items=current_app.config['HARVEST_PREVIEW_MAX_ITEMS'],
    )
    return harvester.harvest()
|
def function[preview, parameter[ident]]:
constant[Preview an harvesting for a given source]
variable[source] assign[=] call[name[get_source], parameter[name[ident]]]
variable[cls] assign[=] call[name[backends].get, parameter[name[current_app], name[source].backend]]
variable[max_items] assign[=] call[name[current_app].config][constant[HARVEST_PREVIEW_MAX_ITEMS]]
variable[backend] assign[=] call[name[cls], parameter[name[source]]]
return[call[name[backend].harvest, parameter[]]]
|
keyword[def] identifier[preview] ( identifier[ident] ):
literal[string]
identifier[source] = identifier[get_source] ( identifier[ident] )
identifier[cls] = identifier[backends] . identifier[get] ( identifier[current_app] , identifier[source] . identifier[backend] )
identifier[max_items] = identifier[current_app] . identifier[config] [ literal[string] ]
identifier[backend] = identifier[cls] ( identifier[source] , identifier[dryrun] = keyword[True] , identifier[max_items] = identifier[max_items] )
keyword[return] identifier[backend] . identifier[harvest] ()
|
def preview(ident):
"""Preview an harvesting for a given source"""
source = get_source(ident)
cls = backends.get(current_app, source.backend)
max_items = current_app.config['HARVEST_PREVIEW_MAX_ITEMS']
backend = cls(source, dryrun=True, max_items=max_items)
return backend.harvest()
|
def leave(self, reason=None, message=None):
    """Actively close this WAMP session.
    Replace :meth:`autobahn.wamp.interface.IApplicationSession.leave`
    """
    # Delegate to the wrapped asynchronous session.  Note the keyword
    # rename: this API's ``message`` maps to autobahn's ``log_message``.
    # see https://github.com/crossbario/autobahn-python/issues/605
    session = self._async_session
    return session.leave(reason=reason, log_message=message)
|
def function[leave, parameter[self, reason, message]]:
constant[Actively close this WAMP session.
Replace :meth:`autobahn.wamp.interface.IApplicationSession.leave`
]
return[call[name[self]._async_session.leave, parameter[]]]
|
keyword[def] identifier[leave] ( identifier[self] , identifier[reason] = keyword[None] , identifier[message] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_async_session] . identifier[leave] ( identifier[reason] = identifier[reason] , identifier[log_message] = identifier[message] )
|
def leave(self, reason=None, message=None):
"""Actively close this WAMP session.
Replace :meth:`autobahn.wamp.interface.IApplicationSession.leave`
"""
# see https://github.com/crossbario/autobahn-python/issues/605
return self._async_session.leave(reason=reason, log_message=message)
|
def _set_capabilities(self, v, load=False):
  """
  Setter method for capabilities, mapped from YANG variable /capabilities (container)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_capabilities is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_capabilities() directly.
  """
  # NOTE(review): auto-generated pyangbind setter — presumably produced
  # from the brocade-system-capabilities YANG module; edit the YANG, not
  # this code.
  if hasattr(v, "_utype"):
    # Unwrap values carrying a pyangbind union-type hint before binding.
    v = v._utype(v)
  try:
    # Re-wrap the value in the generated container class; raises if the
    # value is not compatible with the 'capabilities' container schema.
    t = YANGDynClass(v,base=capabilities.capabilities, is_container='container', presence=False, yang_name="capabilities", rest_name="capabilities", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'CapabilitiesCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-system-capabilities', defining_module='brocade-system-capabilities', yang_type='container', is_config=True)
  except (TypeError, ValueError):
    # Surface a structured error describing the expected generated type.
    raise ValueError({
      'error-string': """capabilities must be of a type compatible with container""",
      'defined-type': "container",
      'generated-type': """YANGDynClass(base=capabilities.capabilities, is_container='container', presence=False, yang_name="capabilities", rest_name="capabilities", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'CapabilitiesCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-system-capabilities', defining_module='brocade-system-capabilities', yang_type='container', is_config=True)""",
    })
  # Store the bound container and run the post-set hook if one exists.
  self.__capabilities = t
  if hasattr(self, '_set'):
    self._set()
|
def function[_set_capabilities, parameter[self, v, load]]:
constant[
Setter method for capabilities, mapped from YANG variable /capabilities (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_capabilities is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_capabilities() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da2054a4b20>
name[self].__capabilities assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]]
|
keyword[def] identifier[_set_capabilities] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[capabilities] . identifier[capabilities] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__capabilities] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] ()
|
def _set_capabilities(self, v, load=False):
"""
Setter method for capabilities, mapped from YANG variable /capabilities (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_capabilities is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_capabilities() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=capabilities.capabilities, is_container='container', presence=False, yang_name='capabilities', rest_name='capabilities', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'CapabilitiesCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-system-capabilities', defining_module='brocade-system-capabilities', yang_type='container', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'capabilities must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=capabilities.capabilities, is_container=\'container\', presence=False, yang_name="capabilities", rest_name="capabilities", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'callpoint\': u\'CapabilitiesCallpoint\'}}, namespace=\'urn:brocade.com:mgmt:brocade-system-capabilities\', defining_module=\'brocade-system-capabilities\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__capabilities = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]]
|
def index_name(table, columns):
    """Generate an artificial index name for *table* over *columns*."""
    # Hash the joined column list so the generated name stays short and
    # deterministic regardless of how many columns are indexed.
    signature = '||'.join(columns)
    digest = sha1(signature.encode('utf-8')).hexdigest()
    return 'ix_{0}_{1}'.format(table, digest[:16])
|
def function[index_name, parameter[table, columns]]:
constant[Generate an artificial index name.]
variable[sig] assign[=] call[constant[||].join, parameter[name[columns]]]
variable[key] assign[=] call[call[call[name[sha1], parameter[call[name[sig].encode, parameter[constant[utf-8]]]]].hexdigest, parameter[]]][<ast.Slice object at 0x7da20c7cbd00>]
return[binary_operation[constant[ix_%s_%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c7cbdf0>, <ast.Name object at 0x7da20c7c8af0>]]]]
|
keyword[def] identifier[index_name] ( identifier[table] , identifier[columns] ):
literal[string]
identifier[sig] = literal[string] . identifier[join] ( identifier[columns] )
identifier[key] = identifier[sha1] ( identifier[sig] . identifier[encode] ( literal[string] )). identifier[hexdigest] ()[: literal[int] ]
keyword[return] literal[string] %( identifier[table] , identifier[key] )
|
def index_name(table, columns):
"""Generate an artificial index name."""
sig = '||'.join(columns)
key = sha1(sig.encode('utf-8')).hexdigest()[:16]
return 'ix_%s_%s' % (table, key)
|
def delete_filter(self, filter_id, params=None):
    """
    `<>`_
    :arg filter_id: The ID of the filter to delete
    """
    # Reject empty/placeholder identifiers up front so we never issue a
    # DELETE against a malformed URL.
    if filter_id in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'filter_id'.")
    path = _make_path("_ml", "filters", filter_id)
    return self.transport.perform_request("DELETE", path, params=params)
|
def function[delete_filter, parameter[self, filter_id, params]]:
constant[
`<>`_
:arg filter_id: The ID of the filter to delete
]
if compare[name[filter_id] in name[SKIP_IN_PATH]] begin[:]
<ast.Raise object at 0x7da1b212f3a0>
return[call[name[self].transport.perform_request, parameter[constant[DELETE], call[name[_make_path], parameter[constant[_ml], constant[filters], name[filter_id]]]]]]
|
keyword[def] identifier[delete_filter] ( identifier[self] , identifier[filter_id] , identifier[params] = keyword[None] ):
literal[string]
keyword[if] identifier[filter_id] keyword[in] identifier[SKIP_IN_PATH] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[self] . identifier[transport] . identifier[perform_request] (
literal[string] , identifier[_make_path] ( literal[string] , literal[string] , identifier[filter_id] ), identifier[params] = identifier[params]
)
|
def delete_filter(self, filter_id, params=None):
"""
`<>`_
:arg filter_id: The ID of the filter to delete
"""
if filter_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'filter_id'.") # depends on [control=['if'], data=[]]
return self.transport.perform_request('DELETE', _make_path('_ml', 'filters', filter_id), params=params)
|
def opaque_sky_cover(self, value=99.0):
    """Corresponds to IDD Field `opaque_sky_cover`.

    This is the value for opaque sky cover (tenths of coverage), i.e. 1 is
    1/10 covered, 10 is total coverage: the amount of sky dome in tenths
    covered by clouds or obscuring phenomena that prevent observing the sky
    or higher cloud layers at the time indicated. This is not used unless
    the field for Horizontal Infrared Radiation Intensity is missing, and
    then it is used to calculate Horizontal Infrared Radiation Intensity.

    Args:
        value (float): value for IDD Field `opaque_sky_cover`
            value >= 0.0
            value <= 10.0
            Missing value: 99.0
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    # NOTE(review): the default 99.0 (the documented "missing" marker) does
    # not pass the <= 10.0 range check below, so calling this with no
    # argument raises; callers must pass None for missing. Confirm intended.
    if value is None:
        # Missing values are stored as-is, bypassing validation.
        self._opaque_sky_cover = None
        return
    try:
        value = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float '
                         'for field `opaque_sky_cover`'.format(value))
    if value < 0.0:
        raise ValueError('value need to be greater or equal 0.0 '
                         'for field `opaque_sky_cover`')
    if value > 10.0:
        raise ValueError('value need to be smaller 10.0 '
                         'for field `opaque_sky_cover`')
    self._opaque_sky_cover = value
|
def function[opaque_sky_cover, parameter[self, value]]:
constant[Corresponds to IDD Field `opaque_sky_cover` This is the value for
opaque sky cover (tenths of coverage). (i.e. 1 is 1/10 covered. 10 is
total coverage). (Amount of sky dome in tenths covered by clouds or
obscuring phenomena that prevent observing the sky or higher cloud
layers at the time indicated.) This is not used unless the field for
Horizontal Infrared Radiation Intensity is missing and then it is used
to calculate Horizontal Infrared Radiation Intensity.
Args:
value (float): value for IDD Field `opaque_sky_cover`
value >= 0.0
value <= 10.0
Missing value: 99.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
]
if compare[name[value] is_not constant[None]] begin[:]
<ast.Try object at 0x7da1b0fb0fd0>
if compare[name[value] less[<] constant[0.0]] begin[:]
<ast.Raise object at 0x7da1b0fb0160>
if compare[name[value] greater[>] constant[10.0]] begin[:]
<ast.Raise object at 0x7da18c4cd960>
name[self]._opaque_sky_cover assign[=] name[value]
|
keyword[def] identifier[opaque_sky_cover] ( identifier[self] , identifier[value] = literal[int] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[value] = identifier[float] ( identifier[value] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[value] ))
keyword[if] identifier[value] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
keyword[if] identifier[value] > literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[self] . identifier[_opaque_sky_cover] = identifier[value]
|
def opaque_sky_cover(self, value=99.0):
"""Corresponds to IDD Field `opaque_sky_cover` This is the value for
opaque sky cover (tenths of coverage). (i.e. 1 is 1/10 covered. 10 is
total coverage). (Amount of sky dome in tenths covered by clouds or
obscuring phenomena that prevent observing the sky or higher cloud
layers at the time indicated.) This is not used unless the field for
Horizontal Infrared Radiation Intensity is missing and then it is used
to calculate Horizontal Infrared Radiation Intensity.
Args:
value (float): value for IDD Field `opaque_sky_cover`
value >= 0.0
value <= 10.0
Missing value: 99.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value) # depends on [control=['try'], data=[]]
except ValueError:
raise ValueError('value {} need to be of type float for field `opaque_sky_cover`'.format(value)) # depends on [control=['except'], data=[]]
if value < 0.0:
raise ValueError('value need to be greater or equal 0.0 for field `opaque_sky_cover`') # depends on [control=['if'], data=[]]
if value > 10.0:
raise ValueError('value need to be smaller 10.0 for field `opaque_sky_cover`') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['value']]
self._opaque_sky_cover = value
|
def generate_image(self, chars):
    """Render *chars* as a captcha image.

    Draws the text in a random dark colour on a light random background,
    overlays noise dots and a noise curve, then smooths the result.

    :param chars: text to be generated.
    """
    bg = random_color(238, 255)
    fg = random_color(10, 200, random.randint(220, 255))
    image = self.create_captcha_image(chars, fg, bg)
    self.create_noise_dots(image, fg)
    self.create_noise_curve(image, fg)
    return image.filter(ImageFilter.SMOOTH)
|
def function[generate_image, parameter[self, chars]]:
constant[Generate the image of the given characters.
:param chars: text to be generated.
]
variable[background] assign[=] call[name[random_color], parameter[constant[238], constant[255]]]
variable[color] assign[=] call[name[random_color], parameter[constant[10], constant[200], call[name[random].randint, parameter[constant[220], constant[255]]]]]
variable[im] assign[=] call[name[self].create_captcha_image, parameter[name[chars], name[color], name[background]]]
call[name[self].create_noise_dots, parameter[name[im], name[color]]]
call[name[self].create_noise_curve, parameter[name[im], name[color]]]
variable[im] assign[=] call[name[im].filter, parameter[name[ImageFilter].SMOOTH]]
return[name[im]]
|
keyword[def] identifier[generate_image] ( identifier[self] , identifier[chars] ):
literal[string]
identifier[background] = identifier[random_color] ( literal[int] , literal[int] )
identifier[color] = identifier[random_color] ( literal[int] , literal[int] , identifier[random] . identifier[randint] ( literal[int] , literal[int] ))
identifier[im] = identifier[self] . identifier[create_captcha_image] ( identifier[chars] , identifier[color] , identifier[background] )
identifier[self] . identifier[create_noise_dots] ( identifier[im] , identifier[color] )
identifier[self] . identifier[create_noise_curve] ( identifier[im] , identifier[color] )
identifier[im] = identifier[im] . identifier[filter] ( identifier[ImageFilter] . identifier[SMOOTH] )
keyword[return] identifier[im]
|
def generate_image(self, chars):
"""Generate the image of the given characters.
:param chars: text to be generated.
"""
background = random_color(238, 255)
color = random_color(10, 200, random.randint(220, 255))
im = self.create_captcha_image(chars, color, background)
self.create_noise_dots(im, color)
self.create_noise_curve(im, color)
im = im.filter(ImageFilter.SMOOTH)
return im
|
def ned2ecef(n: float, e: float, d: float,
             lat0: float, lon0: float, h0: float,
             ell: Ellipsoid = None, deg: bool = True) -> Tuple[float, float, float]:
    """
    North, East, Down to target ECEF coordinates

    Parameters
    ----------
    n : float or numpy.ndarray of float
        North NED coordinate (meters)
    e : float or numpy.ndarray of float
        East NED coordinate (meters)
    d : float or numpy.ndarray of float
        Down NED coordinate (meters)
    lat0 : float
        Observer geodetic latitude
    lon0 : float
        Observer geodetic longitude
    h0 : float
        observer altitude above geodetic ellipsoid (meters)
    ell : Ellipsoid, optional
        reference ellipsoid
    deg : bool, optional
        degrees input/output (False: radians in/out)

    Results
    -------
    x : float or numpy.ndarray of float
        ECEF x coordinate (meters)
    y : float or numpy.ndarray of float
        ECEF y coordinate (meters)
    z : float or numpy.ndarray of float
        ECEF z coordinate (meters)
    """
    # NED and ENU differ only in axis order and the sign of the vertical
    # component (up = -down), so delegate to the ENU conversion.
    up = -d
    return enu2ecef(e, n, up, lat0, lon0, h0, ell, deg=deg)
|
def function[ned2ecef, parameter[n, e, d, lat0, lon0, h0, ell, deg]]:
constant[
North, East, Down to target ECEF coordinates
Parameters
----------
n : float or numpy.ndarray of float
North NED coordinate (meters)
e : float or numpy.ndarray of float
East NED coordinate (meters)
d : float or numpy.ndarray of float
Down NED coordinate (meters)
lat0 : float
Observer geodetic latitude
lon0 : float
Observer geodetic longitude
h0 : float
observer altitude above geodetic ellipsoid (meters)
ell : Ellipsoid, optional
reference ellipsoid
deg : bool, optional
degrees input/output (False: radians in/out)
Results
-------
x : float or numpy.ndarray of float
ECEF x coordinate (meters)
y : float or numpy.ndarray of float
ECEF y coordinate (meters)
z : float or numpy.ndarray of float
ECEF z coordinate (meters)
]
return[call[name[enu2ecef], parameter[name[e], name[n], <ast.UnaryOp object at 0x7da1b12a8430>, name[lat0], name[lon0], name[h0], name[ell]]]]
|
keyword[def] identifier[ned2ecef] ( identifier[n] : identifier[float] , identifier[e] : identifier[float] , identifier[d] : identifier[float] ,
identifier[lat0] : identifier[float] , identifier[lon0] : identifier[float] , identifier[h0] : identifier[float] ,
identifier[ell] : identifier[Ellipsoid] = keyword[None] , identifier[deg] : identifier[bool] = keyword[True] )-> identifier[Tuple] [ identifier[float] , identifier[float] , identifier[float] ]:
literal[string]
keyword[return] identifier[enu2ecef] ( identifier[e] , identifier[n] ,- identifier[d] , identifier[lat0] , identifier[lon0] , identifier[h0] , identifier[ell] , identifier[deg] = identifier[deg] )
|
def ned2ecef(n: float, e: float, d: float, lat0: float, lon0: float, h0: float, ell: Ellipsoid=None, deg: bool=True) -> Tuple[float, float, float]:
"""
North, East, Down to target ECEF coordinates
Parameters
----------
n : float or numpy.ndarray of float
North NED coordinate (meters)
e : float or numpy.ndarray of float
East NED coordinate (meters)
d : float or numpy.ndarray of float
Down NED coordinate (meters)
lat0 : float
Observer geodetic latitude
lon0 : float
Observer geodetic longitude
h0 : float
observer altitude above geodetic ellipsoid (meters)
ell : Ellipsoid, optional
reference ellipsoid
deg : bool, optional
degrees input/output (False: radians in/out)
Results
-------
x : float or numpy.ndarray of float
ECEF x coordinate (meters)
y : float or numpy.ndarray of float
ECEF y coordinate (meters)
z : float or numpy.ndarray of float
ECEF z coordinate (meters)
"""
return enu2ecef(e, n, -d, lat0, lon0, h0, ell, deg=deg)
|
def process_object(obj):
    """Hook to process the object currently being displayed."""
    # If the element carries invalid options, surface them instead of
    # displaying the object.
    errors = OptsMagic.process_element(obj)
    if errors:
        return errors
    OutputMagic.info(obj)
|
def function[process_object, parameter[obj]]:
constant[Hook to process the object currently being displayed.]
variable[invalid_options] assign[=] call[name[OptsMagic].process_element, parameter[name[obj]]]
if name[invalid_options] begin[:]
return[name[invalid_options]]
call[name[OutputMagic].info, parameter[name[obj]]]
|
keyword[def] identifier[process_object] ( identifier[obj] ):
literal[string]
identifier[invalid_options] = identifier[OptsMagic] . identifier[process_element] ( identifier[obj] )
keyword[if] identifier[invalid_options] : keyword[return] identifier[invalid_options]
identifier[OutputMagic] . identifier[info] ( identifier[obj] )
|
def process_object(obj):
"""Hook to process the object currently being displayed."""
invalid_options = OptsMagic.process_element(obj)
if invalid_options:
return invalid_options # depends on [control=['if'], data=[]]
OutputMagic.info(obj)
|
def daterange(start, stop, step=1, inclusive=False):
    """In the spirit of :func:`range` and :func:`xrange`, the `daterange`
    generator that yields a sequence of :class:`~datetime.date`
    objects, starting at *start*, incrementing by *step*, until *stop*
    is reached.

    When *inclusive* is True, the final date may be *stop*, **if**
    *step* falls evenly on it. By default, *step* is one day. See
    details below for many more details.

    Args:
        start (datetime.date): The starting date The first value in
            the sequence.
        stop (datetime.date): The stopping date. By default not
            included in return. Can be `None` to yield an infinite
            sequence.
        step (int): The value to increment *start* by to reach
            *stop*. Can be an :class:`int` number of days, a
            :class:`datetime.timedelta`, or a :class:`tuple` of integers,
            `(year, month, day)`. Positive and negative *step* values
            are supported.
        inclusive (bool): Whether or not the *stop* date can be
            returned. *stop* is only returned when a *step* falls evenly
            on it.

    >>> christmas = date(year=2015, month=12, day=25)
    >>> boxing_day = date(year=2015, month=12, day=26)
    >>> new_year = date(year=2016, month=1, day=1)
    >>> for day in daterange(christmas, new_year):
    ...     print(repr(day))
    datetime.date(2015, 12, 25)
    datetime.date(2015, 12, 26)
    datetime.date(2015, 12, 27)
    datetime.date(2015, 12, 28)
    datetime.date(2015, 12, 29)
    datetime.date(2015, 12, 30)
    datetime.date(2015, 12, 31)
    >>> for day in daterange(christmas, boxing_day):
    ...     print(repr(day))
    datetime.date(2015, 12, 25)
    >>> for day in daterange(date(2017, 5, 1), date(2017, 8, 1),
    ...                      step=(0, 1, 0), inclusive=True):
    ...     print(repr(day))
    datetime.date(2017, 5, 1)
    datetime.date(2017, 6, 1)
    datetime.date(2017, 7, 1)
    datetime.date(2017, 8, 1)

    *Be careful when using stop=None, as this will yield an infinite
    sequence of dates.*
    """
    if not isinstance(start, date):
        raise TypeError("start expected datetime.date instance")
    if stop and not isinstance(stop, date):
        raise TypeError("stop expected datetime.date instance or None")
    try:
        y_step, m_step, d_step = step
    except TypeError:
        # *step* is a plain day count or a timedelta, not a (y, m, d) tuple.
        y_step, m_step, d_step = 0, 0, step
    else:
        y_step, m_step = int(y_step), int(m_step)
    if isinstance(d_step, int):
        d_step = timedelta(days=int(d_step))
    elif isinstance(d_step, timedelta):
        pass
    else:
        raise ValueError('step expected int, timedelta, or tuple'
                         ' (year, month, day), not: %r' % step)

    if stop is None:
        # Infinite sequence: never finished.  The predicate must accept the
        # same two arguments as the operator-based predicates below (the
        # original one-argument lambda raised TypeError whenever stop=None).
        finished = lambda now, stop: False
    elif start < stop:
        finished = operator.gt if inclusive else operator.ge
    else:
        finished = operator.lt if inclusive else operator.le
    now = start

    while not finished(now, stop):
        yield now
        if y_step or m_step:
            # Months are 1-based; shift to 0-based before divmod so that
            # landing exactly on December does not spill a spurious extra
            # year (e.g. Nov + 1 month must be Dec of the *same* year).
            m_y_step, cur_month = divmod(now.month + m_step - 1, 12)
            now = now.replace(year=now.year + y_step + m_y_step,
                              month=cur_month + 1)
        now = now + d_step
    return
|
def function[daterange, parameter[start, stop, step, inclusive]]:
constant[In the spirit of :func:`range` and :func:`xrange`, the `daterange`
generator that yields a sequence of :class:`~datetime.date`
objects, starting at *start*, incrementing by *step*, until *stop*
is reached.
When *inclusive* is True, the final date may be *stop*, **if**
*step* falls evenly on it. By default, *step* is one day. See
details below for many more details.
Args:
start (datetime.date): The starting date The first value in
the sequence.
stop (datetime.date): The stopping date. By default not
included in return. Can be `None` to yield an infinite
sequence.
step (int): The value to increment *start* by to reach
*stop*. Can be an :class:`int` number of days, a
:class:`datetime.timedelta`, or a :class:`tuple` of integers,
`(year, month, day)`. Positive and negative *step* values
are supported.
inclusive (bool): Whether or not the *stop* date can be
returned. *stop* is only returned when a *step* falls evenly
on it.
>>> christmas = date(year=2015, month=12, day=25)
>>> boxing_day = date(year=2015, month=12, day=26)
>>> new_year = date(year=2016, month=1, day=1)
>>> for day in daterange(christmas, new_year):
... print(repr(day))
datetime.date(2015, 12, 25)
datetime.date(2015, 12, 26)
datetime.date(2015, 12, 27)
datetime.date(2015, 12, 28)
datetime.date(2015, 12, 29)
datetime.date(2015, 12, 30)
datetime.date(2015, 12, 31)
>>> for day in daterange(christmas, boxing_day):
... print(repr(day))
datetime.date(2015, 12, 25)
>>> for day in daterange(date(2017, 5, 1), date(2017, 8, 1),
... step=(0, 1, 0), inclusive=True):
... print(repr(day))
datetime.date(2017, 5, 1)
datetime.date(2017, 6, 1)
datetime.date(2017, 7, 1)
datetime.date(2017, 8, 1)
*Be careful when using stop=None, as this will yield an infinite
sequence of dates.*
]
if <ast.UnaryOp object at 0x7da1b1968f40> begin[:]
<ast.Raise object at 0x7da1b196ae30>
if <ast.BoolOp object at 0x7da1b1969210> begin[:]
<ast.Raise object at 0x7da1b196a470>
<ast.Try object at 0x7da1b1968d60>
if call[name[isinstance], parameter[name[d_step], name[int]]] begin[:]
variable[d_step] assign[=] call[name[timedelta], parameter[]]
if compare[name[stop] is constant[None]] begin[:]
variable[finished] assign[=] <ast.Lambda object at 0x7da1b19c1060>
variable[now] assign[=] name[start]
while <ast.UnaryOp object at 0x7da1b19c03a0> begin[:]
<ast.Yield object at 0x7da1b19c0e80>
if <ast.BoolOp object at 0x7da1b19c1b70> begin[:]
<ast.Tuple object at 0x7da1b19c1b10> assign[=] call[name[divmod], parameter[binary_operation[name[now].month + name[m_step]], constant[12]]]
variable[now] assign[=] call[name[now].replace, parameter[]]
variable[now] assign[=] binary_operation[name[now] + name[d_step]]
return[None]
|
keyword[def] identifier[daterange] ( identifier[start] , identifier[stop] , identifier[step] = literal[int] , identifier[inclusive] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[start] , identifier[date] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[stop] keyword[and] keyword[not] identifier[isinstance] ( identifier[stop] , identifier[date] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[try] :
identifier[y_step] , identifier[m_step] , identifier[d_step] = identifier[step]
keyword[except] identifier[TypeError] :
identifier[y_step] , identifier[m_step] , identifier[d_step] = literal[int] , literal[int] , identifier[step]
keyword[else] :
identifier[y_step] , identifier[m_step] = identifier[int] ( identifier[y_step] ), identifier[int] ( identifier[m_step] )
keyword[if] identifier[isinstance] ( identifier[d_step] , identifier[int] ):
identifier[d_step] = identifier[timedelta] ( identifier[days] = identifier[int] ( identifier[d_step] ))
keyword[elif] identifier[isinstance] ( identifier[d_step] , identifier[timedelta] ):
keyword[pass]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] % identifier[step] )
keyword[if] identifier[stop] keyword[is] keyword[None] :
identifier[finished] = keyword[lambda] identifier[t] : keyword[False]
keyword[elif] identifier[start] < identifier[stop] :
identifier[finished] = identifier[operator] . identifier[gt] keyword[if] identifier[inclusive] keyword[else] identifier[operator] . identifier[ge]
keyword[else] :
identifier[finished] = identifier[operator] . identifier[lt] keyword[if] identifier[inclusive] keyword[else] identifier[operator] . identifier[le]
identifier[now] = identifier[start]
keyword[while] keyword[not] identifier[finished] ( identifier[now] , identifier[stop] ):
keyword[yield] identifier[now]
keyword[if] identifier[y_step] keyword[or] identifier[m_step] :
identifier[m_y_step] , identifier[cur_month] = identifier[divmod] ( identifier[now] . identifier[month] + identifier[m_step] , literal[int] )
identifier[now] = identifier[now] . identifier[replace] ( identifier[year] = identifier[now] . identifier[year] + identifier[y_step] + identifier[m_y_step] ,
identifier[month] = identifier[cur_month] keyword[or] literal[int] )
identifier[now] = identifier[now] + identifier[d_step]
keyword[return]
|
def daterange(start, stop, step=1, inclusive=False):
"""In the spirit of :func:`range` and :func:`xrange`, the `daterange`
generator that yields a sequence of :class:`~datetime.date`
objects, starting at *start*, incrementing by *step*, until *stop*
is reached.
When *inclusive* is True, the final date may be *stop*, **if**
*step* falls evenly on it. By default, *step* is one day. See
details below for many more details.
Args:
start (datetime.date): The starting date The first value in
the sequence.
stop (datetime.date): The stopping date. By default not
included in return. Can be `None` to yield an infinite
sequence.
step (int): The value to increment *start* by to reach
*stop*. Can be an :class:`int` number of days, a
:class:`datetime.timedelta`, or a :class:`tuple` of integers,
`(year, month, day)`. Positive and negative *step* values
are supported.
inclusive (bool): Whether or not the *stop* date can be
returned. *stop* is only returned when a *step* falls evenly
on it.
>>> christmas = date(year=2015, month=12, day=25)
>>> boxing_day = date(year=2015, month=12, day=26)
>>> new_year = date(year=2016, month=1, day=1)
>>> for day in daterange(christmas, new_year):
... print(repr(day))
datetime.date(2015, 12, 25)
datetime.date(2015, 12, 26)
datetime.date(2015, 12, 27)
datetime.date(2015, 12, 28)
datetime.date(2015, 12, 29)
datetime.date(2015, 12, 30)
datetime.date(2015, 12, 31)
>>> for day in daterange(christmas, boxing_day):
... print(repr(day))
datetime.date(2015, 12, 25)
>>> for day in daterange(date(2017, 5, 1), date(2017, 8, 1),
... step=(0, 1, 0), inclusive=True):
... print(repr(day))
datetime.date(2017, 5, 1)
datetime.date(2017, 6, 1)
datetime.date(2017, 7, 1)
datetime.date(2017, 8, 1)
*Be careful when using stop=None, as this will yield an infinite
sequence of dates.*
"""
if not isinstance(start, date):
raise TypeError('start expected datetime.date instance') # depends on [control=['if'], data=[]]
if stop and (not isinstance(stop, date)):
raise TypeError('stop expected datetime.date instance or None') # depends on [control=['if'], data=[]]
try:
(y_step, m_step, d_step) = step # depends on [control=['try'], data=[]]
except TypeError:
(y_step, m_step, d_step) = (0, 0, step) # depends on [control=['except'], data=[]]
else:
(y_step, m_step) = (int(y_step), int(m_step))
if isinstance(d_step, int):
d_step = timedelta(days=int(d_step)) # depends on [control=['if'], data=[]]
elif isinstance(d_step, timedelta):
pass # depends on [control=['if'], data=[]]
else:
raise ValueError('step expected int, timedelta, or tuple (year, month, day), not: %r' % step)
if stop is None:
finished = lambda t: False # depends on [control=['if'], data=[]]
elif start < stop:
finished = operator.gt if inclusive else operator.ge # depends on [control=['if'], data=[]]
else:
finished = operator.lt if inclusive else operator.le
now = start
while not finished(now, stop):
yield now
if y_step or m_step:
(m_y_step, cur_month) = divmod(now.month + m_step, 12)
now = now.replace(year=now.year + y_step + m_y_step, month=cur_month or 12) # depends on [control=['if'], data=[]]
now = now + d_step # depends on [control=['while'], data=[]]
return
|
def get_3q_or_more_nodes(self):
    """Deprecated. Use threeQ_or_more_gates().

    Returns:
        list: ``(node_id, data_dict)`` tuples for every op node that acts
        on three or more qubit arguments.
    """
    # The original call passed a second message *string* where
    # ``warnings.warn`` requires the warning category (a Warning subclass),
    # and DeprecationWarning landed in the stacklevel slot -- a guaranteed
    # TypeError at runtime. Merge the messages and pass category/stacklevel
    # in their proper positions.
    warnings.warn('The method get_3q_or_more_nodes() is being replaced by'
                  ' threeQ_or_more_gates(). '
                  'Returning a list of (node_id, data) tuples is also deprecated, '
                  'threeQ_or_more_gates() returns a list of DAGNodes.',
                  DeprecationWarning, 2)
    three_q_nodes = []
    for node in self._multi_graph.nodes():
        if node.type == 'op' and len(node.qargs) >= 3:
            three_q_nodes.append((node._node_id, node.data_dict))
    return three_q_nodes
|
def function[get_3q_or_more_nodes, parameter[self]]:
constant[Deprecated. Use threeQ_or_more_gates().]
call[name[warnings].warn, parameter[constant[The method get_3q_or_more_nodes() is being replaced by threeQ_or_more_gates()], constant[Returning a list of (node_id, data) tuples is also deprecated, threeQ_or_more_gates() returns a list of DAGNodes.], name[DeprecationWarning], constant[2]]]
variable[three_q_nodes] assign[=] list[[]]
for taget[name[node]] in starred[call[name[self]._multi_graph.nodes, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b0383c40> begin[:]
call[name[three_q_nodes].append, parameter[tuple[[<ast.Attribute object at 0x7da1b0380b80>, <ast.Attribute object at 0x7da1b0380b20>]]]]
return[name[three_q_nodes]]
|
keyword[def] identifier[get_3q_or_more_nodes] ( identifier[self] ):
literal[string]
identifier[warnings] . identifier[warn] ( literal[string]
literal[string] ,
literal[string]
literal[string] ,
identifier[DeprecationWarning] , literal[int] )
identifier[three_q_nodes] =[]
keyword[for] identifier[node] keyword[in] identifier[self] . identifier[_multi_graph] . identifier[nodes] ():
keyword[if] identifier[node] . identifier[type] == literal[string] keyword[and] identifier[len] ( identifier[node] . identifier[qargs] )>= literal[int] :
identifier[three_q_nodes] . identifier[append] (( identifier[node] . identifier[_node_id] , identifier[node] . identifier[data_dict] ))
keyword[return] identifier[three_q_nodes]
|
def get_3q_or_more_nodes(self):
"""Deprecated. Use threeQ_or_more_gates()."""
warnings.warn('The method get_3q_or_more_nodes() is being replaced by threeQ_or_more_gates()', 'Returning a list of (node_id, data) tuples is also deprecated, threeQ_or_more_gates() returns a list of DAGNodes.', DeprecationWarning, 2)
three_q_nodes = []
for node in self._multi_graph.nodes():
if node.type == 'op' and len(node.qargs) >= 3:
three_q_nodes.append((node._node_id, node.data_dict)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']]
return three_q_nodes
|
def remove_element(self, e):
    """Remove element `e` from model
    """
    # Labelled elements are also registered by name; drop that entry first.
    if e.label is not None:
        self.elementdict.pop(e.label)
    self.elementlist.remove(e)
|
def function[remove_element, parameter[self, e]]:
constant[Remove element `e` from model
]
if compare[name[e].label is_not constant[None]] begin[:]
call[name[self].elementdict.pop, parameter[name[e].label]]
call[name[self].elementlist.remove, parameter[name[e]]]
|
keyword[def] identifier[remove_element] ( identifier[self] , identifier[e] ):
literal[string]
keyword[if] identifier[e] . identifier[label] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[elementdict] . identifier[pop] ( identifier[e] . identifier[label] )
identifier[self] . identifier[elementlist] . identifier[remove] ( identifier[e] )
|
def remove_element(self, e):
"""Remove element `e` from model
"""
if e.label is not None:
self.elementdict.pop(e.label) # depends on [control=['if'], data=[]]
self.elementlist.remove(e)
|
def _groups_of_size(iterable, n, fillvalue=None):
"""Collect data into fixed-length chunks or blocks."""
# _groups_of_size('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return zip_longest(fillvalue=fillvalue, *args)
|
def function[_groups_of_size, parameter[iterable, n, fillvalue]]:
constant[Collect data into fixed-length chunks or blocks.]
variable[args] assign[=] binary_operation[list[[<ast.Call object at 0x7da204344490>]] * name[n]]
return[call[name[zip_longest], parameter[<ast.Starred object at 0x7da20c6a8a00>]]]
|
keyword[def] identifier[_groups_of_size] ( identifier[iterable] , identifier[n] , identifier[fillvalue] = keyword[None] ):
literal[string]
identifier[args] =[ identifier[iter] ( identifier[iterable] )]* identifier[n]
keyword[return] identifier[zip_longest] ( identifier[fillvalue] = identifier[fillvalue] ,* identifier[args] )
|
def _groups_of_size(iterable, n, fillvalue=None):
"""Collect data into fixed-length chunks or blocks."""
# _groups_of_size('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
|
def _perform_read(self, addr, size):
"""Perform a read using the machine controller."""
return self._machine_controller.read(addr, size, self._x, self._y, 0)
|
def function[_perform_read, parameter[self, addr, size]]:
constant[Perform a read using the machine controller.]
return[call[name[self]._machine_controller.read, parameter[name[addr], name[size], name[self]._x, name[self]._y, constant[0]]]]
|
keyword[def] identifier[_perform_read] ( identifier[self] , identifier[addr] , identifier[size] ):
literal[string]
keyword[return] identifier[self] . identifier[_machine_controller] . identifier[read] ( identifier[addr] , identifier[size] , identifier[self] . identifier[_x] , identifier[self] . identifier[_y] , literal[int] )
|
def _perform_read(self, addr, size):
"""Perform a read using the machine controller."""
return self._machine_controller.read(addr, size, self._x, self._y, 0)
|
def _convert_a_header_to_a_h2_header(self, hdr_name, hdr_value, is_sensitive, should_index): # noqa: E501
    # type: (str, str, Callable[[str, str], bool], Callable[[str], bool]) -> Tuple[HPackHeaders, int] # noqa: E501
    """ _convert_a_header_to_a_h2_header builds a HPackHeaders from a header
    name and a value. It returns a HPackIndexedHdr whenever possible. If not, # noqa: E501
    it returns a HPackLitHdrFldWithoutIndexing or a
    HPackLitHdrFldWithIncrIndexing, based on the should_index callback.
    HPackLitHdrFldWithoutIndexing is forced if the is_sensitive callback
    returns True and its never_index bit is set.

    The second element of the returned tuple is the length of the
    corresponding HPackHdrEntry (presumably the size the header occupies
    for dynamic-table accounting -- NOTE(review): confirm against callers).
    """
    # If both name and value are already indexed
    idx = self.get_idx_by_name_and_value(hdr_name, hdr_value)
    if idx is not None:
        # Best case: emit a fully-indexed header field referencing the
        # existing table entry at `idx`.
        return HPackIndexedHdr(index=idx), len(self[idx])
    # The value is not indexed for this headers
    hdr_value = self._optimize_header_length_and_packetify(hdr_value)
    # Searching if the header name is indexed
    idx = self.get_idx_by_name(hdr_name)
    if idx is not None:
        # Name is indexed, value is a literal. Three sub-cases follow:
        # sensitive (never-indexed), should-index (incremental indexing),
        # or plain literal without indexing.
        if is_sensitive(
            hdr_name,
            hdr_value.getfieldval('data').origin()
        ):
            # Sensitive header: force the never_index bit so intermediaries
            # do not add it to any compression table.
            return HPackLitHdrFldWithoutIndexing(
                never_index=1,
                index=idx,
                hdr_value=hdr_value
            ), len(
                HPackHdrEntry(
                    self[idx].name(),
                    hdr_value.getfieldval('data').origin()
                )
            )
        if should_index(hdr_name):
            # Caller wants this header added to the dynamic table.
            return HPackLitHdrFldWithIncrIndexing(
                index=idx,
                hdr_value=hdr_value
            ), len(
                HPackHdrEntry(
                    self[idx].name(),
                    hdr_value.getfieldval('data').origin()
                )
            )
        # Neither sensitive nor worth indexing: literal value, indexed name.
        return HPackLitHdrFldWithoutIndexing(
            index=idx,
            hdr_value=hdr_value
        ), len(
            HPackHdrEntry(
                self[idx].name(),
                hdr_value.getfieldval('data').origin()
            )
        )
    # Neither name nor value is indexed: both are sent as literals
    # (index=0 means "new name" in the HPACK representations below).
    hdr_name = self._optimize_header_length_and_packetify(hdr_name)
    if is_sensitive(
        hdr_name.getfieldval('data').origin(),
        hdr_value.getfieldval('data').origin()
    ):
        # Sensitive header with a literal name: never-indexed literal.
        return HPackLitHdrFldWithoutIndexing(
            never_index=1,
            index=0,
            hdr_name=hdr_name,
            hdr_value=hdr_value
        ), len(
            HPackHdrEntry(
                hdr_name.getfieldval('data').origin(),
                hdr_value.getfieldval('data').origin()
            )
        )
    if should_index(hdr_name.getfieldval('data').origin()):
        # New name+value pair that should enter the dynamic table.
        return HPackLitHdrFldWithIncrIndexing(
            index=0,
            hdr_name=hdr_name,
            hdr_value=hdr_value
        ), len(
            HPackHdrEntry(
                hdr_name.getfieldval('data').origin(),
                hdr_value.getfieldval('data').origin()
            )
        )
    # Fallback: literal name and value, no indexing requested.
    return HPackLitHdrFldWithoutIndexing(
        index=0,
        hdr_name=hdr_name,
        hdr_value=hdr_value
    ), len(
        HPackHdrEntry(
            hdr_name.getfieldval('data').origin(),
            hdr_value.getfieldval('data').origin()
        )
    )
|
def function[_convert_a_header_to_a_h2_header, parameter[self, hdr_name, hdr_value, is_sensitive, should_index]]:
constant[ _convert_a_header_to_a_h2_header builds a HPackHeaders from a header
name and a value. It returns a HPackIndexedHdr whenever possible. If not, # noqa: E501
it returns a HPackLitHdrFldWithoutIndexing or a
HPackLitHdrFldWithIncrIndexing, based on the should_index callback.
HPackLitHdrFldWithoutIndexing is forced if the is_sensitive callback
returns True and its never_index bit is set.
]
variable[idx] assign[=] call[name[self].get_idx_by_name_and_value, parameter[name[hdr_name], name[hdr_value]]]
if compare[name[idx] is_not constant[None]] begin[:]
return[tuple[[<ast.Call object at 0x7da1b1ff4f40>, <ast.Call object at 0x7da1b1ff42b0>]]]
variable[hdr_value] assign[=] call[name[self]._optimize_header_length_and_packetify, parameter[name[hdr_value]]]
variable[idx] assign[=] call[name[self].get_idx_by_name, parameter[name[hdr_name]]]
if compare[name[idx] is_not constant[None]] begin[:]
if call[name[is_sensitive], parameter[name[hdr_name], call[call[name[hdr_value].getfieldval, parameter[constant[data]]].origin, parameter[]]]] begin[:]
return[tuple[[<ast.Call object at 0x7da20eb2b070>, <ast.Call object at 0x7da20eb29de0>]]]
if call[name[should_index], parameter[name[hdr_name]]] begin[:]
return[tuple[[<ast.Call object at 0x7da2044c07f0>, <ast.Call object at 0x7da2044c1ff0>]]]
return[tuple[[<ast.Call object at 0x7da2044c22c0>, <ast.Call object at 0x7da1b2055990>]]]
variable[hdr_name] assign[=] call[name[self]._optimize_header_length_and_packetify, parameter[name[hdr_name]]]
if call[name[is_sensitive], parameter[call[call[name[hdr_name].getfieldval, parameter[constant[data]]].origin, parameter[]], call[call[name[hdr_value].getfieldval, parameter[constant[data]]].origin, parameter[]]]] begin[:]
return[tuple[[<ast.Call object at 0x7da1b1fdd480>, <ast.Call object at 0x7da1b1fdd2d0>]]]
if call[name[should_index], parameter[call[call[name[hdr_name].getfieldval, parameter[constant[data]]].origin, parameter[]]]] begin[:]
return[tuple[[<ast.Call object at 0x7da1b1fdf820>, <ast.Call object at 0x7da1b1fdc790>]]]
return[tuple[[<ast.Call object at 0x7da1b1fdf580>, <ast.Call object at 0x7da1b1fde500>]]]
|
keyword[def] identifier[_convert_a_header_to_a_h2_header] ( identifier[self] , identifier[hdr_name] , identifier[hdr_value] , identifier[is_sensitive] , identifier[should_index] ):
literal[string]
identifier[idx] = identifier[self] . identifier[get_idx_by_name_and_value] ( identifier[hdr_name] , identifier[hdr_value] )
keyword[if] identifier[idx] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[HPackIndexedHdr] ( identifier[index] = identifier[idx] ), identifier[len] ( identifier[self] [ identifier[idx] ])
identifier[hdr_value] = identifier[self] . identifier[_optimize_header_length_and_packetify] ( identifier[hdr_value] )
identifier[idx] = identifier[self] . identifier[get_idx_by_name] ( identifier[hdr_name] )
keyword[if] identifier[idx] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[is_sensitive] (
identifier[hdr_name] ,
identifier[hdr_value] . identifier[getfieldval] ( literal[string] ). identifier[origin] ()
):
keyword[return] identifier[HPackLitHdrFldWithoutIndexing] (
identifier[never_index] = literal[int] ,
identifier[index] = identifier[idx] ,
identifier[hdr_value] = identifier[hdr_value]
), identifier[len] (
identifier[HPackHdrEntry] (
identifier[self] [ identifier[idx] ]. identifier[name] (),
identifier[hdr_value] . identifier[getfieldval] ( literal[string] ). identifier[origin] ()
)
)
keyword[if] identifier[should_index] ( identifier[hdr_name] ):
keyword[return] identifier[HPackLitHdrFldWithIncrIndexing] (
identifier[index] = identifier[idx] ,
identifier[hdr_value] = identifier[hdr_value]
), identifier[len] (
identifier[HPackHdrEntry] (
identifier[self] [ identifier[idx] ]. identifier[name] (),
identifier[hdr_value] . identifier[getfieldval] ( literal[string] ). identifier[origin] ()
)
)
keyword[return] identifier[HPackLitHdrFldWithoutIndexing] (
identifier[index] = identifier[idx] ,
identifier[hdr_value] = identifier[hdr_value]
), identifier[len] (
identifier[HPackHdrEntry] (
identifier[self] [ identifier[idx] ]. identifier[name] (),
identifier[hdr_value] . identifier[getfieldval] ( literal[string] ). identifier[origin] ()
)
)
identifier[hdr_name] = identifier[self] . identifier[_optimize_header_length_and_packetify] ( identifier[hdr_name] )
keyword[if] identifier[is_sensitive] (
identifier[hdr_name] . identifier[getfieldval] ( literal[string] ). identifier[origin] (),
identifier[hdr_value] . identifier[getfieldval] ( literal[string] ). identifier[origin] ()
):
keyword[return] identifier[HPackLitHdrFldWithoutIndexing] (
identifier[never_index] = literal[int] ,
identifier[index] = literal[int] ,
identifier[hdr_name] = identifier[hdr_name] ,
identifier[hdr_value] = identifier[hdr_value]
), identifier[len] (
identifier[HPackHdrEntry] (
identifier[hdr_name] . identifier[getfieldval] ( literal[string] ). identifier[origin] (),
identifier[hdr_value] . identifier[getfieldval] ( literal[string] ). identifier[origin] ()
)
)
keyword[if] identifier[should_index] ( identifier[hdr_name] . identifier[getfieldval] ( literal[string] ). identifier[origin] ()):
keyword[return] identifier[HPackLitHdrFldWithIncrIndexing] (
identifier[index] = literal[int] ,
identifier[hdr_name] = identifier[hdr_name] ,
identifier[hdr_value] = identifier[hdr_value]
), identifier[len] (
identifier[HPackHdrEntry] (
identifier[hdr_name] . identifier[getfieldval] ( literal[string] ). identifier[origin] (),
identifier[hdr_value] . identifier[getfieldval] ( literal[string] ). identifier[origin] ()
)
)
keyword[return] identifier[HPackLitHdrFldWithoutIndexing] (
identifier[index] = literal[int] ,
identifier[hdr_name] = identifier[hdr_name] ,
identifier[hdr_value] = identifier[hdr_value]
), identifier[len] (
identifier[HPackHdrEntry] (
identifier[hdr_name] . identifier[getfieldval] ( literal[string] ). identifier[origin] (),
identifier[hdr_value] . identifier[getfieldval] ( literal[string] ). identifier[origin] ()
)
)
|
def _convert_a_header_to_a_h2_header(self, hdr_name, hdr_value, is_sensitive, should_index): # noqa: E501
# type: (str, str, Callable[[str, str], bool], Callable[[str], bool]) -> Tuple[HPackHeaders, int] # noqa: E501
' _convert_a_header_to_a_h2_header builds a HPackHeaders from a header\n name and a value. It returns a HPackIndexedHdr whenever possible. If not, # noqa: E501\n it returns a HPackLitHdrFldWithoutIndexing or a\n HPackLitHdrFldWithIncrIndexing, based on the should_index callback.\n HPackLitHdrFldWithoutIndexing is forced if the is_sensitive callback\n returns True and its never_index bit is set.\n '
# If both name and value are already indexed
idx = self.get_idx_by_name_and_value(hdr_name, hdr_value)
if idx is not None:
return (HPackIndexedHdr(index=idx), len(self[idx])) # depends on [control=['if'], data=['idx']]
# The value is not indexed for this headers
hdr_value = self._optimize_header_length_and_packetify(hdr_value)
# Searching if the header name is indexed
idx = self.get_idx_by_name(hdr_name)
if idx is not None:
if is_sensitive(hdr_name, hdr_value.getfieldval('data').origin()):
return (HPackLitHdrFldWithoutIndexing(never_index=1, index=idx, hdr_value=hdr_value), len(HPackHdrEntry(self[idx].name(), hdr_value.getfieldval('data').origin()))) # depends on [control=['if'], data=[]]
if should_index(hdr_name):
return (HPackLitHdrFldWithIncrIndexing(index=idx, hdr_value=hdr_value), len(HPackHdrEntry(self[idx].name(), hdr_value.getfieldval('data').origin()))) # depends on [control=['if'], data=[]]
return (HPackLitHdrFldWithoutIndexing(index=idx, hdr_value=hdr_value), len(HPackHdrEntry(self[idx].name(), hdr_value.getfieldval('data').origin()))) # depends on [control=['if'], data=['idx']]
hdr_name = self._optimize_header_length_and_packetify(hdr_name)
if is_sensitive(hdr_name.getfieldval('data').origin(), hdr_value.getfieldval('data').origin()):
return (HPackLitHdrFldWithoutIndexing(never_index=1, index=0, hdr_name=hdr_name, hdr_value=hdr_value), len(HPackHdrEntry(hdr_name.getfieldval('data').origin(), hdr_value.getfieldval('data').origin()))) # depends on [control=['if'], data=[]]
if should_index(hdr_name.getfieldval('data').origin()):
return (HPackLitHdrFldWithIncrIndexing(index=0, hdr_name=hdr_name, hdr_value=hdr_value), len(HPackHdrEntry(hdr_name.getfieldval('data').origin(), hdr_value.getfieldval('data').origin()))) # depends on [control=['if'], data=[]]
return (HPackLitHdrFldWithoutIndexing(index=0, hdr_name=hdr_name, hdr_value=hdr_value), len(HPackHdrEntry(hdr_name.getfieldval('data').origin(), hdr_value.getfieldval('data').origin())))
|
def insertProcessingEra(self, businput):
        """
        Insert a new processing era row via the processing-era DAO, inside a
        single transaction.

        Input dictionary has to have the following keys:
        processing_version, creation_date, create_by, description
        it builds the correct dictionary for dao input and executes the dao

        Unique-constraint / duplicate errors are logged and ignored (the era
        already exists); any other error rolls the transaction back and is
        re-raised.
        """
        conn = self.dbi.connection()
        tran = conn.begin()
        try:
            # Allocate the primary key for the new row from the SEQ_PE sequence
            businput["processing_era_id"] = self.sm.increment(conn, "SEQ_PE", tran)
            # Deliberate no-op self-assignment: reading the key raises KeyError
            # when "processing_version" is missing, which routes to the
            # invalid-input handler below. Do not remove.
            businput["processing_version"] = businput["processing_version"]
            self.pein.execute(conn, businput, tran)
            tran.commit()
            # Mark the transaction finished so the finally block skips rollback
            tran = None
        except KeyError as ke:
            # A required key was missing from businput.
            # NOTE(review): dbsExceptionHandler presumably raises; if it were to
            # return, control falls to finally and the transaction rolls back.
            dbsExceptionHandler('dbsException-invalid-input',
                                "Invalid input:" + ke.args[0])
        except Exception as ex:
            # String-match the DB error text to detect "row already exists"
            if (str(ex).lower().find("unique constraint") != -1 or
                str(ex).lower().find("duplicate") != -1):
                # already exist
                self.logger.warning("DBSProcessingEra/insertProcessingEras. " +
                                    "Unique constraint violation being ignored...")
                self.logger.warning(ex)
            else:
                if tran:
                    tran.rollback()
                    tran = None
                raise
        finally:
            # Roll back anything left uncommitted and release the connection
            if tran:
                tran.rollback()
            if conn:
                conn.close()
|
def function[insertProcessingEra, parameter[self, businput]]:
constant[
Input dictionary has to have the following keys:
processing_version, creation_date, create_by, description
it builds the correct dictionary for dao input and executes the dao
]
variable[conn] assign[=] call[name[self].dbi.connection, parameter[]]
variable[tran] assign[=] call[name[conn].begin, parameter[]]
<ast.Try object at 0x7da2054a7370>
|
keyword[def] identifier[insertProcessingEra] ( identifier[self] , identifier[businput] ):
literal[string]
identifier[conn] = identifier[self] . identifier[dbi] . identifier[connection] ()
identifier[tran] = identifier[conn] . identifier[begin] ()
keyword[try] :
identifier[businput] [ literal[string] ]= identifier[self] . identifier[sm] . identifier[increment] ( identifier[conn] , literal[string] , identifier[tran] )
identifier[businput] [ literal[string] ]= identifier[businput] [ literal[string] ]
identifier[self] . identifier[pein] . identifier[execute] ( identifier[conn] , identifier[businput] , identifier[tran] )
identifier[tran] . identifier[commit] ()
identifier[tran] = keyword[None]
keyword[except] identifier[KeyError] keyword[as] identifier[ke] :
identifier[dbsExceptionHandler] ( literal[string] ,
literal[string] + identifier[ke] . identifier[args] [ literal[int] ])
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
keyword[if] ( identifier[str] ( identifier[ex] ). identifier[lower] (). identifier[find] ( literal[string] )!=- literal[int] keyword[or]
identifier[str] ( identifier[ex] ). identifier[lower] (). identifier[find] ( literal[string] )!=- literal[int] ):
identifier[self] . identifier[logger] . identifier[warning] ( literal[string] +
literal[string] )
identifier[self] . identifier[logger] . identifier[warning] ( identifier[ex] )
keyword[else] :
keyword[if] identifier[tran] :
identifier[tran] . identifier[rollback] ()
identifier[tran] = keyword[None]
keyword[raise]
keyword[finally] :
keyword[if] identifier[tran] :
identifier[tran] . identifier[rollback] ()
keyword[if] identifier[conn] :
identifier[conn] . identifier[close] ()
|
def insertProcessingEra(self, businput):
"""
Input dictionary has to have the following keys:
processing_version, creation_date, create_by, description
it builds the correct dictionary for dao input and executes the dao
"""
conn = self.dbi.connection()
tran = conn.begin()
try:
businput['processing_era_id'] = self.sm.increment(conn, 'SEQ_PE', tran)
businput['processing_version'] = businput['processing_version']
self.pein.execute(conn, businput, tran)
tran.commit()
tran = None # depends on [control=['try'], data=[]]
except KeyError as ke:
dbsExceptionHandler('dbsException-invalid-input', 'Invalid input:' + ke.args[0]) # depends on [control=['except'], data=['ke']]
except Exception as ex:
if str(ex).lower().find('unique constraint') != -1 or str(ex).lower().find('duplicate') != -1:
# already exist
self.logger.warning('DBSProcessingEra/insertProcessingEras. ' + 'Unique constraint violation being ignored...')
self.logger.warning(ex) # depends on [control=['if'], data=[]]
else:
if tran:
tran.rollback()
tran = None # depends on [control=['if'], data=[]]
raise # depends on [control=['except'], data=['ex']]
finally:
if tran:
tran.rollback() # depends on [control=['if'], data=[]]
if conn:
conn.close() # depends on [control=['if'], data=[]]
|
def reporter(self):
        """
        Parse the results into a report.

        Builds one CSV row per (sample, best sequence type) from
        self.resultprofile, writes a per-sample report when running as part
        of the assembly pipeline, and always writes a combined report for
        all samples. Also records the observed allele for each gene in
        self.referenceprofile.
        """
        # Initialise variables
        combinedrow = ''
        reportdirset = set()
        # Populate a set of all the report directories to use. A standard analysis will only have a single report
        # directory, while pipeline analyses will have as many report directories as there are assembled samples
        for sample in self.metadata:
            # Ignore samples that lack a populated reportdir attribute
            if sample[self.analysistype].reportdir != 'NA':
                make_path(sample[self.analysistype].reportdir)
                # Add to the set - I probably could have used a counter here, but I decided against it
                reportdirset.add(sample[self.analysistype].reportdir)
        # Create a report for each sample from :self.resultprofile
        for sample in self.metadata:
            if sample[self.analysistype].reportdir != 'NA':
                if type(sample[self.analysistype].allelenames) == list:
                    # Populate the header with the appropriate data, including all the genes in the list of targets
                    row = 'Strain,Genus,SequenceType,Matches,{},\n' \
                        .format(','.join(sorted(sample[self.analysistype].allelenames)))
                    # Set the sequence counter to 0. This will be used when a sample has multiple best sequence types.
                    # The name of the sample will not be written on subsequent rows in order to make the report clearer
                    seqcount = 0
                    # Iterate through the best sequence types for the sample (only occurs if update profile is disabled)
                    for seqtype in self.resultprofile[sample.name]:
                        # The two bare triple-quoted strings below are illustrative
                        # no-op literals documenting the resultprofile structure
                        # and the CSV row it becomes.
                        """
                        {
                            "OLF15230-1_2015-SEQ-0783": {
                                "1000004_CFIA": {
                                    "7": {
                                        "dnaE": {
                                            "47": "100.00"
                                        },
                                        "dtdS": {
                                            "19": "100.00"
                                        },
                                        "gyrB": {
                                            "359": "100.00"
                                        },
                                        "pntA": {
                                            "50": "100.00"
                                        },
                                        "pyrC": {
                                            "143": "100.00"
                                        },
                                        "recA": {
                                            "31": "100.00"
                                        },
                                        "tnaA": {
                                            "26": "100.00"
                                        }
                                    }
                                }
                            }
                        }
                        """
                        # Becomes
                        """
                        Strain,SequenceType,Matches,dnaE,gyrB,recA,dtdS,pntA,pyrC,tnaA
                        OLF15230-1_2015-SEQ-0783,1000004_CFIA,7,26 (100.00%),359 (100.00%),31 (100.00%),50 (100.00%),
                        19 (100.00%),47 (100.00%),143 (100.00%)
                        """
                        sample[self.analysistype].sequencetype = seqtype
                        # The number of matches to the profile
                        matches = list(self.resultprofile[sample.name][seqtype].keys())[0]
                        # If this is the first of one or more sequence types, include the sample name
                        if seqcount == 0:
                            row += '{},{},{},{},'.format(sample.name, sample.general.referencegenus, seqtype, matches)
                        # Otherwise, skip the sample name
                        else:
                            row += ',,{},{},'.format(seqtype, matches)
                        # Iterate through all the genes present in the analyses for the sample
                        for gene in sorted(sample[self.analysistype].allelenames):
                            # refallele = self.profiledata[self.analysistype][seqtype][gene]
                            refallele = sample[self.analysistype].profiledata[seqtype][gene]
                            # Set the allele and percent id from the dictionary's keys and values, respectively
                            allele = list(self.resultprofile[sample.name][seqtype][matches][gene].keys())[0]
                            percentid = list(self.resultprofile[sample.name][seqtype][matches][gene].values())[0]
                            if refallele and refallele != allele:
                                # Observed allele disagrees with the profile's reference allele:
                                # report the observed allele with either its % id or the expected allele
                                if 0 < float(percentid) < 100:
                                    row += '{} ({:.2f}%),'.format(allele, float(percentid))
                                else:
                                    row += '{} ({}),'.format(allele, refallele)
                            else:
                                # Add the allele and % id to the row (only add the percent identity if it is not 100%)
                                if 0 < float(percentid) < 100:
                                    row += '{} ({:.2f}%),'.format(allele, float(percentid))
                                else:
                                    row += '{},'.format(allele)
                                # Only matching/missing reference alleles are recorded in the reference profile
                                self.referenceprofile[sample.name][gene] = allele
                        # Add a newline
                        row += '\n'
                        # Increment the number of sequence types observed for the sample
                        seqcount += 1
                    combinedrow += row
                    # If the length of the # of report directories is greater than 1 (script is being run as part of
                    # the assembly pipeline) make a report for each sample
                    if self.pipeline:
                        # Open the report
                        with open('{}{}_{}.csv'.format(sample[self.analysistype].reportdir, sample.name,
                                                       self.analysistype), 'w') as report:
                            # Write the row to the report
                            report.write(row)
                dotter()
        # Create the report folder
        make_path(self.reportpath)
        # Create the report containing all the data from all samples
        if self.pipeline:
            with open('{}{}.csv'.format(self.reportpath, self.analysistype), 'w') \
                as combinedreport:
                # Write the results to this report
                combinedreport.write(combinedrow)
        else:
            # Timestamp the combined report name for standalone runs
            with open('{}{}_{:}.csv'.format(self.reportpath, self.analysistype, time.strftime("%Y.%m.%d.%H.%M.%S")),
                      'w') as combinedreport:
                # Write the results to this report
                combinedreport.write(combinedrow)
        # Remove the raw results csv
        [os.remove(rawresults) for rawresults in glob('{}*rawresults*'.format(self.reportpath))]
|
def function[reporter, parameter[self]]:
constant[ Parse the results into a report]
variable[combinedrow] assign[=] constant[]
variable[reportdirset] assign[=] call[name[set], parameter[]]
for taget[name[sample]] in starred[name[self].metadata] begin[:]
if compare[call[name[sample]][name[self].analysistype].reportdir not_equal[!=] constant[NA]] begin[:]
call[name[make_path], parameter[call[name[sample]][name[self].analysistype].reportdir]]
call[name[reportdirset].add, parameter[call[name[sample]][name[self].analysistype].reportdir]]
for taget[name[sample]] in starred[name[self].metadata] begin[:]
if compare[call[name[sample]][name[self].analysistype].reportdir not_equal[!=] constant[NA]] begin[:]
if compare[call[name[type], parameter[call[name[sample]][name[self].analysistype].allelenames]] equal[==] name[list]] begin[:]
variable[row] assign[=] call[constant[Strain,Genus,SequenceType,Matches,{},
].format, parameter[call[constant[,].join, parameter[call[name[sorted], parameter[call[name[sample]][name[self].analysistype].allelenames]]]]]]
variable[seqcount] assign[=] constant[0]
for taget[name[seqtype]] in starred[call[name[self].resultprofile][name[sample].name]] begin[:]
constant[
{
"OLF15230-1_2015-SEQ-0783": {
"1000004_CFIA": {
"7": {
"dnaE": {
"47": "100.00"
},
"dtdS": {
"19": "100.00"
},
"gyrB": {
"359": "100.00"
},
"pntA": {
"50": "100.00"
},
"pyrC": {
"143": "100.00"
},
"recA": {
"31": "100.00"
},
"tnaA": {
"26": "100.00"
}
}
}
}
}
]
constant[
Strain,SequenceType,Matches,dnaE,gyrB,recA,dtdS,pntA,pyrC,tnaA
OLF15230-1_2015-SEQ-0783,1000004_CFIA,7,26 (100.00%),359 (100.00%),31 (100.00%),50 (100.00%),
19 (100.00%),47 (100.00%),143 (100.00%)
]
call[name[sample]][name[self].analysistype].sequencetype assign[=] name[seqtype]
variable[matches] assign[=] call[call[name[list], parameter[call[call[call[name[self].resultprofile][name[sample].name]][name[seqtype]].keys, parameter[]]]]][constant[0]]
if compare[name[seqcount] equal[==] constant[0]] begin[:]
<ast.AugAssign object at 0x7da18bc71300>
for taget[name[gene]] in starred[call[name[sorted], parameter[call[name[sample]][name[self].analysistype].allelenames]]] begin[:]
variable[refallele] assign[=] call[call[call[name[sample]][name[self].analysistype].profiledata][name[seqtype]]][name[gene]]
variable[allele] assign[=] call[call[name[list], parameter[call[call[call[call[call[name[self].resultprofile][name[sample].name]][name[seqtype]]][name[matches]]][name[gene]].keys, parameter[]]]]][constant[0]]
variable[percentid] assign[=] call[call[name[list], parameter[call[call[call[call[call[name[self].resultprofile][name[sample].name]][name[seqtype]]][name[matches]]][name[gene]].values, parameter[]]]]][constant[0]]
if <ast.BoolOp object at 0x7da18bc727d0> begin[:]
if compare[constant[0] less[<] call[name[float], parameter[name[percentid]]]] begin[:]
<ast.AugAssign object at 0x7da18bc73c70>
call[call[name[self].referenceprofile][name[sample].name]][name[gene]] assign[=] name[allele]
<ast.AugAssign object at 0x7da18bc72800>
<ast.AugAssign object at 0x7da18bc72fb0>
<ast.AugAssign object at 0x7da18bc72f20>
if name[self].pipeline begin[:]
with call[name[open], parameter[call[constant[{}{}_{}.csv].format, parameter[call[name[sample]][name[self].analysistype].reportdir, name[sample].name, name[self].analysistype]], constant[w]]] begin[:]
call[name[report].write, parameter[name[row]]]
call[name[dotter], parameter[]]
call[name[make_path], parameter[name[self].reportpath]]
if name[self].pipeline begin[:]
with call[name[open], parameter[call[constant[{}{}.csv].format, parameter[name[self].reportpath, name[self].analysistype]], constant[w]]] begin[:]
call[name[combinedreport].write, parameter[name[combinedrow]]]
<ast.ListComp object at 0x7da18bc73dc0>
|
keyword[def] identifier[reporter] ( identifier[self] ):
literal[string]
identifier[combinedrow] = literal[string]
identifier[reportdirset] = identifier[set] ()
keyword[for] identifier[sample] keyword[in] identifier[self] . identifier[metadata] :
keyword[if] identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[reportdir] != literal[string] :
identifier[make_path] ( identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[reportdir] )
identifier[reportdirset] . identifier[add] ( identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[reportdir] )
keyword[for] identifier[sample] keyword[in] identifier[self] . identifier[metadata] :
keyword[if] identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[reportdir] != literal[string] :
keyword[if] identifier[type] ( identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[allelenames] )== identifier[list] :
identifier[row] = literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[sorted] ( identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[allelenames] )))
identifier[seqcount] = literal[int]
keyword[for] identifier[seqtype] keyword[in] identifier[self] . identifier[resultprofile] [ identifier[sample] . identifier[name] ]:
literal[string]
literal[string]
identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[sequencetype] = identifier[seqtype]
identifier[matches] = identifier[list] ( identifier[self] . identifier[resultprofile] [ identifier[sample] . identifier[name] ][ identifier[seqtype] ]. identifier[keys] ())[ literal[int] ]
keyword[if] identifier[seqcount] == literal[int] :
identifier[row] += literal[string] . identifier[format] ( identifier[sample] . identifier[name] , identifier[sample] . identifier[general] . identifier[referencegenus] , identifier[seqtype] , identifier[matches] )
keyword[else] :
identifier[row] += literal[string] . identifier[format] ( identifier[seqtype] , identifier[matches] )
keyword[for] identifier[gene] keyword[in] identifier[sorted] ( identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[allelenames] ):
identifier[refallele] = identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[profiledata] [ identifier[seqtype] ][ identifier[gene] ]
identifier[allele] = identifier[list] ( identifier[self] . identifier[resultprofile] [ identifier[sample] . identifier[name] ][ identifier[seqtype] ][ identifier[matches] ][ identifier[gene] ]. identifier[keys] ())[ literal[int] ]
identifier[percentid] = identifier[list] ( identifier[self] . identifier[resultprofile] [ identifier[sample] . identifier[name] ][ identifier[seqtype] ][ identifier[matches] ][ identifier[gene] ]. identifier[values] ())[ literal[int] ]
keyword[if] identifier[refallele] keyword[and] identifier[refallele] != identifier[allele] :
keyword[if] literal[int] < identifier[float] ( identifier[percentid] )< literal[int] :
identifier[row] += literal[string] . identifier[format] ( identifier[allele] , identifier[float] ( identifier[percentid] ))
keyword[else] :
identifier[row] += literal[string] . identifier[format] ( identifier[allele] , identifier[refallele] )
keyword[else] :
keyword[if] literal[int] < identifier[float] ( identifier[percentid] )< literal[int] :
identifier[row] += literal[string] . identifier[format] ( identifier[allele] , identifier[float] ( identifier[percentid] ))
keyword[else] :
identifier[row] += literal[string] . identifier[format] ( identifier[allele] )
identifier[self] . identifier[referenceprofile] [ identifier[sample] . identifier[name] ][ identifier[gene] ]= identifier[allele]
identifier[row] += literal[string]
identifier[seqcount] += literal[int]
identifier[combinedrow] += identifier[row]
keyword[if] identifier[self] . identifier[pipeline] :
keyword[with] identifier[open] ( literal[string] . identifier[format] ( identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[reportdir] , identifier[sample] . identifier[name] ,
identifier[self] . identifier[analysistype] ), literal[string] ) keyword[as] identifier[report] :
identifier[report] . identifier[write] ( identifier[row] )
identifier[dotter] ()
identifier[make_path] ( identifier[self] . identifier[reportpath] )
keyword[if] identifier[self] . identifier[pipeline] :
keyword[with] identifier[open] ( literal[string] . identifier[format] ( identifier[self] . identifier[reportpath] , identifier[self] . identifier[analysistype] ), literal[string] ) keyword[as] identifier[combinedreport] :
identifier[combinedreport] . identifier[write] ( identifier[combinedrow] )
keyword[else] :
keyword[with] identifier[open] ( literal[string] . identifier[format] ( identifier[self] . identifier[reportpath] , identifier[self] . identifier[analysistype] , identifier[time] . identifier[strftime] ( literal[string] )),
literal[string] ) keyword[as] identifier[combinedreport] :
identifier[combinedreport] . identifier[write] ( identifier[combinedrow] )
[ identifier[os] . identifier[remove] ( identifier[rawresults] ) keyword[for] identifier[rawresults] keyword[in] identifier[glob] ( literal[string] . identifier[format] ( identifier[self] . identifier[reportpath] ))]
|
def reporter(self):
""" Parse the results into a report"""
# Initialise variables
combinedrow = ''
reportdirset = set()
# Populate a set of all the report directories to use. A standard analysis will only have a single report
# directory, while pipeline analyses will have as many report directories as there are assembled samples
for sample in self.metadata:
# Ignore samples that lack a populated reportdir attribute
if sample[self.analysistype].reportdir != 'NA':
make_path(sample[self.analysistype].reportdir)
# Add to the set - I probably could have used a counter here, but I decided against it
reportdirset.add(sample[self.analysistype].reportdir) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['sample']]
# Create a report for each sample from :self.resultprofile
for sample in self.metadata:
if sample[self.analysistype].reportdir != 'NA':
if type(sample[self.analysistype].allelenames) == list:
# Populate the header with the appropriate data, including all the genes in the list of targets
row = 'Strain,Genus,SequenceType,Matches,{},\n'.format(','.join(sorted(sample[self.analysistype].allelenames)))
# Set the sequence counter to 0. This will be used when a sample has multiple best sequence types.
# The name of the sample will not be written on subsequent rows in order to make the report clearer
seqcount = 0
# Iterate through the best sequence types for the sample (only occurs if update profile is disabled)
for seqtype in self.resultprofile[sample.name]:
'\n {\n "OLF15230-1_2015-SEQ-0783": {\n "1000004_CFIA": {\n "7": {\n "dnaE": {\n "47": "100.00"\n },\n "dtdS": {\n "19": "100.00"\n },\n "gyrB": {\n "359": "100.00"\n },\n "pntA": {\n "50": "100.00"\n },\n "pyrC": {\n "143": "100.00"\n },\n "recA": {\n "31": "100.00"\n },\n "tnaA": {\n "26": "100.00"\n }\n }\n }\n }\n }\n '
# Becomes
'\n Strain,SequenceType,Matches,dnaE,gyrB,recA,dtdS,pntA,pyrC,tnaA\n OLF15230-1_2015-SEQ-0783,1000004_CFIA,7,26 (100.00%),359 (100.00%),31 (100.00%),50 (100.00%),\n 19 (100.00%),47 (100.00%),143 (100.00%)\n '
sample[self.analysistype].sequencetype = seqtype
# The number of matches to the profile
matches = list(self.resultprofile[sample.name][seqtype].keys())[0]
# If this is the first of one or more sequence types, include the sample name
if seqcount == 0:
row += '{},{},{},{},'.format(sample.name, sample.general.referencegenus, seqtype, matches) # depends on [control=['if'], data=[]]
else:
# Otherwise, skip the sample name
row += ',,{},{},'.format(seqtype, matches)
# Iterate through all the genes present in the analyses for the sample
for gene in sorted(sample[self.analysistype].allelenames):
# refallele = self.profiledata[self.analysistype][seqtype][gene]
refallele = sample[self.analysistype].profiledata[seqtype][gene]
# Set the allele and percent id from the dictionary's keys and values, respectively
allele = list(self.resultprofile[sample.name][seqtype][matches][gene].keys())[0]
percentid = list(self.resultprofile[sample.name][seqtype][matches][gene].values())[0]
if refallele and refallele != allele:
if 0 < float(percentid) < 100:
row += '{} ({:.2f}%),'.format(allele, float(percentid)) # depends on [control=['if'], data=[]]
else:
row += '{} ({}),'.format(allele, refallele) # depends on [control=['if'], data=[]]
# Add the allele and % id to the row (only add the percent identity if it is not 100%)
elif 0 < float(percentid) < 100:
row += '{} ({:.2f}%),'.format(allele, float(percentid)) # depends on [control=['if'], data=[]]
else:
row += '{},'.format(allele)
self.referenceprofile[sample.name][gene] = allele # depends on [control=['for'], data=['gene']]
# Add a newline
row += '\n'
# Increment the number of sequence types observed for the sample
seqcount += 1 # depends on [control=['for'], data=['seqtype']]
combinedrow += row
# If the length of the # of report directories is greater than 1 (script is being run as part of
# the assembly pipeline) make a report for each sample
if self.pipeline:
# Open the report
with open('{}{}_{}.csv'.format(sample[self.analysistype].reportdir, sample.name, self.analysistype), 'w') as report:
# Write the row to the report
report.write(row) # depends on [control=['with'], data=['report']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['list']]
dotter() # depends on [control=['if'], data=[]]
# Create the report folder
make_path(self.reportpath)
# Create the report containing all the data from all samples
if self.pipeline:
with open('{}{}.csv'.format(self.reportpath, self.analysistype), 'w') as combinedreport:
# Write the results to this report
combinedreport.write(combinedrow) # depends on [control=['with'], data=['combinedreport']] # depends on [control=['if'], data=[]]
else:
with open('{}{}_{:}.csv'.format(self.reportpath, self.analysistype, time.strftime('%Y.%m.%d.%H.%M.%S')), 'w') as combinedreport:
# Write the results to this report
combinedreport.write(combinedrow) # depends on [control=['with'], data=['combinedreport']]
# Remove the raw results csv
[os.remove(rawresults) for rawresults in glob('{}*rawresults*'.format(self.reportpath))] # depends on [control=['for'], data=['sample']]
|
def deserialize_unix(attr):
    """Deserialize an IntTime format (Unix timestamp, in seconds) into a
    Datetime object.

    :param int attr: Object to be deserialized.
    :rtype: Datetime
    :raises: DeserializationError if format invalid
    """
    # XML payloads arrive as an Element; the timestamp is its text content
    if isinstance(attr, ET.Element):
        attr = int(attr.text)
    try:
        # fromtimestamp raises OverflowError/OSError (not only ValueError) for
        # timestamps outside the platform-supported range, so catch those too
        # and surface them uniformly as DeserializationError.
        date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC)
    except (ValueError, OverflowError, OSError) as err:
        msg = "Cannot deserialize to unix datetime object."
        raise_with_traceback(DeserializationError, msg, err)
    else:
        return date_obj
|
def function[deserialize_unix, parameter[attr]]:
constant[Serialize Datetime object into IntTime format.
This is represented as seconds.
:param int attr: Object to be serialized.
:rtype: Datetime
:raises: DeserializationError if format invalid
]
if call[name[isinstance], parameter[name[attr], name[ET].Element]] begin[:]
variable[attr] assign[=] call[name[int], parameter[name[attr].text]]
<ast.Try object at 0x7da18c4ce8c0>
|
keyword[def] identifier[deserialize_unix] ( identifier[attr] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[attr] , identifier[ET] . identifier[Element] ):
identifier[attr] = identifier[int] ( identifier[attr] . identifier[text] )
keyword[try] :
identifier[date_obj] = identifier[datetime] . identifier[datetime] . identifier[fromtimestamp] ( identifier[attr] , identifier[TZ_UTC] )
keyword[except] identifier[ValueError] keyword[as] identifier[err] :
identifier[msg] = literal[string]
identifier[raise_with_traceback] ( identifier[DeserializationError] , identifier[msg] , identifier[err] )
keyword[else] :
keyword[return] identifier[date_obj]
|
def deserialize_unix(attr):
"""Serialize Datetime object into IntTime format.
This is represented as seconds.
:param int attr: Object to be serialized.
:rtype: Datetime
:raises: DeserializationError if format invalid
"""
if isinstance(attr, ET.Element):
attr = int(attr.text) # depends on [control=['if'], data=[]]
try:
date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC) # depends on [control=['try'], data=[]]
except ValueError as err:
msg = 'Cannot deserialize to unix datetime object.'
raise_with_traceback(DeserializationError, msg, err) # depends on [control=['except'], data=['err']]
else:
return date_obj
|
def _si(number):
"""Format a number using base-2 SI prefixes"""
prefixes = ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
while number > 1024:
number /= 1024.0
prefixes.pop(0)
return '%0.2f%s' % (number, prefixes.pop(0))
|
def function[_si, parameter[number]]:
constant[Format a number using base-2 SI prefixes]
variable[prefixes] assign[=] list[[<ast.Constant object at 0x7da20e9b3c70>, <ast.Constant object at 0x7da20e9b2890>, <ast.Constant object at 0x7da20e9b15d0>, <ast.Constant object at 0x7da20e9b2e60>, <ast.Constant object at 0x7da20e9b19c0>, <ast.Constant object at 0x7da20e9b0e80>, <ast.Constant object at 0x7da20e9b28f0>, <ast.Constant object at 0x7da20e9b25c0>, <ast.Constant object at 0x7da20e9b3520>]]
while compare[name[number] greater[>] constant[1024]] begin[:]
<ast.AugAssign object at 0x7da20e9b3490>
call[name[prefixes].pop, parameter[constant[0]]]
return[binary_operation[constant[%0.2f%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e9b2f80>, <ast.Call object at 0x7da20e9b1c60>]]]]
|
keyword[def] identifier[_si] ( identifier[number] ):
literal[string]
identifier[prefixes] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[while] identifier[number] > literal[int] :
identifier[number] /= literal[int]
identifier[prefixes] . identifier[pop] ( literal[int] )
keyword[return] literal[string] %( identifier[number] , identifier[prefixes] . identifier[pop] ( literal[int] ))
|
def _si(number):
"""Format a number using base-2 SI prefixes"""
prefixes = ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
while number > 1024:
number /= 1024.0
prefixes.pop(0) # depends on [control=['while'], data=['number']]
return '%0.2f%s' % (number, prefixes.pop(0))
|
def render(dson_input, saltenv='base', sls='', **kwargs):
    '''
    Accepts DSON data as a string or as a file object and runs it through the
    JSON parser.

    :rtype: A Python data structure
    '''
    # Drain file-like input into a plain string first.
    text = dson_input if isinstance(dson_input, six.string_types) else dson_input.read()
    log.debug('DSON input = %s', text)
    # Drop a leading shebang line (e.g. '#!dson') if one is present.
    if text.startswith('#!'):
        text = text[text.find('\n') + 1:]
    # An effectively empty document renders as an empty dict.
    return dson.loads(text) if text.strip() else {}
|
def function[render, parameter[dson_input, saltenv, sls]]:
constant[
Accepts DSON data as a string or as a file object and runs it through the
JSON parser.
:rtype: A Python data structure
]
if <ast.UnaryOp object at 0x7da18dc070a0> begin[:]
variable[dson_input] assign[=] call[name[dson_input].read, parameter[]]
call[name[log].debug, parameter[constant[DSON input = %s], name[dson_input]]]
if call[name[dson_input].startswith, parameter[constant[#!]]] begin[:]
variable[dson_input] assign[=] call[name[dson_input]][<ast.Slice object at 0x7da18dc041f0>]
if <ast.UnaryOp object at 0x7da18dc050c0> begin[:]
return[dictionary[[], []]]
return[call[name[dson].loads, parameter[name[dson_input]]]]
|
keyword[def] identifier[render] ( identifier[dson_input] , identifier[saltenv] = literal[string] , identifier[sls] = literal[string] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[dson_input] , identifier[six] . identifier[string_types] ):
identifier[dson_input] = identifier[dson_input] . identifier[read] ()
identifier[log] . identifier[debug] ( literal[string] , identifier[dson_input] )
keyword[if] identifier[dson_input] . identifier[startswith] ( literal[string] ):
identifier[dson_input] = identifier[dson_input] [( identifier[dson_input] . identifier[find] ( literal[string] )+ literal[int] ):]
keyword[if] keyword[not] identifier[dson_input] . identifier[strip] ():
keyword[return] {}
keyword[return] identifier[dson] . identifier[loads] ( identifier[dson_input] )
|
def render(dson_input, saltenv='base', sls='', **kwargs):
"""
Accepts DSON data as a string or as a file object and runs it through the
JSON parser.
:rtype: A Python data structure
"""
if not isinstance(dson_input, six.string_types):
dson_input = dson_input.read() # depends on [control=['if'], data=[]]
log.debug('DSON input = %s', dson_input)
if dson_input.startswith('#!'):
dson_input = dson_input[dson_input.find('\n') + 1:] # depends on [control=['if'], data=[]]
if not dson_input.strip():
return {} # depends on [control=['if'], data=[]]
return dson.loads(dson_input)
|
def physical_pins(self, function):
    """
    Return the physical pins supporting the specified *function* as tuples
    of ``(header, pin_number)`` where *header* is a string specifying the
    header containing the *pin_number*. Note that the return value is a
    :class:`set` which is not indexable. Use :func:`physical_pin` if you
    are expecting a single return value.

    :param str function:
        The pin function you wish to search for. Usually this is something
        like "GPIO9" for Broadcom GPIO pin 9, or "GND" for all the pins
        connecting to electrical ground.
    """
    matches = set()
    # Walk every pin on every header, collecting those whose function
    # matches the requested one.
    for header, info in self.headers.items():
        for pin in info.pins.values():
            if pin.function == function:
                matches.add((header, pin.number))
    return matches
|
def function[physical_pins, parameter[self, function]]:
constant[
Return the physical pins supporting the specified *function* as tuples
of ``(header, pin_number)`` where *header* is a string specifying the
header containing the *pin_number*. Note that the return value is a
:class:`set` which is not indexable. Use :func:`physical_pin` if you
are expecting a single return value.
:param str function:
The pin function you wish to search for. Usually this is something
like "GPIO9" for Broadcom GPIO pin 9, or "GND" for all the pins
connecting to electrical ground.
]
return[<ast.SetComp object at 0x7da207f9ab60>]
|
keyword[def] identifier[physical_pins] ( identifier[self] , identifier[function] ):
literal[string]
keyword[return] {
( identifier[header] , identifier[pin] . identifier[number] )
keyword[for] ( identifier[header] , identifier[info] ) keyword[in] identifier[self] . identifier[headers] . identifier[items] ()
keyword[for] identifier[pin] keyword[in] identifier[info] . identifier[pins] . identifier[values] ()
keyword[if] identifier[pin] . identifier[function] == identifier[function]
}
|
def physical_pins(self, function):
"""
Return the physical pins supporting the specified *function* as tuples
of ``(header, pin_number)`` where *header* is a string specifying the
header containing the *pin_number*. Note that the return value is a
:class:`set` which is not indexable. Use :func:`physical_pin` if you
are expecting a single return value.
:param str function:
The pin function you wish to search for. Usually this is something
like "GPIO9" for Broadcom GPIO pin 9, or "GND" for all the pins
connecting to electrical ground.
"""
return {(header, pin.number) for (header, info) in self.headers.items() for pin in info.pins.values() if pin.function == function}
|
def reloaded(name, jboss_config, timeout=60, interval=5):
    '''
    Reloads configuration of jboss server.

    jboss_config:
        Dict with connection properties (see state description)
    timeout:
        Time to wait until jboss is back in running state. Default timeout is 60s.
    interval:
        Interval between state checks. Default interval is 5s. Decreasing the interval may slightly decrease waiting time
        but be aware that every status check is a call to jboss-cli which is a java process. If interval is smaller than
        process cleanup time it may easily lead to excessive resource consumption.

    This step performs the following operations:

    * Ensures that server is in running or reload-required state (by reading server-state attribute)
    * Reloads configuration
    * Waits for server to reload and be in running state

    Example:

    .. code-block:: yaml

        configuration_reloaded:
           jboss7.reloaded:
            - jboss_config: {{ pillar['jboss'] }}
    '''
    log.debug(" ======================== STATE: jboss7.reloaded (name: %s) ", name)
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': ''}

    # Guard: the server must be reachable and reloadable before we try.
    status = __salt__['jboss7.status'](jboss_config)
    if not status['success'] or status['result'] not in ('running', 'reload-required'):
        ret['result'] = False
        ret['comment'] = "Cannot reload server configuration, it should be up and in 'running' or 'reload-required' state."
        return ret

    result = __salt__['jboss7.reload'](jboss_config)
    # A dropped CLI channel during reload is expected — the server
    # restarts its management interface — so those stdout messages
    # still count as an accepted reload request.
    reload_accepted = (
        result['success']
        or 'Operation failed: Channel closed' in result['stdout']
        or 'Communication error: java.util.concurrent.ExecutionException: Operation failed' in result['stdout']
    )
    if not reload_accepted:
        ret['result'] = False
        ret['comment'] = 'Could not reload the configuration, stdout:' + result['stdout']
        return ret

    # Poll until the server reports 'running' again or the timeout lapses.
    waited = 0
    status = None
    while (status is None or not status['success'] or status['result'] != 'running') and waited < timeout:
        time.sleep(interval)
        waited += interval
        status = __salt__['jboss7.status'](jboss_config)

    if status['success'] and status['result'] == 'running':
        ret['result'] = True
        ret['comment'] = 'Configuration reloaded'
        ret['changes']['reloaded'] = 'configuration'
        return ret

    ret['result'] = False
    ret['comment'] = 'Could not reload the configuration. Timeout ({0} s) exceeded. '.format(timeout)
    if not status['success']:
        ret['comment'] = __append_comment('Could not connect to JBoss controller.', ret['comment'])
    else:
        ret['comment'] = __append_comment('Server is in {0} state'.format(status['result']), ret['comment'])
    return ret
|
def function[reloaded, parameter[name, jboss_config, timeout, interval]]:
constant[
Reloads configuration of jboss server.
jboss_config:
Dict with connection properties (see state description)
timeout:
Time to wait until jboss is back in running state. Default timeout is 60s.
interval:
Interval between state checks. Default interval is 5s. Decreasing the interval may slightly decrease waiting time
but be aware that every status check is a call to jboss-cli which is a java process. If interval is smaller than
process cleanup time it may easily lead to excessive resource consumption.
This step performs the following operations:
* Ensures that server is in running or reload-required state (by reading server-state attribute)
* Reloads configuration
* Waits for server to reload and be in running state
Example:
.. code-block:: yaml
configuration_reloaded:
jboss7.reloaded:
- jboss_config: {{ pillar['jboss'] }}
]
call[name[log].debug, parameter[constant[ ======================== STATE: jboss7.reloaded (name: %s) ], name[name]]]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b2034520>, <ast.Constant object at 0x7da1b2034040>, <ast.Constant object at 0x7da1b2035bd0>, <ast.Constant object at 0x7da1b2035c60>], [<ast.Name object at 0x7da1b2035b40>, <ast.Constant object at 0x7da1b2034970>, <ast.Dict object at 0x7da1b2034eb0>, <ast.Constant object at 0x7da1b20349a0>]]
variable[status] assign[=] call[call[name[__salt__]][constant[jboss7.status]], parameter[name[jboss_config]]]
if <ast.BoolOp object at 0x7da1b2035630> begin[:]
call[name[ret]][constant[result]] assign[=] constant[False]
call[name[ret]][constant[comment]] assign[=] constant[Cannot reload server configuration, it should be up and in 'running' or 'reload-required' state.]
return[name[ret]]
variable[result] assign[=] call[call[name[__salt__]][constant[jboss7.reload]], parameter[name[jboss_config]]]
if <ast.BoolOp object at 0x7da207f99720> begin[:]
variable[wait_time] assign[=] constant[0]
variable[status] assign[=] constant[None]
while <ast.BoolOp object at 0x7da207f9a0e0> begin[:]
call[name[time].sleep, parameter[name[interval]]]
<ast.AugAssign object at 0x7da207f99510>
variable[status] assign[=] call[call[name[__salt__]][constant[jboss7.status]], parameter[name[jboss_config]]]
if <ast.BoolOp object at 0x7da207f99090> begin[:]
call[name[ret]][constant[result]] assign[=] constant[True]
call[name[ret]][constant[comment]] assign[=] constant[Configuration reloaded]
call[call[name[ret]][constant[changes]]][constant[reloaded]] assign[=] constant[configuration]
return[name[ret]]
|
keyword[def] identifier[reloaded] ( identifier[name] , identifier[jboss_config] , identifier[timeout] = literal[int] , identifier[interval] = literal[int] ):
literal[string]
identifier[log] . identifier[debug] ( literal[string] , identifier[name] )
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] : keyword[True] ,
literal[string] :{},
literal[string] : literal[string] }
identifier[status] = identifier[__salt__] [ literal[string] ]( identifier[jboss_config] )
keyword[if] keyword[not] identifier[status] [ literal[string] ] keyword[or] identifier[status] [ literal[string] ] keyword[not] keyword[in] ( literal[string] , literal[string] ):
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
identifier[result] = identifier[__salt__] [ literal[string] ]( identifier[jboss_config] )
keyword[if] identifier[result] [ literal[string] ] keyword[or] literal[string] keyword[in] identifier[result] [ literal[string] ] keyword[or] literal[string] keyword[in] identifier[result] [ literal[string] ]:
identifier[wait_time] = literal[int]
identifier[status] = keyword[None]
keyword[while] ( identifier[status] keyword[is] keyword[None] keyword[or] keyword[not] identifier[status] [ literal[string] ] keyword[or] identifier[status] [ literal[string] ]!= literal[string] ) keyword[and] identifier[wait_time] < identifier[timeout] :
identifier[time] . identifier[sleep] ( identifier[interval] )
identifier[wait_time] += identifier[interval]
identifier[status] = identifier[__salt__] [ literal[string] ]( identifier[jboss_config] )
keyword[if] identifier[status] [ literal[string] ] keyword[and] identifier[status] [ literal[string] ]== literal[string] :
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= literal[string]
identifier[ret] [ literal[string] ][ literal[string] ]= literal[string]
keyword[else] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[timeout] )
keyword[if] keyword[not] identifier[status] [ literal[string] ]:
identifier[ret] [ literal[string] ]= identifier[__append_comment] ( literal[string] , identifier[ret] [ literal[string] ])
keyword[else] :
identifier[ret] [ literal[string] ]= identifier[__append_comment] (( literal[string] . identifier[format] ( identifier[status] [ literal[string] ])), identifier[ret] [ literal[string] ])
keyword[else] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] + identifier[result] [ literal[string] ]
keyword[return] identifier[ret]
|
def reloaded(name, jboss_config, timeout=60, interval=5):
"""
Reloads configuration of jboss server.
jboss_config:
Dict with connection properties (see state description)
timeout:
Time to wait until jboss is back in running state. Default timeout is 60s.
interval:
Interval between state checks. Default interval is 5s. Decreasing the interval may slightly decrease waiting time
but be aware that every status check is a call to jboss-cli which is a java process. If interval is smaller than
process cleanup time it may easily lead to excessive resource consumption.
This step performs the following operations:
* Ensures that server is in running or reload-required state (by reading server-state attribute)
* Reloads configuration
* Waits for server to reload and be in running state
Example:
.. code-block:: yaml
configuration_reloaded:
jboss7.reloaded:
- jboss_config: {{ pillar['jboss'] }}
"""
log.debug(' ======================== STATE: jboss7.reloaded (name: %s) ', name)
ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''}
status = __salt__['jboss7.status'](jboss_config)
if not status['success'] or status['result'] not in ('running', 'reload-required'):
ret['result'] = False
ret['comment'] = "Cannot reload server configuration, it should be up and in 'running' or 'reload-required' state."
return ret # depends on [control=['if'], data=[]]
result = __salt__['jboss7.reload'](jboss_config)
if result['success'] or 'Operation failed: Channel closed' in result['stdout'] or 'Communication error: java.util.concurrent.ExecutionException: Operation failed' in result['stdout']:
wait_time = 0
status = None
while (status is None or not status['success'] or status['result'] != 'running') and wait_time < timeout:
time.sleep(interval)
wait_time += interval
status = __salt__['jboss7.status'](jboss_config) # depends on [control=['while'], data=[]]
if status['success'] and status['result'] == 'running':
ret['result'] = True
ret['comment'] = 'Configuration reloaded'
ret['changes']['reloaded'] = 'configuration' # depends on [control=['if'], data=[]]
else:
ret['result'] = False
ret['comment'] = 'Could not reload the configuration. Timeout ({0} s) exceeded. '.format(timeout)
if not status['success']:
ret['comment'] = __append_comment('Could not connect to JBoss controller.', ret['comment']) # depends on [control=['if'], data=[]]
else:
ret['comment'] = __append_comment('Server is in {0} state'.format(status['result']), ret['comment']) # depends on [control=['if'], data=[]]
else:
ret['result'] = False
ret['comment'] = 'Could not reload the configuration, stdout:' + result['stdout']
return ret
|
def _concat_sparse(to_concat, axis=0, typs=None):
"""
provide concatenation of an sparse/dense array of arrays each of which is a
single dtype
Parameters
----------
to_concat : array of arrays
axis : axis to provide concatenation
typs : set of to_concat dtypes
Returns
-------
a single array, preserving the combined dtypes
"""
from pandas.core.arrays import SparseArray
fill_values = [x.fill_value for x in to_concat
if isinstance(x, SparseArray)]
fill_value = fill_values[0]
# TODO: Fix join unit generation so we aren't passed this.
to_concat = [x if isinstance(x, SparseArray)
else SparseArray(x.squeeze(), fill_value=fill_value)
for x in to_concat]
return SparseArray._concat_same_type(to_concat)
|
def function[_concat_sparse, parameter[to_concat, axis, typs]]:
constant[
provide concatenation of an sparse/dense array of arrays each of which is a
single dtype
Parameters
----------
to_concat : array of arrays
axis : axis to provide concatenation
typs : set of to_concat dtypes
Returns
-------
a single array, preserving the combined dtypes
]
from relative_module[pandas.core.arrays] import module[SparseArray]
variable[fill_values] assign[=] <ast.ListComp object at 0x7da20e9b1330>
variable[fill_value] assign[=] call[name[fill_values]][constant[0]]
variable[to_concat] assign[=] <ast.ListComp object at 0x7da20e9b3010>
return[call[name[SparseArray]._concat_same_type, parameter[name[to_concat]]]]
|
keyword[def] identifier[_concat_sparse] ( identifier[to_concat] , identifier[axis] = literal[int] , identifier[typs] = keyword[None] ):
literal[string]
keyword[from] identifier[pandas] . identifier[core] . identifier[arrays] keyword[import] identifier[SparseArray]
identifier[fill_values] =[ identifier[x] . identifier[fill_value] keyword[for] identifier[x] keyword[in] identifier[to_concat]
keyword[if] identifier[isinstance] ( identifier[x] , identifier[SparseArray] )]
identifier[fill_value] = identifier[fill_values] [ literal[int] ]
identifier[to_concat] =[ identifier[x] keyword[if] identifier[isinstance] ( identifier[x] , identifier[SparseArray] )
keyword[else] identifier[SparseArray] ( identifier[x] . identifier[squeeze] (), identifier[fill_value] = identifier[fill_value] )
keyword[for] identifier[x] keyword[in] identifier[to_concat] ]
keyword[return] identifier[SparseArray] . identifier[_concat_same_type] ( identifier[to_concat] )
|
def _concat_sparse(to_concat, axis=0, typs=None):
"""
provide concatenation of an sparse/dense array of arrays each of which is a
single dtype
Parameters
----------
to_concat : array of arrays
axis : axis to provide concatenation
typs : set of to_concat dtypes
Returns
-------
a single array, preserving the combined dtypes
"""
from pandas.core.arrays import SparseArray
fill_values = [x.fill_value for x in to_concat if isinstance(x, SparseArray)]
fill_value = fill_values[0]
# TODO: Fix join unit generation so we aren't passed this.
to_concat = [x if isinstance(x, SparseArray) else SparseArray(x.squeeze(), fill_value=fill_value) for x in to_concat]
return SparseArray._concat_same_type(to_concat)
|
def get_which_data_ycols(model, which_data_ycols):
    """
    Helper to get the data columns to plot.

    'all' (or None) expands to every output dimension of the model;
    anything else is passed through unchanged.
    """
    wanted = which_data_ycols
    if wanted is None or wanted == 'all':
        return np.arange(model.output_dim)
    return wanted
|
def function[get_which_data_ycols, parameter[model, which_data_ycols]]:
constant[
Helper to get the data columns to plot.
]
if <ast.BoolOp object at 0x7da1b1cc8640> begin[:]
return[call[name[np].arange, parameter[name[model].output_dim]]]
return[name[which_data_ycols]]
|
keyword[def] identifier[get_which_data_ycols] ( identifier[model] , identifier[which_data_ycols] ):
literal[string]
keyword[if] identifier[which_data_ycols] == literal[string] keyword[or] identifier[which_data_ycols] keyword[is] keyword[None] :
keyword[return] identifier[np] . identifier[arange] ( identifier[model] . identifier[output_dim] )
keyword[return] identifier[which_data_ycols]
|
def get_which_data_ycols(model, which_data_ycols):
"""
Helper to get the data columns to plot.
"""
if which_data_ycols == 'all' or which_data_ycols is None:
return np.arange(model.output_dim) # depends on [control=['if'], data=[]]
return which_data_ycols
|
def getcol(self, startrow=0, nrow=-1, rowincr=1):
    """Get the contents of the column or part of it.

    (see :func:`table.getcol`)
    """
    # Delegate to the parent table, supplying this column's name.
    table = self._table
    return table.getcol(self._column, startrow, nrow, rowincr)
|
def function[getcol, parameter[self, startrow, nrow, rowincr]]:
constant[Get the contents of the column or part of it.
(see :func:`table.getcol`)]
return[call[name[self]._table.getcol, parameter[name[self]._column, name[startrow], name[nrow], name[rowincr]]]]
|
keyword[def] identifier[getcol] ( identifier[self] , identifier[startrow] = literal[int] , identifier[nrow] =- literal[int] , identifier[rowincr] = literal[int] ):
literal[string]
keyword[return] identifier[self] . identifier[_table] . identifier[getcol] ( identifier[self] . identifier[_column] , identifier[startrow] , identifier[nrow] , identifier[rowincr] )
|
def getcol(self, startrow=0, nrow=-1, rowincr=1):
"""Get the contents of the column or part of it.
(see :func:`table.getcol`)"""
return self._table.getcol(self._column, startrow, nrow, rowincr)
|
def from_dict(cls, d):
    """
    Decode from a dictionary as from :meth:`to_dict`.
    """
    # 'from'/'to' are only present when the token carries character offsets.
    lnk = Lnk.charspan(d['from'], d['to']) if 'from' in d else None
    tags = d.get('tags', [])
    probs = d.get('probabilities', [])
    # d.get('paths', [1]),
    # ipos=
    # lrules=
    return cls(
        d['id'],
        d['start'],
        d['end'],
        lnk,
        form=d['form'],
        surface=d.get('surface'),
        pos=zip(tags, probs)
    )
|
def function[from_dict, parameter[cls, d]]:
constant[
Decode from a dictionary as from :meth:`to_dict`.
]
return[call[name[cls], parameter[call[name[d]][constant[id]], call[name[d]][constant[start]], call[name[d]][constant[end]], <ast.IfExp object at 0x7da1b06c8f70>]]]
|
keyword[def] identifier[from_dict] ( identifier[cls] , identifier[d] ):
literal[string]
keyword[return] identifier[cls] (
identifier[d] [ literal[string] ],
identifier[d] [ literal[string] ],
identifier[d] [ literal[string] ],
identifier[Lnk] . identifier[charspan] ( identifier[d] [ literal[string] ], identifier[d] [ literal[string] ]) keyword[if] literal[string] keyword[in] identifier[d] keyword[else] keyword[None] ,
identifier[form] = identifier[d] [ literal[string] ],
identifier[surface] = identifier[d] . identifier[get] ( literal[string] ),
identifier[pos] = identifier[zip] ( identifier[d] . identifier[get] ( literal[string] ,[]), identifier[d] . identifier[get] ( literal[string] ,[]))
)
|
def from_dict(cls, d):
"""
Decode from a dictionary as from :meth:`to_dict`.
"""
# d.get('paths', [1]),
# ipos=
# lrules=
return cls(d['id'], d['start'], d['end'], Lnk.charspan(d['from'], d['to']) if 'from' in d else None, form=d['form'], surface=d.get('surface'), pos=zip(d.get('tags', []), d.get('probabilities', [])))
|
def create(cls, term, *ranges):
    """Instantiate the indexed sum while applying simplification rules"""
    # Wrap plain values so the summand is always a Scalar instance.
    scalar_term = term if isinstance(term, Scalar) else ScalarValue.create(term)
    return super().create(scalar_term, *ranges)
|
def function[create, parameter[cls, term]]:
constant[Instantiate the indexed sum while applying simplification rules]
if <ast.UnaryOp object at 0x7da18c4ce530> begin[:]
variable[term] assign[=] call[name[ScalarValue].create, parameter[name[term]]]
return[call[call[name[super], parameter[]].create, parameter[name[term], <ast.Starred object at 0x7da18c4cdba0>]]]
|
keyword[def] identifier[create] ( identifier[cls] , identifier[term] ,* identifier[ranges] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[term] , identifier[Scalar] ):
identifier[term] = identifier[ScalarValue] . identifier[create] ( identifier[term] )
keyword[return] identifier[super] (). identifier[create] ( identifier[term] ,* identifier[ranges] )
|
def create(cls, term, *ranges):
"""Instantiate the indexed sum while applying simplification rules"""
if not isinstance(term, Scalar):
term = ScalarValue.create(term) # depends on [control=['if'], data=[]]
return super().create(term, *ranges)
|
def _list(self):
    """
    list function logic, override to implement different logic
    returns list and search widget
    """
    # Fetch the ordering tuple once instead of calling get_order_args()
    # twice back-to-back (the original re-parsed the request arguments).
    order_args = get_order_args().get(self.__class__.__name__)
    if order_args:
        order_column, order_direction = order_args
    else:
        order_column, order_direction = "", ""
    page = get_page_args().get(self.__class__.__name__)
    page_size = get_page_size_args().get(self.__class__.__name__)
    # Populates self._filters in place from the request arguments.
    get_filter_args(self._filters)
    widgets = self._get_list_widget(
        filters=self._filters,
        order_column=order_column,
        order_direction=order_direction,
        page=page,
        page_size=page_size,
    )
    form = self.search_form.refresh()
    self.update_redirect()
    return self._get_search_widget(form=form, widgets=widgets)
|
def function[_list, parameter[self]]:
constant[
list function logic, override to implement different logic
returns list and search widget
]
if call[call[name[get_order_args], parameter[]].get, parameter[name[self].__class__.__name__]] begin[:]
<ast.Tuple object at 0x7da20e954280> assign[=] call[call[name[get_order_args], parameter[]].get, parameter[name[self].__class__.__name__]]
variable[page] assign[=] call[call[name[get_page_args], parameter[]].get, parameter[name[self].__class__.__name__]]
variable[page_size] assign[=] call[call[name[get_page_size_args], parameter[]].get, parameter[name[self].__class__.__name__]]
call[name[get_filter_args], parameter[name[self]._filters]]
variable[widgets] assign[=] call[name[self]._get_list_widget, parameter[]]
variable[form] assign[=] call[name[self].search_form.refresh, parameter[]]
call[name[self].update_redirect, parameter[]]
return[call[name[self]._get_search_widget, parameter[]]]
|
keyword[def] identifier[_list] ( identifier[self] ):
literal[string]
keyword[if] identifier[get_order_args] (). identifier[get] ( identifier[self] . identifier[__class__] . identifier[__name__] ):
identifier[order_column] , identifier[order_direction] = identifier[get_order_args] (). identifier[get] (
identifier[self] . identifier[__class__] . identifier[__name__]
)
keyword[else] :
identifier[order_column] , identifier[order_direction] = literal[string] , literal[string]
identifier[page] = identifier[get_page_args] (). identifier[get] ( identifier[self] . identifier[__class__] . identifier[__name__] )
identifier[page_size] = identifier[get_page_size_args] (). identifier[get] ( identifier[self] . identifier[__class__] . identifier[__name__] )
identifier[get_filter_args] ( identifier[self] . identifier[_filters] )
identifier[widgets] = identifier[self] . identifier[_get_list_widget] (
identifier[filters] = identifier[self] . identifier[_filters] ,
identifier[order_column] = identifier[order_column] ,
identifier[order_direction] = identifier[order_direction] ,
identifier[page] = identifier[page] ,
identifier[page_size] = identifier[page_size] ,
)
identifier[form] = identifier[self] . identifier[search_form] . identifier[refresh] ()
identifier[self] . identifier[update_redirect] ()
keyword[return] identifier[self] . identifier[_get_search_widget] ( identifier[form] = identifier[form] , identifier[widgets] = identifier[widgets] )
|
def _list(self):
"""
list function logic, override to implement different logic
returns list and search widget
"""
if get_order_args().get(self.__class__.__name__):
(order_column, order_direction) = get_order_args().get(self.__class__.__name__) # depends on [control=['if'], data=[]]
else:
(order_column, order_direction) = ('', '')
page = get_page_args().get(self.__class__.__name__)
page_size = get_page_size_args().get(self.__class__.__name__)
get_filter_args(self._filters)
widgets = self._get_list_widget(filters=self._filters, order_column=order_column, order_direction=order_direction, page=page, page_size=page_size)
form = self.search_form.refresh()
self.update_redirect()
return self._get_search_widget(form=form, widgets=widgets)
|
def _schema(self, path, obj, app):
    """ fulfill 'name' field for objects under
        '#/definitions' and with 'properties'
    """
    if not path.startswith('#/definitions'):
        return
    name = jp_split(path)[-1]
    # Swagger 1.2 definition names may carry a scope prefix — strip it.
    if app.version == '1.2':
        name = scope_split(name)[-1]
    obj.update_field('name', name)
|
def function[_schema, parameter[self, path, obj, app]]:
constant[ fulfill 'name' field for objects under
'#/definitions' and with 'properties'
]
if call[name[path].startswith, parameter[constant[#/definitions]]] begin[:]
variable[last_token] assign[=] call[call[name[jp_split], parameter[name[path]]]][<ast.UnaryOp object at 0x7da18bc713f0>]
if compare[name[app].version equal[==] constant[1.2]] begin[:]
call[name[obj].update_field, parameter[constant[name], call[call[name[scope_split], parameter[name[last_token]]]][<ast.UnaryOp object at 0x7da18bc73340>]]]
|
keyword[def] identifier[_schema] ( identifier[self] , identifier[path] , identifier[obj] , identifier[app] ):
literal[string]
keyword[if] identifier[path] . identifier[startswith] ( literal[string] ):
identifier[last_token] = identifier[jp_split] ( identifier[path] )[- literal[int] ]
keyword[if] identifier[app] . identifier[version] == literal[string] :
identifier[obj] . identifier[update_field] ( literal[string] , identifier[scope_split] ( identifier[last_token] )[- literal[int] ])
keyword[else] :
identifier[obj] . identifier[update_field] ( literal[string] , identifier[last_token] )
|
def _schema(self, path, obj, app):
""" fulfill 'name' field for objects under
'#/definitions' and with 'properties'
"""
if path.startswith('#/definitions'):
last_token = jp_split(path)[-1]
if app.version == '1.2':
obj.update_field('name', scope_split(last_token)[-1]) # depends on [control=['if'], data=[]]
else:
obj.update_field('name', last_token) # depends on [control=['if'], data=[]]
|
def H_mag(self, T):
"""
Calculate the phase's magnetic contribution to enthalpy at the
specified temperature.
:param T: [K] temperature
:returns: [J/mol] The magnetic enthalpy of the compound phase.
Dinsdale, A. T. (1991). SGTE data for pure elements. Calphad, 15(4),
317–425. http://doi.org/10.1016/0364-5916(91)90030-N
"""
tau = T / self.Tc_mag
if tau <= 1.0:
h = (-self._A_mag/tau +
self._B_mag*(tau**3/2 + tau**9/15 + tau**15/40))/self._D_mag
else:
h = -(tau**-5/2 + tau**-15/21 + tau**-25/60)/self._D_mag
return R*T*math.log(self.beta0_mag + 1)*h
|
def function[H_mag, parameter[self, T]]:
constant[
Calculate the phase's magnetic contribution to enthalpy at the
specified temperature.
:param T: [K] temperature
:returns: [J/mol] The magnetic enthalpy of the compound phase.
Dinsdale, A. T. (1991). SGTE data for pure elements. Calphad, 15(4),
317–425. http://doi.org/10.1016/0364-5916(91)90030-N
]
variable[tau] assign[=] binary_operation[name[T] / name[self].Tc_mag]
if compare[name[tau] less_or_equal[<=] constant[1.0]] begin[:]
variable[h] assign[=] binary_operation[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da18f8102e0> / name[tau]] + binary_operation[name[self]._B_mag * binary_operation[binary_operation[binary_operation[binary_operation[name[tau] ** constant[3]] / constant[2]] + binary_operation[binary_operation[name[tau] ** constant[9]] / constant[15]]] + binary_operation[binary_operation[name[tau] ** constant[15]] / constant[40]]]]] / name[self]._D_mag]
return[binary_operation[binary_operation[binary_operation[name[R] * name[T]] * call[name[math].log, parameter[binary_operation[name[self].beta0_mag + constant[1]]]]] * name[h]]]
|
keyword[def] identifier[H_mag] ( identifier[self] , identifier[T] ):
literal[string]
identifier[tau] = identifier[T] / identifier[self] . identifier[Tc_mag]
keyword[if] identifier[tau] <= literal[int] :
identifier[h] =(- identifier[self] . identifier[_A_mag] / identifier[tau] +
identifier[self] . identifier[_B_mag] *( identifier[tau] ** literal[int] / literal[int] + identifier[tau] ** literal[int] / literal[int] + identifier[tau] ** literal[int] / literal[int] ))/ identifier[self] . identifier[_D_mag]
keyword[else] :
identifier[h] =-( identifier[tau] **- literal[int] / literal[int] + identifier[tau] **- literal[int] / literal[int] + identifier[tau] **- literal[int] / literal[int] )/ identifier[self] . identifier[_D_mag]
keyword[return] identifier[R] * identifier[T] * identifier[math] . identifier[log] ( identifier[self] . identifier[beta0_mag] + literal[int] )* identifier[h]
|
def H_mag(self, T):
"""
Calculate the phase's magnetic contribution to enthalpy at the
specified temperature.
:param T: [K] temperature
:returns: [J/mol] The magnetic enthalpy of the compound phase.
Dinsdale, A. T. (1991). SGTE data for pure elements. Calphad, 15(4),
317–425. http://doi.org/10.1016/0364-5916(91)90030-N
"""
tau = T / self.Tc_mag
if tau <= 1.0:
h = (-self._A_mag / tau + self._B_mag * (tau ** 3 / 2 + tau ** 9 / 15 + tau ** 15 / 40)) / self._D_mag # depends on [control=['if'], data=['tau']]
else:
h = -(tau ** (-5) / 2 + tau ** (-15) / 21 + tau ** (-25) / 60) / self._D_mag
return R * T * math.log(self.beta0_mag + 1) * h
|
def build_job_configs(self, args):
"""Hook to build job configurations
"""
job_configs = {}
components = Component.build_from_yamlfile(args['comp'])
datafile = args['data']
if datafile is None or datafile == 'None':
return job_configs
NAME_FACTORY.update_base_dict(args['data'])
for comp in components:
zcut = "zmax%i" % comp.zmax
mktimelist = copy.copy(comp.mktimefilters)
if not mktimelist:
mktimelist.append('none')
evtclasslist_keys = copy.copy(comp.evtclasses)
if not evtclasslist_keys:
evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']]
else:
evtclasslist_vals = copy.copy(evtclasslist_keys)
for mktimekey in mktimelist:
for evtclassval in evtclasslist_vals:
fullkey = comp.make_key(
'%s_%s_{ebin_name}_%s_{evtype_name}' %
(evtclassval, zcut, mktimekey))
name_keys = dict(zcut=zcut,
ebin=comp.ebin_name,
psftype=comp.evtype_name,
coordsys=comp.coordsys,
irf_ver=NAME_FACTORY.irf_ver(),
mktime=mktimekey,
evclass=evtclassval,
fullpath=True)
outfile = NAME_FACTORY.bexpcube(**name_keys)
cmap = NAME_FACTORY.ccube(**name_keys)
infile = NAME_FACTORY.ltcube(**name_keys)
logfile = make_nfs_path(outfile.replace('.fits', '.log'))
job_configs[fullkey] = dict(cmap=cmap,
infile=infile,
outfile=outfile,
irfs=NAME_FACTORY.irfs(**name_keys),
hpx_order=min(
comp.hpx_order, args['hpx_order_max']),
evtype=comp.evtype,
logfile=logfile)
return job_configs
|
def function[build_job_configs, parameter[self, args]]:
constant[Hook to build job configurations
]
variable[job_configs] assign[=] dictionary[[], []]
variable[components] assign[=] call[name[Component].build_from_yamlfile, parameter[call[name[args]][constant[comp]]]]
variable[datafile] assign[=] call[name[args]][constant[data]]
if <ast.BoolOp object at 0x7da20c76ceb0> begin[:]
return[name[job_configs]]
call[name[NAME_FACTORY].update_base_dict, parameter[call[name[args]][constant[data]]]]
for taget[name[comp]] in starred[name[components]] begin[:]
variable[zcut] assign[=] binary_operation[constant[zmax%i] <ast.Mod object at 0x7da2590d6920> name[comp].zmax]
variable[mktimelist] assign[=] call[name[copy].copy, parameter[name[comp].mktimefilters]]
if <ast.UnaryOp object at 0x7da20c76ee30> begin[:]
call[name[mktimelist].append, parameter[constant[none]]]
variable[evtclasslist_keys] assign[=] call[name[copy].copy, parameter[name[comp].evtclasses]]
if <ast.UnaryOp object at 0x7da20c76ebf0> begin[:]
variable[evtclasslist_vals] assign[=] list[[<ast.Subscript object at 0x7da20c76c6a0>]]
for taget[name[mktimekey]] in starred[name[mktimelist]] begin[:]
for taget[name[evtclassval]] in starred[name[evtclasslist_vals]] begin[:]
variable[fullkey] assign[=] call[name[comp].make_key, parameter[binary_operation[constant[%s_%s_{ebin_name}_%s_{evtype_name}] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da204620070>, <ast.Name object at 0x7da18eb57d00>, <ast.Name object at 0x7da18eb56e60>]]]]]
variable[name_keys] assign[=] call[name[dict], parameter[]]
variable[outfile] assign[=] call[name[NAME_FACTORY].bexpcube, parameter[]]
variable[cmap] assign[=] call[name[NAME_FACTORY].ccube, parameter[]]
variable[infile] assign[=] call[name[NAME_FACTORY].ltcube, parameter[]]
variable[logfile] assign[=] call[name[make_nfs_path], parameter[call[name[outfile].replace, parameter[constant[.fits], constant[.log]]]]]
call[name[job_configs]][name[fullkey]] assign[=] call[name[dict], parameter[]]
return[name[job_configs]]
|
keyword[def] identifier[build_job_configs] ( identifier[self] , identifier[args] ):
literal[string]
identifier[job_configs] ={}
identifier[components] = identifier[Component] . identifier[build_from_yamlfile] ( identifier[args] [ literal[string] ])
identifier[datafile] = identifier[args] [ literal[string] ]
keyword[if] identifier[datafile] keyword[is] keyword[None] keyword[or] identifier[datafile] == literal[string] :
keyword[return] identifier[job_configs]
identifier[NAME_FACTORY] . identifier[update_base_dict] ( identifier[args] [ literal[string] ])
keyword[for] identifier[comp] keyword[in] identifier[components] :
identifier[zcut] = literal[string] % identifier[comp] . identifier[zmax]
identifier[mktimelist] = identifier[copy] . identifier[copy] ( identifier[comp] . identifier[mktimefilters] )
keyword[if] keyword[not] identifier[mktimelist] :
identifier[mktimelist] . identifier[append] ( literal[string] )
identifier[evtclasslist_keys] = identifier[copy] . identifier[copy] ( identifier[comp] . identifier[evtclasses] )
keyword[if] keyword[not] identifier[evtclasslist_keys] :
identifier[evtclasslist_vals] =[ identifier[NAME_FACTORY] . identifier[base_dict] [ literal[string] ]]
keyword[else] :
identifier[evtclasslist_vals] = identifier[copy] . identifier[copy] ( identifier[evtclasslist_keys] )
keyword[for] identifier[mktimekey] keyword[in] identifier[mktimelist] :
keyword[for] identifier[evtclassval] keyword[in] identifier[evtclasslist_vals] :
identifier[fullkey] = identifier[comp] . identifier[make_key] (
literal[string] %
( identifier[evtclassval] , identifier[zcut] , identifier[mktimekey] ))
identifier[name_keys] = identifier[dict] ( identifier[zcut] = identifier[zcut] ,
identifier[ebin] = identifier[comp] . identifier[ebin_name] ,
identifier[psftype] = identifier[comp] . identifier[evtype_name] ,
identifier[coordsys] = identifier[comp] . identifier[coordsys] ,
identifier[irf_ver] = identifier[NAME_FACTORY] . identifier[irf_ver] (),
identifier[mktime] = identifier[mktimekey] ,
identifier[evclass] = identifier[evtclassval] ,
identifier[fullpath] = keyword[True] )
identifier[outfile] = identifier[NAME_FACTORY] . identifier[bexpcube] (** identifier[name_keys] )
identifier[cmap] = identifier[NAME_FACTORY] . identifier[ccube] (** identifier[name_keys] )
identifier[infile] = identifier[NAME_FACTORY] . identifier[ltcube] (** identifier[name_keys] )
identifier[logfile] = identifier[make_nfs_path] ( identifier[outfile] . identifier[replace] ( literal[string] , literal[string] ))
identifier[job_configs] [ identifier[fullkey] ]= identifier[dict] ( identifier[cmap] = identifier[cmap] ,
identifier[infile] = identifier[infile] ,
identifier[outfile] = identifier[outfile] ,
identifier[irfs] = identifier[NAME_FACTORY] . identifier[irfs] (** identifier[name_keys] ),
identifier[hpx_order] = identifier[min] (
identifier[comp] . identifier[hpx_order] , identifier[args] [ literal[string] ]),
identifier[evtype] = identifier[comp] . identifier[evtype] ,
identifier[logfile] = identifier[logfile] )
keyword[return] identifier[job_configs]
|
def build_job_configs(self, args):
"""Hook to build job configurations
"""
job_configs = {}
components = Component.build_from_yamlfile(args['comp'])
datafile = args['data']
if datafile is None or datafile == 'None':
return job_configs # depends on [control=['if'], data=[]]
NAME_FACTORY.update_base_dict(args['data'])
for comp in components:
zcut = 'zmax%i' % comp.zmax
mktimelist = copy.copy(comp.mktimefilters)
if not mktimelist:
mktimelist.append('none') # depends on [control=['if'], data=[]]
evtclasslist_keys = copy.copy(comp.evtclasses)
if not evtclasslist_keys:
evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']] # depends on [control=['if'], data=[]]
else:
evtclasslist_vals = copy.copy(evtclasslist_keys)
for mktimekey in mktimelist:
for evtclassval in evtclasslist_vals:
fullkey = comp.make_key('%s_%s_{ebin_name}_%s_{evtype_name}' % (evtclassval, zcut, mktimekey))
name_keys = dict(zcut=zcut, ebin=comp.ebin_name, psftype=comp.evtype_name, coordsys=comp.coordsys, irf_ver=NAME_FACTORY.irf_ver(), mktime=mktimekey, evclass=evtclassval, fullpath=True)
outfile = NAME_FACTORY.bexpcube(**name_keys)
cmap = NAME_FACTORY.ccube(**name_keys)
infile = NAME_FACTORY.ltcube(**name_keys)
logfile = make_nfs_path(outfile.replace('.fits', '.log'))
job_configs[fullkey] = dict(cmap=cmap, infile=infile, outfile=outfile, irfs=NAME_FACTORY.irfs(**name_keys), hpx_order=min(comp.hpx_order, args['hpx_order_max']), evtype=comp.evtype, logfile=logfile) # depends on [control=['for'], data=['evtclassval']] # depends on [control=['for'], data=['mktimekey']] # depends on [control=['for'], data=['comp']]
return job_configs
|
    async def stop(wallet_name: str) -> None:
        """
        Gracefully stop an external revocation registry builder, waiting for
        its tasks already in progress to complete.

        The indy-sdk toolkit uses a temporary directory for tails file
        generation, and shutting down the toolkit removes the directory,
        crashing the external tails file write. This method allows a graceful
        stop: it touches a ``.stop`` sentinel file, then polls once per second
        until no ``.in-progress`` marker remains in the sentinel directory.

        :param wallet_name: name of external revocation registry builder to stop
        """
        LOGGER.debug('RevRegBuilder.stop >>>')
        # Per-wallet sentinel directory used to coordinate with the builder
        dir_sentinel = join(RevRegBuilder.dir_tails_sentinel(wallet_name))
        if isdir(dir_sentinel):
            open(join(dir_sentinel, '.stop'), 'w').close()  # touch the stop-request sentinel
            # Wait for every in-progress tails-file task to clear its marker
            while any(isfile(join(dir_sentinel, d, '.in-progress')) for d in listdir(dir_sentinel)):
                await asyncio.sleep(1)
        LOGGER.debug('RevRegBuilder.stop <<<')
|
<ast.AsyncFunctionDef object at 0x7da18f811720>
|
keyword[async] keyword[def] identifier[stop] ( identifier[wallet_name] : identifier[str] )-> keyword[None] :
literal[string]
identifier[LOGGER] . identifier[debug] ( literal[string] )
identifier[dir_sentinel] = identifier[join] ( identifier[RevRegBuilder] . identifier[dir_tails_sentinel] ( identifier[wallet_name] ))
keyword[if] identifier[isdir] ( identifier[dir_sentinel] ):
identifier[open] ( identifier[join] ( identifier[dir_sentinel] , literal[string] ), literal[string] ). identifier[close] ()
keyword[while] identifier[any] ( identifier[isfile] ( identifier[join] ( identifier[dir_sentinel] , identifier[d] , literal[string] )) keyword[for] identifier[d] keyword[in] identifier[listdir] ( identifier[dir_sentinel] )):
keyword[await] identifier[asyncio] . identifier[sleep] ( literal[int] )
identifier[LOGGER] . identifier[debug] ( literal[string] )
|
async def stop(wallet_name: str) -> None:
"""
Gracefully stop an external revocation registry builder, waiting for its current.
The indy-sdk toolkit uses a temporary directory for tails file mustration,
and shutting down the toolkit removes the directory, crashing the external
tails file write. This method allows a graceful stop to wait for completion
of such tasks already in progress.
:wallet_name: name external revocation registry builder to check
:return: whether a task is pending.
"""
LOGGER.debug('RevRegBuilder.stop >>>')
dir_sentinel = join(RevRegBuilder.dir_tails_sentinel(wallet_name))
if isdir(dir_sentinel):
open(join(dir_sentinel, '.stop'), 'w').close() # touch
while any((isfile(join(dir_sentinel, d, '.in-progress')) for d in listdir(dir_sentinel))):
await asyncio.sleep(1) # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
LOGGER.debug('RevRegBuilder.stop <<<')
|
def get_docker_client():
    """Build a Docker client from the active docker environment variables.

    Ripped off and slightly modified based on docker-py's
    kwargs_from_env utility function.
    """
    env = get_docker_env()
    host = env['DOCKER_HOST']
    cert_path = env['DOCKER_CERT_PATH']
    tls_verify = env['DOCKER_TLS_VERIFY']

    params = {
        'base_url': host.replace('tcp://', 'https://'),
        'timeout': None,
        'version': 'auto',
    }
    if tls_verify and cert_path:
        client_cert = (os.path.join(cert_path, 'cert.pem'),
                       os.path.join(cert_path, 'key.pem'))
        params['tls'] = docker.tls.TLSConfig(
            client_cert=client_cert,
            ca_cert=os.path.join(cert_path, 'ca.pem'),
            verify=True,
            ssl_version=None,
            assert_hostname=False)
    return docker.Client(**params)
|
def function[get_docker_client, parameter[]]:
constant[Ripped off and slightly modified based on docker-py's
kwargs_from_env utility function.]
variable[env] assign[=] call[name[get_docker_env], parameter[]]
<ast.Tuple object at 0x7da18c4cfe20> assign[=] tuple[[<ast.Subscript object at 0x7da18c4cc370>, <ast.Subscript object at 0x7da18c4ccdc0>, <ast.Subscript object at 0x7da18c4cca00>]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cf2e0>, <ast.Constant object at 0x7da18c4cfa60>, <ast.Constant object at 0x7da18c4ce620>], [<ast.Call object at 0x7da18c4ce680>, <ast.Constant object at 0x7da18c4cc9a0>, <ast.Constant object at 0x7da18c4ce950>]]
if <ast.BoolOp object at 0x7da18c4cec50> begin[:]
call[name[params]][constant[tls]] assign[=] call[name[docker].tls.TLSConfig, parameter[]]
return[call[name[docker].Client, parameter[]]]
|
keyword[def] identifier[get_docker_client] ():
literal[string]
identifier[env] = identifier[get_docker_env] ()
identifier[host] , identifier[cert_path] , identifier[tls_verify] = identifier[env] [ literal[string] ], identifier[env] [ literal[string] ], identifier[env] [ literal[string] ]
identifier[params] ={ literal[string] : identifier[host] . identifier[replace] ( literal[string] , literal[string] ),
literal[string] : keyword[None] ,
literal[string] : literal[string] }
keyword[if] identifier[tls_verify] keyword[and] identifier[cert_path] :
identifier[params] [ literal[string] ]= identifier[docker] . identifier[tls] . identifier[TLSConfig] (
identifier[client_cert] =( identifier[os] . identifier[path] . identifier[join] ( identifier[cert_path] , literal[string] ),
identifier[os] . identifier[path] . identifier[join] ( identifier[cert_path] , literal[string] )),
identifier[ca_cert] = identifier[os] . identifier[path] . identifier[join] ( identifier[cert_path] , literal[string] ),
identifier[verify] = keyword[True] ,
identifier[ssl_version] = keyword[None] ,
identifier[assert_hostname] = keyword[False] )
keyword[return] identifier[docker] . identifier[Client] (** identifier[params] )
|
def get_docker_client():
"""Ripped off and slightly modified based on docker-py's
kwargs_from_env utility function."""
env = get_docker_env()
(host, cert_path, tls_verify) = (env['DOCKER_HOST'], env['DOCKER_CERT_PATH'], env['DOCKER_TLS_VERIFY'])
params = {'base_url': host.replace('tcp://', 'https://'), 'timeout': None, 'version': 'auto'}
if tls_verify and cert_path:
params['tls'] = docker.tls.TLSConfig(client_cert=(os.path.join(cert_path, 'cert.pem'), os.path.join(cert_path, 'key.pem')), ca_cert=os.path.join(cert_path, 'ca.pem'), verify=True, ssl_version=None, assert_hostname=False) # depends on [control=['if'], data=[]]
return docker.Client(**params)
|
def _extract_lines(filename, f_globals, line_no, around):
"""
Extracts a block of lines from the given file
:param filename: Name of the source file
:param f_globals: Globals of the frame of the current code
:param line_no: Current line of code
:param around: Number of line to print before and after the current one
"""
current_line = linecache.getline(filename, line_no, f_globals)
if not current_line:
# No data on this line
return ""
lines = []
# Add some lines before
for pre_line_no in range(line_no - around, line_no):
pre_line = linecache.getline(filename, pre_line_no, f_globals)
lines.append("{0}".format(pre_line.rstrip()))
# The line itself
lines.append("{0}".format(current_line.rstrip()))
# Add some lines after
for pre_line_no in range(line_no + 1, line_no + around + 1):
pre_line = linecache.getline(filename, pre_line_no, f_globals)
lines.append("{0}".format(pre_line.rstrip()))
# Smart left strip
minimal_tab = None
for line in lines:
if line.strip():
tab = len(line) - len(line.lstrip())
if minimal_tab is None or tab < minimal_tab:
minimal_tab = tab
if minimal_tab > 0:
lines = [line[minimal_tab:] for line in lines]
# Add some place for a marker
marked_line = ">> {0}".format(lines[around])
lines = [" {0}".format(line) for line in lines]
lines[around] = marked_line
lines.append("")
return lines
|
def function[_extract_lines, parameter[filename, f_globals, line_no, around]]:
constant[
Extracts a block of lines from the given file
:param filename: Name of the source file
:param f_globals: Globals of the frame of the current code
:param line_no: Current line of code
:param around: Number of line to print before and after the current one
]
variable[current_line] assign[=] call[name[linecache].getline, parameter[name[filename], name[line_no], name[f_globals]]]
if <ast.UnaryOp object at 0x7da1b04d5f60> begin[:]
return[constant[]]
variable[lines] assign[=] list[[]]
for taget[name[pre_line_no]] in starred[call[name[range], parameter[binary_operation[name[line_no] - name[around]], name[line_no]]]] begin[:]
variable[pre_line] assign[=] call[name[linecache].getline, parameter[name[filename], name[pre_line_no], name[f_globals]]]
call[name[lines].append, parameter[call[constant[{0}].format, parameter[call[name[pre_line].rstrip, parameter[]]]]]]
call[name[lines].append, parameter[call[constant[{0}].format, parameter[call[name[current_line].rstrip, parameter[]]]]]]
for taget[name[pre_line_no]] in starred[call[name[range], parameter[binary_operation[name[line_no] + constant[1]], binary_operation[binary_operation[name[line_no] + name[around]] + constant[1]]]]] begin[:]
variable[pre_line] assign[=] call[name[linecache].getline, parameter[name[filename], name[pre_line_no], name[f_globals]]]
call[name[lines].append, parameter[call[constant[{0}].format, parameter[call[name[pre_line].rstrip, parameter[]]]]]]
variable[minimal_tab] assign[=] constant[None]
for taget[name[line]] in starred[name[lines]] begin[:]
if call[name[line].strip, parameter[]] begin[:]
variable[tab] assign[=] binary_operation[call[name[len], parameter[name[line]]] - call[name[len], parameter[call[name[line].lstrip, parameter[]]]]]
if <ast.BoolOp object at 0x7da1b0390ee0> begin[:]
variable[minimal_tab] assign[=] name[tab]
if compare[name[minimal_tab] greater[>] constant[0]] begin[:]
variable[lines] assign[=] <ast.ListComp object at 0x7da1b03913f0>
variable[marked_line] assign[=] call[constant[>> {0}].format, parameter[call[name[lines]][name[around]]]]
variable[lines] assign[=] <ast.ListComp object at 0x7da1b0390cd0>
call[name[lines]][name[around]] assign[=] name[marked_line]
call[name[lines].append, parameter[constant[]]]
return[name[lines]]
|
keyword[def] identifier[_extract_lines] ( identifier[filename] , identifier[f_globals] , identifier[line_no] , identifier[around] ):
literal[string]
identifier[current_line] = identifier[linecache] . identifier[getline] ( identifier[filename] , identifier[line_no] , identifier[f_globals] )
keyword[if] keyword[not] identifier[current_line] :
keyword[return] literal[string]
identifier[lines] =[]
keyword[for] identifier[pre_line_no] keyword[in] identifier[range] ( identifier[line_no] - identifier[around] , identifier[line_no] ):
identifier[pre_line] = identifier[linecache] . identifier[getline] ( identifier[filename] , identifier[pre_line_no] , identifier[f_globals] )
identifier[lines] . identifier[append] ( literal[string] . identifier[format] ( identifier[pre_line] . identifier[rstrip] ()))
identifier[lines] . identifier[append] ( literal[string] . identifier[format] ( identifier[current_line] . identifier[rstrip] ()))
keyword[for] identifier[pre_line_no] keyword[in] identifier[range] ( identifier[line_no] + literal[int] , identifier[line_no] + identifier[around] + literal[int] ):
identifier[pre_line] = identifier[linecache] . identifier[getline] ( identifier[filename] , identifier[pre_line_no] , identifier[f_globals] )
identifier[lines] . identifier[append] ( literal[string] . identifier[format] ( identifier[pre_line] . identifier[rstrip] ()))
identifier[minimal_tab] = keyword[None]
keyword[for] identifier[line] keyword[in] identifier[lines] :
keyword[if] identifier[line] . identifier[strip] ():
identifier[tab] = identifier[len] ( identifier[line] )- identifier[len] ( identifier[line] . identifier[lstrip] ())
keyword[if] identifier[minimal_tab] keyword[is] keyword[None] keyword[or] identifier[tab] < identifier[minimal_tab] :
identifier[minimal_tab] = identifier[tab]
keyword[if] identifier[minimal_tab] > literal[int] :
identifier[lines] =[ identifier[line] [ identifier[minimal_tab] :] keyword[for] identifier[line] keyword[in] identifier[lines] ]
identifier[marked_line] = literal[string] . identifier[format] ( identifier[lines] [ identifier[around] ])
identifier[lines] =[ literal[string] . identifier[format] ( identifier[line] ) keyword[for] identifier[line] keyword[in] identifier[lines] ]
identifier[lines] [ identifier[around] ]= identifier[marked_line]
identifier[lines] . identifier[append] ( literal[string] )
keyword[return] identifier[lines]
|
def _extract_lines(filename, f_globals, line_no, around):
"""
Extracts a block of lines from the given file
:param filename: Name of the source file
:param f_globals: Globals of the frame of the current code
:param line_no: Current line of code
:param around: Number of line to print before and after the current one
"""
current_line = linecache.getline(filename, line_no, f_globals)
if not current_line:
# No data on this line
return '' # depends on [control=['if'], data=[]]
lines = []
# Add some lines before
for pre_line_no in range(line_no - around, line_no):
pre_line = linecache.getline(filename, pre_line_no, f_globals)
lines.append('{0}'.format(pre_line.rstrip())) # depends on [control=['for'], data=['pre_line_no']]
# The line itself
lines.append('{0}'.format(current_line.rstrip()))
# Add some lines after
for pre_line_no in range(line_no + 1, line_no + around + 1):
pre_line = linecache.getline(filename, pre_line_no, f_globals)
lines.append('{0}'.format(pre_line.rstrip())) # depends on [control=['for'], data=['pre_line_no']]
# Smart left strip
minimal_tab = None
for line in lines:
if line.strip():
tab = len(line) - len(line.lstrip())
if minimal_tab is None or tab < minimal_tab:
minimal_tab = tab # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
if minimal_tab > 0:
lines = [line[minimal_tab:] for line in lines] # depends on [control=['if'], data=['minimal_tab']]
# Add some place for a marker
marked_line = '>> {0}'.format(lines[around])
lines = [' {0}'.format(line) for line in lines]
lines[around] = marked_line
lines.append('')
return lines
|
def startLoop(self, useDriverLoop=True):
"""
Starts an event loop to process queued commands and callbacks.
@param useDriverLoop: If True, uses the run loop provided by the driver
(the default). If False, assumes the caller will enter its own
run loop which will pump any events for the TTS engine properly.
@type useDriverLoop: bool
@raise RuntimeError: When the loop is already running
"""
if self._inLoop:
raise RuntimeError('run loop already started')
self._inLoop = True
self._driverLoop = useDriverLoop
self.proxy.startLoop(self._driverLoop)
|
def function[startLoop, parameter[self, useDriverLoop]]:
constant[
Starts an event loop to process queued commands and callbacks.
@param useDriverLoop: If True, uses the run loop provided by the driver
(the default). If False, assumes the caller will enter its own
run loop which will pump any events for the TTS engine properly.
@type useDriverLoop: bool
@raise RuntimeError: When the loop is already running
]
if name[self]._inLoop begin[:]
<ast.Raise object at 0x7da204963fa0>
name[self]._inLoop assign[=] constant[True]
name[self]._driverLoop assign[=] name[useDriverLoop]
call[name[self].proxy.startLoop, parameter[name[self]._driverLoop]]
|
keyword[def] identifier[startLoop] ( identifier[self] , identifier[useDriverLoop] = keyword[True] ):
literal[string]
keyword[if] identifier[self] . identifier[_inLoop] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[self] . identifier[_inLoop] = keyword[True]
identifier[self] . identifier[_driverLoop] = identifier[useDriverLoop]
identifier[self] . identifier[proxy] . identifier[startLoop] ( identifier[self] . identifier[_driverLoop] )
|
def startLoop(self, useDriverLoop=True):
"""
Starts an event loop to process queued commands and callbacks.
@param useDriverLoop: If True, uses the run loop provided by the driver
(the default). If False, assumes the caller will enter its own
run loop which will pump any events for the TTS engine properly.
@type useDriverLoop: bool
@raise RuntimeError: When the loop is already running
"""
if self._inLoop:
raise RuntimeError('run loop already started') # depends on [control=['if'], data=[]]
self._inLoop = True
self._driverLoop = useDriverLoop
self.proxy.startLoop(self._driverLoop)
|
def _add_input_deps(self, executor, args, kwargs):
"""Look for inputs of the app that are remote files. Submit stage_in
apps for such files and replace the file objects in the inputs list with
corresponding DataFuture objects.
Args:
- executor (str) : executor where the app is going to be launched
- args (List) : Positional args to app function
- kwargs (Dict) : Kwargs to app function
"""
# Return if the task is _*_stage_in
if executor == 'data_manager':
return args, kwargs
inputs = kwargs.get('inputs', [])
for idx, f in enumerate(inputs):
if isinstance(f, File) and f.is_remote():
inputs[idx] = self.data_manager.stage_in(f, executor)
for kwarg, f in kwargs.items():
if isinstance(f, File) and f.is_remote():
kwargs[kwarg] = self.data_manager.stage_in(f, executor)
newargs = list(args)
for idx, f in enumerate(newargs):
if isinstance(f, File) and f.is_remote():
newargs[idx] = self.data_manager.stage_in(f, executor)
return tuple(newargs), kwargs
|
def function[_add_input_deps, parameter[self, executor, args, kwargs]]:
constant[Look for inputs of the app that are remote files. Submit stage_in
apps for such files and replace the file objects in the inputs list with
corresponding DataFuture objects.
Args:
- executor (str) : executor where the app is going to be launched
- args (List) : Positional args to app function
- kwargs (Dict) : Kwargs to app function
]
if compare[name[executor] equal[==] constant[data_manager]] begin[:]
return[tuple[[<ast.Name object at 0x7da18dc9a4a0>, <ast.Name object at 0x7da18dc9a230>]]]
variable[inputs] assign[=] call[name[kwargs].get, parameter[constant[inputs], list[[]]]]
for taget[tuple[[<ast.Name object at 0x7da18dc9b430>, <ast.Name object at 0x7da18dc9bf70>]]] in starred[call[name[enumerate], parameter[name[inputs]]]] begin[:]
if <ast.BoolOp object at 0x7da18dc98ca0> begin[:]
call[name[inputs]][name[idx]] assign[=] call[name[self].data_manager.stage_in, parameter[name[f], name[executor]]]
for taget[tuple[[<ast.Name object at 0x7da18dc9bf40>, <ast.Name object at 0x7da18dc9bb80>]]] in starred[call[name[kwargs].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da18dc99630> begin[:]
call[name[kwargs]][name[kwarg]] assign[=] call[name[self].data_manager.stage_in, parameter[name[f], name[executor]]]
variable[newargs] assign[=] call[name[list], parameter[name[args]]]
for taget[tuple[[<ast.Name object at 0x7da18dc9ba30>, <ast.Name object at 0x7da18dc9b280>]]] in starred[call[name[enumerate], parameter[name[newargs]]]] begin[:]
if <ast.BoolOp object at 0x7da18dc9ac80> begin[:]
call[name[newargs]][name[idx]] assign[=] call[name[self].data_manager.stage_in, parameter[name[f], name[executor]]]
return[tuple[[<ast.Call object at 0x7da1b01d8160>, <ast.Name object at 0x7da1b01dbf70>]]]
|
keyword[def] identifier[_add_input_deps] ( identifier[self] , identifier[executor] , identifier[args] , identifier[kwargs] ):
literal[string]
keyword[if] identifier[executor] == literal[string] :
keyword[return] identifier[args] , identifier[kwargs]
identifier[inputs] = identifier[kwargs] . identifier[get] ( literal[string] ,[])
keyword[for] identifier[idx] , identifier[f] keyword[in] identifier[enumerate] ( identifier[inputs] ):
keyword[if] identifier[isinstance] ( identifier[f] , identifier[File] ) keyword[and] identifier[f] . identifier[is_remote] ():
identifier[inputs] [ identifier[idx] ]= identifier[self] . identifier[data_manager] . identifier[stage_in] ( identifier[f] , identifier[executor] )
keyword[for] identifier[kwarg] , identifier[f] keyword[in] identifier[kwargs] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[f] , identifier[File] ) keyword[and] identifier[f] . identifier[is_remote] ():
identifier[kwargs] [ identifier[kwarg] ]= identifier[self] . identifier[data_manager] . identifier[stage_in] ( identifier[f] , identifier[executor] )
identifier[newargs] = identifier[list] ( identifier[args] )
keyword[for] identifier[idx] , identifier[f] keyword[in] identifier[enumerate] ( identifier[newargs] ):
keyword[if] identifier[isinstance] ( identifier[f] , identifier[File] ) keyword[and] identifier[f] . identifier[is_remote] ():
identifier[newargs] [ identifier[idx] ]= identifier[self] . identifier[data_manager] . identifier[stage_in] ( identifier[f] , identifier[executor] )
keyword[return] identifier[tuple] ( identifier[newargs] ), identifier[kwargs]
|
def _add_input_deps(self, executor, args, kwargs):
"""Look for inputs of the app that are remote files. Submit stage_in
apps for such files and replace the file objects in the inputs list with
corresponding DataFuture objects.
Args:
- executor (str) : executor where the app is going to be launched
- args (List) : Positional args to app function
- kwargs (Dict) : Kwargs to app function
"""
# Return if the task is _*_stage_in
if executor == 'data_manager':
return (args, kwargs) # depends on [control=['if'], data=[]]
inputs = kwargs.get('inputs', [])
for (idx, f) in enumerate(inputs):
if isinstance(f, File) and f.is_remote():
inputs[idx] = self.data_manager.stage_in(f, executor) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
for (kwarg, f) in kwargs.items():
if isinstance(f, File) and f.is_remote():
kwargs[kwarg] = self.data_manager.stage_in(f, executor) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
newargs = list(args)
for (idx, f) in enumerate(newargs):
if isinstance(f, File) and f.is_remote():
newargs[idx] = self.data_manager.stage_in(f, executor) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return (tuple(newargs), kwargs)
|
def geometric_partitions(iterable, floor=1, ceiling=32768):
'''
Partition an iterable into chunks. Returns an iterator over partitions.
'''
partition_size = floor
run_length = multiprocessing.cpu_count()
run_count = 0
try:
while True:
#print("partition_size =", partition_size)
# Split the iterable and replace the original iterator to avoid
# advancing it
partition, iterable = itertools.tee(iterable)
# Yield the first partition, limited to the partition size
yield Queryable(partition).take(partition_size)
# Advance to the start of the next partition, this will raise
# StopIteration if the iterator is exhausted
for i in range(partition_size):
next(iterable)
# If we've reached the end of a run of this size, double the
# partition size
run_count += 1
if run_count >= run_length:
partition_size *= 2
run_count = 0
# Unless we have hit the ceiling
if partition_size > ceiling:
partition_size = ceiling
except StopIteration:
pass
|
def function[geometric_partitions, parameter[iterable, floor, ceiling]]:
constant[
Partition an iterable into chunks. Returns an iterator over partitions.
]
variable[partition_size] assign[=] name[floor]
variable[run_length] assign[=] call[name[multiprocessing].cpu_count, parameter[]]
variable[run_count] assign[=] constant[0]
<ast.Try object at 0x7da1b1933100>
|
keyword[def] identifier[geometric_partitions] ( identifier[iterable] , identifier[floor] = literal[int] , identifier[ceiling] = literal[int] ):
literal[string]
identifier[partition_size] = identifier[floor]
identifier[run_length] = identifier[multiprocessing] . identifier[cpu_count] ()
identifier[run_count] = literal[int]
keyword[try] :
keyword[while] keyword[True] :
identifier[partition] , identifier[iterable] = identifier[itertools] . identifier[tee] ( identifier[iterable] )
keyword[yield] identifier[Queryable] ( identifier[partition] ). identifier[take] ( identifier[partition_size] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[partition_size] ):
identifier[next] ( identifier[iterable] )
identifier[run_count] += literal[int]
keyword[if] identifier[run_count] >= identifier[run_length] :
identifier[partition_size] *= literal[int]
identifier[run_count] = literal[int]
keyword[if] identifier[partition_size] > identifier[ceiling] :
identifier[partition_size] = identifier[ceiling]
keyword[except] identifier[StopIteration] :
keyword[pass]
|
def geometric_partitions(iterable, floor=1, ceiling=32768):
"""
Partition an iterable into chunks. Returns an iterator over partitions.
"""
partition_size = floor
run_length = multiprocessing.cpu_count()
run_count = 0
try:
while True: #print("partition_size =", partition_size)
# Split the iterable and replace the original iterator to avoid
# advancing it
(partition, iterable) = itertools.tee(iterable) # Yield the first partition, limited to the partition size
yield Queryable(partition).take(partition_size) # Advance to the start of the next partition, this will raise
# StopIteration if the iterator is exhausted
for i in range(partition_size):
next(iterable) # depends on [control=['for'], data=[]] # If we've reached the end of a run of this size, double the
# partition size
run_count += 1
if run_count >= run_length:
partition_size *= 2
run_count = 0 # depends on [control=['if'], data=['run_count']] # Unless we have hit the ceiling
if partition_size > ceiling:
partition_size = ceiling # depends on [control=['if'], data=['partition_size', 'ceiling']] # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]]
except StopIteration:
pass # depends on [control=['except'], data=[]]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.