code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def read_configuration(self):
    """
    Populate the ``username``, ``password``, ``realm`` and ``url``
    attributes from the PyPI access configuration, delegating the actual
    .pypirc parsing to distutils.
    """
    # Let distutils locate and parse the .pypirc file on our behalf.
    command = self._get_pypirc_command()
    command.repository = self.url
    config = command._read_pypirc()
    self.username = config.get('username')
    self.password = config.get('password')
    # Fall back to the standard 'pypi' realm when none is configured.
    self.realm = config.get('realm', 'pypi')
    # Keep the current URL unless the configuration overrides it.
    self.url = config.get('repository', self.url)
constant[
Read the PyPI access configuration as supported by distutils, getting
PyPI to do the actual work. This populates ``username``, ``password``,
``realm`` and ``url`` attributes from the configuration.
]
variable[c] assign[=] call[name[self]._get_pypirc_command, parameter[]]
name[c].repository assign[=] name[self].url
variable[cfg] assign[=] call[name[c]._read_pypirc, parameter[]]
name[self].username assign[=] call[name[cfg].get, parameter[constant[username]]]
name[self].password assign[=] call[name[cfg].get, parameter[constant[password]]]
name[self].realm assign[=] call[name[cfg].get, parameter[constant[realm], constant[pypi]]]
name[self].url assign[=] call[name[cfg].get, parameter[constant[repository], name[self].url]] | keyword[def] identifier[read_configuration] ( identifier[self] ):
literal[string]
identifier[c] = identifier[self] . identifier[_get_pypirc_command] ()
identifier[c] . identifier[repository] = identifier[self] . identifier[url]
identifier[cfg] = identifier[c] . identifier[_read_pypirc] ()
identifier[self] . identifier[username] = identifier[cfg] . identifier[get] ( literal[string] )
identifier[self] . identifier[password] = identifier[cfg] . identifier[get] ( literal[string] )
identifier[self] . identifier[realm] = identifier[cfg] . identifier[get] ( literal[string] , literal[string] )
identifier[self] . identifier[url] = identifier[cfg] . identifier[get] ( literal[string] , identifier[self] . identifier[url] ) | def read_configuration(self):
"""
Read the PyPI access configuration as supported by distutils, getting
PyPI to do the actual work. This populates ``username``, ``password``,
``realm`` and ``url`` attributes from the configuration.
"""
# get distutils to do the work
c = self._get_pypirc_command()
c.repository = self.url
cfg = c._read_pypirc()
self.username = cfg.get('username')
self.password = cfg.get('password')
self.realm = cfg.get('realm', 'pypi')
self.url = cfg.get('repository', self.url) |
def get_extension_attribute(self, ext_name, key):
    """
    Look up an attribute stored for a given extension.

    Returns the value registered under ``key`` for extension ``ext_name``,
    or ``None`` when the extension has no attributes or the key is absent.
    """
    # A missing or empty attribute mapping both yield None.
    ext_attrs = self.extension_attributes.get(ext_name)
    return ext_attrs.get(key) if ext_attrs else None
constant[
Banana banana
]
variable[attributes] assign[=] call[name[self].extension_attributes.get, parameter[name[ext_name]]]
if <ast.UnaryOp object at 0x7da18dc980d0> begin[:]
return[constant[None]]
return[call[name[attributes].get, parameter[name[key]]]] | keyword[def] identifier[get_extension_attribute] ( identifier[self] , identifier[ext_name] , identifier[key] ):
literal[string]
identifier[attributes] = identifier[self] . identifier[extension_attributes] . identifier[get] ( identifier[ext_name] )
keyword[if] keyword[not] identifier[attributes] :
keyword[return] keyword[None]
keyword[return] identifier[attributes] . identifier[get] ( identifier[key] ) | def get_extension_attribute(self, ext_name, key):
"""
Banana banana
"""
attributes = self.extension_attributes.get(ext_name)
if not attributes:
return None # depends on [control=['if'], data=[]]
return attributes.get(key) |
def error_retry_codes(self, value):
    """Set value for error_retry_codes."""
    # A comma-separated string such as "500,502" becomes [500, 502];
    # any non-string value is stored as given.
    if isinstance(value, six.string_types):
        value = list(map(int, value.split(",")))
    self._set_option("error_retry_codes", value)
constant[Set value for error_retry_codes.]
if call[name[isinstance], parameter[name[value], name[six].string_types]] begin[:]
variable[value] assign[=] <ast.ListComp object at 0x7da1b19e5600>
call[name[self]._set_option, parameter[constant[error_retry_codes], name[value]]] | keyword[def] identifier[error_retry_codes] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[six] . identifier[string_types] ):
identifier[value] =[ identifier[int] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[value] . identifier[split] ( literal[string] )]
identifier[self] . identifier[_set_option] ( literal[string] , identifier[value] ) | def error_retry_codes(self, value):
"""Set value for error_retry_codes."""
if isinstance(value, six.string_types):
value = [int(x) for x in value.split(',')] # depends on [control=['if'], data=[]]
self._set_option('error_retry_codes', value) |
def _aicc(model_results, nobs):
"""Compute the corrected Akaike Information Criterion"""
aic = model_results.aic
df_model = model_results.df_model + 1 # add one for constant term
return aic + 2. * df_model * (nobs / (nobs - df_model - 1.) - 1.) | def function[_aicc, parameter[model_results, nobs]]:
constant[Compute the corrected Akaike Information Criterion]
variable[aic] assign[=] name[model_results].aic
variable[df_model] assign[=] binary_operation[name[model_results].df_model + constant[1]]
return[binary_operation[name[aic] + binary_operation[binary_operation[constant[2.0] * name[df_model]] * binary_operation[binary_operation[name[nobs] / binary_operation[binary_operation[name[nobs] - name[df_model]] - constant[1.0]]] - constant[1.0]]]]] | keyword[def] identifier[_aicc] ( identifier[model_results] , identifier[nobs] ):
literal[string]
identifier[aic] = identifier[model_results] . identifier[aic]
identifier[df_model] = identifier[model_results] . identifier[df_model] + literal[int]
keyword[return] identifier[aic] + literal[int] * identifier[df_model] *( identifier[nobs] /( identifier[nobs] - identifier[df_model] - literal[int] )- literal[int] ) | def _aicc(model_results, nobs):
"""Compute the corrected Akaike Information Criterion"""
aic = model_results.aic
df_model = model_results.df_model + 1 # add one for constant term
return aic + 2.0 * df_model * (nobs / (nobs - df_model - 1.0) - 1.0) |
def resolve_import(self, item):
    """Simulate how Python resolves imports.

    Returns the filename of the source file Python would load
    when processing a statement like 'import name' in the module
    we're currently under.

    Args:
        item: An instance of ImportItem

    Returns:
        A filename

    Raises:
        ImportException: If the module doesn't exist.
    """
    name = item.name
    # For `from a.b.c import d`, `d` may be a symbol rather than a module,
    # so both `a.b.c.d` and the shortened `a.b.c` are candidate names.
    short_name = None
    if item.is_from and not item.is_star:
        if '.' in name.lstrip('.'):
            # Something like `a.b.c`: cut off the trailing `.c`.
            cut = name.rfind('.')
        else:
            # Something like `..c`: cut off just the `c`, keep the dots.
            cut = name.rfind('.') + 1
        short_name = name[:cut]

    if import_finder.is_builtin(name):
        return Builtin(name + '.so', name)

    filename, level = convert_to_path(name)
    if level:
        # Relative import: resolve against the importing file's directory.
        filename = os.path.normpath(
            os.path.join(self.current_directory, filename))

    candidates = [(name, filename)]
    if short_name:
        candidates.append((short_name, os.path.dirname(filename)))

    for module_name, path in candidates:
        for fs in self.fs_path:
            found = self._find_file(fs, path)
            if not found or found == self.current_module.path:
                # A file must never import itself.
                continue
            if item.is_relative():
                package_name = self.current_module.package_name
                if package_name is None:
                    # Relative import attempted from outside any package.
                    raise ImportException(name)
                module_name = get_absolute_name(package_name, module_name)
            if isinstance(self.current_module, System):
                return System(found, module_name)
            return Local(found, module_name, fs)

    # Not found on the explicit pythonpath; fall back to whatever Python
    # itself resolved, if anything.
    if item.source:
        prefix, ext = os.path.splitext(item.source)
        mod_name = name
        # Again allow for the imported name being a symbol, not a module.
        if short_name:
            dotted = utils.strip_suffix(
                prefix.replace(os.path.sep, '.'), '.__init__')
            if not dotted.endswith(name) and dotted.endswith(short_name):
                mod_name = short_name
        if ext == '.pyc':
            pyfile = prefix + '.py'
            if os.path.exists(pyfile):
                return System(pyfile, mod_name)
        elif not ext:
            pyfile = os.path.join(prefix, "__init__.py")
            if os.path.exists(pyfile):
                return System(pyfile, mod_name)
        return System(item.source, mod_name)
    raise ImportException(name)
constant[Simulate how Python resolves imports.
Returns the filename of the source file Python would load
when processing a statement like 'import name' in the module
we're currently under.
Args:
item: An instance of ImportItem
Returns:
A filename
Raises:
ImportException: If the module doesn't exist.
]
variable[name] assign[=] name[item].name
variable[short_name] assign[=] constant[None]
if <ast.BoolOp object at 0x7da1b0861000> begin[:]
if compare[constant[.] in call[name[name].lstrip, parameter[constant[.]]]] begin[:]
variable[rindex] assign[=] call[name[name].rfind, parameter[constant[.]]]
variable[short_name] assign[=] call[name[name]][<ast.Slice object at 0x7da1b0862680>]
if call[name[import_finder].is_builtin, parameter[name[name]]] begin[:]
variable[filename] assign[=] binary_operation[name[name] + constant[.so]]
return[call[name[Builtin], parameter[name[filename], name[name]]]]
<ast.Tuple object at 0x7da1b0863a00> assign[=] call[name[convert_to_path], parameter[name[name]]]
if name[level] begin[:]
variable[filename] assign[=] call[name[os].path.normpath, parameter[call[name[os].path.join, parameter[name[self].current_directory, name[filename]]]]]
variable[files] assign[=] list[[<ast.Tuple object at 0x7da1b0862b30>]]
if name[short_name] begin[:]
variable[short_filename] assign[=] call[name[os].path.dirname, parameter[name[filename]]]
call[name[files].append, parameter[tuple[[<ast.Name object at 0x7da1b08936d0>, <ast.Name object at 0x7da1b0893df0>]]]]
for taget[tuple[[<ast.Name object at 0x7da1b0890280>, <ast.Name object at 0x7da1b0890580>]]] in starred[name[files]] begin[:]
for taget[name[fs]] in starred[name[self].fs_path] begin[:]
variable[f] assign[=] call[name[self]._find_file, parameter[name[fs], name[path]]]
if <ast.BoolOp object at 0x7da1b0893f40> begin[:]
continue
if call[name[item].is_relative, parameter[]] begin[:]
variable[package_name] assign[=] name[self].current_module.package_name
if compare[name[package_name] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b0893940>
variable[module_name] assign[=] call[name[get_absolute_name], parameter[name[package_name], name[module_name]]]
if call[name[isinstance], parameter[name[self].current_module, name[System]]] begin[:]
return[call[name[System], parameter[name[f], name[module_name]]]]
return[call[name[Local], parameter[name[f], name[module_name], name[fs]]]]
if name[item].source begin[:]
<ast.Tuple object at 0x7da1b08ba290> assign[=] call[name[os].path.splitext, parameter[name[item].source]]
variable[mod_name] assign[=] name[name]
if name[short_name] begin[:]
variable[mod] assign[=] call[name[prefix].replace, parameter[name[os].path.sep, constant[.]]]
variable[mod] assign[=] call[name[utils].strip_suffix, parameter[name[mod], constant[.__init__]]]
if <ast.BoolOp object at 0x7da1b08b98d0> begin[:]
variable[mod_name] assign[=] name[short_name]
if compare[name[ext] equal[==] constant[.pyc]] begin[:]
variable[pyfile] assign[=] binary_operation[name[prefix] + constant[.py]]
if call[name[os].path.exists, parameter[name[pyfile]]] begin[:]
return[call[name[System], parameter[name[pyfile], name[mod_name]]]]
return[call[name[System], parameter[name[item].source, name[mod_name]]]]
<ast.Raise object at 0x7da1b077bcd0> | keyword[def] identifier[resolve_import] ( identifier[self] , identifier[item] ):
literal[string]
identifier[name] = identifier[item] . identifier[name]
identifier[short_name] = keyword[None]
keyword[if] identifier[item] . identifier[is_from] keyword[and] keyword[not] identifier[item] . identifier[is_star] :
keyword[if] literal[string] keyword[in] identifier[name] . identifier[lstrip] ( literal[string] ):
identifier[rindex] = identifier[name] . identifier[rfind] ( literal[string] )
keyword[else] :
identifier[rindex] = identifier[name] . identifier[rfind] ( literal[string] )+ literal[int]
identifier[short_name] = identifier[name] [: identifier[rindex] ]
keyword[if] identifier[import_finder] . identifier[is_builtin] ( identifier[name] ):
identifier[filename] = identifier[name] + literal[string]
keyword[return] identifier[Builtin] ( identifier[filename] , identifier[name] )
identifier[filename] , identifier[level] = identifier[convert_to_path] ( identifier[name] )
keyword[if] identifier[level] :
identifier[filename] = identifier[os] . identifier[path] . identifier[normpath] (
identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[current_directory] , identifier[filename] ))
identifier[files] =[( identifier[name] , identifier[filename] )]
keyword[if] identifier[short_name] :
identifier[short_filename] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[filename] )
identifier[files] . identifier[append] (( identifier[short_name] , identifier[short_filename] ))
keyword[for] identifier[module_name] , identifier[path] keyword[in] identifier[files] :
keyword[for] identifier[fs] keyword[in] identifier[self] . identifier[fs_path] :
identifier[f] = identifier[self] . identifier[_find_file] ( identifier[fs] , identifier[path] )
keyword[if] keyword[not] identifier[f] keyword[or] identifier[f] == identifier[self] . identifier[current_module] . identifier[path] :
keyword[continue]
keyword[if] identifier[item] . identifier[is_relative] ():
identifier[package_name] = identifier[self] . identifier[current_module] . identifier[package_name]
keyword[if] identifier[package_name] keyword[is] keyword[None] :
keyword[raise] identifier[ImportException] ( identifier[name] )
identifier[module_name] = identifier[get_absolute_name] ( identifier[package_name] , identifier[module_name] )
keyword[if] identifier[isinstance] ( identifier[self] . identifier[current_module] , identifier[System] ):
keyword[return] identifier[System] ( identifier[f] , identifier[module_name] )
keyword[return] identifier[Local] ( identifier[f] , identifier[module_name] , identifier[fs] )
keyword[if] identifier[item] . identifier[source] :
identifier[prefix] , identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[item] . identifier[source] )
identifier[mod_name] = identifier[name]
keyword[if] identifier[short_name] :
identifier[mod] = identifier[prefix] . identifier[replace] ( identifier[os] . identifier[path] . identifier[sep] , literal[string] )
identifier[mod] = identifier[utils] . identifier[strip_suffix] ( identifier[mod] , literal[string] )
keyword[if] keyword[not] identifier[mod] . identifier[endswith] ( identifier[name] ) keyword[and] identifier[mod] . identifier[endswith] ( identifier[short_name] ):
identifier[mod_name] = identifier[short_name]
keyword[if] identifier[ext] == literal[string] :
identifier[pyfile] = identifier[prefix] + literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[pyfile] ):
keyword[return] identifier[System] ( identifier[pyfile] , identifier[mod_name] )
keyword[elif] keyword[not] identifier[ext] :
identifier[pyfile] = identifier[os] . identifier[path] . identifier[join] ( identifier[prefix] , literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[pyfile] ):
keyword[return] identifier[System] ( identifier[pyfile] , identifier[mod_name] )
keyword[return] identifier[System] ( identifier[item] . identifier[source] , identifier[mod_name] )
keyword[raise] identifier[ImportException] ( identifier[name] ) | def resolve_import(self, item):
"""Simulate how Python resolves imports.
Returns the filename of the source file Python would load
when processing a statement like 'import name' in the module
we're currently under.
Args:
item: An instance of ImportItem
Returns:
A filename
Raises:
ImportException: If the module doesn't exist.
"""
name = item.name
# The last part in `from a.b.c import d` might be a symbol rather than a
# module, so we try a.b.c and a.b.c.d as names.
short_name = None
if item.is_from and (not item.is_star):
if '.' in name.lstrip('.'):
# The name is something like `a.b.c`, so strip off `.c`.
rindex = name.rfind('.') # depends on [control=['if'], data=[]]
else:
# The name is something like `..c`, so strip off just `c`.
rindex = name.rfind('.') + 1
short_name = name[:rindex] # depends on [control=['if'], data=[]]
if import_finder.is_builtin(name):
filename = name + '.so'
return Builtin(filename, name) # depends on [control=['if'], data=[]]
(filename, level) = convert_to_path(name)
if level:
# This is a relative import; we need to resolve the filename
# relative to the importing file path.
filename = os.path.normpath(os.path.join(self.current_directory, filename)) # depends on [control=['if'], data=[]]
files = [(name, filename)]
if short_name:
short_filename = os.path.dirname(filename)
files.append((short_name, short_filename)) # depends on [control=['if'], data=[]]
for (module_name, path) in files:
for fs in self.fs_path:
f = self._find_file(fs, path)
if not f or f == self.current_module.path:
# We cannot import a file from itself.
continue # depends on [control=['if'], data=[]]
if item.is_relative():
package_name = self.current_module.package_name
if package_name is None:
# Relative import in non-package
raise ImportException(name) # depends on [control=['if'], data=[]]
module_name = get_absolute_name(package_name, module_name)
if isinstance(self.current_module, System):
return System(f, module_name) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return Local(f, module_name, fs) # depends on [control=['for'], data=['fs']] # depends on [control=['for'], data=[]]
# If the module isn't found in the explicit pythonpath, see if python
# itself resolved it.
if item.source:
(prefix, ext) = os.path.splitext(item.source)
mod_name = name
# We need to check for importing a symbol here too.
if short_name:
mod = prefix.replace(os.path.sep, '.')
mod = utils.strip_suffix(mod, '.__init__')
if not mod.endswith(name) and mod.endswith(short_name):
mod_name = short_name # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if ext == '.pyc':
pyfile = prefix + '.py'
if os.path.exists(pyfile):
return System(pyfile, mod_name) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif not ext:
pyfile = os.path.join(prefix, '__init__.py')
if os.path.exists(pyfile):
return System(pyfile, mod_name) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return System(item.source, mod_name) # depends on [control=['if'], data=[]]
raise ImportException(name) |
def _get_gcloud_records(self, gcloud_zone, page_token=None):
""" Generator function which yields ResourceRecordSet for the managed
gcloud zone, until there are no more records to pull.
:param gcloud_zone: zone to pull records from
:type gcloud_zone: google.cloud.dns.ManagedZone
:param page_token: page token for the page to get
:return: a resource record set
:type return: google.cloud.dns.ResourceRecordSet
"""
gcloud_iterator = gcloud_zone.list_resource_record_sets(
page_token=page_token)
for gcloud_record in gcloud_iterator:
yield gcloud_record
# This is to get results which may be on a "paged" page.
# (if more than max_results) entries.
if gcloud_iterator.next_page_token:
for gcloud_record in self._get_gcloud_records(
gcloud_zone, gcloud_iterator.next_page_token):
# yield from is in python 3 only.
yield gcloud_record | def function[_get_gcloud_records, parameter[self, gcloud_zone, page_token]]:
constant[ Generator function which yields ResourceRecordSet for the managed
gcloud zone, until there are no more records to pull.
:param gcloud_zone: zone to pull records from
:type gcloud_zone: google.cloud.dns.ManagedZone
:param page_token: page token for the page to get
:return: a resource record set
:type return: google.cloud.dns.ResourceRecordSet
]
variable[gcloud_iterator] assign[=] call[name[gcloud_zone].list_resource_record_sets, parameter[]]
for taget[name[gcloud_record]] in starred[name[gcloud_iterator]] begin[:]
<ast.Yield object at 0x7da1b16601f0>
if name[gcloud_iterator].next_page_token begin[:]
for taget[name[gcloud_record]] in starred[call[name[self]._get_gcloud_records, parameter[name[gcloud_zone], name[gcloud_iterator].next_page_token]]] begin[:]
<ast.Yield object at 0x7da1b18cac20> | keyword[def] identifier[_get_gcloud_records] ( identifier[self] , identifier[gcloud_zone] , identifier[page_token] = keyword[None] ):
literal[string]
identifier[gcloud_iterator] = identifier[gcloud_zone] . identifier[list_resource_record_sets] (
identifier[page_token] = identifier[page_token] )
keyword[for] identifier[gcloud_record] keyword[in] identifier[gcloud_iterator] :
keyword[yield] identifier[gcloud_record]
keyword[if] identifier[gcloud_iterator] . identifier[next_page_token] :
keyword[for] identifier[gcloud_record] keyword[in] identifier[self] . identifier[_get_gcloud_records] (
identifier[gcloud_zone] , identifier[gcloud_iterator] . identifier[next_page_token] ):
keyword[yield] identifier[gcloud_record] | def _get_gcloud_records(self, gcloud_zone, page_token=None):
""" Generator function which yields ResourceRecordSet for the managed
gcloud zone, until there are no more records to pull.
:param gcloud_zone: zone to pull records from
:type gcloud_zone: google.cloud.dns.ManagedZone
:param page_token: page token for the page to get
:return: a resource record set
:type return: google.cloud.dns.ResourceRecordSet
"""
gcloud_iterator = gcloud_zone.list_resource_record_sets(page_token=page_token)
for gcloud_record in gcloud_iterator:
yield gcloud_record # depends on [control=['for'], data=['gcloud_record']]
# This is to get results which may be on a "paged" page.
# (if more than max_results) entries.
if gcloud_iterator.next_page_token:
for gcloud_record in self._get_gcloud_records(gcloud_zone, gcloud_iterator.next_page_token):
# yield from is in python 3 only.
yield gcloud_record # depends on [control=['for'], data=['gcloud_record']] # depends on [control=['if'], data=[]] |
def load_into_collection_from_url(collection, url, content_type=None):
    """
    Loads resources from the representation contained in the given URL into
    the given collection resource.

    :returns: collection resource
    """
    parsed_url = urlparse.urlparse(url)
    scheme = parsed_url.scheme  # pylint: disable=E1101
    if scheme != 'file':
        raise ValueError('Unsupported URL scheme "%s".' % scheme)
    # A file:// URL is treated as a local filesystem path.
    load_into_collection_from_file(collection,
                                   parsed_url.path,  # pylint: disable=E1101
                                   content_type=content_type)
constant[
Loads resources from the representation contained in the given URL into
the given collection resource.
:returns: collection resource
]
variable[parsed] assign[=] call[name[urlparse].urlparse, parameter[name[url]]]
variable[scheme] assign[=] name[parsed].scheme
if compare[name[scheme] equal[==] constant[file]] begin[:]
call[name[load_into_collection_from_file], parameter[name[collection], name[parsed].path]] | keyword[def] identifier[load_into_collection_from_url] ( identifier[collection] , identifier[url] , identifier[content_type] = keyword[None] ):
literal[string]
identifier[parsed] = identifier[urlparse] . identifier[urlparse] ( identifier[url] )
identifier[scheme] = identifier[parsed] . identifier[scheme]
keyword[if] identifier[scheme] == literal[string] :
identifier[load_into_collection_from_file] ( identifier[collection] ,
identifier[parsed] . identifier[path] ,
identifier[content_type] = identifier[content_type] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[scheme] ) | def load_into_collection_from_url(collection, url, content_type=None):
"""
Loads resources from the representation contained in the given URL into
the given collection resource.
:returns: collection resource
"""
parsed = urlparse.urlparse(url)
scheme = parsed.scheme # pylint: disable=E1101
if scheme == 'file':
# Assume a local path.
# pylint: disable=E1101
load_into_collection_from_file(collection, parsed.path, content_type=content_type) # depends on [control=['if'], data=[]]
else:
raise ValueError('Unsupported URL scheme "%s".' % scheme) |
def add_input_endpoint(kwargs=None, conn=None, call=None):
    '''
    .. versionadded:: 2015.8.0

    Add an input endpoint to the deployment. Please note that
    there may be a delay before the changes show up.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f add_input_endpoint my-azure service=myservice \\
            deployment=mydeployment role=myrole name=HTTP local_port=80 \\
            port=80 protocol=tcp enable_direct_server_return=False \\
            timeout_for_tcp_idle_connection=4
    '''
    # Adding and updating share one implementation; select 'add' mode.
    return update_input_endpoint(kwargs=kwargs, conn=conn,
                                 call='function', activity='add')
constant[
.. versionadded:: 2015.8.0
Add an input endpoint to the deployment. Please note that
there may be a delay before the changes show up.
CLI Example:
.. code-block:: bash
salt-cloud -f add_input_endpoint my-azure service=myservice \
deployment=mydeployment role=myrole name=HTTP local_port=80 \
port=80 protocol=tcp enable_direct_server_return=False \
timeout_for_tcp_idle_connection=4
]
return[call[name[update_input_endpoint], parameter[]]] | keyword[def] identifier[add_input_endpoint] ( identifier[kwargs] = keyword[None] , identifier[conn] = keyword[None] , identifier[call] = keyword[None] ):
literal[string]
keyword[return] identifier[update_input_endpoint] (
identifier[kwargs] = identifier[kwargs] ,
identifier[conn] = identifier[conn] ,
identifier[call] = literal[string] ,
identifier[activity] = literal[string] ,
) | def add_input_endpoint(kwargs=None, conn=None, call=None):
"""
.. versionadded:: 2015.8.0
Add an input endpoint to the deployment. Please note that
there may be a delay before the changes show up.
CLI Example:
.. code-block:: bash
salt-cloud -f add_input_endpoint my-azure service=myservice \\
deployment=mydeployment role=myrole name=HTTP local_port=80 \\
port=80 protocol=tcp enable_direct_server_return=False \\
timeout_for_tcp_idle_connection=4
"""
return update_input_endpoint(kwargs=kwargs, conn=conn, call='function', activity='add') |
def police_priority_map_exceed_map_pri7_exceed(self, **kwargs):
    """Auto Generated Code

    Builds the NETCONF config subtree for a police-priority-map's
    exceed/map-pri7-exceed leaf and hands it to the callback.
    """
    config = ET.Element("config")
    pmap = ET.SubElement(config, "police-priority-map",
                         xmlns="urn:brocade.com:mgmt:brocade-policer")
    # 'name' identifies which priority map is being configured.
    ET.SubElement(pmap, "name").text = kwargs.pop('name')
    exceed = ET.SubElement(pmap, "exceed")
    ET.SubElement(exceed, "map-pri7-exceed").text = \
        kwargs.pop('map_pri7_exceed')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[police_priority_map] assign[=] call[name[ET].SubElement, parameter[name[config], constant[police-priority-map]]]
variable[name_key] assign[=] call[name[ET].SubElement, parameter[name[police_priority_map], constant[name]]]
name[name_key].text assign[=] call[name[kwargs].pop, parameter[constant[name]]]
variable[exceed] assign[=] call[name[ET].SubElement, parameter[name[police_priority_map], constant[exceed]]]
variable[map_pri7_exceed] assign[=] call[name[ET].SubElement, parameter[name[exceed], constant[map-pri7-exceed]]]
name[map_pri7_exceed].text assign[=] call[name[kwargs].pop, parameter[constant[map_pri7_exceed]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[police_priority_map_exceed_map_pri7_exceed] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[police_priority_map] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[name_key] = identifier[ET] . identifier[SubElement] ( identifier[police_priority_map] , literal[string] )
identifier[name_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[exceed] = identifier[ET] . identifier[SubElement] ( identifier[police_priority_map] , literal[string] )
identifier[map_pri7_exceed] = identifier[ET] . identifier[SubElement] ( identifier[exceed] , literal[string] )
identifier[map_pri7_exceed] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def police_priority_map_exceed_map_pri7_exceed(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
police_priority_map = ET.SubElement(config, 'police-priority-map', xmlns='urn:brocade.com:mgmt:brocade-policer')
name_key = ET.SubElement(police_priority_map, 'name')
name_key.text = kwargs.pop('name')
exceed = ET.SubElement(police_priority_map, 'exceed')
map_pri7_exceed = ET.SubElement(exceed, 'map-pri7-exceed')
map_pri7_exceed.text = kwargs.pop('map_pri7_exceed')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def read_vcf(vcf_file, ref_file):
    """
    Reads in a vcf/vcf.gz file and associated
    reference sequence fasta (to which the VCF file is mapped).
    Parses mutations, insertions, and deletions and stores them in a nested dict,
    see 'returns' for the dict structure.
    Calls with heterozygous values 0/1, 0/2, etc and no-calls (./.) are
    replaced with Ns at the associated sites.
    Positions are stored to correspond the location in the reference sequence
    in Python (numbering is transformed to start at 0)

    Parameters
    ----------
    vcf_file : string
        Path to the vcf or vcf.gz file to be read in
    ref_file : string
        Path to the fasta reference file to be read in

    Returns
    --------
    compress_seq : nested dict
        In the format: ::

            {
            'reference':'AGCTCGA..A',
            'sequences': { 'seq1':{4:'A', 7:'-'}, 'seq2':{100:'C'} },
            'insertions': { 'seq1':{4:'ATT'}, 'seq3':{1:'TT', 10:'CAG'} },
            'positions': [1,4,7,10,100...]
            }

        references : string
            String of the reference sequence read from the Fasta, to which
            the variable sites are mapped
        sequences : nested dict
            Dict containing sequence names as keys which map to dicts
            that have position as key and the single-base mutation (or deletion)
            as values
        insertions : nested dict
            Dict in the same format as the above, which stores insertions and their
            locations. The first base of the insertion is the same as whatever is
            currently in that position (Ref if no mutation, mutation in 'sequences'
            otherwise), so the current base can be directly replaced by the bases held here.
        positions : list
            Python list of all positions with a mutation, insertion, or deletion.
    """
    # Note on VCF Format
    # -------------------
    # 'Insertion where there are also deletions' (special handling)
    #     REF   ALT     Seq1    Seq2
    #     GC    GCC,G   1/1     2/2
    #     Insertions formatted differently - don't know how many bp match
    #     the Ref (unlike simple insert below). Could be mutations, also.
    # 'Deletion'
    #     REF   ALT
    #     GC    G
    #     Alt does not have to be 1 bp - any length shorter than Ref.
    # 'Insertion'
    #     REF   ALT
    #     A     ATT
    #     First base always matches Ref.
    # 'No indel'
    #     REF   ALT
    #     A     G

    # define here, so that all sub-functions can access them
    sequences = defaultdict(dict)
    insertions = defaultdict(dict)  # Currently not used, but kept in case of future use.

    # TreeTime handles 2-3 base ambig codes, this will allow that.
    # (Currently unused -- see the commented-out call in parseBadCall.)
    def getAmbigCode(bp1, bp2, bp3=""):
        bps = [bp1, bp2, bp3]
        bps.sort()
        key = "".join(bps)
        return {
            'CT': 'Y',
            'AG': 'R',
            'AT': 'W',
            'CG': 'S',
            'GT': 'K',
            'AC': 'M',
            'AGT': 'D',
            'ACG': 'V',
            'ACT': 'H',
            'CGT': 'B'
        }[key]

    # Parses a 'normal' (not hetero or no-call) call depending if
    # insertion+deletion, insertion, deletion, or single bp substitution.
    def parseCall(snps, ins, pos, ref, alt):
        # Insertion where there are also deletions (special handling)
        if len(ref) > 1 and len(alt) > len(ref):
            for i in range(len(ref)):
                # if the pos doesn't match, store in sequences
                if ref[i] != alt[i]:
                    snps[pos+i] = alt[i] if alt[i] != '.' else 'N'  # '.' = no-call
                # if about to run out of ref, store rest:
                if (i+1) >= len(ref):
                    ins[pos+i] = alt[i:]
        # Deletion
        elif len(ref) > 1:
            for i in range(len(ref)):
                # if ref is longer than alt, these are deletion positions
                if i+1 > len(alt):
                    snps[pos+i] = '-'
                # if not, there may be mutations
                else:
                    if ref[i] != alt[i]:
                        snps[pos+i] = alt[i] if alt[i] != '.' else 'N'  # '.' = no-call
        # Insertion
        elif len(alt) > 1:
            ins[pos] = alt
        # No indel
        else:
            snps[pos] = alt

    # Parses a 'bad' (hetero or no-call) call depending on what it is.
    # `gen` is this sample's genotype string (e.g. '0/1' or './.'); it is passed
    # explicitly rather than read from the enclosing loop's variable.
    # `ins` is unused but kept for signature symmetry with parseCall.
    def parseBadCall(snps, ins, pos, ref, ALT, gen):
        # Deletion
        #     REF     ALT     Seq1    Seq2    Seq3
        #     GCC     G       1/1     0/1     ./.
        # Seq1 (processed by parseCall, above) will become 'G--'
        # Seq2 will become 'GNN'
        # Seq3 will become 'GNN'
        if len(ref) > 1:
            # Deleted part becomes Ns
            if gen[0] == '0' or gen[0] == '.':
                if gen[0] == '0':  # if het, get first bp
                    alt = str(ALT[int(gen[2])-1])
                else:  # if no-call, there is no alt, so just put Ns after 1st ref base
                    alt = ref[0]
                for i in range(len(ref)):
                    # if ref is longer than alt, these are deletion positions
                    if i+1 > len(alt):
                        snps[pos+i] = 'N'
                    # if not, there may be mutations
                    else:
                        if ref[i] != alt[i]:
                            snps[pos+i] = alt[i] if alt[i] != '.' else 'N'  # '.' = no-call
        # If not deletion, need to know call type
        # if het, see if proposed alt is 1bp mutation
        elif gen[0] == '0':
            alt = str(ALT[int(gen[2])-1])
            if len(alt) == 1:
                # alt = getAmbigCode(ref, alt)  # if want to allow ambig
                alt = 'N'  # if you want to disregard ambig
                snps[pos] = alt
            # else a het-call insertion, so ignore.
        # else it's a no-call; see if all alts have a length of 1
        # (meaning a simple 1bp mutation)
        elif len(ALT) == len("".join(ALT)):
            alt = 'N'
            snps[pos] = alt
        # else a no-call insertion, so ignore.

    # House code is *much* faster than pyvcf because we don't care about all info
    # about coverage, quality, counts, etc, which pyvcf goes to effort to parse
    # (and it's not easy as there's no standard ordering). Custom code can
    # completely ignore all of this.
    import gzip
    from Bio import SeqIO
    import numpy as np

    # Defensive defaults in case the file is malformed; the '#CHROM' header
    # line (handled below) overwrites all of these with the real columns.
    nsamp = 0
    posLoc = 0
    refLoc = 0
    altLoc = 0
    sampLoc = 9
    samps = []

    # Use different openers depending on whether compressed
    opn = gzip.open if vcf_file.endswith(('.gz', '.GZ')) else open

    with opn(vcf_file, mode='rt') as f:
        for line in f:
            if line[0] != '#':
                # actual data - most common so first in 'if-list'!
                dat = line.strip().split('\t')
                POS = int(dat[posLoc])
                REF = dat[refLoc]
                ALT = dat[altLoc].split(',')
                calls = np.array(dat[sampLoc:])

                # get samples that differ from Ref at this site
                recCalls = {}
                for sname, sa in zip(samps, calls):
                    if ':' in sa:  # if proper VCF file (followed by quality/coverage info)
                        gt = sa.split(':')[0]
                    else:          # if 'pseudo' VCF file (nextstrain output, or otherwise stripped)
                        gt = sa
                    if gt == '0' or gt == '1':  # for haploid calls in VCF
                        gt = '0/0' if gt == '0' else '1/1'

                    # ignore if ref call: '.' or '0/0', depending on VCF
                    if ('/' in gt and gt != '0/0') or ('|' in gt and gt != '0|0'):
                        recCalls[sname] = gt

                # store the position and the alt
                for seq, gen in recCalls.items():
                    ref = REF
                    pos = POS-1  # VCF numbering starts from 1, but Reference seq
                                 # numbering will be from 0 because it's python!

                    # Accepts only calls that are 1/1, 2/2 etc. Rejects hets and no-calls
                    if gen[0] != '0' and gen[2] != '0' and gen[0] != '.' and gen[2] != '.':
                        alt = str(ALT[int(gen[0])-1])  # get the index of the alternate
                        # defaultdict creates sequences[seq]/insertions[seq] on demand
                        parseCall(sequences[seq], insertions[seq], pos, ref, alt)
                    # If is heterozygote call (0/1) or no call (./.)
                    else:
                        # alt will differ here depending on het or no-call, must pass original
                        parseBadCall(sequences[seq], insertions[seq], pos, ref, ALT, gen)

            elif line[0] == '#' and line[1] == 'C':
                # header line ('#CHROM ...'), get all the information
                header = line.strip().split('\t')
                posLoc = header.index("POS")
                refLoc = header.index('REF')
                altLoc = header.index('ALT')
                sampLoc = header.index('FORMAT')+1
                samps = [x.strip() for x in header[sampLoc:]]  # ensure no leading/trailing spaces
                nsamp = len(samps)

            # else you are a comment line, ignore.

    # Gather all variable positions
    positions = set()
    for muts in sequences.values():
        positions.update(muts.keys())

    # One or more seqs are same as ref! (No non-ref calls) So haven't been 'seen' yet
    if nsamp > len(sequences):
        for s in set(samps).difference(sequences.keys()):
            sequences[s] = {}

    refSeq = SeqIO.read(ref_file, format='fasta')
    refSeq = refSeq.upper()  # convert to uppercase to avoid unknown chars later
    refSeqStr = str(refSeq.seq)

    compress_seq = {'reference': refSeqStr,
                    'sequences': sequences,
                    'insertions': insertions,
                    'positions': sorted(positions)}

    return compress_seq
constant[
Reads in a vcf/vcf.gz file and associated
reference sequence fasta (to which the VCF file is mapped).
Parses mutations, insertions, and deletions and stores them in a nested dict,
see 'returns' for the dict structure.
Calls with heterozygous values 0/1, 0/2, etc and no-calls (./.) are
replaced with Ns at the associated sites.
Positions are stored to correspond the location in the reference sequence
in Python (numbering is transformed to start at 0)
Parameters
----------
vcf_file : string
Path to the vcf or vcf.gz file to be read in
ref_file : string
Path to the fasta reference file to be read in
Returns
--------
compress_seq : nested dict
In the format: ::
{
'reference':'AGCTCGA..A',
'sequences': { 'seq1':{4:'A', 7:'-'}, 'seq2':{100:'C'} },
'insertions': { 'seq1':{4:'ATT'}, 'seq3':{1:'TT', 10:'CAG'} },
'positions': [1,4,7,10,100...]
}
references : string
String of the reference sequence read from the Fasta, to which
the variable sites are mapped
sequences : nested dict
Dict containing sequence names as keys which map to dicts
that have position as key and the single-base mutation (or deletion)
as values
insertions : nested dict
Dict in the same format as the above, which stores insertions and their
locations. The first base of the insertion is the same as whatever is
currently in that position (Ref if no mutation, mutation in 'sequences'
otherwise), so the current base can be directly replaced by the bases held here.
positions : list
Python list of all positions with a mutation, insertion, or deletion.
]
variable[sequences] assign[=] call[name[defaultdict], parameter[name[dict]]]
variable[insertions] assign[=] call[name[defaultdict], parameter[name[dict]]]
def function[getAmbigCode, parameter[bp1, bp2, bp3]]:
variable[bps] assign[=] list[[<ast.Name object at 0x7da1b02bb010>, <ast.Name object at 0x7da1b02bbd90>, <ast.Name object at 0x7da1b02bb400>]]
call[name[bps].sort, parameter[]]
variable[key] assign[=] call[constant[].join, parameter[name[bps]]]
return[call[dictionary[[<ast.Constant object at 0x7da1b02b8250>, <ast.Constant object at 0x7da1b02bb610>, <ast.Constant object at 0x7da1b02b8130>, <ast.Constant object at 0x7da1b02bb430>, <ast.Constant object at 0x7da1b02ba2c0>, <ast.Constant object at 0x7da1b02b9090>, <ast.Constant object at 0x7da1b02b9420>, <ast.Constant object at 0x7da1b02b96f0>, <ast.Constant object at 0x7da1b02bb280>, <ast.Constant object at 0x7da1b02b99f0>], [<ast.Constant object at 0x7da1b02bae90>, <ast.Constant object at 0x7da1b02b95d0>, <ast.Constant object at 0x7da1b02bb9d0>, <ast.Constant object at 0x7da1b02b8970>, <ast.Constant object at 0x7da1b02baa70>, <ast.Constant object at 0x7da1b02b9690>, <ast.Constant object at 0x7da1b02bab00>, <ast.Constant object at 0x7da1b02b9a50>, <ast.Constant object at 0x7da1b02b8460>, <ast.Constant object at 0x7da1b02b8100>]]][name[key]]]
def function[parseCall, parameter[snps, ins, pos, ref, alt]]:
if <ast.BoolOp object at 0x7da1b02bbfd0> begin[:]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[ref]]]]]] begin[:]
if compare[call[name[ref]][name[i]] not_equal[!=] call[name[alt]][name[i]]] begin[:]
call[name[snps]][binary_operation[name[pos] + name[i]]] assign[=] <ast.IfExp object at 0x7da1b02ba800>
if compare[binary_operation[name[i] + constant[1]] greater_or_equal[>=] call[name[len], parameter[name[ref]]]] begin[:]
call[name[ins]][binary_operation[name[pos] + name[i]]] assign[=] call[name[alt]][<ast.Slice object at 0x7da1b02bb9a0>]
def function[parseBadCall, parameter[snps, ins, pos, ref, ALT]]:
if compare[call[name[len], parameter[name[ref]]] greater[>] constant[1]] begin[:]
if <ast.BoolOp object at 0x7da1b02bb2b0> begin[:]
if compare[call[name[gen]][constant[0]] equal[==] constant[0]] begin[:]
variable[alt] assign[=] call[name[str], parameter[call[name[ALT]][binary_operation[call[name[int], parameter[call[name[gen]][constant[2]]]] - constant[1]]]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[ref]]]]]] begin[:]
if compare[binary_operation[name[i] + constant[1]] greater[>] call[name[len], parameter[name[alt]]]] begin[:]
call[name[snps]][binary_operation[name[pos] + name[i]]] assign[=] constant[N]
import module[gzip]
from relative_module[Bio] import module[SeqIO]
import module[numpy] as alias[np]
variable[nsamp] assign[=] constant[0]
variable[posLoc] assign[=] constant[0]
variable[refLoc] assign[=] constant[0]
variable[altLoc] assign[=] constant[0]
variable[sampLoc] assign[=] constant[9]
variable[opn] assign[=] <ast.IfExp object at 0x7da2054a7ee0>
with call[name[opn], parameter[name[vcf_file]]] begin[:]
for taget[name[line]] in starred[name[f]] begin[:]
if compare[call[name[line]][constant[0]] not_equal[!=] constant[#]] begin[:]
variable[line] assign[=] call[name[line].strip, parameter[]]
variable[dat] assign[=] call[name[line].split, parameter[constant[ ]]]
variable[POS] assign[=] call[name[int], parameter[call[name[dat]][name[posLoc]]]]
variable[REF] assign[=] call[name[dat]][name[refLoc]]
variable[ALT] assign[=] call[call[name[dat]][name[altLoc]].split, parameter[constant[,]]]
variable[calls] assign[=] call[name[np].array, parameter[call[name[dat]][<ast.Slice object at 0x7da2054a6290>]]]
variable[recCalls] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da2054a7f40>, <ast.Name object at 0x7da2054a56c0>]]] in starred[call[name[zip], parameter[name[samps], name[calls]]]] begin[:]
if compare[constant[:] in name[sa]] begin[:]
variable[gt] assign[=] call[call[name[sa].split, parameter[constant[:]]]][constant[0]]
if <ast.BoolOp object at 0x7da2054a4df0> begin[:]
variable[gt] assign[=] <ast.IfExp object at 0x7da2054a5990>
if <ast.BoolOp object at 0x7da2054a4fa0> begin[:]
call[name[recCalls]][name[sname]] assign[=] name[gt]
for taget[tuple[[<ast.Name object at 0x7da2054a71f0>, <ast.Name object at 0x7da2054a7a60>]]] in starred[call[name[recCalls].items, parameter[]]] begin[:]
variable[ref] assign[=] name[REF]
variable[pos] assign[=] binary_operation[name[POS] - constant[1]]
if <ast.BoolOp object at 0x7da2054a7910> begin[:]
variable[alt] assign[=] call[name[str], parameter[call[name[ALT]][binary_operation[call[name[int], parameter[call[name[gen]][constant[0]]]] - constant[1]]]]]
if compare[name[seq] <ast.NotIn object at 0x7da2590d7190> call[name[sequences].keys, parameter[]]] begin[:]
call[name[sequences]][name[seq]] assign[=] dictionary[[], []]
call[name[parseCall], parameter[call[name[sequences]][name[seq]], call[name[insertions]][name[seq]], name[pos], name[ref], name[alt]]]
variable[positions] assign[=] call[name[set], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20e961cf0>, <ast.Name object at 0x7da20e961630>]]] in starred[call[name[sequences].items, parameter[]]] begin[:]
call[name[positions].update, parameter[call[name[muts].keys, parameter[]]]]
if compare[name[nsamp] greater[>] call[name[len], parameter[name[sequences]]]] begin[:]
variable[missings] assign[=] call[call[name[set], parameter[name[samps]]].difference, parameter[call[name[sequences].keys, parameter[]]]]
for taget[name[s]] in starred[name[missings]] begin[:]
call[name[sequences]][name[s]] assign[=] dictionary[[], []]
variable[refSeq] assign[=] call[name[SeqIO].read, parameter[name[ref_file]]]
variable[refSeq] assign[=] call[name[refSeq].upper, parameter[]]
variable[refSeqStr] assign[=] call[name[str], parameter[name[refSeq].seq]]
variable[compress_seq] assign[=] dictionary[[<ast.Constant object at 0x7da20e962020>, <ast.Constant object at 0x7da20e962710>, <ast.Constant object at 0x7da20e963370>, <ast.Constant object at 0x7da20e963ee0>], [<ast.Name object at 0x7da20e960190>, <ast.Name object at 0x7da20e962530>, <ast.Name object at 0x7da20e9639d0>, <ast.Call object at 0x7da20e963df0>]]
return[name[compress_seq]] | keyword[def] identifier[read_vcf] ( identifier[vcf_file] , identifier[ref_file] ):
literal[string]
identifier[sequences] = identifier[defaultdict] ( identifier[dict] )
identifier[insertions] = identifier[defaultdict] ( identifier[dict] )
keyword[def] identifier[getAmbigCode] ( identifier[bp1] , identifier[bp2] , identifier[bp3] = literal[string] ):
identifier[bps] =[ identifier[bp1] , identifier[bp2] , identifier[bp3] ]
identifier[bps] . identifier[sort] ()
identifier[key] = literal[string] . identifier[join] ( identifier[bps] )
keyword[return] {
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
}[ identifier[key] ]
keyword[def] identifier[parseCall] ( identifier[snps] , identifier[ins] , identifier[pos] , identifier[ref] , identifier[alt] ):
keyword[if] identifier[len] ( identifier[ref] )> literal[int] keyword[and] identifier[len] ( identifier[alt] )> identifier[len] ( identifier[ref] ):
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[ref] )):
keyword[if] identifier[ref] [ identifier[i] ]!= identifier[alt] [ identifier[i] ]:
identifier[snps] [ identifier[pos] + identifier[i] ]= identifier[alt] [ identifier[i] ] keyword[if] identifier[alt] [ identifier[i] ]!= literal[string] keyword[else] literal[string]
keyword[if] ( identifier[i] + literal[int] )>= identifier[len] ( identifier[ref] ):
identifier[ins] [ identifier[pos] + identifier[i] ]= identifier[alt] [ identifier[i] :]
keyword[elif] identifier[len] ( identifier[ref] )> literal[int] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[ref] )):
keyword[if] identifier[i] + literal[int] > identifier[len] ( identifier[alt] ):
identifier[snps] [ identifier[pos] + identifier[i] ]= literal[string]
keyword[else] :
keyword[if] identifier[ref] [ identifier[i] ]!= identifier[alt] [ identifier[i] ]:
identifier[snps] [ identifier[pos] + identifier[i] ]= identifier[alt] [ identifier[i] ] keyword[if] identifier[alt] [ identifier[i] ]!= literal[string] keyword[else] literal[string]
keyword[elif] identifier[len] ( identifier[alt] )> literal[int] :
identifier[ins] [ identifier[pos] ]= identifier[alt]
keyword[else] :
identifier[snps] [ identifier[pos] ]= identifier[alt]
keyword[def] identifier[parseBadCall] ( identifier[snps] , identifier[ins] , identifier[pos] , identifier[ref] , identifier[ALT] ):
keyword[if] identifier[len] ( identifier[ref] )> literal[int] :
keyword[if] identifier[gen] [ literal[int] ]== literal[string] keyword[or] identifier[gen] [ literal[int] ]== literal[string] :
keyword[if] identifier[gen] [ literal[int] ]== literal[string] :
identifier[alt] = identifier[str] ( identifier[ALT] [ identifier[int] ( identifier[gen] [ literal[int] ])- literal[int] ])
keyword[else] :
identifier[alt] = identifier[ref] [ literal[int] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[ref] )):
keyword[if] identifier[i] + literal[int] > identifier[len] ( identifier[alt] ):
identifier[snps] [ identifier[pos] + identifier[i] ]= literal[string]
keyword[else] :
keyword[if] identifier[ref] [ identifier[i] ]!= identifier[alt] [ identifier[i] ]:
identifier[snps] [ identifier[pos] + identifier[i] ]= identifier[alt] [ identifier[i] ] keyword[if] identifier[alt] [ identifier[i] ]!= literal[string] keyword[else] literal[string]
keyword[elif] identifier[gen] [ literal[int] ]== literal[string] :
identifier[alt] = identifier[str] ( identifier[ALT] [ identifier[int] ( identifier[gen] [ literal[int] ])- literal[int] ])
keyword[if] identifier[len] ( identifier[alt] )== literal[int] :
identifier[alt] = literal[string]
identifier[snps] [ identifier[pos] ]= identifier[alt]
keyword[elif] identifier[len] ( identifier[ALT] )== identifier[len] ( literal[string] . identifier[join] ( identifier[ALT] )):
identifier[alt] = literal[string]
identifier[snps] [ identifier[pos] ]= identifier[alt]
keyword[import] identifier[gzip]
keyword[from] identifier[Bio] keyword[import] identifier[SeqIO]
keyword[import] identifier[numpy] keyword[as] identifier[np]
identifier[nsamp] = literal[int]
identifier[posLoc] = literal[int]
identifier[refLoc] = literal[int]
identifier[altLoc] = literal[int]
identifier[sampLoc] = literal[int]
identifier[opn] = identifier[gzip] . identifier[open] keyword[if] identifier[vcf_file] . identifier[endswith] (( literal[string] , literal[string] )) keyword[else] identifier[open]
keyword[with] identifier[opn] ( identifier[vcf_file] , identifier[mode] = literal[string] ) keyword[as] identifier[f] :
keyword[for] identifier[line] keyword[in] identifier[f] :
keyword[if] identifier[line] [ literal[int] ]!= literal[string] :
identifier[line] = identifier[line] . identifier[strip] ()
identifier[dat] = identifier[line] . identifier[split] ( literal[string] )
identifier[POS] = identifier[int] ( identifier[dat] [ identifier[posLoc] ])
identifier[REF] = identifier[dat] [ identifier[refLoc] ]
identifier[ALT] = identifier[dat] [ identifier[altLoc] ]. identifier[split] ( literal[string] )
identifier[calls] = identifier[np] . identifier[array] ( identifier[dat] [ identifier[sampLoc] :])
identifier[recCalls] ={}
keyword[for] identifier[sname] , identifier[sa] keyword[in] identifier[zip] ( identifier[samps] , identifier[calls] ):
keyword[if] literal[string] keyword[in] identifier[sa] :
identifier[gt] = identifier[sa] . identifier[split] ( literal[string] )[ literal[int] ]
keyword[else] :
identifier[gt] = identifier[sa]
keyword[if] identifier[gt] == literal[string] keyword[or] identifier[gt] == literal[string] :
identifier[gt] = literal[string] keyword[if] identifier[gt] == literal[string] keyword[else] literal[string]
keyword[if] ( literal[string] keyword[in] identifier[gt] keyword[and] identifier[gt] != literal[string] ) keyword[or] ( literal[string] keyword[in] identifier[gt] keyword[and] identifier[gt] != literal[string] ):
identifier[recCalls] [ identifier[sname] ]= identifier[gt]
keyword[for] identifier[seq] , identifier[gen] keyword[in] identifier[recCalls] . identifier[items] ():
identifier[ref] = identifier[REF]
identifier[pos] = identifier[POS] - literal[int]
keyword[if] identifier[gen] [ literal[int] ]!= literal[string] keyword[and] identifier[gen] [ literal[int] ]!= literal[string] keyword[and] identifier[gen] [ literal[int] ]!= literal[string] keyword[and] identifier[gen] [ literal[int] ]!= literal[string] :
identifier[alt] = identifier[str] ( identifier[ALT] [ identifier[int] ( identifier[gen] [ literal[int] ])- literal[int] ])
keyword[if] identifier[seq] keyword[not] keyword[in] identifier[sequences] . identifier[keys] ():
identifier[sequences] [ identifier[seq] ]={}
identifier[parseCall] ( identifier[sequences] [ identifier[seq] ], identifier[insertions] [ identifier[seq] ], identifier[pos] , identifier[ref] , identifier[alt] )
keyword[else] :
identifier[parseBadCall] ( identifier[sequences] [ identifier[seq] ], identifier[insertions] [ identifier[seq] ], identifier[pos] , identifier[ref] , identifier[ALT] )
keyword[elif] identifier[line] [ literal[int] ]== literal[string] keyword[and] identifier[line] [ literal[int] ]== literal[string] :
identifier[header] = identifier[line] . identifier[strip] (). identifier[split] ( literal[string] )
identifier[posLoc] = identifier[header] . identifier[index] ( literal[string] )
identifier[refLoc] = identifier[header] . identifier[index] ( literal[string] )
identifier[altLoc] = identifier[header] . identifier[index] ( literal[string] )
identifier[sampLoc] = identifier[header] . identifier[index] ( literal[string] )+ literal[int]
identifier[samps] = identifier[header] [ identifier[sampLoc] :]
identifier[samps] =[ identifier[x] . identifier[strip] () keyword[for] identifier[x] keyword[in] identifier[samps] ]
identifier[nsamp] = identifier[len] ( identifier[samps] )
identifier[positions] = identifier[set] ()
keyword[for] identifier[seq] , identifier[muts] keyword[in] identifier[sequences] . identifier[items] ():
identifier[positions] . identifier[update] ( identifier[muts] . identifier[keys] ())
keyword[if] identifier[nsamp] > identifier[len] ( identifier[sequences] ):
identifier[missings] = identifier[set] ( identifier[samps] ). identifier[difference] ( identifier[sequences] . identifier[keys] ())
keyword[for] identifier[s] keyword[in] identifier[missings] :
identifier[sequences] [ identifier[s] ]={}
identifier[refSeq] = identifier[SeqIO] . identifier[read] ( identifier[ref_file] , identifier[format] = literal[string] )
identifier[refSeq] = identifier[refSeq] . identifier[upper] ()
identifier[refSeqStr] = identifier[str] ( identifier[refSeq] . identifier[seq] )
identifier[compress_seq] ={ literal[string] : identifier[refSeqStr] ,
literal[string] : identifier[sequences] ,
literal[string] : identifier[insertions] ,
literal[string] : identifier[sorted] ( identifier[positions] )}
keyword[return] identifier[compress_seq] | def read_vcf(vcf_file, ref_file):
"""
Reads in a vcf/vcf.gz file and associated
reference sequence fasta (to which the VCF file is mapped).
Parses mutations, insertions, and deletions and stores them in a nested dict,
see 'returns' for the dict structure.
Calls with heterozygous values 0/1, 0/2, etc and no-calls (./.) are
replaced with Ns at the associated sites.
Positions are stored to correspond the location in the reference sequence
in Python (numbering is transformed to start at 0)
Parameters
----------
vcf_file : string
Path to the vcf or vcf.gz file to be read in
ref_file : string
Path to the fasta reference file to be read in
Returns
--------
compress_seq : nested dict
In the format: ::
{
'reference':'AGCTCGA..A',
'sequences': { 'seq1':{4:'A', 7:'-'}, 'seq2':{100:'C'} },
'insertions': { 'seq1':{4:'ATT'}, 'seq3':{1:'TT', 10:'CAG'} },
'positions': [1,4,7,10,100...]
}
references : string
String of the reference sequence read from the Fasta, to which
the variable sites are mapped
sequences : nested dict
Dict containing sequence names as keys which map to dicts
that have position as key and the single-base mutation (or deletion)
as values
insertions : nested dict
Dict in the same format as the above, which stores insertions and their
locations. The first base of the insertion is the same as whatever is
currently in that position (Ref if no mutation, mutation in 'sequences'
otherwise), so the current base can be directly replaced by the bases held here.
positions : list
Python list of all positions with a mutation, insertion, or deletion.
""" #Programming Note:
# Note on VCF Format
# -------------------
# 'Insertion where there are also deletions' (special handling)
# Ex:
# REF ALT Seq1 Seq2
# GC GCC,G 1/1 2/2
# Insertions formatted differently - don't know how many bp match
# the Ref (unlike simple insert below). Could be mutations, also.
# 'Deletion'
# Ex:
# REF ALT
# GC G
# Alt does not have to be 1 bp - any length shorter than Ref.
# 'Insertion'
# Ex:
# REF ALT
# A ATT
# First base always matches Ref.
# 'No indel'
# Ex:
# REF ALT
# A G
#define here, so that all sub-functions can access them
sequences = defaultdict(dict)
insertions = defaultdict(dict) #Currently not used, but kept in case of future use.
#TreeTime handles 2-3 base ambig codes, this will allow that.
def getAmbigCode(bp1, bp2, bp3=''):
bps = [bp1, bp2, bp3]
bps.sort()
key = ''.join(bps)
return {'CT': 'Y', 'AG': 'R', 'AT': 'W', 'CG': 'S', 'GT': 'K', 'AC': 'M', 'AGT': 'D', 'ACG': 'V', 'ACT': 'H', 'CGT': 'B'}[key] #Parses a 'normal' (not hetero or no-call) call depending if insertion+deletion, insertion,
#deletion, or single bp subsitution
def parseCall(snps, ins, pos, ref, alt): #Insertion where there are also deletions (special handling)
if len(ref) > 1 and len(alt) > len(ref):
for i in range(len(ref)): #if the pos doesn't match, store in sequences
if ref[i] != alt[i]:
snps[pos + i] = alt[i] if alt[i] != '.' else 'N' #'.' = no-call # depends on [control=['if'], data=[]] #if about to run out of ref, store rest:
if i + 1 >= len(ref):
ins[pos + i] = alt[i:] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]] #Deletion
elif len(ref) > 1:
for i in range(len(ref)): #if ref is longer than alt, these are deletion positions
if i + 1 > len(alt):
snps[pos + i] = '-' # depends on [control=['if'], data=[]] #if not, there may be mutations
elif ref[i] != alt[i]:
snps[pos + i] = alt[i] if alt[i] != '.' else 'N' #'.' = no-call # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]] #Insertion
elif len(alt) > 1:
ins[pos] = alt # depends on [control=['if'], data=[]]
else: #No indel
snps[pos] = alt #Parses a 'bad' (hetero or no-call) call depending on what it is
def parseBadCall(snps, ins, pos, ref, ALT): #Deletion
# REF ALT Seq1 Seq2 Seq3
# GCC G 1/1 0/1 ./.
# Seq1 (processed by parseCall, above) will become 'G--'
# Seq2 will become 'GNN'
# Seq3 will become 'GNN'
if len(ref) > 1: #Deleted part becomes Ns
if gen[0] == '0' or gen[0] == '.':
if gen[0] == '0': #if het, get first bp
alt = str(ALT[int(gen[2]) - 1]) # depends on [control=['if'], data=[]]
else: #if no-call, there is no alt, so just put Ns after 1st ref base
alt = ref[0]
for i in range(len(ref)): #if ref is longer than alt, these are deletion positions
if i + 1 > len(alt):
snps[pos + i] = 'N' # depends on [control=['if'], data=[]] #if not, there may be mutations
elif ref[i] != alt[i]:
snps[pos + i] = alt[i] if alt[i] != '.' else 'N' #'.' = no-call # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] #If not deletion, need to know call type
#if het, see if proposed alt is 1bp mutation
elif gen[0] == '0':
alt = str(ALT[int(gen[2]) - 1])
if len(alt) == 1: #alt = getAmbigCode(ref,alt) #if want to allow ambig
alt = 'N' #if you want to disregard ambig
snps[pos] = alt # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] #else a het-call insertion, so ignore.
#else it's a no-call; see if all alts have a length of 1
#(meaning a simple 1bp mutation)
elif len(ALT) == len(''.join(ALT)):
alt = 'N'
snps[pos] = alt # depends on [control=['if'], data=[]] #else a no-call insertion, so ignore.
#House code is *much* faster than pyvcf because we don't care about all info
#about coverage, quality, counts, etc, which pyvcf goes to effort to parse
#(and it's not easy as there's no standard ordering). Custom code can completely
#ignore all of this.
import gzip
from Bio import SeqIO
import numpy as np
nsamp = 0
posLoc = 0
refLoc = 0
altLoc = 0
sampLoc = 9 #Use different openers depending on whether compressed
opn = gzip.open if vcf_file.endswith(('.gz', '.GZ')) else open
with opn(vcf_file, mode='rt') as f:
for line in f:
if line[0] != '#': #actual data - most common so first in 'if-list'!
line = line.strip()
dat = line.split('\t')
POS = int(dat[posLoc])
REF = dat[refLoc]
ALT = dat[altLoc].split(',')
calls = np.array(dat[sampLoc:]) #get samples that differ from Ref at this site
recCalls = {}
for (sname, sa) in zip(samps, calls):
if ':' in sa: #if proper VCF file (followed by quality/coverage info)
gt = sa.split(':')[0] # depends on [control=['if'], data=['sa']]
else: #if 'pseudo' VCF file (nextstrain output, or otherwise stripped)
gt = sa
if gt == '0' or gt == '1': #for haploid calls in VCF
gt = '0/0' if gt == '0' else '1/1' # depends on [control=['if'], data=[]] #ignore if ref call: '.' or '0/0', depending on VCF
if '/' in gt and gt != '0/0' or ('|' in gt and gt != '0|0'):
recCalls[sname] = gt # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] #store the position and the alt
for (seq, gen) in recCalls.items():
ref = REF
pos = POS - 1 #VCF numbering starts from 1, but Reference seq numbering
#will be from 0 because it's python!
#Accepts only calls that are 1/1, 2/2 etc. Rejects hets and no-calls
if gen[0] != '0' and gen[2] != '0' and (gen[0] != '.') and (gen[2] != '.'):
alt = str(ALT[int(gen[0]) - 1]) #get the index of the alternate
if seq not in sequences.keys():
sequences[seq] = {} # depends on [control=['if'], data=['seq']]
parseCall(sequences[seq], insertions[seq], pos, ref, alt) # depends on [control=['if'], data=[]]
else: #If is heterozygote call (0/1) or no call (./.)
#alt will differ here depending on het or no-call, must pass original
parseBadCall(sequences[seq], insertions[seq], pos, ref, ALT) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif line[0] == '#' and line[1] == 'C': #header line, get all the information
header = line.strip().split('\t')
posLoc = header.index('POS')
refLoc = header.index('REF')
altLoc = header.index('ALT')
sampLoc = header.index('FORMAT') + 1
samps = header[sampLoc:]
samps = [x.strip() for x in samps] #ensure no leading/trailing spaces
nsamp = len(samps) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['f']] #else you are a comment line, ignore.
#Gather all variable positions
positions = set()
for (seq, muts) in sequences.items():
positions.update(muts.keys()) # depends on [control=['for'], data=[]] #One or more seqs are same as ref! (No non-ref calls) So haven't been 'seen' yet
if nsamp > len(sequences):
missings = set(samps).difference(sequences.keys())
for s in missings:
sequences[s] = {} # depends on [control=['for'], data=['s']] # depends on [control=['if'], data=[]]
refSeq = SeqIO.read(ref_file, format='fasta')
refSeq = refSeq.upper() #convert to uppercase to avoid unknown chars later
refSeqStr = str(refSeq.seq)
compress_seq = {'reference': refSeqStr, 'sequences': sequences, 'insertions': insertions, 'positions': sorted(positions)}
return compress_seq |
def verify(self, verify_key):
        """Check this MAR file's embedded signatures against its contents.

        Args:
            verify_key (str): PEM formatted public key
        Returns:
            True if every embedded signature verifies against the file data;
            False otherwise, including the case of an unsigned MAR file.
        """
        signatures = self.mardata.signatures
        if not signatures or not signatures.sigs:
            # Nothing to check: an unsigned file can never verify.
            return False
        # One (algorithm, signature, hasher) triple per embedded signature.
        hashers = [(sig.algorithm_id, sig.signature, make_hasher(sig.algorithm_id))
                   for sig in signatures.sigs]
        assert len(hashers) == len(signatures.sigs)
        # Feed every block of signed data into each hasher.
        for block in get_signature_data(self.fileobj, signatures.filesize):
            for _, _, hasher in hashers:
                hasher.update(block)
        # Every signature must verify; a single failure rejects the file.
        for _algo_id, sig, hasher in hashers:
            if not verify_signature(verify_key, sig, hasher.finalize(),
                                    hasher.algorithm.name):
                return False
        return True
constant[Verify that this MAR file has a valid signature.
Args:
verify_key (str): PEM formatted public key
Returns:
True if the MAR file's signature matches its contents
False otherwise; this includes cases where there is no signature.
]
if <ast.BoolOp object at 0x7da1b04d88b0> begin[:]
return[constant[False]]
variable[hashers] assign[=] list[[]]
for taget[name[sig]] in starred[name[self].mardata.signatures.sigs] begin[:]
call[name[hashers].append, parameter[tuple[[<ast.Attribute object at 0x7da20c76d720>, <ast.Attribute object at 0x7da20c76efe0>, <ast.Call object at 0x7da20c76dfc0>]]]]
assert[compare[call[name[len], parameter[name[hashers]]] equal[==] call[name[len], parameter[name[self].mardata.signatures.sigs]]]]
for taget[name[block]] in starred[call[name[get_signature_data], parameter[name[self].fileobj, name[self].mardata.signatures.filesize]]] begin[:]
<ast.ListComp object at 0x7da1b04c9cc0>
for taget[tuple[[<ast.Name object at 0x7da1b04c8430>, <ast.Name object at 0x7da1b04cb0a0>, <ast.Name object at 0x7da1b04ca6e0>]]] in starred[name[hashers]] begin[:]
if <ast.UnaryOp object at 0x7da1b04c9db0> begin[:]
return[constant[False]] | keyword[def] identifier[verify] ( identifier[self] , identifier[verify_key] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[mardata] . identifier[signatures] keyword[or] keyword[not] identifier[self] . identifier[mardata] . identifier[signatures] . identifier[sigs] :
keyword[return] keyword[False]
identifier[hashers] =[]
keyword[for] identifier[sig] keyword[in] identifier[self] . identifier[mardata] . identifier[signatures] . identifier[sigs] :
identifier[hashers] . identifier[append] (( identifier[sig] . identifier[algorithm_id] , identifier[sig] . identifier[signature] , identifier[make_hasher] ( identifier[sig] . identifier[algorithm_id] )))
keyword[assert] identifier[len] ( identifier[hashers] )== identifier[len] ( identifier[self] . identifier[mardata] . identifier[signatures] . identifier[sigs] )
keyword[for] identifier[block] keyword[in] identifier[get_signature_data] ( identifier[self] . identifier[fileobj] ,
identifier[self] . identifier[mardata] . identifier[signatures] . identifier[filesize] ):
[ identifier[h] . identifier[update] ( identifier[block] ) keyword[for] ( identifier[_] , identifier[_] , identifier[h] ) keyword[in] identifier[hashers] ]
keyword[for] identifier[algo_id] , identifier[sig] , identifier[h] keyword[in] identifier[hashers] :
keyword[if] keyword[not] identifier[verify_signature] ( identifier[verify_key] , identifier[sig] , identifier[h] . identifier[finalize] (), identifier[h] . identifier[algorithm] . identifier[name] ):
keyword[return] keyword[False]
keyword[else] :
keyword[return] keyword[True] | def verify(self, verify_key):
"""Verify that this MAR file has a valid signature.
Args:
verify_key (str): PEM formatted public key
Returns:
True if the MAR file's signature matches its contents
False otherwise; this includes cases where there is no signature.
"""
if not self.mardata.signatures or not self.mardata.signatures.sigs:
# This MAR file can't be verified since it has no signatures
return False # depends on [control=['if'], data=[]]
hashers = []
for sig in self.mardata.signatures.sigs:
hashers.append((sig.algorithm_id, sig.signature, make_hasher(sig.algorithm_id))) # depends on [control=['for'], data=['sig']]
assert len(hashers) == len(self.mardata.signatures.sigs)
for block in get_signature_data(self.fileobj, self.mardata.signatures.filesize):
[h.update(block) for (_, _, h) in hashers] # depends on [control=['for'], data=['block']]
for (algo_id, sig, h) in hashers:
if not verify_signature(verify_key, sig, h.finalize(), h.algorithm.name):
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
else:
return True |
def delete_user_contact_list(self, id, contact_list_id, **data):
        """
        DELETE /users/:id/contact_lists/:contact_list_id/

        Deletes the contact list. Returns ``{"deleted": true}``.

        :param id: ID of the user who owns the contact list.
        :param contact_list_id: ID of the contact list to delete.
        :param data: Extra keyword arguments forwarded as the request payload.
        """
        # BUG FIX: the format string previously used ``{0}`` for both fields,
        # so ``contact_list_id`` was ignored and the user id was repeated in
        # the URL path.
        return self.delete(
            "/users/{0}/contact_lists/{1}/".format(id, contact_list_id),
            data=data,
        )
constant[
DELETE /users/:id/contact_lists/:contact_list_id/
Deletes the contact list. Returns ``{"deleted": true}``.
]
return[call[name[self].delete, parameter[call[constant[/users/{0}/contact_lists/{0}/].format, parameter[name[id], name[contact_list_id]]]]]] | keyword[def] identifier[delete_user_contact_list] ( identifier[self] , identifier[id] , identifier[contact_list_id] ,** identifier[data] ):
literal[string]
keyword[return] identifier[self] . identifier[delete] ( literal[string] . identifier[format] ( identifier[id] , identifier[contact_list_id] ), identifier[data] = identifier[data] ) | def delete_user_contact_list(self, id, contact_list_id, **data):
"""
DELETE /users/:id/contact_lists/:contact_list_id/
Deletes the contact list. Returns ``{"deleted": true}``.
"""
return self.delete('/users/{0}/contact_lists/{0}/'.format(id, contact_list_id), data=data) |
def from_raw(self, rval: RawObject, jptr: JSONPointer = "") -> ObjectValue:
        """Override the superclass method.

        Cook the raw object *rval* into an :class:`ObjectValue`, resolving
        "@"-prefixed metadata members and delegating ordinary members to the
        matching schema data child. *jptr* is the JSON pointer of the object
        being cooked, used for error reporting and metadata paths.

        Raises:
            RawTypeError: if *rval* is not a dict.
            MissingAnnotationTarget: if an annotation refers to an absent member.
            RawMemberError: if a member has no corresponding schema child.
        """
        if not isinstance(rval, dict):
            raise RawTypeError(jptr, "object")
        res = ObjectValue()
        for qn in rval:
            if qn.startswith("@"):
                if qn != "@":
                    tgt = qn[1:]
                    if tgt not in rval:
                        raise MissingAnnotationTarget(jptr, tgt)
                    # BUG FIX: this used to append to ``jptr`` itself
                    # (``jptr += '/' + tgt``), so the accumulated pointer
                    # corrupted the paths of every member processed later
                    # in the loop. Use a local path instead.
                    mpath = jptr + "/" + tgt
                else:
                    # A bare "@" annotates the object itself.
                    mpath = jptr
                res[qn] = self._process_metadata(rval[qn], mpath)
            else:
                cn = self._iname2qname(qn)
                ch = self.get_data_child(*cn)
                npath = jptr + "/" + qn
                if ch is None:
                    raise RawMemberError(npath)
                res[ch.iname()] = ch.from_raw(rval[qn], npath)
        return res
constant[Override the superclass method.]
if <ast.UnaryOp object at 0x7da1b02e4be0> begin[:]
<ast.Raise object at 0x7da1b02e6dd0>
variable[res] assign[=] call[name[ObjectValue], parameter[]]
for taget[name[qn]] in starred[name[rval]] begin[:]
if call[name[qn].startswith, parameter[constant[@]]] begin[:]
if compare[name[qn] not_equal[!=] constant[@]] begin[:]
variable[tgt] assign[=] call[name[qn]][<ast.Slice object at 0x7da1b02e7040>]
if compare[name[tgt] <ast.NotIn object at 0x7da2590d7190> name[rval]] begin[:]
<ast.Raise object at 0x7da1b02e6cb0>
<ast.AugAssign object at 0x7da1b02e7880>
call[name[res]][name[qn]] assign[=] call[name[self]._process_metadata, parameter[call[name[rval]][name[qn]], name[jptr]]]
return[name[res]] | keyword[def] identifier[from_raw] ( identifier[self] , identifier[rval] : identifier[RawObject] , identifier[jptr] : identifier[JSONPointer] = literal[string] )-> identifier[ObjectValue] :
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[rval] , identifier[dict] ):
keyword[raise] identifier[RawTypeError] ( identifier[jptr] , literal[string] )
identifier[res] = identifier[ObjectValue] ()
keyword[for] identifier[qn] keyword[in] identifier[rval] :
keyword[if] identifier[qn] . identifier[startswith] ( literal[string] ):
keyword[if] identifier[qn] != literal[string] :
identifier[tgt] = identifier[qn] [ literal[int] :]
keyword[if] identifier[tgt] keyword[not] keyword[in] identifier[rval] :
keyword[raise] identifier[MissingAnnotationTarget] ( identifier[jptr] , identifier[tgt] )
identifier[jptr] += literal[string] + identifier[tgt]
identifier[res] [ identifier[qn] ]= identifier[self] . identifier[_process_metadata] ( identifier[rval] [ identifier[qn] ], identifier[jptr] )
keyword[else] :
identifier[cn] = identifier[self] . identifier[_iname2qname] ( identifier[qn] )
identifier[ch] = identifier[self] . identifier[get_data_child] (* identifier[cn] )
identifier[npath] = identifier[jptr] + literal[string] + identifier[qn]
keyword[if] identifier[ch] keyword[is] keyword[None] :
keyword[raise] identifier[RawMemberError] ( identifier[npath] )
identifier[res] [ identifier[ch] . identifier[iname] ()]= identifier[ch] . identifier[from_raw] ( identifier[rval] [ identifier[qn] ], identifier[npath] )
keyword[return] identifier[res] | def from_raw(self, rval: RawObject, jptr: JSONPointer='') -> ObjectValue:
"""Override the superclass method."""
if not isinstance(rval, dict):
raise RawTypeError(jptr, 'object') # depends on [control=['if'], data=[]]
res = ObjectValue()
for qn in rval:
if qn.startswith('@'):
if qn != '@':
tgt = qn[1:]
if tgt not in rval:
raise MissingAnnotationTarget(jptr, tgt) # depends on [control=['if'], data=['tgt']]
jptr += '/' + tgt # depends on [control=['if'], data=['qn']]
res[qn] = self._process_metadata(rval[qn], jptr) # depends on [control=['if'], data=[]]
else:
cn = self._iname2qname(qn)
ch = self.get_data_child(*cn)
npath = jptr + '/' + qn
if ch is None:
raise RawMemberError(npath) # depends on [control=['if'], data=[]]
res[ch.iname()] = ch.from_raw(rval[qn], npath) # depends on [control=['for'], data=['qn']]
return res |
def scaleField(self, scalingFactor):
        """
        Multiply the magnet's field strength by ``scalingFactor``.

        The adjustment is multiplicative, so ``scalingFactor = 1.0`` results
        in no change of the field.
        """
        current = self.field_strength
        # field_strength is an immutable record; _replace builds the updated copy.
        self.field_strength = current._replace(val=current.val * scalingFactor)
constant[
Adjust the field of the magnet by the value of ``scalingFactor``. The adjustment
is multiplicative, so a value of ``scalingFactor = 1.0`` will result in no change
of the field.
]
name[self].field_strength assign[=] call[name[self].field_strength._replace, parameter[]] | keyword[def] identifier[scaleField] ( identifier[self] , identifier[scalingFactor] ):
literal[string]
identifier[self] . identifier[field_strength] = identifier[self] . identifier[field_strength] . identifier[_replace] (
identifier[val] = identifier[self] . identifier[field_strength] . identifier[val] * identifier[scalingFactor]
) | def scaleField(self, scalingFactor):
"""
Adjust the field of the magnet by the value of ``scalingFactor``. The adjustment
is multiplicative, so a value of ``scalingFactor = 1.0`` will result in no change
of the field.
"""
self.field_strength = self.field_strength._replace(val=self.field_strength.val * scalingFactor) |
def ignite(self, args=None):
        """
        Prepare the virtualenv and launch the program:

        * Make the virtualenv (skipped inside a managed virtualenv)
        * Install dependencies into that virtualenv
        * Start the program!
        """
        if args is None:
            args = sys.argv[1:]
        managed = os.environ.get("MANAGED_VIRTUALENV", None) == "1"
        if not managed:
            freshly_made = self.make_virtualenv()
            # Always install deps for a fresh venv; otherwise only unless
            # the dependency check is explicitly disabled.
            deps_check_disabled = os.environ.get("VENV_STARTER_CHECK_DEPS", None) == "0"
            if freshly_made or not deps_check_disabled:
                self.install_deps()
        self.start_program(args)
constant[
* Make the virtualenv
* Install dependencies into that virtualenv
* Start the program!
]
if compare[name[args] is constant[None]] begin[:]
variable[args] assign[=] call[name[sys].argv][<ast.Slice object at 0x7da18bc72ef0>]
if compare[call[name[os].environ.get, parameter[constant[MANAGED_VIRTUALENV], constant[None]]] not_equal[!=] constant[1]] begin[:]
variable[made] assign[=] call[name[self].make_virtualenv, parameter[]]
if <ast.BoolOp object at 0x7da18bc70a90> begin[:]
call[name[self].install_deps, parameter[]]
call[name[self].start_program, parameter[name[args]]] | keyword[def] identifier[ignite] ( identifier[self] , identifier[args] = keyword[None] ):
literal[string]
keyword[if] identifier[args] keyword[is] keyword[None] :
identifier[args] = identifier[sys] . identifier[argv] [ literal[int] :]
keyword[if] identifier[os] . identifier[environ] . identifier[get] ( literal[string] , keyword[None] )!= literal[string] :
identifier[made] = identifier[self] . identifier[make_virtualenv] ()
keyword[if] identifier[made] keyword[or] identifier[os] . identifier[environ] . identifier[get] ( literal[string] , keyword[None] )!= literal[string] :
identifier[self] . identifier[install_deps] ()
identifier[self] . identifier[start_program] ( identifier[args] ) | def ignite(self, args=None):
"""
* Make the virtualenv
* Install dependencies into that virtualenv
* Start the program!
"""
if args is None:
args = sys.argv[1:] # depends on [control=['if'], data=['args']]
if os.environ.get('MANAGED_VIRTUALENV', None) != '1':
made = self.make_virtualenv()
if made or os.environ.get('VENV_STARTER_CHECK_DEPS', None) != '0':
self.install_deps() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
self.start_program(args) |
def plot_winds(self, ws, wd, wsmax, plot_range=None):
        """
        Plot wind speed, wind gusts and wind direction on a shared time axis.

        Required input:
            ws: Wind speeds (knots)
            wd: Wind direction (degrees)
            wsmax: Wind gust (knots)
        Optional Input:
            plot_range: Data range for making figure (list of (min,max,step))
        """
        # PLOT WIND SPEED AND WIND DIRECTION
        # NOTE(review): ``fig`` is not defined in this method -- presumably a
        # module-level Figure created elsewhere in the file; confirm.
        self.ax1 = fig.add_subplot(4, 1, 1)
        ln1 = self.ax1.plot(self.dates, ws, label='Wind Speed')
        # Shade the area under the wind-speed curve down to zero.
        self.ax1.fill_between(self.dates, ws, 0)
        self.ax1.set_xlim(self.start, self.end)
        if not plot_range:
            plot_range = [0, 20, 1]  # default (min, max, step) in knots
        self.ax1.set_ylabel('Wind Speed (knots)', multialignment='center')
        # NOTE(review): Axes.set_ylim has no "step" parameter; the third
        # positional argument is ``emit``, so plot_range[2] is not used as a
        # tick step here. Verify against the matplotlib version in use.
        self.ax1.set_ylim(plot_range[0], plot_range[1], plot_range[2])
        self.ax1.grid(b=True, which='major', axis='y', color='k', linestyle='--',
                      linewidth=0.5)
        ln2 = self.ax1.plot(self.dates, wsmax, '.r', label='3-sec Wind Speed Max')
        # Wind direction shares the x-axis on a twinned right-hand y-axis.
        ax7 = self.ax1.twinx()
        ln3 = ax7.plot(self.dates, wd, '.k', linewidth=0.5, label='Wind Direction')
        ax7.set_ylabel('Wind\nDirection\n(degrees)', multialignment='center')
        ax7.set_ylim(0, 360)
        # Compass labels centred on the four quadrants (45, 135, 225, 315).
        # NOTE(review): passing labels as the second positional argument of
        # set_yticks requires matplotlib >= 3.5; older versions raise.
        ax7.set_yticks(np.arange(45, 405, 90), ['NE', 'SE', 'SW', 'NW'])
        # Merge line handles from both axes into a single legend.
        lns = ln1 + ln2 + ln3
        labs = [l.get_label() for l in lns]
        ax7.xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
        ax7.legend(lns, labs, loc='upper center',
                   bbox_to_anchor=(0.5, 1.2), ncol=3, prop={'size': 12})
constant[
Required input:
ws: Wind speeds (knots)
wd: Wind direction (degrees)
wsmax: Wind gust (knots)
Optional Input:
plot_range: Data range for making figure (list of (min,max,step))
]
name[self].ax1 assign[=] call[name[fig].add_subplot, parameter[constant[4], constant[1], constant[1]]]
variable[ln1] assign[=] call[name[self].ax1.plot, parameter[name[self].dates, name[ws]]]
call[name[self].ax1.fill_between, parameter[name[self].dates, name[ws], constant[0]]]
call[name[self].ax1.set_xlim, parameter[name[self].start, name[self].end]]
if <ast.UnaryOp object at 0x7da1b1d5e200> begin[:]
variable[plot_range] assign[=] list[[<ast.Constant object at 0x7da1b1d5e080>, <ast.Constant object at 0x7da1b1d5dff0>, <ast.Constant object at 0x7da1b1d5dfc0>]]
call[name[self].ax1.set_ylabel, parameter[constant[Wind Speed (knots)]]]
call[name[self].ax1.set_ylim, parameter[call[name[plot_range]][constant[0]], call[name[plot_range]][constant[1]], call[name[plot_range]][constant[2]]]]
call[name[self].ax1.grid, parameter[]]
variable[ln2] assign[=] call[name[self].ax1.plot, parameter[name[self].dates, name[wsmax], constant[.r]]]
variable[ax7] assign[=] call[name[self].ax1.twinx, parameter[]]
variable[ln3] assign[=] call[name[ax7].plot, parameter[name[self].dates, name[wd], constant[.k]]]
call[name[ax7].set_ylabel, parameter[constant[Wind
Direction
(degrees)]]]
call[name[ax7].set_ylim, parameter[constant[0], constant[360]]]
call[name[ax7].set_yticks, parameter[call[name[np].arange, parameter[constant[45], constant[405], constant[90]]], list[[<ast.Constant object at 0x7da1b22baa40>, <ast.Constant object at 0x7da1b22bb130>, <ast.Constant object at 0x7da1b22bb910>, <ast.Constant object at 0x7da1b22b88e0>]]]]
variable[lns] assign[=] binary_operation[binary_operation[name[ln1] + name[ln2]] + name[ln3]]
variable[labs] assign[=] <ast.ListComp object at 0x7da1b22bb190>
call[name[ax7].xaxis.set_major_formatter, parameter[call[name[mpl].dates.DateFormatter, parameter[constant[%d/%H UTC]]]]]
call[name[ax7].legend, parameter[name[lns], name[labs]]] | keyword[def] identifier[plot_winds] ( identifier[self] , identifier[ws] , identifier[wd] , identifier[wsmax] , identifier[plot_range] = keyword[None] ):
literal[string]
identifier[self] . identifier[ax1] = identifier[fig] . identifier[add_subplot] ( literal[int] , literal[int] , literal[int] )
identifier[ln1] = identifier[self] . identifier[ax1] . identifier[plot] ( identifier[self] . identifier[dates] , identifier[ws] , identifier[label] = literal[string] )
identifier[self] . identifier[ax1] . identifier[fill_between] ( identifier[self] . identifier[dates] , identifier[ws] , literal[int] )
identifier[self] . identifier[ax1] . identifier[set_xlim] ( identifier[self] . identifier[start] , identifier[self] . identifier[end] )
keyword[if] keyword[not] identifier[plot_range] :
identifier[plot_range] =[ literal[int] , literal[int] , literal[int] ]
identifier[self] . identifier[ax1] . identifier[set_ylabel] ( literal[string] , identifier[multialignment] = literal[string] )
identifier[self] . identifier[ax1] . identifier[set_ylim] ( identifier[plot_range] [ literal[int] ], identifier[plot_range] [ literal[int] ], identifier[plot_range] [ literal[int] ])
identifier[self] . identifier[ax1] . identifier[grid] ( identifier[b] = keyword[True] , identifier[which] = literal[string] , identifier[axis] = literal[string] , identifier[color] = literal[string] , identifier[linestyle] = literal[string] ,
identifier[linewidth] = literal[int] )
identifier[ln2] = identifier[self] . identifier[ax1] . identifier[plot] ( identifier[self] . identifier[dates] , identifier[wsmax] , literal[string] , identifier[label] = literal[string] )
identifier[ax7] = identifier[self] . identifier[ax1] . identifier[twinx] ()
identifier[ln3] = identifier[ax7] . identifier[plot] ( identifier[self] . identifier[dates] , identifier[wd] , literal[string] , identifier[linewidth] = literal[int] , identifier[label] = literal[string] )
identifier[ax7] . identifier[set_ylabel] ( literal[string] , identifier[multialignment] = literal[string] )
identifier[ax7] . identifier[set_ylim] ( literal[int] , literal[int] )
identifier[ax7] . identifier[set_yticks] ( identifier[np] . identifier[arange] ( literal[int] , literal[int] , literal[int] ),[ literal[string] , literal[string] , literal[string] , literal[string] ])
identifier[lns] = identifier[ln1] + identifier[ln2] + identifier[ln3]
identifier[labs] =[ identifier[l] . identifier[get_label] () keyword[for] identifier[l] keyword[in] identifier[lns] ]
identifier[ax7] . identifier[xaxis] . identifier[set_major_formatter] ( identifier[mpl] . identifier[dates] . identifier[DateFormatter] ( literal[string] ))
identifier[ax7] . identifier[legend] ( identifier[lns] , identifier[labs] , identifier[loc] = literal[string] ,
identifier[bbox_to_anchor] =( literal[int] , literal[int] ), identifier[ncol] = literal[int] , identifier[prop] ={ literal[string] : literal[int] }) | def plot_winds(self, ws, wd, wsmax, plot_range=None):
"""
Required input:
ws: Wind speeds (knots)
wd: Wind direction (degrees)
wsmax: Wind gust (knots)
Optional Input:
plot_range: Data range for making figure (list of (min,max,step))
"""
# PLOT WIND SPEED AND WIND DIRECTION
self.ax1 = fig.add_subplot(4, 1, 1)
ln1 = self.ax1.plot(self.dates, ws, label='Wind Speed')
self.ax1.fill_between(self.dates, ws, 0)
self.ax1.set_xlim(self.start, self.end)
if not plot_range:
plot_range = [0, 20, 1] # depends on [control=['if'], data=[]]
self.ax1.set_ylabel('Wind Speed (knots)', multialignment='center')
self.ax1.set_ylim(plot_range[0], plot_range[1], plot_range[2])
self.ax1.grid(b=True, which='major', axis='y', color='k', linestyle='--', linewidth=0.5)
ln2 = self.ax1.plot(self.dates, wsmax, '.r', label='3-sec Wind Speed Max')
ax7 = self.ax1.twinx()
ln3 = ax7.plot(self.dates, wd, '.k', linewidth=0.5, label='Wind Direction')
ax7.set_ylabel('Wind\nDirection\n(degrees)', multialignment='center')
ax7.set_ylim(0, 360)
ax7.set_yticks(np.arange(45, 405, 90), ['NE', 'SE', 'SW', 'NW'])
lns = ln1 + ln2 + ln3
labs = [l.get_label() for l in lns]
ax7.xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
ax7.legend(lns, labs, loc='upper center', bbox_to_anchor=(0.5, 1.2), ncol=3, prop={'size': 12}) |
def getPayloadStruct(self, attributes, objType):
        """ Function getPayloadStruct
        Build the payload structure used for a creation or a modification.

        @param attributes: The data
        @param objType: SubItem type (e.g: hostgroup for hostgroup_class)
        @return RETURN: the payload
        """
        classKey = objType + "_class"
        payload = {self.payloadObj: attributes}
        # The same attribute mapping is nested under the "<type>_class" key.
        payload[classKey] = {self.payloadObj: attributes}
        return payload
constant[ Function getPayloadStruct
Get the payload structure to do a creation or a modification
@param attribute: The data
@param objType: SubItem type (e.g: hostgroup for hostgroup_class)
@return RETURN: the payload
]
variable[payload] assign[=] dictionary[[<ast.Attribute object at 0x7da1b1122ad0>, <ast.BinOp object at 0x7da1b1123a30>], [<ast.Name object at 0x7da1b11212a0>, <ast.Dict object at 0x7da1b1123fa0>]]
return[name[payload]] | keyword[def] identifier[getPayloadStruct] ( identifier[self] , identifier[attributes] , identifier[objType] ):
literal[string]
identifier[payload] ={ identifier[self] . identifier[payloadObj] : identifier[attributes] ,
identifier[objType] + literal[string] :
{ identifier[self] . identifier[payloadObj] : identifier[attributes] }}
keyword[return] identifier[payload] | def getPayloadStruct(self, attributes, objType):
""" Function getPayloadStruct
Get the payload structure to do a creation or a modification
@param attribute: The data
@param objType: SubItem type (e.g: hostgroup for hostgroup_class)
@return RETURN: the payload
"""
payload = {self.payloadObj: attributes, objType + '_class': {self.payloadObj: attributes}}
return payload |
def paddingsize(self, namedstruct):
        '''
        Return the size of the padded struct (including the "real" size and the padding bytes)
        :param namedstruct: a NamedStruct object of this type.
        :returns: size including both data and padding.
        '''
        # Delegate to the base type when this type merely extends another.
        if self.base is not None:
            return self.base.paddingsize(namedstruct)
        data_size = namedstruct._realsize()
        # Round the real size up to the next multiple of the padding unit.
        padded_units = (data_size + self.padding - 1) // self.padding
        return padded_units * self.padding
constant[
Return the size of the padded struct (including the "real" size and the padding bytes)
:param namedstruct: a NamedStruct object of this type.
:returns: size including both data and padding.
]
if compare[name[self].base is_not constant[None]] begin[:]
return[call[name[self].base.paddingsize, parameter[name[namedstruct]]]]
variable[realsize] assign[=] call[name[namedstruct]._realsize, parameter[]]
return[binary_operation[binary_operation[binary_operation[binary_operation[name[realsize] + name[self].padding] - constant[1]] <ast.FloorDiv object at 0x7da2590d6bc0> name[self].padding] * name[self].padding]] | keyword[def] identifier[paddingsize] ( identifier[self] , identifier[namedstruct] ):
literal[string]
keyword[if] identifier[self] . identifier[base] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[base] . identifier[paddingsize] ( identifier[namedstruct] )
identifier[realsize] = identifier[namedstruct] . identifier[_realsize] ()
keyword[return] ( identifier[realsize] + identifier[self] . identifier[padding] - literal[int] )// identifier[self] . identifier[padding] * identifier[self] . identifier[padding] | def paddingsize(self, namedstruct):
"""
Return the size of the padded struct (including the "real" size and the padding bytes)
:param namedstruct: a NamedStruct object of this type.
:returns: size including both data and padding.
"""
if self.base is not None:
return self.base.paddingsize(namedstruct) # depends on [control=['if'], data=[]]
realsize = namedstruct._realsize()
return (realsize + self.padding - 1) // self.padding * self.padding |
def filter_effect(self, analytes=None, stats=['mean', 'std'], filt=True):
        """
        Quantify the effects of the active filters.

        Parameters
        ----------
        analytes : str or list
            Which analytes to consider.
        stats : list
            Which statistics to calculate.
        filt : valid filter string or bool
            Which filter to consider. If True, applies all
            active filters.

        Returns
        -------
        pandas.DataFrame
            Contains statistics calculated for filtered and
            unfiltered data, and the filtered/unfiltered ratio.
        """
        if analytes is None:
            analytes = self.analytes
        if isinstance(analytes, str):
            analytes = [analytes]
        # calculate filtered and unfiltered stats
        # NOTE(review): sample_stats is called with a hard-coded analyte list
        # ['La139', 'Ti49'] rather than the ``analytes`` argument -- this looks
        # like a leftover from development; confirm whether it should pass
        # ``analytes`` instead.
        self.sample_stats(['La139', 'Ti49'], stats=stats, filt=False)
        suf = self.stats.copy()  # unfiltered stats, keyed by sample
        self.sample_stats(['La139', 'Ti49'], stats=stats, filt=filt)
        sf = self.stats.copy()  # filtered stats, keyed by sample
        # create dataframe for results: one unfiltered/filtered column pair
        # per calculated statistic; the second MultiIndex level (analyte
        # names) is filled in below.
        cols = []
        for s in self.stats_calced:
            cols += ['unfiltered_{:}'.format(s), 'filtered_{:}'.format(s)]
        comp = pd.DataFrame(index=self.samples,
                            columns=pd.MultiIndex.from_arrays([cols, [None] * len(cols)]))
        # collate stats
        for k, v in suf.items():
            vf = sf[k]
            for i, a in enumerate(v['analytes']):
                for s in self.stats_calced:
                    comp.loc[k, ('unfiltered_{:}'.format(s), a)] = v[s][i,0]
                    comp.loc[k, ('filtered_{:}'.format(s), a)] = vf[s][i,0]
        # drop rows/columns that are entirely NaN, then sort columns
        # NOTE(review): the positional axis/how arguments used here
        # (dropna(0, 'all'), sort_index(1), pd.concat(rats, 1)) are
        # deprecated or removed in recent pandas -- verify against the
        # pinned pandas version.
        comp.dropna(0, 'all', inplace=True)
        comp.dropna(1, 'all', inplace=True)
        comp.sort_index(1, inplace=True)
        # calculate filtered/unfiltered ratios
        rats = []
        for s in self.stats_calced:
            rat = comp.loc[:, 'filtered_{:}'.format(s)] / comp.loc[:, 'unfiltered_{:}'.format(s)]
            rat.columns = pd.MultiIndex.from_product([['{:}_ratio'.format(s)], rat.columns])
            rats.append(rat)
        # join it all up
        comp = comp.join(pd.concat(rats, 1))
        comp.sort_index(1, inplace=True)
        # return only the requested analytes (all statistic columns)
        return comp.loc[:, (pd.IndexSlice[:], pd.IndexSlice[analytes])]
constant[
Quantify the effects of the active filters.
Parameters
----------
analytes : str or list
Which analytes to consider.
stats : list
Which statistics to calculate.
file : valid filter string or bool
Which filter to consider. If True, applies all
active filters.
Returns
-------
pandas.DataFrame
Contains statistics calculated for filtered and
unfiltered data, and the filtered/unfiltered ratio.
]
if compare[name[analytes] is constant[None]] begin[:]
variable[analytes] assign[=] name[self].analytes
if call[name[isinstance], parameter[name[analytes], name[str]]] begin[:]
variable[analytes] assign[=] list[[<ast.Name object at 0x7da20c7955a0>]]
call[name[self].sample_stats, parameter[list[[<ast.Constant object at 0x7da20c795360>, <ast.Constant object at 0x7da20c7952d0>]]]]
variable[suf] assign[=] call[name[self].stats.copy, parameter[]]
call[name[self].sample_stats, parameter[list[[<ast.Constant object at 0x7da20c795d50>, <ast.Constant object at 0x7da20c796350>]]]]
variable[sf] assign[=] call[name[self].stats.copy, parameter[]]
variable[cols] assign[=] list[[]]
for taget[name[s]] in starred[name[self].stats_calced] begin[:]
<ast.AugAssign object at 0x7da20c795d20>
variable[comp] assign[=] call[name[pd].DataFrame, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20c7940a0>, <ast.Name object at 0x7da20c796a70>]]] in starred[call[name[suf].items, parameter[]]] begin[:]
variable[vf] assign[=] call[name[sf]][name[k]]
for taget[tuple[[<ast.Name object at 0x7da20c7950f0>, <ast.Name object at 0x7da20c795570>]]] in starred[call[name[enumerate], parameter[call[name[v]][constant[analytes]]]]] begin[:]
for taget[name[s]] in starred[name[self].stats_calced] begin[:]
call[name[comp].loc][tuple[[<ast.Name object at 0x7da20c794070>, <ast.Tuple object at 0x7da20c795de0>]]] assign[=] call[call[name[v]][name[s]]][tuple[[<ast.Name object at 0x7da20c795990>, <ast.Constant object at 0x7da20c7948b0>]]]
call[name[comp].loc][tuple[[<ast.Name object at 0x7da20c794bb0>, <ast.Tuple object at 0x7da20c796b30>]]] assign[=] call[call[name[vf]][name[s]]][tuple[[<ast.Name object at 0x7da20c796920>, <ast.Constant object at 0x7da20c794cd0>]]]
call[name[comp].dropna, parameter[constant[0], constant[all]]]
call[name[comp].dropna, parameter[constant[1], constant[all]]]
call[name[comp].sort_index, parameter[constant[1]]]
variable[rats] assign[=] list[[]]
for taget[name[s]] in starred[name[self].stats_calced] begin[:]
variable[rat] assign[=] binary_operation[call[name[comp].loc][tuple[[<ast.Slice object at 0x7da1b01b8670>, <ast.Call object at 0x7da1b01b85b0>]]] / call[name[comp].loc][tuple[[<ast.Slice object at 0x7da1b01b83a0>, <ast.Call object at 0x7da1b01b8220>]]]]
name[rat].columns assign[=] call[name[pd].MultiIndex.from_product, parameter[list[[<ast.List object at 0x7da1b01b9f30>, <ast.Attribute object at 0x7da1b01bbca0>]]]]
call[name[rats].append, parameter[name[rat]]]
variable[comp] assign[=] call[name[comp].join, parameter[call[name[pd].concat, parameter[name[rats], constant[1]]]]]
call[name[comp].sort_index, parameter[constant[1]]]
return[call[name[comp].loc][tuple[[<ast.Slice object at 0x7da1b01b9c30>, <ast.Tuple object at 0x7da1b01b9d80>]]]] | keyword[def] identifier[filter_effect] ( identifier[self] , identifier[analytes] = keyword[None] , identifier[stats] =[ literal[string] , literal[string] ], identifier[filt] = keyword[True] ):
literal[string]
keyword[if] identifier[analytes] keyword[is] keyword[None] :
identifier[analytes] = identifier[self] . identifier[analytes]
keyword[if] identifier[isinstance] ( identifier[analytes] , identifier[str] ):
identifier[analytes] =[ identifier[analytes] ]
identifier[self] . identifier[sample_stats] ([ literal[string] , literal[string] ], identifier[stats] = identifier[stats] , identifier[filt] = keyword[False] )
identifier[suf] = identifier[self] . identifier[stats] . identifier[copy] ()
identifier[self] . identifier[sample_stats] ([ literal[string] , literal[string] ], identifier[stats] = identifier[stats] , identifier[filt] = identifier[filt] )
identifier[sf] = identifier[self] . identifier[stats] . identifier[copy] ()
identifier[cols] =[]
keyword[for] identifier[s] keyword[in] identifier[self] . identifier[stats_calced] :
identifier[cols] +=[ literal[string] . identifier[format] ( identifier[s] ), literal[string] . identifier[format] ( identifier[s] )]
identifier[comp] = identifier[pd] . identifier[DataFrame] ( identifier[index] = identifier[self] . identifier[samples] ,
identifier[columns] = identifier[pd] . identifier[MultiIndex] . identifier[from_arrays] ([ identifier[cols] ,[ keyword[None] ]* identifier[len] ( identifier[cols] )]))
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[suf] . identifier[items] ():
identifier[vf] = identifier[sf] [ identifier[k] ]
keyword[for] identifier[i] , identifier[a] keyword[in] identifier[enumerate] ( identifier[v] [ literal[string] ]):
keyword[for] identifier[s] keyword[in] identifier[self] . identifier[stats_calced] :
identifier[comp] . identifier[loc] [ identifier[k] ,( literal[string] . identifier[format] ( identifier[s] ), identifier[a] )]= identifier[v] [ identifier[s] ][ identifier[i] , literal[int] ]
identifier[comp] . identifier[loc] [ identifier[k] ,( literal[string] . identifier[format] ( identifier[s] ), identifier[a] )]= identifier[vf] [ identifier[s] ][ identifier[i] , literal[int] ]
identifier[comp] . identifier[dropna] ( literal[int] , literal[string] , identifier[inplace] = keyword[True] )
identifier[comp] . identifier[dropna] ( literal[int] , literal[string] , identifier[inplace] = keyword[True] )
identifier[comp] . identifier[sort_index] ( literal[int] , identifier[inplace] = keyword[True] )
identifier[rats] =[]
keyword[for] identifier[s] keyword[in] identifier[self] . identifier[stats_calced] :
identifier[rat] = identifier[comp] . identifier[loc] [:, literal[string] . identifier[format] ( identifier[s] )]/ identifier[comp] . identifier[loc] [:, literal[string] . identifier[format] ( identifier[s] )]
identifier[rat] . identifier[columns] = identifier[pd] . identifier[MultiIndex] . identifier[from_product] ([[ literal[string] . identifier[format] ( identifier[s] )], identifier[rat] . identifier[columns] ])
identifier[rats] . identifier[append] ( identifier[rat] )
identifier[comp] = identifier[comp] . identifier[join] ( identifier[pd] . identifier[concat] ( identifier[rats] , literal[int] ))
identifier[comp] . identifier[sort_index] ( literal[int] , identifier[inplace] = keyword[True] )
keyword[return] identifier[comp] . identifier[loc] [:,( identifier[pd] . identifier[IndexSlice] [:], identifier[pd] . identifier[IndexSlice] [ identifier[analytes] ])] | def filter_effect(self, analytes=None, stats=['mean', 'std'], filt=True):
"""
Quantify the effects of the active filters.
Parameters
----------
analytes : str or list
Which analytes to consider.
stats : list
Which statistics to calculate.
file : valid filter string or bool
Which filter to consider. If True, applies all
active filters.
Returns
-------
pandas.DataFrame
Contains statistics calculated for filtered and
unfiltered data, and the filtered/unfiltered ratio.
"""
if analytes is None:
analytes = self.analytes # depends on [control=['if'], data=['analytes']]
if isinstance(analytes, str):
analytes = [analytes] # depends on [control=['if'], data=[]]
# calculate filtered and unfiltered stats
self.sample_stats(['La139', 'Ti49'], stats=stats, filt=False)
suf = self.stats.copy()
self.sample_stats(['La139', 'Ti49'], stats=stats, filt=filt)
sf = self.stats.copy()
# create dataframe for results
cols = []
for s in self.stats_calced:
cols += ['unfiltered_{:}'.format(s), 'filtered_{:}'.format(s)] # depends on [control=['for'], data=['s']]
comp = pd.DataFrame(index=self.samples, columns=pd.MultiIndex.from_arrays([cols, [None] * len(cols)]))
# collate stats
for (k, v) in suf.items():
vf = sf[k]
for (i, a) in enumerate(v['analytes']):
for s in self.stats_calced:
comp.loc[k, ('unfiltered_{:}'.format(s), a)] = v[s][i, 0]
comp.loc[k, ('filtered_{:}'.format(s), a)] = vf[s][i, 0] # depends on [control=['for'], data=['s']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
comp.dropna(0, 'all', inplace=True)
comp.dropna(1, 'all', inplace=True)
comp.sort_index(1, inplace=True)
# calculate filtered/unfiltered ratios
rats = []
for s in self.stats_calced:
rat = comp.loc[:, 'filtered_{:}'.format(s)] / comp.loc[:, 'unfiltered_{:}'.format(s)]
rat.columns = pd.MultiIndex.from_product([['{:}_ratio'.format(s)], rat.columns])
rats.append(rat) # depends on [control=['for'], data=['s']]
# join it all up
comp = comp.join(pd.concat(rats, 1))
comp.sort_index(1, inplace=True)
return comp.loc[:, (pd.IndexSlice[:], pd.IndexSlice[analytes])] |
def replace_route(route_table_id=None, destination_cidr_block=None,
                  route_table_name=None, gateway_id=None,
                  instance_id=None, interface_id=None,
                  region=None, key=None, keyid=None, profile=None,
                  vpc_peering_connection_id=None):
    '''
    Replaces a route.
    CLI Example:
    .. code-block:: bash
        salt myminion boto_vpc.replace_route 'rtb-1f382e7d' '10.0.0.0/16' gateway_id='vgw-a1b2c3'
    '''
    # The route table may be identified by id or by name, but not both.
    if not _exactly_one((route_table_name, route_table_id)):
        raise SaltInvocationError('One (but not both) of route_table_id or route_table_name '
                                  'must be provided.')
    if destination_cidr_block is None:
        raise SaltInvocationError('destination_cidr_block is required.')
    try:
        # Resolve a table name to its id before talking to the API.
        if route_table_name:
            route_table_id = _get_resource_id('route_table', route_table_name,
                                              region=region, key=key,
                                              keyid=keyid, profile=profile)
            if not route_table_id:
                # Name lookup failed: report an error dict rather than raising.
                return {'replaced': False,
                        'error': {'message': 'route table {0} does not exist.'.format(route_table_name)}}
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        if conn.replace_route(route_table_id, destination_cidr_block,
                              gateway_id=gateway_id, instance_id=instance_id,
                              interface_id=interface_id, vpc_peering_connection_id=vpc_peering_connection_id):
            # NOTE(review): the %s arguments look swapped relative to the
            # message text (route_table_id fills the "cidr block" slot and
            # destination_cidr_block the "route table" slot) -- confirm intended.
            log.info(
                'Route with cidr block %s on route table %s was replaced',
                route_table_id, destination_cidr_block
            )
            return {'replaced': True}
        else:
            log.warning(
                'Route with cidr block %s on route table %s was not replaced',
                route_table_id, destination_cidr_block
            )
            return {'replaced': False}
    except BotoServerError as e:
return {'replaced': False, 'error': __utils__['boto.get_error'](e)} | def function[replace_route, parameter[route_table_id, destination_cidr_block, route_table_name, gateway_id, instance_id, interface_id, region, key, keyid, profile, vpc_peering_connection_id]]:
constant[
Replaces a route.
CLI Example:
.. code-block:: bash
salt myminion boto_vpc.replace_route 'rtb-1f382e7d' '10.0.0.0/16' gateway_id='vgw-a1b2c3'
]
if <ast.UnaryOp object at 0x7da1b21a54b0> begin[:]
<ast.Raise object at 0x7da1b21a4ee0>
if compare[name[destination_cidr_block] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b21a6ce0>
<ast.Try object at 0x7da1b21a5b40> | keyword[def] identifier[replace_route] ( identifier[route_table_id] = keyword[None] , identifier[destination_cidr_block] = keyword[None] ,
identifier[route_table_name] = keyword[None] , identifier[gateway_id] = keyword[None] ,
identifier[instance_id] = keyword[None] , identifier[interface_id] = keyword[None] ,
identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] ,
identifier[vpc_peering_connection_id] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[_exactly_one] (( identifier[route_table_name] , identifier[route_table_id] )):
keyword[raise] identifier[SaltInvocationError] ( literal[string]
literal[string] )
keyword[if] identifier[destination_cidr_block] keyword[is] keyword[None] :
keyword[raise] identifier[SaltInvocationError] ( literal[string] )
keyword[try] :
keyword[if] identifier[route_table_name] :
identifier[route_table_id] = identifier[_get_resource_id] ( literal[string] , identifier[route_table_name] ,
identifier[region] = identifier[region] , identifier[key] = identifier[key] ,
identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
keyword[if] keyword[not] identifier[route_table_id] :
keyword[return] { literal[string] : keyword[False] ,
literal[string] :{ literal[string] : literal[string] . identifier[format] ( identifier[route_table_name] )}}
identifier[conn] = identifier[_get_conn] ( identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
keyword[if] identifier[conn] . identifier[replace_route] ( identifier[route_table_id] , identifier[destination_cidr_block] ,
identifier[gateway_id] = identifier[gateway_id] , identifier[instance_id] = identifier[instance_id] ,
identifier[interface_id] = identifier[interface_id] , identifier[vpc_peering_connection_id] = identifier[vpc_peering_connection_id] ):
identifier[log] . identifier[info] (
literal[string] ,
identifier[route_table_id] , identifier[destination_cidr_block]
)
keyword[return] { literal[string] : keyword[True] }
keyword[else] :
identifier[log] . identifier[warning] (
literal[string] ,
identifier[route_table_id] , identifier[destination_cidr_block]
)
keyword[return] { literal[string] : keyword[False] }
keyword[except] identifier[BotoServerError] keyword[as] identifier[e] :
keyword[return] { literal[string] : keyword[False] , literal[string] : identifier[__utils__] [ literal[string] ]( identifier[e] )} | def replace_route(route_table_id=None, destination_cidr_block=None, route_table_name=None, gateway_id=None, instance_id=None, interface_id=None, region=None, key=None, keyid=None, profile=None, vpc_peering_connection_id=None):
"""
Replaces a route.
CLI Example:
.. code-block:: bash
salt myminion boto_vpc.replace_route 'rtb-1f382e7d' '10.0.0.0/16' gateway_id='vgw-a1b2c3'
"""
if not _exactly_one((route_table_name, route_table_id)):
raise SaltInvocationError('One (but not both) of route_table_id or route_table_name must be provided.') # depends on [control=['if'], data=[]]
if destination_cidr_block is None:
raise SaltInvocationError('destination_cidr_block is required.') # depends on [control=['if'], data=[]]
try:
if route_table_name:
route_table_id = _get_resource_id('route_table', route_table_name, region=region, key=key, keyid=keyid, profile=profile)
if not route_table_id:
return {'replaced': False, 'error': {'message': 'route table {0} does not exist.'.format(route_table_name)}} # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if conn.replace_route(route_table_id, destination_cidr_block, gateway_id=gateway_id, instance_id=instance_id, interface_id=interface_id, vpc_peering_connection_id=vpc_peering_connection_id):
log.info('Route with cidr block %s on route table %s was replaced', route_table_id, destination_cidr_block)
return {'replaced': True} # depends on [control=['if'], data=[]]
else:
log.warning('Route with cidr block %s on route table %s was not replaced', route_table_id, destination_cidr_block)
return {'replaced': False} # depends on [control=['try'], data=[]]
except BotoServerError as e:
return {'replaced': False, 'error': __utils__['boto.get_error'](e)} # depends on [control=['except'], data=['e']] |
def add_size_info (self):
    """Get size of file content and modification time from filename path."""
    if self.is_directory():
        # Directory size always differs from the customer index.html
        # that is generated. So return without calculating any size.
        return
    filename = self.get_os_filename()
    # Size of the file content in whatever unit fileutil.get_size reports.
    self.size = fileutil.get_size(filename)
self.modified = datetime.utcfromtimestamp(fileutil.get_mtime(filename)) | def function[add_size_info, parameter[self]]:
constant[Get size of file content and modification time from filename path.]
if call[name[self].is_directory, parameter[]] begin[:]
return[None]
variable[filename] assign[=] call[name[self].get_os_filename, parameter[]]
name[self].size assign[=] call[name[fileutil].get_size, parameter[name[filename]]]
name[self].modified assign[=] call[name[datetime].utcfromtimestamp, parameter[call[name[fileutil].get_mtime, parameter[name[filename]]]]] | keyword[def] identifier[add_size_info] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[is_directory] ():
keyword[return]
identifier[filename] = identifier[self] . identifier[get_os_filename] ()
identifier[self] . identifier[size] = identifier[fileutil] . identifier[get_size] ( identifier[filename] )
identifier[self] . identifier[modified] = identifier[datetime] . identifier[utcfromtimestamp] ( identifier[fileutil] . identifier[get_mtime] ( identifier[filename] )) | def add_size_info(self):
"""Get size of file content and modification time from filename path."""
if self.is_directory():
# Directory size always differs from the customer index.html
# that is generated. So return without calculating any size.
return # depends on [control=['if'], data=[]]
filename = self.get_os_filename()
self.size = fileutil.get_size(filename)
self.modified = datetime.utcfromtimestamp(fileutil.get_mtime(filename)) |
def order_modified_volume(self, volume_id, new_size=None, new_iops=None, new_tier_level=None):
    """Places an order for modifying an existing block volume.
    :param volume_id: The ID of the volume to be modified
    :param new_size: The new size/capacity for the volume
    :param new_iops: The new IOPS for the volume
    :param new_tier_level: The new tier level for the volume
    :return: Returns a SoftLayer_Container_Product_Order_Receipt
    """
    # Object mask limiting the volume lookup to the fields the order
    # helper inspects when building the modify order.
    mask_items = [
        'id',
        'billingItem',
        'storageType[keyName]',
        'capacityGb',
        'provisionedIops',
        'storageTierLevel',
        'staasVersion',
        'hasEncryptionAtRest',
    ]
    block_mask = ','.join(mask_items)
    volume = self.get_block_volume_details(volume_id, mask=block_mask)
    # Delegate order construction to the shared storage helper.
    order = storage_utils.prepare_modify_order_object(
        self, volume, new_iops, new_tier_level, new_size
    )
return self.client.call('Product_Order', 'placeOrder', order) | def function[order_modified_volume, parameter[self, volume_id, new_size, new_iops, new_tier_level]]:
constant[Places an order for modifying an existing block volume.
:param volume_id: The ID of the volume to be modified
:param new_size: The new size/capacity for the volume
:param new_iops: The new IOPS for the volume
:param new_tier_level: The new tier level for the volume
:return: Returns a SoftLayer_Container_Product_Order_Receipt
]
variable[mask_items] assign[=] list[[<ast.Constant object at 0x7da20c795a50>, <ast.Constant object at 0x7da20c794850>, <ast.Constant object at 0x7da20c796260>, <ast.Constant object at 0x7da20c795f00>, <ast.Constant object at 0x7da20c7941f0>, <ast.Constant object at 0x7da20c794a00>, <ast.Constant object at 0x7da20c795c60>, <ast.Constant object at 0x7da20c794b50>]]
variable[block_mask] assign[=] call[constant[,].join, parameter[name[mask_items]]]
variable[volume] assign[=] call[name[self].get_block_volume_details, parameter[name[volume_id]]]
variable[order] assign[=] call[name[storage_utils].prepare_modify_order_object, parameter[name[self], name[volume], name[new_iops], name[new_tier_level], name[new_size]]]
return[call[name[self].client.call, parameter[constant[Product_Order], constant[placeOrder], name[order]]]] | keyword[def] identifier[order_modified_volume] ( identifier[self] , identifier[volume_id] , identifier[new_size] = keyword[None] , identifier[new_iops] = keyword[None] , identifier[new_tier_level] = keyword[None] ):
literal[string]
identifier[mask_items] =[
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
]
identifier[block_mask] = literal[string] . identifier[join] ( identifier[mask_items] )
identifier[volume] = identifier[self] . identifier[get_block_volume_details] ( identifier[volume_id] , identifier[mask] = identifier[block_mask] )
identifier[order] = identifier[storage_utils] . identifier[prepare_modify_order_object] (
identifier[self] , identifier[volume] , identifier[new_iops] , identifier[new_tier_level] , identifier[new_size]
)
keyword[return] identifier[self] . identifier[client] . identifier[call] ( literal[string] , literal[string] , identifier[order] ) | def order_modified_volume(self, volume_id, new_size=None, new_iops=None, new_tier_level=None):
"""Places an order for modifying an existing block volume.
:param volume_id: The ID of the volume to be modified
:param new_size: The new size/capacity for the volume
:param new_iops: The new IOPS for the volume
:param new_tier_level: The new tier level for the volume
:return: Returns a SoftLayer_Container_Product_Order_Receipt
"""
mask_items = ['id', 'billingItem', 'storageType[keyName]', 'capacityGb', 'provisionedIops', 'storageTierLevel', 'staasVersion', 'hasEncryptionAtRest']
block_mask = ','.join(mask_items)
volume = self.get_block_volume_details(volume_id, mask=block_mask)
order = storage_utils.prepare_modify_order_object(self, volume, new_iops, new_tier_level, new_size)
return self.client.call('Product_Order', 'placeOrder', order) |
def publish_server_closed(self, server_address, topology_id):
    """Publish a ServerClosedEvent to all server listeners.
    :Parameters:
    - `server_address`: The address (host/port pair) of the server.
    - `topology_id`: A unique identifier for the topology this server
    is a part of.
    """
    event = ServerClosedEvent(server_address, topology_id)
    # One shared event instance is delivered to every listener; a failing
    # listener must not stop the remaining listeners from being notified.
    for subscriber in self.__server_listeners:
        try:
            subscriber.closed(event)
        except Exception:
_handle_exception() | def function[publish_server_closed, parameter[self, server_address, topology_id]]:
constant[Publish a ServerClosedEvent to all server listeners.
:Parameters:
- `server_address`: The address (host/port pair) of the server.
- `topology_id`: A unique identifier for the topology this server
is a part of.
]
variable[event] assign[=] call[name[ServerClosedEvent], parameter[name[server_address], name[topology_id]]]
for taget[name[subscriber]] in starred[name[self].__server_listeners] begin[:]
<ast.Try object at 0x7da2047e8310> | keyword[def] identifier[publish_server_closed] ( identifier[self] , identifier[server_address] , identifier[topology_id] ):
literal[string]
identifier[event] = identifier[ServerClosedEvent] ( identifier[server_address] , identifier[topology_id] )
keyword[for] identifier[subscriber] keyword[in] identifier[self] . identifier[__server_listeners] :
keyword[try] :
identifier[subscriber] . identifier[closed] ( identifier[event] )
keyword[except] identifier[Exception] :
identifier[_handle_exception] () | def publish_server_closed(self, server_address, topology_id):
"""Publish a ServerClosedEvent to all server listeners.
:Parameters:
- `server_address`: The address (host/port pair) of the server.
- `topology_id`: A unique identifier for the topology this server
is a part of.
"""
event = ServerClosedEvent(server_address, topology_id)
for subscriber in self.__server_listeners:
try:
subscriber.closed(event) # depends on [control=['try'], data=[]]
except Exception:
_handle_exception() # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['subscriber']] |
def pop_result(self, key):
    """Returns the result for ``key`` and unregisters it."""
    # NOTE(review): raises if ``key`` was never registered; the exact
    # exception type depends on the pending_callbacks container -- confirm.
    self.pending_callbacks.remove(key)
return self.results.pop(key) | def function[pop_result, parameter[self, key]]:
constant[Returns the result for ``key`` and unregisters it.]
call[name[self].pending_callbacks.remove, parameter[name[key]]]
return[call[name[self].results.pop, parameter[name[key]]]] | keyword[def] identifier[pop_result] ( identifier[self] , identifier[key] ):
literal[string]
identifier[self] . identifier[pending_callbacks] . identifier[remove] ( identifier[key] )
keyword[return] identifier[self] . identifier[results] . identifier[pop] ( identifier[key] ) | def pop_result(self, key):
"""Returns the result for ``key`` and unregisters it."""
self.pending_callbacks.remove(key)
return self.results.pop(key) |
def init_batch(raw_constraints: List[Optional[RawConstraintList]],
               beam_size: int,
               start_id: int,
               eos_id: int) -> List[Optional[ConstrainedHypothesis]]:
    """
    :param raw_constraints: The list of raw constraints (list of list of IDs).
    :param beam_size: The beam size.
    :param start_id: The target-language vocabulary ID of the SOS symbol.
    :param eos_id: The target-language vocabulary ID of the EOS symbol.
    :return: A list of ConstrainedHypothesis objects (shape: (batch_size * beam_size,)).
    """
    # One slot per (sentence, beam item); entries stay None for sentences
    # that carry no constraints.
    constraints = [None] * (len(raw_constraints) * beam_size)  # type: List[Optional[ConstrainedHypothesis]]
    if any(raw_constraints):
        for i, raw_list in enumerate(raw_constraints):
            # Total number of constrained token IDs across all phrases.
            num_constraints = sum([len(phrase) for phrase in raw_list]) if raw_list is not None else 0
            if num_constraints > 0:
                hyp = ConstrainedHypothesis(raw_list, eos_id)
                idx = i * beam_size
                # Seed every beam slot of this sentence with a hypothesis
                # already advanced by the SOS symbol.
                constraints[idx:idx + beam_size] = [hyp.advance(start_id) for x in range(beam_size)]
return constraints | def function[init_batch, parameter[raw_constraints, beam_size, start_id, eos_id]]:
constant[
:param raw_constraints: The list of raw constraints (list of list of IDs).
:param beam_size: The beam size.
:param start_id: The target-language vocabulary ID of the SOS symbol.
:param eos_id: The target-language vocabulary ID of the EOS symbol.
:return: A list of ConstrainedHypothesis objects (shape: (batch_size * beam_size,)).
]
variable[constraints] assign[=] binary_operation[list[[<ast.Constant object at 0x7da18c4cf130>]] * binary_operation[call[name[len], parameter[name[raw_constraints]]] * name[beam_size]]]
if call[name[any], parameter[name[raw_constraints]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18c4ceb30>, <ast.Name object at 0x7da18c4cdb70>]]] in starred[call[name[enumerate], parameter[name[raw_constraints]]]] begin[:]
variable[num_constraints] assign[=] <ast.IfExp object at 0x7da18c4cffd0>
if compare[name[num_constraints] greater[>] constant[0]] begin[:]
variable[hyp] assign[=] call[name[ConstrainedHypothesis], parameter[name[raw_list], name[eos_id]]]
variable[idx] assign[=] binary_operation[name[i] * name[beam_size]]
call[name[constraints]][<ast.Slice object at 0x7da18c4cf160>] assign[=] <ast.ListComp object at 0x7da18c4cf250>
return[name[constraints]] | keyword[def] identifier[init_batch] ( identifier[raw_constraints] : identifier[List] [ identifier[Optional] [ identifier[RawConstraintList] ]],
identifier[beam_size] : identifier[int] ,
identifier[start_id] : identifier[int] ,
identifier[eos_id] : identifier[int] )-> identifier[List] [ identifier[Optional] [ identifier[ConstrainedHypothesis] ]]:
literal[string]
identifier[constraints] =[ keyword[None] ]*( identifier[len] ( identifier[raw_constraints] )* identifier[beam_size] )
keyword[if] identifier[any] ( identifier[raw_constraints] ):
keyword[for] identifier[i] , identifier[raw_list] keyword[in] identifier[enumerate] ( identifier[raw_constraints] ):
identifier[num_constraints] = identifier[sum] ([ identifier[len] ( identifier[phrase] ) keyword[for] identifier[phrase] keyword[in] identifier[raw_list] ]) keyword[if] identifier[raw_list] keyword[is] keyword[not] keyword[None] keyword[else] literal[int]
keyword[if] identifier[num_constraints] > literal[int] :
identifier[hyp] = identifier[ConstrainedHypothesis] ( identifier[raw_list] , identifier[eos_id] )
identifier[idx] = identifier[i] * identifier[beam_size]
identifier[constraints] [ identifier[idx] : identifier[idx] + identifier[beam_size] ]=[ identifier[hyp] . identifier[advance] ( identifier[start_id] ) keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[beam_size] )]
keyword[return] identifier[constraints] | def init_batch(raw_constraints: List[Optional[RawConstraintList]], beam_size: int, start_id: int, eos_id: int) -> List[Optional[ConstrainedHypothesis]]:
"""
:param raw_constraints: The list of raw constraints (list of list of IDs).
:param beam_size: The beam size.
:param start_id: The target-language vocabulary ID of the SOS symbol.
:param eos_id: The target-language vocabulary ID of the EOS symbol.
:return: A list of ConstrainedHypothesis objects (shape: (batch_size * beam_size,)).
"""
constraints = [None] * (len(raw_constraints) * beam_size) # type: List[Optional[ConstrainedHypothesis]]
if any(raw_constraints):
for (i, raw_list) in enumerate(raw_constraints):
num_constraints = sum([len(phrase) for phrase in raw_list]) if raw_list is not None else 0
if num_constraints > 0:
hyp = ConstrainedHypothesis(raw_list, eos_id)
idx = i * beam_size
constraints[idx:idx + beam_size] = [hyp.advance(start_id) for x in range(beam_size)] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
return constraints |
def do_authenticate_account(self, authc_token):
    """
    Returns an account object only when the current token authenticates AND
    the authentication process is complete, raising otherwise
    :returns: Account
    :raises AdditionalAuthenticationRequired: when additional tokens are required,
    passing the account object
    """
    # Map the concrete token class to the realm(s) able to process it.
    try:
        realms = self.token_realm_resolver[authc_token.__class__]
    except KeyError:
        raise KeyError('Unsupported Token Type Provided: ', authc_token.__class__.__name__)
    # NOTE(review): the length check uses ``self.realms`` while the
    # single-realm path indexes the token-resolved ``realms`` -- confirm
    # these always agree.
    if (len(self.realms) == 1):
        account = self.authenticate_single_realm_account(realms[0], authc_token)
    else:
        account = self.authenticate_multi_realm_account(self.realms, authc_token)
    # Reject the attempt outright if this credential type is locked out.
    cred_type = authc_token.token_info['cred_type']
    attempts = account['authc_info'][cred_type].get('failed_attempts', [])
    self.validate_locked(authc_token, attempts)
    # TODO: refactor this to something less rigid as it is unreliable:
    # More credential entries than the token's tier means another factor
    # is still outstanding.
    if len(account['authc_info']) > authc_token.token_info['tier']:
        if self.mfa_dispatcher:
            realm = self.token_realm_resolver[TOTPToken][0]  # s/b only one
            totp_token = realm.generate_totp_token(account)
            mfa_info = account['authc_info']['totp_key']['2fa_info']
            # Send the one-time code out-of-band to the user.
            self.mfa_dispatcher.dispatch(authc_token.identifier,
                                         mfa_info,
                                         totp_token)
        raise AdditionalAuthenticationRequired(account['account_id'])
return account | def function[do_authenticate_account, parameter[self, authc_token]]:
constant[
Returns an account object only when the current token authenticates AND
the authentication process is complete, raising otherwise
:returns: Account
:raises AdditionalAuthenticationRequired: when additional tokens are required,
passing the account object
]
<ast.Try object at 0x7da20c6c6080>
if compare[call[name[len], parameter[name[self].realms]] equal[==] constant[1]] begin[:]
variable[account] assign[=] call[name[self].authenticate_single_realm_account, parameter[call[name[realms]][constant[0]], name[authc_token]]]
variable[cred_type] assign[=] call[name[authc_token].token_info][constant[cred_type]]
variable[attempts] assign[=] call[call[call[name[account]][constant[authc_info]]][name[cred_type]].get, parameter[constant[failed_attempts], list[[]]]]
call[name[self].validate_locked, parameter[name[authc_token], name[attempts]]]
if compare[call[name[len], parameter[call[name[account]][constant[authc_info]]]] greater[>] call[name[authc_token].token_info][constant[tier]]] begin[:]
if name[self].mfa_dispatcher begin[:]
variable[realm] assign[=] call[call[name[self].token_realm_resolver][name[TOTPToken]]][constant[0]]
variable[totp_token] assign[=] call[name[realm].generate_totp_token, parameter[name[account]]]
variable[mfa_info] assign[=] call[call[call[name[account]][constant[authc_info]]][constant[totp_key]]][constant[2fa_info]]
call[name[self].mfa_dispatcher.dispatch, parameter[name[authc_token].identifier, name[mfa_info], name[totp_token]]]
<ast.Raise object at 0x7da20c6c7fd0>
return[name[account]] | keyword[def] identifier[do_authenticate_account] ( identifier[self] , identifier[authc_token] ):
literal[string]
keyword[try] :
identifier[realms] = identifier[self] . identifier[token_realm_resolver] [ identifier[authc_token] . identifier[__class__] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[KeyError] ( literal[string] , identifier[authc_token] . identifier[__class__] . identifier[__name__] )
keyword[if] ( identifier[len] ( identifier[self] . identifier[realms] )== literal[int] ):
identifier[account] = identifier[self] . identifier[authenticate_single_realm_account] ( identifier[realms] [ literal[int] ], identifier[authc_token] )
keyword[else] :
identifier[account] = identifier[self] . identifier[authenticate_multi_realm_account] ( identifier[self] . identifier[realms] , identifier[authc_token] )
identifier[cred_type] = identifier[authc_token] . identifier[token_info] [ literal[string] ]
identifier[attempts] = identifier[account] [ literal[string] ][ identifier[cred_type] ]. identifier[get] ( literal[string] ,[])
identifier[self] . identifier[validate_locked] ( identifier[authc_token] , identifier[attempts] )
keyword[if] identifier[len] ( identifier[account] [ literal[string] ])> identifier[authc_token] . identifier[token_info] [ literal[string] ]:
keyword[if] identifier[self] . identifier[mfa_dispatcher] :
identifier[realm] = identifier[self] . identifier[token_realm_resolver] [ identifier[TOTPToken] ][ literal[int] ]
identifier[totp_token] = identifier[realm] . identifier[generate_totp_token] ( identifier[account] )
identifier[mfa_info] = identifier[account] [ literal[string] ][ literal[string] ][ literal[string] ]
identifier[self] . identifier[mfa_dispatcher] . identifier[dispatch] ( identifier[authc_token] . identifier[identifier] ,
identifier[mfa_info] ,
identifier[totp_token] )
keyword[raise] identifier[AdditionalAuthenticationRequired] ( identifier[account] [ literal[string] ])
keyword[return] identifier[account] | def do_authenticate_account(self, authc_token):
"""
Returns an account object only when the current token authenticates AND
the authentication process is complete, raising otherwise
:returns: Account
:raises AdditionalAuthenticationRequired: when additional tokens are required,
passing the account object
"""
try:
realms = self.token_realm_resolver[authc_token.__class__] # depends on [control=['try'], data=[]]
except KeyError:
raise KeyError('Unsupported Token Type Provided: ', authc_token.__class__.__name__) # depends on [control=['except'], data=[]]
if len(self.realms) == 1:
account = self.authenticate_single_realm_account(realms[0], authc_token) # depends on [control=['if'], data=[]]
else:
account = self.authenticate_multi_realm_account(self.realms, authc_token)
cred_type = authc_token.token_info['cred_type']
attempts = account['authc_info'][cred_type].get('failed_attempts', [])
self.validate_locked(authc_token, attempts)
# TODO: refactor this to something less rigid as it is unreliable:
if len(account['authc_info']) > authc_token.token_info['tier']:
if self.mfa_dispatcher:
realm = self.token_realm_resolver[TOTPToken][0] # s/b only one
totp_token = realm.generate_totp_token(account)
mfa_info = account['authc_info']['totp_key']['2fa_info']
self.mfa_dispatcher.dispatch(authc_token.identifier, mfa_info, totp_token) # depends on [control=['if'], data=[]]
raise AdditionalAuthenticationRequired(account['account_id']) # depends on [control=['if'], data=[]]
return account |
def _get_upsert_fields(self, kwargs):
    """Gets the fields to use in an upsert.
    This some nice magic. We'll split the fields into
    a group of "insert fields" and "update fields":
    INSERT INTO bla ("val1", "val2") ON CONFLICT DO UPDATE SET val1 = EXCLUDED.val1
                    ^^^^^^^^^^^^^^                             ^^^^^^^^^^^^^^^^^^^^
                    insert_fields                              update_fields
    Often, fields appear in both lists. But, for example,
    a :see:DateTime field with `auto_now_add=True` set, will
    only appear in "insert_fields", since it won't be set
    on existing rows.
    Other than that, the user specificies a list of fields
    in the upsert() call. That migt not be all fields. The
    user could decide to leave out optional fields. If we
    end up doing an update, we don't want to overwrite
    those non-specified fields.
    We cannot just take the list of fields the user
    specifies, because as mentioned, some fields
    make modifications to the model on their own.
    We'll have to detect which fields make modifications
    and include them in the list of insert/update fields.
    """
    model_instance = self.model(**kwargs)
    insert_fields = []
    update_fields = []
    for field in model_instance._meta.local_concrete_fields:
        has_default = field.default != NOT_PROVIDED
        # Explicitly supplied fields go into both lists.
        if (field.name in kwargs or field.column in kwargs):
            insert_fields.append(field)
            update_fields.append(field)
            continue
        elif has_default:
            # Defaulted but unspecified: inserted, never overwritten.
            insert_fields.append(field)
            continue
        # special handling for 'pk' which always refers to
        # the primary key, so if we the user specifies `pk`
        # instead of a concrete field, we have to handle that
        if field.primary_key is True and 'pk' in kwargs:
            insert_fields.append(field)
            update_fields.append(field)
            continue
        # "Magical" fields set their own value (e.g. auto_now/auto_now_add);
        # include them per-operation as detected.
        if self._is_magical_field(model_instance, field, is_insert=True):
            insert_fields.append(field)
        if self._is_magical_field(model_instance, field, is_insert=False):
            update_fields.append(field)
return insert_fields, update_fields | def function[_get_upsert_fields, parameter[self, kwargs]]:
constant[Gets the fields to use in an upsert.
This some nice magic. We'll split the fields into
a group of "insert fields" and "update fields":
INSERT INTO bla ("val1", "val2") ON CONFLICT DO UPDATE SET val1 = EXCLUDED.val1
^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^
insert_fields update_fields
Often, fields appear in both lists. But, for example,
a :see:DateTime field with `auto_now_add=True` set, will
only appear in "insert_fields", since it won't be set
on existing rows.
Other than that, the user specificies a list of fields
in the upsert() call. That migt not be all fields. The
user could decide to leave out optional fields. If we
end up doing an update, we don't want to overwrite
those non-specified fields.
We cannot just take the list of fields the user
specifies, because as mentioned, some fields
make modifications to the model on their own.
We'll have to detect which fields make modifications
and include them in the list of insert/update fields.
]
variable[model_instance] assign[=] call[name[self].model, parameter[]]
variable[insert_fields] assign[=] list[[]]
variable[update_fields] assign[=] list[[]]
for taget[name[field]] in starred[name[model_instance]._meta.local_concrete_fields] begin[:]
variable[has_default] assign[=] compare[name[field].default not_equal[!=] name[NOT_PROVIDED]]
if <ast.BoolOp object at 0x7da1b059df90> begin[:]
call[name[insert_fields].append, parameter[name[field]]]
call[name[update_fields].append, parameter[name[field]]]
continue
if <ast.BoolOp object at 0x7da1b059fca0> begin[:]
call[name[insert_fields].append, parameter[name[field]]]
call[name[update_fields].append, parameter[name[field]]]
continue
if call[name[self]._is_magical_field, parameter[name[model_instance], name[field]]] begin[:]
call[name[insert_fields].append, parameter[name[field]]]
if call[name[self]._is_magical_field, parameter[name[model_instance], name[field]]] begin[:]
call[name[update_fields].append, parameter[name[field]]]
return[tuple[[<ast.Name object at 0x7da1b0381ea0>, <ast.Name object at 0x7da1b0380f10>]]] | keyword[def] identifier[_get_upsert_fields] ( identifier[self] , identifier[kwargs] ):
literal[string]
identifier[model_instance] = identifier[self] . identifier[model] (** identifier[kwargs] )
identifier[insert_fields] =[]
identifier[update_fields] =[]
keyword[for] identifier[field] keyword[in] identifier[model_instance] . identifier[_meta] . identifier[local_concrete_fields] :
identifier[has_default] = identifier[field] . identifier[default] != identifier[NOT_PROVIDED]
keyword[if] ( identifier[field] . identifier[name] keyword[in] identifier[kwargs] keyword[or] identifier[field] . identifier[column] keyword[in] identifier[kwargs] ):
identifier[insert_fields] . identifier[append] ( identifier[field] )
identifier[update_fields] . identifier[append] ( identifier[field] )
keyword[continue]
keyword[elif] identifier[has_default] :
identifier[insert_fields] . identifier[append] ( identifier[field] )
keyword[continue]
keyword[if] identifier[field] . identifier[primary_key] keyword[is] keyword[True] keyword[and] literal[string] keyword[in] identifier[kwargs] :
identifier[insert_fields] . identifier[append] ( identifier[field] )
identifier[update_fields] . identifier[append] ( identifier[field] )
keyword[continue]
keyword[if] identifier[self] . identifier[_is_magical_field] ( identifier[model_instance] , identifier[field] , identifier[is_insert] = keyword[True] ):
identifier[insert_fields] . identifier[append] ( identifier[field] )
keyword[if] identifier[self] . identifier[_is_magical_field] ( identifier[model_instance] , identifier[field] , identifier[is_insert] = keyword[False] ):
identifier[update_fields] . identifier[append] ( identifier[field] )
keyword[return] identifier[insert_fields] , identifier[update_fields] | def _get_upsert_fields(self, kwargs):
"""Gets the fields to use in an upsert.
This some nice magic. We'll split the fields into
a group of "insert fields" and "update fields":
INSERT INTO bla ("val1", "val2") ON CONFLICT DO UPDATE SET val1 = EXCLUDED.val1
^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^
insert_fields update_fields
Often, fields appear in both lists. But, for example,
a :see:DateTime field with `auto_now_add=True` set, will
only appear in "insert_fields", since it won't be set
on existing rows.
Other than that, the user specificies a list of fields
in the upsert() call. That migt not be all fields. The
user could decide to leave out optional fields. If we
end up doing an update, we don't want to overwrite
those non-specified fields.
We cannot just take the list of fields the user
specifies, because as mentioned, some fields
make modifications to the model on their own.
We'll have to detect which fields make modifications
and include them in the list of insert/update fields.
"""
model_instance = self.model(**kwargs)
insert_fields = []
update_fields = []
for field in model_instance._meta.local_concrete_fields:
has_default = field.default != NOT_PROVIDED
if field.name in kwargs or field.column in kwargs:
insert_fields.append(field)
update_fields.append(field)
continue # depends on [control=['if'], data=[]]
elif has_default:
insert_fields.append(field)
continue # depends on [control=['if'], data=[]]
# special handling for 'pk' which always refers to
# the primary key, so if we the user specifies `pk`
# instead of a concrete field, we have to handle that
if field.primary_key is True and 'pk' in kwargs:
insert_fields.append(field)
update_fields.append(field)
continue # depends on [control=['if'], data=[]]
if self._is_magical_field(model_instance, field, is_insert=True):
insert_fields.append(field) # depends on [control=['if'], data=[]]
if self._is_magical_field(model_instance, field, is_insert=False):
update_fields.append(field) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['field']]
return (insert_fields, update_fields) |
def json(self):
        ''' Parsed content of the request body when the
        ``Content-Type`` header is ``application/json``; ``None``
        otherwise. Only requests smaller than :attr:`MEMFILE_MAX` are
        processed to avoid memory exhaustion. '''
        header = self.environ.get('CONTENT_TYPE', '')
        main_type = header.lower().split(';')[0]
        if main_type == 'application/json':
            body = self._get_body_string()
            if body:
                return json_loads(body)
return None | def function[json, parameter[self]]:
constant[ If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. ]
variable[ctype] assign[=] call[call[call[call[name[self].environ.get, parameter[constant[CONTENT_TYPE], constant[]]].lower, parameter[]].split, parameter[constant[;]]]][constant[0]]
if compare[name[ctype] equal[==] constant[application/json]] begin[:]
variable[b] assign[=] call[name[self]._get_body_string, parameter[]]
if <ast.UnaryOp object at 0x7da20c6ab3a0> begin[:]
return[constant[None]]
return[call[name[json_loads], parameter[name[b]]]]
return[constant[None]] | keyword[def] identifier[json] ( identifier[self] ):
literal[string]
identifier[ctype] = identifier[self] . identifier[environ] . identifier[get] ( literal[string] , literal[string] ). identifier[lower] (). identifier[split] ( literal[string] )[ literal[int] ]
keyword[if] identifier[ctype] == literal[string] :
identifier[b] = identifier[self] . identifier[_get_body_string] ()
keyword[if] keyword[not] identifier[b] :
keyword[return] keyword[None]
keyword[return] identifier[json_loads] ( identifier[b] )
keyword[return] keyword[None] | def json(self):
""" If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. """
ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0]
if ctype == 'application/json':
b = self._get_body_string()
if not b:
return None # depends on [control=['if'], data=[]]
return json_loads(b) # depends on [control=['if'], data=[]]
return None |
def commit(self, container, repository=None, tag=None, message=None,
               author=None, changes=None, conf=None):
        """
        Commit a container to an image. Similar to the ``docker commit``
        command.
        Args:
            container (str): The image hash of the container
            repository (str): The repository to push the image to
            tag (str): The tag to push
            message (str): A commit message
            author (str): The name of the author
            changes (str): Dockerfile instructions to apply while committing
            conf (dict): The configuration for the container. See the
                `Engine API documentation
                <https://docs.docker.com/reference/api/docker_remote_api/>`_
                for full details.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        # The REST endpoint expects 'repo' and 'comment' as query-parameter
        # names; the friendlier kwarg names are remapped here.
        params = {
            'container': container,
            'repo': repository,
            'tag': tag,
            'comment': message,
            'author': author,
            'changes': changes
        }
        u = self._url("/commit")
        # The optional container config travels in the JSON request body;
        # the server's response is decoded as JSON.
        return self._result(
            self._post_json(u, data=conf, params=params), json=True
        ) | def function[commit, parameter[self, container, repository, tag, message, author, changes, conf]]:
constant[
Commit a container to an image. Similar to the ``docker commit``
command.
Args:
container (str): The image hash of the container
repository (str): The repository to push the image to
tag (str): The tag to push
message (str): A commit message
author (str): The name of the author
changes (str): Dockerfile instructions to apply while committing
conf (dict): The configuration for the container. See the
`Engine API documentation
<https://docs.docker.com/reference/api/docker_remote_api/>`_
for full details.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da18dc9bb50>, <ast.Constant object at 0x7da18dc9a710>, <ast.Constant object at 0x7da18dc99d80>, <ast.Constant object at 0x7da18dc9a800>, <ast.Constant object at 0x7da18dc9a770>, <ast.Constant object at 0x7da18dc984c0>], [<ast.Name object at 0x7da18dc9ad70>, <ast.Name object at 0x7da18dc9ada0>, <ast.Name object at 0x7da18dc9a3b0>, <ast.Name object at 0x7da18dc9b5e0>, <ast.Name object at 0x7da18dc9be50>, <ast.Name object at 0x7da18dc98e50>]]
variable[u] assign[=] call[name[self]._url, parameter[constant[/commit]]]
return[call[name[self]._result, parameter[call[name[self]._post_json, parameter[name[u]]]]]] | keyword[def] identifier[commit] ( identifier[self] , identifier[container] , identifier[repository] = keyword[None] , identifier[tag] = keyword[None] , identifier[message] = keyword[None] ,
identifier[author] = keyword[None] , identifier[changes] = keyword[None] , identifier[conf] = keyword[None] ):
literal[string]
identifier[params] ={
literal[string] : identifier[container] ,
literal[string] : identifier[repository] ,
literal[string] : identifier[tag] ,
literal[string] : identifier[message] ,
literal[string] : identifier[author] ,
literal[string] : identifier[changes]
}
identifier[u] = identifier[self] . identifier[_url] ( literal[string] )
keyword[return] identifier[self] . identifier[_result] (
identifier[self] . identifier[_post_json] ( identifier[u] , identifier[data] = identifier[conf] , identifier[params] = identifier[params] ), identifier[json] = keyword[True]
) | def commit(self, container, repository=None, tag=None, message=None, author=None, changes=None, conf=None):
"""
Commit a container to an image. Similar to the ``docker commit``
command.
Args:
container (str): The image hash of the container
repository (str): The repository to push the image to
tag (str): The tag to push
message (str): A commit message
author (str): The name of the author
changes (str): Dockerfile instructions to apply while committing
conf (dict): The configuration for the container. See the
`Engine API documentation
<https://docs.docker.com/reference/api/docker_remote_api/>`_
for full details.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {'container': container, 'repo': repository, 'tag': tag, 'comment': message, 'author': author, 'changes': changes}
u = self._url('/commit')
return self._result(self._post_json(u, data=conf, params=params), json=True) |
def get_account(self, address, token_type):
        """
        Get the state of an account for a given token type
        :param address: the account's address
        :param token_type: which token's account state to look up
        :return: whatever namedb_get_account() returns for this
            (address, token_type) pair
        """
        # Hand a fresh cursor to the namedb layer, which performs the
        # actual lookup.
        cur = self.db.cursor()
        return namedb_get_account(cur, address, token_type) | def function[get_account, parameter[self, address, token_type]]:
constant[
Get the state of an account for a given token type
]
variable[cur] assign[=] call[name[self].db.cursor, parameter[]]
return[call[name[namedb_get_account], parameter[name[cur], name[address], name[token_type]]]] | keyword[def] identifier[get_account] ( identifier[self] , identifier[address] , identifier[token_type] ):
literal[string]
identifier[cur] = identifier[self] . identifier[db] . identifier[cursor] ()
keyword[return] identifier[namedb_get_account] ( identifier[cur] , identifier[address] , identifier[token_type] ) | def get_account(self, address, token_type):
"""
Get the state of an account for a given token type
"""
cur = self.db.cursor()
return namedb_get_account(cur, address, token_type) |
def threadsafe_call(self, fn, *args, **kwargs):
        """Wrapper around `AsyncSession.threadsafe_call`."""
        def run_callback():
            # Never let an exception escape into the event loop;
            # report it with the traceback instead.
            try:
                fn(*args, **kwargs)
            except Exception:
                warn("error caught while excecuting async callback\n%s\n",
                     format_exc())
        def greenlet_wrapper():
            # Each callback runs on its own greenlet.
            greenlet.greenlet(run_callback).switch()
self._async_session.threadsafe_call(greenlet_wrapper) | def function[threadsafe_call, parameter[self, fn]]:
constant[Wrapper around `AsyncSession.threadsafe_call`.]
def function[handler, parameter[]]:
<ast.Try object at 0x7da1b22afcd0>
def function[greenlet_wrapper, parameter[]]:
variable[gr] assign[=] call[name[greenlet].greenlet, parameter[name[handler]]]
call[name[gr].switch, parameter[]]
call[name[self]._async_session.threadsafe_call, parameter[name[greenlet_wrapper]]] | keyword[def] identifier[threadsafe_call] ( identifier[self] , identifier[fn] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[def] identifier[handler] ():
keyword[try] :
identifier[fn] (* identifier[args] ,** identifier[kwargs] )
keyword[except] identifier[Exception] :
identifier[warn] ( literal[string] ,
identifier[format_exc] ())
keyword[def] identifier[greenlet_wrapper] ():
identifier[gr] = identifier[greenlet] . identifier[greenlet] ( identifier[handler] )
identifier[gr] . identifier[switch] ()
identifier[self] . identifier[_async_session] . identifier[threadsafe_call] ( identifier[greenlet_wrapper] ) | def threadsafe_call(self, fn, *args, **kwargs):
"""Wrapper around `AsyncSession.threadsafe_call`."""
def handler():
try:
fn(*args, **kwargs) # depends on [control=['try'], data=[]]
except Exception:
warn('error caught while excecuting async callback\n%s\n', format_exc()) # depends on [control=['except'], data=[]]
def greenlet_wrapper():
gr = greenlet.greenlet(handler)
gr.switch()
self._async_session.threadsafe_call(greenlet_wrapper) |
def sign(node=None):
    """Sign a node to grant it access.

    :param node: token or host_name of the node to sign; pass ``'all'``
        to sign every currently unsigned node.
    :return: dict mapping each signed node's token to its updated record.
    :raises Exception: if ``node`` is not given.
    """
    if not node:
        # Fixed the previously garbled error message
        # ("Specify either 'all' your specify token/host_name ...").
        raise Exception(
            "Specify either 'all' or the token/host_name of the node to sign.")
    # 'all' is shorthand for "every node still waiting to be signed".
    if node == 'all':
        node = 'unsigned'
    nodes = list_nodes(search=node)
    result = {}
    for token, info in nodes.items():
        info['access'] = 'node'
        info.save()
        result[token] = info
return result | def function[sign, parameter[node]]:
constant[ Sign a specific node to grant it access
you can specify "all" to sign all nodes
returns the nodes that were signed
]
if <ast.UnaryOp object at 0x7da18dc06ec0> begin[:]
<ast.Raise object at 0x7da18dc07f40>
if compare[name[node] equal[==] constant[all]] begin[:]
variable[node] assign[=] constant[unsigned]
variable[nodes] assign[=] call[name[list_nodes], parameter[]]
variable[result] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18fe90790>, <ast.Name object at 0x7da18fe93460>]]] in starred[call[name[nodes].items, parameter[]]] begin[:]
call[name[i]][constant[access]] assign[=] constant[node]
call[name[i].save, parameter[]]
call[name[result]][name[token]] assign[=] name[i]
return[name[result]] | keyword[def] identifier[sign] ( identifier[node] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[node] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[node] == literal[string] :
identifier[node] = literal[string]
identifier[nodes] = identifier[list_nodes] ( identifier[search] = identifier[node] )
identifier[result] ={}
keyword[for] identifier[token] , identifier[i] keyword[in] identifier[nodes] . identifier[items] ():
identifier[i] [ literal[string] ]= literal[string]
identifier[i] . identifier[save] ()
identifier[result] [ identifier[token] ]= identifier[i]
keyword[return] identifier[result] | def sign(node=None):
""" Sign a specific node to grant it access
you can specify "all" to sign all nodes
returns the nodes that were signed
"""
if not node:
raise Exception("Specify either 'all' your specify token/host_name of node to sign. ") # depends on [control=['if'], data=[]]
if node == 'all':
node = 'unsigned' # depends on [control=['if'], data=['node']]
nodes = list_nodes(search=node)
result = {}
for (token, i) in nodes.items():
i['access'] = 'node'
i.save()
result[token] = i # depends on [control=['for'], data=[]]
return result |
def _generate_examples(self, split_subsets, extraction_map):
    """Returns the examples in the raw (text) form.

    Args:
      split_subsets: iterable of subset names (keys of ``DATASET_MAP``)
        making up this split.
      extraction_map: dict mapping each subset name to the list of
        directories its downloads were extracted to.

    Yields:
      Example dicts from the chosen per-format sub-generator; examples
      with any empty field are dropped.
    """
    source, _ = self.builder_config.language_pair
    def _get_local_paths(ds, extract_dirs):
      # Join each dataset-relative path onto its extraction directory.
      rel_paths = ds.get_path(source)
      # A single extraction dir is shared across all relative paths.
      if len(extract_dirs) == 1:
        extract_dirs = extract_dirs * len(rel_paths)
      return [os.path.join(ex_dir, rel_path) if rel_path else ex_dir
              for ex_dir, rel_path in zip(extract_dirs, rel_paths)]
    for ss_name in split_subsets:
      logging.info("Generating examples from: %s", ss_name)
      ds = DATASET_MAP[ss_name]
      extract_dirs = extraction_map[ss_name]
      files = _get_local_paths(ds, extract_dirs)
      # Pick the parsing sub-generator from the subset name / file layout.
      if ss_name.startswith("czeng"):
        if ss_name.endswith("16pre"):
          sub_generator = functools.partial(
              _parse_tsv, language_pair=("en", "cs"))
        elif ss_name.endswith("17"):
          # czeng17 runs the czeng parser with an extra filter file.
          filter_path = _get_local_paths(
              _CZENG17_FILTER, extraction_map[_CZENG17_FILTER.name])[0]
          sub_generator = functools.partial(
              _parse_czeng, filter_path=filter_path)
        else:
          sub_generator = _parse_czeng
      elif len(files) == 2:
        # Two files: one per language side.
        if ss_name.endswith("_frde"):
          sub_generator = _parse_frde_bitext
        else:
          sub_generator = _parse_parallel_sentences
      elif len(files) == 1:
        fname = files[0]
        # Note: Due to formatting used by `download_manager`, the file
        # extension may not be at the end of the file path.
        if ".tsv" in fname:
          sub_generator = _parse_tsv
        elif ss_name.startswith("newscommentary_v14"):
          sub_generator = functools.partial(
              _parse_tsv, language_pair=self.builder_config.language_pair)
        elif "tmx" in fname:
          sub_generator = _parse_tmx
        elif ss_name.startswith("wikiheadlines"):
          sub_generator = _parse_wikiheadlines
        else:
          raise ValueError("Unsupported file format: %s" % fname)
      else:
        raise ValueError("Invalid number of files: %d" % len(files))
      for ex in sub_generator(*files):
        # Skip examples where any side is empty.
        if not all(ex.values()):
          continue
        # TODO(adarob): Add subset feature.
        # ex["subset"] = subset
        yield ex | def function[_generate_examples, parameter[self, split_subsets, extraction_map]]:
constant[Returns the examples in the raw (text) form.]
<ast.Tuple object at 0x7da1b20c8c10> assign[=] name[self].builder_config.language_pair
def function[_get_local_paths, parameter[ds, extract_dirs]]:
variable[rel_paths] assign[=] call[name[ds].get_path, parameter[name[source]]]
if compare[call[name[len], parameter[name[extract_dirs]]] equal[==] constant[1]] begin[:]
variable[extract_dirs] assign[=] binary_operation[name[extract_dirs] * call[name[len], parameter[name[rel_paths]]]]
return[<ast.ListComp object at 0x7da1b20cb8e0>]
for taget[name[ss_name]] in starred[name[split_subsets]] begin[:]
call[name[logging].info, parameter[constant[Generating examples from: %s], name[ss_name]]]
variable[ds] assign[=] call[name[DATASET_MAP]][name[ss_name]]
variable[extract_dirs] assign[=] call[name[extraction_map]][name[ss_name]]
variable[files] assign[=] call[name[_get_local_paths], parameter[name[ds], name[extract_dirs]]]
if call[name[ss_name].startswith, parameter[constant[czeng]]] begin[:]
if call[name[ss_name].endswith, parameter[constant[16pre]]] begin[:]
variable[sub_generator] assign[=] call[name[functools].partial, parameter[name[_parse_tsv]]]
for taget[name[ex]] in starred[call[name[sub_generator], parameter[<ast.Starred object at 0x7da1b20cb310>]]] begin[:]
if <ast.UnaryOp object at 0x7da1b20cb340> begin[:]
continue
<ast.Yield object at 0x7da1b20c9000> | keyword[def] identifier[_generate_examples] ( identifier[self] , identifier[split_subsets] , identifier[extraction_map] ):
literal[string]
identifier[source] , identifier[_] = identifier[self] . identifier[builder_config] . identifier[language_pair]
keyword[def] identifier[_get_local_paths] ( identifier[ds] , identifier[extract_dirs] ):
identifier[rel_paths] = identifier[ds] . identifier[get_path] ( identifier[source] )
keyword[if] identifier[len] ( identifier[extract_dirs] )== literal[int] :
identifier[extract_dirs] = identifier[extract_dirs] * identifier[len] ( identifier[rel_paths] )
keyword[return] [ identifier[os] . identifier[path] . identifier[join] ( identifier[ex_dir] , identifier[rel_path] ) keyword[if] identifier[rel_path] keyword[else] identifier[ex_dir]
keyword[for] identifier[ex_dir] , identifier[rel_path] keyword[in] identifier[zip] ( identifier[extract_dirs] , identifier[rel_paths] )]
keyword[for] identifier[ss_name] keyword[in] identifier[split_subsets] :
identifier[logging] . identifier[info] ( literal[string] , identifier[ss_name] )
identifier[ds] = identifier[DATASET_MAP] [ identifier[ss_name] ]
identifier[extract_dirs] = identifier[extraction_map] [ identifier[ss_name] ]
identifier[files] = identifier[_get_local_paths] ( identifier[ds] , identifier[extract_dirs] )
keyword[if] identifier[ss_name] . identifier[startswith] ( literal[string] ):
keyword[if] identifier[ss_name] . identifier[endswith] ( literal[string] ):
identifier[sub_generator] = identifier[functools] . identifier[partial] (
identifier[_parse_tsv] , identifier[language_pair] =( literal[string] , literal[string] ))
keyword[elif] identifier[ss_name] . identifier[endswith] ( literal[string] ):
identifier[filter_path] = identifier[_get_local_paths] (
identifier[_CZENG17_FILTER] , identifier[extraction_map] [ identifier[_CZENG17_FILTER] . identifier[name] ])[ literal[int] ]
identifier[sub_generator] = identifier[functools] . identifier[partial] (
identifier[_parse_czeng] , identifier[filter_path] = identifier[filter_path] )
keyword[else] :
identifier[sub_generator] = identifier[_parse_czeng]
keyword[elif] identifier[len] ( identifier[files] )== literal[int] :
keyword[if] identifier[ss_name] . identifier[endswith] ( literal[string] ):
identifier[sub_generator] = identifier[_parse_frde_bitext]
keyword[else] :
identifier[sub_generator] = identifier[_parse_parallel_sentences]
keyword[elif] identifier[len] ( identifier[files] )== literal[int] :
identifier[fname] = identifier[files] [ literal[int] ]
keyword[if] literal[string] keyword[in] identifier[fname] :
identifier[sub_generator] = identifier[_parse_tsv]
keyword[elif] identifier[ss_name] . identifier[startswith] ( literal[string] ):
identifier[sub_generator] = identifier[functools] . identifier[partial] (
identifier[_parse_tsv] , identifier[language_pair] = identifier[self] . identifier[builder_config] . identifier[language_pair] )
keyword[elif] literal[string] keyword[in] identifier[fname] :
identifier[sub_generator] = identifier[_parse_tmx]
keyword[elif] identifier[ss_name] . identifier[startswith] ( literal[string] ):
identifier[sub_generator] = identifier[_parse_wikiheadlines]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[fname] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[len] ( identifier[files] ))
keyword[for] identifier[ex] keyword[in] identifier[sub_generator] (* identifier[files] ):
keyword[if] keyword[not] identifier[all] ( identifier[ex] . identifier[values] ()):
keyword[continue]
keyword[yield] identifier[ex] | def _generate_examples(self, split_subsets, extraction_map):
"""Returns the examples in the raw (text) form."""
(source, _) = self.builder_config.language_pair
def _get_local_paths(ds, extract_dirs):
rel_paths = ds.get_path(source)
if len(extract_dirs) == 1:
extract_dirs = extract_dirs * len(rel_paths) # depends on [control=['if'], data=[]]
return [os.path.join(ex_dir, rel_path) if rel_path else ex_dir for (ex_dir, rel_path) in zip(extract_dirs, rel_paths)]
for ss_name in split_subsets:
logging.info('Generating examples from: %s', ss_name)
ds = DATASET_MAP[ss_name]
extract_dirs = extraction_map[ss_name]
files = _get_local_paths(ds, extract_dirs)
if ss_name.startswith('czeng'):
if ss_name.endswith('16pre'):
sub_generator = functools.partial(_parse_tsv, language_pair=('en', 'cs')) # depends on [control=['if'], data=[]]
elif ss_name.endswith('17'):
filter_path = _get_local_paths(_CZENG17_FILTER, extraction_map[_CZENG17_FILTER.name])[0]
sub_generator = functools.partial(_parse_czeng, filter_path=filter_path) # depends on [control=['if'], data=[]]
else:
sub_generator = _parse_czeng # depends on [control=['if'], data=[]]
elif len(files) == 2:
if ss_name.endswith('_frde'):
sub_generator = _parse_frde_bitext # depends on [control=['if'], data=[]]
else:
sub_generator = _parse_parallel_sentences # depends on [control=['if'], data=[]]
elif len(files) == 1:
fname = files[0]
# Note: Due to formatting used by `download_manager`, the file
# extension may not be at the end of the file path.
if '.tsv' in fname:
sub_generator = _parse_tsv # depends on [control=['if'], data=[]]
elif ss_name.startswith('newscommentary_v14'):
sub_generator = functools.partial(_parse_tsv, language_pair=self.builder_config.language_pair) # depends on [control=['if'], data=[]]
elif 'tmx' in fname:
sub_generator = _parse_tmx # depends on [control=['if'], data=[]]
elif ss_name.startswith('wikiheadlines'):
sub_generator = _parse_wikiheadlines # depends on [control=['if'], data=[]]
else:
raise ValueError('Unsupported file format: %s' % fname) # depends on [control=['if'], data=[]]
else:
raise ValueError('Invalid number of files: %d' % len(files))
for ex in sub_generator(*files):
if not all(ex.values()):
continue # depends on [control=['if'], data=[]]
# TODO(adarob): Add subset feature.
# ex["subset"] = subset
yield ex # depends on [control=['for'], data=['ex']] # depends on [control=['for'], data=['ss_name']] |
def get_all_file_report_pages(self, query):
    """ Get File Report (All Pages).

    Follows the next-page cursor returned by the Intelligence search
    endpoint until it is exhausted, collecting every page's response.

    :param query: a VirusTotal Intelligence search string in accordance
        with the file search documentation.
    :return: dict with a ``results`` key holding all JSON responses
        appended together.
    """
    responses = []
    # Bug fix: the first call previously passed ``self`` twice
    # (``self.get_hashes_from_search(self, query)``), shifting every
    # argument one position to the right.
    next_page, response = self.get_hashes_from_search(query)
    responses.append(_return_response_and_status_code(response))
    while next_page:
        next_page, response = self.get_hashes_from_search(query, next_page)
        responses.append(_return_response_and_status_code(response))
return dict(results=responses) | def function[get_all_file_report_pages, parameter[self, query]]:
constant[ Get File Report (All Pages).
:param query: a VirusTotal Intelligence search string in accordance with the file search documentation.
:return: All JSON responses appended together.
]
variable[responses] assign[=] list[[]]
<ast.Tuple object at 0x7da20c7ca5c0> assign[=] call[name[self].get_hashes_from_search, parameter[name[self], name[query]]]
call[name[responses].append, parameter[call[name[_return_response_and_status_code], parameter[name[response]]]]]
while name[next_page] begin[:]
<ast.Tuple object at 0x7da18f09e140> assign[=] call[name[self].get_hashes_from_search, parameter[name[query], name[next_page]]]
call[name[responses].append, parameter[call[name[_return_response_and_status_code], parameter[name[response]]]]]
return[call[name[dict], parameter[]]] | keyword[def] identifier[get_all_file_report_pages] ( identifier[self] , identifier[query] ):
literal[string]
identifier[responses] =[]
identifier[next_page] , identifier[response] = identifier[self] . identifier[get_hashes_from_search] ( identifier[self] , identifier[query] )
identifier[responses] . identifier[append] ( identifier[_return_response_and_status_code] ( identifier[response] ))
keyword[while] identifier[next_page] :
identifier[next_page] , identifier[response] = identifier[self] . identifier[get_hashes_from_search] ( identifier[query] , identifier[next_page] )
identifier[responses] . identifier[append] ( identifier[_return_response_and_status_code] ( identifier[response] ))
keyword[return] identifier[dict] ( identifier[results] = identifier[responses] ) | def get_all_file_report_pages(self, query):
""" Get File Report (All Pages).
:param query: a VirusTotal Intelligence search string in accordance with the file search documentation.
:return: All JSON responses appended together.
"""
responses = []
(next_page, response) = self.get_hashes_from_search(self, query)
responses.append(_return_response_and_status_code(response))
while next_page:
(next_page, response) = self.get_hashes_from_search(query, next_page)
responses.append(_return_response_and_status_code(response)) # depends on [control=['while'], data=[]]
return dict(results=responses) |
def decrypt_attributes(self, attribute_statement):
        """
        Decrypts possible encrypted attributes and adds the decrypts to the
        list of attributes.
        :param attribute_statement: A SAML.AttributeStatement which might
            contain both encrypted attributes and attributes.
        """
        # _node_name = [
        #     "urn:oasis:names:tc:SAML:2.0:assertion:EncryptedData",
        #     "urn:oasis:names:tc:SAML:2.0:assertion:EncryptedAttribute"]
        for encattr in attribute_statement.encrypted_attribute:
            if not encattr.encrypted_key:
                # No EncryptedKey present: decrypt just the EncryptedData
                # payload and parse the plaintext as a single Attribute.
                _decr = self.sec.decrypt(encattr.encrypted_data)
                _attr = attribute_from_string(_decr)
                attribute_statement.attribute.append(_attr)
            else:
                # EncryptedKey present: decrypt the whole
                # EncryptedAttribute element, then collect every
                # <Attribute> extension element it carries.
                _decr = self.sec.decrypt(encattr)
                enc_attr = encrypted_attribute_from_string(_decr)
                attrlist = enc_attr.extensions_as_elements("Attribute", saml)
                attribute_statement.attribute.extend(attrlist) | def function[decrypt_attributes, parameter[self, attribute_statement]]:
constant[
Decrypts possible encrypted attributes and adds the decrypts to the
list of attributes.
:param attribute_statement: A SAML.AttributeStatement which might
contain both encrypted attributes and attributes.
]
for taget[name[encattr]] in starred[name[attribute_statement].encrypted_attribute] begin[:]
if <ast.UnaryOp object at 0x7da1b1d55c90> begin[:]
variable[_decr] assign[=] call[name[self].sec.decrypt, parameter[name[encattr].encrypted_data]]
variable[_attr] assign[=] call[name[attribute_from_string], parameter[name[_decr]]]
call[name[attribute_statement].attribute.append, parameter[name[_attr]]] | keyword[def] identifier[decrypt_attributes] ( identifier[self] , identifier[attribute_statement] ):
literal[string]
keyword[for] identifier[encattr] keyword[in] identifier[attribute_statement] . identifier[encrypted_attribute] :
keyword[if] keyword[not] identifier[encattr] . identifier[encrypted_key] :
identifier[_decr] = identifier[self] . identifier[sec] . identifier[decrypt] ( identifier[encattr] . identifier[encrypted_data] )
identifier[_attr] = identifier[attribute_from_string] ( identifier[_decr] )
identifier[attribute_statement] . identifier[attribute] . identifier[append] ( identifier[_attr] )
keyword[else] :
identifier[_decr] = identifier[self] . identifier[sec] . identifier[decrypt] ( identifier[encattr] )
identifier[enc_attr] = identifier[encrypted_attribute_from_string] ( identifier[_decr] )
identifier[attrlist] = identifier[enc_attr] . identifier[extensions_as_elements] ( literal[string] , identifier[saml] )
identifier[attribute_statement] . identifier[attribute] . identifier[extend] ( identifier[attrlist] ) | def decrypt_attributes(self, attribute_statement):
"""
Decrypts possible encrypted attributes and adds the decrypts to the
list of attributes.
:param attribute_statement: A SAML.AttributeStatement which might
contain both encrypted attributes and attributes.
"""
# _node_name = [
# "urn:oasis:names:tc:SAML:2.0:assertion:EncryptedData",
# "urn:oasis:names:tc:SAML:2.0:assertion:EncryptedAttribute"]
for encattr in attribute_statement.encrypted_attribute:
if not encattr.encrypted_key:
_decr = self.sec.decrypt(encattr.encrypted_data)
_attr = attribute_from_string(_decr)
attribute_statement.attribute.append(_attr) # depends on [control=['if'], data=[]]
else:
_decr = self.sec.decrypt(encattr)
enc_attr = encrypted_attribute_from_string(_decr)
attrlist = enc_attr.extensions_as_elements('Attribute', saml)
attribute_statement.attribute.extend(attrlist) # depends on [control=['for'], data=['encattr']] |
def _dens(self, R, z, phi=0, t=0):
        """
        NAME:
           _dens
        PURPOSE:
           Evaluate the density. If not given, the density is computed using the Poisson equation
           from the first and second derivatives of the potential (if all are implemented).
        INPUT:
            :param R: galactocentric cylindrical radius (must be scalar, not array)
            :param z: vertical height (must be scalar, not array)
            :param phi: azimuth (must be scalar, not array)
            :param t: time (must be scalar, not array)
        OUTPUT:
            :return: the density
        HISTORY:
            2017-05-12 Jack Hong (UBC)
        """
        # Pattern-frame azimuthal offset at time t (pattern speed _omega).
        g = self._gamma(R, phi - self._omega * t)
        # Radial auxiliary functions of the model, evaluated at R.
        Ks = self._K(R)
        Bs = self._B(R)
        Ds = self._D(R)
        ng = self._ns * g
        # Dimensionless vertical coordinate and its hyperbolic factors,
        # reused several times below.
        zKB = z * Ks / Bs
        sech_zKB = 1 / np.cosh(zKB)
        tanh_zKB = np.tanh(zKB)
        log_sech_zKB = np.log(sech_zKB)
        # numpy array of E as defined in the appendix of the paper.
        E = 1 + Ks * self._H / Ds * (1 - 0.3 / (1 + 0.3 * Ks * self._H) ** 2) - R / self._Rs \
            - (Ks * self._H) * (1 + 0.8 * Ks * self._H) * log_sech_zKB \
            - 0.4 * (Ks * self._H) ** 2 * zKB * tanh_zKB
        # numpy array of rE' as defined in the appendix of the paper.
        rE = -Ks * self._H / Ds * (1 - 0.3 * (1 - 0.3 * Ks * self._H) / (1 + 0.3 * Ks * self._H) ** 3) \
             + (Ks * self._H / Ds * (1 - 0.3 / (1 + 0.3 * Ks * self._H) ** 2)) - R / self._Rs \
             + Ks * self._H * (1 + 1.6 * Ks * self._H) * log_sech_zKB \
             - (0.4 * (Ks * self._H) ** 2 * zKB * sech_zKB) ** 2 / Bs \
             + 1.2 * (Ks * self._H) ** 2 * zKB * tanh_zKB
        # Sum the density contribution of each harmonic term (_Cs).
        return np.sum(self._Cs * self._rho0 * (self._H / (Ds * R)) * np.exp(-(R - self._r_ref) / self._Rs)
                      * sech_zKB**Bs * (np.cos(ng) * (Ks * R * (Bs + 1) / Bs * sech_zKB**2
                                                      - 1 / Ks / R * (E**2 + rE))
                                        - 2 * np.sin(ng)* E * np.cos(self._alpha))) | def function[_dens, parameter[self, R, z, phi, t]]:
constant[
NAME:
_dens
PURPOSE:
Evaluate the density. If not given, the density is computed using the Poisson equation
from the first and second derivatives of the potential (if all are implemented).
INPUT:
:param R: galactocentric cylindrical radius (must be scalar, not array)
:param z: vertical height (must be scalar, not array)
:param phi: azimuth (must be scalar, not array)
:param t: time (must be scalar, not array)
OUTPUT:
:return: the density
HISTORY:
2017-05-12 Jack Hong (UBC)
]
variable[g] assign[=] call[name[self]._gamma, parameter[name[R], binary_operation[name[phi] - binary_operation[name[self]._omega * name[t]]]]]
variable[Ks] assign[=] call[name[self]._K, parameter[name[R]]]
variable[Bs] assign[=] call[name[self]._B, parameter[name[R]]]
variable[Ds] assign[=] call[name[self]._D, parameter[name[R]]]
variable[ng] assign[=] binary_operation[name[self]._ns * name[g]]
variable[zKB] assign[=] binary_operation[binary_operation[name[z] * name[Ks]] / name[Bs]]
variable[sech_zKB] assign[=] binary_operation[constant[1] / call[name[np].cosh, parameter[name[zKB]]]]
variable[tanh_zKB] assign[=] call[name[np].tanh, parameter[name[zKB]]]
variable[log_sech_zKB] assign[=] call[name[np].log, parameter[name[sech_zKB]]]
variable[E] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[constant[1] + binary_operation[binary_operation[binary_operation[name[Ks] * name[self]._H] / name[Ds]] * binary_operation[constant[1] - binary_operation[constant[0.3] / binary_operation[binary_operation[constant[1] + binary_operation[binary_operation[constant[0.3] * name[Ks]] * name[self]._H]] ** constant[2]]]]]] - binary_operation[name[R] / name[self]._Rs]] - binary_operation[binary_operation[binary_operation[name[Ks] * name[self]._H] * binary_operation[constant[1] + binary_operation[binary_operation[constant[0.8] * name[Ks]] * name[self]._H]]] * name[log_sech_zKB]]] - binary_operation[binary_operation[binary_operation[constant[0.4] * binary_operation[binary_operation[name[Ks] * name[self]._H] ** constant[2]]] * name[zKB]] * name[tanh_zKB]]]
variable[rE] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da18fe90220> * name[self]._H] / name[Ds]] * binary_operation[constant[1] - binary_operation[binary_operation[constant[0.3] * binary_operation[constant[1] - binary_operation[binary_operation[constant[0.3] * name[Ks]] * name[self]._H]]] / binary_operation[binary_operation[constant[1] + binary_operation[binary_operation[constant[0.3] * name[Ks]] * name[self]._H]] ** constant[3]]]]] + binary_operation[binary_operation[binary_operation[name[Ks] * name[self]._H] / name[Ds]] * binary_operation[constant[1] - binary_operation[constant[0.3] / binary_operation[binary_operation[constant[1] + binary_operation[binary_operation[constant[0.3] * name[Ks]] * name[self]._H]] ** constant[2]]]]]] - binary_operation[name[R] / name[self]._Rs]] + binary_operation[binary_operation[binary_operation[name[Ks] * name[self]._H] * binary_operation[constant[1] + binary_operation[binary_operation[constant[1.6] * name[Ks]] * name[self]._H]]] * name[log_sech_zKB]]] - binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[0.4] * binary_operation[binary_operation[name[Ks] * name[self]._H] ** constant[2]]] * name[zKB]] * name[sech_zKB]] ** constant[2]] / name[Bs]]] + binary_operation[binary_operation[binary_operation[constant[1.2] * binary_operation[binary_operation[name[Ks] * name[self]._H] ** constant[2]]] * name[zKB]] * name[tanh_zKB]]]
return[call[name[np].sum, parameter[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[self]._Cs * name[self]._rho0] * binary_operation[name[self]._H / binary_operation[name[Ds] * name[R]]]] * call[name[np].exp, parameter[binary_operation[<ast.UnaryOp object at 0x7da1b0cb4be0> / name[self]._Rs]]]] * binary_operation[name[sech_zKB] ** name[Bs]]] * binary_operation[binary_operation[call[name[np].cos, parameter[name[ng]]] * binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[Ks] * name[R]] * binary_operation[name[Bs] + constant[1]]] / name[Bs]] * binary_operation[name[sech_zKB] ** constant[2]]] - binary_operation[binary_operation[binary_operation[constant[1] / name[Ks]] / name[R]] * binary_operation[binary_operation[name[E] ** constant[2]] + name[rE]]]]] - binary_operation[binary_operation[binary_operation[constant[2] * call[name[np].sin, parameter[name[ng]]]] * name[E]] * call[name[np].cos, parameter[name[self]._alpha]]]]]]]] | keyword[def] identifier[_dens] ( identifier[self] , identifier[R] , identifier[z] , identifier[phi] = literal[int] , identifier[t] = literal[int] ):
literal[string]
identifier[g] = identifier[self] . identifier[_gamma] ( identifier[R] , identifier[phi] - identifier[self] . identifier[_omega] * identifier[t] )
identifier[Ks] = identifier[self] . identifier[_K] ( identifier[R] )
identifier[Bs] = identifier[self] . identifier[_B] ( identifier[R] )
identifier[Ds] = identifier[self] . identifier[_D] ( identifier[R] )
identifier[ng] = identifier[self] . identifier[_ns] * identifier[g]
identifier[zKB] = identifier[z] * identifier[Ks] / identifier[Bs]
identifier[sech_zKB] = literal[int] / identifier[np] . identifier[cosh] ( identifier[zKB] )
identifier[tanh_zKB] = identifier[np] . identifier[tanh] ( identifier[zKB] )
identifier[log_sech_zKB] = identifier[np] . identifier[log] ( identifier[sech_zKB] )
identifier[E] = literal[int] + identifier[Ks] * identifier[self] . identifier[_H] / identifier[Ds] *( literal[int] - literal[int] /( literal[int] + literal[int] * identifier[Ks] * identifier[self] . identifier[_H] )** literal[int] )- identifier[R] / identifier[self] . identifier[_Rs] -( identifier[Ks] * identifier[self] . identifier[_H] )*( literal[int] + literal[int] * identifier[Ks] * identifier[self] . identifier[_H] )* identifier[log_sech_zKB] - literal[int] *( identifier[Ks] * identifier[self] . identifier[_H] )** literal[int] * identifier[zKB] * identifier[tanh_zKB]
identifier[rE] =- identifier[Ks] * identifier[self] . identifier[_H] / identifier[Ds] *( literal[int] - literal[int] *( literal[int] - literal[int] * identifier[Ks] * identifier[self] . identifier[_H] )/( literal[int] + literal[int] * identifier[Ks] * identifier[self] . identifier[_H] )** literal[int] )+( identifier[Ks] * identifier[self] . identifier[_H] / identifier[Ds] *( literal[int] - literal[int] /( literal[int] + literal[int] * identifier[Ks] * identifier[self] . identifier[_H] )** literal[int] ))- identifier[R] / identifier[self] . identifier[_Rs] + identifier[Ks] * identifier[self] . identifier[_H] *( literal[int] + literal[int] * identifier[Ks] * identifier[self] . identifier[_H] )* identifier[log_sech_zKB] -( literal[int] *( identifier[Ks] * identifier[self] . identifier[_H] )** literal[int] * identifier[zKB] * identifier[sech_zKB] )** literal[int] / identifier[Bs] + literal[int] *( identifier[Ks] * identifier[self] . identifier[_H] )** literal[int] * identifier[zKB] * identifier[tanh_zKB]
keyword[return] identifier[np] . identifier[sum] ( identifier[self] . identifier[_Cs] * identifier[self] . identifier[_rho0] *( identifier[self] . identifier[_H] /( identifier[Ds] * identifier[R] ))* identifier[np] . identifier[exp] (-( identifier[R] - identifier[self] . identifier[_r_ref] )/ identifier[self] . identifier[_Rs] )
* identifier[sech_zKB] ** identifier[Bs] *( identifier[np] . identifier[cos] ( identifier[ng] )*( identifier[Ks] * identifier[R] *( identifier[Bs] + literal[int] )/ identifier[Bs] * identifier[sech_zKB] ** literal[int]
- literal[int] / identifier[Ks] / identifier[R] *( identifier[E] ** literal[int] + identifier[rE] ))
- literal[int] * identifier[np] . identifier[sin] ( identifier[ng] )* identifier[E] * identifier[np] . identifier[cos] ( identifier[self] . identifier[_alpha] ))) | def _dens(self, R, z, phi=0, t=0):
"""
NAME:
_dens
PURPOSE:
Evaluate the density. If not given, the density is computed using the Poisson equation
from the first and second derivatives of the potential (if all are implemented).
INPUT:
:param R: galactocentric cylindrical radius (must be scalar, not array)
:param z: vertical height (must be scalar, not array)
:param phi: azimuth (must be scalar, not array)
:param t: time (must be scalar, not array)
OUTPUT:
:return: the density
HISTORY:
2017-05-12 Jack Hong (UBC)
"""
g = self._gamma(R, phi - self._omega * t)
Ks = self._K(R)
Bs = self._B(R)
Ds = self._D(R)
ng = self._ns * g
zKB = z * Ks / Bs
sech_zKB = 1 / np.cosh(zKB)
tanh_zKB = np.tanh(zKB)
log_sech_zKB = np.log(sech_zKB)
# numpy of E as defined in the appendix of the paper.
E = 1 + Ks * self._H / Ds * (1 - 0.3 / (1 + 0.3 * Ks * self._H) ** 2) - R / self._Rs - Ks * self._H * (1 + 0.8 * Ks * self._H) * log_sech_zKB - 0.4 * (Ks * self._H) ** 2 * zKB * tanh_zKB
# numpy array of rE' as define in the appendix of the paper.
rE = -Ks * self._H / Ds * (1 - 0.3 * (1 - 0.3 * Ks * self._H) / (1 + 0.3 * Ks * self._H) ** 3) + Ks * self._H / Ds * (1 - 0.3 / (1 + 0.3 * Ks * self._H) ** 2) - R / self._Rs + Ks * self._H * (1 + 1.6 * Ks * self._H) * log_sech_zKB - (0.4 * (Ks * self._H) ** 2 * zKB * sech_zKB) ** 2 / Bs + 1.2 * (Ks * self._H) ** 2 * zKB * tanh_zKB
return np.sum(self._Cs * self._rho0 * (self._H / (Ds * R)) * np.exp(-(R - self._r_ref) / self._Rs) * sech_zKB ** Bs * (np.cos(ng) * (Ks * R * (Bs + 1) / Bs * sech_zKB ** 2 - 1 / Ks / R * (E ** 2 + rE)) - 2 * np.sin(ng) * E * np.cos(self._alpha))) |
def fetchEC2InstanceDict(regionNickname=None):
    """
    Fetch EC2 instance types for a region via the AWS pricing API.
    See: https://aws.amazon.com/blogs/aws/new-aws-price-list-api/
    Falls back to the bundled, pre-generated instance lists when the API
    request fails or yields nothing.
    :return: A dict of InstanceType objects, where the key is the string:
             aws instance name (example: 't2.micro'), and the value is an
             InstanceType object representing that aws instance name.
    """
    ec2Source = 'https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonEC2/current/index.json'
    if regionNickname is None:
        regionNickname = 'us-west-2'
    # The pricing JSON keys regions by their verbose names.
    region = EC2Regions[regionNickname]
    # Ask the pricing API for the latest instance types/prices/specs.
    instanceTypes = []
    response = requests.get(ec2Source)
    if response.ok:
        instanceTypes = parseEC2Json2List(jsontext=response.text, region=region)
    if not instanceTypes:
        # API unavailable or empty: use the checked-in generated lists.
        from toil.lib import generatedEC2Lists as defaultEC2
        instanceTypes = defaultEC2.ec2InstancesByRegion[regionNickname]
    return {instance.name: instance for instance in instanceTypes}
constant[
Fetches EC2 instances types by region programmatically using the AWS pricing API.
See: https://aws.amazon.com/blogs/aws/new-aws-price-list-api/
:return: A dict of InstanceType objects, where the key is the string:
aws instance name (example: 't2.micro'), and the value is an
InstanceType object representing that aws instance name.
]
variable[ec2Source] assign[=] constant[https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonEC2/current/index.json]
if compare[name[regionNickname] is constant[None]] begin[:]
variable[regionNickname] assign[=] constant[us-west-2]
variable[region] assign[=] call[name[EC2Regions]][name[regionNickname]]
variable[ec2InstanceList] assign[=] list[[]]
variable[response] assign[=] call[name[requests].get, parameter[name[ec2Source]]]
if name[response].ok begin[:]
variable[ec2InstanceList] assign[=] call[name[parseEC2Json2List], parameter[]]
if name[ec2InstanceList] begin[:]
return[call[name[dict], parameter[<ast.GeneratorExp object at 0x7da2044c2a40>]]] | keyword[def] identifier[fetchEC2InstanceDict] ( identifier[regionNickname] = keyword[None] ):
literal[string]
identifier[ec2Source] = literal[string]
keyword[if] identifier[regionNickname] keyword[is] keyword[None] :
identifier[regionNickname] = literal[string]
identifier[region] = identifier[EC2Regions] [ identifier[regionNickname] ]
identifier[ec2InstanceList] =[]
identifier[response] = identifier[requests] . identifier[get] ( identifier[ec2Source] )
keyword[if] identifier[response] . identifier[ok] :
identifier[ec2InstanceList] = identifier[parseEC2Json2List] ( identifier[jsontext] = identifier[response] . identifier[text] , identifier[region] = identifier[region] )
keyword[if] identifier[ec2InstanceList] :
keyword[return] identifier[dict] (( identifier[_] . identifier[name] , identifier[_] ) keyword[for] identifier[_] keyword[in] identifier[ec2InstanceList] )
keyword[else] :
keyword[from] identifier[toil] . identifier[lib] keyword[import] identifier[generatedEC2Lists] keyword[as] identifier[defaultEC2]
keyword[return] identifier[dict] (( identifier[_] . identifier[name] , identifier[_] ) keyword[for] identifier[_] keyword[in] identifier[defaultEC2] . identifier[ec2InstancesByRegion] [ identifier[regionNickname] ]) | def fetchEC2InstanceDict(regionNickname=None):
"""
Fetches EC2 instances types by region programmatically using the AWS pricing API.
See: https://aws.amazon.com/blogs/aws/new-aws-price-list-api/
:return: A dict of InstanceType objects, where the key is the string:
aws instance name (example: 't2.micro'), and the value is an
InstanceType object representing that aws instance name.
"""
ec2Source = 'https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonEC2/current/index.json'
if regionNickname is None:
regionNickname = 'us-west-2' # depends on [control=['if'], data=['regionNickname']]
region = EC2Regions[regionNickname] # JSON uses verbose region names as keys
ec2InstanceList = []
# summon the API to grab the latest instance types/prices/specs
response = requests.get(ec2Source)
if response.ok:
ec2InstanceList = parseEC2Json2List(jsontext=response.text, region=region) # depends on [control=['if'], data=[]]
if ec2InstanceList:
return dict(((_.name, _) for _ in ec2InstanceList)) # depends on [control=['if'], data=[]]
else:
from toil.lib import generatedEC2Lists as defaultEC2
return dict(((_.name, _) for _ in defaultEC2.ec2InstancesByRegion[regionNickname])) |
def py_to_weld_type(self, obj):
        """
        Infer the Weld type corresponding to a Python object.

        Args:
            obj: A numpy ndarray or a str. For an ndarray, the scalar Weld
                type is derived from the dtype and wrapped in one WeldVec
                per array dimension; a str maps to WeldVec(WeldChar()).

        Returns:
            The inferred Weld type object.

        Raises:
            Exception: If the object is neither an ndarray nor a str.
        """
        if isinstance(obj, np.ndarray):
            # Map recognized numpy dtypes onto Weld scalar types; anything
            # unrecognized falls back to a vector of chars.
            scalar_types = {
                'int16': WeldInt16,
                'int32': WeldInt,
                'int64': WeldLong,
                'float32': WeldFloat,
                'float64': WeldDouble,
                'bool': WeldBit,
            }
            dtype = str(obj.dtype)
            if dtype in scalar_types:
                base = scalar_types[dtype]()
            else:
                base = WeldVec(WeldChar())  # TODO: Fix this fallback
            # Wrap in one WeldVec per array dimension.
            # range (not the Python-2-only xrange) works on both 2 and 3.
            for _ in range(obj.ndim):
                base = WeldVec(base)
        elif isinstance(obj, str):
            base = WeldVec(WeldChar())
        else:
            raise Exception("Invalid object type: unable to infer NVL type")
        return base
constant[Summary
Args:
obj (TYPE): Description
Returns:
TYPE: Description
Raises:
Exception: Description
]
if call[name[isinstance], parameter[name[obj], name[np].ndarray]] begin[:]
variable[dtype] assign[=] call[name[str], parameter[name[obj].dtype]]
if compare[name[dtype] equal[==] constant[int16]] begin[:]
variable[base] assign[=] call[name[WeldInt16], parameter[]]
for taget[name[i]] in starred[call[name[xrange], parameter[name[obj].ndim]]] begin[:]
variable[base] assign[=] call[name[WeldVec], parameter[name[base]]]
return[name[base]] | keyword[def] identifier[py_to_weld_type] ( identifier[self] , identifier[obj] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[np] . identifier[ndarray] ):
identifier[dtype] = identifier[str] ( identifier[obj] . identifier[dtype] )
keyword[if] identifier[dtype] == literal[string] :
identifier[base] = identifier[WeldInt16] ()
keyword[elif] identifier[dtype] == literal[string] :
identifier[base] = identifier[WeldInt] ()
keyword[elif] identifier[dtype] == literal[string] :
identifier[base] = identifier[WeldLong] ()
keyword[elif] identifier[dtype] == literal[string] :
identifier[base] = identifier[WeldFloat] ()
keyword[elif] identifier[dtype] == literal[string] :
identifier[base] = identifier[WeldDouble] ()
keyword[elif] identifier[dtype] == literal[string] :
identifier[base] = identifier[WeldBit] ()
keyword[else] :
identifier[base] = identifier[WeldVec] ( identifier[WeldChar] ())
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[obj] . identifier[ndim] ):
identifier[base] = identifier[WeldVec] ( identifier[base] )
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[str] ):
identifier[base] = identifier[WeldVec] ( identifier[WeldChar] ())
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[return] identifier[base] | def py_to_weld_type(self, obj):
"""Summary
Args:
obj (TYPE): Description
Returns:
TYPE: Description
Raises:
Exception: Description
"""
if isinstance(obj, np.ndarray):
dtype = str(obj.dtype)
if dtype == 'int16':
base = WeldInt16() # depends on [control=['if'], data=[]]
elif dtype == 'int32':
base = WeldInt() # depends on [control=['if'], data=[]]
elif dtype == 'int64':
base = WeldLong() # depends on [control=['if'], data=[]]
elif dtype == 'float32':
base = WeldFloat() # depends on [control=['if'], data=[]]
elif dtype == 'float64':
base = WeldDouble() # depends on [control=['if'], data=[]]
elif dtype == 'bool':
base = WeldBit() # depends on [control=['if'], data=[]]
else:
base = WeldVec(WeldChar()) # TODO: Fix this
for i in xrange(obj.ndim):
base = WeldVec(base) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(obj, str):
base = WeldVec(WeldChar()) # depends on [control=['if'], data=[]]
else:
raise Exception('Invalid object type: unable to infer NVL type')
return base |
def mkparam(self, method, pdef, object):
        """
        Build a parameter fragment for I{method} from a parameter
        definition and its value.
        @param method: A method name.
        @type method: str
        @param pdef: A parameter definition.
        @type pdef: tuple: (I{name}, L{xsd.sxbase.SchemaObject})
        @param object: The parameter value.
        @type object: any
        @return: The parameter fragment.
        @rtype: L{Element}
        """
        marshaller = self.marshaller()
        tag, schema_type = pdef
        content = Content(tag=tag,
                          value=object,
                          type=schema_type,
                          real=schema_type.resolve())
        return marshaller.process(content)
constant[
Builds a parameter for the specified I{method} using the parameter
definition (pdef) and the specified value (object).
@param method: A method name.
@type method: str
@param pdef: A parameter definition.
@type pdef: tuple: (I{name}, L{xsd.sxbase.SchemaObject})
@param object: The parameter value.
@type object: any
@return: The parameter fragment.
@rtype: L{Element}
]
variable[marshaller] assign[=] call[name[self].marshaller, parameter[]]
variable[content] assign[=] call[name[Content], parameter[]]
return[call[name[marshaller].process, parameter[name[content]]]] | keyword[def] identifier[mkparam] ( identifier[self] , identifier[method] , identifier[pdef] , identifier[object] ):
literal[string]
identifier[marshaller] = identifier[self] . identifier[marshaller] ()
identifier[content] = identifier[Content] ( identifier[tag] = identifier[pdef] [ literal[int] ],
identifier[value] = identifier[object] ,
identifier[type] = identifier[pdef] [ literal[int] ],
identifier[real] = identifier[pdef] [ literal[int] ]. identifier[resolve] ())
keyword[return] identifier[marshaller] . identifier[process] ( identifier[content] ) | def mkparam(self, method, pdef, object):
"""
Builds a parameter for the specified I{method} using the parameter
definition (pdef) and the specified value (object).
@param method: A method name.
@type method: str
@param pdef: A parameter definition.
@type pdef: tuple: (I{name}, L{xsd.sxbase.SchemaObject})
@param object: The parameter value.
@type object: any
@return: The parameter fragment.
@rtype: L{Element}
"""
marshaller = self.marshaller()
content = Content(tag=pdef[0], value=object, type=pdef[1], real=pdef[1].resolve())
return marshaller.process(content) |
def login(self):
        """
        Log in to the system and return a requests session object.

        :return: session carrying the login cookies
        :rtype: requests.sessions.Session
        :raises AuthFailure: if the server does not report a successful login
        """
        # NOTE(review): the entire body is guarded by this hasattr check, so
        # when a 'session' attribute already exists this method falls through
        # and implicitly returns None -- confirm whether a cached session
        # should be returned in that case instead.
        if not hasattr(self, 'session'):
            self.last_connect = time.time()
            s = requests.session()
            # Initial GET primes the session cookies before posting credentials.
            s.get('http://bkjws.sdu.edu.cn')
            data = {
                'j_username': self.student_id,
                'j_password': self.password_md5
            }
            r6 = s.post('http://bkjws.sdu.edu.cn/b/ajaxLogin', headers={
                'user-agent': self._ua},
                data=data)
            # The endpoint responds with the literal JSON string '"success"'
            # on a successful login.
            if r6.text == '"success"':
                return s
            else:
                s.close()
                raise AuthFailure(r6.text)
constant[
登陆系统,返回一个requests的session对象
:return: session with login cookies
:rtype: requests.sessions.Session
]
if <ast.UnaryOp object at 0x7da20e9626b0> begin[:]
name[self].last_connect assign[=] call[name[time].time, parameter[]]
variable[s] assign[=] call[name[requests].session, parameter[]]
call[name[s].get, parameter[constant[http://bkjws.sdu.edu.cn]]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da20e962e30>, <ast.Constant object at 0x7da20e961f30>], [<ast.Attribute object at 0x7da20e962230>, <ast.Attribute object at 0x7da20e9605b0>]]
variable[r6] assign[=] call[name[s].post, parameter[constant[http://bkjws.sdu.edu.cn/b/ajaxLogin]]]
if compare[name[r6].text equal[==] constant["success"]] begin[:]
return[name[s]] | keyword[def] identifier[login] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[last_connect] = identifier[time] . identifier[time] ()
identifier[s] = identifier[requests] . identifier[session] ()
identifier[s] . identifier[get] ( literal[string] )
identifier[data] ={
literal[string] : identifier[self] . identifier[student_id] ,
literal[string] : identifier[self] . identifier[password_md5]
}
identifier[r6] = identifier[s] . identifier[post] ( literal[string] , identifier[headers] ={
literal[string] : identifier[self] . identifier[_ua] },
identifier[data] = identifier[data] )
keyword[if] identifier[r6] . identifier[text] == literal[string] :
keyword[return] identifier[s]
keyword[else] :
identifier[s] . identifier[close] ()
keyword[raise] identifier[AuthFailure] ( identifier[r6] . identifier[text] ) | def login(self):
"""
登陆系统,返回一个requests的session对象
:return: session with login cookies
:rtype: requests.sessions.Session
"""
if not hasattr(self, 'session'):
self.last_connect = time.time()
s = requests.session()
s.get('http://bkjws.sdu.edu.cn')
data = {'j_username': self.student_id, 'j_password': self.password_md5}
r6 = s.post('http://bkjws.sdu.edu.cn/b/ajaxLogin', headers={'user-agent': self._ua}, data=data)
if r6.text == '"success"':
return s # depends on [control=['if'], data=[]]
else:
s.close()
raise AuthFailure(r6.text) # depends on [control=['if'], data=[]] |
def simulate_async_event():
    """Simulate an asynchronous event."""
    scc.state = 'executing'
    deferred = Deferred()

    def on_fired(result):
        """All other asynchronous events or function calls
        returned from later steps will wait until this
        callback fires."""
        scc.state = result
        return 'some event result'

    deferred.addCallback(on_fired)
    # Fire the deferred one second from now via the reactor.
    reactor.callLater(1, deferred.callback, 'done')  # pylint: disable=E1101
    return deferred
return deferred | def function[simulate_async_event, parameter[]]:
constant[Simulate an asynchronous event.]
name[scc].state assign[=] constant[executing]
def function[async_event, parameter[result]]:
constant[All other asynchronous events or function calls
returned from later steps will wait until this
callback fires.]
name[scc].state assign[=] name[result]
return[constant[some event result]]
variable[deferred] assign[=] call[name[Deferred], parameter[]]
call[name[reactor].callLater, parameter[constant[1], name[deferred].callback, constant[done]]]
call[name[deferred].addCallback, parameter[name[async_event]]]
return[name[deferred]] | keyword[def] identifier[simulate_async_event] ():
literal[string]
identifier[scc] . identifier[state] = literal[string]
keyword[def] identifier[async_event] ( identifier[result] ):
literal[string]
identifier[scc] . identifier[state] = identifier[result]
keyword[return] literal[string]
identifier[deferred] = identifier[Deferred] ()
identifier[reactor] . identifier[callLater] ( literal[int] , identifier[deferred] . identifier[callback] , literal[string] )
identifier[deferred] . identifier[addCallback] ( identifier[async_event] )
keyword[return] identifier[deferred] | def simulate_async_event():
"""Simulate an asynchronous event."""
scc.state = 'executing'
def async_event(result):
"""All other asynchronous events or function calls
returned from later steps will wait until this
callback fires."""
scc.state = result
return 'some event result'
deferred = Deferred()
reactor.callLater(1, deferred.callback, 'done') # pylint: disable=E1101
deferred.addCallback(async_event)
return deferred |
def set_command(value, parameter):
    """
    Executor for `globus config set`
    """
    conf = get_config_obj()
    # Parameters may be namespaced as "section.name"; unqualified names
    # land in the default [cli] section.
    section = "cli"
    if "." in parameter:
        section, parameter = parameter.split(".", 1)
    if section not in conf:
        conf[section] = {}
    conf[section][parameter] = value
    # Persist the change to disk.
    safeprint("Writing updated config to {}".format(conf.filename))
    conf.write()
conf.write() | def function[set_command, parameter[value, parameter]]:
constant[
Executor for `globus config set`
]
variable[conf] assign[=] call[name[get_config_obj], parameter[]]
variable[section] assign[=] constant[cli]
if compare[constant[.] in name[parameter]] begin[:]
<ast.Tuple object at 0x7da18ede4c40> assign[=] call[name[parameter].split, parameter[constant[.], constant[1]]]
if compare[name[section] <ast.NotIn object at 0x7da2590d7190> name[conf]] begin[:]
call[name[conf]][name[section]] assign[=] dictionary[[], []]
call[call[name[conf]][name[section]]][name[parameter]] assign[=] name[value]
call[name[safeprint], parameter[call[constant[Writing updated config to {}].format, parameter[name[conf].filename]]]]
call[name[conf].write, parameter[]] | keyword[def] identifier[set_command] ( identifier[value] , identifier[parameter] ):
literal[string]
identifier[conf] = identifier[get_config_obj] ()
identifier[section] = literal[string]
keyword[if] literal[string] keyword[in] identifier[parameter] :
identifier[section] , identifier[parameter] = identifier[parameter] . identifier[split] ( literal[string] , literal[int] )
keyword[if] identifier[section] keyword[not] keyword[in] identifier[conf] :
identifier[conf] [ identifier[section] ]={}
identifier[conf] [ identifier[section] ][ identifier[parameter] ]= identifier[value]
identifier[safeprint] ( literal[string] . identifier[format] ( identifier[conf] . identifier[filename] ))
identifier[conf] . identifier[write] () | def set_command(value, parameter):
"""
Executor for `globus config set`
"""
conf = get_config_obj()
section = 'cli'
if '.' in parameter:
(section, parameter) = parameter.split('.', 1) # depends on [control=['if'], data=['parameter']]
# ensure that the section exists
if section not in conf:
conf[section] = {} # depends on [control=['if'], data=['section', 'conf']]
# set the value for the given parameter
conf[section][parameter] = value
# write to disk
safeprint('Writing updated config to {}'.format(conf.filename))
conf.write() |
def setCard(self, name, value, add_quotes=False):
        """
        Add or update a card in the GSSHA project file.
        Args:
            name (str): Name of card to be updated/added.
            value (str): Value to attach to the card.
            add_quotes (Optional[bool]): If True, will add quotes around string. Default is False.
        """
        if add_quotes:
            value = '"{0}"'.format(value)
        existing_card = self.getCard(name)
        if existing_card is not None:
            # Card already present: overwrite its value in place.
            existing_card.value = value
        else:
            # Card not found: create it and attach it to this project file.
            card = ProjectCard(name=name, value=value)
            card.projectFile = self
constant[
Adds/updates card for gssha project file
Args:
name (str): Name of card to be updated/added.
value (str): Value to attach to the card.
add_quotes (Optional[bool]): If True, will add quotes around string. Default is False.
]
variable[gssha_card] assign[=] call[name[self].getCard, parameter[name[name]]]
if name[add_quotes] begin[:]
variable[value] assign[=] call[constant["{0}"].format, parameter[name[value]]]
if compare[name[gssha_card] is constant[None]] begin[:]
variable[new_card] assign[=] call[name[ProjectCard], parameter[]]
name[new_card].projectFile assign[=] name[self] | keyword[def] identifier[setCard] ( identifier[self] , identifier[name] , identifier[value] , identifier[add_quotes] = keyword[False] ):
literal[string]
identifier[gssha_card] = identifier[self] . identifier[getCard] ( identifier[name] )
keyword[if] identifier[add_quotes] :
identifier[value] = literal[string] . identifier[format] ( identifier[value] )
keyword[if] identifier[gssha_card] keyword[is] keyword[None] :
identifier[new_card] = identifier[ProjectCard] ( identifier[name] = identifier[name] , identifier[value] = identifier[value] )
identifier[new_card] . identifier[projectFile] = identifier[self]
keyword[else] :
identifier[gssha_card] . identifier[value] = identifier[value] | def setCard(self, name, value, add_quotes=False):
"""
Adds/updates card for gssha project file
Args:
name (str): Name of card to be updated/added.
value (str): Value to attach to the card.
add_quotes (Optional[bool]): If True, will add quotes around string. Default is False.
"""
gssha_card = self.getCard(name)
if add_quotes:
value = '"{0}"'.format(value) # depends on [control=['if'], data=[]]
if gssha_card is None:
# add new card
new_card = ProjectCard(name=name, value=value)
new_card.projectFile = self # depends on [control=['if'], data=[]]
else:
gssha_card.value = value |
def get_list(self, name):
        '''Return every value stored under the (normalized) given name.'''
        key = normalize_name(name, self._normalize_overrides)
        return self._map[key]
constant[Return all the values for given name.]
variable[normalized_name] assign[=] call[name[normalize_name], parameter[name[name], name[self]._normalize_overrides]]
return[call[name[self]._map][name[normalized_name]]] | keyword[def] identifier[get_list] ( identifier[self] , identifier[name] ):
literal[string]
identifier[normalized_name] = identifier[normalize_name] ( identifier[name] , identifier[self] . identifier[_normalize_overrides] )
keyword[return] identifier[self] . identifier[_map] [ identifier[normalized_name] ] | def get_list(self, name):
"""Return all the values for given name."""
normalized_name = normalize_name(name, self._normalize_overrides)
return self._map[normalized_name] |
def main():
    """
    Example of self documenting (of sorts) code, via aikif.
    Simply call functions like below to build an overview
    which has metadata automatically updated.
    """
    fldr = mod_cfg.fldrs['program_path']
    programs = mod_prg.Programs('AIKIF Programs', fldr)
    # Each documenter registers its set of programs into the collection.
    for documenter in (document_core_programs, document_agents, document_examples):
        documenter(programs)
    # programs.list() would print all registered programs
    out_csv = os.sep.join([fldr, 'examples', 'document_AIKIF.csv'])
    programs.save(out_csv)
    programs.collect_program_info('progress.md')
constant[
Example of self documenting (of sorts) code, via aikif.
Simply call functions like below to build an overview
which has metadata automatically updated.
]
variable[fldr] assign[=] call[name[mod_cfg].fldrs][constant[program_path]]
variable[p] assign[=] call[name[mod_prg].Programs, parameter[constant[AIKIF Programs], name[fldr]]]
call[name[document_core_programs], parameter[name[p]]]
call[name[document_agents], parameter[name[p]]]
call[name[document_examples], parameter[name[p]]]
call[name[p].save, parameter[binary_operation[binary_operation[binary_operation[binary_operation[name[fldr] + name[os].sep] + constant[examples]] + name[os].sep] + constant[document_AIKIF.csv]]]]
call[name[p].collect_program_info, parameter[constant[progress.md]]] | keyword[def] identifier[main] ():
literal[string]
identifier[fldr] = identifier[mod_cfg] . identifier[fldrs] [ literal[string] ]
identifier[p] = identifier[mod_prg] . identifier[Programs] ( literal[string] , identifier[fldr] )
identifier[document_core_programs] ( identifier[p] )
identifier[document_agents] ( identifier[p] )
identifier[document_examples] ( identifier[p] )
identifier[p] . identifier[save] ( identifier[fldr] + identifier[os] . identifier[sep] + literal[string] + identifier[os] . identifier[sep] + literal[string] )
identifier[p] . identifier[collect_program_info] ( literal[string] ) | def main():
"""
Example of self documenting (of sorts) code, via aikif.
Simply call functions like below to build an overview
which has metadata automatically updated.
"""
fldr = mod_cfg.fldrs['program_path']
p = mod_prg.Programs('AIKIF Programs', fldr)
document_core_programs(p)
document_agents(p)
document_examples(p)
# p.list() # get list of all programs
p.save(fldr + os.sep + 'examples' + os.sep + 'document_AIKIF.csv')
p.collect_program_info('progress.md') |
def _finalize_cwl_in(data, work_dir, passed_keys, output_cwl_keys, runtime):
    """Prepare a data object received from CWL for downstream processing.

    Records the working directory and the CWL key bookkeeping, guarantees
    a config/algorithm section exists, derives read-group names from the
    sample description when absent, then applies the standard resource
    and normalization passes before returning the updated object.
    """
    data["dirs"] = {"work": work_dir}
    if not tz.get_in(["config", "algorithm"], data):
        # Ensure the nested config/algorithm dictionary is present.
        data.setdefault("config", {})["algorithm"] = {}
    if "description" in data and "rgnames" not in data:
        data["rgnames"] = {"sample": data["description"]}
    data["cwl_keys"] = passed_keys
    data["output_cwl_keys"] = output_cwl_keys
    # Run the finalization passes in their required order.
    for finalize in (lambda d: _add_resources(d, runtime),
                     cwlutils.normalize_missing,
                     run_info.normalize_world):
        data = finalize(data)
    return data
constant[Finalize data object with inputs from CWL.
]
call[name[data]][constant[dirs]] assign[=] dictionary[[<ast.Constant object at 0x7da1b1897910>], [<ast.Name object at 0x7da1b1897fa0>]]
if <ast.UnaryOp object at 0x7da1b1894b50> begin[:]
if compare[constant[config] <ast.NotIn object at 0x7da2590d7190> name[data]] begin[:]
call[name[data]][constant[config]] assign[=] dictionary[[], []]
call[call[name[data]][constant[config]]][constant[algorithm]] assign[=] dictionary[[], []]
if <ast.BoolOp object at 0x7da1b1895840> begin[:]
call[name[data]][constant[rgnames]] assign[=] dictionary[[<ast.Constant object at 0x7da1b18972e0>], [<ast.Subscript object at 0x7da1b1895b40>]]
call[name[data]][constant[cwl_keys]] assign[=] name[passed_keys]
call[name[data]][constant[output_cwl_keys]] assign[=] name[output_cwl_keys]
variable[data] assign[=] call[name[_add_resources], parameter[name[data], name[runtime]]]
variable[data] assign[=] call[name[cwlutils].normalize_missing, parameter[name[data]]]
variable[data] assign[=] call[name[run_info].normalize_world, parameter[name[data]]]
return[name[data]] | keyword[def] identifier[_finalize_cwl_in] ( identifier[data] , identifier[work_dir] , identifier[passed_keys] , identifier[output_cwl_keys] , identifier[runtime] ):
literal[string]
identifier[data] [ literal[string] ]={ literal[string] : identifier[work_dir] }
keyword[if] keyword[not] identifier[tz] . identifier[get_in] ([ literal[string] , literal[string] ], identifier[data] ):
keyword[if] literal[string] keyword[not] keyword[in] identifier[data] :
identifier[data] [ literal[string] ]={}
identifier[data] [ literal[string] ][ literal[string] ]={}
keyword[if] literal[string] keyword[not] keyword[in] identifier[data] keyword[and] literal[string] keyword[in] identifier[data] :
identifier[data] [ literal[string] ]={ literal[string] : identifier[data] [ literal[string] ]}
identifier[data] [ literal[string] ]= identifier[passed_keys]
identifier[data] [ literal[string] ]= identifier[output_cwl_keys]
identifier[data] = identifier[_add_resources] ( identifier[data] , identifier[runtime] )
identifier[data] = identifier[cwlutils] . identifier[normalize_missing] ( identifier[data] )
identifier[data] = identifier[run_info] . identifier[normalize_world] ( identifier[data] )
keyword[return] identifier[data] | def _finalize_cwl_in(data, work_dir, passed_keys, output_cwl_keys, runtime):
"""Finalize data object with inputs from CWL.
"""
data['dirs'] = {'work': work_dir}
if not tz.get_in(['config', 'algorithm'], data):
if 'config' not in data:
data['config'] = {} # depends on [control=['if'], data=['data']]
data['config']['algorithm'] = {} # depends on [control=['if'], data=[]]
if 'rgnames' not in data and 'description' in data:
data['rgnames'] = {'sample': data['description']} # depends on [control=['if'], data=[]]
data['cwl_keys'] = passed_keys
data['output_cwl_keys'] = output_cwl_keys
data = _add_resources(data, runtime)
data = cwlutils.normalize_missing(data)
data = run_info.normalize_world(data)
return data |
def get_dummy_distribution():
    """
    Returns a distutils Distribution object used to instrument the setup
    environment before calling the actual setup() function.

    Requires that ``register_commands()`` has been called beforehand so
    that ``_module_state['registered_commands']`` is populated; raises
    ``RuntimeError`` otherwise.
    """
    from .setup_helpers import _module_state
    if _module_state['registered_commands'] is None:
        raise RuntimeError(
            'astropy_helpers.setup_helpers.register_commands() must be '
            'called before using '
            'astropy_helpers.setup_helpers.get_dummy_distribution()')
    # Pre-parse the Distutils command-line options and config files so the
    # returned Distribution reflects whatever options were actually set.
    dist = Distribution({'script_name': os.path.basename(sys.argv[0]),
                         'script_args': sys.argv[1:]})
    dist.cmdclass.update(_module_state['registered_commands'])
    with silence():
        try:
            dist.parse_config_files()
            dist.parse_command_line()
        except (DistutilsError, AttributeError, SystemExit):
            # Swallow these deliberately: distutils handles DistutilsErrors
            # itself; AttributeError can be raised for "./setup.py --help";
            # SystemExit can be raised if a display option was used.
            pass
    return dist
constant[
Returns a distutils Distribution object used to instrument the setup
environment before calling the actual setup() function.
]
from relative_module[setup_helpers] import module[_module_state]
if compare[call[name[_module_state]][constant[registered_commands]] is constant[None]] begin[:]
<ast.Raise object at 0x7da2041dba60>
variable[dist] assign[=] call[name[Distribution], parameter[dictionary[[<ast.Constant object at 0x7da2041daa70>, <ast.Constant object at 0x7da2041d9a20>], [<ast.Call object at 0x7da2041da110>, <ast.Subscript object at 0x7da2041d8670>]]]]
call[name[dist].cmdclass.update, parameter[call[name[_module_state]][constant[registered_commands]]]]
with call[name[silence], parameter[]] begin[:]
<ast.Try object at 0x7da2041da9e0>
return[name[dist]] | keyword[def] identifier[get_dummy_distribution] ():
literal[string]
keyword[from] . identifier[setup_helpers] keyword[import] identifier[_module_state]
keyword[if] identifier[_module_state] [ literal[string] ] keyword[is] keyword[None] :
keyword[raise] identifier[RuntimeError] (
literal[string]
literal[string]
literal[string] )
identifier[dist] = identifier[Distribution] ({ literal[string] : identifier[os] . identifier[path] . identifier[basename] ( identifier[sys] . identifier[argv] [ literal[int] ]),
literal[string] : identifier[sys] . identifier[argv] [ literal[int] :]})
identifier[dist] . identifier[cmdclass] . identifier[update] ( identifier[_module_state] [ literal[string] ])
keyword[with] identifier[silence] ():
keyword[try] :
identifier[dist] . identifier[parse_config_files] ()
identifier[dist] . identifier[parse_command_line] ()
keyword[except] ( identifier[DistutilsError] , identifier[AttributeError] , identifier[SystemExit] ):
keyword[pass]
keyword[return] identifier[dist] | def get_dummy_distribution():
"""
Returns a distutils Distribution object used to instrument the setup
environment before calling the actual setup() function.
"""
from .setup_helpers import _module_state
if _module_state['registered_commands'] is None:
raise RuntimeError('astropy_helpers.setup_helpers.register_commands() must be called before using astropy_helpers.setup_helpers.get_dummy_distribution()') # depends on [control=['if'], data=[]]
# Pre-parse the Distutils command-line options and config files to if
# the option is set.
dist = Distribution({'script_name': os.path.basename(sys.argv[0]), 'script_args': sys.argv[1:]})
dist.cmdclass.update(_module_state['registered_commands'])
with silence():
try:
dist.parse_config_files()
dist.parse_command_line() # depends on [control=['try'], data=[]]
except (DistutilsError, AttributeError, SystemExit):
# Let distutils handle DistutilsErrors itself AttributeErrors can
# get raise for ./setup.py --help SystemExit can be raised if a
# display option was used, for example
pass # depends on [control=['except'], data=[]] # depends on [control=['with'], data=[]]
return dist |
def renamed(self, name):
    """
    Return a shallow copy of this datum carrying the given name.
    """
    clone = copy(self)
    clone._name = name
    return clone
constant[
Duplicate the datum and rename it
]
variable[duplicate] assign[=] call[name[copy], parameter[name[self]]]
name[duplicate]._name assign[=] name[name]
return[name[duplicate]] | keyword[def] identifier[renamed] ( identifier[self] , identifier[name] ):
literal[string]
identifier[duplicate] = identifier[copy] ( identifier[self] )
identifier[duplicate] . identifier[_name] = identifier[name]
keyword[return] identifier[duplicate] | def renamed(self, name):
"""
Duplicate the datum and rename it
"""
duplicate = copy(self)
duplicate._name = name
return duplicate |
def simxAddStatusbarMessage(clientID, message, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual

    :param clientID: client ID of the remote API connection
    :param message: text to display; on Python 3 a ``str`` is encoded to
        UTF-8 bytes before being handed to the C binding
    :param operationMode: remote API operation mode constant
    :return: return code from the underlying C binding
    '''
    # The C binding expects bytes. Use isinstance (PEP 8) so that str
    # subclasses are also encoded, unlike the previous
    # ``type(message) is str`` check which silently skipped them.
    if sys.version_info[0] == 3 and isinstance(message, str):
        message = message.encode('utf-8')
    return c_AddStatusbarMessage(clientID, message, operationMode)
constant[
Please have a look at the function description/documentation in the V-REP user manual
]
if <ast.BoolOp object at 0x7da207f000d0> begin[:]
variable[message] assign[=] call[name[message].encode, parameter[constant[utf-8]]]
return[call[name[c_AddStatusbarMessage], parameter[name[clientID], name[message], name[operationMode]]]] | keyword[def] identifier[simxAddStatusbarMessage] ( identifier[clientID] , identifier[message] , identifier[operationMode] ):
literal[string]
keyword[if] ( identifier[sys] . identifier[version_info] [ literal[int] ]== literal[int] ) keyword[and] ( identifier[type] ( identifier[message] ) keyword[is] identifier[str] ):
identifier[message] = identifier[message] . identifier[encode] ( literal[string] )
keyword[return] identifier[c_AddStatusbarMessage] ( identifier[clientID] , identifier[message] , identifier[operationMode] ) | def simxAddStatusbarMessage(clientID, message, operationMode):
"""
Please have a look at the function description/documentation in the V-REP user manual
"""
if sys.version_info[0] == 3 and type(message) is str:
message = message.encode('utf-8') # depends on [control=['if'], data=[]]
return c_AddStatusbarMessage(clientID, message, operationMode) |
def find_config_files(self):
    """Locate every configuration file relevant on this platform and
    return their paths in the order in which they should be parsed;
    every returned path existed at the time of the check (modulo race
    conditions).

    Candidates, in order: ``distutils.cfg`` inside the installed
    Distutils package; a per-user file in the home directory
    (``.pydistutils.cfg`` on Unix, ``pydistutils.cfg`` elsewhere),
    which can be disabled with ``--no-user-cfg``; then ``setup.cfg``
    and ``setup-dev.cfg`` in the current directory.
    """
    check_environ()
    # System-wide config lives next to the installed distutils package.
    distutils_dir = os.path.dirname(sys.modules['distutils'].__file__)
    candidates = [os.path.join(distutils_dir, "distutils.cfg")]
    # Per-user config (dot-prefixed on POSIX), unless disabled.
    if self.want_user_cfg:
        prefix = "." if os.name == 'posix' else ""
        candidates.append(os.path.join(os.path.expanduser('~'),
                                       prefix + "pydistutils.cfg"))
    # Project-local config files are always considered last.
    candidates.extend(["setup.cfg", "setup-dev.cfg"])
    found = [candidate for candidate in candidates
             if os.path.isfile(candidate)]
    if DEBUG:
        self.announce("using config files: %s" % ', '.join(found))
    return found
constant[Find as many configuration files as should be processed for this
platform, and return a list of filenames in the order in which they
should be parsed. The filenames returned are guaranteed to exist
(modulo nasty race conditions).
There are three possible config files: distutils.cfg in the
Distutils installation directory (ie. where the top-level
Distutils __inst__.py file lives), a file in the user's home
directory named .pydistutils.cfg on Unix and pydistutils.cfg
on Windows/Mac; and setup.cfg in the current directory.
The file in the user's home directory can be disabled with the
--no-user-cfg option.
]
variable[files] assign[=] list[[]]
call[name[check_environ], parameter[]]
variable[sys_dir] assign[=] call[name[os].path.dirname, parameter[call[name[sys].modules][constant[distutils]].__file__]]
variable[sys_file] assign[=] call[name[os].path.join, parameter[name[sys_dir], constant[distutils.cfg]]]
if call[name[os].path.isfile, parameter[name[sys_file]]] begin[:]
call[name[files].append, parameter[name[sys_file]]]
if compare[name[os].name equal[==] constant[posix]] begin[:]
variable[user_filename] assign[=] constant[.pydistutils.cfg]
if name[self].want_user_cfg begin[:]
variable[user_file] assign[=] call[name[os].path.join, parameter[call[name[os].path.expanduser, parameter[constant[~]]], name[user_filename]]]
if call[name[os].path.isfile, parameter[name[user_file]]] begin[:]
call[name[files].append, parameter[name[user_file]]]
variable[local_file] assign[=] constant[setup.cfg]
if call[name[os].path.isfile, parameter[name[local_file]]] begin[:]
call[name[files].append, parameter[name[local_file]]]
variable[local_dev_file] assign[=] constant[setup-dev.cfg]
if call[name[os].path.isfile, parameter[name[local_dev_file]]] begin[:]
call[name[files].append, parameter[name[local_dev_file]]]
if name[DEBUG] begin[:]
call[name[self].announce, parameter[binary_operation[constant[using config files: %s] <ast.Mod object at 0x7da2590d6920> call[constant[, ].join, parameter[name[files]]]]]]
return[name[files]] | keyword[def] identifier[find_config_files] ( identifier[self] ):
literal[string]
identifier[files] =[]
identifier[check_environ] ()
identifier[sys_dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[sys] . identifier[modules] [ literal[string] ]. identifier[__file__] )
identifier[sys_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[sys_dir] , literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[sys_file] ):
identifier[files] . identifier[append] ( identifier[sys_file] )
keyword[if] identifier[os] . identifier[name] == literal[string] :
identifier[user_filename] = literal[string]
keyword[else] :
identifier[user_filename] = literal[string]
keyword[if] identifier[self] . identifier[want_user_cfg] :
identifier[user_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[expanduser] ( literal[string] ), identifier[user_filename] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[user_file] ):
identifier[files] . identifier[append] ( identifier[user_file] )
identifier[local_file] = literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[local_file] ):
identifier[files] . identifier[append] ( identifier[local_file] )
identifier[local_dev_file] = literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[local_dev_file] ):
identifier[files] . identifier[append] ( identifier[local_dev_file] )
keyword[if] identifier[DEBUG] :
identifier[self] . identifier[announce] ( literal[string] % literal[string] . identifier[join] ( identifier[files] ))
keyword[return] identifier[files] | def find_config_files(self):
"""Find as many configuration files as should be processed for this
platform, and return a list of filenames in the order in which they
should be parsed. The filenames returned are guaranteed to exist
(modulo nasty race conditions).
There are three possible config files: distutils.cfg in the
Distutils installation directory (ie. where the top-level
Distutils __inst__.py file lives), a file in the user's home
directory named .pydistutils.cfg on Unix and pydistutils.cfg
on Windows/Mac; and setup.cfg in the current directory.
The file in the user's home directory can be disabled with the
--no-user-cfg option.
"""
files = []
check_environ()
# Where to look for the system-wide Distutils config file
sys_dir = os.path.dirname(sys.modules['distutils'].__file__)
# Look for the system config file
sys_file = os.path.join(sys_dir, 'distutils.cfg')
if os.path.isfile(sys_file):
files.append(sys_file) # depends on [control=['if'], data=[]]
# What to call the per-user config file
if os.name == 'posix':
user_filename = '.pydistutils.cfg' # depends on [control=['if'], data=[]]
else:
user_filename = 'pydistutils.cfg'
# And look for the user config file
if self.want_user_cfg:
user_file = os.path.join(os.path.expanduser('~'), user_filename)
if os.path.isfile(user_file):
files.append(user_file) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# All platforms support local setup.cfg
local_file = 'setup.cfg'
if os.path.isfile(local_file):
files.append(local_file) # depends on [control=['if'], data=[]]
local_dev_file = 'setup-dev.cfg'
if os.path.isfile(local_dev_file):
files.append(local_dev_file) # depends on [control=['if'], data=[]]
if DEBUG:
self.announce('using config files: %s' % ', '.join(files)) # depends on [control=['if'], data=[]]
return files |
def create_resource_group(access_token, subscription_id, rgname, location):
    '''Create a resource group in the specified location.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        rgname (str): Azure resource group name.
        location (str): Azure data center location, e.g. westus.

    Returns:
        HTTP response. JSON body.
    '''
    endpoint = ('{}/subscriptions/{}/resourcegroups/{}?api-version={}'
                .format(get_rm_endpoint(), subscription_id, rgname,
                        RESOURCE_API))
    body = json.dumps({'location': location})
    return do_put(endpoint, body, access_token)
constant[Create a resource group in the specified location.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
location (str): Azure data center location. E.g. westus.
Returns:
HTTP response. JSON body.
]
variable[endpoint] assign[=] call[constant[].join, parameter[list[[<ast.Call object at 0x7da1b0537e20>, <ast.Constant object at 0x7da1b0536c80>, <ast.Name object at 0x7da1b05375e0>, <ast.Constant object at 0x7da1b0537af0>, <ast.Name object at 0x7da1b0536560>, <ast.Constant object at 0x7da1b0537bb0>, <ast.Name object at 0x7da1b0537280>]]]]
variable[rg_body] assign[=] dictionary[[<ast.Constant object at 0x7da1b05364d0>], [<ast.Name object at 0x7da1b0536590>]]
variable[body] assign[=] call[name[json].dumps, parameter[name[rg_body]]]
return[call[name[do_put], parameter[name[endpoint], name[body], name[access_token]]]] | keyword[def] identifier[create_resource_group] ( identifier[access_token] , identifier[subscription_id] , identifier[rgname] , identifier[location] ):
literal[string]
identifier[endpoint] = literal[string] . identifier[join] ([ identifier[get_rm_endpoint] (),
literal[string] , identifier[subscription_id] ,
literal[string] , identifier[rgname] ,
literal[string] , identifier[RESOURCE_API] ])
identifier[rg_body] ={ literal[string] : identifier[location] }
identifier[body] = identifier[json] . identifier[dumps] ( identifier[rg_body] )
keyword[return] identifier[do_put] ( identifier[endpoint] , identifier[body] , identifier[access_token] ) | def create_resource_group(access_token, subscription_id, rgname, location):
"""Create a resource group in the specified location.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
location (str): Azure data center location. E.g. westus.
Returns:
HTTP response. JSON body.
"""
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '?api-version=', RESOURCE_API])
rg_body = {'location': location}
body = json.dumps(rg_body)
return do_put(endpoint, body, access_token) |
def add(self, p_src):
    """
    Parse a single todo string and append it to the end of the list.

    :param p_src: source text of the todo item.
    :return: the created todo object, or None when the source produced
        no todos.
    """
    todos = self.add_list([p_src])
    # add_list returns the created todos; use list truthiness instead of
    # the non-idiomatic ``len(todos)`` check.
    return todos[0] if todos else None
constant[
Given a todo string, parse it and put it to the end of the list.
]
variable[todos] assign[=] call[name[self].add_list, parameter[list[[<ast.Name object at 0x7da18eb56440>]]]]
return[<ast.IfExp object at 0x7da18eb55810>] | keyword[def] identifier[add] ( identifier[self] , identifier[p_src] ):
literal[string]
identifier[todos] = identifier[self] . identifier[add_list] ([ identifier[p_src] ])
keyword[return] identifier[todos] [ literal[int] ] keyword[if] identifier[len] ( identifier[todos] ) keyword[else] keyword[None] | def add(self, p_src):
"""
Given a todo string, parse it and put it to the end of the list.
"""
todos = self.add_list([p_src])
return todos[0] if len(todos) else None |
def safe_dump(data):
    """
    Dump the provided data to a YAML document and return it as a string.

    :param data: the object to serialize to YAML.
    :return: str containing the YAML document (explicit ``---`` start,
        block style).
    """
    # NOTE: the previous docstring wrongly described ``data`` as a file
    # path to parse (copy-pasted from a parsing helper); it is the object
    # being serialized.
    # yaml.dump(data) already produces the document as a str object on
    # both Python 2 and 3, so no additional encoding is required.
    return yaml.dump(
        data, Dumper=SafeDumper, default_flow_style=False, explicit_start=True)
constant[
Dump the provided data to a YAML document and returns a string.
:param data: A string containing an absolute path to the file to parse.
:return: str
]
return[call[name[yaml].dump, parameter[name[data]]]] | keyword[def] identifier[safe_dump] ( identifier[data] ):
literal[string]
keyword[return] identifier[yaml] . identifier[dump] (
identifier[data] , identifier[Dumper] = identifier[SafeDumper] , identifier[default_flow_style] = keyword[False] , identifier[explicit_start] = keyword[True] ) | def safe_dump(data):
"""
Dump the provided data to a YAML document and returns a string.
:param data: A string containing an absolute path to the file to parse.
:return: str
"""
# TODO(retr0h): Do we need to encode?
# yaml.dump(data) produces the document as a str object in both python
# 2 and 3.
return yaml.dump(data, Dumper=SafeDumper, default_flow_style=False, explicit_start=True) |
def param_defs(self, method):
    """Return (name, element, ancestry) parameter definitions for a
    document/literal method; unwrapped bodies are returned as-is."""
    part_types = self.bodypart_types(method)
    if not method.soap.input.body.wrapped:
        return part_types
    wrapper = part_types[0][1].resolve()
    defs = []
    for child, ancestry in wrapper:
        # Attributes are not message parameters.
        if not child.isattr():
            defs.append((child.name, child, ancestry))
    return defs
constant[Get parameter definitions for document literal.]
variable[pts] assign[=] call[name[self].bodypart_types, parameter[name[method]]]
if <ast.UnaryOp object at 0x7da18f722a10> begin[:]
return[name[pts]]
variable[pt] assign[=] call[call[call[name[pts]][constant[0]]][constant[1]].resolve, parameter[]]
return[<ast.ListComp object at 0x7da18f720a90>] | keyword[def] identifier[param_defs] ( identifier[self] , identifier[method] ):
literal[string]
identifier[pts] = identifier[self] . identifier[bodypart_types] ( identifier[method] )
keyword[if] keyword[not] identifier[method] . identifier[soap] . identifier[input] . identifier[body] . identifier[wrapped] :
keyword[return] identifier[pts]
identifier[pt] = identifier[pts] [ literal[int] ][ literal[int] ]. identifier[resolve] ()
keyword[return] [( identifier[c] . identifier[name] , identifier[c] , identifier[a] ) keyword[for] identifier[c] , identifier[a] keyword[in] identifier[pt] keyword[if] keyword[not] identifier[c] . identifier[isattr] ()] | def param_defs(self, method):
"""Get parameter definitions for document literal."""
pts = self.bodypart_types(method)
if not method.soap.input.body.wrapped:
return pts # depends on [control=['if'], data=[]]
pt = pts[0][1].resolve()
return [(c.name, c, a) for (c, a) in pt if not c.isattr()] |
def start_packet_groups(self, clear_time_stamps=True, *ports):
    """Start packet groups on the requested ports.

    :param clear_time_stamps: when True, reset time stamps before starting.
    :param ports: ports to start traffic on; empty means all ports.
    """
    ports_spec = self.set_ports_list(*ports)
    commands = ['ixStartPacketGroups {}']
    if clear_time_stamps:
        # Time stamps must be cleared before the groups are started.
        commands.insert(0, 'ixClearTimeStamp {}')
    for command in commands:
        self.api.call_rc(command.format(ports_spec))
constant[ Start packet groups on ports.
:param clear_time_stamps: True - clear time stamps, False - don't.
:param ports: list of ports to start traffic on, if empty start on all ports.
]
variable[port_list] assign[=] call[name[self].set_ports_list, parameter[<ast.Starred object at 0x7da20c6aa0b0>]]
if name[clear_time_stamps] begin[:]
call[name[self].api.call_rc, parameter[call[constant[ixClearTimeStamp {}].format, parameter[name[port_list]]]]]
call[name[self].api.call_rc, parameter[call[constant[ixStartPacketGroups {}].format, parameter[name[port_list]]]]] | keyword[def] identifier[start_packet_groups] ( identifier[self] , identifier[clear_time_stamps] = keyword[True] ,* identifier[ports] ):
literal[string]
identifier[port_list] = identifier[self] . identifier[set_ports_list] (* identifier[ports] )
keyword[if] identifier[clear_time_stamps] :
identifier[self] . identifier[api] . identifier[call_rc] ( literal[string] . identifier[format] ( identifier[port_list] ))
identifier[self] . identifier[api] . identifier[call_rc] ( literal[string] . identifier[format] ( identifier[port_list] )) | def start_packet_groups(self, clear_time_stamps=True, *ports):
""" Start packet groups on ports.
:param clear_time_stamps: True - clear time stamps, False - don't.
:param ports: list of ports to start traffic on, if empty start on all ports.
"""
port_list = self.set_ports_list(*ports)
if clear_time_stamps:
self.api.call_rc('ixClearTimeStamp {}'.format(port_list)) # depends on [control=['if'], data=[]]
self.api.call_rc('ixStartPacketGroups {}'.format(port_list)) |
def create_table_service(self):
    '''
    Creates a TableService object with the settings specified in the
    CloudStorageAccount.

    :return: A service object.
    :rtype: :class:`~azure.storage.table.tableservice.TableService`
    :raises Exception: if the optional azure-storage-table package is
        not installed.
    '''
    # Keep the try body to the import only: previously the TableService
    # constructor ran inside the try, so an ImportError raised anywhere
    # inside its construction was misreported as the package missing.
    try:
        from ..table.tableservice import TableService
    except ImportError:
        raise Exception('The package azure-storage-table is required. '
                        'Please install it using "pip install azure-storage-table"')
    return TableService(self.account_name, self.account_key,
                        sas_token=self.sas_token,
                        is_emulated=self.is_emulated)
constant[
Creates a TableService object with the settings specified in the
CloudStorageAccount.
:return: A service object.
:rtype: :class:`~azure.storage.table.tableservice.TableService`
]
<ast.Try object at 0x7da1b1a1a920> | keyword[def] identifier[create_table_service] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[from] .. identifier[table] . identifier[tableservice] keyword[import] identifier[TableService]
keyword[return] identifier[TableService] ( identifier[self] . identifier[account_name] , identifier[self] . identifier[account_key] ,
identifier[sas_token] = identifier[self] . identifier[sas_token] ,
identifier[is_emulated] = identifier[self] . identifier[is_emulated] )
keyword[except] identifier[ImportError] :
keyword[raise] identifier[Exception] ( literal[string]
+ literal[string] ) | def create_table_service(self):
"""
Creates a TableService object with the settings specified in the
CloudStorageAccount.
:return: A service object.
:rtype: :class:`~azure.storage.table.tableservice.TableService`
"""
try:
from ..table.tableservice import TableService
return TableService(self.account_name, self.account_key, sas_token=self.sas_token, is_emulated=self.is_emulated) # depends on [control=['try'], data=[]]
except ImportError:
raise Exception('The package azure-storage-table is required. ' + 'Please install it using "pip install azure-storage-table"') # depends on [control=['except'], data=[]] |
def cell_start(self, cell, cell_index=None, **kwargs):
    """
    Record and persist a cell's start state.

    Engines may call this when a cell begins executing: it stamps the
    cell's papermill metadata (start time, RUNNING status, cleared
    exception flag) and saves the notebook to the output path.
    """
    if self.log_output:
        # Cells are reported 1-based; blank when no index was given.
        cell_num = '' if cell_index is None else cell_index + 1
        logger.info('Executing Cell {:-<40}'.format(cell_num))
    metadata = cell.metadata.papermill
    metadata['start_time'] = self.now().isoformat()
    metadata['status'] = self.RUNNING
    metadata['exception'] = False
    self.save()
constant[
Set and save a cell's start state.
Optionally called by engines during execution to initialize the
metadata for a cell and save the notebook to the output path.
]
if name[self].log_output begin[:]
variable[ceel_num] assign[=] <ast.IfExp object at 0x7da1b1bc0d60>
call[name[logger].info, parameter[call[constant[Executing Cell {:-<40}].format, parameter[name[ceel_num]]]]]
call[name[cell].metadata.papermill][constant[start_time]] assign[=] call[call[name[self].now, parameter[]].isoformat, parameter[]]
call[name[cell].metadata.papermill][constant[status]] assign[=] name[self].RUNNING
call[name[cell].metadata.papermill][constant[exception]] assign[=] constant[False]
call[name[self].save, parameter[]] | keyword[def] identifier[cell_start] ( identifier[self] , identifier[cell] , identifier[cell_index] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[self] . identifier[log_output] :
identifier[ceel_num] = identifier[cell_index] + literal[int] keyword[if] identifier[cell_index] keyword[is] keyword[not] keyword[None] keyword[else] literal[string]
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[ceel_num] ))
identifier[cell] . identifier[metadata] . identifier[papermill] [ literal[string] ]= identifier[self] . identifier[now] (). identifier[isoformat] ()
identifier[cell] . identifier[metadata] . identifier[papermill] [ literal[string] ]= identifier[self] . identifier[RUNNING]
identifier[cell] . identifier[metadata] . identifier[papermill] [ literal[string] ]= keyword[False]
identifier[self] . identifier[save] () | def cell_start(self, cell, cell_index=None, **kwargs):
"""
Set and save a cell's start state.
Optionally called by engines during execution to initialize the
metadata for a cell and save the notebook to the output path.
"""
if self.log_output:
ceel_num = cell_index + 1 if cell_index is not None else ''
logger.info('Executing Cell {:-<40}'.format(ceel_num)) # depends on [control=['if'], data=[]]
cell.metadata.papermill['start_time'] = self.now().isoformat()
cell.metadata.papermill['status'] = self.RUNNING
cell.metadata.papermill['exception'] = False
self.save() |
def _emit_twisted(signal, *args, **kwargs):
    """
    Emits a single signal to call callbacks registered to respond to that signal.
    Optionally accepts args and kwargs that are passed directly to callbacks.

    :param signal: Signal to send
    :param errback: optional keyword-only error handler attached to each
        result a callback returns (defaults to a pass-through lambda).
    :return: a twisted ``DeferredList`` whose callback result is the list
        of individual callback results with the status flags stripped.
    """
    # Pop 'errback' here so it is NOT forwarded to the callbacks below.
    errback = kwargs.pop('errback', lambda f: f)
    dl = []
    for callback in set(receivers[signal]): # Make a copy in case of any ninja signals
        d = _call(callback, args=args, kwargs=kwargs)
        if d is not None:
            # Only non-None results participate in the aggregate; each must
            # support addErrback (i.e. be Deferred-like).
            dl.append(d.addErrback(errback))
    def simplify(results):
        # DeferredList fires with (flag, value) pairs; keep the values only.
        return [x[1] for x in results]
    from twisted.internet.defer import DeferredList
    return DeferredList(dl).addCallback(simplify)
constant[
Emits a single signal to call callbacks registered to respond to that signal.
Optionally accepts args and kwargs that are passed directly to callbacks.
:param signal: Signal to send
]
variable[errback] assign[=] call[name[kwargs].pop, parameter[constant[errback], <ast.Lambda object at 0x7da2043447c0>]]
variable[dl] assign[=] list[[]]
for taget[name[callback]] in starred[call[name[set], parameter[call[name[receivers]][name[signal]]]]] begin[:]
variable[d] assign[=] call[name[_call], parameter[name[callback]]]
if compare[name[d] is_not constant[None]] begin[:]
call[name[dl].append, parameter[call[name[d].addErrback, parameter[name[errback]]]]]
def function[simplify, parameter[results]]:
return[<ast.ListComp object at 0x7da204346aa0>]
from relative_module[twisted.internet.defer] import module[DeferredList]
return[call[call[name[DeferredList], parameter[name[dl]]].addCallback, parameter[name[simplify]]]] | keyword[def] identifier[_emit_twisted] ( identifier[signal] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[errback] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[lambda] identifier[f] : identifier[f] )
identifier[dl] =[]
keyword[for] identifier[callback] keyword[in] identifier[set] ( identifier[receivers] [ identifier[signal] ]):
identifier[d] = identifier[_call] ( identifier[callback] , identifier[args] = identifier[args] , identifier[kwargs] = identifier[kwargs] )
keyword[if] identifier[d] keyword[is] keyword[not] keyword[None] :
identifier[dl] . identifier[append] ( identifier[d] . identifier[addErrback] ( identifier[errback] ))
keyword[def] identifier[simplify] ( identifier[results] ):
keyword[return] [ identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[results] ]
keyword[from] identifier[twisted] . identifier[internet] . identifier[defer] keyword[import] identifier[DeferredList]
keyword[return] identifier[DeferredList] ( identifier[dl] ). identifier[addCallback] ( identifier[simplify] ) | def _emit_twisted(signal, *args, **kwargs):
"""
Emits a single signal to call callbacks registered to respond to that signal.
Optionally accepts args and kwargs that are passed directly to callbacks.
:param signal: Signal to send
"""
errback = kwargs.pop('errback', lambda f: f)
dl = []
for callback in set(receivers[signal]): # Make a copy in case of any ninja signals
d = _call(callback, args=args, kwargs=kwargs)
if d is not None:
dl.append(d.addErrback(errback)) # depends on [control=['if'], data=['d']] # depends on [control=['for'], data=['callback']]
def simplify(results):
return [x[1] for x in results]
from twisted.internet.defer import DeferredList
return DeferredList(dl).addCallback(simplify) |
def import_plugin(name, superclasses=None):
    """Import name as a module and return a list of all classes defined in that
    module. superclasses should be a tuple of valid superclasses to import,
    this defaults to (Plugin,).
    """
    # Default to importing subclasses of the base Plugin class.
    if not superclasses:
        superclasses = (Plugin,)
    return import_module("sos.plugins.%s" % name, superclasses)
constant[Import name as a module and return a list of all classes defined in that
module. superclasses should be a tuple of valid superclasses to import,
this defaults to (Plugin,).
]
variable[plugin_fqname] assign[=] binary_operation[constant[sos.plugins.%s] <ast.Mod object at 0x7da2590d6920> name[name]]
if <ast.UnaryOp object at 0x7da18c4cf340> begin[:]
variable[superclasses] assign[=] tuple[[<ast.Name object at 0x7da18c4ccb50>]]
return[call[name[import_module], parameter[name[plugin_fqname], name[superclasses]]]] | keyword[def] identifier[import_plugin] ( identifier[name] , identifier[superclasses] = keyword[None] ):
literal[string]
identifier[plugin_fqname] = literal[string] % identifier[name]
keyword[if] keyword[not] identifier[superclasses] :
identifier[superclasses] =( identifier[Plugin] ,)
keyword[return] identifier[import_module] ( identifier[plugin_fqname] , identifier[superclasses] ) | def import_plugin(name, superclasses=None):
"""Import name as a module and return a list of all classes defined in that
module. superclasses should be a tuple of valid superclasses to import,
this defaults to (Plugin,).
"""
plugin_fqname = 'sos.plugins.%s' % name
if not superclasses:
superclasses = (Plugin,) # depends on [control=['if'], data=[]]
return import_module(plugin_fqname, superclasses) |
def reset(self):
    """
    Reset all fields of this object to class defaults.

    Every public attribute (name not starting with ``_``) is replaced by a
    fresh default instance of its own type (``0`` for ints, ``""`` for
    strings, ``[]`` for lists, ...). Falsy values (``None``, ``0``, ``""``)
    short-circuit and are left unchanged, as are private attributes.
    """
    # Iterate over a snapshot of the attribute names: setattr mutates
    # self.__dict__, and a __setattr__ override that adds keys would
    # otherwise raise "dictionary changed size during iteration".
    for name in list(self.__dict__):
        if name.startswith("_"):
            continue  # preserve private/internal state
        attr = getattr(self, name)
        # attr.__class__() builds the type's default value; falsy attrs
        # short-circuit the `and` and are kept as-is.
        setattr(self, name, attr and attr.__class__())
constant[
Reset all fields of this object to class defaults
]
for taget[name[name]] in starred[name[self].__dict__] begin[:]
if call[name[name].startswith, parameter[constant[_]]] begin[:]
continue
variable[attr] assign[=] call[name[getattr], parameter[name[self], name[name]]]
call[name[setattr], parameter[name[self], name[name], <ast.BoolOp object at 0x7da1b24ff190>]] | keyword[def] identifier[reset] ( identifier[self] ):
literal[string]
keyword[for] identifier[name] keyword[in] identifier[self] . identifier[__dict__] :
keyword[if] identifier[name] . identifier[startswith] ( literal[string] ):
keyword[continue]
identifier[attr] = identifier[getattr] ( identifier[self] , identifier[name] )
identifier[setattr] ( identifier[self] , identifier[name] , identifier[attr] keyword[and] identifier[attr] . identifier[__class__] ()) | def reset(self):
"""
Reset all fields of this object to class defaults
"""
for name in self.__dict__:
if name.startswith('_'):
continue # depends on [control=['if'], data=[]]
attr = getattr(self, name)
setattr(self, name, attr and attr.__class__()) # depends on [control=['for'], data=['name']] |
def load_chkpt_vars(model_path):
    """ Load all variables from a checkpoint to a dict.
    Args:
        model_path(str): path to a checkpoint.
    Returns:
        dict: a name:value dict
    """
    ckpt_path = get_checkpoint_path(model_path)
    ckpt_reader = tfv1.train.NewCheckpointReader(ckpt_path)
    # Map every variable name stored in the checkpoint to its tensor value.
    return {name: ckpt_reader.get_tensor(name)
            for name in ckpt_reader.get_variable_to_shape_map()}
constant[ Load all variables from a checkpoint to a dict.
Args:
model_path(str): path to a checkpoint.
Returns:
dict: a name:value dict
]
variable[model_path] assign[=] call[name[get_checkpoint_path], parameter[name[model_path]]]
variable[reader] assign[=] call[name[tfv1].train.NewCheckpointReader, parameter[name[model_path]]]
variable[var_names] assign[=] call[call[name[reader].get_variable_to_shape_map, parameter[]].keys, parameter[]]
variable[result] assign[=] dictionary[[], []]
for taget[name[n]] in starred[name[var_names]] begin[:]
call[name[result]][name[n]] assign[=] call[name[reader].get_tensor, parameter[name[n]]]
return[name[result]] | keyword[def] identifier[load_chkpt_vars] ( identifier[model_path] ):
literal[string]
identifier[model_path] = identifier[get_checkpoint_path] ( identifier[model_path] )
identifier[reader] = identifier[tfv1] . identifier[train] . identifier[NewCheckpointReader] ( identifier[model_path] )
identifier[var_names] = identifier[reader] . identifier[get_variable_to_shape_map] (). identifier[keys] ()
identifier[result] ={}
keyword[for] identifier[n] keyword[in] identifier[var_names] :
identifier[result] [ identifier[n] ]= identifier[reader] . identifier[get_tensor] ( identifier[n] )
keyword[return] identifier[result] | def load_chkpt_vars(model_path):
""" Load all variables from a checkpoint to a dict.
Args:
model_path(str): path to a checkpoint.
Returns:
dict: a name:value dict
"""
model_path = get_checkpoint_path(model_path)
reader = tfv1.train.NewCheckpointReader(model_path)
var_names = reader.get_variable_to_shape_map().keys()
result = {}
for n in var_names:
result[n] = reader.get_tensor(n) # depends on [control=['for'], data=['n']]
return result |
def declare_exchange(self, name, type, *, durable=True, auto_delete=False,
                     passive=False, internal=False, nowait=False,
                     arguments=None):
    """
    Declare an :class:`Exchange` on the broker. If the exchange does not exist, it will be created.
    This method is a :ref:`coroutine <coroutine>`.
    :param str name: the name of the exchange.
    :param str type: the type of the exchange
        (usually one of ``'fanout'``, ``'direct'``, ``'topic'``, or ``'headers'``)
    :keyword bool durable: If true, the exchange will be re-created when
        the server restarts.
    :keyword bool auto_delete: If true, the exchange will be
        deleted when the last queue is un-bound from it.
    :keyword bool passive: If `true` and exchange with such a name does
        not exist it will raise a :class:`exceptions.NotFound`. If `false`
        server will create it. Arguments ``durable``, ``auto_delete`` and
        ``internal`` are ignored if `passive=True`.
    :keyword bool internal: If true, the exchange cannot be published to
        directly; it can only be bound to other exchanges.
    :keyword bool nowait: If true, the method will not wait for declare-ok
        to arrive and return right away.
    :keyword dict arguments: Table of optional parameters for extensions to
        the AMQP protocol. See :ref:`extensions`.
    :return: the new :class:`Exchange` object.
    """
    # The default exchange ('') is predeclared by the broker and cannot be
    # declared over the wire; return a local handle describing it instead.
    if name == '':
        return exchange.Exchange(self.reader, self.synchroniser, self.sender, name, 'direct', True, False, False)
    if not VALID_EXCHANGE_NAME_RE.match(name):
        raise ValueError(
            "Invalid exchange name.\n"
            "Valid names consist of letters, digits, hyphen, underscore, "
            "period, or colon, and do not begin with 'amq.'")
    # Send the ExchangeDeclare frame; None arguments become an empty table.
    self.sender.send_ExchangeDeclare(
        name, type, passive, durable, auto_delete, internal, nowait,
        arguments or {})
    if not nowait:
        # Generator-based coroutine: suspend until the broker's
        # ExchangeDeclareOK frame arrives, then let the reader resume
        # processing further frames.
        yield from self.synchroniser.wait(spec.ExchangeDeclareOK)
        self.reader.ready()
    ex = exchange.Exchange(
        self.reader, self.synchroniser, self.sender, name, type, durable,
        auto_delete, internal)
    return ex
constant[
Declare an :class:`Exchange` on the broker. If the exchange does not exist, it will be created.
This method is a :ref:`coroutine <coroutine>`.
:param str name: the name of the exchange.
:param str type: the type of the exchange
(usually one of ``'fanout'``, ``'direct'``, ``'topic'``, or ``'headers'``)
:keyword bool durable: If true, the exchange will be re-created when
the server restarts.
:keyword bool auto_delete: If true, the exchange will be
deleted when the last queue is un-bound from it.
:keyword bool passive: If `true` and exchange with such a name does
not exist it will raise a :class:`exceptions.NotFound`. If `false`
server will create it. Arguments ``durable``, ``auto_delete`` and
``internal`` are ignored if `passive=True`.
:keyword bool internal: If true, the exchange cannot be published to
directly; it can only be bound to other exchanges.
:keyword bool nowait: If true, the method will not wait for declare-ok
to arrive and return right away.
:keyword dict arguments: Table of optional parameters for extensions to
the AMQP protocol. See :ref:`extensions`.
:return: the new :class:`Exchange` object.
]
if compare[name[name] equal[==] constant[]] begin[:]
return[call[name[exchange].Exchange, parameter[name[self].reader, name[self].synchroniser, name[self].sender, name[name], constant[direct], constant[True], constant[False], constant[False]]]]
if <ast.UnaryOp object at 0x7da20c6aac50> begin[:]
<ast.Raise object at 0x7da20c6ab5e0>
call[name[self].sender.send_ExchangeDeclare, parameter[name[name], name[type], name[passive], name[durable], name[auto_delete], name[internal], name[nowait], <ast.BoolOp object at 0x7da20c6aa4a0>]]
if <ast.UnaryOp object at 0x7da20c6ab400> begin[:]
<ast.YieldFrom object at 0x7da20c6aa890>
call[name[self].reader.ready, parameter[]]
variable[ex] assign[=] call[name[exchange].Exchange, parameter[name[self].reader, name[self].synchroniser, name[self].sender, name[name], name[type], name[durable], name[auto_delete], name[internal]]]
return[name[ex]] | keyword[def] identifier[declare_exchange] ( identifier[self] , identifier[name] , identifier[type] ,*, identifier[durable] = keyword[True] , identifier[auto_delete] = keyword[False] ,
identifier[passive] = keyword[False] , identifier[internal] = keyword[False] , identifier[nowait] = keyword[False] ,
identifier[arguments] = keyword[None] ):
literal[string]
keyword[if] identifier[name] == literal[string] :
keyword[return] identifier[exchange] . identifier[Exchange] ( identifier[self] . identifier[reader] , identifier[self] . identifier[synchroniser] , identifier[self] . identifier[sender] , identifier[name] , literal[string] , keyword[True] , keyword[False] , keyword[False] )
keyword[if] keyword[not] identifier[VALID_EXCHANGE_NAME_RE] . identifier[match] ( identifier[name] ):
keyword[raise] identifier[ValueError] (
literal[string]
literal[string]
literal[string] )
identifier[self] . identifier[sender] . identifier[send_ExchangeDeclare] (
identifier[name] , identifier[type] , identifier[passive] , identifier[durable] , identifier[auto_delete] , identifier[internal] , identifier[nowait] ,
identifier[arguments] keyword[or] {})
keyword[if] keyword[not] identifier[nowait] :
keyword[yield] keyword[from] identifier[self] . identifier[synchroniser] . identifier[wait] ( identifier[spec] . identifier[ExchangeDeclareOK] )
identifier[self] . identifier[reader] . identifier[ready] ()
identifier[ex] = identifier[exchange] . identifier[Exchange] (
identifier[self] . identifier[reader] , identifier[self] . identifier[synchroniser] , identifier[self] . identifier[sender] , identifier[name] , identifier[type] , identifier[durable] ,
identifier[auto_delete] , identifier[internal] )
keyword[return] identifier[ex] | def declare_exchange(self, name, type, *, durable=True, auto_delete=False, passive=False, internal=False, nowait=False, arguments=None):
"""
Declare an :class:`Exchange` on the broker. If the exchange does not exist, it will be created.
This method is a :ref:`coroutine <coroutine>`.
:param str name: the name of the exchange.
:param str type: the type of the exchange
(usually one of ``'fanout'``, ``'direct'``, ``'topic'``, or ``'headers'``)
:keyword bool durable: If true, the exchange will be re-created when
the server restarts.
:keyword bool auto_delete: If true, the exchange will be
deleted when the last queue is un-bound from it.
:keyword bool passive: If `true` and exchange with such a name does
not exist it will raise a :class:`exceptions.NotFound`. If `false`
server will create it. Arguments ``durable``, ``auto_delete`` and
``internal`` are ignored if `passive=True`.
:keyword bool internal: If true, the exchange cannot be published to
directly; it can only be bound to other exchanges.
:keyword bool nowait: If true, the method will not wait for declare-ok
to arrive and return right away.
:keyword dict arguments: Table of optional parameters for extensions to
the AMQP protocol. See :ref:`extensions`.
:return: the new :class:`Exchange` object.
"""
if name == '':
return exchange.Exchange(self.reader, self.synchroniser, self.sender, name, 'direct', True, False, False) # depends on [control=['if'], data=['name']]
if not VALID_EXCHANGE_NAME_RE.match(name):
raise ValueError("Invalid exchange name.\nValid names consist of letters, digits, hyphen, underscore, period, or colon, and do not begin with 'amq.'") # depends on [control=['if'], data=[]]
self.sender.send_ExchangeDeclare(name, type, passive, durable, auto_delete, internal, nowait, arguments or {})
if not nowait:
yield from self.synchroniser.wait(spec.ExchangeDeclareOK)
self.reader.ready() # depends on [control=['if'], data=[]]
ex = exchange.Exchange(self.reader, self.synchroniser, self.sender, name, type, durable, auto_delete, internal)
return ex |
def merge_pairs(data, two_files, merged_out, revcomp, merge):
    """
    Merge PE reads. Takes in a list of unmerged files [r1, r2] and the
    filehandle to write merged data to, and it returns the number of reads
    that were merged (overlapping). If merge==0 then only concat pairs (nnnn),
    no merging in vsearch.
    Parameters
    -----------
    two_files (tuple):
        A list or tuple of the [r1, r2] files to be merged.
    merged_out (str):
        A string file handle for the merged data to be written to.
    revcomp (bool):
        Whether or not to revcomp the R2s.
    merge (bool):
        Whether or not to perform vsearch merging. If not then reads are simply
        concatenated with a 'nnnn' separator.
    Returns
    --------
    If merge is on then the func will return the number of pairs
    successfully merged, else it returns -1.
    """
    LOGGER.debug("Entering merge_pairs()")
    ## Return the number of merged pairs
    nmerged = -1
    ## Check input files from inside list-tuple [(r1, r2)]
    for fhandle in two_files[0]:
        if not os.path.exists(fhandle):
            raise IPyradWarningExit("""
    Attempting to merge a file that doesn't exist - {}""".format(fhandle))
    ## If it already exists, clean up the old merged file
    if os.path.exists(merged_out):
        os.remove(merged_out)
    ## if merge then catch nonmerged in a separate file
    if merge:
        nonmerged1 = tempfile.NamedTemporaryFile(mode='wb',
                                                 dir=data.dirs.edits,
                                                 suffix="_nonmerged_R1_.fastq").name
        nonmerged2 = tempfile.NamedTemporaryFile(mode='wb',
                                                 dir=data.dirs.edits,
                                                 suffix="_nonmerged_R2_.fastq").name
    ## if not merging then the nonmerged reads will come from the normal edits
    else:
        nonmerged1 = two_files[0][0]
        nonmerged2 = two_files[0][1]
    ## get the maxn and minlen values
    ## (max_low_qual_bases may be a single int or a pair to be summed)
    try:
        maxn = sum(data.paramsdict['max_low_qual_bases'])
    except TypeError:
        maxn = data.paramsdict['max_low_qual_bases']
    minlen = str(max(32, data.paramsdict["filter_min_trim_len"]))
    ## we need to gunzip the files if they are zipped (at least for now)
    if merge and two_files[0][0].endswith(".gz"):
        LOGGER.info("gunzipping pairs")
        tmp1 = os.path.splitext(two_files[0][0])[0]+".tmp1"
        tmp2 = os.path.splitext(two_files[0][1])[0]+".tmp2"
        out1 = open(tmp1, 'w')
        out2 = open(tmp2, 'w')
        gun1 = sps.Popen(["gunzip", "-c", two_files[0][0]],
                         stderr=sps.STDOUT, stdout=out1, close_fds=True)
        gun2 = sps.Popen(["gunzip", "-c", two_files[0][1]],
                         stderr=sps.STDOUT, stdout=out2, close_fds=True)
        _ = gun1.communicate()
        _ = gun2.communicate()
        out1.close()
        out2.close()
    else:
        tmp1 = two_files[0][0]
        tmp2 = two_files[0][1]
    try:
        ## If we are actually mergeing and not just joining then do vsearch
        if merge:
            ## create tmp files with high quality scores and with R2 oriented
            cmd = [ipyrad.bins.vsearch,
                   "--fastq_mergepairs", tmp1,
                   "--reverse", tmp2,
                   "--fastqout", merged_out,
                   "--fastqout_notmerged_fwd", nonmerged1,
                   "--fastqout_notmerged_rev", nonmerged2,
                   "--fasta_width", "0",
                   "--fastq_minmergelen", minlen,
                   "--fastq_maxns", str(maxn),
                   "--fastq_minovlen", "20",
                   "--fastq_maxdiffs", "4",
                   "--label_suffix", "_m1",
                   "--fastq_qmax", "1000",
                   "--threads", "2",
                   "--fastq_allowmergestagger"]
            LOGGER.debug("merge cmd: %s", " ".join(cmd))
            proc = sps.Popen(cmd, stderr=sps.STDOUT, stdout=sps.PIPE)
            try:
                res = proc.communicate()[0]
            # NOTE(review): if KeyboardInterrupt fires here, `res` is never
            # bound, so the LOGGER.error / raise below would hit a NameError
            # when proc.returncode is truthy -- confirm intended.
            except KeyboardInterrupt:
                proc.kill()
            if proc.returncode:
                LOGGER.error("Error: %s %s", cmd, res)
                ## remove temp files
                rmfiles = [os.path.splitext(two_files[0][0])[0]+".tmp1",
                           os.path.splitext(two_files[0][1])[0]+".tmp2",
                           nonmerged1, nonmerged2]
                for rmfile in rmfiles:
                    if os.path.exists(rmfile):
                        os.remove(rmfile)
                raise IPyradWarningExit("Error merge pairs:\n %s\n%s", cmd, res)
            ## record how many read pairs were merged
            ## (reads the whole merged file into memory; 4 lines per record)
            with open(merged_out, 'r') as tmpf:
                #nmerged = len(tmpf.readlines()) // 4
                nmerged = sum(1 for i in tmpf.readlines()) // 4
        ## Combine the unmerged pairs and append to the merge file
        with open(merged_out, 'ab') as combout:
            ## read in paired end read files 4 lines at a time
            ## NOTE: itertools.izip and .next() are Python 2 APIs.
            if nonmerged1.endswith(".gz"):
                fr1 = gzip.open(nonmerged1, 'rb')
            else:
                fr1 = open(nonmerged1, 'rb')
            quart1 = itertools.izip(*[iter(fr1)]*4)
            if nonmerged2.endswith(".gz"):
                fr2 = gzip.open(nonmerged2, 'rb')
            else:
                fr2 = open(nonmerged2, 'rb')
            quart2 = itertools.izip(*[iter(fr2)]*4)
            quarts = itertools.izip(quart1, quart2)
            ## a list to store until writing
            writing = []
            counts = 0
            ## iterate until done
            while 1:
                try:
                    read1s, read2s = quarts.next()
                except StopIteration:
                    break
                ## join R1 to (rev-comped) R2 with an 'nnnn' spacer; the
                ## quality line for R2 is reversed but not complemented.
                if revcomp:
                    writing.append("".join([
                        read1s[0],
                        read1s[1].strip() + "nnnn" + \
                            comp(read2s[1].strip()[::-1]) + "\n",
                        read1s[2],
                        read1s[3].strip() + "nnnn" + \
                            read2s[3].strip()[::-1] + "\n",
                        ]))
                else:
                    writing.append("".join([
                        read1s[0],
                        read1s[1].strip() + "nnnn" + \
                            read2s[1],
                        read1s[2],
                        read1s[3].strip() + "nnnn" + \
                            read2s[3],
                        ]))
                counts += 1
                ## flush the buffer every 10 pairs
                if not counts % 10:
                    combout.write("".join(writing)) #+"\n")
                    writing = []
            if writing:
                combout.write("".join(writing))
            ## close handles
            fr1.close()
            fr2.close()
            combout.close()
    except Exception as inst:
        LOGGER.error("Exception in merge_pairs - {}".format(inst))
        raise
    ## No matter what happens please clean up the temp files.
    finally:
        ## if merged then delete the nonmerge tmp files
        if merge:
            ## remove temp files
            rmfiles = [nonmerged1, nonmerged2,
                       os.path.splitext(two_files[0][0])[0]+".tmp1",
                       os.path.splitext(two_files[0][1])[0]+".tmp2"]
            for rmfile in rmfiles:
                if os.path.exists(rmfile):
                    os.remove(rmfile)
    return nmerged
constant[
Merge PE reads. Takes in a list of unmerged files [r1, r2] and the
filehandle to write merged data to, and it returns the number of reads
that were merged (overlapping). If merge==0 then only concat pairs (nnnn),
no merging in vsearch.
Parameters
-----------
two_files (tuple):
A list or tuple of the [r1, r2] files to be merged.
merged_out (str):
A string file handle for the merged data to be written to.
revcomp (bool):
Whether or not to revcomp the R2s.
merge (bool):
Whether or not to perform vsearch merging. If not then reads are simply
concatenated with a 'nnnn' separator.
Returns
--------
If merge is on then the func will return the number of pairs
successfully merged, else it returns -1.
]
call[name[LOGGER].debug, parameter[constant[Entering merge_pairs()]]]
variable[nmerged] assign[=] <ast.UnaryOp object at 0x7da18dc984f0>
for taget[name[fhandle]] in starred[call[name[two_files]][constant[0]]] begin[:]
if <ast.UnaryOp object at 0x7da18dc9af80> begin[:]
<ast.Raise object at 0x7da18dc99150>
if call[name[os].path.exists, parameter[name[merged_out]]] begin[:]
call[name[os].remove, parameter[name[merged_out]]]
if name[merge] begin[:]
variable[nonmerged1] assign[=] call[name[tempfile].NamedTemporaryFile, parameter[]].name
variable[nonmerged2] assign[=] call[name[tempfile].NamedTemporaryFile, parameter[]].name
<ast.Try object at 0x7da18ede6440>
variable[minlen] assign[=] call[name[str], parameter[call[name[max], parameter[constant[32], call[name[data].paramsdict][constant[filter_min_trim_len]]]]]]
if <ast.BoolOp object at 0x7da18ede7850> begin[:]
call[name[LOGGER].info, parameter[constant[gunzipping pairs]]]
variable[tmp1] assign[=] binary_operation[call[call[name[os].path.splitext, parameter[call[call[name[two_files]][constant[0]]][constant[0]]]]][constant[0]] + constant[.tmp1]]
variable[tmp2] assign[=] binary_operation[call[call[name[os].path.splitext, parameter[call[call[name[two_files]][constant[0]]][constant[1]]]]][constant[0]] + constant[.tmp2]]
variable[out1] assign[=] call[name[open], parameter[name[tmp1], constant[w]]]
variable[out2] assign[=] call[name[open], parameter[name[tmp2], constant[w]]]
variable[gun1] assign[=] call[name[sps].Popen, parameter[list[[<ast.Constant object at 0x7da18ede77c0>, <ast.Constant object at 0x7da18ede71f0>, <ast.Subscript object at 0x7da18ede6bc0>]]]]
variable[gun2] assign[=] call[name[sps].Popen, parameter[list[[<ast.Constant object at 0x7da18ede68f0>, <ast.Constant object at 0x7da18ede7c10>, <ast.Subscript object at 0x7da18ede4e80>]]]]
variable[_] assign[=] call[name[gun1].communicate, parameter[]]
variable[_] assign[=] call[name[gun2].communicate, parameter[]]
call[name[out1].close, parameter[]]
call[name[out2].close, parameter[]]
<ast.Try object at 0x7da18ede4bb0>
return[name[nmerged]] | keyword[def] identifier[merge_pairs] ( identifier[data] , identifier[two_files] , identifier[merged_out] , identifier[revcomp] , identifier[merge] ):
literal[string]
identifier[LOGGER] . identifier[debug] ( literal[string] )
identifier[nmerged] =- literal[int]
keyword[for] identifier[fhandle] keyword[in] identifier[two_files] [ literal[int] ]:
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[fhandle] ):
keyword[raise] identifier[IPyradWarningExit] ( literal[string] . identifier[format] ( identifier[fhandle] ))
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[merged_out] ):
identifier[os] . identifier[remove] ( identifier[merged_out] )
keyword[if] identifier[merge] :
identifier[nonmerged1] = identifier[tempfile] . identifier[NamedTemporaryFile] ( identifier[mode] = literal[string] ,
identifier[dir] = identifier[data] . identifier[dirs] . identifier[edits] ,
identifier[suffix] = literal[string] ). identifier[name]
identifier[nonmerged2] = identifier[tempfile] . identifier[NamedTemporaryFile] ( identifier[mode] = literal[string] ,
identifier[dir] = identifier[data] . identifier[dirs] . identifier[edits] ,
identifier[suffix] = literal[string] ). identifier[name]
keyword[else] :
identifier[nonmerged1] = identifier[two_files] [ literal[int] ][ literal[int] ]
identifier[nonmerged2] = identifier[two_files] [ literal[int] ][ literal[int] ]
keyword[try] :
identifier[maxn] = identifier[sum] ( identifier[data] . identifier[paramsdict] [ literal[string] ])
keyword[except] identifier[TypeError] :
identifier[maxn] = identifier[data] . identifier[paramsdict] [ literal[string] ]
identifier[minlen] = identifier[str] ( identifier[max] ( literal[int] , identifier[data] . identifier[paramsdict] [ literal[string] ]))
keyword[if] identifier[merge] keyword[and] identifier[two_files] [ literal[int] ][ literal[int] ]. identifier[endswith] ( literal[string] ):
identifier[LOGGER] . identifier[info] ( literal[string] )
identifier[tmp1] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[two_files] [ literal[int] ][ literal[int] ])[ literal[int] ]+ literal[string]
identifier[tmp2] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[two_files] [ literal[int] ][ literal[int] ])[ literal[int] ]+ literal[string]
identifier[out1] = identifier[open] ( identifier[tmp1] , literal[string] )
identifier[out2] = identifier[open] ( identifier[tmp2] , literal[string] )
identifier[gun1] = identifier[sps] . identifier[Popen] ([ literal[string] , literal[string] , identifier[two_files] [ literal[int] ][ literal[int] ]],
identifier[stderr] = identifier[sps] . identifier[STDOUT] , identifier[stdout] = identifier[out1] , identifier[close_fds] = keyword[True] )
identifier[gun2] = identifier[sps] . identifier[Popen] ([ literal[string] , literal[string] , identifier[two_files] [ literal[int] ][ literal[int] ]],
identifier[stderr] = identifier[sps] . identifier[STDOUT] , identifier[stdout] = identifier[out2] , identifier[close_fds] = keyword[True] )
identifier[_] = identifier[gun1] . identifier[communicate] ()
identifier[_] = identifier[gun2] . identifier[communicate] ()
identifier[out1] . identifier[close] ()
identifier[out2] . identifier[close] ()
keyword[else] :
identifier[tmp1] = identifier[two_files] [ literal[int] ][ literal[int] ]
identifier[tmp2] = identifier[two_files] [ literal[int] ][ literal[int] ]
keyword[try] :
keyword[if] identifier[merge] :
identifier[cmd] =[ identifier[ipyrad] . identifier[bins] . identifier[vsearch] ,
literal[string] , identifier[tmp1] ,
literal[string] , identifier[tmp2] ,
literal[string] , identifier[merged_out] ,
literal[string] , identifier[nonmerged1] ,
literal[string] , identifier[nonmerged2] ,
literal[string] , literal[string] ,
literal[string] , identifier[minlen] ,
literal[string] , identifier[str] ( identifier[maxn] ),
literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] ]
identifier[LOGGER] . identifier[debug] ( literal[string] , literal[string] . identifier[join] ( identifier[cmd] ))
identifier[proc] = identifier[sps] . identifier[Popen] ( identifier[cmd] , identifier[stderr] = identifier[sps] . identifier[STDOUT] , identifier[stdout] = identifier[sps] . identifier[PIPE] )
keyword[try] :
identifier[res] = identifier[proc] . identifier[communicate] ()[ literal[int] ]
keyword[except] identifier[KeyboardInterrupt] :
identifier[proc] . identifier[kill] ()
keyword[if] identifier[proc] . identifier[returncode] :
identifier[LOGGER] . identifier[error] ( literal[string] , identifier[cmd] , identifier[res] )
identifier[rmfiles] =[ identifier[os] . identifier[path] . identifier[splitext] ( identifier[two_files] [ literal[int] ][ literal[int] ])[ literal[int] ]+ literal[string] ,
identifier[os] . identifier[path] . identifier[splitext] ( identifier[two_files] [ literal[int] ][ literal[int] ])[ literal[int] ]+ literal[string] ,
identifier[nonmerged1] , identifier[nonmerged2] ]
keyword[for] identifier[rmfile] keyword[in] identifier[rmfiles] :
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[rmfile] ):
identifier[os] . identifier[remove] ( identifier[rmfile] )
keyword[raise] identifier[IPyradWarningExit] ( literal[string] , identifier[cmd] , identifier[res] )
keyword[with] identifier[open] ( identifier[merged_out] , literal[string] ) keyword[as] identifier[tmpf] :
identifier[nmerged] = identifier[sum] ( literal[int] keyword[for] identifier[i] keyword[in] identifier[tmpf] . identifier[readlines] ())// literal[int]
keyword[with] identifier[open] ( identifier[merged_out] , literal[string] ) keyword[as] identifier[combout] :
keyword[if] identifier[nonmerged1] . identifier[endswith] ( literal[string] ):
identifier[fr1] = identifier[gzip] . identifier[open] ( identifier[nonmerged1] , literal[string] )
keyword[else] :
identifier[fr1] = identifier[open] ( identifier[nonmerged1] , literal[string] )
identifier[quart1] = identifier[itertools] . identifier[izip] (*[ identifier[iter] ( identifier[fr1] )]* literal[int] )
keyword[if] identifier[nonmerged2] . identifier[endswith] ( literal[string] ):
identifier[fr2] = identifier[gzip] . identifier[open] ( identifier[nonmerged2] , literal[string] )
keyword[else] :
identifier[fr2] = identifier[open] ( identifier[nonmerged2] , literal[string] )
identifier[quart2] = identifier[itertools] . identifier[izip] (*[ identifier[iter] ( identifier[fr2] )]* literal[int] )
identifier[quarts] = identifier[itertools] . identifier[izip] ( identifier[quart1] , identifier[quart2] )
identifier[writing] =[]
identifier[counts] = literal[int]
keyword[while] literal[int] :
keyword[try] :
identifier[read1s] , identifier[read2s] = identifier[quarts] . identifier[next] ()
keyword[except] identifier[StopIteration] :
keyword[break]
keyword[if] identifier[revcomp] :
identifier[writing] . identifier[append] ( literal[string] . identifier[join] ([
identifier[read1s] [ literal[int] ],
identifier[read1s] [ literal[int] ]. identifier[strip] ()+ literal[string] + identifier[comp] ( identifier[read2s] [ literal[int] ]. identifier[strip] ()[::- literal[int] ])+ literal[string] ,
identifier[read1s] [ literal[int] ],
identifier[read1s] [ literal[int] ]. identifier[strip] ()+ literal[string] + identifier[read2s] [ literal[int] ]. identifier[strip] ()[::- literal[int] ]+ literal[string] ,
]))
keyword[else] :
identifier[writing] . identifier[append] ( literal[string] . identifier[join] ([
identifier[read1s] [ literal[int] ],
identifier[read1s] [ literal[int] ]. identifier[strip] ()+ literal[string] + identifier[read2s] [ literal[int] ],
identifier[read1s] [ literal[int] ],
identifier[read1s] [ literal[int] ]. identifier[strip] ()+ literal[string] + identifier[read2s] [ literal[int] ],
]))
identifier[counts] += literal[int]
keyword[if] keyword[not] identifier[counts] % literal[int] :
identifier[combout] . identifier[write] ( literal[string] . identifier[join] ( identifier[writing] ))
identifier[writing] =[]
keyword[if] identifier[writing] :
identifier[combout] . identifier[write] ( literal[string] . identifier[join] ( identifier[writing] ))
identifier[fr1] . identifier[close] ()
identifier[fr2] . identifier[close] ()
identifier[combout] . identifier[close] ()
keyword[except] identifier[Exception] keyword[as] identifier[inst] :
identifier[LOGGER] . identifier[error] ( literal[string] . identifier[format] ( identifier[inst] ))
keyword[raise]
keyword[finally] :
keyword[if] identifier[merge] :
identifier[rmfiles] =[ identifier[nonmerged1] , identifier[nonmerged2] ,
identifier[os] . identifier[path] . identifier[splitext] ( identifier[two_files] [ literal[int] ][ literal[int] ])[ literal[int] ]+ literal[string] ,
identifier[os] . identifier[path] . identifier[splitext] ( identifier[two_files] [ literal[int] ][ literal[int] ])[ literal[int] ]+ literal[string] ]
keyword[for] identifier[rmfile] keyword[in] identifier[rmfiles] :
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[rmfile] ):
identifier[os] . identifier[remove] ( identifier[rmfile] )
keyword[return] identifier[nmerged] | def merge_pairs(data, two_files, merged_out, revcomp, merge):
"""
Merge PE reads. Takes in a list of unmerged files [r1, r2] and the
filehandle to write merged data to, and it returns the number of reads
that were merged (overlapping). If merge==0 then only concat pairs (nnnn),
no merging in vsearch.
Parameters
-----------
two_files (tuple):
A list or tuple of the [r1, r2] files to be merged.
merged_out (str):
A string file handle for the merged data to be written to.
revcomp (bool):
Whether or not to revcomp the R2s.
merge (bool):
Whether or not to perform vsearch merging. If not then reads are simply
concatenated with a 'nnnn' separator.
Returns
--------
If merge is on then the func will return the number of pairs
successfully merged, else it returns -1.
"""
LOGGER.debug('Entering merge_pairs()')
## Return the number of merged pairs
nmerged = -1
## Check input files from inside list-tuple [(r1, r2)]
for fhandle in two_files[0]:
if not os.path.exists(fhandle):
raise IPyradWarningExit("\n Attempting to merge a file that doesn't exist - {}".format(fhandle)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fhandle']]
## If it already exists, clean up the old merged file
if os.path.exists(merged_out):
os.remove(merged_out) # depends on [control=['if'], data=[]]
## if merge then catch nonmerged in a separate file
if merge:
nonmerged1 = tempfile.NamedTemporaryFile(mode='wb', dir=data.dirs.edits, suffix='_nonmerged_R1_.fastq').name
nonmerged2 = tempfile.NamedTemporaryFile(mode='wb', dir=data.dirs.edits, suffix='_nonmerged_R2_.fastq').name # depends on [control=['if'], data=[]]
else:
## if not merging then the nonmerged reads will come from the normal edits
nonmerged1 = two_files[0][0]
nonmerged2 = two_files[0][1]
## get the maxn and minlen values
try:
maxn = sum(data.paramsdict['max_low_qual_bases']) # depends on [control=['try'], data=[]]
except TypeError:
maxn = data.paramsdict['max_low_qual_bases'] # depends on [control=['except'], data=[]]
minlen = str(max(32, data.paramsdict['filter_min_trim_len']))
## we need to gunzip the files if they are zipped (at least for now)
if merge and two_files[0][0].endswith('.gz'):
LOGGER.info('gunzipping pairs')
tmp1 = os.path.splitext(two_files[0][0])[0] + '.tmp1'
tmp2 = os.path.splitext(two_files[0][1])[0] + '.tmp2'
out1 = open(tmp1, 'w')
out2 = open(tmp2, 'w')
gun1 = sps.Popen(['gunzip', '-c', two_files[0][0]], stderr=sps.STDOUT, stdout=out1, close_fds=True)
gun2 = sps.Popen(['gunzip', '-c', two_files[0][1]], stderr=sps.STDOUT, stdout=out2, close_fds=True)
_ = gun1.communicate()
_ = gun2.communicate()
out1.close()
out2.close() # depends on [control=['if'], data=[]]
else:
tmp1 = two_files[0][0]
tmp2 = two_files[0][1]
try:
## If we are actually mergeing and not just joining then do vsearch
if merge:
## create tmp files with high quality scores and with R2 oriented
cmd = [ipyrad.bins.vsearch, '--fastq_mergepairs', tmp1, '--reverse', tmp2, '--fastqout', merged_out, '--fastqout_notmerged_fwd', nonmerged1, '--fastqout_notmerged_rev', nonmerged2, '--fasta_width', '0', '--fastq_minmergelen', minlen, '--fastq_maxns', str(maxn), '--fastq_minovlen', '20', '--fastq_maxdiffs', '4', '--label_suffix', '_m1', '--fastq_qmax', '1000', '--threads', '2', '--fastq_allowmergestagger']
LOGGER.debug('merge cmd: %s', ' '.join(cmd))
proc = sps.Popen(cmd, stderr=sps.STDOUT, stdout=sps.PIPE)
try:
res = proc.communicate()[0] # depends on [control=['try'], data=[]]
except KeyboardInterrupt:
proc.kill() # depends on [control=['except'], data=[]]
if proc.returncode:
LOGGER.error('Error: %s %s', cmd, res)
## remove temp files
rmfiles = [os.path.splitext(two_files[0][0])[0] + '.tmp1', os.path.splitext(two_files[0][1])[0] + '.tmp2', nonmerged1, nonmerged2]
for rmfile in rmfiles:
if os.path.exists(rmfile):
os.remove(rmfile) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rmfile']]
raise IPyradWarningExit('Error merge pairs:\n %s\n%s', cmd, res) # depends on [control=['if'], data=[]]
## record how many read pairs were merged
with open(merged_out, 'r') as tmpf:
#nmerged = len(tmpf.readlines()) // 4
nmerged = sum((1 for i in tmpf.readlines())) // 4 # depends on [control=['with'], data=['tmpf']] # depends on [control=['if'], data=[]]
## Combine the unmerged pairs and append to the merge file
with open(merged_out, 'ab') as combout:
## read in paired end read files 4 lines at a time
if nonmerged1.endswith('.gz'):
fr1 = gzip.open(nonmerged1, 'rb') # depends on [control=['if'], data=[]]
else:
fr1 = open(nonmerged1, 'rb')
quart1 = itertools.izip(*[iter(fr1)] * 4)
if nonmerged2.endswith('.gz'):
fr2 = gzip.open(nonmerged2, 'rb') # depends on [control=['if'], data=[]]
else:
fr2 = open(nonmerged2, 'rb')
quart2 = itertools.izip(*[iter(fr2)] * 4)
quarts = itertools.izip(quart1, quart2)
## a list to store until writing
writing = []
counts = 0
## iterate until done
while 1:
try:
(read1s, read2s) = quarts.next() # depends on [control=['try'], data=[]]
except StopIteration:
break # depends on [control=['except'], data=[]]
if revcomp:
writing.append(''.join([read1s[0], read1s[1].strip() + 'nnnn' + comp(read2s[1].strip()[::-1]) + '\n', read1s[2], read1s[3].strip() + 'nnnn' + read2s[3].strip()[::-1] + '\n'])) # depends on [control=['if'], data=[]]
else:
writing.append(''.join([read1s[0], read1s[1].strip() + 'nnnn' + read2s[1], read1s[2], read1s[3].strip() + 'nnnn' + read2s[3]]))
counts += 1
if not counts % 10:
combout.write(''.join(writing)) #+"\n")
writing = [] # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
if writing:
combout.write(''.join(writing)) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['open', 'combout']]
## close handles
fr1.close()
fr2.close()
combout.close() # depends on [control=['try'], data=[]]
except Exception as inst:
LOGGER.error('Exception in merge_pairs - {}'.format(inst))
raise # depends on [control=['except'], data=['inst']]
finally:
## No matter what happens please clean up the temp files.
## if merged then delete the nonmerge tmp files
if merge:
## remove temp files
rmfiles = [nonmerged1, nonmerged2, os.path.splitext(two_files[0][0])[0] + '.tmp1', os.path.splitext(two_files[0][1])[0] + '.tmp2']
for rmfile in rmfiles:
if os.path.exists(rmfile):
os.remove(rmfile) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rmfile']] # depends on [control=['if'], data=[]]
return nmerged |
def save(self, p_todolist):
"""
Saves a tuple with archive, todolist and command with its arguments
into the backup file with unix timestamp as the key. Tuple is then
indexed in backup file with combination of hash calculated from
p_todolist and unix timestamp. Backup file is closed afterwards.
"""
self._trim()
current_hash = hash_todolist(p_todolist)
list_todo = (self.todolist.print_todos()+'\n').splitlines(True)
try:
list_archive = (self.archive.print_todos()+'\n').splitlines(True)
except AttributeError:
list_archive = []
self.backup_dict[self.timestamp] = (list_todo, list_archive, self.label)
index = self._get_index()
index.insert(0, (self.timestamp, current_hash))
self._save_index(index)
self._write()
self.close() | def function[save, parameter[self, p_todolist]]:
constant[
Saves a tuple with archive, todolist and command with its arguments
into the backup file with unix timestamp as the key. Tuple is then
indexed in backup file with combination of hash calculated from
p_todolist and unix timestamp. Backup file is closed afterwards.
]
call[name[self]._trim, parameter[]]
variable[current_hash] assign[=] call[name[hash_todolist], parameter[name[p_todolist]]]
variable[list_todo] assign[=] call[binary_operation[call[name[self].todolist.print_todos, parameter[]] + constant[
]].splitlines, parameter[constant[True]]]
<ast.Try object at 0x7da18ede4370>
call[name[self].backup_dict][name[self].timestamp] assign[=] tuple[[<ast.Name object at 0x7da18ede5ff0>, <ast.Name object at 0x7da18ede6380>, <ast.Attribute object at 0x7da18ede7820>]]
variable[index] assign[=] call[name[self]._get_index, parameter[]]
call[name[index].insert, parameter[constant[0], tuple[[<ast.Attribute object at 0x7da18ede7d30>, <ast.Name object at 0x7da18ede5330>]]]]
call[name[self]._save_index, parameter[name[index]]]
call[name[self]._write, parameter[]]
call[name[self].close, parameter[]] | keyword[def] identifier[save] ( identifier[self] , identifier[p_todolist] ):
literal[string]
identifier[self] . identifier[_trim] ()
identifier[current_hash] = identifier[hash_todolist] ( identifier[p_todolist] )
identifier[list_todo] =( identifier[self] . identifier[todolist] . identifier[print_todos] ()+ literal[string] ). identifier[splitlines] ( keyword[True] )
keyword[try] :
identifier[list_archive] =( identifier[self] . identifier[archive] . identifier[print_todos] ()+ literal[string] ). identifier[splitlines] ( keyword[True] )
keyword[except] identifier[AttributeError] :
identifier[list_archive] =[]
identifier[self] . identifier[backup_dict] [ identifier[self] . identifier[timestamp] ]=( identifier[list_todo] , identifier[list_archive] , identifier[self] . identifier[label] )
identifier[index] = identifier[self] . identifier[_get_index] ()
identifier[index] . identifier[insert] ( literal[int] ,( identifier[self] . identifier[timestamp] , identifier[current_hash] ))
identifier[self] . identifier[_save_index] ( identifier[index] )
identifier[self] . identifier[_write] ()
identifier[self] . identifier[close] () | def save(self, p_todolist):
"""
Saves a tuple with archive, todolist and command with its arguments
into the backup file with unix timestamp as the key. Tuple is then
indexed in backup file with combination of hash calculated from
p_todolist and unix timestamp. Backup file is closed afterwards.
"""
self._trim()
current_hash = hash_todolist(p_todolist)
list_todo = (self.todolist.print_todos() + '\n').splitlines(True)
try:
list_archive = (self.archive.print_todos() + '\n').splitlines(True) # depends on [control=['try'], data=[]]
except AttributeError:
list_archive = [] # depends on [control=['except'], data=[]]
self.backup_dict[self.timestamp] = (list_todo, list_archive, self.label)
index = self._get_index()
index.insert(0, (self.timestamp, current_hash))
self._save_index(index)
self._write()
self.close() |
def task_estimates(channel, states):
"""
Estimate remaining time for all tasks in this channel.
:param channel: txkoji.channel.Channel
:param list states: list of task_states ints, eg [task_states.OPEN]
:returns: deferred that when fired returns a list of
(task, est_remaining) tuples
"""
for state in states:
if state != task_states.OPEN:
raise NotImplementedError('only estimate OPEN tasks')
tasks = yield channel.tasks(state=states)
# Estimate all the unique packages.
packages = set([task.package for task in tasks])
print('checking avg build duration for %i packages:' % len(packages))
packages = list(packages)
durations = yield average_build_durations(channel.connection, packages)
avg_package_durations = dict(zip(packages, durations))
# pprint(avg_package_durations)
# Determine estimates for all our tasks.
results = []
utcnow = datetime.utcnow()
for task in tasks:
avg_duration = avg_package_durations[task.package]
est_complete = task.started + avg_duration
est_remaining = est_complete - utcnow
result = (task, est_remaining)
results.append(result)
defer.returnValue(results) | def function[task_estimates, parameter[channel, states]]:
constant[
Estimate remaining time for all tasks in this channel.
:param channel: txkoji.channel.Channel
:param list states: list of task_states ints, eg [task_states.OPEN]
:returns: deferred that when fired returns a list of
(task, est_remaining) tuples
]
for taget[name[state]] in starred[name[states]] begin[:]
if compare[name[state] not_equal[!=] name[task_states].OPEN] begin[:]
<ast.Raise object at 0x7da1b1f36020>
variable[tasks] assign[=] <ast.Yield object at 0x7da1b1f35cf0>
variable[packages] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da1b1f358a0>]]
call[name[print], parameter[binary_operation[constant[checking avg build duration for %i packages:] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[packages]]]]]]
variable[packages] assign[=] call[name[list], parameter[name[packages]]]
variable[durations] assign[=] <ast.Yield object at 0x7da1b1f36f20>
variable[avg_package_durations] assign[=] call[name[dict], parameter[call[name[zip], parameter[name[packages], name[durations]]]]]
variable[results] assign[=] list[[]]
variable[utcnow] assign[=] call[name[datetime].utcnow, parameter[]]
for taget[name[task]] in starred[name[tasks]] begin[:]
variable[avg_duration] assign[=] call[name[avg_package_durations]][name[task].package]
variable[est_complete] assign[=] binary_operation[name[task].started + name[avg_duration]]
variable[est_remaining] assign[=] binary_operation[name[est_complete] - name[utcnow]]
variable[result] assign[=] tuple[[<ast.Name object at 0x7da1b1f290c0>, <ast.Name object at 0x7da1b1f284c0>]]
call[name[results].append, parameter[name[result]]]
call[name[defer].returnValue, parameter[name[results]]] | keyword[def] identifier[task_estimates] ( identifier[channel] , identifier[states] ):
literal[string]
keyword[for] identifier[state] keyword[in] identifier[states] :
keyword[if] identifier[state] != identifier[task_states] . identifier[OPEN] :
keyword[raise] identifier[NotImplementedError] ( literal[string] )
identifier[tasks] = keyword[yield] identifier[channel] . identifier[tasks] ( identifier[state] = identifier[states] )
identifier[packages] = identifier[set] ([ identifier[task] . identifier[package] keyword[for] identifier[task] keyword[in] identifier[tasks] ])
identifier[print] ( literal[string] % identifier[len] ( identifier[packages] ))
identifier[packages] = identifier[list] ( identifier[packages] )
identifier[durations] = keyword[yield] identifier[average_build_durations] ( identifier[channel] . identifier[connection] , identifier[packages] )
identifier[avg_package_durations] = identifier[dict] ( identifier[zip] ( identifier[packages] , identifier[durations] ))
identifier[results] =[]
identifier[utcnow] = identifier[datetime] . identifier[utcnow] ()
keyword[for] identifier[task] keyword[in] identifier[tasks] :
identifier[avg_duration] = identifier[avg_package_durations] [ identifier[task] . identifier[package] ]
identifier[est_complete] = identifier[task] . identifier[started] + identifier[avg_duration]
identifier[est_remaining] = identifier[est_complete] - identifier[utcnow]
identifier[result] =( identifier[task] , identifier[est_remaining] )
identifier[results] . identifier[append] ( identifier[result] )
identifier[defer] . identifier[returnValue] ( identifier[results] ) | def task_estimates(channel, states):
"""
Estimate remaining time for all tasks in this channel.
:param channel: txkoji.channel.Channel
:param list states: list of task_states ints, eg [task_states.OPEN]
:returns: deferred that when fired returns a list of
(task, est_remaining) tuples
"""
for state in states:
if state != task_states.OPEN:
raise NotImplementedError('only estimate OPEN tasks') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['state']]
tasks = (yield channel.tasks(state=states))
# Estimate all the unique packages.
packages = set([task.package for task in tasks])
print('checking avg build duration for %i packages:' % len(packages))
packages = list(packages)
durations = (yield average_build_durations(channel.connection, packages))
avg_package_durations = dict(zip(packages, durations))
# pprint(avg_package_durations)
# Determine estimates for all our tasks.
results = []
utcnow = datetime.utcnow()
for task in tasks:
avg_duration = avg_package_durations[task.package]
est_complete = task.started + avg_duration
est_remaining = est_complete - utcnow
result = (task, est_remaining)
results.append(result) # depends on [control=['for'], data=['task']]
defer.returnValue(results) |
def integrate_data(xdata, ydata, xmin=None, xmax=None, autozero=0):
"""
Numerically integrates up the ydata using the trapezoid approximation.
estimate the bin width (scaled by the specified amount).
Returns (xdata, integrated ydata).
autozero is the number of data points to use as an estimate of the background
(then subtracted before integrating).
"""
# sort the arrays and make sure they're numpy arrays
[xdata, ydata] = sort_matrix([xdata,ydata],0)
xdata = _n.array(xdata)
ydata = _n.array(ydata)
if xmin is None: xmin = min(xdata)
if xmax is None: xmax = max(xdata)
# find the index range
imin = xdata.searchsorted(xmin)
imax = xdata.searchsorted(xmax)
xint = [xdata[imin]]
yint = [0]
# get the autozero
if autozero >= 1:
zero = _n.average(ydata[imin:imin+int(autozero)])
ydata = ydata-zero
for n in range(imin+1,imax):
if len(yint):
xint.append(xdata[n])
yint.append(yint[-1]+0.5*(xdata[n]-xdata[n-1])*(ydata[n]+ydata[n-1]))
else:
xint.append(xdata[n])
yint.append(0.5*(xdata[n]-xdata[n-1])*(ydata[n]+ydata[n-1]))
return _n.array(xint), _n.array(yint) | def function[integrate_data, parameter[xdata, ydata, xmin, xmax, autozero]]:
constant[
Numerically integrates up the ydata using the trapezoid approximation.
estimate the bin width (scaled by the specified amount).
Returns (xdata, integrated ydata).
autozero is the number of data points to use as an estimate of the background
(then subtracted before integrating).
]
<ast.List object at 0x7da18dc9b5b0> assign[=] call[name[sort_matrix], parameter[list[[<ast.Name object at 0x7da18dc9a290>, <ast.Name object at 0x7da18dc98df0>]], constant[0]]]
variable[xdata] assign[=] call[name[_n].array, parameter[name[xdata]]]
variable[ydata] assign[=] call[name[_n].array, parameter[name[ydata]]]
if compare[name[xmin] is constant[None]] begin[:]
variable[xmin] assign[=] call[name[min], parameter[name[xdata]]]
if compare[name[xmax] is constant[None]] begin[:]
variable[xmax] assign[=] call[name[max], parameter[name[xdata]]]
variable[imin] assign[=] call[name[xdata].searchsorted, parameter[name[xmin]]]
variable[imax] assign[=] call[name[xdata].searchsorted, parameter[name[xmax]]]
variable[xint] assign[=] list[[<ast.Subscript object at 0x7da18dc988b0>]]
variable[yint] assign[=] list[[<ast.Constant object at 0x7da18dc99360>]]
if compare[name[autozero] greater_or_equal[>=] constant[1]] begin[:]
variable[zero] assign[=] call[name[_n].average, parameter[call[name[ydata]][<ast.Slice object at 0x7da18dc9aef0>]]]
variable[ydata] assign[=] binary_operation[name[ydata] - name[zero]]
for taget[name[n]] in starred[call[name[range], parameter[binary_operation[name[imin] + constant[1]], name[imax]]]] begin[:]
if call[name[len], parameter[name[yint]]] begin[:]
call[name[xint].append, parameter[call[name[xdata]][name[n]]]]
call[name[yint].append, parameter[binary_operation[call[name[yint]][<ast.UnaryOp object at 0x7da18dc994b0>] + binary_operation[binary_operation[constant[0.5] * binary_operation[call[name[xdata]][name[n]] - call[name[xdata]][binary_operation[name[n] - constant[1]]]]] * binary_operation[call[name[ydata]][name[n]] + call[name[ydata]][binary_operation[name[n] - constant[1]]]]]]]]
return[tuple[[<ast.Call object at 0x7da1b1a47670>, <ast.Call object at 0x7da1b1a46e30>]]] | keyword[def] identifier[integrate_data] ( identifier[xdata] , identifier[ydata] , identifier[xmin] = keyword[None] , identifier[xmax] = keyword[None] , identifier[autozero] = literal[int] ):
literal[string]
[ identifier[xdata] , identifier[ydata] ]= identifier[sort_matrix] ([ identifier[xdata] , identifier[ydata] ], literal[int] )
identifier[xdata] = identifier[_n] . identifier[array] ( identifier[xdata] )
identifier[ydata] = identifier[_n] . identifier[array] ( identifier[ydata] )
keyword[if] identifier[xmin] keyword[is] keyword[None] : identifier[xmin] = identifier[min] ( identifier[xdata] )
keyword[if] identifier[xmax] keyword[is] keyword[None] : identifier[xmax] = identifier[max] ( identifier[xdata] )
identifier[imin] = identifier[xdata] . identifier[searchsorted] ( identifier[xmin] )
identifier[imax] = identifier[xdata] . identifier[searchsorted] ( identifier[xmax] )
identifier[xint] =[ identifier[xdata] [ identifier[imin] ]]
identifier[yint] =[ literal[int] ]
keyword[if] identifier[autozero] >= literal[int] :
identifier[zero] = identifier[_n] . identifier[average] ( identifier[ydata] [ identifier[imin] : identifier[imin] + identifier[int] ( identifier[autozero] )])
identifier[ydata] = identifier[ydata] - identifier[zero]
keyword[for] identifier[n] keyword[in] identifier[range] ( identifier[imin] + literal[int] , identifier[imax] ):
keyword[if] identifier[len] ( identifier[yint] ):
identifier[xint] . identifier[append] ( identifier[xdata] [ identifier[n] ])
identifier[yint] . identifier[append] ( identifier[yint] [- literal[int] ]+ literal[int] *( identifier[xdata] [ identifier[n] ]- identifier[xdata] [ identifier[n] - literal[int] ])*( identifier[ydata] [ identifier[n] ]+ identifier[ydata] [ identifier[n] - literal[int] ]))
keyword[else] :
identifier[xint] . identifier[append] ( identifier[xdata] [ identifier[n] ])
identifier[yint] . identifier[append] ( literal[int] *( identifier[xdata] [ identifier[n] ]- identifier[xdata] [ identifier[n] - literal[int] ])*( identifier[ydata] [ identifier[n] ]+ identifier[ydata] [ identifier[n] - literal[int] ]))
keyword[return] identifier[_n] . identifier[array] ( identifier[xint] ), identifier[_n] . identifier[array] ( identifier[yint] ) | def integrate_data(xdata, ydata, xmin=None, xmax=None, autozero=0):
"""
Numerically integrates up the ydata using the trapezoid approximation.
estimate the bin width (scaled by the specified amount).
Returns (xdata, integrated ydata).
autozero is the number of data points to use as an estimate of the background
(then subtracted before integrating).
"""
# sort the arrays and make sure they're numpy arrays
[xdata, ydata] = sort_matrix([xdata, ydata], 0)
xdata = _n.array(xdata)
ydata = _n.array(ydata)
if xmin is None:
xmin = min(xdata) # depends on [control=['if'], data=['xmin']]
if xmax is None:
xmax = max(xdata) # depends on [control=['if'], data=['xmax']]
# find the index range
imin = xdata.searchsorted(xmin)
imax = xdata.searchsorted(xmax)
xint = [xdata[imin]]
yint = [0]
# get the autozero
if autozero >= 1:
zero = _n.average(ydata[imin:imin + int(autozero)])
ydata = ydata - zero # depends on [control=['if'], data=['autozero']]
for n in range(imin + 1, imax):
if len(yint):
xint.append(xdata[n])
yint.append(yint[-1] + 0.5 * (xdata[n] - xdata[n - 1]) * (ydata[n] + ydata[n - 1])) # depends on [control=['if'], data=[]]
else:
xint.append(xdata[n])
yint.append(0.5 * (xdata[n] - xdata[n - 1]) * (ydata[n] + ydata[n - 1])) # depends on [control=['for'], data=['n']]
return (_n.array(xint), _n.array(yint)) |
def insert_rows_json(
self,
table,
json_rows,
row_ids=None,
skip_invalid_rows=None,
ignore_unknown_values=None,
template_suffix=None,
retry=DEFAULT_RETRY,
):
"""Insert rows into a table without applying local type conversions.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
table (Union[ \
:class:`~google.cloud.bigquery.table.Table` \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
The destination table for the row data, or a reference to it.
json_rows (Sequence[dict]):
Row data to be inserted. Keys must match the table schema fields
and values must be JSON-compatible representations.
row_ids (Sequence[str]):
(Optional) Unique ids, one per row being inserted. If omitted,
unique IDs are created.
skip_invalid_rows (bool):
(Optional) Insert all valid rows of a request, even if invalid
rows exist. The default value is False, which causes the entire
request to fail if any invalid rows exist.
ignore_unknown_values (bool):
(Optional) Accept rows that contain values that do not match the
schema. The unknown values are ignored. Default is False, which
treats unknown values as errors.
template_suffix (str):
(Optional) treat ``name`` as a template table and provide a suffix.
BigQuery will create the table ``<name> + <template_suffix>`` based
on the schema of the template table. See
https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
Returns:
Sequence[Mappings]:
One mapping per row with insert errors: the "index" key
identifies the row, and the "errors" key contains a list of
the mappings describing one or more problems with the row.
"""
# Convert table to just a reference because unlike insert_rows,
# insert_rows_json doesn't need the table schema. It's not doing any
# type conversions.
table = _table_arg_to_table_ref(table, default_project=self.project)
rows_info = []
data = {"rows": rows_info}
for index, row in enumerate(json_rows):
info = {"json": row}
if row_ids is not None:
info["insertId"] = row_ids[index]
else:
info["insertId"] = str(uuid.uuid4())
rows_info.append(info)
if skip_invalid_rows is not None:
data["skipInvalidRows"] = skip_invalid_rows
if ignore_unknown_values is not None:
data["ignoreUnknownValues"] = ignore_unknown_values
if template_suffix is not None:
data["templateSuffix"] = template_suffix
# We can always retry, because every row has an insert ID.
response = self._call_api(
retry, method="POST", path="%s/insertAll" % table.path, data=data
)
errors = []
for error in response.get("insertErrors", ()):
errors.append({"index": int(error["index"]), "errors": error["errors"]})
return errors | def function[insert_rows_json, parameter[self, table, json_rows, row_ids, skip_invalid_rows, ignore_unknown_values, template_suffix, retry]]:
constant[Insert rows into a table without applying local type conversions.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
table (Union[ :class:`~google.cloud.bigquery.table.Table` :class:`~google.cloud.bigquery.table.TableReference`, str, ]):
The destination table for the row data, or a reference to it.
json_rows (Sequence[dict]):
Row data to be inserted. Keys must match the table schema fields
and values must be JSON-compatible representations.
row_ids (Sequence[str]):
(Optional) Unique ids, one per row being inserted. If omitted,
unique IDs are created.
skip_invalid_rows (bool):
(Optional) Insert all valid rows of a request, even if invalid
rows exist. The default value is False, which causes the entire
request to fail if any invalid rows exist.
ignore_unknown_values (bool):
(Optional) Accept rows that contain values that do not match the
schema. The unknown values are ignored. Default is False, which
treats unknown values as errors.
template_suffix (str):
(Optional) treat ``name`` as a template table and provide a suffix.
BigQuery will create the table ``<name> + <template_suffix>`` based
on the schema of the template table. See
https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
Returns:
Sequence[Mappings]:
One mapping per row with insert errors: the "index" key
identifies the row, and the "errors" key contains a list of
the mappings describing one or more problems with the row.
]
variable[table] assign[=] call[name[_table_arg_to_table_ref], parameter[name[table]]]
variable[rows_info] assign[=] list[[]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da20c6e7730>], [<ast.Name object at 0x7da20c6e7940>]]
for taget[tuple[[<ast.Name object at 0x7da20c6e66e0>, <ast.Name object at 0x7da20c6e59f0>]]] in starred[call[name[enumerate], parameter[name[json_rows]]]] begin[:]
variable[info] assign[=] dictionary[[<ast.Constant object at 0x7da20c6e73a0>], [<ast.Name object at 0x7da20c6e6020>]]
if compare[name[row_ids] is_not constant[None]] begin[:]
call[name[info]][constant[insertId]] assign[=] call[name[row_ids]][name[index]]
call[name[rows_info].append, parameter[name[info]]]
if compare[name[skip_invalid_rows] is_not constant[None]] begin[:]
call[name[data]][constant[skipInvalidRows]] assign[=] name[skip_invalid_rows]
if compare[name[ignore_unknown_values] is_not constant[None]] begin[:]
call[name[data]][constant[ignoreUnknownValues]] assign[=] name[ignore_unknown_values]
if compare[name[template_suffix] is_not constant[None]] begin[:]
call[name[data]][constant[templateSuffix]] assign[=] name[template_suffix]
variable[response] assign[=] call[name[self]._call_api, parameter[name[retry]]]
variable[errors] assign[=] list[[]]
for taget[name[error]] in starred[call[name[response].get, parameter[constant[insertErrors], tuple[[]]]]] begin[:]
call[name[errors].append, parameter[dictionary[[<ast.Constant object at 0x7da20c6c7d60>, <ast.Constant object at 0x7da20c6c5fc0>], [<ast.Call object at 0x7da20c6c45e0>, <ast.Subscript object at 0x7da20c6c4e20>]]]]
return[name[errors]] | keyword[def] identifier[insert_rows_json] (
identifier[self] ,
identifier[table] ,
identifier[json_rows] ,
identifier[row_ids] = keyword[None] ,
identifier[skip_invalid_rows] = keyword[None] ,
identifier[ignore_unknown_values] = keyword[None] ,
identifier[template_suffix] = keyword[None] ,
identifier[retry] = identifier[DEFAULT_RETRY] ,
):
literal[string]
identifier[table] = identifier[_table_arg_to_table_ref] ( identifier[table] , identifier[default_project] = identifier[self] . identifier[project] )
identifier[rows_info] =[]
identifier[data] ={ literal[string] : identifier[rows_info] }
keyword[for] identifier[index] , identifier[row] keyword[in] identifier[enumerate] ( identifier[json_rows] ):
identifier[info] ={ literal[string] : identifier[row] }
keyword[if] identifier[row_ids] keyword[is] keyword[not] keyword[None] :
identifier[info] [ literal[string] ]= identifier[row_ids] [ identifier[index] ]
keyword[else] :
identifier[info] [ literal[string] ]= identifier[str] ( identifier[uuid] . identifier[uuid4] ())
identifier[rows_info] . identifier[append] ( identifier[info] )
keyword[if] identifier[skip_invalid_rows] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[skip_invalid_rows]
keyword[if] identifier[ignore_unknown_values] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[ignore_unknown_values]
keyword[if] identifier[template_suffix] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[template_suffix]
identifier[response] = identifier[self] . identifier[_call_api] (
identifier[retry] , identifier[method] = literal[string] , identifier[path] = literal[string] % identifier[table] . identifier[path] , identifier[data] = identifier[data]
)
identifier[errors] =[]
keyword[for] identifier[error] keyword[in] identifier[response] . identifier[get] ( literal[string] ,()):
identifier[errors] . identifier[append] ({ literal[string] : identifier[int] ( identifier[error] [ literal[string] ]), literal[string] : identifier[error] [ literal[string] ]})
keyword[return] identifier[errors] | def insert_rows_json(self, table, json_rows, row_ids=None, skip_invalid_rows=None, ignore_unknown_values=None, template_suffix=None, retry=DEFAULT_RETRY):
"""Insert rows into a table without applying local type conversions.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
table (Union[ :class:`~google.cloud.bigquery.table.Table` :class:`~google.cloud.bigquery.table.TableReference`, str, ]):
The destination table for the row data, or a reference to it.
json_rows (Sequence[dict]):
Row data to be inserted. Keys must match the table schema fields
and values must be JSON-compatible representations.
row_ids (Sequence[str]):
(Optional) Unique ids, one per row being inserted. If omitted,
unique IDs are created.
skip_invalid_rows (bool):
(Optional) Insert all valid rows of a request, even if invalid
rows exist. The default value is False, which causes the entire
request to fail if any invalid rows exist.
ignore_unknown_values (bool):
(Optional) Accept rows that contain values that do not match the
schema. The unknown values are ignored. Default is False, which
treats unknown values as errors.
template_suffix (str):
(Optional) treat ``name`` as a template table and provide a suffix.
BigQuery will create the table ``<name> + <template_suffix>`` based
on the schema of the template table. See
https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
Returns:
Sequence[Mappings]:
One mapping per row with insert errors: the "index" key
identifies the row, and the "errors" key contains a list of
the mappings describing one or more problems with the row.
"""
# Convert table to just a reference because unlike insert_rows,
# insert_rows_json doesn't need the table schema. It's not doing any
# type conversions.
table = _table_arg_to_table_ref(table, default_project=self.project)
rows_info = []
data = {'rows': rows_info}
for (index, row) in enumerate(json_rows):
info = {'json': row}
if row_ids is not None:
info['insertId'] = row_ids[index] # depends on [control=['if'], data=['row_ids']]
else:
info['insertId'] = str(uuid.uuid4())
rows_info.append(info) # depends on [control=['for'], data=[]]
if skip_invalid_rows is not None:
data['skipInvalidRows'] = skip_invalid_rows # depends on [control=['if'], data=['skip_invalid_rows']]
if ignore_unknown_values is not None:
data['ignoreUnknownValues'] = ignore_unknown_values # depends on [control=['if'], data=['ignore_unknown_values']]
if template_suffix is not None:
data['templateSuffix'] = template_suffix # depends on [control=['if'], data=['template_suffix']]
# We can always retry, because every row has an insert ID.
response = self._call_api(retry, method='POST', path='%s/insertAll' % table.path, data=data)
errors = []
for error in response.get('insertErrors', ()):
errors.append({'index': int(error['index']), 'errors': error['errors']}) # depends on [control=['for'], data=['error']]
return errors |
def generate_headers(self, client_type, client_id, secret):
    """Build the signed authorization headers for an HTTP request.

    :param client_type (str): remoteci or feeder
    :param client_id (str): remoteci or feeder id
    :param secret (str): api secret
    :return: Authorization headers (dict)
    """
    # Stamp the request with the DCI datetime header first so the
    # signature computed below covers the timestamp.
    self.request.add_header(self.dci_datetime_header, self.dci_datetime_str)
    return self.request.build_headers(client_type, client_id,
                                      self._sign(secret))
constant[
generate_headers is used to generate the headers automatically for your http request
:param client_type (str): remoteci or feeder
:param client_id (str): remoteci or feeder id
:param secret (str): api secret
:return: Authorization headers (dict)
]
call[name[self].request.add_header, parameter[name[self].dci_datetime_header, name[self].dci_datetime_str]]
variable[signature] assign[=] call[name[self]._sign, parameter[name[secret]]]
return[call[name[self].request.build_headers, parameter[name[client_type], name[client_id], name[signature]]]] | keyword[def] identifier[generate_headers] ( identifier[self] , identifier[client_type] , identifier[client_id] , identifier[secret] ):
literal[string]
identifier[self] . identifier[request] . identifier[add_header] ( identifier[self] . identifier[dci_datetime_header] , identifier[self] . identifier[dci_datetime_str] )
identifier[signature] = identifier[self] . identifier[_sign] ( identifier[secret] )
keyword[return] identifier[self] . identifier[request] . identifier[build_headers] ( identifier[client_type] , identifier[client_id] , identifier[signature] ) | def generate_headers(self, client_type, client_id, secret):
"""
generate_headers is used to generate the headers automatically for your http request
:param client_type (str): remoteci or feeder
:param client_id (str): remoteci or feeder id
:param secret (str): api secret
:return: Authorization headers (dict)
"""
self.request.add_header(self.dci_datetime_header, self.dci_datetime_str)
signature = self._sign(secret)
return self.request.build_headers(client_type, client_id, signature) |
def start(track_file,
          twitter_api_key,
          twitter_api_secret,
          twitter_access_token,
          twitter_access_token_secret,
          poll_interval=15,
          unfiltered=False,
          languages=None,
          debug=False,
          outfile=None):
    """Wire up the Twitter stream components and run the polling loop.

    Builds a listener (optionally writing to ``outfile``), a term checker
    backed by ``track_file``, and a dynamic stream authenticated with the
    given Twitter credentials, then blocks inside the stream loop polling
    every ``poll_interval`` seconds.
    """
    tweet_listener = construct_listener(outfile)
    term_checker = BasicFileTermChecker(track_file, tweet_listener)
    credentials = get_tweepy_auth(twitter_api_key,
                                  twitter_api_secret,
                                  twitter_access_token,
                                  twitter_access_token_secret)
    twitter_stream = DynamicTwitterStream(credentials,
                                          tweet_listener,
                                          term_checker,
                                          unfiltered=unfiltered,
                                          languages=languages)
    set_terminate_listeners(twitter_stream)
    if debug:
        set_debug_listener(twitter_stream)
    begin_stream_loop(twitter_stream, poll_interval)
constant[Start the stream.]
variable[listener] assign[=] call[name[construct_listener], parameter[name[outfile]]]
variable[checker] assign[=] call[name[BasicFileTermChecker], parameter[name[track_file], name[listener]]]
variable[auth] assign[=] call[name[get_tweepy_auth], parameter[name[twitter_api_key], name[twitter_api_secret], name[twitter_access_token], name[twitter_access_token_secret]]]
variable[stream] assign[=] call[name[DynamicTwitterStream], parameter[name[auth], name[listener], name[checker]]]
call[name[set_terminate_listeners], parameter[name[stream]]]
if name[debug] begin[:]
call[name[set_debug_listener], parameter[name[stream]]]
call[name[begin_stream_loop], parameter[name[stream], name[poll_interval]]] | keyword[def] identifier[start] ( identifier[track_file] ,
identifier[twitter_api_key] ,
identifier[twitter_api_secret] ,
identifier[twitter_access_token] ,
identifier[twitter_access_token_secret] ,
identifier[poll_interval] = literal[int] ,
identifier[unfiltered] = keyword[False] ,
identifier[languages] = keyword[None] ,
identifier[debug] = keyword[False] ,
identifier[outfile] = keyword[None] ):
literal[string]
identifier[listener] = identifier[construct_listener] ( identifier[outfile] )
identifier[checker] = identifier[BasicFileTermChecker] ( identifier[track_file] , identifier[listener] )
identifier[auth] = identifier[get_tweepy_auth] ( identifier[twitter_api_key] ,
identifier[twitter_api_secret] ,
identifier[twitter_access_token] ,
identifier[twitter_access_token_secret] )
identifier[stream] = identifier[DynamicTwitterStream] ( identifier[auth] , identifier[listener] , identifier[checker] , identifier[unfiltered] = identifier[unfiltered] , identifier[languages] = identifier[languages] )
identifier[set_terminate_listeners] ( identifier[stream] )
keyword[if] identifier[debug] :
identifier[set_debug_listener] ( identifier[stream] )
identifier[begin_stream_loop] ( identifier[stream] , identifier[poll_interval] ) | def start(track_file, twitter_api_key, twitter_api_secret, twitter_access_token, twitter_access_token_secret, poll_interval=15, unfiltered=False, languages=None, debug=False, outfile=None):
"""Start the stream."""
listener = construct_listener(outfile)
checker = BasicFileTermChecker(track_file, listener)
auth = get_tweepy_auth(twitter_api_key, twitter_api_secret, twitter_access_token, twitter_access_token_secret)
stream = DynamicTwitterStream(auth, listener, checker, unfiltered=unfiltered, languages=languages)
set_terminate_listeners(stream)
if debug:
set_debug_listener(stream) # depends on [control=['if'], data=[]]
begin_stream_loop(stream, poll_interval) |
def add(self, phase_name, result):
    """Store the result produced by a named phase.

    Parameters
    ----------
    phase_name: str
        The name of the phase
    result
        The result of that phase
    """
    # Each phase may contribute exactly one result; refuse duplicates.
    if phase_name in self.__result_dict:
        message = ("Results from a phase called {} already exist "
                   "in the pipeline".format(phase_name))
        raise exc.PipelineException(message)
    self.__result_dict[phase_name] = result
    self.__result_list.append(result)
constant[
Add the result of a phase.
Parameters
----------
phase_name: str
The name of the phase
result
The result of that phase
]
if compare[name[phase_name] in name[self].__result_dict] begin[:]
<ast.Raise object at 0x7da1b23d6bf0>
call[name[self].__result_list.append, parameter[name[result]]]
call[name[self].__result_dict][name[phase_name]] assign[=] name[result] | keyword[def] identifier[add] ( identifier[self] , identifier[phase_name] , identifier[result] ):
literal[string]
keyword[if] identifier[phase_name] keyword[in] identifier[self] . identifier[__result_dict] :
keyword[raise] identifier[exc] . identifier[PipelineException] (
literal[string] . identifier[format] ( identifier[phase_name] ))
identifier[self] . identifier[__result_list] . identifier[append] ( identifier[result] )
identifier[self] . identifier[__result_dict] [ identifier[phase_name] ]= identifier[result] | def add(self, phase_name, result):
"""
Add the result of a phase.
Parameters
----------
phase_name: str
The name of the phase
result
The result of that phase
"""
if phase_name in self.__result_dict:
raise exc.PipelineException('Results from a phase called {} already exist in the pipeline'.format(phase_name)) # depends on [control=['if'], data=['phase_name']]
self.__result_list.append(result)
self.__result_dict[phase_name] = result |
def changed(self):
    """Return a :class:`dict` of the key/value pairs whose option is not
    at its default (i.e. ``getattr(self, key).changed`` is true)."""
    non_default = {}
    for key, value in six.iteritems(self):
        if getattr(self, key).changed:
            non_default[key] = value
    return non_default
constant[:class:`dict` containing the key value pairs that are not the
default]
return[<ast.DictComp object at 0x7da18c4cf670>] | keyword[def] identifier[changed] ( identifier[self] ):
literal[string]
keyword[return] { identifier[key] : identifier[value] keyword[for] identifier[key] , identifier[value] keyword[in] identifier[six] . identifier[iteritems] ( identifier[self] )
keyword[if] identifier[getattr] ( identifier[self] , identifier[key] ). identifier[changed] } | def changed(self):
""":class:`dict` containing the key value pairs that are not the
default"""
return {key: value for (key, value) in six.iteritems(self) if getattr(self, key).changed} |
def extraneous_whitespace(logical_line):
    """
    Avoid extraneous whitespace in the following situations:
    - Immediately inside parentheses, brackets or braces.
    - Immediately before a comma, semicolon, or colon.
    """
    text = logical_line
    # E201: an opening bracket followed directly by a space.
    for opener in '([{':
        offset = text.find(opener + ' ')
        if offset != -1:
            return offset + 1, "E201 whitespace after '%s'" % opener
    # E202: a space directly before a closing bracket — tolerated when the
    # space follows a comma (e.g. "(1, )").
    for closer in '}])':
        offset = text.find(' ' + closer)
        if offset != -1 and text[offset - 1] != ',':
            return offset, "E202 whitespace before '%s'" % closer
    # E203: a space before a comma, semicolon or colon.
    for punct in ',;:':
        offset = text.find(' ' + punct)
        if offset != -1:
            return offset, "E203 whitespace before '%s'" % punct
constant[
Avoid extraneous whitespace in the following situations:
- Immediately inside parentheses, brackets or braces.
- Immediately before a comma, semicolon, or colon.
]
variable[line] assign[=] name[logical_line]
for taget[name[char]] in starred[constant[([{]] begin[:]
variable[found] assign[=] call[name[line].find, parameter[binary_operation[name[char] + constant[ ]]]]
if compare[name[found] greater[>] <ast.UnaryOp object at 0x7da1b09ec5e0>] begin[:]
return[tuple[[<ast.BinOp object at 0x7da1b09eee60>, <ast.BinOp object at 0x7da1b09efdc0>]]]
for taget[name[char]] in starred[constant[}])]] begin[:]
variable[found] assign[=] call[name[line].find, parameter[binary_operation[constant[ ] + name[char]]]]
if <ast.BoolOp object at 0x7da1b09ef880> begin[:]
return[tuple[[<ast.Name object at 0x7da1b09ee980>, <ast.BinOp object at 0x7da1b09ec730>]]]
for taget[name[char]] in starred[constant[,;:]] begin[:]
variable[found] assign[=] call[name[line].find, parameter[binary_operation[constant[ ] + name[char]]]]
if compare[name[found] greater[>] <ast.UnaryOp object at 0x7da1b09ef100>] begin[:]
return[tuple[[<ast.Name object at 0x7da1b09eedd0>, <ast.BinOp object at 0x7da1b09ee650>]]] | keyword[def] identifier[extraneous_whitespace] ( identifier[logical_line] ):
literal[string]
identifier[line] = identifier[logical_line]
keyword[for] identifier[char] keyword[in] literal[string] :
identifier[found] = identifier[line] . identifier[find] ( identifier[char] + literal[string] )
keyword[if] identifier[found] >- literal[int] :
keyword[return] identifier[found] + literal[int] , literal[string] % identifier[char]
keyword[for] identifier[char] keyword[in] literal[string] :
identifier[found] = identifier[line] . identifier[find] ( literal[string] + identifier[char] )
keyword[if] identifier[found] >- literal[int] keyword[and] identifier[line] [ identifier[found] - literal[int] ]!= literal[string] :
keyword[return] identifier[found] , literal[string] % identifier[char]
keyword[for] identifier[char] keyword[in] literal[string] :
identifier[found] = identifier[line] . identifier[find] ( literal[string] + identifier[char] )
keyword[if] identifier[found] >- literal[int] :
keyword[return] identifier[found] , literal[string] % identifier[char] | def extraneous_whitespace(logical_line):
"""
Avoid extraneous whitespace in the following situations:
- Immediately inside parentheses, brackets or braces.
- Immediately before a comma, semicolon, or colon.
"""
line = logical_line
for char in '([{':
found = line.find(char + ' ')
if found > -1:
return (found + 1, "E201 whitespace after '%s'" % char) # depends on [control=['if'], data=['found']] # depends on [control=['for'], data=['char']]
for char in '}])':
found = line.find(' ' + char)
if found > -1 and line[found - 1] != ',':
return (found, "E202 whitespace before '%s'" % char) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['char']]
for char in ',;:':
found = line.find(' ' + char)
if found > -1:
return (found, "E203 whitespace before '%s'" % char) # depends on [control=['if'], data=['found']] # depends on [control=['for'], data=['char']] |
def _query(self, x, result):
'''
Same as self.query, but uses a provided list to accumulate results into.
'''
if self.single_interval is None: # Empty
return
elif self.single_interval != 0: # Single interval, just check whether x is in it
if self.single_interval[0] <= x < self.single_interval[1]:
result.append(self.single_interval)
elif x < self.center: # Normal tree, query point to the left of center
if self.left_subtree is not None:
self.left_subtree._query(x, result)
for int in self.mid_sorted_by_start:
if int[0] <= x:
result.append(int)
else:
break
else: # Normal tree, query point to the right of center
for int in self.mid_sorted_by_end:
if int[1] > x:
result.append(int)
else:
break
if self.right_subtree is not None:
self.right_subtree._query(x, result) | def function[_query, parameter[self, x, result]]:
constant[
Same as self.query, but uses a provided list to accumulate results into.
]
if compare[name[self].single_interval is constant[None]] begin[:]
return[None] | keyword[def] identifier[_query] ( identifier[self] , identifier[x] , identifier[result] ):
literal[string]
keyword[if] identifier[self] . identifier[single_interval] keyword[is] keyword[None] :
keyword[return]
keyword[elif] identifier[self] . identifier[single_interval] != literal[int] :
keyword[if] identifier[self] . identifier[single_interval] [ literal[int] ]<= identifier[x] < identifier[self] . identifier[single_interval] [ literal[int] ]:
identifier[result] . identifier[append] ( identifier[self] . identifier[single_interval] )
keyword[elif] identifier[x] < identifier[self] . identifier[center] :
keyword[if] identifier[self] . identifier[left_subtree] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[left_subtree] . identifier[_query] ( identifier[x] , identifier[result] )
keyword[for] identifier[int] keyword[in] identifier[self] . identifier[mid_sorted_by_start] :
keyword[if] identifier[int] [ literal[int] ]<= identifier[x] :
identifier[result] . identifier[append] ( identifier[int] )
keyword[else] :
keyword[break]
keyword[else] :
keyword[for] identifier[int] keyword[in] identifier[self] . identifier[mid_sorted_by_end] :
keyword[if] identifier[int] [ literal[int] ]> identifier[x] :
identifier[result] . identifier[append] ( identifier[int] )
keyword[else] :
keyword[break]
keyword[if] identifier[self] . identifier[right_subtree] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[right_subtree] . identifier[_query] ( identifier[x] , identifier[result] ) | def _query(self, x, result):
"""
Same as self.query, but uses a provided list to accumulate results into.
"""
if self.single_interval is None: # Empty
return # depends on [control=['if'], data=[]]
elif self.single_interval != 0: # Single interval, just check whether x is in it
if self.single_interval[0] <= x < self.single_interval[1]:
result.append(self.single_interval) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif x < self.center: # Normal tree, query point to the left of center
if self.left_subtree is not None:
self.left_subtree._query(x, result) # depends on [control=['if'], data=[]]
for int in self.mid_sorted_by_start:
if int[0] <= x:
result.append(int) # depends on [control=['if'], data=[]]
else:
break # depends on [control=['for'], data=['int']] # depends on [control=['if'], data=['x']]
else: # Normal tree, query point to the right of center
for int in self.mid_sorted_by_end:
if int[1] > x:
result.append(int) # depends on [control=['if'], data=[]]
else:
break # depends on [control=['for'], data=['int']]
if self.right_subtree is not None:
self.right_subtree._query(x, result) # depends on [control=['if'], data=[]] |
def prepare(self):
    '''
    Begin a new, higher-numbered round and return its Prepare message.

    The proposal id is chosen strictly above every proposal id observed
    so far. As a side effect the leader flag is cleared and the
    promise/nack bookkeeping for the previous round is discarded.
    '''
    # Step down (if we were leader) and forget prior ballot state.
    self.leader = False
    self.promises_received = set()
    self.nacks_received = set()
    next_number = self.highest_proposal_id.number + 1
    self.proposal_id = ProposalID(next_number, self.network_uid)
    self.highest_proposal_id = self.proposal_id
    self.current_prepare_msg = Prepare(self.network_uid, self.proposal_id)
    return self.current_prepare_msg
constant[
Returns a new Prepare message with a proposal id higher than
that of any observed proposals. A side effect of this method is
to clear the leader flag if it is currently set.
]
name[self].leader assign[=] constant[False]
name[self].promises_received assign[=] call[name[set], parameter[]]
name[self].nacks_received assign[=] call[name[set], parameter[]]
name[self].proposal_id assign[=] call[name[ProposalID], parameter[binary_operation[name[self].highest_proposal_id.number + constant[1]], name[self].network_uid]]
name[self].highest_proposal_id assign[=] name[self].proposal_id
name[self].current_prepare_msg assign[=] call[name[Prepare], parameter[name[self].network_uid, name[self].proposal_id]]
return[name[self].current_prepare_msg] | keyword[def] identifier[prepare] ( identifier[self] ):
literal[string]
identifier[self] . identifier[leader] = keyword[False]
identifier[self] . identifier[promises_received] = identifier[set] ()
identifier[self] . identifier[nacks_received] = identifier[set] ()
identifier[self] . identifier[proposal_id] = identifier[ProposalID] ( identifier[self] . identifier[highest_proposal_id] . identifier[number] + literal[int] , identifier[self] . identifier[network_uid] )
identifier[self] . identifier[highest_proposal_id] = identifier[self] . identifier[proposal_id]
identifier[self] . identifier[current_prepare_msg] = identifier[Prepare] ( identifier[self] . identifier[network_uid] , identifier[self] . identifier[proposal_id] )
keyword[return] identifier[self] . identifier[current_prepare_msg] | def prepare(self):
"""
Returns a new Prepare message with a proposal id higher than
that of any observed proposals. A side effect of this method is
to clear the leader flag if it is currently set.
"""
self.leader = False
self.promises_received = set()
self.nacks_received = set()
self.proposal_id = ProposalID(self.highest_proposal_id.number + 1, self.network_uid)
self.highest_proposal_id = self.proposal_id
self.current_prepare_msg = Prepare(self.network_uid, self.proposal_id)
return self.current_prepare_msg |
def child_end_handler(self, scache):
    '''
    _upgrade_breadth_info
    update breadth, breadth_path, and add desc to desc_level
    '''
    node = self.desc
    level = scache.desc_level
    # The node's breadth is its position within the current level, i.e.
    # the length of the level list *before* this node is appended.
    position = len(level)
    node['breadth'] = position
    node['breadth_path'].append(position)
    level.append(node)
constant[
_upgrade_breadth_info
update breadth, breadth_path, and add desc to desc_level
]
variable[desc] assign[=] name[self].desc
variable[desc_level] assign[=] name[scache].desc_level
variable[breadth] assign[=] call[name[desc_level].__len__, parameter[]]
call[name[desc]][constant[breadth]] assign[=] name[breadth]
call[call[name[desc]][constant[breadth_path]].append, parameter[name[breadth]]]
call[name[desc_level].append, parameter[name[desc]]] | keyword[def] identifier[child_end_handler] ( identifier[self] , identifier[scache] ):
literal[string]
identifier[desc] = identifier[self] . identifier[desc]
identifier[desc_level] = identifier[scache] . identifier[desc_level]
identifier[breadth] = identifier[desc_level] . identifier[__len__] ()
identifier[desc] [ literal[string] ]= identifier[breadth]
identifier[desc] [ literal[string] ]. identifier[append] ( identifier[breadth] )
identifier[desc_level] . identifier[append] ( identifier[desc] ) | def child_end_handler(self, scache):
"""
_upgrade_breadth_info
update breadth, breadth_path, and add desc to desc_level
"""
desc = self.desc
desc_level = scache.desc_level
breadth = desc_level.__len__()
desc['breadth'] = breadth
desc['breadth_path'].append(breadth)
desc_level.append(desc) |
def _start_reader_thread(self, stream, chunks):
"""Starts a thread for reading output from FFMPEG.
The thread reads consecutive chunks from the stream and saves them in
the given list.
Args:
stream: output stream of the FFMPEG process.
chunks: list to save output chunks to.
Returns:
Thread
"""
import io # pylint: disable=g-import-not-at-top
import threading # pylint: disable=g-import-not-at-top
def target():
while True:
chunk = stream.read(io.DEFAULT_BUFFER_SIZE)
if not chunk:
break
chunks.append(chunk)
thread = threading.Thread(target=target)
thread.start()
return thread | def function[_start_reader_thread, parameter[self, stream, chunks]]:
constant[Starts a thread for reading output from FFMPEG.
The thread reads consecutive chunks from the stream and saves them in
the given list.
Args:
stream: output stream of the FFMPEG process.
chunks: list to save output chunks to.
Returns:
Thread
]
import module[io]
import module[threading]
def function[target, parameter[]]:
while constant[True] begin[:]
variable[chunk] assign[=] call[name[stream].read, parameter[name[io].DEFAULT_BUFFER_SIZE]]
if <ast.UnaryOp object at 0x7da1b208a800> begin[:]
break
call[name[chunks].append, parameter[name[chunk]]]
variable[thread] assign[=] call[name[threading].Thread, parameter[]]
call[name[thread].start, parameter[]]
return[name[thread]] | keyword[def] identifier[_start_reader_thread] ( identifier[self] , identifier[stream] , identifier[chunks] ):
literal[string]
keyword[import] identifier[io]
keyword[import] identifier[threading]
keyword[def] identifier[target] ():
keyword[while] keyword[True] :
identifier[chunk] = identifier[stream] . identifier[read] ( identifier[io] . identifier[DEFAULT_BUFFER_SIZE] )
keyword[if] keyword[not] identifier[chunk] :
keyword[break]
identifier[chunks] . identifier[append] ( identifier[chunk] )
identifier[thread] = identifier[threading] . identifier[Thread] ( identifier[target] = identifier[target] )
identifier[thread] . identifier[start] ()
keyword[return] identifier[thread] | def _start_reader_thread(self, stream, chunks):
"""Starts a thread for reading output from FFMPEG.
The thread reads consecutive chunks from the stream and saves them in
the given list.
Args:
stream: output stream of the FFMPEG process.
chunks: list to save output chunks to.
Returns:
Thread
"""
import io # pylint: disable=g-import-not-at-top
import threading # pylint: disable=g-import-not-at-top
def target():
while True:
chunk = stream.read(io.DEFAULT_BUFFER_SIZE)
if not chunk:
break # depends on [control=['if'], data=[]]
chunks.append(chunk) # depends on [control=['while'], data=[]]
thread = threading.Thread(target=target)
thread.start()
return thread |
def batches(arrays, steps=100, batch_size=64, rng=None):
    '''Create a callable that generates samples from a dataset.

    Parameters
    ----------
    arrays : list of ndarray (time-steps, data-dimensions)
        Arrays of data; rows are time steps and columns are variables.
        Multiple arrays usually correspond to [input, output] or
        [input, output, weights] for recurrent problems.
    steps : int, optional
        Number of time steps per sample. Defaults to 100.
    batch_size : int, optional
        Number of samples generated per call. Defaults to 64. Must match
        the batch_size used to build the recurrent network consuming the
        data.
    rng : :class:`numpy.random.RandomState` or int, optional
        Random number generator, or an integer seed for one. If omitted,
        a generator is created with an automatically chosen seed.

    Returns
    -------
    callable :
        A callable usable inside a dataset for training a recurrent
        network.
    '''
    assert batch_size >= 2, 'batch_size must be at least 2!'
    assert isinstance(arrays, (tuple, list)), 'arrays must be a tuple or list!'
    if rng is None or isinstance(rng, int):
        rng = np.random.RandomState(rng)

    def sample():
        # One zeroed output tensor per source array, matching its dtype.
        batch = [np.zeros((batch_size, steps, arr.shape[1]), arr.dtype)
                 for arr in arrays]
        for row in range(batch_size):
            # Pick one random window start, shared across all arrays so
            # their time steps stay aligned.
            start = rng.randint(len(arrays[0]) - steps)
            stop = start + steps
            for out, arr in zip(batch, arrays):
                out[row] = arr[start:stop]
        return batch

    return sample
constant[Create a callable that generates samples from a dataset.
Parameters
----------
arrays : list of ndarray (time-steps, data-dimensions)
Arrays of data. Rows in these arrays are assumed to correspond to time
steps, and columns to variables. Multiple arrays can be given; in such
a case, these arrays usually correspond to [input, output]---for
example, for a recurrent regression problem---or [input, output,
weights]---for a weighted regression or classification problem.
steps : int, optional
Generate samples of this many time steps. Defaults to 100.
batch_size : int, optional
Generate this many samples per call. Defaults to 64. This must match the
batch_size parameter that was used when creating the recurrent network
that will process the data.
rng : :class:`numpy.random.RandomState` or int, optional
A random number generator, or an integer seed for a random number
generator. If not provided, the random number generator will be created
with an automatically chosen seed.
Returns
-------
callable :
A callable that can be used inside a dataset for training a recurrent
network.
]
assert[compare[name[batch_size] greater_or_equal[>=] constant[2]]]
assert[call[name[isinstance], parameter[name[arrays], tuple[[<ast.Name object at 0x7da1b033f5b0>, <ast.Name object at 0x7da1b033c220>]]]]]
if <ast.BoolOp object at 0x7da1b033f190> begin[:]
variable[rng] assign[=] call[name[np].random.RandomState, parameter[name[rng]]]
def function[sample, parameter[]]:
variable[xs] assign[=] <ast.ListComp object at 0x7da1b033d3c0>
for taget[name[i]] in starred[call[name[range], parameter[name[batch_size]]]] begin[:]
variable[j] assign[=] call[name[rng].randint, parameter[binary_operation[call[name[len], parameter[call[name[arrays]][constant[0]]]] - name[steps]]]]
for taget[tuple[[<ast.Name object at 0x7da18f813580>, <ast.Name object at 0x7da18f812950>]]] in starred[call[name[zip], parameter[name[xs], name[arrays]]]] begin[:]
call[name[x]][name[i]] assign[=] call[name[a]][<ast.Slice object at 0x7da18f810820>]
return[name[xs]]
return[name[sample]] | keyword[def] identifier[batches] ( identifier[arrays] , identifier[steps] = literal[int] , identifier[batch_size] = literal[int] , identifier[rng] = keyword[None] ):
literal[string]
keyword[assert] identifier[batch_size] >= literal[int] , literal[string]
keyword[assert] identifier[isinstance] ( identifier[arrays] ,( identifier[tuple] , identifier[list] )), literal[string]
keyword[if] identifier[rng] keyword[is] keyword[None] keyword[or] identifier[isinstance] ( identifier[rng] , identifier[int] ):
identifier[rng] = identifier[np] . identifier[random] . identifier[RandomState] ( identifier[rng] )
keyword[def] identifier[sample] ():
identifier[xs] =[ identifier[np] . identifier[zeros] (( identifier[batch_size] , identifier[steps] , identifier[a] . identifier[shape] [ literal[int] ]), identifier[a] . identifier[dtype] ) keyword[for] identifier[a] keyword[in] identifier[arrays] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[batch_size] ):
identifier[j] = identifier[rng] . identifier[randint] ( identifier[len] ( identifier[arrays] [ literal[int] ])- identifier[steps] )
keyword[for] identifier[x] , identifier[a] keyword[in] identifier[zip] ( identifier[xs] , identifier[arrays] ):
identifier[x] [ identifier[i] ]= identifier[a] [ identifier[j] : identifier[j] + identifier[steps] ]
keyword[return] identifier[xs]
keyword[return] identifier[sample] | def batches(arrays, steps=100, batch_size=64, rng=None):
"""Create a callable that generates samples from a dataset.
Parameters
----------
arrays : list of ndarray (time-steps, data-dimensions)
Arrays of data. Rows in these arrays are assumed to correspond to time
steps, and columns to variables. Multiple arrays can be given; in such
a case, these arrays usually correspond to [input, output]---for
example, for a recurrent regression problem---or [input, output,
weights]---for a weighted regression or classification problem.
steps : int, optional
Generate samples of this many time steps. Defaults to 100.
batch_size : int, optional
Generate this many samples per call. Defaults to 64. This must match the
batch_size parameter that was used when creating the recurrent network
that will process the data.
rng : :class:`numpy.random.RandomState` or int, optional
A random number generator, or an integer seed for a random number
generator. If not provided, the random number generator will be created
with an automatically chosen seed.
Returns
-------
callable :
A callable that can be used inside a dataset for training a recurrent
network.
"""
assert batch_size >= 2, 'batch_size must be at least 2!'
assert isinstance(arrays, (tuple, list)), 'arrays must be a tuple or list!'
if rng is None or isinstance(rng, int):
rng = np.random.RandomState(rng) # depends on [control=['if'], data=[]]
def sample():
xs = [np.zeros((batch_size, steps, a.shape[1]), a.dtype) for a in arrays]
for i in range(batch_size):
j = rng.randint(len(arrays[0]) - steps)
for (x, a) in zip(xs, arrays):
x[i] = a[j:j + steps] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['i']]
return xs
return sample |
def targetSurfacemass(self, R, log=False):
    """
    NAME:
       targetSurfacemass
    PURPOSE:
       evaluate the target surface mass at R
    INPUT:
       R - radius at which to evaluate (can be Quantity)
       log - if True, return the log (default: False)
    OUTPUT:
       Sigma(R)
    HISTORY:
       2010-03-28 - Written - Bovy (NYU)
    """
    # Delegate straight to the configured surface-density profile.
    profile = self._surfaceSigmaProfile
    return profile.surfacemass(R, log=log)
constant[
NAME:
targetSurfacemass
PURPOSE:
evaluate the target surface mass at R
INPUT:
R - radius at which to evaluate (can be Quantity)
log - if True, return the log (default: False)
OUTPUT:
Sigma(R)
HISTORY:
2010-03-28 - Written - Bovy (NYU)
]
return[call[name[self]._surfaceSigmaProfile.surfacemass, parameter[name[R]]]] | keyword[def] identifier[targetSurfacemass] ( identifier[self] , identifier[R] , identifier[log] = keyword[False] ):
literal[string]
keyword[return] identifier[self] . identifier[_surfaceSigmaProfile] . identifier[surfacemass] ( identifier[R] , identifier[log] = identifier[log] ) | def targetSurfacemass(self, R, log=False):
"""
NAME:
targetSurfacemass
PURPOSE:
evaluate the target surface mass at R
INPUT:
R - radius at which to evaluate (can be Quantity)
log - if True, return the log (default: False)
OUTPUT:
Sigma(R)
HISTORY:
2010-03-28 - Written - Bovy (NYU)
"""
return self._surfaceSigmaProfile.surfacemass(R, log=log) |
def convert_descriptor(self, descriptor):
    """Convert a Table Schema descriptor to a BigQuery schema.

    Returns a ``(converted_descriptor, fallbacks)`` pair, where
    *fallbacks* lists the indexes of fields whose type had no direct
    BigQuery equivalent and was therefore downgraded to ``STRING``.
    """
    converted_fields = []
    fallbacks = []
    schema = tableschema.Schema(descriptor)
    for index, field in enumerate(schema.fields):
        bq_type = self.convert_type(field.type)
        if not bq_type:
            # No direct mapping -- fall back to STRING and record the index.
            bq_type = 'STRING'
            fallbacks.append(index)
        converted_fields.append({
            'name': _slugify_field_name(field.name),
            'type': bq_type,
            'mode': 'REQUIRED' if field.required else 'NULLABLE',
        })
    return ({'fields': converted_fields}, fallbacks)
constant[Convert descriptor to BigQuery
]
variable[fields] assign[=] list[[]]
variable[fallbacks] assign[=] list[[]]
variable[schema] assign[=] call[name[tableschema].Schema, parameter[name[descriptor]]]
for taget[tuple[[<ast.Name object at 0x7da1b2628250>, <ast.Name object at 0x7da1b262a290>]]] in starred[call[name[enumerate], parameter[name[schema].fields]]] begin[:]
variable[converted_type] assign[=] call[name[self].convert_type, parameter[name[field].type]]
if <ast.UnaryOp object at 0x7da1b26281c0> begin[:]
variable[converted_type] assign[=] constant[STRING]
call[name[fallbacks].append, parameter[name[index]]]
variable[mode] assign[=] constant[NULLABLE]
if name[field].required begin[:]
variable[mode] assign[=] constant[REQUIRED]
call[name[fields].append, parameter[dictionary[[<ast.Constant object at 0x7da1b2628eb0>, <ast.Constant object at 0x7da1b2629540>, <ast.Constant object at 0x7da1b2629330>], [<ast.Call object at 0x7da1b2629bd0>, <ast.Name object at 0x7da1b2628970>, <ast.Name object at 0x7da1b262a020>]]]]
variable[converted_descriptor] assign[=] dictionary[[<ast.Constant object at 0x7da1b262ae00>], [<ast.Name object at 0x7da1b26295d0>]]
return[tuple[[<ast.Name object at 0x7da1b262a5f0>, <ast.Name object at 0x7da1b262b6d0>]]] | keyword[def] identifier[convert_descriptor] ( identifier[self] , identifier[descriptor] ):
literal[string]
identifier[fields] =[]
identifier[fallbacks] =[]
identifier[schema] = identifier[tableschema] . identifier[Schema] ( identifier[descriptor] )
keyword[for] identifier[index] , identifier[field] keyword[in] identifier[enumerate] ( identifier[schema] . identifier[fields] ):
identifier[converted_type] = identifier[self] . identifier[convert_type] ( identifier[field] . identifier[type] )
keyword[if] keyword[not] identifier[converted_type] :
identifier[converted_type] = literal[string]
identifier[fallbacks] . identifier[append] ( identifier[index] )
identifier[mode] = literal[string]
keyword[if] identifier[field] . identifier[required] :
identifier[mode] = literal[string]
identifier[fields] . identifier[append] ({
literal[string] : identifier[_slugify_field_name] ( identifier[field] . identifier[name] ),
literal[string] : identifier[converted_type] ,
literal[string] : identifier[mode] ,
})
identifier[converted_descriptor] ={
literal[string] : identifier[fields] ,
}
keyword[return] ( identifier[converted_descriptor] , identifier[fallbacks] ) | def convert_descriptor(self, descriptor):
"""Convert descriptor to BigQuery
"""
# Fields
fields = []
fallbacks = []
schema = tableschema.Schema(descriptor)
for (index, field) in enumerate(schema.fields):
converted_type = self.convert_type(field.type)
if not converted_type:
converted_type = 'STRING'
fallbacks.append(index) # depends on [control=['if'], data=[]]
mode = 'NULLABLE'
if field.required:
mode = 'REQUIRED' # depends on [control=['if'], data=[]]
fields.append({'name': _slugify_field_name(field.name), 'type': converted_type, 'mode': mode}) # depends on [control=['for'], data=[]]
# Descriptor
converted_descriptor = {'fields': fields}
return (converted_descriptor, fallbacks) |
def words(self):
    """
    Yield every word contained in the trie.

    Performs an iterative depth-first traversal: ``branch`` holds the
    current path of nodes, ``word`` the letters along that path, and
    ``indexes`` the position of the next child to visit at each depth.
    """
    branch, word, indexes = [self.root], [], [0]
    letters_with_children = [self._get_children_and_letters(self.root)]
    while len(branch) > 0:
        if self.is_final(branch[-1]):
            yield "".join(word)
        # Backtrack while the current node has no unvisited children.
        while indexes[-1] == len(letters_with_children[-1]):
            indexes.pop()
            letters_with_children.pop()
            branch.pop()
            if len(indexes) == 0:
                # PEP 479: raising StopIteration inside a generator turns
                # into a RuntimeError on Python 3.7+; end iteration with
                # a plain return instead.
                return
            word.pop()
        # Descend into the next unvisited child.
        next_letter, next_child = letters_with_children[-1][indexes[-1]]
        indexes[-1] += 1
        indexes.append(0)
        word.append(next_letter)
        branch.append(next_child)
        letters_with_children.append(self._get_children_and_letters(branch[-1]))
constant[
Возвращает итератор по словам, содержащимся в боре
]
<ast.Tuple object at 0x7da2054a4550> assign[=] tuple[[<ast.List object at 0x7da2054a45b0>, <ast.List object at 0x7da2054a52a0>, <ast.List object at 0x7da2054a69b0>]]
variable[letters_with_children] assign[=] list[[<ast.Call object at 0x7da2054a7940>]]
while compare[call[name[len], parameter[name[branch]]] greater[>] constant[0]] begin[:]
if call[name[self].is_final, parameter[call[name[branch]][<ast.UnaryOp object at 0x7da2054a7040>]]] begin[:]
<ast.Yield object at 0x7da2054a6aa0>
while compare[call[name[indexes]][<ast.UnaryOp object at 0x7da2054a7d90>] equal[==] call[name[len], parameter[call[name[letters_with_children]][<ast.UnaryOp object at 0x7da2054a67a0>]]]] begin[:]
call[name[indexes].pop, parameter[]]
call[name[letters_with_children].pop, parameter[]]
call[name[branch].pop, parameter[]]
if compare[call[name[len], parameter[name[indexes]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da2054a49d0>
call[name[word].pop, parameter[]]
<ast.Tuple object at 0x7da2054a6a70> assign[=] call[call[name[letters_with_children]][<ast.UnaryOp object at 0x7da2054a76d0>]][call[name[indexes]][<ast.UnaryOp object at 0x7da2054a5750>]]
<ast.AugAssign object at 0x7da2054a6a40>
call[name[indexes].append, parameter[constant[0]]]
call[name[word].append, parameter[name[next_letter]]]
call[name[branch].append, parameter[name[next_child]]]
call[name[letters_with_children].append, parameter[call[name[self]._get_children_and_letters, parameter[call[name[branch]][<ast.UnaryOp object at 0x7da2054a61a0>]]]]] | keyword[def] identifier[words] ( identifier[self] ):
literal[string]
identifier[branch] , identifier[word] , identifier[indexes] =[ identifier[self] . identifier[root] ],[],[ literal[int] ]
identifier[letters_with_children] =[ identifier[self] . identifier[_get_children_and_letters] ( identifier[self] . identifier[root] )]
keyword[while] identifier[len] ( identifier[branch] )> literal[int] :
keyword[if] identifier[self] . identifier[is_final] ( identifier[branch] [- literal[int] ]):
keyword[yield] literal[string] . identifier[join] ( identifier[word] )
keyword[while] identifier[indexes] [- literal[int] ]== identifier[len] ( identifier[letters_with_children] [- literal[int] ]):
identifier[indexes] . identifier[pop] ()
identifier[letters_with_children] . identifier[pop] ()
identifier[branch] . identifier[pop] ()
keyword[if] identifier[len] ( identifier[indexes] )== literal[int] :
keyword[raise] identifier[StopIteration] ()
identifier[word] . identifier[pop] ()
identifier[next_letter] , identifier[next_child] = identifier[letters_with_children] [- literal[int] ][ identifier[indexes] [- literal[int] ]]
identifier[indexes] [- literal[int] ]+= literal[int]
identifier[indexes] . identifier[append] ( literal[int] )
identifier[word] . identifier[append] ( identifier[next_letter] )
identifier[branch] . identifier[append] ( identifier[next_child] )
identifier[letters_with_children] . identifier[append] ( identifier[self] . identifier[_get_children_and_letters] ( identifier[branch] [- literal[int] ])) | def words(self):
"""
Возвращает итератор по словам, содержащимся в боре
"""
(branch, word, indexes) = ([self.root], [], [0])
letters_with_children = [self._get_children_and_letters(self.root)]
while len(branch) > 0:
if self.is_final(branch[-1]):
yield ''.join(word) # depends on [control=['if'], data=[]]
while indexes[-1] == len(letters_with_children[-1]):
indexes.pop()
letters_with_children.pop()
branch.pop()
if len(indexes) == 0:
raise StopIteration() # depends on [control=['if'], data=[]]
word.pop() # depends on [control=['while'], data=[]]
(next_letter, next_child) = letters_with_children[-1][indexes[-1]]
indexes[-1] += 1
indexes.append(0)
word.append(next_letter)
branch.append(next_child)
letters_with_children.append(self._get_children_and_letters(branch[-1])) # depends on [control=['while'], data=[]] |
def raise_on_packet(self, pkt_cls, state, get_next_msg=True):
    """
    If the next message to be processed has type 'pkt_cls', raise 'state'.
    If there is no message waiting to be processed, we try to get one with
    the default 'get_next_msg' parameters.
    """
    # The expected packet may already be buffered; otherwise try to fetch one.
    if get_next_msg:
        self.get_next_msg()
    queue = self.buffer_in
    if queue and isinstance(queue[0], pkt_cls):
        # Consume the matching packet, then signal the state transition.
        self.cur_pkt = queue[0]
        self.buffer_in = queue[1:]
        raise state()
constant[
If the next message to be processed has type 'pkt_cls', raise 'state'.
If there is no message waiting to be processed, we try to get one with
the default 'get_next_msg' parameters.
]
if name[get_next_msg] begin[:]
call[name[self].get_next_msg, parameter[]]
if <ast.BoolOp object at 0x7da1b1c3c550> begin[:]
return[None]
name[self].cur_pkt assign[=] call[name[self].buffer_in][constant[0]]
name[self].buffer_in assign[=] call[name[self].buffer_in][<ast.Slice object at 0x7da18f09ce20>]
<ast.Raise object at 0x7da18f09d4e0> | keyword[def] identifier[raise_on_packet] ( identifier[self] , identifier[pkt_cls] , identifier[state] , identifier[get_next_msg] = keyword[True] ):
literal[string]
keyword[if] identifier[get_next_msg] :
identifier[self] . identifier[get_next_msg] ()
keyword[if] ( keyword[not] identifier[self] . identifier[buffer_in] keyword[or]
keyword[not] identifier[isinstance] ( identifier[self] . identifier[buffer_in] [ literal[int] ], identifier[pkt_cls] )):
keyword[return]
identifier[self] . identifier[cur_pkt] = identifier[self] . identifier[buffer_in] [ literal[int] ]
identifier[self] . identifier[buffer_in] = identifier[self] . identifier[buffer_in] [ literal[int] :]
keyword[raise] identifier[state] () | def raise_on_packet(self, pkt_cls, state, get_next_msg=True):
"""
If the next message to be processed has type 'pkt_cls', raise 'state'.
If there is no message waiting to be processed, we try to get one with
the default 'get_next_msg' parameters.
"""
# Maybe we already parsed the expected packet, maybe not.
if get_next_msg:
self.get_next_msg() # depends on [control=['if'], data=[]]
if not self.buffer_in or not isinstance(self.buffer_in[0], pkt_cls):
return # depends on [control=['if'], data=[]]
self.cur_pkt = self.buffer_in[0]
self.buffer_in = self.buffer_in[1:]
raise state() |
def _connect_mitogen_su(spec):
    """
    Return ContextService arguments for su as a first class connection.
    """
    # Gather the su-method parameters from the play context spec.
    method_kwargs = {
        'username': spec.remote_user(),
        'password': spec.password(),
        'python_path': spec.python_path(),
        'su_path': spec.become_exe(),
        'connect_timeout': spec.timeout(),
        'remote_name': get_remote_name(spec),
    }
    return {'method': 'su', 'kwargs': method_kwargs}
constant[
Return ContextService arguments for su as a first class connection.
]
return[dictionary[[<ast.Constant object at 0x7da1b1d50ca0>, <ast.Constant object at 0x7da1b1d528c0>], [<ast.Constant object at 0x7da1b1d500a0>, <ast.Dict object at 0x7da1b1d51bd0>]]] | keyword[def] identifier[_connect_mitogen_su] ( identifier[spec] ):
literal[string]
keyword[return] {
literal[string] : literal[string] ,
literal[string] :{
literal[string] : identifier[spec] . identifier[remote_user] (),
literal[string] : identifier[spec] . identifier[password] (),
literal[string] : identifier[spec] . identifier[python_path] (),
literal[string] : identifier[spec] . identifier[become_exe] (),
literal[string] : identifier[spec] . identifier[timeout] (),
literal[string] : identifier[get_remote_name] ( identifier[spec] ),
}
} | def _connect_mitogen_su(spec):
"""
Return ContextService arguments for su as a first class connection.
"""
return {'method': 'su', 'kwargs': {'username': spec.remote_user(), 'password': spec.password(), 'python_path': spec.python_path(), 'su_path': spec.become_exe(), 'connect_timeout': spec.timeout(), 'remote_name': get_remote_name(spec)}} |
def CheckBlobsExist(self, blob_ids):
  """Check if blobs for the given digests already exist."""
  # Default every digest to "missing", then flip the ones we can open.
  res = dict.fromkeys(blob_ids, False)
  urns = {}
  for blob_id in blob_ids:
    urns[self._BlobUrn(blob_id)] = blob_id
  existing = aff4.FACTORY.MultiOpen(
      urns, aff4_type=aff4.AFF4MemoryStreamBase, mode="r")
  for blob in existing:
    res[urns[blob.urn]] = True
  return res
constant[Check if blobs for the given digests already exist.]
variable[res] assign[=] <ast.DictComp object at 0x7da1b1c254b0>
variable[urns] assign[=] <ast.DictComp object at 0x7da1b1c0e3b0>
variable[existing] assign[=] call[name[aff4].FACTORY.MultiOpen, parameter[name[urns]]]
for taget[name[blob]] in starred[name[existing]] begin[:]
call[name[res]][call[name[urns]][name[blob].urn]] assign[=] constant[True]
return[name[res]] | keyword[def] identifier[CheckBlobsExist] ( identifier[self] , identifier[blob_ids] ):
literal[string]
identifier[res] ={ identifier[blob_id] : keyword[False] keyword[for] identifier[blob_id] keyword[in] identifier[blob_ids] }
identifier[urns] ={ identifier[self] . identifier[_BlobUrn] ( identifier[blob_id] ): identifier[blob_id] keyword[for] identifier[blob_id] keyword[in] identifier[blob_ids] }
identifier[existing] = identifier[aff4] . identifier[FACTORY] . identifier[MultiOpen] (
identifier[urns] , identifier[aff4_type] = identifier[aff4] . identifier[AFF4MemoryStreamBase] , identifier[mode] = literal[string] )
keyword[for] identifier[blob] keyword[in] identifier[existing] :
identifier[res] [ identifier[urns] [ identifier[blob] . identifier[urn] ]]= keyword[True]
keyword[return] identifier[res] | def CheckBlobsExist(self, blob_ids):
"""Check if blobs for the given digests already exist."""
res = {blob_id: False for blob_id in blob_ids}
urns = {self._BlobUrn(blob_id): blob_id for blob_id in blob_ids}
existing = aff4.FACTORY.MultiOpen(urns, aff4_type=aff4.AFF4MemoryStreamBase, mode='r')
for blob in existing:
res[urns[blob.urn]] = True # depends on [control=['for'], data=['blob']]
return res |
def set_timeout(scope, timeout):
    """
    Defines the time after which Exscript fails if it does not receive a
    prompt from the remote host.

    :type timeout: int
    :param timeout: The timeout in seconds.
    """
    # Template arguments arrive as a list; the timeout is its first element.
    seconds = int(timeout[0])
    scope.get('__connection__').set_timeout(seconds)
    return True
constant[
Defines the time after which Exscript fails if it does not receive a
prompt from the remote host.
:type timeout: int
:param timeout: The timeout in seconds.
]
variable[conn] assign[=] call[name[scope].get, parameter[constant[__connection__]]]
call[name[conn].set_timeout, parameter[call[name[int], parameter[call[name[timeout]][constant[0]]]]]]
return[constant[True]] | keyword[def] identifier[set_timeout] ( identifier[scope] , identifier[timeout] ):
literal[string]
identifier[conn] = identifier[scope] . identifier[get] ( literal[string] )
identifier[conn] . identifier[set_timeout] ( identifier[int] ( identifier[timeout] [ literal[int] ]))
keyword[return] keyword[True] | def set_timeout(scope, timeout):
"""
Defines the time after which Exscript fails if it does not receive a
prompt from the remote host.
:type timeout: int
:param timeout: The timeout in seconds.
"""
conn = scope.get('__connection__')
conn.set_timeout(int(timeout[0]))
return True |
def as_bel(self) -> str:
    """Return this fusion range as a BEL string."""
    # Look up the three components of the range, then render them as
    # ``reference.start_stop``.
    reference = self[FUSION_REFERENCE]
    start = self[FUSION_START]
    stop = self[FUSION_STOP]
    return '{}.{}_{}'.format(reference, start, stop)
constant[Return this fusion range as a BEL string.]
return[call[constant[{reference}.{start}_{stop}].format, parameter[]]] | keyword[def] identifier[as_bel] ( identifier[self] )-> identifier[str] :
literal[string]
keyword[return] literal[string] . identifier[format] (
identifier[reference] = identifier[self] [ identifier[FUSION_REFERENCE] ],
identifier[start] = identifier[self] [ identifier[FUSION_START] ],
identifier[stop] = identifier[self] [ identifier[FUSION_STOP] ],
) | def as_bel(self) -> str:
"""Return this fusion range as a BEL string."""
return '{reference}.{start}_{stop}'.format(reference=self[FUSION_REFERENCE], start=self[FUSION_START], stop=self[FUSION_STOP]) |
def create_alert(self, **kwargs):  # noqa: E501
    """Create a specific alert  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_alert(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param Alert body: the alert definition (classic or threshold form)
    :return: ResponseContainerAlert
             If the method is called asynchronously,
             returns the request thread.
    """
    # Request only the response payload, not the full HTTP envelope.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.create_alert_with_http_info(**kwargs)  # noqa: E501
    data = self.create_alert_with_http_info(**kwargs)  # noqa: E501
    return data
constant[Create a specific alert # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_alert(async_req=True)
>>> result = thread.get()
:param async_req bool
:param Alert body: Example Classic Body: <pre>{ "name": "Alert Name", "target": "success@simulator.amazonses.com", "condition": "ts(~sample.cpu.loadavg.1m) > 1", "displayExpression": "ts(~sample.cpu.loadavg.1m)", "minutes": 5, "resolveAfterMinutes": 2, "severity": "INFO", "additionalInformation": "Additional Info", "tags": { "customerTags": [ "alertTag1" ] } }</pre> Example Threshold Body: <pre>{ "name": "Alert Name", "alertType": "THRESHOLD", "conditions": { "info": "ts(~sample.cpu.loadavg.1m) > 0", "warn": "ts(~sample.cpu.loadavg.1m) > 2" }, "displayExpression": "ts(~sample.cpu.loadavg.1m)", "minutes": 5, "additionalInformation": "conditions value entry needs to be of the form: displayExpression operator threshold" }</pre>
:return: ResponseContainerAlert
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].create_alert_with_http_info, parameter[]]] | keyword[def] identifier[create_alert] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[create_alert_with_http_info] (** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[create_alert_with_http_info] (** identifier[kwargs] )
keyword[return] identifier[data] | def create_alert(self, **kwargs): # noqa: E501
'Create a specific alert # noqa: E501\n\n # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_alert(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param Alert body: Example Classic Body: <pre>{ "name": "Alert Name", "target": "success@simulator.amazonses.com", "condition": "ts(~sample.cpu.loadavg.1m) > 1", "displayExpression": "ts(~sample.cpu.loadavg.1m)", "minutes": 5, "resolveAfterMinutes": 2, "severity": "INFO", "additionalInformation": "Additional Info", "tags": { "customerTags": [ "alertTag1" ] } }</pre> Example Threshold Body: <pre>{ "name": "Alert Name", "alertType": "THRESHOLD", "conditions": { "info": "ts(~sample.cpu.loadavg.1m) > 0", "warn": "ts(~sample.cpu.loadavg.1m) > 2" }, "displayExpression": "ts(~sample.cpu.loadavg.1m)", "minutes": 5, "additionalInformation": "conditions value entry needs to be of the form: displayExpression operator threshold" }</pre>\n :return: ResponseContainerAlert\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_alert_with_http_info(**kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.create_alert_with_http_info(**kwargs) # noqa: E501
return data |
def reftrack_version_data(rt, role):
    """Return the data for the version that is loaded by the reftrack

    :param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
    :type rt: :class:`jukeboxcore.reftrack.Reftrack`
    :param role: item data role
    :type role: QtCore.Qt.ItemDataRole
    :returns: data for the version
    :rtype: depending on role
    :raises: None
    """
    taskfileinfo = rt.get_taskfileinfo()
    # Nothing is loaded yet -- there is no version to report.
    if not taskfileinfo:
        return None
    return filesysitemdata.taskfileinfo_version_data(taskfileinfo, role)
constant[Return the data for the version that is loaded by the reftrack
:param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
:type rt: :class:`jukeboxcore.reftrack.Reftrack`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the version
:rtype: depending on role
:raises: None
]
variable[tfi] assign[=] call[name[rt].get_taskfileinfo, parameter[]]
if <ast.UnaryOp object at 0x7da1b16ab2e0> begin[:]
return[None]
return[call[name[filesysitemdata].taskfileinfo_version_data, parameter[name[tfi], name[role]]]] | keyword[def] identifier[reftrack_version_data] ( identifier[rt] , identifier[role] ):
literal[string]
identifier[tfi] = identifier[rt] . identifier[get_taskfileinfo] ()
keyword[if] keyword[not] identifier[tfi] :
keyword[return]
keyword[return] identifier[filesysitemdata] . identifier[taskfileinfo_version_data] ( identifier[tfi] , identifier[role] ) | def reftrack_version_data(rt, role):
"""Return the data for the version that is loaded by the reftrack
:param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
:type rt: :class:`jukeboxcore.reftrack.Reftrack`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the version
:rtype: depending on role
:raises: None
"""
tfi = rt.get_taskfileinfo()
if not tfi:
return # depends on [control=['if'], data=[]]
return filesysitemdata.taskfileinfo_version_data(tfi, role) |
def unscale_and_snap_to_nearest(x, tune_params, eps):
    """Helper func that snaps a scaled variable to the nearest config.

    :param x: scaled position, one component per tunable parameter, each
        in the interval [0, eps*len(values)] for that parameter
    :param tune_params: mapping of parameter name to list of allowed values
    :param eps: width of the sub-interval assigned to each allowed value
    :returns: list with each component of x replaced by the nearest
        allowed value of the corresponding parameter
    """
    x_u = list(x)
    for i, values in enumerate(tune_params.values()):
        # create an evenly spaced linear space to map the
        # [0, eps*len(values)]-interval to actual values, giving each
        # value an equal chance
        pad = 0.5 * eps
        linspace = numpy.linspace(pad, (eps * len(values)) - pad, len(values))
        # snap value to nearest point in space, store index
        idx = numpy.abs(linspace - x[i]).argmin()
        # safeguard that should not be needed
        idx = min(max(idx, 0), len(values) - 1)
        # use index into array of actual values
        # (plain float division replaces the Python-2 old_div compat shim;
        # both operands are floats, so behavior is identical)
        x_u[i] = values[idx]
    return x_u
constant[helper func that snaps a scaled variable to the nearest config]
variable[x_u] assign[=] <ast.ListComp object at 0x7da1b04d8160>
for taget[tuple[[<ast.Name object at 0x7da1b04da320>, <ast.Name object at 0x7da1b04d83d0>]]] in starred[call[name[enumerate], parameter[call[name[tune_params].values, parameter[]]]]] begin[:]
variable[pad] assign[=] binary_operation[constant[0.5] * name[eps]]
variable[linspace] assign[=] call[name[numpy].linspace, parameter[name[pad], binary_operation[binary_operation[name[eps] * call[name[len], parameter[name[v]]]] - name[pad]], call[name[len], parameter[name[v]]]]]
variable[idx] assign[=] call[call[name[numpy].abs, parameter[binary_operation[name[linspace] - call[name[x]][name[i]]]]].argmin, parameter[]]
variable[idx] assign[=] call[name[min], parameter[call[name[max], parameter[name[idx], constant[0]]], binary_operation[call[name[len], parameter[name[v]]] - constant[1]]]]
call[name[x_u]][name[i]] assign[=] call[name[v]][name[idx]]
return[name[x_u]] | keyword[def] identifier[unscale_and_snap_to_nearest] ( identifier[x] , identifier[tune_params] , identifier[eps] ):
literal[string]
identifier[x_u] =[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[x] ]
keyword[for] identifier[i] , identifier[v] keyword[in] identifier[enumerate] ( identifier[tune_params] . identifier[values] ()):
identifier[pad] = literal[int] * identifier[eps]
identifier[linspace] = identifier[numpy] . identifier[linspace] ( identifier[pad] ,( identifier[eps] * identifier[len] ( identifier[v] ))- identifier[pad] , identifier[len] ( identifier[v] ))
identifier[idx] = identifier[numpy] . identifier[abs] ( identifier[linspace] - identifier[x] [ identifier[i] ]). identifier[argmin] ()
identifier[idx] = identifier[min] ( identifier[max] ( identifier[idx] , literal[int] ), identifier[len] ( identifier[v] )- literal[int] )
identifier[x_u] [ identifier[i] ]= identifier[v] [ identifier[idx] ]
keyword[return] identifier[x_u] | def unscale_and_snap_to_nearest(x, tune_params, eps):
"""helper func that snaps a scaled variable to the nearest config"""
x_u = [i for i in x]
for (i, v) in enumerate(tune_params.values()):
#create an evenly spaced linear space to map [0,1]-interval
#to actual values, giving each value an equal chance
#pad = 0.5/len(v) #use when interval is [0,1]
pad = 0.5 * eps #use when interval is [0, eps*len(v)]
linspace = numpy.linspace(pad, eps * len(v) - pad, len(v))
#snap value to nearest point in space, store index
idx = numpy.abs(linspace - x[i]).argmin()
#safeguard that should not be needed
idx = min(max(idx, 0), len(v) - 1)
#use index into array of actual values
x_u[i] = v[idx] # depends on [control=['for'], data=[]]
return x_u |
def list_boards():
    """! @brief Generate dictionary with info about supported boards.

    Output version history:
    - 1.0, initial version
    """
    # One entry per known board ID, flattened from the board-info table.
    boards = [
        {
            'id': board_id,
            'name': info.name,
            'target': info.target,
            'binary': info.binary,
        }
        for board_id, info in BOARD_ID_TO_INFO.items()
    ]
    return {
        'pyocd_version': __version__,
        'version': {'major': 1, 'minor': 0},
        'status': 0,
        'boards': boards,
    }
constant[! @brief Generate dictionary with info about supported boards.
Output version history:
- 1.0, initial version
]
variable[boards] assign[=] list[[]]
variable[obj] assign[=] dictionary[[<ast.Constant object at 0x7da1b18bfcd0>, <ast.Constant object at 0x7da1b18be590>, <ast.Constant object at 0x7da1b18bcf40>, <ast.Constant object at 0x7da1b18be530>], [<ast.Name object at 0x7da1b18bf7f0>, <ast.Dict object at 0x7da1b18bddb0>, <ast.Constant object at 0x7da1b18bf5b0>, <ast.Name object at 0x7da1b18bf8e0>]]
for taget[tuple[[<ast.Name object at 0x7da1b18bf9d0>, <ast.Name object at 0x7da1b18bcac0>]]] in starred[call[name[BOARD_ID_TO_INFO].items, parameter[]]] begin[:]
variable[d] assign[=] dictionary[[<ast.Constant object at 0x7da1b18bf430>, <ast.Constant object at 0x7da1b18bfca0>, <ast.Constant object at 0x7da1b18bdcf0>, <ast.Constant object at 0x7da1b18bc670>], [<ast.Name object at 0x7da1b18bf3d0>, <ast.Attribute object at 0x7da1b18bf6d0>, <ast.Attribute object at 0x7da1b18bdc30>, <ast.Attribute object at 0x7da1b18bc5e0>]]
call[name[boards].append, parameter[name[d]]]
return[name[obj]] | keyword[def] identifier[list_boards] ():
literal[string]
identifier[boards] =[]
identifier[obj] ={
literal[string] : identifier[__version__] ,
literal[string] :{ literal[string] : literal[int] , literal[string] : literal[int] },
literal[string] : literal[int] ,
literal[string] : identifier[boards]
}
keyword[for] identifier[board_id] , identifier[info] keyword[in] identifier[BOARD_ID_TO_INFO] . identifier[items] ():
identifier[d] ={
literal[string] : identifier[board_id] ,
literal[string] : identifier[info] . identifier[name] ,
literal[string] : identifier[info] . identifier[target] ,
literal[string] : identifier[info] . identifier[binary] ,
}
identifier[boards] . identifier[append] ( identifier[d] )
keyword[return] identifier[obj] | def list_boards():
"""! @brief Generate dictionary with info about supported boards.
Output version history:
- 1.0, initial version
"""
boards = []
obj = {'pyocd_version': __version__, 'version': {'major': 1, 'minor': 0}, 'status': 0, 'boards': boards}
for (board_id, info) in BOARD_ID_TO_INFO.items():
d = {'id': board_id, 'name': info.name, 'target': info.target, 'binary': info.binary}
boards.append(d) # depends on [control=['for'], data=[]]
return obj |
def plot_qq_unf(fignum, D, title, subplot=False, degrees=True):
    """
    plots data against a uniform distribution in 0=>360.

    Parameters
    _________
    fignum : matplotlib figure number
    D : data
    title : title for plot
    subplot : if True, make this number one of two subplots
    degrees : if True, assume that these are degrees

    Return
    Mu : Mu statistic (Fisher et al., 1987)
    Mu_crit : critical value of Mu for uniform distribution

    Effect
    ______
    makes a Quantile Quantile plot of data
    """
    if subplot == True:
        plt.subplot(1, 2, fignum)
    else:
        plt.figure(num=fignum)
    if degrees:
        # wrap angles into [0, 360)
        D = (np.array(D)) % 360
    # sorted data quantiles, normalized by the largest value
    X = np.sort(D/D.max())
    n = float(len(D))
    i = np.arange(0, len(D))
    # uniform quantiles
    Y = (i-0.5)/n
    # Kuiper deviations between the empirical and uniform CDFs
    ds = (i/n)-X
    dpos = ds.max()
    dneg = ds.min()
    plt.plot(Y, X, 'ro')
    v = dneg + dpos  # kuiper's v
    # Mu of fisher et al. equation 5.16
    Mu = v * (np.sqrt(n) - 0.567 + (old_div(1.623, (np.sqrt(n)))))
    plt.axis([0, 1., 0., 1.])
    bounds = plt.axis()
    notestr = 'N: ' + '%i' % (n)
    plt.text(.1 * bounds[1], .9 * bounds[3], notestr)
    notestr = 'Mu: ' + '%7.3f' % (Mu)
    plt.text(.1 * bounds[1], .8 * bounds[3], notestr)
    # Classify against the critical values of Mu (Fisher et al., 1987).
    # The original chain left notestr stale when Mu == 1.207 exactly; the
    # final branch is now a catch-all so every Mu gets a label.
    if Mu > 1.347:
        notestr = "Non-uniform (99%)"
    elif Mu < 1.207:
        notestr = "Uniform (95%)"
    else:
        notestr = "Uniform (99%)"
    # single annotation (the original called plt.text twice with identical
    # arguments, overdrawing the same string)
    plt.text(.1 * bounds[1], .7 * bounds[3], notestr)
    plt.title(title)
    plt.xlabel('Uniform Quantile')
    plt.ylabel('Data Quantile')
    return Mu, 1.207
constant[
plots data against a uniform distribution in 0=>360.
Parameters
_________
fignum : matplotlib figure number
D : data
title : title for plot
subplot : if True, make this number one of two subplots
degrees : if True, assume that these are degrees
Return
Mu : Mu statistic (Fisher et al., 1987)
Mu_crit : critical value of Mu for uniform distribution
Effect
______
makes a Quantile Quantile plot of data
]
if compare[name[subplot] equal[==] constant[True]] begin[:]
call[name[plt].subplot, parameter[constant[1], constant[2], name[fignum]]]
<ast.Tuple object at 0x7da1b0535a50> assign[=] tuple[[<ast.List object at 0x7da1b0537a60>, <ast.List object at 0x7da1b0537a00>, <ast.Constant object at 0x7da1b05379d0>, <ast.Constant object at 0x7da1b0537d60>]]
if name[degrees] begin[:]
variable[D] assign[=] binary_operation[call[name[np].array, parameter[name[D]]] <ast.Mod object at 0x7da2590d6920> constant[360]]
variable[X] assign[=] binary_operation[name[D] / call[name[D].max, parameter[]]]
variable[X] assign[=] call[name[np].sort, parameter[name[X]]]
variable[n] assign[=] call[name[float], parameter[call[name[len], parameter[name[D]]]]]
variable[i] assign[=] call[name[np].arange, parameter[constant[0], call[name[len], parameter[name[D]]]]]
variable[Y] assign[=] binary_operation[binary_operation[name[i] - constant[0.5]] / name[n]]
variable[ds] assign[=] binary_operation[binary_operation[name[i] / name[n]] - name[X]]
variable[dpos] assign[=] call[name[ds].max, parameter[]]
variable[dneg] assign[=] call[name[ds].min, parameter[]]
call[name[plt].plot, parameter[name[Y], name[X], constant[ro]]]
variable[v] assign[=] binary_operation[name[dneg] + name[dpos]]
variable[Mu] assign[=] binary_operation[name[v] * binary_operation[binary_operation[call[name[np].sqrt, parameter[name[n]]] - constant[0.567]] + call[name[old_div], parameter[constant[1.623], call[name[np].sqrt, parameter[name[n]]]]]]]
call[name[plt].axis, parameter[list[[<ast.Constant object at 0x7da1b04fcbe0>, <ast.Constant object at 0x7da1b04fcc40>, <ast.Constant object at 0x7da1b04fcd00>, <ast.Constant object at 0x7da1b04fcd90>]]]]
variable[bounds] assign[=] call[name[plt].axis, parameter[]]
variable[notestr] assign[=] binary_operation[constant[N: ] + binary_operation[constant[%i] <ast.Mod object at 0x7da2590d6920> name[n]]]
call[name[plt].text, parameter[binary_operation[constant[0.1] * call[name[bounds]][constant[1]]], binary_operation[constant[0.9] * call[name[bounds]][constant[3]]], name[notestr]]]
variable[notestr] assign[=] binary_operation[constant[Mu: ] + binary_operation[constant[%7.3f] <ast.Mod object at 0x7da2590d6920> name[Mu]]]
call[name[plt].text, parameter[binary_operation[constant[0.1] * call[name[bounds]][constant[1]]], binary_operation[constant[0.8] * call[name[bounds]][constant[3]]], name[notestr]]]
if compare[name[Mu] greater[>] constant[1.347]] begin[:]
variable[notestr] assign[=] constant[Non-uniform (99%)]
call[name[plt].text, parameter[binary_operation[constant[0.1] * call[name[bounds]][constant[1]]], binary_operation[constant[0.7] * call[name[bounds]][constant[3]]], name[notestr]]]
call[name[plt].text, parameter[binary_operation[constant[0.1] * call[name[bounds]][constant[1]]], binary_operation[constant[0.7] * call[name[bounds]][constant[3]]], name[notestr]]]
call[name[plt].title, parameter[name[title]]]
call[name[plt].xlabel, parameter[constant[Uniform Quantile]]]
call[name[plt].ylabel, parameter[constant[Data Quantile]]]
return[tuple[[<ast.Name object at 0x7da1b04fdd50>, <ast.Constant object at 0x7da1b04ff520>]]] | keyword[def] identifier[plot_qq_unf] ( identifier[fignum] , identifier[D] , identifier[title] , identifier[subplot] = keyword[False] , identifier[degrees] = keyword[True] ):
literal[string]
keyword[if] identifier[subplot] == keyword[True] :
identifier[plt] . identifier[subplot] ( literal[int] , literal[int] , identifier[fignum] )
keyword[else] :
identifier[plt] . identifier[figure] ( identifier[num] = identifier[fignum] )
identifier[X] , identifier[Y] , identifier[dpos] , identifier[dneg] =[],[], literal[int] , literal[int]
keyword[if] identifier[degrees] :
identifier[D] =( identifier[np] . identifier[array] ( identifier[D] ))% literal[int]
identifier[X] = identifier[D] / identifier[D] . identifier[max] ()
identifier[X] = identifier[np] . identifier[sort] ( identifier[X] )
identifier[n] = identifier[float] ( identifier[len] ( identifier[D] ))
identifier[i] = identifier[np] . identifier[arange] ( literal[int] , identifier[len] ( identifier[D] ))
identifier[Y] =( identifier[i] - literal[int] )/ identifier[n]
identifier[ds] =( identifier[i] / identifier[n] )- identifier[X]
identifier[dpos] = identifier[ds] . identifier[max] ()
identifier[dneg] = identifier[ds] . identifier[min] ()
identifier[plt] . identifier[plot] ( identifier[Y] , identifier[X] , literal[string] )
identifier[v] = identifier[dneg] + identifier[dpos]
identifier[Mu] = identifier[v] *( identifier[np] . identifier[sqrt] ( identifier[n] )- literal[int] +( identifier[old_div] ( literal[int] ,( identifier[np] . identifier[sqrt] ( identifier[n] )))))
identifier[plt] . identifier[axis] ([ literal[int] , literal[int] , literal[int] , literal[int] ])
identifier[bounds] = identifier[plt] . identifier[axis] ()
identifier[notestr] = literal[string] + literal[string] %( identifier[n] )
identifier[plt] . identifier[text] ( literal[int] * identifier[bounds] [ literal[int] ], literal[int] * identifier[bounds] [ literal[int] ], identifier[notestr] )
identifier[notestr] = literal[string] + literal[string] %( identifier[Mu] )
identifier[plt] . identifier[text] ( literal[int] * identifier[bounds] [ literal[int] ], literal[int] * identifier[bounds] [ literal[int] ], identifier[notestr] )
keyword[if] identifier[Mu] > literal[int] :
identifier[notestr] = literal[string]
keyword[elif] identifier[Mu] < literal[int] :
identifier[notestr] = literal[string]
keyword[elif] identifier[Mu] > literal[int] :
identifier[notestr] = literal[string]
identifier[plt] . identifier[text] ( literal[int] * identifier[bounds] [ literal[int] ], literal[int] * identifier[bounds] [ literal[int] ], identifier[notestr] )
identifier[plt] . identifier[text] ( literal[int] * identifier[bounds] [ literal[int] ], literal[int] * identifier[bounds] [ literal[int] ], identifier[notestr] )
identifier[plt] . identifier[title] ( identifier[title] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
keyword[return] identifier[Mu] , literal[int] | def plot_qq_unf(fignum, D, title, subplot=False, degrees=True):
"""
plots data against a uniform distribution in 0=>360.
Parameters
_________
fignum : matplotlib figure number
D : data
title : title for plot
subplot : if True, make this number one of two subplots
degrees : if True, assume that these are degrees
Return
Mu : Mu statistic (Fisher et al., 1987)
Mu_crit : critical value of Mu for uniform distribution
Effect
______
makes a Quantile Quantile plot of data
"""
if subplot == True:
plt.subplot(1, 2, fignum) # depends on [control=['if'], data=[]]
else:
plt.figure(num=fignum)
(X, Y, dpos, dneg) = ([], [], 0.0, 0.0)
if degrees:
D = np.array(D) % 360 # depends on [control=['if'], data=[]]
X = D / D.max()
X = np.sort(X)
n = float(len(D))
i = np.arange(0, len(D))
Y = (i - 0.5) / n
ds = i / n - X
dpos = ds.max()
dneg = ds.min()
plt.plot(Y, X, 'ro')
v = dneg + dpos # kuiper's v
# Mu of fisher et al. equation 5.16
Mu = v * (np.sqrt(n) - 0.567 + old_div(1.623, np.sqrt(n)))
plt.axis([0, 1.0, 0.0, 1.0])
bounds = plt.axis()
notestr = 'N: ' + '%i' % n
plt.text(0.1 * bounds[1], 0.9 * bounds[3], notestr)
notestr = 'Mu: ' + '%7.3f' % Mu
plt.text(0.1 * bounds[1], 0.8 * bounds[3], notestr)
if Mu > 1.347:
notestr = 'Non-uniform (99%)' # depends on [control=['if'], data=[]]
elif Mu < 1.207:
notestr = 'Uniform (95%)' # depends on [control=['if'], data=[]]
elif Mu > 1.207:
notestr = 'Uniform (99%)' # depends on [control=['if'], data=[]]
plt.text(0.1 * bounds[1], 0.7 * bounds[3], notestr)
plt.text(0.1 * bounds[1], 0.7 * bounds[3], notestr)
plt.title(title)
plt.xlabel('Uniform Quantile')
plt.ylabel('Data Quantile')
return (Mu, 1.207) |
def _sline_bokeh(self, window_size, y_label):
    """
    Returns a chart with a smooth line from a serie
    """
    try:
        smoothed = self.clone_()
        # moving-average kernel: equal weights summing to one
        kernel = np.ones(int(window_size)) / float(window_size)
        smoothed.df[y_label] = np.convolve(self.df[self.y], kernel, 'same')
        smoothed.chart(self.x, y_label)
        return smoothed.line_()
    except Exception as e:
        self.err(e, self._sline_bokeh, "Can not draw smooth line chart")
constant[
Returns a chart with a smooth line from a serie
]
<ast.Try object at 0x7da204622050> | keyword[def] identifier[_sline_bokeh] ( identifier[self] , identifier[window_size] , identifier[y_label] ):
literal[string]
keyword[try] :
identifier[ds2] = identifier[self] . identifier[clone_] ()
identifier[window] = identifier[np] . identifier[ones] ( identifier[int] ( identifier[window_size] ))/ identifier[float] ( identifier[window_size] )
identifier[ds2] . identifier[df] [ identifier[y_label] ]= identifier[np] . identifier[convolve] ( identifier[self] . identifier[df] [ identifier[self] . identifier[y] ], identifier[window] , literal[string] )
identifier[ds2] . identifier[chart] ( identifier[self] . identifier[x] , identifier[y_label] )
keyword[return] identifier[ds2] . identifier[line_] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[err] ( identifier[e] , identifier[self] . identifier[_sline_bokeh] , literal[string] ) | def _sline_bokeh(self, window_size, y_label):
"""
Returns a chart with a smooth line from a serie
"""
try:
ds2 = self.clone_()
window = np.ones(int(window_size)) / float(window_size)
ds2.df[y_label] = np.convolve(self.df[self.y], window, 'same')
ds2.chart(self.x, y_label)
return ds2.line_() # depends on [control=['try'], data=[]]
except Exception as e:
self.err(e, self._sline_bokeh, 'Can not draw smooth line chart') # depends on [control=['except'], data=['e']] |
def get_structure_seqs(pdb_file, file_type):
    """Get a dictionary of a PDB file's sequences.

    Special cases include:
    - Insertion codes. In the case of residue numbers like "15A", "15B", both residues are written out. Example: 9LPR
    - HETATMs. Currently written as an "X", or unknown amino acid.

    Args:
        pdb_file: Path to PDB file
        file_type: unused in this function; presumably kept for API symmetry
            with other structure parsers -- TODO confirm

    Returns:
        dict: Dictionary of:
        {chain_id: sequence}

    """

    # TODO: Please check out capitalization of chain IDs in mmcif files. example: 5afi - chain "l" is present but
    # it seems like biopython capitalizes it to chain L

    # Get the first model
    my_structure = StructureIO(pdb_file)
    model = my_structure.first_model

    structure_seqs = {}

    # Loop over each chain of the PDB
    for chain in model:
        chain_seq = ''
        # tracker holds the residue number of the last residue appended,
        # used to detect (and X-pad) gaps in the numbering
        tracker = 0

        # Loop over the residues
        for res in chain.get_residues():
            # NOTE: you can get the residue number too
            # res_num = res.id[1]

            # Double check if the residue name is a standard residue
            # If it is not a standard residue (ie. selenomethionine),
            # it will be filled in with an X on the next iteration)
            if Polypeptide.is_aa(res, standard=True):
                full_id = res.get_full_id()
                end_tracker = full_id[3][1]  # residue sequence number
                i_code = full_id[3][2]  # insertion code; ' ' when absent

                aa = Polypeptide.three_to_one(res.get_resname())

                # Tracker to fill in X's
                if end_tracker != (tracker + 1):
                    if i_code != ' ':
                        # Insertion code: residue shares the previous number
                        # (e.g. "15A"), so append it without any padding
                        chain_seq += aa
                        tracker = end_tracker + 1
                        continue
                    else:
                        # Genuine numbering gap: pad one X per skipped residue
                        chain_seq += 'X' * (end_tracker - tracker - 1)

                chain_seq += aa
                tracker = end_tracker

            else:
                # Non-standard residue: skip; a later numbering gap will
                # cause it to appear as X via the padding above
                continue

        structure_seqs[chain.get_id()] = chain_seq

    return structure_seqs
constant[Get a dictionary of a PDB file's sequences.
Special cases include:
- Insertion codes. In the case of residue numbers like "15A", "15B", both residues are written out. Example: 9LPR
- HETATMs. Currently written as an "X", or unknown amino acid.
Args:
pdb_file: Path to PDB file
Returns:
dict: Dictionary of:
{chain_id: sequence}
]
variable[my_structure] assign[=] call[name[StructureIO], parameter[name[pdb_file]]]
variable[model] assign[=] name[my_structure].first_model
variable[structure_seqs] assign[=] dictionary[[], []]
for taget[name[chain]] in starred[name[model]] begin[:]
variable[chain_seq] assign[=] constant[]
variable[tracker] assign[=] constant[0]
for taget[name[res]] in starred[call[name[chain].get_residues, parameter[]]] begin[:]
if call[name[Polypeptide].is_aa, parameter[name[res]]] begin[:]
variable[full_id] assign[=] call[name[res].get_full_id, parameter[]]
variable[end_tracker] assign[=] call[call[name[full_id]][constant[3]]][constant[1]]
variable[i_code] assign[=] call[call[name[full_id]][constant[3]]][constant[2]]
variable[aa] assign[=] call[name[Polypeptide].three_to_one, parameter[call[name[res].get_resname, parameter[]]]]
if compare[name[end_tracker] not_equal[!=] binary_operation[name[tracker] + constant[1]]] begin[:]
if compare[name[i_code] not_equal[!=] constant[ ]] begin[:]
<ast.AugAssign object at 0x7da204620910>
variable[tracker] assign[=] binary_operation[name[end_tracker] + constant[1]]
continue
<ast.AugAssign object at 0x7da1b0e2c490>
variable[tracker] assign[=] name[end_tracker]
call[name[structure_seqs]][call[name[chain].get_id, parameter[]]] assign[=] name[chain_seq]
return[name[structure_seqs]] | keyword[def] identifier[get_structure_seqs] ( identifier[pdb_file] , identifier[file_type] ):
literal[string]
identifier[my_structure] = identifier[StructureIO] ( identifier[pdb_file] )
identifier[model] = identifier[my_structure] . identifier[first_model]
identifier[structure_seqs] ={}
keyword[for] identifier[chain] keyword[in] identifier[model] :
identifier[chain_seq] = literal[string]
identifier[tracker] = literal[int]
keyword[for] identifier[res] keyword[in] identifier[chain] . identifier[get_residues] ():
keyword[if] identifier[Polypeptide] . identifier[is_aa] ( identifier[res] , identifier[standard] = keyword[True] ):
identifier[full_id] = identifier[res] . identifier[get_full_id] ()
identifier[end_tracker] = identifier[full_id] [ literal[int] ][ literal[int] ]
identifier[i_code] = identifier[full_id] [ literal[int] ][ literal[int] ]
identifier[aa] = identifier[Polypeptide] . identifier[three_to_one] ( identifier[res] . identifier[get_resname] ())
keyword[if] identifier[end_tracker] !=( identifier[tracker] + literal[int] ):
keyword[if] identifier[i_code] != literal[string] :
identifier[chain_seq] += identifier[aa]
identifier[tracker] = identifier[end_tracker] + literal[int]
keyword[continue]
keyword[else] :
identifier[chain_seq] += literal[string] *( identifier[end_tracker] - identifier[tracker] - literal[int] )
identifier[chain_seq] += identifier[aa]
identifier[tracker] = identifier[end_tracker]
keyword[else] :
keyword[continue]
identifier[structure_seqs] [ identifier[chain] . identifier[get_id] ()]= identifier[chain_seq]
keyword[return] identifier[structure_seqs] | def get_structure_seqs(pdb_file, file_type):
"""Get a dictionary of a PDB file's sequences.
Special cases include:
- Insertion codes. In the case of residue numbers like "15A", "15B", both residues are written out. Example: 9LPR
- HETATMs. Currently written as an "X", or unknown amino acid.
Args:
pdb_file: Path to PDB file
Returns:
dict: Dictionary of:
{chain_id: sequence}
"""
# TODO: Please check out capitalization of chain IDs in mmcif files. example: 5afi - chain "l" is present but
# it seems like biopython capitalizes it to chain L
# Get the first model
my_structure = StructureIO(pdb_file)
model = my_structure.first_model
structure_seqs = {}
# Loop over each chain of the PDB
for chain in model:
chain_seq = ''
tracker = 0
# Loop over the residues
for res in chain.get_residues():
# NOTE: you can get the residue number too
# res_num = res.id[1]
# Double check if the residue name is a standard residue
# If it is not a standard residue (ie. selenomethionine),
# it will be filled in with an X on the next iteration)
if Polypeptide.is_aa(res, standard=True):
full_id = res.get_full_id()
end_tracker = full_id[3][1]
i_code = full_id[3][2]
aa = Polypeptide.three_to_one(res.get_resname())
# Tracker to fill in X's
if end_tracker != tracker + 1:
if i_code != ' ':
chain_seq += aa
tracker = end_tracker + 1
continue # depends on [control=['if'], data=[]]
else:
chain_seq += 'X' * (end_tracker - tracker - 1) # depends on [control=['if'], data=['end_tracker']]
chain_seq += aa
tracker = end_tracker # depends on [control=['if'], data=[]]
else:
continue # depends on [control=['for'], data=['res']]
structure_seqs[chain.get_id()] = chain_seq # depends on [control=['for'], data=['chain']]
return structure_seqs |
def set_inheritance(obj_name, enabled, obj_type='file', clear=False):
    '''
    Enable or disable an objects inheritance.

    Args:

        obj_name (str):
            The name of the object

        enabled (bool):
            True to enable inheritance, False to disable

        obj_type (Optional[str]):
            The type of object. Only three objects allow inheritance. Valid
            objects are:

            - file (default): This is a file or directory
            - registry
            - registry32 (for WOW64)

        clear (Optional[bool]):
            True to clear existing ACEs, False to keep existing ACEs.
            Default is False

    Returns:
        bool: True if successful, otherwise an Error

    Usage:

    .. code-block:: python

        salt.utils.win_dacl.set_inheritance('C:\\Temp', False)
    '''
    if obj_type not in ['file', 'registry', 'registry32']:
        # Bug fix: the message is about obj_type, but the original
        # formatted obj_name into it
        raise SaltInvocationError(
            'obj_type called with incorrect parameter: {0}'.format(obj_type))

    if clear:
        # Start from an empty DACL so existing ACEs are discarded on save
        obj_dacl = dacl(obj_type=obj_type)
    else:
        # Load the object's current DACL so existing ACEs are preserved
        obj_dacl = dacl(obj_name, obj_type)

    # Saving with protected = not enabled toggles inheritance on the object
    return obj_dacl.save(obj_name, not enabled)
constant[
Enable or disable an objects inheritance.
Args:
obj_name (str):
The name of the object
enabled (bool):
True to enable inheritance, False to disable
obj_type (Optional[str]):
The type of object. Only three objects allow inheritance. Valid
objects are:
- file (default): This is a file or directory
- registry
- registry32 (for WOW64)
clear (Optional[bool]):
True to clear existing ACEs, False to keep existing ACEs.
Default is False
Returns:
bool: True if successful, otherwise an Error
Usage:
.. code-block:: python
salt.utils.win_dacl.set_inheritance('C:\Temp', False)
]
if compare[name[obj_type] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da18dc061d0>, <ast.Constant object at 0x7da18dc05240>, <ast.Constant object at 0x7da18dc04880>]]] begin[:]
<ast.Raise object at 0x7da18dc06050>
if name[clear] begin[:]
variable[obj_dacl] assign[=] call[name[dacl], parameter[]]
return[call[name[obj_dacl].save, parameter[name[obj_name], <ast.UnaryOp object at 0x7da18dc99120>]]] | keyword[def] identifier[set_inheritance] ( identifier[obj_name] , identifier[enabled] , identifier[obj_type] = literal[string] , identifier[clear] = keyword[False] ):
literal[string]
keyword[if] identifier[obj_type] keyword[not] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
keyword[raise] identifier[SaltInvocationError] (
literal[string] . identifier[format] ( identifier[obj_name] ))
keyword[if] identifier[clear] :
identifier[obj_dacl] = identifier[dacl] ( identifier[obj_type] = identifier[obj_type] )
keyword[else] :
identifier[obj_dacl] = identifier[dacl] ( identifier[obj_name] , identifier[obj_type] )
keyword[return] identifier[obj_dacl] . identifier[save] ( identifier[obj_name] , keyword[not] identifier[enabled] ) | def set_inheritance(obj_name, enabled, obj_type='file', clear=False):
"""
Enable or disable an objects inheritance.
Args:
obj_name (str):
The name of the object
enabled (bool):
True to enable inheritance, False to disable
obj_type (Optional[str]):
The type of object. Only three objects allow inheritance. Valid
objects are:
- file (default): This is a file or directory
- registry
- registry32 (for WOW64)
clear (Optional[bool]):
True to clear existing ACEs, False to keep existing ACEs.
Default is False
Returns:
bool: True if successful, otherwise an Error
Usage:
.. code-block:: python
salt.utils.win_dacl.set_inheritance('C:\\Temp', False)
"""
if obj_type not in ['file', 'registry', 'registry32']:
raise SaltInvocationError('obj_type called with incorrect parameter: {0}'.format(obj_name)) # depends on [control=['if'], data=[]]
if clear:
obj_dacl = dacl(obj_type=obj_type) # depends on [control=['if'], data=[]]
else:
obj_dacl = dacl(obj_name, obj_type)
return obj_dacl.save(obj_name, not enabled) |
def SetFlushInterval(self, flush_interval):
    """Set the flush interval.

    Args:
      flush_interval (int): number of events to buffer before doing a bulk
          insert.
    """
    logger.debug('Elasticsearch flush interval: {0:d}'.format(flush_interval))
    self._flush_interval = flush_interval
constant[Set the flush interval.
Args:
flush_interval (int): number of events to buffer before doing a bulk
insert.
]
name[self]._flush_interval assign[=] name[flush_interval]
call[name[logger].debug, parameter[call[constant[Elasticsearch flush interval: {0:d}].format, parameter[name[flush_interval]]]]] | keyword[def] identifier[SetFlushInterval] ( identifier[self] , identifier[flush_interval] ):
literal[string]
identifier[self] . identifier[_flush_interval] = identifier[flush_interval]
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[flush_interval] )) | def SetFlushInterval(self, flush_interval):
"""Set the flush interval.
Args:
flush_interval (int): number of events to buffer before doing a bulk
insert.
"""
self._flush_interval = flush_interval
logger.debug('Elasticsearch flush interval: {0:d}'.format(flush_interval)) |
def summary(args):
    """
    %prog summary coordsfile

    provide summary on id% and cov%, for both query and reference
    """
    # removed unused import: AlignStats was imported from jcvi.formats.blast
    # but never referenced; the stats come from get_stats() below
    p = OptionParser(summary.__doc__)
    p.add_option("-s", dest="single", default=False, action="store_true",
                 help="provide stats per reference seq")

    opts, args = p.parse_args(args)

    # exactly one coords file is expected
    if len(args) != 1:
        sys.exit(p.print_help())

    coordsfile, = args
    alignstats = get_stats(coordsfile)
    alignstats.print_stats()
constant[
%prog summary coordsfile
provide summary on id% and cov%, for both query and reference
]
from relative_module[jcvi.formats.blast] import module[AlignStats]
variable[p] assign[=] call[name[OptionParser], parameter[name[summary].__doc__]]
call[name[p].add_option, parameter[constant[-s]]]
<ast.Tuple object at 0x7da207f9a620> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[1]] begin[:]
call[name[sys].exit, parameter[call[name[p].print_help, parameter[]]]]
<ast.Tuple object at 0x7da207f9aef0> assign[=] name[args]
variable[alignstats] assign[=] call[name[get_stats], parameter[name[coordsfile]]]
call[name[alignstats].print_stats, parameter[]] | keyword[def] identifier[summary] ( identifier[args] ):
literal[string]
keyword[from] identifier[jcvi] . identifier[formats] . identifier[blast] keyword[import] identifier[AlignStats]
identifier[p] = identifier[OptionParser] ( identifier[summary] . identifier[__doc__] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[dest] = literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( identifier[p] . identifier[print_help] ())
identifier[coordsfile] ,= identifier[args]
identifier[alignstats] = identifier[get_stats] ( identifier[coordsfile] )
identifier[alignstats] . identifier[print_stats] () | def summary(args):
"""
%prog summary coordsfile
provide summary on id% and cov%, for both query and reference
"""
from jcvi.formats.blast import AlignStats
p = OptionParser(summary.__doc__)
p.add_option('-s', dest='single', default=False, action='store_true', help='provide stats per reference seq')
(opts, args) = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help()) # depends on [control=['if'], data=[]]
(coordsfile,) = args
alignstats = get_stats(coordsfile)
alignstats.print_stats() |
def get_refresh_stats(self):
    """Return the measured statistics for timed refresh intervals.

    Returns
    -------
    stats : dict
        Measured statistics (the original docstring said float, but a dict
        is built below): ``fps`` -- actual back end updates per second;
        ``jitter`` -- average timer deviation; ``early_avg``/``early_pct``
        and ``late_avg``/``late_pct`` -- average amount and percentage of
        early/late timer firings; ``balance`` -- total lateness minus
        total earliness.
    """
    if self.rf_draw_count == 0:
        # nothing drawn yet, so no meaningful rate
        fps = 0.0
    else:
        # frames drawn divided by wall-clock time since rf_start_time
        interval = time.time() - self.rf_start_time
        fps = self.rf_draw_count / interval
    # max(1, ...) / max(1.0, ...) guard against division by zero when the
    # corresponding counters are still zero
    jitter = self.rf_delta_total / max(1, self.rf_timer_count)
    late_avg = self.rf_late_total / max(1, self.rf_late_count)
    late_pct = self.rf_late_count / max(1.0, float(self.rf_timer_count)) * 100
    early_avg = self.rf_early_total / max(1, self.rf_early_count)
    early_pct = self.rf_early_count / max(1.0, float(self.rf_timer_count)) * 100
    balance = self.rf_late_total - self.rf_early_total
    stats = dict(fps=fps, jitter=jitter,
                 early_avg=early_avg, early_pct=early_pct,
                 late_avg=late_avg, late_pct=late_pct,
                 balance=balance)
return stats | def function[get_refresh_stats, parameter[self]]:
constant[Return the measured statistics for timed refresh intervals.
Returns
-------
stats : float
The measured rate of actual back end updates in frames per second.
]
if compare[name[self].rf_draw_count equal[==] constant[0]] begin[:]
variable[fps] assign[=] constant[0.0]
variable[jitter] assign[=] binary_operation[name[self].rf_delta_total / call[name[max], parameter[constant[1], name[self].rf_timer_count]]]
variable[late_avg] assign[=] binary_operation[name[self].rf_late_total / call[name[max], parameter[constant[1], name[self].rf_late_count]]]
variable[late_pct] assign[=] binary_operation[binary_operation[name[self].rf_late_count / call[name[max], parameter[constant[1.0], call[name[float], parameter[name[self].rf_timer_count]]]]] * constant[100]]
variable[early_avg] assign[=] binary_operation[name[self].rf_early_total / call[name[max], parameter[constant[1], name[self].rf_early_count]]]
variable[early_pct] assign[=] binary_operation[binary_operation[name[self].rf_early_count / call[name[max], parameter[constant[1.0], call[name[float], parameter[name[self].rf_timer_count]]]]] * constant[100]]
variable[balance] assign[=] binary_operation[name[self].rf_late_total - name[self].rf_early_total]
variable[stats] assign[=] call[name[dict], parameter[]]
return[name[stats]] | keyword[def] identifier[get_refresh_stats] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[rf_draw_count] == literal[int] :
identifier[fps] = literal[int]
keyword[else] :
identifier[interval] = identifier[time] . identifier[time] ()- identifier[self] . identifier[rf_start_time]
identifier[fps] = identifier[self] . identifier[rf_draw_count] / identifier[interval]
identifier[jitter] = identifier[self] . identifier[rf_delta_total] / identifier[max] ( literal[int] , identifier[self] . identifier[rf_timer_count] )
identifier[late_avg] = identifier[self] . identifier[rf_late_total] / identifier[max] ( literal[int] , identifier[self] . identifier[rf_late_count] )
identifier[late_pct] = identifier[self] . identifier[rf_late_count] / identifier[max] ( literal[int] , identifier[float] ( identifier[self] . identifier[rf_timer_count] ))* literal[int]
identifier[early_avg] = identifier[self] . identifier[rf_early_total] / identifier[max] ( literal[int] , identifier[self] . identifier[rf_early_count] )
identifier[early_pct] = identifier[self] . identifier[rf_early_count] / identifier[max] ( literal[int] , identifier[float] ( identifier[self] . identifier[rf_timer_count] ))* literal[int]
identifier[balance] = identifier[self] . identifier[rf_late_total] - identifier[self] . identifier[rf_early_total]
identifier[stats] = identifier[dict] ( identifier[fps] = identifier[fps] , identifier[jitter] = identifier[jitter] ,
identifier[early_avg] = identifier[early_avg] , identifier[early_pct] = identifier[early_pct] ,
identifier[late_avg] = identifier[late_avg] , identifier[late_pct] = identifier[late_pct] ,
identifier[balance] = identifier[balance] )
keyword[return] identifier[stats] | def get_refresh_stats(self):
"""Return the measured statistics for timed refresh intervals.
Returns
-------
stats : float
The measured rate of actual back end updates in frames per second.
"""
if self.rf_draw_count == 0:
fps = 0.0 # depends on [control=['if'], data=[]]
else:
interval = time.time() - self.rf_start_time
fps = self.rf_draw_count / interval
jitter = self.rf_delta_total / max(1, self.rf_timer_count)
late_avg = self.rf_late_total / max(1, self.rf_late_count)
late_pct = self.rf_late_count / max(1.0, float(self.rf_timer_count)) * 100
early_avg = self.rf_early_total / max(1, self.rf_early_count)
early_pct = self.rf_early_count / max(1.0, float(self.rf_timer_count)) * 100
balance = self.rf_late_total - self.rf_early_total
stats = dict(fps=fps, jitter=jitter, early_avg=early_avg, early_pct=early_pct, late_avg=late_avg, late_pct=late_pct, balance=balance)
return stats |
def init():
'''
Return the list of svn remotes and their configuration information
'''
bp_ = os.path.join(__opts__['cachedir'], 'svnfs')
new_remote = False
repos = []
per_remote_defaults = {}
for param in PER_REMOTE_OVERRIDES:
per_remote_defaults[param] = \
six.text_type(__opts__['svnfs_{0}'.format(param)])
for remote in __opts__['svnfs_remotes']:
repo_conf = copy.deepcopy(per_remote_defaults)
if isinstance(remote, dict):
repo_url = next(iter(remote))
per_remote_conf = dict(
[(key, six.text_type(val)) for key, val in
six.iteritems(salt.utils.data.repack_dictlist(remote[repo_url]))]
)
if not per_remote_conf:
log.error(
'Invalid per-remote configuration for remote %s. If no '
'per-remote parameters are being specified, there may be '
'a trailing colon after the URL, which should be removed. '
'Check the master configuration file.', repo_url
)
_failhard()
per_remote_errors = False
for param in (x for x in per_remote_conf
if x not in PER_REMOTE_OVERRIDES):
log.error(
'Invalid configuration parameter \'%s\' for remote %s. '
'Valid parameters are: %s. See the documentation for '
'further information.',
param, repo_url, ', '.join(PER_REMOTE_OVERRIDES)
)
per_remote_errors = True
if per_remote_errors:
_failhard()
repo_conf.update(per_remote_conf)
else:
repo_url = remote
if not isinstance(repo_url, six.string_types):
log.error(
'Invalid svnfs remote %s. Remotes must be strings, you may '
'need to enclose the URL in quotes', repo_url
)
_failhard()
try:
repo_conf['mountpoint'] = salt.utils.url.strip_proto(
repo_conf['mountpoint']
)
except TypeError:
# mountpoint not specified
pass
hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5'))
repo_hash = hash_type(repo_url).hexdigest()
rp_ = os.path.join(bp_, repo_hash)
if not os.path.isdir(rp_):
os.makedirs(rp_)
if not os.listdir(rp_):
# Only attempt a new checkout if the directory is empty.
try:
CLIENT.checkout(repo_url, rp_)
repos.append(rp_)
new_remote = True
except pysvn._pysvn.ClientError as exc:
log.error(
'Failed to initialize svnfs remote \'%s\': %s',
repo_url, exc
)
_failhard()
else:
# Confirm that there is an svn checkout at the necessary path by
# running pysvn.Client().status()
try:
CLIENT.status(rp_)
except pysvn._pysvn.ClientError as exc:
log.error(
'Cache path %s (corresponding remote: %s) exists but is '
'not a valid subversion checkout. You will need to '
'manually delete this directory on the master to continue '
'to use this svnfs remote.', rp_, repo_url
)
_failhard()
repo_conf.update({
'repo': rp_,
'url': repo_url,
'hash': repo_hash,
'cachedir': rp_,
'lockfile': os.path.join(rp_, 'update.lk')
})
repos.append(repo_conf)
if new_remote:
remote_map = os.path.join(__opts__['cachedir'], 'svnfs/remote_map.txt')
try:
with salt.utils.files.fopen(remote_map, 'w+') as fp_:
timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f')
fp_.write('# svnfs_remote map as of {0}\n'.format(timestamp))
for repo_conf in repos:
fp_.write(
salt.utils.stringutils.to_str(
'{0} = {1}\n'.format(
repo_conf['hash'], repo_conf['url']
)
)
)
except OSError:
pass
else:
log.info('Wrote new svnfs_remote map to %s', remote_map)
return repos | def function[init, parameter[]]:
constant[
Return the list of svn remotes and their configuration information
]
variable[bp_] assign[=] call[name[os].path.join, parameter[call[name[__opts__]][constant[cachedir]], constant[svnfs]]]
variable[new_remote] assign[=] constant[False]
variable[repos] assign[=] list[[]]
variable[per_remote_defaults] assign[=] dictionary[[], []]
for taget[name[param]] in starred[name[PER_REMOTE_OVERRIDES]] begin[:]
call[name[per_remote_defaults]][name[param]] assign[=] call[name[six].text_type, parameter[call[name[__opts__]][call[constant[svnfs_{0}].format, parameter[name[param]]]]]]
for taget[name[remote]] in starred[call[name[__opts__]][constant[svnfs_remotes]]] begin[:]
variable[repo_conf] assign[=] call[name[copy].deepcopy, parameter[name[per_remote_defaults]]]
if call[name[isinstance], parameter[name[remote], name[dict]]] begin[:]
variable[repo_url] assign[=] call[name[next], parameter[call[name[iter], parameter[name[remote]]]]]
variable[per_remote_conf] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da204963c40>]]
if <ast.UnaryOp object at 0x7da20c76cee0> begin[:]
call[name[log].error, parameter[constant[Invalid per-remote configuration for remote %s. If no per-remote parameters are being specified, there may be a trailing colon after the URL, which should be removed. Check the master configuration file.], name[repo_url]]]
call[name[_failhard], parameter[]]
variable[per_remote_errors] assign[=] constant[False]
for taget[name[param]] in starred[<ast.GeneratorExp object at 0x7da20c76cdc0>] begin[:]
call[name[log].error, parameter[constant[Invalid configuration parameter '%s' for remote %s. Valid parameters are: %s. See the documentation for further information.], name[param], name[repo_url], call[constant[, ].join, parameter[name[PER_REMOTE_OVERRIDES]]]]]
variable[per_remote_errors] assign[=] constant[True]
if name[per_remote_errors] begin[:]
call[name[_failhard], parameter[]]
call[name[repo_conf].update, parameter[name[per_remote_conf]]]
if <ast.UnaryOp object at 0x7da20c76e5f0> begin[:]
call[name[log].error, parameter[constant[Invalid svnfs remote %s. Remotes must be strings, you may need to enclose the URL in quotes], name[repo_url]]]
call[name[_failhard], parameter[]]
<ast.Try object at 0x7da20c76f760>
variable[hash_type] assign[=] call[name[getattr], parameter[name[hashlib], call[name[__opts__].get, parameter[constant[hash_type], constant[md5]]]]]
variable[repo_hash] assign[=] call[call[name[hash_type], parameter[name[repo_url]]].hexdigest, parameter[]]
variable[rp_] assign[=] call[name[os].path.join, parameter[name[bp_], name[repo_hash]]]
if <ast.UnaryOp object at 0x7da20c76dfc0> begin[:]
call[name[os].makedirs, parameter[name[rp_]]]
if <ast.UnaryOp object at 0x7da20c76d4e0> begin[:]
<ast.Try object at 0x7da20c76c790>
call[name[repo_conf].update, parameter[dictionary[[<ast.Constant object at 0x7da204566020>, <ast.Constant object at 0x7da204566530>, <ast.Constant object at 0x7da204566fb0>, <ast.Constant object at 0x7da204567820>, <ast.Constant object at 0x7da204566800>], [<ast.Name object at 0x7da204565660>, <ast.Name object at 0x7da204565900>, <ast.Name object at 0x7da204566aa0>, <ast.Name object at 0x7da204567010>, <ast.Call object at 0x7da204565000>]]]]
call[name[repos].append, parameter[name[repo_conf]]]
if name[new_remote] begin[:]
variable[remote_map] assign[=] call[name[os].path.join, parameter[call[name[__opts__]][constant[cachedir]], constant[svnfs/remote_map.txt]]]
<ast.Try object at 0x7da204564fd0>
return[name[repos]] | keyword[def] identifier[init] ():
literal[string]
identifier[bp_] = identifier[os] . identifier[path] . identifier[join] ( identifier[__opts__] [ literal[string] ], literal[string] )
identifier[new_remote] = keyword[False]
identifier[repos] =[]
identifier[per_remote_defaults] ={}
keyword[for] identifier[param] keyword[in] identifier[PER_REMOTE_OVERRIDES] :
identifier[per_remote_defaults] [ identifier[param] ]= identifier[six] . identifier[text_type] ( identifier[__opts__] [ literal[string] . identifier[format] ( identifier[param] )])
keyword[for] identifier[remote] keyword[in] identifier[__opts__] [ literal[string] ]:
identifier[repo_conf] = identifier[copy] . identifier[deepcopy] ( identifier[per_remote_defaults] )
keyword[if] identifier[isinstance] ( identifier[remote] , identifier[dict] ):
identifier[repo_url] = identifier[next] ( identifier[iter] ( identifier[remote] ))
identifier[per_remote_conf] = identifier[dict] (
[( identifier[key] , identifier[six] . identifier[text_type] ( identifier[val] )) keyword[for] identifier[key] , identifier[val] keyword[in]
identifier[six] . identifier[iteritems] ( identifier[salt] . identifier[utils] . identifier[data] . identifier[repack_dictlist] ( identifier[remote] [ identifier[repo_url] ]))]
)
keyword[if] keyword[not] identifier[per_remote_conf] :
identifier[log] . identifier[error] (
literal[string]
literal[string]
literal[string]
literal[string] , identifier[repo_url]
)
identifier[_failhard] ()
identifier[per_remote_errors] = keyword[False]
keyword[for] identifier[param] keyword[in] ( identifier[x] keyword[for] identifier[x] keyword[in] identifier[per_remote_conf]
keyword[if] identifier[x] keyword[not] keyword[in] identifier[PER_REMOTE_OVERRIDES] ):
identifier[log] . identifier[error] (
literal[string]
literal[string]
literal[string] ,
identifier[param] , identifier[repo_url] , literal[string] . identifier[join] ( identifier[PER_REMOTE_OVERRIDES] )
)
identifier[per_remote_errors] = keyword[True]
keyword[if] identifier[per_remote_errors] :
identifier[_failhard] ()
identifier[repo_conf] . identifier[update] ( identifier[per_remote_conf] )
keyword[else] :
identifier[repo_url] = identifier[remote]
keyword[if] keyword[not] identifier[isinstance] ( identifier[repo_url] , identifier[six] . identifier[string_types] ):
identifier[log] . identifier[error] (
literal[string]
literal[string] , identifier[repo_url]
)
identifier[_failhard] ()
keyword[try] :
identifier[repo_conf] [ literal[string] ]= identifier[salt] . identifier[utils] . identifier[url] . identifier[strip_proto] (
identifier[repo_conf] [ literal[string] ]
)
keyword[except] identifier[TypeError] :
keyword[pass]
identifier[hash_type] = identifier[getattr] ( identifier[hashlib] , identifier[__opts__] . identifier[get] ( literal[string] , literal[string] ))
identifier[repo_hash] = identifier[hash_type] ( identifier[repo_url] ). identifier[hexdigest] ()
identifier[rp_] = identifier[os] . identifier[path] . identifier[join] ( identifier[bp_] , identifier[repo_hash] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[rp_] ):
identifier[os] . identifier[makedirs] ( identifier[rp_] )
keyword[if] keyword[not] identifier[os] . identifier[listdir] ( identifier[rp_] ):
keyword[try] :
identifier[CLIENT] . identifier[checkout] ( identifier[repo_url] , identifier[rp_] )
identifier[repos] . identifier[append] ( identifier[rp_] )
identifier[new_remote] = keyword[True]
keyword[except] identifier[pysvn] . identifier[_pysvn] . identifier[ClientError] keyword[as] identifier[exc] :
identifier[log] . identifier[error] (
literal[string] ,
identifier[repo_url] , identifier[exc]
)
identifier[_failhard] ()
keyword[else] :
keyword[try] :
identifier[CLIENT] . identifier[status] ( identifier[rp_] )
keyword[except] identifier[pysvn] . identifier[_pysvn] . identifier[ClientError] keyword[as] identifier[exc] :
identifier[log] . identifier[error] (
literal[string]
literal[string]
literal[string]
literal[string] , identifier[rp_] , identifier[repo_url]
)
identifier[_failhard] ()
identifier[repo_conf] . identifier[update] ({
literal[string] : identifier[rp_] ,
literal[string] : identifier[repo_url] ,
literal[string] : identifier[repo_hash] ,
literal[string] : identifier[rp_] ,
literal[string] : identifier[os] . identifier[path] . identifier[join] ( identifier[rp_] , literal[string] )
})
identifier[repos] . identifier[append] ( identifier[repo_conf] )
keyword[if] identifier[new_remote] :
identifier[remote_map] = identifier[os] . identifier[path] . identifier[join] ( identifier[__opts__] [ literal[string] ], literal[string] )
keyword[try] :
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[remote_map] , literal[string] ) keyword[as] identifier[fp_] :
identifier[timestamp] = identifier[datetime] . identifier[now] (). identifier[strftime] ( literal[string] )
identifier[fp_] . identifier[write] ( literal[string] . identifier[format] ( identifier[timestamp] ))
keyword[for] identifier[repo_conf] keyword[in] identifier[repos] :
identifier[fp_] . identifier[write] (
identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_str] (
literal[string] . identifier[format] (
identifier[repo_conf] [ literal[string] ], identifier[repo_conf] [ literal[string] ]
)
)
)
keyword[except] identifier[OSError] :
keyword[pass]
keyword[else] :
identifier[log] . identifier[info] ( literal[string] , identifier[remote_map] )
keyword[return] identifier[repos] | def init():
"""
Return the list of svn remotes and their configuration information
"""
bp_ = os.path.join(__opts__['cachedir'], 'svnfs')
new_remote = False
repos = []
per_remote_defaults = {}
for param in PER_REMOTE_OVERRIDES:
per_remote_defaults[param] = six.text_type(__opts__['svnfs_{0}'.format(param)]) # depends on [control=['for'], data=['param']]
for remote in __opts__['svnfs_remotes']:
repo_conf = copy.deepcopy(per_remote_defaults)
if isinstance(remote, dict):
repo_url = next(iter(remote))
per_remote_conf = dict([(key, six.text_type(val)) for (key, val) in six.iteritems(salt.utils.data.repack_dictlist(remote[repo_url]))])
if not per_remote_conf:
log.error('Invalid per-remote configuration for remote %s. If no per-remote parameters are being specified, there may be a trailing colon after the URL, which should be removed. Check the master configuration file.', repo_url)
_failhard() # depends on [control=['if'], data=[]]
per_remote_errors = False
for param in (x for x in per_remote_conf if x not in PER_REMOTE_OVERRIDES):
log.error("Invalid configuration parameter '%s' for remote %s. Valid parameters are: %s. See the documentation for further information.", param, repo_url, ', '.join(PER_REMOTE_OVERRIDES))
per_remote_errors = True # depends on [control=['for'], data=['param']]
if per_remote_errors:
_failhard() # depends on [control=['if'], data=[]]
repo_conf.update(per_remote_conf) # depends on [control=['if'], data=[]]
else:
repo_url = remote
if not isinstance(repo_url, six.string_types):
log.error('Invalid svnfs remote %s. Remotes must be strings, you may need to enclose the URL in quotes', repo_url)
_failhard() # depends on [control=['if'], data=[]]
try:
repo_conf['mountpoint'] = salt.utils.url.strip_proto(repo_conf['mountpoint']) # depends on [control=['try'], data=[]]
except TypeError:
# mountpoint not specified
pass # depends on [control=['except'], data=[]]
hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5'))
repo_hash = hash_type(repo_url).hexdigest()
rp_ = os.path.join(bp_, repo_hash)
if not os.path.isdir(rp_):
os.makedirs(rp_) # depends on [control=['if'], data=[]]
if not os.listdir(rp_):
# Only attempt a new checkout if the directory is empty.
try:
CLIENT.checkout(repo_url, rp_)
repos.append(rp_)
new_remote = True # depends on [control=['try'], data=[]]
except pysvn._pysvn.ClientError as exc:
log.error("Failed to initialize svnfs remote '%s': %s", repo_url, exc)
_failhard() # depends on [control=['except'], data=['exc']] # depends on [control=['if'], data=[]]
else:
# Confirm that there is an svn checkout at the necessary path by
# running pysvn.Client().status()
try:
CLIENT.status(rp_) # depends on [control=['try'], data=[]]
except pysvn._pysvn.ClientError as exc:
log.error('Cache path %s (corresponding remote: %s) exists but is not a valid subversion checkout. You will need to manually delete this directory on the master to continue to use this svnfs remote.', rp_, repo_url)
_failhard() # depends on [control=['except'], data=[]]
repo_conf.update({'repo': rp_, 'url': repo_url, 'hash': repo_hash, 'cachedir': rp_, 'lockfile': os.path.join(rp_, 'update.lk')})
repos.append(repo_conf) # depends on [control=['for'], data=['remote']]
if new_remote:
remote_map = os.path.join(__opts__['cachedir'], 'svnfs/remote_map.txt')
try:
with salt.utils.files.fopen(remote_map, 'w+') as fp_:
timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f')
fp_.write('# svnfs_remote map as of {0}\n'.format(timestamp))
for repo_conf in repos:
fp_.write(salt.utils.stringutils.to_str('{0} = {1}\n'.format(repo_conf['hash'], repo_conf['url']))) # depends on [control=['for'], data=['repo_conf']] # depends on [control=['with'], data=['fp_']] # depends on [control=['try'], data=[]]
except OSError:
pass # depends on [control=['except'], data=[]]
else:
log.info('Wrote new svnfs_remote map to %s', remote_map) # depends on [control=['if'], data=[]]
return repos |
def load_plugins(self, raise_error=False):
"""
Load the plotters and defaultParams from the plugins
This method loads the `plotters` attribute and `defaultParams`
attribute from the plugins that use the entry point specified by
`group`. Entry points must be objects (or modules) that have a
`defaultParams` and a `plotters` attribute.
Parameters
----------
raise_error: bool
If True, an error is raised when multiple plugins define the same
plotter or rcParams key. Otherwise only a warning is raised"""
pm_env = os.getenv('PSYPLOT_PLOTMETHODS', '').split('::')
include_pms = [s[4:] for s in pm_env if s.startswith('yes:')]
exclude_pms = [s[3:] for s in pm_env if s.startswith('no:')]
logger = logging.getLogger(__name__)
plotters = self['project.plotters']
def_plots = {'default': list(plotters)}
defaultParams = self.defaultParams
def_keys = {'default': defaultParams}
def register_pm(ep, name):
full_name = '%s:%s' % (ep.module_name, name)
ret = True
if pm_env == ['no']:
ret = False
elif name in exclude_pms or full_name in exclude_pms:
ret = False
elif include_pms and (name not in include_pms and
full_name not in include_pms):
ret = False
if not ret:
logger.debug('Skipping plot method %s', full_name)
return ret
for ep in self._load_plugin_entrypoints():
plugin_mod = ep.load()
rc = plugin_mod.rcParams
# load the plotters
plugin_plotters = {
key: val for key, val in rc.get('project.plotters', {}).items()
if register_pm(ep, key)}
already_defined = set(plotters).intersection(plugin_plotters)
if already_defined:
msg = ("Error while loading psyplot plugin %s! The "
"following plotters have already been "
"defined") % ep
msg += 'and will be overwritten:' if not raise_error else ':'
msg += '\n' + '\n'.join(chain.from_iterable(
(('%s by %s' % (key, plugin)
for plugin, keys in def_plots.items() if key in keys)
for key in already_defined)))
if raise_error:
raise ImportError(msg)
else:
warn(msg)
for d in plugin_plotters.values():
d['plugin'] = ep.module_name
plotters.update(plugin_plotters)
def_plots[ep] = list(plugin_plotters)
# load the defaultParams keys
plugin_defaultParams = rc.defaultParams
already_defined = set(defaultParams).intersection(
plugin_defaultParams) - {'project.plotters'}
if already_defined:
msg = ("Error while loading psyplot plugin %s! The "
"following default keys have already been "
"defined:") % ep
msg += '\n' + '\n'.join(chain.from_iterable(
(('%s by %s' % (key, plugin)
for plugin, keys in def_keys.items() if key in keys)
for key in already_defined)))
if raise_error:
raise ImportError(msg)
else:
warn(msg)
update_keys = set(plugin_defaultParams) - {'project.plotters'}
def_keys[ep] = update_keys
self.defaultParams.update(
{key: plugin_defaultParams[key] for key in update_keys})
# load the rcParams (without validation)
super(RcParams, self).update({key: rc[key] for key in update_keys})
# add the deprecated keys
self._deprecated_ignore_map.update(rc._deprecated_ignore_map)
self._deprecated_map.update(rc._deprecated_map) | def function[load_plugins, parameter[self, raise_error]]:
constant[
Load the plotters and defaultParams from the plugins
This method loads the `plotters` attribute and `defaultParams`
attribute from the plugins that use the entry point specified by
`group`. Entry points must be objects (or modules) that have a
`defaultParams` and a `plotters` attribute.
Parameters
----------
raise_error: bool
If True, an error is raised when multiple plugins define the same
plotter or rcParams key. Otherwise only a warning is raised]
variable[pm_env] assign[=] call[call[name[os].getenv, parameter[constant[PSYPLOT_PLOTMETHODS], constant[]]].split, parameter[constant[::]]]
variable[include_pms] assign[=] <ast.ListComp object at 0x7da2043459f0>
variable[exclude_pms] assign[=] <ast.ListComp object at 0x7da204346680>
variable[logger] assign[=] call[name[logging].getLogger, parameter[name[__name__]]]
variable[plotters] assign[=] call[name[self]][constant[project.plotters]]
variable[def_plots] assign[=] dictionary[[<ast.Constant object at 0x7da204347820>], [<ast.Call object at 0x7da2043451e0>]]
variable[defaultParams] assign[=] name[self].defaultParams
variable[def_keys] assign[=] dictionary[[<ast.Constant object at 0x7da2043464d0>], [<ast.Name object at 0x7da204347880>]]
def function[register_pm, parameter[ep, name]]:
variable[full_name] assign[=] binary_operation[constant[%s:%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da204345210>, <ast.Name object at 0x7da204346e00>]]]
variable[ret] assign[=] constant[True]
if compare[name[pm_env] equal[==] list[[<ast.Constant object at 0x7da2043444f0>]]] begin[:]
variable[ret] assign[=] constant[False]
if <ast.UnaryOp object at 0x7da204345930> begin[:]
call[name[logger].debug, parameter[constant[Skipping plot method %s], name[full_name]]]
return[name[ret]]
for taget[name[ep]] in starred[call[name[self]._load_plugin_entrypoints, parameter[]]] begin[:]
variable[plugin_mod] assign[=] call[name[ep].load, parameter[]]
variable[rc] assign[=] name[plugin_mod].rcParams
variable[plugin_plotters] assign[=] <ast.DictComp object at 0x7da204346f50>
variable[already_defined] assign[=] call[call[name[set], parameter[name[plotters]]].intersection, parameter[name[plugin_plotters]]]
if name[already_defined] begin[:]
variable[msg] assign[=] binary_operation[constant[Error while loading psyplot plugin %s! The following plotters have already been defined] <ast.Mod object at 0x7da2590d6920> name[ep]]
<ast.AugAssign object at 0x7da1b26ad5a0>
<ast.AugAssign object at 0x7da1b26af610>
if name[raise_error] begin[:]
<ast.Raise object at 0x7da1b26af4f0>
for taget[name[d]] in starred[call[name[plugin_plotters].values, parameter[]]] begin[:]
call[name[d]][constant[plugin]] assign[=] name[ep].module_name
call[name[plotters].update, parameter[name[plugin_plotters]]]
call[name[def_plots]][name[ep]] assign[=] call[name[list], parameter[name[plugin_plotters]]]
variable[plugin_defaultParams] assign[=] name[rc].defaultParams
variable[already_defined] assign[=] binary_operation[call[call[name[set], parameter[name[defaultParams]]].intersection, parameter[name[plugin_defaultParams]]] - <ast.Set object at 0x7da1b26ad4b0>]
if name[already_defined] begin[:]
variable[msg] assign[=] binary_operation[constant[Error while loading psyplot plugin %s! The following default keys have already been defined:] <ast.Mod object at 0x7da2590d6920> name[ep]]
<ast.AugAssign object at 0x7da1b26ace20>
if name[raise_error] begin[:]
<ast.Raise object at 0x7da18bcca560>
variable[update_keys] assign[=] binary_operation[call[name[set], parameter[name[plugin_defaultParams]]] - <ast.Set object at 0x7da18bccb1c0>]
call[name[def_keys]][name[ep]] assign[=] name[update_keys]
call[name[self].defaultParams.update, parameter[<ast.DictComp object at 0x7da18bccbdf0>]]
call[call[name[super], parameter[name[RcParams], name[self]]].update, parameter[<ast.DictComp object at 0x7da18bcc95d0>]]
call[name[self]._deprecated_ignore_map.update, parameter[name[rc]._deprecated_ignore_map]]
call[name[self]._deprecated_map.update, parameter[name[rc]._deprecated_map]] | keyword[def] identifier[load_plugins] ( identifier[self] , identifier[raise_error] = keyword[False] ):
literal[string]
identifier[pm_env] = identifier[os] . identifier[getenv] ( literal[string] , literal[string] ). identifier[split] ( literal[string] )
identifier[include_pms] =[ identifier[s] [ literal[int] :] keyword[for] identifier[s] keyword[in] identifier[pm_env] keyword[if] identifier[s] . identifier[startswith] ( literal[string] )]
identifier[exclude_pms] =[ identifier[s] [ literal[int] :] keyword[for] identifier[s] keyword[in] identifier[pm_env] keyword[if] identifier[s] . identifier[startswith] ( literal[string] )]
identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] )
identifier[plotters] = identifier[self] [ literal[string] ]
identifier[def_plots] ={ literal[string] : identifier[list] ( identifier[plotters] )}
identifier[defaultParams] = identifier[self] . identifier[defaultParams]
identifier[def_keys] ={ literal[string] : identifier[defaultParams] }
keyword[def] identifier[register_pm] ( identifier[ep] , identifier[name] ):
identifier[full_name] = literal[string] %( identifier[ep] . identifier[module_name] , identifier[name] )
identifier[ret] = keyword[True]
keyword[if] identifier[pm_env] ==[ literal[string] ]:
identifier[ret] = keyword[False]
keyword[elif] identifier[name] keyword[in] identifier[exclude_pms] keyword[or] identifier[full_name] keyword[in] identifier[exclude_pms] :
identifier[ret] = keyword[False]
keyword[elif] identifier[include_pms] keyword[and] ( identifier[name] keyword[not] keyword[in] identifier[include_pms] keyword[and]
identifier[full_name] keyword[not] keyword[in] identifier[include_pms] ):
identifier[ret] = keyword[False]
keyword[if] keyword[not] identifier[ret] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[full_name] )
keyword[return] identifier[ret]
keyword[for] identifier[ep] keyword[in] identifier[self] . identifier[_load_plugin_entrypoints] ():
identifier[plugin_mod] = identifier[ep] . identifier[load] ()
identifier[rc] = identifier[plugin_mod] . identifier[rcParams]
identifier[plugin_plotters] ={
identifier[key] : identifier[val] keyword[for] identifier[key] , identifier[val] keyword[in] identifier[rc] . identifier[get] ( literal[string] ,{}). identifier[items] ()
keyword[if] identifier[register_pm] ( identifier[ep] , identifier[key] )}
identifier[already_defined] = identifier[set] ( identifier[plotters] ). identifier[intersection] ( identifier[plugin_plotters] )
keyword[if] identifier[already_defined] :
identifier[msg] =( literal[string]
literal[string]
literal[string] )% identifier[ep]
identifier[msg] += literal[string] keyword[if] keyword[not] identifier[raise_error] keyword[else] literal[string]
identifier[msg] += literal[string] + literal[string] . identifier[join] ( identifier[chain] . identifier[from_iterable] (
(( literal[string] %( identifier[key] , identifier[plugin] )
keyword[for] identifier[plugin] , identifier[keys] keyword[in] identifier[def_plots] . identifier[items] () keyword[if] identifier[key] keyword[in] identifier[keys] )
keyword[for] identifier[key] keyword[in] identifier[already_defined] )))
keyword[if] identifier[raise_error] :
keyword[raise] identifier[ImportError] ( identifier[msg] )
keyword[else] :
identifier[warn] ( identifier[msg] )
keyword[for] identifier[d] keyword[in] identifier[plugin_plotters] . identifier[values] ():
identifier[d] [ literal[string] ]= identifier[ep] . identifier[module_name]
identifier[plotters] . identifier[update] ( identifier[plugin_plotters] )
identifier[def_plots] [ identifier[ep] ]= identifier[list] ( identifier[plugin_plotters] )
identifier[plugin_defaultParams] = identifier[rc] . identifier[defaultParams]
identifier[already_defined] = identifier[set] ( identifier[defaultParams] ). identifier[intersection] (
identifier[plugin_defaultParams] )-{ literal[string] }
keyword[if] identifier[already_defined] :
identifier[msg] =( literal[string]
literal[string]
literal[string] )% identifier[ep]
identifier[msg] += literal[string] + literal[string] . identifier[join] ( identifier[chain] . identifier[from_iterable] (
(( literal[string] %( identifier[key] , identifier[plugin] )
keyword[for] identifier[plugin] , identifier[keys] keyword[in] identifier[def_keys] . identifier[items] () keyword[if] identifier[key] keyword[in] identifier[keys] )
keyword[for] identifier[key] keyword[in] identifier[already_defined] )))
keyword[if] identifier[raise_error] :
keyword[raise] identifier[ImportError] ( identifier[msg] )
keyword[else] :
identifier[warn] ( identifier[msg] )
identifier[update_keys] = identifier[set] ( identifier[plugin_defaultParams] )-{ literal[string] }
identifier[def_keys] [ identifier[ep] ]= identifier[update_keys]
identifier[self] . identifier[defaultParams] . identifier[update] (
{ identifier[key] : identifier[plugin_defaultParams] [ identifier[key] ] keyword[for] identifier[key] keyword[in] identifier[update_keys] })
identifier[super] ( identifier[RcParams] , identifier[self] ). identifier[update] ({ identifier[key] : identifier[rc] [ identifier[key] ] keyword[for] identifier[key] keyword[in] identifier[update_keys] })
identifier[self] . identifier[_deprecated_ignore_map] . identifier[update] ( identifier[rc] . identifier[_deprecated_ignore_map] )
identifier[self] . identifier[_deprecated_map] . identifier[update] ( identifier[rc] . identifier[_deprecated_map] ) | def load_plugins(self, raise_error=False):
"""
Load the plotters and defaultParams from the plugins
This method loads the `plotters` attribute and `defaultParams`
attribute from the plugins that use the entry point specified by
`group`. Entry points must be objects (or modules) that have a
`defaultParams` and a `plotters` attribute.
Parameters
----------
raise_error: bool
If True, an error is raised when multiple plugins define the same
plotter or rcParams key. Otherwise only a warning is raised"""
pm_env = os.getenv('PSYPLOT_PLOTMETHODS', '').split('::')
include_pms = [s[4:] for s in pm_env if s.startswith('yes:')]
exclude_pms = [s[3:] for s in pm_env if s.startswith('no:')]
logger = logging.getLogger(__name__)
plotters = self['project.plotters']
def_plots = {'default': list(plotters)}
defaultParams = self.defaultParams
def_keys = {'default': defaultParams}
def register_pm(ep, name):
full_name = '%s:%s' % (ep.module_name, name)
ret = True
if pm_env == ['no']:
ret = False # depends on [control=['if'], data=[]]
elif name in exclude_pms or full_name in exclude_pms:
ret = False # depends on [control=['if'], data=[]]
elif include_pms and (name not in include_pms and full_name not in include_pms):
ret = False # depends on [control=['if'], data=[]]
if not ret:
logger.debug('Skipping plot method %s', full_name) # depends on [control=['if'], data=[]]
return ret
for ep in self._load_plugin_entrypoints():
plugin_mod = ep.load()
rc = plugin_mod.rcParams
# load the plotters
plugin_plotters = {key: val for (key, val) in rc.get('project.plotters', {}).items() if register_pm(ep, key)}
already_defined = set(plotters).intersection(plugin_plotters)
if already_defined:
msg = 'Error while loading psyplot plugin %s! The following plotters have already been defined' % ep
msg += 'and will be overwritten:' if not raise_error else ':'
msg += '\n' + '\n'.join(chain.from_iterable((('%s by %s' % (key, plugin) for (plugin, keys) in def_plots.items() if key in keys) for key in already_defined)))
if raise_error:
raise ImportError(msg) # depends on [control=['if'], data=[]]
else:
warn(msg) # depends on [control=['if'], data=[]]
for d in plugin_plotters.values():
d['plugin'] = ep.module_name # depends on [control=['for'], data=['d']]
plotters.update(plugin_plotters)
def_plots[ep] = list(plugin_plotters)
# load the defaultParams keys
plugin_defaultParams = rc.defaultParams
already_defined = set(defaultParams).intersection(plugin_defaultParams) - {'project.plotters'}
if already_defined:
msg = 'Error while loading psyplot plugin %s! The following default keys have already been defined:' % ep
msg += '\n' + '\n'.join(chain.from_iterable((('%s by %s' % (key, plugin) for (plugin, keys) in def_keys.items() if key in keys) for key in already_defined)))
if raise_error:
raise ImportError(msg) # depends on [control=['if'], data=[]]
else:
warn(msg) # depends on [control=['if'], data=[]]
update_keys = set(plugin_defaultParams) - {'project.plotters'}
def_keys[ep] = update_keys
self.defaultParams.update({key: plugin_defaultParams[key] for key in update_keys})
# load the rcParams (without validation)
super(RcParams, self).update({key: rc[key] for key in update_keys})
# add the deprecated keys
self._deprecated_ignore_map.update(rc._deprecated_ignore_map)
self._deprecated_map.update(rc._deprecated_map) # depends on [control=['for'], data=['ep']] |
def get_3_tuple_list(self, obj, default=None):
    """Convert *obj* into a list of 3-tuples.

    A sequence of sequences is mapped element-wise through
    ``get_3_tuple``; a flat sequence is split into consecutive
    chunks of three; any other value yields a single 3-tuple.
    """
    if is_sequence2(obj):
        return [self.get_3_tuple(item, default) for item in obj]
    if is_sequence(obj):
        chunks = (obj[start:start + 3] for start in range(0, len(obj), 3))
        return [self.get_3_tuple(chunk, default) for chunk in chunks]
    return [self.get_3_tuple(obj, default)]
constant[Return list of 3-tuples from
sequence of a sequence,
sequence - it is mapped to sequence of 3-sequences if possible
number
]
if call[name[is_sequence2], parameter[name[obj]]] begin[:]
return[<ast.ListComp object at 0x7da1b1027820>] | keyword[def] identifier[get_3_tuple_list] ( identifier[self] , identifier[obj] , identifier[default] = keyword[None] ):
literal[string]
keyword[if] identifier[is_sequence2] ( identifier[obj] ):
keyword[return] [ identifier[self] . identifier[get_3_tuple] ( identifier[o] , identifier[default] ) keyword[for] identifier[o] keyword[in] identifier[obj] ]
keyword[elif] identifier[is_sequence] ( identifier[obj] ):
keyword[return] [ identifier[self] . identifier[get_3_tuple] ( identifier[obj] [ identifier[i] : identifier[i] + literal[int] ], identifier[default] ) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[obj] ), literal[int] )]
keyword[else] :
keyword[return] [ identifier[self] . identifier[get_3_tuple] ( identifier[obj] , identifier[default] )] | def get_3_tuple_list(self, obj, default=None):
"""Return list of 3-tuples from
sequence of a sequence,
sequence - it is mapped to sequence of 3-sequences if possible
number
"""
if is_sequence2(obj):
return [self.get_3_tuple(o, default) for o in obj] # depends on [control=['if'], data=[]]
elif is_sequence(obj):
return [self.get_3_tuple(obj[i:i + 3], default) for i in range(0, len(obj), 3)] # depends on [control=['if'], data=[]]
else:
return [self.get_3_tuple(obj, default)] |
def read_raw_table(self, table):
    """
    Yield rows in the [incr tsdb()] *table*. A row is a dictionary
    mapping column names to values. Data from a profile is decoded
    by decode_row(). No filters or applicators are used.
    """
    # Fetch the relation schema once; previously table_relations()
    # was called twice for the same table.
    relations = self.table_relations(table)
    fields = relations if self.cast else None
    field_names = [f.name for f in relations]
    field_len = len(field_names)
    table_path = os.path.join(self.root, table)
    with _open_table(table_path, self.encoding) as tbl:
        for line in tbl:
            cols = decode_row(line, fields=fields)
            if len(cols) != field_len:
                # Misaligned rows are logged but still yielded, so one
                # bad line does not abort reading the whole table.
                # should this throw an exception instead?
                logging.error('Number of stored fields ({}) '
                              'differ from the expected number ({}); '
                              'fields may be misaligned!'
                              .format(len(cols), field_len))
            row = OrderedDict(zip(field_names, cols))
            yield row
constant[
Yield rows in the [incr tsdb()] *table*. A row is a dictionary
mapping column names to values. Data from a profile is decoded
by decode_row(). No filters or applicators are used.
]
variable[fields] assign[=] <ast.IfExp object at 0x7da18f09c760>
variable[field_names] assign[=] <ast.ListComp object at 0x7da18f09fee0>
variable[field_len] assign[=] call[name[len], parameter[name[field_names]]]
variable[table_path] assign[=] call[name[os].path.join, parameter[name[self].root, name[table]]]
with call[name[_open_table], parameter[name[table_path], name[self].encoding]] begin[:]
for taget[name[line]] in starred[name[tbl]] begin[:]
variable[cols] assign[=] call[name[decode_row], parameter[name[line]]]
if compare[call[name[len], parameter[name[cols]]] not_equal[!=] name[field_len]] begin[:]
call[name[logging].error, parameter[call[constant[Number of stored fields ({}) differ from the expected number({}); fields may be misaligned!].format, parameter[call[name[len], parameter[name[cols]]], name[field_len]]]]]
variable[row] assign[=] call[name[OrderedDict], parameter[call[name[zip], parameter[name[field_names], name[cols]]]]]
<ast.Yield object at 0x7da18f09f970> | keyword[def] identifier[read_raw_table] ( identifier[self] , identifier[table] ):
literal[string]
identifier[fields] = identifier[self] . identifier[table_relations] ( identifier[table] ) keyword[if] identifier[self] . identifier[cast] keyword[else] keyword[None]
identifier[field_names] =[ identifier[f] . identifier[name] keyword[for] identifier[f] keyword[in] identifier[self] . identifier[table_relations] ( identifier[table] )]
identifier[field_len] = identifier[len] ( identifier[field_names] )
identifier[table_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[root] , identifier[table] )
keyword[with] identifier[_open_table] ( identifier[table_path] , identifier[self] . identifier[encoding] ) keyword[as] identifier[tbl] :
keyword[for] identifier[line] keyword[in] identifier[tbl] :
identifier[cols] = identifier[decode_row] ( identifier[line] , identifier[fields] = identifier[fields] )
keyword[if] identifier[len] ( identifier[cols] )!= identifier[field_len] :
identifier[logging] . identifier[error] ( literal[string]
literal[string]
literal[string]
. identifier[format] ( identifier[len] ( identifier[cols] ), identifier[field_len] ))
identifier[row] = identifier[OrderedDict] ( identifier[zip] ( identifier[field_names] , identifier[cols] ))
keyword[yield] identifier[row] | def read_raw_table(self, table):
"""
Yield rows in the [incr tsdb()] *table*. A row is a dictionary
mapping column names to values. Data from a profile is decoded
by decode_row(). No filters or applicators are used.
"""
fields = self.table_relations(table) if self.cast else None
field_names = [f.name for f in self.table_relations(table)]
field_len = len(field_names)
table_path = os.path.join(self.root, table)
with _open_table(table_path, self.encoding) as tbl:
for line in tbl:
cols = decode_row(line, fields=fields)
if len(cols) != field_len:
# should this throw an exception instead?
logging.error('Number of stored fields ({}) differ from the expected number({}); fields may be misaligned!'.format(len(cols), field_len)) # depends on [control=['if'], data=['field_len']]
row = OrderedDict(zip(field_names, cols))
yield row # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['tbl']] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.