code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def load_certificate(filetype, buf):
"""Load a certificate and patch in incubating functionality.
Load a certificate using the same API as
:func:`OpenSSL.crypto.load_certificate` so clients can use this function as a
drop in replacement. Doing so patches in *incubating* functionality:
functionality that is not yet (or possibly will never be) present in
pyOpenSSL.
:param int filetype: The type of data in ``buf`` -- either
:py:data:`OpenSSL.crypto.FILETYPE_PEM` or
:py:data:`OpenSSL.crypto.FILETYPE_ASN1`.
:param str buf: The buffer containing the certificate.
"""
x509cert = crypto.load_certificate(filetype, buf)
patch_certificate(x509cert)
return x509cert | def function[load_certificate, parameter[filetype, buf]]:
constant[Load a certificate and patch in incubating functionality.
Load a certificate using the same API as
:func:`OpenSSL.crypto.load_certificate` so clients can use this function as a
drop in replacement. Doing so patches in *incubating* functionality:
functionality that is not yet (or possibly will never be) present in
pyOpenSSL.
:param int filetype: The type of data in ``buf`` -- either
:py:data:`OpenSSL.crypto.FILETYPE_PEM` or
:py:data:`OpenSSL.crypto.FILETYPE_ASN1`.
:param str buf: The buffer containing the certificate.
]
variable[x509cert] assign[=] call[name[crypto].load_certificate, parameter[name[filetype], name[buf]]]
call[name[patch_certificate], parameter[name[x509cert]]]
return[name[x509cert]] | keyword[def] identifier[load_certificate] ( identifier[filetype] , identifier[buf] ):
literal[string]
identifier[x509cert] = identifier[crypto] . identifier[load_certificate] ( identifier[filetype] , identifier[buf] )
identifier[patch_certificate] ( identifier[x509cert] )
keyword[return] identifier[x509cert] | def load_certificate(filetype, buf):
"""Load a certificate and patch in incubating functionality.
Load a certificate using the same API as
:func:`OpenSSL.crypto.load_certificate` so clients can use this function as a
drop in replacement. Doing so patches in *incubating* functionality:
functionality that is not yet (or possibly will never be) present in
pyOpenSSL.
:param int filetype: The type of data in ``buf`` -- either
:py:data:`OpenSSL.crypto.FILETYPE_PEM` or
:py:data:`OpenSSL.crypto.FILETYPE_ASN1`.
:param str buf: The buffer containing the certificate.
"""
x509cert = crypto.load_certificate(filetype, buf)
patch_certificate(x509cert)
return x509cert |
def catalog_to_cells(catalog, radius, order, include_fallback=True, **kwargs):
"""
Convert a catalog to a set of cells.
This function is intended to be used via `catalog_to_moc` but
is available for separate usage. It takes the same arguments
as that function.
This function uses the Healpy `query_disc` function to get a list
of cells for each item in the catalog in turn. Additional keyword
arguments, if specified, are passed to `query_disc`. This can include,
for example, `inclusive` (set to `True` to include cells overlapping
the radius as well as those with centers within it) and `fact`
(to control sampling when `inclusive` is specified).
If cells at the given order are bigger than the given radius, then
`query_disc` may find none inside the radius. In this case,
if `include_fallback` is `True` (the default), the cell at each
position is included.
If the given radius is zero (or smaller) then Healpy `query_disc`
is not used -- instead the fallback position is used automatically.
"""
nside = 2 ** order
# Ensure catalog is in ICRS coordinates.
catalog = catalog.icrs
# Ensure radius is in radians.
if isinstance(radius, Quantity):
radius = radius.to(radian).value
else:
radius = radius * pi / (180.0 * 3600.0)
# Convert coordinates to position vectors.
phi = catalog.ra.radian
theta = (pi / 2) - catalog.dec.radian
vectors = ang2vec(theta, phi)
# Ensure we can iterate over vectors (it might be a single position).
if catalog.isscalar:
vectors = [vectors]
# Query for a list of cells for each catalog position.
cells = set()
for vector in vectors:
if radius > 0.0:
# Try "disc" query.
vector_cells = query_disc(nside, vector, radius, nest=True, **kwargs)
if vector_cells.size > 0:
cells.update(vector_cells.tolist())
continue
elif not include_fallback:
continue
# The query didn't find anything -- include the cell at the
# given position at least.
cell = vec2pix(nside, vector[0], vector[1], vector[2], nest=True)
cells.add(cell.item())
return cells | def function[catalog_to_cells, parameter[catalog, radius, order, include_fallback]]:
constant[
Convert a catalog to a set of cells.
This function is intended to be used via `catalog_to_moc` but
is available for separate usage. It takes the same arguments
as that function.
This function uses the Healpy `query_disc` function to get a list
of cells for each item in the catalog in turn. Additional keyword
arguments, if specified, are passed to `query_disc`. This can include,
for example, `inclusive` (set to `True` to include cells overlapping
the radius as well as those with centers within it) and `fact`
(to control sampling when `inclusive` is specified).
If cells at the given order are bigger than the given radius, then
`query_disc` may find none inside the radius. In this case,
if `include_fallback` is `True` (the default), the cell at each
position is included.
If the given radius is zero (or smaller) then Healpy `query_disc`
is not used -- instead the fallback position is used automatically.
]
variable[nside] assign[=] binary_operation[constant[2] ** name[order]]
variable[catalog] assign[=] name[catalog].icrs
if call[name[isinstance], parameter[name[radius], name[Quantity]]] begin[:]
variable[radius] assign[=] call[name[radius].to, parameter[name[radian]]].value
variable[phi] assign[=] name[catalog].ra.radian
variable[theta] assign[=] binary_operation[binary_operation[name[pi] / constant[2]] - name[catalog].dec.radian]
variable[vectors] assign[=] call[name[ang2vec], parameter[name[theta], name[phi]]]
if name[catalog].isscalar begin[:]
variable[vectors] assign[=] list[[<ast.Name object at 0x7da1b0914ee0>]]
variable[cells] assign[=] call[name[set], parameter[]]
for taget[name[vector]] in starred[name[vectors]] begin[:]
if compare[name[radius] greater[>] constant[0.0]] begin[:]
variable[vector_cells] assign[=] call[name[query_disc], parameter[name[nside], name[vector], name[radius]]]
if compare[name[vector_cells].size greater[>] constant[0]] begin[:]
call[name[cells].update, parameter[call[name[vector_cells].tolist, parameter[]]]]
continue
variable[cell] assign[=] call[name[vec2pix], parameter[name[nside], call[name[vector]][constant[0]], call[name[vector]][constant[1]], call[name[vector]][constant[2]]]]
call[name[cells].add, parameter[call[name[cell].item, parameter[]]]]
return[name[cells]] | keyword[def] identifier[catalog_to_cells] ( identifier[catalog] , identifier[radius] , identifier[order] , identifier[include_fallback] = keyword[True] ,** identifier[kwargs] ):
literal[string]
identifier[nside] = literal[int] ** identifier[order]
identifier[catalog] = identifier[catalog] . identifier[icrs]
keyword[if] identifier[isinstance] ( identifier[radius] , identifier[Quantity] ):
identifier[radius] = identifier[radius] . identifier[to] ( identifier[radian] ). identifier[value]
keyword[else] :
identifier[radius] = identifier[radius] * identifier[pi] /( literal[int] * literal[int] )
identifier[phi] = identifier[catalog] . identifier[ra] . identifier[radian]
identifier[theta] =( identifier[pi] / literal[int] )- identifier[catalog] . identifier[dec] . identifier[radian]
identifier[vectors] = identifier[ang2vec] ( identifier[theta] , identifier[phi] )
keyword[if] identifier[catalog] . identifier[isscalar] :
identifier[vectors] =[ identifier[vectors] ]
identifier[cells] = identifier[set] ()
keyword[for] identifier[vector] keyword[in] identifier[vectors] :
keyword[if] identifier[radius] > literal[int] :
identifier[vector_cells] = identifier[query_disc] ( identifier[nside] , identifier[vector] , identifier[radius] , identifier[nest] = keyword[True] ,** identifier[kwargs] )
keyword[if] identifier[vector_cells] . identifier[size] > literal[int] :
identifier[cells] . identifier[update] ( identifier[vector_cells] . identifier[tolist] ())
keyword[continue]
keyword[elif] keyword[not] identifier[include_fallback] :
keyword[continue]
identifier[cell] = identifier[vec2pix] ( identifier[nside] , identifier[vector] [ literal[int] ], identifier[vector] [ literal[int] ], identifier[vector] [ literal[int] ], identifier[nest] = keyword[True] )
identifier[cells] . identifier[add] ( identifier[cell] . identifier[item] ())
keyword[return] identifier[cells] | def catalog_to_cells(catalog, radius, order, include_fallback=True, **kwargs):
"""
Convert a catalog to a set of cells.
This function is intended to be used via `catalog_to_moc` but
is available for separate usage. It takes the same arguments
as that function.
This function uses the Healpy `query_disc` function to get a list
of cells for each item in the catalog in turn. Additional keyword
arguments, if specified, are passed to `query_disc`. This can include,
for example, `inclusive` (set to `True` to include cells overlapping
the radius as well as those with centers within it) and `fact`
(to control sampling when `inclusive` is specified).
If cells at the given order are bigger than the given radius, then
`query_disc` may find none inside the radius. In this case,
if `include_fallback` is `True` (the default), the cell at each
position is included.
If the given radius is zero (or smaller) then Healpy `query_disc`
is not used -- instead the fallback position is used automatically.
"""
nside = 2 ** order
# Ensure catalog is in ICRS coordinates.
catalog = catalog.icrs
# Ensure radius is in radians.
if isinstance(radius, Quantity):
radius = radius.to(radian).value # depends on [control=['if'], data=[]]
else:
radius = radius * pi / (180.0 * 3600.0)
# Convert coordinates to position vectors.
phi = catalog.ra.radian
theta = pi / 2 - catalog.dec.radian
vectors = ang2vec(theta, phi)
# Ensure we can iterate over vectors (it might be a single position).
if catalog.isscalar:
vectors = [vectors] # depends on [control=['if'], data=[]]
# Query for a list of cells for each catalog position.
cells = set()
for vector in vectors:
if radius > 0.0:
# Try "disc" query.
vector_cells = query_disc(nside, vector, radius, nest=True, **kwargs)
if vector_cells.size > 0:
cells.update(vector_cells.tolist())
continue # depends on [control=['if'], data=[]]
elif not include_fallback:
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['radius']]
# The query didn't find anything -- include the cell at the
# given position at least.
cell = vec2pix(nside, vector[0], vector[1], vector[2], nest=True)
cells.add(cell.item()) # depends on [control=['for'], data=['vector']]
return cells |
def delete(self):
"""Extend to the delete the session from storage
"""
self.clear()
if os.path.isfile(self._filename):
os.unlink(self._filename)
else:
LOGGER.debug('Session file did not exist: %s', self._filename) | def function[delete, parameter[self]]:
constant[Extend to the delete the session from storage
]
call[name[self].clear, parameter[]]
if call[name[os].path.isfile, parameter[name[self]._filename]] begin[:]
call[name[os].unlink, parameter[name[self]._filename]] | keyword[def] identifier[delete] ( identifier[self] ):
literal[string]
identifier[self] . identifier[clear] ()
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[self] . identifier[_filename] ):
identifier[os] . identifier[unlink] ( identifier[self] . identifier[_filename] )
keyword[else] :
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[self] . identifier[_filename] ) | def delete(self):
"""Extend to the delete the session from storage
"""
self.clear()
if os.path.isfile(self._filename):
os.unlink(self._filename) # depends on [control=['if'], data=[]]
else:
LOGGER.debug('Session file did not exist: %s', self._filename) |
def _createShapelet(self, coeffs):
"""
returns a shapelet array out of the coefficients *a, up to order l
:param num_l: order of shapelets
:type num_l: int.
:param coeff: shapelet coefficients
:type coeff: floats
:returns: complex array
:raises: AttributeError, KeyError
"""
n_coeffs = len(coeffs)
num_n = self._get_num_n(n_coeffs)
shapelets=np.zeros((num_n+1, num_n+1))
n = 0
k = 0
for coeff in coeffs:
shapelets[n-k][k] = coeff
k += 1
if k == n + 1:
n += 1
k = 0
return shapelets | def function[_createShapelet, parameter[self, coeffs]]:
constant[
returns a shapelet array out of the coefficients *a, up to order l
:param num_l: order of shapelets
:type num_l: int.
:param coeff: shapelet coefficients
:type coeff: floats
:returns: complex array
:raises: AttributeError, KeyError
]
variable[n_coeffs] assign[=] call[name[len], parameter[name[coeffs]]]
variable[num_n] assign[=] call[name[self]._get_num_n, parameter[name[n_coeffs]]]
variable[shapelets] assign[=] call[name[np].zeros, parameter[tuple[[<ast.BinOp object at 0x7da1b26add80>, <ast.BinOp object at 0x7da1b26ac730>]]]]
variable[n] assign[=] constant[0]
variable[k] assign[=] constant[0]
for taget[name[coeff]] in starred[name[coeffs]] begin[:]
call[call[name[shapelets]][binary_operation[name[n] - name[k]]]][name[k]] assign[=] name[coeff]
<ast.AugAssign object at 0x7da18dc9bb20>
if compare[name[k] equal[==] binary_operation[name[n] + constant[1]]] begin[:]
<ast.AugAssign object at 0x7da18dc98250>
variable[k] assign[=] constant[0]
return[name[shapelets]] | keyword[def] identifier[_createShapelet] ( identifier[self] , identifier[coeffs] ):
literal[string]
identifier[n_coeffs] = identifier[len] ( identifier[coeffs] )
identifier[num_n] = identifier[self] . identifier[_get_num_n] ( identifier[n_coeffs] )
identifier[shapelets] = identifier[np] . identifier[zeros] (( identifier[num_n] + literal[int] , identifier[num_n] + literal[int] ))
identifier[n] = literal[int]
identifier[k] = literal[int]
keyword[for] identifier[coeff] keyword[in] identifier[coeffs] :
identifier[shapelets] [ identifier[n] - identifier[k] ][ identifier[k] ]= identifier[coeff]
identifier[k] += literal[int]
keyword[if] identifier[k] == identifier[n] + literal[int] :
identifier[n] += literal[int]
identifier[k] = literal[int]
keyword[return] identifier[shapelets] | def _createShapelet(self, coeffs):
"""
returns a shapelet array out of the coefficients *a, up to order l
:param num_l: order of shapelets
:type num_l: int.
:param coeff: shapelet coefficients
:type coeff: floats
:returns: complex array
:raises: AttributeError, KeyError
"""
n_coeffs = len(coeffs)
num_n = self._get_num_n(n_coeffs)
shapelets = np.zeros((num_n + 1, num_n + 1))
n = 0
k = 0
for coeff in coeffs:
shapelets[n - k][k] = coeff
k += 1
if k == n + 1:
n += 1
k = 0 # depends on [control=['if'], data=['k']] # depends on [control=['for'], data=['coeff']]
return shapelets |
def construct_formset(self):
"""
Overrides construct_formset to attach the model class as
an attribute of the returned formset instance.
"""
formset = super(InlineFormSetFactory, self).construct_formset()
formset.model = self.inline_model
return formset | def function[construct_formset, parameter[self]]:
constant[
Overrides construct_formset to attach the model class as
an attribute of the returned formset instance.
]
variable[formset] assign[=] call[call[name[super], parameter[name[InlineFormSetFactory], name[self]]].construct_formset, parameter[]]
name[formset].model assign[=] name[self].inline_model
return[name[formset]] | keyword[def] identifier[construct_formset] ( identifier[self] ):
literal[string]
identifier[formset] = identifier[super] ( identifier[InlineFormSetFactory] , identifier[self] ). identifier[construct_formset] ()
identifier[formset] . identifier[model] = identifier[self] . identifier[inline_model]
keyword[return] identifier[formset] | def construct_formset(self):
"""
Overrides construct_formset to attach the model class as
an attribute of the returned formset instance.
"""
formset = super(InlineFormSetFactory, self).construct_formset()
formset.model = self.inline_model
return formset |
def differing_functions(self):
"""
:returns: A list of function matches that appear to differ
"""
different_funcs = []
for (func_a, func_b) in self.function_matches:
if not self.functions_probably_identical(func_a, func_b):
different_funcs.append((func_a, func_b))
return different_funcs | def function[differing_functions, parameter[self]]:
constant[
:returns: A list of function matches that appear to differ
]
variable[different_funcs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da207f000d0>, <ast.Name object at 0x7da207f01ff0>]]] in starred[name[self].function_matches] begin[:]
if <ast.UnaryOp object at 0x7da207f01360> begin[:]
call[name[different_funcs].append, parameter[tuple[[<ast.Name object at 0x7da207f00190>, <ast.Name object at 0x7da207f03af0>]]]]
return[name[different_funcs]] | keyword[def] identifier[differing_functions] ( identifier[self] ):
literal[string]
identifier[different_funcs] =[]
keyword[for] ( identifier[func_a] , identifier[func_b] ) keyword[in] identifier[self] . identifier[function_matches] :
keyword[if] keyword[not] identifier[self] . identifier[functions_probably_identical] ( identifier[func_a] , identifier[func_b] ):
identifier[different_funcs] . identifier[append] (( identifier[func_a] , identifier[func_b] ))
keyword[return] identifier[different_funcs] | def differing_functions(self):
"""
:returns: A list of function matches that appear to differ
"""
different_funcs = []
for (func_a, func_b) in self.function_matches:
if not self.functions_probably_identical(func_a, func_b):
different_funcs.append((func_a, func_b)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return different_funcs |
def _register_engine(self, uid):
"""New engine with ident `uid` became available."""
# head of the line:
self.targets.insert(0,uid)
self.loads.insert(0,0)
# initialize sets
self.completed[uid] = set()
self.failed[uid] = set()
self.pending[uid] = {}
# rescan the graph:
self.update_graph(None) | def function[_register_engine, parameter[self, uid]]:
constant[New engine with ident `uid` became available.]
call[name[self].targets.insert, parameter[constant[0], name[uid]]]
call[name[self].loads.insert, parameter[constant[0], constant[0]]]
call[name[self].completed][name[uid]] assign[=] call[name[set], parameter[]]
call[name[self].failed][name[uid]] assign[=] call[name[set], parameter[]]
call[name[self].pending][name[uid]] assign[=] dictionary[[], []]
call[name[self].update_graph, parameter[constant[None]]] | keyword[def] identifier[_register_engine] ( identifier[self] , identifier[uid] ):
literal[string]
identifier[self] . identifier[targets] . identifier[insert] ( literal[int] , identifier[uid] )
identifier[self] . identifier[loads] . identifier[insert] ( literal[int] , literal[int] )
identifier[self] . identifier[completed] [ identifier[uid] ]= identifier[set] ()
identifier[self] . identifier[failed] [ identifier[uid] ]= identifier[set] ()
identifier[self] . identifier[pending] [ identifier[uid] ]={}
identifier[self] . identifier[update_graph] ( keyword[None] ) | def _register_engine(self, uid):
"""New engine with ident `uid` became available."""
# head of the line:
self.targets.insert(0, uid)
self.loads.insert(0, 0)
# initialize sets
self.completed[uid] = set()
self.failed[uid] = set()
self.pending[uid] = {}
# rescan the graph:
self.update_graph(None) |
def _read_services(self):
"""
Read the control XML file and populate self.services with a list of
services in the form of Service class instances.
"""
# The double slash in the XPath is deliberate, as services can be
# listed in two places (Section 2.3 of uPNP device architecture v1.1)
for node in self._findall('device//serviceList/service'):
findtext = partial(node.findtext, namespaces=self._root_xml.nsmap)
svc = Service(
self,
self._url_base,
findtext('serviceType'),
findtext('serviceId'),
findtext('controlURL'),
findtext('SCPDURL'),
findtext('eventSubURL')
)
self._log.debug(
'%s: Service %r at %r', self.device_name, svc.service_type, svc.scpd_url)
self.services.append(svc)
self.service_map[svc.name] = svc | def function[_read_services, parameter[self]]:
constant[
Read the control XML file and populate self.services with a list of
services in the form of Service class instances.
]
for taget[name[node]] in starred[call[name[self]._findall, parameter[constant[device//serviceList/service]]]] begin[:]
variable[findtext] assign[=] call[name[partial], parameter[name[node].findtext]]
variable[svc] assign[=] call[name[Service], parameter[name[self], name[self]._url_base, call[name[findtext], parameter[constant[serviceType]]], call[name[findtext], parameter[constant[serviceId]]], call[name[findtext], parameter[constant[controlURL]]], call[name[findtext], parameter[constant[SCPDURL]]], call[name[findtext], parameter[constant[eventSubURL]]]]]
call[name[self]._log.debug, parameter[constant[%s: Service %r at %r], name[self].device_name, name[svc].service_type, name[svc].scpd_url]]
call[name[self].services.append, parameter[name[svc]]]
call[name[self].service_map][name[svc].name] assign[=] name[svc] | keyword[def] identifier[_read_services] ( identifier[self] ):
literal[string]
keyword[for] identifier[node] keyword[in] identifier[self] . identifier[_findall] ( literal[string] ):
identifier[findtext] = identifier[partial] ( identifier[node] . identifier[findtext] , identifier[namespaces] = identifier[self] . identifier[_root_xml] . identifier[nsmap] )
identifier[svc] = identifier[Service] (
identifier[self] ,
identifier[self] . identifier[_url_base] ,
identifier[findtext] ( literal[string] ),
identifier[findtext] ( literal[string] ),
identifier[findtext] ( literal[string] ),
identifier[findtext] ( literal[string] ),
identifier[findtext] ( literal[string] )
)
identifier[self] . identifier[_log] . identifier[debug] (
literal[string] , identifier[self] . identifier[device_name] , identifier[svc] . identifier[service_type] , identifier[svc] . identifier[scpd_url] )
identifier[self] . identifier[services] . identifier[append] ( identifier[svc] )
identifier[self] . identifier[service_map] [ identifier[svc] . identifier[name] ]= identifier[svc] | def _read_services(self):
"""
Read the control XML file and populate self.services with a list of
services in the form of Service class instances.
"""
# The double slash in the XPath is deliberate, as services can be
# listed in two places (Section 2.3 of uPNP device architecture v1.1)
for node in self._findall('device//serviceList/service'):
findtext = partial(node.findtext, namespaces=self._root_xml.nsmap)
svc = Service(self, self._url_base, findtext('serviceType'), findtext('serviceId'), findtext('controlURL'), findtext('SCPDURL'), findtext('eventSubURL'))
self._log.debug('%s: Service %r at %r', self.device_name, svc.service_type, svc.scpd_url)
self.services.append(svc)
self.service_map[svc.name] = svc # depends on [control=['for'], data=['node']] |
def bloomfilter(collection, on, column, capacity=3000, error_rate=0.01):
"""
Filter collection on the `on` sequence by BloomFilter built by `column`
:param collection:
:param on: sequence or column name
:param column: instance of Column
:param capacity: numbers of capacity
:type capacity: int
:param error_rate: error rate
:type error_rate: float
:return: collection
:Example:
>>> df1 = DataFrame(pd.DataFrame({'a': ['name1', 'name2', 'name3', 'name1'], 'b': [1, 2, 3, 4]}))
>>> df2 = DataFrame(pd.DataFrame({'a': ['name1']}))
>>> df1.bloom_filter('a', df2.a)
a b
0 name1 1
1 name1 4
"""
if not isinstance(column, Column):
raise TypeError('bloomfilter can only filter on the column of a collection')
# to make the class pickled right by the cloudpickle
with open(os.path.join(path, 'lib', 'bloomfilter.py')) as bloomfilter_file:
local = {}
six.exec_(bloomfilter_file.read(), local)
BloomFilter = local['BloomFilter']
col_name = column.source_name or column.name
on_name = on.name if isinstance(on, SequenceExpr) else on
rand_name = '%s_%s'% (on_name, str(uuid.uuid4()).replace('-', '_'))
on_col = collection._get_field(on).rename(rand_name)
src_collection = collection
collection = collection[collection, on_col]
@output(src_collection.schema.names, src_collection.schema.types)
class Filter(object):
def __init__(self, resources):
table = resources[0]
bloom = BloomFilter(capacity, error_rate)
for row in table:
bloom.add(str(getattr(row, col_name)))
self.bloom = bloom
def __call__(self, row):
if str(getattr(row, rand_name)) not in self.bloom:
return
return row[:-1]
return collection.apply(Filter, axis=1, resources=[column.input, ]) | def function[bloomfilter, parameter[collection, on, column, capacity, error_rate]]:
constant[
Filter collection on the `on` sequence by BloomFilter built by `column`
:param collection:
:param on: sequence or column name
:param column: instance of Column
:param capacity: numbers of capacity
:type capacity: int
:param error_rate: error rate
:type error_rate: float
:return: collection
:Example:
>>> df1 = DataFrame(pd.DataFrame({'a': ['name1', 'name2', 'name3', 'name1'], 'b': [1, 2, 3, 4]}))
>>> df2 = DataFrame(pd.DataFrame({'a': ['name1']}))
>>> df1.bloom_filter('a', df2.a)
a b
0 name1 1
1 name1 4
]
if <ast.UnaryOp object at 0x7da1b26ad300> begin[:]
<ast.Raise object at 0x7da1b26ae4a0>
with call[name[open], parameter[call[name[os].path.join, parameter[name[path], constant[lib], constant[bloomfilter.py]]]]] begin[:]
variable[local] assign[=] dictionary[[], []]
call[name[six].exec_, parameter[call[name[bloomfilter_file].read, parameter[]], name[local]]]
variable[BloomFilter] assign[=] call[name[local]][constant[BloomFilter]]
variable[col_name] assign[=] <ast.BoolOp object at 0x7da1b26af340>
variable[on_name] assign[=] <ast.IfExp object at 0x7da1b26adc30>
variable[rand_name] assign[=] binary_operation[constant[%s_%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b26ace50>, <ast.Call object at 0x7da1b26ad420>]]]
variable[on_col] assign[=] call[call[name[collection]._get_field, parameter[name[on]]].rename, parameter[name[rand_name]]]
variable[src_collection] assign[=] name[collection]
variable[collection] assign[=] call[name[collection]][tuple[[<ast.Name object at 0x7da1b26aeef0>, <ast.Name object at 0x7da1b26ad150>]]]
class class[Filter, parameter[]] begin[:]
def function[__init__, parameter[self, resources]]:
variable[table] assign[=] call[name[resources]][constant[0]]
variable[bloom] assign[=] call[name[BloomFilter], parameter[name[capacity], name[error_rate]]]
for taget[name[row]] in starred[name[table]] begin[:]
call[name[bloom].add, parameter[call[name[str], parameter[call[name[getattr], parameter[name[row], name[col_name]]]]]]]
name[self].bloom assign[=] name[bloom]
def function[__call__, parameter[self, row]]:
if compare[call[name[str], parameter[call[name[getattr], parameter[name[row], name[rand_name]]]]] <ast.NotIn object at 0x7da2590d7190> name[self].bloom] begin[:]
return[None]
return[call[name[row]][<ast.Slice object at 0x7da20cabe140>]]
return[call[name[collection].apply, parameter[name[Filter]]]] | keyword[def] identifier[bloomfilter] ( identifier[collection] , identifier[on] , identifier[column] , identifier[capacity] = literal[int] , identifier[error_rate] = literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[column] , identifier[Column] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , literal[string] , literal[string] )) keyword[as] identifier[bloomfilter_file] :
identifier[local] ={}
identifier[six] . identifier[exec_] ( identifier[bloomfilter_file] . identifier[read] (), identifier[local] )
identifier[BloomFilter] = identifier[local] [ literal[string] ]
identifier[col_name] = identifier[column] . identifier[source_name] keyword[or] identifier[column] . identifier[name]
identifier[on_name] = identifier[on] . identifier[name] keyword[if] identifier[isinstance] ( identifier[on] , identifier[SequenceExpr] ) keyword[else] identifier[on]
identifier[rand_name] = literal[string] %( identifier[on_name] , identifier[str] ( identifier[uuid] . identifier[uuid4] ()). identifier[replace] ( literal[string] , literal[string] ))
identifier[on_col] = identifier[collection] . identifier[_get_field] ( identifier[on] ). identifier[rename] ( identifier[rand_name] )
identifier[src_collection] = identifier[collection]
identifier[collection] = identifier[collection] [ identifier[collection] , identifier[on_col] ]
@ identifier[output] ( identifier[src_collection] . identifier[schema] . identifier[names] , identifier[src_collection] . identifier[schema] . identifier[types] )
keyword[class] identifier[Filter] ( identifier[object] ):
keyword[def] identifier[__init__] ( identifier[self] , identifier[resources] ):
identifier[table] = identifier[resources] [ literal[int] ]
identifier[bloom] = identifier[BloomFilter] ( identifier[capacity] , identifier[error_rate] )
keyword[for] identifier[row] keyword[in] identifier[table] :
identifier[bloom] . identifier[add] ( identifier[str] ( identifier[getattr] ( identifier[row] , identifier[col_name] )))
identifier[self] . identifier[bloom] = identifier[bloom]
keyword[def] identifier[__call__] ( identifier[self] , identifier[row] ):
keyword[if] identifier[str] ( identifier[getattr] ( identifier[row] , identifier[rand_name] )) keyword[not] keyword[in] identifier[self] . identifier[bloom] :
keyword[return]
keyword[return] identifier[row] [:- literal[int] ]
keyword[return] identifier[collection] . identifier[apply] ( identifier[Filter] , identifier[axis] = literal[int] , identifier[resources] =[ identifier[column] . identifier[input] ,]) | def bloomfilter(collection, on, column, capacity=3000, error_rate=0.01):
"""
Filter collection on the `on` sequence by BloomFilter built by `column`
:param collection:
:param on: sequence or column name
:param column: instance of Column
:param capacity: numbers of capacity
:type capacity: int
:param error_rate: error rate
:type error_rate: float
:return: collection
:Example:
>>> df1 = DataFrame(pd.DataFrame({'a': ['name1', 'name2', 'name3', 'name1'], 'b': [1, 2, 3, 4]}))
>>> df2 = DataFrame(pd.DataFrame({'a': ['name1']}))
>>> df1.bloom_filter('a', df2.a)
a b
0 name1 1
1 name1 4
"""
if not isinstance(column, Column):
raise TypeError('bloomfilter can only filter on the column of a collection') # depends on [control=['if'], data=[]]
# to make the class pickled right by the cloudpickle
with open(os.path.join(path, 'lib', 'bloomfilter.py')) as bloomfilter_file:
local = {}
six.exec_(bloomfilter_file.read(), local)
BloomFilter = local['BloomFilter']
col_name = column.source_name or column.name
on_name = on.name if isinstance(on, SequenceExpr) else on
rand_name = '%s_%s' % (on_name, str(uuid.uuid4()).replace('-', '_'))
on_col = collection._get_field(on).rename(rand_name)
src_collection = collection
collection = collection[collection, on_col]
@output(src_collection.schema.names, src_collection.schema.types)
class Filter(object):
def __init__(self, resources):
table = resources[0]
bloom = BloomFilter(capacity, error_rate)
for row in table:
bloom.add(str(getattr(row, col_name))) # depends on [control=['for'], data=['row']]
self.bloom = bloom
def __call__(self, row):
if str(getattr(row, rand_name)) not in self.bloom:
return # depends on [control=['if'], data=[]]
return row[:-1]
return collection.apply(Filter, axis=1, resources=[column.input]) # depends on [control=['with'], data=['bloomfilter_file']] |
def align(self, alignraster, how=np.mean, cxsize=None, cysize=None):
'''
geo.align(geo2, how=np.mean)
Returns both georasters aligned and with the same pixelsize
'''
return align_georasters(self, alignraster, how=how, cxsize=cxsize, cysize=cysize) | def function[align, parameter[self, alignraster, how, cxsize, cysize]]:
constant[
geo.align(geo2, how=np.mean)
Returns both georasters aligned and with the same pixelsize
]
return[call[name[align_georasters], parameter[name[self], name[alignraster]]]] | keyword[def] identifier[align] ( identifier[self] , identifier[alignraster] , identifier[how] = identifier[np] . identifier[mean] , identifier[cxsize] = keyword[None] , identifier[cysize] = keyword[None] ):
literal[string]
keyword[return] identifier[align_georasters] ( identifier[self] , identifier[alignraster] , identifier[how] = identifier[how] , identifier[cxsize] = identifier[cxsize] , identifier[cysize] = identifier[cysize] ) | def align(self, alignraster, how=np.mean, cxsize=None, cysize=None):
"""
geo.align(geo2, how=np.mean)
Returns both georasters aligned and with the same pixelsize
"""
return align_georasters(self, alignraster, how=how, cxsize=cxsize, cysize=cysize) |
def sparse_diff(array, n=1, axis=-1):
"""
A ported sparse version of np.diff.
Uses recursion to compute higher order differences
Parameters
----------
array : sparse array
n : int, default: 1
differencing order
axis : int, default: -1
axis along which differences are computed
Returns
-------
diff_array : sparse array
same shape as input array,
but 'axis' dimension is smaller by 'n'.
"""
if (n < 0) or (int(n) != n):
raise ValueError('Expected order is non-negative integer, '\
'but found: {}'.format(n))
if not sp.sparse.issparse(array):
warnings.warn('Array is not sparse. Consider using numpy.diff')
if n == 0:
return array
nd = array.ndim
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
A = sparse_diff(array, n-1, axis=axis)
return A[slice1] - A[slice2] | def function[sparse_diff, parameter[array, n, axis]]:
constant[
A ported sparse version of np.diff.
Uses recursion to compute higher order differences
Parameters
----------
array : sparse array
n : int, default: 1
differencing order
axis : int, default: -1
axis along which differences are computed
Returns
-------
diff_array : sparse array
same shape as input array,
but 'axis' dimension is smaller by 'n'.
]
if <ast.BoolOp object at 0x7da18bcc88b0> begin[:]
<ast.Raise object at 0x7da18bcc8d00>
if <ast.UnaryOp object at 0x7da18bcc9360> begin[:]
call[name[warnings].warn, parameter[constant[Array is not sparse. Consider using numpy.diff]]]
if compare[name[n] equal[==] constant[0]] begin[:]
return[name[array]]
variable[nd] assign[=] name[array].ndim
variable[slice1] assign[=] binary_operation[list[[<ast.Call object at 0x7da20eb29cc0>]] * name[nd]]
variable[slice2] assign[=] binary_operation[list[[<ast.Call object at 0x7da18f810070>]] * name[nd]]
call[name[slice1]][name[axis]] assign[=] call[name[slice], parameter[constant[1], constant[None]]]
call[name[slice2]][name[axis]] assign[=] call[name[slice], parameter[constant[None], <ast.UnaryOp object at 0x7da18f811330>]]
variable[slice1] assign[=] call[name[tuple], parameter[name[slice1]]]
variable[slice2] assign[=] call[name[tuple], parameter[name[slice2]]]
variable[A] assign[=] call[name[sparse_diff], parameter[name[array], binary_operation[name[n] - constant[1]]]]
return[binary_operation[call[name[A]][name[slice1]] - call[name[A]][name[slice2]]]] | keyword[def] identifier[sparse_diff] ( identifier[array] , identifier[n] = literal[int] , identifier[axis] =- literal[int] ):
literal[string]
keyword[if] ( identifier[n] < literal[int] ) keyword[or] ( identifier[int] ( identifier[n] )!= identifier[n] ):
keyword[raise] identifier[ValueError] ( literal[string] literal[string] . identifier[format] ( identifier[n] ))
keyword[if] keyword[not] identifier[sp] . identifier[sparse] . identifier[issparse] ( identifier[array] ):
identifier[warnings] . identifier[warn] ( literal[string] )
keyword[if] identifier[n] == literal[int] :
keyword[return] identifier[array]
identifier[nd] = identifier[array] . identifier[ndim]
identifier[slice1] =[ identifier[slice] ( keyword[None] )]* identifier[nd]
identifier[slice2] =[ identifier[slice] ( keyword[None] )]* identifier[nd]
identifier[slice1] [ identifier[axis] ]= identifier[slice] ( literal[int] , keyword[None] )
identifier[slice2] [ identifier[axis] ]= identifier[slice] ( keyword[None] ,- literal[int] )
identifier[slice1] = identifier[tuple] ( identifier[slice1] )
identifier[slice2] = identifier[tuple] ( identifier[slice2] )
identifier[A] = identifier[sparse_diff] ( identifier[array] , identifier[n] - literal[int] , identifier[axis] = identifier[axis] )
keyword[return] identifier[A] [ identifier[slice1] ]- identifier[A] [ identifier[slice2] ] | def sparse_diff(array, n=1, axis=-1):
"""
A ported sparse version of np.diff.
Uses recursion to compute higher order differences
Parameters
----------
array : sparse array
n : int, default: 1
differencing order
axis : int, default: -1
axis along which differences are computed
Returns
-------
diff_array : sparse array
same shape as input array,
but 'axis' dimension is smaller by 'n'.
"""
if n < 0 or int(n) != n:
raise ValueError('Expected order is non-negative integer, but found: {}'.format(n)) # depends on [control=['if'], data=[]]
if not sp.sparse.issparse(array):
warnings.warn('Array is not sparse. Consider using numpy.diff') # depends on [control=['if'], data=[]]
if n == 0:
return array # depends on [control=['if'], data=[]]
nd = array.ndim
slice1 = [slice(None)] * nd
slice2 = [slice(None)] * nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
A = sparse_diff(array, n - 1, axis=axis)
return A[slice1] - A[slice2] |
def _process_disease2gene(self, row):
"""
Here, we process the disease-to-gene associations.
Note that we ONLY process direct associations
(not inferred through chemicals).
Furthermore, we also ONLY process "marker/mechanism" associations.
We preferentially utilize OMIM identifiers over MESH identifiers
for disease/phenotype.
Therefore, if a single OMIM id is listed under the "omim_ids" list,
we will choose this over any MeSH id that might be listed as
the disease_id. If multiple OMIM ids are listed in the omim_ids column,
we toss this for now.
(Mostly, we are not sure what to do with this information.)
We also pull in the MeSH labels here (but not OMIM) to ensure that
we have them (as they may not be brought in separately).
:param row:
:return:
"""
# if self.test_mode:
# graph = self.testgraph
# else:
# graph = self.graph
# self._check_list_len(row, 9)
# geno = Genotype(graph)
# gu = GraphUtils(curie_map.get())
model = Model(self.graph)
(gene_symbol, gene_id, disease_name, disease_id, direct_evidence,
inference_chemical_name, inference_score, omim_ids, pubmed_ids) = row
# we only want the direct associations; skipping inferred for now
if direct_evidence == '' or direct_evidence != 'marker/mechanism':
return
# scrub some of the associations...
# it seems odd to link human genes to the following "diseases"
diseases_to_scrub = [
'MESH:D004283', # dog diseases
'MESH:D004195', # disease models, animal
'MESH:D030342', # genetic diseases, inborn
'MESH:D040181', # genetic dieases, x-linked
'MESH:D020022'] # genetic predisposition to a disease
if disease_id in diseases_to_scrub:
LOG.info(
"Skipping association between NCBIGene:%s and %s",
str(gene_id), disease_id)
return
intersect = list(
set(['OMIM:' + str(i) for i in omim_ids.split('|')] +
[disease_id]) & set(self.test_diseaseids))
if self.test_mode and (
int(gene_id) not in self.test_geneids or len(intersect) < 1):
return
# there are three kinds of direct evidence:
# (marker/mechanism | marker/mechanism|therapeutic | therapeutic)
# we are only using the "marker/mechanism" for now
# TODO what does it mean for a gene to be therapeutic for disease?
# a therapeutic target?
gene_id = 'NCBIGene:' + gene_id
preferred_disease_id = disease_id
if omim_ids is not None and omim_ids != '':
omim_id_list = re.split(r'\|', omim_ids)
# If there is only one OMIM ID for the Disease ID
# or in the omim_ids list,
# use the OMIM ID preferentially over any MeSH ID.
if re.match(r'OMIM:.*', disease_id):
if len(omim_id_list) > 1:
# the disease ID is an OMIM ID and
# there is more than one OMIM entry in omim_ids.
# Currently no entries satisfy this condition
pass
elif disease_id != ('OMIM:' + omim_ids):
# the disease ID is an OMIM ID and
# there is only one non-equiv OMIM entry in omim_ids
# we preferentially use the disease_id here
LOG.warning(
"There may be alternate identifier for %s: %s",
disease_id, omim_ids)
# TODO: What should be done with the alternate disease IDs?
else:
if len(omim_id_list) == 1:
# the disease ID is not an OMIM ID
# and there is only one OMIM entry in omim_ids.
preferred_disease_id = 'OMIM:' + omim_ids
elif len(omim_id_list) > 1:
# This is when the disease ID is not an OMIM ID and
# there is more than one OMIM entry in omim_ids.
pass
model.addClassToGraph(gene_id, None)
# not sure if MESH is getting added separately.
# adding labels here for good measure
dlabel = None
if re.match(r'MESH', preferred_disease_id):
dlabel = disease_name
model.addClassToGraph(preferred_disease_id, dlabel)
# Add the disease to gene relationship.
rel_id = self.resolve(direct_evidence)
refs = self._process_pubmed_ids(pubmed_ids)
self._make_association(gene_id, preferred_disease_id, rel_id, refs)
return | def function[_process_disease2gene, parameter[self, row]]:
constant[
Here, we process the disease-to-gene associations.
Note that we ONLY process direct associations
(not inferred through chemicals).
Furthermore, we also ONLY process "marker/mechanism" associations.
We preferentially utilize OMIM identifiers over MESH identifiers
for disease/phenotype.
Therefore, if a single OMIM id is listed under the "omim_ids" list,
we will choose this over any MeSH id that might be listed as
the disease_id. If multiple OMIM ids are listed in the omim_ids column,
we toss this for now.
(Mostly, we are not sure what to do with this information.)
We also pull in the MeSH labels here (but not OMIM) to ensure that
we have them (as they may not be brought in separately).
:param row:
:return:
]
variable[model] assign[=] call[name[Model], parameter[name[self].graph]]
<ast.Tuple object at 0x7da1b1083190> assign[=] name[row]
if <ast.BoolOp object at 0x7da1b1080c10> begin[:]
return[None]
variable[diseases_to_scrub] assign[=] list[[<ast.Constant object at 0x7da1b1082bc0>, <ast.Constant object at 0x7da1b1081de0>, <ast.Constant object at 0x7da1b10816c0>, <ast.Constant object at 0x7da1b1080100>, <ast.Constant object at 0x7da1b10828f0>]]
if compare[name[disease_id] in name[diseases_to_scrub]] begin[:]
call[name[LOG].info, parameter[constant[Skipping association between NCBIGene:%s and %s], call[name[str], parameter[name[gene_id]]], name[disease_id]]]
return[None]
variable[intersect] assign[=] call[name[list], parameter[binary_operation[call[name[set], parameter[binary_operation[<ast.ListComp object at 0x7da1b1081d80> + list[[<ast.Name object at 0x7da1b1080b20>]]]]] <ast.BitAnd object at 0x7da2590d6b60> call[name[set], parameter[name[self].test_diseaseids]]]]]
if <ast.BoolOp object at 0x7da1b1080c40> begin[:]
return[None]
variable[gene_id] assign[=] binary_operation[constant[NCBIGene:] + name[gene_id]]
variable[preferred_disease_id] assign[=] name[disease_id]
if <ast.BoolOp object at 0x7da1b10800d0> begin[:]
variable[omim_id_list] assign[=] call[name[re].split, parameter[constant[\|], name[omim_ids]]]
if call[name[re].match, parameter[constant[OMIM:.*], name[disease_id]]] begin[:]
if compare[call[name[len], parameter[name[omim_id_list]]] greater[>] constant[1]] begin[:]
pass
call[name[model].addClassToGraph, parameter[name[gene_id], constant[None]]]
variable[dlabel] assign[=] constant[None]
if call[name[re].match, parameter[constant[MESH], name[preferred_disease_id]]] begin[:]
variable[dlabel] assign[=] name[disease_name]
call[name[model].addClassToGraph, parameter[name[preferred_disease_id], name[dlabel]]]
variable[rel_id] assign[=] call[name[self].resolve, parameter[name[direct_evidence]]]
variable[refs] assign[=] call[name[self]._process_pubmed_ids, parameter[name[pubmed_ids]]]
call[name[self]._make_association, parameter[name[gene_id], name[preferred_disease_id], name[rel_id], name[refs]]]
return[None] | keyword[def] identifier[_process_disease2gene] ( identifier[self] , identifier[row] ):
literal[string]
identifier[model] = identifier[Model] ( identifier[self] . identifier[graph] )
( identifier[gene_symbol] , identifier[gene_id] , identifier[disease_name] , identifier[disease_id] , identifier[direct_evidence] ,
identifier[inference_chemical_name] , identifier[inference_score] , identifier[omim_ids] , identifier[pubmed_ids] )= identifier[row]
keyword[if] identifier[direct_evidence] == literal[string] keyword[or] identifier[direct_evidence] != literal[string] :
keyword[return]
identifier[diseases_to_scrub] =[
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ]
keyword[if] identifier[disease_id] keyword[in] identifier[diseases_to_scrub] :
identifier[LOG] . identifier[info] (
literal[string] ,
identifier[str] ( identifier[gene_id] ), identifier[disease_id] )
keyword[return]
identifier[intersect] = identifier[list] (
identifier[set] ([ literal[string] + identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[omim_ids] . identifier[split] ( literal[string] )]+
[ identifier[disease_id] ])& identifier[set] ( identifier[self] . identifier[test_diseaseids] ))
keyword[if] identifier[self] . identifier[test_mode] keyword[and] (
identifier[int] ( identifier[gene_id] ) keyword[not] keyword[in] identifier[self] . identifier[test_geneids] keyword[or] identifier[len] ( identifier[intersect] )< literal[int] ):
keyword[return]
identifier[gene_id] = literal[string] + identifier[gene_id]
identifier[preferred_disease_id] = identifier[disease_id]
keyword[if] identifier[omim_ids] keyword[is] keyword[not] keyword[None] keyword[and] identifier[omim_ids] != literal[string] :
identifier[omim_id_list] = identifier[re] . identifier[split] ( literal[string] , identifier[omim_ids] )
keyword[if] identifier[re] . identifier[match] ( literal[string] , identifier[disease_id] ):
keyword[if] identifier[len] ( identifier[omim_id_list] )> literal[int] :
keyword[pass]
keyword[elif] identifier[disease_id] !=( literal[string] + identifier[omim_ids] ):
identifier[LOG] . identifier[warning] (
literal[string] ,
identifier[disease_id] , identifier[omim_ids] )
keyword[else] :
keyword[if] identifier[len] ( identifier[omim_id_list] )== literal[int] :
identifier[preferred_disease_id] = literal[string] + identifier[omim_ids]
keyword[elif] identifier[len] ( identifier[omim_id_list] )> literal[int] :
keyword[pass]
identifier[model] . identifier[addClassToGraph] ( identifier[gene_id] , keyword[None] )
identifier[dlabel] = keyword[None]
keyword[if] identifier[re] . identifier[match] ( literal[string] , identifier[preferred_disease_id] ):
identifier[dlabel] = identifier[disease_name]
identifier[model] . identifier[addClassToGraph] ( identifier[preferred_disease_id] , identifier[dlabel] )
identifier[rel_id] = identifier[self] . identifier[resolve] ( identifier[direct_evidence] )
identifier[refs] = identifier[self] . identifier[_process_pubmed_ids] ( identifier[pubmed_ids] )
identifier[self] . identifier[_make_association] ( identifier[gene_id] , identifier[preferred_disease_id] , identifier[rel_id] , identifier[refs] )
keyword[return] | def _process_disease2gene(self, row):
"""
Here, we process the disease-to-gene associations.
Note that we ONLY process direct associations
(not inferred through chemicals).
Furthermore, we also ONLY process "marker/mechanism" associations.
We preferentially utilize OMIM identifiers over MESH identifiers
for disease/phenotype.
Therefore, if a single OMIM id is listed under the "omim_ids" list,
we will choose this over any MeSH id that might be listed as
the disease_id. If multiple OMIM ids are listed in the omim_ids column,
we toss this for now.
(Mostly, we are not sure what to do with this information.)
We also pull in the MeSH labels here (but not OMIM) to ensure that
we have them (as they may not be brought in separately).
:param row:
:return:
"""
# if self.test_mode:
# graph = self.testgraph
# else:
# graph = self.graph
# self._check_list_len(row, 9)
# geno = Genotype(graph)
# gu = GraphUtils(curie_map.get())
model = Model(self.graph)
(gene_symbol, gene_id, disease_name, disease_id, direct_evidence, inference_chemical_name, inference_score, omim_ids, pubmed_ids) = row
# we only want the direct associations; skipping inferred for now
if direct_evidence == '' or direct_evidence != 'marker/mechanism':
return # depends on [control=['if'], data=[]]
# scrub some of the associations...
# it seems odd to link human genes to the following "diseases"
# dog diseases
# disease models, animal
# genetic diseases, inborn
# genetic dieases, x-linked
diseases_to_scrub = ['MESH:D004283', 'MESH:D004195', 'MESH:D030342', 'MESH:D040181', 'MESH:D020022'] # genetic predisposition to a disease
if disease_id in diseases_to_scrub:
LOG.info('Skipping association between NCBIGene:%s and %s', str(gene_id), disease_id)
return # depends on [control=['if'], data=['disease_id']]
intersect = list(set(['OMIM:' + str(i) for i in omim_ids.split('|')] + [disease_id]) & set(self.test_diseaseids))
if self.test_mode and (int(gene_id) not in self.test_geneids or len(intersect) < 1):
return # depends on [control=['if'], data=[]]
# there are three kinds of direct evidence:
# (marker/mechanism | marker/mechanism|therapeutic | therapeutic)
# we are only using the "marker/mechanism" for now
# TODO what does it mean for a gene to be therapeutic for disease?
# a therapeutic target?
gene_id = 'NCBIGene:' + gene_id
preferred_disease_id = disease_id
if omim_ids is not None and omim_ids != '':
omim_id_list = re.split('\\|', omim_ids)
# If there is only one OMIM ID for the Disease ID
# or in the omim_ids list,
# use the OMIM ID preferentially over any MeSH ID.
if re.match('OMIM:.*', disease_id):
if len(omim_id_list) > 1:
# the disease ID is an OMIM ID and
# there is more than one OMIM entry in omim_ids.
# Currently no entries satisfy this condition
pass # depends on [control=['if'], data=[]]
elif disease_id != 'OMIM:' + omim_ids:
# the disease ID is an OMIM ID and
# there is only one non-equiv OMIM entry in omim_ids
# we preferentially use the disease_id here
LOG.warning('There may be alternate identifier for %s: %s', disease_id, omim_ids) # depends on [control=['if'], data=['disease_id']] # depends on [control=['if'], data=[]]
# TODO: What should be done with the alternate disease IDs?
elif len(omim_id_list) == 1:
# the disease ID is not an OMIM ID
# and there is only one OMIM entry in omim_ids.
preferred_disease_id = 'OMIM:' + omim_ids # depends on [control=['if'], data=[]]
elif len(omim_id_list) > 1:
# This is when the disease ID is not an OMIM ID and
# there is more than one OMIM entry in omim_ids.
pass # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
model.addClassToGraph(gene_id, None)
# not sure if MESH is getting added separately.
# adding labels here for good measure
dlabel = None
if re.match('MESH', preferred_disease_id):
dlabel = disease_name # depends on [control=['if'], data=[]]
model.addClassToGraph(preferred_disease_id, dlabel)
# Add the disease to gene relationship.
rel_id = self.resolve(direct_evidence)
refs = self._process_pubmed_ids(pubmed_ids)
self._make_association(gene_id, preferred_disease_id, rel_id, refs)
return |
def convert_file_to_upload_string(i):
"""
Input: {
filename - file name to convert
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
file_content_base64 - string that can be transmitted through Internet
}
"""
import base64
fn=i['filename']
if not os.path.isfile(fn):
return {'return':1, 'error':'file '+fn+' not found'}
s=b''
try:
f=open(fn, 'rb')
while True:
x = f.read(32768);
if not x: break
s+=x
f.close()
except Exception as e:
return {'return':1, 'error':'error reading file ('+format(e)+')'}
s=base64.urlsafe_b64encode(s).decode('utf8')
return {'return':0, 'file_content_base64': s} | def function[convert_file_to_upload_string, parameter[i]]:
constant[
Input: {
filename - file name to convert
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
file_content_base64 - string that can be transmitted through Internet
}
]
import module[base64]
variable[fn] assign[=] call[name[i]][constant[filename]]
if <ast.UnaryOp object at 0x7da1b22f5450> begin[:]
return[dictionary[[<ast.Constant object at 0x7da1b22f7af0>, <ast.Constant object at 0x7da1b22f60b0>], [<ast.Constant object at 0x7da1b22f6710>, <ast.BinOp object at 0x7da1b22f6410>]]]
variable[s] assign[=] constant[b'']
<ast.Try object at 0x7da1b22f6770>
variable[s] assign[=] call[call[name[base64].urlsafe_b64encode, parameter[name[s]]].decode, parameter[constant[utf8]]]
return[dictionary[[<ast.Constant object at 0x7da1b22f5990>, <ast.Constant object at 0x7da1b22f6110>], [<ast.Constant object at 0x7da1b22f7a60>, <ast.Name object at 0x7da1b22f6c80>]]] | keyword[def] identifier[convert_file_to_upload_string] ( identifier[i] ):
literal[string]
keyword[import] identifier[base64]
identifier[fn] = identifier[i] [ literal[string] ]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[fn] ):
keyword[return] { literal[string] : literal[int] , literal[string] : literal[string] + identifier[fn] + literal[string] }
identifier[s] = literal[string]
keyword[try] :
identifier[f] = identifier[open] ( identifier[fn] , literal[string] )
keyword[while] keyword[True] :
identifier[x] = identifier[f] . identifier[read] ( literal[int] );
keyword[if] keyword[not] identifier[x] : keyword[break]
identifier[s] += identifier[x]
identifier[f] . identifier[close] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[return] { literal[string] : literal[int] , literal[string] : literal[string] + identifier[format] ( identifier[e] )+ literal[string] }
identifier[s] = identifier[base64] . identifier[urlsafe_b64encode] ( identifier[s] ). identifier[decode] ( literal[string] )
keyword[return] { literal[string] : literal[int] , literal[string] : identifier[s] } | def convert_file_to_upload_string(i):
"""
Input: {
filename - file name to convert
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
file_content_base64 - string that can be transmitted through Internet
}
"""
import base64
fn = i['filename']
if not os.path.isfile(fn):
return {'return': 1, 'error': 'file ' + fn + ' not found'} # depends on [control=['if'], data=[]]
s = b''
try:
f = open(fn, 'rb')
while True:
x = f.read(32768)
if not x:
break # depends on [control=['if'], data=[]]
s += x # depends on [control=['while'], data=[]]
f.close() # depends on [control=['try'], data=[]]
except Exception as e:
return {'return': 1, 'error': 'error reading file (' + format(e) + ')'} # depends on [control=['except'], data=['e']]
s = base64.urlsafe_b64encode(s).decode('utf8')
return {'return': 0, 'file_content_base64': s} |
def as_translation_key(self):
"""
Project Translation object or any other derived class into just a
TranslationKey, which has fewer fields and can be used as a
dictionary key.
"""
return TranslationKey(**{
name: getattr(self, name)
for name in TranslationKey._fields}) | def function[as_translation_key, parameter[self]]:
constant[
Project Translation object or any other derived class into just a
TranslationKey, which has fewer fields and can be used as a
dictionary key.
]
return[call[name[TranslationKey], parameter[]]] | keyword[def] identifier[as_translation_key] ( identifier[self] ):
literal[string]
keyword[return] identifier[TranslationKey] (**{
identifier[name] : identifier[getattr] ( identifier[self] , identifier[name] )
keyword[for] identifier[name] keyword[in] identifier[TranslationKey] . identifier[_fields] }) | def as_translation_key(self):
"""
Project Translation object or any other derived class into just a
TranslationKey, which has fewer fields and can be used as a
dictionary key.
"""
return TranslationKey(**{name: getattr(self, name) for name in TranslationKey._fields}) |
def _get_peer_type_param(self, peer_type):
"""
Checks if the resource_root's API version is >= 11 and construct type param.
"""
params = None
if self._get_resource_root().version >= 11:
params = {
'type': peer_type,
}
return params | def function[_get_peer_type_param, parameter[self, peer_type]]:
constant[
Checks if the resource_root's API version is >= 11 and construct type param.
]
variable[params] assign[=] constant[None]
if compare[call[name[self]._get_resource_root, parameter[]].version greater_or_equal[>=] constant[11]] begin[:]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b206aa70>], [<ast.Name object at 0x7da1b20686a0>]]
return[name[params]] | keyword[def] identifier[_get_peer_type_param] ( identifier[self] , identifier[peer_type] ):
literal[string]
identifier[params] = keyword[None]
keyword[if] identifier[self] . identifier[_get_resource_root] (). identifier[version] >= literal[int] :
identifier[params] ={
literal[string] : identifier[peer_type] ,
}
keyword[return] identifier[params] | def _get_peer_type_param(self, peer_type):
"""
Checks if the resource_root's API version is >= 11 and construct type param.
"""
params = None
if self._get_resource_root().version >= 11:
params = {'type': peer_type} # depends on [control=['if'], data=[]]
return params |
def imagine_cache_clear(path, filter_name=None):
"""
Clear cache for resource path.
:param path: str
:param filter_name: str or None
"""
self = current_app.extensions['imagine']
self.clear_cache(path, filter_name) | def function[imagine_cache_clear, parameter[path, filter_name]]:
constant[
Clear cache for resource path.
:param path: str
:param filter_name: str or None
]
variable[self] assign[=] call[name[current_app].extensions][constant[imagine]]
call[name[self].clear_cache, parameter[name[path], name[filter_name]]] | keyword[def] identifier[imagine_cache_clear] ( identifier[path] , identifier[filter_name] = keyword[None] ):
literal[string]
identifier[self] = identifier[current_app] . identifier[extensions] [ literal[string] ]
identifier[self] . identifier[clear_cache] ( identifier[path] , identifier[filter_name] ) | def imagine_cache_clear(path, filter_name=None):
"""
Clear cache for resource path.
:param path: str
:param filter_name: str or None
"""
self = current_app.extensions['imagine']
self.clear_cache(path, filter_name) |
def from_etree(
el, node=None, node_cls=None,
tagsub=functools.partial(re.sub, r'\{.+?\}', ''),
Node=Node):
'''Convert the element tree to a tater tree.
'''
node_cls = node_cls or Node
if node is None:
node = node_cls()
tag = tagsub(el.tag)
attrib = dict((tagsub(k), v) for (k, v) in el.attrib.items())
node.update(attrib, tag=tag)
if el.text:
node['text'] = el.text
for child in el:
child = from_etree(child, node_cls=node_cls)
node.append(child)
if el.tail:
node['tail'] = el.tail
return node | def function[from_etree, parameter[el, node, node_cls, tagsub, Node]]:
constant[Convert the element tree to a tater tree.
]
variable[node_cls] assign[=] <ast.BoolOp object at 0x7da20c6e7c10>
if compare[name[node] is constant[None]] begin[:]
variable[node] assign[=] call[name[node_cls], parameter[]]
variable[tag] assign[=] call[name[tagsub], parameter[name[el].tag]]
variable[attrib] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da207f985b0>]]
call[name[node].update, parameter[name[attrib]]]
if name[el].text begin[:]
call[name[node]][constant[text]] assign[=] name[el].text
for taget[name[child]] in starred[name[el]] begin[:]
variable[child] assign[=] call[name[from_etree], parameter[name[child]]]
call[name[node].append, parameter[name[child]]]
if name[el].tail begin[:]
call[name[node]][constant[tail]] assign[=] name[el].tail
return[name[node]] | keyword[def] identifier[from_etree] (
identifier[el] , identifier[node] = keyword[None] , identifier[node_cls] = keyword[None] ,
identifier[tagsub] = identifier[functools] . identifier[partial] ( identifier[re] . identifier[sub] , literal[string] , literal[string] ),
identifier[Node] = identifier[Node] ):
literal[string]
identifier[node_cls] = identifier[node_cls] keyword[or] identifier[Node]
keyword[if] identifier[node] keyword[is] keyword[None] :
identifier[node] = identifier[node_cls] ()
identifier[tag] = identifier[tagsub] ( identifier[el] . identifier[tag] )
identifier[attrib] = identifier[dict] (( identifier[tagsub] ( identifier[k] ), identifier[v] ) keyword[for] ( identifier[k] , identifier[v] ) keyword[in] identifier[el] . identifier[attrib] . identifier[items] ())
identifier[node] . identifier[update] ( identifier[attrib] , identifier[tag] = identifier[tag] )
keyword[if] identifier[el] . identifier[text] :
identifier[node] [ literal[string] ]= identifier[el] . identifier[text]
keyword[for] identifier[child] keyword[in] identifier[el] :
identifier[child] = identifier[from_etree] ( identifier[child] , identifier[node_cls] = identifier[node_cls] )
identifier[node] . identifier[append] ( identifier[child] )
keyword[if] identifier[el] . identifier[tail] :
identifier[node] [ literal[string] ]= identifier[el] . identifier[tail]
keyword[return] identifier[node] | def from_etree(el, node=None, node_cls=None, tagsub=functools.partial(re.sub, '\\{.+?\\}', ''), Node=Node):
"""Convert the element tree to a tater tree.
"""
node_cls = node_cls or Node
if node is None:
node = node_cls() # depends on [control=['if'], data=['node']]
tag = tagsub(el.tag)
attrib = dict(((tagsub(k), v) for (k, v) in el.attrib.items()))
node.update(attrib, tag=tag)
if el.text:
node['text'] = el.text # depends on [control=['if'], data=[]]
for child in el:
child = from_etree(child, node_cls=node_cls)
node.append(child) # depends on [control=['for'], data=['child']]
if el.tail:
node['tail'] = el.tail # depends on [control=['if'], data=[]]
return node |
def prepare_endpoint_props(self, intfs, svc_ref, export_props):
# type: (List[str], ServiceReference, Dict[str, Any]) -> Dict[str, Any]
"""
Sets up the properties of an endpoint
:param intfs: Specifications to export
:param svc_ref: Reference of the exported service
:param export_props: Export properties
:return: The properties of the endpoint
"""
pkg_vers = rsa.get_package_versions(intfs, export_props)
exported_configs = get_string_plus_property_value(
svc_ref.get_property(SERVICE_EXPORTED_CONFIGS)
)
if not exported_configs:
exported_configs = [self.get_config_name()]
service_intents = set()
svc_intents = export_props.get(SERVICE_INTENTS, None)
if svc_intents:
service_intents.update(svc_intents)
svc_exp_intents = export_props.get(SERVICE_EXPORTED_INTENTS, None)
if svc_exp_intents:
service_intents.update(svc_exp_intents)
svc_exp_intents_extra = export_props.get(
SERVICE_EXPORTED_INTENTS_EXTRA, None
)
if svc_exp_intents_extra:
service_intents.update(svc_exp_intents_extra)
rsa_props = rsa.get_rsa_props(
intfs,
exported_configs,
self._get_supported_intents(),
svc_ref.get_property(SERVICE_ID),
export_props.get(ENDPOINT_FRAMEWORK_UUID),
pkg_vers,
list(service_intents),
)
ecf_props = rsa.get_ecf_props(
self.get_id(),
self.get_namespace(),
rsa.get_next_rsid(),
rsa.get_current_time_millis(),
)
extra_props = rsa.get_extra_props(export_props)
merged = rsa.merge_dicts(rsa_props, ecf_props, extra_props)
# remove service.bundleid
merged.pop(SERVICE_BUNDLE_ID, None)
# remove service.scope
merged.pop(SERVICE_SCOPE, None)
return merged | def function[prepare_endpoint_props, parameter[self, intfs, svc_ref, export_props]]:
constant[
Sets up the properties of an endpoint
:param intfs: Specifications to export
:param svc_ref: Reference of the exported service
:param export_props: Export properties
:return: The properties of the endpoint
]
variable[pkg_vers] assign[=] call[name[rsa].get_package_versions, parameter[name[intfs], name[export_props]]]
variable[exported_configs] assign[=] call[name[get_string_plus_property_value], parameter[call[name[svc_ref].get_property, parameter[name[SERVICE_EXPORTED_CONFIGS]]]]]
if <ast.UnaryOp object at 0x7da1b0393dc0> begin[:]
variable[exported_configs] assign[=] list[[<ast.Call object at 0x7da1b03903a0>]]
variable[service_intents] assign[=] call[name[set], parameter[]]
variable[svc_intents] assign[=] call[name[export_props].get, parameter[name[SERVICE_INTENTS], constant[None]]]
if name[svc_intents] begin[:]
call[name[service_intents].update, parameter[name[svc_intents]]]
variable[svc_exp_intents] assign[=] call[name[export_props].get, parameter[name[SERVICE_EXPORTED_INTENTS], constant[None]]]
if name[svc_exp_intents] begin[:]
call[name[service_intents].update, parameter[name[svc_exp_intents]]]
variable[svc_exp_intents_extra] assign[=] call[name[export_props].get, parameter[name[SERVICE_EXPORTED_INTENTS_EXTRA], constant[None]]]
if name[svc_exp_intents_extra] begin[:]
call[name[service_intents].update, parameter[name[svc_exp_intents_extra]]]
variable[rsa_props] assign[=] call[name[rsa].get_rsa_props, parameter[name[intfs], name[exported_configs], call[name[self]._get_supported_intents, parameter[]], call[name[svc_ref].get_property, parameter[name[SERVICE_ID]]], call[name[export_props].get, parameter[name[ENDPOINT_FRAMEWORK_UUID]]], name[pkg_vers], call[name[list], parameter[name[service_intents]]]]]
variable[ecf_props] assign[=] call[name[rsa].get_ecf_props, parameter[call[name[self].get_id, parameter[]], call[name[self].get_namespace, parameter[]], call[name[rsa].get_next_rsid, parameter[]], call[name[rsa].get_current_time_millis, parameter[]]]]
variable[extra_props] assign[=] call[name[rsa].get_extra_props, parameter[name[export_props]]]
variable[merged] assign[=] call[name[rsa].merge_dicts, parameter[name[rsa_props], name[ecf_props], name[extra_props]]]
call[name[merged].pop, parameter[name[SERVICE_BUNDLE_ID], constant[None]]]
call[name[merged].pop, parameter[name[SERVICE_SCOPE], constant[None]]]
return[name[merged]] | keyword[def] identifier[prepare_endpoint_props] ( identifier[self] , identifier[intfs] , identifier[svc_ref] , identifier[export_props] ):
literal[string]
identifier[pkg_vers] = identifier[rsa] . identifier[get_package_versions] ( identifier[intfs] , identifier[export_props] )
identifier[exported_configs] = identifier[get_string_plus_property_value] (
identifier[svc_ref] . identifier[get_property] ( identifier[SERVICE_EXPORTED_CONFIGS] )
)
keyword[if] keyword[not] identifier[exported_configs] :
identifier[exported_configs] =[ identifier[self] . identifier[get_config_name] ()]
identifier[service_intents] = identifier[set] ()
identifier[svc_intents] = identifier[export_props] . identifier[get] ( identifier[SERVICE_INTENTS] , keyword[None] )
keyword[if] identifier[svc_intents] :
identifier[service_intents] . identifier[update] ( identifier[svc_intents] )
identifier[svc_exp_intents] = identifier[export_props] . identifier[get] ( identifier[SERVICE_EXPORTED_INTENTS] , keyword[None] )
keyword[if] identifier[svc_exp_intents] :
identifier[service_intents] . identifier[update] ( identifier[svc_exp_intents] )
identifier[svc_exp_intents_extra] = identifier[export_props] . identifier[get] (
identifier[SERVICE_EXPORTED_INTENTS_EXTRA] , keyword[None]
)
keyword[if] identifier[svc_exp_intents_extra] :
identifier[service_intents] . identifier[update] ( identifier[svc_exp_intents_extra] )
identifier[rsa_props] = identifier[rsa] . identifier[get_rsa_props] (
identifier[intfs] ,
identifier[exported_configs] ,
identifier[self] . identifier[_get_supported_intents] (),
identifier[svc_ref] . identifier[get_property] ( identifier[SERVICE_ID] ),
identifier[export_props] . identifier[get] ( identifier[ENDPOINT_FRAMEWORK_UUID] ),
identifier[pkg_vers] ,
identifier[list] ( identifier[service_intents] ),
)
identifier[ecf_props] = identifier[rsa] . identifier[get_ecf_props] (
identifier[self] . identifier[get_id] (),
identifier[self] . identifier[get_namespace] (),
identifier[rsa] . identifier[get_next_rsid] (),
identifier[rsa] . identifier[get_current_time_millis] (),
)
identifier[extra_props] = identifier[rsa] . identifier[get_extra_props] ( identifier[export_props] )
identifier[merged] = identifier[rsa] . identifier[merge_dicts] ( identifier[rsa_props] , identifier[ecf_props] , identifier[extra_props] )
identifier[merged] . identifier[pop] ( identifier[SERVICE_BUNDLE_ID] , keyword[None] )
identifier[merged] . identifier[pop] ( identifier[SERVICE_SCOPE] , keyword[None] )
keyword[return] identifier[merged] | def prepare_endpoint_props(self, intfs, svc_ref, export_props):
# type: (List[str], ServiceReference, Dict[str, Any]) -> Dict[str, Any]
'\n Sets up the properties of an endpoint\n\n :param intfs: Specifications to export\n :param svc_ref: Reference of the exported service\n :param export_props: Export properties\n :return: The properties of the endpoint\n '
pkg_vers = rsa.get_package_versions(intfs, export_props)
exported_configs = get_string_plus_property_value(svc_ref.get_property(SERVICE_EXPORTED_CONFIGS))
if not exported_configs:
exported_configs = [self.get_config_name()] # depends on [control=['if'], data=[]]
service_intents = set()
svc_intents = export_props.get(SERVICE_INTENTS, None)
if svc_intents:
service_intents.update(svc_intents) # depends on [control=['if'], data=[]]
svc_exp_intents = export_props.get(SERVICE_EXPORTED_INTENTS, None)
if svc_exp_intents:
service_intents.update(svc_exp_intents) # depends on [control=['if'], data=[]]
svc_exp_intents_extra = export_props.get(SERVICE_EXPORTED_INTENTS_EXTRA, None)
if svc_exp_intents_extra:
service_intents.update(svc_exp_intents_extra) # depends on [control=['if'], data=[]]
rsa_props = rsa.get_rsa_props(intfs, exported_configs, self._get_supported_intents(), svc_ref.get_property(SERVICE_ID), export_props.get(ENDPOINT_FRAMEWORK_UUID), pkg_vers, list(service_intents))
ecf_props = rsa.get_ecf_props(self.get_id(), self.get_namespace(), rsa.get_next_rsid(), rsa.get_current_time_millis())
extra_props = rsa.get_extra_props(export_props)
merged = rsa.merge_dicts(rsa_props, ecf_props, extra_props)
# remove service.bundleid
merged.pop(SERVICE_BUNDLE_ID, None)
# remove service.scope
merged.pop(SERVICE_SCOPE, None)
return merged |
def concat_urls(*urls):
"""Concat Urls
Args:
*args: (str)
Returns:
str: urls starting and ending with / merged with /
"""
normalized_urls = filter(bool, [url.strip('/') for url in urls])
joined_urls = '/'.join(normalized_urls)
if not joined_urls:
return '/'
return '/{}/'.format(joined_urls) | def function[concat_urls, parameter[]]:
constant[Concat Urls
Args:
*args: (str)
Returns:
str: urls starting and ending with / merged with /
]
variable[normalized_urls] assign[=] call[name[filter], parameter[name[bool], <ast.ListComp object at 0x7da20c76c100>]]
variable[joined_urls] assign[=] call[constant[/].join, parameter[name[normalized_urls]]]
if <ast.UnaryOp object at 0x7da20c76dd50> begin[:]
return[constant[/]]
return[call[constant[/{}/].format, parameter[name[joined_urls]]]] | keyword[def] identifier[concat_urls] (* identifier[urls] ):
literal[string]
identifier[normalized_urls] = identifier[filter] ( identifier[bool] ,[ identifier[url] . identifier[strip] ( literal[string] ) keyword[for] identifier[url] keyword[in] identifier[urls] ])
identifier[joined_urls] = literal[string] . identifier[join] ( identifier[normalized_urls] )
keyword[if] keyword[not] identifier[joined_urls] :
keyword[return] literal[string]
keyword[return] literal[string] . identifier[format] ( identifier[joined_urls] ) | def concat_urls(*urls):
"""Concat Urls
Args:
*args: (str)
Returns:
str: urls starting and ending with / merged with /
"""
normalized_urls = filter(bool, [url.strip('/') for url in urls])
joined_urls = '/'.join(normalized_urls)
if not joined_urls:
return '/' # depends on [control=['if'], data=[]]
return '/{}/'.format(joined_urls) |
def decompose_nfkd(text):
"""Perform unicode compatibility decomposition.
This will replace some non-standard value representations in unicode and
normalise them, while also separating characters and their diacritics into
two separate codepoints.
"""
if text is None:
return None
if not hasattr(decompose_nfkd, '_tr'):
decompose_nfkd._tr = Transliterator.createInstance('Any-NFKD')
return decompose_nfkd._tr.transliterate(text) | def function[decompose_nfkd, parameter[text]]:
constant[Perform unicode compatibility decomposition.
This will replace some non-standard value representations in unicode and
normalise them, while also separating characters and their diacritics into
two separate codepoints.
]
if compare[name[text] is constant[None]] begin[:]
return[constant[None]]
if <ast.UnaryOp object at 0x7da18f58f040> begin[:]
name[decompose_nfkd]._tr assign[=] call[name[Transliterator].createInstance, parameter[constant[Any-NFKD]]]
return[call[name[decompose_nfkd]._tr.transliterate, parameter[name[text]]]] | keyword[def] identifier[decompose_nfkd] ( identifier[text] ):
literal[string]
keyword[if] identifier[text] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[if] keyword[not] identifier[hasattr] ( identifier[decompose_nfkd] , literal[string] ):
identifier[decompose_nfkd] . identifier[_tr] = identifier[Transliterator] . identifier[createInstance] ( literal[string] )
keyword[return] identifier[decompose_nfkd] . identifier[_tr] . identifier[transliterate] ( identifier[text] ) | def decompose_nfkd(text):
"""Perform unicode compatibility decomposition.
This will replace some non-standard value representations in unicode and
normalise them, while also separating characters and their diacritics into
two separate codepoints.
"""
if text is None:
return None # depends on [control=['if'], data=[]]
if not hasattr(decompose_nfkd, '_tr'):
decompose_nfkd._tr = Transliterator.createInstance('Any-NFKD') # depends on [control=['if'], data=[]]
return decompose_nfkd._tr.transliterate(text) |
def http_responder_factory(proto):
"""
The default factory function which creates a GrowlerHTTPResponder with
this object as the parent protocol, and the application's req/res
factory functions.
To change the default responder, overload this method with the same
to return your
own responder.
Params
------
proto : GrowlerHTTPProtocol
Explicitly passed protocol object (actually it's what would be
'self'!)
Note
----
This method is decorated with @staticmethod, as the connection_made
method of GrowlerProtocol explicitly passes `self` as a parameters,
instead of treating as a bound method.
"""
return GrowlerHTTPResponder(
proto,
request_factory=proto.http_application._request_class,
response_factory=proto.http_application._response_class,
) | def function[http_responder_factory, parameter[proto]]:
constant[
The default factory function which creates a GrowlerHTTPResponder with
this object as the parent protocol, and the application's req/res
factory functions.
To change the default responder, overload this method with the same
to return your
own responder.
Params
------
proto : GrowlerHTTPProtocol
Explicitly passed protocol object (actually it's what would be
'self'!)
Note
----
This method is decorated with @staticmethod, as the connection_made
method of GrowlerProtocol explicitly passes `self` as a parameters,
instead of treating as a bound method.
]
return[call[name[GrowlerHTTPResponder], parameter[name[proto]]]] | keyword[def] identifier[http_responder_factory] ( identifier[proto] ):
literal[string]
keyword[return] identifier[GrowlerHTTPResponder] (
identifier[proto] ,
identifier[request_factory] = identifier[proto] . identifier[http_application] . identifier[_request_class] ,
identifier[response_factory] = identifier[proto] . identifier[http_application] . identifier[_response_class] ,
) | def http_responder_factory(proto):
"""
The default factory function which creates a GrowlerHTTPResponder with
this object as the parent protocol, and the application's req/res
factory functions.
To change the default responder, overload this method with the same
to return your
own responder.
Params
------
proto : GrowlerHTTPProtocol
Explicitly passed protocol object (actually it's what would be
'self'!)
Note
----
This method is decorated with @staticmethod, as the connection_made
method of GrowlerProtocol explicitly passes `self` as a parameters,
instead of treating as a bound method.
"""
return GrowlerHTTPResponder(proto, request_factory=proto.http_application._request_class, response_factory=proto.http_application._response_class) |
def head_ref(self):
"""
Filters the current DataFrame to only contain those rows whose reference is HEAD.
>>> heads_df = refs_df.head_ref
:rtype: ReferencesDataFrame
"""
return ReferencesDataFrame(self._engine_dataframe.getHEAD(),
self._session, self._implicits) | def function[head_ref, parameter[self]]:
constant[
Filters the current DataFrame to only contain those rows whose reference is HEAD.
>>> heads_df = refs_df.head_ref
:rtype: ReferencesDataFrame
]
return[call[name[ReferencesDataFrame], parameter[call[name[self]._engine_dataframe.getHEAD, parameter[]], name[self]._session, name[self]._implicits]]] | keyword[def] identifier[head_ref] ( identifier[self] ):
literal[string]
keyword[return] identifier[ReferencesDataFrame] ( identifier[self] . identifier[_engine_dataframe] . identifier[getHEAD] (),
identifier[self] . identifier[_session] , identifier[self] . identifier[_implicits] ) | def head_ref(self):
"""
Filters the current DataFrame to only contain those rows whose reference is HEAD.
>>> heads_df = refs_df.head_ref
:rtype: ReferencesDataFrame
"""
return ReferencesDataFrame(self._engine_dataframe.getHEAD(), self._session, self._implicits) |
def deprecated_capacity_meyerhof_and_hanna_1978(sl_0, sl_1, h0, fd, verbose=0):
"""
Calculates the two-layered foundation capacity according Meyerhof and Hanna (1978)
:param sl_0: Top Soil object
:param sl_1: Base Soil object
:param h0: Height of top soil layer
:param fd: Foundation object
:param h_l: Horizontal load parallel to length
:param h_b: Horizontal load parallel to width
:param vertical_load: Vertical load
:param verbose: verbosity
:return: ultimate bearing stress
"""
# UNFINISHED, this code is copied from the Meyerhoff method
# horizontal_load = np.sqrt(h_l ** 2 + h_b ** 2)
sl_0.nq_factor_0 = (
(np.tan(np.pi / 4 + np.deg2rad(sl_0.phi / 2))) ** 2 * np.exp(np.pi * np.tan(np.deg2rad(sl_0.phi))))
if sl_0.phi == 0:
sl_0.nc_factor_0 = 5.14
else:
sl_0.nc_factor_0 = (sl_0.nq_factor_0 - 1) / np.tan(np.deg2rad(sl_0.phi))
sl_0.ng_factor_0 = (sl_0.nq_factor_0 - 1) * np.tan(1.4 * np.deg2rad(sl_0.phi))
sl_1.nq_factor_1 = (
(np.tan(np.pi / 4 + np.deg2rad(sl_1.phi / 2))) ** 2 * np.exp(np.pi * np.tan(np.deg2rad(sl_1.phi))))
if sl_1.phi == 0:
sl_1.nc_factor_1 = 5.14
else:
sl_1.nc_factor_1 = (sl_1.nq_factor_1 - 1) / np.tan(np.deg2rad(sl_1.phi))
sl_1.ng_factor_1 = (sl_1.nq_factor_1 - 1) * np.tan(1.4 * np.deg2rad(sl_1.phi))
if verbose:
log("Nc: ", sl_1.nc_factor_1)
log("Nq: ", sl_1.nq_factor_1)
log("Ng: ", sl_1.ng_factor_1)
sl_0.kp_0 = (np.tan(np.pi / 4 + np.deg2rad(sl_0.phi / 2))) ** 2
sl_1.kp_1 = (np.tan(np.pi / 4 + np.deg2rad(sl_1.phi / 2))) ** 2
# shape factors
# s_c = 1 + 0.2 * kp * fd.width / fd.length
if sl_0.phi >= 10:
sl_0.s_c_0 = 1 + 0.2 * sl_0.kp_0 * (fd.width / fd.length)
sl_0.s_q_0 = 1.0 + 0.1 * sl_0.kp_0 * (fd.width / fd.length)
else:
sl_0.s_c_0 = 1 + 0.2 * (fd.width / fd.length)
sl_0.s_q_0 = 1.0
sl_0.s_g_0 = sl_0.s_q_0
if sl_1.phi >= 10:
sl_1.s_c_1 = 1 + 0.2 * sl_1.kp_1 * (fd.width / fd.length)
sl_1.s_q_1 = 1.0 + 0.1 * sl_1.kp_1 * (fd.width / fd.length)
else:
sl_1.s_c_1 = 1 + 0.2 * (fd.width / fd.length)
sl_1.s_q_1 = 1.0
sl_1.s_g_1 = sl_1.s_q_1
"""
# depth factors
d_c = 1 + 0.2 * np.sqrt(kp) * fd.depth / fd.width
if sl_0.phi > 10:
d_q = 1 + 0.1 * np.sqrt(kp) * fd.depth / fd.width
else:
d_q = 1.0
d_g = d_q
# inclination factors:
theta_load = np.arctan(horizontal_load / vertical_load)
i_c = (1 - theta_load / (np.pi * 0.5)) ** 2
i_q = i_c
if sl_0.phi > 0:
i_g = (1 - theta_load / sl_0.phi_r) ** 2
else:
i_g = 0
"""
# stress at footing base:
# q_d = sl_0.unit_dry_weight_0 * fd.depth
# ks
sl_0.q_0 = (sl_0.cohesion * sl_0.nc_factor_0) + (0.5 * sl_0.unit_dry_weight * fd.width * sl_0.ng_factor_0)
sl_1.q_1 = (sl_1.cohesion * sl_1.nc_factor_1) + (0.5 * sl_1.unit_dry_weight * fd.width * sl_1.ng_factor_1)
q1_q0 = sl_1.q_1 / sl_0.q_0
x_0 = np.array([0, 20.08, 22.42, 25.08, 27.58, 30.08, 32.58, 34.92, 37.83, 40.00, 42.67, 45.00, 47.00, 49.75])
y_0 = np.array([0.93, 0.93, 0.93, 0.93, 1.01, 1.17, 1.32, 1.56, 1.87, 2.26, 2.72, 3.35, 3.81, 4.82])
x_2 = np.array([0, 20.08, 22.50, 25.08, 27.58, 30.08, 32.50, 35.00, 37.67, 40.17, 42.67, 45.00, 47.50, 50.00])
y_2 = np.array([1.55, 1.55, 1.71, 1.86, 2.10, 2.33, 2.72, 3.11, 3.81, 4.43, 5.28, 6.14, 7.46, 9.24])
x_4 = np.array([0, 20.00, 22.51, 25.10, 27.69, 30.11, 32.45, 35.04, 37.88, 40.14, 42.65, 45.07, 47.33, 50.08])
y_4 = np.array([2.49, 2.49, 2.64, 2.87, 3.34, 3.81, 4.43, 5.20, 6.29, 7.38, 9.01, 11.11, 14.29, 19.34])
x_10 = np.array([0, 20.00, 22.50, 25.08, 28.00, 30.00, 32.50, 34.92, 37.50, 40.17, 42.42, 45.00, 47.17, 50.08])
y_10 = np.array([3.27, 3.27, 3.74, 4.44, 5.37, 6.07, 7.16, 8.33, 10.04, 12.30, 15.95, 21.17, 27.47, 40.00])
x_int = sl_0.phi
if sl_0.phi < 1:
fd.ks = 0
else:
if q1_q0 == 0:
fd.ks = np.interp(x_int, x_0, y_0)
elif q1_q0 == 0.2:
fd.ks = np.interp(x_int, x_2, y_2)
elif q1_q0 == 0.4:
fd.ks = np.interp(x_int, x_4, y_4)
elif q1_q0 == 1.0:
fd.ks = np.interp(x_int, x_10, y_10)
elif 0 < q1_q0 < 0.2:
ks_1 = np.interp(x_int, x_0, y_0)
ks_2 = np.interp(x_int, x_2, y_2)
fd.ks = (((ks_2 - ks_1) * q1_q0) / 0.2) + ks_1
elif 0.2 < q1_q0 < 0.4:
ks_1 = np.interp(x_int, x_2, y_2)
ks_2 = np.interp(x_int, x_4, y_4)
fd.ks = (((ks_2 - ks_1) * (q1_q0 - 0.2)) / 0.2) + ks_1
elif 0.4 < q1_q0 < 1.0:
ks_1 = np.interp(x_int, x_4, y_4)
ks_2 = np.interp(x_int, x_10, y_10)
fd.ks = (((ks_2 - ks_1) * (q1_q0 - 0.4)) / 0.6) + ks_1
else:
raise DesignError("Cannot compute 'ks', bearing ratio out-of-range (q1_q0 = %.3f) required: 0-1." % q1_q0)
# ca
if sl_0.cohesion == 0:
c1_c0 = 0
else:
c1_c0 = sl_1.cohesion / sl_0.cohesion
x = np.array([0.000, 0.082, 0.206, 0.298, 0.404, 0.509, 0.598, 0.685, 0.772])
y = np.array([0.627, 0.700, 0.794, 0.855, 0.912, 0.948, 0.968, 0.983, 0.997])
ca_c0 = np.interp(c1_c0, x, y)
fd.ca = ca_c0 * sl_0.cohesion
# Capacity
a = 1 # ????
s = 1 # ????
r = 1 + (fd.width / fd.length)
q_b1 = (sl_1.cohesion * sl_1.nc_factor_1 * sl_1.s_c_1)
q_b2 = (sl_0.unit_dry_weight * h0 * sl_1.nq_factor_1 * sl_1.s_q_1)
q_b3 = (sl_1.unit_dry_weight * fd.width * sl_1.ng_factor_1 * sl_1.s_g_1 / 2)
fd.q_b = q_b1 + q_b2 + q_b3
fd.q_ult4 = (r * (2 * fd.ca * (h0 - fd.depth) / fd.width) * a)
fd.q_ult5 = r * (sl_0.unit_dry_weight * ((h0 - fd.depth) ** 2)) * (1 + (2 * fd.depth / (h0 - fd.depth))) * (
fd.ks * np.tan(np.deg2rad(sl_0.phi)) / fd.width) * s
fd.q_ult6 = (sl_0.unit_dry_weight * (h0 - fd.depth))
fd.q_ult = fd.q_b + fd.q_ult4 + fd.q_ult5 - fd.q_ult6
# maximum value (qu <= qt)
q_t1 = (sl_0.cohesion * sl_0.nc_factor_0 * sl_0.s_c_0)
q_t2 = (sl_0.unit_dry_weight * fd.depth * sl_0.nq_factor_0 * sl_0.s_q_0)
q_t3 = (sl_0.unit_dry_weight * fd.width * sl_0.ng_factor_0 * sl_0.s_g_0 / 2)
fd.q_t = q_t1 + q_t2 + q_t3
if fd.q_ult > fd.q_t:
fd.q_ult = fd.q_t
return fd.q_ult | def function[deprecated_capacity_meyerhof_and_hanna_1978, parameter[sl_0, sl_1, h0, fd, verbose]]:
constant[
Calculates the two-layered foundation capacity according Meyerhof and Hanna (1978)
:param sl_0: Top Soil object
:param sl_1: Base Soil object
:param h0: Height of top soil layer
:param fd: Foundation object
:param h_l: Horizontal load parallel to length
:param h_b: Horizontal load parallel to width
:param vertical_load: Vertical load
:param verbose: verbosity
:return: ultimate bearing stress
]
name[sl_0].nq_factor_0 assign[=] binary_operation[binary_operation[call[name[np].tan, parameter[binary_operation[binary_operation[name[np].pi / constant[4]] + call[name[np].deg2rad, parameter[binary_operation[name[sl_0].phi / constant[2]]]]]]] ** constant[2]] * call[name[np].exp, parameter[binary_operation[name[np].pi * call[name[np].tan, parameter[call[name[np].deg2rad, parameter[name[sl_0].phi]]]]]]]]
if compare[name[sl_0].phi equal[==] constant[0]] begin[:]
name[sl_0].nc_factor_0 assign[=] constant[5.14]
name[sl_0].ng_factor_0 assign[=] binary_operation[binary_operation[name[sl_0].nq_factor_0 - constant[1]] * call[name[np].tan, parameter[binary_operation[constant[1.4] * call[name[np].deg2rad, parameter[name[sl_0].phi]]]]]]
name[sl_1].nq_factor_1 assign[=] binary_operation[binary_operation[call[name[np].tan, parameter[binary_operation[binary_operation[name[np].pi / constant[4]] + call[name[np].deg2rad, parameter[binary_operation[name[sl_1].phi / constant[2]]]]]]] ** constant[2]] * call[name[np].exp, parameter[binary_operation[name[np].pi * call[name[np].tan, parameter[call[name[np].deg2rad, parameter[name[sl_1].phi]]]]]]]]
if compare[name[sl_1].phi equal[==] constant[0]] begin[:]
name[sl_1].nc_factor_1 assign[=] constant[5.14]
name[sl_1].ng_factor_1 assign[=] binary_operation[binary_operation[name[sl_1].nq_factor_1 - constant[1]] * call[name[np].tan, parameter[binary_operation[constant[1.4] * call[name[np].deg2rad, parameter[name[sl_1].phi]]]]]]
if name[verbose] begin[:]
call[name[log], parameter[constant[Nc: ], name[sl_1].nc_factor_1]]
call[name[log], parameter[constant[Nq: ], name[sl_1].nq_factor_1]]
call[name[log], parameter[constant[Ng: ], name[sl_1].ng_factor_1]]
name[sl_0].kp_0 assign[=] binary_operation[call[name[np].tan, parameter[binary_operation[binary_operation[name[np].pi / constant[4]] + call[name[np].deg2rad, parameter[binary_operation[name[sl_0].phi / constant[2]]]]]]] ** constant[2]]
name[sl_1].kp_1 assign[=] binary_operation[call[name[np].tan, parameter[binary_operation[binary_operation[name[np].pi / constant[4]] + call[name[np].deg2rad, parameter[binary_operation[name[sl_1].phi / constant[2]]]]]]] ** constant[2]]
if compare[name[sl_0].phi greater_or_equal[>=] constant[10]] begin[:]
name[sl_0].s_c_0 assign[=] binary_operation[constant[1] + binary_operation[binary_operation[constant[0.2] * name[sl_0].kp_0] * binary_operation[name[fd].width / name[fd].length]]]
name[sl_0].s_q_0 assign[=] binary_operation[constant[1.0] + binary_operation[binary_operation[constant[0.1] * name[sl_0].kp_0] * binary_operation[name[fd].width / name[fd].length]]]
name[sl_0].s_g_0 assign[=] name[sl_0].s_q_0
if compare[name[sl_1].phi greater_or_equal[>=] constant[10]] begin[:]
name[sl_1].s_c_1 assign[=] binary_operation[constant[1] + binary_operation[binary_operation[constant[0.2] * name[sl_1].kp_1] * binary_operation[name[fd].width / name[fd].length]]]
name[sl_1].s_q_1 assign[=] binary_operation[constant[1.0] + binary_operation[binary_operation[constant[0.1] * name[sl_1].kp_1] * binary_operation[name[fd].width / name[fd].length]]]
name[sl_1].s_g_1 assign[=] name[sl_1].s_q_1
constant[
# depth factors
d_c = 1 + 0.2 * np.sqrt(kp) * fd.depth / fd.width
if sl_0.phi > 10:
d_q = 1 + 0.1 * np.sqrt(kp) * fd.depth / fd.width
else:
d_q = 1.0
d_g = d_q
# inclination factors:
theta_load = np.arctan(horizontal_load / vertical_load)
i_c = (1 - theta_load / (np.pi * 0.5)) ** 2
i_q = i_c
if sl_0.phi > 0:
i_g = (1 - theta_load / sl_0.phi_r) ** 2
else:
i_g = 0
]
name[sl_0].q_0 assign[=] binary_operation[binary_operation[name[sl_0].cohesion * name[sl_0].nc_factor_0] + binary_operation[binary_operation[binary_operation[constant[0.5] * name[sl_0].unit_dry_weight] * name[fd].width] * name[sl_0].ng_factor_0]]
name[sl_1].q_1 assign[=] binary_operation[binary_operation[name[sl_1].cohesion * name[sl_1].nc_factor_1] + binary_operation[binary_operation[binary_operation[constant[0.5] * name[sl_1].unit_dry_weight] * name[fd].width] * name[sl_1].ng_factor_1]]
variable[q1_q0] assign[=] binary_operation[name[sl_1].q_1 / name[sl_0].q_0]
variable[x_0] assign[=] call[name[np].array, parameter[list[[<ast.Constant object at 0x7da1b0315450>, <ast.Constant object at 0x7da1b0317760>, <ast.Constant object at 0x7da1b0317f40>, <ast.Constant object at 0x7da1b0314dc0>, <ast.Constant object at 0x7da1b0315120>, <ast.Constant object at 0x7da1b0315060>, <ast.Constant object at 0x7da1b0315750>, <ast.Constant object at 0x7da1b0317ca0>, <ast.Constant object at 0x7da1b0315840>, <ast.Constant object at 0x7da1b0314490>, <ast.Constant object at 0x7da1b0314220>, <ast.Constant object at 0x7da1b0315ea0>, <ast.Constant object at 0x7da1b03164d0>, <ast.Constant object at 0x7da1b03144f0>]]]]
variable[y_0] assign[=] call[name[np].array, parameter[list[[<ast.Constant object at 0x7da1b0316c50>, <ast.Constant object at 0x7da1b0317100>, <ast.Constant object at 0x7da1b0317d00>, <ast.Constant object at 0x7da1b0315900>, <ast.Constant object at 0x7da1b03156f0>, <ast.Constant object at 0x7da1b0315150>, <ast.Constant object at 0x7da1b0314070>, <ast.Constant object at 0x7da1b03149d0>, <ast.Constant object at 0x7da1b0315ba0>, <ast.Constant object at 0x7da1b03154b0>, <ast.Constant object at 0x7da1b0317a00>, <ast.Constant object at 0x7da1b0315810>, <ast.Constant object at 0x7da1b0317f10>, <ast.Constant object at 0x7da1b0316050>]]]]
variable[x_2] assign[=] call[name[np].array, parameter[list[[<ast.Constant object at 0x7da1b03174c0>, <ast.Constant object at 0x7da1b03165c0>, <ast.Constant object at 0x7da1b0317940>, <ast.Constant object at 0x7da1b0314970>, <ast.Constant object at 0x7da1b0315f00>, <ast.Constant object at 0x7da1b0315540>, <ast.Constant object at 0x7da1b0317280>, <ast.Constant object at 0x7da1b03158a0>, <ast.Constant object at 0x7da1b0316080>, <ast.Constant object at 0x7da1b0317640>, <ast.Constant object at 0x7da1b0314730>, <ast.Constant object at 0x7da1b0316950>, <ast.Constant object at 0x7da1b0314250>, <ast.Constant object at 0x7da1b0314be0>]]]]
variable[y_2] assign[=] call[name[np].array, parameter[list[[<ast.Constant object at 0x7da1b0314130>, <ast.Constant object at 0x7da1b0314160>, <ast.Constant object at 0x7da1b030bfd0>, <ast.Constant object at 0x7da1b030bfa0>, <ast.Constant object at 0x7da1b030bf70>, <ast.Constant object at 0x7da1b030bf40>, <ast.Constant object at 0x7da1b030bf10>, <ast.Constant object at 0x7da1b030bee0>, <ast.Constant object at 0x7da1b030beb0>, <ast.Constant object at 0x7da1b030be80>, <ast.Constant object at 0x7da1b030be50>, <ast.Constant object at 0x7da1b030be20>, <ast.Constant object at 0x7da1b030bdf0>, <ast.Constant object at 0x7da1b030bdc0>]]]]
variable[x_4] assign[=] call[name[np].array, parameter[list[[<ast.Constant object at 0x7da1b030bc70>, <ast.Constant object at 0x7da1b030bc40>, <ast.Constant object at 0x7da1b030bc10>, <ast.Constant object at 0x7da1b030bbe0>, <ast.Constant object at 0x7da1b030bbb0>, <ast.Constant object at 0x7da1b030bb80>, <ast.Constant object at 0x7da1b030bb50>, <ast.Constant object at 0x7da1b030bb20>, <ast.Constant object at 0x7da1b030baf0>, <ast.Constant object at 0x7da1b030bac0>, <ast.Constant object at 0x7da1b030ba90>, <ast.Constant object at 0x7da1b030ba60>, <ast.Constant object at 0x7da1b030ba30>, <ast.Constant object at 0x7da1b030ba00>]]]]
variable[y_4] assign[=] call[name[np].array, parameter[list[[<ast.Constant object at 0x7da1b030a4d0>, <ast.Constant object at 0x7da1b030a4a0>, <ast.Constant object at 0x7da1b030a470>, <ast.Constant object at 0x7da1b030a440>, <ast.Constant object at 0x7da1b030a410>, <ast.Constant object at 0x7da1b030a3e0>, <ast.Constant object at 0x7da1b030a3b0>, <ast.Constant object at 0x7da1b030a380>, <ast.Constant object at 0x7da1b030a350>, <ast.Constant object at 0x7da1b030a320>, <ast.Constant object at 0x7da1b030a2f0>, <ast.Constant object at 0x7da1b030a2c0>, <ast.Constant object at 0x7da1b030a290>, <ast.Constant object at 0x7da1b030a260>]]]]
variable[x_10] assign[=] call[name[np].array, parameter[list[[<ast.Constant object at 0x7da1b030a110>, <ast.Constant object at 0x7da1b030a0e0>, <ast.Constant object at 0x7da1b030a0b0>, <ast.Constant object at 0x7da1b030a080>, <ast.Constant object at 0x7da1b030a050>, <ast.Constant object at 0x7da1b030a020>, <ast.Constant object at 0x7da1b0309ff0>, <ast.Constant object at 0x7da1b0309fc0>, <ast.Constant object at 0x7da1b0309f90>, <ast.Constant object at 0x7da1b0309f60>, <ast.Constant object at 0x7da1b0309f30>, <ast.Constant object at 0x7da1b0309f00>, <ast.Constant object at 0x7da1b0309ed0>, <ast.Constant object at 0x7da1b0309ea0>]]]]
variable[y_10] assign[=] call[name[np].array, parameter[list[[<ast.Constant object at 0x7da1b0309d50>, <ast.Constant object at 0x7da1b0309d20>, <ast.Constant object at 0x7da1b0309cf0>, <ast.Constant object at 0x7da1b0309cc0>, <ast.Constant object at 0x7da1b0309c90>, <ast.Constant object at 0x7da1b0309c60>, <ast.Constant object at 0x7da1b0309c30>, <ast.Constant object at 0x7da1b0309c00>, <ast.Constant object at 0x7da1b0309bd0>, <ast.Constant object at 0x7da1b0309ba0>, <ast.Constant object at 0x7da1b0309b70>, <ast.Constant object at 0x7da1b0309b40>, <ast.Constant object at 0x7da1b0309b10>, <ast.Constant object at 0x7da1b0309ae0>]]]]
variable[x_int] assign[=] name[sl_0].phi
if compare[name[sl_0].phi less[<] constant[1]] begin[:]
name[fd].ks assign[=] constant[0]
if compare[name[sl_0].cohesion equal[==] constant[0]] begin[:]
variable[c1_c0] assign[=] constant[0]
variable[x] assign[=] call[name[np].array, parameter[list[[<ast.Constant object at 0x7da1b032b5e0>, <ast.Constant object at 0x7da1b032b5b0>, <ast.Constant object at 0x7da1b032b580>, <ast.Constant object at 0x7da1b032b550>, <ast.Constant object at 0x7da1b032b520>, <ast.Constant object at 0x7da1b032b4f0>, <ast.Constant object at 0x7da1b03280d0>, <ast.Constant object at 0x7da1b0328100>, <ast.Constant object at 0x7da1b0328130>]]]]
variable[y] assign[=] call[name[np].array, parameter[list[[<ast.Constant object at 0x7da1b0328280>, <ast.Constant object at 0x7da1b03282b0>, <ast.Constant object at 0x7da1b03282e0>, <ast.Constant object at 0x7da1b0328310>, <ast.Constant object at 0x7da1b0328340>, <ast.Constant object at 0x7da1b0328370>, <ast.Constant object at 0x7da1b03283a0>, <ast.Constant object at 0x7da1b03283d0>, <ast.Constant object at 0x7da1b0328400>]]]]
variable[ca_c0] assign[=] call[name[np].interp, parameter[name[c1_c0], name[x], name[y]]]
name[fd].ca assign[=] binary_operation[name[ca_c0] * name[sl_0].cohesion]
variable[a] assign[=] constant[1]
variable[s] assign[=] constant[1]
variable[r] assign[=] binary_operation[constant[1] + binary_operation[name[fd].width / name[fd].length]]
variable[q_b1] assign[=] binary_operation[binary_operation[name[sl_1].cohesion * name[sl_1].nc_factor_1] * name[sl_1].s_c_1]
variable[q_b2] assign[=] binary_operation[binary_operation[binary_operation[name[sl_0].unit_dry_weight * name[h0]] * name[sl_1].nq_factor_1] * name[sl_1].s_q_1]
variable[q_b3] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[sl_1].unit_dry_weight * name[fd].width] * name[sl_1].ng_factor_1] * name[sl_1].s_g_1] / constant[2]]
name[fd].q_b assign[=] binary_operation[binary_operation[name[q_b1] + name[q_b2]] + name[q_b3]]
name[fd].q_ult4 assign[=] binary_operation[binary_operation[name[r] * binary_operation[binary_operation[binary_operation[constant[2] * name[fd].ca] * binary_operation[name[h0] - name[fd].depth]] / name[fd].width]] * name[a]]
name[fd].q_ult5 assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[r] * binary_operation[name[sl_0].unit_dry_weight * binary_operation[binary_operation[name[h0] - name[fd].depth] ** constant[2]]]] * binary_operation[constant[1] + binary_operation[binary_operation[constant[2] * name[fd].depth] / binary_operation[name[h0] - name[fd].depth]]]] * binary_operation[binary_operation[name[fd].ks * call[name[np].tan, parameter[call[name[np].deg2rad, parameter[name[sl_0].phi]]]]] / name[fd].width]] * name[s]]
name[fd].q_ult6 assign[=] binary_operation[name[sl_0].unit_dry_weight * binary_operation[name[h0] - name[fd].depth]]
name[fd].q_ult assign[=] binary_operation[binary_operation[binary_operation[name[fd].q_b + name[fd].q_ult4] + name[fd].q_ult5] - name[fd].q_ult6]
variable[q_t1] assign[=] binary_operation[binary_operation[name[sl_0].cohesion * name[sl_0].nc_factor_0] * name[sl_0].s_c_0]
variable[q_t2] assign[=] binary_operation[binary_operation[binary_operation[name[sl_0].unit_dry_weight * name[fd].depth] * name[sl_0].nq_factor_0] * name[sl_0].s_q_0]
variable[q_t3] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[sl_0].unit_dry_weight * name[fd].width] * name[sl_0].ng_factor_0] * name[sl_0].s_g_0] / constant[2]]
name[fd].q_t assign[=] binary_operation[binary_operation[name[q_t1] + name[q_t2]] + name[q_t3]]
if compare[name[fd].q_ult greater[>] name[fd].q_t] begin[:]
name[fd].q_ult assign[=] name[fd].q_t
return[name[fd].q_ult] | keyword[def] identifier[deprecated_capacity_meyerhof_and_hanna_1978] ( identifier[sl_0] , identifier[sl_1] , identifier[h0] , identifier[fd] , identifier[verbose] = literal[int] ):
literal[string]
identifier[sl_0] . identifier[nq_factor_0] =(
( identifier[np] . identifier[tan] ( identifier[np] . identifier[pi] / literal[int] + identifier[np] . identifier[deg2rad] ( identifier[sl_0] . identifier[phi] / literal[int] )))** literal[int] * identifier[np] . identifier[exp] ( identifier[np] . identifier[pi] * identifier[np] . identifier[tan] ( identifier[np] . identifier[deg2rad] ( identifier[sl_0] . identifier[phi] ))))
keyword[if] identifier[sl_0] . identifier[phi] == literal[int] :
identifier[sl_0] . identifier[nc_factor_0] = literal[int]
keyword[else] :
identifier[sl_0] . identifier[nc_factor_0] =( identifier[sl_0] . identifier[nq_factor_0] - literal[int] )/ identifier[np] . identifier[tan] ( identifier[np] . identifier[deg2rad] ( identifier[sl_0] . identifier[phi] ))
identifier[sl_0] . identifier[ng_factor_0] =( identifier[sl_0] . identifier[nq_factor_0] - literal[int] )* identifier[np] . identifier[tan] ( literal[int] * identifier[np] . identifier[deg2rad] ( identifier[sl_0] . identifier[phi] ))
identifier[sl_1] . identifier[nq_factor_1] =(
( identifier[np] . identifier[tan] ( identifier[np] . identifier[pi] / literal[int] + identifier[np] . identifier[deg2rad] ( identifier[sl_1] . identifier[phi] / literal[int] )))** literal[int] * identifier[np] . identifier[exp] ( identifier[np] . identifier[pi] * identifier[np] . identifier[tan] ( identifier[np] . identifier[deg2rad] ( identifier[sl_1] . identifier[phi] ))))
keyword[if] identifier[sl_1] . identifier[phi] == literal[int] :
identifier[sl_1] . identifier[nc_factor_1] = literal[int]
keyword[else] :
identifier[sl_1] . identifier[nc_factor_1] =( identifier[sl_1] . identifier[nq_factor_1] - literal[int] )/ identifier[np] . identifier[tan] ( identifier[np] . identifier[deg2rad] ( identifier[sl_1] . identifier[phi] ))
identifier[sl_1] . identifier[ng_factor_1] =( identifier[sl_1] . identifier[nq_factor_1] - literal[int] )* identifier[np] . identifier[tan] ( literal[int] * identifier[np] . identifier[deg2rad] ( identifier[sl_1] . identifier[phi] ))
keyword[if] identifier[verbose] :
identifier[log] ( literal[string] , identifier[sl_1] . identifier[nc_factor_1] )
identifier[log] ( literal[string] , identifier[sl_1] . identifier[nq_factor_1] )
identifier[log] ( literal[string] , identifier[sl_1] . identifier[ng_factor_1] )
identifier[sl_0] . identifier[kp_0] =( identifier[np] . identifier[tan] ( identifier[np] . identifier[pi] / literal[int] + identifier[np] . identifier[deg2rad] ( identifier[sl_0] . identifier[phi] / literal[int] )))** literal[int]
identifier[sl_1] . identifier[kp_1] =( identifier[np] . identifier[tan] ( identifier[np] . identifier[pi] / literal[int] + identifier[np] . identifier[deg2rad] ( identifier[sl_1] . identifier[phi] / literal[int] )))** literal[int]
keyword[if] identifier[sl_0] . identifier[phi] >= literal[int] :
identifier[sl_0] . identifier[s_c_0] = literal[int] + literal[int] * identifier[sl_0] . identifier[kp_0] *( identifier[fd] . identifier[width] / identifier[fd] . identifier[length] )
identifier[sl_0] . identifier[s_q_0] = literal[int] + literal[int] * identifier[sl_0] . identifier[kp_0] *( identifier[fd] . identifier[width] / identifier[fd] . identifier[length] )
keyword[else] :
identifier[sl_0] . identifier[s_c_0] = literal[int] + literal[int] *( identifier[fd] . identifier[width] / identifier[fd] . identifier[length] )
identifier[sl_0] . identifier[s_q_0] = literal[int]
identifier[sl_0] . identifier[s_g_0] = identifier[sl_0] . identifier[s_q_0]
keyword[if] identifier[sl_1] . identifier[phi] >= literal[int] :
identifier[sl_1] . identifier[s_c_1] = literal[int] + literal[int] * identifier[sl_1] . identifier[kp_1] *( identifier[fd] . identifier[width] / identifier[fd] . identifier[length] )
identifier[sl_1] . identifier[s_q_1] = literal[int] + literal[int] * identifier[sl_1] . identifier[kp_1] *( identifier[fd] . identifier[width] / identifier[fd] . identifier[length] )
keyword[else] :
identifier[sl_1] . identifier[s_c_1] = literal[int] + literal[int] *( identifier[fd] . identifier[width] / identifier[fd] . identifier[length] )
identifier[sl_1] . identifier[s_q_1] = literal[int]
identifier[sl_1] . identifier[s_g_1] = identifier[sl_1] . identifier[s_q_1]
literal[string]
identifier[sl_0] . identifier[q_0] =( identifier[sl_0] . identifier[cohesion] * identifier[sl_0] . identifier[nc_factor_0] )+( literal[int] * identifier[sl_0] . identifier[unit_dry_weight] * identifier[fd] . identifier[width] * identifier[sl_0] . identifier[ng_factor_0] )
identifier[sl_1] . identifier[q_1] =( identifier[sl_1] . identifier[cohesion] * identifier[sl_1] . identifier[nc_factor_1] )+( literal[int] * identifier[sl_1] . identifier[unit_dry_weight] * identifier[fd] . identifier[width] * identifier[sl_1] . identifier[ng_factor_1] )
identifier[q1_q0] = identifier[sl_1] . identifier[q_1] / identifier[sl_0] . identifier[q_0]
identifier[x_0] = identifier[np] . identifier[array] ([ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ])
identifier[y_0] = identifier[np] . identifier[array] ([ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ])
identifier[x_2] = identifier[np] . identifier[array] ([ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ])
identifier[y_2] = identifier[np] . identifier[array] ([ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ])
identifier[x_4] = identifier[np] . identifier[array] ([ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ])
identifier[y_4] = identifier[np] . identifier[array] ([ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ])
identifier[x_10] = identifier[np] . identifier[array] ([ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ])
identifier[y_10] = identifier[np] . identifier[array] ([ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ])
identifier[x_int] = identifier[sl_0] . identifier[phi]
keyword[if] identifier[sl_0] . identifier[phi] < literal[int] :
identifier[fd] . identifier[ks] = literal[int]
keyword[else] :
keyword[if] identifier[q1_q0] == literal[int] :
identifier[fd] . identifier[ks] = identifier[np] . identifier[interp] ( identifier[x_int] , identifier[x_0] , identifier[y_0] )
keyword[elif] identifier[q1_q0] == literal[int] :
identifier[fd] . identifier[ks] = identifier[np] . identifier[interp] ( identifier[x_int] , identifier[x_2] , identifier[y_2] )
keyword[elif] identifier[q1_q0] == literal[int] :
identifier[fd] . identifier[ks] = identifier[np] . identifier[interp] ( identifier[x_int] , identifier[x_4] , identifier[y_4] )
keyword[elif] identifier[q1_q0] == literal[int] :
identifier[fd] . identifier[ks] = identifier[np] . identifier[interp] ( identifier[x_int] , identifier[x_10] , identifier[y_10] )
keyword[elif] literal[int] < identifier[q1_q0] < literal[int] :
identifier[ks_1] = identifier[np] . identifier[interp] ( identifier[x_int] , identifier[x_0] , identifier[y_0] )
identifier[ks_2] = identifier[np] . identifier[interp] ( identifier[x_int] , identifier[x_2] , identifier[y_2] )
identifier[fd] . identifier[ks] =((( identifier[ks_2] - identifier[ks_1] )* identifier[q1_q0] )/ literal[int] )+ identifier[ks_1]
keyword[elif] literal[int] < identifier[q1_q0] < literal[int] :
identifier[ks_1] = identifier[np] . identifier[interp] ( identifier[x_int] , identifier[x_2] , identifier[y_2] )
identifier[ks_2] = identifier[np] . identifier[interp] ( identifier[x_int] , identifier[x_4] , identifier[y_4] )
identifier[fd] . identifier[ks] =((( identifier[ks_2] - identifier[ks_1] )*( identifier[q1_q0] - literal[int] ))/ literal[int] )+ identifier[ks_1]
keyword[elif] literal[int] < identifier[q1_q0] < literal[int] :
identifier[ks_1] = identifier[np] . identifier[interp] ( identifier[x_int] , identifier[x_4] , identifier[y_4] )
identifier[ks_2] = identifier[np] . identifier[interp] ( identifier[x_int] , identifier[x_10] , identifier[y_10] )
identifier[fd] . identifier[ks] =((( identifier[ks_2] - identifier[ks_1] )*( identifier[q1_q0] - literal[int] ))/ literal[int] )+ identifier[ks_1]
keyword[else] :
keyword[raise] identifier[DesignError] ( literal[string] % identifier[q1_q0] )
keyword[if] identifier[sl_0] . identifier[cohesion] == literal[int] :
identifier[c1_c0] = literal[int]
keyword[else] :
identifier[c1_c0] = identifier[sl_1] . identifier[cohesion] / identifier[sl_0] . identifier[cohesion]
identifier[x] = identifier[np] . identifier[array] ([ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ])
identifier[y] = identifier[np] . identifier[array] ([ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ])
identifier[ca_c0] = identifier[np] . identifier[interp] ( identifier[c1_c0] , identifier[x] , identifier[y] )
identifier[fd] . identifier[ca] = identifier[ca_c0] * identifier[sl_0] . identifier[cohesion]
identifier[a] = literal[int]
identifier[s] = literal[int]
identifier[r] = literal[int] +( identifier[fd] . identifier[width] / identifier[fd] . identifier[length] )
identifier[q_b1] =( identifier[sl_1] . identifier[cohesion] * identifier[sl_1] . identifier[nc_factor_1] * identifier[sl_1] . identifier[s_c_1] )
identifier[q_b2] =( identifier[sl_0] . identifier[unit_dry_weight] * identifier[h0] * identifier[sl_1] . identifier[nq_factor_1] * identifier[sl_1] . identifier[s_q_1] )
identifier[q_b3] =( identifier[sl_1] . identifier[unit_dry_weight] * identifier[fd] . identifier[width] * identifier[sl_1] . identifier[ng_factor_1] * identifier[sl_1] . identifier[s_g_1] / literal[int] )
identifier[fd] . identifier[q_b] = identifier[q_b1] + identifier[q_b2] + identifier[q_b3]
identifier[fd] . identifier[q_ult4] =( identifier[r] *( literal[int] * identifier[fd] . identifier[ca] *( identifier[h0] - identifier[fd] . identifier[depth] )/ identifier[fd] . identifier[width] )* identifier[a] )
identifier[fd] . identifier[q_ult5] = identifier[r] *( identifier[sl_0] . identifier[unit_dry_weight] *(( identifier[h0] - identifier[fd] . identifier[depth] )** literal[int] ))*( literal[int] +( literal[int] * identifier[fd] . identifier[depth] /( identifier[h0] - identifier[fd] . identifier[depth] )))*(
identifier[fd] . identifier[ks] * identifier[np] . identifier[tan] ( identifier[np] . identifier[deg2rad] ( identifier[sl_0] . identifier[phi] ))/ identifier[fd] . identifier[width] )* identifier[s]
identifier[fd] . identifier[q_ult6] =( identifier[sl_0] . identifier[unit_dry_weight] *( identifier[h0] - identifier[fd] . identifier[depth] ))
identifier[fd] . identifier[q_ult] = identifier[fd] . identifier[q_b] + identifier[fd] . identifier[q_ult4] + identifier[fd] . identifier[q_ult5] - identifier[fd] . identifier[q_ult6]
identifier[q_t1] =( identifier[sl_0] . identifier[cohesion] * identifier[sl_0] . identifier[nc_factor_0] * identifier[sl_0] . identifier[s_c_0] )
identifier[q_t2] =( identifier[sl_0] . identifier[unit_dry_weight] * identifier[fd] . identifier[depth] * identifier[sl_0] . identifier[nq_factor_0] * identifier[sl_0] . identifier[s_q_0] )
identifier[q_t3] =( identifier[sl_0] . identifier[unit_dry_weight] * identifier[fd] . identifier[width] * identifier[sl_0] . identifier[ng_factor_0] * identifier[sl_0] . identifier[s_g_0] / literal[int] )
identifier[fd] . identifier[q_t] = identifier[q_t1] + identifier[q_t2] + identifier[q_t3]
keyword[if] identifier[fd] . identifier[q_ult] > identifier[fd] . identifier[q_t] :
identifier[fd] . identifier[q_ult] = identifier[fd] . identifier[q_t]
keyword[return] identifier[fd] . identifier[q_ult] | def deprecated_capacity_meyerhof_and_hanna_1978(sl_0, sl_1, h0, fd, verbose=0):
"""
Calculates the two-layered foundation capacity according Meyerhof and Hanna (1978)
:param sl_0: Top Soil object
:param sl_1: Base Soil object
:param h0: Height of top soil layer
:param fd: Foundation object
:param h_l: Horizontal load parallel to length
:param h_b: Horizontal load parallel to width
:param vertical_load: Vertical load
:param verbose: verbosity
:return: ultimate bearing stress
"""
# UNFINISHED, this code is copied from the Meyerhoff method
# horizontal_load = np.sqrt(h_l ** 2 + h_b ** 2)
sl_0.nq_factor_0 = np.tan(np.pi / 4 + np.deg2rad(sl_0.phi / 2)) ** 2 * np.exp(np.pi * np.tan(np.deg2rad(sl_0.phi)))
if sl_0.phi == 0:
sl_0.nc_factor_0 = 5.14 # depends on [control=['if'], data=[]]
else:
sl_0.nc_factor_0 = (sl_0.nq_factor_0 - 1) / np.tan(np.deg2rad(sl_0.phi))
sl_0.ng_factor_0 = (sl_0.nq_factor_0 - 1) * np.tan(1.4 * np.deg2rad(sl_0.phi))
sl_1.nq_factor_1 = np.tan(np.pi / 4 + np.deg2rad(sl_1.phi / 2)) ** 2 * np.exp(np.pi * np.tan(np.deg2rad(sl_1.phi)))
if sl_1.phi == 0:
sl_1.nc_factor_1 = 5.14 # depends on [control=['if'], data=[]]
else:
sl_1.nc_factor_1 = (sl_1.nq_factor_1 - 1) / np.tan(np.deg2rad(sl_1.phi))
sl_1.ng_factor_1 = (sl_1.nq_factor_1 - 1) * np.tan(1.4 * np.deg2rad(sl_1.phi))
if verbose:
log('Nc: ', sl_1.nc_factor_1)
log('Nq: ', sl_1.nq_factor_1)
log('Ng: ', sl_1.ng_factor_1) # depends on [control=['if'], data=[]]
sl_0.kp_0 = np.tan(np.pi / 4 + np.deg2rad(sl_0.phi / 2)) ** 2
sl_1.kp_1 = np.tan(np.pi / 4 + np.deg2rad(sl_1.phi / 2)) ** 2
# shape factors
# s_c = 1 + 0.2 * kp * fd.width / fd.length
if sl_0.phi >= 10:
sl_0.s_c_0 = 1 + 0.2 * sl_0.kp_0 * (fd.width / fd.length)
sl_0.s_q_0 = 1.0 + 0.1 * sl_0.kp_0 * (fd.width / fd.length) # depends on [control=['if'], data=[]]
else:
sl_0.s_c_0 = 1 + 0.2 * (fd.width / fd.length)
sl_0.s_q_0 = 1.0
sl_0.s_g_0 = sl_0.s_q_0
if sl_1.phi >= 10:
sl_1.s_c_1 = 1 + 0.2 * sl_1.kp_1 * (fd.width / fd.length)
sl_1.s_q_1 = 1.0 + 0.1 * sl_1.kp_1 * (fd.width / fd.length) # depends on [control=['if'], data=[]]
else:
sl_1.s_c_1 = 1 + 0.2 * (fd.width / fd.length)
sl_1.s_q_1 = 1.0
sl_1.s_g_1 = sl_1.s_q_1
'\n # depth factors\n d_c = 1 + 0.2 * np.sqrt(kp) * fd.depth / fd.width\n if sl_0.phi > 10:\n d_q = 1 + 0.1 * np.sqrt(kp) * fd.depth / fd.width\n else:\n d_q = 1.0\n d_g = d_q\n\n # inclination factors:\n theta_load = np.arctan(horizontal_load / vertical_load)\n i_c = (1 - theta_load / (np.pi * 0.5)) ** 2\n i_q = i_c\n if sl_0.phi > 0:\n i_g = (1 - theta_load / sl_0.phi_r) ** 2\n else:\n i_g = 0\n '
# stress at footing base:
# q_d = sl_0.unit_dry_weight_0 * fd.depth
# ks
sl_0.q_0 = sl_0.cohesion * sl_0.nc_factor_0 + 0.5 * sl_0.unit_dry_weight * fd.width * sl_0.ng_factor_0
sl_1.q_1 = sl_1.cohesion * sl_1.nc_factor_1 + 0.5 * sl_1.unit_dry_weight * fd.width * sl_1.ng_factor_1
q1_q0 = sl_1.q_1 / sl_0.q_0
x_0 = np.array([0, 20.08, 22.42, 25.08, 27.58, 30.08, 32.58, 34.92, 37.83, 40.0, 42.67, 45.0, 47.0, 49.75])
y_0 = np.array([0.93, 0.93, 0.93, 0.93, 1.01, 1.17, 1.32, 1.56, 1.87, 2.26, 2.72, 3.35, 3.81, 4.82])
x_2 = np.array([0, 20.08, 22.5, 25.08, 27.58, 30.08, 32.5, 35.0, 37.67, 40.17, 42.67, 45.0, 47.5, 50.0])
y_2 = np.array([1.55, 1.55, 1.71, 1.86, 2.1, 2.33, 2.72, 3.11, 3.81, 4.43, 5.28, 6.14, 7.46, 9.24])
x_4 = np.array([0, 20.0, 22.51, 25.1, 27.69, 30.11, 32.45, 35.04, 37.88, 40.14, 42.65, 45.07, 47.33, 50.08])
y_4 = np.array([2.49, 2.49, 2.64, 2.87, 3.34, 3.81, 4.43, 5.2, 6.29, 7.38, 9.01, 11.11, 14.29, 19.34])
x_10 = np.array([0, 20.0, 22.5, 25.08, 28.0, 30.0, 32.5, 34.92, 37.5, 40.17, 42.42, 45.0, 47.17, 50.08])
y_10 = np.array([3.27, 3.27, 3.74, 4.44, 5.37, 6.07, 7.16, 8.33, 10.04, 12.3, 15.95, 21.17, 27.47, 40.0])
x_int = sl_0.phi
if sl_0.phi < 1:
fd.ks = 0 # depends on [control=['if'], data=[]]
elif q1_q0 == 0:
fd.ks = np.interp(x_int, x_0, y_0) # depends on [control=['if'], data=[]]
elif q1_q0 == 0.2:
fd.ks = np.interp(x_int, x_2, y_2) # depends on [control=['if'], data=[]]
elif q1_q0 == 0.4:
fd.ks = np.interp(x_int, x_4, y_4) # depends on [control=['if'], data=[]]
elif q1_q0 == 1.0:
fd.ks = np.interp(x_int, x_10, y_10) # depends on [control=['if'], data=[]]
elif 0 < q1_q0 < 0.2:
ks_1 = np.interp(x_int, x_0, y_0)
ks_2 = np.interp(x_int, x_2, y_2)
fd.ks = (ks_2 - ks_1) * q1_q0 / 0.2 + ks_1 # depends on [control=['if'], data=['q1_q0']]
elif 0.2 < q1_q0 < 0.4:
ks_1 = np.interp(x_int, x_2, y_2)
ks_2 = np.interp(x_int, x_4, y_4)
fd.ks = (ks_2 - ks_1) * (q1_q0 - 0.2) / 0.2 + ks_1 # depends on [control=['if'], data=['q1_q0']]
elif 0.4 < q1_q0 < 1.0:
ks_1 = np.interp(x_int, x_4, y_4)
ks_2 = np.interp(x_int, x_10, y_10)
fd.ks = (ks_2 - ks_1) * (q1_q0 - 0.4) / 0.6 + ks_1 # depends on [control=['if'], data=['q1_q0']]
else:
raise DesignError("Cannot compute 'ks', bearing ratio out-of-range (q1_q0 = %.3f) required: 0-1." % q1_q0)
# ca
if sl_0.cohesion == 0:
c1_c0 = 0 # depends on [control=['if'], data=[]]
else:
c1_c0 = sl_1.cohesion / sl_0.cohesion
x = np.array([0.0, 0.082, 0.206, 0.298, 0.404, 0.509, 0.598, 0.685, 0.772])
y = np.array([0.627, 0.7, 0.794, 0.855, 0.912, 0.948, 0.968, 0.983, 0.997])
ca_c0 = np.interp(c1_c0, x, y)
fd.ca = ca_c0 * sl_0.cohesion
# Capacity
a = 1 # ????
s = 1 # ????
r = 1 + fd.width / fd.length
q_b1 = sl_1.cohesion * sl_1.nc_factor_1 * sl_1.s_c_1
q_b2 = sl_0.unit_dry_weight * h0 * sl_1.nq_factor_1 * sl_1.s_q_1
q_b3 = sl_1.unit_dry_weight * fd.width * sl_1.ng_factor_1 * sl_1.s_g_1 / 2
fd.q_b = q_b1 + q_b2 + q_b3
fd.q_ult4 = r * (2 * fd.ca * (h0 - fd.depth) / fd.width) * a
fd.q_ult5 = r * (sl_0.unit_dry_weight * (h0 - fd.depth) ** 2) * (1 + 2 * fd.depth / (h0 - fd.depth)) * (fd.ks * np.tan(np.deg2rad(sl_0.phi)) / fd.width) * s
fd.q_ult6 = sl_0.unit_dry_weight * (h0 - fd.depth)
fd.q_ult = fd.q_b + fd.q_ult4 + fd.q_ult5 - fd.q_ult6
# maximum value (qu <= qt)
q_t1 = sl_0.cohesion * sl_0.nc_factor_0 * sl_0.s_c_0
q_t2 = sl_0.unit_dry_weight * fd.depth * sl_0.nq_factor_0 * sl_0.s_q_0
q_t3 = sl_0.unit_dry_weight * fd.width * sl_0.ng_factor_0 * sl_0.s_g_0 / 2
fd.q_t = q_t1 + q_t2 + q_t3
if fd.q_ult > fd.q_t:
fd.q_ult = fd.q_t # depends on [control=['if'], data=[]]
return fd.q_ult |
def calibrate_pols(cross_pols,diode_cross,obsI=None,onefile=True,feedtype='l',**kwargs):
'''
Write Stokes-calibrated filterbank file for a given observation
with a calibrator noise diode measurement on the source
Parameters
----------
cross_pols : string
Path to cross polarization filterbank file (rawspec output) for observation to be calibrated
diode_cross : string
Path to cross polarization filterbank file of noise diode measurement ON the target
obsI : string
Path to Stokes I filterbank file of main observation (only needed if onefile=False)
onefile : boolean
True writes all calibrated Stokes parameters to a single filterbank file,
False writes four separate files
feedtype : 'l' or 'c'
Basis of antenna dipoles. 'c' for circular, 'l' for linear
'''
#Obtain time sample length, frequencies, and noise diode data
obs = Waterfall(diode_cross,max_load=150)
cross_dat = obs.data
tsamp = obs.header['tsamp']
#Calculate number of coarse channels in the noise diode measurement (usually 8)
dio_ncoarse = obs.calc_n_coarse_chan()
dio_nchans = obs.header['nchans']
dio_chan_per_coarse = dio_nchans/dio_ncoarse
obs = None
Idat,Qdat,Udat,Vdat = get_stokes(cross_dat,feedtype)
cross_dat = None
#Calculate differential gain and phase from noise diode measurements
print('Calculating Mueller Matrix variables')
gams = gain_offsets(Idat,Qdat,Udat,Vdat,tsamp,dio_chan_per_coarse,feedtype,**kwargs)
psis = phase_offsets(Idat,Qdat,Udat,Vdat,tsamp,dio_chan_per_coarse,feedtype,**kwargs)
#Clear data arrays to save memory
Idat = None
Qdat = None
Udat = None
Vdat = None
#Get corrected Stokes parameters
print('Opening '+cross_pols)
cross_obs = Waterfall(cross_pols,max_load=150)
obs_ncoarse = cross_obs.calc_n_coarse_chan()
obs_nchans = cross_obs.header['nchans']
obs_chan_per_coarse = obs_nchans/obs_ncoarse
print('Grabbing Stokes parameters')
I,Q,U,V = get_stokes(cross_obs.data,feedtype)
print('Applying Mueller Matrix')
I,Q,U,V = apply_Mueller(I,Q,U,V,gams,psis,obs_chan_per_coarse,feedtype)
#Use onefile (default) to produce one filterbank file containing all Stokes information
if onefile==True:
cross_obs.data[:,0,:] = np.squeeze(I)
cross_obs.data[:,1,:] = np.squeeze(Q)
cross_obs.data[:,2,:] = np.squeeze(U)
cross_obs.data[:,3,:] = np.squeeze(V)
cross_obs.write_to_fil(cross_pols[:-15]+'.SIQUV.polcal.fil')
print('Calibrated Stokes parameters written to '+cross_pols[:-15]+'.SIQUV.polcal.fil')
return
#Write corrected Stokes parameters to four filterbank files if onefile==False
obs = Waterfall(obs_I,max_load=150)
obs.data = I
obs.write_to_fil(cross_pols[:-15]+'.SI.polcal.fil') #assuming file is named *.cross_pols.fil
print('Calibrated Stokes I written to '+cross_pols[:-15]+'.SI.polcal.fil')
obs.data = Q
obs.write_to_fil(cross_pols[:-15]+'.Q.polcal.fil') #assuming file is named *.cross_pols.fil
print('Calibrated Stokes Q written to '+cross_pols[:-15]+'.Q.polcal.fil')
obs.data = U
obs.write_to_fil(cross_pols[:-15]+'.U.polcal.fil') #assuming file is named *.cross_pols.fil
print('Calibrated Stokes U written to '+cross_pols[:-15]+'.U.polcal.fil')
obs.data = V
obs.write_to_fil(cross_pols[:-15]+'.V.polcal.fil') #assuming file is named *.cross_pols.fil
print('Calibrated Stokes V written to '+cross_pols[:-15]+'.V.polcal.fil') | def function[calibrate_pols, parameter[cross_pols, diode_cross, obsI, onefile, feedtype]]:
constant[
Write Stokes-calibrated filterbank file for a given observation
with a calibrator noise diode measurement on the source
Parameters
----------
cross_pols : string
Path to cross polarization filterbank file (rawspec output) for observation to be calibrated
diode_cross : string
Path to cross polarization filterbank file of noise diode measurement ON the target
obsI : string
Path to Stokes I filterbank file of main observation (only needed if onefile=False)
onefile : boolean
True writes all calibrated Stokes parameters to a single filterbank file,
False writes four separate files
feedtype : 'l' or 'c'
Basis of antenna dipoles. 'c' for circular, 'l' for linear
]
variable[obs] assign[=] call[name[Waterfall], parameter[name[diode_cross]]]
variable[cross_dat] assign[=] name[obs].data
variable[tsamp] assign[=] call[name[obs].header][constant[tsamp]]
variable[dio_ncoarse] assign[=] call[name[obs].calc_n_coarse_chan, parameter[]]
variable[dio_nchans] assign[=] call[name[obs].header][constant[nchans]]
variable[dio_chan_per_coarse] assign[=] binary_operation[name[dio_nchans] / name[dio_ncoarse]]
variable[obs] assign[=] constant[None]
<ast.Tuple object at 0x7da204623250> assign[=] call[name[get_stokes], parameter[name[cross_dat], name[feedtype]]]
variable[cross_dat] assign[=] constant[None]
call[name[print], parameter[constant[Calculating Mueller Matrix variables]]]
variable[gams] assign[=] call[name[gain_offsets], parameter[name[Idat], name[Qdat], name[Udat], name[Vdat], name[tsamp], name[dio_chan_per_coarse], name[feedtype]]]
variable[psis] assign[=] call[name[phase_offsets], parameter[name[Idat], name[Qdat], name[Udat], name[Vdat], name[tsamp], name[dio_chan_per_coarse], name[feedtype]]]
variable[Idat] assign[=] constant[None]
variable[Qdat] assign[=] constant[None]
variable[Udat] assign[=] constant[None]
variable[Vdat] assign[=] constant[None]
call[name[print], parameter[binary_operation[constant[Opening ] + name[cross_pols]]]]
variable[cross_obs] assign[=] call[name[Waterfall], parameter[name[cross_pols]]]
variable[obs_ncoarse] assign[=] call[name[cross_obs].calc_n_coarse_chan, parameter[]]
variable[obs_nchans] assign[=] call[name[cross_obs].header][constant[nchans]]
variable[obs_chan_per_coarse] assign[=] binary_operation[name[obs_nchans] / name[obs_ncoarse]]
call[name[print], parameter[constant[Grabbing Stokes parameters]]]
<ast.Tuple object at 0x7da204623880> assign[=] call[name[get_stokes], parameter[name[cross_obs].data, name[feedtype]]]
call[name[print], parameter[constant[Applying Mueller Matrix]]]
<ast.Tuple object at 0x7da204621d20> assign[=] call[name[apply_Mueller], parameter[name[I], name[Q], name[U], name[V], name[gams], name[psis], name[obs_chan_per_coarse], name[feedtype]]]
if compare[name[onefile] equal[==] constant[True]] begin[:]
call[name[cross_obs].data][tuple[[<ast.Slice object at 0x7da204620c40>, <ast.Constant object at 0x7da204620700>, <ast.Slice object at 0x7da204622e90>]]] assign[=] call[name[np].squeeze, parameter[name[I]]]
call[name[cross_obs].data][tuple[[<ast.Slice object at 0x7da204622740>, <ast.Constant object at 0x7da204623970>, <ast.Slice object at 0x7da204623c10>]]] assign[=] call[name[np].squeeze, parameter[name[Q]]]
call[name[cross_obs].data][tuple[[<ast.Slice object at 0x7da204620130>, <ast.Constant object at 0x7da204620880>, <ast.Slice object at 0x7da2046232e0>]]] assign[=] call[name[np].squeeze, parameter[name[U]]]
call[name[cross_obs].data][tuple[[<ast.Slice object at 0x7da2046227d0>, <ast.Constant object at 0x7da204620640>, <ast.Slice object at 0x7da204621ff0>]]] assign[=] call[name[np].squeeze, parameter[name[V]]]
call[name[cross_obs].write_to_fil, parameter[binary_operation[call[name[cross_pols]][<ast.Slice object at 0x7da204346e90>] + constant[.SIQUV.polcal.fil]]]]
call[name[print], parameter[binary_operation[binary_operation[constant[Calibrated Stokes parameters written to ] + call[name[cross_pols]][<ast.Slice object at 0x7da2043478b0>]] + constant[.SIQUV.polcal.fil]]]]
return[None]
variable[obs] assign[=] call[name[Waterfall], parameter[name[obs_I]]]
name[obs].data assign[=] name[I]
call[name[obs].write_to_fil, parameter[binary_operation[call[name[cross_pols]][<ast.Slice object at 0x7da2043442b0>] + constant[.SI.polcal.fil]]]]
call[name[print], parameter[binary_operation[binary_operation[constant[Calibrated Stokes I written to ] + call[name[cross_pols]][<ast.Slice object at 0x7da18f58cb50>]] + constant[.SI.polcal.fil]]]]
name[obs].data assign[=] name[Q]
call[name[obs].write_to_fil, parameter[binary_operation[call[name[cross_pols]][<ast.Slice object at 0x7da18f58f4f0>] + constant[.Q.polcal.fil]]]]
call[name[print], parameter[binary_operation[binary_operation[constant[Calibrated Stokes Q written to ] + call[name[cross_pols]][<ast.Slice object at 0x7da18f58c8e0>]] + constant[.Q.polcal.fil]]]]
name[obs].data assign[=] name[U]
call[name[obs].write_to_fil, parameter[binary_operation[call[name[cross_pols]][<ast.Slice object at 0x7da18f58fe50>] + constant[.U.polcal.fil]]]]
call[name[print], parameter[binary_operation[binary_operation[constant[Calibrated Stokes U written to ] + call[name[cross_pols]][<ast.Slice object at 0x7da18f58f7f0>]] + constant[.U.polcal.fil]]]]
name[obs].data assign[=] name[V]
call[name[obs].write_to_fil, parameter[binary_operation[call[name[cross_pols]][<ast.Slice object at 0x7da18f58fbb0>] + constant[.V.polcal.fil]]]]
call[name[print], parameter[binary_operation[binary_operation[constant[Calibrated Stokes V written to ] + call[name[cross_pols]][<ast.Slice object at 0x7da18f58d330>]] + constant[.V.polcal.fil]]]] | keyword[def] identifier[calibrate_pols] ( identifier[cross_pols] , identifier[diode_cross] , identifier[obsI] = keyword[None] , identifier[onefile] = keyword[True] , identifier[feedtype] = literal[string] ,** identifier[kwargs] ):
literal[string]
identifier[obs] = identifier[Waterfall] ( identifier[diode_cross] , identifier[max_load] = literal[int] )
identifier[cross_dat] = identifier[obs] . identifier[data]
identifier[tsamp] = identifier[obs] . identifier[header] [ literal[string] ]
identifier[dio_ncoarse] = identifier[obs] . identifier[calc_n_coarse_chan] ()
identifier[dio_nchans] = identifier[obs] . identifier[header] [ literal[string] ]
identifier[dio_chan_per_coarse] = identifier[dio_nchans] / identifier[dio_ncoarse]
identifier[obs] = keyword[None]
identifier[Idat] , identifier[Qdat] , identifier[Udat] , identifier[Vdat] = identifier[get_stokes] ( identifier[cross_dat] , identifier[feedtype] )
identifier[cross_dat] = keyword[None]
identifier[print] ( literal[string] )
identifier[gams] = identifier[gain_offsets] ( identifier[Idat] , identifier[Qdat] , identifier[Udat] , identifier[Vdat] , identifier[tsamp] , identifier[dio_chan_per_coarse] , identifier[feedtype] ,** identifier[kwargs] )
identifier[psis] = identifier[phase_offsets] ( identifier[Idat] , identifier[Qdat] , identifier[Udat] , identifier[Vdat] , identifier[tsamp] , identifier[dio_chan_per_coarse] , identifier[feedtype] ,** identifier[kwargs] )
identifier[Idat] = keyword[None]
identifier[Qdat] = keyword[None]
identifier[Udat] = keyword[None]
identifier[Vdat] = keyword[None]
identifier[print] ( literal[string] + identifier[cross_pols] )
identifier[cross_obs] = identifier[Waterfall] ( identifier[cross_pols] , identifier[max_load] = literal[int] )
identifier[obs_ncoarse] = identifier[cross_obs] . identifier[calc_n_coarse_chan] ()
identifier[obs_nchans] = identifier[cross_obs] . identifier[header] [ literal[string] ]
identifier[obs_chan_per_coarse] = identifier[obs_nchans] / identifier[obs_ncoarse]
identifier[print] ( literal[string] )
identifier[I] , identifier[Q] , identifier[U] , identifier[V] = identifier[get_stokes] ( identifier[cross_obs] . identifier[data] , identifier[feedtype] )
identifier[print] ( literal[string] )
identifier[I] , identifier[Q] , identifier[U] , identifier[V] = identifier[apply_Mueller] ( identifier[I] , identifier[Q] , identifier[U] , identifier[V] , identifier[gams] , identifier[psis] , identifier[obs_chan_per_coarse] , identifier[feedtype] )
keyword[if] identifier[onefile] == keyword[True] :
identifier[cross_obs] . identifier[data] [:, literal[int] ,:]= identifier[np] . identifier[squeeze] ( identifier[I] )
identifier[cross_obs] . identifier[data] [:, literal[int] ,:]= identifier[np] . identifier[squeeze] ( identifier[Q] )
identifier[cross_obs] . identifier[data] [:, literal[int] ,:]= identifier[np] . identifier[squeeze] ( identifier[U] )
identifier[cross_obs] . identifier[data] [:, literal[int] ,:]= identifier[np] . identifier[squeeze] ( identifier[V] )
identifier[cross_obs] . identifier[write_to_fil] ( identifier[cross_pols] [:- literal[int] ]+ literal[string] )
identifier[print] ( literal[string] + identifier[cross_pols] [:- literal[int] ]+ literal[string] )
keyword[return]
identifier[obs] = identifier[Waterfall] ( identifier[obs_I] , identifier[max_load] = literal[int] )
identifier[obs] . identifier[data] = identifier[I]
identifier[obs] . identifier[write_to_fil] ( identifier[cross_pols] [:- literal[int] ]+ literal[string] )
identifier[print] ( literal[string] + identifier[cross_pols] [:- literal[int] ]+ literal[string] )
identifier[obs] . identifier[data] = identifier[Q]
identifier[obs] . identifier[write_to_fil] ( identifier[cross_pols] [:- literal[int] ]+ literal[string] )
identifier[print] ( literal[string] + identifier[cross_pols] [:- literal[int] ]+ literal[string] )
identifier[obs] . identifier[data] = identifier[U]
identifier[obs] . identifier[write_to_fil] ( identifier[cross_pols] [:- literal[int] ]+ literal[string] )
identifier[print] ( literal[string] + identifier[cross_pols] [:- literal[int] ]+ literal[string] )
identifier[obs] . identifier[data] = identifier[V]
identifier[obs] . identifier[write_to_fil] ( identifier[cross_pols] [:- literal[int] ]+ literal[string] )
identifier[print] ( literal[string] + identifier[cross_pols] [:- literal[int] ]+ literal[string] ) | def calibrate_pols(cross_pols, diode_cross, obsI=None, onefile=True, feedtype='l', **kwargs):
"""
Write Stokes-calibrated filterbank file for a given observation
with a calibrator noise diode measurement on the source
Parameters
----------
cross_pols : string
Path to cross polarization filterbank file (rawspec output) for observation to be calibrated
diode_cross : string
Path to cross polarization filterbank file of noise diode measurement ON the target
obsI : string
Path to Stokes I filterbank file of main observation (only needed if onefile=False)
onefile : boolean
True writes all calibrated Stokes parameters to a single filterbank file,
False writes four separate files
feedtype : 'l' or 'c'
Basis of antenna dipoles. 'c' for circular, 'l' for linear
"""
#Obtain time sample length, frequencies, and noise diode data
obs = Waterfall(diode_cross, max_load=150)
cross_dat = obs.data
tsamp = obs.header['tsamp']
#Calculate number of coarse channels in the noise diode measurement (usually 8)
dio_ncoarse = obs.calc_n_coarse_chan()
dio_nchans = obs.header['nchans']
dio_chan_per_coarse = dio_nchans / dio_ncoarse
obs = None
(Idat, Qdat, Udat, Vdat) = get_stokes(cross_dat, feedtype)
cross_dat = None
#Calculate differential gain and phase from noise diode measurements
print('Calculating Mueller Matrix variables')
gams = gain_offsets(Idat, Qdat, Udat, Vdat, tsamp, dio_chan_per_coarse, feedtype, **kwargs)
psis = phase_offsets(Idat, Qdat, Udat, Vdat, tsamp, dio_chan_per_coarse, feedtype, **kwargs)
#Clear data arrays to save memory
Idat = None
Qdat = None
Udat = None
Vdat = None
#Get corrected Stokes parameters
print('Opening ' + cross_pols)
cross_obs = Waterfall(cross_pols, max_load=150)
obs_ncoarse = cross_obs.calc_n_coarse_chan()
obs_nchans = cross_obs.header['nchans']
obs_chan_per_coarse = obs_nchans / obs_ncoarse
print('Grabbing Stokes parameters')
(I, Q, U, V) = get_stokes(cross_obs.data, feedtype)
print('Applying Mueller Matrix')
(I, Q, U, V) = apply_Mueller(I, Q, U, V, gams, psis, obs_chan_per_coarse, feedtype)
#Use onefile (default) to produce one filterbank file containing all Stokes information
if onefile == True:
cross_obs.data[:, 0, :] = np.squeeze(I)
cross_obs.data[:, 1, :] = np.squeeze(Q)
cross_obs.data[:, 2, :] = np.squeeze(U)
cross_obs.data[:, 3, :] = np.squeeze(V)
cross_obs.write_to_fil(cross_pols[:-15] + '.SIQUV.polcal.fil')
print('Calibrated Stokes parameters written to ' + cross_pols[:-15] + '.SIQUV.polcal.fil')
return # depends on [control=['if'], data=[]]
#Write corrected Stokes parameters to four filterbank files if onefile==False
obs = Waterfall(obs_I, max_load=150)
obs.data = I
obs.write_to_fil(cross_pols[:-15] + '.SI.polcal.fil') #assuming file is named *.cross_pols.fil
print('Calibrated Stokes I written to ' + cross_pols[:-15] + '.SI.polcal.fil')
obs.data = Q
obs.write_to_fil(cross_pols[:-15] + '.Q.polcal.fil') #assuming file is named *.cross_pols.fil
print('Calibrated Stokes Q written to ' + cross_pols[:-15] + '.Q.polcal.fil')
obs.data = U
obs.write_to_fil(cross_pols[:-15] + '.U.polcal.fil') #assuming file is named *.cross_pols.fil
print('Calibrated Stokes U written to ' + cross_pols[:-15] + '.U.polcal.fil')
obs.data = V
obs.write_to_fil(cross_pols[:-15] + '.V.polcal.fil') #assuming file is named *.cross_pols.fil
print('Calibrated Stokes V written to ' + cross_pols[:-15] + '.V.polcal.fil') |
def vs(*args, **kwargs):
"""
exactly like v, but doesn't print variable names or file positions
.. seealso:: ss()
"""
if not args:
raise ValueError("you didn't pass any arguments to print out")
with Reflect.context(args, **kwargs) as r:
instance = V_CLASS(r, stream, **kwargs)
instance.writeline(instance.value()) | def function[vs, parameter[]]:
constant[
exactly like v, but doesn't print variable names or file positions
.. seealso:: ss()
]
if <ast.UnaryOp object at 0x7da18eb55fc0> begin[:]
<ast.Raise object at 0x7da18eb55420>
with call[name[Reflect].context, parameter[name[args]]] begin[:]
variable[instance] assign[=] call[name[V_CLASS], parameter[name[r], name[stream]]]
call[name[instance].writeline, parameter[call[name[instance].value, parameter[]]]] | keyword[def] identifier[vs] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[args] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[with] identifier[Reflect] . identifier[context] ( identifier[args] ,** identifier[kwargs] ) keyword[as] identifier[r] :
identifier[instance] = identifier[V_CLASS] ( identifier[r] , identifier[stream] ,** identifier[kwargs] )
identifier[instance] . identifier[writeline] ( identifier[instance] . identifier[value] ()) | def vs(*args, **kwargs):
"""
exactly like v, but doesn't print variable names or file positions
.. seealso:: ss()
"""
if not args:
raise ValueError("you didn't pass any arguments to print out") # depends on [control=['if'], data=[]]
with Reflect.context(args, **kwargs) as r:
instance = V_CLASS(r, stream, **kwargs)
instance.writeline(instance.value()) # depends on [control=['with'], data=['r']] |
def _xor_block(a, b):
""" XOR two blocks of equal length. """
return ''.join([chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b)]) | def function[_xor_block, parameter[a, b]]:
constant[ XOR two blocks of equal length. ]
return[call[constant[].join, parameter[<ast.ListComp object at 0x7da18bcc9180>]]] | keyword[def] identifier[_xor_block] ( identifier[a] , identifier[b] ):
literal[string]
keyword[return] literal[string] . identifier[join] ([ identifier[chr] ( identifier[ord] ( identifier[x] )^ identifier[ord] ( identifier[y] )) keyword[for] ( identifier[x] , identifier[y] ) keyword[in] identifier[zip] ( identifier[a] , identifier[b] )]) | def _xor_block(a, b):
""" XOR two blocks of equal length. """
return ''.join([chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b)]) |
def run_cmd_unit(self, sentry_unit, cmd):
"""Run a command on a unit, return the output and exit code."""
output, code = sentry_unit.run(cmd)
if code == 0:
self.log.debug('{} `{}` command returned {} '
'(OK)'.format(sentry_unit.info['unit_name'],
cmd, code))
else:
msg = ('{} `{}` command returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code, output))
amulet.raise_status(amulet.FAIL, msg=msg)
return str(output), code | def function[run_cmd_unit, parameter[self, sentry_unit, cmd]]:
constant[Run a command on a unit, return the output and exit code.]
<ast.Tuple object at 0x7da2054a5b40> assign[=] call[name[sentry_unit].run, parameter[name[cmd]]]
if compare[name[code] equal[==] constant[0]] begin[:]
call[name[self].log.debug, parameter[call[constant[{} `{}` command returned {} (OK)].format, parameter[call[name[sentry_unit].info][constant[unit_name]], name[cmd], name[code]]]]]
return[tuple[[<ast.Call object at 0x7da18f09ea70>, <ast.Name object at 0x7da18f09d9f0>]]] | keyword[def] identifier[run_cmd_unit] ( identifier[self] , identifier[sentry_unit] , identifier[cmd] ):
literal[string]
identifier[output] , identifier[code] = identifier[sentry_unit] . identifier[run] ( identifier[cmd] )
keyword[if] identifier[code] == literal[int] :
identifier[self] . identifier[log] . identifier[debug] ( literal[string]
literal[string] . identifier[format] ( identifier[sentry_unit] . identifier[info] [ literal[string] ],
identifier[cmd] , identifier[code] ))
keyword[else] :
identifier[msg] =( literal[string]
literal[string] . identifier[format] ( identifier[sentry_unit] . identifier[info] [ literal[string] ],
identifier[cmd] , identifier[code] , identifier[output] ))
identifier[amulet] . identifier[raise_status] ( identifier[amulet] . identifier[FAIL] , identifier[msg] = identifier[msg] )
keyword[return] identifier[str] ( identifier[output] ), identifier[code] | def run_cmd_unit(self, sentry_unit, cmd):
"""Run a command on a unit, return the output and exit code."""
(output, code) = sentry_unit.run(cmd)
if code == 0:
self.log.debug('{} `{}` command returned {} (OK)'.format(sentry_unit.info['unit_name'], cmd, code)) # depends on [control=['if'], data=['code']]
else:
msg = '{} `{}` command returned {} {}'.format(sentry_unit.info['unit_name'], cmd, code, output)
amulet.raise_status(amulet.FAIL, msg=msg)
return (str(output), code) |
def _on_invalid_compile_dependency(self, dep, compile_target, contexts):
"""Decide whether to continue searching for invalid targets to use in the execution graph.
If a necessary dep is a rsc-then-zinc dep and the root is a zinc-only one, continue to recurse
because otherwise we'll drop the path between Zinc compile of the zinc-only target and a Zinc
compile of a transitive rsc-then-zinc dependency.
This is only an issue for graphs like J -> S1 -> S2, where J is a zinc-only target,
S1/2 are rsc-then-zinc targets and S2 must be on the classpath to compile J successfully.
"""
return contexts[compile_target][0].workflow.resolve_for_enum_variant({
'zinc-only': lambda : contexts[dep][0].workflow == self.JvmCompileWorkflowType.rsc_then_zinc,
'rsc-then-zinc': lambda : False
})() | def function[_on_invalid_compile_dependency, parameter[self, dep, compile_target, contexts]]:
constant[Decide whether to continue searching for invalid targets to use in the execution graph.
If a necessary dep is a rsc-then-zinc dep and the root is a zinc-only one, continue to recurse
because otherwise we'll drop the path between Zinc compile of the zinc-only target and a Zinc
compile of a transitive rsc-then-zinc dependency.
This is only an issue for graphs like J -> S1 -> S2, where J is a zinc-only target,
S1/2 are rsc-then-zinc targets and S2 must be on the classpath to compile J successfully.
]
return[call[call[call[call[name[contexts]][name[compile_target]]][constant[0]].workflow.resolve_for_enum_variant, parameter[dictionary[[<ast.Constant object at 0x7da1b1e68940>, <ast.Constant object at 0x7da1b1e6a830>], [<ast.Lambda object at 0x7da1b1e6b130>, <ast.Lambda object at 0x7da1b1e681f0>]]]], parameter[]]] | keyword[def] identifier[_on_invalid_compile_dependency] ( identifier[self] , identifier[dep] , identifier[compile_target] , identifier[contexts] ):
literal[string]
keyword[return] identifier[contexts] [ identifier[compile_target] ][ literal[int] ]. identifier[workflow] . identifier[resolve_for_enum_variant] ({
literal[string] : keyword[lambda] : identifier[contexts] [ identifier[dep] ][ literal[int] ]. identifier[workflow] == identifier[self] . identifier[JvmCompileWorkflowType] . identifier[rsc_then_zinc] ,
literal[string] : keyword[lambda] : keyword[False]
})() | def _on_invalid_compile_dependency(self, dep, compile_target, contexts):
"""Decide whether to continue searching for invalid targets to use in the execution graph.
If a necessary dep is a rsc-then-zinc dep and the root is a zinc-only one, continue to recurse
because otherwise we'll drop the path between Zinc compile of the zinc-only target and a Zinc
compile of a transitive rsc-then-zinc dependency.
This is only an issue for graphs like J -> S1 -> S2, where J is a zinc-only target,
S1/2 are rsc-then-zinc targets and S2 must be on the classpath to compile J successfully.
"""
return contexts[compile_target][0].workflow.resolve_for_enum_variant({'zinc-only': lambda : contexts[dep][0].workflow == self.JvmCompileWorkflowType.rsc_then_zinc, 'rsc-then-zinc': lambda : False})() |
def set_prev_sonorus(self):
"""
Выставляет параметры звонкости/глухости, для предыдущих согласных.
"""
prev = self.get_prev_letter()
if not prev:
return
if not (self.is_consonant() and prev.is_consonant()):
return
if self.is_sonorus() and self.is_paired_consonant():
if self._get_sound(False) != 'в':
prev.set_sonorus(True)
return
if self.is_deaf():
prev.set_sonorus(False)
return | def function[set_prev_sonorus, parameter[self]]:
constant[
Выставляет параметры звонкости/глухости, для предыдущих согласных.
]
variable[prev] assign[=] call[name[self].get_prev_letter, parameter[]]
if <ast.UnaryOp object at 0x7da18fe93430> begin[:]
return[None]
if <ast.UnaryOp object at 0x7da1b2370130> begin[:]
return[None]
if <ast.BoolOp object at 0x7da1b2373100> begin[:]
if compare[call[name[self]._get_sound, parameter[constant[False]]] not_equal[!=] constant[в]] begin[:]
call[name[prev].set_sonorus, parameter[constant[True]]]
return[None]
if call[name[self].is_deaf, parameter[]] begin[:]
call[name[prev].set_sonorus, parameter[constant[False]]]
return[None] | keyword[def] identifier[set_prev_sonorus] ( identifier[self] ):
literal[string]
identifier[prev] = identifier[self] . identifier[get_prev_letter] ()
keyword[if] keyword[not] identifier[prev] :
keyword[return]
keyword[if] keyword[not] ( identifier[self] . identifier[is_consonant] () keyword[and] identifier[prev] . identifier[is_consonant] ()):
keyword[return]
keyword[if] identifier[self] . identifier[is_sonorus] () keyword[and] identifier[self] . identifier[is_paired_consonant] ():
keyword[if] identifier[self] . identifier[_get_sound] ( keyword[False] )!= literal[string] :
identifier[prev] . identifier[set_sonorus] ( keyword[True] )
keyword[return]
keyword[if] identifier[self] . identifier[is_deaf] ():
identifier[prev] . identifier[set_sonorus] ( keyword[False] )
keyword[return] | def set_prev_sonorus(self):
"""
Выставляет параметры звонкости/глухости, для предыдущих согласных.
"""
prev = self.get_prev_letter()
if not prev:
return # depends on [control=['if'], data=[]]
if not (self.is_consonant() and prev.is_consonant()):
return # depends on [control=['if'], data=[]]
if self.is_sonorus() and self.is_paired_consonant():
if self._get_sound(False) != 'в':
prev.set_sonorus(True) # depends on [control=['if'], data=[]]
return # depends on [control=['if'], data=[]]
if self.is_deaf():
prev.set_sonorus(False)
return # depends on [control=['if'], data=[]] |
async def do_load_aldb(self, args):
"""Load the All-Link database for a device.
Usage:
load_aldb address|all [clear_prior]
Arguments:
address: NSTEON address of the device
all: Load the All-Link database for all devices
clear_prior: y|n
y - Clear the prior data and start fresh.
n - Keep the prior data and only apply changes
Default is y
This does NOT write to the database so no changes are made to the
device with this command.
"""
params = args.split()
addr = None
clear = True
try:
addr = params[0]
except IndexError:
_LOGGING.error('Device address required.')
self.do_help('load_aldb')
try:
clear_prior = params[1]
_LOGGING.info('param clear_prior %s', clear_prior)
if clear_prior.lower() == 'y':
clear = True
elif clear_prior.lower() == 'n':
clear = False
else:
_LOGGING.error('Invalid value for parameter `clear_prior`')
_LOGGING.error('Valid values are `y` or `n`')
except IndexError:
pass
if addr:
if addr.lower() == 'all':
await self.tools.load_all_aldb(clear)
else:
await self.tools.load_device_aldb(addr, clear)
else:
self.do_help('load_aldb') | <ast.AsyncFunctionDef object at 0x7da1b1a23730> | keyword[async] keyword[def] identifier[do_load_aldb] ( identifier[self] , identifier[args] ):
literal[string]
identifier[params] = identifier[args] . identifier[split] ()
identifier[addr] = keyword[None]
identifier[clear] = keyword[True]
keyword[try] :
identifier[addr] = identifier[params] [ literal[int] ]
keyword[except] identifier[IndexError] :
identifier[_LOGGING] . identifier[error] ( literal[string] )
identifier[self] . identifier[do_help] ( literal[string] )
keyword[try] :
identifier[clear_prior] = identifier[params] [ literal[int] ]
identifier[_LOGGING] . identifier[info] ( literal[string] , identifier[clear_prior] )
keyword[if] identifier[clear_prior] . identifier[lower] ()== literal[string] :
identifier[clear] = keyword[True]
keyword[elif] identifier[clear_prior] . identifier[lower] ()== literal[string] :
identifier[clear] = keyword[False]
keyword[else] :
identifier[_LOGGING] . identifier[error] ( literal[string] )
identifier[_LOGGING] . identifier[error] ( literal[string] )
keyword[except] identifier[IndexError] :
keyword[pass]
keyword[if] identifier[addr] :
keyword[if] identifier[addr] . identifier[lower] ()== literal[string] :
keyword[await] identifier[self] . identifier[tools] . identifier[load_all_aldb] ( identifier[clear] )
keyword[else] :
keyword[await] identifier[self] . identifier[tools] . identifier[load_device_aldb] ( identifier[addr] , identifier[clear] )
keyword[else] :
identifier[self] . identifier[do_help] ( literal[string] ) | async def do_load_aldb(self, args):
"""Load the All-Link database for a device.
Usage:
load_aldb address|all [clear_prior]
Arguments:
address: NSTEON address of the device
all: Load the All-Link database for all devices
clear_prior: y|n
y - Clear the prior data and start fresh.
n - Keep the prior data and only apply changes
Default is y
This does NOT write to the database so no changes are made to the
device with this command.
"""
params = args.split()
addr = None
clear = True
try:
addr = params[0] # depends on [control=['try'], data=[]]
except IndexError:
_LOGGING.error('Device address required.')
self.do_help('load_aldb') # depends on [control=['except'], data=[]]
try:
clear_prior = params[1]
_LOGGING.info('param clear_prior %s', clear_prior)
if clear_prior.lower() == 'y':
clear = True # depends on [control=['if'], data=[]]
elif clear_prior.lower() == 'n':
clear = False # depends on [control=['if'], data=[]]
else:
_LOGGING.error('Invalid value for parameter `clear_prior`')
_LOGGING.error('Valid values are `y` or `n`') # depends on [control=['try'], data=[]]
except IndexError:
pass # depends on [control=['except'], data=[]]
if addr:
if addr.lower() == 'all':
await self.tools.load_all_aldb(clear) # depends on [control=['if'], data=[]]
else:
await self.tools.load_device_aldb(addr, clear) # depends on [control=['if'], data=[]]
else:
self.do_help('load_aldb') |
def width(self):
"""
:return: The width of the data component in the buffer in number of pixels.
"""
try:
if self._part:
value = self._part.width
else:
value = self._buffer.width
except InvalidParameterException:
value = self._node_map.Width.value
return value | def function[width, parameter[self]]:
constant[
:return: The width of the data component in the buffer in number of pixels.
]
<ast.Try object at 0x7da20e955660>
return[name[value]] | keyword[def] identifier[width] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[if] identifier[self] . identifier[_part] :
identifier[value] = identifier[self] . identifier[_part] . identifier[width]
keyword[else] :
identifier[value] = identifier[self] . identifier[_buffer] . identifier[width]
keyword[except] identifier[InvalidParameterException] :
identifier[value] = identifier[self] . identifier[_node_map] . identifier[Width] . identifier[value]
keyword[return] identifier[value] | def width(self):
"""
:return: The width of the data component in the buffer in number of pixels.
"""
try:
if self._part:
value = self._part.width # depends on [control=['if'], data=[]]
else:
value = self._buffer.width # depends on [control=['try'], data=[]]
except InvalidParameterException:
value = self._node_map.Width.value # depends on [control=['except'], data=[]]
return value |
def collections(self):
"""Get list of collections."""
# if cache server is configured, load collection from there
if self.cache:
return self.cache.get(
self.app.config['COLLECTIONS_CACHE_KEY']) | def function[collections, parameter[self]]:
constant[Get list of collections.]
if name[self].cache begin[:]
return[call[name[self].cache.get, parameter[call[name[self].app.config][constant[COLLECTIONS_CACHE_KEY]]]]] | keyword[def] identifier[collections] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[cache] :
keyword[return] identifier[self] . identifier[cache] . identifier[get] (
identifier[self] . identifier[app] . identifier[config] [ literal[string] ]) | def collections(self):
"""Get list of collections."""
# if cache server is configured, load collection from there
if self.cache:
return self.cache.get(self.app.config['COLLECTIONS_CACHE_KEY']) # depends on [control=['if'], data=[]] |
def append(self, row):
"""Append a result row and check its length.
>>> x = Results(['title', 'type'])
>>> x.append(('Konosuba', 'TV'))
>>> x
Results(['title', 'type'], [('Konosuba', 'TV')])
>>> x.append(('Konosuba',))
Traceback (most recent call last):
...
ValueError: Wrong result row length
"""
row = tuple(row)
if len(row) != self.table_width:
raise ValueError('Wrong result row length')
self.results.append(row) | def function[append, parameter[self, row]]:
constant[Append a result row and check its length.
>>> x = Results(['title', 'type'])
>>> x.append(('Konosuba', 'TV'))
>>> x
Results(['title', 'type'], [('Konosuba', 'TV')])
>>> x.append(('Konosuba',))
Traceback (most recent call last):
...
ValueError: Wrong result row length
]
variable[row] assign[=] call[name[tuple], parameter[name[row]]]
if compare[call[name[len], parameter[name[row]]] not_equal[!=] name[self].table_width] begin[:]
<ast.Raise object at 0x7da2045644c0>
call[name[self].results.append, parameter[name[row]]] | keyword[def] identifier[append] ( identifier[self] , identifier[row] ):
literal[string]
identifier[row] = identifier[tuple] ( identifier[row] )
keyword[if] identifier[len] ( identifier[row] )!= identifier[self] . identifier[table_width] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[results] . identifier[append] ( identifier[row] ) | def append(self, row):
"""Append a result row and check its length.
>>> x = Results(['title', 'type'])
>>> x.append(('Konosuba', 'TV'))
>>> x
Results(['title', 'type'], [('Konosuba', 'TV')])
>>> x.append(('Konosuba',))
Traceback (most recent call last):
...
ValueError: Wrong result row length
"""
row = tuple(row)
if len(row) != self.table_width:
raise ValueError('Wrong result row length') # depends on [control=['if'], data=[]]
self.results.append(row) |
def getSchedulesBuffer(self, period_group):
""" Return the requested tariff schedule :class:`~ekmmeters.SerialBlock` for meter.
Args:
period_group (int): A :class:`~ekmmeters.ReadSchedules` value.
Returns:
SerialBlock: The requested tariff schedules for meter.
"""
empty_return = SerialBlock()
if period_group == ReadSchedules.Schedules_1_To_4:
return self.m_schd_1_to_4
elif period_group == ReadSchedules.Schedules_5_To_6:
return self.m_schd_5_to_6
else:
return empty_return | def function[getSchedulesBuffer, parameter[self, period_group]]:
constant[ Return the requested tariff schedule :class:`~ekmmeters.SerialBlock` for meter.
Args:
period_group (int): A :class:`~ekmmeters.ReadSchedules` value.
Returns:
SerialBlock: The requested tariff schedules for meter.
]
variable[empty_return] assign[=] call[name[SerialBlock], parameter[]]
if compare[name[period_group] equal[==] name[ReadSchedules].Schedules_1_To_4] begin[:]
return[name[self].m_schd_1_to_4] | keyword[def] identifier[getSchedulesBuffer] ( identifier[self] , identifier[period_group] ):
literal[string]
identifier[empty_return] = identifier[SerialBlock] ()
keyword[if] identifier[period_group] == identifier[ReadSchedules] . identifier[Schedules_1_To_4] :
keyword[return] identifier[self] . identifier[m_schd_1_to_4]
keyword[elif] identifier[period_group] == identifier[ReadSchedules] . identifier[Schedules_5_To_6] :
keyword[return] identifier[self] . identifier[m_schd_5_to_6]
keyword[else] :
keyword[return] identifier[empty_return] | def getSchedulesBuffer(self, period_group):
""" Return the requested tariff schedule :class:`~ekmmeters.SerialBlock` for meter.
Args:
period_group (int): A :class:`~ekmmeters.ReadSchedules` value.
Returns:
SerialBlock: The requested tariff schedules for meter.
"""
empty_return = SerialBlock()
if period_group == ReadSchedules.Schedules_1_To_4:
return self.m_schd_1_to_4 # depends on [control=['if'], data=[]]
elif period_group == ReadSchedules.Schedules_5_To_6:
return self.m_schd_5_to_6 # depends on [control=['if'], data=[]]
else:
return empty_return |
def cythonize(*args, **kwargs):
'''
dirty hack, only import cythonize at the time you use it.
if you don't write Cython extension,
you won't fail even if you don't install Cython.
'''
global cythonize
from Cython.Build import cythonize
return cythonize(*args, **kwargs) | def function[cythonize, parameter[]]:
constant[
dirty hack, only import cythonize at the time you use it.
if you don't write Cython extension,
you won't fail even if you don't install Cython.
]
<ast.Global object at 0x7da18f00c850>
from relative_module[Cython.Build] import module[cythonize]
return[call[name[cythonize], parameter[<ast.Starred object at 0x7da18f00c6d0>]]] | keyword[def] identifier[cythonize] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[global] identifier[cythonize]
keyword[from] identifier[Cython] . identifier[Build] keyword[import] identifier[cythonize]
keyword[return] identifier[cythonize] (* identifier[args] ,** identifier[kwargs] ) | def cythonize(*args, **kwargs):
"""
dirty hack, only import cythonize at the time you use it.
if you don't write Cython extension,
you won't fail even if you don't install Cython.
"""
global cythonize
from Cython.Build import cythonize
return cythonize(*args, **kwargs) |
def _set_operation(self, v, load=False):
"""
Setter method for operation, mapped from YANG variable /rule/operation (rule-operation)
If this variable is read-only (config: false) in the
source YANG file, then _set_operation is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_operation() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'read-write': {}, u'read-only': {}},), is_leaf=True, yang_name="operation", rest_name="operation", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Operation for the command', u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='rule-operation', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """operation must be of a type compatible with rule-operation""",
'defined-type': "brocade-aaa:rule-operation",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'read-write': {}, u'read-only': {}},), is_leaf=True, yang_name="operation", rest_name="operation", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Operation for the command', u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='rule-operation', is_config=True)""",
})
self.__operation = t
if hasattr(self, '_set'):
self._set() | def function[_set_operation, parameter[self, v, load]]:
constant[
Setter method for operation, mapped from YANG variable /rule/operation (rule-operation)
If this variable is read-only (config: false) in the
source YANG file, then _set_operation is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_operation() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da20c76f2b0>
name[self].__operation assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_operation] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[RestrictedClassType] ( identifier[base_type] = identifier[unicode] , identifier[restriction_type] = literal[string] , identifier[restriction_arg] ={ literal[string] :{}, literal[string] :{}},), identifier[is_leaf] = keyword[True] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : keyword[None] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__operation] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_operation(self, v, load=False):
"""
Setter method for operation, mapped from YANG variable /rule/operation (rule-operation)
If this variable is read-only (config: false) in the
source YANG file, then _set_operation is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_operation() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_type='dict_key', restriction_arg={u'read-write': {}, u'read-only': {}}), is_leaf=True, yang_name='operation', rest_name='operation', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Operation for the command', u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='rule-operation', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'operation must be of a type compatible with rule-operation', 'defined-type': 'brocade-aaa:rule-operation', 'generated-type': 'YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u\'read-write\': {}, u\'read-only\': {}},), is_leaf=True, yang_name="operation", rest_name="operation", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Operation for the command\', u\'cli-optional-in-sequence\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-aaa\', defining_module=\'brocade-aaa\', yang_type=\'rule-operation\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__operation = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def main(argv=None):
'''
Handles command line arguments and gets things started.
:param argv: List of arguments, as if specified on the command-line.
If None, ``sys.argv[1:]`` is used instead.
:type argv: list of str
'''
# Get command line arguments
parser = argparse.ArgumentParser(
description="Takes one or more file paths and reports their detected \
encodings",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
conflict_handler='resolve')
parser.add_argument('input',
help='File whose encoding we would like to determine.',
type=argparse.FileType('rb'), nargs='*',
default=[sys.stdin])
parser.add_argument('--version', action='version',
version='%(prog)s {0}'.format(__version__))
args = parser.parse_args(argv)
for f in args.input:
if f.isatty():
print("You are running chardetect interactively. Press " +
"CTRL-D twice at the start of a blank line to signal the " +
"end of your input. If you want help, run chardetect " +
"--help\n", file=sys.stderr)
print(description_of(f, f.name)) | def function[main, parameter[argv]]:
constant[
Handles command line arguments and gets things started.
:param argv: List of arguments, as if specified on the command-line.
If None, ``sys.argv[1:]`` is used instead.
:type argv: list of str
]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[parser].add_argument, parameter[constant[input]]]
call[name[parser].add_argument, parameter[constant[--version]]]
variable[args] assign[=] call[name[parser].parse_args, parameter[name[argv]]]
for taget[name[f]] in starred[name[args].input] begin[:]
if call[name[f].isatty, parameter[]] begin[:]
call[name[print], parameter[binary_operation[binary_operation[binary_operation[constant[You are running chardetect interactively. Press ] + constant[CTRL-D twice at the start of a blank line to signal the ]] + constant[end of your input. If you want help, run chardetect ]] + constant[--help
]]]]
call[name[print], parameter[call[name[description_of], parameter[name[f], name[f].name]]]] | keyword[def] identifier[main] ( identifier[argv] = keyword[None] ):
literal[string]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] (
identifier[description] = literal[string] ,
identifier[formatter_class] = identifier[argparse] . identifier[ArgumentDefaultsHelpFormatter] ,
identifier[conflict_handler] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] ,
identifier[help] = literal[string] ,
identifier[type] = identifier[argparse] . identifier[FileType] ( literal[string] ), identifier[nargs] = literal[string] ,
identifier[default] =[ identifier[sys] . identifier[stdin] ])
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] ,
identifier[version] = literal[string] . identifier[format] ( identifier[__version__] ))
identifier[args] = identifier[parser] . identifier[parse_args] ( identifier[argv] )
keyword[for] identifier[f] keyword[in] identifier[args] . identifier[input] :
keyword[if] identifier[f] . identifier[isatty] ():
identifier[print] ( literal[string] +
literal[string] +
literal[string] +
literal[string] , identifier[file] = identifier[sys] . identifier[stderr] )
identifier[print] ( identifier[description_of] ( identifier[f] , identifier[f] . identifier[name] )) | def main(argv=None):
"""
Handles command line arguments and gets things started.
:param argv: List of arguments, as if specified on the command-line.
If None, ``sys.argv[1:]`` is used instead.
:type argv: list of str
"""
# Get command line arguments
parser = argparse.ArgumentParser(description='Takes one or more file paths and reports their detected encodings', formatter_class=argparse.ArgumentDefaultsHelpFormatter, conflict_handler='resolve')
parser.add_argument('input', help='File whose encoding we would like to determine.', type=argparse.FileType('rb'), nargs='*', default=[sys.stdin])
parser.add_argument('--version', action='version', version='%(prog)s {0}'.format(__version__))
args = parser.parse_args(argv)
for f in args.input:
if f.isatty():
print('You are running chardetect interactively. Press ' + 'CTRL-D twice at the start of a blank line to signal the ' + 'end of your input. If you want help, run chardetect ' + '--help\n', file=sys.stderr) # depends on [control=['if'], data=[]]
print(description_of(f, f.name)) # depends on [control=['for'], data=['f']] |
def _AddArtifactNodesAndEdges(self, artifact_names):
"""Add the artifact nodes to the graph.
For every artifact that has to be collected, add a node to the dependency
graph.
The edges represent the dependencies. An artifact has outgoing edges to the
attributes it provides and incoming edges from attributes it depends on.
Initially, only artifacts without incoming edges are reachable. An artifact
becomes reachable if all of its dependencies are reachable.
Args:
artifact_names: List of names of the artifacts to collect.
"""
for artifact_name in artifact_names:
self.graph[artifact_name] = self.Node(is_artifact=True)
rdf_artifact = artifact_registry.REGISTRY.GetArtifact(artifact_name)
self._AddDependencyEdges(rdf_artifact)
self._AddProvidesEdges(rdf_artifact) | def function[_AddArtifactNodesAndEdges, parameter[self, artifact_names]]:
constant[Add the artifact nodes to the graph.
For every artifact that has to be collected, add a node to the dependency
graph.
The edges represent the dependencies. An artifact has outgoing edges to the
attributes it provides and incoming edges from attributes it depends on.
Initially, only artifacts without incoming edges are reachable. An artifact
becomes reachable if all of its dependencies are reachable.
Args:
artifact_names: List of names of the artifacts to collect.
]
for taget[name[artifact_name]] in starred[name[artifact_names]] begin[:]
call[name[self].graph][name[artifact_name]] assign[=] call[name[self].Node, parameter[]]
variable[rdf_artifact] assign[=] call[name[artifact_registry].REGISTRY.GetArtifact, parameter[name[artifact_name]]]
call[name[self]._AddDependencyEdges, parameter[name[rdf_artifact]]]
call[name[self]._AddProvidesEdges, parameter[name[rdf_artifact]]] | keyword[def] identifier[_AddArtifactNodesAndEdges] ( identifier[self] , identifier[artifact_names] ):
literal[string]
keyword[for] identifier[artifact_name] keyword[in] identifier[artifact_names] :
identifier[self] . identifier[graph] [ identifier[artifact_name] ]= identifier[self] . identifier[Node] ( identifier[is_artifact] = keyword[True] )
identifier[rdf_artifact] = identifier[artifact_registry] . identifier[REGISTRY] . identifier[GetArtifact] ( identifier[artifact_name] )
identifier[self] . identifier[_AddDependencyEdges] ( identifier[rdf_artifact] )
identifier[self] . identifier[_AddProvidesEdges] ( identifier[rdf_artifact] ) | def _AddArtifactNodesAndEdges(self, artifact_names):
"""Add the artifact nodes to the graph.
For every artifact that has to be collected, add a node to the dependency
graph.
The edges represent the dependencies. An artifact has outgoing edges to the
attributes it provides and incoming edges from attributes it depends on.
Initially, only artifacts without incoming edges are reachable. An artifact
becomes reachable if all of its dependencies are reachable.
Args:
artifact_names: List of names of the artifacts to collect.
"""
for artifact_name in artifact_names:
self.graph[artifact_name] = self.Node(is_artifact=True)
rdf_artifact = artifact_registry.REGISTRY.GetArtifact(artifact_name)
self._AddDependencyEdges(rdf_artifact)
self._AddProvidesEdges(rdf_artifact) # depends on [control=['for'], data=['artifact_name']] |
def walnut_data():
"""Tomographic X-ray data of a walnut.
Notes
-----
See the article `Tomographic X-ray data of a walnut`_ for further
information.
See Also
--------
walnut_geometry
References
----------
.. _Tomographic X-ray data of a walnut: https://arxiv.org/abs/1502.04064
"""
# TODO: Store data in some ODL controlled url
url = 'http://www.fips.fi/dataset/CT_walnut_v1/FullSizeSinograms.mat'
dct = get_data('walnut.mat', subset=DATA_SUBSET, url=url)
# Change axes to match ODL definitions
data = np.swapaxes(dct['sinogram1200'], 0, 1)[::-1, ::-1]
data = data.astype('float')
# Very crude gain normalization
data = -np.log(data / np.max(data, axis=1)[:, None])
return data | def function[walnut_data, parameter[]]:
constant[Tomographic X-ray data of a walnut.
Notes
-----
See the article `Tomographic X-ray data of a walnut`_ for further
information.
See Also
--------
walnut_geometry
References
----------
.. _Tomographic X-ray data of a walnut: https://arxiv.org/abs/1502.04064
]
variable[url] assign[=] constant[http://www.fips.fi/dataset/CT_walnut_v1/FullSizeSinograms.mat]
variable[dct] assign[=] call[name[get_data], parameter[constant[walnut.mat]]]
variable[data] assign[=] call[call[name[np].swapaxes, parameter[call[name[dct]][constant[sinogram1200]], constant[0], constant[1]]]][tuple[[<ast.Slice object at 0x7da1b20b4580>, <ast.Slice object at 0x7da1b20b4730>]]]
variable[data] assign[=] call[name[data].astype, parameter[constant[float]]]
variable[data] assign[=] <ast.UnaryOp object at 0x7da1b20b4190>
return[name[data]] | keyword[def] identifier[walnut_data] ():
literal[string]
identifier[url] = literal[string]
identifier[dct] = identifier[get_data] ( literal[string] , identifier[subset] = identifier[DATA_SUBSET] , identifier[url] = identifier[url] )
identifier[data] = identifier[np] . identifier[swapaxes] ( identifier[dct] [ literal[string] ], literal[int] , literal[int] )[::- literal[int] ,::- literal[int] ]
identifier[data] = identifier[data] . identifier[astype] ( literal[string] )
identifier[data] =- identifier[np] . identifier[log] ( identifier[data] / identifier[np] . identifier[max] ( identifier[data] , identifier[axis] = literal[int] )[:, keyword[None] ])
keyword[return] identifier[data] | def walnut_data():
"""Tomographic X-ray data of a walnut.
Notes
-----
See the article `Tomographic X-ray data of a walnut`_ for further
information.
See Also
--------
walnut_geometry
References
----------
.. _Tomographic X-ray data of a walnut: https://arxiv.org/abs/1502.04064
"""
# TODO: Store data in some ODL controlled url
url = 'http://www.fips.fi/dataset/CT_walnut_v1/FullSizeSinograms.mat'
dct = get_data('walnut.mat', subset=DATA_SUBSET, url=url)
# Change axes to match ODL definitions
data = np.swapaxes(dct['sinogram1200'], 0, 1)[::-1, ::-1]
data = data.astype('float')
# Very crude gain normalization
data = -np.log(data / np.max(data, axis=1)[:, None])
return data |
def publish(self, event_type: str, event_data: dict = None):
"""Publish an event associated with the scheduling object.
Note:
Ideally publish should not be used directly but by other methods
which perform actions on the object.
Args:
event_type (str): Type of event.
event_data (dict, optional): Event data.
"""
import inspect
import os.path
_stack = inspect.stack()
_origin = os.path.basename(_stack[3][1]) + '::' + \
_stack[3][3]+'::L{}'.format(_stack[3][2])
publish(event_type=event_type,
event_data=event_data,
object_type=self._type,
object_id=self._id,
object_key=self._key,
origin=_origin) | def function[publish, parameter[self, event_type, event_data]]:
constant[Publish an event associated with the scheduling object.
Note:
Ideally publish should not be used directly but by other methods
which perform actions on the object.
Args:
event_type (str): Type of event.
event_data (dict, optional): Event data.
]
import module[inspect]
import module[os.path]
variable[_stack] assign[=] call[name[inspect].stack, parameter[]]
variable[_origin] assign[=] binary_operation[binary_operation[binary_operation[call[name[os].path.basename, parameter[call[call[name[_stack]][constant[3]]][constant[1]]]] + constant[::]] + call[call[name[_stack]][constant[3]]][constant[3]]] + call[constant[::L{}].format, parameter[call[call[name[_stack]][constant[3]]][constant[2]]]]]
call[name[publish], parameter[]] | keyword[def] identifier[publish] ( identifier[self] , identifier[event_type] : identifier[str] , identifier[event_data] : identifier[dict] = keyword[None] ):
literal[string]
keyword[import] identifier[inspect]
keyword[import] identifier[os] . identifier[path]
identifier[_stack] = identifier[inspect] . identifier[stack] ()
identifier[_origin] = identifier[os] . identifier[path] . identifier[basename] ( identifier[_stack] [ literal[int] ][ literal[int] ])+ literal[string] + identifier[_stack] [ literal[int] ][ literal[int] ]+ literal[string] . identifier[format] ( identifier[_stack] [ literal[int] ][ literal[int] ])
identifier[publish] ( identifier[event_type] = identifier[event_type] ,
identifier[event_data] = identifier[event_data] ,
identifier[object_type] = identifier[self] . identifier[_type] ,
identifier[object_id] = identifier[self] . identifier[_id] ,
identifier[object_key] = identifier[self] . identifier[_key] ,
identifier[origin] = identifier[_origin] ) | def publish(self, event_type: str, event_data: dict=None):
"""Publish an event associated with the scheduling object.
Note:
Ideally publish should not be used directly but by other methods
which perform actions on the object.
Args:
event_type (str): Type of event.
event_data (dict, optional): Event data.
"""
import inspect
import os.path
_stack = inspect.stack()
_origin = os.path.basename(_stack[3][1]) + '::' + _stack[3][3] + '::L{}'.format(_stack[3][2])
publish(event_type=event_type, event_data=event_data, object_type=self._type, object_id=self._id, object_key=self._key, origin=_origin) |
def get_in_segmentlistdict(self, process_ids = None):
"""
Return a segmentlistdict mapping instrument to in segment
list. If process_ids is a sequence of process IDs, then
only rows with matching IDs are included otherwise all rows
are included.
Note: the result is not coalesced, each segmentlist
contains the segments listed for that instrument as they
appeared in the table.
"""
seglists = segments.segmentlistdict()
for row in self:
ifos = row.instruments or (None,)
if process_ids is None or row.process_id in process_ids:
seglists.extend(dict((ifo, segments.segmentlist([row.in_segment])) for ifo in ifos))
return seglists | def function[get_in_segmentlistdict, parameter[self, process_ids]]:
constant[
Return a segmentlistdict mapping instrument to in segment
list. If process_ids is a sequence of process IDs, then
only rows with matching IDs are included otherwise all rows
are included.
Note: the result is not coalesced, each segmentlist
contains the segments listed for that instrument as they
appeared in the table.
]
variable[seglists] assign[=] call[name[segments].segmentlistdict, parameter[]]
for taget[name[row]] in starred[name[self]] begin[:]
variable[ifos] assign[=] <ast.BoolOp object at 0x7da1b0b712a0>
if <ast.BoolOp object at 0x7da1b0b72590> begin[:]
call[name[seglists].extend, parameter[call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1b0b73070>]]]]
return[name[seglists]] | keyword[def] identifier[get_in_segmentlistdict] ( identifier[self] , identifier[process_ids] = keyword[None] ):
literal[string]
identifier[seglists] = identifier[segments] . identifier[segmentlistdict] ()
keyword[for] identifier[row] keyword[in] identifier[self] :
identifier[ifos] = identifier[row] . identifier[instruments] keyword[or] ( keyword[None] ,)
keyword[if] identifier[process_ids] keyword[is] keyword[None] keyword[or] identifier[row] . identifier[process_id] keyword[in] identifier[process_ids] :
identifier[seglists] . identifier[extend] ( identifier[dict] (( identifier[ifo] , identifier[segments] . identifier[segmentlist] ([ identifier[row] . identifier[in_segment] ])) keyword[for] identifier[ifo] keyword[in] identifier[ifos] ))
keyword[return] identifier[seglists] | def get_in_segmentlistdict(self, process_ids=None):
"""
Return a segmentlistdict mapping instrument to in segment
list. If process_ids is a sequence of process IDs, then
only rows with matching IDs are included otherwise all rows
are included.
Note: the result is not coalesced, each segmentlist
contains the segments listed for that instrument as they
appeared in the table.
"""
seglists = segments.segmentlistdict()
for row in self:
ifos = row.instruments or (None,)
if process_ids is None or row.process_id in process_ids:
seglists.extend(dict(((ifo, segments.segmentlist([row.in_segment])) for ifo in ifos))) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['row']]
return seglists |
def Prandtl_von_Karman_Nikuradse(Re):
r'''Calculates Darcy friction factor for smooth pipes as a function of
Reynolds number from the Prandtl-von Karman Nikuradse equation as given
in [1]_ and [2]_:
.. math::
\frac{1}{\sqrt{f}} = -2\log_{10}\left(\frac{2.51}{Re\sqrt{f}}\right)
Parameters
----------
Re : float
Reynolds number, [-]
Returns
-------
fd : float
Darcy friction factor [-]
Notes
-----
This equation is often stated as follows; the correct constant is not 0.8,
but 2log10(2.51) or approximately 0.7993474:
.. math::
\frac{1}{\sqrt{f}}\approx 2\log_{10}(\text{Re}\sqrt{f})-0.8
This function is calculable for all Reynolds numbers between 1E151 and
1E-151. It is solved with the LambertW function from SciPy. The solution is:
.. math::
f_d = \frac{\frac{1}{4}\log_{10}^2}{\left(\text{lambertW}\left(\frac{
\log(10)Re}{2(2.51)}\right)\right)^2}
Examples
--------
>>> Prandtl_von_Karman_Nikuradse(1E7)
0.008102669430874914
References
----------
.. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
.. [2] McGovern, Jim. "Technical Note: Friction Factor Diagrams for Pipe
Flow." Paper, October 3, 2011. http://arrow.dit.ie/engschmecart/28.
'''
# Good 1E150 to 1E-150
c1 = 1.151292546497022842008995727342182103801 # log(10)/2
c2 = 1.325474527619599502640416597148504422899 # log(10)**2/4
return c2/float(lambertw((c1*Re)/2.51).real)**2 | def function[Prandtl_von_Karman_Nikuradse, parameter[Re]]:
constant[Calculates Darcy friction factor for smooth pipes as a function of
Reynolds number from the Prandtl-von Karman Nikuradse equation as given
in [1]_ and [2]_:
.. math::
\frac{1}{\sqrt{f}} = -2\log_{10}\left(\frac{2.51}{Re\sqrt{f}}\right)
Parameters
----------
Re : float
Reynolds number, [-]
Returns
-------
fd : float
Darcy friction factor [-]
Notes
-----
This equation is often stated as follows; the correct constant is not 0.8,
but 2log10(2.51) or approximately 0.7993474:
.. math::
\frac{1}{\sqrt{f}}\approx 2\log_{10}(\text{Re}\sqrt{f})-0.8
This function is calculable for all Reynolds numbers between 1E151 and
1E-151. It is solved with the LambertW function from SciPy. The solution is:
.. math::
f_d = \frac{\frac{1}{4}\log_{10}^2}{\left(\text{lambertW}\left(\frac{
\log(10)Re}{2(2.51)}\right)\right)^2}
Examples
--------
>>> Prandtl_von_Karman_Nikuradse(1E7)
0.008102669430874914
References
----------
.. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
.. [2] McGovern, Jim. "Technical Note: Friction Factor Diagrams for Pipe
Flow." Paper, October 3, 2011. http://arrow.dit.ie/engschmecart/28.
]
variable[c1] assign[=] constant[1.151292546497023]
variable[c2] assign[=] constant[1.3254745276195996]
return[binary_operation[name[c2] / binary_operation[call[name[float], parameter[call[name[lambertw], parameter[binary_operation[binary_operation[name[c1] * name[Re]] / constant[2.51]]]].real]] ** constant[2]]]] | keyword[def] identifier[Prandtl_von_Karman_Nikuradse] ( identifier[Re] ):
literal[string]
identifier[c1] = literal[int]
identifier[c2] = literal[int]
keyword[return] identifier[c2] / identifier[float] ( identifier[lambertw] (( identifier[c1] * identifier[Re] )/ literal[int] ). identifier[real] )** literal[int] | def Prandtl_von_Karman_Nikuradse(Re):
"""Calculates Darcy friction factor for smooth pipes as a function of
Reynolds number from the Prandtl-von Karman Nikuradse equation as given
in [1]_ and [2]_:
.. math::
\\frac{1}{\\sqrt{f}} = -2\\log_{10}\\left(\\frac{2.51}{Re\\sqrt{f}}\\right)
Parameters
----------
Re : float
Reynolds number, [-]
Returns
-------
fd : float
Darcy friction factor [-]
Notes
-----
This equation is often stated as follows; the correct constant is not 0.8,
but 2log10(2.51) or approximately 0.7993474:
.. math::
\\frac{1}{\\sqrt{f}}\\approx 2\\log_{10}(\\text{Re}\\sqrt{f})-0.8
This function is calculable for all Reynolds numbers between 1E151 and
1E-151. It is solved with the LambertW function from SciPy. The solution is:
.. math::
f_d = \\frac{\\frac{1}{4}\\log_{10}^2}{\\left(\\text{lambertW}\\left(\\frac{
\\log(10)Re}{2(2.51)}\\right)\\right)^2}
Examples
--------
>>> Prandtl_von_Karman_Nikuradse(1E7)
0.008102669430874914
References
----------
.. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
.. [2] McGovern, Jim. "Technical Note: Friction Factor Diagrams for Pipe
Flow." Paper, October 3, 2011. http://arrow.dit.ie/engschmecart/28.
"""
# Good 1E150 to 1E-150
c1 = 1.151292546497023 # log(10)/2
c2 = 1.3254745276195996 # log(10)**2/4
return c2 / float(lambertw(c1 * Re / 2.51).real) ** 2 |
def backup(self, container, url):
"""
Backup a container to the given restic url
all restic urls are supported
:param container:
:param url: Url to restic repo
examples
(file:///path/to/restic/?password=<password>)
:return: Json response to the backup job (do .get() to get the snapshot ID
"""
args = {
'container': container,
'url': url,
}
return JSONResponse(self._client.raw('corex.backup', args)) | def function[backup, parameter[self, container, url]]:
constant[
Backup a container to the given restic url
all restic urls are supported
:param container:
:param url: Url to restic repo
examples
(file:///path/to/restic/?password=<password>)
:return: Json response to the backup job (do .get() to get the snapshot ID
]
variable[args] assign[=] dictionary[[<ast.Constant object at 0x7da1b04da860>, <ast.Constant object at 0x7da1b04d8a00>], [<ast.Name object at 0x7da1b04d9f60>, <ast.Name object at 0x7da1b04da6e0>]]
return[call[name[JSONResponse], parameter[call[name[self]._client.raw, parameter[constant[corex.backup], name[args]]]]]] | keyword[def] identifier[backup] ( identifier[self] , identifier[container] , identifier[url] ):
literal[string]
identifier[args] ={
literal[string] : identifier[container] ,
literal[string] : identifier[url] ,
}
keyword[return] identifier[JSONResponse] ( identifier[self] . identifier[_client] . identifier[raw] ( literal[string] , identifier[args] )) | def backup(self, container, url):
"""
Backup a container to the given restic url
all restic urls are supported
:param container:
:param url: Url to restic repo
examples
(file:///path/to/restic/?password=<password>)
:return: Json response to the backup job (do .get() to get the snapshot ID
"""
args = {'container': container, 'url': url}
return JSONResponse(self._client.raw('corex.backup', args)) |
def validate(opts):
"""
Client facing validate function for command line arguments.
Perform validation operations on opts, a namespace created from
command line arguments. Returns True if all validation tests are successful.
If an exception is raised by the validations, this gracefully exits the
program and leaves a message to the user.
Required attributes on opts:
* input: String giving the path to input files
* output: String giving the path to output destination
* wrapper: String specifying the wrapper format
* extensions: List of strings specifying the file extensions to look for
* overwrite: Boolean specifying whether the original input files should
be overridden
:param opts: namespace containing necessary parameters
:return: True, if all tests are successful
"""
try:
return _validate(opts)
except ValidationException as e:
print("Command line arguments failed validation:")
print(e)
sys.exit(0)
except ValueError as e:
print("Incorrect type passed into anchorhub.validate_opts.validate()\n")
print(e)
sys.exit(0) | def function[validate, parameter[opts]]:
constant[
Client facing validate function for command line arguments.
Perform validation operations on opts, a namespace created from
command line arguments. Returns True if all validation tests are successful.
If an exception is raised by the validations, this gracefully exits the
program and leaves a message to the user.
Required attributes on opts:
* input: String giving the path to input files
* output: String giving the path to output destination
* wrapper: String specifying the wrapper format
* extensions: List of strings specifying the file extensions to look for
* overwrite: Boolean specifying whether the original input files should
be overridden
:param opts: namespace containing necessary parameters
:return: True, if all tests are successful
]
<ast.Try object at 0x7da1b0851ff0> | keyword[def] identifier[validate] ( identifier[opts] ):
literal[string]
keyword[try] :
keyword[return] identifier[_validate] ( identifier[opts] )
keyword[except] identifier[ValidationException] keyword[as] identifier[e] :
identifier[print] ( literal[string] )
identifier[print] ( identifier[e] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[except] identifier[ValueError] keyword[as] identifier[e] :
identifier[print] ( literal[string] )
identifier[print] ( identifier[e] )
identifier[sys] . identifier[exit] ( literal[int] ) | def validate(opts):
"""
Client facing validate function for command line arguments.
Perform validation operations on opts, a namespace created from
command line arguments. Returns True if all validation tests are successful.
If an exception is raised by the validations, this gracefully exits the
program and leaves a message to the user.
Required attributes on opts:
* input: String giving the path to input files
* output: String giving the path to output destination
* wrapper: String specifying the wrapper format
* extensions: List of strings specifying the file extensions to look for
* overwrite: Boolean specifying whether the original input files should
be overridden
:param opts: namespace containing necessary parameters
:return: True, if all tests are successful
"""
try:
return _validate(opts) # depends on [control=['try'], data=[]]
except ValidationException as e:
print('Command line arguments failed validation:')
print(e)
sys.exit(0) # depends on [control=['except'], data=['e']]
except ValueError as e:
print('Incorrect type passed into anchorhub.validate_opts.validate()\n')
print(e)
sys.exit(0) # depends on [control=['except'], data=['e']] |
def _get_next_line_indent_delta(self, newline_token):
"""
Returns the change in indentation. The return units are in
indentations rather than spaces/tabs.
If the next line's indent isn't relevant (e.g. it's a comment),
returns None. Since the return value might be 0, the caller should
explicitly check the return type, rather than rely on truthiness.
"""
assert newline_token.type == 'NEWLINE', \
'Can only search for a dent starting from a newline.'
next_line_pos = newline_token.lexpos + len(newline_token.value)
if next_line_pos == len(newline_token.lexer.lexdata):
# Reached end of file
return None
line = newline_token.lexer.lexdata[next_line_pos:].split(os.linesep, 1)[0]
if not line:
return None
lstripped_line = line.lstrip()
lstripped_line_length = len(lstripped_line)
if lstripped_line_length == 0:
# If the next line is composed of only spaces, ignore indentation.
return None
if lstripped_line[0] == '#':
# If it's a comment line, ignore indentation.
return None
indent = len(line) - lstripped_line_length
if indent % 4 > 0:
self.errors.append(
('Indent is not divisible by 4.', newline_token.lexer.lineno))
return None
indent_delta = indent - _indent_level_to_spaces_count(self.cur_indent)
return indent_delta // 4 | def function[_get_next_line_indent_delta, parameter[self, newline_token]]:
constant[
Returns the change in indentation. The return units are in
indentations rather than spaces/tabs.
If the next line's indent isn't relevant (e.g. it's a comment),
returns None. Since the return value might be 0, the caller should
explicitly check the return type, rather than rely on truthiness.
]
assert[compare[name[newline_token].type equal[==] constant[NEWLINE]]]
variable[next_line_pos] assign[=] binary_operation[name[newline_token].lexpos + call[name[len], parameter[name[newline_token].value]]]
if compare[name[next_line_pos] equal[==] call[name[len], parameter[name[newline_token].lexer.lexdata]]] begin[:]
return[constant[None]]
variable[line] assign[=] call[call[call[name[newline_token].lexer.lexdata][<ast.Slice object at 0x7da20c795c30>].split, parameter[name[os].linesep, constant[1]]]][constant[0]]
if <ast.UnaryOp object at 0x7da20c795d80> begin[:]
return[constant[None]]
variable[lstripped_line] assign[=] call[name[line].lstrip, parameter[]]
variable[lstripped_line_length] assign[=] call[name[len], parameter[name[lstripped_line]]]
if compare[name[lstripped_line_length] equal[==] constant[0]] begin[:]
return[constant[None]]
if compare[call[name[lstripped_line]][constant[0]] equal[==] constant[#]] begin[:]
return[constant[None]]
variable[indent] assign[=] binary_operation[call[name[len], parameter[name[line]]] - name[lstripped_line_length]]
if compare[binary_operation[name[indent] <ast.Mod object at 0x7da2590d6920> constant[4]] greater[>] constant[0]] begin[:]
call[name[self].errors.append, parameter[tuple[[<ast.Constant object at 0x7da20c7950f0>, <ast.Attribute object at 0x7da20c7945e0>]]]]
return[constant[None]]
variable[indent_delta] assign[=] binary_operation[name[indent] - call[name[_indent_level_to_spaces_count], parameter[name[self].cur_indent]]]
return[binary_operation[name[indent_delta] <ast.FloorDiv object at 0x7da2590d6bc0> constant[4]]] | keyword[def] identifier[_get_next_line_indent_delta] ( identifier[self] , identifier[newline_token] ):
literal[string]
keyword[assert] identifier[newline_token] . identifier[type] == literal[string] , literal[string]
identifier[next_line_pos] = identifier[newline_token] . identifier[lexpos] + identifier[len] ( identifier[newline_token] . identifier[value] )
keyword[if] identifier[next_line_pos] == identifier[len] ( identifier[newline_token] . identifier[lexer] . identifier[lexdata] ):
keyword[return] keyword[None]
identifier[line] = identifier[newline_token] . identifier[lexer] . identifier[lexdata] [ identifier[next_line_pos] :]. identifier[split] ( identifier[os] . identifier[linesep] , literal[int] )[ literal[int] ]
keyword[if] keyword[not] identifier[line] :
keyword[return] keyword[None]
identifier[lstripped_line] = identifier[line] . identifier[lstrip] ()
identifier[lstripped_line_length] = identifier[len] ( identifier[lstripped_line] )
keyword[if] identifier[lstripped_line_length] == literal[int] :
keyword[return] keyword[None]
keyword[if] identifier[lstripped_line] [ literal[int] ]== literal[string] :
keyword[return] keyword[None]
identifier[indent] = identifier[len] ( identifier[line] )- identifier[lstripped_line_length]
keyword[if] identifier[indent] % literal[int] > literal[int] :
identifier[self] . identifier[errors] . identifier[append] (
( literal[string] , identifier[newline_token] . identifier[lexer] . identifier[lineno] ))
keyword[return] keyword[None]
identifier[indent_delta] = identifier[indent] - identifier[_indent_level_to_spaces_count] ( identifier[self] . identifier[cur_indent] )
keyword[return] identifier[indent_delta] // literal[int] | def _get_next_line_indent_delta(self, newline_token):
"""
Returns the change in indentation. The return units are in
indentations rather than spaces/tabs.
If the next line's indent isn't relevant (e.g. it's a comment),
returns None. Since the return value might be 0, the caller should
explicitly check the return type, rather than rely on truthiness.
"""
assert newline_token.type == 'NEWLINE', 'Can only search for a dent starting from a newline.'
next_line_pos = newline_token.lexpos + len(newline_token.value)
if next_line_pos == len(newline_token.lexer.lexdata):
# Reached end of file
return None # depends on [control=['if'], data=[]]
line = newline_token.lexer.lexdata[next_line_pos:].split(os.linesep, 1)[0]
if not line:
return None # depends on [control=['if'], data=[]]
lstripped_line = line.lstrip()
lstripped_line_length = len(lstripped_line)
if lstripped_line_length == 0:
# If the next line is composed of only spaces, ignore indentation.
return None # depends on [control=['if'], data=[]]
if lstripped_line[0] == '#':
# If it's a comment line, ignore indentation.
return None # depends on [control=['if'], data=[]]
indent = len(line) - lstripped_line_length
if indent % 4 > 0:
self.errors.append(('Indent is not divisible by 4.', newline_token.lexer.lineno))
return None # depends on [control=['if'], data=[]]
indent_delta = indent - _indent_level_to_spaces_count(self.cur_indent)
return indent_delta // 4 |
def dataframe(self):
"""
Returns a ``pandas DataFrame`` containing all other relevant class
properties and values where each index is a different season plus the
career stats.
"""
temp_index = self._index
rows = []
indices = []
if not self._season:
return None
for season in self._season:
self._index = self._season.index(season)
rows.append(self._dataframe_fields())
indices.append(season)
self._index = temp_index
return pd.DataFrame(rows, index=[indices]) | def function[dataframe, parameter[self]]:
constant[
Returns a ``pandas DataFrame`` containing all other relevant class
properties and values where each index is a different season plus the
career stats.
]
variable[temp_index] assign[=] name[self]._index
variable[rows] assign[=] list[[]]
variable[indices] assign[=] list[[]]
if <ast.UnaryOp object at 0x7da1b0cf7c10> begin[:]
return[constant[None]]
for taget[name[season]] in starred[name[self]._season] begin[:]
name[self]._index assign[=] call[name[self]._season.index, parameter[name[season]]]
call[name[rows].append, parameter[call[name[self]._dataframe_fields, parameter[]]]]
call[name[indices].append, parameter[name[season]]]
name[self]._index assign[=] name[temp_index]
return[call[name[pd].DataFrame, parameter[name[rows]]]] | keyword[def] identifier[dataframe] ( identifier[self] ):
literal[string]
identifier[temp_index] = identifier[self] . identifier[_index]
identifier[rows] =[]
identifier[indices] =[]
keyword[if] keyword[not] identifier[self] . identifier[_season] :
keyword[return] keyword[None]
keyword[for] identifier[season] keyword[in] identifier[self] . identifier[_season] :
identifier[self] . identifier[_index] = identifier[self] . identifier[_season] . identifier[index] ( identifier[season] )
identifier[rows] . identifier[append] ( identifier[self] . identifier[_dataframe_fields] ())
identifier[indices] . identifier[append] ( identifier[season] )
identifier[self] . identifier[_index] = identifier[temp_index]
keyword[return] identifier[pd] . identifier[DataFrame] ( identifier[rows] , identifier[index] =[ identifier[indices] ]) | def dataframe(self):
"""
Returns a ``pandas DataFrame`` containing all other relevant class
properties and values where each index is a different season plus the
career stats.
"""
temp_index = self._index
rows = []
indices = []
if not self._season:
return None # depends on [control=['if'], data=[]]
for season in self._season:
self._index = self._season.index(season)
rows.append(self._dataframe_fields())
indices.append(season) # depends on [control=['for'], data=['season']]
self._index = temp_index
return pd.DataFrame(rows, index=[indices]) |
def invert(self):
"""
Invert the mesh in- place by reversing the winding of every
face and negating normals without dumping the cache.
Alters
---------
self.faces : columns reversed
self.face_normals : negated if defined
self.vertex_normals : negated if defined
"""
with self._cache:
if 'face_normals' in self._cache:
self.face_normals *= -1.0
if 'vertex_normals' in self._cache:
self.vertex_normals *= -1.0
self.faces = np.fliplr(self.faces)
# save our normals
self._cache.clear(exclude=['face_normals',
'vertex_normals']) | def function[invert, parameter[self]]:
constant[
Invert the mesh in- place by reversing the winding of every
face and negating normals without dumping the cache.
Alters
---------
self.faces : columns reversed
self.face_normals : negated if defined
self.vertex_normals : negated if defined
]
with name[self]._cache begin[:]
if compare[constant[face_normals] in name[self]._cache] begin[:]
<ast.AugAssign object at 0x7da2044c0550>
if compare[constant[vertex_normals] in name[self]._cache] begin[:]
<ast.AugAssign object at 0x7da2044c33a0>
name[self].faces assign[=] call[name[np].fliplr, parameter[name[self].faces]]
call[name[self]._cache.clear, parameter[]] | keyword[def] identifier[invert] ( identifier[self] ):
literal[string]
keyword[with] identifier[self] . identifier[_cache] :
keyword[if] literal[string] keyword[in] identifier[self] . identifier[_cache] :
identifier[self] . identifier[face_normals] *=- literal[int]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[_cache] :
identifier[self] . identifier[vertex_normals] *=- literal[int]
identifier[self] . identifier[faces] = identifier[np] . identifier[fliplr] ( identifier[self] . identifier[faces] )
identifier[self] . identifier[_cache] . identifier[clear] ( identifier[exclude] =[ literal[string] ,
literal[string] ]) | def invert(self):
"""
Invert the mesh in- place by reversing the winding of every
face and negating normals without dumping the cache.
Alters
---------
self.faces : columns reversed
self.face_normals : negated if defined
self.vertex_normals : negated if defined
"""
with self._cache:
if 'face_normals' in self._cache:
self.face_normals *= -1.0 # depends on [control=['if'], data=[]]
if 'vertex_normals' in self._cache:
self.vertex_normals *= -1.0 # depends on [control=['if'], data=[]]
self.faces = np.fliplr(self.faces) # depends on [control=['with'], data=[]]
# save our normals
self._cache.clear(exclude=['face_normals', 'vertex_normals']) |
def start(self):
"start the pool's workers"
for i in xrange(self.size):
scheduler.schedule(self._runner)
self._closing = False | def function[start, parameter[self]]:
constant[start the pool's workers]
for taget[name[i]] in starred[call[name[xrange], parameter[name[self].size]]] begin[:]
call[name[scheduler].schedule, parameter[name[self]._runner]]
name[self]._closing assign[=] constant[False] | keyword[def] identifier[start] ( identifier[self] ):
literal[string]
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[self] . identifier[size] ):
identifier[scheduler] . identifier[schedule] ( identifier[self] . identifier[_runner] )
identifier[self] . identifier[_closing] = keyword[False] | def start(self):
"""start the pool's workers"""
for i in xrange(self.size):
scheduler.schedule(self._runner) # depends on [control=['for'], data=[]]
self._closing = False |
def LogBinomialCoef(n, k):
"""Computes the log of the binomial coefficient.
http://math.stackexchange.com/questions/64716/
approximating-the-logarithm-of-the-binomial-coefficient
n: number of trials
k: number of successes
Returns: float
"""
return n * log(n) - k * log(k) - (n - k) * log(n - k) | def function[LogBinomialCoef, parameter[n, k]]:
constant[Computes the log of the binomial coefficient.
http://math.stackexchange.com/questions/64716/
approximating-the-logarithm-of-the-binomial-coefficient
n: number of trials
k: number of successes
Returns: float
]
return[binary_operation[binary_operation[binary_operation[name[n] * call[name[log], parameter[name[n]]]] - binary_operation[name[k] * call[name[log], parameter[name[k]]]]] - binary_operation[binary_operation[name[n] - name[k]] * call[name[log], parameter[binary_operation[name[n] - name[k]]]]]]] | keyword[def] identifier[LogBinomialCoef] ( identifier[n] , identifier[k] ):
literal[string]
keyword[return] identifier[n] * identifier[log] ( identifier[n] )- identifier[k] * identifier[log] ( identifier[k] )-( identifier[n] - identifier[k] )* identifier[log] ( identifier[n] - identifier[k] ) | def LogBinomialCoef(n, k):
"""Computes the log of the binomial coefficient.
http://math.stackexchange.com/questions/64716/
approximating-the-logarithm-of-the-binomial-coefficient
n: number of trials
k: number of successes
Returns: float
"""
return n * log(n) - k * log(k) - (n - k) * log(n - k) |
def get_client(
client, profile_name, aws_access_key_id, aws_secret_access_key,
region=None,
):
"""Shortcut for getting an initialized instance of the boto3 client."""
boto3.setup_default_session(
profile_name=profile_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region,
)
return boto3.client(client) | def function[get_client, parameter[client, profile_name, aws_access_key_id, aws_secret_access_key, region]]:
constant[Shortcut for getting an initialized instance of the boto3 client.]
call[name[boto3].setup_default_session, parameter[]]
return[call[name[boto3].client, parameter[name[client]]]] | keyword[def] identifier[get_client] (
identifier[client] , identifier[profile_name] , identifier[aws_access_key_id] , identifier[aws_secret_access_key] ,
identifier[region] = keyword[None] ,
):
literal[string]
identifier[boto3] . identifier[setup_default_session] (
identifier[profile_name] = identifier[profile_name] ,
identifier[aws_access_key_id] = identifier[aws_access_key_id] ,
identifier[aws_secret_access_key] = identifier[aws_secret_access_key] ,
identifier[region_name] = identifier[region] ,
)
keyword[return] identifier[boto3] . identifier[client] ( identifier[client] ) | def get_client(client, profile_name, aws_access_key_id, aws_secret_access_key, region=None):
"""Shortcut for getting an initialized instance of the boto3 client."""
boto3.setup_default_session(profile_name=profile_name, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, region_name=region)
return boto3.client(client) |
def _RunOsLoginControl(self, params):
"""Run the OS Login control script.
Args:
params: list, the params to pass to the script
Returns:
int, the return code from the call, or None if the script is not found.
"""
try:
return subprocess.call([constants.OSLOGIN_CONTROL_SCRIPT] + params)
except OSError as e:
if e.errno == errno.ENOENT:
return None
else:
raise | def function[_RunOsLoginControl, parameter[self, params]]:
constant[Run the OS Login control script.
Args:
params: list, the params to pass to the script
Returns:
int, the return code from the call, or None if the script is not found.
]
<ast.Try object at 0x7da2044c0a00> | keyword[def] identifier[_RunOsLoginControl] ( identifier[self] , identifier[params] ):
literal[string]
keyword[try] :
keyword[return] identifier[subprocess] . identifier[call] ([ identifier[constants] . identifier[OSLOGIN_CONTROL_SCRIPT] ]+ identifier[params] )
keyword[except] identifier[OSError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[errno] == identifier[errno] . identifier[ENOENT] :
keyword[return] keyword[None]
keyword[else] :
keyword[raise] | def _RunOsLoginControl(self, params):
"""Run the OS Login control script.
Args:
params: list, the params to pass to the script
Returns:
int, the return code from the call, or None if the script is not found.
"""
try:
return subprocess.call([constants.OSLOGIN_CONTROL_SCRIPT] + params) # depends on [control=['try'], data=[]]
except OSError as e:
if e.errno == errno.ENOENT:
return None # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=['e']] |
def project_home_breadcrumb_bs4(label):
"""A template tag to return the project's home URL and label
formatted as a Bootstrap 4 breadcrumb.
PROJECT_HOME_NAMESPACE must be defined in settings, for example:
PROJECT_HOME_NAMESPACE = 'project_name:index_view'
Usage Example:
{% load project_home_tags %}
<ol class="breadcrumb">
{% project_home_breadcrumb_bs4 %} {# <--- #}
<li class="breadcrumb-item" aria-label="breadcrumb"><a href="{% url 'app:namespace' %}">List of Objects</a></li>
<li class=" breadcrumb-item active" aria-label="breadcrumb" aria-current="page">Object Detail</li>
</ol>
This gets converted into:
<ol class="breadcrumb">
<li class="breadcrumb-item" aria-label="breadcrumb"><a href="{% url 'project_name:index_view' %}">Home</a></li> {# <--- #}
<li class="breadcrumb-item" aria-label="breadcrumb"><a href="{% url 'app:namespace' %}">List of Objects</a></li>
<li class=" breadcrumb-item active" aria-label="breadcrumb" aria-current="page">Object Detail</li>
</ol>
By default, the link's text is 'Home'. A project-wide label can be
defined with PROJECT_HOME_LABEL in settings. Both the default and
the project-wide label can be overridden by passing a string to
the template tag.
For example:
{% project_home_breadcrumb_bs4 'Custom Label' %}
"""
url = home_url()
if url:
return format_html(
'<li class="breadcrumb-item" aria-label="breadcrumb"><a href="{}">{}</a></li>',
url, label)
else:
return format_html(
'<li class="breadcrumb-item" aria-label="breadcrumb">{}</li>',
label) | def function[project_home_breadcrumb_bs4, parameter[label]]:
constant[A template tag to return the project's home URL and label
formatted as a Bootstrap 4 breadcrumb.
PROJECT_HOME_NAMESPACE must be defined in settings, for example:
PROJECT_HOME_NAMESPACE = 'project_name:index_view'
Usage Example:
{% load project_home_tags %}
<ol class="breadcrumb">
{% project_home_breadcrumb_bs4 %} {# <--- #}
<li class="breadcrumb-item" aria-label="breadcrumb"><a href="{% url 'app:namespace' %}">List of Objects</a></li>
<li class=" breadcrumb-item active" aria-label="breadcrumb" aria-current="page">Object Detail</li>
</ol>
This gets converted into:
<ol class="breadcrumb">
<li class="breadcrumb-item" aria-label="breadcrumb"><a href="{% url 'project_name:index_view' %}">Home</a></li> {# <--- #}
<li class="breadcrumb-item" aria-label="breadcrumb"><a href="{% url 'app:namespace' %}">List of Objects</a></li>
<li class=" breadcrumb-item active" aria-label="breadcrumb" aria-current="page">Object Detail</li>
</ol>
By default, the link's text is 'Home'. A project-wide label can be
defined with PROJECT_HOME_LABEL in settings. Both the default and
the project-wide label can be overridden by passing a string to
the template tag.
For example:
{% project_home_breadcrumb_bs4 'Custom Label' %}
]
variable[url] assign[=] call[name[home_url], parameter[]]
if name[url] begin[:]
return[call[name[format_html], parameter[constant[<li class="breadcrumb-item" aria-label="breadcrumb"><a href="{}">{}</a></li>], name[url], name[label]]]] | keyword[def] identifier[project_home_breadcrumb_bs4] ( identifier[label] ):
literal[string]
identifier[url] = identifier[home_url] ()
keyword[if] identifier[url] :
keyword[return] identifier[format_html] (
literal[string] ,
identifier[url] , identifier[label] )
keyword[else] :
keyword[return] identifier[format_html] (
literal[string] ,
identifier[label] ) | def project_home_breadcrumb_bs4(label):
"""A template tag to return the project's home URL and label
formatted as a Bootstrap 4 breadcrumb.
PROJECT_HOME_NAMESPACE must be defined in settings, for example:
PROJECT_HOME_NAMESPACE = 'project_name:index_view'
Usage Example:
{% load project_home_tags %}
<ol class="breadcrumb">
{% project_home_breadcrumb_bs4 %} {# <--- #}
<li class="breadcrumb-item" aria-label="breadcrumb"><a href="{% url 'app:namespace' %}">List of Objects</a></li>
<li class=" breadcrumb-item active" aria-label="breadcrumb" aria-current="page">Object Detail</li>
</ol>
This gets converted into:
<ol class="breadcrumb">
<li class="breadcrumb-item" aria-label="breadcrumb"><a href="{% url 'project_name:index_view' %}">Home</a></li> {# <--- #}
<li class="breadcrumb-item" aria-label="breadcrumb"><a href="{% url 'app:namespace' %}">List of Objects</a></li>
<li class=" breadcrumb-item active" aria-label="breadcrumb" aria-current="page">Object Detail</li>
</ol>
By default, the link's text is 'Home'. A project-wide label can be
defined with PROJECT_HOME_LABEL in settings. Both the default and
the project-wide label can be overridden by passing a string to
the template tag.
For example:
{% project_home_breadcrumb_bs4 'Custom Label' %}
"""
url = home_url()
if url:
return format_html('<li class="breadcrumb-item" aria-label="breadcrumb"><a href="{}">{}</a></li>', url, label) # depends on [control=['if'], data=[]]
else:
return format_html('<li class="breadcrumb-item" aria-label="breadcrumb">{}</li>', label) |
def _prepare_photometry_input(data, error, mask, wcs, unit):
"""
Parse the inputs to `aperture_photometry`.
`aperture_photometry` accepts a wide range of inputs, e.g. ``data``
could be a numpy array, a Quantity array, or a fits HDU. This
requires some parsing and validation to ensure that all inputs are
complete and consistent. For example, the data could carry a unit
and the wcs itself, so we need to check that it is consistent with
the unit and wcs given as input parameters.
"""
if isinstance(data, fits.HDUList):
for i in range(len(data)):
if data[i].data is not None:
warnings.warn("Input data is a HDUList object, photometry is "
"run only for the {0} HDU."
.format(i), AstropyUserWarning)
data = data[i]
break
if isinstance(data, (fits.PrimaryHDU, fits.ImageHDU)):
header = data.header
data = data.data
if 'BUNIT' in header:
bunit = u.Unit(header['BUNIT'], parse_strict='warn')
if isinstance(bunit, u.UnrecognizedUnit):
warnings.warn('The BUNIT in the header of the input data is '
'not parseable as a valid unit.',
AstropyUserWarning)
else:
data = u.Quantity(data, unit=bunit)
if wcs is None:
try:
wcs = WCS(header)
except Exception:
# A valid WCS was not found in the header. Let the calling
# application raise an exception if it needs a WCS.
pass
data = np.asanyarray(data)
if data.ndim != 2:
raise ValueError('data must be a 2D array.')
if unit is not None:
unit = u.Unit(unit, parse_strict='warn')
if isinstance(unit, u.UnrecognizedUnit):
warnings.warn('The input unit is not parseable as a valid '
'unit.', AstropyUserWarning)
unit = None
if isinstance(data, u.Quantity):
if unit is not None and data.unit != unit:
warnings.warn('The input unit does not agree with the data '
'unit.', AstropyUserWarning)
else:
if unit is not None:
data = u.Quantity(data, unit=unit)
if error is not None:
if isinstance(error, u.Quantity):
if unit is not None and error.unit != unit:
warnings.warn('The input unit does not agree with the error '
'unit.', AstropyUserWarning)
if np.isscalar(error.value):
error = u.Quantity(np.broadcast_arrays(error, data),
unit=error.unit)[0]
else:
if np.isscalar(error):
error = np.broadcast_arrays(error, data)[0]
if unit is not None:
error = u.Quantity(error, unit=unit)
error = np.asanyarray(error)
if error.shape != data.shape:
raise ValueError('error and data must have the same shape.')
if mask is not None:
mask = np.asanyarray(mask)
if mask.shape != data.shape:
raise ValueError('mask and data must have the same shape.')
return data, error, mask, wcs | def function[_prepare_photometry_input, parameter[data, error, mask, wcs, unit]]:
constant[
Parse the inputs to `aperture_photometry`.
`aperture_photometry` accepts a wide range of inputs, e.g. ``data``
could be a numpy array, a Quantity array, or a fits HDU. This
requires some parsing and validation to ensure that all inputs are
complete and consistent. For example, the data could carry a unit
and the wcs itself, so we need to check that it is consistent with
the unit and wcs given as input parameters.
]
if call[name[isinstance], parameter[name[data], name[fits].HDUList]] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[data]]]]]] begin[:]
if compare[call[name[data]][name[i]].data is_not constant[None]] begin[:]
call[name[warnings].warn, parameter[call[constant[Input data is a HDUList object, photometry is run only for the {0} HDU.].format, parameter[name[i]]], name[AstropyUserWarning]]]
variable[data] assign[=] call[name[data]][name[i]]
break
if call[name[isinstance], parameter[name[data], tuple[[<ast.Attribute object at 0x7da1b11ba740>, <ast.Attribute object at 0x7da1b11ba980>]]]] begin[:]
variable[header] assign[=] name[data].header
variable[data] assign[=] name[data].data
if compare[constant[BUNIT] in name[header]] begin[:]
variable[bunit] assign[=] call[name[u].Unit, parameter[call[name[header]][constant[BUNIT]]]]
if call[name[isinstance], parameter[name[bunit], name[u].UnrecognizedUnit]] begin[:]
call[name[warnings].warn, parameter[constant[The BUNIT in the header of the input data is not parseable as a valid unit.], name[AstropyUserWarning]]]
if compare[name[wcs] is constant[None]] begin[:]
<ast.Try object at 0x7da1b11bb130>
variable[data] assign[=] call[name[np].asanyarray, parameter[name[data]]]
if compare[name[data].ndim not_equal[!=] constant[2]] begin[:]
<ast.Raise object at 0x7da1b11b8af0>
if compare[name[unit] is_not constant[None]] begin[:]
variable[unit] assign[=] call[name[u].Unit, parameter[name[unit]]]
if call[name[isinstance], parameter[name[unit], name[u].UnrecognizedUnit]] begin[:]
call[name[warnings].warn, parameter[constant[The input unit is not parseable as a valid unit.], name[AstropyUserWarning]]]
variable[unit] assign[=] constant[None]
if call[name[isinstance], parameter[name[data], name[u].Quantity]] begin[:]
if <ast.BoolOp object at 0x7da1b11fa1a0> begin[:]
call[name[warnings].warn, parameter[constant[The input unit does not agree with the data unit.], name[AstropyUserWarning]]]
if compare[name[error] is_not constant[None]] begin[:]
if call[name[isinstance], parameter[name[error], name[u].Quantity]] begin[:]
if <ast.BoolOp object at 0x7da1b11fb340> begin[:]
call[name[warnings].warn, parameter[constant[The input unit does not agree with the error unit.], name[AstropyUserWarning]]]
if call[name[np].isscalar, parameter[name[error].value]] begin[:]
variable[error] assign[=] call[call[name[u].Quantity, parameter[call[name[np].broadcast_arrays, parameter[name[error], name[data]]]]]][constant[0]]
if compare[name[error].shape not_equal[!=] name[data].shape] begin[:]
<ast.Raise object at 0x7da1b12c7ac0>
if compare[name[mask] is_not constant[None]] begin[:]
variable[mask] assign[=] call[name[np].asanyarray, parameter[name[mask]]]
if compare[name[mask].shape not_equal[!=] name[data].shape] begin[:]
<ast.Raise object at 0x7da1b12c5fc0>
return[tuple[[<ast.Name object at 0x7da1b12c5a50>, <ast.Name object at 0x7da1b12c7070>, <ast.Name object at 0x7da1b12c7b80>, <ast.Name object at 0x7da1b12c6dd0>]]] | keyword[def] identifier[_prepare_photometry_input] ( identifier[data] , identifier[error] , identifier[mask] , identifier[wcs] , identifier[unit] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[data] , identifier[fits] . identifier[HDUList] ):
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[data] )):
keyword[if] identifier[data] [ identifier[i] ]. identifier[data] keyword[is] keyword[not] keyword[None] :
identifier[warnings] . identifier[warn] ( literal[string]
literal[string]
. identifier[format] ( identifier[i] ), identifier[AstropyUserWarning] )
identifier[data] = identifier[data] [ identifier[i] ]
keyword[break]
keyword[if] identifier[isinstance] ( identifier[data] ,( identifier[fits] . identifier[PrimaryHDU] , identifier[fits] . identifier[ImageHDU] )):
identifier[header] = identifier[data] . identifier[header]
identifier[data] = identifier[data] . identifier[data]
keyword[if] literal[string] keyword[in] identifier[header] :
identifier[bunit] = identifier[u] . identifier[Unit] ( identifier[header] [ literal[string] ], identifier[parse_strict] = literal[string] )
keyword[if] identifier[isinstance] ( identifier[bunit] , identifier[u] . identifier[UnrecognizedUnit] ):
identifier[warnings] . identifier[warn] ( literal[string]
literal[string] ,
identifier[AstropyUserWarning] )
keyword[else] :
identifier[data] = identifier[u] . identifier[Quantity] ( identifier[data] , identifier[unit] = identifier[bunit] )
keyword[if] identifier[wcs] keyword[is] keyword[None] :
keyword[try] :
identifier[wcs] = identifier[WCS] ( identifier[header] )
keyword[except] identifier[Exception] :
keyword[pass]
identifier[data] = identifier[np] . identifier[asanyarray] ( identifier[data] )
keyword[if] identifier[data] . identifier[ndim] != literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[unit] keyword[is] keyword[not] keyword[None] :
identifier[unit] = identifier[u] . identifier[Unit] ( identifier[unit] , identifier[parse_strict] = literal[string] )
keyword[if] identifier[isinstance] ( identifier[unit] , identifier[u] . identifier[UnrecognizedUnit] ):
identifier[warnings] . identifier[warn] ( literal[string]
literal[string] , identifier[AstropyUserWarning] )
identifier[unit] = keyword[None]
keyword[if] identifier[isinstance] ( identifier[data] , identifier[u] . identifier[Quantity] ):
keyword[if] identifier[unit] keyword[is] keyword[not] keyword[None] keyword[and] identifier[data] . identifier[unit] != identifier[unit] :
identifier[warnings] . identifier[warn] ( literal[string]
literal[string] , identifier[AstropyUserWarning] )
keyword[else] :
keyword[if] identifier[unit] keyword[is] keyword[not] keyword[None] :
identifier[data] = identifier[u] . identifier[Quantity] ( identifier[data] , identifier[unit] = identifier[unit] )
keyword[if] identifier[error] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[isinstance] ( identifier[error] , identifier[u] . identifier[Quantity] ):
keyword[if] identifier[unit] keyword[is] keyword[not] keyword[None] keyword[and] identifier[error] . identifier[unit] != identifier[unit] :
identifier[warnings] . identifier[warn] ( literal[string]
literal[string] , identifier[AstropyUserWarning] )
keyword[if] identifier[np] . identifier[isscalar] ( identifier[error] . identifier[value] ):
identifier[error] = identifier[u] . identifier[Quantity] ( identifier[np] . identifier[broadcast_arrays] ( identifier[error] , identifier[data] ),
identifier[unit] = identifier[error] . identifier[unit] )[ literal[int] ]
keyword[else] :
keyword[if] identifier[np] . identifier[isscalar] ( identifier[error] ):
identifier[error] = identifier[np] . identifier[broadcast_arrays] ( identifier[error] , identifier[data] )[ literal[int] ]
keyword[if] identifier[unit] keyword[is] keyword[not] keyword[None] :
identifier[error] = identifier[u] . identifier[Quantity] ( identifier[error] , identifier[unit] = identifier[unit] )
identifier[error] = identifier[np] . identifier[asanyarray] ( identifier[error] )
keyword[if] identifier[error] . identifier[shape] != identifier[data] . identifier[shape] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[mask] keyword[is] keyword[not] keyword[None] :
identifier[mask] = identifier[np] . identifier[asanyarray] ( identifier[mask] )
keyword[if] identifier[mask] . identifier[shape] != identifier[data] . identifier[shape] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[data] , identifier[error] , identifier[mask] , identifier[wcs] | def _prepare_photometry_input(data, error, mask, wcs, unit):
"""
Parse the inputs to `aperture_photometry`.
`aperture_photometry` accepts a wide range of inputs, e.g. ``data``
could be a numpy array, a Quantity array, or a fits HDU. This
requires some parsing and validation to ensure that all inputs are
complete and consistent. For example, the data could carry a unit
and the wcs itself, so we need to check that it is consistent with
the unit and wcs given as input parameters.
"""
if isinstance(data, fits.HDUList):
for i in range(len(data)):
if data[i].data is not None:
warnings.warn('Input data is a HDUList object, photometry is run only for the {0} HDU.'.format(i), AstropyUserWarning)
data = data[i]
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
if isinstance(data, (fits.PrimaryHDU, fits.ImageHDU)):
header = data.header
data = data.data
if 'BUNIT' in header:
bunit = u.Unit(header['BUNIT'], parse_strict='warn')
if isinstance(bunit, u.UnrecognizedUnit):
warnings.warn('The BUNIT in the header of the input data is not parseable as a valid unit.', AstropyUserWarning) # depends on [control=['if'], data=[]]
else:
data = u.Quantity(data, unit=bunit) # depends on [control=['if'], data=['header']] # depends on [control=['if'], data=[]]
if wcs is None:
try:
wcs = WCS(header) # depends on [control=['try'], data=[]]
except Exception:
# A valid WCS was not found in the header. Let the calling
# application raise an exception if it needs a WCS.
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['wcs']]
data = np.asanyarray(data)
if data.ndim != 2:
raise ValueError('data must be a 2D array.') # depends on [control=['if'], data=[]]
if unit is not None:
unit = u.Unit(unit, parse_strict='warn')
if isinstance(unit, u.UnrecognizedUnit):
warnings.warn('The input unit is not parseable as a valid unit.', AstropyUserWarning)
unit = None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['unit']]
if isinstance(data, u.Quantity):
if unit is not None and data.unit != unit:
warnings.warn('The input unit does not agree with the data unit.', AstropyUserWarning) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif unit is not None:
data = u.Quantity(data, unit=unit) # depends on [control=['if'], data=['unit']]
if error is not None:
if isinstance(error, u.Quantity):
if unit is not None and error.unit != unit:
warnings.warn('The input unit does not agree with the error unit.', AstropyUserWarning) # depends on [control=['if'], data=[]]
if np.isscalar(error.value):
error = u.Quantity(np.broadcast_arrays(error, data), unit=error.unit)[0] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
if np.isscalar(error):
error = np.broadcast_arrays(error, data)[0] # depends on [control=['if'], data=[]]
if unit is not None:
error = u.Quantity(error, unit=unit) # depends on [control=['if'], data=['unit']]
error = np.asanyarray(error)
if error.shape != data.shape:
raise ValueError('error and data must have the same shape.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['error']]
if mask is not None:
mask = np.asanyarray(mask)
if mask.shape != data.shape:
raise ValueError('mask and data must have the same shape.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['mask']]
return (data, error, mask, wcs) |
def get_max_network_adapters_of_type(self, chipset, type_p):
"""Maximum number of network adapters of a given attachment type,
associated with every :py:class:`IMachine` instance.
in chipset of type :class:`ChipsetType`
The chipset type to get the value for.
in type_p of type :class:`NetworkAttachmentType`
Type of attachment.
return max_network_adapters of type int
The maximum number of network adapters allowed for
particular chipset and attachment type.
"""
if not isinstance(chipset, ChipsetType):
raise TypeError("chipset can only be an instance of type ChipsetType")
if not isinstance(type_p, NetworkAttachmentType):
raise TypeError("type_p can only be an instance of type NetworkAttachmentType")
max_network_adapters = self._call("getMaxNetworkAdaptersOfType",
in_p=[chipset, type_p])
return max_network_adapters | def function[get_max_network_adapters_of_type, parameter[self, chipset, type_p]]:
constant[Maximum number of network adapters of a given attachment type,
associated with every :py:class:`IMachine` instance.
in chipset of type :class:`ChipsetType`
The chipset type to get the value for.
in type_p of type :class:`NetworkAttachmentType`
Type of attachment.
return max_network_adapters of type int
The maximum number of network adapters allowed for
particular chipset and attachment type.
]
if <ast.UnaryOp object at 0x7da20c6c41c0> begin[:]
<ast.Raise object at 0x7da20c6c7580>
if <ast.UnaryOp object at 0x7da20c6c7b20> begin[:]
<ast.Raise object at 0x7da2044c2440>
variable[max_network_adapters] assign[=] call[name[self]._call, parameter[constant[getMaxNetworkAdaptersOfType]]]
return[name[max_network_adapters]] | keyword[def] identifier[get_max_network_adapters_of_type] ( identifier[self] , identifier[chipset] , identifier[type_p] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[chipset] , identifier[ChipsetType] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[type_p] , identifier[NetworkAttachmentType] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[max_network_adapters] = identifier[self] . identifier[_call] ( literal[string] ,
identifier[in_p] =[ identifier[chipset] , identifier[type_p] ])
keyword[return] identifier[max_network_adapters] | def get_max_network_adapters_of_type(self, chipset, type_p):
"""Maximum number of network adapters of a given attachment type,
associated with every :py:class:`IMachine` instance.
in chipset of type :class:`ChipsetType`
The chipset type to get the value for.
in type_p of type :class:`NetworkAttachmentType`
Type of attachment.
return max_network_adapters of type int
The maximum number of network adapters allowed for
particular chipset and attachment type.
"""
if not isinstance(chipset, ChipsetType):
raise TypeError('chipset can only be an instance of type ChipsetType') # depends on [control=['if'], data=[]]
if not isinstance(type_p, NetworkAttachmentType):
raise TypeError('type_p can only be an instance of type NetworkAttachmentType') # depends on [control=['if'], data=[]]
max_network_adapters = self._call('getMaxNetworkAdaptersOfType', in_p=[chipset, type_p])
return max_network_adapters |
def database(self):
"""
Enters all the metadata into a database
"""
import sqlite3
try:
os.remove('{}/metadatabase.sqlite'.format(self.reportpath))
except OSError:
pass
# Set the name of the database
db = sqlite3.connect('{}/metadatabase.sqlite'.format(self.reportpath))
# Create a cursor to allow access to the database
cursor = db.cursor()
# Set up the db
cursor.execute('''
CREATE TABLE IF NOT EXISTS Samples (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE
)
''')
# Create a variable to store the names of the header values for each individual table
# This will store a set of all the headers from all the strains, as there can be some variability present, as
# not all analyses are available for all taxonomic groups
columns = dict()
for sample in self.metadata:
# Create a metadata object to store the new tables
data = MetadataObject()
data.name = sample.name
# Insert each strain name into the Samples table
cursor.execute('''
INSERT OR IGNORE INTO Samples (name)
VALUES ( ? )
''', (sample.name, ))
# Each header in the .json file represents a major category e.g. ARMI, GeneSeekr, commands, etc. and
# will be made into a separate table
for header in sample.datastore.items():
# Allow for certain analyses, such as core genome, not being performed on all strains
try:
# Key and value: data description and data value e.g. targets present: 1012, etc.
for key, value in sorted(header[1].datastore.items()):
# Only the values consisting of dictionaries are of interest
if type(value) == dict:
# Clean the column names so there are no issues entering names into the database
cleanedcolumn = self.columnclean(key)
# Set the table name
tablename = '{}_{}'.format(header[0].replace('.', '_'), cleanedcolumn)
# Create the table (if it doesn't already exist)
cursor.execute('''
CREATE TABLE IF NOT EXISTS {} (
sample_id INTEGER
)
'''.format(tablename))
# Add the attributes with the dictionaries (values) to the metadata object
setattr(data, tablename, GenObject(value))
for gene, result in sorted(value.items()):
# Add the data header to the dictionary
try:
columns[tablename].add(gene)
# Initialise the dictionary the first time a table name is encountered
except KeyError:
columns[tablename] = set()
columns[tablename].add(str(gene))
except (AttributeError, IndexError):
pass
self.tabledata.append(data)
# Iterate through the dictionary containing all the data headers
for table, setofheaders in sorted(columns.items()):
# Each header will be used as a column in the appropriate table
for cleanedcolumn in sorted(setofheaders):
# Alter the table by adding each header as a column
cursor.execute('''
ALTER TABLE {}
ADD COLUMN {} TEXT
'''.format(table, cleanedcolumn))
# Iterate through the samples and pull out the data for each table/column
# for sample in self.metadata:
for sample in self.tabledata:
# Find the id associated with each sample in the Sample table
cursor.execute('''
SELECT id from Samples WHERE name=?
''', (sample.name,))
sampleid = cursor.fetchone()[0]
# Add the sample_id to the table
cursor.execute('''
INSERT OR IGNORE INTO {}
(sample_id) VALUES ("{}")
'''.format(table, sampleid))
# Add the data to the table
try:
# Find the data for each table/column
for item in sorted(sample[table].datastore.items()):
# Clean the names
cleanedcolumn = self.columnclean(str(item[0]))
# Add the data to the column of the appropriate table,
# where the sample_id matches the current strain
cursor.execute('''
UPDATE {}
SET {} = ?
WHERE sample_id = {}
'''.format(table, cleanedcolumn, sampleid), (str(item[1]), ))
except KeyError:
pass
# Commit the changes to the database
db.commit() | def function[database, parameter[self]]:
constant[
Enters all the metadata into a database
]
import module[sqlite3]
<ast.Try object at 0x7da18f58cb20>
variable[db] assign[=] call[name[sqlite3].connect, parameter[call[constant[{}/metadatabase.sqlite].format, parameter[name[self].reportpath]]]]
variable[cursor] assign[=] call[name[db].cursor, parameter[]]
call[name[cursor].execute, parameter[constant[
CREATE TABLE IF NOT EXISTS Samples (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE
)
]]]
variable[columns] assign[=] call[name[dict], parameter[]]
for taget[name[sample]] in starred[name[self].metadata] begin[:]
variable[data] assign[=] call[name[MetadataObject], parameter[]]
name[data].name assign[=] name[sample].name
call[name[cursor].execute, parameter[constant[
INSERT OR IGNORE INTO Samples (name)
VALUES ( ? )
], tuple[[<ast.Attribute object at 0x7da18f58c580>]]]]
for taget[name[header]] in starred[call[name[sample].datastore.items, parameter[]]] begin[:]
<ast.Try object at 0x7da20c76d930>
call[name[self].tabledata.append, parameter[name[data]]]
for taget[tuple[[<ast.Name object at 0x7da20c76cb20>, <ast.Name object at 0x7da20c76d660>]]] in starred[call[name[sorted], parameter[call[name[columns].items, parameter[]]]]] begin[:]
for taget[name[cleanedcolumn]] in starred[call[name[sorted], parameter[name[setofheaders]]]] begin[:]
call[name[cursor].execute, parameter[call[constant[
ALTER TABLE {}
ADD COLUMN {} TEXT
].format, parameter[name[table], name[cleanedcolumn]]]]]
for taget[name[sample]] in starred[name[self].tabledata] begin[:]
call[name[cursor].execute, parameter[constant[
SELECT id from Samples WHERE name=?
], tuple[[<ast.Attribute object at 0x7da20c76ea70>]]]]
variable[sampleid] assign[=] call[call[name[cursor].fetchone, parameter[]]][constant[0]]
call[name[cursor].execute, parameter[call[constant[
INSERT OR IGNORE INTO {}
(sample_id) VALUES ("{}")
].format, parameter[name[table], name[sampleid]]]]]
<ast.Try object at 0x7da20c76cc70>
call[name[db].commit, parameter[]] | keyword[def] identifier[database] ( identifier[self] ):
literal[string]
keyword[import] identifier[sqlite3]
keyword[try] :
identifier[os] . identifier[remove] ( literal[string] . identifier[format] ( identifier[self] . identifier[reportpath] ))
keyword[except] identifier[OSError] :
keyword[pass]
identifier[db] = identifier[sqlite3] . identifier[connect] ( literal[string] . identifier[format] ( identifier[self] . identifier[reportpath] ))
identifier[cursor] = identifier[db] . identifier[cursor] ()
identifier[cursor] . identifier[execute] ( literal[string] )
identifier[columns] = identifier[dict] ()
keyword[for] identifier[sample] keyword[in] identifier[self] . identifier[metadata] :
identifier[data] = identifier[MetadataObject] ()
identifier[data] . identifier[name] = identifier[sample] . identifier[name]
identifier[cursor] . identifier[execute] ( literal[string] ,( identifier[sample] . identifier[name] ,))
keyword[for] identifier[header] keyword[in] identifier[sample] . identifier[datastore] . identifier[items] ():
keyword[try] :
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[sorted] ( identifier[header] [ literal[int] ]. identifier[datastore] . identifier[items] ()):
keyword[if] identifier[type] ( identifier[value] )== identifier[dict] :
identifier[cleanedcolumn] = identifier[self] . identifier[columnclean] ( identifier[key] )
identifier[tablename] = literal[string] . identifier[format] ( identifier[header] [ literal[int] ]. identifier[replace] ( literal[string] , literal[string] ), identifier[cleanedcolumn] )
identifier[cursor] . identifier[execute] ( literal[string] . identifier[format] ( identifier[tablename] ))
identifier[setattr] ( identifier[data] , identifier[tablename] , identifier[GenObject] ( identifier[value] ))
keyword[for] identifier[gene] , identifier[result] keyword[in] identifier[sorted] ( identifier[value] . identifier[items] ()):
keyword[try] :
identifier[columns] [ identifier[tablename] ]. identifier[add] ( identifier[gene] )
keyword[except] identifier[KeyError] :
identifier[columns] [ identifier[tablename] ]= identifier[set] ()
identifier[columns] [ identifier[tablename] ]. identifier[add] ( identifier[str] ( identifier[gene] ))
keyword[except] ( identifier[AttributeError] , identifier[IndexError] ):
keyword[pass]
identifier[self] . identifier[tabledata] . identifier[append] ( identifier[data] )
keyword[for] identifier[table] , identifier[setofheaders] keyword[in] identifier[sorted] ( identifier[columns] . identifier[items] ()):
keyword[for] identifier[cleanedcolumn] keyword[in] identifier[sorted] ( identifier[setofheaders] ):
identifier[cursor] . identifier[execute] ( literal[string] . identifier[format] ( identifier[table] , identifier[cleanedcolumn] ))
keyword[for] identifier[sample] keyword[in] identifier[self] . identifier[tabledata] :
identifier[cursor] . identifier[execute] ( literal[string] ,( identifier[sample] . identifier[name] ,))
identifier[sampleid] = identifier[cursor] . identifier[fetchone] ()[ literal[int] ]
identifier[cursor] . identifier[execute] ( literal[string] . identifier[format] ( identifier[table] , identifier[sampleid] ))
keyword[try] :
keyword[for] identifier[item] keyword[in] identifier[sorted] ( identifier[sample] [ identifier[table] ]. identifier[datastore] . identifier[items] ()):
identifier[cleanedcolumn] = identifier[self] . identifier[columnclean] ( identifier[str] ( identifier[item] [ literal[int] ]))
identifier[cursor] . identifier[execute] ( literal[string] . identifier[format] ( identifier[table] , identifier[cleanedcolumn] , identifier[sampleid] ),( identifier[str] ( identifier[item] [ literal[int] ]),))
keyword[except] identifier[KeyError] :
keyword[pass]
identifier[db] . identifier[commit] () | def database(self):
"""
Enters all the metadata into a database
"""
import sqlite3
try:
os.remove('{}/metadatabase.sqlite'.format(self.reportpath)) # depends on [control=['try'], data=[]]
except OSError:
pass # depends on [control=['except'], data=[]]
# Set the name of the database
db = sqlite3.connect('{}/metadatabase.sqlite'.format(self.reportpath))
# Create a cursor to allow access to the database
cursor = db.cursor()
# Set up the db
cursor.execute('\n CREATE TABLE IF NOT EXISTS Samples (\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,\n name TEXT UNIQUE\n )\n ')
# Create a variable to store the names of the header values for each individual table
# This will store a set of all the headers from all the strains, as there can be some variability present, as
# not all analyses are available for all taxonomic groups
columns = dict()
for sample in self.metadata:
# Create a metadata object to store the new tables
data = MetadataObject()
data.name = sample.name
# Insert each strain name into the Samples table
cursor.execute('\n INSERT OR IGNORE INTO Samples (name)\n VALUES ( ? )\n ', (sample.name,))
# Each header in the .json file represents a major category e.g. ARMI, GeneSeekr, commands, etc. and
# will be made into a separate table
for header in sample.datastore.items():
# Allow for certain analyses, such as core genome, not being performed on all strains
try:
# Key and value: data description and data value e.g. targets present: 1012, etc.
for (key, value) in sorted(header[1].datastore.items()):
# Only the values consisting of dictionaries are of interest
if type(value) == dict:
# Clean the column names so there are no issues entering names into the database
cleanedcolumn = self.columnclean(key)
# Set the table name
tablename = '{}_{}'.format(header[0].replace('.', '_'), cleanedcolumn)
# Create the table (if it doesn't already exist)
cursor.execute('\n CREATE TABLE IF NOT EXISTS {} (\n sample_id INTEGER\n )\n '.format(tablename))
# Add the attributes with the dictionaries (values) to the metadata object
setattr(data, tablename, GenObject(value))
for (gene, result) in sorted(value.items()):
# Add the data header to the dictionary
try:
columns[tablename].add(gene) # depends on [control=['try'], data=[]]
# Initialise the dictionary the first time a table name is encountered
except KeyError:
columns[tablename] = set()
columns[tablename].add(str(gene)) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]]
except (AttributeError, IndexError):
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['header']]
self.tabledata.append(data) # depends on [control=['for'], data=['sample']]
# Iterate through the dictionary containing all the data headers
for (table, setofheaders) in sorted(columns.items()):
# Each header will be used as a column in the appropriate table
for cleanedcolumn in sorted(setofheaders):
# Alter the table by adding each header as a column
cursor.execute('\n ALTER TABLE {}\n ADD COLUMN {} TEXT\n '.format(table, cleanedcolumn)) # depends on [control=['for'], data=['cleanedcolumn']]
# Iterate through the samples and pull out the data for each table/column
# for sample in self.metadata:
for sample in self.tabledata:
# Find the id associated with each sample in the Sample table
cursor.execute('\n SELECT id from Samples WHERE name=?\n ', (sample.name,))
sampleid = cursor.fetchone()[0]
# Add the sample_id to the table
cursor.execute('\n INSERT OR IGNORE INTO {}\n (sample_id) VALUES ("{}")\n '.format(table, sampleid))
# Add the data to the table
try:
# Find the data for each table/column
for item in sorted(sample[table].datastore.items()):
# Clean the names
cleanedcolumn = self.columnclean(str(item[0]))
# Add the data to the column of the appropriate table,
# where the sample_id matches the current strain
cursor.execute('\n UPDATE {}\n SET {} = ?\n WHERE sample_id = {}\n '.format(table, cleanedcolumn, sampleid), (str(item[1]),)) # depends on [control=['for'], data=['item']] # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['sample']] # depends on [control=['for'], data=[]]
# Commit the changes to the database
db.commit() |
def _intersect_edge_arrays(self, lines1, lines2):
"""Return the intercepts of all lines defined in *lines1* as they
intersect all lines in *lines2*.
Arguments are of shape (..., 2, 2), where axes are:
0: number of lines
1: two points per line
2: x,y pair per point
Lines are compared elementwise across the arrays (lines1[i] is compared
against lines2[i]). If one of the arrays has N=1, then that line is
compared against all lines in the other array.
Returns an array of shape (N,) where each value indicates the intercept
relative to the defined line segment. A value of 0 indicates
intersection at the first endpoint, and a value of 1 indicates
intersection at the second endpoint. Values between 1 and 0 are on the
segment, whereas values outside 1 and 0 are off of the segment.
"""
# vector for each line in lines1
l1 = lines1[..., 1, :] - lines1[..., 0, :]
# vector for each line in lines2
l2 = lines2[..., 1, :] - lines2[..., 0, :]
# vector between first point of each line
diff = lines1[..., 0, :] - lines2[..., 0, :]
p = l1.copy()[..., ::-1] # vectors perpendicular to l1
p[..., 0] *= -1
f = (l2 * p).sum(axis=-1) # l2 dot p
# tempting, but bad idea!
#f = np.where(f==0, 1, f)
err = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
try:
h = (diff * p).sum(axis=-1) / f # diff dot p / f
finally:
np.seterr(**err)
return h | def function[_intersect_edge_arrays, parameter[self, lines1, lines2]]:
constant[Return the intercepts of all lines defined in *lines1* as they
intersect all lines in *lines2*.
Arguments are of shape (..., 2, 2), where axes are:
0: number of lines
1: two points per line
2: x,y pair per point
Lines are compared elementwise across the arrays (lines1[i] is compared
against lines2[i]). If one of the arrays has N=1, then that line is
compared against all lines in the other array.
Returns an array of shape (N,) where each value indicates the intercept
relative to the defined line segment. A value of 0 indicates
intersection at the first endpoint, and a value of 1 indicates
intersection at the second endpoint. Values between 1 and 0 are on the
segment, whereas values outside 1 and 0 are off of the segment.
]
variable[l1] assign[=] binary_operation[call[name[lines1]][tuple[[<ast.Constant object at 0x7da1b0f2aad0>, <ast.Constant object at 0x7da1b0f2a2f0>, <ast.Slice object at 0x7da1b0f2ab90>]]] - call[name[lines1]][tuple[[<ast.Constant object at 0x7da1b0f2b4c0>, <ast.Constant object at 0x7da1b0f2bb20>, <ast.Slice object at 0x7da1b0f2bd60>]]]]
variable[l2] assign[=] binary_operation[call[name[lines2]][tuple[[<ast.Constant object at 0x7da1b0f2baf0>, <ast.Constant object at 0x7da1b0f2aef0>, <ast.Slice object at 0x7da1b0f2a140>]]] - call[name[lines2]][tuple[[<ast.Constant object at 0x7da1b0f2abf0>, <ast.Constant object at 0x7da1b0f2bb50>, <ast.Slice object at 0x7da1b0f901c0>]]]]
variable[diff] assign[=] binary_operation[call[name[lines1]][tuple[[<ast.Constant object at 0x7da1b0f91cc0>, <ast.Constant object at 0x7da1b0f90430>, <ast.Slice object at 0x7da1b0f939d0>]]] - call[name[lines2]][tuple[[<ast.Constant object at 0x7da1b0f900d0>, <ast.Constant object at 0x7da1b0f921a0>, <ast.Slice object at 0x7da1b0f93fa0>]]]]
variable[p] assign[=] call[call[name[l1].copy, parameter[]]][tuple[[<ast.Constant object at 0x7da1b0f937c0>, <ast.Slice object at 0x7da1b0f90460>]]]
<ast.AugAssign object at 0x7da1b0f91b70>
variable[f] assign[=] call[binary_operation[name[l2] * name[p]].sum, parameter[]]
variable[err] assign[=] call[name[np].geterr, parameter[]]
call[name[np].seterr, parameter[]]
<ast.Try object at 0x7da1b0f929e0>
return[name[h]] | keyword[def] identifier[_intersect_edge_arrays] ( identifier[self] , identifier[lines1] , identifier[lines2] ):
literal[string]
identifier[l1] = identifier[lines1] [..., literal[int] ,:]- identifier[lines1] [..., literal[int] ,:]
identifier[l2] = identifier[lines2] [..., literal[int] ,:]- identifier[lines2] [..., literal[int] ,:]
identifier[diff] = identifier[lines1] [..., literal[int] ,:]- identifier[lines2] [..., literal[int] ,:]
identifier[p] = identifier[l1] . identifier[copy] ()[...,::- literal[int] ]
identifier[p] [..., literal[int] ]*=- literal[int]
identifier[f] =( identifier[l2] * identifier[p] ). identifier[sum] ( identifier[axis] =- literal[int] )
identifier[err] = identifier[np] . identifier[geterr] ()
identifier[np] . identifier[seterr] ( identifier[divide] = literal[string] , identifier[invalid] = literal[string] )
keyword[try] :
identifier[h] =( identifier[diff] * identifier[p] ). identifier[sum] ( identifier[axis] =- literal[int] )/ identifier[f]
keyword[finally] :
identifier[np] . identifier[seterr] (** identifier[err] )
keyword[return] identifier[h] | def _intersect_edge_arrays(self, lines1, lines2):
"""Return the intercepts of all lines defined in *lines1* as they
intersect all lines in *lines2*.
Arguments are of shape (..., 2, 2), where axes are:
0: number of lines
1: two points per line
2: x,y pair per point
Lines are compared elementwise across the arrays (lines1[i] is compared
against lines2[i]). If one of the arrays has N=1, then that line is
compared against all lines in the other array.
Returns an array of shape (N,) where each value indicates the intercept
relative to the defined line segment. A value of 0 indicates
intersection at the first endpoint, and a value of 1 indicates
intersection at the second endpoint. Values between 1 and 0 are on the
segment, whereas values outside 1 and 0 are off of the segment.
"""
# vector for each line in lines1
l1 = lines1[..., 1, :] - lines1[..., 0, :]
# vector for each line in lines2
l2 = lines2[..., 1, :] - lines2[..., 0, :]
# vector between first point of each line
diff = lines1[..., 0, :] - lines2[..., 0, :]
p = l1.copy()[..., ::-1] # vectors perpendicular to l1
p[..., 0] *= -1
f = (l2 * p).sum(axis=-1) # l2 dot p
# tempting, but bad idea!
#f = np.where(f==0, 1, f)
err = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
try:
h = (diff * p).sum(axis=-1) / f # diff dot p / f # depends on [control=['try'], data=[]]
finally:
np.seterr(**err)
return h |
def listFileParents(self, logical_file_name="", block_id=0, block_name=""):
"""
required parameter: logical_file_name or block_name
returns: this_logical_file_name, parent_logical_file_name, parent_file_id
"""
#self.logger.debug("lfn %s, block_name %s, block_id :%s" % (logical_file_name, block_name, block_id))
if not logical_file_name and not block_name and not block_id:
dbsExceptionHandler('dbsException-invalid-input', \
"Logical_file_name, block_id or block_name is required for fileparents api", self.logger.exception )
with self.dbi.connection() as conn:
sqlresult = self.fileparentlist.execute(conn, logical_file_name, block_id, block_name)
d = {}
#self.logger.debug(sqlresult)
for i in sqlresult:
k = i['this_logical_file_name']
v = i['parent_logical_file_name']
d.setdefault(k, []).append(v)
for k, v in d.iteritems():
yield {'logical_file_name':k, 'parent_logical_file_name': v}
del d | def function[listFileParents, parameter[self, logical_file_name, block_id, block_name]]:
constant[
required parameter: logical_file_name or block_name
returns: this_logical_file_name, parent_logical_file_name, parent_file_id
]
if <ast.BoolOp object at 0x7da18bcc90c0> begin[:]
call[name[dbsExceptionHandler], parameter[constant[dbsException-invalid-input], constant[Logical_file_name, block_id or block_name is required for fileparents api], name[self].logger.exception]]
with call[name[self].dbi.connection, parameter[]] begin[:]
variable[sqlresult] assign[=] call[name[self].fileparentlist.execute, parameter[name[conn], name[logical_file_name], name[block_id], name[block_name]]]
variable[d] assign[=] dictionary[[], []]
for taget[name[i]] in starred[name[sqlresult]] begin[:]
variable[k] assign[=] call[name[i]][constant[this_logical_file_name]]
variable[v] assign[=] call[name[i]][constant[parent_logical_file_name]]
call[call[name[d].setdefault, parameter[name[k], list[[]]]].append, parameter[name[v]]]
for taget[tuple[[<ast.Name object at 0x7da18fe93670>, <ast.Name object at 0x7da18fe92680>]]] in starred[call[name[d].iteritems, parameter[]]] begin[:]
<ast.Yield object at 0x7da18fe933d0>
<ast.Delete object at 0x7da18fe93970> | keyword[def] identifier[listFileParents] ( identifier[self] , identifier[logical_file_name] = literal[string] , identifier[block_id] = literal[int] , identifier[block_name] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[logical_file_name] keyword[and] keyword[not] identifier[block_name] keyword[and] keyword[not] identifier[block_id] :
identifier[dbsExceptionHandler] ( literal[string] , literal[string] , identifier[self] . identifier[logger] . identifier[exception] )
keyword[with] identifier[self] . identifier[dbi] . identifier[connection] () keyword[as] identifier[conn] :
identifier[sqlresult] = identifier[self] . identifier[fileparentlist] . identifier[execute] ( identifier[conn] , identifier[logical_file_name] , identifier[block_id] , identifier[block_name] )
identifier[d] ={}
keyword[for] identifier[i] keyword[in] identifier[sqlresult] :
identifier[k] = identifier[i] [ literal[string] ]
identifier[v] = identifier[i] [ literal[string] ]
identifier[d] . identifier[setdefault] ( identifier[k] ,[]). identifier[append] ( identifier[v] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[d] . identifier[iteritems] ():
keyword[yield] { literal[string] : identifier[k] , literal[string] : identifier[v] }
keyword[del] identifier[d] | def listFileParents(self, logical_file_name='', block_id=0, block_name=''):
"""
required parameter: logical_file_name or block_name
returns: this_logical_file_name, parent_logical_file_name, parent_file_id
"""
#self.logger.debug("lfn %s, block_name %s, block_id :%s" % (logical_file_name, block_name, block_id))
if not logical_file_name and (not block_name) and (not block_id):
dbsExceptionHandler('dbsException-invalid-input', 'Logical_file_name, block_id or block_name is required for fileparents api', self.logger.exception) # depends on [control=['if'], data=[]]
with self.dbi.connection() as conn:
sqlresult = self.fileparentlist.execute(conn, logical_file_name, block_id, block_name)
d = {}
#self.logger.debug(sqlresult)
for i in sqlresult:
k = i['this_logical_file_name']
v = i['parent_logical_file_name']
d.setdefault(k, []).append(v) # depends on [control=['for'], data=['i']]
for (k, v) in d.iteritems():
yield {'logical_file_name': k, 'parent_logical_file_name': v} # depends on [control=['for'], data=[]]
del d # depends on [control=['with'], data=['conn']] |
def all_default_fields():
"""Helper to retrieve all fields which has default value.
:returns: List of default fields.
:rtype: list
"""
default_fields = []
for item in dir(fields):
if not item.startswith("__"):
var = getattr(definitions, item)
if isinstance(var, dict):
if var.get('replace_null', False):
default_fields.append(var)
return default_fields | def function[all_default_fields, parameter[]]:
constant[Helper to retrieve all fields which has default value.
:returns: List of default fields.
:rtype: list
]
variable[default_fields] assign[=] list[[]]
for taget[name[item]] in starred[call[name[dir], parameter[name[fields]]]] begin[:]
if <ast.UnaryOp object at 0x7da20e9b3070> begin[:]
variable[var] assign[=] call[name[getattr], parameter[name[definitions], name[item]]]
if call[name[isinstance], parameter[name[var], name[dict]]] begin[:]
if call[name[var].get, parameter[constant[replace_null], constant[False]]] begin[:]
call[name[default_fields].append, parameter[name[var]]]
return[name[default_fields]] | keyword[def] identifier[all_default_fields] ():
literal[string]
identifier[default_fields] =[]
keyword[for] identifier[item] keyword[in] identifier[dir] ( identifier[fields] ):
keyword[if] keyword[not] identifier[item] . identifier[startswith] ( literal[string] ):
identifier[var] = identifier[getattr] ( identifier[definitions] , identifier[item] )
keyword[if] identifier[isinstance] ( identifier[var] , identifier[dict] ):
keyword[if] identifier[var] . identifier[get] ( literal[string] , keyword[False] ):
identifier[default_fields] . identifier[append] ( identifier[var] )
keyword[return] identifier[default_fields] | def all_default_fields():
"""Helper to retrieve all fields which has default value.
:returns: List of default fields.
:rtype: list
"""
default_fields = []
for item in dir(fields):
if not item.startswith('__'):
var = getattr(definitions, item)
if isinstance(var, dict):
if var.get('replace_null', False):
default_fields.append(var) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
return default_fields |
def item_selection_changed(self):
"""List widget item selection change handler."""
row = self.current_row()
if self.count() and row >= 0:
if '</b></big><br>' in self.list.currentItem().text() and row == 0:
self.next_row()
if self.mode == self.FILE_MODE:
try:
stack_index = self.paths.index(self.filtered_path[row])
self.plugin = self.widgets[stack_index][1]
self.goto_line(self.line_number)
try:
self.plugin.switch_to_plugin()
self.raise_()
except AttributeError:
# The widget using the fileswitcher is not a plugin
pass
self.edit.setFocus()
except ValueError:
pass
else:
line_number = self.filtered_symbol_lines[row]
self.goto_line(line_number) | def function[item_selection_changed, parameter[self]]:
constant[List widget item selection change handler.]
variable[row] assign[=] call[name[self].current_row, parameter[]]
if <ast.BoolOp object at 0x7da1b1fa3a60> begin[:]
if <ast.BoolOp object at 0x7da1b1fa2b00> begin[:]
call[name[self].next_row, parameter[]]
if compare[name[self].mode equal[==] name[self].FILE_MODE] begin[:]
<ast.Try object at 0x7da1b1fa3df0> | keyword[def] identifier[item_selection_changed] ( identifier[self] ):
literal[string]
identifier[row] = identifier[self] . identifier[current_row] ()
keyword[if] identifier[self] . identifier[count] () keyword[and] identifier[row] >= literal[int] :
keyword[if] literal[string] keyword[in] identifier[self] . identifier[list] . identifier[currentItem] (). identifier[text] () keyword[and] identifier[row] == literal[int] :
identifier[self] . identifier[next_row] ()
keyword[if] identifier[self] . identifier[mode] == identifier[self] . identifier[FILE_MODE] :
keyword[try] :
identifier[stack_index] = identifier[self] . identifier[paths] . identifier[index] ( identifier[self] . identifier[filtered_path] [ identifier[row] ])
identifier[self] . identifier[plugin] = identifier[self] . identifier[widgets] [ identifier[stack_index] ][ literal[int] ]
identifier[self] . identifier[goto_line] ( identifier[self] . identifier[line_number] )
keyword[try] :
identifier[self] . identifier[plugin] . identifier[switch_to_plugin] ()
identifier[self] . identifier[raise_] ()
keyword[except] identifier[AttributeError] :
keyword[pass]
identifier[self] . identifier[edit] . identifier[setFocus] ()
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[else] :
identifier[line_number] = identifier[self] . identifier[filtered_symbol_lines] [ identifier[row] ]
identifier[self] . identifier[goto_line] ( identifier[line_number] ) | def item_selection_changed(self):
"""List widget item selection change handler."""
row = self.current_row()
if self.count() and row >= 0:
if '</b></big><br>' in self.list.currentItem().text() and row == 0:
self.next_row() # depends on [control=['if'], data=[]]
if self.mode == self.FILE_MODE:
try:
stack_index = self.paths.index(self.filtered_path[row])
self.plugin = self.widgets[stack_index][1]
self.goto_line(self.line_number)
try:
self.plugin.switch_to_plugin()
self.raise_() # depends on [control=['try'], data=[]]
except AttributeError:
# The widget using the fileswitcher is not a plugin
pass # depends on [control=['except'], data=[]]
self.edit.setFocus() # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
line_number = self.filtered_symbol_lines[row]
self.goto_line(line_number) # depends on [control=['if'], data=[]] |
def delete_license(license_id):
"""
Delete a License by ID
"""
response = utils.checked_api_call(pnc_api.licenses, 'delete', id=license_id)
if response:
return utils.format_json(response.content) | def function[delete_license, parameter[license_id]]:
constant[
Delete a License by ID
]
variable[response] assign[=] call[name[utils].checked_api_call, parameter[name[pnc_api].licenses, constant[delete]]]
if name[response] begin[:]
return[call[name[utils].format_json, parameter[name[response].content]]] | keyword[def] identifier[delete_license] ( identifier[license_id] ):
literal[string]
identifier[response] = identifier[utils] . identifier[checked_api_call] ( identifier[pnc_api] . identifier[licenses] , literal[string] , identifier[id] = identifier[license_id] )
keyword[if] identifier[response] :
keyword[return] identifier[utils] . identifier[format_json] ( identifier[response] . identifier[content] ) | def delete_license(license_id):
"""
Delete a License by ID
"""
response = utils.checked_api_call(pnc_api.licenses, 'delete', id=license_id)
if response:
return utils.format_json(response.content) # depends on [control=['if'], data=[]] |
def build_reduce(function: Callable[[Any, Any], Any] = None, *,
init: Any = NONE):
""" Decorator to wrap a function to return a Reduce operator.
:param function: function to be wrapped
:param init: optional initialization for state
"""
_init = init
def _build_reduce(function: Callable[[Any, Any], Any]):
@wraps(function)
def _wrapper(init=NONE) -> Reduce:
init = _init if init is NONE else init
if init is NONE:
raise TypeError('init argument has to be defined')
return Reduce(function, init=init)
return _wrapper
if function:
return _build_reduce(function)
return _build_reduce | def function[build_reduce, parameter[function]]:
constant[ Decorator to wrap a function to return a Reduce operator.
:param function: function to be wrapped
:param init: optional initialization for state
]
variable[_init] assign[=] name[init]
def function[_build_reduce, parameter[function]]:
def function[_wrapper, parameter[init]]:
variable[init] assign[=] <ast.IfExp object at 0x7da1b12f1990>
if compare[name[init] is name[NONE]] begin[:]
<ast.Raise object at 0x7da1b12f2500>
return[call[name[Reduce], parameter[name[function]]]]
return[name[_wrapper]]
if name[function] begin[:]
return[call[name[_build_reduce], parameter[name[function]]]]
return[name[_build_reduce]] | keyword[def] identifier[build_reduce] ( identifier[function] : identifier[Callable] [[ identifier[Any] , identifier[Any] ], identifier[Any] ]= keyword[None] ,*,
identifier[init] : identifier[Any] = identifier[NONE] ):
literal[string]
identifier[_init] = identifier[init]
keyword[def] identifier[_build_reduce] ( identifier[function] : identifier[Callable] [[ identifier[Any] , identifier[Any] ], identifier[Any] ]):
@ identifier[wraps] ( identifier[function] )
keyword[def] identifier[_wrapper] ( identifier[init] = identifier[NONE] )-> identifier[Reduce] :
identifier[init] = identifier[_init] keyword[if] identifier[init] keyword[is] identifier[NONE] keyword[else] identifier[init]
keyword[if] identifier[init] keyword[is] identifier[NONE] :
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[return] identifier[Reduce] ( identifier[function] , identifier[init] = identifier[init] )
keyword[return] identifier[_wrapper]
keyword[if] identifier[function] :
keyword[return] identifier[_build_reduce] ( identifier[function] )
keyword[return] identifier[_build_reduce] | def build_reduce(function: Callable[[Any, Any], Any]=None, *, init: Any=NONE):
""" Decorator to wrap a function to return a Reduce operator.
:param function: function to be wrapped
:param init: optional initialization for state
"""
_init = init
def _build_reduce(function: Callable[[Any, Any], Any]):
@wraps(function)
def _wrapper(init=NONE) -> Reduce:
init = _init if init is NONE else init
if init is NONE:
raise TypeError('init argument has to be defined') # depends on [control=['if'], data=[]]
return Reduce(function, init=init)
return _wrapper
if function:
return _build_reduce(function) # depends on [control=['if'], data=[]]
return _build_reduce |
def persist(self, container: Container, image: str) -> None:
"""
Persists the state of a given container to a BugZoo image on this
server.
Parameters:
container: the container to persist.
image: the name of the Docker image that should be created.
Raises:
ImageAlreadyExists: if the image name is already in use by another
Docker image on this server.
"""
logger_c = logger.getChild(container.uid)
logger_c.debug("Persisting container as a Docker image: %s", image)
try:
docker_container = self.__dockerc[container.uid]
except KeyError:
logger_c.exception("Failed to persist container: container no longer exists.") # noqa: pycodestyle
raise
try:
_ = self.__client_docker.images.get(image)
logger_c.error("Failed to persist container: image, '%s', already exists.", # noqa: pycodestyle
image)
raise ImageAlreadyExists(image)
except docker.errors.ImageNotFound:
pass
cmd = "docker commit {} {}"
cmd = cmd.format(docker_container.id, image)
try:
subprocess.check_output(cmd, shell=True)
except subprocess.CalledProcessError:
logger.exception("Failed to persist container (%s) to image (%s).", # noqa: pycodestyle
container.uid, image)
raise
logger_c.debug("Persisted container as a Docker image: %s", image) | def function[persist, parameter[self, container, image]]:
constant[
Persists the state of a given container to a BugZoo image on this
server.
Parameters:
container: the container to persist.
image: the name of the Docker image that should be created.
Raises:
ImageAlreadyExists: if the image name is already in use by another
Docker image on this server.
]
variable[logger_c] assign[=] call[name[logger].getChild, parameter[name[container].uid]]
call[name[logger_c].debug, parameter[constant[Persisting container as a Docker image: %s], name[image]]]
<ast.Try object at 0x7da1b0ca7010>
<ast.Try object at 0x7da1b0c32e60>
variable[cmd] assign[=] constant[docker commit {} {}]
variable[cmd] assign[=] call[name[cmd].format, parameter[name[docker_container].id, name[image]]]
<ast.Try object at 0x7da1b0c48940>
call[name[logger_c].debug, parameter[constant[Persisted container as a Docker image: %s], name[image]]] | keyword[def] identifier[persist] ( identifier[self] , identifier[container] : identifier[Container] , identifier[image] : identifier[str] )-> keyword[None] :
literal[string]
identifier[logger_c] = identifier[logger] . identifier[getChild] ( identifier[container] . identifier[uid] )
identifier[logger_c] . identifier[debug] ( literal[string] , identifier[image] )
keyword[try] :
identifier[docker_container] = identifier[self] . identifier[__dockerc] [ identifier[container] . identifier[uid] ]
keyword[except] identifier[KeyError] :
identifier[logger_c] . identifier[exception] ( literal[string] )
keyword[raise]
keyword[try] :
identifier[_] = identifier[self] . identifier[__client_docker] . identifier[images] . identifier[get] ( identifier[image] )
identifier[logger_c] . identifier[error] ( literal[string] ,
identifier[image] )
keyword[raise] identifier[ImageAlreadyExists] ( identifier[image] )
keyword[except] identifier[docker] . identifier[errors] . identifier[ImageNotFound] :
keyword[pass]
identifier[cmd] = literal[string]
identifier[cmd] = identifier[cmd] . identifier[format] ( identifier[docker_container] . identifier[id] , identifier[image] )
keyword[try] :
identifier[subprocess] . identifier[check_output] ( identifier[cmd] , identifier[shell] = keyword[True] )
keyword[except] identifier[subprocess] . identifier[CalledProcessError] :
identifier[logger] . identifier[exception] ( literal[string] ,
identifier[container] . identifier[uid] , identifier[image] )
keyword[raise]
identifier[logger_c] . identifier[debug] ( literal[string] , identifier[image] ) | def persist(self, container: Container, image: str) -> None:
"""
Persists the state of a given container to a BugZoo image on this
server.
Parameters:
container: the container to persist.
image: the name of the Docker image that should be created.
Raises:
ImageAlreadyExists: if the image name is already in use by another
Docker image on this server.
"""
logger_c = logger.getChild(container.uid)
logger_c.debug('Persisting container as a Docker image: %s', image)
try:
docker_container = self.__dockerc[container.uid] # depends on [control=['try'], data=[]]
except KeyError:
logger_c.exception('Failed to persist container: container no longer exists.') # noqa: pycodestyle
raise # depends on [control=['except'], data=[]]
try:
_ = self.__client_docker.images.get(image) # noqa: pycodestyle
logger_c.error("Failed to persist container: image, '%s', already exists.", image)
raise ImageAlreadyExists(image) # depends on [control=['try'], data=[]]
except docker.errors.ImageNotFound:
pass # depends on [control=['except'], data=[]]
cmd = 'docker commit {} {}'
cmd = cmd.format(docker_container.id, image)
try:
subprocess.check_output(cmd, shell=True) # depends on [control=['try'], data=[]]
except subprocess.CalledProcessError: # noqa: pycodestyle
logger.exception('Failed to persist container (%s) to image (%s).', container.uid, image)
raise # depends on [control=['except'], data=[]]
logger_c.debug('Persisted container as a Docker image: %s', image) |
def dl_to_rphi_2d(d,l,degree=False,ro=1.,phio=0.):
"""
NAME:
dl_to_rphi_2d
PURPOSE:
convert Galactic longitude and distance to Galactocentric radius and azimuth
INPUT:
d - distance
l - Galactic longitude [rad/deg if degree]
KEYWORDS:
degree= (False): l is in degrees rather than rad
ro= (1) Galactocentric radius of the observer
phio= (0) Galactocentric azimuth of the observer [rad/deg if degree]
OUTPUT:
(R,phi); phi in degree if degree
HISTORY:
2012-01-04 - Written - Bovy (IAS)
"""
scalarOut, listOut= False, False
if isinstance(d,(int,float)):
d= sc.array([d])
scalarOut= True
elif isinstance(d,list):
d= sc.array(d)
listOut= True
if isinstance(l,(int,float)):
l= sc.array([l])
elif isinstance(l,list):
l= sc.array(l)
if degree:
l*= _DEGTORAD
R= sc.sqrt(ro**2.+d**2.-2.*d*ro*sc.cos(l))
phi= sc.arcsin(d/R*sc.sin(l))
indx= (ro/sc.cos(l) < d)*(sc.cos(l) > 0.)
phi[indx]= sc.pi-sc.arcsin(d[indx]/R[indx]*sc.sin(l[indx]))
if degree:
phi/= _DEGTORAD
phi+= phio
if scalarOut:
return (R[0],phi[0])
elif listOut:
return (list(R),list(phi))
else:
return (R,phi) | def function[dl_to_rphi_2d, parameter[d, l, degree, ro, phio]]:
constant[
NAME:
dl_to_rphi_2d
PURPOSE:
convert Galactic longitude and distance to Galactocentric radius and azimuth
INPUT:
d - distance
l - Galactic longitude [rad/deg if degree]
KEYWORDS:
degree= (False): l is in degrees rather than rad
ro= (1) Galactocentric radius of the observer
phio= (0) Galactocentric azimuth of the observer [rad/deg if degree]
OUTPUT:
(R,phi); phi in degree if degree
HISTORY:
2012-01-04 - Written - Bovy (IAS)
]
<ast.Tuple object at 0x7da1b0cf50f0> assign[=] tuple[[<ast.Constant object at 0x7da1b0cf6410>, <ast.Constant object at 0x7da1b0cf7610>]]
if call[name[isinstance], parameter[name[d], tuple[[<ast.Name object at 0x7da1b0cf47c0>, <ast.Name object at 0x7da1b0cf5a20>]]]] begin[:]
variable[d] assign[=] call[name[sc].array, parameter[list[[<ast.Name object at 0x7da1b0cf58d0>]]]]
variable[scalarOut] assign[=] constant[True]
if call[name[isinstance], parameter[name[l], tuple[[<ast.Name object at 0x7da1b0cf5210>, <ast.Name object at 0x7da1b0cf7b20>]]]] begin[:]
variable[l] assign[=] call[name[sc].array, parameter[list[[<ast.Name object at 0x7da1b0cf49a0>]]]]
if name[degree] begin[:]
<ast.AugAssign object at 0x7da1b0cf50c0>
variable[R] assign[=] call[name[sc].sqrt, parameter[binary_operation[binary_operation[binary_operation[name[ro] ** constant[2.0]] + binary_operation[name[d] ** constant[2.0]]] - binary_operation[binary_operation[binary_operation[constant[2.0] * name[d]] * name[ro]] * call[name[sc].cos, parameter[name[l]]]]]]]
variable[phi] assign[=] call[name[sc].arcsin, parameter[binary_operation[binary_operation[name[d] / name[R]] * call[name[sc].sin, parameter[name[l]]]]]]
variable[indx] assign[=] binary_operation[compare[binary_operation[name[ro] / call[name[sc].cos, parameter[name[l]]]] less[<] name[d]] * compare[call[name[sc].cos, parameter[name[l]]] greater[>] constant[0.0]]]
call[name[phi]][name[indx]] assign[=] binary_operation[name[sc].pi - call[name[sc].arcsin, parameter[binary_operation[binary_operation[call[name[d]][name[indx]] / call[name[R]][name[indx]]] * call[name[sc].sin, parameter[call[name[l]][name[indx]]]]]]]]
if name[degree] begin[:]
<ast.AugAssign object at 0x7da18fe90b80>
<ast.AugAssign object at 0x7da18fe919f0>
if name[scalarOut] begin[:]
return[tuple[[<ast.Subscript object at 0x7da18fe908b0>, <ast.Subscript object at 0x7da18fe91780>]]] | keyword[def] identifier[dl_to_rphi_2d] ( identifier[d] , identifier[l] , identifier[degree] = keyword[False] , identifier[ro] = literal[int] , identifier[phio] = literal[int] ):
literal[string]
identifier[scalarOut] , identifier[listOut] = keyword[False] , keyword[False]
keyword[if] identifier[isinstance] ( identifier[d] ,( identifier[int] , identifier[float] )):
identifier[d] = identifier[sc] . identifier[array] ([ identifier[d] ])
identifier[scalarOut] = keyword[True]
keyword[elif] identifier[isinstance] ( identifier[d] , identifier[list] ):
identifier[d] = identifier[sc] . identifier[array] ( identifier[d] )
identifier[listOut] = keyword[True]
keyword[if] identifier[isinstance] ( identifier[l] ,( identifier[int] , identifier[float] )):
identifier[l] = identifier[sc] . identifier[array] ([ identifier[l] ])
keyword[elif] identifier[isinstance] ( identifier[l] , identifier[list] ):
identifier[l] = identifier[sc] . identifier[array] ( identifier[l] )
keyword[if] identifier[degree] :
identifier[l] *= identifier[_DEGTORAD]
identifier[R] = identifier[sc] . identifier[sqrt] ( identifier[ro] ** literal[int] + identifier[d] ** literal[int] - literal[int] * identifier[d] * identifier[ro] * identifier[sc] . identifier[cos] ( identifier[l] ))
identifier[phi] = identifier[sc] . identifier[arcsin] ( identifier[d] / identifier[R] * identifier[sc] . identifier[sin] ( identifier[l] ))
identifier[indx] =( identifier[ro] / identifier[sc] . identifier[cos] ( identifier[l] )< identifier[d] )*( identifier[sc] . identifier[cos] ( identifier[l] )> literal[int] )
identifier[phi] [ identifier[indx] ]= identifier[sc] . identifier[pi] - identifier[sc] . identifier[arcsin] ( identifier[d] [ identifier[indx] ]/ identifier[R] [ identifier[indx] ]* identifier[sc] . identifier[sin] ( identifier[l] [ identifier[indx] ]))
keyword[if] identifier[degree] :
identifier[phi] /= identifier[_DEGTORAD]
identifier[phi] += identifier[phio]
keyword[if] identifier[scalarOut] :
keyword[return] ( identifier[R] [ literal[int] ], identifier[phi] [ literal[int] ])
keyword[elif] identifier[listOut] :
keyword[return] ( identifier[list] ( identifier[R] ), identifier[list] ( identifier[phi] ))
keyword[else] :
keyword[return] ( identifier[R] , identifier[phi] ) | def dl_to_rphi_2d(d, l, degree=False, ro=1.0, phio=0.0):
"""
NAME:
dl_to_rphi_2d
PURPOSE:
convert Galactic longitude and distance to Galactocentric radius and azimuth
INPUT:
d - distance
l - Galactic longitude [rad/deg if degree]
KEYWORDS:
degree= (False): l is in degrees rather than rad
ro= (1) Galactocentric radius of the observer
phio= (0) Galactocentric azimuth of the observer [rad/deg if degree]
OUTPUT:
(R,phi); phi in degree if degree
HISTORY:
2012-01-04 - Written - Bovy (IAS)
"""
(scalarOut, listOut) = (False, False)
if isinstance(d, (int, float)):
d = sc.array([d])
scalarOut = True # depends on [control=['if'], data=[]]
elif isinstance(d, list):
d = sc.array(d)
listOut = True # depends on [control=['if'], data=[]]
if isinstance(l, (int, float)):
l = sc.array([l]) # depends on [control=['if'], data=[]]
elif isinstance(l, list):
l = sc.array(l) # depends on [control=['if'], data=[]]
if degree:
l *= _DEGTORAD # depends on [control=['if'], data=[]]
R = sc.sqrt(ro ** 2.0 + d ** 2.0 - 2.0 * d * ro * sc.cos(l))
phi = sc.arcsin(d / R * sc.sin(l))
indx = (ro / sc.cos(l) < d) * (sc.cos(l) > 0.0)
phi[indx] = sc.pi - sc.arcsin(d[indx] / R[indx] * sc.sin(l[indx]))
if degree:
phi /= _DEGTORAD # depends on [control=['if'], data=[]]
phi += phio
if scalarOut:
return (R[0], phi[0]) # depends on [control=['if'], data=[]]
elif listOut:
return (list(R), list(phi)) # depends on [control=['if'], data=[]]
else:
return (R, phi) |
def guest_start(self, userid):
"""Power on a virtual machine.
:param str userid: the id of the virtual machine to be power on
:returns: None
"""
action = "start guest '%s'" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
self._vmops.guest_start(userid) | def function[guest_start, parameter[self, userid]]:
constant[Power on a virtual machine.
:param str userid: the id of the virtual machine to be power on
:returns: None
]
variable[action] assign[=] binary_operation[constant[start guest '%s'] <ast.Mod object at 0x7da2590d6920> name[userid]]
with call[name[zvmutils].log_and_reraise_sdkbase_error, parameter[name[action]]] begin[:]
call[name[self]._vmops.guest_start, parameter[name[userid]]] | keyword[def] identifier[guest_start] ( identifier[self] , identifier[userid] ):
literal[string]
identifier[action] = literal[string] % identifier[userid]
keyword[with] identifier[zvmutils] . identifier[log_and_reraise_sdkbase_error] ( identifier[action] ):
identifier[self] . identifier[_vmops] . identifier[guest_start] ( identifier[userid] ) | def guest_start(self, userid):
"""Power on a virtual machine.
:param str userid: the id of the virtual machine to be power on
:returns: None
"""
action = "start guest '%s'" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
self._vmops.guest_start(userid) # depends on [control=['with'], data=[]] |
def cross_v3(vec_a, vec_b):
"""Return the crossproduct between vec_a and vec_b."""
return Vec3(vec_a.y * vec_b.z - vec_a.z * vec_b.y,
vec_a.z * vec_b.x - vec_a.x * vec_b.z,
vec_a.x * vec_b.y - vec_a.y * vec_b.x) | def function[cross_v3, parameter[vec_a, vec_b]]:
constant[Return the crossproduct between vec_a and vec_b.]
return[call[name[Vec3], parameter[binary_operation[binary_operation[name[vec_a].y * name[vec_b].z] - binary_operation[name[vec_a].z * name[vec_b].y]], binary_operation[binary_operation[name[vec_a].z * name[vec_b].x] - binary_operation[name[vec_a].x * name[vec_b].z]], binary_operation[binary_operation[name[vec_a].x * name[vec_b].y] - binary_operation[name[vec_a].y * name[vec_b].x]]]]] | keyword[def] identifier[cross_v3] ( identifier[vec_a] , identifier[vec_b] ):
literal[string]
keyword[return] identifier[Vec3] ( identifier[vec_a] . identifier[y] * identifier[vec_b] . identifier[z] - identifier[vec_a] . identifier[z] * identifier[vec_b] . identifier[y] ,
identifier[vec_a] . identifier[z] * identifier[vec_b] . identifier[x] - identifier[vec_a] . identifier[x] * identifier[vec_b] . identifier[z] ,
identifier[vec_a] . identifier[x] * identifier[vec_b] . identifier[y] - identifier[vec_a] . identifier[y] * identifier[vec_b] . identifier[x] ) | def cross_v3(vec_a, vec_b):
"""Return the crossproduct between vec_a and vec_b."""
return Vec3(vec_a.y * vec_b.z - vec_a.z * vec_b.y, vec_a.z * vec_b.x - vec_a.x * vec_b.z, vec_a.x * vec_b.y - vec_a.y * vec_b.x) |
def _import_lua_dependencies(lua, lua_globals):
"""
Imports lua dependencies that are supported by redis lua scripts.
The current implementation is fragile to the target platform and lua version
and may be disabled if these imports are not needed.
Included:
- cjson lib.
Pending:
- base lib.
- table lib.
- string lib.
- math lib.
- debug lib.
- cmsgpack lib.
"""
if sys.platform not in ('darwin', 'windows'):
import ctypes
ctypes.CDLL('liblua5.2.so', mode=ctypes.RTLD_GLOBAL)
try:
lua_globals.cjson = lua.eval('require "cjson"')
except RuntimeError:
raise RuntimeError("cjson not installed") | def function[_import_lua_dependencies, parameter[lua, lua_globals]]:
constant[
Imports lua dependencies that are supported by redis lua scripts.
The current implementation is fragile to the target platform and lua version
and may be disabled if these imports are not needed.
Included:
- cjson lib.
Pending:
- base lib.
- table lib.
- string lib.
- math lib.
- debug lib.
- cmsgpack lib.
]
if compare[name[sys].platform <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da204566dd0>, <ast.Constant object at 0x7da204566560>]]] begin[:]
import module[ctypes]
call[name[ctypes].CDLL, parameter[constant[liblua5.2.so]]]
<ast.Try object at 0x7da204566980> | keyword[def] identifier[_import_lua_dependencies] ( identifier[lua] , identifier[lua_globals] ):
literal[string]
keyword[if] identifier[sys] . identifier[platform] keyword[not] keyword[in] ( literal[string] , literal[string] ):
keyword[import] identifier[ctypes]
identifier[ctypes] . identifier[CDLL] ( literal[string] , identifier[mode] = identifier[ctypes] . identifier[RTLD_GLOBAL] )
keyword[try] :
identifier[lua_globals] . identifier[cjson] = identifier[lua] . identifier[eval] ( literal[string] )
keyword[except] identifier[RuntimeError] :
keyword[raise] identifier[RuntimeError] ( literal[string] ) | def _import_lua_dependencies(lua, lua_globals):
"""
Imports lua dependencies that are supported by redis lua scripts.
The current implementation is fragile to the target platform and lua version
and may be disabled if these imports are not needed.
Included:
- cjson lib.
Pending:
- base lib.
- table lib.
- string lib.
- math lib.
- debug lib.
- cmsgpack lib.
"""
if sys.platform not in ('darwin', 'windows'):
import ctypes
ctypes.CDLL('liblua5.2.so', mode=ctypes.RTLD_GLOBAL) # depends on [control=['if'], data=[]]
try:
lua_globals.cjson = lua.eval('require "cjson"') # depends on [control=['try'], data=[]]
except RuntimeError:
raise RuntimeError('cjson not installed') # depends on [control=['except'], data=[]] |
def create_object(self, subject_id, image_group_id, properties, fmri_data_id=None):
"""Create an experiment object for the subject and image group. Objects
are referenced by their identifier. The reference to a functional data
object is optional.
Raises ValueError if no valid experiment name is given in property list.
Parameters
----------
subject_id : string
Unique identifier of subject
image_group_id : string
Unique identifier of image group
properties : Dictionary
Set of experiment properties. Is required to contain at least the
experiment name
fmri_data_id : string, optional
Unique identifier of functional MRI data object
Returns
-------
ExperimentHandle
Handle for created experiment object in database
"""
# Ensure that experiment name is given in property list.
if not datastore.PROPERTY_NAME in properties:
raise ValueError('missing experiment name')
elif properties[datastore.PROPERTY_NAME] is None:
raise ValueError('invalid experiment name')
# Create a new object identifier.
identifier = str(uuid.uuid4()).replace('-','')
# Create object handle and store it in database before returning it
obj = ExperimentHandle(
identifier,
properties,
subject_id,
image_group_id,
fmri_data_id=fmri_data_id
)
self.insert_object(obj)
return obj | def function[create_object, parameter[self, subject_id, image_group_id, properties, fmri_data_id]]:
constant[Create an experiment object for the subject and image group. Objects
are referenced by their identifier. The reference to a functional data
object is optional.
Raises ValueError if no valid experiment name is given in property list.
Parameters
----------
subject_id : string
Unique identifier of subject
image_group_id : string
Unique identifier of image group
properties : Dictionary
Set of experiment properties. Is required to contain at least the
experiment name
fmri_data_id : string, optional
Unique identifier of functional MRI data object
Returns
-------
ExperimentHandle
Handle for created experiment object in database
]
if <ast.UnaryOp object at 0x7da1b15b02b0> begin[:]
<ast.Raise object at 0x7da1b15b0370>
variable[identifier] assign[=] call[call[name[str], parameter[call[name[uuid].uuid4, parameter[]]]].replace, parameter[constant[-], constant[]]]
variable[obj] assign[=] call[name[ExperimentHandle], parameter[name[identifier], name[properties], name[subject_id], name[image_group_id]]]
call[name[self].insert_object, parameter[name[obj]]]
return[name[obj]] | keyword[def] identifier[create_object] ( identifier[self] , identifier[subject_id] , identifier[image_group_id] , identifier[properties] , identifier[fmri_data_id] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[datastore] . identifier[PROPERTY_NAME] keyword[in] identifier[properties] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[elif] identifier[properties] [ identifier[datastore] . identifier[PROPERTY_NAME] ] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[identifier] = identifier[str] ( identifier[uuid] . identifier[uuid4] ()). identifier[replace] ( literal[string] , literal[string] )
identifier[obj] = identifier[ExperimentHandle] (
identifier[identifier] ,
identifier[properties] ,
identifier[subject_id] ,
identifier[image_group_id] ,
identifier[fmri_data_id] = identifier[fmri_data_id]
)
identifier[self] . identifier[insert_object] ( identifier[obj] )
keyword[return] identifier[obj] | def create_object(self, subject_id, image_group_id, properties, fmri_data_id=None):
"""Create an experiment object for the subject and image group. Objects
are referenced by their identifier. The reference to a functional data
object is optional.
Raises ValueError if no valid experiment name is given in property list.
Parameters
----------
subject_id : string
Unique identifier of subject
image_group_id : string
Unique identifier of image group
properties : Dictionary
Set of experiment properties. Is required to contain at least the
experiment name
fmri_data_id : string, optional
Unique identifier of functional MRI data object
Returns
-------
ExperimentHandle
Handle for created experiment object in database
"""
# Ensure that experiment name is given in property list.
if not datastore.PROPERTY_NAME in properties:
raise ValueError('missing experiment name') # depends on [control=['if'], data=[]]
elif properties[datastore.PROPERTY_NAME] is None:
raise ValueError('invalid experiment name') # depends on [control=['if'], data=[]]
# Create a new object identifier.
identifier = str(uuid.uuid4()).replace('-', '')
# Create object handle and store it in database before returning it
obj = ExperimentHandle(identifier, properties, subject_id, image_group_id, fmri_data_id=fmri_data_id)
self.insert_object(obj)
return obj |
def metrics_by_name_list(names):
"""
Return a dictionary with {metric name: metric value} for all the metrics with the given names.
"""
results = {}
for name in names:
# no lock - a metric could have been removed in the meanwhile
try:
results[name] = get(name)
except InvalidMetricError:
continue
return results | def function[metrics_by_name_list, parameter[names]]:
constant[
Return a dictionary with {metric name: metric value} for all the metrics with the given names.
]
variable[results] assign[=] dictionary[[], []]
for taget[name[name]] in starred[name[names]] begin[:]
<ast.Try object at 0x7da1b0b56770>
return[name[results]] | keyword[def] identifier[metrics_by_name_list] ( identifier[names] ):
literal[string]
identifier[results] ={}
keyword[for] identifier[name] keyword[in] identifier[names] :
keyword[try] :
identifier[results] [ identifier[name] ]= identifier[get] ( identifier[name] )
keyword[except] identifier[InvalidMetricError] :
keyword[continue]
keyword[return] identifier[results] | def metrics_by_name_list(names):
"""
Return a dictionary with {metric name: metric value} for all the metrics with the given names.
"""
results = {}
for name in names:
# no lock - a metric could have been removed in the meanwhile
try:
results[name] = get(name) # depends on [control=['try'], data=[]]
except InvalidMetricError:
continue # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['name']]
return results |
def deepcopy(self, line_strings=None, shape=None):
"""
Create a deep copy of the LineStringsOnImage object.
Parameters
----------
line_strings : None \
or list of imgaug.augmentables.lines.LineString, optional
List of line strings on the image.
If not ``None``, then the ``line_strings`` attribute of the copied
object will be set to this value.
shape : None or tuple of int or ndarray, optional
The shape of the image on which the objects are placed.
Either an image with shape ``(H,W,[C])`` or a tuple denoting
such an image shape.
If not ``None``, then the ``shape`` attribute of the copied object
will be set to this value.
Returns
-------
imgaug.augmentables.lines.LineStringsOnImage
Deep copy.
"""
lss = self.line_strings if line_strings is None else line_strings
shape = self.shape if shape is None else shape
return LineStringsOnImage(
line_strings=[ls.deepcopy() for ls in lss],
shape=tuple(shape)) | def function[deepcopy, parameter[self, line_strings, shape]]:
constant[
Create a deep copy of the LineStringsOnImage object.
Parameters
----------
line_strings : None or list of imgaug.augmentables.lines.LineString, optional
List of line strings on the image.
If not ``None``, then the ``line_strings`` attribute of the copied
object will be set to this value.
shape : None or tuple of int or ndarray, optional
The shape of the image on which the objects are placed.
Either an image with shape ``(H,W,[C])`` or a tuple denoting
such an image shape.
If not ``None``, then the ``shape`` attribute of the copied object
will be set to this value.
Returns
-------
imgaug.augmentables.lines.LineStringsOnImage
Deep copy.
]
variable[lss] assign[=] <ast.IfExp object at 0x7da1b015a470>
variable[shape] assign[=] <ast.IfExp object at 0x7da1b0037a90>
return[call[name[LineStringsOnImage], parameter[]]] | keyword[def] identifier[deepcopy] ( identifier[self] , identifier[line_strings] = keyword[None] , identifier[shape] = keyword[None] ):
literal[string]
identifier[lss] = identifier[self] . identifier[line_strings] keyword[if] identifier[line_strings] keyword[is] keyword[None] keyword[else] identifier[line_strings]
identifier[shape] = identifier[self] . identifier[shape] keyword[if] identifier[shape] keyword[is] keyword[None] keyword[else] identifier[shape]
keyword[return] identifier[LineStringsOnImage] (
identifier[line_strings] =[ identifier[ls] . identifier[deepcopy] () keyword[for] identifier[ls] keyword[in] identifier[lss] ],
identifier[shape] = identifier[tuple] ( identifier[shape] )) | def deepcopy(self, line_strings=None, shape=None):
"""
Create a deep copy of the LineStringsOnImage object.
Parameters
----------
line_strings : None or list of imgaug.augmentables.lines.LineString, optional
List of line strings on the image.
If not ``None``, then the ``line_strings`` attribute of the copied
object will be set to this value.
shape : None or tuple of int or ndarray, optional
The shape of the image on which the objects are placed.
Either an image with shape ``(H,W,[C])`` or a tuple denoting
such an image shape.
If not ``None``, then the ``shape`` attribute of the copied object
will be set to this value.
Returns
-------
imgaug.augmentables.lines.LineStringsOnImage
Deep copy.
"""
lss = self.line_strings if line_strings is None else line_strings
shape = self.shape if shape is None else shape
return LineStringsOnImage(line_strings=[ls.deepcopy() for ls in lss], shape=tuple(shape)) |
def urlQueryParser(url, querydict):
"""
parse a url query
"""
address_parse = urlparse(url)
return urlunparse(address_parse._replace(query=urlencode(querydict))) | def function[urlQueryParser, parameter[url, querydict]]:
constant[
parse a url query
]
variable[address_parse] assign[=] call[name[urlparse], parameter[name[url]]]
return[call[name[urlunparse], parameter[call[name[address_parse]._replace, parameter[]]]]] | keyword[def] identifier[urlQueryParser] ( identifier[url] , identifier[querydict] ):
literal[string]
identifier[address_parse] = identifier[urlparse] ( identifier[url] )
keyword[return] identifier[urlunparse] ( identifier[address_parse] . identifier[_replace] ( identifier[query] = identifier[urlencode] ( identifier[querydict] ))) | def urlQueryParser(url, querydict):
"""
parse a url query
"""
address_parse = urlparse(url)
return urlunparse(address_parse._replace(query=urlencode(querydict))) |
def get_signature_size(msg):
"""
Returns a signature size for the input
:param msg:
:return:
"""
if isinstance(msg, (TxinGen, TxinToScript, TxinToScriptHash)):
return 0
elif isinstance(msg, TxinToKey):
return len(msg.key_offsets)
else:
raise ValueError('Unknown tx in') | def function[get_signature_size, parameter[msg]]:
constant[
Returns a signature size for the input
:param msg:
:return:
]
if call[name[isinstance], parameter[name[msg], tuple[[<ast.Name object at 0x7da1b247cb20>, <ast.Name object at 0x7da1b247e080>, <ast.Name object at 0x7da1b247d630>]]]] begin[:]
return[constant[0]] | keyword[def] identifier[get_signature_size] ( identifier[msg] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[msg] ,( identifier[TxinGen] , identifier[TxinToScript] , identifier[TxinToScriptHash] )):
keyword[return] literal[int]
keyword[elif] identifier[isinstance] ( identifier[msg] , identifier[TxinToKey] ):
keyword[return] identifier[len] ( identifier[msg] . identifier[key_offsets] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] ) | def get_signature_size(msg):
"""
Returns a signature size for the input
:param msg:
:return:
"""
if isinstance(msg, (TxinGen, TxinToScript, TxinToScriptHash)):
return 0 # depends on [control=['if'], data=[]]
elif isinstance(msg, TxinToKey):
return len(msg.key_offsets) # depends on [control=['if'], data=[]]
else:
raise ValueError('Unknown tx in') |
def __generate_tree(self, top, src, resources, models, ctrls, views, utils):
"""Creates directories and packages"""
res = self.__mkdir(top)
for fn in (src, models, ctrls, views, utils): res = self.__mkpkg(fn) or res
res = self.__mkdir(resources) or res
res = self.__mkdir(os.path.join(resources, "ui", "builder")) or res
res = self.__mkdir(os.path.join(resources, "ui", "styles")) or res
res = self.__mkdir(os.path.join(resources, "external")) or res
return res | def function[__generate_tree, parameter[self, top, src, resources, models, ctrls, views, utils]]:
constant[Creates directories and packages]
variable[res] assign[=] call[name[self].__mkdir, parameter[name[top]]]
for taget[name[fn]] in starred[tuple[[<ast.Name object at 0x7da1b142baf0>, <ast.Name object at 0x7da1b1429570>, <ast.Name object at 0x7da1b1429300>, <ast.Name object at 0x7da1b14292a0>, <ast.Name object at 0x7da1b142b700>]]] begin[:]
variable[res] assign[=] <ast.BoolOp object at 0x7da1b142b820>
variable[res] assign[=] <ast.BoolOp object at 0x7da1b142b8e0>
variable[res] assign[=] <ast.BoolOp object at 0x7da1b142bf70>
variable[res] assign[=] <ast.BoolOp object at 0x7da1b142bcd0>
variable[res] assign[=] <ast.BoolOp object at 0x7da1b142a260>
return[name[res]] | keyword[def] identifier[__generate_tree] ( identifier[self] , identifier[top] , identifier[src] , identifier[resources] , identifier[models] , identifier[ctrls] , identifier[views] , identifier[utils] ):
literal[string]
identifier[res] = identifier[self] . identifier[__mkdir] ( identifier[top] )
keyword[for] identifier[fn] keyword[in] ( identifier[src] , identifier[models] , identifier[ctrls] , identifier[views] , identifier[utils] ): identifier[res] = identifier[self] . identifier[__mkpkg] ( identifier[fn] ) keyword[or] identifier[res]
identifier[res] = identifier[self] . identifier[__mkdir] ( identifier[resources] ) keyword[or] identifier[res]
identifier[res] = identifier[self] . identifier[__mkdir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[resources] , literal[string] , literal[string] )) keyword[or] identifier[res]
identifier[res] = identifier[self] . identifier[__mkdir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[resources] , literal[string] , literal[string] )) keyword[or] identifier[res]
identifier[res] = identifier[self] . identifier[__mkdir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[resources] , literal[string] )) keyword[or] identifier[res]
keyword[return] identifier[res] | def __generate_tree(self, top, src, resources, models, ctrls, views, utils):
"""Creates directories and packages"""
res = self.__mkdir(top)
for fn in (src, models, ctrls, views, utils):
res = self.__mkpkg(fn) or res # depends on [control=['for'], data=['fn']]
res = self.__mkdir(resources) or res
res = self.__mkdir(os.path.join(resources, 'ui', 'builder')) or res
res = self.__mkdir(os.path.join(resources, 'ui', 'styles')) or res
res = self.__mkdir(os.path.join(resources, 'external')) or res
return res |
def rmap(fn, coll, is_iterable=None):
"""A recursive map
:param fn: a function
:param coll: a list
:param isiterable: a predicate function determining whether a value is iterable.
:returns: a list
>>> rmap(lambda x: 2*x, [1, 2, [3, 4]])
[2, 4, [6, 8]]
"""
result = []
for x in coll:
if is_iterable is None:
is_iterable = isiterable
if is_iterable(x):
y = rmap(fn, x)
else:
y = fn(x)
result.append(y)
return result | def function[rmap, parameter[fn, coll, is_iterable]]:
constant[A recursive map
:param fn: a function
:param coll: a list
:param isiterable: a predicate function determining whether a value is iterable.
:returns: a list
>>> rmap(lambda x: 2*x, [1, 2, [3, 4]])
[2, 4, [6, 8]]
]
variable[result] assign[=] list[[]]
for taget[name[x]] in starred[name[coll]] begin[:]
if compare[name[is_iterable] is constant[None]] begin[:]
variable[is_iterable] assign[=] name[isiterable]
if call[name[is_iterable], parameter[name[x]]] begin[:]
variable[y] assign[=] call[name[rmap], parameter[name[fn], name[x]]]
call[name[result].append, parameter[name[y]]]
return[name[result]] | keyword[def] identifier[rmap] ( identifier[fn] , identifier[coll] , identifier[is_iterable] = keyword[None] ):
literal[string]
identifier[result] =[]
keyword[for] identifier[x] keyword[in] identifier[coll] :
keyword[if] identifier[is_iterable] keyword[is] keyword[None] :
identifier[is_iterable] = identifier[isiterable]
keyword[if] identifier[is_iterable] ( identifier[x] ):
identifier[y] = identifier[rmap] ( identifier[fn] , identifier[x] )
keyword[else] :
identifier[y] = identifier[fn] ( identifier[x] )
identifier[result] . identifier[append] ( identifier[y] )
keyword[return] identifier[result] | def rmap(fn, coll, is_iterable=None):
"""A recursive map
:param fn: a function
:param coll: a list
:param isiterable: a predicate function determining whether a value is iterable.
:returns: a list
>>> rmap(lambda x: 2*x, [1, 2, [3, 4]])
[2, 4, [6, 8]]
"""
result = []
for x in coll:
if is_iterable is None:
is_iterable = isiterable # depends on [control=['if'], data=['is_iterable']]
if is_iterable(x):
y = rmap(fn, x) # depends on [control=['if'], data=[]]
else:
y = fn(x)
result.append(y) # depends on [control=['for'], data=['x']]
return result |
async def peer_delete(self, *, dc=None, address):
"""Remove the server with given address from the Raft configuration
Parameters:
dc (str): Specify datacenter that will be used.
Defaults to the agent's local datacenter.
address (str): "IP:port" of the server to remove.
Returns:
bool: ``True`` on success
There are rare cases where a peer may be left behind in the Raft
configuration even though the server is no longer present and known
to the cluster. This endpoint can be used to remove the failed server
so that it is no longer affects the Raft quorum.
"""
address = extract_attr(address, keys=["Address"])
params = {"dc": dc, "address": address}
response = await self._api.delete("/v1/operator/raft/peer",
params=params)
return response.status < 400 | <ast.AsyncFunctionDef object at 0x7da2043444c0> | keyword[async] keyword[def] identifier[peer_delete] ( identifier[self] ,*, identifier[dc] = keyword[None] , identifier[address] ):
literal[string]
identifier[address] = identifier[extract_attr] ( identifier[address] , identifier[keys] =[ literal[string] ])
identifier[params] ={ literal[string] : identifier[dc] , literal[string] : identifier[address] }
identifier[response] = keyword[await] identifier[self] . identifier[_api] . identifier[delete] ( literal[string] ,
identifier[params] = identifier[params] )
keyword[return] identifier[response] . identifier[status] < literal[int] | async def peer_delete(self, *, dc=None, address):
"""Remove the server with given address from the Raft configuration
Parameters:
dc (str): Specify datacenter that will be used.
Defaults to the agent's local datacenter.
address (str): "IP:port" of the server to remove.
Returns:
bool: ``True`` on success
There are rare cases where a peer may be left behind in the Raft
configuration even though the server is no longer present and known
to the cluster. This endpoint can be used to remove the failed server
so that it is no longer affects the Raft quorum.
"""
address = extract_attr(address, keys=['Address'])
params = {'dc': dc, 'address': address}
response = await self._api.delete('/v1/operator/raft/peer', params=params)
return response.status < 400 |
def getclosurevars(func):
"""
Get the mapping of free variables to their current values.
Returns a named tuple of dicts mapping the current nonlocal, global
and builtin references as seen by the body of the function. A final
set of unbound names that could not be resolved is also provided.
Note:
Modified function from the Python 3.5 inspect standard library module
Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
2011, 2012, 2013, 2014, 2015, 2016, 2017 Python Software Foundation; All Rights
Reserved"
See also py-cloud-compute-cannon/NOTICES.
"""
if inspect.ismethod(func):
func = func.__func__
elif not inspect.isroutine(func):
raise TypeError("'{!r}' is not a Python function".format(func))
# AMVMOD: deal with python 2 builtins that don't define these
code = getattr(func, '__code__', None)
closure = getattr(func, '__closure__', None)
co_names = getattr(code, 'co_names', ())
glb = getattr(func, '__globals__', {})
# Nonlocal references are named in co_freevars and resolved
# by looking them up in __closure__ by positional index
if closure is None:
nonlocal_vars = {}
else:
nonlocal_vars = {var: cell.cell_contents
for var, cell in zip(code.co_freevars, func.__closure__)}
# Global and builtin references are named in co_names and resolved
# by looking them up in __globals__ or __builtins__
global_ns = glb
builtin_ns = global_ns.get("__builtins__", builtins.__dict__)
if inspect.ismodule(builtin_ns):
builtin_ns = builtin_ns.__dict__
global_vars = {}
builtin_vars = {}
unbound_names = set()
for name in co_names:
if name in ("None", "True", "False"):
# Because these used to be builtins instead of keywords, they
# may still show up as name references. We ignore them.
continue
try:
global_vars[name] = global_ns[name]
except KeyError:
try:
builtin_vars[name] = builtin_ns[name]
except KeyError:
unbound_names.add(name)
return {'nonlocal': nonlocal_vars,
'global': global_vars,
'builtin': builtin_vars,
'unbound': unbound_names} | def function[getclosurevars, parameter[func]]:
constant[
Get the mapping of free variables to their current values.
Returns a named tuple of dicts mapping the current nonlocal, global
and builtin references as seen by the body of the function. A final
set of unbound names that could not be resolved is also provided.
Note:
Modified function from the Python 3.5 inspect standard library module
Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
2011, 2012, 2013, 2014, 2015, 2016, 2017 Python Software Foundation; All Rights
Reserved"
See also py-cloud-compute-cannon/NOTICES.
]
if call[name[inspect].ismethod, parameter[name[func]]] begin[:]
variable[func] assign[=] name[func].__func__
variable[code] assign[=] call[name[getattr], parameter[name[func], constant[__code__], constant[None]]]
variable[closure] assign[=] call[name[getattr], parameter[name[func], constant[__closure__], constant[None]]]
variable[co_names] assign[=] call[name[getattr], parameter[name[code], constant[co_names], tuple[[]]]]
variable[glb] assign[=] call[name[getattr], parameter[name[func], constant[__globals__], dictionary[[], []]]]
if compare[name[closure] is constant[None]] begin[:]
variable[nonlocal_vars] assign[=] dictionary[[], []]
variable[global_ns] assign[=] name[glb]
variable[builtin_ns] assign[=] call[name[global_ns].get, parameter[constant[__builtins__], name[builtins].__dict__]]
if call[name[inspect].ismodule, parameter[name[builtin_ns]]] begin[:]
variable[builtin_ns] assign[=] name[builtin_ns].__dict__
variable[global_vars] assign[=] dictionary[[], []]
variable[builtin_vars] assign[=] dictionary[[], []]
variable[unbound_names] assign[=] call[name[set], parameter[]]
for taget[name[name]] in starred[name[co_names]] begin[:]
if compare[name[name] in tuple[[<ast.Constant object at 0x7da20e963370>, <ast.Constant object at 0x7da20e962470>, <ast.Constant object at 0x7da20e963820>]]] begin[:]
continue
<ast.Try object at 0x7da20e9626e0>
return[dictionary[[<ast.Constant object at 0x7da20e960be0>, <ast.Constant object at 0x7da20e961510>, <ast.Constant object at 0x7da20e961ff0>, <ast.Constant object at 0x7da20e961e70>], [<ast.Name object at 0x7da20e962890>, <ast.Name object at 0x7da20e963640>, <ast.Name object at 0x7da20e963a30>, <ast.Name object at 0x7da20e961f60>]]] | keyword[def] identifier[getclosurevars] ( identifier[func] ):
literal[string]
keyword[if] identifier[inspect] . identifier[ismethod] ( identifier[func] ):
identifier[func] = identifier[func] . identifier[__func__]
keyword[elif] keyword[not] identifier[inspect] . identifier[isroutine] ( identifier[func] ):
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[func] ))
identifier[code] = identifier[getattr] ( identifier[func] , literal[string] , keyword[None] )
identifier[closure] = identifier[getattr] ( identifier[func] , literal[string] , keyword[None] )
identifier[co_names] = identifier[getattr] ( identifier[code] , literal[string] ,())
identifier[glb] = identifier[getattr] ( identifier[func] , literal[string] ,{})
keyword[if] identifier[closure] keyword[is] keyword[None] :
identifier[nonlocal_vars] ={}
keyword[else] :
identifier[nonlocal_vars] ={ identifier[var] : identifier[cell] . identifier[cell_contents]
keyword[for] identifier[var] , identifier[cell] keyword[in] identifier[zip] ( identifier[code] . identifier[co_freevars] , identifier[func] . identifier[__closure__] )}
identifier[global_ns] = identifier[glb]
identifier[builtin_ns] = identifier[global_ns] . identifier[get] ( literal[string] , identifier[builtins] . identifier[__dict__] )
keyword[if] identifier[inspect] . identifier[ismodule] ( identifier[builtin_ns] ):
identifier[builtin_ns] = identifier[builtin_ns] . identifier[__dict__]
identifier[global_vars] ={}
identifier[builtin_vars] ={}
identifier[unbound_names] = identifier[set] ()
keyword[for] identifier[name] keyword[in] identifier[co_names] :
keyword[if] identifier[name] keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[continue]
keyword[try] :
identifier[global_vars] [ identifier[name] ]= identifier[global_ns] [ identifier[name] ]
keyword[except] identifier[KeyError] :
keyword[try] :
identifier[builtin_vars] [ identifier[name] ]= identifier[builtin_ns] [ identifier[name] ]
keyword[except] identifier[KeyError] :
identifier[unbound_names] . identifier[add] ( identifier[name] )
keyword[return] { literal[string] : identifier[nonlocal_vars] ,
literal[string] : identifier[global_vars] ,
literal[string] : identifier[builtin_vars] ,
literal[string] : identifier[unbound_names] } | def getclosurevars(func):
"""
Get the mapping of free variables to their current values.
Returns a named tuple of dicts mapping the current nonlocal, global
and builtin references as seen by the body of the function. A final
set of unbound names that could not be resolved is also provided.
Note:
Modified function from the Python 3.5 inspect standard library module
Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
2011, 2012, 2013, 2014, 2015, 2016, 2017 Python Software Foundation; All Rights
Reserved"
See also py-cloud-compute-cannon/NOTICES.
"""
if inspect.ismethod(func):
func = func.__func__ # depends on [control=['if'], data=[]]
elif not inspect.isroutine(func):
raise TypeError("'{!r}' is not a Python function".format(func)) # depends on [control=['if'], data=[]]
# AMVMOD: deal with python 2 builtins that don't define these
code = getattr(func, '__code__', None)
closure = getattr(func, '__closure__', None)
co_names = getattr(code, 'co_names', ())
glb = getattr(func, '__globals__', {})
# Nonlocal references are named in co_freevars and resolved
# by looking them up in __closure__ by positional index
if closure is None:
nonlocal_vars = {} # depends on [control=['if'], data=[]]
else:
nonlocal_vars = {var: cell.cell_contents for (var, cell) in zip(code.co_freevars, func.__closure__)}
# Global and builtin references are named in co_names and resolved
# by looking them up in __globals__ or __builtins__
global_ns = glb
builtin_ns = global_ns.get('__builtins__', builtins.__dict__)
if inspect.ismodule(builtin_ns):
builtin_ns = builtin_ns.__dict__ # depends on [control=['if'], data=[]]
global_vars = {}
builtin_vars = {}
unbound_names = set()
for name in co_names:
if name in ('None', 'True', 'False'):
# Because these used to be builtins instead of keywords, they
# may still show up as name references. We ignore them.
continue # depends on [control=['if'], data=[]]
try:
global_vars[name] = global_ns[name] # depends on [control=['try'], data=[]]
except KeyError:
try:
builtin_vars[name] = builtin_ns[name] # depends on [control=['try'], data=[]]
except KeyError:
unbound_names.add(name) # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['name']]
return {'nonlocal': nonlocal_vars, 'global': global_vars, 'builtin': builtin_vars, 'unbound': unbound_names} |
def select(self, filter_by=None):
"""
Parameters
----------
filter_by: callable, default None
Callable must take one argument (a record of table), and return True to keep record, or False to skip it.
Example : .select(lambda x: x.name == "my_name").
If None, records are not filtered.
Returns
-------
Queryset instance, containing all selected records.
"""
records = self._records.values() if filter_by is None else filter(filter_by, self._records.values())
return Queryset(self, records=records) | def function[select, parameter[self, filter_by]]:
constant[
Parameters
----------
filter_by: callable, default None
Callable must take one argument (a record of table), and return True to keep record, or False to skip it.
Example : .select(lambda x: x.name == "my_name").
If None, records are not filtered.
Returns
-------
Queryset instance, containing all selected records.
]
variable[records] assign[=] <ast.IfExp object at 0x7da1b02a5e70>
return[call[name[Queryset], parameter[name[self]]]] | keyword[def] identifier[select] ( identifier[self] , identifier[filter_by] = keyword[None] ):
literal[string]
identifier[records] = identifier[self] . identifier[_records] . identifier[values] () keyword[if] identifier[filter_by] keyword[is] keyword[None] keyword[else] identifier[filter] ( identifier[filter_by] , identifier[self] . identifier[_records] . identifier[values] ())
keyword[return] identifier[Queryset] ( identifier[self] , identifier[records] = identifier[records] ) | def select(self, filter_by=None):
"""
Parameters
----------
filter_by: callable, default None
Callable must take one argument (a record of table), and return True to keep record, or False to skip it.
Example : .select(lambda x: x.name == "my_name").
If None, records are not filtered.
Returns
-------
Queryset instance, containing all selected records.
"""
records = self._records.values() if filter_by is None else filter(filter_by, self._records.values())
return Queryset(self, records=records) |
def user_config_dir(appname, roaming=True):
"""Return full path to the user-specific config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"roaming" (boolean, default True) can be set False to not use the
Windows roaming appdata directory. That means that for users on a
Windows network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: same as user_data_dir
Unix: ~/.config/<AppName>
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
That means, by deafult "~/.config/<AppName>".
"""
if WINDOWS:
path = user_data_dir(appname, roaming=roaming)
elif sys.platform == "darwin":
path = user_data_dir(appname)
else:
path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
path = os.path.join(path, appname)
return path | def function[user_config_dir, parameter[appname, roaming]]:
constant[Return full path to the user-specific config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"roaming" (boolean, default True) can be set False to not use the
Windows roaming appdata directory. That means that for users on a
Windows network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: same as user_data_dir
Unix: ~/.config/<AppName>
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
That means, by deafult "~/.config/<AppName>".
]
if name[WINDOWS] begin[:]
variable[path] assign[=] call[name[user_data_dir], parameter[name[appname]]]
return[name[path]] | keyword[def] identifier[user_config_dir] ( identifier[appname] , identifier[roaming] = keyword[True] ):
literal[string]
keyword[if] identifier[WINDOWS] :
identifier[path] = identifier[user_data_dir] ( identifier[appname] , identifier[roaming] = identifier[roaming] )
keyword[elif] identifier[sys] . identifier[platform] == literal[string] :
identifier[path] = identifier[user_data_dir] ( identifier[appname] )
keyword[else] :
identifier[path] = identifier[os] . identifier[getenv] ( literal[string] , identifier[os] . identifier[path] . identifier[expanduser] ( literal[string] ))
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[appname] )
keyword[return] identifier[path] | def user_config_dir(appname, roaming=True):
"""Return full path to the user-specific config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"roaming" (boolean, default True) can be set False to not use the
Windows roaming appdata directory. That means that for users on a
Windows network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: same as user_data_dir
Unix: ~/.config/<AppName>
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
That means, by deafult "~/.config/<AppName>".
"""
if WINDOWS:
path = user_data_dir(appname, roaming=roaming) # depends on [control=['if'], data=[]]
elif sys.platform == 'darwin':
path = user_data_dir(appname) # depends on [control=['if'], data=[]]
else:
path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))
path = os.path.join(path, appname)
return path |
def get_bgcolor(self, index):
"""Background color depending on value"""
if index.column() == 0:
color = QColor(Qt.lightGray)
color.setAlphaF(.05)
elif index.column() < 3:
color = QColor(Qt.lightGray)
color.setAlphaF(.2)
else:
color = QColor(Qt.lightGray)
color.setAlphaF(.3)
return color | def function[get_bgcolor, parameter[self, index]]:
constant[Background color depending on value]
if compare[call[name[index].column, parameter[]] equal[==] constant[0]] begin[:]
variable[color] assign[=] call[name[QColor], parameter[name[Qt].lightGray]]
call[name[color].setAlphaF, parameter[constant[0.05]]]
return[name[color]] | keyword[def] identifier[get_bgcolor] ( identifier[self] , identifier[index] ):
literal[string]
keyword[if] identifier[index] . identifier[column] ()== literal[int] :
identifier[color] = identifier[QColor] ( identifier[Qt] . identifier[lightGray] )
identifier[color] . identifier[setAlphaF] ( literal[int] )
keyword[elif] identifier[index] . identifier[column] ()< literal[int] :
identifier[color] = identifier[QColor] ( identifier[Qt] . identifier[lightGray] )
identifier[color] . identifier[setAlphaF] ( literal[int] )
keyword[else] :
identifier[color] = identifier[QColor] ( identifier[Qt] . identifier[lightGray] )
identifier[color] . identifier[setAlphaF] ( literal[int] )
keyword[return] identifier[color] | def get_bgcolor(self, index):
"""Background color depending on value"""
if index.column() == 0:
color = QColor(Qt.lightGray)
color.setAlphaF(0.05) # depends on [control=['if'], data=[]]
elif index.column() < 3:
color = QColor(Qt.lightGray)
color.setAlphaF(0.2) # depends on [control=['if'], data=[]]
else:
color = QColor(Qt.lightGray)
color.setAlphaF(0.3)
return color |
def parse_schema_files(files):
"""
Parse a list of SQL files and return a dictionary of valid schema
files where each key is a valid schema file and the corresponding value is
a tuple containing the source and the target schema.
"""
f_dict = {}
for f in files:
root, ext = os.path.splitext(f)
if ext != ".sql":
continue
vto, vfrom = os.path.split(root)
vto = os.path.split(vto)[1]
if is_schema(vto) and is_schema(vfrom):
f_dict[f] = (vfrom, vto)
return f_dict | def function[parse_schema_files, parameter[files]]:
constant[
Parse a list of SQL files and return a dictionary of valid schema
files where each key is a valid schema file and the corresponding value is
a tuple containing the source and the target schema.
]
variable[f_dict] assign[=] dictionary[[], []]
for taget[name[f]] in starred[name[files]] begin[:]
<ast.Tuple object at 0x7da1b0fea0e0> assign[=] call[name[os].path.splitext, parameter[name[f]]]
if compare[name[ext] not_equal[!=] constant[.sql]] begin[:]
continue
<ast.Tuple object at 0x7da1b0feaf80> assign[=] call[name[os].path.split, parameter[name[root]]]
variable[vto] assign[=] call[call[name[os].path.split, parameter[name[vto]]]][constant[1]]
if <ast.BoolOp object at 0x7da18f09f430> begin[:]
call[name[f_dict]][name[f]] assign[=] tuple[[<ast.Name object at 0x7da20c7c9390>, <ast.Name object at 0x7da20c7c9ba0>]]
return[name[f_dict]] | keyword[def] identifier[parse_schema_files] ( identifier[files] ):
literal[string]
identifier[f_dict] ={}
keyword[for] identifier[f] keyword[in] identifier[files] :
identifier[root] , identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[f] )
keyword[if] identifier[ext] != literal[string] :
keyword[continue]
identifier[vto] , identifier[vfrom] = identifier[os] . identifier[path] . identifier[split] ( identifier[root] )
identifier[vto] = identifier[os] . identifier[path] . identifier[split] ( identifier[vto] )[ literal[int] ]
keyword[if] identifier[is_schema] ( identifier[vto] ) keyword[and] identifier[is_schema] ( identifier[vfrom] ):
identifier[f_dict] [ identifier[f] ]=( identifier[vfrom] , identifier[vto] )
keyword[return] identifier[f_dict] | def parse_schema_files(files):
"""
Parse a list of SQL files and return a dictionary of valid schema
files where each key is a valid schema file and the corresponding value is
a tuple containing the source and the target schema.
"""
f_dict = {}
for f in files:
(root, ext) = os.path.splitext(f)
if ext != '.sql':
continue # depends on [control=['if'], data=[]]
(vto, vfrom) = os.path.split(root)
vto = os.path.split(vto)[1]
if is_schema(vto) and is_schema(vfrom):
f_dict[f] = (vfrom, vto) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']]
return f_dict |
def available_categories(cls, user, products=AllProducts):
''' Returns the categories available to the user. Specify `products` if
you want to restrict to just the categories that hold the specified
products, otherwise it'll do all. '''
# STOPGAP -- this needs to be elsewhere tbqh
from .product import ProductController
if products is AllProducts:
products = inventory.Product.objects.all().select_related(
"category",
)
available = ProductController.available_products(
user,
products=products,
)
return sorted(set(i.category for i in available), key=attrgetter("order")) | def function[available_categories, parameter[cls, user, products]]:
constant[ Returns the categories available to the user. Specify `products` if
you want to restrict to just the categories that hold the specified
products, otherwise it'll do all. ]
from relative_module[product] import module[ProductController]
if compare[name[products] is name[AllProducts]] begin[:]
variable[products] assign[=] call[call[name[inventory].Product.objects.all, parameter[]].select_related, parameter[constant[category]]]
variable[available] assign[=] call[name[ProductController].available_products, parameter[name[user]]]
return[call[name[sorted], parameter[call[name[set], parameter[<ast.GeneratorExp object at 0x7da207f03070>]]]]] | keyword[def] identifier[available_categories] ( identifier[cls] , identifier[user] , identifier[products] = identifier[AllProducts] ):
literal[string]
keyword[from] . identifier[product] keyword[import] identifier[ProductController]
keyword[if] identifier[products] keyword[is] identifier[AllProducts] :
identifier[products] = identifier[inventory] . identifier[Product] . identifier[objects] . identifier[all] (). identifier[select_related] (
literal[string] ,
)
identifier[available] = identifier[ProductController] . identifier[available_products] (
identifier[user] ,
identifier[products] = identifier[products] ,
)
keyword[return] identifier[sorted] ( identifier[set] ( identifier[i] . identifier[category] keyword[for] identifier[i] keyword[in] identifier[available] ), identifier[key] = identifier[attrgetter] ( literal[string] )) | def available_categories(cls, user, products=AllProducts):
""" Returns the categories available to the user. Specify `products` if
you want to restrict to just the categories that hold the specified
products, otherwise it'll do all. """
# STOPGAP -- this needs to be elsewhere tbqh
from .product import ProductController
if products is AllProducts:
products = inventory.Product.objects.all().select_related('category') # depends on [control=['if'], data=['products']]
available = ProductController.available_products(user, products=products)
return sorted(set((i.category for i in available)), key=attrgetter('order')) |
def incr(self, key, delta=1):
"""Increments the specified key value by the specified value.
:param str|unicode key:
:param int delta:
:rtype: bool
"""
return uwsgi.cache_inc(key, delta, self.timeout, self.name) | def function[incr, parameter[self, key, delta]]:
constant[Increments the specified key value by the specified value.
:param str|unicode key:
:param int delta:
:rtype: bool
]
return[call[name[uwsgi].cache_inc, parameter[name[key], name[delta], name[self].timeout, name[self].name]]] | keyword[def] identifier[incr] ( identifier[self] , identifier[key] , identifier[delta] = literal[int] ):
literal[string]
keyword[return] identifier[uwsgi] . identifier[cache_inc] ( identifier[key] , identifier[delta] , identifier[self] . identifier[timeout] , identifier[self] . identifier[name] ) | def incr(self, key, delta=1):
"""Increments the specified key value by the specified value.
:param str|unicode key:
:param int delta:
:rtype: bool
"""
return uwsgi.cache_inc(key, delta, self.timeout, self.name) |
def _basic_post(self, url, data=None):
"""
Because basically every post request is the same
Parameters
----------
url : str
data : str, optional
Returns
-------
requests.Response
"""
_url = urljoin(self.base_url, url)
r = self.session.post(_url, data=data, headers=self.headers, timeout=5)
r.raise_for_status()
return r | def function[_basic_post, parameter[self, url, data]]:
constant[
Because basically every post request is the same
Parameters
----------
url : str
data : str, optional
Returns
-------
requests.Response
]
variable[_url] assign[=] call[name[urljoin], parameter[name[self].base_url, name[url]]]
variable[r] assign[=] call[name[self].session.post, parameter[name[_url]]]
call[name[r].raise_for_status, parameter[]]
return[name[r]] | keyword[def] identifier[_basic_post] ( identifier[self] , identifier[url] , identifier[data] = keyword[None] ):
literal[string]
identifier[_url] = identifier[urljoin] ( identifier[self] . identifier[base_url] , identifier[url] )
identifier[r] = identifier[self] . identifier[session] . identifier[post] ( identifier[_url] , identifier[data] = identifier[data] , identifier[headers] = identifier[self] . identifier[headers] , identifier[timeout] = literal[int] )
identifier[r] . identifier[raise_for_status] ()
keyword[return] identifier[r] | def _basic_post(self, url, data=None):
"""
Because basically every post request is the same
Parameters
----------
url : str
data : str, optional
Returns
-------
requests.Response
"""
_url = urljoin(self.base_url, url)
r = self.session.post(_url, data=data, headers=self.headers, timeout=5)
r.raise_for_status()
return r |
def get(self, name, hint):
"""Get the canonical name for a symbol.
This is the default implementation.
If the user specifies a name,
the user-specified name will be used.
When user does not specify a name, we automatically generate a
name based on the hint string.
Parameters
----------
name : str or None
The name specified by the user.
hint : str
A hint string, which can be used to generate name.
Returns
-------
full_name : str
A canonical name for the symbol.
"""
if name:
return name
if hint not in self._counter:
self._counter[hint] = 0
name = '%s%d' % (hint, self._counter[hint])
self._counter[hint] += 1
return name | def function[get, parameter[self, name, hint]]:
constant[Get the canonical name for a symbol.
This is the default implementation.
If the user specifies a name,
the user-specified name will be used.
When user does not specify a name, we automatically generate a
name based on the hint string.
Parameters
----------
name : str or None
The name specified by the user.
hint : str
A hint string, which can be used to generate name.
Returns
-------
full_name : str
A canonical name for the symbol.
]
if name[name] begin[:]
return[name[name]]
if compare[name[hint] <ast.NotIn object at 0x7da2590d7190> name[self]._counter] begin[:]
call[name[self]._counter][name[hint]] assign[=] constant[0]
variable[name] assign[=] binary_operation[constant[%s%d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b2016650>, <ast.Subscript object at 0x7da1b2015840>]]]
<ast.AugAssign object at 0x7da1b2014cd0>
return[name[name]] | keyword[def] identifier[get] ( identifier[self] , identifier[name] , identifier[hint] ):
literal[string]
keyword[if] identifier[name] :
keyword[return] identifier[name]
keyword[if] identifier[hint] keyword[not] keyword[in] identifier[self] . identifier[_counter] :
identifier[self] . identifier[_counter] [ identifier[hint] ]= literal[int]
identifier[name] = literal[string] %( identifier[hint] , identifier[self] . identifier[_counter] [ identifier[hint] ])
identifier[self] . identifier[_counter] [ identifier[hint] ]+= literal[int]
keyword[return] identifier[name] | def get(self, name, hint):
"""Get the canonical name for a symbol.
This is the default implementation.
If the user specifies a name,
the user-specified name will be used.
When user does not specify a name, we automatically generate a
name based on the hint string.
Parameters
----------
name : str or None
The name specified by the user.
hint : str
A hint string, which can be used to generate name.
Returns
-------
full_name : str
A canonical name for the symbol.
"""
if name:
return name # depends on [control=['if'], data=[]]
if hint not in self._counter:
self._counter[hint] = 0 # depends on [control=['if'], data=['hint']]
name = '%s%d' % (hint, self._counter[hint])
self._counter[hint] += 1
return name |
def set_coordsys(self, coordsys):
"""
Transform coordinate system
# TODO: needs expert attention
"""
if coordsys in self.coordsys_mapping:
self.coordsys = self.coordsys_mapping[coordsys]
else:
self.coordsys = coordsys | def function[set_coordsys, parameter[self, coordsys]]:
constant[
Transform coordinate system
# TODO: needs expert attention
]
if compare[name[coordsys] in name[self].coordsys_mapping] begin[:]
name[self].coordsys assign[=] call[name[self].coordsys_mapping][name[coordsys]] | keyword[def] identifier[set_coordsys] ( identifier[self] , identifier[coordsys] ):
literal[string]
keyword[if] identifier[coordsys] keyword[in] identifier[self] . identifier[coordsys_mapping] :
identifier[self] . identifier[coordsys] = identifier[self] . identifier[coordsys_mapping] [ identifier[coordsys] ]
keyword[else] :
identifier[self] . identifier[coordsys] = identifier[coordsys] | def set_coordsys(self, coordsys):
"""
Transform coordinate system
# TODO: needs expert attention
"""
if coordsys in self.coordsys_mapping:
self.coordsys = self.coordsys_mapping[coordsys] # depends on [control=['if'], data=['coordsys']]
else:
self.coordsys = coordsys |
def modify_tag(self, name, description=None, servers=None, new_name=None):
"""
PUT /tag/name. Returns a new Tag object based on the API response.
"""
res = self._modify_tag(name, description, servers, new_name)
return Tag(cloud_manager=self, **res['tag']) | def function[modify_tag, parameter[self, name, description, servers, new_name]]:
constant[
PUT /tag/name. Returns a new Tag object based on the API response.
]
variable[res] assign[=] call[name[self]._modify_tag, parameter[name[name], name[description], name[servers], name[new_name]]]
return[call[name[Tag], parameter[]]] | keyword[def] identifier[modify_tag] ( identifier[self] , identifier[name] , identifier[description] = keyword[None] , identifier[servers] = keyword[None] , identifier[new_name] = keyword[None] ):
literal[string]
identifier[res] = identifier[self] . identifier[_modify_tag] ( identifier[name] , identifier[description] , identifier[servers] , identifier[new_name] )
keyword[return] identifier[Tag] ( identifier[cloud_manager] = identifier[self] ,** identifier[res] [ literal[string] ]) | def modify_tag(self, name, description=None, servers=None, new_name=None):
"""
PUT /tag/name. Returns a new Tag object based on the API response.
"""
res = self._modify_tag(name, description, servers, new_name)
return Tag(cloud_manager=self, **res['tag']) |
def notify(self, value):
"""
Increment or decrement the value, according to the given value's sign
The value should be an integer, an attempt to cast it to integer will be made
"""
value = int(value)
with self.lock:
self.value += value | def function[notify, parameter[self, value]]:
constant[
Increment or decrement the value, according to the given value's sign
The value should be an integer, an attempt to cast it to integer will be made
]
variable[value] assign[=] call[name[int], parameter[name[value]]]
with name[self].lock begin[:]
<ast.AugAssign object at 0x7da1b0b54d30> | keyword[def] identifier[notify] ( identifier[self] , identifier[value] ):
literal[string]
identifier[value] = identifier[int] ( identifier[value] )
keyword[with] identifier[self] . identifier[lock] :
identifier[self] . identifier[value] += identifier[value] | def notify(self, value):
"""
Increment or decrement the value, according to the given value's sign
The value should be an integer, an attempt to cast it to integer will be made
"""
value = int(value)
with self.lock:
self.value += value # depends on [control=['with'], data=[]] |
def pipe():
"""Return the optimum pipe implementation for the capabilities of the active system."""
try:
from os import pipe
return pipe()
except:
pipe = Pipe()
return pipe.reader_fd, pipe.writer_fd | def function[pipe, parameter[]]:
constant[Return the optimum pipe implementation for the capabilities of the active system.]
<ast.Try object at 0x7da1b236ad40> | keyword[def] identifier[pipe] ():
literal[string]
keyword[try] :
keyword[from] identifier[os] keyword[import] identifier[pipe]
keyword[return] identifier[pipe] ()
keyword[except] :
identifier[pipe] = identifier[Pipe] ()
keyword[return] identifier[pipe] . identifier[reader_fd] , identifier[pipe] . identifier[writer_fd] | def pipe():
"""Return the optimum pipe implementation for the capabilities of the active system."""
try:
from os import pipe
return pipe() # depends on [control=['try'], data=[]]
except:
pipe = Pipe()
return (pipe.reader_fd, pipe.writer_fd) # depends on [control=['except'], data=[]] |
def update_metadata(self, params):
""" Generic method for a resource's Update Metadata endpoint.
Example endpoints:
* `Update Device Metadata <https://m2x.att.com/developer/documentation/v2/device#Update-Device-Metadata>`_
* `Update Distribution Metadata <https://m2x.att.com/developer/documentation/v2/distribution#Update-Distribution-Metadata>`_
* `Update Collection Metadata <https://m2x.att.com/developer/documentation/v2/collections#Update-Collection-Metadata>`_
:param params: The metadata being updated
:return: The API response, see M2X API docs for details
:rtype: dict
:raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request
"""
return self.api.put(self.metadata_path(), data=params) | def function[update_metadata, parameter[self, params]]:
constant[ Generic method for a resource's Update Metadata endpoint.
Example endpoints:
* `Update Device Metadata <https://m2x.att.com/developer/documentation/v2/device#Update-Device-Metadata>`_
* `Update Distribution Metadata <https://m2x.att.com/developer/documentation/v2/distribution#Update-Distribution-Metadata>`_
* `Update Collection Metadata <https://m2x.att.com/developer/documentation/v2/collections#Update-Collection-Metadata>`_
:param params: The metadata being updated
:return: The API response, see M2X API docs for details
:rtype: dict
:raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request
]
return[call[name[self].api.put, parameter[call[name[self].metadata_path, parameter[]]]]] | keyword[def] identifier[update_metadata] ( identifier[self] , identifier[params] ):
literal[string]
keyword[return] identifier[self] . identifier[api] . identifier[put] ( identifier[self] . identifier[metadata_path] (), identifier[data] = identifier[params] ) | def update_metadata(self, params):
""" Generic method for a resource's Update Metadata endpoint.
Example endpoints:
* `Update Device Metadata <https://m2x.att.com/developer/documentation/v2/device#Update-Device-Metadata>`_
* `Update Distribution Metadata <https://m2x.att.com/developer/documentation/v2/distribution#Update-Distribution-Metadata>`_
* `Update Collection Metadata <https://m2x.att.com/developer/documentation/v2/collections#Update-Collection-Metadata>`_
:param params: The metadata being updated
:return: The API response, see M2X API docs for details
:rtype: dict
:raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request
"""
return self.api.put(self.metadata_path(), data=params) |
def date_struct(year, month, day, tz = "UTC"):
"""
Given year, month and day numeric values and a timezone
convert to structured date object
"""
ymdtz = (year, month, day, tz)
if None in ymdtz:
#logger.debug("a year, month, day or tz value was empty: %s" % str(ymdtz))
return None # return early if we have a bad value
try:
return time.strptime("%s-%s-%s %s" % ymdtz, "%Y-%m-%d %Z")
except(TypeError, ValueError):
#logger.debug("date failed to convert: %s" % str(ymdtz))
pass | def function[date_struct, parameter[year, month, day, tz]]:
constant[
Given year, month and day numeric values and a timezone
convert to structured date object
]
variable[ymdtz] assign[=] tuple[[<ast.Name object at 0x7da18eb571f0>, <ast.Name object at 0x7da18eb572e0>, <ast.Name object at 0x7da18eb56bf0>, <ast.Name object at 0x7da18eb54520>]]
if compare[constant[None] in name[ymdtz]] begin[:]
return[constant[None]]
<ast.Try object at 0x7da18eb556c0> | keyword[def] identifier[date_struct] ( identifier[year] , identifier[month] , identifier[day] , identifier[tz] = literal[string] ):
literal[string]
identifier[ymdtz] =( identifier[year] , identifier[month] , identifier[day] , identifier[tz] )
keyword[if] keyword[None] keyword[in] identifier[ymdtz] :
keyword[return] keyword[None]
keyword[try] :
keyword[return] identifier[time] . identifier[strptime] ( literal[string] % identifier[ymdtz] , literal[string] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[pass] | def date_struct(year, month, day, tz='UTC'):
"""
Given year, month and day numeric values and a timezone
convert to structured date object
"""
ymdtz = (year, month, day, tz)
if None in ymdtz:
#logger.debug("a year, month, day or tz value was empty: %s" % str(ymdtz))
return None # return early if we have a bad value # depends on [control=['if'], data=[]]
try:
return time.strptime('%s-%s-%s %s' % ymdtz, '%Y-%m-%d %Z') # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
#logger.debug("date failed to convert: %s" % str(ymdtz))
pass # depends on [control=['except'], data=[]] |
def get_dir(self):
"""
Choose a working directory dialog.
Called by self.get_dm_and_wd.
"""
if "-WD" in sys.argv and self.FIRST_RUN:
ind = sys.argv.index('-WD')
self.WD = os.path.abspath(sys.argv[ind+1])
os.chdir(self.WD)
self.WD = os.getcwd()
self.dir_path.SetValue(self.WD)
else:
self.on_change_dir_button(None)
#self.WD = os.getcwd()
self.FIRST_RUN = False | def function[get_dir, parameter[self]]:
constant[
Choose a working directory dialog.
Called by self.get_dm_and_wd.
]
if <ast.BoolOp object at 0x7da1b039ae90> begin[:]
variable[ind] assign[=] call[name[sys].argv.index, parameter[constant[-WD]]]
name[self].WD assign[=] call[name[os].path.abspath, parameter[call[name[sys].argv][binary_operation[name[ind] + constant[1]]]]]
call[name[os].chdir, parameter[name[self].WD]]
name[self].WD assign[=] call[name[os].getcwd, parameter[]]
call[name[self].dir_path.SetValue, parameter[name[self].WD]]
name[self].FIRST_RUN assign[=] constant[False] | keyword[def] identifier[get_dir] ( identifier[self] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[sys] . identifier[argv] keyword[and] identifier[self] . identifier[FIRST_RUN] :
identifier[ind] = identifier[sys] . identifier[argv] . identifier[index] ( literal[string] )
identifier[self] . identifier[WD] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[sys] . identifier[argv] [ identifier[ind] + literal[int] ])
identifier[os] . identifier[chdir] ( identifier[self] . identifier[WD] )
identifier[self] . identifier[WD] = identifier[os] . identifier[getcwd] ()
identifier[self] . identifier[dir_path] . identifier[SetValue] ( identifier[self] . identifier[WD] )
keyword[else] :
identifier[self] . identifier[on_change_dir_button] ( keyword[None] )
identifier[self] . identifier[FIRST_RUN] = keyword[False] | def get_dir(self):
"""
Choose a working directory dialog.
Called by self.get_dm_and_wd.
"""
if '-WD' in sys.argv and self.FIRST_RUN:
ind = sys.argv.index('-WD')
self.WD = os.path.abspath(sys.argv[ind + 1])
os.chdir(self.WD)
self.WD = os.getcwd()
self.dir_path.SetValue(self.WD) # depends on [control=['if'], data=[]]
else:
self.on_change_dir_button(None)
#self.WD = os.getcwd()
self.FIRST_RUN = False |
def enable_tracing(self, thread_trace_func=None):
'''
Enables tracing.
If in regular mode (tracing), will set the tracing function to the tracing
function for this thread -- by default it's `PyDB.trace_dispatch`, but after
`PyDB.enable_tracing` is called with a `thread_trace_func`, the given function will
be the default for the given thread.
'''
if self.frame_eval_func is not None:
self.frame_eval_func()
pydevd_tracing.SetTrace(self.dummy_trace_dispatch)
return
if thread_trace_func is None:
thread_trace_func = self.get_thread_local_trace_func()
else:
self._local_thread_trace_func.thread_trace_func = thread_trace_func
pydevd_tracing.SetTrace(thread_trace_func) | def function[enable_tracing, parameter[self, thread_trace_func]]:
constant[
Enables tracing.
If in regular mode (tracing), will set the tracing function to the tracing
function for this thread -- by default it's `PyDB.trace_dispatch`, but after
`PyDB.enable_tracing` is called with a `thread_trace_func`, the given function will
be the default for the given thread.
]
if compare[name[self].frame_eval_func is_not constant[None]] begin[:]
call[name[self].frame_eval_func, parameter[]]
call[name[pydevd_tracing].SetTrace, parameter[name[self].dummy_trace_dispatch]]
return[None]
if compare[name[thread_trace_func] is constant[None]] begin[:]
variable[thread_trace_func] assign[=] call[name[self].get_thread_local_trace_func, parameter[]]
call[name[pydevd_tracing].SetTrace, parameter[name[thread_trace_func]]] | keyword[def] identifier[enable_tracing] ( identifier[self] , identifier[thread_trace_func] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[frame_eval_func] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[frame_eval_func] ()
identifier[pydevd_tracing] . identifier[SetTrace] ( identifier[self] . identifier[dummy_trace_dispatch] )
keyword[return]
keyword[if] identifier[thread_trace_func] keyword[is] keyword[None] :
identifier[thread_trace_func] = identifier[self] . identifier[get_thread_local_trace_func] ()
keyword[else] :
identifier[self] . identifier[_local_thread_trace_func] . identifier[thread_trace_func] = identifier[thread_trace_func]
identifier[pydevd_tracing] . identifier[SetTrace] ( identifier[thread_trace_func] ) | def enable_tracing(self, thread_trace_func=None):
"""
Enables tracing.
If in regular mode (tracing), will set the tracing function to the tracing
function for this thread -- by default it's `PyDB.trace_dispatch`, but after
`PyDB.enable_tracing` is called with a `thread_trace_func`, the given function will
be the default for the given thread.
"""
if self.frame_eval_func is not None:
self.frame_eval_func()
pydevd_tracing.SetTrace(self.dummy_trace_dispatch)
return # depends on [control=['if'], data=[]]
if thread_trace_func is None:
thread_trace_func = self.get_thread_local_trace_func() # depends on [control=['if'], data=['thread_trace_func']]
else:
self._local_thread_trace_func.thread_trace_func = thread_trace_func
pydevd_tracing.SetTrace(thread_trace_func) |
def exponentialRDD(sc, mean, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the Exponential
distribution with the input mean.
:param sc: SparkContext used to create the RDD.
:param mean: Mean, or 1 / lambda, for the Exponential distribution.
:param size: Size of the RDD.
:param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
:param seed: Random seed (default: a random long integer).
:return: RDD of float comprised of i.i.d. samples ~ Exp(mean).
>>> mean = 2.0
>>> x = RandomRDDs.exponentialRDD(sc, mean, 1000, seed=2)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - mean) < 0.5
True
>>> from math import sqrt
>>> abs(stats.stdev() - sqrt(mean)) < 0.5
True
"""
return callMLlibFunc("exponentialRDD", sc._jsc, float(mean), size, numPartitions, seed) | def function[exponentialRDD, parameter[sc, mean, size, numPartitions, seed]]:
constant[
Generates an RDD comprised of i.i.d. samples from the Exponential
distribution with the input mean.
:param sc: SparkContext used to create the RDD.
:param mean: Mean, or 1 / lambda, for the Exponential distribution.
:param size: Size of the RDD.
:param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
:param seed: Random seed (default: a random long integer).
:return: RDD of float comprised of i.i.d. samples ~ Exp(mean).
>>> mean = 2.0
>>> x = RandomRDDs.exponentialRDD(sc, mean, 1000, seed=2)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - mean) < 0.5
True
>>> from math import sqrt
>>> abs(stats.stdev() - sqrt(mean)) < 0.5
True
]
return[call[name[callMLlibFunc], parameter[constant[exponentialRDD], name[sc]._jsc, call[name[float], parameter[name[mean]]], name[size], name[numPartitions], name[seed]]]] | keyword[def] identifier[exponentialRDD] ( identifier[sc] , identifier[mean] , identifier[size] , identifier[numPartitions] = keyword[None] , identifier[seed] = keyword[None] ):
literal[string]
keyword[return] identifier[callMLlibFunc] ( literal[string] , identifier[sc] . identifier[_jsc] , identifier[float] ( identifier[mean] ), identifier[size] , identifier[numPartitions] , identifier[seed] ) | def exponentialRDD(sc, mean, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the Exponential
distribution with the input mean.
:param sc: SparkContext used to create the RDD.
:param mean: Mean, or 1 / lambda, for the Exponential distribution.
:param size: Size of the RDD.
:param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
:param seed: Random seed (default: a random long integer).
:return: RDD of float comprised of i.i.d. samples ~ Exp(mean).
>>> mean = 2.0
>>> x = RandomRDDs.exponentialRDD(sc, mean, 1000, seed=2)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - mean) < 0.5
True
>>> from math import sqrt
>>> abs(stats.stdev() - sqrt(mean)) < 0.5
True
"""
return callMLlibFunc('exponentialRDD', sc._jsc, float(mean), size, numPartitions, seed) |
def get_primitives_paths():
"""Get the list of folders where the primitives will be looked for.
This list will include the value of any `entry_point` named `jsons_path` published under
the name `mlprimitives`.
An example of such an entry point would be::
entry_points = {
'mlprimitives': [
'jsons_path=some_module:SOME_VARIABLE'
]
}
where the module `some_module` contains a variable such as::
SOME_VARIABLE = os.path.join(os.path.dirname(__file__), 'jsons')
Returns:
list:
The list of folders.
"""
primitives_paths = list()
entry_points = pkg_resources.iter_entry_points('mlprimitives')
for entry_point in entry_points:
if entry_point.name == 'jsons_path':
path = entry_point.load()
primitives_paths.append(path)
return _PRIMITIVES_PATHS + primitives_paths | def function[get_primitives_paths, parameter[]]:
constant[Get the list of folders where the primitives will be looked for.
This list will include the value of any `entry_point` named `jsons_path` published under
the name `mlprimitives`.
An example of such an entry point would be::
entry_points = {
'mlprimitives': [
'jsons_path=some_module:SOME_VARIABLE'
]
}
where the module `some_module` contains a variable such as::
SOME_VARIABLE = os.path.join(os.path.dirname(__file__), 'jsons')
Returns:
list:
The list of folders.
]
variable[primitives_paths] assign[=] call[name[list], parameter[]]
variable[entry_points] assign[=] call[name[pkg_resources].iter_entry_points, parameter[constant[mlprimitives]]]
for taget[name[entry_point]] in starred[name[entry_points]] begin[:]
if compare[name[entry_point].name equal[==] constant[jsons_path]] begin[:]
variable[path] assign[=] call[name[entry_point].load, parameter[]]
call[name[primitives_paths].append, parameter[name[path]]]
return[binary_operation[name[_PRIMITIVES_PATHS] + name[primitives_paths]]] | keyword[def] identifier[get_primitives_paths] ():
literal[string]
identifier[primitives_paths] = identifier[list] ()
identifier[entry_points] = identifier[pkg_resources] . identifier[iter_entry_points] ( literal[string] )
keyword[for] identifier[entry_point] keyword[in] identifier[entry_points] :
keyword[if] identifier[entry_point] . identifier[name] == literal[string] :
identifier[path] = identifier[entry_point] . identifier[load] ()
identifier[primitives_paths] . identifier[append] ( identifier[path] )
keyword[return] identifier[_PRIMITIVES_PATHS] + identifier[primitives_paths] | def get_primitives_paths():
"""Get the list of folders where the primitives will be looked for.
This list will include the value of any `entry_point` named `jsons_path` published under
the name `mlprimitives`.
An example of such an entry point would be::
entry_points = {
'mlprimitives': [
'jsons_path=some_module:SOME_VARIABLE'
]
}
where the module `some_module` contains a variable such as::
SOME_VARIABLE = os.path.join(os.path.dirname(__file__), 'jsons')
Returns:
list:
The list of folders.
"""
primitives_paths = list()
entry_points = pkg_resources.iter_entry_points('mlprimitives')
for entry_point in entry_points:
if entry_point.name == 'jsons_path':
path = entry_point.load()
primitives_paths.append(path) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['entry_point']]
return _PRIMITIVES_PATHS + primitives_paths |
def set_chebyshev_approximators(self, deg_forward=50, deg_backwards=200):
r'''Method to derive and set coefficients for chebyshev polynomial
function approximation of the height-volume and volume-height
relationship.
A single set of chebyshev coefficients is used for the entire height-
volume and volume-height relationships respectively.
The forward relationship, `V_from_h`, requires
far fewer coefficients in its fit than the reverse to obtain the same
relative accuracy.
Optionally, deg_forward or deg_backwards can be set to None to try to
automatically fit the series to machine precision.
Parameters
----------
deg_forward : int, optional
The degree of the chebyshev polynomial to be created for the
`V_from_h` curve, [-]
deg_backwards : int, optional
The degree of the chebyshev polynomial to be created for the
`h_from_V` curve, [-]
'''
from fluids.optional.pychebfun import Chebfun
to_fit = lambda h: self.V_from_h(h, 'full')
# These high-degree polynomials cannot safety be evaluated using Horner's methods
# chebval is 2.5x as slow but 100% required; around 40 coefficients results are junk
self.c_forward = Chebfun.from_function(np.vectorize(to_fit),
[0.0, self.h_max], N=deg_forward).coefficients().tolist()
self.V_from_h_cheb = lambda x : chebval((2.0*x-self.h_max)/(self.h_max), self.c_forward)
to_fit = lambda h: self.h_from_V(h, 'brenth')
self.c_backward = Chebfun.from_function(np.vectorize(to_fit), [0.0, self.V_total], N=deg_backwards).coefficients().tolist()
self.h_from_V_cheb = lambda x : chebval((2.0*x-self.V_total)/(self.V_total), self.c_backward)
self.chebyshev = True | def function[set_chebyshev_approximators, parameter[self, deg_forward, deg_backwards]]:
constant[Method to derive and set coefficients for chebyshev polynomial
function approximation of the height-volume and volume-height
relationship.
A single set of chebyshev coefficients is used for the entire height-
volume and volume-height relationships respectively.
The forward relationship, `V_from_h`, requires
far fewer coefficients in its fit than the reverse to obtain the same
relative accuracy.
Optionally, deg_forward or deg_backwards can be set to None to try to
automatically fit the series to machine precision.
Parameters
----------
deg_forward : int, optional
The degree of the chebyshev polynomial to be created for the
`V_from_h` curve, [-]
deg_backwards : int, optional
The degree of the chebyshev polynomial to be created for the
`h_from_V` curve, [-]
]
from relative_module[fluids.optional.pychebfun] import module[Chebfun]
variable[to_fit] assign[=] <ast.Lambda object at 0x7da1b12cb460>
name[self].c_forward assign[=] call[call[call[name[Chebfun].from_function, parameter[call[name[np].vectorize, parameter[name[to_fit]]], list[[<ast.Constant object at 0x7da1b12ca7d0>, <ast.Attribute object at 0x7da1b12cb850>]]]].coefficients, parameter[]].tolist, parameter[]]
name[self].V_from_h_cheb assign[=] <ast.Lambda object at 0x7da1b12ca8f0>
variable[to_fit] assign[=] <ast.Lambda object at 0x7da1b12ca800>
name[self].c_backward assign[=] call[call[call[name[Chebfun].from_function, parameter[call[name[np].vectorize, parameter[name[to_fit]]], list[[<ast.Constant object at 0x7da1b12c8130>, <ast.Attribute object at 0x7da1b12c8190>]]]].coefficients, parameter[]].tolist, parameter[]]
name[self].h_from_V_cheb assign[=] <ast.Lambda object at 0x7da1b12cb3d0>
name[self].chebyshev assign[=] constant[True] | keyword[def] identifier[set_chebyshev_approximators] ( identifier[self] , identifier[deg_forward] = literal[int] , identifier[deg_backwards] = literal[int] ):
literal[string]
keyword[from] identifier[fluids] . identifier[optional] . identifier[pychebfun] keyword[import] identifier[Chebfun]
identifier[to_fit] = keyword[lambda] identifier[h] : identifier[self] . identifier[V_from_h] ( identifier[h] , literal[string] )
identifier[self] . identifier[c_forward] = identifier[Chebfun] . identifier[from_function] ( identifier[np] . identifier[vectorize] ( identifier[to_fit] ),
[ literal[int] , identifier[self] . identifier[h_max] ], identifier[N] = identifier[deg_forward] ). identifier[coefficients] (). identifier[tolist] ()
identifier[self] . identifier[V_from_h_cheb] = keyword[lambda] identifier[x] : identifier[chebval] (( literal[int] * identifier[x] - identifier[self] . identifier[h_max] )/( identifier[self] . identifier[h_max] ), identifier[self] . identifier[c_forward] )
identifier[to_fit] = keyword[lambda] identifier[h] : identifier[self] . identifier[h_from_V] ( identifier[h] , literal[string] )
identifier[self] . identifier[c_backward] = identifier[Chebfun] . identifier[from_function] ( identifier[np] . identifier[vectorize] ( identifier[to_fit] ),[ literal[int] , identifier[self] . identifier[V_total] ], identifier[N] = identifier[deg_backwards] ). identifier[coefficients] (). identifier[tolist] ()
identifier[self] . identifier[h_from_V_cheb] = keyword[lambda] identifier[x] : identifier[chebval] (( literal[int] * identifier[x] - identifier[self] . identifier[V_total] )/( identifier[self] . identifier[V_total] ), identifier[self] . identifier[c_backward] )
identifier[self] . identifier[chebyshev] = keyword[True] | def set_chebyshev_approximators(self, deg_forward=50, deg_backwards=200):
"""Method to derive and set coefficients for chebyshev polynomial
function approximation of the height-volume and volume-height
relationship.
A single set of chebyshev coefficients is used for the entire height-
volume and volume-height relationships respectively.
The forward relationship, `V_from_h`, requires
far fewer coefficients in its fit than the reverse to obtain the same
relative accuracy.
Optionally, deg_forward or deg_backwards can be set to None to try to
automatically fit the series to machine precision.
Parameters
----------
deg_forward : int, optional
The degree of the chebyshev polynomial to be created for the
`V_from_h` curve, [-]
deg_backwards : int, optional
The degree of the chebyshev polynomial to be created for the
`h_from_V` curve, [-]
"""
from fluids.optional.pychebfun import Chebfun
to_fit = lambda h: self.V_from_h(h, 'full') # These high-degree polynomials cannot safety be evaluated using Horner's methods
# chebval is 2.5x as slow but 100% required; around 40 coefficients results are junk
self.c_forward = Chebfun.from_function(np.vectorize(to_fit), [0.0, self.h_max], N=deg_forward).coefficients().tolist()
self.V_from_h_cheb = lambda x: chebval((2.0 * x - self.h_max) / self.h_max, self.c_forward)
to_fit = lambda h: self.h_from_V(h, 'brenth')
self.c_backward = Chebfun.from_function(np.vectorize(to_fit), [0.0, self.V_total], N=deg_backwards).coefficients().tolist()
self.h_from_V_cheb = lambda x: chebval((2.0 * x - self.V_total) / self.V_total, self.c_backward)
self.chebyshev = True |
def _create_placeholders(self, n_features, n_classes):
"""Create the TensorFlow placeholders for the model.
:param n_features: number of features of the first layer
:param n_classes: number of classes
:return: self
"""
self.input_data = tf.placeholder(
tf.float32, [None, n_features], name='x-input')
self.input_labels = tf.placeholder(
tf.float32, [None, n_classes], name='y-input')
self.keep_prob = tf.placeholder(
tf.float32, name='keep-probs') | def function[_create_placeholders, parameter[self, n_features, n_classes]]:
constant[Create the TensorFlow placeholders for the model.
:param n_features: number of features of the first layer
:param n_classes: number of classes
:return: self
]
name[self].input_data assign[=] call[name[tf].placeholder, parameter[name[tf].float32, list[[<ast.Constant object at 0x7da1b0781ff0>, <ast.Name object at 0x7da1b0780eb0>]]]]
name[self].input_labels assign[=] call[name[tf].placeholder, parameter[name[tf].float32, list[[<ast.Constant object at 0x7da1b07801f0>, <ast.Name object at 0x7da1b0780670>]]]]
name[self].keep_prob assign[=] call[name[tf].placeholder, parameter[name[tf].float32]] | keyword[def] identifier[_create_placeholders] ( identifier[self] , identifier[n_features] , identifier[n_classes] ):
literal[string]
identifier[self] . identifier[input_data] = identifier[tf] . identifier[placeholder] (
identifier[tf] . identifier[float32] ,[ keyword[None] , identifier[n_features] ], identifier[name] = literal[string] )
identifier[self] . identifier[input_labels] = identifier[tf] . identifier[placeholder] (
identifier[tf] . identifier[float32] ,[ keyword[None] , identifier[n_classes] ], identifier[name] = literal[string] )
identifier[self] . identifier[keep_prob] = identifier[tf] . identifier[placeholder] (
identifier[tf] . identifier[float32] , identifier[name] = literal[string] ) | def _create_placeholders(self, n_features, n_classes):
"""Create the TensorFlow placeholders for the model.
:param n_features: number of features of the first layer
:param n_classes: number of classes
:return: self
"""
self.input_data = tf.placeholder(tf.float32, [None, n_features], name='x-input')
self.input_labels = tf.placeholder(tf.float32, [None, n_classes], name='y-input')
self.keep_prob = tf.placeholder(tf.float32, name='keep-probs') |
async def make_response(self, result: ResponseReturnValue) -> Response:
"""Make a Response from the result of the route handler.
The result itself can either be:
- A Response object (or subclass).
- A tuple of a ResponseValue and a header dictionary.
- A tuple of a ResponseValue, status code and a header dictionary.
A ResponseValue is either a Response object (or subclass) or a str.
"""
status_or_headers = None
headers = None
status = None
if isinstance(result, tuple):
value, status_or_headers, headers = result + (None,) * (3 - len(result))
else:
value = result
if value is None:
raise TypeError('The response value returned by the view function cannot be None')
if isinstance(status_or_headers, (dict, list)):
headers = status_or_headers
status = None
elif status_or_headers is not None:
status = status_or_headers
if not isinstance(value, Response):
response = self.response_class( # type: ignore
value, timeout=self.config['RESPONSE_TIMEOUT'],
)
else:
response = value
if status is not None:
response.status_code = status # type: ignore
if headers is not None:
response.headers.update(headers) # type: ignore
return response | <ast.AsyncFunctionDef object at 0x7da18f58dc60> | keyword[async] keyword[def] identifier[make_response] ( identifier[self] , identifier[result] : identifier[ResponseReturnValue] )-> identifier[Response] :
literal[string]
identifier[status_or_headers] = keyword[None]
identifier[headers] = keyword[None]
identifier[status] = keyword[None]
keyword[if] identifier[isinstance] ( identifier[result] , identifier[tuple] ):
identifier[value] , identifier[status_or_headers] , identifier[headers] = identifier[result] +( keyword[None] ,)*( literal[int] - identifier[len] ( identifier[result] ))
keyword[else] :
identifier[value] = identifier[result]
keyword[if] identifier[value] keyword[is] keyword[None] :
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[status_or_headers] ,( identifier[dict] , identifier[list] )):
identifier[headers] = identifier[status_or_headers]
identifier[status] = keyword[None]
keyword[elif] identifier[status_or_headers] keyword[is] keyword[not] keyword[None] :
identifier[status] = identifier[status_or_headers]
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[Response] ):
identifier[response] = identifier[self] . identifier[response_class] (
identifier[value] , identifier[timeout] = identifier[self] . identifier[config] [ literal[string] ],
)
keyword[else] :
identifier[response] = identifier[value]
keyword[if] identifier[status] keyword[is] keyword[not] keyword[None] :
identifier[response] . identifier[status_code] = identifier[status]
keyword[if] identifier[headers] keyword[is] keyword[not] keyword[None] :
identifier[response] . identifier[headers] . identifier[update] ( identifier[headers] )
keyword[return] identifier[response] | async def make_response(self, result: ResponseReturnValue) -> Response:
"""Make a Response from the result of the route handler.
The result itself can either be:
- A Response object (or subclass).
- A tuple of a ResponseValue and a header dictionary.
- A tuple of a ResponseValue, status code and a header dictionary.
A ResponseValue is either a Response object (or subclass) or a str.
"""
status_or_headers = None
headers = None
status = None
if isinstance(result, tuple):
(value, status_or_headers, headers) = result + (None,) * (3 - len(result)) # depends on [control=['if'], data=[]]
else:
value = result
if value is None:
raise TypeError('The response value returned by the view function cannot be None') # depends on [control=['if'], data=[]]
if isinstance(status_or_headers, (dict, list)):
headers = status_or_headers
status = None # depends on [control=['if'], data=[]]
elif status_or_headers is not None:
status = status_or_headers # depends on [control=['if'], data=['status_or_headers']]
if not isinstance(value, Response): # type: ignore
response = self.response_class(value, timeout=self.config['RESPONSE_TIMEOUT']) # depends on [control=['if'], data=[]]
else:
response = value
if status is not None:
response.status_code = status # type: ignore # depends on [control=['if'], data=['status']]
if headers is not None:
response.headers.update(headers) # type: ignore # depends on [control=['if'], data=['headers']]
return response |
def images():
'''
Show the list of registered images in this cluster.
'''
fields = [
('Name', 'name'),
('Registry', 'registry'),
('Tag', 'tag'),
('Digest', 'digest'),
('Size', 'size_bytes'),
('Aliases', 'aliases'),
]
with Session() as session:
try:
items = session.Image.list(fields=(item[1] for item in fields))
except Exception as e:
print_error(e)
sys.exit(1)
if len(items) == 0:
print('There are no registered images.')
return
print(tabulate((item.values() for item in items),
headers=(item[0] for item in fields),
floatfmt=',.0f')) | def function[images, parameter[]]:
constant[
Show the list of registered images in this cluster.
]
variable[fields] assign[=] list[[<ast.Tuple object at 0x7da20c6aa530>, <ast.Tuple object at 0x7da20c6a8850>, <ast.Tuple object at 0x7da20c6a9930>, <ast.Tuple object at 0x7da20c6a9f90>, <ast.Tuple object at 0x7da20c6a9690>, <ast.Tuple object at 0x7da20c6a94b0>]]
with call[name[Session], parameter[]] begin[:]
<ast.Try object at 0x7da20c6a9b40>
if compare[call[name[len], parameter[name[items]]] equal[==] constant[0]] begin[:]
call[name[print], parameter[constant[There are no registered images.]]]
return[None]
call[name[print], parameter[call[name[tabulate], parameter[<ast.GeneratorExp object at 0x7da20c6aae60>]]]] | keyword[def] identifier[images] ():
literal[string]
identifier[fields] =[
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
]
keyword[with] identifier[Session] () keyword[as] identifier[session] :
keyword[try] :
identifier[items] = identifier[session] . identifier[Image] . identifier[list] ( identifier[fields] =( identifier[item] [ literal[int] ] keyword[for] identifier[item] keyword[in] identifier[fields] ))
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[print_error] ( identifier[e] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[if] identifier[len] ( identifier[items] )== literal[int] :
identifier[print] ( literal[string] )
keyword[return]
identifier[print] ( identifier[tabulate] (( identifier[item] . identifier[values] () keyword[for] identifier[item] keyword[in] identifier[items] ),
identifier[headers] =( identifier[item] [ literal[int] ] keyword[for] identifier[item] keyword[in] identifier[fields] ),
identifier[floatfmt] = literal[string] )) | def images():
"""
Show the list of registered images in this cluster.
"""
fields = [('Name', 'name'), ('Registry', 'registry'), ('Tag', 'tag'), ('Digest', 'digest'), ('Size', 'size_bytes'), ('Aliases', 'aliases')]
with Session() as session:
try:
items = session.Image.list(fields=(item[1] for item in fields)) # depends on [control=['try'], data=[]]
except Exception as e:
print_error(e)
sys.exit(1) # depends on [control=['except'], data=['e']]
if len(items) == 0:
print('There are no registered images.')
return # depends on [control=['if'], data=[]]
print(tabulate((item.values() for item in items), headers=(item[0] for item in fields), floatfmt=',.0f')) # depends on [control=['with'], data=['session']] |
def up(tarball_url, auth_token, env, app_name):
"""Brings up a Heroku app."""
tarball_url = tarball_url or _infer_tarball_url()
if not tarball_url:
click.echo('No tarball URL found.')
sys.exit(1)
if env:
# Split ["KEY=value", ...] into {"KEY": "value", ...}
env = {
arg.split('=')[0]: arg.split('=')[1]
for arg in env
}
happy = Happy(auth_token=auth_token)
click.echo('Creating app... ', nl=False)
build_id, app_name = happy.create(
tarball_url=tarball_url,
env=env,
app_name=app_name,
)
click.echo(app_name)
click.echo('Building... ', nl=False)
happy.wait(build_id)
_write_app_name(app_name)
click.echo('done')
click.echo("It's up! :) https://%s.herokuapp.com" % app_name) | def function[up, parameter[tarball_url, auth_token, env, app_name]]:
constant[Brings up a Heroku app.]
variable[tarball_url] assign[=] <ast.BoolOp object at 0x7da20cabe200>
if <ast.UnaryOp object at 0x7da20e963250> begin[:]
call[name[click].echo, parameter[constant[No tarball URL found.]]]
call[name[sys].exit, parameter[constant[1]]]
if name[env] begin[:]
variable[env] assign[=] <ast.DictComp object at 0x7da20e962050>
variable[happy] assign[=] call[name[Happy], parameter[]]
call[name[click].echo, parameter[constant[Creating app... ]]]
<ast.Tuple object at 0x7da20e961510> assign[=] call[name[happy].create, parameter[]]
call[name[click].echo, parameter[name[app_name]]]
call[name[click].echo, parameter[constant[Building... ]]]
call[name[happy].wait, parameter[name[build_id]]]
call[name[_write_app_name], parameter[name[app_name]]]
call[name[click].echo, parameter[constant[done]]]
call[name[click].echo, parameter[binary_operation[constant[It's up! :) https://%s.herokuapp.com] <ast.Mod object at 0x7da2590d6920> name[app_name]]]] | keyword[def] identifier[up] ( identifier[tarball_url] , identifier[auth_token] , identifier[env] , identifier[app_name] ):
literal[string]
identifier[tarball_url] = identifier[tarball_url] keyword[or] identifier[_infer_tarball_url] ()
keyword[if] keyword[not] identifier[tarball_url] :
identifier[click] . identifier[echo] ( literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[if] identifier[env] :
identifier[env] ={
identifier[arg] . identifier[split] ( literal[string] )[ literal[int] ]: identifier[arg] . identifier[split] ( literal[string] )[ literal[int] ]
keyword[for] identifier[arg] keyword[in] identifier[env]
}
identifier[happy] = identifier[Happy] ( identifier[auth_token] = identifier[auth_token] )
identifier[click] . identifier[echo] ( literal[string] , identifier[nl] = keyword[False] )
identifier[build_id] , identifier[app_name] = identifier[happy] . identifier[create] (
identifier[tarball_url] = identifier[tarball_url] ,
identifier[env] = identifier[env] ,
identifier[app_name] = identifier[app_name] ,
)
identifier[click] . identifier[echo] ( identifier[app_name] )
identifier[click] . identifier[echo] ( literal[string] , identifier[nl] = keyword[False] )
identifier[happy] . identifier[wait] ( identifier[build_id] )
identifier[_write_app_name] ( identifier[app_name] )
identifier[click] . identifier[echo] ( literal[string] )
identifier[click] . identifier[echo] ( literal[string] % identifier[app_name] ) | def up(tarball_url, auth_token, env, app_name):
"""Brings up a Heroku app."""
tarball_url = tarball_url or _infer_tarball_url()
if not tarball_url:
click.echo('No tarball URL found.')
sys.exit(1) # depends on [control=['if'], data=[]]
if env:
# Split ["KEY=value", ...] into {"KEY": "value", ...}
env = {arg.split('=')[0]: arg.split('=')[1] for arg in env} # depends on [control=['if'], data=[]]
happy = Happy(auth_token=auth_token)
click.echo('Creating app... ', nl=False)
(build_id, app_name) = happy.create(tarball_url=tarball_url, env=env, app_name=app_name)
click.echo(app_name)
click.echo('Building... ', nl=False)
happy.wait(build_id)
_write_app_name(app_name)
click.echo('done')
click.echo("It's up! :) https://%s.herokuapp.com" % app_name) |
def create_relationships(cls, id, related_collection_name, request_json):
r"""
Used to create relationship(s) between the id node and the nodes identified in the included resource \
identifier objects.
:param id: The 'id' field of the node on the left side of the relationship in the database. The id field must \
be set in the model -- it is not the same as the node id
:param related_collection_name: The name of the relationship
:param request_json: request_json: a dictionary formatted according to the specification at \
http://jsonapi.org/format/#crud-updating-relationships
:return: A response according to the same specification
"""
try:
this_resource = cls.nodes.get(id=id, active=True)
related_collection = getattr(this_resource, related_collection_name)
if type(related_collection) in (One, ZeroOrOne): # Cardinality <= 1 so update_relationship should be used
r = application_codes.error_response([application_codes.FORBIDDEN_VIOLATION])
else:
data = request_json['data']
for rsrc_identifier in data:
the_new_node = cls.get_class_from_type(rsrc_identifier['type']).nodes.get(id=rsrc_identifier['id'])
rel_attrs = rsrc_identifier.get('meta')
if not rel_attrs or isinstance(rel_attrs, dict):
related_collection.connect(the_new_node, rel_attrs)
else:
raise WrongTypeError
#r = this_resource.relationship_collection_response(related_collection_name)
r = make_response('')
r.status_code = http_error_codes.NO_CONTENT
r.headers['Content-Type'] = CONTENT_TYPE
except DoesNotExist:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
except (KeyError, TypeError, WrongTypeError):
r = application_codes.error_response([application_codes.BAD_FORMAT_VIOLATION])
except AttemptedCardinalityViolation:
r = application_codes.error_response([application_codes.ATTEMPTED_CARDINALITY_VIOLATION])
except MultipleNodesReturned:
r = application_codes.error_response([application_codes.MULTIPLE_NODES_WITH_ID_VIOLATION])
return r | def function[create_relationships, parameter[cls, id, related_collection_name, request_json]]:
constant[
Used to create relationship(s) between the id node and the nodes identified in the included resource \
identifier objects.
:param id: The 'id' field of the node on the left side of the relationship in the database. The id field must \
be set in the model -- it is not the same as the node id
:param related_collection_name: The name of the relationship
:param request_json: request_json: a dictionary formatted according to the specification at \
http://jsonapi.org/format/#crud-updating-relationships
:return: A response according to the same specification
]
<ast.Try object at 0x7da1b09157e0>
return[name[r]] | keyword[def] identifier[create_relationships] ( identifier[cls] , identifier[id] , identifier[related_collection_name] , identifier[request_json] ):
literal[string]
keyword[try] :
identifier[this_resource] = identifier[cls] . identifier[nodes] . identifier[get] ( identifier[id] = identifier[id] , identifier[active] = keyword[True] )
identifier[related_collection] = identifier[getattr] ( identifier[this_resource] , identifier[related_collection_name] )
keyword[if] identifier[type] ( identifier[related_collection] ) keyword[in] ( identifier[One] , identifier[ZeroOrOne] ):
identifier[r] = identifier[application_codes] . identifier[error_response] ([ identifier[application_codes] . identifier[FORBIDDEN_VIOLATION] ])
keyword[else] :
identifier[data] = identifier[request_json] [ literal[string] ]
keyword[for] identifier[rsrc_identifier] keyword[in] identifier[data] :
identifier[the_new_node] = identifier[cls] . identifier[get_class_from_type] ( identifier[rsrc_identifier] [ literal[string] ]). identifier[nodes] . identifier[get] ( identifier[id] = identifier[rsrc_identifier] [ literal[string] ])
identifier[rel_attrs] = identifier[rsrc_identifier] . identifier[get] ( literal[string] )
keyword[if] keyword[not] identifier[rel_attrs] keyword[or] identifier[isinstance] ( identifier[rel_attrs] , identifier[dict] ):
identifier[related_collection] . identifier[connect] ( identifier[the_new_node] , identifier[rel_attrs] )
keyword[else] :
keyword[raise] identifier[WrongTypeError]
identifier[r] = identifier[make_response] ( literal[string] )
identifier[r] . identifier[status_code] = identifier[http_error_codes] . identifier[NO_CONTENT]
identifier[r] . identifier[headers] [ literal[string] ]= identifier[CONTENT_TYPE]
keyword[except] identifier[DoesNotExist] :
identifier[r] = identifier[application_codes] . identifier[error_response] ([ identifier[application_codes] . identifier[RESOURCE_NOT_FOUND] ])
keyword[except] ( identifier[KeyError] , identifier[TypeError] , identifier[WrongTypeError] ):
identifier[r] = identifier[application_codes] . identifier[error_response] ([ identifier[application_codes] . identifier[BAD_FORMAT_VIOLATION] ])
keyword[except] identifier[AttemptedCardinalityViolation] :
identifier[r] = identifier[application_codes] . identifier[error_response] ([ identifier[application_codes] . identifier[ATTEMPTED_CARDINALITY_VIOLATION] ])
keyword[except] identifier[MultipleNodesReturned] :
identifier[r] = identifier[application_codes] . identifier[error_response] ([ identifier[application_codes] . identifier[MULTIPLE_NODES_WITH_ID_VIOLATION] ])
keyword[return] identifier[r] | def create_relationships(cls, id, related_collection_name, request_json):
"""
Used to create relationship(s) between the id node and the nodes identified in the included resource \\
identifier objects.
:param id: The 'id' field of the node on the left side of the relationship in the database. The id field must \\
be set in the model -- it is not the same as the node id
:param related_collection_name: The name of the relationship
:param request_json: request_json: a dictionary formatted according to the specification at \\
http://jsonapi.org/format/#crud-updating-relationships
:return: A response according to the same specification
"""
try:
this_resource = cls.nodes.get(id=id, active=True)
related_collection = getattr(this_resource, related_collection_name)
if type(related_collection) in (One, ZeroOrOne): # Cardinality <= 1 so update_relationship should be used
r = application_codes.error_response([application_codes.FORBIDDEN_VIOLATION]) # depends on [control=['if'], data=[]]
else:
data = request_json['data']
for rsrc_identifier in data:
the_new_node = cls.get_class_from_type(rsrc_identifier['type']).nodes.get(id=rsrc_identifier['id'])
rel_attrs = rsrc_identifier.get('meta')
if not rel_attrs or isinstance(rel_attrs, dict):
related_collection.connect(the_new_node, rel_attrs) # depends on [control=['if'], data=[]]
else:
raise WrongTypeError # depends on [control=['for'], data=['rsrc_identifier']]
#r = this_resource.relationship_collection_response(related_collection_name)
r = make_response('')
r.status_code = http_error_codes.NO_CONTENT
r.headers['Content-Type'] = CONTENT_TYPE # depends on [control=['try'], data=[]]
except DoesNotExist:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND]) # depends on [control=['except'], data=[]]
except (KeyError, TypeError, WrongTypeError):
r = application_codes.error_response([application_codes.BAD_FORMAT_VIOLATION]) # depends on [control=['except'], data=[]]
except AttemptedCardinalityViolation:
r = application_codes.error_response([application_codes.ATTEMPTED_CARDINALITY_VIOLATION]) # depends on [control=['except'], data=[]]
except MultipleNodesReturned:
r = application_codes.error_response([application_codes.MULTIPLE_NODES_WITH_ID_VIOLATION]) # depends on [control=['except'], data=[]]
return r |
def find_upstream_changed(self, kind):
"""
Return list of files that have been changed upstream belonging to a particular kind of change
"""
output = subprocess.check_output([
'git', 'log', '{}..origin/{}'.format(self.branch_name, self.branch_name),
'--oneline', '--name-status'
], cwd=self.repo_dir).decode()
files = []
for line in output.split('\n'):
if line.startswith(kind):
files.append(os.path.join(self.repo_dir, line.split('\t', 1)[1]))
return files | def function[find_upstream_changed, parameter[self, kind]]:
constant[
Return list of files that have been changed upstream belonging to a particular kind of change
]
variable[output] assign[=] call[call[name[subprocess].check_output, parameter[list[[<ast.Constant object at 0x7da1b1178e80>, <ast.Constant object at 0x7da1b1179e10>, <ast.Call object at 0x7da1b1178790>, <ast.Constant object at 0x7da1b1179c30>, <ast.Constant object at 0x7da1b117a380>]]]].decode, parameter[]]
variable[files] assign[=] list[[]]
for taget[name[line]] in starred[call[name[output].split, parameter[constant[
]]]] begin[:]
if call[name[line].startswith, parameter[name[kind]]] begin[:]
call[name[files].append, parameter[call[name[os].path.join, parameter[name[self].repo_dir, call[call[name[line].split, parameter[constant[ ], constant[1]]]][constant[1]]]]]]
return[name[files]] | keyword[def] identifier[find_upstream_changed] ( identifier[self] , identifier[kind] ):
literal[string]
identifier[output] = identifier[subprocess] . identifier[check_output] ([
literal[string] , literal[string] , literal[string] . identifier[format] ( identifier[self] . identifier[branch_name] , identifier[self] . identifier[branch_name] ),
literal[string] , literal[string]
], identifier[cwd] = identifier[self] . identifier[repo_dir] ). identifier[decode] ()
identifier[files] =[]
keyword[for] identifier[line] keyword[in] identifier[output] . identifier[split] ( literal[string] ):
keyword[if] identifier[line] . identifier[startswith] ( identifier[kind] ):
identifier[files] . identifier[append] ( identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[repo_dir] , identifier[line] . identifier[split] ( literal[string] , literal[int] )[ literal[int] ]))
keyword[return] identifier[files] | def find_upstream_changed(self, kind):
"""
Return list of files that have been changed upstream belonging to a particular kind of change
"""
output = subprocess.check_output(['git', 'log', '{}..origin/{}'.format(self.branch_name, self.branch_name), '--oneline', '--name-status'], cwd=self.repo_dir).decode()
files = []
for line in output.split('\n'):
if line.startswith(kind):
files.append(os.path.join(self.repo_dir, line.split('\t', 1)[1])) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
return files |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.