code
stringlengths 75
104k
| code_sememe
stringlengths 47
309k
| token_type
stringlengths 215
214k
| code_dependency
stringlengths 75
155k
|
|---|---|---|---|
def url_for(endpoint, default=DEFAULT_ENDPOINT, **values):
    """Looks up the API URL for the given endpoint.

    :param endpoint: The name of the registered route (aka endpoint)
    :type endpoint: string
    :param default: Fallback endpoint used when `endpoint` cannot be resolved
    :returns: External URL for this endpoint
    :rtype: string/None
    """
    try:
        return router.url_for(endpoint, force_external=True, values=values)
    except Exception:
        # Logger.warn() is a deprecated alias of warning(); also pass the
        # argument lazily so formatting only happens if the record is emitted.
        logger.warning("Could not build API URL for endpoint '%s'. "
                       "No route provider registered?", endpoint)
        # build generic API URL
        return router.url_for(default, force_external=True, values=values)
|
def function[url_for, parameter[endpoint, default]]:
constant[Looks up the API URL for the given endpoint
:param endpoint: The name of the registered route (aka endpoint)
:type endpoint: string
:returns: External URL for this endpoint
:rtype: string/None
]
<ast.Try object at 0x7da1b2651540>
|
keyword[def] identifier[url_for] ( identifier[endpoint] , identifier[default] = identifier[DEFAULT_ENDPOINT] ,** identifier[values] ):
literal[string]
keyword[try] :
keyword[return] identifier[router] . identifier[url_for] ( identifier[endpoint] , identifier[force_external] = keyword[True] , identifier[values] = identifier[values] )
keyword[except] identifier[Exception] :
identifier[logger] . identifier[warn] ( literal[string]
literal[string] % identifier[endpoint] )
keyword[return] identifier[router] . identifier[url_for] ( identifier[default] , identifier[force_external] = keyword[True] , identifier[values] = identifier[values] )
|
def url_for(endpoint, default=DEFAULT_ENDPOINT, **values):
"""Looks up the API URL for the given endpoint
:param endpoint: The name of the registered route (aka endpoint)
:type endpoint: string
:returns: External URL for this endpoint
:rtype: string/None
"""
try:
return router.url_for(endpoint, force_external=True, values=values) # depends on [control=['try'], data=[]]
except Exception:
logger.warn("Could not build API URL for endpoint '%s'. No route provider registered?" % endpoint)
# build generic API URL
return router.url_for(default, force_external=True, values=values) # depends on [control=['except'], data=[]]
|
def findpeak(x, y, dy=None, position=None, hwhm=None, baseline=None, amplitude=None, curve='Lorentz'):
    """Find a (positive) peak in the dataset.

    Deprecated: please use findpeak_single() instead.

    Inputs:
        x, y, dy: abscissa, ordinate and the error of the ordinate (can be None)
        position, hwhm, baseline, amplitude: first guesses for the named parameters
        curve: 'Gauss' or 'Lorentz' (default)
    Outputs:
        peak position, error of peak position, hwhm, error of hwhm, baseline,
        error of baseline, amplitude, error of amplitude.
    Notes:
        A Gauss or a Lorentz curve is fitted, depending on the value of 'curve'.
    """
    warnings.warn('Function findpeak() is deprecated, please use findpeak_single() instead.', DeprecationWarning)
    fitted = findpeak_single(x, y, dy, position, hwhm, baseline, amplitude, curve)
    # Flatten each fitted parameter into its (value, error) pair, in order:
    # position, hwhm, baseline, amplitude.
    return tuple(part for param in fitted for part in (param.val, param.err))
|
def function[findpeak, parameter[x, y, dy, position, hwhm, baseline, amplitude, curve]]:
constant[Find a (positive) peak in the dataset.
This function is deprecated, please consider using findpeak_single() instead.
Inputs:
x, y, dy: abscissa, ordinate and the error of the ordinate (can be None)
position, hwhm, baseline, amplitude: first guesses for the named parameters
curve: 'Gauss' or 'Lorentz' (default)
Outputs:
peak position, error of peak position, hwhm, error of hwhm, baseline,
error of baseline, amplitude, error of amplitude.
Notes:
A Gauss or a Lorentz curve is fitted, depending on the value of 'curve'.
]
call[name[warnings].warn, parameter[constant[Function findpeak() is deprecated, please use findpeak_single() instead.], name[DeprecationWarning]]]
<ast.Tuple object at 0x7da1b10bd720> assign[=] call[name[findpeak_single], parameter[name[x], name[y], name[dy], name[position], name[hwhm], name[baseline], name[amplitude], name[curve]]]
return[tuple[[<ast.Attribute object at 0x7da1b10bd6f0>, <ast.Attribute object at 0x7da1b10bfc40>, <ast.Attribute object at 0x7da1b10bcac0>, <ast.Attribute object at 0x7da1b10bffd0>, <ast.Attribute object at 0x7da1b10bfa60>, <ast.Attribute object at 0x7da1b10bd780>, <ast.Attribute object at 0x7da1b10bfc70>, <ast.Attribute object at 0x7da1b10bca00>]]]
|
keyword[def] identifier[findpeak] ( identifier[x] , identifier[y] , identifier[dy] = keyword[None] , identifier[position] = keyword[None] , identifier[hwhm] = keyword[None] , identifier[baseline] = keyword[None] , identifier[amplitude] = keyword[None] , identifier[curve] = literal[string] ):
literal[string]
identifier[warnings] . identifier[warn] ( literal[string] , identifier[DeprecationWarning] )
identifier[pos] , identifier[hwhm] , identifier[baseline] , identifier[ampl] = identifier[findpeak_single] ( identifier[x] , identifier[y] , identifier[dy] , identifier[position] , identifier[hwhm] , identifier[baseline] , identifier[amplitude] , identifier[curve] )
keyword[return] identifier[pos] . identifier[val] , identifier[pos] . identifier[err] , identifier[hwhm] . identifier[val] , identifier[hwhm] . identifier[err] , identifier[baseline] . identifier[val] , identifier[baseline] . identifier[err] , identifier[ampl] . identifier[val] , identifier[ampl] . identifier[err]
|
def findpeak(x, y, dy=None, position=None, hwhm=None, baseline=None, amplitude=None, curve='Lorentz'):
"""Find a (positive) peak in the dataset.
This function is deprecated, please consider using findpeak_single() instead.
Inputs:
x, y, dy: abscissa, ordinate and the error of the ordinate (can be None)
position, hwhm, baseline, amplitude: first guesses for the named parameters
curve: 'Gauss' or 'Lorentz' (default)
Outputs:
peak position, error of peak position, hwhm, error of hwhm, baseline,
error of baseline, amplitude, error of amplitude.
Notes:
A Gauss or a Lorentz curve is fitted, depending on the value of 'curve'.
"""
warnings.warn('Function findpeak() is deprecated, please use findpeak_single() instead.', DeprecationWarning)
(pos, hwhm, baseline, ampl) = findpeak_single(x, y, dy, position, hwhm, baseline, amplitude, curve)
return (pos.val, pos.err, hwhm.val, hwhm.err, baseline.val, baseline.err, ampl.val, ampl.err)
|
def read(
    self,
    validity_check=False,
    indexes=None,
    resampling=None,
    dst_nodata=None,
    gdal_opts=None,
    **kwargs
):
    """
    Read reprojected & resampled input data.
    Parameters
    ----------
    validity_check : bool
        vector file: also run checks if reprojected geometry is valid,
        otherwise throw RuntimeError (default: False)
    indexes : list or int
        raster file: a list of band numbers; None will read all.
    resampling : str, optional
        raster file: resampling method; if not set, this tile directory's
        configured resampling is used.
    dst_nodata : int or float, optional
        raster file: if not set, the nodata value from the source dataset
        will be used
    gdal_opts : dict
        raster file: GDAL options passed on to rasterio.Env()
    Returns
    -------
    data : list for vector files or numpy array for raster files
    """
    return self._read_as_tiledir(
        data_type=self._file_type,
        out_tile=self.tile,
        td_crs=self._td_crs,
        tiles_paths=self._tiles_paths,
        profile=self._profile,
        validity_check=validity_check,
        indexes=indexes,
        resampling=resampling if resampling else self._resampling,
        dst_nodata=dst_nodata,
        gdal_opts=gdal_opts,
        # data_type is fixed above; strip it from kwargs to avoid a
        # duplicate-keyword TypeError.
        **{k: v for k, v in kwargs.items() if k != "data_type"}
    )
|
def function[read, parameter[self, validity_check, indexes, resampling, dst_nodata, gdal_opts]]:
constant[
Read reprojected & resampled input data.
Parameters
----------
validity_check : bool
vector file: also run checks if reprojected geometry is valid,
otherwise throw RuntimeError (default: True)
indexes : list or int
raster file: a list of band numbers; None will read all.
dst_nodata : int or float, optional
raster file: if not set, the nodata value from the source dataset
will be used
gdal_opts : dict
raster file: GDAL options passed on to rasterio.Env()
Returns
-------
data : list for vector files or numpy array for raster files
]
return[call[name[self]._read_as_tiledir, parameter[]]]
|
keyword[def] identifier[read] (
identifier[self] ,
identifier[validity_check] = keyword[False] ,
identifier[indexes] = keyword[None] ,
identifier[resampling] = keyword[None] ,
identifier[dst_nodata] = keyword[None] ,
identifier[gdal_opts] = keyword[None] ,
** identifier[kwargs]
):
literal[string]
keyword[return] identifier[self] . identifier[_read_as_tiledir] (
identifier[data_type] = identifier[self] . identifier[_file_type] ,
identifier[out_tile] = identifier[self] . identifier[tile] ,
identifier[td_crs] = identifier[self] . identifier[_td_crs] ,
identifier[tiles_paths] = identifier[self] . identifier[_tiles_paths] ,
identifier[profile] = identifier[self] . identifier[_profile] ,
identifier[validity_check] = identifier[validity_check] ,
identifier[indexes] = identifier[indexes] ,
identifier[resampling] = identifier[resampling] keyword[if] identifier[resampling] keyword[else] identifier[self] . identifier[_resampling] ,
identifier[dst_nodata] = identifier[dst_nodata] ,
identifier[gdal_opts] = identifier[gdal_opts] ,
**{ identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[kwargs] . identifier[items] () keyword[if] identifier[k] != literal[string] }
)
|
def read(self, validity_check=False, indexes=None, resampling=None, dst_nodata=None, gdal_opts=None, **kwargs):
"""
Read reprojected & resampled input data.
Parameters
----------
validity_check : bool
vector file: also run checks if reprojected geometry is valid,
otherwise throw RuntimeError (default: True)
indexes : list or int
raster file: a list of band numbers; None will read all.
dst_nodata : int or float, optional
raster file: if not set, the nodata value from the source dataset
will be used
gdal_opts : dict
raster file: GDAL options passed on to rasterio.Env()
Returns
-------
data : list for vector files or numpy array for raster files
"""
return self._read_as_tiledir(data_type=self._file_type, out_tile=self.tile, td_crs=self._td_crs, tiles_paths=self._tiles_paths, profile=self._profile, validity_check=validity_check, indexes=indexes, resampling=resampling if resampling else self._resampling, dst_nodata=dst_nodata, gdal_opts=gdal_opts, **{k: v for (k, v) in kwargs.items() if k != 'data_type'})
|
def get_value(repo_directory, key, expect_type=None):
    """Gets the value of the specified key in the config file."""
    value = read_config(repo_directory).get(key)
    # No type expectation, or nothing to check: hand the value back as-is.
    if not expect_type or value is None or isinstance(value, expect_type):
        return value
    raise ConfigSchemaError('Expected config variable %s to be type %s, got %s'
                            % (repr(key), repr(expect_type), repr(type(value))))
|
def function[get_value, parameter[repo_directory, key, expect_type]]:
constant[Gets the value of the specified key in the config file.]
variable[config] assign[=] call[name[read_config], parameter[name[repo_directory]]]
variable[value] assign[=] call[name[config].get, parameter[name[key]]]
if <ast.BoolOp object at 0x7da20c991d50> begin[:]
<ast.Raise object at 0x7da20c993d60>
return[name[value]]
|
keyword[def] identifier[get_value] ( identifier[repo_directory] , identifier[key] , identifier[expect_type] = keyword[None] ):
literal[string]
identifier[config] = identifier[read_config] ( identifier[repo_directory] )
identifier[value] = identifier[config] . identifier[get] ( identifier[key] )
keyword[if] identifier[expect_type] keyword[and] identifier[value] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[isinstance] ( identifier[value] , identifier[expect_type] ):
keyword[raise] identifier[ConfigSchemaError] ( literal[string]
%( identifier[repr] ( identifier[key] ), identifier[repr] ( identifier[expect_type] ), identifier[repr] ( identifier[type] ( identifier[value] ))))
keyword[return] identifier[value]
|
def get_value(repo_directory, key, expect_type=None):
"""Gets the value of the specified key in the config file."""
config = read_config(repo_directory)
value = config.get(key)
if expect_type and value is not None and (not isinstance(value, expect_type)):
raise ConfigSchemaError('Expected config variable %s to be type %s, got %s' % (repr(key), repr(expect_type), repr(type(value)))) # depends on [control=['if'], data=[]]
return value
|
def post_build(self, pkt, pay):
    """
    Encrypt the premaster secret (the 48 bytes) with either the temporary
    RSA key from a server key exchange message or, failing that, the server
    certificate. Then prepend the 2-byte length field, as described in the
    implementation notes at the end of section 7.4.7.1.
    """
    session = self.tls_session
    session.pre_master_secret = pkt
    session.compute_ms_and_derive_keys()
    enc = pkt
    if session.server_tmp_rsa_key is not None:
        enc = session.server_tmp_rsa_key.encrypt(pkt, t="pkcs")
    elif session.server_certs is not None and len(session.server_certs) > 0:
        enc = session.server_certs[0].encrypt(pkt, t="pkcs")
    else:
        # No key material available: leave the secret unencrypted and warn.
        warning("No material to encrypt Pre Master Secret")
    # TLS 1.0+ (0x0301) prefixes the encrypted secret with its length.
    prefix = b""
    if session.tls_version >= 0x0301:
        prefix = struct.pack("!H", len(enc))
    return prefix + enc + pay
|
def function[post_build, parameter[self, pkt, pay]]:
constant[
We encrypt the premaster secret (the 48 bytes) with either the server
certificate or the temporary RSA key provided in a server key exchange
message. After that step, we add the 2 bytes to provide the length, as
described in implementation notes at the end of section 7.4.7.1.
]
variable[enc] assign[=] name[pkt]
variable[s] assign[=] name[self].tls_session
name[s].pre_master_secret assign[=] name[enc]
call[name[s].compute_ms_and_derive_keys, parameter[]]
if compare[name[s].server_tmp_rsa_key is_not constant[None]] begin[:]
variable[enc] assign[=] call[name[s].server_tmp_rsa_key.encrypt, parameter[name[pkt]]]
variable[tmp_len] assign[=] constant[b'']
if compare[name[s].tls_version greater_or_equal[>=] constant[769]] begin[:]
variable[tmp_len] assign[=] call[name[struct].pack, parameter[constant[!H], call[name[len], parameter[name[enc]]]]]
return[binary_operation[binary_operation[name[tmp_len] + name[enc]] + name[pay]]]
|
keyword[def] identifier[post_build] ( identifier[self] , identifier[pkt] , identifier[pay] ):
literal[string]
identifier[enc] = identifier[pkt]
identifier[s] = identifier[self] . identifier[tls_session]
identifier[s] . identifier[pre_master_secret] = identifier[enc]
identifier[s] . identifier[compute_ms_and_derive_keys] ()
keyword[if] identifier[s] . identifier[server_tmp_rsa_key] keyword[is] keyword[not] keyword[None] :
identifier[enc] = identifier[s] . identifier[server_tmp_rsa_key] . identifier[encrypt] ( identifier[pkt] , identifier[t] = literal[string] )
keyword[elif] identifier[s] . identifier[server_certs] keyword[is] keyword[not] keyword[None] keyword[and] identifier[len] ( identifier[s] . identifier[server_certs] )> literal[int] :
identifier[enc] = identifier[s] . identifier[server_certs] [ literal[int] ]. identifier[encrypt] ( identifier[pkt] , identifier[t] = literal[string] )
keyword[else] :
identifier[warning] ( literal[string] )
identifier[tmp_len] = literal[string]
keyword[if] identifier[s] . identifier[tls_version] >= literal[int] :
identifier[tmp_len] = identifier[struct] . identifier[pack] ( literal[string] , identifier[len] ( identifier[enc] ))
keyword[return] identifier[tmp_len] + identifier[enc] + identifier[pay]
|
def post_build(self, pkt, pay):
"""
We encrypt the premaster secret (the 48 bytes) with either the server
certificate or the temporary RSA key provided in a server key exchange
message. After that step, we add the 2 bytes to provide the length, as
described in implementation notes at the end of section 7.4.7.1.
"""
enc = pkt
s = self.tls_session
s.pre_master_secret = enc
s.compute_ms_and_derive_keys()
if s.server_tmp_rsa_key is not None:
enc = s.server_tmp_rsa_key.encrypt(pkt, t='pkcs') # depends on [control=['if'], data=[]]
elif s.server_certs is not None and len(s.server_certs) > 0:
enc = s.server_certs[0].encrypt(pkt, t='pkcs') # depends on [control=['if'], data=[]]
else:
warning('No material to encrypt Pre Master Secret')
tmp_len = b''
if s.tls_version >= 769:
tmp_len = struct.pack('!H', len(enc)) # depends on [control=['if'], data=[]]
return tmp_len + enc + pay
|
def tables(self) -> List['Table']:
    """Return a list of found table objects.

    The shadow text is scanned repeatedly: each matched table is masked out
    with underscores and the scan restarts, so tables that only become
    visible once their parent is masked (nested tables) are found too.
    """
    tables = [] # type: List['Table']
    tables_append = tables.append  # local alias hoisted out of the scan loops
    type_to_spans = self._type_to_spans
    lststr = self._lststr
    # Work on a copy so masking does not mutate the cached shadow.
    shadow = self._shadow[:]
    ss, se = self._span
    spans = type_to_spans.setdefault('Table', [])
    if not spans:
        # All the added spans will be new.
        m = True # type: Any
        # `m` doubles as the loop flag: any match re-triggers a full rescan.
        while m:
            m = False
            for m in TABLE_FINDITER(shadow):
                ms, me = m.span()
                # Ignore leading whitespace using len(m[1]).
                span = [ss + ms + len(m[1]), ss + me]
                spans.append(span)
                tables_append(Table(lststr, type_to_spans, span, 'Table'))
                # Mask the matched region so the next pass can reach tables
                # nested inside it.
                shadow[ms:me] = b'_' * (me - ms)
        return tables
    # Some spans already exist: reuse them where possible so existing Table
    # objects stay bound to the same span lists, appending only new spans.
    span_tuple_to_span_get = {(s[0], s[1]): s for s in spans}.get
    m = True
    while m:
        m = False
        for m in TABLE_FINDITER(shadow):
            ms, me = m.span()
            # Ignore leading whitespace using len(m[1]).
            s, e = ss + ms + len(m[1]), ss + me
            old_span = span_tuple_to_span_get((s, e))
            if old_span is None:
                span = [s, e]
                insort(spans, span)
            else:
                span = old_span
            tables_append(Table(lststr, type_to_spans, span, 'Table'))
            shadow[ms:me] = b'_' * (me - ms)
    return tables
|
def function[tables, parameter[self]]:
constant[Return a list of found table objects.]
variable[tables] assign[=] list[[]]
variable[tables_append] assign[=] name[tables].append
variable[type_to_spans] assign[=] name[self]._type_to_spans
variable[lststr] assign[=] name[self]._lststr
variable[shadow] assign[=] call[name[self]._shadow][<ast.Slice object at 0x7da18f723010>]
<ast.Tuple object at 0x7da18f7203d0> assign[=] name[self]._span
variable[spans] assign[=] call[name[type_to_spans].setdefault, parameter[constant[Table], list[[]]]]
if <ast.UnaryOp object at 0x7da18f722440> begin[:]
variable[m] assign[=] constant[True]
while name[m] begin[:]
variable[m] assign[=] constant[False]
for taget[name[m]] in starred[call[name[TABLE_FINDITER], parameter[name[shadow]]]] begin[:]
<ast.Tuple object at 0x7da18f723b50> assign[=] call[name[m].span, parameter[]]
variable[span] assign[=] list[[<ast.BinOp object at 0x7da18f722170>, <ast.BinOp object at 0x7da1b0314be0>]]
call[name[spans].append, parameter[name[span]]]
call[name[tables_append], parameter[call[name[Table], parameter[name[lststr], name[type_to_spans], name[span], constant[Table]]]]]
call[name[shadow]][<ast.Slice object at 0x7da1b03149d0>] assign[=] binary_operation[constant[b'_'] * binary_operation[name[me] - name[ms]]]
return[name[tables]]
variable[span_tuple_to_span_get] assign[=] <ast.DictComp object at 0x7da1b0316350>.get
variable[m] assign[=] constant[True]
while name[m] begin[:]
variable[m] assign[=] constant[False]
for taget[name[m]] in starred[call[name[TABLE_FINDITER], parameter[name[shadow]]]] begin[:]
<ast.Tuple object at 0x7da1b0314190> assign[=] call[name[m].span, parameter[]]
<ast.Tuple object at 0x7da1b0317070> assign[=] tuple[[<ast.BinOp object at 0x7da1b0315510>, <ast.BinOp object at 0x7da1b0315330>]]
variable[old_span] assign[=] call[name[span_tuple_to_span_get], parameter[tuple[[<ast.Name object at 0x7da1b03179a0>, <ast.Name object at 0x7da1b03173d0>]]]]
if compare[name[old_span] is constant[None]] begin[:]
variable[span] assign[=] list[[<ast.Name object at 0x7da1b0316bf0>, <ast.Name object at 0x7da204962e30>]]
call[name[insort], parameter[name[spans], name[span]]]
call[name[tables_append], parameter[call[name[Table], parameter[name[lststr], name[type_to_spans], name[span], constant[Table]]]]]
call[name[shadow]][<ast.Slice object at 0x7da204961600>] assign[=] binary_operation[constant[b'_'] * binary_operation[name[me] - name[ms]]]
return[name[tables]]
|
keyword[def] identifier[tables] ( identifier[self] )-> identifier[List] [ literal[string] ]:
literal[string]
identifier[tables] =[]
identifier[tables_append] = identifier[tables] . identifier[append]
identifier[type_to_spans] = identifier[self] . identifier[_type_to_spans]
identifier[lststr] = identifier[self] . identifier[_lststr]
identifier[shadow] = identifier[self] . identifier[_shadow] [:]
identifier[ss] , identifier[se] = identifier[self] . identifier[_span]
identifier[spans] = identifier[type_to_spans] . identifier[setdefault] ( literal[string] ,[])
keyword[if] keyword[not] identifier[spans] :
identifier[m] = keyword[True]
keyword[while] identifier[m] :
identifier[m] = keyword[False]
keyword[for] identifier[m] keyword[in] identifier[TABLE_FINDITER] ( identifier[shadow] ):
identifier[ms] , identifier[me] = identifier[m] . identifier[span] ()
identifier[span] =[ identifier[ss] + identifier[ms] + identifier[len] ( identifier[m] [ literal[int] ]), identifier[ss] + identifier[me] ]
identifier[spans] . identifier[append] ( identifier[span] )
identifier[tables_append] ( identifier[Table] ( identifier[lststr] , identifier[type_to_spans] , identifier[span] , literal[string] ))
identifier[shadow] [ identifier[ms] : identifier[me] ]= literal[string] *( identifier[me] - identifier[ms] )
keyword[return] identifier[tables]
identifier[span_tuple_to_span_get] ={( identifier[s] [ literal[int] ], identifier[s] [ literal[int] ]): identifier[s] keyword[for] identifier[s] keyword[in] identifier[spans] }. identifier[get]
identifier[m] = keyword[True]
keyword[while] identifier[m] :
identifier[m] = keyword[False]
keyword[for] identifier[m] keyword[in] identifier[TABLE_FINDITER] ( identifier[shadow] ):
identifier[ms] , identifier[me] = identifier[m] . identifier[span] ()
identifier[s] , identifier[e] = identifier[ss] + identifier[ms] + identifier[len] ( identifier[m] [ literal[int] ]), identifier[ss] + identifier[me]
identifier[old_span] = identifier[span_tuple_to_span_get] (( identifier[s] , identifier[e] ))
keyword[if] identifier[old_span] keyword[is] keyword[None] :
identifier[span] =[ identifier[s] , identifier[e] ]
identifier[insort] ( identifier[spans] , identifier[span] )
keyword[else] :
identifier[span] = identifier[old_span]
identifier[tables_append] ( identifier[Table] ( identifier[lststr] , identifier[type_to_spans] , identifier[span] , literal[string] ))
identifier[shadow] [ identifier[ms] : identifier[me] ]= literal[string] *( identifier[me] - identifier[ms] )
keyword[return] identifier[tables]
|
def tables(self) -> List['Table']:
"""Return a list of found table objects."""
tables = [] # type: List['Table']
tables_append = tables.append
type_to_spans = self._type_to_spans
lststr = self._lststr
shadow = self._shadow[:]
(ss, se) = self._span
spans = type_to_spans.setdefault('Table', [])
if not spans:
# All the added spans will be new.
m = True # type: Any
while m:
m = False
for m in TABLE_FINDITER(shadow):
(ms, me) = m.span()
# Ignore leading whitespace using len(m[1]).
span = [ss + ms + len(m[1]), ss + me]
spans.append(span)
tables_append(Table(lststr, type_to_spans, span, 'Table'))
shadow[ms:me] = b'_' * (me - ms) # depends on [control=['for'], data=['m']] # depends on [control=['while'], data=[]]
return tables # depends on [control=['if'], data=[]]
# There are already exists some spans. Try to use the already existing
# before appending new spans.
span_tuple_to_span_get = {(s[0], s[1]): s for s in spans}.get
m = True
while m:
m = False
for m in TABLE_FINDITER(shadow):
(ms, me) = m.span()
# Ignore leading whitespace using len(m[1]).
(s, e) = (ss + ms + len(m[1]), ss + me)
old_span = span_tuple_to_span_get((s, e))
if old_span is None:
span = [s, e]
insort(spans, span) # depends on [control=['if'], data=[]]
else:
span = old_span
tables_append(Table(lststr, type_to_spans, span, 'Table'))
shadow[ms:me] = b'_' * (me - ms) # depends on [control=['for'], data=['m']] # depends on [control=['while'], data=[]]
return tables
|
def fit(
    self,
    df,
    duration_col=None,
    event_col=None,
    show_progress=False,
    initial_point=None,
    strata=None,
    step_size=None,
    weights_col=None,
    cluster_col=None,
    robust=False,
    batch_mode=None,
):
    """
    Fit the Cox proportional hazard model to a dataset.
    Parameters
    ----------
    df: DataFrame
        a Pandas DataFrame with necessary columns `duration_col` and
        `event_col` (see below), covariates columns, and special columns (weights, strata).
        `duration_col` refers to
        the lifetimes of the subjects. `event_col` refers to whether
        the 'death' events was observed: 1 if observed, 0 else (censored).
    duration_col: string
        the name of the column in DataFrame that contains the subjects'
        lifetimes.
    event_col: string, optional
        the name of the column in DataFrame that contains the subjects' death
        observation. If left as None, assume all individuals are uncensored.
    weights_col: string, optional
        an optional column in the DataFrame, df, that denotes the weight per subject.
        This column is expelled and not used as a covariate, but as a weight in the
        final regression. Default weight is 1.
        This can be used for case-weights. For example, a weight of 2 means there were two subjects with
        identical observations.
        This can be used for sampling weights. In that case, use `robust=True` to get more accurate standard errors.
    show_progress: boolean, optional (default=False)
        since the fitter is iterative, show convergence
        diagnostics. Useful if convergence is failing.
    initial_point: (d,) numpy array, optional
        initialize the starting point of the iterative
        algorithm. Default is the zero vector.
    strata: list or string, optional
        specify a column or list of columns n to use in stratification. This is useful if a
        categorical covariate does not obey the proportional hazard assumption. This
        is used similar to the `strata` expression in R.
        See http://courses.washington.edu/b515/l17.pdf.
    step_size: float, optional
        set an initial step size for the fitting algorithm. Setting to 1.0 may improve performance, but could also hurt convergence.
    robust: boolean, optional (default=False)
        Compute the robust errors using the Huber sandwich estimator, aka Wei-Lin estimate. This does not handle
        ties, so if there are high number of ties, results may significantly differ. See
        "The Robust Inference for the Cox Proportional Hazards Model", Journal of the American Statistical Association, Vol. 84, No. 408 (Dec., 1989), pp. 1074- 1078
    cluster_col: string, optional
        specifies what column has unique identifiers for clustering covariances. Using this forces the sandwich estimator (robust variance estimator) to
        be used.
    batch_mode: bool, optional
        enabling batch_mode can be faster for datasets with a large number of ties. If left as None, lifelines will choose the best option.
    Returns
    -------
    self: CoxPHFitter
        self with additional new properties: ``print_summary``, ``hazards_``, ``confidence_intervals_``, ``baseline_survival_``, etc.
    Note
    ----
    Tied survival times are handled using Efron's tie-method.
    Examples
    --------
    >>> from lifelines import CoxPHFitter
    >>>
    >>> df = pd.DataFrame({
    >>>     'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
    >>>     'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
    >>>     'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
    >>>     'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
    >>> })
    >>>
    >>> cph = CoxPHFitter()
    >>> cph.fit(df, 'T', 'E')
    >>> cph.print_summary()
    >>> cph.predict_median(df)
    >>> from lifelines import CoxPHFitter
    >>>
    >>> df = pd.DataFrame({
    >>>     'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
    >>>     'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
    >>>     'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
    >>>     'weights': [1.1, 0.5, 2.0, 1.6, 1.2, 4.3, 1.4, 4.5, 3.0, 3.2, 0.4, 6.2],
    >>>     'month': [10, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
    >>>     'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
    >>> })
    >>>
    >>> cph = CoxPHFitter()
    >>> cph.fit(df, 'T', 'E', strata=['month', 'age'], robust=True, weights_col='weights')
    >>> cph.print_summary()
    >>> cph.predict_median(df)
    """
    if duration_col is None:
        raise TypeError("duration_col cannot be None.")
    # Bookkeeping recorded on the instance (used elsewhere, e.g. summaries).
    self._censoring_type = CensoringType.RIGHT
    self._time_fit_was_called = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") + " UTC"
    self.duration_col = duration_col
    self.event_col = event_col
    self.robust = robust
    self.cluster_col = cluster_col
    self.weights_col = weights_col
    self._n_examples = df.shape[0]
    self._batch_mode = batch_mode
    # A strata argument passed here takes precedence over self.strata.
    self.strata = coalesce(strata, self.strata)
    X, T, E, weights, original_index, self._clusters = self._preprocess_dataframe(df)
    self.durations = T.copy()
    self.event_observed = E.copy()
    self.weights = weights.copy()
    if self.strata is not None:
        # Re-attach the caller's index (preprocessing re-indexes when
        # stratifying — presumably by strata; confirm in _preprocess_dataframe).
        self.durations.index = original_index
        self.event_observed.index = original_index
        self.weights.index = original_index
    # Covariates are normalized before fitting; the resulting coefficients
    # are divided by _norm_std below to return them to the original scale.
    self._norm_mean = X.mean(0)
    self._norm_std = X.std(0)
    X_norm = normalize(X, self._norm_mean, self._norm_std)
    hazards_ = self._fit_model(
        X_norm, T, E, weights=weights, initial_point=initial_point, show_progress=show_progress, step_size=step_size
    )
    self.hazards_ = pd.Series(hazards_, index=X.columns, name="coef") / self._norm_std
    # Covariance of the estimates: negative inverse Hessian, rescaled out of
    # the normalized covariate space.
    self.variance_matrix_ = -inv(self._hessian_) / np.outer(self._norm_std, self._norm_std)
    self.standard_errors_ = self._compute_standard_errors(X_norm, T, E, weights)
    self.confidence_intervals_ = self._compute_confidence_intervals()
    # Cache per-subject partial hazards alongside T/E/W for later use.
    self._predicted_partial_hazards_ = (
        self.predict_partial_hazard(X)
        .rename(columns={0: "P"})
        .assign(T=self.durations.values, E=self.event_observed.values, W=self.weights.values)
        .set_index(X.index)
    )
    self.baseline_hazard_ = self._compute_baseline_hazards()
    self.baseline_cumulative_hazard_ = self._compute_baseline_cumulative_hazard()
    self.baseline_survival_ = self._compute_baseline_survival()
    if hasattr(self, "_concordance_score_"):
        # we have already fit the model; drop the stale cached score.
        del self._concordance_score_
    return self
|
def function[fit, parameter[self, df, duration_col, event_col, show_progress, initial_point, strata, step_size, weights_col, cluster_col, robust, batch_mode]]:
constant[
Fit the Cox proportional hazard model to a dataset.
Parameters
----------
df: DataFrame
a Pandas DataFrame with necessary columns `duration_col` and
`event_col` (see below), covariates columns, and special columns (weights, strata).
`duration_col` refers to
the lifetimes of the subjects. `event_col` refers to whether
the 'death' events was observed: 1 if observed, 0 else (censored).
duration_col: string
the name of the column in DataFrame that contains the subjects'
lifetimes.
event_col: string, optional
the name of thecolumn in DataFrame that contains the subjects' death
observation. If left as None, assume all individuals are uncensored.
weights_col: string, optional
an optional column in the DataFrame, df, that denotes the weight per subject.
This column is expelled and not used as a covariate, but as a weight in the
final regression. Default weight is 1.
This can be used for case-weights. For example, a weight of 2 means there were two subjects with
identical observations.
This can be used for sampling weights. In that case, use `robust=True` to get more accurate standard errors.
show_progress: boolean, optional (default=False)
since the fitter is iterative, show convergence
diagnostics. Useful if convergence is failing.
initial_point: (d,) numpy array, optional
initialize the starting point of the iterative
algorithm. Default is the zero vector.
strata: list or string, optional
specify a column or list of columns n to use in stratification. This is useful if a
categorical covariate does not obey the proportional hazard assumption. This
is used similar to the `strata` expression in R.
See http://courses.washington.edu/b515/l17.pdf.
step_size: float, optional
set an initial step size for the fitting algorithm. Setting to 1.0 may improve performance, but could also hurt convergence.
robust: boolean, optional (default=False)
Compute the robust errors using the Huber sandwich estimator, aka Wei-Lin estimate. This does not handle
ties, so if there are high number of ties, results may significantly differ. See
"The Robust Inference for the Cox Proportional Hazards Model", Journal of the American Statistical Association, Vol. 84, No. 408 (Dec., 1989), pp. 1074- 1078
cluster_col: string, optional
specifies what column has unique identifiers for clustering covariances. Using this forces the sandwich estimator (robust variance estimator) to
be used.
batch_mode: bool, optional
enabling batch_mode can be faster for datasets with a large number of ties. If left as None, lifelines will choose the best option.
Returns
-------
self: CoxPHFitter
self with additional new properties: ``print_summary``, ``hazards_``, ``confidence_intervals_``, ``baseline_survival_``, etc.
Note
----
Tied survival times are handled using Efron's tie-method.
Examples
--------
>>> from lifelines import CoxPHFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> cph = CoxPHFitter()
>>> cph.fit(df, 'T', 'E')
>>> cph.print_summary()
>>> cph.predict_median(df)
>>> from lifelines import CoxPHFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'weights': [1.1, 0.5, 2.0, 1.6, 1.2, 4.3, 1.4, 4.5, 3.0, 3.2, 0.4, 6.2],
>>> 'month': [10, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> cph = CoxPHFitter()
>>> cph.fit(df, 'T', 'E', strata=['month', 'age'], robust=True, weights_col='weights')
>>> cph.print_summary()
>>> cph.predict_median(df)
]
if compare[name[duration_col] is constant[None]] begin[:]
<ast.Raise object at 0x7da204566440>
name[self]._censoring_type assign[=] name[CensoringType].RIGHT
name[self]._time_fit_was_called assign[=] binary_operation[call[call[name[datetime].utcnow, parameter[]].strftime, parameter[constant[%Y-%m-%d %H:%M:%S]]] + constant[ UTC]]
name[self].duration_col assign[=] name[duration_col]
name[self].event_col assign[=] name[event_col]
name[self].robust assign[=] name[robust]
name[self].cluster_col assign[=] name[cluster_col]
name[self].weights_col assign[=] name[weights_col]
name[self]._n_examples assign[=] call[name[df].shape][constant[0]]
name[self]._batch_mode assign[=] name[batch_mode]
name[self].strata assign[=] call[name[coalesce], parameter[name[strata], name[self].strata]]
<ast.Tuple object at 0x7da20c6a9cf0> assign[=] call[name[self]._preprocess_dataframe, parameter[name[df]]]
name[self].durations assign[=] call[name[T].copy, parameter[]]
name[self].event_observed assign[=] call[name[E].copy, parameter[]]
name[self].weights assign[=] call[name[weights].copy, parameter[]]
if compare[name[self].strata is_not constant[None]] begin[:]
name[self].durations.index assign[=] name[original_index]
name[self].event_observed.index assign[=] name[original_index]
name[self].weights.index assign[=] name[original_index]
name[self]._norm_mean assign[=] call[name[X].mean, parameter[constant[0]]]
name[self]._norm_std assign[=] call[name[X].std, parameter[constant[0]]]
variable[X_norm] assign[=] call[name[normalize], parameter[name[X], name[self]._norm_mean, name[self]._norm_std]]
variable[hazards_] assign[=] call[name[self]._fit_model, parameter[name[X_norm], name[T], name[E]]]
name[self].hazards_ assign[=] binary_operation[call[name[pd].Series, parameter[name[hazards_]]] / name[self]._norm_std]
name[self].variance_matrix_ assign[=] binary_operation[<ast.UnaryOp object at 0x7da20c6aa5c0> / call[name[np].outer, parameter[name[self]._norm_std, name[self]._norm_std]]]
name[self].standard_errors_ assign[=] call[name[self]._compute_standard_errors, parameter[name[X_norm], name[T], name[E], name[weights]]]
name[self].confidence_intervals_ assign[=] call[name[self]._compute_confidence_intervals, parameter[]]
name[self]._predicted_partial_hazards_ assign[=] call[call[call[call[name[self].predict_partial_hazard, parameter[name[X]]].rename, parameter[]].assign, parameter[]].set_index, parameter[name[X].index]]
name[self].baseline_hazard_ assign[=] call[name[self]._compute_baseline_hazards, parameter[]]
name[self].baseline_cumulative_hazard_ assign[=] call[name[self]._compute_baseline_cumulative_hazard, parameter[]]
name[self].baseline_survival_ assign[=] call[name[self]._compute_baseline_survival, parameter[]]
if call[name[hasattr], parameter[name[self], constant[_concordance_score_]]] begin[:]
<ast.Delete object at 0x7da20c76fac0>
return[name[self]]
|
keyword[def] identifier[fit] (
identifier[self] ,
identifier[df] ,
identifier[duration_col] = keyword[None] ,
identifier[event_col] = keyword[None] ,
identifier[show_progress] = keyword[False] ,
identifier[initial_point] = keyword[None] ,
identifier[strata] = keyword[None] ,
identifier[step_size] = keyword[None] ,
identifier[weights_col] = keyword[None] ,
identifier[cluster_col] = keyword[None] ,
identifier[robust] = keyword[False] ,
identifier[batch_mode] = keyword[None] ,
):
literal[string]
keyword[if] identifier[duration_col] keyword[is] keyword[None] :
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[self] . identifier[_censoring_type] = identifier[CensoringType] . identifier[RIGHT]
identifier[self] . identifier[_time_fit_was_called] = identifier[datetime] . identifier[utcnow] (). identifier[strftime] ( literal[string] )+ literal[string]
identifier[self] . identifier[duration_col] = identifier[duration_col]
identifier[self] . identifier[event_col] = identifier[event_col]
identifier[self] . identifier[robust] = identifier[robust]
identifier[self] . identifier[cluster_col] = identifier[cluster_col]
identifier[self] . identifier[weights_col] = identifier[weights_col]
identifier[self] . identifier[_n_examples] = identifier[df] . identifier[shape] [ literal[int] ]
identifier[self] . identifier[_batch_mode] = identifier[batch_mode]
identifier[self] . identifier[strata] = identifier[coalesce] ( identifier[strata] , identifier[self] . identifier[strata] )
identifier[X] , identifier[T] , identifier[E] , identifier[weights] , identifier[original_index] , identifier[self] . identifier[_clusters] = identifier[self] . identifier[_preprocess_dataframe] ( identifier[df] )
identifier[self] . identifier[durations] = identifier[T] . identifier[copy] ()
identifier[self] . identifier[event_observed] = identifier[E] . identifier[copy] ()
identifier[self] . identifier[weights] = identifier[weights] . identifier[copy] ()
keyword[if] identifier[self] . identifier[strata] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[durations] . identifier[index] = identifier[original_index]
identifier[self] . identifier[event_observed] . identifier[index] = identifier[original_index]
identifier[self] . identifier[weights] . identifier[index] = identifier[original_index]
identifier[self] . identifier[_norm_mean] = identifier[X] . identifier[mean] ( literal[int] )
identifier[self] . identifier[_norm_std] = identifier[X] . identifier[std] ( literal[int] )
identifier[X_norm] = identifier[normalize] ( identifier[X] , identifier[self] . identifier[_norm_mean] , identifier[self] . identifier[_norm_std] )
identifier[hazards_] = identifier[self] . identifier[_fit_model] (
identifier[X_norm] , identifier[T] , identifier[E] , identifier[weights] = identifier[weights] , identifier[initial_point] = identifier[initial_point] , identifier[show_progress] = identifier[show_progress] , identifier[step_size] = identifier[step_size]
)
identifier[self] . identifier[hazards_] = identifier[pd] . identifier[Series] ( identifier[hazards_] , identifier[index] = identifier[X] . identifier[columns] , identifier[name] = literal[string] )/ identifier[self] . identifier[_norm_std]
identifier[self] . identifier[variance_matrix_] =- identifier[inv] ( identifier[self] . identifier[_hessian_] )/ identifier[np] . identifier[outer] ( identifier[self] . identifier[_norm_std] , identifier[self] . identifier[_norm_std] )
identifier[self] . identifier[standard_errors_] = identifier[self] . identifier[_compute_standard_errors] ( identifier[X_norm] , identifier[T] , identifier[E] , identifier[weights] )
identifier[self] . identifier[confidence_intervals_] = identifier[self] . identifier[_compute_confidence_intervals] ()
identifier[self] . identifier[_predicted_partial_hazards_] =(
identifier[self] . identifier[predict_partial_hazard] ( identifier[X] )
. identifier[rename] ( identifier[columns] ={ literal[int] : literal[string] })
. identifier[assign] ( identifier[T] = identifier[self] . identifier[durations] . identifier[values] , identifier[E] = identifier[self] . identifier[event_observed] . identifier[values] , identifier[W] = identifier[self] . identifier[weights] . identifier[values] )
. identifier[set_index] ( identifier[X] . identifier[index] )
)
identifier[self] . identifier[baseline_hazard_] = identifier[self] . identifier[_compute_baseline_hazards] ()
identifier[self] . identifier[baseline_cumulative_hazard_] = identifier[self] . identifier[_compute_baseline_cumulative_hazard] ()
identifier[self] . identifier[baseline_survival_] = identifier[self] . identifier[_compute_baseline_survival] ()
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[del] identifier[self] . identifier[_concordance_score_]
keyword[return] identifier[self]
|
def fit(self, df, duration_col=None, event_col=None, show_progress=False, initial_point=None, strata=None, step_size=None, weights_col=None, cluster_col=None, robust=False, batch_mode=None):
    """
    Fit the Cox proportional hazard model to a dataset.
    Parameters
    ----------
    df: DataFrame
        a Pandas DataFrame with necessary columns `duration_col` and
        `event_col` (see below), covariates columns, and special columns (weights, strata).
        `duration_col` refers to
        the lifetimes of the subjects. `event_col` refers to whether
        the 'death' events was observed: 1 if observed, 0 else (censored).
    duration_col: string
        the name of the column in DataFrame that contains the subjects'
        lifetimes.
    event_col: string, optional
        the name of thecolumn in DataFrame that contains the subjects' death
        observation. If left as None, assume all individuals are uncensored.
    weights_col: string, optional
        an optional column in the DataFrame, df, that denotes the weight per subject.
        This column is expelled and not used as a covariate, but as a weight in the
        final regression. Default weight is 1.
        This can be used for case-weights. For example, a weight of 2 means there were two subjects with
        identical observations.
        This can be used for sampling weights. In that case, use `robust=True` to get more accurate standard errors.
    show_progress: boolean, optional (default=False)
        since the fitter is iterative, show convergence
        diagnostics. Useful if convergence is failing.
    initial_point: (d,) numpy array, optional
        initialize the starting point of the iterative
        algorithm. Default is the zero vector.
    strata: list or string, optional
        specify a column or list of columns n to use in stratification. This is useful if a
        categorical covariate does not obey the proportional hazard assumption. This
        is used similar to the `strata` expression in R.
        See http://courses.washington.edu/b515/l17.pdf.
    step_size: float, optional
        set an initial step size for the fitting algorithm. Setting to 1.0 may improve performance, but could also hurt convergence.
    robust: boolean, optional (default=False)
        Compute the robust errors using the Huber sandwich estimator, aka Wei-Lin estimate. This does not handle
        ties, so if there are high number of ties, results may significantly differ. See
        "The Robust Inference for the Cox Proportional Hazards Model", Journal of the American Statistical Association, Vol. 84, No. 408 (Dec., 1989), pp. 1074- 1078
    cluster_col: string, optional
        specifies what column has unique identifiers for clustering covariances. Using this forces the sandwich estimator (robust variance estimator) to
        be used.
    batch_mode: bool, optional
        enabling batch_mode can be faster for datasets with a large number of ties. If left as None, lifelines will choose the best option.
    Returns
    -------
    self: CoxPHFitter
        self with additional new properties: ``print_summary``, ``hazards_``, ``confidence_intervals_``, ``baseline_survival_``, etc.
    Note
    ----
    Tied survival times are handled using Efron's tie-method.
    Examples
    --------
    >>> from lifelines import CoxPHFitter
    >>>
    >>> df = pd.DataFrame({
    >>>     'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
    >>>     'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
    >>>     'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
    >>>     'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
    >>> })
    >>>
    >>> cph = CoxPHFitter()
    >>> cph.fit(df, 'T', 'E')
    >>> cph.print_summary()
    >>> cph.predict_median(df)
    >>> from lifelines import CoxPHFitter
    >>>
    >>> df = pd.DataFrame({
    >>>     'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
    >>>     'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
    >>>     'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
    >>>     'weights': [1.1, 0.5, 2.0, 1.6, 1.2, 4.3, 1.4, 4.5, 3.0, 3.2, 0.4, 6.2],
    >>>     'month': [10, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
    >>>     'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
    >>> })
    >>>
    >>> cph = CoxPHFitter()
    >>> cph.fit(df, 'T', 'E', strata=['month', 'age'], robust=True, weights_col='weights')
    >>> cph.print_summary()
    >>> cph.predict_median(df)
    """
    # A duration column is mandatory: it identifies which df column holds lifetimes.
    if duration_col is None:
        raise TypeError('duration_col cannot be None.') # depends on [control=['if'], data=[]]
    self._censoring_type = CensoringType.RIGHT
    # Record when fitting happened (shown later by reporting utilities such as print_summary).
    self._time_fit_was_called = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') + ' UTC'
    # Persist the fit configuration on the instance so downstream methods can reuse it.
    self.duration_col = duration_col
    self.event_col = event_col
    self.robust = robust
    self.cluster_col = cluster_col
    self.weights_col = weights_col
    self._n_examples = df.shape[0]
    self._batch_mode = batch_mode
    # Keep an explicitly passed strata, otherwise fall back to any strata set previously.
    self.strata = coalesce(strata, self.strata)
    # Split df into covariates X, durations T, event flags E, weights, plus bookkeeping info.
    (X, T, E, weights, original_index, self._clusters) = self._preprocess_dataframe(df)
    self.durations = T.copy()
    self.event_observed = E.copy()
    self.weights = weights.copy()
    if self.strata is not None:
        # Stratified preprocessing re-indexes; restore the caller's original row index.
        self.durations.index = original_index
        self.event_observed.index = original_index
        self.weights.index = original_index # depends on [control=['if'], data=[]]
    # Standardize covariates (per-column mean/std) to stabilize the numerical optimization.
    self._norm_mean = X.mean(0)
    self._norm_std = X.std(0)
    X_norm = normalize(X, self._norm_mean, self._norm_std)
    hazards_ = self._fit_model(X_norm, T, E, weights=weights, initial_point=initial_point, show_progress=show_progress, step_size=step_size)
    # Un-scale the coefficients so they are reported on the original covariate scale.
    self.hazards_ = pd.Series(hazards_, index=X.columns, name='coef') / self._norm_std
    # Covariance estimate from the inverse Hessian, rescaled back from normalized space.
    self.variance_matrix_ = -inv(self._hessian_) / np.outer(self._norm_std, self._norm_std)
    self.standard_errors_ = self._compute_standard_errors(X_norm, T, E, weights)
    self.confidence_intervals_ = self._compute_confidence_intervals()
    # Cache per-subject partial hazards alongside T/E/W for baseline-hazard computation.
    self._predicted_partial_hazards_ = self.predict_partial_hazard(X).rename(columns={0: 'P'}).assign(T=self.durations.values, E=self.event_observed.values, W=self.weights.values).set_index(X.index)
    self.baseline_hazard_ = self._compute_baseline_hazards()
    self.baseline_cumulative_hazard_ = self._compute_baseline_cumulative_hazard()
    self.baseline_survival_ = self._compute_baseline_survival()
    if hasattr(self, '_concordance_score_'):
        # we have already fit the model.
        del self._concordance_score_ # depends on [control=['if'], data=[]]
    return self
|
def get_version(self):
    """Parse the installed podman client version.

    Runs ``podman version`` and extracts the semantic version from a line
    of the form ``Version: X.Y.Z``.

    :return: 3-tuple of version components as strings, or None when no
        version line can be found in the command output
    """
    output = run_cmd(["podman", "version"], return_output=True)
    pattern = re.compile(r"Version:\s*(\d+)\.(\d+)\.(\d+)")
    matches = pattern.findall(output)
    if not matches:
        logger.error("unable to parse version from `podman version`")
        return None
    # findall yields a list of (major, minor, patch) tuples; first hit wins.
    return matches[0]
|
def function[get_version, parameter[self]]:
constant[
return 3-tuple of version info or None
:return: (str, str, str)
]
variable[raw_version] assign[=] call[name[run_cmd], parameter[list[[<ast.Constant object at 0x7da1b1195540>, <ast.Constant object at 0x7da1b11944c0>]]]]
variable[regex] assign[=] call[name[re].compile, parameter[constant[Version:\s*(\d+)\.(\d+)\.(\d+)]]]
variable[match] assign[=] call[name[regex].findall, parameter[name[raw_version]]]
<ast.Try object at 0x7da1b1195630>
|
keyword[def] identifier[get_version] ( identifier[self] ):
literal[string]
identifier[raw_version] = identifier[run_cmd] ([ literal[string] , literal[string] ], identifier[return_output] = keyword[True] )
identifier[regex] = identifier[re] . identifier[compile] ( literal[string] )
identifier[match] = identifier[regex] . identifier[findall] ( identifier[raw_version] )
keyword[try] :
keyword[return] identifier[match] [ literal[int] ]
keyword[except] identifier[IndexError] :
identifier[logger] . identifier[error] ( literal[string] )
keyword[return]
|
def get_version(self):
    """
    return 3-tuple of version info or None
    :return: (str, str, str)
    """
    # Shell out to the podman client and capture its stdout.
    raw_version = run_cmd(['podman', 'version'], return_output=True)
    # Match a line such as "Version: 3.4.2", capturing major/minor/patch.
    regex = re.compile('Version:\\s*(\\d+)\\.(\\d+)\\.(\\d+)')
    match = regex.findall(raw_version)
    try:
        # findall yields a list of (major, minor, patch) tuples; take the first.
        return match[0] # depends on [control=['try'], data=[]]
    except IndexError:
        logger.error('unable to parse version from `podman version`')
        # Bare return -> None when no version line was present in the output.
        return # depends on [control=['except'], data=[]]
|
def _parse_response_for_all_events(self, response):
    """
    This function will retrieve *most* of the event data, excluding Organizer & Attendee details
    """
    # Try the FindItem response layout first; an empty result list falls
    # through to the GetItem layout.
    items = response.xpath(
        u'//m:FindItemResponseMessage/m:RootFolder/t:Items/t:CalendarItem',
        namespaces=soap_request.NAMESPACES,
    ) or response.xpath(
        u'//m:GetItemResponseMessage/m:Items/t:CalendarItem',
        namespaces=soap_request.NAMESPACES,
    )
    if not items:
        log.debug(u'No calendar items found with search parameters.')
        return self
    self.count = len(items)
    log.debug(u'Found %s items' % self.count)
    for calendar_item in items:
        # deepcopy detaches the node from the response tree before wrapping it.
        self._add_event(xml=soap_request.M.Items(deepcopy(calendar_item)))
    return self
|
def function[_parse_response_for_all_events, parameter[self, response]]:
constant[
This function will retrieve *most* of the event data, excluding Organizer & Attendee details
]
variable[items] assign[=] call[name[response].xpath, parameter[constant[//m:FindItemResponseMessage/m:RootFolder/t:Items/t:CalendarItem]]]
if <ast.UnaryOp object at 0x7da1b28be680> begin[:]
variable[items] assign[=] call[name[response].xpath, parameter[constant[//m:GetItemResponseMessage/m:Items/t:CalendarItem]]]
if name[items] begin[:]
name[self].count assign[=] call[name[len], parameter[name[items]]]
call[name[log].debug, parameter[binary_operation[constant[Found %s items] <ast.Mod object at 0x7da2590d6920> name[self].count]]]
for taget[name[item]] in starred[name[items]] begin[:]
call[name[self]._add_event, parameter[]]
return[name[self]]
|
keyword[def] identifier[_parse_response_for_all_events] ( identifier[self] , identifier[response] ):
literal[string]
identifier[items] = identifier[response] . identifier[xpath] ( literal[string] , identifier[namespaces] = identifier[soap_request] . identifier[NAMESPACES] )
keyword[if] keyword[not] identifier[items] :
identifier[items] = identifier[response] . identifier[xpath] ( literal[string] , identifier[namespaces] = identifier[soap_request] . identifier[NAMESPACES] )
keyword[if] identifier[items] :
identifier[self] . identifier[count] = identifier[len] ( identifier[items] )
identifier[log] . identifier[debug] ( literal[string] % identifier[self] . identifier[count] )
keyword[for] identifier[item] keyword[in] identifier[items] :
identifier[self] . identifier[_add_event] ( identifier[xml] = identifier[soap_request] . identifier[M] . identifier[Items] ( identifier[deepcopy] ( identifier[item] )))
keyword[else] :
identifier[log] . identifier[debug] ( literal[string] )
keyword[return] identifier[self]
|
def _parse_response_for_all_events(self, response):
    """
    This function will retrieve *most* of the event data, excluding Organizer & Attendee details
    """
    # First look for calendar items under a FindItem response.
    items = response.xpath(u'//m:FindItemResponseMessage/m:RootFolder/t:Items/t:CalendarItem', namespaces=soap_request.NAMESPACES)
    if not items:
        # Fall back to the GetItem response layout.
        items = response.xpath(u'//m:GetItemResponseMessage/m:Items/t:CalendarItem', namespaces=soap_request.NAMESPACES) # depends on [control=['if'], data=[]]
    if items:
        self.count = len(items)
        log.debug(u'Found %s items' % self.count)
        for item in items:
            # deepcopy detaches the node from the response tree before wrapping it.
            self._add_event(xml=soap_request.M.Items(deepcopy(item))) # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=[]]
    else:
        log.debug(u'No calendar items found with search parameters.')
    return self
|
def usage(asked_for=0):
    '''Exit with a usage string, used for bad argument or with -h

    :param asked_for: truthy when help was explicitly requested (-h); selects
        the success exit code and stdout instead of failure code and stderr
    :return: the exit code constant to terminate the process with
    '''
    # Renamed from `exit` to avoid shadowing the builtin of the same name.
    exit_code = fsq.const('FSQ_SUCCESS') if asked_for else\
        fsq.const('FSQ_FAIL_PERM')
    # Help goes to stdout; error-driven usage goes to stderr.
    f = sys.stdout if asked_for else sys.stderr
    # Hoisted: the program name is used up to three times below.
    prog = os.path.basename(_PROG)
    shout('{0} [opts] src_queue trg_queue host item_id [item_id [...]]'.format(
        prog), f)
    if asked_for:
        shout('{0} [-p|--protocol=jsonrpc] [-L|--no-lock] [-t|--trigger] '\
              '[-i|--ignore-listener] <proto>://<host>:<port>/url'\
              .format(prog), f)
        shout('{0} [-p|--protocol=jsonrpc] [-L|--no-lock] [-t|--trigger]'\
              '[-i|--ignore-listener] unix://var/sock/foo.sock'\
              .format(prog), f)
        shout(' src_queue trg_queue host_queue item [item [...]]', f)
    return exit_code
|
def function[usage, parameter[asked_for]]:
constant[Exit with a usage string, used for bad argument or with -h]
variable[exit] assign[=] <ast.IfExp object at 0x7da1b0ae30a0>
variable[f] assign[=] <ast.IfExp object at 0x7da1b0ae00d0>
call[name[shout], parameter[call[constant[{0} [opts] src_queue trg_queue host item_id [item_id [...]]].format, parameter[call[name[os].path.basename, parameter[name[_PROG]]]]], name[f]]]
if name[asked_for] begin[:]
call[name[shout], parameter[call[constant[{0} [-p|--protocol=jsonrpc] [-L|--no-lock] [-t|--trigger] [-i|--ignore-listener] <proto>://<host>:<port>/url].format, parameter[call[name[os].path.basename, parameter[name[_PROG]]]]], name[f]]]
call[name[shout], parameter[call[constant[{0} [-p|--protocol=jsonrpc] [-L|--no-lock] [-t|--trigger][-i|--ignore-listener] unix://var/sock/foo.sock].format, parameter[call[name[os].path.basename, parameter[name[_PROG]]]]], name[f]]]
call[name[shout], parameter[constant[ src_queue trg_queue host_queue item [item [...]]], name[f]]]
return[name[exit]]
|
keyword[def] identifier[usage] ( identifier[asked_for] = literal[int] ):
literal[string]
identifier[exit] = identifier[fsq] . identifier[const] ( literal[string] ) keyword[if] identifier[asked_for] keyword[else] identifier[fsq] . identifier[const] ( literal[string] )
identifier[f] = identifier[sys] . identifier[stdout] keyword[if] identifier[asked_for] keyword[else] identifier[sys] . identifier[stderr]
identifier[shout] ( literal[string] . identifier[format] (
identifier[os] . identifier[path] . identifier[basename] ( identifier[_PROG] )), identifier[f] )
keyword[if] identifier[asked_for] :
identifier[shout] ( literal[string] literal[string] . identifier[format] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[_PROG] )), identifier[f] )
identifier[shout] ( literal[string] literal[string] . identifier[format] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[_PROG] )), identifier[f] )
identifier[shout] ( literal[string] , identifier[f] )
keyword[return] identifier[exit]
|
def usage(asked_for=0):
    """Exit with a usage string, used for bad argument or with -h"""
    # Success exit code when help was explicitly requested, permanent failure otherwise.
    exit = fsq.const('FSQ_SUCCESS') if asked_for else fsq.const('FSQ_FAIL_PERM')
    # Help goes to stdout; error-driven usage goes to stderr.
    f = sys.stdout if asked_for else sys.stderr
    shout('{0} [opts] src_queue trg_queue host item_id [item_id [...]]'.format(os.path.basename(_PROG)), f)
    if asked_for:
        # Extended help: network and unix-socket invocation forms.
        shout('{0} [-p|--protocol=jsonrpc] [-L|--no-lock] [-t|--trigger] [-i|--ignore-listener] <proto>://<host>:<port>/url'.format(os.path.basename(_PROG)), f)
        shout('{0} [-p|--protocol=jsonrpc] [-L|--no-lock] [-t|--trigger][-i|--ignore-listener] unix://var/sock/foo.sock'.format(os.path.basename(_PROG)), f)
        shout(' src_queue trg_queue host_queue item [item [...]]', f) # depends on [control=['if'], data=[]]
    return exit
|
def _transform_browser_coor(rRNA_interval, rRNA_coor):
"""
transform interval format to browser coord: chr:start-end
"""
with open(rRNA_coor, 'w') as out_handle:
with open(rRNA_interval, 'r') as in_handle:
for line in in_handle:
c, bio, source, s, e = line.split("\t")[:5]
if bio.startswith("rRNA"):
out_handle.write(("{0}:{1}-{2}\n").format(c, s, e))
|
def function[_transform_browser_coor, parameter[rRNA_interval, rRNA_coor]]:
constant[
transform interval format to browser coord: chr:start-end
]
with call[name[open], parameter[name[rRNA_coor], constant[w]]] begin[:]
with call[name[open], parameter[name[rRNA_interval], constant[r]]] begin[:]
for taget[name[line]] in starred[name[in_handle]] begin[:]
<ast.Tuple object at 0x7da1b1987f40> assign[=] call[call[name[line].split, parameter[constant[ ]]]][<ast.Slice object at 0x7da1b1985f90>]
if call[name[bio].startswith, parameter[constant[rRNA]]] begin[:]
call[name[out_handle].write, parameter[call[constant[{0}:{1}-{2}
].format, parameter[name[c], name[s], name[e]]]]]
|
keyword[def] identifier[_transform_browser_coor] ( identifier[rRNA_interval] , identifier[rRNA_coor] ):
literal[string]
keyword[with] identifier[open] ( identifier[rRNA_coor] , literal[string] ) keyword[as] identifier[out_handle] :
keyword[with] identifier[open] ( identifier[rRNA_interval] , literal[string] ) keyword[as] identifier[in_handle] :
keyword[for] identifier[line] keyword[in] identifier[in_handle] :
identifier[c] , identifier[bio] , identifier[source] , identifier[s] , identifier[e] = identifier[line] . identifier[split] ( literal[string] )[: literal[int] ]
keyword[if] identifier[bio] . identifier[startswith] ( literal[string] ):
identifier[out_handle] . identifier[write] (( literal[string] ). identifier[format] ( identifier[c] , identifier[s] , identifier[e] ))
|
def _transform_browser_coor(rRNA_interval, rRNA_coor):
    """
    transform interval format to browser coord: chr:start-end
    """
    with open(rRNA_coor, 'w') as out_handle:
        with open(rRNA_interval, 'r') as in_handle:
            for line in in_handle:
                # First five tab-separated columns: chrom, biotype, source, start, end.
                (c, bio, source, s, e) = line.split('\t')[:5]
                if bio.startswith('rRNA'):
                    # NOTE(review): on a row with exactly five columns `e` keeps its
                    # trailing newline -- TODO confirm input rows always have >5 columns.
                    out_handle.write('{0}:{1}-{2}\n'.format(c, s, e)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['in_handle']] # depends on [control=['with'], data=['open', 'out_handle']]
|
def get_urls(self):
    """
    Add our preview view to our urls.
    """
    # One admin-wrapped preview view, routed from the add, change and
    # history pages; listed first so these patterns take precedence.
    preview = self.admin_site.admin_view(PagePreviewView.as_view())
    preview_urls = patterns(
        '',
        (r'^add/preview$', preview),
        (r'^(?P<id>\d+)/preview$', preview),
        (r'^(?P<id>\d+)/history/(\d+)/preview$', preview),
    )
    return preview_urls + super(PageAdmin, self).get_urls()
|
def function[get_urls, parameter[self]]:
constant[
Add our preview view to our urls.
]
variable[urls] assign[=] call[call[name[super], parameter[name[PageAdmin], name[self]]].get_urls, parameter[]]
variable[my_urls] assign[=] call[name[patterns], parameter[constant[], tuple[[<ast.Constant object at 0x7da1b13bb340>, <ast.Call object at 0x7da1b13bb1c0>]], tuple[[<ast.Constant object at 0x7da1b13b96f0>, <ast.Call object at 0x7da1b13ba110>]], tuple[[<ast.Constant object at 0x7da1b13bb400>, <ast.Call object at 0x7da1b13bb760>]]]]
return[binary_operation[name[my_urls] + name[urls]]]
|
keyword[def] identifier[get_urls] ( identifier[self] ):
literal[string]
identifier[urls] = identifier[super] ( identifier[PageAdmin] , identifier[self] ). identifier[get_urls] ()
identifier[my_urls] = identifier[patterns] ( literal[string] ,
( literal[string] , identifier[self] . identifier[admin_site] . identifier[admin_view] ( identifier[PagePreviewView] . identifier[as_view] ())),
( literal[string] , identifier[self] . identifier[admin_site] . identifier[admin_view] ( identifier[PagePreviewView] . identifier[as_view] ())),
( literal[string] , identifier[self] . identifier[admin_site] . identifier[admin_view] ( identifier[PagePreviewView] . identifier[as_view] ())),
)
keyword[return] identifier[my_urls] + identifier[urls]
|
def get_urls(self):
    """
    Add our preview view to our urls.
    """
    urls = super(PageAdmin, self).get_urls()
    # Preview routes for the add, change and history admin pages; placed
    # before the stock patterns so they take precedence.
    my_urls = patterns('', ('^add/preview$', self.admin_site.admin_view(PagePreviewView.as_view())), ('^(?P<id>\\d+)/preview$', self.admin_site.admin_view(PagePreviewView.as_view())), ('^(?P<id>\\d+)/history/(\\d+)/preview$', self.admin_site.admin_view(PagePreviewView.as_view())))
    return my_urls + urls
|
def get(self, key, no_cache=False):
    """Return the value of a single preference using a dotted path key
    :arg no_cache: if true, the cache is bypassed
    """
    sec, pref_name = self.parse_lookup(key)
    # Registry lookup kept for its effect with fallback=False -- presumably
    # it validates that the preference is registered; verify against the
    # registry implementation. Its return value is not used here.
    self.registry.get(section=sec, name=pref_name, fallback=False)
    use_cache = preferences_settings.ENABLE_CACHE and not no_cache
    if not use_cache:
        return self.get_db_pref(section=sec, name=pref_name).value
    try:
        return self.from_cache(sec, pref_name)
    except CachedValueNotFound:
        pass
    # Cache miss: read from the database and repopulate the cache.
    db_pref = self.get_db_pref(section=sec, name=pref_name)
    self.to_cache(db_pref)
    return db_pref.value
|
def function[get, parameter[self, key, no_cache]]:
constant[Return the value of a single preference using a dotted path key
:arg no_cache: if true, the cache is bypassed
]
<ast.Tuple object at 0x7da1b119fee0> assign[=] call[name[self].parse_lookup, parameter[name[key]]]
variable[preference] assign[=] call[name[self].registry.get, parameter[]]
if <ast.BoolOp object at 0x7da1b11a0790> begin[:]
return[call[name[self].get_db_pref, parameter[]].value]
<ast.Try object at 0x7da1b11a11b0>
variable[db_pref] assign[=] call[name[self].get_db_pref, parameter[]]
call[name[self].to_cache, parameter[name[db_pref]]]
return[name[db_pref].value]
|
keyword[def] identifier[get] ( identifier[self] , identifier[key] , identifier[no_cache] = keyword[False] ):
literal[string]
identifier[section] , identifier[name] = identifier[self] . identifier[parse_lookup] ( identifier[key] )
identifier[preference] = identifier[self] . identifier[registry] . identifier[get] (
identifier[section] = identifier[section] , identifier[name] = identifier[name] , identifier[fallback] = keyword[False] )
keyword[if] identifier[no_cache] keyword[or] keyword[not] identifier[preferences_settings] . identifier[ENABLE_CACHE] :
keyword[return] identifier[self] . identifier[get_db_pref] ( identifier[section] = identifier[section] , identifier[name] = identifier[name] ). identifier[value]
keyword[try] :
keyword[return] identifier[self] . identifier[from_cache] ( identifier[section] , identifier[name] )
keyword[except] identifier[CachedValueNotFound] :
keyword[pass]
identifier[db_pref] = identifier[self] . identifier[get_db_pref] ( identifier[section] = identifier[section] , identifier[name] = identifier[name] )
identifier[self] . identifier[to_cache] ( identifier[db_pref] )
keyword[return] identifier[db_pref] . identifier[value]
|
def get(self, key, no_cache=False):
    """Return the value of a single preference using a dotted path key.

    :param key: dotted lookup path, e.g. ``"section.name"``
    :param no_cache: if true, the cache is bypassed entirely
    :returns: the preference value, from cache when possible
    """
    section, name = self.parse_lookup(key)
    # The returned preference object itself is unused (the previous binding
    # was dead code); the call is kept because with fallback=False it
    # presumably validates that the preference is registered -- TODO confirm.
    self.registry.get(section=section, name=name, fallback=False)
    if no_cache or not preferences_settings.ENABLE_CACHE:
        # Caller explicitly bypassed the cache, or caching is disabled.
        return self.get_db_pref(section=section, name=name).value
    try:
        return self.from_cache(section, name)
    except CachedValueNotFound:
        pass  # cache miss: fall through to the database lookup
    db_pref = self.get_db_pref(section=section, name=name)
    self.to_cache(db_pref)  # warm the cache for subsequent lookups
    return db_pref.value
|
def spherical_distance(pt0, pt1):
    """Return the angular (great-circle) distance between two points.

    Both arguments are given in spherical coordinates as
    (longitude, latitude); (2 x n) matrices are handled column-wise.
    Uses the Haversine formula, so nearly antipodal points may suffer
    from rounding error.
    """
    half_dlon = (pt1[0] - pt0[0]) / 2
    half_dlat = (pt1[1] - pt0[1]) / 2
    # Haversine of the central angle.
    hav = np.sin(half_dlat)**2 + np.cos(pt0[1]) * np.cos(pt1[1]) * np.sin(half_dlon)**2
    return 2 * np.arcsin(np.sqrt(hav))
|
def function[spherical_distance, parameter[pt0, pt1]]:
constant[
spherical_distance(a, b) yields the angular distance between points a and b, both of which
should be expressed in spherical coordinates as (longitude, latitude).
If a and/or b are (2 x n) matrices, then the calculation is performed over all columns.
The spherical_distance function uses the Haversine formula; accordingly it may suffer from
rounding errors in the case of nearly antipodal points.
]
variable[dtheta] assign[=] binary_operation[call[name[pt1]][constant[0]] - call[name[pt0]][constant[0]]]
variable[dphi] assign[=] binary_operation[call[name[pt1]][constant[1]] - call[name[pt0]][constant[1]]]
variable[a] assign[=] binary_operation[binary_operation[call[name[np].sin, parameter[binary_operation[name[dphi] / constant[2]]]] ** constant[2]] + binary_operation[binary_operation[call[name[np].cos, parameter[call[name[pt0]][constant[1]]]] * call[name[np].cos, parameter[call[name[pt1]][constant[1]]]]] * binary_operation[call[name[np].sin, parameter[binary_operation[name[dtheta] / constant[2]]]] ** constant[2]]]]
return[binary_operation[constant[2] * call[name[np].arcsin, parameter[call[name[np].sqrt, parameter[name[a]]]]]]]
|
keyword[def] identifier[spherical_distance] ( identifier[pt0] , identifier[pt1] ):
literal[string]
identifier[dtheta] = identifier[pt1] [ literal[int] ]- identifier[pt0] [ literal[int] ]
identifier[dphi] = identifier[pt1] [ literal[int] ]- identifier[pt0] [ literal[int] ]
identifier[a] = identifier[np] . identifier[sin] ( identifier[dphi] / literal[int] )** literal[int] + identifier[np] . identifier[cos] ( identifier[pt0] [ literal[int] ])* identifier[np] . identifier[cos] ( identifier[pt1] [ literal[int] ])* identifier[np] . identifier[sin] ( identifier[dtheta] / literal[int] )** literal[int]
keyword[return] literal[int] * identifier[np] . identifier[arcsin] ( identifier[np] . identifier[sqrt] ( identifier[a] ))
|
def spherical_distance(pt0, pt1):
    """Angular distance between *pt0* and *pt1* on the unit sphere.

    Points are (longitude, latitude) pairs; (2 x n) matrices are
    processed over all columns. Implemented with the Haversine
    formula, which can lose precision for nearly antipodal points.
    """
    lon0, lat0 = pt0[0], pt0[1]
    lon1, lat1 = pt1[0], pt1[1]
    haversine = np.sin((lat1 - lat0) / 2) ** 2
    haversine = haversine + np.cos(lat0) * np.cos(lat1) * np.sin((lon1 - lon0) / 2) ** 2
    return 2 * np.arcsin(np.sqrt(haversine))
|
def _process_file_continue_ftp_response(self, response: FTPResponse):
    """Process a restarted content response.

    Reopens the target file in binary append mode when both the request
    and the response carry a restart value; otherwise signals that the
    transfer cannot be continued.
    """
    if not (response.request.restart_value and response.restart_value):
        self._raise_cannot_continue_error()
        return
    self.open_file(self._filename, response, mode='ab+')
|
def function[_process_file_continue_ftp_response, parameter[self, response]]:
constant[Process a restarted content response.]
if <ast.BoolOp object at 0x7da18f722d10> begin[:]
call[name[self].open_file, parameter[name[self]._filename, name[response]]]
|
keyword[def] identifier[_process_file_continue_ftp_response] ( identifier[self] , identifier[response] : identifier[FTPResponse] ):
literal[string]
keyword[if] identifier[response] . identifier[request] . identifier[restart_value] keyword[and] identifier[response] . identifier[restart_value] :
identifier[self] . identifier[open_file] ( identifier[self] . identifier[_filename] , identifier[response] , identifier[mode] = literal[string] )
keyword[else] :
identifier[self] . identifier[_raise_cannot_continue_error] ()
|
def _process_file_continue_ftp_response(self, response: FTPResponse):
    """Process a restarted content response.

    If both the request and the response report a restart value, reopen
    the destination file in binary append mode to continue writing;
    otherwise delegate to the cannot-continue error helper.
    """
    if response.request.restart_value and response.restart_value:
        # Continue an interrupted transfer: append to the existing file.
        self.open_file(self._filename, response, mode='ab+')
    else:
        # No consistent restart point on both sides -- presumably raises; TODO confirm.
        self._raise_cannot_continue_error()
|
def elog(exc, func, args=None, kwargs=None, str=str, pretty=True, name=''):
    """Log an exception-raising function invocation (used by randomized unit tests).

    Formats the call of ``func`` with ``args``/``kwargs`` together with the
    raised exception ``exc`` and writes it to ``elogger`` at ERROR level.
    ``pretty`` selects a call-style rendering instead of the raw
    ``args=.../kwargs=...`` dump. The ``str`` parameter is never referenced
    in the body; it is kept only because it is part of the public signature.
    """
    # Imported lazily -- presumably to dodge a circular import; TODO confirm.
    from .str import safe_str
    if not args:
        args = ()
    if not kwargs:
        kwargs = {}
    if name:
        name = '{}.{}'.format(get_mod(func), name)
    else:
        name = full_funcname(func)
    if pretty:
        invocation = ', '.join(safe_str(positional) for positional in args)
        if kwargs:
            # Keyword arguments are sorted for deterministic output. Note the
            # unconditional ', ' separator (present even when args is empty),
            # matching the historical formatting.
            keyword_parts = ['{}={}'.format(k, safe_str(v)) for k, v in sorted(kwargs.items())]
            invocation = invocation + ', ' + ', '.join(keyword_parts)
    else:
        invocation = 'args={}, kwargs={}'.format(safe_str(args), safe_str(kwargs))
    msg = '***{}***: "{}" --- {}({})'.format(get_typename(exc),
                                             message(exc),
                                             name,
                                             invocation)
    elogger.error(msg)
|
def function[elog, parameter[exc, func, args, kwargs, str, pretty, name]]:
constant[For logging exception-raising function invocations during randomized unit tests.
]
from relative_module[str] import module[safe_str]
variable[args] assign[=] <ast.IfExp object at 0x7da20e962980>
variable[kwargs] assign[=] <ast.IfExp object at 0x7da20e963df0>
variable[name] assign[=] <ast.IfExp object at 0x7da20e960640>
if name[pretty] begin[:]
variable[invocation] assign[=] call[constant[, ].join, parameter[<ast.ListComp object at 0x7da20e9619c0>]]
if name[kwargs] begin[:]
<ast.AugAssign object at 0x7da20e963e20>
<ast.AugAssign object at 0x7da20e961e10>
variable[msg] assign[=] call[constant[***{}***: "{}" --- {}({})].format, parameter[call[name[get_typename], parameter[name[exc]]], call[name[message], parameter[name[exc]]], name[name], name[invocation]]]
call[name[elogger].error, parameter[name[msg]]]
|
keyword[def] identifier[elog] ( identifier[exc] , identifier[func] , identifier[args] = keyword[None] , identifier[kwargs] = keyword[None] , identifier[str] = identifier[str] , identifier[pretty] = keyword[True] , identifier[name] = literal[string] ):
literal[string]
keyword[from] . identifier[str] keyword[import] identifier[safe_str]
identifier[args] = identifier[args] keyword[if] identifier[args] keyword[else] ()
identifier[kwargs] = identifier[kwargs] keyword[if] identifier[kwargs] keyword[else] {}
identifier[name] = literal[string] . identifier[format] ( identifier[get_mod] ( identifier[func] ), identifier[name] ) keyword[if] identifier[name] keyword[else] identifier[full_funcname] ( identifier[func] )
keyword[if] identifier[pretty] :
identifier[invocation] = literal[string] . identifier[join] ([ identifier[safe_str] ( identifier[arg] ) keyword[for] identifier[arg] keyword[in] identifier[args] ])
keyword[if] identifier[kwargs] :
identifier[invocation] += literal[string]
identifier[invocation] += literal[string] . identifier[join] ([ literal[string] . identifier[format] ( identifier[key] , identifier[safe_str] ( identifier[value] ))
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[sorted] ( identifier[kwargs] . identifier[items] ())])
keyword[else] :
identifier[invocation] = literal[string] . identifier[format] ( identifier[safe_str] ( identifier[args] ), identifier[safe_str] ( identifier[kwargs] ))
identifier[msg] = literal[string] . identifier[format] ( identifier[get_typename] ( identifier[exc] ),
identifier[message] ( identifier[exc] ),
identifier[name] ,
identifier[invocation] )
identifier[elogger] . identifier[error] ( identifier[msg] )
|
def elog(exc, func, args=None, kwargs=None, str=str, pretty=True, name=''):
    """For logging exception-raising function invocations during randomized unit tests.

    ``exc`` is the exception raised while calling ``func`` with
    ``args``/``kwargs``; the formatted invocation is written to ``elogger``
    at ERROR level. ``pretty`` selects a call-style rendering instead of a
    raw ``args=.../kwargs=...`` dump. NOTE(review): the ``str`` parameter
    shadows the builtin and is never used in the body -- presumably kept
    for backward compatibility; confirm before removing.
    """
    # Imported lazily -- presumably to avoid a circular import; TODO confirm.
    from .str import safe_str
    args = args if args else ()  # normalize falsy values to an empty tuple
    kwargs = kwargs if kwargs else {}
    # Qualify an explicit name with the function's module, else derive it fully.
    name = '{}.{}'.format(get_mod(func), name) if name else full_funcname(func)
    if pretty:
        # Render positional arguments as a comma-separated call expression.
        invocation = ', '.join([safe_str(arg) for arg in args])
        if kwargs:
            invocation += ', '
            # Keyword arguments are sorted for deterministic log output.
            invocation += ', '.join(['{}={}'.format(key, safe_str(value)) for (key, value) in sorted(kwargs.items())])
    else:
        invocation = 'args={}, kwargs={}'.format(safe_str(args), safe_str(kwargs))
    msg = '***{}***: "{}" --- {}({})'.format(get_typename(exc), message(exc), name, invocation)
    elogger.error(msg)
|
def parsexmldeclarations(self, node):
    """Internal method to parse XML declarations.

    Iterates over the children of the given annotations element and, for
    each ``*-annotation`` element in the FoLiA namespace, registers the
    annotation (type, set) pair, optionally downloads the set definition,
    records annotator/annotatortype/datetime defaults, and loads any
    external stand-off document referenced via the ``external`` attribute.

    Mutates: ``self.annotations``, ``self.setdefinitions``,
    ``self.annotationdefaults`` and ``self.standoffdocs``.

    NOTE(review): ``type`` and ``set`` shadow the builtins throughout
    this method.
    """
    if self.debug >= 1:
        print("[PyNLPl FoLiA DEBUG] Processing Annotation Declarations",file=stderr)
    self.declareprocessed = True
    for subnode in node: #pylint: disable=too-many-nested-blocks
        if not isinstance(subnode.tag, str): continue
        # The code assumes the namespaced prefix '{<NSFOLIA>}' is exactly 25 chars.
        if subnode.tag[:25] == '{' + NSFOLIA + '}' and subnode.tag[-11:] == '-annotation':
            prefix = subnode.tag[25:][:-11]
            type = None
            # Map e.g. 'pos' -> AnnotationType.POS via the class namespace.
            if prefix.upper() in vars(AnnotationType):
                type = vars(AnnotationType)[prefix.upper()]
            else:
                raise Exception("Unknown declaration: " + subnode.tag)
            if 'set' in subnode.attrib and subnode.attrib['set']:
                set = subnode.attrib['set']
            else:
                set = 'undefined'
            if (type,set) in self.annotations:
                if type == AnnotationType.TEXT:
                    #explicit Text declaration, remove the implicit declaration:
                    a = []
                    for t,s in self.annotations:
                        if not (t == AnnotationType.TEXT and s == 'undefined'):
                            a.append( (t,s) )
                    self.annotations = a
                #raise ValueError("Double declaration of " + subnode.tag + ", set '" + set + "' + is already declared") //doubles are okay says Ko
            else:
                self.annotations.append( (type, set) )
            #Load set definition (remote sets only; local/undefined sets are skipped)
            if set and self.loadsetdefinitions and set not in self.setdefinitions:
                if set[:7] == "http://" or set[:8] == "https://" or set[:6] == "ftp://":
                    try:
                        self.setdefinitions[set] = SetDefinition(set,verbose=self.verbose) #will raise exception on error
                    except DeepValidationError:
                        print("WARNING: Set " + set + " could not be downloaded, ignoring!",file=sys.stderr) #warning and ignore
            #Set defaults
            if type in self.annotationdefaults and set in self.annotationdefaults[type]:
                #handle duplicate. If ambiguous: remove defaults
                if 'annotator' in subnode.attrib:
                    if not ('annotator' in self.annotationdefaults[type][set]):
                        self.annotationdefaults[type][set]['annotator'] = subnode.attrib['annotator']
                    elif self.annotationdefaults[type][set]['annotator'] != subnode.attrib['annotator']:
                        del self.annotationdefaults[type][set]['annotator']
                if 'annotatortype' in subnode.attrib:
                    if not ('annotatortype' in self.annotationdefaults[type][set]):
                        self.annotationdefaults[type][set]['annotatortype'] = subnode.attrib['annotatortype']
                    elif self.annotationdefaults[type][set]['annotatortype'] != subnode.attrib['annotatortype']:
                        del self.annotationdefaults[type][set]['annotatortype']
            else:
                defaults = {}
                if 'annotator' in subnode.attrib:
                    defaults['annotator'] = subnode.attrib['annotator']
                if 'annotatortype' in subnode.attrib:
                    if subnode.attrib['annotatortype'] == 'auto':
                        defaults['annotatortype'] = AnnotatorType.AUTO
                    else:
                        defaults['annotatortype'] = AnnotatorType.MANUAL
                if 'datetime' in subnode.attrib:
                    if isinstance(subnode.attrib['datetime'], datetime):
                        defaults['datetime'] = subnode.attrib['datetime']
                    else:
                        defaults['datetime'] = parse_datetime(subnode.attrib['datetime'])
                if not type in self.annotationdefaults:
                    self.annotationdefaults[type] = {}
                self.annotationdefaults[type][set] = defaults
            if 'external' in subnode.attrib and subnode.attrib['external']:
                if self.debug >= 1:
                    print("[PyNLPl FoLiA DEBUG] Loading external document: " + subnode.attrib['external'],file=stderr)
                if not type in self.standoffdocs:
                    self.standoffdocs[type] = {}
                self.standoffdocs[type][set] = {}
                #check if it is already loaded, if multiple references are made to the same doc we reuse the instance
                standoffdoc = None
                for t in self.standoffdocs:
                    for s in self.standoffdocs[t]:
                        for source in self.standoffdocs[t][s]:
                            if source == subnode.attrib['external']:
                                # NOTE(review): this binds the per-set dict, not
                                # self.standoffdocs[t][s][source] -- looks like a
                                # bug; confirm against upstream before relying on it.
                                standoffdoc = self.standoffdocs[t][s]
                                break
                        if standoffdoc: break
                    if standoffdoc: break
                if not standoffdoc:
                    if subnode.attrib['external'][:7] == 'http://' or subnode.attrib['external'][:8] == 'https://':
                        #document is remote, download (in memory)
                        try:
                            f = urlopen(subnode.attrib['external'])
                        except:
                            raise DeepValidationError("Unable to download standoff document: " + subnode.attrib['external'])
                        try:
                            content = u(f.read())
                        except IOError:
                            raise DeepValidationError("Unable to download standoff document: " + subnode.attrib['external'])
                        f.close()
                        standoffdoc = Document(string=content, parentdoc=self, setdefinitions=self.setdefinitions)
                    elif os.path.exists(subnode.attrib['external']):
                        #document is on disk:
                        standoffdoc = Document(file=subnode.attrib['external'], parentdoc=self, setdefinitions=self.setdefinitions)
                    else:
                        #document not found
                        raise DeepValidationError("Unable to find standoff document: " + subnode.attrib['external'])
                self.standoffdocs[type][set][subnode.attrib['external']] = standoffdoc
                standoffdoc.parentdoc = self
            # NOTE(review): 'defaults' is only bound in the else-branch above;
            # when a duplicate declaration was merged instead, this print raises
            # NameError (only when debug >= 1) -- confirm and fix upstream.
            if self.debug >= 1:
                print("[PyNLPl FoLiA DEBUG] Found declared annotation " + subnode.tag + ". Defaults: " + repr(defaults),file=stderr)
|
def function[parsexmldeclarations, parameter[self, node]]:
constant[Internal method to parse XML declarations]
if compare[name[self].debug greater_or_equal[>=] constant[1]] begin[:]
call[name[print], parameter[constant[[PyNLPl FoLiA DEBUG] Processing Annotation Declarations]]]
name[self].declareprocessed assign[=] constant[True]
for taget[name[subnode]] in starred[name[node]] begin[:]
if <ast.UnaryOp object at 0x7da2054a5c60> begin[:]
continue
if <ast.BoolOp object at 0x7da2054a71c0> begin[:]
variable[prefix] assign[=] call[call[name[subnode].tag][<ast.Slice object at 0x7da2054a72e0>]][<ast.Slice object at 0x7da2054a4880>]
variable[type] assign[=] constant[None]
if compare[call[name[prefix].upper, parameter[]] in call[name[vars], parameter[name[AnnotationType]]]] begin[:]
variable[type] assign[=] call[call[name[vars], parameter[name[AnnotationType]]]][call[name[prefix].upper, parameter[]]]
if <ast.BoolOp object at 0x7da2054a4ca0> begin[:]
variable[set] assign[=] call[name[subnode].attrib][constant[set]]
if compare[tuple[[<ast.Name object at 0x7da2054a5b70>, <ast.Name object at 0x7da2054a77c0>]] in name[self].annotations] begin[:]
if compare[name[type] equal[==] name[AnnotationType].TEXT] begin[:]
variable[a] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da2054a65c0>, <ast.Name object at 0x7da2054a4e80>]]] in starred[name[self].annotations] begin[:]
if <ast.UnaryOp object at 0x7da2054a5d50> begin[:]
call[name[a].append, parameter[tuple[[<ast.Name object at 0x7da2054a7580>, <ast.Name object at 0x7da2054a57e0>]]]]
name[self].annotations assign[=] name[a]
if <ast.BoolOp object at 0x7da2054a7070> begin[:]
if <ast.BoolOp object at 0x7da2054a5000> begin[:]
<ast.Try object at 0x7da2054a4700>
if <ast.BoolOp object at 0x7da207f9aef0> begin[:]
if compare[constant[annotator] in name[subnode].attrib] begin[:]
if <ast.UnaryOp object at 0x7da207f9ace0> begin[:]
call[call[call[name[self].annotationdefaults][name[type]]][name[set]]][constant[annotator]] assign[=] call[name[subnode].attrib][constant[annotator]]
if compare[constant[annotatortype] in name[subnode].attrib] begin[:]
if <ast.UnaryOp object at 0x7da207f985b0> begin[:]
call[call[call[name[self].annotationdefaults][name[type]]][name[set]]][constant[annotatortype]] assign[=] call[name[subnode].attrib][constant[annotatortype]]
if <ast.BoolOp object at 0x7da18f09d030> begin[:]
if compare[name[self].debug greater_or_equal[>=] constant[1]] begin[:]
call[name[print], parameter[binary_operation[constant[[PyNLPl FoLiA DEBUG] Loading external document: ] + call[name[subnode].attrib][constant[external]]]]]
if <ast.UnaryOp object at 0x7da18f09e770> begin[:]
call[name[self].standoffdocs][name[type]] assign[=] dictionary[[], []]
call[call[name[self].standoffdocs][name[type]]][name[set]] assign[=] dictionary[[], []]
variable[standoffdoc] assign[=] constant[None]
for taget[name[t]] in starred[name[self].standoffdocs] begin[:]
for taget[name[s]] in starred[call[name[self].standoffdocs][name[t]]] begin[:]
for taget[name[source]] in starred[call[call[name[self].standoffdocs][name[t]]][name[s]]] begin[:]
if compare[name[source] equal[==] call[name[subnode].attrib][constant[external]]] begin[:]
variable[standoffdoc] assign[=] call[call[name[self].standoffdocs][name[t]]][name[s]]
break
if name[standoffdoc] begin[:]
break
if name[standoffdoc] begin[:]
break
if <ast.UnaryOp object at 0x7da18f09fb50> begin[:]
if <ast.BoolOp object at 0x7da18f09f580> begin[:]
<ast.Try object at 0x7da18f09fac0>
<ast.Try object at 0x7da18f09d630>
call[name[f].close, parameter[]]
variable[standoffdoc] assign[=] call[name[Document], parameter[]]
call[call[call[name[self].standoffdocs][name[type]]][name[set]]][call[name[subnode].attrib][constant[external]]] assign[=] name[standoffdoc]
name[standoffdoc].parentdoc assign[=] name[self]
if compare[name[self].debug greater_or_equal[>=] constant[1]] begin[:]
call[name[print], parameter[binary_operation[binary_operation[binary_operation[constant[[PyNLPl FoLiA DEBUG] Found declared annotation ] + name[subnode].tag] + constant[. Defaults: ]] + call[name[repr], parameter[name[defaults]]]]]]
|
keyword[def] identifier[parsexmldeclarations] ( identifier[self] , identifier[node] ):
literal[string]
keyword[if] identifier[self] . identifier[debug] >= literal[int] :
identifier[print] ( literal[string] , identifier[file] = identifier[stderr] )
identifier[self] . identifier[declareprocessed] = keyword[True]
keyword[for] identifier[subnode] keyword[in] identifier[node] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[subnode] . identifier[tag] , identifier[str] ): keyword[continue]
keyword[if] identifier[subnode] . identifier[tag] [: literal[int] ]== literal[string] + identifier[NSFOLIA] + literal[string] keyword[and] identifier[subnode] . identifier[tag] [- literal[int] :]== literal[string] :
identifier[prefix] = identifier[subnode] . identifier[tag] [ literal[int] :][:- literal[int] ]
identifier[type] = keyword[None]
keyword[if] identifier[prefix] . identifier[upper] () keyword[in] identifier[vars] ( identifier[AnnotationType] ):
identifier[type] = identifier[vars] ( identifier[AnnotationType] )[ identifier[prefix] . identifier[upper] ()]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] + identifier[subnode] . identifier[tag] )
keyword[if] literal[string] keyword[in] identifier[subnode] . identifier[attrib] keyword[and] identifier[subnode] . identifier[attrib] [ literal[string] ]:
identifier[set] = identifier[subnode] . identifier[attrib] [ literal[string] ]
keyword[else] :
identifier[set] = literal[string]
keyword[if] ( identifier[type] , identifier[set] ) keyword[in] identifier[self] . identifier[annotations] :
keyword[if] identifier[type] == identifier[AnnotationType] . identifier[TEXT] :
identifier[a] =[]
keyword[for] identifier[t] , identifier[s] keyword[in] identifier[self] . identifier[annotations] :
keyword[if] keyword[not] ( identifier[t] == identifier[AnnotationType] . identifier[TEXT] keyword[and] identifier[s] == literal[string] ):
identifier[a] . identifier[append] (( identifier[t] , identifier[s] ))
identifier[self] . identifier[annotations] = identifier[a]
keyword[else] :
identifier[self] . identifier[annotations] . identifier[append] (( identifier[type] , identifier[set] ))
keyword[if] identifier[set] keyword[and] identifier[self] . identifier[loadsetdefinitions] keyword[and] identifier[set] keyword[not] keyword[in] identifier[self] . identifier[setdefinitions] :
keyword[if] identifier[set] [: literal[int] ]== literal[string] keyword[or] identifier[set] [: literal[int] ]== literal[string] keyword[or] identifier[set] [: literal[int] ]== literal[string] :
keyword[try] :
identifier[self] . identifier[setdefinitions] [ identifier[set] ]= identifier[SetDefinition] ( identifier[set] , identifier[verbose] = identifier[self] . identifier[verbose] )
keyword[except] identifier[DeepValidationError] :
identifier[print] ( literal[string] + identifier[set] + literal[string] , identifier[file] = identifier[sys] . identifier[stderr] )
keyword[if] identifier[type] keyword[in] identifier[self] . identifier[annotationdefaults] keyword[and] identifier[set] keyword[in] identifier[self] . identifier[annotationdefaults] [ identifier[type] ]:
keyword[if] literal[string] keyword[in] identifier[subnode] . identifier[attrib] :
keyword[if] keyword[not] ( literal[string] keyword[in] identifier[self] . identifier[annotationdefaults] [ identifier[type] ][ identifier[set] ]):
identifier[self] . identifier[annotationdefaults] [ identifier[type] ][ identifier[set] ][ literal[string] ]= identifier[subnode] . identifier[attrib] [ literal[string] ]
keyword[elif] identifier[self] . identifier[annotationdefaults] [ identifier[type] ][ identifier[set] ][ literal[string] ]!= identifier[subnode] . identifier[attrib] [ literal[string] ]:
keyword[del] identifier[self] . identifier[annotationdefaults] [ identifier[type] ][ identifier[set] ][ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[subnode] . identifier[attrib] :
keyword[if] keyword[not] ( literal[string] keyword[in] identifier[self] . identifier[annotationdefaults] [ identifier[type] ][ identifier[set] ]):
identifier[self] . identifier[annotationdefaults] [ identifier[type] ][ identifier[set] ][ literal[string] ]= identifier[subnode] . identifier[attrib] [ literal[string] ]
keyword[elif] identifier[self] . identifier[annotationdefaults] [ identifier[type] ][ identifier[set] ][ literal[string] ]!= identifier[subnode] . identifier[attrib] [ literal[string] ]:
keyword[del] identifier[self] . identifier[annotationdefaults] [ identifier[type] ][ identifier[set] ][ literal[string] ]
keyword[else] :
identifier[defaults] ={}
keyword[if] literal[string] keyword[in] identifier[subnode] . identifier[attrib] :
identifier[defaults] [ literal[string] ]= identifier[subnode] . identifier[attrib] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[subnode] . identifier[attrib] :
keyword[if] identifier[subnode] . identifier[attrib] [ literal[string] ]== literal[string] :
identifier[defaults] [ literal[string] ]= identifier[AnnotatorType] . identifier[AUTO]
keyword[else] :
identifier[defaults] [ literal[string] ]= identifier[AnnotatorType] . identifier[MANUAL]
keyword[if] literal[string] keyword[in] identifier[subnode] . identifier[attrib] :
keyword[if] identifier[isinstance] ( identifier[subnode] . identifier[attrib] [ literal[string] ], identifier[datetime] ):
identifier[defaults] [ literal[string] ]= identifier[subnode] . identifier[attrib] [ literal[string] ]
keyword[else] :
identifier[defaults] [ literal[string] ]= identifier[parse_datetime] ( identifier[subnode] . identifier[attrib] [ literal[string] ])
keyword[if] keyword[not] identifier[type] keyword[in] identifier[self] . identifier[annotationdefaults] :
identifier[self] . identifier[annotationdefaults] [ identifier[type] ]={}
identifier[self] . identifier[annotationdefaults] [ identifier[type] ][ identifier[set] ]= identifier[defaults]
keyword[if] literal[string] keyword[in] identifier[subnode] . identifier[attrib] keyword[and] identifier[subnode] . identifier[attrib] [ literal[string] ]:
keyword[if] identifier[self] . identifier[debug] >= literal[int] :
identifier[print] ( literal[string] + identifier[subnode] . identifier[attrib] [ literal[string] ], identifier[file] = identifier[stderr] )
keyword[if] keyword[not] identifier[type] keyword[in] identifier[self] . identifier[standoffdocs] :
identifier[self] . identifier[standoffdocs] [ identifier[type] ]={}
identifier[self] . identifier[standoffdocs] [ identifier[type] ][ identifier[set] ]={}
identifier[standoffdoc] = keyword[None]
keyword[for] identifier[t] keyword[in] identifier[self] . identifier[standoffdocs] :
keyword[for] identifier[s] keyword[in] identifier[self] . identifier[standoffdocs] [ identifier[t] ]:
keyword[for] identifier[source] keyword[in] identifier[self] . identifier[standoffdocs] [ identifier[t] ][ identifier[s] ]:
keyword[if] identifier[source] == identifier[subnode] . identifier[attrib] [ literal[string] ]:
identifier[standoffdoc] = identifier[self] . identifier[standoffdocs] [ identifier[t] ][ identifier[s] ]
keyword[break]
keyword[if] identifier[standoffdoc] : keyword[break]
keyword[if] identifier[standoffdoc] : keyword[break]
keyword[if] keyword[not] identifier[standoffdoc] :
keyword[if] identifier[subnode] . identifier[attrib] [ literal[string] ][: literal[int] ]== literal[string] keyword[or] identifier[subnode] . identifier[attrib] [ literal[string] ][: literal[int] ]== literal[string] :
keyword[try] :
identifier[f] = identifier[urlopen] ( identifier[subnode] . identifier[attrib] [ literal[string] ])
keyword[except] :
keyword[raise] identifier[DeepValidationError] ( literal[string] + identifier[subnode] . identifier[attrib] [ literal[string] ])
keyword[try] :
identifier[content] = identifier[u] ( identifier[f] . identifier[read] ())
keyword[except] identifier[IOError] :
keyword[raise] identifier[DeepValidationError] ( literal[string] + identifier[subnode] . identifier[attrib] [ literal[string] ])
identifier[f] . identifier[close] ()
identifier[standoffdoc] = identifier[Document] ( identifier[string] = identifier[content] , identifier[parentdoc] = identifier[self] , identifier[setdefinitions] = identifier[self] . identifier[setdefinitions] )
keyword[elif] identifier[os] . identifier[path] . identifier[exists] ( identifier[subnode] . identifier[attrib] [ literal[string] ]):
identifier[standoffdoc] = identifier[Document] ( identifier[file] = identifier[subnode] . identifier[attrib] [ literal[string] ], identifier[parentdoc] = identifier[self] , identifier[setdefinitions] = identifier[self] . identifier[setdefinitions] )
keyword[else] :
keyword[raise] identifier[DeepValidationError] ( literal[string] + identifier[subnode] . identifier[attrib] [ literal[string] ])
identifier[self] . identifier[standoffdocs] [ identifier[type] ][ identifier[set] ][ identifier[subnode] . identifier[attrib] [ literal[string] ]]= identifier[standoffdoc]
identifier[standoffdoc] . identifier[parentdoc] = identifier[self]
keyword[if] identifier[self] . identifier[debug] >= literal[int] :
identifier[print] ( literal[string] + identifier[subnode] . identifier[tag] + literal[string] + identifier[repr] ( identifier[defaults] ), identifier[file] = identifier[stderr] )
|
def parsexmldeclarations(self, node):
    """Internal method to parse XML declarations.

    For each ``*-annotation`` child in the FoLiA namespace this registers
    the annotation (type, set) pair, optionally downloads the remote set
    definition, records annotator/annotatortype/datetime defaults, and
    loads any external stand-off document named by the ``external``
    attribute. Mutates ``self.annotations``, ``self.setdefinitions``,
    ``self.annotationdefaults`` and ``self.standoffdocs``.

    NOTE(review): ``type`` and ``set`` shadow the builtins throughout.
    """
    if self.debug >= 1:
        print('[PyNLPl FoLiA DEBUG] Processing Annotation Declarations', file=stderr) # depends on [control=['if'], data=[]]
    self.declareprocessed = True
    for subnode in node: #pylint: disable=too-many-nested-blocks
        if not isinstance(subnode.tag, str):
            continue # depends on [control=['if'], data=[]]
        if subnode.tag[:25] == '{' + NSFOLIA + '}' and subnode.tag[-11:] == '-annotation':
            prefix = subnode.tag[25:][:-11]
            type = None
            if prefix.upper() in vars(AnnotationType):
                type = vars(AnnotationType)[prefix.upper()] # depends on [control=['if'], data=[]]
            else:
                raise Exception('Unknown declaration: ' + subnode.tag)
            if 'set' in subnode.attrib and subnode.attrib['set']:
                set = subnode.attrib['set'] # depends on [control=['if'], data=[]]
            else:
                set = 'undefined'
            if (type, set) in self.annotations:
                if type == AnnotationType.TEXT:
                    #explicit Text declaration, remove the implicit declaration:
                    a = []
                    for (t, s) in self.annotations:
                        if not (t == AnnotationType.TEXT and s == 'undefined'):
                            a.append((t, s)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
                    self.annotations = a # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
            else:
                #raise ValueError("Double declaration of " + subnode.tag + ", set '" + set + "' + is already declared") //doubles are okay says Ko
                self.annotations.append((type, set))
            #Load set definition
            if set and self.loadsetdefinitions and (set not in self.setdefinitions):
                if set[:7] == 'http://' or set[:8] == 'https://' or set[:6] == 'ftp://':
                    try:
                        self.setdefinitions[set] = SetDefinition(set, verbose=self.verbose) #will raise exception on error # depends on [control=['try'], data=[]]
                    except DeepValidationError:
                        print('WARNING: Set ' + set + ' could not be downloaded, ignoring!', file=sys.stderr) #warning and ignore # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
            #Set defaults
            if type in self.annotationdefaults and set in self.annotationdefaults[type]:
                #handle duplicate. If ambiguous: remove defaults
                if 'annotator' in subnode.attrib:
                    if not 'annotator' in self.annotationdefaults[type][set]:
                        self.annotationdefaults[type][set]['annotator'] = subnode.attrib['annotator'] # depends on [control=['if'], data=[]]
                    elif self.annotationdefaults[type][set]['annotator'] != subnode.attrib['annotator']:
                        del self.annotationdefaults[type][set]['annotator'] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
                if 'annotatortype' in subnode.attrib:
                    if not 'annotatortype' in self.annotationdefaults[type][set]:
                        self.annotationdefaults[type][set]['annotatortype'] = subnode.attrib['annotatortype'] # depends on [control=['if'], data=[]]
                    elif self.annotationdefaults[type][set]['annotatortype'] != subnode.attrib['annotatortype']:
                        del self.annotationdefaults[type][set]['annotatortype'] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
            else:
                defaults = {}
                if 'annotator' in subnode.attrib:
                    defaults['annotator'] = subnode.attrib['annotator'] # depends on [control=['if'], data=[]]
                if 'annotatortype' in subnode.attrib:
                    if subnode.attrib['annotatortype'] == 'auto':
                        defaults['annotatortype'] = AnnotatorType.AUTO # depends on [control=['if'], data=[]]
                    else:
                        defaults['annotatortype'] = AnnotatorType.MANUAL # depends on [control=['if'], data=[]]
                if 'datetime' in subnode.attrib:
                    if isinstance(subnode.attrib['datetime'], datetime):
                        defaults['datetime'] = subnode.attrib['datetime'] # depends on [control=['if'], data=[]]
                    else:
                        defaults['datetime'] = parse_datetime(subnode.attrib['datetime']) # depends on [control=['if'], data=[]]
                if not type in self.annotationdefaults:
                    self.annotationdefaults[type] = {} # depends on [control=['if'], data=[]]
                self.annotationdefaults[type][set] = defaults
            if 'external' in subnode.attrib and subnode.attrib['external']:
                if self.debug >= 1:
                    print('[PyNLPl FoLiA DEBUG] Loading external document: ' + subnode.attrib['external'], file=stderr) # depends on [control=['if'], data=[]]
                if not type in self.standoffdocs:
                    self.standoffdocs[type] = {} # depends on [control=['if'], data=[]]
                self.standoffdocs[type][set] = {}
                #check if it is already loaded, if multiple references are made to the same doc we reuse the instance
                standoffdoc = None
                for t in self.standoffdocs:
                    for s in self.standoffdocs[t]:
                        for source in self.standoffdocs[t][s]:
                            if source == subnode.attrib['external']:
                                # NOTE(review): binds the per-set dict rather than
                                # self.standoffdocs[t][s][source] -- looks like a bug;
                                # confirm against upstream.
                                standoffdoc = self.standoffdocs[t][s]
                                break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['source']]
                        if standoffdoc:
                            break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['s']]
                    if standoffdoc:
                        break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['t']]
                if not standoffdoc:
                    if subnode.attrib['external'][:7] == 'http://' or subnode.attrib['external'][:8] == 'https://':
                        #document is remote, download (in memory)
                        try:
                            f = urlopen(subnode.attrib['external']) # depends on [control=['try'], data=[]]
                        except:
                            raise DeepValidationError('Unable to download standoff document: ' + subnode.attrib['external']) # depends on [control=['except'], data=[]]
                        try:
                            content = u(f.read()) # depends on [control=['try'], data=[]]
                        except IOError:
                            raise DeepValidationError('Unable to download standoff document: ' + subnode.attrib['external']) # depends on [control=['except'], data=[]]
                        f.close()
                        standoffdoc = Document(string=content, parentdoc=self, setdefinitions=self.setdefinitions) # depends on [control=['if'], data=[]]
                    elif os.path.exists(subnode.attrib['external']):
                        #document is on disk:
                        standoffdoc = Document(file=subnode.attrib['external'], parentdoc=self, setdefinitions=self.setdefinitions) # depends on [control=['if'], data=[]]
                    else:
                        #document not found
                        raise DeepValidationError('Unable to find standoff document: ' + subnode.attrib['external']) # depends on [control=['if'], data=[]]
                self.standoffdocs[type][set][subnode.attrib['external']] = standoffdoc
                standoffdoc.parentdoc = self # depends on [control=['if'], data=[]]
            # NOTE(review): 'defaults' is only bound in the else-branch above; when a
            # duplicate declaration was merged, this print raises NameError (debug>=1
            # only) -- confirm and fix upstream.
            if self.debug >= 1:
                print('[PyNLPl FoLiA DEBUG] Found declared annotation ' + subnode.tag + '. Defaults: ' + repr(defaults), file=stderr) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['subnode']]
|
def get_next(self, request):
        """
        Return the URL to redirect to after the login / signup.

        The target is taken from, in order of precedence: the session,
        the GET parameters, the POST parameters, and finally the
        ``LOGIN_REDIRECT_URL`` setting (defaulting to ``/``).
        """
        if 'next' in request.session:
            redirect_to = request.session['next']
            del request.session['next']
        elif 'next' in request.GET:
            redirect_to = request.GET.get('next')
        elif 'next' in request.POST:
            redirect_to = request.POST.get('next')
        else:
            redirect_to = getattr(settings, 'LOGIN_REDIRECT_URL', '/')
        # Open-redirect guard: refuse targets pointing at a different host.
        netloc = urlparse.urlparse(redirect_to)[1]
        if netloc and netloc != request.get_host():
            redirect_to = getattr(settings, 'LOGIN_REDIRECT_URL', '/')
        return redirect_to
|
def function[get_next, parameter[self, request]]:
constant[
Returns a url to redirect to after the login / signup.
]
if compare[constant[next] in name[request].session] begin[:]
variable[next] assign[=] call[name[request].session][constant[next]]
<ast.Delete object at 0x7da1b1a5ece0>
variable[netloc] assign[=] call[call[name[urlparse].urlparse, parameter[name[next]]]][constant[1]]
if <ast.BoolOp object at 0x7da18c4cdde0> begin[:]
variable[next] assign[=] call[name[getattr], parameter[name[settings], constant[LOGIN_REDIRECT_URL], constant[/]]]
return[name[next]]
|
keyword[def] identifier[get_next] ( identifier[self] , identifier[request] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[request] . identifier[session] :
identifier[next] = identifier[request] . identifier[session] [ literal[string] ]
keyword[del] identifier[request] . identifier[session] [ literal[string] ]
keyword[elif] literal[string] keyword[in] identifier[request] . identifier[GET] :
identifier[next] = identifier[request] . identifier[GET] . identifier[get] ( literal[string] )
keyword[elif] literal[string] keyword[in] identifier[request] . identifier[POST] :
identifier[next] = identifier[request] . identifier[POST] . identifier[get] ( literal[string] )
keyword[else] :
identifier[next] = identifier[getattr] ( identifier[settings] , literal[string] , literal[string] )
identifier[netloc] = identifier[urlparse] . identifier[urlparse] ( identifier[next] )[ literal[int] ]
keyword[if] identifier[netloc] keyword[and] identifier[netloc] != identifier[request] . identifier[get_host] ():
identifier[next] = identifier[getattr] ( identifier[settings] , literal[string] , literal[string] )
keyword[return] identifier[next]
|
def get_next(self, request):
"""
Returns a url to redirect to after the login / signup.
"""
if 'next' in request.session:
next = request.session['next']
del request.session['next'] # depends on [control=['if'], data=[]]
elif 'next' in request.GET:
next = request.GET.get('next') # depends on [control=['if'], data=[]]
elif 'next' in request.POST:
next = request.POST.get('next') # depends on [control=['if'], data=[]]
else:
next = getattr(settings, 'LOGIN_REDIRECT_URL', '/')
netloc = urlparse.urlparse(next)[1]
if netloc and netloc != request.get_host():
next = getattr(settings, 'LOGIN_REDIRECT_URL', '/') # depends on [control=['if'], data=[]]
return next
|
def _get_mass_by_index(self, index):
"""
where index can either by an integer or a list of integers (returns some of masses)
"""
if hasattr(index, '__iter__'):
return sum([self.masses[i] for i in index])
else:
return self.masses[index]
|
def function[_get_mass_by_index, parameter[self, index]]:
constant[
where index can either by an integer or a list of integers (returns some of masses)
]
if call[name[hasattr], parameter[name[index], constant[__iter__]]] begin[:]
return[call[name[sum], parameter[<ast.ListComp object at 0x7da18f58d990>]]]
|
keyword[def] identifier[_get_mass_by_index] ( identifier[self] , identifier[index] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[index] , literal[string] ):
keyword[return] identifier[sum] ([ identifier[self] . identifier[masses] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[index] ])
keyword[else] :
keyword[return] identifier[self] . identifier[masses] [ identifier[index] ]
|
def _get_mass_by_index(self, index):
"""
where index can either by an integer or a list of integers (returns some of masses)
"""
if hasattr(index, '__iter__'):
return sum([self.masses[i] for i in index]) # depends on [control=['if'], data=[]]
else:
return self.masses[index]
|
def _write4bits(self, value):
        """Write the low 4 bits of ``value`` onto the data bus, then pulse enable."""
        for bit_pos in range(4):
            # Data bus lines start at pin slot 7; one output per bit.
            level = (value >> bit_pos) & 0x01
            GPIO.output(self.pins[bit_pos + 7], level)
        self._pulse_enable()
|
def function[_write4bits, parameter[self, value]]:
constant[Write 4 bits of data into the data bus.]
for taget[name[i]] in starred[call[name[range], parameter[constant[4]]]] begin[:]
variable[bit] assign[=] binary_operation[binary_operation[name[value] <ast.RShift object at 0x7da2590d6a40> name[i]] <ast.BitAnd object at 0x7da2590d6b60> constant[1]]
call[name[GPIO].output, parameter[call[name[self].pins][binary_operation[name[i] + constant[7]]], name[bit]]]
call[name[self]._pulse_enable, parameter[]]
|
keyword[def] identifier[_write4bits] ( identifier[self] , identifier[value] ):
literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ):
identifier[bit] =( identifier[value] >> identifier[i] )& literal[int]
identifier[GPIO] . identifier[output] ( identifier[self] . identifier[pins] [ identifier[i] + literal[int] ], identifier[bit] )
identifier[self] . identifier[_pulse_enable] ()
|
def _write4bits(self, value):
"""Write 4 bits of data into the data bus."""
for i in range(4):
bit = value >> i & 1
GPIO.output(self.pins[i + 7], bit) # depends on [control=['for'], data=['i']]
self._pulse_enable()
|
def Residual(*layers, **kwargs):
  """Constructs a residual version of layers, summing input to layers output.

  Args:
    *layers: one or more layers forming the main (residual) branch.
    **kwargs: may contain `shortcut`, the layer used as the skip
      connection (defaults to the identity).

  Returns:
    A layer computing `layers(x) + shortcut(x)`.

  Raises:
    ValueError: if called with no layers.
  """
  if not layers:
    raise ValueError('Empty residual combinator.')
  shortcut = kwargs.get('shortcut', Identity())  # pylint: disable=no-value-for-parameter
  # A single layer is used as-is; several layers are chained serially.
  # This collapses the two duplicated Branch/Parallel/Sum constructions.
  main = layers[0] if len(layers) == 1 else Serial(*layers)
  return Serial(
      Branch(),  # pylint: disable=no-value-for-parameter
      Parallel(main, shortcut),
      SumBranches()  # pylint: disable=no-value-for-parameter
  )
|
def function[Residual, parameter[]]:
constant[Constructs a residual version of layers, summing input to layers output.]
variable[shortcut] assign[=] call[name[kwargs].get, parameter[constant[shortcut], call[name[Identity], parameter[]]]]
if compare[call[name[len], parameter[name[layers]]] greater[>] constant[1]] begin[:]
return[call[name[Serial], parameter[call[name[Branch], parameter[]], call[name[Parallel], parameter[call[name[Serial], parameter[<ast.Starred object at 0x7da20c6e7af0>]], name[shortcut]]], call[name[SumBranches], parameter[]]]]]
|
keyword[def] identifier[Residual] (* identifier[layers] ,** identifier[kwargs] ):
literal[string]
identifier[shortcut] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[Identity] ())
keyword[if] identifier[len] ( identifier[layers] )> literal[int] :
keyword[return] identifier[Serial] (
identifier[Branch] (),
identifier[Parallel] ( identifier[Serial] (* identifier[layers] ), identifier[shortcut] ),
identifier[SumBranches] ()
)
keyword[elif] identifier[len] ( identifier[layers] )== literal[int] :
keyword[return] identifier[Serial] (
identifier[Branch] (),
identifier[Parallel] ( identifier[layers] [ literal[int] ], identifier[shortcut] ),
identifier[SumBranches] ()
)
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
|
def Residual(*layers, **kwargs):
"""Constructs a residual version of layers, summing input to layers output."""
shortcut = kwargs.get('shortcut', Identity()) # pylint: disable=no-value-for-parameter
if len(layers) > 1: # pylint: disable=no-value-for-parameter
# pylint: disable=no-value-for-parameter
return Serial(Branch(), Parallel(Serial(*layers), shortcut), SumBranches()) # depends on [control=['if'], data=[]]
elif len(layers) == 1: # pylint: disable=no-value-for-parameter
# pylint: disable=no-value-for-parameter
return Serial(Branch(), Parallel(layers[0], shortcut), SumBranches()) # depends on [control=['if'], data=[]]
else:
raise ValueError('Empty residual combinator.')
|
def create_figures(self):
        """Create (or re-create) the matplotlib plot widgets and their toolbars.

        Builds:
            self.matplotlibwidget_1, self.matplotlibwidget_2: plot canvases
            self.mpl_toolbar_1, self.mpl_toolbar_2: navigation toolbars

        Safe to call repeatedly: any widgets from a previous call are
        removed from their layouts and closed first.
        """
        # Tear down widgets from a previous call; AttributeError simply means
        # this is the first call and the attributes don't exist yet.
        try:
            self.horizontalLayout_14.removeWidget(self.matplotlibwidget_1)
            self.matplotlibwidget_1.close()
        except AttributeError:
            pass
        # NOTE(review): widget_2 is removed from horizontalLayout_15 here but
        # added to horizontalLayout_16 below -- confirm which layout is intended.
        try:
            self.horizontalLayout_15.removeWidget(self.matplotlibwidget_2)
            self.matplotlibwidget_2.close()
        except AttributeError:
            pass
        self.matplotlibwidget_2 = MatplotlibWidget(self.plot_2)
        # Let the second plot grow with the window, but never below 200x200.
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.matplotlibwidget_2.sizePolicy().hasHeightForWidth())
        self.matplotlibwidget_2.setSizePolicy(sizePolicy)
        self.matplotlibwidget_2.setMinimumSize(QtCore.QSize(200, 200))
        self.matplotlibwidget_2.setObjectName("matplotlibwidget_2")
        self.horizontalLayout_16.addWidget(self.matplotlibwidget_2)
        self.matplotlibwidget_1 = MatplotlibWidget(self.plot_1)
        self.matplotlibwidget_1.setMinimumSize(QtCore.QSize(200, 200))
        self.matplotlibwidget_1.setObjectName("matplotlibwidget_1")
        self.horizontalLayout_15.addWidget(self.matplotlibwidget_1)
        # Route mouse clicks on either canvas to the shared click handler.
        self.matplotlibwidget_1.mpl_connect('button_press_event', self.plot_clicked)
        self.matplotlibwidget_2.mpl_connect('button_press_event', self.plot_clicked)
        # adds a toolbar to the plots
        self.mpl_toolbar_1 = NavigationToolbar(self.matplotlibwidget_1.canvas, self.toolbar_space_1)
        self.mpl_toolbar_2 = NavigationToolbar(self.matplotlibwidget_2.canvas, self.toolbar_space_2)
        self.horizontalLayout_9.addWidget(self.mpl_toolbar_2)
        self.horizontalLayout_14.addWidget(self.mpl_toolbar_1)
        # Tight layout keeps axis labels from being clipped on resize.
        self.matplotlibwidget_1.figure.set_tight_layout(True)
        self.matplotlibwidget_2.figure.set_tight_layout(True)
|
def function[create_figures, parameter[self]]:
constant[
creates the maplotlib figures]
self.matplotlibwidget_1
self.matplotlibwidget_2
and toolbars
self.mpl_toolbar_1
self.mpl_toolbar_2
Returns:
]
<ast.Try object at 0x7da1b23b3880>
<ast.Try object at 0x7da1b25c3b20>
name[self].matplotlibwidget_2 assign[=] call[name[MatplotlibWidget], parameter[name[self].plot_2]]
variable[sizePolicy] assign[=] call[name[QtWidgets].QSizePolicy, parameter[name[QtWidgets].QSizePolicy.Expanding, name[QtWidgets].QSizePolicy.Expanding]]
call[name[sizePolicy].setHorizontalStretch, parameter[constant[0]]]
call[name[sizePolicy].setVerticalStretch, parameter[constant[0]]]
call[name[sizePolicy].setHeightForWidth, parameter[call[call[name[self].matplotlibwidget_2.sizePolicy, parameter[]].hasHeightForWidth, parameter[]]]]
call[name[self].matplotlibwidget_2.setSizePolicy, parameter[name[sizePolicy]]]
call[name[self].matplotlibwidget_2.setMinimumSize, parameter[call[name[QtCore].QSize, parameter[constant[200], constant[200]]]]]
call[name[self].matplotlibwidget_2.setObjectName, parameter[constant[matplotlibwidget_2]]]
call[name[self].horizontalLayout_16.addWidget, parameter[name[self].matplotlibwidget_2]]
name[self].matplotlibwidget_1 assign[=] call[name[MatplotlibWidget], parameter[name[self].plot_1]]
call[name[self].matplotlibwidget_1.setMinimumSize, parameter[call[name[QtCore].QSize, parameter[constant[200], constant[200]]]]]
call[name[self].matplotlibwidget_1.setObjectName, parameter[constant[matplotlibwidget_1]]]
call[name[self].horizontalLayout_15.addWidget, parameter[name[self].matplotlibwidget_1]]
call[name[self].matplotlibwidget_1.mpl_connect, parameter[constant[button_press_event], name[self].plot_clicked]]
call[name[self].matplotlibwidget_2.mpl_connect, parameter[constant[button_press_event], name[self].plot_clicked]]
name[self].mpl_toolbar_1 assign[=] call[name[NavigationToolbar], parameter[name[self].matplotlibwidget_1.canvas, name[self].toolbar_space_1]]
name[self].mpl_toolbar_2 assign[=] call[name[NavigationToolbar], parameter[name[self].matplotlibwidget_2.canvas, name[self].toolbar_space_2]]
call[name[self].horizontalLayout_9.addWidget, parameter[name[self].mpl_toolbar_2]]
call[name[self].horizontalLayout_14.addWidget, parameter[name[self].mpl_toolbar_1]]
call[name[self].matplotlibwidget_1.figure.set_tight_layout, parameter[constant[True]]]
call[name[self].matplotlibwidget_2.figure.set_tight_layout, parameter[constant[True]]]
|
keyword[def] identifier[create_figures] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[self] . identifier[horizontalLayout_14] . identifier[removeWidget] ( identifier[self] . identifier[matplotlibwidget_1] )
identifier[self] . identifier[matplotlibwidget_1] . identifier[close] ()
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[try] :
identifier[self] . identifier[horizontalLayout_15] . identifier[removeWidget] ( identifier[self] . identifier[matplotlibwidget_2] )
identifier[self] . identifier[matplotlibwidget_2] . identifier[close] ()
keyword[except] identifier[AttributeError] :
keyword[pass]
identifier[self] . identifier[matplotlibwidget_2] = identifier[MatplotlibWidget] ( identifier[self] . identifier[plot_2] )
identifier[sizePolicy] = identifier[QtWidgets] . identifier[QSizePolicy] ( identifier[QtWidgets] . identifier[QSizePolicy] . identifier[Expanding] , identifier[QtWidgets] . identifier[QSizePolicy] . identifier[Expanding] )
identifier[sizePolicy] . identifier[setHorizontalStretch] ( literal[int] )
identifier[sizePolicy] . identifier[setVerticalStretch] ( literal[int] )
identifier[sizePolicy] . identifier[setHeightForWidth] ( identifier[self] . identifier[matplotlibwidget_2] . identifier[sizePolicy] (). identifier[hasHeightForWidth] ())
identifier[self] . identifier[matplotlibwidget_2] . identifier[setSizePolicy] ( identifier[sizePolicy] )
identifier[self] . identifier[matplotlibwidget_2] . identifier[setMinimumSize] ( identifier[QtCore] . identifier[QSize] ( literal[int] , literal[int] ))
identifier[self] . identifier[matplotlibwidget_2] . identifier[setObjectName] ( literal[string] )
identifier[self] . identifier[horizontalLayout_16] . identifier[addWidget] ( identifier[self] . identifier[matplotlibwidget_2] )
identifier[self] . identifier[matplotlibwidget_1] = identifier[MatplotlibWidget] ( identifier[self] . identifier[plot_1] )
identifier[self] . identifier[matplotlibwidget_1] . identifier[setMinimumSize] ( identifier[QtCore] . identifier[QSize] ( literal[int] , literal[int] ))
identifier[self] . identifier[matplotlibwidget_1] . identifier[setObjectName] ( literal[string] )
identifier[self] . identifier[horizontalLayout_15] . identifier[addWidget] ( identifier[self] . identifier[matplotlibwidget_1] )
identifier[self] . identifier[matplotlibwidget_1] . identifier[mpl_connect] ( literal[string] , identifier[self] . identifier[plot_clicked] )
identifier[self] . identifier[matplotlibwidget_2] . identifier[mpl_connect] ( literal[string] , identifier[self] . identifier[plot_clicked] )
identifier[self] . identifier[mpl_toolbar_1] = identifier[NavigationToolbar] ( identifier[self] . identifier[matplotlibwidget_1] . identifier[canvas] , identifier[self] . identifier[toolbar_space_1] )
identifier[self] . identifier[mpl_toolbar_2] = identifier[NavigationToolbar] ( identifier[self] . identifier[matplotlibwidget_2] . identifier[canvas] , identifier[self] . identifier[toolbar_space_2] )
identifier[self] . identifier[horizontalLayout_9] . identifier[addWidget] ( identifier[self] . identifier[mpl_toolbar_2] )
identifier[self] . identifier[horizontalLayout_14] . identifier[addWidget] ( identifier[self] . identifier[mpl_toolbar_1] )
identifier[self] . identifier[matplotlibwidget_1] . identifier[figure] . identifier[set_tight_layout] ( keyword[True] )
identifier[self] . identifier[matplotlibwidget_2] . identifier[figure] . identifier[set_tight_layout] ( keyword[True] )
|
def create_figures(self):
"""
creates the maplotlib figures]
self.matplotlibwidget_1
self.matplotlibwidget_2
and toolbars
self.mpl_toolbar_1
self.mpl_toolbar_2
Returns:
"""
try:
self.horizontalLayout_14.removeWidget(self.matplotlibwidget_1)
self.matplotlibwidget_1.close() # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
try:
self.horizontalLayout_15.removeWidget(self.matplotlibwidget_2)
self.matplotlibwidget_2.close() # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
self.matplotlibwidget_2 = MatplotlibWidget(self.plot_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.matplotlibwidget_2.sizePolicy().hasHeightForWidth())
self.matplotlibwidget_2.setSizePolicy(sizePolicy)
self.matplotlibwidget_2.setMinimumSize(QtCore.QSize(200, 200))
self.matplotlibwidget_2.setObjectName('matplotlibwidget_2')
self.horizontalLayout_16.addWidget(self.matplotlibwidget_2)
self.matplotlibwidget_1 = MatplotlibWidget(self.plot_1)
self.matplotlibwidget_1.setMinimumSize(QtCore.QSize(200, 200))
self.matplotlibwidget_1.setObjectName('matplotlibwidget_1')
self.horizontalLayout_15.addWidget(self.matplotlibwidget_1)
self.matplotlibwidget_1.mpl_connect('button_press_event', self.plot_clicked)
self.matplotlibwidget_2.mpl_connect('button_press_event', self.plot_clicked)
# adds a toolbar to the plots
self.mpl_toolbar_1 = NavigationToolbar(self.matplotlibwidget_1.canvas, self.toolbar_space_1)
self.mpl_toolbar_2 = NavigationToolbar(self.matplotlibwidget_2.canvas, self.toolbar_space_2)
self.horizontalLayout_9.addWidget(self.mpl_toolbar_2)
self.horizontalLayout_14.addWidget(self.mpl_toolbar_1)
self.matplotlibwidget_1.figure.set_tight_layout(True)
self.matplotlibwidget_2.figure.set_tight_layout(True)
|
def getFeatureSetByName(self, name):
        """
        Return the FeatureSet registered under ``name``.

        Raises a FeatureSetNameNotFoundException when no feature set with
        that name exists.
        """
        if name in self._featureSetNameMap:
            return self._featureSetNameMap[name]
        raise exceptions.FeatureSetNameNotFoundException(name)
|
def function[getFeatureSetByName, parameter[self, name]]:
constant[
Returns the FeatureSet with the specified name, or raises
an exception otherwise.
]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[self]._featureSetNameMap] begin[:]
<ast.Raise object at 0x7da204566ad0>
return[call[name[self]._featureSetNameMap][name[name]]]
|
keyword[def] identifier[getFeatureSetByName] ( identifier[self] , identifier[name] ):
literal[string]
keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[_featureSetNameMap] :
keyword[raise] identifier[exceptions] . identifier[FeatureSetNameNotFoundException] ( identifier[name] )
keyword[return] identifier[self] . identifier[_featureSetNameMap] [ identifier[name] ]
|
def getFeatureSetByName(self, name):
"""
Returns the FeatureSet with the specified name, or raises
an exception otherwise.
"""
if name not in self._featureSetNameMap:
raise exceptions.FeatureSetNameNotFoundException(name) # depends on [control=['if'], data=['name']]
return self._featureSetNameMap[name]
|
def prep_folder(self, seq):
        """Prepare the folder for the I-TASSER run from a sequence string.

        Creates ``<root_dir>/<id>`` if it does not exist and writes the
        sequence there as ``seq.fasta`` keyed by this object's ID.

        Returns:
            str: path to the prepared I-TASSER directory.
        """
        run_dir = op.join(self.root_dir, self.id)
        if not op.exists(run_dir):
            os.makedirs(run_dir)
        seq_by_id = {self.id: seq}
        fasta.write_fasta_file_from_dict(indict=seq_by_id,
                                         outname='seq',
                                         outext='.fasta',
                                         outdir=run_dir)
        return run_dir
|
def function[prep_folder, parameter[self, seq]]:
constant[Take in a sequence string and prepares the folder for the I-TASSER run.]
variable[itasser_dir] assign[=] call[name[op].join, parameter[name[self].root_dir, name[self].id]]
if <ast.UnaryOp object at 0x7da1b0ebf160> begin[:]
call[name[os].makedirs, parameter[name[itasser_dir]]]
variable[tmp] assign[=] dictionary[[<ast.Attribute object at 0x7da204622260>], [<ast.Name object at 0x7da2046217b0>]]
call[name[fasta].write_fasta_file_from_dict, parameter[]]
return[name[itasser_dir]]
|
keyword[def] identifier[prep_folder] ( identifier[self] , identifier[seq] ):
literal[string]
identifier[itasser_dir] = identifier[op] . identifier[join] ( identifier[self] . identifier[root_dir] , identifier[self] . identifier[id] )
keyword[if] keyword[not] identifier[op] . identifier[exists] ( identifier[itasser_dir] ):
identifier[os] . identifier[makedirs] ( identifier[itasser_dir] )
identifier[tmp] ={ identifier[self] . identifier[id] : identifier[seq] }
identifier[fasta] . identifier[write_fasta_file_from_dict] ( identifier[indict] = identifier[tmp] ,
identifier[outname] = literal[string] ,
identifier[outext] = literal[string] ,
identifier[outdir] = identifier[itasser_dir] )
keyword[return] identifier[itasser_dir]
|
def prep_folder(self, seq):
"""Take in a sequence string and prepares the folder for the I-TASSER run."""
itasser_dir = op.join(self.root_dir, self.id)
if not op.exists(itasser_dir):
os.makedirs(itasser_dir) # depends on [control=['if'], data=[]]
tmp = {self.id: seq}
fasta.write_fasta_file_from_dict(indict=tmp, outname='seq', outext='.fasta', outdir=itasser_dir)
return itasser_dir
|
def extract_named_entities(text_blocks):
    """
    Return the set of named entities extracted from the provided text blocks
    (list of text strings).
    """
    sentences = []
    for text in text_blocks:
        sentences.extend(nltk.sent_tokenize(text))
    tokenized_sentences = [nltk.word_tokenize(sentence) for sentence in sentences]
    tagged_sentences = [nltk.pos_tag(sentence) for sentence in tokenized_sentences]
    # binary=True collapses all entity categories into a single 'NE' label.
    chunked_sentences = nltk.ne_chunk_sents(tagged_sentences, binary=True)

    def extract_entity_names(t):
        # Leaves are (word, tag) tuples with no label(); NE subtrees hold
        # the tokens of one entity, other subtrees are searched recursively.
        entity_names = []
        if hasattr(t, 'label'):
            if t.label() == 'NE':
                entity_names.append(' '.join(child[0] for child in t))
            else:
                for child in t:
                    entity_names.extend(extract_entity_names(child))
        return entity_names

    entity_names = []
    for tree in chunked_sentences:
        entity_names.extend(extract_entity_names(tree))
    return set(entity_names)
|
def function[extract_named_entities, parameter[text_blocks]]:
constant[
Return a list of named entities extracted from provided text blocks (list of text strings).
]
variable[sentences] assign[=] list[[]]
for taget[name[text]] in starred[name[text_blocks]] begin[:]
call[name[sentences].extend, parameter[call[name[nltk].sent_tokenize, parameter[name[text]]]]]
variable[tokenized_sentences] assign[=] <ast.ListComp object at 0x7da20c6e6e60>
variable[tagged_sentences] assign[=] <ast.ListComp object at 0x7da20c6e5b40>
variable[chunked_sentences] assign[=] call[name[nltk].ne_chunk_sents, parameter[name[tagged_sentences]]]
def function[extract_entity_names, parameter[t]]:
variable[entity_names] assign[=] list[[]]
if call[name[hasattr], parameter[name[t], constant[label]]] begin[:]
if compare[call[name[t].label, parameter[]] equal[==] constant[NE]] begin[:]
call[name[entity_names].append, parameter[call[constant[ ].join, parameter[<ast.ListComp object at 0x7da20c6e4fd0>]]]]
return[name[entity_names]]
variable[entity_names] assign[=] list[[]]
for taget[name[tree]] in starred[name[chunked_sentences]] begin[:]
call[name[entity_names].extend, parameter[call[name[extract_entity_names], parameter[name[tree]]]]]
return[call[name[set], parameter[name[entity_names]]]]
|
keyword[def] identifier[extract_named_entities] ( identifier[text_blocks] ):
literal[string]
identifier[sentences] =[]
keyword[for] identifier[text] keyword[in] identifier[text_blocks] :
identifier[sentences] . identifier[extend] ( identifier[nltk] . identifier[sent_tokenize] ( identifier[text] ))
identifier[tokenized_sentences] =[ identifier[nltk] . identifier[word_tokenize] ( identifier[sentence] ) keyword[for] identifier[sentence] keyword[in] identifier[sentences] ]
identifier[tagged_sentences] =[ identifier[nltk] . identifier[pos_tag] ( identifier[sentence] ) keyword[for] identifier[sentence] keyword[in] identifier[tokenized_sentences] ]
identifier[chunked_sentences] = identifier[nltk] . identifier[ne_chunk_sents] ( identifier[tagged_sentences] , identifier[binary] = keyword[True] )
keyword[def] identifier[extract_entity_names] ( identifier[t] ):
identifier[entity_names] =[]
keyword[if] identifier[hasattr] ( identifier[t] , literal[string] ):
keyword[if] identifier[t] . identifier[label] ()== literal[string] :
identifier[entity_names] . identifier[append] ( literal[string] . identifier[join] ([ identifier[child] [ literal[int] ] keyword[for] identifier[child] keyword[in] identifier[t] ]))
keyword[else] :
keyword[for] identifier[child] keyword[in] identifier[t] :
identifier[entity_names] . identifier[extend] ( identifier[extract_entity_names] ( identifier[child] ))
keyword[return] identifier[entity_names]
identifier[entity_names] =[]
keyword[for] identifier[tree] keyword[in] identifier[chunked_sentences] :
identifier[entity_names] . identifier[extend] ( identifier[extract_entity_names] ( identifier[tree] ))
keyword[return] identifier[set] ( identifier[entity_names] )
|
def extract_named_entities(text_blocks):
"""
Return a list of named entities extracted from provided text blocks (list of text strings).
"""
sentences = []
for text in text_blocks:
sentences.extend(nltk.sent_tokenize(text)) # depends on [control=['for'], data=['text']]
tokenized_sentences = [nltk.word_tokenize(sentence) for sentence in sentences]
tagged_sentences = [nltk.pos_tag(sentence) for sentence in tokenized_sentences]
chunked_sentences = nltk.ne_chunk_sents(tagged_sentences, binary=True)
def extract_entity_names(t):
entity_names = []
if hasattr(t, 'label'):
if t.label() == 'NE':
entity_names.append(' '.join([child[0] for child in t])) # depends on [control=['if'], data=[]]
else:
for child in t:
entity_names.extend(extract_entity_names(child)) # depends on [control=['for'], data=['child']] # depends on [control=['if'], data=[]]
return entity_names
entity_names = []
for tree in chunked_sentences:
entity_names.extend(extract_entity_names(tree)) # depends on [control=['for'], data=['tree']]
return set(entity_names)
|
def flatten(list_of_lists):
    """Flatten a list of lists but maintain strings and ints as entries.

    Strings and ints pass through unchanged, ``None`` entries are dropped,
    one-element sequences are unwrapped, and longer sequences become tuples.
    """
    flat_list = []
    for sublist in list_of_lists:
        if isinstance(sublist, string_types) or isinstance(sublist, int):
            flat_list.append(sublist)
        elif sublist is None:
            continue
        elif len(sublist) == 1:
            # Strings were already handled above, so no need to re-check the
            # type before unwrapping (the original condition was redundant).
            flat_list.append(sublist[0])
        else:
            flat_list.append(tuple(sublist))
    return flat_list
|
def function[flatten, parameter[list_of_lists]]:
constant[Flatten a list of lists but maintain strings and ints as entries.]
variable[flat_list] assign[=] list[[]]
for taget[name[sublist]] in starred[name[list_of_lists]] begin[:]
if <ast.BoolOp object at 0x7da1b0659180> begin[:]
call[name[flat_list].append, parameter[name[sublist]]]
return[name[flat_list]]
|
keyword[def] identifier[flatten] ( identifier[list_of_lists] ):
literal[string]
identifier[flat_list] =[]
keyword[for] identifier[sublist] keyword[in] identifier[list_of_lists] :
keyword[if] identifier[isinstance] ( identifier[sublist] , identifier[string_types] ) keyword[or] identifier[isinstance] ( identifier[sublist] , identifier[int] ):
identifier[flat_list] . identifier[append] ( identifier[sublist] )
keyword[elif] identifier[sublist] keyword[is] keyword[None] :
keyword[continue]
keyword[elif] keyword[not] identifier[isinstance] ( identifier[sublist] , identifier[string_types] ) keyword[and] identifier[len] ( identifier[sublist] )== literal[int] :
identifier[flat_list] . identifier[append] ( identifier[sublist] [ literal[int] ])
keyword[else] :
identifier[flat_list] . identifier[append] ( identifier[tuple] ( identifier[sublist] ))
keyword[return] identifier[flat_list]
|
def flatten(list_of_lists):
"""Flatten a list of lists but maintain strings and ints as entries."""
flat_list = []
for sublist in list_of_lists:
if isinstance(sublist, string_types) or isinstance(sublist, int):
flat_list.append(sublist) # depends on [control=['if'], data=[]]
elif sublist is None:
continue # depends on [control=['if'], data=[]]
elif not isinstance(sublist, string_types) and len(sublist) == 1:
flat_list.append(sublist[0]) # depends on [control=['if'], data=[]]
else:
flat_list.append(tuple(sublist)) # depends on [control=['for'], data=['sublist']]
return flat_list
|
def wait(self):
        """
        Block until the request is dispatched, recording whether it timed out.
        """
        self.wait_event = threading.Event()
        if self.timeout:
            wait_seconds = int(self.timeout)
        else:
            wait_seconds = None
        # Event.wait returns False on timeout, so invert for timed_out.
        self.timed_out = not self.wait_event.wait(wait_seconds)
|
def function[wait, parameter[self]]:
constant[
Wait for the request to be dispatched.
]
name[self].wait_event assign[=] call[name[threading].Event, parameter[]]
variable[timeout] assign[=] <ast.IfExp object at 0x7da1b1a64be0>
name[self].timed_out assign[=] <ast.UnaryOp object at 0x7da1b1a65c90>
|
keyword[def] identifier[wait] ( identifier[self] ):
literal[string]
identifier[self] . identifier[wait_event] = identifier[threading] . identifier[Event] ()
identifier[timeout] = identifier[int] ( identifier[self] . identifier[timeout] ) keyword[if] identifier[self] . identifier[timeout] keyword[else] keyword[None]
identifier[self] . identifier[timed_out] = keyword[not] identifier[self] . identifier[wait_event] . identifier[wait] ( identifier[timeout] )
|
def wait(self):
"""
Wait for the request to be dispatched.
"""
self.wait_event = threading.Event()
timeout = int(self.timeout) if self.timeout else None
self.timed_out = not self.wait_event.wait(timeout)
|
def get_config(self, budget):
        """
        Sample a new configuration.

        This function is called inside Hyperband to query a new configuration.

        Parameters:
        -----------
        budget: float
            the budget for which this configuration is scheduled

        returns: (dict, dict)
            the configuration as a dictionary, plus an (empty) info dict
        """
        # Hold the lock as a context manager so it is released even if
        # sampling or model prediction raises; the previous acquire/release
        # pair would leave the lock held forever on an exception.
        with self.lock:
            if not self.is_trained:
                # No model yet: fall back to a uniform random sample.
                c = self.config_space.sample_configuration().get_array()
            else:
                candidates = np.array([self.config_space.sample_configuration().get_array()
                                       for _ in range(self.n_candidates)])
                # We are only interested on the asymptotic value
                projected_candidates = np.concatenate((candidates, np.ones([self.n_candidates, 1])), axis=1)
                # Compute the upper confidence bound of the function at the asymptote
                m, v = self.model.predict(projected_candidates)
                ucb_values = m + self.delta * np.sqrt(v)
                # Sample one candidate with probability proportional to its
                # UCB value (debug print of ucb_values removed).
                p = np.ones(self.n_candidates) * (ucb_values / np.sum(ucb_values))
                idx = np.random.choice(self.n_candidates, 1, False, p)
                c = candidates[idx][0]
            config = ConfigSpace.Configuration(self.config_space, vector=c)
        return config.get_dictionary(), {}
|
def function[get_config, parameter[self, budget]]:
constant[
function to sample a new configuration
This function is called inside Hyperband to query a new configuration
Parameters:
-----------
budget: float
the budget for which this configuration is scheduled
returns: config
should return a valid configuration
]
call[name[self].lock.acquire, parameter[]]
if <ast.UnaryOp object at 0x7da1b1775ba0> begin[:]
variable[c] assign[=] call[call[name[self].config_space.sample_configuration, parameter[]].get_array, parameter[]]
variable[config] assign[=] call[name[ConfigSpace].Configuration, parameter[name[self].config_space]]
call[name[self].lock.release, parameter[]]
return[tuple[[<ast.Call object at 0x7da1b175dc00>, <ast.Dict object at 0x7da1b175c4c0>]]]
|
keyword[def] identifier[get_config] ( identifier[self] , identifier[budget] ):
literal[string]
identifier[self] . identifier[lock] . identifier[acquire] ()
keyword[if] keyword[not] identifier[self] . identifier[is_trained] :
identifier[c] = identifier[self] . identifier[config_space] . identifier[sample_configuration] (). identifier[get_array] ()
keyword[else] :
identifier[candidates] = identifier[np] . identifier[array] ([ identifier[self] . identifier[config_space] . identifier[sample_configuration] (). identifier[get_array] ()
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[self] . identifier[n_candidates] )])
identifier[projected_candidates] = identifier[np] . identifier[concatenate] (( identifier[candidates] , identifier[np] . identifier[ones] ([ identifier[self] . identifier[n_candidates] , literal[int] ])), identifier[axis] = literal[int] )
identifier[m] , identifier[v] = identifier[self] . identifier[model] . identifier[predict] ( identifier[projected_candidates] )
identifier[ucb_values] = identifier[m] + identifier[self] . identifier[delta] * identifier[np] . identifier[sqrt] ( identifier[v] )
identifier[print] ( identifier[ucb_values] )
identifier[p] = identifier[np] . identifier[ones] ( identifier[self] . identifier[n_candidates] )*( identifier[ucb_values] / identifier[np] . identifier[sum] ( identifier[ucb_values] ))
identifier[idx] = identifier[np] . identifier[random] . identifier[choice] ( identifier[self] . identifier[n_candidates] , literal[int] , keyword[False] , identifier[p] )
identifier[c] = identifier[candidates] [ identifier[idx] ][ literal[int] ]
identifier[config] = identifier[ConfigSpace] . identifier[Configuration] ( identifier[self] . identifier[config_space] , identifier[vector] = identifier[c] )
identifier[self] . identifier[lock] . identifier[release] ()
keyword[return] identifier[config] . identifier[get_dictionary] (),{}
|
def get_config(self, budget):
"""
function to sample a new configuration
This function is called inside Hyperband to query a new configuration
Parameters:
-----------
budget: float
the budget for which this configuration is scheduled
returns: config
should return a valid configuration
"""
self.lock.acquire()
if not self.is_trained:
c = self.config_space.sample_configuration().get_array() # depends on [control=['if'], data=[]]
else:
candidates = np.array([self.config_space.sample_configuration().get_array() for _ in range(self.n_candidates)])
# We are only interested on the asymptotic value
projected_candidates = np.concatenate((candidates, np.ones([self.n_candidates, 1])), axis=1)
# Compute the upper confidence bound of the function at the asymptote
(m, v) = self.model.predict(projected_candidates)
ucb_values = m + self.delta * np.sqrt(v)
print(ucb_values)
# Sample a configuration based on the ucb values
p = np.ones(self.n_candidates) * (ucb_values / np.sum(ucb_values))
idx = np.random.choice(self.n_candidates, 1, False, p)
c = candidates[idx][0]
config = ConfigSpace.Configuration(self.config_space, vector=c)
self.lock.release()
return (config.get_dictionary(), {})
|
def install_cygwin(name, install_args=None, override_args=False):
    '''
    Instructs Chocolatey to install a package via Cygwin.

    name
        The name of the package to be installed. Only accepts a single argument.

    install_args
        A list of install arguments you want to pass to the installation process
        i.e product key or feature list

    override_args
        Set to true if you want to override the original install arguments (for
        the native installer) in the package and use your own. When this is set
        to False install_args will be appended to the end of the default
        arguments

    CLI Example:

    .. code-block:: bash

        salt '*' chocolatey.install_cygwin <package name>
        salt '*' chocolatey.install_cygwin <package name> install_args=<args> override_args=True
    '''
    # Delegate to the generic installer, pinning the package source to Cygwin.
    kwargs = {
        'source': 'cygwin',
        'install_args': install_args,
        'override_args': override_args,
    }
    return install(name, **kwargs)
|
def function[install_cygwin, parameter[name, install_args, override_args]]:
constant[
Instructs Chocolatey to install a package via Cygwin.
name
The name of the package to be installed. Only accepts a single argument.
install_args
A list of install arguments you want to pass to the installation process
i.e product key or feature list
override_args
Set to true if you want to override the original install arguments (for
the native installer) in the package and use your own. When this is set
to False install_args will be appended to the end of the default
arguments
CLI Example:
.. code-block:: bash
salt '*' chocolatey.install_cygwin <package name>
salt '*' chocolatey.install_cygwin <package name> install_args=<args> override_args=True
]
return[call[name[install], parameter[name[name]]]]
|
keyword[def] identifier[install_cygwin] ( identifier[name] , identifier[install_args] = keyword[None] , identifier[override_args] = keyword[False] ):
literal[string]
keyword[return] identifier[install] ( identifier[name] ,
identifier[source] = literal[string] ,
identifier[install_args] = identifier[install_args] ,
identifier[override_args] = identifier[override_args] )
|
def install_cygwin(name, install_args=None, override_args=False):
"""
Instructs Chocolatey to install a package via Cygwin.
name
The name of the package to be installed. Only accepts a single argument.
install_args
A list of install arguments you want to pass to the installation process
i.e product key or feature list
override_args
Set to true if you want to override the original install arguments (for
the native installer) in the package and use your own. When this is set
to False install_args will be appended to the end of the default
arguments
CLI Example:
.. code-block:: bash
salt '*' chocolatey.install_cygwin <package name>
salt '*' chocolatey.install_cygwin <package name> install_args=<args> override_args=True
"""
return install(name, source='cygwin', install_args=install_args, override_args=override_args)
|
def encode_endian(text, encoding, errors="strict", le=True):
    """Like ``text.encode(encoding)`` but always emits an explicit little
    endian / big endian BOM instead of relying on the platform default.

    Args:
        text (text)
        encoding (str)
        errors (str)
        le (boolean): if little endian

    Returns:
        bytes

    Raises:
        UnicodeEncodeError
        LookupError
    """
    # Normalize to the canonical codec name (e.g. "UTF16" -> "utf-16").
    canonical = codecs.lookup(encoding).name
    if canonical == "utf-16":
        if le:
            return codecs.BOM_UTF16_LE + text.encode("utf-16-le", errors)
        return codecs.BOM_UTF16_BE + text.encode("utf-16-be", errors)
    if canonical == "utf-32":
        if le:
            return codecs.BOM_UTF32_LE + text.encode("utf-32-le", errors)
        return codecs.BOM_UTF32_BE + text.encode("utf-32-be", errors)
    # Any other encoding carries no BOM handling here; encode directly.
    return text.encode(canonical, errors)
|
def function[encode_endian, parameter[text, encoding, errors, le]]:
constant[Like text.encode(encoding) but always returns little endian/big endian
BOMs instead of the system one.
Args:
text (text)
encoding (str)
errors (str)
le (boolean): if little endian
Returns:
bytes
Raises:
UnicodeEncodeError
LookupError
]
variable[encoding] assign[=] call[name[codecs].lookup, parameter[name[encoding]]].name
if compare[name[encoding] equal[==] constant[utf-16]] begin[:]
if name[le] begin[:]
return[binary_operation[name[codecs].BOM_UTF16_LE + call[name[text].encode, parameter[constant[utf-16-le], name[errors]]]]]
|
keyword[def] identifier[encode_endian] ( identifier[text] , identifier[encoding] , identifier[errors] = literal[string] , identifier[le] = keyword[True] ):
literal[string]
identifier[encoding] = identifier[codecs] . identifier[lookup] ( identifier[encoding] ). identifier[name]
keyword[if] identifier[encoding] == literal[string] :
keyword[if] identifier[le] :
keyword[return] identifier[codecs] . identifier[BOM_UTF16_LE] + identifier[text] . identifier[encode] ( literal[string] , identifier[errors] )
keyword[else] :
keyword[return] identifier[codecs] . identifier[BOM_UTF16_BE] + identifier[text] . identifier[encode] ( literal[string] , identifier[errors] )
keyword[elif] identifier[encoding] == literal[string] :
keyword[if] identifier[le] :
keyword[return] identifier[codecs] . identifier[BOM_UTF32_LE] + identifier[text] . identifier[encode] ( literal[string] , identifier[errors] )
keyword[else] :
keyword[return] identifier[codecs] . identifier[BOM_UTF32_BE] + identifier[text] . identifier[encode] ( literal[string] , identifier[errors] )
keyword[else] :
keyword[return] identifier[text] . identifier[encode] ( identifier[encoding] , identifier[errors] )
|
def encode_endian(text, encoding, errors='strict', le=True):
"""Like text.encode(encoding) but always returns little endian/big endian
BOMs instead of the system one.
Args:
text (text)
encoding (str)
errors (str)
le (boolean): if little endian
Returns:
bytes
Raises:
UnicodeEncodeError
LookupError
"""
encoding = codecs.lookup(encoding).name
if encoding == 'utf-16':
if le:
return codecs.BOM_UTF16_LE + text.encode('utf-16-le', errors) # depends on [control=['if'], data=[]]
else:
return codecs.BOM_UTF16_BE + text.encode('utf-16-be', errors) # depends on [control=['if'], data=[]]
elif encoding == 'utf-32':
if le:
return codecs.BOM_UTF32_LE + text.encode('utf-32-le', errors) # depends on [control=['if'], data=[]]
else:
return codecs.BOM_UTF32_BE + text.encode('utf-32-be', errors) # depends on [control=['if'], data=[]]
else:
return text.encode(encoding, errors)
|
def register_converter(self, converter: Converter[S, T]):
    """
    Utility method to register any converter.  Converters that support any
    type are stored in the "generic" lists, and the others are stored in
    front of the types they support.

    :param converter: the Converter instance to register
    :return:
    """
    check_var(converter, var_types=Converter, var_name='converter')

    # (0) sanity check: the converter has to answer JOKER queries with (True, None, None)
    res = converter.is_able_to_convert_detailed(from_type=JOKER, to_type=JOKER, strict=True)
    if res[0] is not True or res[1] is not None or res[2] is not None:
        raise ValueError('Converter ' + str(converter) + ' can not be registered since it does not handle the JOKER'
                         ' cases correctly')

    # compute all possible chains and append them to the four registries
    generic_strict, generic_nonstrict, specific_strict, specific_nonstrict = \
        self._create_all_new_chains(converter)
    self._generic_nonstrict_conversion_chains += generic_nonstrict
    self._generic_conversion_chains += generic_strict
    self._specific_non_strict_conversion_chains += specific_nonstrict
    self._specific_conversion_chains += specific_strict

    # keep every registry sorted longest-chain-first
    def longest_first(chains):
        return sorted(chains, key=len, reverse=True)

    self._generic_nonstrict_conversion_chains = longest_first(self._generic_nonstrict_conversion_chains)
    self._generic_conversion_chains = longest_first(self._generic_conversion_chains)
    self._specific_non_strict_conversion_chains = longest_first(self._specific_non_strict_conversion_chains)
    self._specific_conversion_chains = longest_first(self._specific_conversion_chains)
|
def function[register_converter, parameter[self, converter]]:
constant[
Utility method to register any converter. Converters that support any type will be stored in the "generic"
lists, and the others will be stored in front of the types they support
:return:
]
call[name[check_var], parameter[name[converter]]]
variable[res] assign[=] call[name[converter].is_able_to_convert_detailed, parameter[]]
if <ast.UnaryOp object at 0x7da18dc98fa0> begin[:]
<ast.Raise object at 0x7da18dc98dc0>
<ast.Tuple object at 0x7da18dc9af50> assign[=] call[name[self]._create_all_new_chains, parameter[name[converter]]]
<ast.AugAssign object at 0x7da18dc98760>
<ast.AugAssign object at 0x7da18dc98340>
<ast.AugAssign object at 0x7da18dc9ae00>
<ast.AugAssign object at 0x7da18dc9a950>
name[self]._generic_nonstrict_conversion_chains assign[=] call[name[sorted], parameter[name[self]._generic_nonstrict_conversion_chains]]
name[self]._generic_conversion_chains assign[=] call[name[sorted], parameter[name[self]._generic_conversion_chains]]
name[self]._specific_non_strict_conversion_chains assign[=] call[name[sorted], parameter[name[self]._specific_non_strict_conversion_chains]]
name[self]._specific_conversion_chains assign[=] call[name[sorted], parameter[name[self]._specific_conversion_chains]]
|
keyword[def] identifier[register_converter] ( identifier[self] , identifier[converter] : identifier[Converter] [ identifier[S] , identifier[T] ]):
literal[string]
identifier[check_var] ( identifier[converter] , identifier[var_types] = identifier[Converter] , identifier[var_name] = literal[string] )
identifier[res] = identifier[converter] . identifier[is_able_to_convert_detailed] ( identifier[from_type] = identifier[JOKER] , identifier[to_type] = identifier[JOKER] , identifier[strict] = keyword[True] )
keyword[if] keyword[not] ( identifier[res] [ literal[int] ] keyword[is] keyword[True] keyword[and] identifier[res] [ literal[int] ] keyword[is] keyword[None] keyword[and] identifier[res] [ literal[int] ] keyword[is] keyword[None] ):
keyword[raise] identifier[ValueError] ( literal[string] + identifier[str] ( identifier[converter] )+ literal[string]
literal[string] )
identifier[generic_chains] , identifier[generic_nonstrict_chains] , identifier[specific_chains] , identifier[specific_nonstrict_chains] = identifier[self] . identifier[_create_all_new_chains] ( identifier[converter] )
identifier[self] . identifier[_generic_nonstrict_conversion_chains] += identifier[generic_nonstrict_chains]
identifier[self] . identifier[_generic_conversion_chains] += identifier[generic_chains]
identifier[self] . identifier[_specific_non_strict_conversion_chains] += identifier[specific_nonstrict_chains]
identifier[self] . identifier[_specific_conversion_chains] += identifier[specific_chains]
identifier[self] . identifier[_generic_nonstrict_conversion_chains] = identifier[sorted] ( identifier[self] . identifier[_generic_nonstrict_conversion_chains] , identifier[key] = identifier[len] ,
identifier[reverse] = keyword[True] )
identifier[self] . identifier[_generic_conversion_chains] = identifier[sorted] ( identifier[self] . identifier[_generic_conversion_chains] , identifier[key] = identifier[len] , identifier[reverse] = keyword[True] )
identifier[self] . identifier[_specific_non_strict_conversion_chains] = identifier[sorted] ( identifier[self] . identifier[_specific_non_strict_conversion_chains] , identifier[key] = identifier[len] ,
identifier[reverse] = keyword[True] )
identifier[self] . identifier[_specific_conversion_chains] = identifier[sorted] ( identifier[self] . identifier[_specific_conversion_chains] , identifier[key] = identifier[len] , identifier[reverse] = keyword[True] )
|
def register_converter(self, converter: Converter[S, T]):
"""
Utility method to register any converter. Converters that support any type will be stored in the "generic"
lists, and the others will be stored in front of the types they support
:return:
"""
check_var(converter, var_types=Converter, var_name='converter')
# (0) sanity check : check that parser handles jokers properly
res = converter.is_able_to_convert_detailed(from_type=JOKER, to_type=JOKER, strict=True)
if not (res[0] is True and res[1] is None and (res[2] is None)):
raise ValueError('Converter ' + str(converter) + ' can not be registered since it does not handle the JOKER cases correctly') # depends on [control=['if'], data=[]]
# compute all possible chains and save them
(generic_chains, generic_nonstrict_chains, specific_chains, specific_nonstrict_chains) = self._create_all_new_chains(converter)
self._generic_nonstrict_conversion_chains += generic_nonstrict_chains
self._generic_conversion_chains += generic_chains
self._specific_non_strict_conversion_chains += specific_nonstrict_chains
self._specific_conversion_chains += specific_chains
# sort all lists by length
self._generic_nonstrict_conversion_chains = sorted(self._generic_nonstrict_conversion_chains, key=len, reverse=True)
self._generic_conversion_chains = sorted(self._generic_conversion_chains, key=len, reverse=True)
self._specific_non_strict_conversion_chains = sorted(self._specific_non_strict_conversion_chains, key=len, reverse=True)
self._specific_conversion_chains = sorted(self._specific_conversion_chains, key=len, reverse=True)
|
def queuedb_remove(path, entry, cur=None):
    """
    Remove an element from a queue.

    Arguments:
        path: path to the queue database (only used when ``cur`` is not given).
        entry: dict with at least 'queue_id' and 'name' identifying the row.
        cur: optional existing cursor.  When given, the caller owns the
            connection and is responsible for committing.

    Return True on success
    Raise on error
    """
    sql = "DELETE FROM queue WHERE queue_id = ? AND name = ?;"
    args = (entry['queue_id'], entry['name'])
    if cur:
        # Caller-supplied cursor: run inside the caller's transaction.
        queuedb_query_execute(cur, sql, args)
        return True

    db = queuedb_open(path)
    if db is None:
        raise Exception("Failed to open %s" % path)
    try:
        queuedb_query_execute(db.cursor(), sql, args)
        db.commit()
    finally:
        # Close the connection we opened even if the query or commit raises;
        # the original leaked the connection on any exception.
        db.close()
    return True
|
def function[queuedb_remove, parameter[path, entry, cur]]:
constant[
Remove an element from a queue.
Return True on success
Raise on error
]
variable[sql] assign[=] constant[DELETE FROM queue WHERE queue_id = ? AND name = ?;]
variable[args] assign[=] tuple[[<ast.Subscript object at 0x7da204344a90>, <ast.Subscript object at 0x7da2043455a0>]]
variable[cursor] assign[=] constant[None]
if name[cur] begin[:]
variable[cursor] assign[=] name[cur]
variable[res] assign[=] call[name[queuedb_query_execute], parameter[name[cursor], name[sql], name[args]]]
if compare[name[cur] is constant[None]] begin[:]
call[name[db].commit, parameter[]]
call[name[db].close, parameter[]]
return[constant[True]]
|
keyword[def] identifier[queuedb_remove] ( identifier[path] , identifier[entry] , identifier[cur] = keyword[None] ):
literal[string]
identifier[sql] = literal[string]
identifier[args] =( identifier[entry] [ literal[string] ], identifier[entry] [ literal[string] ])
identifier[cursor] = keyword[None]
keyword[if] identifier[cur] :
identifier[cursor] = identifier[cur]
keyword[else] :
identifier[db] = identifier[queuedb_open] ( identifier[path] )
keyword[if] identifier[db] keyword[is] keyword[None] :
keyword[raise] identifier[Exception] ( literal[string] % identifier[path] )
identifier[cursor] = identifier[db] . identifier[cursor] ()
identifier[res] = identifier[queuedb_query_execute] ( identifier[cursor] , identifier[sql] , identifier[args] )
keyword[if] identifier[cur] keyword[is] keyword[None] :
identifier[db] . identifier[commit] ()
identifier[db] . identifier[close] ()
keyword[return] keyword[True]
|
def queuedb_remove(path, entry, cur=None):
"""
Remove an element from a queue.
Return True on success
Raise on error
"""
sql = 'DELETE FROM queue WHERE queue_id = ? AND name = ?;'
args = (entry['queue_id'], entry['name'])
cursor = None
if cur:
cursor = cur # depends on [control=['if'], data=[]]
else:
db = queuedb_open(path)
if db is None:
raise Exception('Failed to open %s' % path) # depends on [control=['if'], data=[]]
cursor = db.cursor()
res = queuedb_query_execute(cursor, sql, args)
if cur is None:
db.commit()
db.close() # depends on [control=['if'], data=[]]
return True
|
def draw_label_path(context, width, height, arrow_height, distance_to_port, port_offset):
    """Draws the path for an upright label

    The path starts at the port position (the current point), steps to the
    outer state border, draws the arrow tip and the rectangular label body,
    and closes back at the tip.

    :param context: The Cairo context
    :param float width: Width of the label
    :param float height: Height of the label
    :param float arrow_height: Height of the arrow tip above the label body
    :param float distance_to_port: Distance to the port related to the label
    :param float port_offset: Distance from the port center to its border
    """
    half_width = width / 2.
    body_height = height - arrow_height
    # Move from the port center to the outer border of the state
    context.rel_move_to(0, port_offset)
    # Relative segments: down to the tip, then around the label outline
    # (upper-left, lower-left, lower-right, upper-right, back to the tip).
    for dx, dy in ((0, distance_to_port),
                   (-half_width, arrow_height),
                   (0, body_height),
                   (width, 0),
                   (0, -body_height),
                   (-half_width, -arrow_height)):
        context.rel_line_to(dx, dy)
    context.close_path()
|
def function[draw_label_path, parameter[context, width, height, arrow_height, distance_to_port, port_offset]]:
constant[Draws the path for an upright label
:param context: The Cairo context
:param float width: Width of the label
:param float height: Height of the label
:param float distance_to_port: Distance to the port related to the label
:param float port_offset: Distance from the port center to its border
:param bool draw_connection_to_port: Whether to draw a line from the tip of the label to the port
]
variable[c] assign[=] name[context]
call[name[c].rel_move_to, parameter[constant[0], name[port_offset]]]
call[name[c].rel_line_to, parameter[constant[0], name[distance_to_port]]]
call[name[c].rel_line_to, parameter[binary_operation[<ast.UnaryOp object at 0x7da2041db9d0> / constant[2.0]], name[arrow_height]]]
call[name[c].rel_line_to, parameter[constant[0], binary_operation[name[height] - name[arrow_height]]]]
call[name[c].rel_line_to, parameter[name[width], constant[0]]]
call[name[c].rel_line_to, parameter[constant[0], <ast.UnaryOp object at 0x7da2041d9ba0>]]
call[name[c].rel_line_to, parameter[binary_operation[<ast.UnaryOp object at 0x7da1b1a12800> / constant[2.0]], <ast.UnaryOp object at 0x7da1b1a106d0>]]
call[name[c].close_path, parameter[]]
|
keyword[def] identifier[draw_label_path] ( identifier[context] , identifier[width] , identifier[height] , identifier[arrow_height] , identifier[distance_to_port] , identifier[port_offset] ):
literal[string]
identifier[c] = identifier[context]
identifier[c] . identifier[rel_move_to] ( literal[int] , identifier[port_offset] )
identifier[c] . identifier[rel_line_to] ( literal[int] , identifier[distance_to_port] )
identifier[c] . identifier[rel_line_to] (- identifier[width] / literal[int] , identifier[arrow_height] )
identifier[c] . identifier[rel_line_to] ( literal[int] , identifier[height] - identifier[arrow_height] )
identifier[c] . identifier[rel_line_to] ( identifier[width] , literal[int] )
identifier[c] . identifier[rel_line_to] ( literal[int] ,-( identifier[height] - identifier[arrow_height] ))
identifier[c] . identifier[rel_line_to] (- identifier[width] / literal[int] ,- identifier[arrow_height] )
identifier[c] . identifier[close_path] ()
|
def draw_label_path(context, width, height, arrow_height, distance_to_port, port_offset):
"""Draws the path for an upright label
:param context: The Cairo context
:param float width: Width of the label
:param float height: Height of the label
:param float distance_to_port: Distance to the port related to the label
:param float port_offset: Distance from the port center to its border
:param bool draw_connection_to_port: Whether to draw a line from the tip of the label to the port
"""
c = context
# The current point is the port position
# Mover to outer border of state
c.rel_move_to(0, port_offset)
# Draw line to arrow tip of label
c.rel_line_to(0, distance_to_port)
# Line to upper left corner
c.rel_line_to(-width / 2.0, arrow_height)
# Line to lower left corner
c.rel_line_to(0, height - arrow_height)
# Line to lower right corner
c.rel_line_to(width, 0)
# Line to upper right corner
c.rel_line_to(0, -(height - arrow_height))
# Line to center top (tip of label)
c.rel_line_to(-width / 2.0, -arrow_height)
# Close path
c.close_path()
|
def getProjectionRaw(self, eEye):
    """
    The components necessary to build your own projection matrix in case your
    application is doing something fancy like infinite Z.

    Returns a (left, right, top, bottom) tuple of floats filled in by the
    native function table call for the given eye.
    """
    get_raw = self.function_table.getProjectionRaw
    # Output parameters: the native call writes into these c_float cells.
    left, right, top, bottom = c_float(), c_float(), c_float(), c_float()
    get_raw(eEye, byref(left), byref(right), byref(top), byref(bottom))
    return left.value, right.value, top.value, bottom.value
|
def function[getProjectionRaw, parameter[self, eEye]]:
constant[
The components necessary to build your own projection matrix in case your
application is doing something fancy like infinite Z
]
variable[fn] assign[=] name[self].function_table.getProjectionRaw
variable[pfLeft] assign[=] call[name[c_float], parameter[]]
variable[pfRight] assign[=] call[name[c_float], parameter[]]
variable[pfTop] assign[=] call[name[c_float], parameter[]]
variable[pfBottom] assign[=] call[name[c_float], parameter[]]
call[name[fn], parameter[name[eEye], call[name[byref], parameter[name[pfLeft]]], call[name[byref], parameter[name[pfRight]]], call[name[byref], parameter[name[pfTop]]], call[name[byref], parameter[name[pfBottom]]]]]
return[tuple[[<ast.Attribute object at 0x7da204623c70>, <ast.Attribute object at 0x7da204621ba0>, <ast.Attribute object at 0x7da204621060>, <ast.Attribute object at 0x7da204622e30>]]]
|
keyword[def] identifier[getProjectionRaw] ( identifier[self] , identifier[eEye] ):
literal[string]
identifier[fn] = identifier[self] . identifier[function_table] . identifier[getProjectionRaw]
identifier[pfLeft] = identifier[c_float] ()
identifier[pfRight] = identifier[c_float] ()
identifier[pfTop] = identifier[c_float] ()
identifier[pfBottom] = identifier[c_float] ()
identifier[fn] ( identifier[eEye] , identifier[byref] ( identifier[pfLeft] ), identifier[byref] ( identifier[pfRight] ), identifier[byref] ( identifier[pfTop] ), identifier[byref] ( identifier[pfBottom] ))
keyword[return] identifier[pfLeft] . identifier[value] , identifier[pfRight] . identifier[value] , identifier[pfTop] . identifier[value] , identifier[pfBottom] . identifier[value]
|
def getProjectionRaw(self, eEye):
"""
The components necessary to build your own projection matrix in case your
application is doing something fancy like infinite Z
"""
fn = self.function_table.getProjectionRaw
pfLeft = c_float()
pfRight = c_float()
pfTop = c_float()
pfBottom = c_float()
fn(eEye, byref(pfLeft), byref(pfRight), byref(pfTop), byref(pfBottom))
return (pfLeft.value, pfRight.value, pfTop.value, pfBottom.value)
|
def parse_file(self, f):
    """
    Parse an ELF file and fill the class' properties.

    The open handle is kept on ``self.f`` so that later reads can pull more
    data from the same file.

    Arguments:
        f(file or str): The (path to) the ELF file to read.
    """
    # isinstance is the idiomatic (and subclass-friendly) type test,
    # replacing the original ``type(f) is str``.
    if isinstance(f, str):
        self.f = open(f, 'rb')
    else:
        self.f = f
    # Read the fixed-size header region and hand it to the header parser.
    self._parse_header(self.f.read(64))
|
def function[parse_file, parameter[self, f]]:
constant[
Parse an ELF file and fill the class' properties.
Arguments:
f(file or str): The (path to) the ELF file to read.
]
if compare[call[name[type], parameter[name[f]]] is name[str]] begin[:]
name[self].f assign[=] call[name[open], parameter[name[f], constant[rb]]]
call[name[self]._parse_header, parameter[call[name[self].f.read, parameter[constant[64]]]]]
|
keyword[def] identifier[parse_file] ( identifier[self] , identifier[f] ):
literal[string]
keyword[if] identifier[type] ( identifier[f] ) keyword[is] identifier[str] :
identifier[self] . identifier[f] = identifier[open] ( identifier[f] , literal[string] )
keyword[else] :
identifier[self] . identifier[f] = identifier[f]
identifier[self] . identifier[_parse_header] ( identifier[self] . identifier[f] . identifier[read] ( literal[int] ))
|
def parse_file(self, f):
"""
Parse an ELF file and fill the class' properties.
Arguments:
f(file or str): The (path to) the ELF file to read.
"""
if type(f) is str:
self.f = open(f, 'rb') # depends on [control=['if'], data=[]]
else:
self.f = f
self._parse_header(self.f.read(64))
|
def _padding(self, image, geometry, options):
"""
Pads the image
"""
# The order is important. The gravity option should come before extent.
image['options']['background'] = options.get('padding_color')
image['options']['gravity'] = 'center'
image['options']['extent'] = '%sx%s' % (geometry[0], geometry[1])
return image
|
def function[_padding, parameter[self, image, geometry, options]]:
constant[
Pads the image
]
call[call[name[image]][constant[options]]][constant[background]] assign[=] call[name[options].get, parameter[constant[padding_color]]]
call[call[name[image]][constant[options]]][constant[gravity]] assign[=] constant[center]
call[call[name[image]][constant[options]]][constant[extent]] assign[=] binary_operation[constant[%sx%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b20b4640>, <ast.Subscript object at 0x7da1b20b6200>]]]
return[name[image]]
|
keyword[def] identifier[_padding] ( identifier[self] , identifier[image] , identifier[geometry] , identifier[options] ):
literal[string]
identifier[image] [ literal[string] ][ literal[string] ]= identifier[options] . identifier[get] ( literal[string] )
identifier[image] [ literal[string] ][ literal[string] ]= literal[string]
identifier[image] [ literal[string] ][ literal[string] ]= literal[string] %( identifier[geometry] [ literal[int] ], identifier[geometry] [ literal[int] ])
keyword[return] identifier[image]
|
def _padding(self, image, geometry, options):
"""
Pads the image
"""
# The order is important. The gravity option should come before extent.
image['options']['background'] = options.get('padding_color')
image['options']['gravity'] = 'center'
image['options']['extent'] = '%sx%s' % (geometry[0], geometry[1])
return image
|
def repair(self, x, copy_if_changed=True, copy_always=False):
    """sets out-of-bounds components of ``x`` on the bounds.

    ``self.bounds`` may hold, per side, a scalar (applied to every
    coordinate) or a per-coordinate sequence; ``None`` entries mean
    "unbounded" on that side.  A bounds sequence shorter than ``x``
    repeats its last entry for the remaining coordinates.

    Note: with ``copy_if_changed`` the copy is made whenever bounds are
    active, even if no component actually ends up changed.
    """
    # TODO (old data): CPU(N,lam,iter=20,200,100): 3.3s of 8s for two bounds, 1.8s of 6.5s for one bound
    # remark: np.max([bounds[0], x]) is about 40 times slower than max((bounds[0], x))
    copy = copy_if_changed
    if copy_always:
        x = array(x, copy=True)
    bounds = self.bounds
    if bounds not in (None, [None, None], (None, None)):  # solely for efficiency
        x = array(x, copy=True) if copy and not copy_always else x
        if bounds[0] is not None:  # clip against lower bound(s)
            if isscalar(bounds[0]):
                for i in rglen(x):
                    x[i] = max((bounds[0], x[i]))
            else:
                for i in rglen(x):
                    # index into the bound list, repeating its last entry
                    j = min([i, len(bounds[0]) - 1])
                    if bounds[0][j] is not None:
                        x[i] = max((bounds[0][j], x[i]))
        if bounds[1] is not None:  # clip against upper bound(s)
            if isscalar(bounds[1]):
                for i in rglen(x):
                    x[i] = min((bounds[1], x[i]))
            else:
                for i in rglen(x):
                    j = min((i, len(bounds[1]) - 1))
                    if bounds[1][j] is not None:
                        x[i] = min((bounds[1][j], x[i]))
    return x
|
def function[repair, parameter[self, x, copy_if_changed, copy_always]]:
constant[sets out-of-bounds components of ``x`` on the bounds.
]
variable[copy] assign[=] name[copy_if_changed]
if name[copy_always] begin[:]
variable[x] assign[=] call[name[array], parameter[name[x]]]
variable[bounds] assign[=] name[self].bounds
if compare[name[bounds] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b0b7e8c0>, <ast.List object at 0x7da1b0b7c550>, <ast.Tuple object at 0x7da1b0b7c670>]]] begin[:]
variable[x] assign[=] <ast.IfExp object at 0x7da1b0b7fa00>
if compare[call[name[bounds]][constant[0]] is_not constant[None]] begin[:]
if call[name[isscalar], parameter[call[name[bounds]][constant[0]]]] begin[:]
for taget[name[i]] in starred[call[name[rglen], parameter[name[x]]]] begin[:]
call[name[x]][name[i]] assign[=] call[name[max], parameter[tuple[[<ast.Subscript object at 0x7da1b0b7ee30>, <ast.Subscript object at 0x7da1b0b7eda0>]]]]
if compare[call[name[bounds]][constant[1]] is_not constant[None]] begin[:]
if call[name[isscalar], parameter[call[name[bounds]][constant[1]]]] begin[:]
for taget[name[i]] in starred[call[name[rglen], parameter[name[x]]]] begin[:]
call[name[x]][name[i]] assign[=] call[name[min], parameter[tuple[[<ast.Subscript object at 0x7da1b0cb59c0>, <ast.Subscript object at 0x7da1b0cb4040>]]]]
return[name[x]]
|
keyword[def] identifier[repair] ( identifier[self] , identifier[x] , identifier[copy_if_changed] = keyword[True] , identifier[copy_always] = keyword[False] ):
literal[string]
identifier[copy] = identifier[copy_if_changed]
keyword[if] identifier[copy_always] :
identifier[x] = identifier[array] ( identifier[x] , identifier[copy] = keyword[True] )
identifier[bounds] = identifier[self] . identifier[bounds]
keyword[if] identifier[bounds] keyword[not] keyword[in] ( keyword[None] ,[ keyword[None] , keyword[None] ],( keyword[None] , keyword[None] )):
identifier[x] = identifier[array] ( identifier[x] , identifier[copy] = keyword[True] ) keyword[if] identifier[copy] keyword[and] keyword[not] identifier[copy_always] keyword[else] identifier[x]
keyword[if] identifier[bounds] [ literal[int] ] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[isscalar] ( identifier[bounds] [ literal[int] ]):
keyword[for] identifier[i] keyword[in] identifier[rglen] ( identifier[x] ):
identifier[x] [ identifier[i] ]= identifier[max] (( identifier[bounds] [ literal[int] ], identifier[x] [ identifier[i] ]))
keyword[else] :
keyword[for] identifier[i] keyword[in] identifier[rglen] ( identifier[x] ):
identifier[j] = identifier[min] ([ identifier[i] , identifier[len] ( identifier[bounds] [ literal[int] ])- literal[int] ])
keyword[if] identifier[bounds] [ literal[int] ][ identifier[j] ] keyword[is] keyword[not] keyword[None] :
identifier[x] [ identifier[i] ]= identifier[max] (( identifier[bounds] [ literal[int] ][ identifier[j] ], identifier[x] [ identifier[i] ]))
keyword[if] identifier[bounds] [ literal[int] ] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[isscalar] ( identifier[bounds] [ literal[int] ]):
keyword[for] identifier[i] keyword[in] identifier[rglen] ( identifier[x] ):
identifier[x] [ identifier[i] ]= identifier[min] (( identifier[bounds] [ literal[int] ], identifier[x] [ identifier[i] ]))
keyword[else] :
keyword[for] identifier[i] keyword[in] identifier[rglen] ( identifier[x] ):
identifier[j] = identifier[min] (( identifier[i] , identifier[len] ( identifier[bounds] [ literal[int] ])- literal[int] ))
keyword[if] identifier[bounds] [ literal[int] ][ identifier[j] ] keyword[is] keyword[not] keyword[None] :
identifier[x] [ identifier[i] ]= identifier[min] (( identifier[bounds] [ literal[int] ][ identifier[j] ], identifier[x] [ identifier[i] ]))
keyword[return] identifier[x]
|
def repair(self, x, copy_if_changed=True, copy_always=False):
    """sets out-of-bounds components of ``x`` on the bounds.

    ``self.bounds`` is expected to be a pair ``[lower, upper]`` where each
    entry is ``None``, a scalar, or a per-coordinate sequence; a bound
    sequence shorter than ``x`` is effectively extended by its last entry
    (via ``j = min(i, len(bound) - 1)``).

    ``x`` is copied before modification when ``copy_always`` is true, or
    when bounds are active and ``copy_if_changed`` is true; otherwise it
    is clamped in place.  The (possibly copied) ``x`` is returned.
    """
    # TODO (old data): CPU(N,lam,iter=20,200,100): 3.3s of 8s for two bounds, 1.8s of 6.5s for one bound
    # remark: np.max([bounds[0], x]) is about 40 times slower than max((bounds[0], x))
    copy = copy_if_changed
    if copy_always:
        x = array(x, copy=True)
    bounds = self.bounds
    if bounds not in (None, [None, None], (None, None)):  # solely for efficiency
        # avoid a second copy if copy_always already copied above
        x = array(x, copy=True) if copy and (not copy_always) else x
        if bounds[0] is not None:  # clamp from below
            if isscalar(bounds[0]):
                for i in rglen(x):
                    x[i] = max((bounds[0], x[i]))
            else:
                for i in rglen(x):
                    # reuse the last bound entry for trailing coordinates
                    j = min([i, len(bounds[0]) - 1])
                    if bounds[0][j] is not None:
                        x[i] = max((bounds[0][j], x[i]))
        if bounds[1] is not None:  # clamp from above
            if isscalar(bounds[1]):
                for i in rglen(x):
                    x[i] = min((bounds[1], x[i]))
            else:
                for i in rglen(x):
                    j = min((i, len(bounds[1]) - 1))
                    if bounds[1][j] is not None:
                        x[i] = min((bounds[1][j], x[i]))
    return x
|
def _fix_labels(self):
    """For each system, make sure tag _0 is the brightest, and make sure
    system 0 contains the brightest star in the highest-resolution image
    """
    for system in self.systems:
        # Locate the node with the smallest (brightest) magnitude,
        # skipping nodes whose parent is a dummy observation.
        brightest = None
        brightest_mag = np.inf
        for node in self.get_system(system):
            if isinstance(node.parent, DummyObsNode):
                continue
            node_mag, _ = node.parent.value
            if node_mag < brightest_mag:
                brightest_mag = node_mag
                brightest = node
        # Nothing to do if no candidate, or it already carries tag 0.
        if brightest is None or brightest.tag == 0:
            continue
        # Swap tags so the brightest node becomes tag 0.
        current_zero = self.get_leaf('{}_{}'.format(system, 0))
        current_zero.tag = brightest.tag
        brightest.tag = 0
|
def function[_fix_labels, parameter[self]]:
constant[For each system, make sure tag _0 is the brightest, and make sure
system 0 contains the brightest star in the highest-resolution image
]
for taget[name[s]] in starred[name[self].systems] begin[:]
variable[mag0] assign[=] name[np].inf
variable[n0] assign[=] constant[None]
for taget[name[n]] in starred[call[name[self].get_system, parameter[name[s]]]] begin[:]
if call[name[isinstance], parameter[name[n].parent, name[DummyObsNode]]] begin[:]
continue
<ast.Tuple object at 0x7da18dc989a0> assign[=] name[n].parent.value
if compare[name[mag] less[<] name[mag0]] begin[:]
variable[mag0] assign[=] name[mag]
variable[n0] assign[=] name[n]
if <ast.BoolOp object at 0x7da18dc98130> begin[:]
variable[n_other] assign[=] call[name[self].get_leaf, parameter[call[constant[{}_{}].format, parameter[name[s], constant[0]]]]]
name[n_other].tag assign[=] name[n0].tag
name[n0].tag assign[=] constant[0]
|
keyword[def] identifier[_fix_labels] ( identifier[self] ):
literal[string]
keyword[for] identifier[s] keyword[in] identifier[self] . identifier[systems] :
identifier[mag0] = identifier[np] . identifier[inf]
identifier[n0] = keyword[None]
keyword[for] identifier[n] keyword[in] identifier[self] . identifier[get_system] ( identifier[s] ):
keyword[if] identifier[isinstance] ( identifier[n] . identifier[parent] , identifier[DummyObsNode] ):
keyword[continue]
identifier[mag] , identifier[_] = identifier[n] . identifier[parent] . identifier[value]
keyword[if] identifier[mag] < identifier[mag0] :
identifier[mag0] = identifier[mag]
identifier[n0] = identifier[n]
keyword[if] identifier[n0] keyword[is] keyword[not] keyword[None] keyword[and] identifier[n0] . identifier[tag] != literal[int] :
identifier[n_other] = identifier[self] . identifier[get_leaf] ( literal[string] . identifier[format] ( identifier[s] , literal[int] ))
identifier[n_other] . identifier[tag] = identifier[n0] . identifier[tag]
identifier[n0] . identifier[tag] = literal[int]
|
def _fix_labels(self):
"""For each system, make sure tag _0 is the brightest, and make sure
system 0 contains the brightest star in the highest-resolution image
"""
for s in self.systems:
mag0 = np.inf
n0 = None
for n in self.get_system(s):
if isinstance(n.parent, DummyObsNode):
continue # depends on [control=['if'], data=[]]
(mag, _) = n.parent.value
if mag < mag0:
mag0 = mag
n0 = n # depends on [control=['if'], data=['mag', 'mag0']] # depends on [control=['for'], data=['n']]
# If brightest is not tag _0, then switch them.
if n0 is not None and n0.tag != 0:
n_other = self.get_leaf('{}_{}'.format(s, 0))
n_other.tag = n0.tag
n0.tag = 0 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['s']]
|
def _extract_all_responses(self, resources, api_endpoint, api_name):
""" Aux function to extract all the API endpoint responses.
Args:
resources: list of string hashes.
api_endpoint: endpoint path
api_name: endpoint name
Returns:
A dict with the hash as key and the VT report as value.
"""
all_responses, resources = self._bulk_cache_lookup(api_name, resources)
resource_chunks = self._prepare_resource_chunks(resources)
response_chunks = self._request_reports("resource", resource_chunks, api_endpoint)
self._extract_response_chunks(all_responses, response_chunks, api_name)
return all_responses
|
def function[_extract_all_responses, parameter[self, resources, api_endpoint, api_name]]:
constant[ Aux function to extract all the API endpoint responses.
Args:
resources: list of string hashes.
api_endpoint: endpoint path
api_name: endpoint name
Returns:
A dict with the hash as key and the VT report as value.
]
<ast.Tuple object at 0x7da20e748400> assign[=] call[name[self]._bulk_cache_lookup, parameter[name[api_name], name[resources]]]
variable[resource_chunks] assign[=] call[name[self]._prepare_resource_chunks, parameter[name[resources]]]
variable[response_chunks] assign[=] call[name[self]._request_reports, parameter[constant[resource], name[resource_chunks], name[api_endpoint]]]
call[name[self]._extract_response_chunks, parameter[name[all_responses], name[response_chunks], name[api_name]]]
return[name[all_responses]]
|
keyword[def] identifier[_extract_all_responses] ( identifier[self] , identifier[resources] , identifier[api_endpoint] , identifier[api_name] ):
literal[string]
identifier[all_responses] , identifier[resources] = identifier[self] . identifier[_bulk_cache_lookup] ( identifier[api_name] , identifier[resources] )
identifier[resource_chunks] = identifier[self] . identifier[_prepare_resource_chunks] ( identifier[resources] )
identifier[response_chunks] = identifier[self] . identifier[_request_reports] ( literal[string] , identifier[resource_chunks] , identifier[api_endpoint] )
identifier[self] . identifier[_extract_response_chunks] ( identifier[all_responses] , identifier[response_chunks] , identifier[api_name] )
keyword[return] identifier[all_responses]
|
def _extract_all_responses(self, resources, api_endpoint, api_name):
""" Aux function to extract all the API endpoint responses.
Args:
resources: list of string hashes.
api_endpoint: endpoint path
api_name: endpoint name
Returns:
A dict with the hash as key and the VT report as value.
"""
(all_responses, resources) = self._bulk_cache_lookup(api_name, resources)
resource_chunks = self._prepare_resource_chunks(resources)
response_chunks = self._request_reports('resource', resource_chunks, api_endpoint)
self._extract_response_chunks(all_responses, response_chunks, api_name)
return all_responses
|
def load_gffutils_db(f):
    """
    Load database for gffutils.
    Parameters
    ----------
    f : str
        Path to database.
    Returns
    -------
    db : gffutils.FeatureDB
        gffutils feature database.
    """
    # Import lazily so gffutils is only required when this helper is used.
    import gffutils
    return gffutils.FeatureDB(f, keep_order=True)
|
def function[load_gffutils_db, parameter[f]]:
constant[
Load database for gffutils.
Parameters
----------
f : str
Path to database.
Returns
-------
db : gffutils.FeatureDB
gffutils feature database.
]
import module[gffutils]
variable[db] assign[=] call[name[gffutils].FeatureDB, parameter[name[f]]]
return[name[db]]
|
keyword[def] identifier[load_gffutils_db] ( identifier[f] ):
literal[string]
keyword[import] identifier[gffutils]
identifier[db] = identifier[gffutils] . identifier[FeatureDB] ( identifier[f] , identifier[keep_order] = keyword[True] )
keyword[return] identifier[db]
|
def load_gffutils_db(f):
"""
Load database for gffutils.
Parameters
----------
f : str
Path to database.
Returns
-------
db : gffutils.FeatureDB
gffutils feature database.
"""
import gffutils
db = gffutils.FeatureDB(f, keep_order=True)
return db
|
def get_iso_dir():
    """Get the ugali isochrone directory."""
    dirname = os.path.join(get_ugali_dir(), 'isochrones')
    if os.path.exists(dirname):
        return dirname
    # Warn (importing the logger lazily) but still return the missing path.
    from ugali.utils.logger import logger
    logger.warning("Isochrone directory not found:\n%s" % dirname)
    return dirname
|
def function[get_iso_dir, parameter[]]:
constant[Get the ugali isochrone directory.]
variable[dirname] assign[=] call[name[os].path.join, parameter[call[name[get_ugali_dir], parameter[]], constant[isochrones]]]
if <ast.UnaryOp object at 0x7da1b25d3a30> begin[:]
from relative_module[ugali.utils.logger] import module[logger]
variable[msg] assign[=] binary_operation[constant[Isochrone directory not found:
%s] <ast.Mod object at 0x7da2590d6920> name[dirname]]
call[name[logger].warning, parameter[name[msg]]]
return[name[dirname]]
|
keyword[def] identifier[get_iso_dir] ():
literal[string]
identifier[dirname] = identifier[os] . identifier[path] . identifier[join] ( identifier[get_ugali_dir] (), literal[string] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[dirname] ):
keyword[from] identifier[ugali] . identifier[utils] . identifier[logger] keyword[import] identifier[logger]
identifier[msg] = literal[string] % identifier[dirname]
identifier[logger] . identifier[warning] ( identifier[msg] )
keyword[return] identifier[dirname]
|
def get_iso_dir():
"""Get the ugali isochrone directory."""
dirname = os.path.join(get_ugali_dir(), 'isochrones')
if not os.path.exists(dirname):
from ugali.utils.logger import logger
msg = 'Isochrone directory not found:\n%s' % dirname
logger.warning(msg) # depends on [control=['if'], data=[]]
return dirname
|
def get_activities(self, activity_ids=None, max_records=50):
    """
    Get all activies for this group.
    """
    # Delegate directly to the connection object.
    conn = self.connection
    return conn.get_all_activities(self, activity_ids, max_records)
|
def function[get_activities, parameter[self, activity_ids, max_records]]:
constant[
Get all activies for this group.
]
return[call[name[self].connection.get_all_activities, parameter[name[self], name[activity_ids], name[max_records]]]]
|
keyword[def] identifier[get_activities] ( identifier[self] , identifier[activity_ids] = keyword[None] , identifier[max_records] = literal[int] ):
literal[string]
keyword[return] identifier[self] . identifier[connection] . identifier[get_all_activities] ( identifier[self] , identifier[activity_ids] ,
identifier[max_records] )
|
def get_activities(self, activity_ids=None, max_records=50):
"""
Get all activies for this group.
"""
return self.connection.get_all_activities(self, activity_ids, max_records)
|
def _GetAccountsData(self, metadata_dict):
"""Get the user accounts specified in metadata server contents.
Args:
metadata_dict: json, the deserialized contents of the metadata server.
Returns:
dict, a mapping of the form: {'username': ['sshkey1, 'sshkey2', ...]}.
"""
instance_data, project_data = self._GetInstanceAndProjectAttributes(
metadata_dict)
valid_keys = [instance_data.get('sshKeys'), instance_data.get('ssh-keys')]
block_project = instance_data.get('block-project-ssh-keys', '').lower()
if block_project != 'true' and not instance_data.get('sshKeys'):
valid_keys.append(project_data.get('ssh-keys'))
valid_keys.append(project_data.get('sshKeys'))
accounts_data = '\n'.join([key for key in valid_keys if key])
return self._ParseAccountsData(accounts_data)
|
def function[_GetAccountsData, parameter[self, metadata_dict]]:
constant[Get the user accounts specified in metadata server contents.
Args:
metadata_dict: json, the deserialized contents of the metadata server.
Returns:
dict, a mapping of the form: {'username': ['sshkey1, 'sshkey2', ...]}.
]
<ast.Tuple object at 0x7da1b170f5b0> assign[=] call[name[self]._GetInstanceAndProjectAttributes, parameter[name[metadata_dict]]]
variable[valid_keys] assign[=] list[[<ast.Call object at 0x7da1b170ef80>, <ast.Call object at 0x7da1b170f070>]]
variable[block_project] assign[=] call[call[name[instance_data].get, parameter[constant[block-project-ssh-keys], constant[]]].lower, parameter[]]
if <ast.BoolOp object at 0x7da1b170f7c0> begin[:]
call[name[valid_keys].append, parameter[call[name[project_data].get, parameter[constant[ssh-keys]]]]]
call[name[valid_keys].append, parameter[call[name[project_data].get, parameter[constant[sshKeys]]]]]
variable[accounts_data] assign[=] call[constant[
].join, parameter[<ast.ListComp object at 0x7da204963ca0>]]
return[call[name[self]._ParseAccountsData, parameter[name[accounts_data]]]]
|
keyword[def] identifier[_GetAccountsData] ( identifier[self] , identifier[metadata_dict] ):
literal[string]
identifier[instance_data] , identifier[project_data] = identifier[self] . identifier[_GetInstanceAndProjectAttributes] (
identifier[metadata_dict] )
identifier[valid_keys] =[ identifier[instance_data] . identifier[get] ( literal[string] ), identifier[instance_data] . identifier[get] ( literal[string] )]
identifier[block_project] = identifier[instance_data] . identifier[get] ( literal[string] , literal[string] ). identifier[lower] ()
keyword[if] identifier[block_project] != literal[string] keyword[and] keyword[not] identifier[instance_data] . identifier[get] ( literal[string] ):
identifier[valid_keys] . identifier[append] ( identifier[project_data] . identifier[get] ( literal[string] ))
identifier[valid_keys] . identifier[append] ( identifier[project_data] . identifier[get] ( literal[string] ))
identifier[accounts_data] = literal[string] . identifier[join] ([ identifier[key] keyword[for] identifier[key] keyword[in] identifier[valid_keys] keyword[if] identifier[key] ])
keyword[return] identifier[self] . identifier[_ParseAccountsData] ( identifier[accounts_data] )
|
def _GetAccountsData(self, metadata_dict):
"""Get the user accounts specified in metadata server contents.
Args:
metadata_dict: json, the deserialized contents of the metadata server.
Returns:
dict, a mapping of the form: {'username': ['sshkey1, 'sshkey2', ...]}.
"""
(instance_data, project_data) = self._GetInstanceAndProjectAttributes(metadata_dict)
valid_keys = [instance_data.get('sshKeys'), instance_data.get('ssh-keys')]
block_project = instance_data.get('block-project-ssh-keys', '').lower()
if block_project != 'true' and (not instance_data.get('sshKeys')):
valid_keys.append(project_data.get('ssh-keys'))
valid_keys.append(project_data.get('sshKeys')) # depends on [control=['if'], data=[]]
accounts_data = '\n'.join([key for key in valid_keys if key])
return self._ParseAccountsData(accounts_data)
|
def _get_mixin_attributes(mixins):
    """Retrieve the attributes for a given set of mixin classes.
    The attributes of each mixin class are being merged into a single
    dictionary.
    """
    merged = {}
    for mixin in mixins:
        # Later mixins overwrite earlier ones on attribute-name collisions.
        for attribute in _MIXIN_ATTRIBUTES[mixin]:
            merged[attribute] = mixin.__dict__[attribute]
    return merged
|
def function[_get_mixin_attributes, parameter[mixins]]:
constant[Retrieve the attributes for a given set of mixin classes.
The attributes of each mixin class are being merged into a single
dictionary.
]
return[<ast.DictComp object at 0x7da1b15c1630>]
|
keyword[def] identifier[_get_mixin_attributes] ( identifier[mixins] ):
literal[string]
keyword[return] { identifier[attribute] : identifier[mixin] . identifier[__dict__] [ identifier[attribute] ]
keyword[for] identifier[mixin] keyword[in] identifier[mixins]
keyword[for] identifier[attribute] keyword[in] identifier[_MIXIN_ATTRIBUTES] [ identifier[mixin] ]}
|
def _get_mixin_attributes(mixins):
"""Retrieve the attributes for a given set of mixin classes.
The attributes of each mixin class are being merged into a single
dictionary.
"""
return {attribute: mixin.__dict__[attribute] for mixin in mixins for attribute in _MIXIN_ATTRIBUTES[mixin]}
|
def _to_str(x):
    """Converts a bool tensor to a string with True/False values."""
    x = tf.convert_to_tensor(value=x)
    if x.dtype != tf.bool:
        return x
    # Element-wise select between constant 'True'/'False' string tensors.
    trues = tf.fill(x.shape, 'True')
    falses = tf.fill(x.shape, 'False')
    return tf.where(x, trues, falses)
|
def function[_to_str, parameter[x]]:
constant[Converts a bool tensor to a string with True/False values.]
variable[x] assign[=] call[name[tf].convert_to_tensor, parameter[]]
if compare[name[x].dtype equal[==] name[tf].bool] begin[:]
return[call[name[tf].where, parameter[name[x], call[name[tf].fill, parameter[name[x].shape, constant[True]]], call[name[tf].fill, parameter[name[x].shape, constant[False]]]]]]
return[name[x]]
|
keyword[def] identifier[_to_str] ( identifier[x] ):
literal[string]
identifier[x] = identifier[tf] . identifier[convert_to_tensor] ( identifier[value] = identifier[x] )
keyword[if] identifier[x] . identifier[dtype] == identifier[tf] . identifier[bool] :
keyword[return] identifier[tf] . identifier[where] ( identifier[x] , identifier[tf] . identifier[fill] ( identifier[x] . identifier[shape] , literal[string] ), identifier[tf] . identifier[fill] ( identifier[x] . identifier[shape] , literal[string] ))
keyword[return] identifier[x]
|
def _to_str(x):
"""Converts a bool tensor to a string with True/False values."""
x = tf.convert_to_tensor(value=x)
if x.dtype == tf.bool:
return tf.where(x, tf.fill(x.shape, 'True'), tf.fill(x.shape, 'False')) # depends on [control=['if'], data=[]]
return x
|
def format_hex(i, num_bytes=4, prefix='0x'):
    """ Format hexidecimal string from decimal integer value
    >>> format_hex(42, num_bytes=8, prefix=None)
    '0000002a'
    >>> format_hex(23)
    '0x0017'
    """
    # Falsy prefix (None, '') means no prefix; falsy i means zero.
    head = str(prefix) if prefix else ''
    value = int(i) if i else 0
    # NOTE: despite the name, ``num_bytes`` is the zero-padded width in
    # hex digits (see doctest above), not a byte count.
    return head + format(value, '0{0}x'.format(num_bytes))
|
def function[format_hex, parameter[i, num_bytes, prefix]]:
constant[ Format hexidecimal string from decimal integer value
>>> format_hex(42, num_bytes=8, prefix=None)
'0000002a'
>>> format_hex(23)
'0x0017'
]
variable[prefix] assign[=] call[name[str], parameter[<ast.BoolOp object at 0x7da2054a6260>]]
variable[i] assign[=] call[name[int], parameter[<ast.BoolOp object at 0x7da2054a6a10>]]
return[binary_operation[name[prefix] + call[constant[{0:0{1}x}].format, parameter[name[i], name[num_bytes]]]]]
|
keyword[def] identifier[format_hex] ( identifier[i] , identifier[num_bytes] = literal[int] , identifier[prefix] = literal[string] ):
literal[string]
identifier[prefix] = identifier[str] ( identifier[prefix] keyword[or] literal[string] )
identifier[i] = identifier[int] ( identifier[i] keyword[or] literal[int] )
keyword[return] identifier[prefix] + literal[string] . identifier[format] ( identifier[i] , identifier[num_bytes] )
|
def format_hex(i, num_bytes=4, prefix='0x'):
""" Format hexidecimal string from decimal integer value
>>> format_hex(42, num_bytes=8, prefix=None)
'0000002a'
>>> format_hex(23)
'0x0017'
"""
prefix = str(prefix or '')
i = int(i or 0)
return prefix + '{0:0{1}x}'.format(i, num_bytes)
|
def pad(obj, pad_length):
    """
    Return a copy of the object with piano-roll padded with zeros at the end
    along the time axis.
    Parameters
    ----------
    pad_length : int
        The length to pad along the time axis with zeros.
    """
    _check_supported(obj)
    # Pad a deep copy so the caller's object is left untouched.
    padded = deepcopy(obj)
    padded.pad(pad_length)
    return padded
|
def function[pad, parameter[obj, pad_length]]:
constant[
Return a copy of the object with piano-roll padded with zeros at the end
along the time axis.
Parameters
----------
pad_length : int
The length to pad along the time axis with zeros.
]
call[name[_check_supported], parameter[name[obj]]]
variable[copied] assign[=] call[name[deepcopy], parameter[name[obj]]]
call[name[copied].pad, parameter[name[pad_length]]]
return[name[copied]]
|
keyword[def] identifier[pad] ( identifier[obj] , identifier[pad_length] ):
literal[string]
identifier[_check_supported] ( identifier[obj] )
identifier[copied] = identifier[deepcopy] ( identifier[obj] )
identifier[copied] . identifier[pad] ( identifier[pad_length] )
keyword[return] identifier[copied]
|
def pad(obj, pad_length):
"""
Return a copy of the object with piano-roll padded with zeros at the end
along the time axis.
Parameters
----------
pad_length : int
The length to pad along the time axis with zeros.
"""
_check_supported(obj)
copied = deepcopy(obj)
copied.pad(pad_length)
return copied
|
def summarise_pdfs(pdfs):
    """
    Collate the first page from each of the PDFs provided into a single PDF.
    :param pdfs:
        The contents of several PDF files.
    :type pdfs:
        list of str
    :returns:
        The contents of single PDF, which can be written directly to disk.
    """
    # Entries of None represent articles whose download/render failed.
    n_total = len(pdfs)
    n_failed = pdfs.count(None)
    print('Summarising {0} articles ({1} had errors)'.format(n_total, n_failed))
    summary = PdfFileWriter()
    for contents in pdfs:
        if contents is None:
            continue
        # Only the first page of each article goes into the summary.
        summary.addPage(PdfFileReader(StringIO(contents)).getPage(0))
    return summary
|
def function[summarise_pdfs, parameter[pdfs]]:
constant[
Collate the first page from each of the PDFs provided into a single PDF.
:param pdfs:
The contents of several PDF files.
:type pdfs:
list of str
:returns:
The contents of single PDF, which can be written directly to disk.
]
call[name[print], parameter[call[constant[Summarising {0} articles ({1} had errors)].format, parameter[call[name[len], parameter[name[pdfs]]], call[name[pdfs].count, parameter[constant[None]]]]]]]
variable[pdfs] assign[=] <ast.ListComp object at 0x7da20e961270>
variable[summary] assign[=] call[name[PdfFileWriter], parameter[]]
for taget[name[pdf]] in starred[name[pdfs]] begin[:]
call[name[summary].addPage, parameter[call[call[name[PdfFileReader], parameter[call[name[StringIO], parameter[name[pdf]]]]].getPage, parameter[constant[0]]]]]
return[name[summary]]
|
keyword[def] identifier[summarise_pdfs] ( identifier[pdfs] ):
literal[string]
identifier[print] ( literal[string] . identifier[format] (
identifier[len] ( identifier[pdfs] ), identifier[pdfs] . identifier[count] ( keyword[None] )))
identifier[pdfs] =[ identifier[_] keyword[for] identifier[_] keyword[in] identifier[pdfs] keyword[if] identifier[_] keyword[is] keyword[not] keyword[None] ]
identifier[summary] = identifier[PdfFileWriter] ()
keyword[for] identifier[pdf] keyword[in] identifier[pdfs] :
identifier[summary] . identifier[addPage] ( identifier[PdfFileReader] ( identifier[StringIO] ( identifier[pdf] )). identifier[getPage] ( literal[int] ))
keyword[return] identifier[summary]
|
def summarise_pdfs(pdfs):
"""
Collate the first page from each of the PDFs provided into a single PDF.
:param pdfs:
The contents of several PDF files.
:type pdfs:
list of str
:returns:
The contents of single PDF, which can be written directly to disk.
"""
# Ignore None.
print('Summarising {0} articles ({1} had errors)'.format(len(pdfs), pdfs.count(None)))
pdfs = [_ for _ in pdfs if _ is not None]
summary = PdfFileWriter()
for pdf in pdfs:
summary.addPage(PdfFileReader(StringIO(pdf)).getPage(0)) # depends on [control=['for'], data=['pdf']]
return summary
|
def _make_trace(neuron, plane):
    '''Create the trace to be plotted

    Yields one plotly trace per neurite, colored by neurite type.
    ``plane`` is '3d' for a Scatter3d trace, otherwise a two-character
    axis pair (e.g. 'xy') selecting a 2D projection.
    '''
    for neurite in iter_neurites(neuron):
        segments = list(iter_segments(neurite))
        # Pair of endpoint XYZ triples for each segment.
        segs = [(s[0][COLS.XYZ], s[1][COLS.XYZ]) for s in segments]
        # Interleave a None after each segment's two points so plotly draws
        # disjoint line segments rather than one connected polyline.
        coords = dict(x=list(chain.from_iterable((p1[0], p2[0], None) for p1, p2 in segs)),
                      y=list(chain.from_iterable((p1[1], p2[1], None) for p1, p2 in segs)),
                      z=list(chain.from_iterable((p1[2], p2[2], None) for p1, p2 in segs)))
        color = TREE_COLOR.get(neurite.root_node.type, 'black')
        if plane.lower() == '3d':
            plot_fun = go.Scatter3d
        else:
            plot_fun = go.Scatter
            # Project onto the two axes named by ``plane``.
            coords = dict(x=coords[plane[0]], y=coords[plane[1]])
        yield plot_fun(
            line=dict(color=color, width=2),
            mode='lines',
            **coords
        )
|
def function[_make_trace, parameter[neuron, plane]]:
constant[Create the trace to be plotted]
for taget[name[neurite]] in starred[call[name[iter_neurites], parameter[name[neuron]]]] begin[:]
variable[segments] assign[=] call[name[list], parameter[call[name[iter_segments], parameter[name[neurite]]]]]
variable[segs] assign[=] <ast.ListComp object at 0x7da204621780>
variable[coords] assign[=] call[name[dict], parameter[]]
variable[color] assign[=] call[name[TREE_COLOR].get, parameter[name[neurite].root_node.type, constant[black]]]
if compare[call[name[plane].lower, parameter[]] equal[==] constant[3d]] begin[:]
variable[plot_fun] assign[=] name[go].Scatter3d
<ast.Yield object at 0x7da18f00f6a0>
|
keyword[def] identifier[_make_trace] ( identifier[neuron] , identifier[plane] ):
literal[string]
keyword[for] identifier[neurite] keyword[in] identifier[iter_neurites] ( identifier[neuron] ):
identifier[segments] = identifier[list] ( identifier[iter_segments] ( identifier[neurite] ))
identifier[segs] =[( identifier[s] [ literal[int] ][ identifier[COLS] . identifier[XYZ] ], identifier[s] [ literal[int] ][ identifier[COLS] . identifier[XYZ] ]) keyword[for] identifier[s] keyword[in] identifier[segments] ]
identifier[coords] = identifier[dict] ( identifier[x] = identifier[list] ( identifier[chain] . identifier[from_iterable] (( identifier[p1] [ literal[int] ], identifier[p2] [ literal[int] ], keyword[None] ) keyword[for] identifier[p1] , identifier[p2] keyword[in] identifier[segs] )),
identifier[y] = identifier[list] ( identifier[chain] . identifier[from_iterable] (( identifier[p1] [ literal[int] ], identifier[p2] [ literal[int] ], keyword[None] ) keyword[for] identifier[p1] , identifier[p2] keyword[in] identifier[segs] )),
identifier[z] = identifier[list] ( identifier[chain] . identifier[from_iterable] (( identifier[p1] [ literal[int] ], identifier[p2] [ literal[int] ], keyword[None] ) keyword[for] identifier[p1] , identifier[p2] keyword[in] identifier[segs] )))
identifier[color] = identifier[TREE_COLOR] . identifier[get] ( identifier[neurite] . identifier[root_node] . identifier[type] , literal[string] )
keyword[if] identifier[plane] . identifier[lower] ()== literal[string] :
identifier[plot_fun] = identifier[go] . identifier[Scatter3d]
keyword[else] :
identifier[plot_fun] = identifier[go] . identifier[Scatter]
identifier[coords] = identifier[dict] ( identifier[x] = identifier[coords] [ identifier[plane] [ literal[int] ]], identifier[y] = identifier[coords] [ identifier[plane] [ literal[int] ]])
keyword[yield] identifier[plot_fun] (
identifier[line] = identifier[dict] ( identifier[color] = identifier[color] , identifier[width] = literal[int] ),
identifier[mode] = literal[string] ,
** identifier[coords]
)
|
def _make_trace(neuron, plane):
"""Create the trace to be plotted"""
for neurite in iter_neurites(neuron):
segments = list(iter_segments(neurite))
segs = [(s[0][COLS.XYZ], s[1][COLS.XYZ]) for s in segments]
coords = dict(x=list(chain.from_iterable(((p1[0], p2[0], None) for (p1, p2) in segs))), y=list(chain.from_iterable(((p1[1], p2[1], None) for (p1, p2) in segs))), z=list(chain.from_iterable(((p1[2], p2[2], None) for (p1, p2) in segs))))
color = TREE_COLOR.get(neurite.root_node.type, 'black')
if plane.lower() == '3d':
plot_fun = go.Scatter3d # depends on [control=['if'], data=[]]
else:
plot_fun = go.Scatter
coords = dict(x=coords[plane[0]], y=coords[plane[1]])
yield plot_fun(line=dict(color=color, width=2), mode='lines', **coords) # depends on [control=['for'], data=['neurite']]
|
def load(self, commit=None):
    """Load a result from the database."""
    git_info = self.record_git_info(commit)
    LOGGER.info("Loading result from '%s'.", git_info.hexsha)
    # Exactly one row is expected per commit hash.
    row = (self.session.query(Result.memote_result)
           .filter_by(hexsha=git_info.hexsha)
           .one())
    result = MemoteResult(row.memote_result)
    # Add git info so the object is equivalent to the one returned by the
    # RepoResultManager.
    self.add_git(result.meta, git_info)
    return result
|
def function[load, parameter[self, commit]]:
constant[Load a result from the database.]
variable[git_info] assign[=] call[name[self].record_git_info, parameter[name[commit]]]
call[name[LOGGER].info, parameter[constant[Loading result from '%s'.], name[git_info].hexsha]]
variable[result] assign[=] call[name[MemoteResult], parameter[call[call[call[name[self].session.query, parameter[name[Result].memote_result]].filter_by, parameter[]].one, parameter[]].memote_result]]
call[name[self].add_git, parameter[name[result].meta, name[git_info]]]
return[name[result]]
|
keyword[def] identifier[load] ( identifier[self] , identifier[commit] = keyword[None] ):
literal[string]
identifier[git_info] = identifier[self] . identifier[record_git_info] ( identifier[commit] )
identifier[LOGGER] . identifier[info] ( literal[string] , identifier[git_info] . identifier[hexsha] )
identifier[result] = identifier[MemoteResult] (
identifier[self] . identifier[session] . identifier[query] ( identifier[Result] . identifier[memote_result] ).
identifier[filter_by] ( identifier[hexsha] = identifier[git_info] . identifier[hexsha] ).
identifier[one] (). identifier[memote_result] )
identifier[self] . identifier[add_git] ( identifier[result] . identifier[meta] , identifier[git_info] )
keyword[return] identifier[result]
|
def load(self, commit=None):
"""Load a result from the database."""
git_info = self.record_git_info(commit)
LOGGER.info("Loading result from '%s'.", git_info.hexsha)
result = MemoteResult(self.session.query(Result.memote_result).filter_by(hexsha=git_info.hexsha).one().memote_result)
# Add git info so the object is equivalent to the one returned by the
# RepoResultManager.
self.add_git(result.meta, git_info)
return result
|
def _output(cls,
            tensors: Sequence[tf.Tensor],
            dtypes: Sequence[tf.DType]) -> Sequence[tf.Tensor]:
    '''Converts `tensors` to the corresponding `dtypes`.'''
    converted = []
    for tensor, dtype in zip(tensors, dtypes):
        # Take the leading element, then cast only when dtypes differ.
        first = tensor[0]
        if first.dtype != dtype:
            first = tf.cast(first, dtype)
        converted.append(first)
    return tuple(converted)
|
def function[_output, parameter[cls, tensors, dtypes]]:
constant[Converts `tensors` to the corresponding `dtypes`.]
variable[outputs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b24af400>, <ast.Name object at 0x7da1b24af0d0>]]] in starred[call[name[zip], parameter[name[tensors], name[dtypes]]]] begin[:]
variable[tensor] assign[=] call[name[tensor]][constant[0]]
if compare[name[tensor].dtype not_equal[!=] name[dtype]] begin[:]
variable[tensor] assign[=] call[name[tf].cast, parameter[name[tensor], name[dtype]]]
call[name[outputs].append, parameter[name[tensor]]]
return[call[name[tuple], parameter[name[outputs]]]]
|
keyword[def] identifier[_output] ( identifier[cls] ,
identifier[tensors] : identifier[Sequence] [ identifier[tf] . identifier[Tensor] ],
identifier[dtypes] : identifier[Sequence] [ identifier[tf] . identifier[DType] ])-> identifier[Sequence] [ identifier[tf] . identifier[Tensor] ]:
literal[string]
identifier[outputs] =[]
keyword[for] identifier[tensor] , identifier[dtype] keyword[in] identifier[zip] ( identifier[tensors] , identifier[dtypes] ):
identifier[tensor] = identifier[tensor] [ literal[int] ]
keyword[if] identifier[tensor] . identifier[dtype] != identifier[dtype] :
identifier[tensor] = identifier[tf] . identifier[cast] ( identifier[tensor] , identifier[dtype] )
identifier[outputs] . identifier[append] ( identifier[tensor] )
keyword[return] identifier[tuple] ( identifier[outputs] )
|
def _output(cls, tensors: Sequence[tf.Tensor], dtypes: Sequence[tf.DType]) -> Sequence[tf.Tensor]:
"""Converts `tensors` to the corresponding `dtypes`."""
outputs = []
for (tensor, dtype) in zip(tensors, dtypes):
tensor = tensor[0]
if tensor.dtype != dtype:
tensor = tf.cast(tensor, dtype) # depends on [control=['if'], data=['dtype']]
outputs.append(tensor) # depends on [control=['for'], data=[]]
return tuple(outputs)
|
def _replace_file(path, content):
"""Writes a file if it doesn't already exist with the same content.
This is useful because cargo uses timestamps to decide whether to compile things."""
if os.path.exists(path):
with open(path, 'r') as f:
if content == f.read():
print("Not overwriting {} because it is unchanged".format(path), file=sys.stderr)
return
with open(path, 'w') as f:
f.write(content)
|
def function[_replace_file, parameter[path, content]]:
constant[Writes a file if it doesn't already exist with the same content.
This is useful because cargo uses timestamps to decide whether to compile things.]
if call[name[os].path.exists, parameter[name[path]]] begin[:]
with call[name[open], parameter[name[path], constant[r]]] begin[:]
if compare[name[content] equal[==] call[name[f].read, parameter[]]] begin[:]
call[name[print], parameter[call[constant[Not overwriting {} because it is unchanged].format, parameter[name[path]]]]]
return[None]
with call[name[open], parameter[name[path], constant[w]]] begin[:]
call[name[f].write, parameter[name[content]]]
|
keyword[def] identifier[_replace_file] ( identifier[path] , identifier[content] ):
literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ):
keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[f] :
keyword[if] identifier[content] == identifier[f] . identifier[read] ():
identifier[print] ( literal[string] . identifier[format] ( identifier[path] ), identifier[file] = identifier[sys] . identifier[stderr] )
keyword[return]
keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[content] )
|
def _replace_file(path, content):
"""Writes a file if it doesn't already exist with the same content.
This is useful because cargo uses timestamps to decide whether to compile things."""
if os.path.exists(path):
with open(path, 'r') as f:
if content == f.read():
print('Not overwriting {} because it is unchanged'.format(path), file=sys.stderr)
return # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
with open(path, 'w') as f:
f.write(content) # depends on [control=['with'], data=['f']]
|
def register(cls, package_type):
    """Add `package_type` to the set of Package implementations pex recognizes.

    :raises TypeError: if `package_type` is not a subclass of this Package type.
    """
    if issubclass(package_type, cls):
        cls._REGISTRY.add(package_type)
    else:
        raise TypeError('package_type must be a subclass of Package.')
|
def function[register, parameter[cls, package_type]]:
constant[Register a concrete implementation of a Package to be recognized by pex.]
if <ast.UnaryOp object at 0x7da18bc71270> begin[:]
<ast.Raise object at 0x7da18bc718d0>
call[name[cls]._REGISTRY.add, parameter[name[package_type]]]
|
keyword[def] identifier[register] ( identifier[cls] , identifier[package_type] ):
literal[string]
keyword[if] keyword[not] identifier[issubclass] ( identifier[package_type] , identifier[cls] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[cls] . identifier[_REGISTRY] . identifier[add] ( identifier[package_type] )
|
def register(cls, package_type):
"""Register a concrete implementation of a Package to be recognized by pex."""
if not issubclass(package_type, cls):
raise TypeError('package_type must be a subclass of Package.') # depends on [control=['if'], data=[]]
cls._REGISTRY.add(package_type)
|
def _upload_file_aws_cli(local_fname, bucket, keyname, config=None, mditems=None):
    """Streaming upload via the standard AWS command line interface.

    Runs the `aws` binary that sits next to the current Python executable
    with server-side encryption enabled; optional region and
    reduced-redundancy settings are read from `config`.  `mditems` is
    accepted for interface compatibility and is unused here.
    """
    s3_fname = "s3://%s/%s" % (bucket, keyname)
    extra_args = ["--sse", "--expected-size", str(os.path.getsize(local_fname))]
    if config:
        region = config.get("region")
        if region:
            extra_args.extend(["--region", region])
        if config.get("reduced_redundancy"):
            extra_args.extend(["--storage-class", "REDUCED_REDUNDANCY"])
    aws_bin = os.path.join(os.path.dirname(sys.executable), "aws")
    cmd = [aws_bin, "s3", "cp"] + extra_args + [local_fname, s3_fname]
    do.run(cmd, "Upload to s3: %s %s" % (bucket, keyname))
|
def function[_upload_file_aws_cli, parameter[local_fname, bucket, keyname, config, mditems]]:
constant[Streaming upload via the standard AWS command line interface.
]
variable[s3_fname] assign[=] binary_operation[constant[s3://%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1885c00>, <ast.Name object at 0x7da1b18845e0>]]]
variable[args] assign[=] list[[<ast.Constant object at 0x7da1b1884820>, <ast.Constant object at 0x7da1b1884cd0>, <ast.Call object at 0x7da1b1885de0>]]
if name[config] begin[:]
if call[name[config].get, parameter[constant[region]]] begin[:]
<ast.AugAssign object at 0x7da1b1887df0>
if call[name[config].get, parameter[constant[reduced_redundancy]]] begin[:]
<ast.AugAssign object at 0x7da1b18861a0>
variable[cmd] assign[=] binary_operation[binary_operation[list[[<ast.Call object at 0x7da1b1884730>, <ast.Constant object at 0x7da1b18dadd0>, <ast.Constant object at 0x7da1b18d8fd0>]] + name[args]] + list[[<ast.Name object at 0x7da1b18d9ab0>, <ast.Name object at 0x7da1b18d99c0>]]]
call[name[do].run, parameter[name[cmd], binary_operation[constant[Upload to s3: %s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b18d9690>, <ast.Name object at 0x7da1b18d90f0>]]]]]
|
keyword[def] identifier[_upload_file_aws_cli] ( identifier[local_fname] , identifier[bucket] , identifier[keyname] , identifier[config] = keyword[None] , identifier[mditems] = keyword[None] ):
literal[string]
identifier[s3_fname] = literal[string] %( identifier[bucket] , identifier[keyname] )
identifier[args] =[ literal[string] , literal[string] , identifier[str] ( identifier[os] . identifier[path] . identifier[getsize] ( identifier[local_fname] ))]
keyword[if] identifier[config] :
keyword[if] identifier[config] . identifier[get] ( literal[string] ):
identifier[args] +=[ literal[string] , identifier[config] . identifier[get] ( literal[string] )]
keyword[if] identifier[config] . identifier[get] ( literal[string] ):
identifier[args] +=[ literal[string] , literal[string] ]
identifier[cmd] =[ identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[sys] . identifier[executable] ), literal[string] ), literal[string] , literal[string] ]+ identifier[args] +[ identifier[local_fname] , identifier[s3_fname] ]
identifier[do] . identifier[run] ( identifier[cmd] , literal[string] %( identifier[bucket] , identifier[keyname] ))
|
def _upload_file_aws_cli(local_fname, bucket, keyname, config=None, mditems=None):
"""Streaming upload via the standard AWS command line interface.
"""
s3_fname = 's3://%s/%s' % (bucket, keyname)
args = ['--sse', '--expected-size', str(os.path.getsize(local_fname))]
if config:
if config.get('region'):
args += ['--region', config.get('region')] # depends on [control=['if'], data=[]]
if config.get('reduced_redundancy'):
args += ['--storage-class', 'REDUCED_REDUNDANCY'] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
cmd = [os.path.join(os.path.dirname(sys.executable), 'aws'), 's3', 'cp'] + args + [local_fname, s3_fname]
do.run(cmd, 'Upload to s3: %s %s' % (bucket, keyname))
|
def add_device(self, device_id):
    """ Method for `Add device to collection <https://m2x.att.com/developer/documentation/v2/collections#Add-device-to-collection>`_ endpoint.

    :param device_id: ID of the Device being added to Collection
    :type device_id: str

    :return: whatever the API client's PUT call returns
    :raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request
    """
    endpoint = '/devices/{device_id}'.format(device_id=device_id)
    return self.api.put(self.subpath(endpoint))
|
def function[add_device, parameter[self, device_id]]:
constant[ Method for `Add device to collection <https://m2x.att.com/developer/documentation/v2/collections#Add-device-to-collection>`_ endpoint.
:param device_id: ID of the Device being added to Collection
:type device_id: str
:raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request
]
variable[path] assign[=] call[name[self].subpath, parameter[call[constant[/devices/{device_id}].format, parameter[]]]]
return[call[name[self].api.put, parameter[name[path]]]]
|
keyword[def] identifier[add_device] ( identifier[self] , identifier[device_id] ):
literal[string]
identifier[path] = identifier[self] . identifier[subpath] ( literal[string] . identifier[format] ( identifier[device_id] = identifier[device_id] ))
keyword[return] identifier[self] . identifier[api] . identifier[put] ( identifier[path] )
|
def add_device(self, device_id):
""" Method for `Add device to collection <https://m2x.att.com/developer/documentation/v2/collections#Add-device-to-collection>`_ endpoint.
:param device_id: ID of the Device being added to Collection
:type device_id: str
:raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request
"""
path = self.subpath('/devices/{device_id}'.format(device_id=device_id))
return self.api.put(path)
|
def mavlink_packet(self, msg):
    '''handle an incoming mavlink packet

    Updates console status fields (row 4) from RC_CHANNELS switch
    positions, SERVO_OUTPUT_RAW rotor-speed / forward-throttle outputs,
    and RPM readings.
    '''
    def show_switch(label, pwm):
        # Tri-state display for an RC switch input: no input (grey),
        # high PWM = switch ON (red, as a warning), otherwise OFF (green).
        if pwm <= 0:
            self.console.set_status(label, '%s:--' % label, fg='grey', row=4)
        elif pwm >= 1800:
            self.console.set_status(label, '%s:ON' % label, fg='red', row=4)
        else:
            self.console.set_status(label, '%s:OFF' % label, fg='green', row=4)

    def show_output(label, pwm, red_max, orange_max):
        # Banded display for a servo output value: no output (grey), then
        # red/orange/green as the PWM rises past the given thresholds.
        if pwm <= 0:
            self.console.set_status(label, '%s:--' % label, fg='grey', row=4)
        elif pwm <= red_max:
            self.console.set_status(label, '%s:%u' % (label, pwm), fg='red', row=4)
        elif pwm <= orange_max:
            self.console.set_status(label, '%s:%u' % (label, pwm), fg='orange', row=4)
        else:
            self.console.set_status(label, '%s:%u' % (label, pwm), fg='green', row=4)

    # 'type' shadowed the builtin and 'master' was never used in the
    # original; both cleaned up here.
    mtype = msg.get_type()
    # add some status fields
    if mtype in ['RC_CHANNELS']:
        show_switch('ILOCK', self.get_rc_input(msg, self.interlock_channel))
        show_switch('OVR', self.get_rc_input(msg, self.override_channel))
        show_switch('ZEROI', self.get_rc_input(msg, self.zero_I_channel))
        show_switch('NOVTOL', self.get_rc_input(msg, self.no_vtol_channel))
    if mtype in ['SERVO_OUTPUT_RAW']:
        show_output('RSC', self.get_pwm_output(msg, self.rsc_out_channel), 1200, 1600)
        show_output('FTHR', self.get_pwm_output(msg, self.fwd_thr_channel), 1100, 1500)
    if mtype in ['RPM']:
        rpm = msg.rpm1
        if rpm < 1000:
            rpm_colour = 'red'
        elif rpm < 2000:
            rpm_colour = 'orange'
        else:
            rpm_colour = 'green'
        self.console.set_status('RPM', 'RPM: %u' % rpm, fg=rpm_colour, row=4)
|
def function[mavlink_packet, parameter[self, msg]]:
constant[handle an incoming mavlink packet]
variable[type] assign[=] call[name[msg].get_type, parameter[]]
variable[master] assign[=] name[self].master
if compare[name[type] in list[[<ast.Constant object at 0x7da1b16bdf90>]]] begin[:]
variable[ilock] assign[=] call[name[self].get_rc_input, parameter[name[msg], name[self].interlock_channel]]
if compare[name[ilock] less_or_equal[<=] constant[0]] begin[:]
call[name[self].console.set_status, parameter[constant[ILOCK], constant[ILOCK:--]]]
variable[override] assign[=] call[name[self].get_rc_input, parameter[name[msg], name[self].override_channel]]
if compare[name[override] less_or_equal[<=] constant[0]] begin[:]
call[name[self].console.set_status, parameter[constant[OVR], constant[OVR:--]]]
variable[zeroi] assign[=] call[name[self].get_rc_input, parameter[name[msg], name[self].zero_I_channel]]
if compare[name[zeroi] less_or_equal[<=] constant[0]] begin[:]
call[name[self].console.set_status, parameter[constant[ZEROI], constant[ZEROI:--]]]
variable[novtol] assign[=] call[name[self].get_rc_input, parameter[name[msg], name[self].no_vtol_channel]]
if compare[name[novtol] less_or_equal[<=] constant[0]] begin[:]
call[name[self].console.set_status, parameter[constant[NOVTOL], constant[NOVTOL:--]]]
if compare[name[type] in list[[<ast.Constant object at 0x7da1b16beb00>]]] begin[:]
variable[rsc] assign[=] call[name[self].get_pwm_output, parameter[name[msg], name[self].rsc_out_channel]]
if compare[name[rsc] less_or_equal[<=] constant[0]] begin[:]
call[name[self].console.set_status, parameter[constant[RSC], constant[RSC:--]]]
variable[thr] assign[=] call[name[self].get_pwm_output, parameter[name[msg], name[self].fwd_thr_channel]]
if compare[name[thr] less_or_equal[<=] constant[0]] begin[:]
call[name[self].console.set_status, parameter[constant[FTHR], constant[FTHR:--]]]
if compare[name[type] in list[[<ast.Constant object at 0x7da1b1721330>]]] begin[:]
variable[rpm] assign[=] name[msg].rpm1
if compare[name[rpm] less[<] constant[1000]] begin[:]
variable[rpm_colour] assign[=] constant[red]
call[name[self].console.set_status, parameter[constant[RPM], binary_operation[constant[RPM: %u] <ast.Mod object at 0x7da2590d6920> name[rpm]]]]
|
keyword[def] identifier[mavlink_packet] ( identifier[self] , identifier[msg] ):
literal[string]
identifier[type] = identifier[msg] . identifier[get_type] ()
identifier[master] = identifier[self] . identifier[master]
keyword[if] identifier[type] keyword[in] [ literal[string] ]:
identifier[ilock] = identifier[self] . identifier[get_rc_input] ( identifier[msg] , identifier[self] . identifier[interlock_channel] )
keyword[if] identifier[ilock] <= literal[int] :
identifier[self] . identifier[console] . identifier[set_status] ( literal[string] , literal[string] , identifier[fg] = literal[string] , identifier[row] = literal[int] )
keyword[elif] identifier[ilock] >= literal[int] :
identifier[self] . identifier[console] . identifier[set_status] ( literal[string] , literal[string] , identifier[fg] = literal[string] , identifier[row] = literal[int] )
keyword[else] :
identifier[self] . identifier[console] . identifier[set_status] ( literal[string] , literal[string] , identifier[fg] = literal[string] , identifier[row] = literal[int] )
identifier[override] = identifier[self] . identifier[get_rc_input] ( identifier[msg] , identifier[self] . identifier[override_channel] )
keyword[if] identifier[override] <= literal[int] :
identifier[self] . identifier[console] . identifier[set_status] ( literal[string] , literal[string] , identifier[fg] = literal[string] , identifier[row] = literal[int] )
keyword[elif] identifier[override] >= literal[int] :
identifier[self] . identifier[console] . identifier[set_status] ( literal[string] , literal[string] , identifier[fg] = literal[string] , identifier[row] = literal[int] )
keyword[else] :
identifier[self] . identifier[console] . identifier[set_status] ( literal[string] , literal[string] , identifier[fg] = literal[string] , identifier[row] = literal[int] )
identifier[zeroi] = identifier[self] . identifier[get_rc_input] ( identifier[msg] , identifier[self] . identifier[zero_I_channel] )
keyword[if] identifier[zeroi] <= literal[int] :
identifier[self] . identifier[console] . identifier[set_status] ( literal[string] , literal[string] , identifier[fg] = literal[string] , identifier[row] = literal[int] )
keyword[elif] identifier[zeroi] >= literal[int] :
identifier[self] . identifier[console] . identifier[set_status] ( literal[string] , literal[string] , identifier[fg] = literal[string] , identifier[row] = literal[int] )
keyword[else] :
identifier[self] . identifier[console] . identifier[set_status] ( literal[string] , literal[string] , identifier[fg] = literal[string] , identifier[row] = literal[int] )
identifier[novtol] = identifier[self] . identifier[get_rc_input] ( identifier[msg] , identifier[self] . identifier[no_vtol_channel] )
keyword[if] identifier[novtol] <= literal[int] :
identifier[self] . identifier[console] . identifier[set_status] ( literal[string] , literal[string] , identifier[fg] = literal[string] , identifier[row] = literal[int] )
keyword[elif] identifier[novtol] >= literal[int] :
identifier[self] . identifier[console] . identifier[set_status] ( literal[string] , literal[string] , identifier[fg] = literal[string] , identifier[row] = literal[int] )
keyword[else] :
identifier[self] . identifier[console] . identifier[set_status] ( literal[string] , literal[string] , identifier[fg] = literal[string] , identifier[row] = literal[int] )
keyword[if] identifier[type] keyword[in] [ literal[string] ]:
identifier[rsc] = identifier[self] . identifier[get_pwm_output] ( identifier[msg] , identifier[self] . identifier[rsc_out_channel] )
keyword[if] identifier[rsc] <= literal[int] :
identifier[self] . identifier[console] . identifier[set_status] ( literal[string] , literal[string] , identifier[fg] = literal[string] , identifier[row] = literal[int] )
keyword[elif] identifier[rsc] <= literal[int] :
identifier[self] . identifier[console] . identifier[set_status] ( literal[string] , literal[string] % identifier[rsc] , identifier[fg] = literal[string] , identifier[row] = literal[int] )
keyword[elif] identifier[rsc] <= literal[int] :
identifier[self] . identifier[console] . identifier[set_status] ( literal[string] , literal[string] % identifier[rsc] , identifier[fg] = literal[string] , identifier[row] = literal[int] )
keyword[else] :
identifier[self] . identifier[console] . identifier[set_status] ( literal[string] , literal[string] % identifier[rsc] , identifier[fg] = literal[string] , identifier[row] = literal[int] )
identifier[thr] = identifier[self] . identifier[get_pwm_output] ( identifier[msg] , identifier[self] . identifier[fwd_thr_channel] )
keyword[if] identifier[thr] <= literal[int] :
identifier[self] . identifier[console] . identifier[set_status] ( literal[string] , literal[string] , identifier[fg] = literal[string] , identifier[row] = literal[int] )
keyword[elif] identifier[thr] <= literal[int] :
identifier[self] . identifier[console] . identifier[set_status] ( literal[string] , literal[string] % identifier[thr] , identifier[fg] = literal[string] , identifier[row] = literal[int] )
keyword[elif] identifier[thr] <= literal[int] :
identifier[self] . identifier[console] . identifier[set_status] ( literal[string] , literal[string] % identifier[thr] , identifier[fg] = literal[string] , identifier[row] = literal[int] )
keyword[else] :
identifier[self] . identifier[console] . identifier[set_status] ( literal[string] , literal[string] % identifier[thr] , identifier[fg] = literal[string] , identifier[row] = literal[int] )
keyword[if] identifier[type] keyword[in] [ literal[string] ]:
identifier[rpm] = identifier[msg] . identifier[rpm1]
keyword[if] identifier[rpm] < literal[int] :
identifier[rpm_colour] = literal[string]
keyword[elif] identifier[rpm] < literal[int] :
identifier[rpm_colour] = literal[string]
keyword[else] :
identifier[rpm_colour] = literal[string]
identifier[self] . identifier[console] . identifier[set_status] ( literal[string] , literal[string] % identifier[rpm] , identifier[fg] = identifier[rpm_colour] , identifier[row] = literal[int] )
|
def mavlink_packet(self, msg):
"""handle an incoming mavlink packet"""
type = msg.get_type()
master = self.master
# add some status fields
if type in ['RC_CHANNELS']:
ilock = self.get_rc_input(msg, self.interlock_channel)
if ilock <= 0:
self.console.set_status('ILOCK', 'ILOCK:--', fg='grey', row=4) # depends on [control=['if'], data=[]]
elif ilock >= 1800:
self.console.set_status('ILOCK', 'ILOCK:ON', fg='red', row=4) # depends on [control=['if'], data=[]]
else:
self.console.set_status('ILOCK', 'ILOCK:OFF', fg='green', row=4)
override = self.get_rc_input(msg, self.override_channel)
if override <= 0:
self.console.set_status('OVR', 'OVR:--', fg='grey', row=4) # depends on [control=['if'], data=[]]
elif override >= 1800:
self.console.set_status('OVR', 'OVR:ON', fg='red', row=4) # depends on [control=['if'], data=[]]
else:
self.console.set_status('OVR', 'OVR:OFF', fg='green', row=4)
zeroi = self.get_rc_input(msg, self.zero_I_channel)
if zeroi <= 0:
self.console.set_status('ZEROI', 'ZEROI:--', fg='grey', row=4) # depends on [control=['if'], data=[]]
elif zeroi >= 1800:
self.console.set_status('ZEROI', 'ZEROI:ON', fg='red', row=4) # depends on [control=['if'], data=[]]
else:
self.console.set_status('ZEROI', 'ZEROI:OFF', fg='green', row=4)
novtol = self.get_rc_input(msg, self.no_vtol_channel)
if novtol <= 0:
self.console.set_status('NOVTOL', 'NOVTOL:--', fg='grey', row=4) # depends on [control=['if'], data=[]]
elif novtol >= 1800:
self.console.set_status('NOVTOL', 'NOVTOL:ON', fg='red', row=4) # depends on [control=['if'], data=[]]
else:
self.console.set_status('NOVTOL', 'NOVTOL:OFF', fg='green', row=4) # depends on [control=['if'], data=[]]
if type in ['SERVO_OUTPUT_RAW']:
rsc = self.get_pwm_output(msg, self.rsc_out_channel)
if rsc <= 0:
self.console.set_status('RSC', 'RSC:--', fg='grey', row=4) # depends on [control=['if'], data=[]]
elif rsc <= 1200:
self.console.set_status('RSC', 'RSC:%u' % rsc, fg='red', row=4) # depends on [control=['if'], data=['rsc']]
elif rsc <= 1600:
self.console.set_status('RSC', 'RSC:%u' % rsc, fg='orange', row=4) # depends on [control=['if'], data=['rsc']]
else:
self.console.set_status('RSC', 'RSC:%u' % rsc, fg='green', row=4)
thr = self.get_pwm_output(msg, self.fwd_thr_channel)
if thr <= 0:
self.console.set_status('FTHR', 'FTHR:--', fg='grey', row=4) # depends on [control=['if'], data=[]]
elif thr <= 1100:
self.console.set_status('FTHR', 'FTHR:%u' % thr, fg='red', row=4) # depends on [control=['if'], data=['thr']]
elif thr <= 1500:
self.console.set_status('FTHR', 'FTHR:%u' % thr, fg='orange', row=4) # depends on [control=['if'], data=['thr']]
else:
self.console.set_status('FTHR', 'FTHR:%u' % thr, fg='green', row=4) # depends on [control=['if'], data=[]]
if type in ['RPM']:
rpm = msg.rpm1
if rpm < 1000:
rpm_colour = 'red' # depends on [control=['if'], data=[]]
elif rpm < 2000:
rpm_colour = 'orange' # depends on [control=['if'], data=[]]
else:
rpm_colour = 'green'
self.console.set_status('RPM', 'RPM: %u' % rpm, fg=rpm_colour, row=4) # depends on [control=['if'], data=[]]
|
def is_production(flag_name: str = 'PRODUCTION', strict: bool = False):
    """Tell whether the environment is flagged as production.

    Delegates to ``env_bool_flag`` to interpret the environment variable
    named by ``flag_name`` as a boolean.

    :param flag_name: environment variable name
    :param strict: raise a ``ValueError`` if variable does not look like a normal boolean
    :return: ``True`` if has truthy ``PRODUCTION`` env, ``False`` otherwise
    """
    production = env_bool_flag(flag_name, strict=strict)
    return production
|
def function[is_production, parameter[flag_name, strict]]:
constant[
Reads env ``PRODUCTION`` variable as a boolean.
:param flag_name: environment variable name
:param strict: raise a ``ValueError`` if variable does not look like a normal boolean
:return: ``True`` if has truthy ``PRODUCTION`` env, ``False`` otherwise
]
return[call[name[env_bool_flag], parameter[name[flag_name]]]]
|
keyword[def] identifier[is_production] ( identifier[flag_name] : identifier[str] = literal[string] , identifier[strict] : identifier[bool] = keyword[False] ):
literal[string]
keyword[return] identifier[env_bool_flag] ( identifier[flag_name] , identifier[strict] = identifier[strict] )
|
def is_production(flag_name: str='PRODUCTION', strict: bool=False):
"""
Reads env ``PRODUCTION`` variable as a boolean.
:param flag_name: environment variable name
:param strict: raise a ``ValueError`` if variable does not look like a normal boolean
:return: ``True`` if has truthy ``PRODUCTION`` env, ``False`` otherwise
"""
return env_bool_flag(flag_name, strict=strict)
|
def response(self):
    """Return the response to a standard dt impulse.

    Builds the impulse response recursively: each value is the MA
    coefficient for that delay (while the index is within the MA order)
    plus the AR-weighted feedback of the previously computed values.

    Returns a :class:`numpy.ndarray` with one entry per MA delay.
    """
    # The original kept a running `sum_values` accumulator that was never
    # read anywhere; it has been removed.
    ma_coefs = self.ma_coefs
    ar_coefs = self.ar_coefs
    ma_order = self.ma_order
    values = []
    for idx in range(len(self.ma.delays)):
        value = 0.
        if idx < ma_order:
            # Direct MA contribution for delays inside the MA order.
            value += ma_coefs[idx]
        for jdx, ar_coef in enumerate(ar_coefs):
            # Feed back earlier response values through the AR coefficients.
            zdx = idx-jdx-1
            if zdx >= 0:
                value += ar_coef*values[zdx]
        values.append(value)
    return numpy.array(values)
|
def function[response, parameter[self]]:
constant[Return the response to a standard dt impulse.]
variable[values] assign[=] list[[]]
variable[sum_values] assign[=] constant[0.0]
variable[ma_coefs] assign[=] name[self].ma_coefs
variable[ar_coefs] assign[=] name[self].ar_coefs
variable[ma_order] assign[=] name[self].ma_order
for taget[name[idx]] in starred[call[name[range], parameter[call[name[len], parameter[name[self].ma.delays]]]]] begin[:]
variable[value] assign[=] constant[0.0]
if compare[name[idx] less[<] name[ma_order]] begin[:]
<ast.AugAssign object at 0x7da20c7cba30>
for taget[tuple[[<ast.Name object at 0x7da20c7cb610>, <ast.Name object at 0x7da20c7ca2f0>]]] in starred[call[name[enumerate], parameter[name[ar_coefs]]]] begin[:]
variable[zdx] assign[=] binary_operation[binary_operation[name[idx] - name[jdx]] - constant[1]]
if compare[name[zdx] greater_or_equal[>=] constant[0]] begin[:]
<ast.AugAssign object at 0x7da20c7cbaf0>
call[name[values].append, parameter[name[value]]]
<ast.AugAssign object at 0x7da20c7cbac0>
return[call[name[numpy].array, parameter[name[values]]]]
|
keyword[def] identifier[response] ( identifier[self] ):
literal[string]
identifier[values] =[]
identifier[sum_values] = literal[int]
identifier[ma_coefs] = identifier[self] . identifier[ma_coefs]
identifier[ar_coefs] = identifier[self] . identifier[ar_coefs]
identifier[ma_order] = identifier[self] . identifier[ma_order]
keyword[for] identifier[idx] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[ma] . identifier[delays] )):
identifier[value] = literal[int]
keyword[if] identifier[idx] < identifier[ma_order] :
identifier[value] += identifier[ma_coefs] [ identifier[idx] ]
keyword[for] identifier[jdx] , identifier[ar_coef] keyword[in] identifier[enumerate] ( identifier[ar_coefs] ):
identifier[zdx] = identifier[idx] - identifier[jdx] - literal[int]
keyword[if] identifier[zdx] >= literal[int] :
identifier[value] += identifier[ar_coef] * identifier[values] [ identifier[zdx] ]
identifier[values] . identifier[append] ( identifier[value] )
identifier[sum_values] += identifier[value]
keyword[return] identifier[numpy] . identifier[array] ( identifier[values] )
|
def response(self):
"""Return the response to a standard dt impulse."""
values = []
sum_values = 0.0
ma_coefs = self.ma_coefs
ar_coefs = self.ar_coefs
ma_order = self.ma_order
for idx in range(len(self.ma.delays)):
value = 0.0
if idx < ma_order:
value += ma_coefs[idx] # depends on [control=['if'], data=['idx']]
for (jdx, ar_coef) in enumerate(ar_coefs):
zdx = idx - jdx - 1
if zdx >= 0:
value += ar_coef * values[zdx] # depends on [control=['if'], data=['zdx']] # depends on [control=['for'], data=[]]
values.append(value)
sum_values += value # depends on [control=['for'], data=['idx']]
return numpy.array(values)
|
def execute(self, method, *args, **kargs):
result = None
'''
max 10 rechecks
'''
for i in range(0, 10):
try:
method_map = {
'get_lead_by_id': self.get_lead_by_id,
'get_multiple_leads_by_filter_type': self.get_multiple_leads_by_filter_type,
'get_multiple_leads_by_list_id': self.get_multiple_leads_by_list_id,
'get_multiple_leads_by_list_id_yield': self.get_multiple_leads_by_list_id_yield,
'get_multiple_leads_by_program_id': self.get_multiple_leads_by_program_id,
'get_multiple_leads_by_program_id_yield': self.get_multiple_leads_by_program_id_yield,
'change_lead_program_status': self.change_lead_program_status,
'create_update_leads': self.create_update_leads,
'associate_lead': self.associate_lead,
'push_lead': self.push_lead,
'merge_lead': self.merge_lead,
'get_lead_partitions': self.get_lead_partitions,
'create_list': self.create_list,
'update_list': self.update_list,
'delete_list': self.delete_list,
'get_list_by_id': self.get_list_by_id,
'get_list_by_name': self.get_list_by_name,
'get_multiple_lists': self.get_multiple_lists,
'browse_lists': self.browse_lists,
'add_leads_to_list': self.add_leads_to_list,
'remove_leads_from_list': self.remove_leads_from_list,
'member_of_list': self.member_of_list,
'get_campaign_by_id': self.get_campaign_by_id,
'get_multiple_campaigns': self.get_multiple_campaigns,
'schedule_campaign': self.schedule_campaign,
'request_campaign': self.request_campaign,
'import_lead': self.import_lead,
'get_import_lead_status': self.get_import_lead_status,
'get_import_failure_file': self.get_import_failure_file,
'get_import_warning_file': self.get_import_warning_file,
'describe': self.describe,
'get_activity_types': self.get_activity_types,
'get_paging_token': self.get_paging_token,
'get_lead_activities': self.get_lead_activities,
'get_lead_activities_yield': self.get_lead_activities_yield,
'get_lead_changes': self.get_lead_changes,
'get_lead_changes_yield': self.get_lead_changes_yield,
'add_custom_activities': self.add_custom_activities,
'get_daily_usage': self.get_daily_usage,
'get_last_7_days_usage': self.get_last_7_days_usage,
'get_daily_errors': self.get_daily_errors,
'get_last_7_days_errors': self.get_last_7_days_errors,
'delete_lead': self.delete_lead,
'get_deleted_leads': self.get_deleted_leads,
'update_leads_partition': self.update_leads_partition,
'create_folder': self.create_folder,
'get_folder_by_id': self.get_folder_by_id,
'get_folder_by_name': self.get_folder_by_name,
'get_folder_contents': self.get_folder_contents,
'update_folder': self.update_folder,
'delete_folder': self.delete_folder,
'browse_folders': self.browse_folders,
'create_token': self.create_token,
'get_tokens': self.get_tokens,
'delete_tokens': self.delete_tokens,
'create_email_template': self.create_email_template,
'get_email_template_by_id': self.get_email_template_by_id,
'get_email_template_by_name': self.get_email_template_by_name,
'update_email_template': self.update_email_template,
'delete_email_template': self.delete_email_template,
'get_email_templates': self.get_email_templates,
'get_email_templates_yield': self.get_email_templates_yield,
'get_email_template_content': self.get_email_template_content,
'update_email_template_content': self.update_email_template_content,
'approve_email_template': self.approve_email_template,
'unapprove_email_template': self.unapprove_email_template,
'discard_email_template_draft': self.discard_email_template_draft,
'clone_email_template': self.clone_email_template,
'create_email': self.create_email,
'get_email_by_id': self.get_email_by_id,
'get_email_by_name': self.get_email_by_name,
'delete_email': self.delete_email,
'update_email': self.update_email,
'get_emails': self.get_emails,
'get_emails_yield': self.get_emails_yield,
'get_email_content': self.get_email_content,
'update_email_content': self.update_email_content,
'update_email_content_in_editable_section': self.update_email_content_in_editable_section,
'get_email_dynamic_content': self.get_email_dynamic_content,
'update_email_dynamic_content': self.update_email_dynamic_content,
'approve_email': self.approve_email,
'unapprove_email': self.unapprove_email,
'discard_email_draft': self.discard_email_draft,
'clone_email': self.clone_email,
'send_sample_email': self.send_sample_email,
'get_email_full_content': self.get_email_full_content,
'create_landing_page': self.create_landing_page,
'get_landing_page_by_id': self.get_landing_page_by_id,
'get_landing_page_by_name': self.get_landing_page_by_name,
'delete_landing_page': self.delete_landing_page,
'update_landing_page': self.update_landing_page,
'get_landing_pages': self.get_landing_pages,
'get_landing_pages_yield': self.get_landing_pages_yield,
'get_landing_page_content': self.get_landing_page_content,
'create_landing_page_content_section': self.create_landing_page_content_section,
'update_landing_page_content_section': self.update_landing_page_content_section,
'delete_landing_page_content_section': self.delete_landing_page_content_section,
'get_landing_page_dynamic_content': self.get_landing_page_dynamic_content,
'update_landing_page_dynamic_content': self.update_landing_page_dynamic_content,
'approve_landing_page': self.approve_landing_page,
'unapprove_landing_page': self.unapprove_landing_page,
'discard_landing_page_draft': self.discard_landing_page_draft,
'clone_landing_page': self.clone_landing_page,
'create_form': self.create_form,
'get_form_by_id': self.get_form_by_id,
'get_form_by_name': self.get_form_by_name,
'delete_form': self.delete_form,
'update_form': self.update_form,
'get_forms': self.get_forms,
'get_forms_yield': self.get_forms_yield,
'get_form_fields': self.get_form_fields,
'create_form_field': self.create_form_field,
'update_form_field': self.update_form_field,
'delete_form_field': self.delete_form_field,
'approve_form': self.approve_form,
'unapprove_form': self.unapprove_form,
'discard_form_draft': self.discard_form_draft,
'clone_form': self.clone_form,
'create_file': self.create_file,
'get_file_by_id': self.get_file_by_id,
'get_file_by_name': self.get_file_by_name,
'list_files': self.list_files,
'get_files_yield': self.get_files_yield,
'update_file_content': self.update_file_content,
'create_snippet': self.create_snippet,
'get_snippet_by_id': self.get_snippet_by_id,
'delete_snippet': self.delete_snippet,
'update_snippet': self.update_snippet,
'get_snippets': self.get_snippets,
'get_snippets_yield': self.get_snippets_yield,
'get_snippet_content': self.get_snippet_content,
'update_snippet_content': self.update_snippet_content,
'approve_snippet': self.approve_snippet,
'unapprove_snippet': self.unapprove_snippet,
'discard_snippet_draft': self.discard_snippet_draft,
'clone_snippet': self.clone_snippet,
'update_snippet_dynamic_content': self.update_snippet_dynamic_content,
'get_snippet_dynamic_content': self.get_snippet_dynamic_content,
'get_segmentations': self.get_segmentations,
'get_segments': self.get_segments,
'create_landing_page_template': self.create_landing_page_template,
'get_landing_page_template_by_id': self.get_landing_page_template_by_id,
'get_landing_page_template_by_name': self.get_landing_page_template_by_name,
'get_landing_page_templates': self.get_landing_page_templates,
'get_landing_page_templates_yield': self.get_landing_page_templates_yield,
'get_landing_page_template_content': self.get_landing_page_template_content,
'update_landing_page_template_content': self.update_landing_page_template_content,
'update_landing_page_template': self.update_landing_page_template,
'delete_landing_page_template': self.delete_landing_page_template,
'approve_landing_page_template': self.approve_landing_page_template,
'unapprove_landing_page_template': self.unapprove_landing_page_template,
'discard_landing_page_template_draft': self.discard_landing_page_template_draft,
'clone_landing_page_template': self.clone_landing_page_template,
'create_program': self.create_program,
'get_program_by_id': self.get_program_by_id,
'get_program_by_name': self.get_program_by_name,
'get_program_by_tag_type': self.get_program_by_tag_type,
'update_program': self.update_program,
'delete_program': self.delete_program,
'browse_programs': self.browse_programs,
'get_programs_yield': self.get_programs_yield,
'clone_program': self.clone_program,
'approve_program': self.approve_program,
'unapprove_program': self.unapprove_program,
'get_channels': self.get_channels,
'get_channel_by_name': self.get_channel_by_name,
'get_tags': self.get_tags,
'get_tag_by_name': self.get_tag_by_name,
'get_list_of_custom_objects': self.get_list_of_custom_objects,
'describe_custom_object': self.describe_custom_object,
'create_update_custom_objects': self.create_update_custom_objects,
'delete_custom_objects': self.delete_custom_objects,
'get_custom_objects': self.get_custom_objects,
'describe_opportunity': self.describe_opportunity,
'create_update_opportunities': self.create_update_opportunities,
'delete_opportunities': self.delete_opportunities,
'get_opportunities': self.get_opportunities,
'describe_opportunity_role': self.describe_opportunity_role,
'create_update_opportunities_roles': self.create_update_opportunities_roles,
'delete_opportunity_roles': self.delete_opportunity_roles,
'get_opportunity_roles': self.get_opportunity_roles,
'describe_company': self.describe_company,
'create_update_companies': self.create_update_companies,
'delete_companies': self.delete_companies,
'get_companies': self.get_companies,
'describe_sales_person': self.describe_sales_person,
'create_update_sales_persons': self.create_update_sales_persons,
'delete_sales_persons': self.delete_sales_persons,
'get_sales_persons': self.get_sales_persons,
'get_custom_activity_types': self.get_custom_activity_types,
'describe_custom_activity_type': self.describe_custom_activity_type,
'create_custom_activity_type': self.create_custom_activity_type,
'update_custom_activity_type': self.update_custom_activity_type,
'approve_custom_activity_type': self.approve_custom_activity_type,
'create_custom_activity_type_attribute': self.create_custom_activity_type_attribute,
'discard_custom_activity_type_draft': self.discard_custom_activity_type_draft,
'delete_custom_activity_type': self.delete_custom_activity_type,
'update_custom_activity_type_attribute': self.update_custom_activity_type_attribute,
'delete_custom_activity_type_attribute': self.delete_custom_activity_type_attribute,
'get_leads_export_jobs_list': self.get_leads_export_jobs_list,
'get_activities_export_jobs_list': self.get_activities_export_jobs_list,
'create_leads_export_job': self.create_leads_export_job,
'create_activities_export_job': self.create_activities_export_job,
'enqueue_leads_export_job': self.enqueue_leads_export_job,
'enqueue_activities_export_job': self.enqueue_activities_export_job,
'cancel_leads_export_job': self.cancel_leads_export_job,
'cancel_activities_export_job': self.cancel_activities_export_job,
'get_leads_export_job_status': self.get_leads_export_job_status,
'get_activities_export_job_status': self.get_activities_export_job_status,
'get_leads_export_job_file': self.get_leads_export_job_file,
'get_activities_export_job_file': self.get_activities_export_job_file
}
result = method_map[method](*args, **kargs)
except MarketoException as e:
'''
601 -> auth token not valid
602 -> auth token expired
'''
if e.code in ['601', '602']:
self.authenticate()
continue
else:
raise Exception({'message': e.message, 'code': e.code})
break
return result
|
def function[execute, parameter[self, method]]:
variable[result] assign[=] constant[None]
constant[
max 10 rechecks
]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], constant[10]]]] begin[:]
<ast.Try object at 0x7da1b1387c40>
break
return[name[result]]
|
keyword[def] identifier[execute] ( identifier[self] , identifier[method] ,* identifier[args] ,** identifier[kargs] ):
identifier[result] = keyword[None]
literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] ):
keyword[try] :
identifier[method_map] ={
literal[string] : identifier[self] . identifier[get_lead_by_id] ,
literal[string] : identifier[self] . identifier[get_multiple_leads_by_filter_type] ,
literal[string] : identifier[self] . identifier[get_multiple_leads_by_list_id] ,
literal[string] : identifier[self] . identifier[get_multiple_leads_by_list_id_yield] ,
literal[string] : identifier[self] . identifier[get_multiple_leads_by_program_id] ,
literal[string] : identifier[self] . identifier[get_multiple_leads_by_program_id_yield] ,
literal[string] : identifier[self] . identifier[change_lead_program_status] ,
literal[string] : identifier[self] . identifier[create_update_leads] ,
literal[string] : identifier[self] . identifier[associate_lead] ,
literal[string] : identifier[self] . identifier[push_lead] ,
literal[string] : identifier[self] . identifier[merge_lead] ,
literal[string] : identifier[self] . identifier[get_lead_partitions] ,
literal[string] : identifier[self] . identifier[create_list] ,
literal[string] : identifier[self] . identifier[update_list] ,
literal[string] : identifier[self] . identifier[delete_list] ,
literal[string] : identifier[self] . identifier[get_list_by_id] ,
literal[string] : identifier[self] . identifier[get_list_by_name] ,
literal[string] : identifier[self] . identifier[get_multiple_lists] ,
literal[string] : identifier[self] . identifier[browse_lists] ,
literal[string] : identifier[self] . identifier[add_leads_to_list] ,
literal[string] : identifier[self] . identifier[remove_leads_from_list] ,
literal[string] : identifier[self] . identifier[member_of_list] ,
literal[string] : identifier[self] . identifier[get_campaign_by_id] ,
literal[string] : identifier[self] . identifier[get_multiple_campaigns] ,
literal[string] : identifier[self] . identifier[schedule_campaign] ,
literal[string] : identifier[self] . identifier[request_campaign] ,
literal[string] : identifier[self] . identifier[import_lead] ,
literal[string] : identifier[self] . identifier[get_import_lead_status] ,
literal[string] : identifier[self] . identifier[get_import_failure_file] ,
literal[string] : identifier[self] . identifier[get_import_warning_file] ,
literal[string] : identifier[self] . identifier[describe] ,
literal[string] : identifier[self] . identifier[get_activity_types] ,
literal[string] : identifier[self] . identifier[get_paging_token] ,
literal[string] : identifier[self] . identifier[get_lead_activities] ,
literal[string] : identifier[self] . identifier[get_lead_activities_yield] ,
literal[string] : identifier[self] . identifier[get_lead_changes] ,
literal[string] : identifier[self] . identifier[get_lead_changes_yield] ,
literal[string] : identifier[self] . identifier[add_custom_activities] ,
literal[string] : identifier[self] . identifier[get_daily_usage] ,
literal[string] : identifier[self] . identifier[get_last_7_days_usage] ,
literal[string] : identifier[self] . identifier[get_daily_errors] ,
literal[string] : identifier[self] . identifier[get_last_7_days_errors] ,
literal[string] : identifier[self] . identifier[delete_lead] ,
literal[string] : identifier[self] . identifier[get_deleted_leads] ,
literal[string] : identifier[self] . identifier[update_leads_partition] ,
literal[string] : identifier[self] . identifier[create_folder] ,
literal[string] : identifier[self] . identifier[get_folder_by_id] ,
literal[string] : identifier[self] . identifier[get_folder_by_name] ,
literal[string] : identifier[self] . identifier[get_folder_contents] ,
literal[string] : identifier[self] . identifier[update_folder] ,
literal[string] : identifier[self] . identifier[delete_folder] ,
literal[string] : identifier[self] . identifier[browse_folders] ,
literal[string] : identifier[self] . identifier[create_token] ,
literal[string] : identifier[self] . identifier[get_tokens] ,
literal[string] : identifier[self] . identifier[delete_tokens] ,
literal[string] : identifier[self] . identifier[create_email_template] ,
literal[string] : identifier[self] . identifier[get_email_template_by_id] ,
literal[string] : identifier[self] . identifier[get_email_template_by_name] ,
literal[string] : identifier[self] . identifier[update_email_template] ,
literal[string] : identifier[self] . identifier[delete_email_template] ,
literal[string] : identifier[self] . identifier[get_email_templates] ,
literal[string] : identifier[self] . identifier[get_email_templates_yield] ,
literal[string] : identifier[self] . identifier[get_email_template_content] ,
literal[string] : identifier[self] . identifier[update_email_template_content] ,
literal[string] : identifier[self] . identifier[approve_email_template] ,
literal[string] : identifier[self] . identifier[unapprove_email_template] ,
literal[string] : identifier[self] . identifier[discard_email_template_draft] ,
literal[string] : identifier[self] . identifier[clone_email_template] ,
literal[string] : identifier[self] . identifier[create_email] ,
literal[string] : identifier[self] . identifier[get_email_by_id] ,
literal[string] : identifier[self] . identifier[get_email_by_name] ,
literal[string] : identifier[self] . identifier[delete_email] ,
literal[string] : identifier[self] . identifier[update_email] ,
literal[string] : identifier[self] . identifier[get_emails] ,
literal[string] : identifier[self] . identifier[get_emails_yield] ,
literal[string] : identifier[self] . identifier[get_email_content] ,
literal[string] : identifier[self] . identifier[update_email_content] ,
literal[string] : identifier[self] . identifier[update_email_content_in_editable_section] ,
literal[string] : identifier[self] . identifier[get_email_dynamic_content] ,
literal[string] : identifier[self] . identifier[update_email_dynamic_content] ,
literal[string] : identifier[self] . identifier[approve_email] ,
literal[string] : identifier[self] . identifier[unapprove_email] ,
literal[string] : identifier[self] . identifier[discard_email_draft] ,
literal[string] : identifier[self] . identifier[clone_email] ,
literal[string] : identifier[self] . identifier[send_sample_email] ,
literal[string] : identifier[self] . identifier[get_email_full_content] ,
literal[string] : identifier[self] . identifier[create_landing_page] ,
literal[string] : identifier[self] . identifier[get_landing_page_by_id] ,
literal[string] : identifier[self] . identifier[get_landing_page_by_name] ,
literal[string] : identifier[self] . identifier[delete_landing_page] ,
literal[string] : identifier[self] . identifier[update_landing_page] ,
literal[string] : identifier[self] . identifier[get_landing_pages] ,
literal[string] : identifier[self] . identifier[get_landing_pages_yield] ,
literal[string] : identifier[self] . identifier[get_landing_page_content] ,
literal[string] : identifier[self] . identifier[create_landing_page_content_section] ,
literal[string] : identifier[self] . identifier[update_landing_page_content_section] ,
literal[string] : identifier[self] . identifier[delete_landing_page_content_section] ,
literal[string] : identifier[self] . identifier[get_landing_page_dynamic_content] ,
literal[string] : identifier[self] . identifier[update_landing_page_dynamic_content] ,
literal[string] : identifier[self] . identifier[approve_landing_page] ,
literal[string] : identifier[self] . identifier[unapprove_landing_page] ,
literal[string] : identifier[self] . identifier[discard_landing_page_draft] ,
literal[string] : identifier[self] . identifier[clone_landing_page] ,
literal[string] : identifier[self] . identifier[create_form] ,
literal[string] : identifier[self] . identifier[get_form_by_id] ,
literal[string] : identifier[self] . identifier[get_form_by_name] ,
literal[string] : identifier[self] . identifier[delete_form] ,
literal[string] : identifier[self] . identifier[update_form] ,
literal[string] : identifier[self] . identifier[get_forms] ,
literal[string] : identifier[self] . identifier[get_forms_yield] ,
literal[string] : identifier[self] . identifier[get_form_fields] ,
literal[string] : identifier[self] . identifier[create_form_field] ,
literal[string] : identifier[self] . identifier[update_form_field] ,
literal[string] : identifier[self] . identifier[delete_form_field] ,
literal[string] : identifier[self] . identifier[approve_form] ,
literal[string] : identifier[self] . identifier[unapprove_form] ,
literal[string] : identifier[self] . identifier[discard_form_draft] ,
literal[string] : identifier[self] . identifier[clone_form] ,
literal[string] : identifier[self] . identifier[create_file] ,
literal[string] : identifier[self] . identifier[get_file_by_id] ,
literal[string] : identifier[self] . identifier[get_file_by_name] ,
literal[string] : identifier[self] . identifier[list_files] ,
literal[string] : identifier[self] . identifier[get_files_yield] ,
literal[string] : identifier[self] . identifier[update_file_content] ,
literal[string] : identifier[self] . identifier[create_snippet] ,
literal[string] : identifier[self] . identifier[get_snippet_by_id] ,
literal[string] : identifier[self] . identifier[delete_snippet] ,
literal[string] : identifier[self] . identifier[update_snippet] ,
literal[string] : identifier[self] . identifier[get_snippets] ,
literal[string] : identifier[self] . identifier[get_snippets_yield] ,
literal[string] : identifier[self] . identifier[get_snippet_content] ,
literal[string] : identifier[self] . identifier[update_snippet_content] ,
literal[string] : identifier[self] . identifier[approve_snippet] ,
literal[string] : identifier[self] . identifier[unapprove_snippet] ,
literal[string] : identifier[self] . identifier[discard_snippet_draft] ,
literal[string] : identifier[self] . identifier[clone_snippet] ,
literal[string] : identifier[self] . identifier[update_snippet_dynamic_content] ,
literal[string] : identifier[self] . identifier[get_snippet_dynamic_content] ,
literal[string] : identifier[self] . identifier[get_segmentations] ,
literal[string] : identifier[self] . identifier[get_segments] ,
literal[string] : identifier[self] . identifier[create_landing_page_template] ,
literal[string] : identifier[self] . identifier[get_landing_page_template_by_id] ,
literal[string] : identifier[self] . identifier[get_landing_page_template_by_name] ,
literal[string] : identifier[self] . identifier[get_landing_page_templates] ,
literal[string] : identifier[self] . identifier[get_landing_page_templates_yield] ,
literal[string] : identifier[self] . identifier[get_landing_page_template_content] ,
literal[string] : identifier[self] . identifier[update_landing_page_template_content] ,
literal[string] : identifier[self] . identifier[update_landing_page_template] ,
literal[string] : identifier[self] . identifier[delete_landing_page_template] ,
literal[string] : identifier[self] . identifier[approve_landing_page_template] ,
literal[string] : identifier[self] . identifier[unapprove_landing_page_template] ,
literal[string] : identifier[self] . identifier[discard_landing_page_template_draft] ,
literal[string] : identifier[self] . identifier[clone_landing_page_template] ,
literal[string] : identifier[self] . identifier[create_program] ,
literal[string] : identifier[self] . identifier[get_program_by_id] ,
literal[string] : identifier[self] . identifier[get_program_by_name] ,
literal[string] : identifier[self] . identifier[get_program_by_tag_type] ,
literal[string] : identifier[self] . identifier[update_program] ,
literal[string] : identifier[self] . identifier[delete_program] ,
literal[string] : identifier[self] . identifier[browse_programs] ,
literal[string] : identifier[self] . identifier[get_programs_yield] ,
literal[string] : identifier[self] . identifier[clone_program] ,
literal[string] : identifier[self] . identifier[approve_program] ,
literal[string] : identifier[self] . identifier[unapprove_program] ,
literal[string] : identifier[self] . identifier[get_channels] ,
literal[string] : identifier[self] . identifier[get_channel_by_name] ,
literal[string] : identifier[self] . identifier[get_tags] ,
literal[string] : identifier[self] . identifier[get_tag_by_name] ,
literal[string] : identifier[self] . identifier[get_list_of_custom_objects] ,
literal[string] : identifier[self] . identifier[describe_custom_object] ,
literal[string] : identifier[self] . identifier[create_update_custom_objects] ,
literal[string] : identifier[self] . identifier[delete_custom_objects] ,
literal[string] : identifier[self] . identifier[get_custom_objects] ,
literal[string] : identifier[self] . identifier[describe_opportunity] ,
literal[string] : identifier[self] . identifier[create_update_opportunities] ,
literal[string] : identifier[self] . identifier[delete_opportunities] ,
literal[string] : identifier[self] . identifier[get_opportunities] ,
literal[string] : identifier[self] . identifier[describe_opportunity_role] ,
literal[string] : identifier[self] . identifier[create_update_opportunities_roles] ,
literal[string] : identifier[self] . identifier[delete_opportunity_roles] ,
literal[string] : identifier[self] . identifier[get_opportunity_roles] ,
literal[string] : identifier[self] . identifier[describe_company] ,
literal[string] : identifier[self] . identifier[create_update_companies] ,
literal[string] : identifier[self] . identifier[delete_companies] ,
literal[string] : identifier[self] . identifier[get_companies] ,
literal[string] : identifier[self] . identifier[describe_sales_person] ,
literal[string] : identifier[self] . identifier[create_update_sales_persons] ,
literal[string] : identifier[self] . identifier[delete_sales_persons] ,
literal[string] : identifier[self] . identifier[get_sales_persons] ,
literal[string] : identifier[self] . identifier[get_custom_activity_types] ,
literal[string] : identifier[self] . identifier[describe_custom_activity_type] ,
literal[string] : identifier[self] . identifier[create_custom_activity_type] ,
literal[string] : identifier[self] . identifier[update_custom_activity_type] ,
literal[string] : identifier[self] . identifier[approve_custom_activity_type] ,
literal[string] : identifier[self] . identifier[create_custom_activity_type_attribute] ,
literal[string] : identifier[self] . identifier[discard_custom_activity_type_draft] ,
literal[string] : identifier[self] . identifier[delete_custom_activity_type] ,
literal[string] : identifier[self] . identifier[update_custom_activity_type_attribute] ,
literal[string] : identifier[self] . identifier[delete_custom_activity_type_attribute] ,
literal[string] : identifier[self] . identifier[get_leads_export_jobs_list] ,
literal[string] : identifier[self] . identifier[get_activities_export_jobs_list] ,
literal[string] : identifier[self] . identifier[create_leads_export_job] ,
literal[string] : identifier[self] . identifier[create_activities_export_job] ,
literal[string] : identifier[self] . identifier[enqueue_leads_export_job] ,
literal[string] : identifier[self] . identifier[enqueue_activities_export_job] ,
literal[string] : identifier[self] . identifier[cancel_leads_export_job] ,
literal[string] : identifier[self] . identifier[cancel_activities_export_job] ,
literal[string] : identifier[self] . identifier[get_leads_export_job_status] ,
literal[string] : identifier[self] . identifier[get_activities_export_job_status] ,
literal[string] : identifier[self] . identifier[get_leads_export_job_file] ,
literal[string] : identifier[self] . identifier[get_activities_export_job_file]
}
identifier[result] = identifier[method_map] [ identifier[method] ](* identifier[args] ,** identifier[kargs] )
keyword[except] identifier[MarketoException] keyword[as] identifier[e] :
literal[string]
keyword[if] identifier[e] . identifier[code] keyword[in] [ literal[string] , literal[string] ]:
identifier[self] . identifier[authenticate] ()
keyword[continue]
keyword[else] :
keyword[raise] identifier[Exception] ({ literal[string] : identifier[e] . identifier[message] , literal[string] : identifier[e] . identifier[code] })
keyword[break]
keyword[return] identifier[result]
|
def execute(self, method, *args, **kargs):
result = None
'\n max 10 rechecks\n '
for i in range(0, 10):
try:
method_map = {'get_lead_by_id': self.get_lead_by_id, 'get_multiple_leads_by_filter_type': self.get_multiple_leads_by_filter_type, 'get_multiple_leads_by_list_id': self.get_multiple_leads_by_list_id, 'get_multiple_leads_by_list_id_yield': self.get_multiple_leads_by_list_id_yield, 'get_multiple_leads_by_program_id': self.get_multiple_leads_by_program_id, 'get_multiple_leads_by_program_id_yield': self.get_multiple_leads_by_program_id_yield, 'change_lead_program_status': self.change_lead_program_status, 'create_update_leads': self.create_update_leads, 'associate_lead': self.associate_lead, 'push_lead': self.push_lead, 'merge_lead': self.merge_lead, 'get_lead_partitions': self.get_lead_partitions, 'create_list': self.create_list, 'update_list': self.update_list, 'delete_list': self.delete_list, 'get_list_by_id': self.get_list_by_id, 'get_list_by_name': self.get_list_by_name, 'get_multiple_lists': self.get_multiple_lists, 'browse_lists': self.browse_lists, 'add_leads_to_list': self.add_leads_to_list, 'remove_leads_from_list': self.remove_leads_from_list, 'member_of_list': self.member_of_list, 'get_campaign_by_id': self.get_campaign_by_id, 'get_multiple_campaigns': self.get_multiple_campaigns, 'schedule_campaign': self.schedule_campaign, 'request_campaign': self.request_campaign, 'import_lead': self.import_lead, 'get_import_lead_status': self.get_import_lead_status, 'get_import_failure_file': self.get_import_failure_file, 'get_import_warning_file': self.get_import_warning_file, 'describe': self.describe, 'get_activity_types': self.get_activity_types, 'get_paging_token': self.get_paging_token, 'get_lead_activities': self.get_lead_activities, 'get_lead_activities_yield': self.get_lead_activities_yield, 'get_lead_changes': self.get_lead_changes, 'get_lead_changes_yield': self.get_lead_changes_yield, 'add_custom_activities': self.add_custom_activities, 'get_daily_usage': self.get_daily_usage, 'get_last_7_days_usage': self.get_last_7_days_usage, 'get_daily_errors': 
self.get_daily_errors, 'get_last_7_days_errors': self.get_last_7_days_errors, 'delete_lead': self.delete_lead, 'get_deleted_leads': self.get_deleted_leads, 'update_leads_partition': self.update_leads_partition, 'create_folder': self.create_folder, 'get_folder_by_id': self.get_folder_by_id, 'get_folder_by_name': self.get_folder_by_name, 'get_folder_contents': self.get_folder_contents, 'update_folder': self.update_folder, 'delete_folder': self.delete_folder, 'browse_folders': self.browse_folders, 'create_token': self.create_token, 'get_tokens': self.get_tokens, 'delete_tokens': self.delete_tokens, 'create_email_template': self.create_email_template, 'get_email_template_by_id': self.get_email_template_by_id, 'get_email_template_by_name': self.get_email_template_by_name, 'update_email_template': self.update_email_template, 'delete_email_template': self.delete_email_template, 'get_email_templates': self.get_email_templates, 'get_email_templates_yield': self.get_email_templates_yield, 'get_email_template_content': self.get_email_template_content, 'update_email_template_content': self.update_email_template_content, 'approve_email_template': self.approve_email_template, 'unapprove_email_template': self.unapprove_email_template, 'discard_email_template_draft': self.discard_email_template_draft, 'clone_email_template': self.clone_email_template, 'create_email': self.create_email, 'get_email_by_id': self.get_email_by_id, 'get_email_by_name': self.get_email_by_name, 'delete_email': self.delete_email, 'update_email': self.update_email, 'get_emails': self.get_emails, 'get_emails_yield': self.get_emails_yield, 'get_email_content': self.get_email_content, 'update_email_content': self.update_email_content, 'update_email_content_in_editable_section': self.update_email_content_in_editable_section, 'get_email_dynamic_content': self.get_email_dynamic_content, 'update_email_dynamic_content': self.update_email_dynamic_content, 'approve_email': self.approve_email, 'unapprove_email': 
self.unapprove_email, 'discard_email_draft': self.discard_email_draft, 'clone_email': self.clone_email, 'send_sample_email': self.send_sample_email, 'get_email_full_content': self.get_email_full_content, 'create_landing_page': self.create_landing_page, 'get_landing_page_by_id': self.get_landing_page_by_id, 'get_landing_page_by_name': self.get_landing_page_by_name, 'delete_landing_page': self.delete_landing_page, 'update_landing_page': self.update_landing_page, 'get_landing_pages': self.get_landing_pages, 'get_landing_pages_yield': self.get_landing_pages_yield, 'get_landing_page_content': self.get_landing_page_content, 'create_landing_page_content_section': self.create_landing_page_content_section, 'update_landing_page_content_section': self.update_landing_page_content_section, 'delete_landing_page_content_section': self.delete_landing_page_content_section, 'get_landing_page_dynamic_content': self.get_landing_page_dynamic_content, 'update_landing_page_dynamic_content': self.update_landing_page_dynamic_content, 'approve_landing_page': self.approve_landing_page, 'unapprove_landing_page': self.unapprove_landing_page, 'discard_landing_page_draft': self.discard_landing_page_draft, 'clone_landing_page': self.clone_landing_page, 'create_form': self.create_form, 'get_form_by_id': self.get_form_by_id, 'get_form_by_name': self.get_form_by_name, 'delete_form': self.delete_form, 'update_form': self.update_form, 'get_forms': self.get_forms, 'get_forms_yield': self.get_forms_yield, 'get_form_fields': self.get_form_fields, 'create_form_field': self.create_form_field, 'update_form_field': self.update_form_field, 'delete_form_field': self.delete_form_field, 'approve_form': self.approve_form, 'unapprove_form': self.unapprove_form, 'discard_form_draft': self.discard_form_draft, 'clone_form': self.clone_form, 'create_file': self.create_file, 'get_file_by_id': self.get_file_by_id, 'get_file_by_name': self.get_file_by_name, 'list_files': self.list_files, 'get_files_yield': 
self.get_files_yield, 'update_file_content': self.update_file_content, 'create_snippet': self.create_snippet, 'get_snippet_by_id': self.get_snippet_by_id, 'delete_snippet': self.delete_snippet, 'update_snippet': self.update_snippet, 'get_snippets': self.get_snippets, 'get_snippets_yield': self.get_snippets_yield, 'get_snippet_content': self.get_snippet_content, 'update_snippet_content': self.update_snippet_content, 'approve_snippet': self.approve_snippet, 'unapprove_snippet': self.unapprove_snippet, 'discard_snippet_draft': self.discard_snippet_draft, 'clone_snippet': self.clone_snippet, 'update_snippet_dynamic_content': self.update_snippet_dynamic_content, 'get_snippet_dynamic_content': self.get_snippet_dynamic_content, 'get_segmentations': self.get_segmentations, 'get_segments': self.get_segments, 'create_landing_page_template': self.create_landing_page_template, 'get_landing_page_template_by_id': self.get_landing_page_template_by_id, 'get_landing_page_template_by_name': self.get_landing_page_template_by_name, 'get_landing_page_templates': self.get_landing_page_templates, 'get_landing_page_templates_yield': self.get_landing_page_templates_yield, 'get_landing_page_template_content': self.get_landing_page_template_content, 'update_landing_page_template_content': self.update_landing_page_template_content, 'update_landing_page_template': self.update_landing_page_template, 'delete_landing_page_template': self.delete_landing_page_template, 'approve_landing_page_template': self.approve_landing_page_template, 'unapprove_landing_page_template': self.unapprove_landing_page_template, 'discard_landing_page_template_draft': self.discard_landing_page_template_draft, 'clone_landing_page_template': self.clone_landing_page_template, 'create_program': self.create_program, 'get_program_by_id': self.get_program_by_id, 'get_program_by_name': self.get_program_by_name, 'get_program_by_tag_type': self.get_program_by_tag_type, 'update_program': self.update_program, 'delete_program': 
self.delete_program, 'browse_programs': self.browse_programs, 'get_programs_yield': self.get_programs_yield, 'clone_program': self.clone_program, 'approve_program': self.approve_program, 'unapprove_program': self.unapprove_program, 'get_channels': self.get_channels, 'get_channel_by_name': self.get_channel_by_name, 'get_tags': self.get_tags, 'get_tag_by_name': self.get_tag_by_name, 'get_list_of_custom_objects': self.get_list_of_custom_objects, 'describe_custom_object': self.describe_custom_object, 'create_update_custom_objects': self.create_update_custom_objects, 'delete_custom_objects': self.delete_custom_objects, 'get_custom_objects': self.get_custom_objects, 'describe_opportunity': self.describe_opportunity, 'create_update_opportunities': self.create_update_opportunities, 'delete_opportunities': self.delete_opportunities, 'get_opportunities': self.get_opportunities, 'describe_opportunity_role': self.describe_opportunity_role, 'create_update_opportunities_roles': self.create_update_opportunities_roles, 'delete_opportunity_roles': self.delete_opportunity_roles, 'get_opportunity_roles': self.get_opportunity_roles, 'describe_company': self.describe_company, 'create_update_companies': self.create_update_companies, 'delete_companies': self.delete_companies, 'get_companies': self.get_companies, 'describe_sales_person': self.describe_sales_person, 'create_update_sales_persons': self.create_update_sales_persons, 'delete_sales_persons': self.delete_sales_persons, 'get_sales_persons': self.get_sales_persons, 'get_custom_activity_types': self.get_custom_activity_types, 'describe_custom_activity_type': self.describe_custom_activity_type, 'create_custom_activity_type': self.create_custom_activity_type, 'update_custom_activity_type': self.update_custom_activity_type, 'approve_custom_activity_type': self.approve_custom_activity_type, 'create_custom_activity_type_attribute': self.create_custom_activity_type_attribute, 'discard_custom_activity_type_draft': 
self.discard_custom_activity_type_draft, 'delete_custom_activity_type': self.delete_custom_activity_type, 'update_custom_activity_type_attribute': self.update_custom_activity_type_attribute, 'delete_custom_activity_type_attribute': self.delete_custom_activity_type_attribute, 'get_leads_export_jobs_list': self.get_leads_export_jobs_list, 'get_activities_export_jobs_list': self.get_activities_export_jobs_list, 'create_leads_export_job': self.create_leads_export_job, 'create_activities_export_job': self.create_activities_export_job, 'enqueue_leads_export_job': self.enqueue_leads_export_job, 'enqueue_activities_export_job': self.enqueue_activities_export_job, 'cancel_leads_export_job': self.cancel_leads_export_job, 'cancel_activities_export_job': self.cancel_activities_export_job, 'get_leads_export_job_status': self.get_leads_export_job_status, 'get_activities_export_job_status': self.get_activities_export_job_status, 'get_leads_export_job_file': self.get_leads_export_job_file, 'get_activities_export_job_file': self.get_activities_export_job_file}
result = method_map[method](*args, **kargs) # depends on [control=['try'], data=[]]
except MarketoException as e:
'\n 601 -> auth token not valid\n 602 -> auth token expired\n '
if e.code in ['601', '602']:
self.authenticate()
continue # depends on [control=['if'], data=[]]
else:
raise Exception({'message': e.message, 'code': e.code}) # depends on [control=['except'], data=['e']]
break # depends on [control=['for'], data=[]]
return result
|
def apply_time_to_configurations(self, configurations, data):
    """Applies the correct time index to configurations.

    Only a single time value is supported; when ``data['time']`` holds an
    extent (tuple or list) the first entry is used.  Each configuration is
    updated in place via ``set_time_index_from_datetime`` and the same
    sequence is returned.
    """
    requested = data.get('time')
    if requested and isinstance(requested, (tuple, list)):
        # Extents are not supported -- fall back to the starting value.
        requested = requested[0]
    if requested:
        for cfg in configurations:
            cfg.set_time_index_from_datetime(requested, best_fit=ALLOW_BEST_FIT_TIME_INDEX)
    return configurations
|
def function[apply_time_to_configurations, parameter[self, configurations, data]]:
constant[Applies the correct time index to configurations]
variable[time_value] assign[=] constant[None]
if call[name[data].get, parameter[constant[time]]] begin[:]
variable[time_value] assign[=] call[name[data]][constant[time]]
if call[name[isinstance], parameter[call[name[data]][constant[time]], tuple[[<ast.Name object at 0x7da18bccbbb0>, <ast.Name object at 0x7da18bccb1c0>]]]] begin[:]
variable[time_value] assign[=] call[name[time_value]][constant[0]]
if name[time_value] begin[:]
for taget[name[config]] in starred[name[configurations]] begin[:]
call[name[config].set_time_index_from_datetime, parameter[name[time_value]]]
return[name[configurations]]
|
keyword[def] identifier[apply_time_to_configurations] ( identifier[self] , identifier[configurations] , identifier[data] ):
literal[string]
identifier[time_value] = keyword[None]
keyword[if] identifier[data] . identifier[get] ( literal[string] ):
identifier[time_value] = identifier[data] [ literal[string] ]
keyword[if] identifier[isinstance] ( identifier[data] [ literal[string] ],( identifier[tuple] , identifier[list] )):
identifier[time_value] = identifier[time_value] [ literal[int] ]
keyword[if] identifier[time_value] :
keyword[for] identifier[config] keyword[in] identifier[configurations] :
identifier[config] . identifier[set_time_index_from_datetime] ( identifier[time_value] , identifier[best_fit] = identifier[ALLOW_BEST_FIT_TIME_INDEX] )
keyword[return] identifier[configurations]
|
def apply_time_to_configurations(self, configurations, data):
"""Applies the correct time index to configurations"""
time_value = None
if data.get('time'):
time_value = data['time']
# Only single time values are supported. For extents, just grab the first value
if isinstance(data['time'], (tuple, list)):
time_value = time_value[0] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if time_value:
for config in configurations:
config.set_time_index_from_datetime(time_value, best_fit=ALLOW_BEST_FIT_TIME_INDEX) # depends on [control=['for'], data=['config']] # depends on [control=['if'], data=[]]
return configurations
|
def load(cls, filename):
    """
    Loads the experiment from disk.

    :param filename: the filename of the experiment to load
    :type filename: str
    :return: the experiment
    :rtype: Experiment
    """
    # Delegate deserialization to Weka's static Experiment.read(String).
    jexp = javabridge.static_call(
        "weka/experiment/Experiment",
        "read",
        "(Ljava/lang/String;)Lweka/experiment/Experiment;",
        filename)
    return Experiment(jobject=jexp)
|
def function[load, parameter[cls, filename]]:
constant[
Loads the experiment from disk.
:param filename: the filename of the experiment to load
:type filename: str
:return: the experiment
:rtype: Experiment
]
variable[jobject] assign[=] call[name[javabridge].static_call, parameter[constant[weka/experiment/Experiment], constant[read], constant[(Ljava/lang/String;)Lweka/experiment/Experiment;], name[filename]]]
return[call[name[Experiment], parameter[]]]
|
keyword[def] identifier[load] ( identifier[cls] , identifier[filename] ):
literal[string]
identifier[jobject] = identifier[javabridge] . identifier[static_call] (
literal[string] , literal[string] , literal[string] ,
identifier[filename] )
keyword[return] identifier[Experiment] ( identifier[jobject] = identifier[jobject] )
|
def load(cls, filename):
"""
Loads the experiment from disk.
:param filename: the filename of the experiment to load
:type filename: str
:return: the experiment
:rtype: Experiment
"""
jobject = javabridge.static_call('weka/experiment/Experiment', 'read', '(Ljava/lang/String;)Lweka/experiment/Experiment;', filename)
return Experiment(jobject=jobject)
|
def retrieve(self, id):
    """
    Retrieve a single deal

    Returns a single deal available to the user, according to the unique deal ID provided
    If the specified deal does not exist, the request will return an error

    :calls: ``get /deals/{id}``
    :param int id: Unique identifier of a Deal.
    :return: Dictionary that support attriubte-style access and represent Deal resource.
    :rtype: dict
    """
    url = "/deals/{id}".format(id=id)
    _, _, deal = self.http_client.get(url)
    # The API returns the monetary value as a string; normalize to Decimal.
    deal["value"] = Coercion.to_decimal(deal["value"])
    return deal
|
def function[retrieve, parameter[self, id]]:
constant[
Retrieve a single deal
Returns a single deal available to the user, according to the unique deal ID provided
If the specified deal does not exist, the request will return an error
:calls: ``get /deals/{id}``
:param int id: Unique identifier of a Deal.
:return: Dictionary that support attriubte-style access and represent Deal resource.
:rtype: dict
]
<ast.Tuple object at 0x7da18f722f20> assign[=] call[name[self].http_client.get, parameter[call[constant[/deals/{id}].format, parameter[]]]]
call[name[deal]][constant[value]] assign[=] call[name[Coercion].to_decimal, parameter[call[name[deal]][constant[value]]]]
return[name[deal]]
|
keyword[def] identifier[retrieve] ( identifier[self] , identifier[id] ):
literal[string]
identifier[_] , identifier[_] , identifier[deal] = identifier[self] . identifier[http_client] . identifier[get] ( literal[string] . identifier[format] ( identifier[id] = identifier[id] ))
identifier[deal] [ literal[string] ]= identifier[Coercion] . identifier[to_decimal] ( identifier[deal] [ literal[string] ])
keyword[return] identifier[deal]
|
def retrieve(self, id):
"""
Retrieve a single deal
Returns a single deal available to the user, according to the unique deal ID provided
If the specified deal does not exist, the request will return an error
:calls: ``get /deals/{id}``
:param int id: Unique identifier of a Deal.
:return: Dictionary that support attriubte-style access and represent Deal resource.
:rtype: dict
"""
(_, _, deal) = self.http_client.get('/deals/{id}'.format(id=id))
deal['value'] = Coercion.to_decimal(deal['value'])
return deal
|
def check_perms(path,
                ret=None,
                owner=None,
                grant_perms=None,
                deny_perms=None,
                inheritance=True,
                reset=False):
    '''
    Check owner and permissions for the passed directory. This function checks
    the permissions and sets them, returning the changes made. Used by the file
    state to populate the return dict

    Args:

        path (str):
            The full path to the directory.

        ret (dict):
            A dictionary to append changes to and return. If not passed, will
            create a new dictionary to return.

        owner (str):
            The owner to set for the directory.

        grant_perms (dict):
            A dictionary containing the user/group and the basic permissions to
            check/grant, ie: ``{'user': {'perms': 'basic_permission'}}``.
            Default is ``None``.

        deny_perms (dict):
            A dictionary containing the user/group and permissions to
            check/deny. Default is ``None``.

        inheritance (bool):
            ``True`` will check if inheritance is enabled and enable it.
            ``False`` will check if inheritance is disabled and disable it.
            Default is ``True``.

        reset (bool):
            ``True`` will show what permissions will be removed by resetting the
            DACL. ``False`` will do nothing. Default is ``False``.

    Returns:
        dict: A dictionary of changes that have been made

    CLI Example:

    .. code-block:: bash

        # To see changes to ``C:\\Temp`` if the 'Users' group is given 'read & execute' permissions.
        salt '*' file.check_perms C:\\Temp\\ {} Administrators "{'Users': {'perms': 'read_execute'}}"

        # Locally using salt call
        salt-call file.check_perms C:\\Temp\\ {} Administrators "{'Users': {'perms': 'read_execute', 'applies_to': 'this_folder_only'}}"

        # Specify advanced attributes with a list
        salt '*' file.check_perms C:\\Temp\\ {} Administrators "{'jsnuffy': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'files_only'}}"
    '''
    # Expand '~' BEFORE checking existence; otherwise valid user-relative
    # paths would be rejected as missing.
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        raise CommandExecutionError('Path not found: {0}'.format(path))

    # Delegate the actual DACL inspection/modification to the shared util.
    return __utils__['dacl.check_perms'](obj_name=path,
                                         obj_type='file',
                                         ret=ret,
                                         owner=owner,
                                         grant_perms=grant_perms,
                                         deny_perms=deny_perms,
                                         inheritance=inheritance,
                                         reset=reset)
|
def function[check_perms, parameter[path, ret, owner, grant_perms, deny_perms, inheritance, reset]]:
constant[
Check owner and permissions for the passed directory. This function checks
the permissions and sets them, returning the changes made. Used by the file
state to populate the return dict
Args:
path (str):
The full path to the directory.
ret (dict):
A dictionary to append changes to and return. If not passed, will
create a new dictionary to return.
owner (str):
The owner to set for the directory.
grant_perms (dict):
A dictionary containing the user/group and the basic permissions to
check/grant, ie: ``{'user': {'perms': 'basic_permission'}}``.
Default is ``None``.
deny_perms (dict):
A dictionary containing the user/group and permissions to
check/deny. Default is ``None``.
inheritance (bool):
``True will check if inheritance is enabled and enable it. ``False``
will check if inheritance is disabled and disable it. Default is
``True``.
reset (bool):
``True`` will show what permissions will be removed by resetting the
DACL. ``False`` will do nothing. Default is ``False``.
Returns:
dict: A dictionary of changes that have been made
CLI Example:
.. code-block:: bash
# To see changes to ``C:\Temp`` if the 'Users' group is given 'read & execute' permissions.
salt '*' file.check_perms C:\Temp\ {} Administrators "{'Users': {'perms': 'read_execute'}}"
# Locally using salt call
salt-call file.check_perms C:\Temp\ {} Administrators "{'Users': {'perms': 'read_execute', 'applies_to': 'this_folder_only'}}"
# Specify advanced attributes with a list
salt '*' file.check_perms C:\Temp\ {} Administrators "{'jsnuffy': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'files_only'}}"
]
if <ast.UnaryOp object at 0x7da1b1f34730> begin[:]
<ast.Raise object at 0x7da1b1f343d0>
variable[path] assign[=] call[name[os].path.expanduser, parameter[name[path]]]
return[call[call[name[__utils__]][constant[dacl.check_perms]], parameter[]]]
|
keyword[def] identifier[check_perms] ( identifier[path] ,
identifier[ret] = keyword[None] ,
identifier[owner] = keyword[None] ,
identifier[grant_perms] = keyword[None] ,
identifier[deny_perms] = keyword[None] ,
identifier[inheritance] = keyword[True] ,
identifier[reset] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ):
keyword[raise] identifier[CommandExecutionError] ( literal[string] . identifier[format] ( identifier[path] ))
identifier[path] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[path] )
keyword[return] identifier[__utils__] [ literal[string] ]( identifier[obj_name] = identifier[path] ,
identifier[obj_type] = literal[string] ,
identifier[ret] = identifier[ret] ,
identifier[owner] = identifier[owner] ,
identifier[grant_perms] = identifier[grant_perms] ,
identifier[deny_perms] = identifier[deny_perms] ,
identifier[inheritance] = identifier[inheritance] ,
identifier[reset] = identifier[reset] )
|
def check_perms(path, ret=None, owner=None, grant_perms=None, deny_perms=None, inheritance=True, reset=False):
"""
Check owner and permissions for the passed directory. This function checks
the permissions and sets them, returning the changes made. Used by the file
state to populate the return dict
Args:
path (str):
The full path to the directory.
ret (dict):
A dictionary to append changes to and return. If not passed, will
create a new dictionary to return.
owner (str):
The owner to set for the directory.
grant_perms (dict):
A dictionary containing the user/group and the basic permissions to
check/grant, ie: ``{'user': {'perms': 'basic_permission'}}``.
Default is ``None``.
deny_perms (dict):
A dictionary containing the user/group and permissions to
check/deny. Default is ``None``.
inheritance (bool):
``True will check if inheritance is enabled and enable it. ``False``
will check if inheritance is disabled and disable it. Default is
``True``.
reset (bool):
``True`` will show what permissions will be removed by resetting the
DACL. ``False`` will do nothing. Default is ``False``.
Returns:
dict: A dictionary of changes that have been made
CLI Example:
.. code-block:: bash
# To see changes to ``C:\\Temp`` if the 'Users' group is given 'read & execute' permissions.
salt '*' file.check_perms C:\\Temp\\ {} Administrators "{'Users': {'perms': 'read_execute'}}"
# Locally using salt call
salt-call file.check_perms C:\\Temp\\ {} Administrators "{'Users': {'perms': 'read_execute', 'applies_to': 'this_folder_only'}}"
# Specify advanced attributes with a list
salt '*' file.check_perms C:\\Temp\\ {} Administrators "{'jsnuffy': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'files_only'}}"
"""
if not os.path.exists(path):
raise CommandExecutionError('Path not found: {0}'.format(path)) # depends on [control=['if'], data=[]]
path = os.path.expanduser(path)
return __utils__['dacl.check_perms'](obj_name=path, obj_type='file', ret=ret, owner=owner, grant_perms=grant_perms, deny_perms=deny_perms, inheritance=inheritance, reset=reset)
|
def get_bordercolor(self):
    """Map each header GO and user GO to a border color.

    Header GOs (used or unused) get the standard header color; header GOs
    that are simultaneously user GOs override to blue; user GOs that are
    not header GOs get teal.  Later assignments intentionally overwrite
    earlier ones.
    """
    hdrgos_all = self.grprobj.hdrobj.get_hdrgos()
    # Start with header GOs that went unused in this grouping.
    go2bordercolor = dict.fromkeys(
        hdrgos_all.difference(self.hdrgos_actual), self.hdrcol_all)
    # Header GOs used in this grouping that are NOT user GOs.
    go2bordercolor.update(
        dict.fromkeys(self.grprobj.hdrgo2usrgos, self.hdrcol_all))
    # Header GOs used in this grouping that ARE user GOs.
    go2bordercolor.update(dict.fromkeys(self.grprobj.hdrgo_is_usrgo, 'blue'))
    # User GOs which are NOT header GOs.
    usrgos_only = self.grprobj.usrgos.difference(self.grprobj.hdrgo_is_usrgo)
    go2bordercolor.update(dict.fromkeys(usrgos_only, '#029386'))  # teal
    return go2bordercolor
|
def function[get_bordercolor, parameter[self]]:
constant[Get bordercolor based on hdrgos and usergos.]
variable[hdrgos_all] assign[=] call[name[self].grprobj.hdrobj.get_hdrgos, parameter[]]
variable[hdrgos_unused] assign[=] call[name[hdrgos_all].difference, parameter[name[self].hdrgos_actual]]
variable[go2bordercolor] assign[=] dictionary[[], []]
for taget[name[hdrgo]] in starred[name[hdrgos_unused]] begin[:]
call[name[go2bordercolor]][name[hdrgo]] assign[=] name[self].hdrcol_all
for taget[name[hdrgo]] in starred[call[name[self].grprobj.hdrgo2usrgos.keys, parameter[]]] begin[:]
call[name[go2bordercolor]][name[hdrgo]] assign[=] name[self].hdrcol_all
for taget[name[hdrgo]] in starred[name[self].grprobj.hdrgo_is_usrgo] begin[:]
call[name[go2bordercolor]][name[hdrgo]] assign[=] constant[blue]
variable[usrgos_rem] assign[=] call[name[self].grprobj.usrgos.difference, parameter[name[self].grprobj.hdrgo_is_usrgo]]
for taget[name[usrgo]] in starred[name[usrgos_rem]] begin[:]
call[name[go2bordercolor]][name[usrgo]] assign[=] constant[#029386]
return[name[go2bordercolor]]
|
keyword[def] identifier[get_bordercolor] ( identifier[self] ):
literal[string]
identifier[hdrgos_all] = identifier[self] . identifier[grprobj] . identifier[hdrobj] . identifier[get_hdrgos] ()
identifier[hdrgos_unused] = identifier[hdrgos_all] . identifier[difference] ( identifier[self] . identifier[hdrgos_actual] )
identifier[go2bordercolor] ={}
keyword[for] identifier[hdrgo] keyword[in] identifier[hdrgos_unused] :
identifier[go2bordercolor] [ identifier[hdrgo] ]= identifier[self] . identifier[hdrcol_all]
keyword[for] identifier[hdrgo] keyword[in] identifier[self] . identifier[grprobj] . identifier[hdrgo2usrgos] . identifier[keys] ():
identifier[go2bordercolor] [ identifier[hdrgo] ]= identifier[self] . identifier[hdrcol_all]
keyword[for] identifier[hdrgo] keyword[in] identifier[self] . identifier[grprobj] . identifier[hdrgo_is_usrgo] :
identifier[go2bordercolor] [ identifier[hdrgo] ]= literal[string]
identifier[usrgos_rem] = identifier[self] . identifier[grprobj] . identifier[usrgos] . identifier[difference] ( identifier[self] . identifier[grprobj] . identifier[hdrgo_is_usrgo] )
keyword[for] identifier[usrgo] keyword[in] identifier[usrgos_rem] :
identifier[go2bordercolor] [ identifier[usrgo] ]= literal[string]
keyword[return] identifier[go2bordercolor]
|
def get_bordercolor(self):
"""Get bordercolor based on hdrgos and usergos."""
hdrgos_all = self.grprobj.hdrobj.get_hdrgos()
hdrgos_unused = hdrgos_all.difference(self.hdrgos_actual)
go2bordercolor = {}
# hdrgos that went unused
for hdrgo in hdrgos_unused:
go2bordercolor[hdrgo] = self.hdrcol_all # depends on [control=['for'], data=['hdrgo']]
# hdrgos used in this grouping that are NOT usrgos
for hdrgo in self.grprobj.hdrgo2usrgos.keys():
go2bordercolor[hdrgo] = self.hdrcol_all # depends on [control=['for'], data=['hdrgo']]
# hdrgos used in this grouping that ARE usrgos
for hdrgo in self.grprobj.hdrgo_is_usrgo:
go2bordercolor[hdrgo] = 'blue' # depends on [control=['for'], data=['hdrgo']]
# usrgos which are NOT hdrgos
usrgos_rem = self.grprobj.usrgos.difference(self.grprobj.hdrgo_is_usrgo)
for usrgo in usrgos_rem:
go2bordercolor[usrgo] = '#029386' # teal # depends on [control=['for'], data=['usrgo']]
# print("{N:5} hdrgos actual".format(N=len(self.hdrgos_actual)))
# print("{N:5} hdrgos unused".format(N=len(hdrgos_unused)))
# print("{N:5} hdrgos only BLACK".format(N=len(self.grprobj.hdrgo2usrgos.keys())))
# print("{N:5} usrgos".format(N=len(self.grprobj.usrgos)))
# print("{N:5} usrgos AND hdrgos BLUE".format(N=len(self.grprobj.hdrgo_is_usrgo)))
# print("{N:5} usrgos Only".format(N=len(usrgos_rem)))
return go2bordercolor
|
def optimize(self, objective_sense=None, raise_error=False):
    """
    Optimize the model using flux balance analysis.

    Parameters
    ----------
    objective_sense : {None, 'maximize' 'minimize'}, optional
        Whether fluxes should be maximized or minimized. In case of None,
        the previous direction is used.
    raise_error : bool
        If true, raise an OptimizationError if solver status is not
        optimal.

    Notes
    -----
    Only the most commonly used parameters are presented here. Additional
    parameters for cobra.solvers may be available and specified with the
    appropriate keyword argument.
    """
    saved_direction = self.objective.direction
    sense_map = {"maximize": "max", "minimize": "min"}
    # Unknown/None senses keep the previously configured direction.
    self.objective.direction = sense_map.get(objective_sense, saved_direction)
    self.slim_optimize()
    solution = get_solution(self, raise_error=raise_error)
    # Restore the direction so this call has no lasting side effect.
    self.objective.direction = saved_direction
    return solution
|
def function[optimize, parameter[self, objective_sense, raise_error]]:
constant[
Optimize the model using flux balance analysis.
Parameters
----------
objective_sense : {None, 'maximize' 'minimize'}, optional
Whether fluxes should be maximized or minimized. In case of None,
the previous direction is used.
raise_error : bool
If true, raise an OptimizationError if solver status is not
optimal.
Notes
-----
Only the most commonly used parameters are presented here. Additional
parameters for cobra.solvers may be available and specified with the
appropriate keyword argument.
]
variable[original_direction] assign[=] name[self].objective.direction
name[self].objective.direction assign[=] call[dictionary[[<ast.Constant object at 0x7da1b01920b0>, <ast.Constant object at 0x7da1b0191000>], [<ast.Constant object at 0x7da1b0190ca0>, <ast.Constant object at 0x7da1b0190eb0>]].get, parameter[name[objective_sense], name[original_direction]]]
call[name[self].slim_optimize, parameter[]]
variable[solution] assign[=] call[name[get_solution], parameter[name[self]]]
name[self].objective.direction assign[=] name[original_direction]
return[name[solution]]
|
keyword[def] identifier[optimize] ( identifier[self] , identifier[objective_sense] = keyword[None] , identifier[raise_error] = keyword[False] ):
literal[string]
identifier[original_direction] = identifier[self] . identifier[objective] . identifier[direction]
identifier[self] . identifier[objective] . identifier[direction] ={ literal[string] : literal[string] , literal[string] : literal[string] }. identifier[get] (
identifier[objective_sense] , identifier[original_direction] )
identifier[self] . identifier[slim_optimize] ()
identifier[solution] = identifier[get_solution] ( identifier[self] , identifier[raise_error] = identifier[raise_error] )
identifier[self] . identifier[objective] . identifier[direction] = identifier[original_direction]
keyword[return] identifier[solution]
|
def optimize(self, objective_sense=None, raise_error=False):
    """
    Optimize the model using flux balance analysis.

    Parameters
    ----------
    objective_sense : {None, 'maximize', 'minimize'}, optional
        Whether fluxes should be maximized or minimized. In case of None,
        the previous direction is used.
    raise_error : bool
        If true, raise an OptimizationError if solver status is not
        optimal.

    Notes
    -----
    Only the most commonly used parameters are presented here. Additional
    parameters for cobra.solvers may be available and specified with the
    appropriate keyword argument.
    """
    sense_to_direction = {'maximize': 'max', 'minimize': 'min'}
    previous_direction = self.objective.direction
    # Temporarily override the optimization direction, keeping the current
    # one when no explicit sense was requested.
    self.objective.direction = sense_to_direction.get(objective_sense,
                                                      previous_direction)
    self.slim_optimize()
    solution = get_solution(self, raise_error=raise_error)
    # Restore whatever direction was active before this call.
    self.objective.direction = previous_direction
    return solution
|
def change_version_byte(address, new_version=None, new_crypto=None):
    """
    Convert the passed in address (or any base58 encoded string), and change the
    version byte to `new_version`.

    Either ``new_version`` (an int in 0-255) or ``new_crypto`` (a currency
    symbol whose version byte is looked up in ``crypto_data``) must be given.

    Raises
    ------
    CurrencyNotSupported
        If ``new_crypto`` is unknown or has no registered version byte.
    """
    if not new_version and new_crypto:
        try:
            new_version = crypto_data[new_crypto]['address_version_byte']
        except KeyError:
            raise CurrencyNotSupported("Unknown currency symbol: " + new_crypto)
        if not new_version:
            raise CurrencyNotSupported("Can't yet make %s addresses." % new_crypto)
    # Strip the old (single-byte) version prefix; the checksum is verified
    # and removed by b58decode_check.
    payload = b58decode_check(address)[1:]
    if is_py2:
        byte = chr(new_version)
    else:
        # bytes([v]) handles the full 0-255 range. The previous
        # bytes(chr(v), 'ascii') raised UnicodeEncodeError for version
        # bytes >= 0x80 (e.g. the 0x80 WIF private-key prefix).
        byte = bytes([new_version])
    return b58encode_check(byte + payload)
|
def function[change_version_byte, parameter[address, new_version, new_crypto]]:
constant[
Convert the passed in address (or any base58 encoded string), and change the
version byte to `new_version`.
]
if <ast.BoolOp object at 0x7da1b11181f0> begin[:]
<ast.Try object at 0x7da1b1118fd0>
if <ast.UnaryOp object at 0x7da1b111baf0> begin[:]
<ast.Raise object at 0x7da1b111bf10>
variable[payload] assign[=] call[call[name[b58decode_check], parameter[name[address]]]][<ast.Slice object at 0x7da1b11d2fb0>]
if name[is_py2] begin[:]
variable[byte] assign[=] call[name[chr], parameter[name[new_version]]]
return[call[name[b58encode_check], parameter[binary_operation[name[byte] + name[payload]]]]]
|
keyword[def] identifier[change_version_byte] ( identifier[address] , identifier[new_version] = keyword[None] , identifier[new_crypto] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[new_version] keyword[and] identifier[new_crypto] :
keyword[try] :
identifier[new_version] = identifier[crypto_data] [ identifier[new_crypto] ][ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[CurrencyNotSupported] ( literal[string] + identifier[new_crypto] )
keyword[if] keyword[not] identifier[new_version] :
keyword[raise] identifier[CurrencyNotSupported] ( literal[string] % identifier[new_crypto] )
identifier[payload] = identifier[b58decode_check] ( identifier[address] )[ literal[int] :]
keyword[if] identifier[is_py2] :
identifier[byte] = identifier[chr] ( identifier[new_version] )
keyword[else] :
identifier[byte] = identifier[bytes] ( identifier[chr] ( identifier[new_version] ), literal[string] )
keyword[return] identifier[b58encode_check] ( identifier[byte] + identifier[payload] )
|
def change_version_byte(address, new_version=None, new_crypto=None):
"""
Convert the passed in address (or any base58 encoded string), and change the
version byte to `new_version`.
"""
if not new_version and new_crypto:
try:
new_version = crypto_data[new_crypto]['address_version_byte'] # depends on [control=['try'], data=[]]
except KeyError:
raise CurrencyNotSupported('Unknown currency symbol: ' + new_crypto) # depends on [control=['except'], data=[]]
if not new_version:
raise CurrencyNotSupported("Can't yet make %s addresses." % new_crypto) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
payload = b58decode_check(address)[1:]
if is_py2:
byte = chr(new_version) # depends on [control=['if'], data=[]]
else:
byte = bytes(chr(new_version), 'ascii')
return b58encode_check(byte + payload)
|
def _generateForTokenSecurity(self,
                              username,
                              password,
                              referer=None,
                              tokenUrl=None,
                              expiration=None,
                              proxy_url=None,
                              proxy_port=None):
    """ generates a token for a feature service

    NOTE(review): the ``username``, ``password``, ``proxy_url`` and
    ``proxy_port`` parameters are accepted but ignored -- the request is
    built from ``self._username``/``self._password`` and routed through
    ``self._proxy_url``/``self._proxy_port``. Confirm whether the
    explicit parameters should take precedence.

    Returns the raw error dict if the server reports an error, ``None``
    when the response contains no token, otherwise a
    ``(token, url_prefix)`` tuple where the prefix is the https variant
    when the server flags ``ssl``.
    """
    # Fall back to the instance-level referer/token URL when not supplied.
    if referer is None:
        referer = self._referer_url
    if tokenUrl is None:
        tokenUrl = self._token_url
    query_dict = {'username': self._username,
                  'password': self._password,
                  'expiration': str(_defaultTokenExpiration),
                  'referer': referer,
                  'f': 'json'}
    # An explicit expiration overrides the module-level default above.
    if expiration is not None:
        query_dict['expiration'] = str(expiration)
    self._token_created_on = datetime.datetime.now()
    token = self._post(url=tokenUrl,
                       param_dict=query_dict,
                       securityHandler=None,
                       proxy_port=self._proxy_port,
                       proxy_url=self._proxy_url)
    if 'error' in token:
        self._token = None
        return token
    # The server reports 'expires' in milliseconds since the epoch;
    # subtract a 10 second safety margin so the token is refreshed early.
    self._token_expires_on = datetime.datetime.fromtimestamp(token['expires'] / 1000) - \
                             datetime.timedelta(seconds=10)
    if "token" not in token:
        self._token = None
        return None
    else:
        httpPrefix = self._url
        if token['ssl'] == True:
            httpPrefix = self._surl
        self._token = token['token']
        return token['token'], httpPrefix
|
def function[_generateForTokenSecurity, parameter[self, username, password, referer, tokenUrl, expiration, proxy_url, proxy_port]]:
constant[ generates a token for a feature service ]
if compare[name[referer] is constant[None]] begin[:]
variable[referer] assign[=] name[self]._referer_url
if compare[name[tokenUrl] is constant[None]] begin[:]
variable[tokenUrl] assign[=] name[self]._token_url
variable[query_dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b12901f0>, <ast.Constant object at 0x7da1b12906d0>, <ast.Constant object at 0x7da1b1290580>, <ast.Constant object at 0x7da1b12902b0>, <ast.Constant object at 0x7da1b1290490>], [<ast.Attribute object at 0x7da1b1290d60>, <ast.Attribute object at 0x7da1b1290190>, <ast.Call object at 0x7da1b1291180>, <ast.Name object at 0x7da1b1291270>, <ast.Constant object at 0x7da1b1291240>]]
if compare[name[expiration] is_not constant[None]] begin[:]
call[name[query_dict]][constant[expiration]] assign[=] call[name[str], parameter[name[expiration]]]
name[self]._token_created_on assign[=] call[name[datetime].datetime.now, parameter[]]
variable[token] assign[=] call[name[self]._post, parameter[]]
if compare[constant[error] in name[token]] begin[:]
name[self]._token assign[=] constant[None]
return[name[token]]
name[self]._token_expires_on assign[=] binary_operation[call[name[datetime].datetime.fromtimestamp, parameter[binary_operation[call[name[token]][constant[expires]] / constant[1000]]]] - call[name[datetime].timedelta, parameter[]]]
if compare[constant[token] <ast.NotIn object at 0x7da2590d7190> name[token]] begin[:]
name[self]._token assign[=] constant[None]
return[constant[None]]
|
keyword[def] identifier[_generateForTokenSecurity] ( identifier[self] ,
identifier[username] ,
identifier[password] ,
identifier[referer] = keyword[None] ,
identifier[tokenUrl] = keyword[None] ,
identifier[expiration] = keyword[None] ,
identifier[proxy_url] = keyword[None] ,
identifier[proxy_port] = keyword[None] ):
literal[string]
keyword[if] identifier[referer] keyword[is] keyword[None] :
identifier[referer] = identifier[self] . identifier[_referer_url]
keyword[if] identifier[tokenUrl] keyword[is] keyword[None] :
identifier[tokenUrl] = identifier[self] . identifier[_token_url]
identifier[query_dict] ={ literal[string] : identifier[self] . identifier[_username] ,
literal[string] : identifier[self] . identifier[_password] ,
literal[string] : identifier[str] ( identifier[_defaultTokenExpiration] ),
literal[string] : identifier[referer] ,
literal[string] : literal[string] }
keyword[if] identifier[expiration] keyword[is] keyword[not] keyword[None] :
identifier[query_dict] [ literal[string] ]= identifier[str] ( identifier[expiration] )
identifier[self] . identifier[_token_created_on] = identifier[datetime] . identifier[datetime] . identifier[now] ()
identifier[token] = identifier[self] . identifier[_post] ( identifier[url] = identifier[tokenUrl] ,
identifier[param_dict] = identifier[query_dict] ,
identifier[securityHandler] = keyword[None] ,
identifier[proxy_port] = identifier[self] . identifier[_proxy_port] ,
identifier[proxy_url] = identifier[self] . identifier[_proxy_url] )
keyword[if] literal[string] keyword[in] identifier[token] :
identifier[self] . identifier[_token] = keyword[None]
keyword[return] identifier[token]
identifier[self] . identifier[_token_expires_on] = identifier[datetime] . identifier[datetime] . identifier[fromtimestamp] ( identifier[token] [ literal[string] ]/ literal[int] )- identifier[datetime] . identifier[timedelta] ( identifier[seconds] = literal[int] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[token] :
identifier[self] . identifier[_token] = keyword[None]
keyword[return] keyword[None]
keyword[else] :
identifier[httpPrefix] = identifier[self] . identifier[_url]
keyword[if] identifier[token] [ literal[string] ]== keyword[True] :
identifier[httpPrefix] = identifier[self] . identifier[_surl]
identifier[self] . identifier[_token] = identifier[token] [ literal[string] ]
keyword[return] identifier[token] [ literal[string] ], identifier[httpPrefix]
|
def _generateForTokenSecurity(self, username, password, referer=None, tokenUrl=None, expiration=None, proxy_url=None, proxy_port=None):
""" generates a token for a feature service """
if referer is None:
referer = self._referer_url # depends on [control=['if'], data=['referer']]
if tokenUrl is None:
tokenUrl = self._token_url # depends on [control=['if'], data=['tokenUrl']]
query_dict = {'username': self._username, 'password': self._password, 'expiration': str(_defaultTokenExpiration), 'referer': referer, 'f': 'json'}
if expiration is not None:
query_dict['expiration'] = str(expiration) # depends on [control=['if'], data=['expiration']]
self._token_created_on = datetime.datetime.now()
token = self._post(url=tokenUrl, param_dict=query_dict, securityHandler=None, proxy_port=self._proxy_port, proxy_url=self._proxy_url)
if 'error' in token:
self._token = None
return token # depends on [control=['if'], data=['token']]
self._token_expires_on = datetime.datetime.fromtimestamp(token['expires'] / 1000) - datetime.timedelta(seconds=10)
if 'token' not in token:
self._token = None
return None # depends on [control=['if'], data=[]]
else:
httpPrefix = self._url
if token['ssl'] == True:
httpPrefix = self._surl # depends on [control=['if'], data=[]]
self._token = token['token']
return (token['token'], httpPrefix)
|
def recur(obj, type_func_tuple_list=()):
    '''Recursively transform an object.

    ``type_func_tuple_list`` is a sequence of ``(exemplar, func)`` pairs:
    when ``obj`` has the same type as an exemplar value, ``func(obj)`` is
    returned. Otherwise lists, tuples and dicts are rebuilt with each
    element/value processed recursively; any other object is returned
    unchanged.
    '''
    for obj_type, func in type_func_tuple_list:
        # NOTE: matches by comparing against the type of the *exemplar*
        # value, i.e. pass an instance (e.g. 0 for ints), not a class.
        if type(obj) == type(obj_type):
            return func(obj)
    # by default, we would recur into list, tuple and dict
    if isinstance(obj, (list, tuple)):
        # Bug fixes: propagate type_func_tuple_list into recursive calls
        # (it was silently dropped) and return the *transformed* elements
        # for tuples (the old code returned ``tuple(obj)`` unchanged).
        n_obj = [recur(i, type_func_tuple_list) for i in obj]
        return n_obj if isinstance(obj, list) else tuple(n_obj)
    elif isinstance(obj, dict):
        return {k: recur(v, type_func_tuple_list) for k, v in obj.items()}
    return obj
|
def function[recur, parameter[obj, type_func_tuple_list]]:
constant[recuring dealing an object]
for taget[tuple[[<ast.Name object at 0x7da204565090>, <ast.Name object at 0x7da204566320>]]] in starred[name[type_func_tuple_list]] begin[:]
if compare[call[name[type], parameter[name[obj]]] equal[==] call[name[type], parameter[name[obj_type]]]] begin[:]
return[call[name[func], parameter[name[obj]]]]
if <ast.BoolOp object at 0x7da2054a5a20> begin[:]
variable[n_obj] assign[=] list[[]]
for taget[name[i]] in starred[name[obj]] begin[:]
call[name[n_obj].append, parameter[call[name[recur], parameter[name[i]]]]]
return[<ast.IfExp object at 0x7da2054a4160>]
return[name[obj]]
|
keyword[def] identifier[recur] ( identifier[obj] , identifier[type_func_tuple_list] =()):
literal[string]
keyword[for] identifier[obj_type] , identifier[func] keyword[in] identifier[type_func_tuple_list] :
keyword[if] identifier[type] ( identifier[obj] )== identifier[type] ( identifier[obj_type] ):
keyword[return] identifier[func] ( identifier[obj] )
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[list] ) keyword[or] identifier[isinstance] ( identifier[obj] , identifier[tuple] ):
identifier[n_obj] =[]
keyword[for] identifier[i] keyword[in] identifier[obj] :
identifier[n_obj] . identifier[append] ( identifier[recur] ( identifier[i] ))
keyword[return] identifier[n_obj] keyword[if] identifier[isinstance] ( identifier[obj] , identifier[list] ) keyword[else] identifier[tuple] ( identifier[obj] )
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[dict] ):
identifier[n_obj] ={}
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[obj] . identifier[items] ():
identifier[n_obj] [ identifier[k] ]= identifier[recur] ( identifier[v] )
keyword[return] identifier[n_obj]
keyword[return] identifier[obj]
|
def recur(obj, type_func_tuple_list=()):
"""recuring dealing an object"""
for (obj_type, func) in type_func_tuple_list:
if type(obj) == type(obj_type):
return func(obj) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# by default, we wolud recurring list, tuple and dict
if isinstance(obj, list) or isinstance(obj, tuple):
n_obj = []
for i in obj:
n_obj.append(recur(i)) # depends on [control=['for'], data=['i']]
return n_obj if isinstance(obj, list) else tuple(obj) # depends on [control=['if'], data=[]]
elif isinstance(obj, dict):
n_obj = {}
for (k, v) in obj.items():
n_obj[k] = recur(v) # depends on [control=['for'], data=[]]
return n_obj # depends on [control=['if'], data=[]]
return obj
|
def diffpow(self, x, rot=0):
    """Diffpow test objective function."""
    dim = len(x)
    if rot:
        x = rotate(x)
    # Exponents grow linearly from 2 (first coordinate) to 6 (last).
    exponents = 2. + 4. * np.arange(dim) / (dim - 1.)
    return (np.abs(x) ** exponents).sum() ** 0.5
|
def function[diffpow, parameter[self, x, rot]]:
constant[Diffpow test objective function]
variable[N] assign[=] call[name[len], parameter[name[x]]]
if name[rot] begin[:]
variable[x] assign[=] call[name[rotate], parameter[name[x]]]
return[binary_operation[call[name[sum], parameter[binary_operation[call[name[np].abs, parameter[name[x]]] ** binary_operation[constant[2.0] + binary_operation[binary_operation[constant[4.0] * call[name[np].arange, parameter[name[N]]]] / binary_operation[name[N] - constant[1.0]]]]]]] ** constant[0.5]]]
|
keyword[def] identifier[diffpow] ( identifier[self] , identifier[x] , identifier[rot] = literal[int] ):
literal[string]
identifier[N] = identifier[len] ( identifier[x] )
keyword[if] identifier[rot] :
identifier[x] = identifier[rotate] ( identifier[x] )
keyword[return] identifier[sum] ( identifier[np] . identifier[abs] ( identifier[x] )**( literal[int] + literal[int] * identifier[np] . identifier[arange] ( identifier[N] )/( identifier[N] - literal[int] )))** literal[int]
|
def diffpow(self, x, rot=0):
"""Diffpow test objective function"""
N = len(x)
if rot:
x = rotate(x) # depends on [control=['if'], data=[]]
return sum(np.abs(x) ** (2.0 + 4.0 * np.arange(N) / (N - 1.0))) ** 0.5
|
def get_active_threads_involving_all_participants(self, *participant_ids):
    """ Gets the threads where the specified participants are active and no one has left. """
    threads = (
        Thread.objects
        .exclude(participation__date_left__lte=now())
        .annotate(count_participants=Count('participants'))
        .filter(count_participants=len(participant_ids))
    )
    # Require every requested participant to be present on the thread.
    for pid in participant_ids:
        threads = threads.filter(participants__id=pid)
    return threads.distinct()
|
def function[get_active_threads_involving_all_participants, parameter[self]]:
constant[ Gets the threads where the specified participants are active and no one has left. ]
variable[query] assign[=] call[call[call[name[Thread].objects.exclude, parameter[]].annotate, parameter[]].filter, parameter[]]
for taget[name[participant_id]] in starred[name[participant_ids]] begin[:]
variable[query] assign[=] call[name[query].filter, parameter[]]
return[call[name[query].distinct, parameter[]]]
|
keyword[def] identifier[get_active_threads_involving_all_participants] ( identifier[self] ,* identifier[participant_ids] ):
literal[string]
identifier[query] = identifier[Thread] . identifier[objects] . identifier[exclude] ( identifier[participation__date_left__lte] = identifier[now] ()). identifier[annotate] ( identifier[count_participants] = identifier[Count] ( literal[string] )). identifier[filter] ( identifier[count_participants] = identifier[len] ( identifier[participant_ids] ))
keyword[for] identifier[participant_id] keyword[in] identifier[participant_ids] :
identifier[query] = identifier[query] . identifier[filter] ( identifier[participants__id] = identifier[participant_id] )
keyword[return] identifier[query] . identifier[distinct] ()
|
def get_active_threads_involving_all_participants(self, *participant_ids):
""" Gets the threads where the specified participants are active and no one has left. """
query = Thread.objects.exclude(participation__date_left__lte=now()).annotate(count_participants=Count('participants')).filter(count_participants=len(participant_ids))
for participant_id in participant_ids:
query = query.filter(participants__id=participant_id) # depends on [control=['for'], data=['participant_id']]
return query.distinct()
|
def _employer_phase(self):
'''Iterates through the employer bees and merges each with another
random bee (one value is moved in accordance with the second bee's
value); if the mutation performs better, the bee is moved to the new
position
'''
self._logger.log('debug', 'Employer bee phase')
modified = []
for bee in self._employers:
if self._processes <= 1:
new_values = self._merge_bee(bee)
self._move_bee(bee, new_values)
else:
modified.append((
bee,
self._pool.apply_async(self._merge_bee, [bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
|
def function[_employer_phase, parameter[self]]:
constant[Iterates through the employer bees and merges each with another
random bee (one value is moved in accordance with the second bee's
value); if the mutation performs better, the bee is moved to the new
position
]
call[name[self]._logger.log, parameter[constant[debug], constant[Employer bee phase]]]
variable[modified] assign[=] list[[]]
for taget[name[bee]] in starred[name[self]._employers] begin[:]
if compare[name[self]._processes less_or_equal[<=] constant[1]] begin[:]
variable[new_values] assign[=] call[name[self]._merge_bee, parameter[name[bee]]]
call[name[self]._move_bee, parameter[name[bee], name[new_values]]]
for taget[name[pair]] in starred[name[modified]] begin[:]
call[name[self]._move_bee, parameter[call[name[pair]][constant[0]], call[call[name[pair]][constant[1]].get, parameter[]]]]
|
keyword[def] identifier[_employer_phase] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_logger] . identifier[log] ( literal[string] , literal[string] )
identifier[modified] =[]
keyword[for] identifier[bee] keyword[in] identifier[self] . identifier[_employers] :
keyword[if] identifier[self] . identifier[_processes] <= literal[int] :
identifier[new_values] = identifier[self] . identifier[_merge_bee] ( identifier[bee] )
identifier[self] . identifier[_move_bee] ( identifier[bee] , identifier[new_values] )
keyword[else] :
identifier[modified] . identifier[append] ((
identifier[bee] ,
identifier[self] . identifier[_pool] . identifier[apply_async] ( identifier[self] . identifier[_merge_bee] ,[ identifier[bee] ])
))
keyword[for] identifier[pair] keyword[in] identifier[modified] :
identifier[self] . identifier[_move_bee] ( identifier[pair] [ literal[int] ], identifier[pair] [ literal[int] ]. identifier[get] ())
|
def _employer_phase(self):
"""Iterates through the employer bees and merges each with another
random bee (one value is moved in accordance with the second bee's
value); if the mutation performs better, the bee is moved to the new
position
"""
self._logger.log('debug', 'Employer bee phase')
modified = []
for bee in self._employers:
if self._processes <= 1:
new_values = self._merge_bee(bee)
self._move_bee(bee, new_values) # depends on [control=['if'], data=[]]
else:
modified.append((bee, self._pool.apply_async(self._merge_bee, [bee]))) # depends on [control=['for'], data=['bee']]
for pair in modified:
self._move_bee(pair[0], pair[1].get()) # depends on [control=['for'], data=['pair']]
|
def master_primary_name(self) -> Optional[str]:
    """
    Name of the node acting as primary for the master instance, or
    None when no primary is currently known.
    """
    primary = self.master_replica.primaryName
    return self.master_replica.getNodeName(primary) if primary else None
|
def function[master_primary_name, parameter[self]]:
constant[
Return the name of the primary node of the master instance
]
variable[master_primary_name] assign[=] name[self].master_replica.primaryName
if name[master_primary_name] begin[:]
return[call[name[self].master_replica.getNodeName, parameter[name[master_primary_name]]]]
return[constant[None]]
|
keyword[def] identifier[master_primary_name] ( identifier[self] )-> identifier[Optional] [ identifier[str] ]:
literal[string]
identifier[master_primary_name] = identifier[self] . identifier[master_replica] . identifier[primaryName]
keyword[if] identifier[master_primary_name] :
keyword[return] identifier[self] . identifier[master_replica] . identifier[getNodeName] ( identifier[master_primary_name] )
keyword[return] keyword[None]
|
def master_primary_name(self) -> Optional[str]:
"""
Return the name of the primary node of the master instance
"""
master_primary_name = self.master_replica.primaryName
if master_primary_name:
return self.master_replica.getNodeName(master_primary_name) # depends on [control=['if'], data=[]]
return None
|
def outputs_matching(outputs, patterns):
    '''Get the outputs from a network that match a pattern.

    Parameters
    ----------
    outputs : dict or sequence of (str, theano expression)
        Output expressions to filter for matches. If this is a dictionary, its
        ``items()`` will be processed for matches.
    patterns : str or sequence of str
        A sequence of glob-style patterns to match against. Any output
        matching any pattern in this sequence will be included in the match.
        A single string is treated as a one-element sequence.

    Yields
    ------
    matches : pair of str, theano expression
        Generates a sequence of (name, expression) pairs. The name is the name
        of the output that matched, and the expression is the symbolic output in
        the network graph.
    '''
    # ``basestring`` is Python-2 only and raised NameError on Python 3;
    # ``str`` covers the same single-pattern convenience case.
    if isinstance(patterns, str):
        patterns = (patterns, )
    if isinstance(outputs, dict):
        outputs = outputs.items()
    for name, expr in outputs:
        # Yield each output at most once, on the first matching pattern.
        if any(fnmatch.fnmatch(name, pattern) for pattern in patterns):
            yield name, expr
|
def function[outputs_matching, parameter[outputs, patterns]]:
constant[Get the outputs from a network that match a pattern.
Parameters
----------
outputs : dict or sequence of (str, theano expression)
Output expressions to filter for matches. If this is a dictionary, its
``items()`` will be processed for matches.
patterns : sequence of str
A sequence of glob-style patterns to match against. Any parameter
matching any pattern in this sequence will be included in the match.
Yields
------
matches : pair of str, theano expression
Generates a sequence of (name, expression) pairs. The name is the name
of the output that matched, and the expression is the symbolic output in
the network graph.
]
if call[name[isinstance], parameter[name[patterns], name[basestring]]] begin[:]
variable[patterns] assign[=] tuple[[<ast.Name object at 0x7da1b0217a30>]]
if call[name[isinstance], parameter[name[outputs], name[dict]]] begin[:]
variable[outputs] assign[=] call[name[outputs].items, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b0215960>, <ast.Name object at 0x7da1b0216050>]]] in starred[name[outputs]] begin[:]
for taget[name[pattern]] in starred[name[patterns]] begin[:]
if call[name[fnmatch].fnmatch, parameter[name[name], name[pattern]]] begin[:]
<ast.Yield object at 0x7da1b02161a0>
break
|
keyword[def] identifier[outputs_matching] ( identifier[outputs] , identifier[patterns] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[patterns] , identifier[basestring] ):
identifier[patterns] =( identifier[patterns] ,)
keyword[if] identifier[isinstance] ( identifier[outputs] , identifier[dict] ):
identifier[outputs] = identifier[outputs] . identifier[items] ()
keyword[for] identifier[name] , identifier[expr] keyword[in] identifier[outputs] :
keyword[for] identifier[pattern] keyword[in] identifier[patterns] :
keyword[if] identifier[fnmatch] . identifier[fnmatch] ( identifier[name] , identifier[pattern] ):
keyword[yield] identifier[name] , identifier[expr]
keyword[break]
|
def outputs_matching(outputs, patterns):
"""Get the outputs from a network that match a pattern.
Parameters
----------
outputs : dict or sequence of (str, theano expression)
Output expressions to filter for matches. If this is a dictionary, its
``items()`` will be processed for matches.
patterns : sequence of str
A sequence of glob-style patterns to match against. Any parameter
matching any pattern in this sequence will be included in the match.
Yields
------
matches : pair of str, theano expression
Generates a sequence of (name, expression) pairs. The name is the name
of the output that matched, and the expression is the symbolic output in
the network graph.
"""
if isinstance(patterns, basestring):
patterns = (patterns,) # depends on [control=['if'], data=[]]
if isinstance(outputs, dict):
outputs = outputs.items() # depends on [control=['if'], data=[]]
for (name, expr) in outputs:
for pattern in patterns:
if fnmatch.fnmatch(name, pattern):
yield (name, expr)
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pattern']] # depends on [control=['for'], data=[]]
|
def qteKillMiniApplet(self):
    """
    Remove the mini applet.

    This is a no-op when no mini applet is currently active. Otherwise
    the applet is notified (``qteToBeKilled``), removed from the applet
    list, closed, and scheduled for deletion; focus is handed back to
    another applet in the calling window when one exists.

    If a different applet is to be restored/focused then call
    ``qteMakeAppletActive`` for that applet *after* calling this
    method.
    |Args|
    * **None**
    |Returns|
    * **None**
    |Raises|
    * **None**
    """
    # Sanity check: is the handle valid?
    if self._qteMiniApplet is None:
        return
    # Sanity check: is it really a mini applet?
    if not self.qteIsMiniApplet(self._qteMiniApplet):
        msg = ('Mini applet does not have its mini applet flag set.'
               ' Ignored.')
        self.qteLogger.warning(msg)
    if self._qteMiniApplet not in self._qteAppletList:
        # Something is wrong because the mini applet is not part
        # of the applet list.
        msg = 'Custom mini applet not in applet list --> Bug.'
        self.qteLogger.warning(msg)
    else:
        # Inform the mini applet that it is about to be killed.
        try:
            self._qteMiniApplet.qteToBeKilled()
        except Exception:
            # A faulty user-supplied callback must not abort the
            # teardown; log it and continue.
            msg = 'qteToBeKilledRoutine is faulty'
            self.qteLogger.exception(msg, exc_info=True, stack_info=True)
        # Shorthands to calling window.
        win = self._qteMiniApplet._qteCallingWindow
        # We need to move the focus from the mini applet back to a
        # regular applet. Therefore, first look for the next
        # visible applet in the current window (ie. the last one
        # that was made active).
        app = self.qteNextApplet(windowObj=win)
        if app is not None:
            # Found another (visible or invisible) applet --> make
            # it active/visible.
            self.qteMakeAppletActive(app)
        else:
            # No visible applet available in this window --> look
            # for an invisible one.
            app = self.qteNextApplet(skipInvisible=False, skipVisible=True)
            if app is not None:
                # Found an invisible applet --> make it
                # active/visible.
                self.qteMakeAppletActive(app)
            else:
                # There is no other visible applet in this window.
                # The focus manager will therefore make a new applet
                # active.
                self._qteActiveApplet = None
        self._qteAppletList.remove(self._qteMiniApplet)
    # Close the mini applet applet and schedule it for deletion.
    # deleteLater() defers destruction until control returns to the Qt
    # event loop, so pending signals from the applet remain safe.
    self._qteMiniApplet.close()
    self._qteMiniApplet.deleteLater()
    # Clear the handle to the mini applet.
    self._qteMiniApplet = None
|
def function[qteKillMiniApplet, parameter[self]]:
constant[
Remove the mini applet.
If a different applet is to be restored/focused then call
``qteMakeAppletActive`` for that applet *after* calling this
method.
|Args|
* **None**
|Returns|
* **None**
|Raises|
* **None**
]
if compare[name[self]._qteMiniApplet is constant[None]] begin[:]
return[None]
if <ast.UnaryOp object at 0x7da20e9b1e10> begin[:]
variable[msg] assign[=] constant[Mini applet does not have its mini applet flag set. Ignored.]
call[name[self].qteLogger.warning, parameter[name[msg]]]
if compare[name[self]._qteMiniApplet <ast.NotIn object at 0x7da2590d7190> name[self]._qteAppletList] begin[:]
variable[msg] assign[=] constant[Custom mini applet not in applet list --> Bug.]
call[name[self].qteLogger.warning, parameter[name[msg]]]
call[name[self]._qteMiniApplet.close, parameter[]]
call[name[self]._qteMiniApplet.deleteLater, parameter[]]
name[self]._qteMiniApplet assign[=] constant[None]
|
keyword[def] identifier[qteKillMiniApplet] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_qteMiniApplet] keyword[is] keyword[None] :
keyword[return]
keyword[if] keyword[not] identifier[self] . identifier[qteIsMiniApplet] ( identifier[self] . identifier[_qteMiniApplet] ):
identifier[msg] =( literal[string]
literal[string] )
identifier[self] . identifier[qteLogger] . identifier[warning] ( identifier[msg] )
keyword[if] identifier[self] . identifier[_qteMiniApplet] keyword[not] keyword[in] identifier[self] . identifier[_qteAppletList] :
identifier[msg] = literal[string]
identifier[self] . identifier[qteLogger] . identifier[warning] ( identifier[msg] )
keyword[else] :
keyword[try] :
identifier[self] . identifier[_qteMiniApplet] . identifier[qteToBeKilled] ()
keyword[except] identifier[Exception] :
identifier[msg] = literal[string]
identifier[self] . identifier[qteLogger] . identifier[exception] ( identifier[msg] , identifier[exc_info] = keyword[True] , identifier[stack_info] = keyword[True] )
identifier[win] = identifier[self] . identifier[_qteMiniApplet] . identifier[_qteCallingWindow]
identifier[app] = identifier[self] . identifier[qteNextApplet] ( identifier[windowObj] = identifier[win] )
keyword[if] identifier[app] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[qteMakeAppletActive] ( identifier[app] )
keyword[else] :
identifier[app] = identifier[self] . identifier[qteNextApplet] ( identifier[skipInvisible] = keyword[False] , identifier[skipVisible] = keyword[True] )
keyword[if] identifier[app] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[qteMakeAppletActive] ( identifier[app] )
keyword[else] :
identifier[self] . identifier[_qteActiveApplet] = keyword[None]
identifier[self] . identifier[_qteAppletList] . identifier[remove] ( identifier[self] . identifier[_qteMiniApplet] )
identifier[self] . identifier[_qteMiniApplet] . identifier[close] ()
identifier[self] . identifier[_qteMiniApplet] . identifier[deleteLater] ()
identifier[self] . identifier[_qteMiniApplet] = keyword[None]
|
def qteKillMiniApplet(self):
"""
Remove the mini applet.
If a different applet is to be restored/focused then call
``qteMakeAppletActive`` for that applet *after* calling this
method.
|Args|
* **None**
|Returns|
* **None**
|Raises|
* **None**
"""
# Sanity check: is the handle valid?
if self._qteMiniApplet is None:
return # depends on [control=['if'], data=[]]
# Sanity check: is it really a mini applet?
if not self.qteIsMiniApplet(self._qteMiniApplet):
msg = 'Mini applet does not have its mini applet flag set. Ignored.'
self.qteLogger.warning(msg) # depends on [control=['if'], data=[]]
if self._qteMiniApplet not in self._qteAppletList:
# Something is wrong because the mini applet is not part
# of the applet list.
msg = 'Custom mini applet not in applet list --> Bug.'
self.qteLogger.warning(msg) # depends on [control=['if'], data=[]]
else:
# Inform the mini applet that it is about to be killed.
try:
self._qteMiniApplet.qteToBeKilled() # depends on [control=['try'], data=[]]
except Exception:
msg = 'qteToBeKilledRoutine is faulty'
self.qteLogger.exception(msg, exc_info=True, stack_info=True) # depends on [control=['except'], data=[]]
# Shorthands to calling window.
win = self._qteMiniApplet._qteCallingWindow
# We need to move the focus from the mini applet back to a
# regular applet. Therefore, first look for the next
# visible applet in the current window (ie. the last one
# that was made active).
app = self.qteNextApplet(windowObj=win)
if app is not None:
# Found another (visible or invisible) applet --> make
# it active/visible.
self.qteMakeAppletActive(app) # depends on [control=['if'], data=['app']]
else:
# No visible applet available in this window --> look
# for an invisible one.
app = self.qteNextApplet(skipInvisible=False, skipVisible=True)
if app is not None:
# Found an invisible applet --> make it
# active/visible.
self.qteMakeAppletActive(app) # depends on [control=['if'], data=['app']]
else:
# There is no other visible applet in this window.
# The focus manager will therefore make a new applet
# active.
self._qteActiveApplet = None
self._qteAppletList.remove(self._qteMiniApplet)
# Close the mini applet applet and schedule it for deletion.
self._qteMiniApplet.close()
self._qteMiniApplet.deleteLater()
# Clear the handle to the mini applet.
self._qteMiniApplet = None
|
async def user_info(self, params=None, **kwargs):
    """Fetch the Facebook profile, forcing the required ``fields`` param.

    Facebook's Graph API only returns fields that are explicitly
    requested, so the fixed field list is injected into *params* before
    delegating to the parent client's ``user_info``.
    """
    if not params:
        params = {}
    params['fields'] = 'id,email,first_name,last_name,name,link,locale,' \
                       'gender,location'
    return await super(FacebookClient, self).user_info(params=params, **kwargs)
|
<ast.AsyncFunctionDef object at 0x7da1b0665e70>
|
keyword[async] keyword[def] identifier[user_info] ( identifier[self] , identifier[params] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[params] = identifier[params] keyword[or] {}
identifier[params] [
literal[string] ]= literal[string] literal[string]
keyword[return] keyword[await] identifier[super] ( identifier[FacebookClient] , identifier[self] ). identifier[user_info] ( identifier[params] = identifier[params] ,** identifier[kwargs] )
|
async def user_info(self, params=None, **kwargs):
"""Facebook required fields-param."""
params = params or {}
params['fields'] = 'id,email,first_name,last_name,name,link,locale,gender,location'
return await super(FacebookClient, self).user_info(params=params, **kwargs)
|
def batch_process_data(file_roots, **kwargs):
    """Process output from many nested sampling runs in parallel with optional
    error handling and caching.
    The result can be cached using the 'save_name', 'save' and 'load' kwargs
    (by default this is not done). See save_load_result docstring for more
    details.
    Remaining kwargs passed to parallel_utils.parallel_apply (see its
    docstring for more details).
    Parameters
    ----------
    file_roots: list of strs
        file_roots for the runs to load.
    base_dir: str, optional
        path to directory containing files.
    process_func: function, optional
        function to use to process the data.
    func_kwargs: dict, optional
        additional keyword arguments for process_func.
    errors_to_handle: error or tuple of errors, optional
        which errors to catch when they occur in processing rather than
        raising.
    save_name: str or None, optional
        See nestcheck.io_utils.save_load_result.
    save: bool, optional
        See nestcheck.io_utils.save_load_result.
    load: bool, optional
        See nestcheck.io_utils.save_load_result.
    overwrite_existing: bool, optional
        See nestcheck.io_utils.save_load_result.
    Returns
    -------
    list of ns_run dicts
        List of nested sampling runs in dict format (see the module
        docstring for more details).
    """
    base_dir = kwargs.pop('base_dir', 'chains')
    process_func = kwargs.pop('process_func', process_polychord_run)
    func_kwargs = kwargs.pop('func_kwargs', {})
    func_kwargs['errors_to_handle'] = kwargs.pop('errors_to_handle', ())
    data = nestcheck.parallel_utils.parallel_apply(
        process_error_helper, file_roots, func_args=(base_dir, process_func),
        func_kwargs=func_kwargs, **kwargs)
    # Sort processed runs into the same order as file_roots (as parallel_apply
    # does not preserve order)
    data = sorted(data,
                  key=lambda x: file_roots.index(x['output']['file_root']))
    # Group the (zero based) indexes of failed runs by error name.
    errors = {}
    for i, run in enumerate(data):
        if 'error' in run:
            errors.setdefault(run['error'], []).append(i)
    # Summarise each error type; list the failing indexes unless every
    # single file failed with the same error.
    for error_name, index_list in errors.items():
        message = (error_name + ' processing ' + str(len(index_list)) + ' / '
                   + str(len(file_roots)) + ' files')
        if len(index_list) != len(file_roots):
            message += ('. Roots with errors have (zero based) indexes: '
                        + str(index_list))
        print(message)
    # Return runs which did not have errors
    return [run for run in data if 'error' not in run]
|
def function[batch_process_data, parameter[file_roots]]:
constant[Process output from many nested sampling runs in parallel with optional
error handling and caching.
The result can be cached using the 'save_name', 'save' and 'load' kwargs
(by default this is not done). See save_load_result docstring for more
details.
Remaining kwargs passed to parallel_utils.parallel_apply (see its
docstring for more details).
Parameters
----------
file_roots: list of strs
file_roots for the runs to load.
base_dir: str, optional
path to directory containing files.
process_func: function, optional
function to use to process the data.
func_kwargs: dict, optional
additional keyword arguments for process_func.
errors_to_handle: error or tuple of errors, optional
which errors to catch when they occur in processing rather than
raising.
save_name: str or None, optional
See nestcheck.io_utils.save_load_result.
save: bool, optional
See nestcheck.io_utils.save_load_result.
load: bool, optional
See nestcheck.io_utils.save_load_result.
overwrite_existing: bool, optional
See nestcheck.io_utils.save_load_result.
Returns
-------
list of ns_run dicts
List of nested sampling runs in dict format (see the module
docstring for more details).
]
variable[base_dir] assign[=] call[name[kwargs].pop, parameter[constant[base_dir], constant[chains]]]
variable[process_func] assign[=] call[name[kwargs].pop, parameter[constant[process_func], name[process_polychord_run]]]
variable[func_kwargs] assign[=] call[name[kwargs].pop, parameter[constant[func_kwargs], dictionary[[], []]]]
call[name[func_kwargs]][constant[errors_to_handle]] assign[=] call[name[kwargs].pop, parameter[constant[errors_to_handle], tuple[[]]]]
variable[data] assign[=] call[name[nestcheck].parallel_utils.parallel_apply, parameter[name[process_error_helper], name[file_roots]]]
variable[data] assign[=] call[name[sorted], parameter[name[data]]]
variable[errors] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da204347130>, <ast.Name object at 0x7da204345900>]]] in starred[call[name[enumerate], parameter[name[data]]]] begin[:]
if compare[constant[error] in name[run]] begin[:]
<ast.Try object at 0x7da204346620>
for taget[tuple[[<ast.Name object at 0x7da204344f10>, <ast.Name object at 0x7da204345d50>]]] in starred[call[name[errors].items, parameter[]]] begin[:]
variable[message] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[error_name] + constant[ processing ]] + call[name[str], parameter[call[name[len], parameter[name[index_list]]]]]] + constant[ / ]] + call[name[str], parameter[call[name[len], parameter[name[file_roots]]]]]] + constant[ files]]
if compare[call[name[len], parameter[name[index_list]]] not_equal[!=] call[name[len], parameter[name[file_roots]]]] begin[:]
<ast.AugAssign object at 0x7da2054a7760>
call[name[print], parameter[name[message]]]
return[<ast.ListComp object at 0x7da2054a62f0>]
|
keyword[def] identifier[batch_process_data] ( identifier[file_roots] ,** identifier[kwargs] ):
literal[string]
identifier[base_dir] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[string] )
identifier[process_func] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[process_polychord_run] )
identifier[func_kwargs] = identifier[kwargs] . identifier[pop] ( literal[string] ,{})
identifier[func_kwargs] [ literal[string] ]= identifier[kwargs] . identifier[pop] ( literal[string] ,())
identifier[data] = identifier[nestcheck] . identifier[parallel_utils] . identifier[parallel_apply] (
identifier[process_error_helper] , identifier[file_roots] , identifier[func_args] =( identifier[base_dir] , identifier[process_func] ),
identifier[func_kwargs] = identifier[func_kwargs] ,** identifier[kwargs] )
identifier[data] = identifier[sorted] ( identifier[data] ,
identifier[key] = keyword[lambda] identifier[x] : identifier[file_roots] . identifier[index] ( identifier[x] [ literal[string] ][ literal[string] ]))
identifier[errors] ={}
keyword[for] identifier[i] , identifier[run] keyword[in] identifier[enumerate] ( identifier[data] ):
keyword[if] literal[string] keyword[in] identifier[run] :
keyword[try] :
identifier[errors] [ identifier[run] [ literal[string] ]]. identifier[append] ( identifier[i] )
keyword[except] identifier[KeyError] :
identifier[errors] [ identifier[run] [ literal[string] ]]=[ identifier[i] ]
keyword[for] identifier[error_name] , identifier[index_list] keyword[in] identifier[errors] . identifier[items] ():
identifier[message] =( identifier[error_name] + literal[string] + identifier[str] ( identifier[len] ( identifier[index_list] ))+ literal[string]
+ identifier[str] ( identifier[len] ( identifier[file_roots] ))+ literal[string] )
keyword[if] identifier[len] ( identifier[index_list] )!= identifier[len] ( identifier[file_roots] ):
identifier[message] +=( literal[string]
+ identifier[str] ( identifier[index_list] ))
identifier[print] ( identifier[message] )
keyword[return] [ identifier[run] keyword[for] identifier[run] keyword[in] identifier[data] keyword[if] literal[string] keyword[not] keyword[in] identifier[run] ]
|
def batch_process_data(file_roots, **kwargs):
"""Process output from many nested sampling runs in parallel with optional
error handling and caching.
The result can be cached using the 'save_name', 'save' and 'load' kwargs
(by default this is not done). See save_load_result docstring for more
details.
Remaining kwargs passed to parallel_utils.parallel_apply (see its
docstring for more details).
Parameters
----------
file_roots: list of strs
file_roots for the runs to load.
base_dir: str, optional
path to directory containing files.
process_func: function, optional
function to use to process the data.
func_kwargs: dict, optional
additional keyword arguments for process_func.
errors_to_handle: error or tuple of errors, optional
which errors to catch when they occur in processing rather than
raising.
save_name: str or None, optional
See nestcheck.io_utils.save_load_result.
save: bool, optional
See nestcheck.io_utils.save_load_result.
load: bool, optional
See nestcheck.io_utils.save_load_result.
overwrite_existing: bool, optional
See nestcheck.io_utils.save_load_result.
Returns
-------
list of ns_run dicts
List of nested sampling runs in dict format (see the module
docstring for more details).
"""
base_dir = kwargs.pop('base_dir', 'chains')
process_func = kwargs.pop('process_func', process_polychord_run)
func_kwargs = kwargs.pop('func_kwargs', {})
func_kwargs['errors_to_handle'] = kwargs.pop('errors_to_handle', ())
data = nestcheck.parallel_utils.parallel_apply(process_error_helper, file_roots, func_args=(base_dir, process_func), func_kwargs=func_kwargs, **kwargs)
# Sort processed runs into the same order as file_roots (as parallel_apply
# does not preserve order)
data = sorted(data, key=lambda x: file_roots.index(x['output']['file_root']))
# Extract error information and print
errors = {}
for (i, run) in enumerate(data):
if 'error' in run:
try:
errors[run['error']].append(i) # depends on [control=['try'], data=[]]
except KeyError:
errors[run['error']] = [i] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['run']] # depends on [control=['for'], data=[]]
for (error_name, index_list) in errors.items():
message = error_name + ' processing ' + str(len(index_list)) + ' / ' + str(len(file_roots)) + ' files'
if len(index_list) != len(file_roots):
message += '. Roots with errors have (zero based) indexes: ' + str(index_list) # depends on [control=['if'], data=[]]
print(message) # depends on [control=['for'], data=[]]
# Return runs which did not have errors
return [run for run in data if 'error' not in run]
|
def del_object(self, obj):
    """Delete document ``obj['_id']`` of type ``obj['_type']`` from
    Elasticsearch index ``obj['_index']``.

    Parameters
    ----------
    obj : dict
        Must contain non-empty '_index', '_id' and '_type' entries.

    Raises
    ------
    Exception
        With message 'Invalid Object' if any required entry is None or
        the empty string.
    """
    # All three identifiers are mandatory; reject None / empty values
    # before opening a connection.
    for key in ('_index', '_id', '_type'):
        if obj[key] is None or obj[key] == "":
            raise Exception("Invalid Object")
    self.connect_es()
    self.es.delete(index=obj['_index'],
                   id=obj['_id'],
                   doc_type=obj['_type'])
|
def function[del_object, parameter[self, obj]]:
constant[Debug deletes obj of obj[_type] with id of obj['_id']]
if <ast.BoolOp object at 0x7da20c795840> begin[:]
<ast.Raise object at 0x7da20c796230>
if <ast.BoolOp object at 0x7da20c796830> begin[:]
<ast.Raise object at 0x7da20c796680>
if <ast.BoolOp object at 0x7da20c794850> begin[:]
<ast.Raise object at 0x7da20c795bd0>
call[name[self].connect_es, parameter[]]
call[name[self].es.delete, parameter[]]
|
keyword[def] identifier[del_object] ( identifier[self] , identifier[obj] ):
literal[string]
keyword[if] identifier[obj] [ literal[string] ] keyword[is] keyword[None] keyword[or] identifier[obj] [ literal[string] ]== literal[string] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[obj] [ literal[string] ] keyword[is] keyword[None] keyword[or] identifier[obj] [ literal[string] ]== literal[string] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[obj] [ literal[string] ] keyword[is] keyword[None] keyword[or] identifier[obj] [ literal[string] ]== literal[string] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[self] . identifier[connect_es] ()
identifier[self] . identifier[es] . identifier[delete] ( identifier[index] = identifier[obj] [ literal[string] ],
identifier[id] = identifier[obj] [ literal[string] ],
identifier[doc_type] = identifier[obj] [ literal[string] ])
|
def del_object(self, obj):
"""Debug deletes obj of obj[_type] with id of obj['_id']"""
if obj['_index'] is None or obj['_index'] == '':
raise Exception('Invalid Object') # depends on [control=['if'], data=[]]
if obj['_id'] is None or obj['_id'] == '':
raise Exception('Invalid Object') # depends on [control=['if'], data=[]]
if obj['_type'] is None or obj['_type'] == '':
raise Exception('Invalid Object') # depends on [control=['if'], data=[]]
self.connect_es()
self.es.delete(index=obj['_index'], id=obj['_id'], doc_type=obj['_type'])
|
def str_dict(some_dict):
    """Convert dict of ascii str/unicode to dict of str, if necessary"""
    converted = {}
    for key, value in some_dict.items():
        converted[str(key)] = str(value)
    return converted
|
def function[str_dict, parameter[some_dict]]:
constant[Convert dict of ascii str/unicode to dict of str, if necessary]
return[<ast.DictComp object at 0x7da2044c3e80>]
|
keyword[def] identifier[str_dict] ( identifier[some_dict] ):
literal[string]
keyword[return] { identifier[str] ( identifier[k] ): identifier[str] ( identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[some_dict] . identifier[items] ()}
|
def str_dict(some_dict):
"""Convert dict of ascii str/unicode to dict of str, if necessary"""
return {str(k): str(v) for (k, v) in some_dict.items()}
|
def annotate(self, text, lang=None, customParams=None):
    """Identify the entities and non-entities mentioned in *text*.

    @param text: input text to annotate
    @param lang: language of the provided document (ISO2 or ISO3 code);
        when None the service auto-detects the language
    @param customParams: None or a dict of extra parameters forwarded to
        the annotation service
    @returns: dict
    """
    request_params = {"lang": lang, "text": text}
    if customParams:
        request_params.update(customParams)
    return self._er.jsonRequestAnalytics("/api/v1/annotate", request_params)
|
def function[annotate, parameter[self, text, lang, customParams]]:
constant[
identify the list of entities and nonentities mentioned in the text
@param text: input text to annotate
@param lang: language of the provided document (can be an ISO2 or ISO3 code). If None is provided, the language will be automatically detected
@param customParams: None or a dict with custom parameters to send to the annotation service
@returns: dict
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da18f811420>, <ast.Constant object at 0x7da18f810130>], [<ast.Name object at 0x7da18f813d00>, <ast.Name object at 0x7da18f812950>]]
if name[customParams] begin[:]
call[name[params].update, parameter[name[customParams]]]
return[call[name[self]._er.jsonRequestAnalytics, parameter[constant[/api/v1/annotate], name[params]]]]
|
keyword[def] identifier[annotate] ( identifier[self] , identifier[text] , identifier[lang] = keyword[None] , identifier[customParams] = keyword[None] ):
literal[string]
identifier[params] ={ literal[string] : identifier[lang] , literal[string] : identifier[text] }
keyword[if] identifier[customParams] :
identifier[params] . identifier[update] ( identifier[customParams] )
keyword[return] identifier[self] . identifier[_er] . identifier[jsonRequestAnalytics] ( literal[string] , identifier[params] )
|
def annotate(self, text, lang=None, customParams=None):
"""
identify the list of entities and nonentities mentioned in the text
@param text: input text to annotate
@param lang: language of the provided document (can be an ISO2 or ISO3 code). If None is provided, the language will be automatically detected
@param customParams: None or a dict with custom parameters to send to the annotation service
@returns: dict
"""
params = {'lang': lang, 'text': text}
if customParams:
params.update(customParams) # depends on [control=['if'], data=[]]
return self._er.jsonRequestAnalytics('/api/v1/annotate', params)
|
def rasterize(vectorobject, reference, outname=None, burn_values=1, expressions=None, nodata=0, append=False):
    """
    rasterize a vector object
    Parameters
    ----------
    vectorobject: Vector
        the vector object to be rasterized
    reference: Raster
        a reference Raster object to retrieve geo information and extent from
    outname: str or None
        the name of the GeoTiff output file; if None, an in-memory object of type :class:`Raster` is returned and
        parameter outname is ignored
    burn_values: int or list
        the values to be written to the raster file
    expressions: list
        SQL expressions to filter the vector object by attributes
    nodata: int
        the nodata value of the target raster file
    append: bool
        if the output file already exists, update this file with new rasterized values?
        If True and the output file exists, parameters `reference` and `nodata` are ignored.
    Returns
    -------
    Raster or None
        if outname is `None`, a raster object pointing to an in-memory dataset else `None`
    Example
    -------
    >>> from spatialist import Vector, Raster, rasterize
    >>> vec = Vector('source.shp')
    >>> ref = Raster('reference.tif')
    >>> outname = 'target.tif'
    >>> expressions = ['ATTRIBUTE=1', 'ATTRIBUTE=2']
    >>> burn_values = [1, 2]
    >>> rasterize(vec, reference, outname, burn_values, expressions)
    """
    if expressions is None:
        expressions = ['']
    if isinstance(burn_values, (int, float)):
        burn_values = [burn_values]
    if len(expressions) != len(burn_values):
        raise RuntimeError('expressions and burn_values of different length')
    # Validate every attribute filter up front so we fail before creating
    # or modifying any raster file.
    failed = []
    for exp in expressions:
        try:
            vectorobject.layer.SetAttributeFilter(exp)
        except RuntimeError:
            failed.append(exp)
    if len(failed) > 0:
        # join with '", "' so each failed expression is individually quoted
        # (the previous separator '", ' produced mismatched quotes)
        raise RuntimeError('failed to set the following attribute filter(s): ["{}"]'.format('", "'.join(failed)))
    if append and outname is not None and os.path.isfile(outname):
        # update the existing file in place; reference and nodata are ignored
        target_ds = gdal.Open(outname, GA_Update)
    else:
        if not isinstance(reference, Raster):
            raise RuntimeError("parameter 'reference' must be of type Raster")
        # GTiff file on disk if a name was given, otherwise an in-memory dataset
        if outname is not None:
            target_ds = gdal.GetDriverByName('GTiff').Create(outname, reference.cols, reference.rows, 1, gdal.GDT_Byte)
        else:
            target_ds = gdal.GetDriverByName('MEM').Create('', reference.cols, reference.rows, 1, gdal.GDT_Byte)
        target_ds.SetGeoTransform(reference.raster.GetGeoTransform())
        target_ds.SetProjection(reference.raster.GetProjection())
        band = target_ds.GetRasterBand(1)
        band.SetNoDataValue(nodata)
        band.FlushCache()
        band = None
    # burn each value into the features selected by its matching expression
    for expression, value in zip(expressions, burn_values):
        vectorobject.layer.SetAttributeFilter(expression)
        gdal.RasterizeLayer(target_ds, [1], vectorobject.layer, burn_values=[value])
    # reset the filter so the vector object is left unmodified for the caller
    vectorobject.layer.SetAttributeFilter('')
    if outname is None:
        return Raster(target_ds)
    else:
        # dereference the dataset to flush and close the GeoTiff file
        target_ds = None
|
def function[rasterize, parameter[vectorobject, reference, outname, burn_values, expressions, nodata, append]]:
constant[
rasterize a vector object
Parameters
----------
vectorobject: Vector
the vector object to be rasterized
reference: Raster
a reference Raster object to retrieve geo information and extent from
outname: str or None
the name of the GeoTiff output file; if None, an in-memory object of type :class:`Raster` is returned and
parameter outname is ignored
burn_values: int or list
the values to be written to the raster file
expressions: list
SQL expressions to filter the vector object by attributes
nodata: int
the nodata value of the target raster file
append: bool
if the output file already exists, update this file with new rasterized values?
If True and the output file exists, parameters `reference` and `nodata` are ignored.
Returns
-------
Raster or None
if outname is `None`, a raster object pointing to an in-memory dataset else `None`
Example
-------
>>> from spatialist import Vector, Raster, rasterize
>>> vec = Vector('source.shp')
>>> ref = Raster('reference.tif')
>>> outname = 'target.tif'
>>> expressions = ['ATTRIBUTE=1', 'ATTRIBUTE=2']
>>> burn_values = [1, 2]
>>> rasterize(vec, reference, outname, burn_values, expressions)
]
if compare[name[expressions] is constant[None]] begin[:]
variable[expressions] assign[=] list[[<ast.Constant object at 0x7da20e957fd0>]]
if call[name[isinstance], parameter[name[burn_values], tuple[[<ast.Name object at 0x7da20e954220>, <ast.Name object at 0x7da20e956380>]]]] begin[:]
variable[burn_values] assign[=] list[[<ast.Name object at 0x7da20e9556c0>]]
if compare[call[name[len], parameter[name[expressions]]] not_equal[!=] call[name[len], parameter[name[burn_values]]]] begin[:]
<ast.Raise object at 0x7da20e957670>
variable[failed] assign[=] list[[]]
for taget[name[exp]] in starred[name[expressions]] begin[:]
<ast.Try object at 0x7da20e9545e0>
if compare[call[name[len], parameter[name[failed]]] greater[>] constant[0]] begin[:]
<ast.Raise object at 0x7da20e9557e0>
if <ast.BoolOp object at 0x7da20e956530> begin[:]
variable[target_ds] assign[=] call[name[gdal].Open, parameter[name[outname], name[GA_Update]]]
for taget[tuple[[<ast.Name object at 0x7da20e957400>, <ast.Name object at 0x7da20e954b50>]]] in starred[call[name[zip], parameter[name[expressions], name[burn_values]]]] begin[:]
call[name[vectorobject].layer.SetAttributeFilter, parameter[name[expression]]]
call[name[gdal].RasterizeLayer, parameter[name[target_ds], list[[<ast.Constant object at 0x7da20e954a60>]], name[vectorobject].layer]]
call[name[vectorobject].layer.SetAttributeFilter, parameter[constant[]]]
if compare[name[outname] is constant[None]] begin[:]
return[call[name[Raster], parameter[name[target_ds]]]]
|
keyword[def] identifier[rasterize] ( identifier[vectorobject] , identifier[reference] , identifier[outname] = keyword[None] , identifier[burn_values] = literal[int] , identifier[expressions] = keyword[None] , identifier[nodata] = literal[int] , identifier[append] = keyword[False] ):
literal[string]
keyword[if] identifier[expressions] keyword[is] keyword[None] :
identifier[expressions] =[ literal[string] ]
keyword[if] identifier[isinstance] ( identifier[burn_values] ,( identifier[int] , identifier[float] )):
identifier[burn_values] =[ identifier[burn_values] ]
keyword[if] identifier[len] ( identifier[expressions] )!= identifier[len] ( identifier[burn_values] ):
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[failed] =[]
keyword[for] identifier[exp] keyword[in] identifier[expressions] :
keyword[try] :
identifier[vectorobject] . identifier[layer] . identifier[SetAttributeFilter] ( identifier[exp] )
keyword[except] identifier[RuntimeError] :
identifier[failed] . identifier[append] ( identifier[exp] )
keyword[if] identifier[len] ( identifier[failed] )> literal[int] :
keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[failed] )))
keyword[if] identifier[append] keyword[and] identifier[outname] keyword[is] keyword[not] keyword[None] keyword[and] identifier[os] . identifier[path] . identifier[isfile] ( identifier[outname] ):
identifier[target_ds] = identifier[gdal] . identifier[Open] ( identifier[outname] , identifier[GA_Update] )
keyword[else] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[reference] , identifier[Raster] ):
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[if] identifier[outname] keyword[is] keyword[not] keyword[None] :
identifier[target_ds] = identifier[gdal] . identifier[GetDriverByName] ( literal[string] ). identifier[Create] ( identifier[outname] , identifier[reference] . identifier[cols] , identifier[reference] . identifier[rows] , literal[int] , identifier[gdal] . identifier[GDT_Byte] )
keyword[else] :
identifier[target_ds] = identifier[gdal] . identifier[GetDriverByName] ( literal[string] ). identifier[Create] ( literal[string] , identifier[reference] . identifier[cols] , identifier[reference] . identifier[rows] , literal[int] , identifier[gdal] . identifier[GDT_Byte] )
identifier[target_ds] . identifier[SetGeoTransform] ( identifier[reference] . identifier[raster] . identifier[GetGeoTransform] ())
identifier[target_ds] . identifier[SetProjection] ( identifier[reference] . identifier[raster] . identifier[GetProjection] ())
identifier[band] = identifier[target_ds] . identifier[GetRasterBand] ( literal[int] )
identifier[band] . identifier[SetNoDataValue] ( identifier[nodata] )
identifier[band] . identifier[FlushCache] ()
identifier[band] = keyword[None]
keyword[for] identifier[expression] , identifier[value] keyword[in] identifier[zip] ( identifier[expressions] , identifier[burn_values] ):
identifier[vectorobject] . identifier[layer] . identifier[SetAttributeFilter] ( identifier[expression] )
identifier[gdal] . identifier[RasterizeLayer] ( identifier[target_ds] ,[ literal[int] ], identifier[vectorobject] . identifier[layer] , identifier[burn_values] =[ identifier[value] ])
identifier[vectorobject] . identifier[layer] . identifier[SetAttributeFilter] ( literal[string] )
keyword[if] identifier[outname] keyword[is] keyword[None] :
keyword[return] identifier[Raster] ( identifier[target_ds] )
keyword[else] :
identifier[target_ds] = keyword[None]
|
def rasterize(vectorobject, reference, outname=None, burn_values=1, expressions=None, nodata=0, append=False):
"""
rasterize a vector object
Parameters
----------
vectorobject: Vector
the vector object to be rasterized
reference: Raster
a reference Raster object to retrieve geo information and extent from
outname: str or None
the name of the GeoTiff output file; if None, an in-memory object of type :class:`Raster` is returned and
parameter outname is ignored
burn_values: int or list
the values to be written to the raster file
expressions: list
SQL expressions to filter the vector object by attributes
nodata: int
the nodata value of the target raster file
append: bool
if the output file already exists, update this file with new rasterized values?
If True and the output file exists, parameters `reference` and `nodata` are ignored.
Returns
-------
Raster or None
if outname is `None`, a raster object pointing to an in-memory dataset else `None`
Example
-------
>>> from spatialist import Vector, Raster, rasterize
>>> vec = Vector('source.shp')
>>> ref = Raster('reference.tif')
>>> outname = 'target.tif'
>>> expressions = ['ATTRIBUTE=1', 'ATTRIBUTE=2']
>>> burn_values = [1, 2]
>>> rasterize(vec, reference, outname, burn_values, expressions)
"""
if expressions is None:
expressions = [''] # depends on [control=['if'], data=['expressions']]
if isinstance(burn_values, (int, float)):
burn_values = [burn_values] # depends on [control=['if'], data=[]]
if len(expressions) != len(burn_values):
raise RuntimeError('expressions and burn_values of different length') # depends on [control=['if'], data=[]]
failed = []
for exp in expressions:
try:
vectorobject.layer.SetAttributeFilter(exp) # depends on [control=['try'], data=[]]
except RuntimeError:
failed.append(exp) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['exp']]
if len(failed) > 0:
raise RuntimeError('failed to set the following attribute filter(s): ["{}"]'.format('", '.join(failed))) # depends on [control=['if'], data=[]]
if append and outname is not None and os.path.isfile(outname):
target_ds = gdal.Open(outname, GA_Update) # depends on [control=['if'], data=[]]
else:
if not isinstance(reference, Raster):
raise RuntimeError("parameter 'reference' must be of type Raster") # depends on [control=['if'], data=[]]
if outname is not None:
target_ds = gdal.GetDriverByName('GTiff').Create(outname, reference.cols, reference.rows, 1, gdal.GDT_Byte) # depends on [control=['if'], data=['outname']]
else:
target_ds = gdal.GetDriverByName('MEM').Create('', reference.cols, reference.rows, 1, gdal.GDT_Byte)
target_ds.SetGeoTransform(reference.raster.GetGeoTransform())
target_ds.SetProjection(reference.raster.GetProjection())
band = target_ds.GetRasterBand(1)
band.SetNoDataValue(nodata)
band.FlushCache()
band = None
for (expression, value) in zip(expressions, burn_values):
vectorobject.layer.SetAttributeFilter(expression)
gdal.RasterizeLayer(target_ds, [1], vectorobject.layer, burn_values=[value]) # depends on [control=['for'], data=[]]
vectorobject.layer.SetAttributeFilter('')
if outname is None:
return Raster(target_ds) # depends on [control=['if'], data=[]]
else:
target_ds = None
|
def finetune_classification_cnn(config):
    """Fine-tune a pretrained classification CNN on a tensor dataset.

    Reads all hyperparameters from *config* (a dict-like object that also
    supports ``save``), splits the dataset into train/val subsets (cached
    in ``splits.npz``), fits or reloads cached data generators, freezes
    the base CNN so only the new classification head is trained, and
    checkpoints weights, config and history under a freshly generated
    model directory.

    Parameters
    ----------
    config : dict-like
        Training configuration with keys 'dataset', 'x_names', 'y_name',
        'model_dir', 'debug', 'training', 'data_augmentation',
        'preprocessing', 'data_iteration', 'model', 'optimization' and
        optionally 'num_classes'. Must provide a ``save(filename)``
        method (presumably a YamlConfig-style object -- TODO confirm).
    """
    # read params
    dataset = config['dataset']
    x_names = config['x_names']
    y_name = config['y_name']
    model_dir = config['model_dir']
    debug = config['debug']
    num_classes = None
    if 'num_classes' in config.keys():
        num_classes = config['num_classes']
    batch_size = config['training']['batch_size']
    train_pct = config['training']['train_pct']
    model_save_period = config['training']['model_save_period']
    data_aug_config = config['data_augmentation']
    preproc_config = config['preprocessing']
    iterator_config = config['data_iteration']
    model_config = config['model']
    base_model_config = model_config['base']
    optimization_config = config['optimization']
    train_config = config['training']
    generator_image_shape = None
    if 'image_shape' in data_aug_config.keys():
        generator_image_shape = data_aug_config['image_shape']
    optimizer_name = optimization_config['optimizer']
    model_params = {}
    if 'params' in model_config.keys():
        model_params = model_config['params']
    base_model_params = {}
    if 'params' in base_model_config.keys():
        base_model_params = base_model_config['params']
    if debug:
        # fixed seed for reproducible debugging runs
        seed = 108
        random.seed(seed)
        np.random.seed(seed)
    # generate model dir
    if not os.path.exists(model_dir):
        os.mkdir(model_dir)
    model_id = utils.gen_experiment_id()
    model_dir = os.path.join(model_dir, 'model_%s' %(model_id))
    if not os.path.exists(model_dir):
        os.mkdir(model_dir)
    logging.info('Saving model to %s' %(model_dir))
    latest_model_filename = os.path.join(model_dir, 'weights_{epoch:05d}.h5')
    best_model_filename = os.path.join(model_dir, 'weights.h5')
    # save config
    training_config_filename = os.path.join(model_dir, 'training_config.yaml')
    config.save(training_config_filename)
    # open dataset
    dataset = TensorDataset.open(dataset)
    # split dataset, reusing a previously cached split when available
    indices_filename = os.path.join(model_dir, 'splits.npz')
    if os.path.exists(indices_filename):
        indices = np.load(indices_filename)['arr_0'].tolist()
        train_indices = indices['train']
        val_indices = indices['val']
    else:
        train_indices, val_indices = dataset.split(train_pct)
        indices = np.array({'train':train_indices,
                            'val':val_indices})
        np.savez_compressed(indices_filename, indices)
    num_train = train_indices.shape[0]
    num_val = val_indices.shape[0]
    val_steps = int(np.ceil(float(num_val) / batch_size))
    # init generator, reusing cached (pickled) preprocessors when available
    # NOTE(review): pickle is unsafe on untrusted files -- these caches are
    # assumed to be produced by this function itself.
    train_generator_filename = os.path.join(model_dir, 'train_preprocessor.pkl')
    val_generator_filename = os.path.join(model_dir, 'val_preprocessor.pkl')
    if os.path.exists(train_generator_filename):
        logging.info('Loading generators')
        with open(train_generator_filename, 'rb') as f:
            train_generator = pkl.load(f)
        with open(val_generator_filename, 'rb') as f:
            val_generator = pkl.load(f)
    else:
        logging.info('Fitting generator')
        train_generator = TensorDataGenerator(num_classes=num_classes,
                                              **data_aug_config)
        # the validation generator performs no augmentation, only the same
        # normalization as the training generator
        val_generator = TensorDataGenerator(featurewise_center=data_aug_config['featurewise_center'],
                                            featurewise_std_normalization=data_aug_config['featurewise_std_normalization'],
                                            image_shape=generator_image_shape,
                                            num_classes=num_classes)
        fit_start = time.time()
        train_generator.fit(dataset, x_names, y_name, indices=train_indices, **preproc_config)
        # copy the statistics fitted on the training subset to the
        # validation generator so both normalize identically
        val_generator.mean = train_generator.mean
        val_generator.std = train_generator.std
        val_generator.min_output = train_generator.min_output
        val_generator.max_output = train_generator.max_output
        val_generator.num_classes = train_generator.num_classes
        fit_stop = time.time()
        logging.info('Generator fit took %.3f sec' %(fit_stop - fit_start))
        with open(train_generator_filename, 'wb') as f:
            pkl.dump(train_generator, f)
        with open(val_generator_filename, 'wb') as f:
            pkl.dump(val_generator, f)
    if num_classes is None:
        num_classes = int(train_generator.num_classes)
    # init iterator
    train_iterator = train_generator.flow_from_dataset(dataset, x_names, y_name,
                                                       indices=train_indices,
                                                       batch_size=batch_size,
                                                       **iterator_config)
    val_iterator = val_generator.flow_from_dataset(dataset, x_names, y_name,
                                                   indices=val_indices,
                                                   batch_size=batch_size,
                                                   **iterator_config)
    # setup model
    base_cnn = ClassificationCNN.open(base_model_config['model'],
                                      base_model_config['type'],
                                      input_name=x_names[0],
                                      **base_model_params)
    cnn = FinetunedClassificationCNN(base_cnn=base_cnn,
                                     name='dexresnet',
                                     num_classes=num_classes,
                                     output_name=y_name,
                                     im_preprocessor=val_generator,
                                     **model_params)
    # setup training: only the new head is trainable
    cnn.freeze_base_cnn()
    if optimizer_name == 'sgd':
        optimizer = SGD(lr=optimization_config['lr'],
                        momentum=optimization_config['momentum'])
    elif optimizer_name == 'adam':
        optimizer = Adam(lr=optimization_config['lr'])
    else:
        raise ValueError('Optimizer %s not supported!' %(optimizer_name))
    model = cnn.model
    model.compile(optimizer=optimizer,
                  loss=optimization_config['loss'],
                  metrics=optimization_config['metrics'])
    # train
    steps_per_epoch = int(np.ceil(float(num_train) / batch_size))
    latest_model_ckpt = ModelCheckpoint(latest_model_filename, period=model_save_period)
    best_model_ckpt = ModelCheckpoint(best_model_filename,
                                      save_best_only=True,
                                      period=model_save_period)
    train_history_cb = TrainHistory(model_dir)
    callbacks = [latest_model_ckpt, best_model_ckpt, train_history_cb]
    history = model.fit_generator(train_iterator,
                                  steps_per_epoch=steps_per_epoch,
                                  epochs=train_config['epochs'],
                                  callbacks=callbacks,
                                  validation_data=val_iterator,
                                  validation_steps=val_steps,
                                  class_weight=train_config['class_weight'],
                                  use_multiprocessing=train_config['use_multiprocessing'])
    # save model
    cnn.save(model_dir)
    # save history
    history_filename = os.path.join(model_dir, 'history.pkl')
    with open(history_filename, 'wb') as f:
        pkl.dump(history.history, f)
|
def function[finetune_classification_cnn, parameter[config]]:
constant[ Main function. ]
variable[dataset] assign[=] call[name[config]][constant[dataset]]
variable[x_names] assign[=] call[name[config]][constant[x_names]]
variable[y_name] assign[=] call[name[config]][constant[y_name]]
variable[model_dir] assign[=] call[name[config]][constant[model_dir]]
variable[debug] assign[=] call[name[config]][constant[debug]]
variable[num_classes] assign[=] constant[None]
if compare[constant[num_classes] in call[name[config].keys, parameter[]]] begin[:]
variable[num_classes] assign[=] call[name[config]][constant[num_classes]]
variable[batch_size] assign[=] call[call[name[config]][constant[training]]][constant[batch_size]]
variable[train_pct] assign[=] call[call[name[config]][constant[training]]][constant[train_pct]]
variable[model_save_period] assign[=] call[call[name[config]][constant[training]]][constant[model_save_period]]
variable[data_aug_config] assign[=] call[name[config]][constant[data_augmentation]]
variable[preproc_config] assign[=] call[name[config]][constant[preprocessing]]
variable[iterator_config] assign[=] call[name[config]][constant[data_iteration]]
variable[model_config] assign[=] call[name[config]][constant[model]]
variable[base_model_config] assign[=] call[name[model_config]][constant[base]]
variable[optimization_config] assign[=] call[name[config]][constant[optimization]]
variable[train_config] assign[=] call[name[config]][constant[training]]
variable[generator_image_shape] assign[=] constant[None]
if compare[constant[image_shape] in call[name[data_aug_config].keys, parameter[]]] begin[:]
variable[generator_image_shape] assign[=] call[name[data_aug_config]][constant[image_shape]]
variable[optimizer_name] assign[=] call[name[optimization_config]][constant[optimizer]]
variable[model_params] assign[=] dictionary[[], []]
if compare[constant[params] in call[name[model_config].keys, parameter[]]] begin[:]
variable[model_params] assign[=] call[name[model_config]][constant[params]]
variable[base_model_params] assign[=] dictionary[[], []]
if compare[constant[params] in call[name[base_model_config].keys, parameter[]]] begin[:]
variable[base_model_params] assign[=] call[name[base_model_config]][constant[params]]
if name[debug] begin[:]
variable[seed] assign[=] constant[108]
call[name[random].seed, parameter[name[seed]]]
call[name[np].random.seed, parameter[name[seed]]]
if <ast.UnaryOp object at 0x7da1b048ab60> begin[:]
call[name[os].mkdir, parameter[name[model_dir]]]
variable[model_id] assign[=] call[name[utils].gen_experiment_id, parameter[]]
variable[model_dir] assign[=] call[name[os].path.join, parameter[name[model_dir], binary_operation[constant[model_%s] <ast.Mod object at 0x7da2590d6920> name[model_id]]]]
if <ast.UnaryOp object at 0x7da1b048a620> begin[:]
call[name[os].mkdir, parameter[name[model_dir]]]
call[name[logging].info, parameter[binary_operation[constant[Saving model to %s] <ast.Mod object at 0x7da2590d6920> name[model_dir]]]]
variable[latest_model_filename] assign[=] call[name[os].path.join, parameter[name[model_dir], constant[weights_{epoch:05d}.h5]]]
variable[best_model_filename] assign[=] call[name[os].path.join, parameter[name[model_dir], constant[weights.h5]]]
variable[training_config_filename] assign[=] call[name[os].path.join, parameter[name[model_dir], constant[training_config.yaml]]]
call[name[config].save, parameter[name[training_config_filename]]]
variable[dataset] assign[=] call[name[TensorDataset].open, parameter[name[dataset]]]
variable[indices_filename] assign[=] call[name[os].path.join, parameter[name[model_dir], constant[splits.npz]]]
if call[name[os].path.exists, parameter[name[indices_filename]]] begin[:]
variable[indices] assign[=] call[call[call[name[np].load, parameter[name[indices_filename]]]][constant[arr_0]].tolist, parameter[]]
variable[train_indices] assign[=] call[name[indices]][constant[train]]
variable[val_indices] assign[=] call[name[indices]][constant[val]]
variable[num_train] assign[=] call[name[train_indices].shape][constant[0]]
variable[num_val] assign[=] call[name[val_indices].shape][constant[0]]
variable[val_steps] assign[=] call[name[int], parameter[call[name[np].ceil, parameter[binary_operation[call[name[float], parameter[name[num_val]]] / name[batch_size]]]]]]
variable[train_generator_filename] assign[=] call[name[os].path.join, parameter[name[model_dir], constant[train_preprocessor.pkl]]]
variable[val_generator_filename] assign[=] call[name[os].path.join, parameter[name[model_dir], constant[val_preprocessor.pkl]]]
if call[name[os].path.exists, parameter[name[train_generator_filename]]] begin[:]
call[name[logging].info, parameter[constant[Loading generators]]]
variable[train_generator] assign[=] call[name[pkl].load, parameter[call[name[open], parameter[name[train_generator_filename], constant[rb]]]]]
variable[val_generator] assign[=] call[name[pkl].load, parameter[call[name[open], parameter[name[val_generator_filename], constant[rb]]]]]
if compare[name[num_classes] is constant[None]] begin[:]
variable[num_classes] assign[=] call[name[int], parameter[name[train_generator].num_classes]]
variable[train_iterator] assign[=] call[name[train_generator].flow_from_dataset, parameter[name[dataset], name[x_names], name[y_name]]]
variable[val_iterator] assign[=] call[name[val_generator].flow_from_dataset, parameter[name[dataset], name[x_names], name[y_name]]]
variable[base_cnn] assign[=] call[name[ClassificationCNN].open, parameter[call[name[base_model_config]][constant[model]], call[name[base_model_config]][constant[type]]]]
variable[cnn] assign[=] call[name[FinetunedClassificationCNN], parameter[]]
call[name[cnn].freeze_base_cnn, parameter[]]
if compare[name[optimizer_name] equal[==] constant[sgd]] begin[:]
variable[optimizer] assign[=] call[name[SGD], parameter[]]
variable[model] assign[=] name[cnn].model
call[name[model].compile, parameter[]]
variable[steps_per_epoch] assign[=] call[name[int], parameter[call[name[np].ceil, parameter[binary_operation[call[name[float], parameter[name[num_train]]] / name[batch_size]]]]]]
variable[latest_model_ckpt] assign[=] call[name[ModelCheckpoint], parameter[name[latest_model_filename]]]
variable[best_model_ckpt] assign[=] call[name[ModelCheckpoint], parameter[name[best_model_filename]]]
variable[train_history_cb] assign[=] call[name[TrainHistory], parameter[name[model_dir]]]
variable[callbacks] assign[=] list[[<ast.Name object at 0x7da1b0416c50>, <ast.Name object at 0x7da1b0416c20>, <ast.Name object at 0x7da1b0416bf0>]]
variable[history] assign[=] call[name[model].fit_generator, parameter[name[train_iterator]]]
call[name[cnn].save, parameter[name[model_dir]]]
variable[history_filename] assign[=] call[name[os].path.join, parameter[name[model_dir], constant[history.pkl]]]
call[name[pkl].dump, parameter[name[history].history, call[name[open], parameter[name[history_filename], constant[wb]]]]]
|
keyword[def] identifier[finetune_classification_cnn] ( identifier[config] ):
literal[string]
identifier[dataset] = identifier[config] [ literal[string] ]
identifier[x_names] = identifier[config] [ literal[string] ]
identifier[y_name] = identifier[config] [ literal[string] ]
identifier[model_dir] = identifier[config] [ literal[string] ]
identifier[debug] = identifier[config] [ literal[string] ]
identifier[num_classes] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[config] . identifier[keys] ():
identifier[num_classes] = identifier[config] [ literal[string] ]
identifier[batch_size] = identifier[config] [ literal[string] ][ literal[string] ]
identifier[train_pct] = identifier[config] [ literal[string] ][ literal[string] ]
identifier[model_save_period] = identifier[config] [ literal[string] ][ literal[string] ]
identifier[data_aug_config] = identifier[config] [ literal[string] ]
identifier[preproc_config] = identifier[config] [ literal[string] ]
identifier[iterator_config] = identifier[config] [ literal[string] ]
identifier[model_config] = identifier[config] [ literal[string] ]
identifier[base_model_config] = identifier[model_config] [ literal[string] ]
identifier[optimization_config] = identifier[config] [ literal[string] ]
identifier[train_config] = identifier[config] [ literal[string] ]
identifier[generator_image_shape] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[data_aug_config] . identifier[keys] ():
identifier[generator_image_shape] = identifier[data_aug_config] [ literal[string] ]
identifier[optimizer_name] = identifier[optimization_config] [ literal[string] ]
identifier[model_params] ={}
keyword[if] literal[string] keyword[in] identifier[model_config] . identifier[keys] ():
identifier[model_params] = identifier[model_config] [ literal[string] ]
identifier[base_model_params] ={}
keyword[if] literal[string] keyword[in] identifier[base_model_config] . identifier[keys] ():
identifier[base_model_params] = identifier[base_model_config] [ literal[string] ]
keyword[if] identifier[debug] :
identifier[seed] = literal[int]
identifier[random] . identifier[seed] ( identifier[seed] )
identifier[np] . identifier[random] . identifier[seed] ( identifier[seed] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[model_dir] ):
identifier[os] . identifier[mkdir] ( identifier[model_dir] )
identifier[model_id] = identifier[utils] . identifier[gen_experiment_id] ()
identifier[model_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[model_dir] , literal[string] %( identifier[model_id] ))
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[model_dir] ):
identifier[os] . identifier[mkdir] ( identifier[model_dir] )
identifier[logging] . identifier[info] ( literal[string] %( identifier[model_dir] ))
identifier[latest_model_filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[model_dir] , literal[string] )
identifier[best_model_filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[model_dir] , literal[string] )
identifier[training_config_filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[model_dir] , literal[string] )
identifier[config] . identifier[save] ( identifier[training_config_filename] )
identifier[dataset] = identifier[TensorDataset] . identifier[open] ( identifier[dataset] )
identifier[indices_filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[model_dir] , literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[indices_filename] ):
identifier[indices] = identifier[np] . identifier[load] ( identifier[indices_filename] )[ literal[string] ]. identifier[tolist] ()
identifier[train_indices] = identifier[indices] [ literal[string] ]
identifier[val_indices] = identifier[indices] [ literal[string] ]
keyword[else] :
identifier[train_indices] , identifier[val_indices] = identifier[dataset] . identifier[split] ( identifier[train_pct] )
identifier[indices] = identifier[np] . identifier[array] ({ literal[string] : identifier[train_indices] ,
literal[string] : identifier[val_indices] })
identifier[np] . identifier[savez_compressed] ( identifier[indices_filename] , identifier[indices] )
identifier[num_train] = identifier[train_indices] . identifier[shape] [ literal[int] ]
identifier[num_val] = identifier[val_indices] . identifier[shape] [ literal[int] ]
identifier[val_steps] = identifier[int] ( identifier[np] . identifier[ceil] ( identifier[float] ( identifier[num_val] )/ identifier[batch_size] ))
identifier[train_generator_filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[model_dir] , literal[string] )
identifier[val_generator_filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[model_dir] , literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[train_generator_filename] ):
identifier[logging] . identifier[info] ( literal[string] )
identifier[train_generator] = identifier[pkl] . identifier[load] ( identifier[open] ( identifier[train_generator_filename] , literal[string] ))
identifier[val_generator] = identifier[pkl] . identifier[load] ( identifier[open] ( identifier[val_generator_filename] , literal[string] ))
keyword[else] :
identifier[logging] . identifier[info] ( literal[string] )
identifier[train_generator] = identifier[TensorDataGenerator] ( identifier[num_classes] = identifier[num_classes] ,
** identifier[data_aug_config] )
identifier[val_generator] = identifier[TensorDataGenerator] ( identifier[featurewise_center] = identifier[data_aug_config] [ literal[string] ],
identifier[featurewise_std_normalization] = identifier[data_aug_config] [ literal[string] ],
identifier[image_shape] = identifier[generator_image_shape] ,
identifier[num_classes] = identifier[num_classes] )
identifier[fit_start] = identifier[time] . identifier[time] ()
identifier[train_generator] . identifier[fit] ( identifier[dataset] , identifier[x_names] , identifier[y_name] , identifier[indices] = identifier[train_indices] ,** identifier[preproc_config] )
identifier[val_generator] . identifier[mean] = identifier[train_generator] . identifier[mean]
identifier[val_generator] . identifier[std] = identifier[train_generator] . identifier[std]
identifier[val_generator] . identifier[min_output] = identifier[train_generator] . identifier[min_output]
identifier[val_generator] . identifier[max_output] = identifier[train_generator] . identifier[max_output]
identifier[val_generator] . identifier[num_classes] = identifier[train_generator] . identifier[num_classes]
identifier[fit_stop] = identifier[time] . identifier[time] ()
identifier[logging] . identifier[info] ( literal[string] %( identifier[fit_stop] - identifier[fit_start] ))
identifier[pkl] . identifier[dump] ( identifier[train_generator] , identifier[open] ( identifier[train_generator_filename] , literal[string] ))
identifier[pkl] . identifier[dump] ( identifier[val_generator] , identifier[open] ( identifier[val_generator_filename] , literal[string] ))
keyword[if] identifier[num_classes] keyword[is] keyword[None] :
identifier[num_classes] = identifier[int] ( identifier[train_generator] . identifier[num_classes] )
identifier[train_iterator] = identifier[train_generator] . identifier[flow_from_dataset] ( identifier[dataset] , identifier[x_names] , identifier[y_name] ,
identifier[indices] = identifier[train_indices] ,
identifier[batch_size] = identifier[batch_size] ,
** identifier[iterator_config] )
identifier[val_iterator] = identifier[val_generator] . identifier[flow_from_dataset] ( identifier[dataset] , identifier[x_names] , identifier[y_name] ,
identifier[indices] = identifier[val_indices] ,
identifier[batch_size] = identifier[batch_size] ,
** identifier[iterator_config] )
identifier[base_cnn] = identifier[ClassificationCNN] . identifier[open] ( identifier[base_model_config] [ literal[string] ],
identifier[base_model_config] [ literal[string] ],
identifier[input_name] = identifier[x_names] [ literal[int] ],
** identifier[base_model_params] )
identifier[cnn] = identifier[FinetunedClassificationCNN] ( identifier[base_cnn] = identifier[base_cnn] ,
identifier[name] = literal[string] ,
identifier[num_classes] = identifier[num_classes] ,
identifier[output_name] = identifier[y_name] ,
identifier[im_preprocessor] = identifier[val_generator] ,
** identifier[model_params] )
identifier[cnn] . identifier[freeze_base_cnn] ()
keyword[if] identifier[optimizer_name] == literal[string] :
identifier[optimizer] = identifier[SGD] ( identifier[lr] = identifier[optimization_config] [ literal[string] ],
identifier[momentum] = identifier[optimization_config] [ literal[string] ])
keyword[elif] identifier[optimizer_name] == literal[string] :
identifier[optimizer] = identifier[Adam] ( identifier[lr] = identifier[optimization_config] [ literal[string] ])
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[optimizer_name] ))
identifier[model] = identifier[cnn] . identifier[model]
identifier[model] . identifier[compile] ( identifier[optimizer] = identifier[optimizer] ,
identifier[loss] = identifier[optimization_config] [ literal[string] ],
identifier[metrics] = identifier[optimization_config] [ literal[string] ])
identifier[steps_per_epoch] = identifier[int] ( identifier[np] . identifier[ceil] ( identifier[float] ( identifier[num_train] )/ identifier[batch_size] ))
identifier[latest_model_ckpt] = identifier[ModelCheckpoint] ( identifier[latest_model_filename] , identifier[period] = identifier[model_save_period] )
identifier[best_model_ckpt] = identifier[ModelCheckpoint] ( identifier[best_model_filename] ,
identifier[save_best_only] = keyword[True] ,
identifier[period] = identifier[model_save_period] )
identifier[train_history_cb] = identifier[TrainHistory] ( identifier[model_dir] )
identifier[callbacks] =[ identifier[latest_model_ckpt] , identifier[best_model_ckpt] , identifier[train_history_cb] ]
identifier[history] = identifier[model] . identifier[fit_generator] ( identifier[train_iterator] ,
identifier[steps_per_epoch] = identifier[steps_per_epoch] ,
identifier[epochs] = identifier[train_config] [ literal[string] ],
identifier[callbacks] = identifier[callbacks] ,
identifier[validation_data] = identifier[val_iterator] ,
identifier[validation_steps] = identifier[val_steps] ,
identifier[class_weight] = identifier[train_config] [ literal[string] ],
identifier[use_multiprocessing] = identifier[train_config] [ literal[string] ])
identifier[cnn] . identifier[save] ( identifier[model_dir] )
identifier[history_filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[model_dir] , literal[string] )
identifier[pkl] . identifier[dump] ( identifier[history] . identifier[history] , identifier[open] ( identifier[history_filename] , literal[string] ))
|
def finetune_classification_cnn(config):
""" Main function. """
# read params
dataset = config['dataset']
x_names = config['x_names']
y_name = config['y_name']
model_dir = config['model_dir']
debug = config['debug']
num_classes = None
if 'num_classes' in config.keys():
num_classes = config['num_classes'] # depends on [control=['if'], data=[]]
batch_size = config['training']['batch_size']
train_pct = config['training']['train_pct']
model_save_period = config['training']['model_save_period']
data_aug_config = config['data_augmentation']
preproc_config = config['preprocessing']
iterator_config = config['data_iteration']
model_config = config['model']
base_model_config = model_config['base']
optimization_config = config['optimization']
train_config = config['training']
generator_image_shape = None
if 'image_shape' in data_aug_config.keys():
generator_image_shape = data_aug_config['image_shape'] # depends on [control=['if'], data=[]]
optimizer_name = optimization_config['optimizer']
model_params = {}
if 'params' in model_config.keys():
model_params = model_config['params'] # depends on [control=['if'], data=[]]
base_model_params = {}
if 'params' in base_model_config.keys():
base_model_params = base_model_config['params'] # depends on [control=['if'], data=[]]
if debug:
seed = 108
random.seed(seed)
np.random.seed(seed) # depends on [control=['if'], data=[]]
# generate model dir
if not os.path.exists(model_dir):
os.mkdir(model_dir) # depends on [control=['if'], data=[]]
model_id = utils.gen_experiment_id()
model_dir = os.path.join(model_dir, 'model_%s' % model_id)
if not os.path.exists(model_dir):
os.mkdir(model_dir) # depends on [control=['if'], data=[]]
logging.info('Saving model to %s' % model_dir)
latest_model_filename = os.path.join(model_dir, 'weights_{epoch:05d}.h5')
best_model_filename = os.path.join(model_dir, 'weights.h5')
# save config
training_config_filename = os.path.join(model_dir, 'training_config.yaml')
config.save(training_config_filename)
# open dataset
dataset = TensorDataset.open(dataset)
# split dataset
indices_filename = os.path.join(model_dir, 'splits.npz')
if os.path.exists(indices_filename):
indices = np.load(indices_filename)['arr_0'].tolist()
train_indices = indices['train']
val_indices = indices['val'] # depends on [control=['if'], data=[]]
else:
(train_indices, val_indices) = dataset.split(train_pct)
indices = np.array({'train': train_indices, 'val': val_indices})
np.savez_compressed(indices_filename, indices)
num_train = train_indices.shape[0]
num_val = val_indices.shape[0]
val_steps = int(np.ceil(float(num_val) / batch_size))
# init generator
train_generator_filename = os.path.join(model_dir, 'train_preprocessor.pkl')
val_generator_filename = os.path.join(model_dir, 'val_preprocessor.pkl')
if os.path.exists(train_generator_filename):
logging.info('Loading generators')
train_generator = pkl.load(open(train_generator_filename, 'rb'))
val_generator = pkl.load(open(val_generator_filename, 'rb')) # depends on [control=['if'], data=[]]
else:
logging.info('Fitting generator')
train_generator = TensorDataGenerator(num_classes=num_classes, **data_aug_config)
val_generator = TensorDataGenerator(featurewise_center=data_aug_config['featurewise_center'], featurewise_std_normalization=data_aug_config['featurewise_std_normalization'], image_shape=generator_image_shape, num_classes=num_classes)
fit_start = time.time()
train_generator.fit(dataset, x_names, y_name, indices=train_indices, **preproc_config)
val_generator.mean = train_generator.mean
val_generator.std = train_generator.std
val_generator.min_output = train_generator.min_output
val_generator.max_output = train_generator.max_output
val_generator.num_classes = train_generator.num_classes
fit_stop = time.time()
logging.info('Generator fit took %.3f sec' % (fit_stop - fit_start))
pkl.dump(train_generator, open(train_generator_filename, 'wb'))
pkl.dump(val_generator, open(val_generator_filename, 'wb'))
if num_classes is None:
num_classes = int(train_generator.num_classes) # depends on [control=['if'], data=['num_classes']]
# init iterator
train_iterator = train_generator.flow_from_dataset(dataset, x_names, y_name, indices=train_indices, batch_size=batch_size, **iterator_config)
val_iterator = val_generator.flow_from_dataset(dataset, x_names, y_name, indices=val_indices, batch_size=batch_size, **iterator_config)
# setup model
base_cnn = ClassificationCNN.open(base_model_config['model'], base_model_config['type'], input_name=x_names[0], **base_model_params)
cnn = FinetunedClassificationCNN(base_cnn=base_cnn, name='dexresnet', num_classes=num_classes, output_name=y_name, im_preprocessor=val_generator, **model_params)
# setup training
cnn.freeze_base_cnn()
if optimizer_name == 'sgd':
optimizer = SGD(lr=optimization_config['lr'], momentum=optimization_config['momentum']) # depends on [control=['if'], data=[]]
elif optimizer_name == 'adam':
optimizer = Adam(lr=optimization_config['lr']) # depends on [control=['if'], data=[]]
else:
raise ValueError('Optimizer %s not supported!' % optimizer_name)
model = cnn.model
model.compile(optimizer=optimizer, loss=optimization_config['loss'], metrics=optimization_config['metrics'])
# train
steps_per_epoch = int(np.ceil(float(num_train) / batch_size))
latest_model_ckpt = ModelCheckpoint(latest_model_filename, period=model_save_period)
best_model_ckpt = ModelCheckpoint(best_model_filename, save_best_only=True, period=model_save_period)
train_history_cb = TrainHistory(model_dir)
callbacks = [latest_model_ckpt, best_model_ckpt, train_history_cb]
history = model.fit_generator(train_iterator, steps_per_epoch=steps_per_epoch, epochs=train_config['epochs'], callbacks=callbacks, validation_data=val_iterator, validation_steps=val_steps, class_weight=train_config['class_weight'], use_multiprocessing=train_config['use_multiprocessing'])
# save model
cnn.save(model_dir)
# save history
history_filename = os.path.join(model_dir, 'history.pkl')
pkl.dump(history.history, open(history_filename, 'wb'))
|
def get_precursor_mz(exact_mass, precursor_type):
    """ Calculate precursor mz based on exact mass and precursor type
    Args:
        exact_mass (float): exact mass of compound of interest
        precursor_type (str): Precursor type (currently only works with '[M-H]-', '[M+H]+' and '[M+H-H2O]+'
    Return:
        neutral mass of compound, or False when the precursor type is unknown
    """
    # Offsets taken from what was present in the massbank .msp file for
    # entries missing the exact mass.
    proton = 1.007276
    adduct_offsets = {
        '[M-H]-': -proton,
        '[M+H]+': proton,
        '[M+H-H2O]+': proton - (proton * 2 + 15.9949),
    }
    try:
        offset = adduct_offsets[precursor_type]
    except KeyError as e:
        print(e)
        return False
    return exact_mass + offset
|
def function[get_precursor_mz, parameter[exact_mass, precursor_type]]:
constant[ Calculate precursor mz based on exact mass and precursor type
Args:
exact_mass (float): exact mass of compound of interest
precursor_type (str): Precursor type (currently only works with '[M-H]-', '[M+H]+' and '[M+H-H2O]+'
Return:
neutral mass of compound
]
variable[d] assign[=] dictionary[[<ast.Constant object at 0x7da1b1d67a00>, <ast.Constant object at 0x7da1b1d65ff0>, <ast.Constant object at 0x7da1b1d65c90>], [<ast.UnaryOp object at 0x7da1b1d657b0>, <ast.Constant object at 0x7da1b1d640a0>, <ast.BinOp object at 0x7da1b1d66710>]]
<ast.Try object at 0x7da1b1d673a0>
|
keyword[def] identifier[get_precursor_mz] ( identifier[exact_mass] , identifier[precursor_type] ):
literal[string]
identifier[d] ={ literal[string] :- literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] -(( literal[int] * literal[int] )+ literal[int] )
}
keyword[try] :
keyword[return] identifier[exact_mass] + identifier[d] [ identifier[precursor_type] ]
keyword[except] identifier[KeyError] keyword[as] identifier[e] :
identifier[print] ( identifier[e] )
keyword[return] keyword[False]
|
def get_precursor_mz(exact_mass, precursor_type):
""" Calculate precursor mz based on exact mass and precursor type
Args:
exact_mass (float): exact mass of compound of interest
precursor_type (str): Precursor type (currently only works with '[M-H]-', '[M+H]+' and '[M+H-H2O]+'
Return:
neutral mass of compound
"""
# these are just taken from what was present in the massbank .msp file for those missing the exact mass
d = {'[M-H]-': -1.007276, '[M+H]+': 1.007276, '[M+H-H2O]+': 1.007276 - (1.007276 * 2 + 15.9949)}
try:
return exact_mass + d[precursor_type] # depends on [control=['try'], data=[]]
except KeyError as e:
print(e)
return False # depends on [control=['except'], data=['e']]
|
def contains(self, data):
    """
    Test membership of an item in the bloomfilter.

    :param bytes data: a bytestring representing the item to look up.
    :returns: True when every bit derived from ``data`` is set — the item
        may have been added (false positives are possible). False when any
        bit is clear, which means the item was definitely never added.
    """
    operation = BitFieldOperation(self.database, self.key)
    for bit_offset in self._get_seeds(data):
        # Queue a 1-bit unsigned read for each seed-derived position.
        operation.get('u1', bit_offset)
    results = operation.execute()
    return all(results)
|
def function[contains, parameter[self, data]]:
constant[
Check if an item has been added to the bloomfilter.
:param bytes data: a bytestring representing the item to check.
:returns: a boolean indicating whether or not the item is present in
the bloomfilter. False-positives are possible, but a negative
return value is definitive.
]
variable[bfo] assign[=] call[name[BitFieldOperation], parameter[name[self].database, name[self].key]]
for taget[name[bit_index]] in starred[call[name[self]._get_seeds, parameter[name[data]]]] begin[:]
call[name[bfo].get, parameter[constant[u1], name[bit_index]]]
return[call[name[all], parameter[call[name[bfo].execute, parameter[]]]]]
|
keyword[def] identifier[contains] ( identifier[self] , identifier[data] ):
literal[string]
identifier[bfo] = identifier[BitFieldOperation] ( identifier[self] . identifier[database] , identifier[self] . identifier[key] )
keyword[for] identifier[bit_index] keyword[in] identifier[self] . identifier[_get_seeds] ( identifier[data] ):
identifier[bfo] . identifier[get] ( literal[string] , identifier[bit_index] )
keyword[return] identifier[all] ( identifier[bfo] . identifier[execute] ())
|
def contains(self, data):
"""
Check if an item has been added to the bloomfilter.
:param bytes data: a bytestring representing the item to check.
:returns: a boolean indicating whether or not the item is present in
the bloomfilter. False-positives are possible, but a negative
return value is definitive.
"""
bfo = BitFieldOperation(self.database, self.key)
for bit_index in self._get_seeds(data):
bfo.get('u1', bit_index) # depends on [control=['for'], data=['bit_index']]
return all(bfo.execute())
|
def createFinalTPEDandTFAM(tped, toReadPrefix, prefix, snpToRemove):
    """Creates the final TPED and TFAM.
    :param tped: a representation of the ``tped`` of duplicated markers.
    :param toReadPrefix: the prefix of the unique files.
    :param prefix: the prefix of the output files.
    :param snpToRemove: the markers to remove.
    :type tped: numpy.array
    :type toReadPrefix: str
    :type prefix: str
    :type snpToRemove: set
    :raises ProgramError: if any of the files can't be copied or opened.
    Starts by copying the unique markers' ``tfam`` file to
    ``prefix.final.tfam``. Then, it copies the unique markers' ``tped`` file,
    in which the chosen markers will be appended.
    The final data set will include the unique markers, the chosen markers
    which were completed, and the problematic duplicated markers (for further
    analysis). The markers that were used to complete the chosen ones are not
    present in the final data set.
    """
    # First, copying the tfam
    try:
        shutil.copy(toReadPrefix + ".tfam", prefix + ".final.tfam")
    except IOError:
        msg = "%(toReadPrefix)s.tfam: can't copy file to " \
              "%(prefix)s.final.tfam" % locals()
        raise ProgramError(msg)

    # Next, copy the tped, and append the kept duplicated markers at the end
    try:
        shutil.copy(toReadPrefix + ".tped", prefix + ".final.tped")
    except IOError:
        # Fixed typo in the error message ("fil" -> "file")
        msg = "%(toReadPrefix)s.tped: can't copy file to " \
              "%(prefix)s.final.tped" % locals()
        raise ProgramError(msg)

    try:
        tpedFile = open(prefix + ".final.tped", "a")
    except IOError:
        msg = "%(prefix)s.final.tped: can't append to file" % locals()
        raise ProgramError(msg)

    # try/finally guarantees the file is closed even if iteration raises.
    try:
        for i, row in enumerate(tped):
            if i not in snpToRemove:
                # Equivalent to the old ``print >>tpedFile`` statement, but
                # works on both Python 2 and 3.
                tpedFile.write("\t".join(row) + "\n")
    finally:
        tpedFile.close()
|
def function[createFinalTPEDandTFAM, parameter[tped, toReadPrefix, prefix, snpToRemove]]:
constant[Creates the final TPED and TFAM.
:param tped: a representation of the ``tped`` of duplicated markers.
:param toReadPrefix: the prefix of the unique files.
:param prefix: the prefix of the output files.
:param snpToRemove: the markers to remove.
:type tped: numpy.array
:type toReadPrefix: str
:type prefix: str
:type snpToRemove: set
Starts by copying the unique markers' ``tfam`` file to
``prefix.final.tfam``. Then, it copies the unique markers' ``tped`` file,
in which the chosen markers will be appended.
The final data set will include the unique markers, the chosen markers
which were completed, and the problematic duplicated markers (for further
analysis). The markers that were used to complete the chosen ones are not
present in the final data set.
]
<ast.Try object at 0x7da1b0a42b30>
<ast.Try object at 0x7da1b0a42f50>
variable[tpedFile] assign[=] constant[None]
<ast.Try object at 0x7da1b0a438b0>
for taget[tuple[[<ast.Name object at 0x7da1b0a42e30>, <ast.Name object at 0x7da1b0a42b00>]]] in starred[call[name[enumerate], parameter[name[tped]]]] begin[:]
if compare[name[i] <ast.NotIn object at 0x7da2590d7190> name[snpToRemove]] begin[:]
tuple[[<ast.BinOp object at 0x7da1b0a42f20>, <ast.Call object at 0x7da1b0a42e60>]]
call[name[tpedFile].close, parameter[]]
|
keyword[def] identifier[createFinalTPEDandTFAM] ( identifier[tped] , identifier[toReadPrefix] , identifier[prefix] , identifier[snpToRemove] ):
literal[string]
keyword[try] :
identifier[shutil] . identifier[copy] ( identifier[toReadPrefix] + literal[string] , identifier[prefix] + literal[string] )
keyword[except] identifier[IOError] :
identifier[msg] = literal[string] literal[string] % identifier[locals] ()
keyword[raise] identifier[ProgramError] ( identifier[msg] )
keyword[try] :
identifier[shutil] . identifier[copy] ( identifier[toReadPrefix] + literal[string] , identifier[prefix] + literal[string] )
keyword[except] identifier[IOError] :
identifier[msg] = literal[string] literal[string] % identifier[locals] ()
keyword[raise] identifier[ProgramError] ( identifier[msg] )
identifier[tpedFile] = keyword[None]
keyword[try] :
identifier[tpedFile] = identifier[open] ( identifier[prefix] + literal[string] , literal[string] )
keyword[except] identifier[IOError] :
identifier[msg] = literal[string] % identifier[locals] ()
keyword[raise] identifier[ProgramError] ( identifier[msg] )
keyword[for] identifier[i] , identifier[row] keyword[in] identifier[enumerate] ( identifier[tped] ):
keyword[if] identifier[i] keyword[not] keyword[in] identifier[snpToRemove] :
identifier[print] >> identifier[tpedFile] , literal[string] . identifier[join] ( identifier[row] )
identifier[tpedFile] . identifier[close] ()
|
def createFinalTPEDandTFAM(tped, toReadPrefix, prefix, snpToRemove):
"""Creates the final TPED and TFAM.
:param tped: a representation of the ``tped`` of duplicated markers.
:param toReadPrefix: the prefix of the unique files.
:param prefix: the prefix of the output files.
:param snpToRemove: the markers to remove.
:type tped: numpy.array
:type toReadPrefix: str
:type prefix: str
:type snpToRemove: set
Starts by copying the unique markers' ``tfam`` file to
``prefix.final.tfam``. Then, it copies the unique markers' ``tped`` file,
in which the chosen markers will be appended.
The final data set will include the unique markers, the chosen markers
which were completed, and the problematic duplicated markers (for further
analysis). The markers that were used to complete the chosen ones are not
present in the final data set.
"""
# First, copying the tfam
try:
shutil.copy(toReadPrefix + '.tfam', prefix + '.final.tfam') # depends on [control=['try'], data=[]]
except IOError:
msg = "%(toReadPrefix)s.tfam: can't copy file to %(prefix)s.final.tfam" % locals()
raise ProgramError(msg) # depends on [control=['except'], data=[]]
# Next, copy the tped, and append at the end
try:
shutil.copy(toReadPrefix + '.tped', prefix + '.final.tped') # depends on [control=['try'], data=[]]
except IOError:
msg = "%(toReadPrefix)s.tped: can't copy fil to %(prefix)s.final.tped" % locals()
raise ProgramError(msg) # depends on [control=['except'], data=[]]
tpedFile = None
try:
tpedFile = open(prefix + '.final.tped', 'a') # depends on [control=['try'], data=[]]
except IOError:
msg = "%(prefix)s.final.tped: can't append to file" % locals()
raise ProgramError(msg) # depends on [control=['except'], data=[]]
for (i, row) in enumerate(tped):
if i not in snpToRemove:
(print >> tpedFile, '\t'.join(row)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
tpedFile.close()
|
def timeit(method):
    """Decorator to measure the time used by the recipe"""
    import datetime

    @functools.wraps(method)
    def timed_method(self, rinput):
        # UTC timestamps taken immediately before and after the recipe runs.
        started = datetime.datetime.utcnow()
        outcome = method(self, rinput)
        finished = datetime.datetime.utcnow()
        # Let the result object record its own wall-clock interval.
        outcome.time_it(started, finished)
        self.logger.info('total time measured')
        return outcome

    return timed_method
|
def function[timeit, parameter[method]]:
constant[Decorator to measure the time used by the recipe]
import module[datetime]
def function[timed_method, parameter[self, rinput]]:
variable[time_start] assign[=] call[name[datetime].datetime.utcnow, parameter[]]
variable[result] assign[=] call[name[method], parameter[name[self], name[rinput]]]
variable[time_end] assign[=] call[name[datetime].datetime.utcnow, parameter[]]
call[name[result].time_it, parameter[name[time_start], name[time_end]]]
call[name[self].logger.info, parameter[constant[total time measured]]]
return[name[result]]
return[name[timed_method]]
|
keyword[def] identifier[timeit] ( identifier[method] ):
literal[string]
keyword[import] identifier[datetime]
@ identifier[functools] . identifier[wraps] ( identifier[method] )
keyword[def] identifier[timed_method] ( identifier[self] , identifier[rinput] ):
identifier[time_start] = identifier[datetime] . identifier[datetime] . identifier[utcnow] ()
identifier[result] = identifier[method] ( identifier[self] , identifier[rinput] )
identifier[time_end] = identifier[datetime] . identifier[datetime] . identifier[utcnow] ()
identifier[result] . identifier[time_it] ( identifier[time_start] , identifier[time_end] )
identifier[self] . identifier[logger] . identifier[info] ( literal[string] )
keyword[return] identifier[result]
keyword[return] identifier[timed_method]
|
def timeit(method):
"""Decorator to measure the time used by the recipe"""
import datetime
@functools.wraps(method)
def timed_method(self, rinput):
time_start = datetime.datetime.utcnow()
result = method(self, rinput)
time_end = datetime.datetime.utcnow()
result.time_it(time_start, time_end)
self.logger.info('total time measured')
return result
return timed_method
|
def getExtensions(self):
    """Returns service objects for all supported map service extensions.

    Only the ``SchematicsServer`` extension currently has a concrete
    service class; any other extension name is skipped.

    :return: list of ``SchematicsService`` objects (possibly empty)
    """
    extensions = []
    # ``supportedExtensions`` may be either a list of extension names or a
    # single name as a string; normalize to a list so one loop handles
    # both cases (the two original branches were duplicates of each other).
    supported = self.supportedExtensions
    if not isinstance(supported, list):
        supported = [supported]
    for ext in supported:
        extensionURL = self._url + "/exts/%s" % ext
        if ext == "SchematicsServer":
            extensions.append(SchematicsService(url=extensionURL,
                                                securityHandler=self._securityHandler,
                                                proxy_url=self._proxy_url,
                                                proxy_port=self._proxy_port))
    return extensions
|
def function[getExtensions, parameter[self]]:
constant[returns objects for all map service extensions]
variable[extensions] assign[=] list[[]]
if call[name[isinstance], parameter[name[self].supportedExtensions, name[list]]] begin[:]
for taget[name[ext]] in starred[name[self].supportedExtensions] begin[:]
variable[extensionURL] assign[=] binary_operation[name[self]._url + binary_operation[constant[/exts/%s] <ast.Mod object at 0x7da2590d6920> name[ext]]]
if compare[name[ext] equal[==] constant[SchematicsServer]] begin[:]
call[name[extensions].append, parameter[call[name[SchematicsService], parameter[]]]]
return[name[extensions]]
|
keyword[def] identifier[getExtensions] ( identifier[self] ):
literal[string]
identifier[extensions] =[]
keyword[if] identifier[isinstance] ( identifier[self] . identifier[supportedExtensions] , identifier[list] ):
keyword[for] identifier[ext] keyword[in] identifier[self] . identifier[supportedExtensions] :
identifier[extensionURL] = identifier[self] . identifier[_url] + literal[string] % identifier[ext]
keyword[if] identifier[ext] == literal[string] :
identifier[extensions] . identifier[append] ( identifier[SchematicsService] ( identifier[url] = identifier[extensionURL] ,
identifier[securityHandler] = identifier[self] . identifier[_securityHandler] ,
identifier[proxy_url] = identifier[self] . identifier[_proxy_url] ,
identifier[proxy_port] = identifier[self] . identifier[_proxy_port] ))
keyword[return] identifier[extensions]
keyword[else] :
identifier[extensionURL] = identifier[self] . identifier[_url] + literal[string] % identifier[self] . identifier[supportedExtensions]
keyword[if] identifier[self] . identifier[supportedExtensions] == literal[string] :
identifier[extensions] . identifier[append] ( identifier[SchematicsService] ( identifier[url] = identifier[extensionURL] ,
identifier[securityHandler] = identifier[self] . identifier[_securityHandler] ,
identifier[proxy_url] = identifier[self] . identifier[_proxy_url] ,
identifier[proxy_port] = identifier[self] . identifier[_proxy_port] ))
keyword[return] identifier[extensions]
|
def getExtensions(self):
"""returns objects for all map service extensions"""
extensions = []
if isinstance(self.supportedExtensions, list):
for ext in self.supportedExtensions:
extensionURL = self._url + '/exts/%s' % ext
if ext == 'SchematicsServer':
extensions.append(SchematicsService(url=extensionURL, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ext']]
return extensions # depends on [control=['if'], data=[]]
else:
extensionURL = self._url + '/exts/%s' % self.supportedExtensions
if self.supportedExtensions == 'SchematicsServer':
extensions.append(SchematicsService(url=extensionURL, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)) # depends on [control=['if'], data=[]]
return extensions
|
def _tree_load_sub_branch(self, traj_node, branch_name,
                          load_data=pypetconstants.LOAD_DATA,
                          with_links=True, recursive=False,
                          max_depth=None, _trajectory=None,
                          _as_new=False, _hdf5_group=None):
    """Loads data starting from a node along a branch and starts recursively loading
    all data at end of branch.

    :param traj_node: The node from where loading starts

    :param branch_name:
        A branch along which loading progresses. Colon Notation is used:
        'group1.group2.group3' loads 'group1', then 'group2', then 'group3' and then finally
        recursively all children and children's children below 'group3'

    :param load_data: How to load the data

    :param with_links: If links should be loaded

    :param recursive: If loading recursively

    :param max_depth: The maximum depth to load the tree

    :param _trajectory: The trajectory

    :param _as_new: If trajectory is loaded as new

    :param _hdf5_group:
        HDF5 node in the file corresponding to `traj_node`.
    """
    # Bail out early if the caller asked for no data at all.
    if load_data == pypetconstants.LOAD_NOTHING:
        return
    # ``None`` means unlimited depth; ``inf`` keeps the depth comparisons simple.
    if max_depth is None:
        max_depth = float('inf')
    if _trajectory is None:
        _trajectory = traj_node.v_root
    if _hdf5_group is None:
        # Translate the tree's dot notation into an HDF5 group path.
        hdf5_group_name = traj_node.v_full_name.replace('.', '/')
        # Get child node to load
        if hdf5_group_name == '':
            # Empty full name means ``traj_node`` is the root node, which
            # corresponds to the trajectory's top-level HDF5 group.
            _hdf5_group = self._trajectory_group
        else:
            try:
                _hdf5_group = self._hdf5file.get_node(where=self._trajectory_group,
                                                      name=hdf5_group_name)
            except pt.NoSuchNodeError:
                self._logger.error('Cannot find `%s` the hdf5 node `%s` does not exist!'
                                   % (traj_node.v_full_name, hdf5_group_name))
                raise
    # Walk down the branch one group at a time; only the final name is
    # handed to the (potentially recursive) load below.
    split_names = branch_name.split('.')
    final_group_name = split_names.pop()
    current_depth = 1
    for name in split_names:
        if current_depth > max_depth:
            return
        # First load along the branch
        _hdf5_group = getattr(_hdf5_group, name)
        self._tree_load_nodes_dfs(traj_node, load_data=load_data, with_links=with_links,
                                  recursive=False, max_depth=max_depth, current_depth=current_depth,
                                  trajectory=_trajectory, as_new=_as_new,
                                  hdf5_group=_hdf5_group)
        current_depth += 1
        # Descend in the in-memory tree in lockstep with the HDF5 tree.
        traj_node = traj_node._children[name]
    if current_depth <= max_depth:
        # Then load recursively all data in the last group and below
        _hdf5_group = getattr(_hdf5_group, final_group_name)
        self._tree_load_nodes_dfs(traj_node, load_data=load_data, with_links=with_links,
                                  recursive=recursive, max_depth=max_depth,
                                  current_depth=current_depth, trajectory=_trajectory,
                                  as_new=_as_new, hdf5_group=_hdf5_group)
|
def function[_tree_load_sub_branch, parameter[self, traj_node, branch_name, load_data, with_links, recursive, max_depth, _trajectory, _as_new, _hdf5_group]]:
constant[Loads data starting from a node along a branch and starts recursively loading
all data at end of branch.
:param traj_node: The node from where loading starts
:param branch_name:
A branch along which loading progresses. Colon Notation is used:
'group1.group2.group3' loads 'group1', then 'group2', then 'group3' and then finally
recursively all children and children's children below 'group3'
:param load_data:
How to load the data
:param with_links:
If links should be loaded
:param recursive:
If loading recursively
:param max_depth:
The maximum depth to load the tree
:param _trajectory:
The trajectory
:param _as_new:
If trajectory is loaded as new
:param _hdf5_group:
HDF5 node in the file corresponding to `traj_node`.
]
if compare[name[load_data] equal[==] name[pypetconstants].LOAD_NOTHING] begin[:]
return[None]
if compare[name[max_depth] is constant[None]] begin[:]
variable[max_depth] assign[=] call[name[float], parameter[constant[inf]]]
if compare[name[_trajectory] is constant[None]] begin[:]
variable[_trajectory] assign[=] name[traj_node].v_root
if compare[name[_hdf5_group] is constant[None]] begin[:]
variable[hdf5_group_name] assign[=] call[name[traj_node].v_full_name.replace, parameter[constant[.], constant[/]]]
if compare[name[hdf5_group_name] equal[==] constant[]] begin[:]
variable[_hdf5_group] assign[=] name[self]._trajectory_group
variable[split_names] assign[=] call[name[branch_name].split, parameter[constant[.]]]
variable[final_group_name] assign[=] call[name[split_names].pop, parameter[]]
variable[current_depth] assign[=] constant[1]
for taget[name[name]] in starred[name[split_names]] begin[:]
if compare[name[current_depth] greater[>] name[max_depth]] begin[:]
return[None]
variable[_hdf5_group] assign[=] call[name[getattr], parameter[name[_hdf5_group], name[name]]]
call[name[self]._tree_load_nodes_dfs, parameter[name[traj_node]]]
<ast.AugAssign object at 0x7da1b01e0280>
variable[traj_node] assign[=] call[name[traj_node]._children][name[name]]
if compare[name[current_depth] less_or_equal[<=] name[max_depth]] begin[:]
variable[_hdf5_group] assign[=] call[name[getattr], parameter[name[_hdf5_group], name[final_group_name]]]
call[name[self]._tree_load_nodes_dfs, parameter[name[traj_node]]]
|
keyword[def] identifier[_tree_load_sub_branch] ( identifier[self] , identifier[traj_node] , identifier[branch_name] ,
identifier[load_data] = identifier[pypetconstants] . identifier[LOAD_DATA] ,
identifier[with_links] = keyword[True] , identifier[recursive] = keyword[False] ,
identifier[max_depth] = keyword[None] , identifier[_trajectory] = keyword[None] ,
identifier[_as_new] = keyword[False] , identifier[_hdf5_group] = keyword[None] ):
literal[string]
keyword[if] identifier[load_data] == identifier[pypetconstants] . identifier[LOAD_NOTHING] :
keyword[return]
keyword[if] identifier[max_depth] keyword[is] keyword[None] :
identifier[max_depth] = identifier[float] ( literal[string] )
keyword[if] identifier[_trajectory] keyword[is] keyword[None] :
identifier[_trajectory] = identifier[traj_node] . identifier[v_root]
keyword[if] identifier[_hdf5_group] keyword[is] keyword[None] :
identifier[hdf5_group_name] = identifier[traj_node] . identifier[v_full_name] . identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[hdf5_group_name] == literal[string] :
identifier[_hdf5_group] = identifier[self] . identifier[_trajectory_group]
keyword[else] :
keyword[try] :
identifier[_hdf5_group] = identifier[self] . identifier[_hdf5file] . identifier[get_node] ( identifier[where] = identifier[self] . identifier[_trajectory_group] ,
identifier[name] = identifier[hdf5_group_name] )
keyword[except] identifier[pt] . identifier[NoSuchNodeError] :
identifier[self] . identifier[_logger] . identifier[error] ( literal[string]
%( identifier[traj_node] . identifier[v_full_name] , identifier[hdf5_group_name] ))
keyword[raise]
identifier[split_names] = identifier[branch_name] . identifier[split] ( literal[string] )
identifier[final_group_name] = identifier[split_names] . identifier[pop] ()
identifier[current_depth] = literal[int]
keyword[for] identifier[name] keyword[in] identifier[split_names] :
keyword[if] identifier[current_depth] > identifier[max_depth] :
keyword[return]
identifier[_hdf5_group] = identifier[getattr] ( identifier[_hdf5_group] , identifier[name] )
identifier[self] . identifier[_tree_load_nodes_dfs] ( identifier[traj_node] , identifier[load_data] = identifier[load_data] , identifier[with_links] = identifier[with_links] ,
identifier[recursive] = keyword[False] , identifier[max_depth] = identifier[max_depth] , identifier[current_depth] = identifier[current_depth] ,
identifier[trajectory] = identifier[_trajectory] , identifier[as_new] = identifier[_as_new] ,
identifier[hdf5_group] = identifier[_hdf5_group] )
identifier[current_depth] += literal[int]
identifier[traj_node] = identifier[traj_node] . identifier[_children] [ identifier[name] ]
keyword[if] identifier[current_depth] <= identifier[max_depth] :
identifier[_hdf5_group] = identifier[getattr] ( identifier[_hdf5_group] , identifier[final_group_name] )
identifier[self] . identifier[_tree_load_nodes_dfs] ( identifier[traj_node] , identifier[load_data] = identifier[load_data] , identifier[with_links] = identifier[with_links] ,
identifier[recursive] = identifier[recursive] , identifier[max_depth] = identifier[max_depth] ,
identifier[current_depth] = identifier[current_depth] , identifier[trajectory] = identifier[_trajectory] ,
identifier[as_new] = identifier[_as_new] , identifier[hdf5_group] = identifier[_hdf5_group] )
|
def _tree_load_sub_branch(self, traj_node, branch_name, load_data=pypetconstants.LOAD_DATA, with_links=True, recursive=False, max_depth=None, _trajectory=None, _as_new=False, _hdf5_group=None):
"""Loads data starting from a node along a branch and starts recursively loading
all data at end of branch.
:param traj_node: The node from where loading starts
:param branch_name:
A branch along which loading progresses. Colon Notation is used:
'group1.group2.group3' loads 'group1', then 'group2', then 'group3' and then finally
recursively all children and children's children below 'group3'
:param load_data:
How to load the data
:param with_links:
If links should be loaded
:param recursive:
If loading recursively
:param max_depth:
The maximum depth to load the tree
:param _trajectory:
The trajectory
:param _as_new:
If trajectory is loaded as new
:param _hdf5_group:
HDF5 node in the file corresponding to `traj_node`.
"""
if load_data == pypetconstants.LOAD_NOTHING:
return # depends on [control=['if'], data=[]]
if max_depth is None:
max_depth = float('inf') # depends on [control=['if'], data=['max_depth']]
if _trajectory is None:
_trajectory = traj_node.v_root # depends on [control=['if'], data=['_trajectory']]
if _hdf5_group is None:
hdf5_group_name = traj_node.v_full_name.replace('.', '/')
# Get child node to load
if hdf5_group_name == '':
_hdf5_group = self._trajectory_group # depends on [control=['if'], data=[]]
else:
try:
_hdf5_group = self._hdf5file.get_node(where=self._trajectory_group, name=hdf5_group_name) # depends on [control=['try'], data=[]]
except pt.NoSuchNodeError:
self._logger.error('Cannot find `%s` the hdf5 node `%s` does not exist!' % (traj_node.v_full_name, hdf5_group_name))
raise # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['_hdf5_group']]
split_names = branch_name.split('.')
final_group_name = split_names.pop()
current_depth = 1
for name in split_names:
if current_depth > max_depth:
return # depends on [control=['if'], data=[]]
# First load along the branch
_hdf5_group = getattr(_hdf5_group, name)
self._tree_load_nodes_dfs(traj_node, load_data=load_data, with_links=with_links, recursive=False, max_depth=max_depth, current_depth=current_depth, trajectory=_trajectory, as_new=_as_new, hdf5_group=_hdf5_group)
current_depth += 1
traj_node = traj_node._children[name] # depends on [control=['for'], data=['name']]
if current_depth <= max_depth:
# Then load recursively all data in the last group and below
_hdf5_group = getattr(_hdf5_group, final_group_name)
self._tree_load_nodes_dfs(traj_node, load_data=load_data, with_links=with_links, recursive=recursive, max_depth=max_depth, current_depth=current_depth, trajectory=_trajectory, as_new=_as_new, hdf5_group=_hdf5_group) # depends on [control=['if'], data=['current_depth', 'max_depth']]
|
def read_local_ifaddrs (self):
    """
    IP addresses for all active interfaces.
    @return: list of IP addresses
    @rtype: list of strings
    """
    # Interface enumeration is only implemented for POSIX platforms.
    if os.name != 'posix':
        return []
    try:
        from linkcheck.network import IfConfig
    except ImportError:
        # Optional dependency missing: report no addresses.
        return []
    ifc = IfConfig()
    active = ifc.getInterfaceList(flags=IfConfig.IFF_UP)
    # Keep only interfaces that actually report an address.
    return [address
            for address in (ifc.getAddr(iface) for iface in active)
            if address]
|
def function[read_local_ifaddrs, parameter[self]]:
constant[
IP addresses for all active interfaces.
@return: list of IP addresses
@rtype: list of strings
]
if compare[name[os].name not_equal[!=] constant[posix]] begin[:]
return[list[[]]]
<ast.Try object at 0x7da1b0aba530>
variable[ifaddrs] assign[=] list[[]]
variable[ifc] assign[=] call[name[IfConfig], parameter[]]
for taget[name[iface]] in starred[call[name[ifc].getInterfaceList, parameter[]]] begin[:]
variable[addr] assign[=] call[name[ifc].getAddr, parameter[name[iface]]]
if name[addr] begin[:]
call[name[ifaddrs].append, parameter[name[addr]]]
return[name[ifaddrs]]
|
keyword[def] identifier[read_local_ifaddrs] ( identifier[self] ):
literal[string]
keyword[if] identifier[os] . identifier[name] != literal[string] :
keyword[return] []
keyword[try] :
keyword[from] identifier[linkcheck] . identifier[network] keyword[import] identifier[IfConfig]
keyword[except] identifier[ImportError] :
keyword[return] []
identifier[ifaddrs] =[]
identifier[ifc] = identifier[IfConfig] ()
keyword[for] identifier[iface] keyword[in] identifier[ifc] . identifier[getInterfaceList] ( identifier[flags] = identifier[IfConfig] . identifier[IFF_UP] ):
identifier[addr] = identifier[ifc] . identifier[getAddr] ( identifier[iface] )
keyword[if] identifier[addr] :
identifier[ifaddrs] . identifier[append] ( identifier[addr] )
keyword[return] identifier[ifaddrs]
|
def read_local_ifaddrs(self):
"""
IP addresses for all active interfaces.
@return: list of IP addresses
@rtype: list of strings
"""
if os.name != 'posix':
# only POSIX is supported right now
return [] # depends on [control=['if'], data=[]]
try:
from linkcheck.network import IfConfig # depends on [control=['try'], data=[]]
except ImportError:
return [] # depends on [control=['except'], data=[]]
ifaddrs = []
ifc = IfConfig()
for iface in ifc.getInterfaceList(flags=IfConfig.IFF_UP):
addr = ifc.getAddr(iface)
if addr:
ifaddrs.append(addr) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['iface']]
return ifaddrs
|
def _do(name, fun, path=None):
    '''
    Invoke a function in the lxc module with no args

    path
        path to the container parent

        default: /var/lib/lxc (system default)

        .. versionadded:: 2015.8.0
    '''
    # Locate the minion hosting the named container.
    host = find_guest(name, quiet=True, path=path)
    if not host:
        return False

    client = salt.client.get_local_client(__opts__['conf_file'])
    returns = client.cmd_iter(host,
                              'lxc.{0}'.format(fun),
                              [name],
                              kwarg={'path': path},
                              timeout=60)
    # Only the first yielded item is of interest; pull out its 'ret' payload.
    ret = next(returns).get(host, {}).get('ret', None)
    if ret:
        return {host: ret}
    return ret
|
def function[_do, parameter[name, fun, path]]:
constant[
Invoke a function in the lxc module with no args
path
path to the container parent
default: /var/lib/lxc (system default)
.. versionadded:: 2015.8.0
]
variable[host] assign[=] call[name[find_guest], parameter[name[name]]]
if <ast.UnaryOp object at 0x7da1b2169720> begin[:]
return[constant[False]]
variable[client] assign[=] call[name[salt].client.get_local_client, parameter[call[name[__opts__]][constant[conf_file]]]]
variable[cmd_ret] assign[=] call[name[client].cmd_iter, parameter[name[host], call[constant[lxc.{0}].format, parameter[name[fun]]], list[[<ast.Name object at 0x7da1b216b4c0>]]]]
variable[data] assign[=] call[name[next], parameter[name[cmd_ret]]]
variable[data] assign[=] call[call[name[data].get, parameter[name[host], dictionary[[], []]]].get, parameter[constant[ret], constant[None]]]
if name[data] begin[:]
variable[data] assign[=] dictionary[[<ast.Name object at 0x7da1b2134fa0>], [<ast.Name object at 0x7da1b2136f50>]]
return[name[data]]
|
keyword[def] identifier[_do] ( identifier[name] , identifier[fun] , identifier[path] = keyword[None] ):
literal[string]
identifier[host] = identifier[find_guest] ( identifier[name] , identifier[quiet] = keyword[True] , identifier[path] = identifier[path] )
keyword[if] keyword[not] identifier[host] :
keyword[return] keyword[False]
identifier[client] = identifier[salt] . identifier[client] . identifier[get_local_client] ( identifier[__opts__] [ literal[string] ])
identifier[cmd_ret] = identifier[client] . identifier[cmd_iter] (
identifier[host] ,
literal[string] . identifier[format] ( identifier[fun] ),
[ identifier[name] ],
identifier[kwarg] ={ literal[string] : identifier[path] },
identifier[timeout] = literal[int] )
identifier[data] = identifier[next] ( identifier[cmd_ret] )
identifier[data] = identifier[data] . identifier[get] ( identifier[host] ,{}). identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[data] :
identifier[data] ={ identifier[host] : identifier[data] }
keyword[return] identifier[data]
|
def _do(name, fun, path=None):
"""
Invoke a function in the lxc module with no args
path
path to the container parent
default: /var/lib/lxc (system default)
.. versionadded:: 2015.8.0
"""
host = find_guest(name, quiet=True, path=path)
if not host:
return False # depends on [control=['if'], data=[]]
client = salt.client.get_local_client(__opts__['conf_file'])
cmd_ret = client.cmd_iter(host, 'lxc.{0}'.format(fun), [name], kwarg={'path': path}, timeout=60)
data = next(cmd_ret)
data = data.get(host, {}).get('ret', None)
if data:
data = {host: data} # depends on [control=['if'], data=[]]
return data
|
def _ConvertAnyMessage(self, value, message):
    """Convert a JSON representation into Any message.

    Args:
      value: A dict parsed from JSON.  Must carry an '@type' key naming the
        packed type; wrapper and well-known types additionally keep their
        payload under a 'value' key, while plain messages spread their
        fields over the remaining keys.
      message: The Any message to populate (type_url + serialized value).

    Raises:
      ParseError: If '@type' is missing from a non-empty *value*.
    """
    # An empty JSON object maps to a default (empty) Any message.
    if isinstance(value, dict) and not value:
      return
    try:
      type_url = value['@type']
    except KeyError:
      raise ParseError('@type is missing when parsing any message.')
    # Build an empty instance of the packed type so its fields can be parsed.
    sub_message = _CreateMessageFromTypeUrl(type_url)
    message_descriptor = sub_message.DESCRIPTOR
    full_name = message_descriptor.full_name
    if _IsWrapperMessage(message_descriptor):
      # Wrapper types (e.g. Int32Value) carry their payload under 'value'.
      self._ConvertWrapperMessage(value['value'], sub_message)
    elif full_name in _WKTJSONMETHODS:
      # Well-known types dispatch to their registered "from JSON" parser.
      methodcaller(
          _WKTJSONMETHODS[full_name][1], value['value'], sub_message)(self)
    else:
      # Plain message: every remaining key is a field of the packed type.
      # NOTE: this mutates the caller's dict by removing '@type'.
      del value['@type']
      self._ConvertFieldValuePair(value, sub_message)
    # Sets Any message
    message.value = sub_message.SerializeToString()
    message.type_url = type_url
|
def function[_ConvertAnyMessage, parameter[self, value, message]]:
constant[Convert a JSON representation into Any message.]
if <ast.BoolOp object at 0x7da1b1f648b0> begin[:]
return[None]
<ast.Try object at 0x7da1b1f67c10>
variable[sub_message] assign[=] call[name[_CreateMessageFromTypeUrl], parameter[name[type_url]]]
variable[message_descriptor] assign[=] name[sub_message].DESCRIPTOR
variable[full_name] assign[=] name[message_descriptor].full_name
if call[name[_IsWrapperMessage], parameter[name[message_descriptor]]] begin[:]
call[name[self]._ConvertWrapperMessage, parameter[call[name[value]][constant[value]], name[sub_message]]]
name[message].value assign[=] call[name[sub_message].SerializeToString, parameter[]]
name[message].type_url assign[=] name[type_url]
|
keyword[def] identifier[_ConvertAnyMessage] ( identifier[self] , identifier[value] , identifier[message] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[dict] ) keyword[and] keyword[not] identifier[value] :
keyword[return]
keyword[try] :
identifier[type_url] = identifier[value] [ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[ParseError] ( literal[string] )
identifier[sub_message] = identifier[_CreateMessageFromTypeUrl] ( identifier[type_url] )
identifier[message_descriptor] = identifier[sub_message] . identifier[DESCRIPTOR]
identifier[full_name] = identifier[message_descriptor] . identifier[full_name]
keyword[if] identifier[_IsWrapperMessage] ( identifier[message_descriptor] ):
identifier[self] . identifier[_ConvertWrapperMessage] ( identifier[value] [ literal[string] ], identifier[sub_message] )
keyword[elif] identifier[full_name] keyword[in] identifier[_WKTJSONMETHODS] :
identifier[methodcaller] (
identifier[_WKTJSONMETHODS] [ identifier[full_name] ][ literal[int] ], identifier[value] [ literal[string] ], identifier[sub_message] )( identifier[self] )
keyword[else] :
keyword[del] identifier[value] [ literal[string] ]
identifier[self] . identifier[_ConvertFieldValuePair] ( identifier[value] , identifier[sub_message] )
identifier[message] . identifier[value] = identifier[sub_message] . identifier[SerializeToString] ()
identifier[message] . identifier[type_url] = identifier[type_url]
|
def _ConvertAnyMessage(self, value, message):
"""Convert a JSON representation into Any message."""
if isinstance(value, dict) and (not value):
return # depends on [control=['if'], data=[]]
try:
type_url = value['@type'] # depends on [control=['try'], data=[]]
except KeyError:
raise ParseError('@type is missing when parsing any message.') # depends on [control=['except'], data=[]]
sub_message = _CreateMessageFromTypeUrl(type_url)
message_descriptor = sub_message.DESCRIPTOR
full_name = message_descriptor.full_name
if _IsWrapperMessage(message_descriptor):
self._ConvertWrapperMessage(value['value'], sub_message) # depends on [control=['if'], data=[]]
elif full_name in _WKTJSONMETHODS:
methodcaller(_WKTJSONMETHODS[full_name][1], value['value'], sub_message)(self) # depends on [control=['if'], data=['full_name', '_WKTJSONMETHODS']]
else:
del value['@type']
self._ConvertFieldValuePair(value, sub_message)
# Sets Any message
message.value = sub_message.SerializeToString()
message.type_url = type_url
|
def add_oid_entry(self, oid, type, value, label=None):
    """General function to add an oid entry to the MIB subtree."""
    if self.debug:
        print('DEBUG: %s %s %s %s' % (oid, type, value, label))
    # Values are stringified so the entry is serializable as-is.
    entry = {'type': str(type), 'value': str(value)}
    if label is not None:
        entry['label'] = str(label)
    # Queue the entry; it is flushed to the subtree elsewhere.
    self.pending[oid] = entry
|
def function[add_oid_entry, parameter[self, oid, type, value, label]]:
constant[General function to add an oid entry to the MIB subtree.]
if name[self].debug begin[:]
call[name[print], parameter[binary_operation[constant[DEBUG: %s %s %s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c6e5bd0>, <ast.Name object at 0x7da20c6e5c90>, <ast.Name object at 0x7da20c6e7580>, <ast.Name object at 0x7da20c6e51e0>]]]]]
variable[item] assign[=] dictionary[[<ast.Constant object at 0x7da20c6e7ee0>, <ast.Constant object at 0x7da20c6e6710>], [<ast.Call object at 0x7da20c6e54e0>, <ast.Call object at 0x7da20c6e74f0>]]
if compare[name[label] is_not constant[None]] begin[:]
call[name[item]][constant[label]] assign[=] call[name[str], parameter[name[label]]]
call[name[self].pending][name[oid]] assign[=] name[item]
|
keyword[def] identifier[add_oid_entry] ( identifier[self] , identifier[oid] , identifier[type] , identifier[value] , identifier[label] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[debug] :
identifier[print] ( literal[string] %( identifier[oid] , identifier[type] , identifier[value] , identifier[label] ))
identifier[item] ={ literal[string] : identifier[str] ( identifier[type] ), literal[string] : identifier[str] ( identifier[value] )}
keyword[if] identifier[label] keyword[is] keyword[not] keyword[None] :
identifier[item] [ literal[string] ]= identifier[str] ( identifier[label] )
identifier[self] . identifier[pending] [ identifier[oid] ]= identifier[item]
|
def add_oid_entry(self, oid, type, value, label=None):
"""General function to add an oid entry to the MIB subtree."""
if self.debug:
print('DEBUG: %s %s %s %s' % (oid, type, value, label)) # depends on [control=['if'], data=[]]
item = {'type': str(type), 'value': str(value)}
if label is not None:
item['label'] = str(label) # depends on [control=['if'], data=['label']]
self.pending[oid] = item
|
def __write_divider(self, top=False, bot=False, nl=True):
    """
    Write a divider line

    :return none:
    """
    # Assemble the fragments in order, then append in one go.
    pieces = []
    if top:
        pieces.append("\n#")
    if nl:
        pieces.append("\n")
    pieces.append("#------------------\n")
    if bot:
        pieces.append("\n#")
    self.noaa_txt += "".join(pieces)
    return
|
def function[__write_divider, parameter[self, top, bot, nl]]:
constant[
Write a divider line
:return none:
]
if name[top] begin[:]
<ast.AugAssign object at 0x7da20c6c4d60>
if name[nl] begin[:]
<ast.AugAssign object at 0x7da20c6c5150>
<ast.AugAssign object at 0x7da20c6c5450>
if name[bot] begin[:]
<ast.AugAssign object at 0x7da20c6c7ac0>
return[None]
|
keyword[def] identifier[__write_divider] ( identifier[self] , identifier[top] = keyword[False] , identifier[bot] = keyword[False] , identifier[nl] = keyword[True] ):
literal[string]
keyword[if] identifier[top] :
identifier[self] . identifier[noaa_txt] += literal[string]
keyword[if] identifier[nl] :
identifier[self] . identifier[noaa_txt] += literal[string]
identifier[self] . identifier[noaa_txt] += literal[string]
keyword[if] identifier[bot] :
identifier[self] . identifier[noaa_txt] += literal[string]
keyword[return]
|
def __write_divider(self, top=False, bot=False, nl=True):
"""
Write a divider line
:return none:
"""
if top:
self.noaa_txt += '\n#' # depends on [control=['if'], data=[]]
if nl:
self.noaa_txt += '\n' # depends on [control=['if'], data=[]]
self.noaa_txt += '#------------------\n'
if bot:
self.noaa_txt += '\n#' # depends on [control=['if'], data=[]]
return
|
def resample_boundaries(polygon, resolution, clip=None):
    """
    Resample a polygon's boundaries to a specified point spacing.

    Parameters
    -------------
    polygon: shapely.geometry.Polygon object
    resolution: float, desired distance between points on boundary
    clip: (2,) int, lower and upper bounds on the sample count
          (avoids exploding counts); defaults to [8, 200]

    Returns
    ------------
    kwargs: dict, keyword args for a Polygon(**kwargs)
    """
    if clip is None:
        clip = [8, 200]

    def redistribute(boundary):
        # sample count implied by the requested resolution, bounded so
        # the number of points can neither explode nor collapse
        samples = int(np.clip(boundary.length / resolution, *clip))
        return resample_path(boundary.coords, count=samples)

    shell = redistribute(polygon.exterior)
    holes = np.array([redistribute(ring) for ring in polygon.interiors])
    return {'shell': shell, 'holes': holes}
|
def function[resample_boundaries, parameter[polygon, resolution, clip]]:
constant[
Return a version of a polygon with boundaries resampled
to a specified resolution.
Parameters
-------------
polygon: shapely.geometry.Polygon object
resolution: float, desired distance between points on boundary
clip: (2,) int, upper and lower bounds to clip
number of samples to (to avoid exploding counts)
Returns
------------
kwargs: dict, keyword args for a Polygon(**kwargs)
]
def function[resample_boundary, parameter[boundary]]:
variable[count] assign[=] binary_operation[name[boundary].length / name[resolution]]
variable[count] assign[=] call[name[int], parameter[call[name[np].clip, parameter[name[count], <ast.Starred object at 0x7da1b22d1cf0>]]]]
return[call[name[resample_path], parameter[name[boundary].coords]]]
if compare[name[clip] is constant[None]] begin[:]
variable[clip] assign[=] list[[<ast.Constant object at 0x7da20c6aaa70>, <ast.Constant object at 0x7da20c6aa170>]]
variable[kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da20c6a8280>, <ast.Constant object at 0x7da20c6a9d20>], [<ast.Call object at 0x7da20c6aa500>, <ast.Call object at 0x7da1b22d7910>]]
for taget[name[interior]] in starred[name[polygon].interiors] begin[:]
call[call[name[kwargs]][constant[holes]].append, parameter[call[name[resample_boundary], parameter[name[interior]]]]]
call[name[kwargs]][constant[holes]] assign[=] call[name[np].array, parameter[call[name[kwargs]][constant[holes]]]]
return[name[kwargs]]
|
keyword[def] identifier[resample_boundaries] ( identifier[polygon] , identifier[resolution] , identifier[clip] = keyword[None] ):
literal[string]
keyword[def] identifier[resample_boundary] ( identifier[boundary] ):
identifier[count] = identifier[boundary] . identifier[length] / identifier[resolution]
identifier[count] = identifier[int] ( identifier[np] . identifier[clip] ( identifier[count] ,* identifier[clip] ))
keyword[return] identifier[resample_path] ( identifier[boundary] . identifier[coords] , identifier[count] = identifier[count] )
keyword[if] identifier[clip] keyword[is] keyword[None] :
identifier[clip] =[ literal[int] , literal[int] ]
identifier[kwargs] ={ literal[string] : identifier[resample_boundary] ( identifier[polygon] . identifier[exterior] ),
literal[string] : identifier[deque] ()}
keyword[for] identifier[interior] keyword[in] identifier[polygon] . identifier[interiors] :
identifier[kwargs] [ literal[string] ]. identifier[append] ( identifier[resample_boundary] ( identifier[interior] ))
identifier[kwargs] [ literal[string] ]= identifier[np] . identifier[array] ( identifier[kwargs] [ literal[string] ])
keyword[return] identifier[kwargs]
|
def resample_boundaries(polygon, resolution, clip=None):
"""
Return a version of a polygon with boundaries resampled
to a specified resolution.
Parameters
-------------
polygon: shapely.geometry.Polygon object
resolution: float, desired distance between points on boundary
clip: (2,) int, upper and lower bounds to clip
number of samples to (to avoid exploding counts)
Returns
------------
kwargs: dict, keyword args for a Polygon(**kwargs)
"""
def resample_boundary(boundary):
# add a polygon.exterior or polygon.interior to
# the deque after resampling based on our resolution
count = boundary.length / resolution
count = int(np.clip(count, *clip))
return resample_path(boundary.coords, count=count)
if clip is None:
clip = [8, 200] # depends on [control=['if'], data=['clip']]
# create a sequence of [(n,2)] points
kwargs = {'shell': resample_boundary(polygon.exterior), 'holes': deque()}
for interior in polygon.interiors:
kwargs['holes'].append(resample_boundary(interior)) # depends on [control=['for'], data=['interior']]
kwargs['holes'] = np.array(kwargs['holes'])
return kwargs
|
def process_sequence(sequence,
                     rules=None,
                     skip_non_vietnamese=True):
    """Convert a key sequence into a Vietnamese string with diacritical marks.

    Characters outside the input-method definition act as separators and
    are passed through verbatim, so continuous sequences such as
    'con meof.ddieen' are handled.

    Args:
        rules (optional): see docstring for process_key().
        skip_non_vietnamese (optional): see docstring for process_key().
    """
    if rules is None:
        rules = get_telex_definition()
    valid_keys = _accepted_chars(rules)

    pieces = []
    current = ""
    fallback = ""
    for key in sequence:
        if key in valid_keys:
            current, fallback = process_key(
                string=current,
                key=key,
                fallback_sequence=fallback,
                rules=rules,
                skip_non_vietnamese=skip_non_vietnamese)
        else:
            # Flush the finished word, emit the separator untouched,
            # and start a fresh word.
            pieces.append(current)
            pieces.append(key)
            current = ""
            fallback = ""
    pieces.append(current)
    return ''.join(pieces)
|
def function[process_sequence, parameter[sequence, rules, skip_non_vietnamese]]:
constant[ Convert a key sequence into a Vietnamese string with diacritical marks.
Args:
rules (optional): see docstring for process_key().
skip_non_vietnamese (optional): see docstring for process_key().
It even supports continous key sequences connected by separators.
i.e. process_sequence('con meof.ddieen') should work.
]
variable[result] assign[=] constant[]
variable[raw] assign[=] name[result]
variable[result_parts] assign[=] list[[]]
if compare[name[rules] is constant[None]] begin[:]
variable[rules] assign[=] call[name[get_telex_definition], parameter[]]
variable[accepted_chars] assign[=] call[name[_accepted_chars], parameter[name[rules]]]
for taget[name[key]] in starred[name[sequence]] begin[:]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[accepted_chars]] begin[:]
call[name[result_parts].append, parameter[name[result]]]
call[name[result_parts].append, parameter[name[key]]]
variable[result] assign[=] constant[]
variable[raw] assign[=] constant[]
call[name[result_parts].append, parameter[name[result]]]
return[call[constant[].join, parameter[name[result_parts]]]]
|
keyword[def] identifier[process_sequence] ( identifier[sequence] ,
identifier[rules] = keyword[None] ,
identifier[skip_non_vietnamese] = keyword[True] ):
literal[string]
identifier[result] = literal[string]
identifier[raw] = identifier[result]
identifier[result_parts] =[]
keyword[if] identifier[rules] keyword[is] keyword[None] :
identifier[rules] = identifier[get_telex_definition] ()
identifier[accepted_chars] = identifier[_accepted_chars] ( identifier[rules] )
keyword[for] identifier[key] keyword[in] identifier[sequence] :
keyword[if] identifier[key] keyword[not] keyword[in] identifier[accepted_chars] :
identifier[result_parts] . identifier[append] ( identifier[result] )
identifier[result_parts] . identifier[append] ( identifier[key] )
identifier[result] = literal[string]
identifier[raw] = literal[string]
keyword[else] :
identifier[result] , identifier[raw] = identifier[process_key] (
identifier[string] = identifier[result] ,
identifier[key] = identifier[key] ,
identifier[fallback_sequence] = identifier[raw] ,
identifier[rules] = identifier[rules] ,
identifier[skip_non_vietnamese] = identifier[skip_non_vietnamese] )
identifier[result_parts] . identifier[append] ( identifier[result] )
keyword[return] literal[string] . identifier[join] ( identifier[result_parts] )
|
def process_sequence(sequence, rules=None, skip_non_vietnamese=True):
""" Convert a key sequence into a Vietnamese string with diacritical marks.
Args:
rules (optional): see docstring for process_key().
skip_non_vietnamese (optional): see docstring for process_key().
It even supports continous key sequences connected by separators.
i.e. process_sequence('con meof.ddieen') should work.
"""
result = ''
raw = result
result_parts = []
if rules is None:
rules = get_telex_definition() # depends on [control=['if'], data=['rules']]
accepted_chars = _accepted_chars(rules)
for key in sequence:
if key not in accepted_chars:
result_parts.append(result)
result_parts.append(key)
result = ''
raw = '' # depends on [control=['if'], data=['key']]
else:
(result, raw) = process_key(string=result, key=key, fallback_sequence=raw, rules=rules, skip_non_vietnamese=skip_non_vietnamese) # depends on [control=['for'], data=['key']]
result_parts.append(result)
return ''.join(result_parts)
|
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_mstp_mstp_transmit_hold_count(self, **kwargs):
    """Auto Generated Code

    Builds the request tree
    get_stp_brief_info/output/spanning-tree-info/spanning-tree-mode/
    mstp/mstp/transmit-hold-count, fills the leaf text from
    kwargs['transmit_hold_count'], and passes the tree to the callback
    (kwargs['callback'], defaulting to self._callback).
    """
    config = ET.Element("get_stp_brief_info")
    node = config
    for tag in ("output", "spanning-tree-info", "spanning-tree-mode",
                "mstp", "mstp"):
        node = ET.SubElement(node, tag)
    leaf = ET.SubElement(node, "transmit-hold-count")
    leaf.text = kwargs.pop('transmit_hold_count')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def function[get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_mstp_mstp_transmit_hold_count, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[get_stp_brief_info] assign[=] call[name[ET].Element, parameter[constant[get_stp_brief_info]]]
variable[config] assign[=] name[get_stp_brief_info]
variable[output] assign[=] call[name[ET].SubElement, parameter[name[get_stp_brief_info], constant[output]]]
variable[spanning_tree_info] assign[=] call[name[ET].SubElement, parameter[name[output], constant[spanning-tree-info]]]
variable[spanning_tree_mode] assign[=] call[name[ET].SubElement, parameter[name[spanning_tree_info], constant[spanning-tree-mode]]]
variable[mstp] assign[=] call[name[ET].SubElement, parameter[name[spanning_tree_mode], constant[mstp]]]
variable[mstp] assign[=] call[name[ET].SubElement, parameter[name[mstp], constant[mstp]]]
variable[transmit_hold_count] assign[=] call[name[ET].SubElement, parameter[name[mstp], constant[transmit-hold-count]]]
name[transmit_hold_count].text assign[=] call[name[kwargs].pop, parameter[constant[transmit_hold_count]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]]
|
keyword[def] identifier[get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_mstp_mstp_transmit_hold_count] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[get_stp_brief_info] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[get_stp_brief_info]
identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[get_stp_brief_info] , literal[string] )
identifier[spanning_tree_info] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] )
identifier[spanning_tree_mode] = identifier[ET] . identifier[SubElement] ( identifier[spanning_tree_info] , literal[string] )
identifier[mstp] = identifier[ET] . identifier[SubElement] ( identifier[spanning_tree_mode] , literal[string] )
identifier[mstp] = identifier[ET] . identifier[SubElement] ( identifier[mstp] , literal[string] )
identifier[transmit_hold_count] = identifier[ET] . identifier[SubElement] ( identifier[mstp] , literal[string] )
identifier[transmit_hold_count] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] )
|
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_mstp_mstp_transmit_hold_count(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
get_stp_brief_info = ET.Element('get_stp_brief_info')
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, 'output')
spanning_tree_info = ET.SubElement(output, 'spanning-tree-info')
spanning_tree_mode = ET.SubElement(spanning_tree_info, 'spanning-tree-mode')
mstp = ET.SubElement(spanning_tree_mode, 'mstp')
mstp = ET.SubElement(mstp, 'mstp')
transmit_hold_count = ET.SubElement(mstp, 'transmit-hold-count')
transmit_hold_count.text = kwargs.pop('transmit_hold_count')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
def _start(self, my_task, force=False):
    """
    Checks whether the preconditions for going to READY state are met.

    Returns True if the threshold was reached, False otherwise.
    Also returns the list of tasks that yet need to be completed.
    """
    # If the task is already completed or ready, the threshold was
    # reached previously and there is nothing left to check.
    if my_task._has_state(Task.COMPLETED) or my_task._has_state(Task.READY):
        return True, None
    # Delegate the actual threshold test to the structured or
    # unstructured checker, depending on whether a split task exists.
    check = (self._check_threshold_structured
             if self.split_task is not None
             else self._check_threshold_unstructured)
    return check(my_task, force)
|
def function[_start, parameter[self, my_task, force]]:
constant[
Checks whether the preconditions for going to READY state are met.
Returns True if the threshold was reached, False otherwise.
Also returns the list of tasks that yet need to be completed.
]
if call[name[my_task]._has_state, parameter[name[Task].COMPLETED]] begin[:]
return[tuple[[<ast.Constant object at 0x7da1b01c1f90>, <ast.Constant object at 0x7da1b01c1b40>]]]
if call[name[my_task]._has_state, parameter[name[Task].READY]] begin[:]
return[tuple[[<ast.Constant object at 0x7da1b01c0730>, <ast.Constant object at 0x7da1b01c3610>]]]
if compare[name[self].split_task is constant[None]] begin[:]
return[call[name[self]._check_threshold_unstructured, parameter[name[my_task], name[force]]]]
return[call[name[self]._check_threshold_structured, parameter[name[my_task], name[force]]]]
|
keyword[def] identifier[_start] ( identifier[self] , identifier[my_task] , identifier[force] = keyword[False] ):
literal[string]
keyword[if] identifier[my_task] . identifier[_has_state] ( identifier[Task] . identifier[COMPLETED] ):
keyword[return] keyword[True] , keyword[None]
keyword[if] identifier[my_task] . identifier[_has_state] ( identifier[Task] . identifier[READY] ):
keyword[return] keyword[True] , keyword[None]
keyword[if] identifier[self] . identifier[split_task] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[_check_threshold_unstructured] ( identifier[my_task] , identifier[force] )
keyword[return] identifier[self] . identifier[_check_threshold_structured] ( identifier[my_task] , identifier[force] )
|
def _start(self, my_task, force=False):
"""
Checks whether the preconditions for going to READY state are met.
Returns True if the threshold was reached, False otherwise.
Also returns the list of tasks that yet need to be completed.
"""
# If the threshold was already reached, there is nothing else to do.
if my_task._has_state(Task.COMPLETED):
return (True, None) # depends on [control=['if'], data=[]]
if my_task._has_state(Task.READY):
return (True, None) # depends on [control=['if'], data=[]]
# Check whether we may fire.
if self.split_task is None:
return self._check_threshold_unstructured(my_task, force) # depends on [control=['if'], data=[]]
return self._check_threshold_structured(my_task, force)
|
def do_build(self):
    """
    Build this entry with ``self.explicit`` forced to True for the
    duration of the build.

    We need this hack, else 'self' would be replaced by __iter__.next().

    The previous implementation left ``self.explicit`` stuck at True if
    the parent ``do_build()`` raised; the flag is now restored in a
    ``finally`` block so the object state survives build failures.
    """
    saved = self.explicit
    self.explicit = True
    try:
        return super(KeyShareEntry, self).do_build()
    finally:
        # Restore the caller-visible flag even on exceptions.
        self.explicit = saved
|
def function[do_build, parameter[self]]:
constant[
We need this hack, else 'self' would be replaced by __iter__.next().
]
variable[tmp] assign[=] name[self].explicit
name[self].explicit assign[=] constant[True]
variable[b] assign[=] call[call[name[super], parameter[name[KeyShareEntry], name[self]]].do_build, parameter[]]
name[self].explicit assign[=] name[tmp]
return[name[b]]
|
keyword[def] identifier[do_build] ( identifier[self] ):
literal[string]
identifier[tmp] = identifier[self] . identifier[explicit]
identifier[self] . identifier[explicit] = keyword[True]
identifier[b] = identifier[super] ( identifier[KeyShareEntry] , identifier[self] ). identifier[do_build] ()
identifier[self] . identifier[explicit] = identifier[tmp]
keyword[return] identifier[b]
|
def do_build(self):
"""
We need this hack, else 'self' would be replaced by __iter__.next().
"""
tmp = self.explicit
self.explicit = True
b = super(KeyShareEntry, self).do_build()
self.explicit = tmp
return b
|
def wrap(obj, wrapper=None, methods_to_add=(), name=None, skip=(), wrap_return_values=False, clear_cache=True):
    """
    Recursively wrap a module, class, function or other object.

    :param Any obj: object to wrap recursively
    :param Optional[Callable] wrapper: callable used to wrap functions and \
    methods (receives the function as its argument)
    :param Collection[Callable] methods_to_add: callables that take a class and \
    return a (method name, method) pair to attach to every class
    :param Optional[str] name: module name to wrap into (when `obj` is a module)
    :param Collection[Union[str, type, Any]] skip: items excluded from wrapping; \
    a str is matched against the object name, a type against the object type, \
    anything else against the object itself
    :param bool wrap_return_values: when true, also wrap values returned by \
    callables (only types supported by the wrap function are handled)
    :param bool clear_cache: clear the wrapped-objects cache once wrapping is done
    :return: Wrapped `obj`
    """
    wrapped = _wrap(obj=obj,
                    wrapper=wrapper,
                    methods_to_add=methods_to_add,
                    name=name,
                    skip=skip,
                    wrap_return_values=wrap_return_values)
    if clear_cache:
        _wrapped_objs.clear()
    return wrapped
|
def function[wrap, parameter[obj, wrapper, methods_to_add, name, skip, wrap_return_values, clear_cache]]:
constant[
Wrap module, class, function or another variable recursively
:param Any obj: Object to wrap recursively
:param Optional[Callable] wrapper: Wrapper to wrap functions and methods in (accepts function as argument)
:param Collection[Callable] methods_to_add: Container of functions, which accept class as argument, and return tuple of method name and method to add to all classes
:param Optional[str] name: Name of module to wrap to (if `obj` is module)
:param Collection[Union[str, type, Any]] skip: Items to skip wrapping (if an item of a collection is the str, wrap will check the obj name, if an item of a collection is the type, wrap will check the obj type, else wrap will check an item itself)
:param bool wrap_return_values: If try, wrap return values of callables (only types, supported by wrap function are supported)
:param bool clear_cache: Clear wrapped objects cache after wrapping
:return: Wrapped `obj`
]
variable[result] assign[=] call[name[_wrap], parameter[]]
if name[clear_cache] begin[:]
call[name[_wrapped_objs].clear, parameter[]]
return[name[result]]
|
keyword[def] identifier[wrap] ( identifier[obj] , identifier[wrapper] = keyword[None] , identifier[methods_to_add] =(), identifier[name] = keyword[None] , identifier[skip] =(), identifier[wrap_return_values] = keyword[False] , identifier[clear_cache] = keyword[True] ):
literal[string]
identifier[result] = identifier[_wrap] ( identifier[obj] = identifier[obj] ,
identifier[wrapper] = identifier[wrapper] ,
identifier[methods_to_add] = identifier[methods_to_add] ,
identifier[name] = identifier[name] ,
identifier[skip] = identifier[skip] ,
identifier[wrap_return_values] = identifier[wrap_return_values] )
keyword[if] identifier[clear_cache] :
identifier[_wrapped_objs] . identifier[clear] ()
keyword[return] identifier[result]
|
def wrap(obj, wrapper=None, methods_to_add=(), name=None, skip=(), wrap_return_values=False, clear_cache=True):
"""
Wrap module, class, function or another variable recursively
:param Any obj: Object to wrap recursively
:param Optional[Callable] wrapper: Wrapper to wrap functions and methods in (accepts function as argument)
:param Collection[Callable] methods_to_add: Container of functions, which accept class as argument, and return tuple of method name and method to add to all classes
:param Optional[str] name: Name of module to wrap to (if `obj` is module)
:param Collection[Union[str, type, Any]] skip: Items to skip wrapping (if an item of a collection is the str, wrap will check the obj name, if an item of a collection is the type, wrap will check the obj type, else wrap will check an item itself)
:param bool wrap_return_values: If try, wrap return values of callables (only types, supported by wrap function are supported)
:param bool clear_cache: Clear wrapped objects cache after wrapping
:return: Wrapped `obj`
"""
result = _wrap(obj=obj, wrapper=wrapper, methods_to_add=methods_to_add, name=name, skip=skip, wrap_return_values=wrap_return_values)
if clear_cache:
_wrapped_objs.clear() # depends on [control=['if'], data=[]]
return result
|
def _create(self, **kwargs):
    '''Create service on device and create accompanying Python object.

    The parent _create can succeed on the device yet fail to read the
    new object back; that specific error is swallowed and the service
    is loaded explicitly instead. Any other HTTPError is re-raised.

    :params kwargs: keyword arguments passed in from create call
    :raises: HTTPError for any failure other than the benign
        "updated successfully but could not be retrieved" case
    :returns: Python Service object
    '''
    try:
        return super(Service, self)._create(**kwargs)
    except HTTPError as ex:
        # Only the "created but not retrievable" error is recoverable;
        # everything else is a genuine failure.
        if "The configuration was updated successfully but could not be " \
                "retrieved" not in ex.response.text:
            raise
        # BIG-IP® will create in Common partition if none is given.
        # In order to create the uri properly in this class's load,
        # drop in Common as the partition in kwargs.
        if 'partition' not in kwargs:
            kwargs['partition'] = 'Common'
        # Pop all but the necessary load kwargs from the kwargs given to
        # create. Otherwise, load may fail. Iterate over a copy so the
        # dict is not mutated while being traversed.
        kwargs_copy = kwargs.copy()
        for key in kwargs_copy:
            if key not in self._meta_data['required_load_parameters']:
                kwargs.pop(key)
        # If response was created successfully, do a local_update.
        # If not, call to overridden _load method via load
        return self.load(**kwargs)
|
def function[_create, parameter[self]]:
constant[Create service on device and create accompanying Python object.
:params kwargs: keyword arguments passed in from create call
:raises: HTTPError
:returns: Python Service object
]
<ast.Try object at 0x7da20e9637c0>
|
keyword[def] identifier[_create] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
keyword[return] identifier[super] ( identifier[Service] , identifier[self] ). identifier[_create] (** identifier[kwargs] )
keyword[except] identifier[HTTPError] keyword[as] identifier[ex] :
keyword[if] literal[string] literal[string] keyword[not] keyword[in] identifier[ex] . identifier[response] . identifier[text] :
keyword[raise]
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= literal[string]
identifier[kwargs_copy] = identifier[kwargs] . identifier[copy] ()
keyword[for] identifier[key] keyword[in] identifier[kwargs_copy] :
keyword[if] identifier[key] keyword[not] keyword[in] identifier[self] . identifier[_meta_data] [ literal[string] ]:
identifier[kwargs] . identifier[pop] ( identifier[key] )
keyword[return] identifier[self] . identifier[load] (** identifier[kwargs] )
|
def _create(self, **kwargs):
"""Create service on device and create accompanying Python object.
:params kwargs: keyword arguments passed in from create call
:raises: HTTPError
:returns: Python Service object
"""
try:
return super(Service, self)._create(**kwargs) # depends on [control=['try'], data=[]]
except HTTPError as ex:
if 'The configuration was updated successfully but could not be retrieved' not in ex.response.text:
raise # depends on [control=['if'], data=[]]
# BIG-IP® will create in Common partition if none is given.
# In order to create the uri properly in this class's load,
# drop in Common as the partition in kwargs.
if 'partition' not in kwargs:
kwargs['partition'] = 'Common' # depends on [control=['if'], data=['kwargs']]
# Pop all but the necessary load kwargs from the kwargs given to
# create. Otherwise, load may fail.
kwargs_copy = kwargs.copy()
for key in kwargs_copy:
if key not in self._meta_data['required_load_parameters']:
kwargs.pop(key) # depends on [control=['if'], data=['key']] # depends on [control=['for'], data=['key']]
# If response was created successfully, do a local_update.
# If not, call to overridden _load method via load
return self.load(**kwargs) # depends on [control=['except'], data=['ex']]
|
def set_seed(seed: int):
    """ Set random seed for python, numpy and pytorch RNGs """
    # Seed each RNG family with the same value for reproducible runs.
    for seed_fn in (random.seed, np.random.seed, torch.random.manual_seed):
        seed_fn(seed)
|
def function[set_seed, parameter[seed]]:
constant[ Set random seed for python, numpy and pytorch RNGs ]
call[name[random].seed, parameter[name[seed]]]
call[name[np].random.seed, parameter[name[seed]]]
call[name[torch].random.manual_seed, parameter[name[seed]]]
|
keyword[def] identifier[set_seed] ( identifier[seed] : identifier[int] ):
literal[string]
identifier[random] . identifier[seed] ( identifier[seed] )
identifier[np] . identifier[random] . identifier[seed] ( identifier[seed] )
identifier[torch] . identifier[random] . identifier[manual_seed] ( identifier[seed] )
|
def set_seed(seed: int):
""" Set random seed for python, numpy and pytorch RNGs """
random.seed(seed)
np.random.seed(seed)
torch.random.manual_seed(seed)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.