code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def span_tokenize_sents(self, strings):
"""
Apply ``self.span_tokenize()`` to each element of ``strings``. I.e.:
return iter((self.span_tokenize(s) for s in strings))
:rtype: iter(list(tuple(int, int)))
"""
raise NotImplementedError("span_tokenizer and span_tokenzie_sents not yet implemented. ;)")
for s in strings:
yield list(self.span_tokenize(s)) | def function[span_tokenize_sents, parameter[self, strings]]:
constant[
Apply ``self.span_tokenize()`` to each element of ``strings``. I.e.:
return iter((self.span_tokenize(s) for s in strings))
:rtype: iter(list(tuple(int, int)))
]
<ast.Raise object at 0x7da20c6a8130>
for taget[name[s]] in starred[name[strings]] begin[:]
<ast.Yield object at 0x7da20c6aa110> | keyword[def] identifier[span_tokenize_sents] ( identifier[self] , identifier[strings] ):
literal[string]
keyword[raise] identifier[NotImplementedError] ( literal[string] )
keyword[for] identifier[s] keyword[in] identifier[strings] :
keyword[yield] identifier[list] ( identifier[self] . identifier[span_tokenize] ( identifier[s] )) | def span_tokenize_sents(self, strings):
"""
Apply ``self.span_tokenize()`` to each element of ``strings``. I.e.:
return iter((self.span_tokenize(s) for s in strings))
:rtype: iter(list(tuple(int, int)))
"""
raise NotImplementedError('span_tokenizer and span_tokenzie_sents not yet implemented. ;)')
for s in strings:
yield list(self.span_tokenize(s)) # depends on [control=['for'], data=['s']] |
def _get_next_object(self, object_class):
"""stub"""
try:
next_object = OsidList.next(self)
except StopIteration:
raise
except Exception: # Need to specify exceptions here!
raise OperationFailed()
if isinstance(next_object, dict):
next_object = object_class(next_object)
return next_object | def function[_get_next_object, parameter[self, object_class]]:
constant[stub]
<ast.Try object at 0x7da18c4cf4c0>
if call[name[isinstance], parameter[name[next_object], name[dict]]] begin[:]
variable[next_object] assign[=] call[name[object_class], parameter[name[next_object]]]
return[name[next_object]] | keyword[def] identifier[_get_next_object] ( identifier[self] , identifier[object_class] ):
literal[string]
keyword[try] :
identifier[next_object] = identifier[OsidList] . identifier[next] ( identifier[self] )
keyword[except] identifier[StopIteration] :
keyword[raise]
keyword[except] identifier[Exception] :
keyword[raise] identifier[OperationFailed] ()
keyword[if] identifier[isinstance] ( identifier[next_object] , identifier[dict] ):
identifier[next_object] = identifier[object_class] ( identifier[next_object] )
keyword[return] identifier[next_object] | def _get_next_object(self, object_class):
"""stub"""
try:
next_object = OsidList.next(self) # depends on [control=['try'], data=[]]
except StopIteration:
raise # depends on [control=['except'], data=[]]
except Exception: # Need to specify exceptions here!
raise OperationFailed() # depends on [control=['except'], data=[]]
if isinstance(next_object, dict):
next_object = object_class(next_object) # depends on [control=['if'], data=[]]
return next_object |
def reviewboard(client, channel, nick, message, matches):
"""
Automatically responds to reviewboard urls if a user mentions a pattern
like cr####. Requires REVIEWBOARD_URL to exist in settings with formattable
substring '{review}'
"""
url_fmt = getattr(settings, 'REVIEWBOARD_URL', 'http://localhost/{review}')
reviews = [url_fmt.format(review=cr) for cr in matches]
return '{0} might be talking about codereview: {1}'.format(nick, ', '.join(reviews)) | def function[reviewboard, parameter[client, channel, nick, message, matches]]:
constant[
Automatically responds to reviewboard urls if a user mentions a pattern
like cr####. Requires REVIEWBOARD_URL to exist in settings with formattable
substring '{review}'
]
variable[url_fmt] assign[=] call[name[getattr], parameter[name[settings], constant[REVIEWBOARD_URL], constant[http://localhost/{review}]]]
variable[reviews] assign[=] <ast.ListComp object at 0x7da2041db040>
return[call[constant[{0} might be talking about codereview: {1}].format, parameter[name[nick], call[constant[, ].join, parameter[name[reviews]]]]]] | keyword[def] identifier[reviewboard] ( identifier[client] , identifier[channel] , identifier[nick] , identifier[message] , identifier[matches] ):
literal[string]
identifier[url_fmt] = identifier[getattr] ( identifier[settings] , literal[string] , literal[string] )
identifier[reviews] =[ identifier[url_fmt] . identifier[format] ( identifier[review] = identifier[cr] ) keyword[for] identifier[cr] keyword[in] identifier[matches] ]
keyword[return] literal[string] . identifier[format] ( identifier[nick] , literal[string] . identifier[join] ( identifier[reviews] )) | def reviewboard(client, channel, nick, message, matches):
"""
Automatically responds to reviewboard urls if a user mentions a pattern
like cr####. Requires REVIEWBOARD_URL to exist in settings with formattable
substring '{review}'
"""
url_fmt = getattr(settings, 'REVIEWBOARD_URL', 'http://localhost/{review}')
reviews = [url_fmt.format(review=cr) for cr in matches]
return '{0} might be talking about codereview: {1}'.format(nick, ', '.join(reviews)) |
def encode(self, transmission):
"""
Encodes the data, creating a CWR structure from an instance from the
domain model.
:param entity: the instance to encode
:return: a cwr string structure created from the received data
"""
data = ''
data += self._record_encode(transmission.header)
for group in transmission.groups:
data += self._record_encode(group.group_header)
for transaction in group.transactions:
for record in transaction:
data += self._record_encode(record)
data += self._record_encode(group.group_trailer)
data += self._record_encode(transmission.trailer)
return data | def function[encode, parameter[self, transmission]]:
constant[
Encodes the data, creating a CWR structure from an instance from the
domain model.
:param entity: the instance to encode
:return: a cwr string structure created from the received data
]
variable[data] assign[=] constant[]
<ast.AugAssign object at 0x7da1b19710f0>
for taget[name[group]] in starred[name[transmission].groups] begin[:]
<ast.AugAssign object at 0x7da1b19723b0>
for taget[name[transaction]] in starred[name[group].transactions] begin[:]
for taget[name[record]] in starred[name[transaction]] begin[:]
<ast.AugAssign object at 0x7da1b19728f0>
<ast.AugAssign object at 0x7da1b190b2e0>
<ast.AugAssign object at 0x7da1b190af20>
return[name[data]] | keyword[def] identifier[encode] ( identifier[self] , identifier[transmission] ):
literal[string]
identifier[data] = literal[string]
identifier[data] += identifier[self] . identifier[_record_encode] ( identifier[transmission] . identifier[header] )
keyword[for] identifier[group] keyword[in] identifier[transmission] . identifier[groups] :
identifier[data] += identifier[self] . identifier[_record_encode] ( identifier[group] . identifier[group_header] )
keyword[for] identifier[transaction] keyword[in] identifier[group] . identifier[transactions] :
keyword[for] identifier[record] keyword[in] identifier[transaction] :
identifier[data] += identifier[self] . identifier[_record_encode] ( identifier[record] )
identifier[data] += identifier[self] . identifier[_record_encode] ( identifier[group] . identifier[group_trailer] )
identifier[data] += identifier[self] . identifier[_record_encode] ( identifier[transmission] . identifier[trailer] )
keyword[return] identifier[data] | def encode(self, transmission):
"""
Encodes the data, creating a CWR structure from an instance from the
domain model.
:param entity: the instance to encode
:return: a cwr string structure created from the received data
"""
data = ''
data += self._record_encode(transmission.header)
for group in transmission.groups:
data += self._record_encode(group.group_header)
for transaction in group.transactions:
for record in transaction:
data += self._record_encode(record) # depends on [control=['for'], data=['record']] # depends on [control=['for'], data=['transaction']]
data += self._record_encode(group.group_trailer) # depends on [control=['for'], data=['group']]
data += self._record_encode(transmission.trailer)
return data |
def git_version():
"""Constructs a version string of the form:
<tag>[.<distance-from-tag>[+<branch-name-if-not-master>]]
Master is understood to be always buildable and thus untagged
versions are treated as patch levels. Branches not master are treated
as PEP-440 "local version identifiers".
"""
tag = cmd('git', 'describe').strip()
pieces = s(tag).split('-')
dotted = pieces[0]
if len(pieces) < 2:
distance = None
else:
# Distance from the latest tag is treated as a patch level.
distance = pieces[1]
dotted += '.' + s(distance)
# Branches that are not master are treated as local:
# https://www.python.org/dev/peps/pep-0440/#local-version-identifiers
if distance is not None:
branch = get_git_branch()
if branch != 'master':
dotted += '+' + s(branch)
return dotted | def function[git_version, parameter[]]:
constant[Constructs a version string of the form:
<tag>[.<distance-from-tag>[+<branch-name-if-not-master>]]
Master is understood to be always buildable and thus untagged
versions are treated as patch levels. Branches not master are treated
as PEP-440 "local version identifiers".
]
variable[tag] assign[=] call[call[name[cmd], parameter[constant[git], constant[describe]]].strip, parameter[]]
variable[pieces] assign[=] call[call[name[s], parameter[name[tag]]].split, parameter[constant[-]]]
variable[dotted] assign[=] call[name[pieces]][constant[0]]
if compare[call[name[len], parameter[name[pieces]]] less[<] constant[2]] begin[:]
variable[distance] assign[=] constant[None]
if compare[name[distance] is_not constant[None]] begin[:]
variable[branch] assign[=] call[name[get_git_branch], parameter[]]
if compare[name[branch] not_equal[!=] constant[master]] begin[:]
<ast.AugAssign object at 0x7da20c9932e0>
return[name[dotted]] | keyword[def] identifier[git_version] ():
literal[string]
identifier[tag] = identifier[cmd] ( literal[string] , literal[string] ). identifier[strip] ()
identifier[pieces] = identifier[s] ( identifier[tag] ). identifier[split] ( literal[string] )
identifier[dotted] = identifier[pieces] [ literal[int] ]
keyword[if] identifier[len] ( identifier[pieces] )< literal[int] :
identifier[distance] = keyword[None]
keyword[else] :
identifier[distance] = identifier[pieces] [ literal[int] ]
identifier[dotted] += literal[string] + identifier[s] ( identifier[distance] )
keyword[if] identifier[distance] keyword[is] keyword[not] keyword[None] :
identifier[branch] = identifier[get_git_branch] ()
keyword[if] identifier[branch] != literal[string] :
identifier[dotted] += literal[string] + identifier[s] ( identifier[branch] )
keyword[return] identifier[dotted] | def git_version():
"""Constructs a version string of the form:
<tag>[.<distance-from-tag>[+<branch-name-if-not-master>]]
Master is understood to be always buildable and thus untagged
versions are treated as patch levels. Branches not master are treated
as PEP-440 "local version identifiers".
"""
tag = cmd('git', 'describe').strip()
pieces = s(tag).split('-')
dotted = pieces[0]
if len(pieces) < 2:
distance = None # depends on [control=['if'], data=[]]
else:
# Distance from the latest tag is treated as a patch level.
distance = pieces[1]
dotted += '.' + s(distance)
# Branches that are not master are treated as local:
# https://www.python.org/dev/peps/pep-0440/#local-version-identifiers
if distance is not None:
branch = get_git_branch()
if branch != 'master':
dotted += '+' + s(branch) # depends on [control=['if'], data=['branch']] # depends on [control=['if'], data=[]]
return dotted |
def characterize_local_files(filedir, max_bytes=MAX_FILE_DEFAULT):
"""
Collate local file info as preperation for Open Humans upload.
Note: Files with filesize > max_bytes are not included in returned info.
:param filedir: This field is target directory to get files from.
:param max_bytes: This field is the maximum file size to consider. Its
default value is 128m.
"""
file_data = {}
logging.info('Characterizing files in {}'.format(filedir))
for filename in os.listdir(filedir):
filepath = os.path.join(filedir, filename)
file_stats = os.stat(filepath)
creation_date = arrow.get(file_stats.st_ctime).isoformat()
file_size = file_stats.st_size
if file_size <= max_bytes:
file_md5 = hashlib.md5()
with open(filepath, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
file_md5.update(chunk)
md5 = file_md5.hexdigest()
file_data[filename] = {
'tags': guess_tags(filename),
'description': '',
'md5': md5,
'creation_date': creation_date,
}
return file_data | def function[characterize_local_files, parameter[filedir, max_bytes]]:
constant[
Collate local file info as preperation for Open Humans upload.
Note: Files with filesize > max_bytes are not included in returned info.
:param filedir: This field is target directory to get files from.
:param max_bytes: This field is the maximum file size to consider. Its
default value is 128m.
]
variable[file_data] assign[=] dictionary[[], []]
call[name[logging].info, parameter[call[constant[Characterizing files in {}].format, parameter[name[filedir]]]]]
for taget[name[filename]] in starred[call[name[os].listdir, parameter[name[filedir]]]] begin[:]
variable[filepath] assign[=] call[name[os].path.join, parameter[name[filedir], name[filename]]]
variable[file_stats] assign[=] call[name[os].stat, parameter[name[filepath]]]
variable[creation_date] assign[=] call[call[name[arrow].get, parameter[name[file_stats].st_ctime]].isoformat, parameter[]]
variable[file_size] assign[=] name[file_stats].st_size
if compare[name[file_size] less_or_equal[<=] name[max_bytes]] begin[:]
variable[file_md5] assign[=] call[name[hashlib].md5, parameter[]]
with call[name[open], parameter[name[filepath], constant[rb]]] begin[:]
for taget[name[chunk]] in starred[call[name[iter], parameter[<ast.Lambda object at 0x7da1b0f200a0>, constant[b'']]]] begin[:]
call[name[file_md5].update, parameter[name[chunk]]]
variable[md5] assign[=] call[name[file_md5].hexdigest, parameter[]]
call[name[file_data]][name[filename]] assign[=] dictionary[[<ast.Constant object at 0x7da1b0f239d0>, <ast.Constant object at 0x7da1b0f21c30>, <ast.Constant object at 0x7da1b0f21120>, <ast.Constant object at 0x7da1b0f207c0>], [<ast.Call object at 0x7da1b0f23d90>, <ast.Constant object at 0x7da1b0f23be0>, <ast.Name object at 0x7da1b0f20df0>, <ast.Name object at 0x7da1b0f3b4f0>]]
return[name[file_data]] | keyword[def] identifier[characterize_local_files] ( identifier[filedir] , identifier[max_bytes] = identifier[MAX_FILE_DEFAULT] ):
literal[string]
identifier[file_data] ={}
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[filedir] ))
keyword[for] identifier[filename] keyword[in] identifier[os] . identifier[listdir] ( identifier[filedir] ):
identifier[filepath] = identifier[os] . identifier[path] . identifier[join] ( identifier[filedir] , identifier[filename] )
identifier[file_stats] = identifier[os] . identifier[stat] ( identifier[filepath] )
identifier[creation_date] = identifier[arrow] . identifier[get] ( identifier[file_stats] . identifier[st_ctime] ). identifier[isoformat] ()
identifier[file_size] = identifier[file_stats] . identifier[st_size]
keyword[if] identifier[file_size] <= identifier[max_bytes] :
identifier[file_md5] = identifier[hashlib] . identifier[md5] ()
keyword[with] identifier[open] ( identifier[filepath] , literal[string] ) keyword[as] identifier[f] :
keyword[for] identifier[chunk] keyword[in] identifier[iter] ( keyword[lambda] : identifier[f] . identifier[read] ( literal[int] ), literal[string] ):
identifier[file_md5] . identifier[update] ( identifier[chunk] )
identifier[md5] = identifier[file_md5] . identifier[hexdigest] ()
identifier[file_data] [ identifier[filename] ]={
literal[string] : identifier[guess_tags] ( identifier[filename] ),
literal[string] : literal[string] ,
literal[string] : identifier[md5] ,
literal[string] : identifier[creation_date] ,
}
keyword[return] identifier[file_data] | def characterize_local_files(filedir, max_bytes=MAX_FILE_DEFAULT):
"""
Collate local file info as preperation for Open Humans upload.
Note: Files with filesize > max_bytes are not included in returned info.
:param filedir: This field is target directory to get files from.
:param max_bytes: This field is the maximum file size to consider. Its
default value is 128m.
"""
file_data = {}
logging.info('Characterizing files in {}'.format(filedir))
for filename in os.listdir(filedir):
filepath = os.path.join(filedir, filename)
file_stats = os.stat(filepath)
creation_date = arrow.get(file_stats.st_ctime).isoformat()
file_size = file_stats.st_size
if file_size <= max_bytes:
file_md5 = hashlib.md5()
with open(filepath, 'rb') as f:
for chunk in iter(lambda : f.read(4096), b''):
file_md5.update(chunk) # depends on [control=['for'], data=['chunk']] # depends on [control=['with'], data=['f']]
md5 = file_md5.hexdigest()
file_data[filename] = {'tags': guess_tags(filename), 'description': '', 'md5': md5, 'creation_date': creation_date} # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['filename']]
return file_data |
def handle_dump(args):
"""usage: cosmic-ray dump <session-file>
JSON dump of session data. This output is typically run through other
programs to produce reports.
Each line of output is a list with two elements: a WorkItem and a
WorkResult, both JSON-serialized. The WorkResult can be null, indicating a
WorkItem with no results.
"""
session_file = get_db_name(args['<session-file>'])
with use_db(session_file, WorkDB.Mode.open) as database:
for work_item, result in database.completed_work_items:
print(json.dumps((work_item, result), cls=WorkItemJsonEncoder))
for work_item in database.pending_work_items:
print(json.dumps((work_item, None), cls=WorkItemJsonEncoder))
return ExitCode.OK | def function[handle_dump, parameter[args]]:
constant[usage: cosmic-ray dump <session-file>
JSON dump of session data. This output is typically run through other
programs to produce reports.
Each line of output is a list with two elements: a WorkItem and a
WorkResult, both JSON-serialized. The WorkResult can be null, indicating a
WorkItem with no results.
]
variable[session_file] assign[=] call[name[get_db_name], parameter[call[name[args]][constant[<session-file>]]]]
with call[name[use_db], parameter[name[session_file], name[WorkDB].Mode.open]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0789ae0>, <ast.Name object at 0x7da1b078a920>]]] in starred[name[database].completed_work_items] begin[:]
call[name[print], parameter[call[name[json].dumps, parameter[tuple[[<ast.Name object at 0x7da1b078ab00>, <ast.Name object at 0x7da1b078ab90>]]]]]]
for taget[name[work_item]] in starred[name[database].pending_work_items] begin[:]
call[name[print], parameter[call[name[json].dumps, parameter[tuple[[<ast.Name object at 0x7da1b2345b70>, <ast.Constant object at 0x7da1b23460e0>]]]]]]
return[name[ExitCode].OK] | keyword[def] identifier[handle_dump] ( identifier[args] ):
literal[string]
identifier[session_file] = identifier[get_db_name] ( identifier[args] [ literal[string] ])
keyword[with] identifier[use_db] ( identifier[session_file] , identifier[WorkDB] . identifier[Mode] . identifier[open] ) keyword[as] identifier[database] :
keyword[for] identifier[work_item] , identifier[result] keyword[in] identifier[database] . identifier[completed_work_items] :
identifier[print] ( identifier[json] . identifier[dumps] (( identifier[work_item] , identifier[result] ), identifier[cls] = identifier[WorkItemJsonEncoder] ))
keyword[for] identifier[work_item] keyword[in] identifier[database] . identifier[pending_work_items] :
identifier[print] ( identifier[json] . identifier[dumps] (( identifier[work_item] , keyword[None] ), identifier[cls] = identifier[WorkItemJsonEncoder] ))
keyword[return] identifier[ExitCode] . identifier[OK] | def handle_dump(args):
"""usage: cosmic-ray dump <session-file>
JSON dump of session data. This output is typically run through other
programs to produce reports.
Each line of output is a list with two elements: a WorkItem and a
WorkResult, both JSON-serialized. The WorkResult can be null, indicating a
WorkItem with no results.
"""
session_file = get_db_name(args['<session-file>'])
with use_db(session_file, WorkDB.Mode.open) as database:
for (work_item, result) in database.completed_work_items:
print(json.dumps((work_item, result), cls=WorkItemJsonEncoder)) # depends on [control=['for'], data=[]]
for work_item in database.pending_work_items:
print(json.dumps((work_item, None), cls=WorkItemJsonEncoder)) # depends on [control=['for'], data=['work_item']] # depends on [control=['with'], data=['database']]
return ExitCode.OK |
def score(self, test_model=None, score_method='VAMP2'):
"""Compute the VAMP score for this model or the cross-validation score between self and a second model.
Parameters
----------
test_model : VAMPModel, optional, default=None
If `test_model` is not None, this method computes the cross-validation score
between self and `test_model`. It is assumed that self was estimated from
the "training" data and `test_model` was estimated from the "test" data. The
score is computed for one realization of self and `test_model`. Estimation
of the average cross-validation score and partitioning of data into test and
training part is not performed by this method.
If `test_model` is None, this method computes the VAMP score for the model
contained in self.
score_method : str, optional, default='VAMP2'
Available scores are based on the variational approach for Markov processes [1]_:
* 'VAMP1' Sum of singular values of the half-weighted Koopman matrix [1]_ .
If the model is reversible, this is equal to the sum of
Koopman matrix eigenvalues, also called Rayleigh quotient [1]_.
* 'VAMP2' Sum of squared singular values of the half-weighted Koopman matrix [1]_ .
If the model is reversible, this is equal to the kinetic variance [2]_ .
* 'VAMPE' Approximation error of the estimated Koopman operator with respect to
the true Koopman operator up to an additive constant [1]_ .
Returns
-------
score : float
If `test_model` is not None, returns the cross-validation VAMP score between
self and `test_model`. Otherwise return the selected VAMP-score of self.
References
----------
.. [1] Wu, H. and Noe, F. 2017. Variational approach for learning Markov processes from time series data.
arXiv:1707.04659v1
.. [2] Noe, F. and Clementi, C. 2015. Kinetic distance and kinetic maps from molecular dynamics simulation.
J. Chem. Theory. Comput. doi:10.1021/acs.jctc.5b00553
"""
# TODO: implement for TICA too
if test_model is None:
test_model = self
Uk = self.U[:, 0:self.dimension()]
Vk = self.V[:, 0:self.dimension()]
res = None
if score_method == 'VAMP1' or score_method == 'VAMP2':
A = spd_inv_sqrt(Uk.T.dot(test_model.C00).dot(Uk))
B = Uk.T.dot(test_model.C0t).dot(Vk)
C = spd_inv_sqrt(Vk.T.dot(test_model.Ctt).dot(Vk))
ABC = mdot(A, B, C)
if score_method == 'VAMP1':
res = np.linalg.norm(ABC, ord='nuc')
elif score_method == 'VAMP2':
res = np.linalg.norm(ABC, ord='fro')**2
elif score_method == 'VAMPE':
Sk = np.diag(self.singular_values[0:self.dimension()])
res = np.trace(2.0 * mdot(Vk, Sk, Uk.T, test_model.C0t) - mdot(Vk, Sk, Uk.T, test_model.C00, Uk, Sk, Vk.T, test_model.Ctt))
else:
raise ValueError('"score" should be one of VAMP1, VAMP2 or VAMPE')
# add the contribution (+1) of the constant singular functions to the result
assert res
return res + 1 | def function[score, parameter[self, test_model, score_method]]:
constant[Compute the VAMP score for this model or the cross-validation score between self and a second model.
Parameters
----------
test_model : VAMPModel, optional, default=None
If `test_model` is not None, this method computes the cross-validation score
between self and `test_model`. It is assumed that self was estimated from
the "training" data and `test_model` was estimated from the "test" data. The
score is computed for one realization of self and `test_model`. Estimation
of the average cross-validation score and partitioning of data into test and
training part is not performed by this method.
If `test_model` is None, this method computes the VAMP score for the model
contained in self.
score_method : str, optional, default='VAMP2'
Available scores are based on the variational approach for Markov processes [1]_:
* 'VAMP1' Sum of singular values of the half-weighted Koopman matrix [1]_ .
If the model is reversible, this is equal to the sum of
Koopman matrix eigenvalues, also called Rayleigh quotient [1]_.
* 'VAMP2' Sum of squared singular values of the half-weighted Koopman matrix [1]_ .
If the model is reversible, this is equal to the kinetic variance [2]_ .
* 'VAMPE' Approximation error of the estimated Koopman operator with respect to
the true Koopman operator up to an additive constant [1]_ .
Returns
-------
score : float
If `test_model` is not None, returns the cross-validation VAMP score between
self and `test_model`. Otherwise return the selected VAMP-score of self.
References
----------
.. [1] Wu, H. and Noe, F. 2017. Variational approach for learning Markov processes from time series data.
arXiv:1707.04659v1
.. [2] Noe, F. and Clementi, C. 2015. Kinetic distance and kinetic maps from molecular dynamics simulation.
J. Chem. Theory. Comput. doi:10.1021/acs.jctc.5b00553
]
if compare[name[test_model] is constant[None]] begin[:]
variable[test_model] assign[=] name[self]
variable[Uk] assign[=] call[name[self].U][tuple[[<ast.Slice object at 0x7da1b078ecb0>, <ast.Slice object at 0x7da1b078c6d0>]]]
variable[Vk] assign[=] call[name[self].V][tuple[[<ast.Slice object at 0x7da1b078d9c0>, <ast.Slice object at 0x7da1b078d9f0>]]]
variable[res] assign[=] constant[None]
if <ast.BoolOp object at 0x7da1b078f880> begin[:]
variable[A] assign[=] call[name[spd_inv_sqrt], parameter[call[call[name[Uk].T.dot, parameter[name[test_model].C00]].dot, parameter[name[Uk]]]]]
variable[B] assign[=] call[call[name[Uk].T.dot, parameter[name[test_model].C0t]].dot, parameter[name[Vk]]]
variable[C] assign[=] call[name[spd_inv_sqrt], parameter[call[call[name[Vk].T.dot, parameter[name[test_model].Ctt]].dot, parameter[name[Vk]]]]]
variable[ABC] assign[=] call[name[mdot], parameter[name[A], name[B], name[C]]]
if compare[name[score_method] equal[==] constant[VAMP1]] begin[:]
variable[res] assign[=] call[name[np].linalg.norm, parameter[name[ABC]]]
assert[name[res]]
return[binary_operation[name[res] + constant[1]]] | keyword[def] identifier[score] ( identifier[self] , identifier[test_model] = keyword[None] , identifier[score_method] = literal[string] ):
literal[string]
keyword[if] identifier[test_model] keyword[is] keyword[None] :
identifier[test_model] = identifier[self]
identifier[Uk] = identifier[self] . identifier[U] [:, literal[int] : identifier[self] . identifier[dimension] ()]
identifier[Vk] = identifier[self] . identifier[V] [:, literal[int] : identifier[self] . identifier[dimension] ()]
identifier[res] = keyword[None]
keyword[if] identifier[score_method] == literal[string] keyword[or] identifier[score_method] == literal[string] :
identifier[A] = identifier[spd_inv_sqrt] ( identifier[Uk] . identifier[T] . identifier[dot] ( identifier[test_model] . identifier[C00] ). identifier[dot] ( identifier[Uk] ))
identifier[B] = identifier[Uk] . identifier[T] . identifier[dot] ( identifier[test_model] . identifier[C0t] ). identifier[dot] ( identifier[Vk] )
identifier[C] = identifier[spd_inv_sqrt] ( identifier[Vk] . identifier[T] . identifier[dot] ( identifier[test_model] . identifier[Ctt] ). identifier[dot] ( identifier[Vk] ))
identifier[ABC] = identifier[mdot] ( identifier[A] , identifier[B] , identifier[C] )
keyword[if] identifier[score_method] == literal[string] :
identifier[res] = identifier[np] . identifier[linalg] . identifier[norm] ( identifier[ABC] , identifier[ord] = literal[string] )
keyword[elif] identifier[score_method] == literal[string] :
identifier[res] = identifier[np] . identifier[linalg] . identifier[norm] ( identifier[ABC] , identifier[ord] = literal[string] )** literal[int]
keyword[elif] identifier[score_method] == literal[string] :
identifier[Sk] = identifier[np] . identifier[diag] ( identifier[self] . identifier[singular_values] [ literal[int] : identifier[self] . identifier[dimension] ()])
identifier[res] = identifier[np] . identifier[trace] ( literal[int] * identifier[mdot] ( identifier[Vk] , identifier[Sk] , identifier[Uk] . identifier[T] , identifier[test_model] . identifier[C0t] )- identifier[mdot] ( identifier[Vk] , identifier[Sk] , identifier[Uk] . identifier[T] , identifier[test_model] . identifier[C00] , identifier[Uk] , identifier[Sk] , identifier[Vk] . identifier[T] , identifier[test_model] . identifier[Ctt] ))
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[assert] identifier[res]
keyword[return] identifier[res] + literal[int] | def score(self, test_model=None, score_method='VAMP2'):
"""Compute the VAMP score for this model or the cross-validation score between self and a second model.
Parameters
----------
test_model : VAMPModel, optional, default=None
If `test_model` is not None, this method computes the cross-validation score
between self and `test_model`. It is assumed that self was estimated from
the "training" data and `test_model` was estimated from the "test" data. The
score is computed for one realization of self and `test_model`. Estimation
of the average cross-validation score and partitioning of data into test and
training part is not performed by this method.
If `test_model` is None, this method computes the VAMP score for the model
contained in self.
score_method : str, optional, default='VAMP2'
Available scores are based on the variational approach for Markov processes [1]_:
* 'VAMP1' Sum of singular values of the half-weighted Koopman matrix [1]_ .
If the model is reversible, this is equal to the sum of
Koopman matrix eigenvalues, also called Rayleigh quotient [1]_.
* 'VAMP2' Sum of squared singular values of the half-weighted Koopman matrix [1]_ .
If the model is reversible, this is equal to the kinetic variance [2]_ .
* 'VAMPE' Approximation error of the estimated Koopman operator with respect to
the true Koopman operator up to an additive constant [1]_ .
Returns
-------
score : float
If `test_model` is not None, returns the cross-validation VAMP score between
self and `test_model`. Otherwise return the selected VAMP-score of self.
References
----------
.. [1] Wu, H. and Noe, F. 2017. Variational approach for learning Markov processes from time series data.
arXiv:1707.04659v1
.. [2] Noe, F. and Clementi, C. 2015. Kinetic distance and kinetic maps from molecular dynamics simulation.
J. Chem. Theory. Comput. doi:10.1021/acs.jctc.5b00553
"""
# TODO: implement for TICA too
if test_model is None:
test_model = self # depends on [control=['if'], data=['test_model']]
Uk = self.U[:, 0:self.dimension()]
Vk = self.V[:, 0:self.dimension()]
res = None
if score_method == 'VAMP1' or score_method == 'VAMP2':
A = spd_inv_sqrt(Uk.T.dot(test_model.C00).dot(Uk))
B = Uk.T.dot(test_model.C0t).dot(Vk)
C = spd_inv_sqrt(Vk.T.dot(test_model.Ctt).dot(Vk))
ABC = mdot(A, B, C)
if score_method == 'VAMP1':
res = np.linalg.norm(ABC, ord='nuc') # depends on [control=['if'], data=[]]
elif score_method == 'VAMP2':
res = np.linalg.norm(ABC, ord='fro') ** 2 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif score_method == 'VAMPE':
Sk = np.diag(self.singular_values[0:self.dimension()])
res = np.trace(2.0 * mdot(Vk, Sk, Uk.T, test_model.C0t) - mdot(Vk, Sk, Uk.T, test_model.C00, Uk, Sk, Vk.T, test_model.Ctt)) # depends on [control=['if'], data=[]]
else:
raise ValueError('"score" should be one of VAMP1, VAMP2 or VAMPE')
# add the contribution (+1) of the constant singular functions to the result
assert res
return res + 1 |
def get_chat_id(self):
"""Method to get chatid of group created."""
chat_id = self.json_response.get("chatid", None)
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return chat_id | def function[get_chat_id, parameter[self]]:
constant[Method to get chatid of group created.]
variable[chat_id] assign[=] call[name[self].json_response.get, parameter[constant[chatid], constant[None]]]
call[name[self].logger.info, parameter[binary_operation[constant[%s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b2391fc0>, <ast.Attribute object at 0x7da1b2390160>]]]]]
return[name[chat_id]] | keyword[def] identifier[get_chat_id] ( identifier[self] ):
literal[string]
identifier[chat_id] = identifier[self] . identifier[json_response] . identifier[get] ( literal[string] , keyword[None] )
identifier[self] . identifier[logger] . identifier[info] ( literal[string] %( identifier[self] . identifier[request_method] , identifier[self] . identifier[request_url] ))
keyword[return] identifier[chat_id] | def get_chat_id(self):
"""Method to get chatid of group created."""
chat_id = self.json_response.get('chatid', None)
self.logger.info('%s\t%s' % (self.request_method, self.request_url))
return chat_id |
def dump(obj, from_date, with_json=True, latest_only=False, **kwargs):
"""Dump the oauth2server tokens."""
return dict(id=obj.id,
client_id=obj.client_id,
user_id=obj.user_id,
token_type=obj.token_type,
access_token=obj.access_token,
refresh_token=obj.refresh_token,
expires=dt2iso_or_empty(obj.expires),
_scopes=obj._scopes,
is_personal=obj.is_personal,
is_internal=obj.is_internal) | def function[dump, parameter[obj, from_date, with_json, latest_only]]:
constant[Dump the oauth2server tokens.]
return[call[name[dict], parameter[]]] | keyword[def] identifier[dump] ( identifier[obj] , identifier[from_date] , identifier[with_json] = keyword[True] , identifier[latest_only] = keyword[False] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[dict] ( identifier[id] = identifier[obj] . identifier[id] ,
identifier[client_id] = identifier[obj] . identifier[client_id] ,
identifier[user_id] = identifier[obj] . identifier[user_id] ,
identifier[token_type] = identifier[obj] . identifier[token_type] ,
identifier[access_token] = identifier[obj] . identifier[access_token] ,
identifier[refresh_token] = identifier[obj] . identifier[refresh_token] ,
identifier[expires] = identifier[dt2iso_or_empty] ( identifier[obj] . identifier[expires] ),
identifier[_scopes] = identifier[obj] . identifier[_scopes] ,
identifier[is_personal] = identifier[obj] . identifier[is_personal] ,
identifier[is_internal] = identifier[obj] . identifier[is_internal] ) | def dump(obj, from_date, with_json=True, latest_only=False, **kwargs):
"""Dump the oauth2server tokens."""
return dict(id=obj.id, client_id=obj.client_id, user_id=obj.user_id, token_type=obj.token_type, access_token=obj.access_token, refresh_token=obj.refresh_token, expires=dt2iso_or_empty(obj.expires), _scopes=obj._scopes, is_personal=obj.is_personal, is_internal=obj.is_internal) |
def _walk(recursion):
"""Returns a recursive or non-recursive directory walker"""
try:
from scandir import walk as walk_function
except ImportError:
from os import walk as walk_function
if recursion:
walk = partial(walk_function)
else:
def walk(path): # pylint: disable=C0111
try:
yield next(walk_function(path))
except NameError:
yield walk_function(path)
return walk | def function[_walk, parameter[recursion]]:
constant[Returns a recursive or non-recursive directory walker]
<ast.Try object at 0x7da1b14c4ac0>
if name[recursion] begin[:]
variable[walk] assign[=] call[name[partial], parameter[name[walk_function]]]
return[name[walk]] | keyword[def] identifier[_walk] ( identifier[recursion] ):
literal[string]
keyword[try] :
keyword[from] identifier[scandir] keyword[import] identifier[walk] keyword[as] identifier[walk_function]
keyword[except] identifier[ImportError] :
keyword[from] identifier[os] keyword[import] identifier[walk] keyword[as] identifier[walk_function]
keyword[if] identifier[recursion] :
identifier[walk] = identifier[partial] ( identifier[walk_function] )
keyword[else] :
keyword[def] identifier[walk] ( identifier[path] ):
keyword[try] :
keyword[yield] identifier[next] ( identifier[walk_function] ( identifier[path] ))
keyword[except] identifier[NameError] :
keyword[yield] identifier[walk_function] ( identifier[path] )
keyword[return] identifier[walk] | def _walk(recursion):
"""Returns a recursive or non-recursive directory walker"""
try:
from scandir import walk as walk_function # depends on [control=['try'], data=[]]
except ImportError:
from os import walk as walk_function # depends on [control=['except'], data=[]]
if recursion:
walk = partial(walk_function) # depends on [control=['if'], data=[]]
else:
def walk(path): # pylint: disable=C0111
try:
yield next(walk_function(path)) # depends on [control=['try'], data=[]]
except NameError:
yield walk_function(path) # depends on [control=['except'], data=[]]
return walk |
def replace(self, obj, value, recursive=True):
"""Replace *obj* with *value*.
*obj* can be either a string, a :class:`.Node`, or another
:class:`.Wikicode` object (as created by :meth:`get_sections`, for
example). If *obj* is a string, we will operate on all instances of
that string within the code, otherwise only on the specific instance
given. *value* can be anything parsable by :func:`.parse_anything`.
If *recursive* is ``True``, we will try to find *obj* within our child
nodes even if it is not a direct descendant of this :class:`.Wikicode`
object. If *obj* is not found, :exc:`ValueError` is raised.
"""
if isinstance(obj, (Node, Wikicode)):
context, index = self._do_strong_search(obj, recursive)
for i in range(index.start, index.stop):
context.nodes.pop(index.start)
context.insert(index.start, value)
else:
for exact, context, index in self._do_weak_search(obj, recursive):
if exact:
for i in range(index.start, index.stop):
context.nodes.pop(index.start)
context.insert(index.start, value)
else:
self._slice_replace(context, index, str(obj), str(value)) | def function[replace, parameter[self, obj, value, recursive]]:
constant[Replace *obj* with *value*.
*obj* can be either a string, a :class:`.Node`, or another
:class:`.Wikicode` object (as created by :meth:`get_sections`, for
example). If *obj* is a string, we will operate on all instances of
that string within the code, otherwise only on the specific instance
given. *value* can be anything parsable by :func:`.parse_anything`.
If *recursive* is ``True``, we will try to find *obj* within our child
nodes even if it is not a direct descendant of this :class:`.Wikicode`
object. If *obj* is not found, :exc:`ValueError` is raised.
]
if call[name[isinstance], parameter[name[obj], tuple[[<ast.Name object at 0x7da204961840>, <ast.Name object at 0x7da2049604f0>]]]] begin[:]
<ast.Tuple object at 0x7da204961a50> assign[=] call[name[self]._do_strong_search, parameter[name[obj], name[recursive]]]
for taget[name[i]] in starred[call[name[range], parameter[name[index].start, name[index].stop]]] begin[:]
call[name[context].nodes.pop, parameter[name[index].start]]
call[name[context].insert, parameter[name[index].start, name[value]]] | keyword[def] identifier[replace] ( identifier[self] , identifier[obj] , identifier[value] , identifier[recursive] = keyword[True] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[obj] ,( identifier[Node] , identifier[Wikicode] )):
identifier[context] , identifier[index] = identifier[self] . identifier[_do_strong_search] ( identifier[obj] , identifier[recursive] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[index] . identifier[start] , identifier[index] . identifier[stop] ):
identifier[context] . identifier[nodes] . identifier[pop] ( identifier[index] . identifier[start] )
identifier[context] . identifier[insert] ( identifier[index] . identifier[start] , identifier[value] )
keyword[else] :
keyword[for] identifier[exact] , identifier[context] , identifier[index] keyword[in] identifier[self] . identifier[_do_weak_search] ( identifier[obj] , identifier[recursive] ):
keyword[if] identifier[exact] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[index] . identifier[start] , identifier[index] . identifier[stop] ):
identifier[context] . identifier[nodes] . identifier[pop] ( identifier[index] . identifier[start] )
identifier[context] . identifier[insert] ( identifier[index] . identifier[start] , identifier[value] )
keyword[else] :
identifier[self] . identifier[_slice_replace] ( identifier[context] , identifier[index] , identifier[str] ( identifier[obj] ), identifier[str] ( identifier[value] )) | def replace(self, obj, value, recursive=True):
"""Replace *obj* with *value*.
*obj* can be either a string, a :class:`.Node`, or another
:class:`.Wikicode` object (as created by :meth:`get_sections`, for
example). If *obj* is a string, we will operate on all instances of
that string within the code, otherwise only on the specific instance
given. *value* can be anything parsable by :func:`.parse_anything`.
If *recursive* is ``True``, we will try to find *obj* within our child
nodes even if it is not a direct descendant of this :class:`.Wikicode`
object. If *obj* is not found, :exc:`ValueError` is raised.
"""
if isinstance(obj, (Node, Wikicode)):
(context, index) = self._do_strong_search(obj, recursive)
for i in range(index.start, index.stop):
context.nodes.pop(index.start) # depends on [control=['for'], data=[]]
context.insert(index.start, value) # depends on [control=['if'], data=[]]
else:
for (exact, context, index) in self._do_weak_search(obj, recursive):
if exact:
for i in range(index.start, index.stop):
context.nodes.pop(index.start) # depends on [control=['for'], data=[]]
context.insert(index.start, value) # depends on [control=['if'], data=[]]
else:
self._slice_replace(context, index, str(obj), str(value)) # depends on [control=['for'], data=[]] |
def compose_config(self):
""" compose benchmark block """
# step file
self.stepper_wrapper.prepare_stepper()
self.stpd = self.stepper_wrapper.stpd
if self.stepper_wrapper.instances:
self.instances = self.stepper_wrapper.instances
if not self.stpd:
raise RuntimeError("Cannot proceed with no STPD file")
kwargs = {}
kwargs['sequence_no'] = self.sequence_no
if self.ssl:
_auth_section = ''
_ciphers = ''
ssl_template = "transport_t ssl_transport = transport_ssl_t {\n" \
" timeout = 1s\n" \
" %s\n" \
" %s}\n" \
" transport = ssl_transport"
if self.client_certificate or self.client_key:
_auth_section = 'auth_t def_auth = auth_t { key = "%s" cert = "%s"} auth = def_auth' \
% (self.client_key, self.client_certificate)
if self.client_cipher_suites:
_ciphers = 'ciphers = "%s"' % self.client_cipher_suites
kwargs['ssl_transport'] = ssl_template % (_auth_section, _ciphers)
else:
kwargs['ssl_transport'] = ""
kwargs['method_stream'] = self.method_prefix + \
"_ipv6_t" if self.ipv6 else self.method_prefix + "_ipv4_t"
kwargs['phout'] = self.phout_file
kwargs['answ_log'] = self.answ_log
kwargs['answ_log_level'] = self.answ_log_level
kwargs['comment_answ'] = "# " if self.answ_log_level == 'none' else ''
kwargs['stpd'] = self.stpd
kwargs['source_log_prefix'] = self.source_log_prefix
kwargs['method_options'] = self.method_options
if self.tank_type:
kwargs[
'proto'] = "proto=http_proto%s" % self.sequence_no if self.tank_type == 'http' else "proto=none_proto"
kwargs['comment_proto'] = ""
else:
kwargs['proto'] = ""
kwargs['comment_proto'] = "#"
if self.gatling:
kwargs['bind'] = 'bind={ ' + self.gatling + ' }'
else:
kwargs['bind'] = ''
kwargs['ip'] = self.resolved_ip
kwargs['port'] = self.port
kwargs['timeout'] = self.timeout
kwargs['instances'] = self.instances
tune = ''
if self.phantom_http_entity:
tune += "entity = " + self.phantom_http_entity + "\n"
if self.phantom_http_field:
tune += "field = " + self.phantom_http_field + "\n"
if self.phantom_http_field_num:
tune += "field_num = {}\n".format(self.phantom_http_field_num)
if self.phantom_http_line:
tune += "line = " + self.phantom_http_line + "\n"
if tune:
kwargs['reply_limits'] = 'reply_limits = {\n' + tune + "}"
else:
kwargs['reply_limits'] = ''
if self.is_main:
fname = 'phantom_benchmark_main.tpl'
else:
fname = 'phantom_benchmark_additional.tpl'
template_str = resource_string(
__name__, "config/" + fname)
tpl = string.Template(template_str)
config = tpl.substitute(kwargs)
return config | def function[compose_config, parameter[self]]:
constant[ compose benchmark block ]
call[name[self].stepper_wrapper.prepare_stepper, parameter[]]
name[self].stpd assign[=] name[self].stepper_wrapper.stpd
if name[self].stepper_wrapper.instances begin[:]
name[self].instances assign[=] name[self].stepper_wrapper.instances
if <ast.UnaryOp object at 0x7da1b050a4a0> begin[:]
<ast.Raise object at 0x7da1b050a590>
variable[kwargs] assign[=] dictionary[[], []]
call[name[kwargs]][constant[sequence_no]] assign[=] name[self].sequence_no
if name[self].ssl begin[:]
variable[_auth_section] assign[=] constant[]
variable[_ciphers] assign[=] constant[]
variable[ssl_template] assign[=] constant[transport_t ssl_transport = transport_ssl_t {
timeout = 1s
%s
%s}
transport = ssl_transport]
if <ast.BoolOp object at 0x7da1b05961d0> begin[:]
variable[_auth_section] assign[=] binary_operation[constant[auth_t def_auth = auth_t { key = "%s" cert = "%s"} auth = def_auth] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b0596350>, <ast.Attribute object at 0x7da1b0597070>]]]
if name[self].client_cipher_suites begin[:]
variable[_ciphers] assign[=] binary_operation[constant[ciphers = "%s"] <ast.Mod object at 0x7da2590d6920> name[self].client_cipher_suites]
call[name[kwargs]][constant[ssl_transport]] assign[=] binary_operation[name[ssl_template] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0596110>, <ast.Name object at 0x7da1b05958a0>]]]
call[name[kwargs]][constant[method_stream]] assign[=] <ast.IfExp object at 0x7da1b05969e0>
call[name[kwargs]][constant[phout]] assign[=] name[self].phout_file
call[name[kwargs]][constant[answ_log]] assign[=] name[self].answ_log
call[name[kwargs]][constant[answ_log_level]] assign[=] name[self].answ_log_level
call[name[kwargs]][constant[comment_answ]] assign[=] <ast.IfExp object at 0x7da1b05968f0>
call[name[kwargs]][constant[stpd]] assign[=] name[self].stpd
call[name[kwargs]][constant[source_log_prefix]] assign[=] name[self].source_log_prefix
call[name[kwargs]][constant[method_options]] assign[=] name[self].method_options
if name[self].tank_type begin[:]
call[name[kwargs]][constant[proto]] assign[=] <ast.IfExp object at 0x7da1b03839d0>
call[name[kwargs]][constant[comment_proto]] assign[=] constant[]
if name[self].gatling begin[:]
call[name[kwargs]][constant[bind]] assign[=] binary_operation[binary_operation[constant[bind={ ] + name[self].gatling] + constant[ }]]
call[name[kwargs]][constant[ip]] assign[=] name[self].resolved_ip
call[name[kwargs]][constant[port]] assign[=] name[self].port
call[name[kwargs]][constant[timeout]] assign[=] name[self].timeout
call[name[kwargs]][constant[instances]] assign[=] name[self].instances
variable[tune] assign[=] constant[]
if name[self].phantom_http_entity begin[:]
<ast.AugAssign object at 0x7da1b0380e20>
if name[self].phantom_http_field begin[:]
<ast.AugAssign object at 0x7da1b0381840>
if name[self].phantom_http_field_num begin[:]
<ast.AugAssign object at 0x7da1b03825c0>
if name[self].phantom_http_line begin[:]
<ast.AugAssign object at 0x7da1b0382f80>
if name[tune] begin[:]
call[name[kwargs]][constant[reply_limits]] assign[=] binary_operation[binary_operation[constant[reply_limits = {
] + name[tune]] + constant[}]]
if name[self].is_main begin[:]
variable[fname] assign[=] constant[phantom_benchmark_main.tpl]
variable[template_str] assign[=] call[name[resource_string], parameter[name[__name__], binary_operation[constant[config/] + name[fname]]]]
variable[tpl] assign[=] call[name[string].Template, parameter[name[template_str]]]
variable[config] assign[=] call[name[tpl].substitute, parameter[name[kwargs]]]
return[name[config]] | keyword[def] identifier[compose_config] ( identifier[self] ):
literal[string]
identifier[self] . identifier[stepper_wrapper] . identifier[prepare_stepper] ()
identifier[self] . identifier[stpd] = identifier[self] . identifier[stepper_wrapper] . identifier[stpd]
keyword[if] identifier[self] . identifier[stepper_wrapper] . identifier[instances] :
identifier[self] . identifier[instances] = identifier[self] . identifier[stepper_wrapper] . identifier[instances]
keyword[if] keyword[not] identifier[self] . identifier[stpd] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[kwargs] ={}
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[sequence_no]
keyword[if] identifier[self] . identifier[ssl] :
identifier[_auth_section] = literal[string]
identifier[_ciphers] = literal[string]
identifier[ssl_template] = literal[string] literal[string] literal[string] literal[string] literal[string]
keyword[if] identifier[self] . identifier[client_certificate] keyword[or] identifier[self] . identifier[client_key] :
identifier[_auth_section] = literal[string] %( identifier[self] . identifier[client_key] , identifier[self] . identifier[client_certificate] )
keyword[if] identifier[self] . identifier[client_cipher_suites] :
identifier[_ciphers] = literal[string] % identifier[self] . identifier[client_cipher_suites]
identifier[kwargs] [ literal[string] ]= identifier[ssl_template] %( identifier[_auth_section] , identifier[_ciphers] )
keyword[else] :
identifier[kwargs] [ literal[string] ]= literal[string]
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[method_prefix] + literal[string] keyword[if] identifier[self] . identifier[ipv6] keyword[else] identifier[self] . identifier[method_prefix] + literal[string]
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[phout_file]
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[answ_log]
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[answ_log_level]
identifier[kwargs] [ literal[string] ]= literal[string] keyword[if] identifier[self] . identifier[answ_log_level] == literal[string] keyword[else] literal[string]
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[stpd]
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[source_log_prefix]
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[method_options]
keyword[if] identifier[self] . identifier[tank_type] :
identifier[kwargs] [
literal[string] ]= literal[string] % identifier[self] . identifier[sequence_no] keyword[if] identifier[self] . identifier[tank_type] == literal[string] keyword[else] literal[string]
identifier[kwargs] [ literal[string] ]= literal[string]
keyword[else] :
identifier[kwargs] [ literal[string] ]= literal[string]
identifier[kwargs] [ literal[string] ]= literal[string]
keyword[if] identifier[self] . identifier[gatling] :
identifier[kwargs] [ literal[string] ]= literal[string] + identifier[self] . identifier[gatling] + literal[string]
keyword[else] :
identifier[kwargs] [ literal[string] ]= literal[string]
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[resolved_ip]
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[port]
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[timeout]
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[instances]
identifier[tune] = literal[string]
keyword[if] identifier[self] . identifier[phantom_http_entity] :
identifier[tune] += literal[string] + identifier[self] . identifier[phantom_http_entity] + literal[string]
keyword[if] identifier[self] . identifier[phantom_http_field] :
identifier[tune] += literal[string] + identifier[self] . identifier[phantom_http_field] + literal[string]
keyword[if] identifier[self] . identifier[phantom_http_field_num] :
identifier[tune] += literal[string] . identifier[format] ( identifier[self] . identifier[phantom_http_field_num] )
keyword[if] identifier[self] . identifier[phantom_http_line] :
identifier[tune] += literal[string] + identifier[self] . identifier[phantom_http_line] + literal[string]
keyword[if] identifier[tune] :
identifier[kwargs] [ literal[string] ]= literal[string] + identifier[tune] + literal[string]
keyword[else] :
identifier[kwargs] [ literal[string] ]= literal[string]
keyword[if] identifier[self] . identifier[is_main] :
identifier[fname] = literal[string]
keyword[else] :
identifier[fname] = literal[string]
identifier[template_str] = identifier[resource_string] (
identifier[__name__] , literal[string] + identifier[fname] )
identifier[tpl] = identifier[string] . identifier[Template] ( identifier[template_str] )
identifier[config] = identifier[tpl] . identifier[substitute] ( identifier[kwargs] )
keyword[return] identifier[config] | def compose_config(self):
""" compose benchmark block """
# step file
self.stepper_wrapper.prepare_stepper()
self.stpd = self.stepper_wrapper.stpd
if self.stepper_wrapper.instances:
self.instances = self.stepper_wrapper.instances # depends on [control=['if'], data=[]]
if not self.stpd:
raise RuntimeError('Cannot proceed with no STPD file') # depends on [control=['if'], data=[]]
kwargs = {}
kwargs['sequence_no'] = self.sequence_no
if self.ssl:
_auth_section = ''
_ciphers = ''
ssl_template = 'transport_t ssl_transport = transport_ssl_t {\n timeout = 1s\n %s\n %s}\n transport = ssl_transport'
if self.client_certificate or self.client_key:
_auth_section = 'auth_t def_auth = auth_t { key = "%s" cert = "%s"} auth = def_auth' % (self.client_key, self.client_certificate) # depends on [control=['if'], data=[]]
if self.client_cipher_suites:
_ciphers = 'ciphers = "%s"' % self.client_cipher_suites # depends on [control=['if'], data=[]]
kwargs['ssl_transport'] = ssl_template % (_auth_section, _ciphers) # depends on [control=['if'], data=[]]
else:
kwargs['ssl_transport'] = ''
kwargs['method_stream'] = self.method_prefix + '_ipv6_t' if self.ipv6 else self.method_prefix + '_ipv4_t'
kwargs['phout'] = self.phout_file
kwargs['answ_log'] = self.answ_log
kwargs['answ_log_level'] = self.answ_log_level
kwargs['comment_answ'] = '# ' if self.answ_log_level == 'none' else ''
kwargs['stpd'] = self.stpd
kwargs['source_log_prefix'] = self.source_log_prefix
kwargs['method_options'] = self.method_options
if self.tank_type:
kwargs['proto'] = 'proto=http_proto%s' % self.sequence_no if self.tank_type == 'http' else 'proto=none_proto'
kwargs['comment_proto'] = '' # depends on [control=['if'], data=[]]
else:
kwargs['proto'] = ''
kwargs['comment_proto'] = '#'
if self.gatling:
kwargs['bind'] = 'bind={ ' + self.gatling + ' }' # depends on [control=['if'], data=[]]
else:
kwargs['bind'] = ''
kwargs['ip'] = self.resolved_ip
kwargs['port'] = self.port
kwargs['timeout'] = self.timeout
kwargs['instances'] = self.instances
tune = ''
if self.phantom_http_entity:
tune += 'entity = ' + self.phantom_http_entity + '\n' # depends on [control=['if'], data=[]]
if self.phantom_http_field:
tune += 'field = ' + self.phantom_http_field + '\n' # depends on [control=['if'], data=[]]
if self.phantom_http_field_num:
tune += 'field_num = {}\n'.format(self.phantom_http_field_num) # depends on [control=['if'], data=[]]
if self.phantom_http_line:
tune += 'line = ' + self.phantom_http_line + '\n' # depends on [control=['if'], data=[]]
if tune:
kwargs['reply_limits'] = 'reply_limits = {\n' + tune + '}' # depends on [control=['if'], data=[]]
else:
kwargs['reply_limits'] = ''
if self.is_main:
fname = 'phantom_benchmark_main.tpl' # depends on [control=['if'], data=[]]
else:
fname = 'phantom_benchmark_additional.tpl'
template_str = resource_string(__name__, 'config/' + fname)
tpl = string.Template(template_str)
config = tpl.substitute(kwargs)
return config |
def idngram2lm(idngram_file, vocab_file, output_file, context_file=None, vocab_type=1, oov_fraction=0.5, four_byte_counts=False, min_unicount=0, zeroton_fraction=False, n=3, verbosity=2, arpa_output=True, ascii_input=False):
"""
Takes an idngram-file (in either binary (by default) or ASCII (if specified) format), a vocabulary file, and (optionally) a context cues file. Additional command line parameters will specify the cutoffs, the discounting strategy and parameters, etc. It outputs a language model, in either binary format (to be read by evallm), or in ARPA format.
"""
# TODO: Args still missing
# [ -calc_mem | -buffer 100 | -spec_num y ... z ]
# [ -two_byte_bo_weights
# [ -min_bo_weight nnnnn] [ -max_bo_weight nnnnn] [ -out_of_range_bo_weights] ]
# [ -linear | -absolute | -good_turing | -witten_bell ]
# [ -disc_ranges 1 7 7 ]
# [ -cutoffs 0 ... 0 ]
cmd = ['idngram2lm', '-idngram', os.path.abspath(idngram_file),
'-vocab', os.path.abspath(vocab_file),
'-vocab_type', vocab_type,
'-oov_fraction', oov_fraction,
'-min_unicount',min_unicount,
'-verbosity',verbosity,
'-n',n]
if arpa_output:
cmd.extend(['-arpa',output_file])
else:
cmd.extend(['-binary',output_file])
if four_byte_counts:
cmd.append('-four_byte_counts')
if zeroton_fraction:
cmd.append('-zeroton_fraction')
if ascii_input:
cmd.append('-ascii_input')
else:
cmd.append('-bin_input')
# Ensure that every parameter is of type 'str'
cmd = [str(x) for x in cmd]
with tempfile.SpooledTemporaryFile() as output_f:
with output_to_debuglogger() as err_f:
exitcode = subprocess.call(cmd, stdout=output_f, stderr=err_f)
output = output_f.read()
logger = logging.getLogger(__name__)
logger.debug("Command '%s' returned with exit code '%d'." % (' '.join(cmd), exitcode))
if exitcode != 0:
raise ConversionError("'%s' returned with non-zero exit status '%s'" % (cmd[0], exitcode))
if sys.version_info >= (3,) and type(output) is bytes:
output = output.decode('utf-8')
return output.strip() | def function[idngram2lm, parameter[idngram_file, vocab_file, output_file, context_file, vocab_type, oov_fraction, four_byte_counts, min_unicount, zeroton_fraction, n, verbosity, arpa_output, ascii_input]]:
constant[
Takes an idngram-file (in either binary (by default) or ASCII (if specified) format), a vocabulary file, and (optionally) a context cues file. Additional command line parameters will specify the cutoffs, the discounting strategy and parameters, etc. It outputs a language model, in either binary format (to be read by evallm), or in ARPA format.
]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da1b2344d00>, <ast.Constant object at 0x7da1b2346770>, <ast.Call object at 0x7da1b2344a00>, <ast.Constant object at 0x7da1b2347640>, <ast.Call object at 0x7da1b2344520>, <ast.Constant object at 0x7da1b23444c0>, <ast.Name object at 0x7da1b2344640>, <ast.Constant object at 0x7da1b2346e30>, <ast.Name object at 0x7da1b23444f0>, <ast.Constant object at 0x7da1b2347e50>, <ast.Name object at 0x7da1b2346620>, <ast.Constant object at 0x7da1b2346440>, <ast.Name object at 0x7da1b2347b80>, <ast.Constant object at 0x7da1b2347ac0>, <ast.Name object at 0x7da1b2347e80>]]
if name[arpa_output] begin[:]
call[name[cmd].extend, parameter[list[[<ast.Constant object at 0x7da1b23453f0>, <ast.Name object at 0x7da1b2345480>]]]]
if name[four_byte_counts] begin[:]
call[name[cmd].append, parameter[constant[-four_byte_counts]]]
if name[zeroton_fraction] begin[:]
call[name[cmd].append, parameter[constant[-zeroton_fraction]]]
if name[ascii_input] begin[:]
call[name[cmd].append, parameter[constant[-ascii_input]]]
variable[cmd] assign[=] <ast.ListComp object at 0x7da1b2346a40>
with call[name[tempfile].SpooledTemporaryFile, parameter[]] begin[:]
with call[name[output_to_debuglogger], parameter[]] begin[:]
variable[exitcode] assign[=] call[name[subprocess].call, parameter[name[cmd]]]
variable[output] assign[=] call[name[output_f].read, parameter[]]
variable[logger] assign[=] call[name[logging].getLogger, parameter[name[__name__]]]
call[name[logger].debug, parameter[binary_operation[constant[Command '%s' returned with exit code '%d'.] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b2345d80>, <ast.Name object at 0x7da1b2347eb0>]]]]]
if compare[name[exitcode] not_equal[!=] constant[0]] begin[:]
<ast.Raise object at 0x7da1b23463e0>
if <ast.BoolOp object at 0x7da1b2345540> begin[:]
variable[output] assign[=] call[name[output].decode, parameter[constant[utf-8]]]
return[call[name[output].strip, parameter[]]] | keyword[def] identifier[idngram2lm] ( identifier[idngram_file] , identifier[vocab_file] , identifier[output_file] , identifier[context_file] = keyword[None] , identifier[vocab_type] = literal[int] , identifier[oov_fraction] = literal[int] , identifier[four_byte_counts] = keyword[False] , identifier[min_unicount] = literal[int] , identifier[zeroton_fraction] = keyword[False] , identifier[n] = literal[int] , identifier[verbosity] = literal[int] , identifier[arpa_output] = keyword[True] , identifier[ascii_input] = keyword[False] ):
literal[string]
identifier[cmd] =[ literal[string] , literal[string] , identifier[os] . identifier[path] . identifier[abspath] ( identifier[idngram_file] ),
literal[string] , identifier[os] . identifier[path] . identifier[abspath] ( identifier[vocab_file] ),
literal[string] , identifier[vocab_type] ,
literal[string] , identifier[oov_fraction] ,
literal[string] , identifier[min_unicount] ,
literal[string] , identifier[verbosity] ,
literal[string] , identifier[n] ]
keyword[if] identifier[arpa_output] :
identifier[cmd] . identifier[extend] ([ literal[string] , identifier[output_file] ])
keyword[else] :
identifier[cmd] . identifier[extend] ([ literal[string] , identifier[output_file] ])
keyword[if] identifier[four_byte_counts] :
identifier[cmd] . identifier[append] ( literal[string] )
keyword[if] identifier[zeroton_fraction] :
identifier[cmd] . identifier[append] ( literal[string] )
keyword[if] identifier[ascii_input] :
identifier[cmd] . identifier[append] ( literal[string] )
keyword[else] :
identifier[cmd] . identifier[append] ( literal[string] )
identifier[cmd] =[ identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[cmd] ]
keyword[with] identifier[tempfile] . identifier[SpooledTemporaryFile] () keyword[as] identifier[output_f] :
keyword[with] identifier[output_to_debuglogger] () keyword[as] identifier[err_f] :
identifier[exitcode] = identifier[subprocess] . identifier[call] ( identifier[cmd] , identifier[stdout] = identifier[output_f] , identifier[stderr] = identifier[err_f] )
identifier[output] = identifier[output_f] . identifier[read] ()
identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] )
identifier[logger] . identifier[debug] ( literal[string] %( literal[string] . identifier[join] ( identifier[cmd] ), identifier[exitcode] ))
keyword[if] identifier[exitcode] != literal[int] :
keyword[raise] identifier[ConversionError] ( literal[string] %( identifier[cmd] [ literal[int] ], identifier[exitcode] ))
keyword[if] identifier[sys] . identifier[version_info] >=( literal[int] ,) keyword[and] identifier[type] ( identifier[output] ) keyword[is] identifier[bytes] :
identifier[output] = identifier[output] . identifier[decode] ( literal[string] )
keyword[return] identifier[output] . identifier[strip] () | def idngram2lm(idngram_file, vocab_file, output_file, context_file=None, vocab_type=1, oov_fraction=0.5, four_byte_counts=False, min_unicount=0, zeroton_fraction=False, n=3, verbosity=2, arpa_output=True, ascii_input=False):
"""
Takes an idngram-file (in either binary (by default) or ASCII (if specified) format), a vocabulary file, and (optionally) a context cues file. Additional command line parameters will specify the cutoffs, the discounting strategy and parameters, etc. It outputs a language model, in either binary format (to be read by evallm), or in ARPA format.
"""
# TODO: Args still missing
# [ -calc_mem | -buffer 100 | -spec_num y ... z ]
# [ -two_byte_bo_weights
# [ -min_bo_weight nnnnn] [ -max_bo_weight nnnnn] [ -out_of_range_bo_weights] ]
# [ -linear | -absolute | -good_turing | -witten_bell ]
# [ -disc_ranges 1 7 7 ]
# [ -cutoffs 0 ... 0 ]
cmd = ['idngram2lm', '-idngram', os.path.abspath(idngram_file), '-vocab', os.path.abspath(vocab_file), '-vocab_type', vocab_type, '-oov_fraction', oov_fraction, '-min_unicount', min_unicount, '-verbosity', verbosity, '-n', n]
if arpa_output:
cmd.extend(['-arpa', output_file]) # depends on [control=['if'], data=[]]
else:
cmd.extend(['-binary', output_file])
if four_byte_counts:
cmd.append('-four_byte_counts') # depends on [control=['if'], data=[]]
if zeroton_fraction:
cmd.append('-zeroton_fraction') # depends on [control=['if'], data=[]]
if ascii_input:
cmd.append('-ascii_input') # depends on [control=['if'], data=[]]
else:
cmd.append('-bin_input')
# Ensure that every parameter is of type 'str'
cmd = [str(x) for x in cmd]
with tempfile.SpooledTemporaryFile() as output_f:
with output_to_debuglogger() as err_f:
exitcode = subprocess.call(cmd, stdout=output_f, stderr=err_f) # depends on [control=['with'], data=['err_f']]
output = output_f.read() # depends on [control=['with'], data=['output_f']]
logger = logging.getLogger(__name__)
logger.debug("Command '%s' returned with exit code '%d'." % (' '.join(cmd), exitcode))
if exitcode != 0:
raise ConversionError("'%s' returned with non-zero exit status '%s'" % (cmd[0], exitcode)) # depends on [control=['if'], data=['exitcode']]
if sys.version_info >= (3,) and type(output) is bytes:
output = output.decode('utf-8') # depends on [control=['if'], data=[]]
return output.strip() |
def calc_max_flexural_wavelength(self):
"""
Returns the approximate maximum flexural wavelength
This is important when padding of the grid is required: in Flexure (this
code), grids are padded out to one maximum flexural wavelength, but in any
case, the flexural wavelength is a good characteristic distance for any
truncation limit
"""
if np.isscalar(self.D):
Dmax = self.D
else:
Dmax = self.D.max()
# This is an approximation if there is fill that evolves with iterations
# (e.g., water), but should be good enough that this won't do much to it
alpha = (4*Dmax/(self.drho*self.g))**.25 # 2D flexural parameter
self.maxFlexuralWavelength = 2*np.pi*alpha
self.maxFlexuralWavelength_ncells = int(np.ceil(self.maxFlexuralWavelength / self.dx)) | def function[calc_max_flexural_wavelength, parameter[self]]:
constant[
Returns the approximate maximum flexural wavelength
This is important when padding of the grid is required: in Flexure (this
code), grids are padded out to one maximum flexural wavelength, but in any
case, the flexural wavelength is a good characteristic distance for any
truncation limit
]
if call[name[np].isscalar, parameter[name[self].D]] begin[:]
variable[Dmax] assign[=] name[self].D
variable[alpha] assign[=] binary_operation[binary_operation[binary_operation[constant[4] * name[Dmax]] / binary_operation[name[self].drho * name[self].g]] ** constant[0.25]]
name[self].maxFlexuralWavelength assign[=] binary_operation[binary_operation[constant[2] * name[np].pi] * name[alpha]]
name[self].maxFlexuralWavelength_ncells assign[=] call[name[int], parameter[call[name[np].ceil, parameter[binary_operation[name[self].maxFlexuralWavelength / name[self].dx]]]]] | keyword[def] identifier[calc_max_flexural_wavelength] ( identifier[self] ):
literal[string]
keyword[if] identifier[np] . identifier[isscalar] ( identifier[self] . identifier[D] ):
identifier[Dmax] = identifier[self] . identifier[D]
keyword[else] :
identifier[Dmax] = identifier[self] . identifier[D] . identifier[max] ()
identifier[alpha] =( literal[int] * identifier[Dmax] /( identifier[self] . identifier[drho] * identifier[self] . identifier[g] ))** literal[int]
identifier[self] . identifier[maxFlexuralWavelength] = literal[int] * identifier[np] . identifier[pi] * identifier[alpha]
identifier[self] . identifier[maxFlexuralWavelength_ncells] = identifier[int] ( identifier[np] . identifier[ceil] ( identifier[self] . identifier[maxFlexuralWavelength] / identifier[self] . identifier[dx] )) | def calc_max_flexural_wavelength(self):
"""
Returns the approximate maximum flexural wavelength
This is important when padding of the grid is required: in Flexure (this
code), grids are padded out to one maximum flexural wavelength, but in any
case, the flexural wavelength is a good characteristic distance for any
truncation limit
"""
if np.isscalar(self.D):
Dmax = self.D # depends on [control=['if'], data=[]]
else:
Dmax = self.D.max() # This is an approximation if there is fill that evolves with iterations
# (e.g., water), but should be good enough that this won't do much to it
alpha = (4 * Dmax / (self.drho * self.g)) ** 0.25 # 2D flexural parameter
self.maxFlexuralWavelength = 2 * np.pi * alpha
self.maxFlexuralWavelength_ncells = int(np.ceil(self.maxFlexuralWavelength / self.dx)) |
def method_delegate(**methods):
"""
Construct a renderer that delegates based on the request's HTTP method.
"""
methods = {k.upper(): v for k, v in iteritems(methods)}
if PY3:
methods = {k.encode("utf-8"): v for k, v in iteritems(methods)}
def render(request):
renderer = methods.get(request.method)
if renderer is None:
return Response(code=405)
return renderer(request)
return render | def function[method_delegate, parameter[]]:
constant[
Construct a renderer that delegates based on the request's HTTP method.
]
variable[methods] assign[=] <ast.DictComp object at 0x7da18f00c9a0>
if name[PY3] begin[:]
variable[methods] assign[=] <ast.DictComp object at 0x7da18f00da80>
def function[render, parameter[request]]:
variable[renderer] assign[=] call[name[methods].get, parameter[name[request].method]]
if compare[name[renderer] is constant[None]] begin[:]
return[call[name[Response], parameter[]]]
return[call[name[renderer], parameter[name[request]]]]
return[name[render]] | keyword[def] identifier[method_delegate] (** identifier[methods] ):
literal[string]
identifier[methods] ={ identifier[k] . identifier[upper] (): identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[iteritems] ( identifier[methods] )}
keyword[if] identifier[PY3] :
identifier[methods] ={ identifier[k] . identifier[encode] ( literal[string] ): identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[iteritems] ( identifier[methods] )}
keyword[def] identifier[render] ( identifier[request] ):
identifier[renderer] = identifier[methods] . identifier[get] ( identifier[request] . identifier[method] )
keyword[if] identifier[renderer] keyword[is] keyword[None] :
keyword[return] identifier[Response] ( identifier[code] = literal[int] )
keyword[return] identifier[renderer] ( identifier[request] )
keyword[return] identifier[render] | def method_delegate(**methods):
"""
Construct a renderer that delegates based on the request's HTTP method.
"""
methods = {k.upper(): v for (k, v) in iteritems(methods)}
if PY3:
methods = {k.encode('utf-8'): v for (k, v) in iteritems(methods)} # depends on [control=['if'], data=[]]
def render(request):
renderer = methods.get(request.method)
if renderer is None:
return Response(code=405) # depends on [control=['if'], data=[]]
return renderer(request)
return render |
def set_faultset_name(self, name, fsObj):
"""
Set name for Faultset
:param name: Name of Faultset
:param fsObj: ScaleIO FS object
:return: POST request response
:rtype: Requests POST response object
"""
# Set name of FaultSet
self.conn.connection._check_login()
faultSetNameDict = {'Name': name}
# This one is the most logical name comparing to other methods.
response = self.conn.connection._do_post("{}/{}{}/{}".format(self.conn.connection._api_url, "types/FaultSet::", fsObj.id, 'instances/action/setFaultSetName'), json=faultSetNameSdcDict)
# This is how its documented in REST API Chapter
#response = self._do_post("{}/{}{}/{}".format(self._api_url, "types/FaultSet::", fsObj.id, 'instances/action/setFaultSetName'), json=faultsetNameSdcDict)
return response | def function[set_faultset_name, parameter[self, name, fsObj]]:
constant[
Set name for Faultset
:param name: Name of Faultset
:param fsObj: ScaleIO FS object
:return: POST request response
:rtype: Requests POST response object
]
call[name[self].conn.connection._check_login, parameter[]]
variable[faultSetNameDict] assign[=] dictionary[[<ast.Constant object at 0x7da1b2538f10>], [<ast.Name object at 0x7da1b2539f60>]]
variable[response] assign[=] call[name[self].conn.connection._do_post, parameter[call[constant[{}/{}{}/{}].format, parameter[name[self].conn.connection._api_url, constant[types/FaultSet::], name[fsObj].id, constant[instances/action/setFaultSetName]]]]]
return[name[response]] | keyword[def] identifier[set_faultset_name] ( identifier[self] , identifier[name] , identifier[fsObj] ):
literal[string]
identifier[self] . identifier[conn] . identifier[connection] . identifier[_check_login] ()
identifier[faultSetNameDict] ={ literal[string] : identifier[name] }
identifier[response] = identifier[self] . identifier[conn] . identifier[connection] . identifier[_do_post] ( literal[string] . identifier[format] ( identifier[self] . identifier[conn] . identifier[connection] . identifier[_api_url] , literal[string] , identifier[fsObj] . identifier[id] , literal[string] ), identifier[json] = identifier[faultSetNameSdcDict] )
keyword[return] identifier[response] | def set_faultset_name(self, name, fsObj):
"""
Set name for Faultset
:param name: Name of Faultset
:param fsObj: ScaleIO FS object
:return: POST request response
:rtype: Requests POST response object
"""
# Set name of FaultSet
self.conn.connection._check_login()
faultSetNameDict = {'Name': name}
# This one is the most logical name comparing to other methods.
response = self.conn.connection._do_post('{}/{}{}/{}'.format(self.conn.connection._api_url, 'types/FaultSet::', fsObj.id, 'instances/action/setFaultSetName'), json=faultSetNameSdcDict)
# This is how its documented in REST API Chapter
#response = self._do_post("{}/{}{}/{}".format(self._api_url, "types/FaultSet::", fsObj.id, 'instances/action/setFaultSetName'), json=faultsetNameSdcDict)
return response |
def _get_converter(self, converter_str):
"""find converter function reference by name
find converter by name, converter name follows this convention:
Class.method
or:
method
The first type of converter class/function must be available in
current module.
The second type of converter must be available in `__builtin__`
(or `builtins` in python3) module.
:param converter_str: string representation of the converter func
:return: function reference
"""
ret = None
if converter_str is not None:
converter_desc_list = converter_str.split('.')
if len(converter_desc_list) == 1:
converter = converter_desc_list[0]
# default to `converter`
ret = getattr(cvt, converter, None)
if ret is None:
# try module converter
ret = self.get_converter(converter)
if ret is None:
ret = self.get_resource_clz_by_name(converter)
if ret is None:
ret = self.get_enum_by_name(converter)
if ret is None:
# try parser config
ret = self.get(converter)
if ret is None and converter_str is not None:
raise ValueError(
'Specified converter not supported: {}'.format(
converter_str))
return ret | def function[_get_converter, parameter[self, converter_str]]:
constant[find converter function reference by name
find converter by name, converter name follows this convention:
Class.method
or:
method
The first type of converter class/function must be available in
current module.
The second type of converter must be available in `__builtin__`
(or `builtins` in python3) module.
:param converter_str: string representation of the converter func
:return: function reference
]
variable[ret] assign[=] constant[None]
if compare[name[converter_str] is_not constant[None]] begin[:]
variable[converter_desc_list] assign[=] call[name[converter_str].split, parameter[constant[.]]]
if compare[call[name[len], parameter[name[converter_desc_list]]] equal[==] constant[1]] begin[:]
variable[converter] assign[=] call[name[converter_desc_list]][constant[0]]
variable[ret] assign[=] call[name[getattr], parameter[name[cvt], name[converter], constant[None]]]
if compare[name[ret] is constant[None]] begin[:]
variable[ret] assign[=] call[name[self].get_converter, parameter[name[converter]]]
if compare[name[ret] is constant[None]] begin[:]
variable[ret] assign[=] call[name[self].get_resource_clz_by_name, parameter[name[converter]]]
if compare[name[ret] is constant[None]] begin[:]
variable[ret] assign[=] call[name[self].get_enum_by_name, parameter[name[converter]]]
if compare[name[ret] is constant[None]] begin[:]
variable[ret] assign[=] call[name[self].get, parameter[name[converter]]]
if <ast.BoolOp object at 0x7da204564bb0> begin[:]
<ast.Raise object at 0x7da204566230>
return[name[ret]] | keyword[def] identifier[_get_converter] ( identifier[self] , identifier[converter_str] ):
literal[string]
identifier[ret] = keyword[None]
keyword[if] identifier[converter_str] keyword[is] keyword[not] keyword[None] :
identifier[converter_desc_list] = identifier[converter_str] . identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[converter_desc_list] )== literal[int] :
identifier[converter] = identifier[converter_desc_list] [ literal[int] ]
identifier[ret] = identifier[getattr] ( identifier[cvt] , identifier[converter] , keyword[None] )
keyword[if] identifier[ret] keyword[is] keyword[None] :
identifier[ret] = identifier[self] . identifier[get_converter] ( identifier[converter] )
keyword[if] identifier[ret] keyword[is] keyword[None] :
identifier[ret] = identifier[self] . identifier[get_resource_clz_by_name] ( identifier[converter] )
keyword[if] identifier[ret] keyword[is] keyword[None] :
identifier[ret] = identifier[self] . identifier[get_enum_by_name] ( identifier[converter] )
keyword[if] identifier[ret] keyword[is] keyword[None] :
identifier[ret] = identifier[self] . identifier[get] ( identifier[converter] )
keyword[if] identifier[ret] keyword[is] keyword[None] keyword[and] identifier[converter_str] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[ValueError] (
literal[string] . identifier[format] (
identifier[converter_str] ))
keyword[return] identifier[ret] | def _get_converter(self, converter_str):
"""find converter function reference by name
find converter by name, converter name follows this convention:
Class.method
or:
method
The first type of converter class/function must be available in
current module.
The second type of converter must be available in `__builtin__`
(or `builtins` in python3) module.
:param converter_str: string representation of the converter func
:return: function reference
"""
ret = None
if converter_str is not None:
converter_desc_list = converter_str.split('.')
if len(converter_desc_list) == 1:
converter = converter_desc_list[0]
# default to `converter`
ret = getattr(cvt, converter, None)
if ret is None:
# try module converter
ret = self.get_converter(converter) # depends on [control=['if'], data=['ret']]
if ret is None:
ret = self.get_resource_clz_by_name(converter) # depends on [control=['if'], data=['ret']]
if ret is None:
ret = self.get_enum_by_name(converter) # depends on [control=['if'], data=['ret']]
if ret is None:
# try parser config
ret = self.get(converter) # depends on [control=['if'], data=['ret']] # depends on [control=['if'], data=[]]
if ret is None and converter_str is not None:
raise ValueError('Specified converter not supported: {}'.format(converter_str)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['converter_str']]
return ret |
def resize_dimension(self, dimension, size):
"""
Resize a dimension to a certain size.
It will pad with the underlying HDF5 data sets' fill values (usually
zero) where necessary.
"""
if self.dimensions[dimension] is not None:
raise ValueError("Dimension '%s' is not unlimited and thus "
"cannot be resized." % dimension)
# Resize the dimension.
self._current_dim_sizes[dimension] = size
for var in self.variables.values():
new_shape = list(var.shape)
for i, d in enumerate(var.dimensions):
if d == dimension:
new_shape[i] = size
new_shape = tuple(new_shape)
if new_shape != var.shape:
var._h5ds.resize(new_shape)
# Recurse as dimensions are visible to this group and all child groups.
for i in self.groups.values():
i.resize_dimension(dimension, size) | def function[resize_dimension, parameter[self, dimension, size]]:
constant[
Resize a dimension to a certain size.
It will pad with the underlying HDF5 data sets' fill values (usually
zero) where necessary.
]
if compare[call[name[self].dimensions][name[dimension]] is_not constant[None]] begin[:]
<ast.Raise object at 0x7da1b1be5d20>
call[name[self]._current_dim_sizes][name[dimension]] assign[=] name[size]
for taget[name[var]] in starred[call[name[self].variables.values, parameter[]]] begin[:]
variable[new_shape] assign[=] call[name[list], parameter[name[var].shape]]
for taget[tuple[[<ast.Name object at 0x7da1b1be6ce0>, <ast.Name object at 0x7da1b1be7ca0>]]] in starred[call[name[enumerate], parameter[name[var].dimensions]]] begin[:]
if compare[name[d] equal[==] name[dimension]] begin[:]
call[name[new_shape]][name[i]] assign[=] name[size]
variable[new_shape] assign[=] call[name[tuple], parameter[name[new_shape]]]
if compare[name[new_shape] not_equal[!=] name[var].shape] begin[:]
call[name[var]._h5ds.resize, parameter[name[new_shape]]]
for taget[name[i]] in starred[call[name[self].groups.values, parameter[]]] begin[:]
call[name[i].resize_dimension, parameter[name[dimension], name[size]]] | keyword[def] identifier[resize_dimension] ( identifier[self] , identifier[dimension] , identifier[size] ):
literal[string]
keyword[if] identifier[self] . identifier[dimensions] [ identifier[dimension] ] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] % identifier[dimension] )
identifier[self] . identifier[_current_dim_sizes] [ identifier[dimension] ]= identifier[size]
keyword[for] identifier[var] keyword[in] identifier[self] . identifier[variables] . identifier[values] ():
identifier[new_shape] = identifier[list] ( identifier[var] . identifier[shape] )
keyword[for] identifier[i] , identifier[d] keyword[in] identifier[enumerate] ( identifier[var] . identifier[dimensions] ):
keyword[if] identifier[d] == identifier[dimension] :
identifier[new_shape] [ identifier[i] ]= identifier[size]
identifier[new_shape] = identifier[tuple] ( identifier[new_shape] )
keyword[if] identifier[new_shape] != identifier[var] . identifier[shape] :
identifier[var] . identifier[_h5ds] . identifier[resize] ( identifier[new_shape] )
keyword[for] identifier[i] keyword[in] identifier[self] . identifier[groups] . identifier[values] ():
identifier[i] . identifier[resize_dimension] ( identifier[dimension] , identifier[size] ) | def resize_dimension(self, dimension, size):
"""
Resize a dimension to a certain size.
It will pad with the underlying HDF5 data sets' fill values (usually
zero) where necessary.
"""
if self.dimensions[dimension] is not None:
raise ValueError("Dimension '%s' is not unlimited and thus cannot be resized." % dimension) # depends on [control=['if'], data=[]]
# Resize the dimension.
self._current_dim_sizes[dimension] = size
for var in self.variables.values():
new_shape = list(var.shape)
for (i, d) in enumerate(var.dimensions):
if d == dimension:
new_shape[i] = size # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
new_shape = tuple(new_shape)
if new_shape != var.shape:
var._h5ds.resize(new_shape) # depends on [control=['if'], data=['new_shape']] # depends on [control=['for'], data=['var']]
# Recurse as dimensions are visible to this group and all child groups.
for i in self.groups.values():
i.resize_dimension(dimension, size) # depends on [control=['for'], data=['i']] |
def _unichr(i):
"""
Helper function for taking a Unicode scalar value and returning a Unicode character.
:param s: Unicode scalar value to convert.
:return: Unicode character
"""
if not isinstance(i, int):
raise TypeError
try:
return six.unichr(i)
except ValueError:
# Workaround the error "ValueError: unichr() arg not in range(0x10000) (narrow Python build)"
return struct.pack("i", i).decode("utf-32") | def function[_unichr, parameter[i]]:
constant[
Helper function for taking a Unicode scalar value and returning a Unicode character.
:param s: Unicode scalar value to convert.
:return: Unicode character
]
if <ast.UnaryOp object at 0x7da1b1193310> begin[:]
<ast.Raise object at 0x7da1b1192620>
<ast.Try object at 0x7da1b1193070> | keyword[def] identifier[_unichr] ( identifier[i] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[i] , identifier[int] ):
keyword[raise] identifier[TypeError]
keyword[try] :
keyword[return] identifier[six] . identifier[unichr] ( identifier[i] )
keyword[except] identifier[ValueError] :
keyword[return] identifier[struct] . identifier[pack] ( literal[string] , identifier[i] ). identifier[decode] ( literal[string] ) | def _unichr(i):
"""
Helper function for taking a Unicode scalar value and returning a Unicode character.
:param s: Unicode scalar value to convert.
:return: Unicode character
"""
if not isinstance(i, int):
raise TypeError # depends on [control=['if'], data=[]]
try:
return six.unichr(i) # depends on [control=['try'], data=[]]
except ValueError:
# Workaround the error "ValueError: unichr() arg not in range(0x10000) (narrow Python build)"
return struct.pack('i', i).decode('utf-32') # depends on [control=['except'], data=[]] |
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: FunctionVersionContext for this FunctionVersionInstance
:rtype: twilio.rest.serverless.v1.service.function.function_version.FunctionVersionContext
"""
if self._context is None:
self._context = FunctionVersionContext(
self._version,
service_sid=self._solution['service_sid'],
function_sid=self._solution['function_sid'],
sid=self._solution['sid'],
)
return self._context | def function[_proxy, parameter[self]]:
constant[
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: FunctionVersionContext for this FunctionVersionInstance
:rtype: twilio.rest.serverless.v1.service.function.function_version.FunctionVersionContext
]
if compare[name[self]._context is constant[None]] begin[:]
name[self]._context assign[=] call[name[FunctionVersionContext], parameter[name[self]._version]]
return[name[self]._context] | keyword[def] identifier[_proxy] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_context] keyword[is] keyword[None] :
identifier[self] . identifier[_context] = identifier[FunctionVersionContext] (
identifier[self] . identifier[_version] ,
identifier[service_sid] = identifier[self] . identifier[_solution] [ literal[string] ],
identifier[function_sid] = identifier[self] . identifier[_solution] [ literal[string] ],
identifier[sid] = identifier[self] . identifier[_solution] [ literal[string] ],
)
keyword[return] identifier[self] . identifier[_context] | def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: FunctionVersionContext for this FunctionVersionInstance
:rtype: twilio.rest.serverless.v1.service.function.function_version.FunctionVersionContext
"""
if self._context is None:
self._context = FunctionVersionContext(self._version, service_sid=self._solution['service_sid'], function_sid=self._solution['function_sid'], sid=self._solution['sid']) # depends on [control=['if'], data=[]]
return self._context |
def addSuccess(self, test, capt):
"""
After test completion, we want to record testcase run information.
"""
self.__insert_test_result(constants.State.PASS, test) | def function[addSuccess, parameter[self, test, capt]]:
constant[
After test completion, we want to record testcase run information.
]
call[name[self].__insert_test_result, parameter[name[constants].State.PASS, name[test]]] | keyword[def] identifier[addSuccess] ( identifier[self] , identifier[test] , identifier[capt] ):
literal[string]
identifier[self] . identifier[__insert_test_result] ( identifier[constants] . identifier[State] . identifier[PASS] , identifier[test] ) | def addSuccess(self, test, capt):
"""
After test completion, we want to record testcase run information.
"""
self.__insert_test_result(constants.State.PASS, test) |
def install_dependencies(feature=None):
""" Install dependencies for a feature """
import subprocess
echo(green('\nInstall dependencies:'))
echo(green('-' * 40))
req_path = os.path.realpath(os.path.dirname(__file__) + '/../_requirements')
# list all features if no feature name
if not feature:
echo(yellow('Please specify a feature to install. \n'))
for index, item in enumerate(os.listdir(req_path)):
item = item.replace('.txt', '')
echo(green('{}. {}'.format(index + 1, item)))
echo()
return
# install if got feature name
feature_file = feature.lower() + '.txt'
feature_reqs = os.path.join(req_path, feature_file)
# check existence
if not os.path.isfile(feature_reqs):
msg = 'Unable to locate feature requirements file [{}]'
echo(red(msg.format(feature_file)) + '\n')
return
msg = 'Now installing dependencies for "{}" feature...'.format(feature)
echo(yellow(msg))
subprocess.check_call([
sys.executable, '-m', 'pip', 'install', '-r', feature_reqs]
)
# update requirements file with dependencies
reqs = os.path.join(os.getcwd(), 'requirements.txt')
if os.path.exists(reqs):
with open(reqs) as file:
existing = [x.strip().split('==')[0] for x in file.readlines() if x]
lines = ['\n']
with open(feature_reqs) as file:
incoming = file.readlines()
for line in incoming:
if not(len(line)) or line.startswith('#'):
lines.append(line)
continue
package = line.strip().split('==')[0]
if package not in existing:
lines.append(line)
with open(reqs, 'a') as file:
file.writelines(lines)
echo(green('DONE\n')) | def function[install_dependencies, parameter[feature]]:
constant[ Install dependencies for a feature ]
import module[subprocess]
call[name[echo], parameter[call[name[green], parameter[constant[
Install dependencies:]]]]]
call[name[echo], parameter[call[name[green], parameter[binary_operation[constant[-] * constant[40]]]]]]
variable[req_path] assign[=] call[name[os].path.realpath, parameter[binary_operation[call[name[os].path.dirname, parameter[name[__file__]]] + constant[/../_requirements]]]]
if <ast.UnaryOp object at 0x7da18bc70400> begin[:]
call[name[echo], parameter[call[name[yellow], parameter[constant[Please specify a feature to install.
]]]]]
for taget[tuple[[<ast.Name object at 0x7da18bc72e90>, <ast.Name object at 0x7da18bc70970>]]] in starred[call[name[enumerate], parameter[call[name[os].listdir, parameter[name[req_path]]]]]] begin[:]
variable[item] assign[=] call[name[item].replace, parameter[constant[.txt], constant[]]]
call[name[echo], parameter[call[name[green], parameter[call[constant[{}. {}].format, parameter[binary_operation[name[index] + constant[1]], name[item]]]]]]]
call[name[echo], parameter[]]
return[None]
variable[feature_file] assign[=] binary_operation[call[name[feature].lower, parameter[]] + constant[.txt]]
variable[feature_reqs] assign[=] call[name[os].path.join, parameter[name[req_path], name[feature_file]]]
if <ast.UnaryOp object at 0x7da18bc72320> begin[:]
variable[msg] assign[=] constant[Unable to locate feature requirements file [{}]]
call[name[echo], parameter[binary_operation[call[name[red], parameter[call[name[msg].format, parameter[name[feature_file]]]]] + constant[
]]]]
return[None]
variable[msg] assign[=] call[constant[Now installing dependencies for "{}" feature...].format, parameter[name[feature]]]
call[name[echo], parameter[call[name[yellow], parameter[name[msg]]]]]
call[name[subprocess].check_call, parameter[list[[<ast.Attribute object at 0x7da2046224d0>, <ast.Constant object at 0x7da204623820>, <ast.Constant object at 0x7da204621180>, <ast.Constant object at 0x7da204622c50>, <ast.Constant object at 0x7da204623a00>, <ast.Name object at 0x7da204622860>]]]]
variable[reqs] assign[=] call[name[os].path.join, parameter[call[name[os].getcwd, parameter[]], constant[requirements.txt]]]
if call[name[os].path.exists, parameter[name[reqs]]] begin[:]
with call[name[open], parameter[name[reqs]]] begin[:]
variable[existing] assign[=] <ast.ListComp object at 0x7da204621ea0>
variable[lines] assign[=] list[[<ast.Constant object at 0x7da204623910>]]
with call[name[open], parameter[name[feature_reqs]]] begin[:]
variable[incoming] assign[=] call[name[file].readlines, parameter[]]
for taget[name[line]] in starred[name[incoming]] begin[:]
if <ast.BoolOp object at 0x7da2046229e0> begin[:]
call[name[lines].append, parameter[name[line]]]
continue
variable[package] assign[=] call[call[call[name[line].strip, parameter[]].split, parameter[constant[==]]]][constant[0]]
if compare[name[package] <ast.NotIn object at 0x7da2590d7190> name[existing]] begin[:]
call[name[lines].append, parameter[name[line]]]
with call[name[open], parameter[name[reqs], constant[a]]] begin[:]
call[name[file].writelines, parameter[name[lines]]]
call[name[echo], parameter[call[name[green], parameter[constant[DONE
]]]]] | keyword[def] identifier[install_dependencies] ( identifier[feature] = keyword[None] ):
literal[string]
keyword[import] identifier[subprocess]
identifier[echo] ( identifier[green] ( literal[string] ))
identifier[echo] ( identifier[green] ( literal[string] * literal[int] ))
identifier[req_path] = identifier[os] . identifier[path] . identifier[realpath] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] )+ literal[string] )
keyword[if] keyword[not] identifier[feature] :
identifier[echo] ( identifier[yellow] ( literal[string] ))
keyword[for] identifier[index] , identifier[item] keyword[in] identifier[enumerate] ( identifier[os] . identifier[listdir] ( identifier[req_path] )):
identifier[item] = identifier[item] . identifier[replace] ( literal[string] , literal[string] )
identifier[echo] ( identifier[green] ( literal[string] . identifier[format] ( identifier[index] + literal[int] , identifier[item] )))
identifier[echo] ()
keyword[return]
identifier[feature_file] = identifier[feature] . identifier[lower] ()+ literal[string]
identifier[feature_reqs] = identifier[os] . identifier[path] . identifier[join] ( identifier[req_path] , identifier[feature_file] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[feature_reqs] ):
identifier[msg] = literal[string]
identifier[echo] ( identifier[red] ( identifier[msg] . identifier[format] ( identifier[feature_file] ))+ literal[string] )
keyword[return]
identifier[msg] = literal[string] . identifier[format] ( identifier[feature] )
identifier[echo] ( identifier[yellow] ( identifier[msg] ))
identifier[subprocess] . identifier[check_call] ([
identifier[sys] . identifier[executable] , literal[string] , literal[string] , literal[string] , literal[string] , identifier[feature_reqs] ]
)
identifier[reqs] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[getcwd] (), literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[reqs] ):
keyword[with] identifier[open] ( identifier[reqs] ) keyword[as] identifier[file] :
identifier[existing] =[ identifier[x] . identifier[strip] (). identifier[split] ( literal[string] )[ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[file] . identifier[readlines] () keyword[if] identifier[x] ]
identifier[lines] =[ literal[string] ]
keyword[with] identifier[open] ( identifier[feature_reqs] ) keyword[as] identifier[file] :
identifier[incoming] = identifier[file] . identifier[readlines] ()
keyword[for] identifier[line] keyword[in] identifier[incoming] :
keyword[if] keyword[not] ( identifier[len] ( identifier[line] )) keyword[or] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[lines] . identifier[append] ( identifier[line] )
keyword[continue]
identifier[package] = identifier[line] . identifier[strip] (). identifier[split] ( literal[string] )[ literal[int] ]
keyword[if] identifier[package] keyword[not] keyword[in] identifier[existing] :
identifier[lines] . identifier[append] ( identifier[line] )
keyword[with] identifier[open] ( identifier[reqs] , literal[string] ) keyword[as] identifier[file] :
identifier[file] . identifier[writelines] ( identifier[lines] )
identifier[echo] ( identifier[green] ( literal[string] )) | def install_dependencies(feature=None):
""" Install dependencies for a feature """
import subprocess
echo(green('\nInstall dependencies:'))
echo(green('-' * 40))
req_path = os.path.realpath(os.path.dirname(__file__) + '/../_requirements')
# list all features if no feature name
if not feature:
echo(yellow('Please specify a feature to install. \n'))
for (index, item) in enumerate(os.listdir(req_path)):
item = item.replace('.txt', '')
echo(green('{}. {}'.format(index + 1, item))) # depends on [control=['for'], data=[]]
echo()
return # depends on [control=['if'], data=[]]
# install if got feature name
feature_file = feature.lower() + '.txt'
feature_reqs = os.path.join(req_path, feature_file)
# check existence
if not os.path.isfile(feature_reqs):
msg = 'Unable to locate feature requirements file [{}]'
echo(red(msg.format(feature_file)) + '\n')
return # depends on [control=['if'], data=[]]
msg = 'Now installing dependencies for "{}" feature...'.format(feature)
echo(yellow(msg))
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', feature_reqs])
# update requirements file with dependencies
reqs = os.path.join(os.getcwd(), 'requirements.txt')
if os.path.exists(reqs):
with open(reqs) as file:
existing = [x.strip().split('==')[0] for x in file.readlines() if x] # depends on [control=['with'], data=['file']]
lines = ['\n']
with open(feature_reqs) as file:
incoming = file.readlines()
for line in incoming:
if not len(line) or line.startswith('#'):
lines.append(line)
continue # depends on [control=['if'], data=[]]
package = line.strip().split('==')[0]
if package not in existing:
lines.append(line) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['file']]
with open(reqs, 'a') as file:
file.writelines(lines) # depends on [control=['with'], data=['file']] # depends on [control=['if'], data=[]]
echo(green('DONE\n')) |
def _publish_stats(self, counter_prefix, stats):
"""Given a stats dictionary from _get_stats_from_socket,
publish the individual values.
"""
for stat_name, stat_value in flatten_dictionary(
stats,
prefix=counter_prefix,
):
self.publish_gauge(stat_name, stat_value) | def function[_publish_stats, parameter[self, counter_prefix, stats]]:
constant[Given a stats dictionary from _get_stats_from_socket,
publish the individual values.
]
for taget[tuple[[<ast.Name object at 0x7da2047ebca0>, <ast.Name object at 0x7da2047e9060>]]] in starred[call[name[flatten_dictionary], parameter[name[stats]]]] begin[:]
call[name[self].publish_gauge, parameter[name[stat_name], name[stat_value]]] | keyword[def] identifier[_publish_stats] ( identifier[self] , identifier[counter_prefix] , identifier[stats] ):
literal[string]
keyword[for] identifier[stat_name] , identifier[stat_value] keyword[in] identifier[flatten_dictionary] (
identifier[stats] ,
identifier[prefix] = identifier[counter_prefix] ,
):
identifier[self] . identifier[publish_gauge] ( identifier[stat_name] , identifier[stat_value] ) | def _publish_stats(self, counter_prefix, stats):
"""Given a stats dictionary from _get_stats_from_socket,
publish the individual values.
"""
for (stat_name, stat_value) in flatten_dictionary(stats, prefix=counter_prefix):
self.publish_gauge(stat_name, stat_value) # depends on [control=['for'], data=[]] |
def _write_error_batch(batch, database, measurements):
"""Invoked when a batch submission fails, this method will submit one
measurement to InfluxDB. It then adds a timeout to the IOLoop which will
invoke :meth:`_write_error_batch_wait` which will evaluate the result and
then determine what to do next.
:param str batch: The batch ID for correlation purposes
:param str database: The database name for the measurements
:param list measurements: The measurements that failed to write as a batch
"""
if not measurements:
LOGGER.info('All %s measurements from batch %s processed',
database, batch)
return
LOGGER.debug('Processing batch %s for %s by measurement, %i left',
batch, database, len(measurements))
url = '{}?db={}&precision=ms'.format(_base_url, database)
measurement = measurements.pop(0)
# Create the request future
future = _http_client.fetch(
url, method='POST', body=measurement.encode('utf-8'))
# Check in 25ms to see if it's done
ioloop.IOLoop.current().add_timeout(
ioloop.IOLoop.current().time() + 0.025,
_write_error_batch_wait, future, batch, database, measurement,
measurements) | def function[_write_error_batch, parameter[batch, database, measurements]]:
constant[Invoked when a batch submission fails, this method will submit one
measurement to InfluxDB. It then adds a timeout to the IOLoop which will
invoke :meth:`_write_error_batch_wait` which will evaluate the result and
then determine what to do next.
:param str batch: The batch ID for correlation purposes
:param str database: The database name for the measurements
:param list measurements: The measurements that failed to write as a batch
]
if <ast.UnaryOp object at 0x7da204344220> begin[:]
call[name[LOGGER].info, parameter[constant[All %s measurements from batch %s processed], name[database], name[batch]]]
return[None]
call[name[LOGGER].debug, parameter[constant[Processing batch %s for %s by measurement, %i left], name[batch], name[database], call[name[len], parameter[name[measurements]]]]]
variable[url] assign[=] call[constant[{}?db={}&precision=ms].format, parameter[name[_base_url], name[database]]]
variable[measurement] assign[=] call[name[measurements].pop, parameter[constant[0]]]
variable[future] assign[=] call[name[_http_client].fetch, parameter[name[url]]]
call[call[name[ioloop].IOLoop.current, parameter[]].add_timeout, parameter[binary_operation[call[call[name[ioloop].IOLoop.current, parameter[]].time, parameter[]] + constant[0.025]], name[_write_error_batch_wait], name[future], name[batch], name[database], name[measurement], name[measurements]]] | keyword[def] identifier[_write_error_batch] ( identifier[batch] , identifier[database] , identifier[measurements] ):
literal[string]
keyword[if] keyword[not] identifier[measurements] :
identifier[LOGGER] . identifier[info] ( literal[string] ,
identifier[database] , identifier[batch] )
keyword[return]
identifier[LOGGER] . identifier[debug] ( literal[string] ,
identifier[batch] , identifier[database] , identifier[len] ( identifier[measurements] ))
identifier[url] = literal[string] . identifier[format] ( identifier[_base_url] , identifier[database] )
identifier[measurement] = identifier[measurements] . identifier[pop] ( literal[int] )
identifier[future] = identifier[_http_client] . identifier[fetch] (
identifier[url] , identifier[method] = literal[string] , identifier[body] = identifier[measurement] . identifier[encode] ( literal[string] ))
identifier[ioloop] . identifier[IOLoop] . identifier[current] (). identifier[add_timeout] (
identifier[ioloop] . identifier[IOLoop] . identifier[current] (). identifier[time] ()+ literal[int] ,
identifier[_write_error_batch_wait] , identifier[future] , identifier[batch] , identifier[database] , identifier[measurement] ,
identifier[measurements] ) | def _write_error_batch(batch, database, measurements):
"""Invoked when a batch submission fails, this method will submit one
measurement to InfluxDB. It then adds a timeout to the IOLoop which will
invoke :meth:`_write_error_batch_wait` which will evaluate the result and
then determine what to do next.
:param str batch: The batch ID for correlation purposes
:param str database: The database name for the measurements
:param list measurements: The measurements that failed to write as a batch
"""
if not measurements:
LOGGER.info('All %s measurements from batch %s processed', database, batch)
return # depends on [control=['if'], data=[]]
LOGGER.debug('Processing batch %s for %s by measurement, %i left', batch, database, len(measurements))
url = '{}?db={}&precision=ms'.format(_base_url, database)
measurement = measurements.pop(0)
# Create the request future
future = _http_client.fetch(url, method='POST', body=measurement.encode('utf-8'))
# Check in 25ms to see if it's done
ioloop.IOLoop.current().add_timeout(ioloop.IOLoop.current().time() + 0.025, _write_error_batch_wait, future, batch, database, measurement, measurements) |
def snapshot_data_item(self, data_item: DataItem) -> DataItem:
"""Snapshot a data item. Similar to copy but with a data snapshot.
.. versionadded:: 1.0
Scriptable: No
"""
data_item = data_item._data_item.snapshot()
self.__document_model.append_data_item(data_item)
return DataItem(data_item) | def function[snapshot_data_item, parameter[self, data_item]]:
constant[Snapshot a data item. Similar to copy but with a data snapshot.
.. versionadded:: 1.0
Scriptable: No
]
variable[data_item] assign[=] call[name[data_item]._data_item.snapshot, parameter[]]
call[name[self].__document_model.append_data_item, parameter[name[data_item]]]
return[call[name[DataItem], parameter[name[data_item]]]] | keyword[def] identifier[snapshot_data_item] ( identifier[self] , identifier[data_item] : identifier[DataItem] )-> identifier[DataItem] :
literal[string]
identifier[data_item] = identifier[data_item] . identifier[_data_item] . identifier[snapshot] ()
identifier[self] . identifier[__document_model] . identifier[append_data_item] ( identifier[data_item] )
keyword[return] identifier[DataItem] ( identifier[data_item] ) | def snapshot_data_item(self, data_item: DataItem) -> DataItem:
"""Snapshot a data item. Similar to copy but with a data snapshot.
.. versionadded:: 1.0
Scriptable: No
"""
data_item = data_item._data_item.snapshot()
self.__document_model.append_data_item(data_item)
return DataItem(data_item) |
def from_web_element(self, web_element):
"""
Store reference to a WebElement instance representing the element on the DOM.
Use it when an instance of WebElement has already been created (e.g. as the result of find_element) and
you want to create a UIComponent out of it without evaluating it from the locator again.
Returns an instance of the class.
"""
if isinstance(web_element, WebElement) is not True:
raise TypeError("web_element parameter is not of type WebElement.")
self._web_element = web_element
return self | def function[from_web_element, parameter[self, web_element]]:
constant[
Store reference to a WebElement instance representing the element on the DOM.
Use it when an instance of WebElement has already been created (e.g. as the result of find_element) and
you want to create a UIComponent out of it without evaluating it from the locator again.
Returns an instance of the class.
]
if compare[call[name[isinstance], parameter[name[web_element], name[WebElement]]] is_not constant[True]] begin[:]
<ast.Raise object at 0x7da1b020ddb0>
name[self]._web_element assign[=] name[web_element]
return[name[self]] | keyword[def] identifier[from_web_element] ( identifier[self] , identifier[web_element] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[web_element] , identifier[WebElement] ) keyword[is] keyword[not] keyword[True] :
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[self] . identifier[_web_element] = identifier[web_element]
keyword[return] identifier[self] | def from_web_element(self, web_element):
"""
Store reference to a WebElement instance representing the element on the DOM.
Use it when an instance of WebElement has already been created (e.g. as the result of find_element) and
you want to create a UIComponent out of it without evaluating it from the locator again.
Returns an instance of the class.
"""
if isinstance(web_element, WebElement) is not True:
raise TypeError('web_element parameter is not of type WebElement.') # depends on [control=['if'], data=[]]
self._web_element = web_element
return self |
def request(self, method, args, response_cb):
"""Send a msgpack-rpc request to Nvim.
A msgpack-rpc with method `method` and argument `args` is sent to
Nvim. The `response_cb` function is called with when the response
is available.
"""
request_id = self._next_request_id
self._next_request_id = request_id + 1
self._msgpack_stream.send([0, request_id, method, args])
self._pending_requests[request_id] = response_cb | def function[request, parameter[self, method, args, response_cb]]:
constant[Send a msgpack-rpc request to Nvim.
A msgpack-rpc with method `method` and argument `args` is sent to
Nvim. The `response_cb` function is called with when the response
is available.
]
variable[request_id] assign[=] name[self]._next_request_id
name[self]._next_request_id assign[=] binary_operation[name[request_id] + constant[1]]
call[name[self]._msgpack_stream.send, parameter[list[[<ast.Constant object at 0x7da1b1dfab90>, <ast.Name object at 0x7da1b1dfabc0>, <ast.Name object at 0x7da1b1dfacb0>, <ast.Name object at 0x7da1b1dfb820>]]]]
call[name[self]._pending_requests][name[request_id]] assign[=] name[response_cb] | keyword[def] identifier[request] ( identifier[self] , identifier[method] , identifier[args] , identifier[response_cb] ):
literal[string]
identifier[request_id] = identifier[self] . identifier[_next_request_id]
identifier[self] . identifier[_next_request_id] = identifier[request_id] + literal[int]
identifier[self] . identifier[_msgpack_stream] . identifier[send] ([ literal[int] , identifier[request_id] , identifier[method] , identifier[args] ])
identifier[self] . identifier[_pending_requests] [ identifier[request_id] ]= identifier[response_cb] | def request(self, method, args, response_cb):
"""Send a msgpack-rpc request to Nvim.
A msgpack-rpc with method `method` and argument `args` is sent to
Nvim. The `response_cb` function is called with when the response
is available.
"""
request_id = self._next_request_id
self._next_request_id = request_id + 1
self._msgpack_stream.send([0, request_id, method, args])
self._pending_requests[request_id] = response_cb |
def replace(self, year=None, month=None, day=None):
"""Return a new date with new values for the specified fields."""
if year is None:
year = self._year
if month is None:
month = self._month
if day is None:
day = self._day
_check_date_fields(year, month, day)
return date(year, month, day) | def function[replace, parameter[self, year, month, day]]:
constant[Return a new date with new values for the specified fields.]
if compare[name[year] is constant[None]] begin[:]
variable[year] assign[=] name[self]._year
if compare[name[month] is constant[None]] begin[:]
variable[month] assign[=] name[self]._month
if compare[name[day] is constant[None]] begin[:]
variable[day] assign[=] name[self]._day
call[name[_check_date_fields], parameter[name[year], name[month], name[day]]]
return[call[name[date], parameter[name[year], name[month], name[day]]]] | keyword[def] identifier[replace] ( identifier[self] , identifier[year] = keyword[None] , identifier[month] = keyword[None] , identifier[day] = keyword[None] ):
literal[string]
keyword[if] identifier[year] keyword[is] keyword[None] :
identifier[year] = identifier[self] . identifier[_year]
keyword[if] identifier[month] keyword[is] keyword[None] :
identifier[month] = identifier[self] . identifier[_month]
keyword[if] identifier[day] keyword[is] keyword[None] :
identifier[day] = identifier[self] . identifier[_day]
identifier[_check_date_fields] ( identifier[year] , identifier[month] , identifier[day] )
keyword[return] identifier[date] ( identifier[year] , identifier[month] , identifier[day] ) | def replace(self, year=None, month=None, day=None):
"""Return a new date with new values for the specified fields."""
if year is None:
year = self._year # depends on [control=['if'], data=['year']]
if month is None:
month = self._month # depends on [control=['if'], data=['month']]
if day is None:
day = self._day # depends on [control=['if'], data=['day']]
_check_date_fields(year, month, day)
return date(year, month, day) |
def includeme(config):
""" Add pyramid_webpack methods and config to the app """
settings = config.registry.settings
root_package_name = config.root_package.__name__
config.registry.webpack = {
'DEFAULT': WebpackState(settings, root_package_name)
}
for extra_config in aslist(settings.get('webpack.configs', [])):
state = WebpackState(settings, root_package_name, name=extra_config)
config.registry.webpack[extra_config] = state
# Set up any static views
for state in six.itervalues(config.registry.webpack):
if state.static_view:
config.add_static_view(name=state.static_view_name,
path=state.static_view_path,
cache_max_age=state.cache_max_age)
config.add_request_method(get_webpack, 'webpack') | def function[includeme, parameter[config]]:
constant[ Add pyramid_webpack methods and config to the app ]
variable[settings] assign[=] name[config].registry.settings
variable[root_package_name] assign[=] name[config].root_package.__name__
name[config].registry.webpack assign[=] dictionary[[<ast.Constant object at 0x7da1b10250c0>], [<ast.Call object at 0x7da1b1025d20>]]
for taget[name[extra_config]] in starred[call[name[aslist], parameter[call[name[settings].get, parameter[constant[webpack.configs], list[[]]]]]]] begin[:]
variable[state] assign[=] call[name[WebpackState], parameter[name[settings], name[root_package_name]]]
call[name[config].registry.webpack][name[extra_config]] assign[=] name[state]
for taget[name[state]] in starred[call[name[six].itervalues, parameter[name[config].registry.webpack]]] begin[:]
if name[state].static_view begin[:]
call[name[config].add_static_view, parameter[]]
call[name[config].add_request_method, parameter[name[get_webpack], constant[webpack]]] | keyword[def] identifier[includeme] ( identifier[config] ):
literal[string]
identifier[settings] = identifier[config] . identifier[registry] . identifier[settings]
identifier[root_package_name] = identifier[config] . identifier[root_package] . identifier[__name__]
identifier[config] . identifier[registry] . identifier[webpack] ={
literal[string] : identifier[WebpackState] ( identifier[settings] , identifier[root_package_name] )
}
keyword[for] identifier[extra_config] keyword[in] identifier[aslist] ( identifier[settings] . identifier[get] ( literal[string] ,[])):
identifier[state] = identifier[WebpackState] ( identifier[settings] , identifier[root_package_name] , identifier[name] = identifier[extra_config] )
identifier[config] . identifier[registry] . identifier[webpack] [ identifier[extra_config] ]= identifier[state]
keyword[for] identifier[state] keyword[in] identifier[six] . identifier[itervalues] ( identifier[config] . identifier[registry] . identifier[webpack] ):
keyword[if] identifier[state] . identifier[static_view] :
identifier[config] . identifier[add_static_view] ( identifier[name] = identifier[state] . identifier[static_view_name] ,
identifier[path] = identifier[state] . identifier[static_view_path] ,
identifier[cache_max_age] = identifier[state] . identifier[cache_max_age] )
identifier[config] . identifier[add_request_method] ( identifier[get_webpack] , literal[string] ) | def includeme(config):
""" Add pyramid_webpack methods and config to the app """
settings = config.registry.settings
root_package_name = config.root_package.__name__
config.registry.webpack = {'DEFAULT': WebpackState(settings, root_package_name)}
for extra_config in aslist(settings.get('webpack.configs', [])):
state = WebpackState(settings, root_package_name, name=extra_config)
config.registry.webpack[extra_config] = state # depends on [control=['for'], data=['extra_config']]
# Set up any static views
for state in six.itervalues(config.registry.webpack):
if state.static_view:
config.add_static_view(name=state.static_view_name, path=state.static_view_path, cache_max_age=state.cache_max_age) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['state']]
config.add_request_method(get_webpack, 'webpack') |
def cross_correlate(self, templates, **kwargs):
"""
Cross correlate the spectrum against a set of templates.
"""
# templates can be:
# - a single Spectrum1D object
# - (template_dispersion, template_fluxes)
# templates can be a single spectrum or a tuple of (dispersion, fluxes)
if isinstance(templates, (Spectrum1D, )):
template_dispersion = templates.disp
template_fluxes = templates.flux
else:
template_dispersion = templates[0]
template_fluxes = templates[1]
return _cross_correlate(self, template_dispersion, template_fluxes,
**kwargs) | def function[cross_correlate, parameter[self, templates]]:
constant[
Cross correlate the spectrum against a set of templates.
]
if call[name[isinstance], parameter[name[templates], tuple[[<ast.Name object at 0x7da20c7968f0>]]]] begin[:]
variable[template_dispersion] assign[=] name[templates].disp
variable[template_fluxes] assign[=] name[templates].flux
return[call[name[_cross_correlate], parameter[name[self], name[template_dispersion], name[template_fluxes]]]] | keyword[def] identifier[cross_correlate] ( identifier[self] , identifier[templates] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[templates] ,( identifier[Spectrum1D] ,)):
identifier[template_dispersion] = identifier[templates] . identifier[disp]
identifier[template_fluxes] = identifier[templates] . identifier[flux]
keyword[else] :
identifier[template_dispersion] = identifier[templates] [ literal[int] ]
identifier[template_fluxes] = identifier[templates] [ literal[int] ]
keyword[return] identifier[_cross_correlate] ( identifier[self] , identifier[template_dispersion] , identifier[template_fluxes] ,
** identifier[kwargs] ) | def cross_correlate(self, templates, **kwargs):
"""
Cross correlate the spectrum against a set of templates.
"""
# templates can be:
# - a single Spectrum1D object
# - (template_dispersion, template_fluxes)
# templates can be a single spectrum or a tuple of (dispersion, fluxes)
if isinstance(templates, (Spectrum1D,)):
template_dispersion = templates.disp
template_fluxes = templates.flux # depends on [control=['if'], data=[]]
else:
template_dispersion = templates[0]
template_fluxes = templates[1]
return _cross_correlate(self, template_dispersion, template_fluxes, **kwargs) |
def validate(self, value):
"""Base validation method. Check if type is valid, or try brute casting.
Args:
value (object): A value for validation.
Returns:
Base_type instance.
Raises:
SchemaError, if validation or type casting fails.
"""
cast_callback = self.cast_callback if self.cast_callback else self.cast_type
try:
return value if isinstance(value, self.cast_type) else cast_callback(value)
except Exception:
raise NodeTypeError('Invalid value `{}` for {}.'.format(value, self.cast_type)) | def function[validate, parameter[self, value]]:
constant[Base validation method. Check if type is valid, or try brute casting.
Args:
value (object): A value for validation.
Returns:
Base_type instance.
Raises:
SchemaError, if validation or type casting fails.
]
variable[cast_callback] assign[=] <ast.IfExp object at 0x7da2047e8130>
<ast.Try object at 0x7da2047e8df0> | keyword[def] identifier[validate] ( identifier[self] , identifier[value] ):
literal[string]
identifier[cast_callback] = identifier[self] . identifier[cast_callback] keyword[if] identifier[self] . identifier[cast_callback] keyword[else] identifier[self] . identifier[cast_type]
keyword[try] :
keyword[return] identifier[value] keyword[if] identifier[isinstance] ( identifier[value] , identifier[self] . identifier[cast_type] ) keyword[else] identifier[cast_callback] ( identifier[value] )
keyword[except] identifier[Exception] :
keyword[raise] identifier[NodeTypeError] ( literal[string] . identifier[format] ( identifier[value] , identifier[self] . identifier[cast_type] )) | def validate(self, value):
"""Base validation method. Check if type is valid, or try brute casting.
Args:
value (object): A value for validation.
Returns:
Base_type instance.
Raises:
SchemaError, if validation or type casting fails.
"""
cast_callback = self.cast_callback if self.cast_callback else self.cast_type
try:
return value if isinstance(value, self.cast_type) else cast_callback(value) # depends on [control=['try'], data=[]]
except Exception:
raise NodeTypeError('Invalid value `{}` for {}.'.format(value, self.cast_type)) # depends on [control=['except'], data=[]] |
def suggest_path(root_dir):
"""List all files and subdirectories in a directory.
If the directory is not specified, suggest root directory,
user directory, current and parent directory.
:param root_dir: string: directory to list
:return: list
"""
if not root_dir:
return [os.path.abspath(os.sep), '~', os.curdir, os.pardir]
if '~' in root_dir:
root_dir = os.path.expanduser(root_dir)
if not os.path.exists(root_dir):
root_dir, _ = os.path.split(root_dir)
return list_path(root_dir) | def function[suggest_path, parameter[root_dir]]:
constant[List all files and subdirectories in a directory.
If the directory is not specified, suggest root directory,
user directory, current and parent directory.
:param root_dir: string: directory to list
:return: list
]
if <ast.UnaryOp object at 0x7da20e74a770> begin[:]
return[list[[<ast.Call object at 0x7da20e74ad70>, <ast.Constant object at 0x7da20c7cb370>, <ast.Attribute object at 0x7da20c7c9390>, <ast.Attribute object at 0x7da20c7c80a0>]]]
if compare[constant[~] in name[root_dir]] begin[:]
variable[root_dir] assign[=] call[name[os].path.expanduser, parameter[name[root_dir]]]
if <ast.UnaryOp object at 0x7da20c7c9ba0> begin[:]
<ast.Tuple object at 0x7da20c7ca1a0> assign[=] call[name[os].path.split, parameter[name[root_dir]]]
return[call[name[list_path], parameter[name[root_dir]]]] | keyword[def] identifier[suggest_path] ( identifier[root_dir] ):
literal[string]
keyword[if] keyword[not] identifier[root_dir] :
keyword[return] [ identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[sep] ), literal[string] , identifier[os] . identifier[curdir] , identifier[os] . identifier[pardir] ]
keyword[if] literal[string] keyword[in] identifier[root_dir] :
identifier[root_dir] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[root_dir] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[root_dir] ):
identifier[root_dir] , identifier[_] = identifier[os] . identifier[path] . identifier[split] ( identifier[root_dir] )
keyword[return] identifier[list_path] ( identifier[root_dir] ) | def suggest_path(root_dir):
"""List all files and subdirectories in a directory.
If the directory is not specified, suggest root directory,
user directory, current and parent directory.
:param root_dir: string: directory to list
:return: list
"""
if not root_dir:
return [os.path.abspath(os.sep), '~', os.curdir, os.pardir] # depends on [control=['if'], data=[]]
if '~' in root_dir:
root_dir = os.path.expanduser(root_dir) # depends on [control=['if'], data=['root_dir']]
if not os.path.exists(root_dir):
(root_dir, _) = os.path.split(root_dir) # depends on [control=['if'], data=[]]
return list_path(root_dir) |
def build_package_configs(project_name,
version=None,
copyright=None,
doxygen_xml_dirname=None):
"""Builds a `dict` of Sphinx configurations useful for the ``doc/conf.py``
files of individual LSST Stack packages.
The ``doc/conf.py`` of packages can ingest these configurations via::
from documenteer.sphinxconfig.stackconf import build_package_configs
_g = globals()
_g.update(build_package_configs(
project_name='afw',
version=lsst.afw.version.__version__))
You can subsequently customize the Sphinx configuration by directly
assigning global variables, as usual in a Sphinx ``config.py``, e.g.:
.. code:: python
copyright = '2016 Association of Universities for '
'Research in Astronomy, Inc.'
Parameters
----------
project_name : str
Name of the package.
copyright : str, optional
Copyright statement. Do not include the 'Copyright (c)' string; it'll
be added automatically.
version : str
Version string. Use the ``__version__`` member in a package's
``version`` module.
doxygen_xml_dirname : str
Path to doxygen-generated XML, allowing C++ APIs to be documented
through breathe. If not set, the breathe sphinx extension will not be
enabled.
Returns
-------
c : dict
Dictionary of configurations that should be added to the ``conf.py``
global namespace via::
_g = global()
_g.update(c)
"""
c = {}
c = _insert_common_sphinx_configs(
c,
project_name=project_name)
# HTML theme
c = _insert_html_configs(
c,
project_name=project_name,
short_project_name=project_name)
# Sphinx extension modules
c = _insert_extensions(c)
# Intersphinx configuration
c = _insert_intersphinx_mapping(c)
# Breathe extension configuration
c = _insert_breathe_configs(
c,
project_name=project_name,
doxygen_xml_dirname=doxygen_xml_dirname)
# Automodapi and numpydoc configurations
c = _insert_automodapi_configs(c)
# Matplotlib configurations
c = _insert_matplotlib_configs(c)
# Graphviz configurations
c = _insert_graphviz_configs(c)
# Add versioning information
c = _insert_single_package_eups_version(c, version)
try:
date = read_git_commit_timestamp()
except Exception:
date = datetime.datetime.now()
if copyright is not None:
c['copyright'] = copyright
else:
c['copyright'] = '{:s} LSST contributors'.format(
date.strftime('%Y'))
c['today'] = date.strftime('%Y-%m-%d')
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
c['exclude_patterns'] = [
'_build',
'README.rst',
]
# Show rendered todo directives in package docs since they're developer
# facing.
c['todo_include_todos'] = True
# Insert rst_epilog configuration
c = _insert_rst_epilog(c)
# Set up the context for the sphinx-jinja extension
c = _insert_jinja_configuration(c)
return c | def function[build_package_configs, parameter[project_name, version, copyright, doxygen_xml_dirname]]:
constant[Builds a `dict` of Sphinx configurations useful for the ``doc/conf.py``
files of individual LSST Stack packages.
The ``doc/conf.py`` of packages can ingest these configurations via::
from documenteer.sphinxconfig.stackconf import build_package_configs
_g = globals()
_g.update(build_package_configs(
project_name='afw',
version=lsst.afw.version.__version__))
You can subsequently customize the Sphinx configuration by directly
assigning global variables, as usual in a Sphinx ``config.py``, e.g.:
.. code:: python
copyright = '2016 Association of Universities for '
'Research in Astronomy, Inc.'
Parameters
----------
project_name : str
Name of the package.
copyright : str, optional
Copyright statement. Do not include the 'Copyright (c)' string; it'll
be added automatically.
version : str
Version string. Use the ``__version__`` member in a package's
``version`` module.
doxygen_xml_dirname : str
Path to doxygen-generated XML, allowing C++ APIs to be documented
through breathe. If not set, the breathe sphinx extension will not be
enabled.
Returns
-------
c : dict
Dictionary of configurations that should be added to the ``conf.py``
global namespace via::
_g = global()
_g.update(c)
]
variable[c] assign[=] dictionary[[], []]
variable[c] assign[=] call[name[_insert_common_sphinx_configs], parameter[name[c]]]
variable[c] assign[=] call[name[_insert_html_configs], parameter[name[c]]]
variable[c] assign[=] call[name[_insert_extensions], parameter[name[c]]]
variable[c] assign[=] call[name[_insert_intersphinx_mapping], parameter[name[c]]]
variable[c] assign[=] call[name[_insert_breathe_configs], parameter[name[c]]]
variable[c] assign[=] call[name[_insert_automodapi_configs], parameter[name[c]]]
variable[c] assign[=] call[name[_insert_matplotlib_configs], parameter[name[c]]]
variable[c] assign[=] call[name[_insert_graphviz_configs], parameter[name[c]]]
variable[c] assign[=] call[name[_insert_single_package_eups_version], parameter[name[c], name[version]]]
<ast.Try object at 0x7da1b24c1e70>
if compare[name[copyright] is_not constant[None]] begin[:]
call[name[c]][constant[copyright]] assign[=] name[copyright]
call[name[c]][constant[today]] assign[=] call[name[date].strftime, parameter[constant[%Y-%m-%d]]]
call[name[c]][constant[exclude_patterns]] assign[=] list[[<ast.Constant object at 0x7da1b237d3c0>, <ast.Constant object at 0x7da1b237c8b0>]]
call[name[c]][constant[todo_include_todos]] assign[=] constant[True]
variable[c] assign[=] call[name[_insert_rst_epilog], parameter[name[c]]]
variable[c] assign[=] call[name[_insert_jinja_configuration], parameter[name[c]]]
return[name[c]] | keyword[def] identifier[build_package_configs] ( identifier[project_name] ,
identifier[version] = keyword[None] ,
identifier[copyright] = keyword[None] ,
identifier[doxygen_xml_dirname] = keyword[None] ):
literal[string]
identifier[c] ={}
identifier[c] = identifier[_insert_common_sphinx_configs] (
identifier[c] ,
identifier[project_name] = identifier[project_name] )
identifier[c] = identifier[_insert_html_configs] (
identifier[c] ,
identifier[project_name] = identifier[project_name] ,
identifier[short_project_name] = identifier[project_name] )
identifier[c] = identifier[_insert_extensions] ( identifier[c] )
identifier[c] = identifier[_insert_intersphinx_mapping] ( identifier[c] )
identifier[c] = identifier[_insert_breathe_configs] (
identifier[c] ,
identifier[project_name] = identifier[project_name] ,
identifier[doxygen_xml_dirname] = identifier[doxygen_xml_dirname] )
identifier[c] = identifier[_insert_automodapi_configs] ( identifier[c] )
identifier[c] = identifier[_insert_matplotlib_configs] ( identifier[c] )
identifier[c] = identifier[_insert_graphviz_configs] ( identifier[c] )
identifier[c] = identifier[_insert_single_package_eups_version] ( identifier[c] , identifier[version] )
keyword[try] :
identifier[date] = identifier[read_git_commit_timestamp] ()
keyword[except] identifier[Exception] :
identifier[date] = identifier[datetime] . identifier[datetime] . identifier[now] ()
keyword[if] identifier[copyright] keyword[is] keyword[not] keyword[None] :
identifier[c] [ literal[string] ]= identifier[copyright]
keyword[else] :
identifier[c] [ literal[string] ]= literal[string] . identifier[format] (
identifier[date] . identifier[strftime] ( literal[string] ))
identifier[c] [ literal[string] ]= identifier[date] . identifier[strftime] ( literal[string] )
identifier[c] [ literal[string] ]=[
literal[string] ,
literal[string] ,
]
identifier[c] [ literal[string] ]= keyword[True]
identifier[c] = identifier[_insert_rst_epilog] ( identifier[c] )
identifier[c] = identifier[_insert_jinja_configuration] ( identifier[c] )
keyword[return] identifier[c] | def build_package_configs(project_name, version=None, copyright=None, doxygen_xml_dirname=None):
"""Builds a `dict` of Sphinx configurations useful for the ``doc/conf.py``
files of individual LSST Stack packages.
The ``doc/conf.py`` of packages can ingest these configurations via::
from documenteer.sphinxconfig.stackconf import build_package_configs
_g = globals()
_g.update(build_package_configs(
project_name='afw',
version=lsst.afw.version.__version__))
You can subsequently customize the Sphinx configuration by directly
assigning global variables, as usual in a Sphinx ``config.py``, e.g.:
.. code:: python
copyright = '2016 Association of Universities for '
'Research in Astronomy, Inc.'
Parameters
----------
project_name : str
Name of the package.
copyright : str, optional
Copyright statement. Do not include the 'Copyright (c)' string; it'll
be added automatically.
version : str
Version string. Use the ``__version__`` member in a package's
``version`` module.
doxygen_xml_dirname : str
Path to doxygen-generated XML, allowing C++ APIs to be documented
through breathe. If not set, the breathe sphinx extension will not be
enabled.
Returns
-------
c : dict
Dictionary of configurations that should be added to the ``conf.py``
global namespace via::
_g = global()
_g.update(c)
"""
c = {}
c = _insert_common_sphinx_configs(c, project_name=project_name)
# HTML theme
c = _insert_html_configs(c, project_name=project_name, short_project_name=project_name)
# Sphinx extension modules
c = _insert_extensions(c)
# Intersphinx configuration
c = _insert_intersphinx_mapping(c)
# Breathe extension configuration
c = _insert_breathe_configs(c, project_name=project_name, doxygen_xml_dirname=doxygen_xml_dirname)
# Automodapi and numpydoc configurations
c = _insert_automodapi_configs(c)
# Matplotlib configurations
c = _insert_matplotlib_configs(c)
# Graphviz configurations
c = _insert_graphviz_configs(c)
# Add versioning information
c = _insert_single_package_eups_version(c, version)
try:
date = read_git_commit_timestamp() # depends on [control=['try'], data=[]]
except Exception:
date = datetime.datetime.now() # depends on [control=['except'], data=[]]
if copyright is not None:
c['copyright'] = copyright # depends on [control=['if'], data=['copyright']]
else:
c['copyright'] = '{:s} LSST contributors'.format(date.strftime('%Y'))
c['today'] = date.strftime('%Y-%m-%d')
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
c['exclude_patterns'] = ['_build', 'README.rst']
# Show rendered todo directives in package docs since they're developer
# facing.
c['todo_include_todos'] = True
# Insert rst_epilog configuration
c = _insert_rst_epilog(c)
# Set up the context for the sphinx-jinja extension
c = _insert_jinja_configuration(c)
return c |
def load_parameters(filename):
""" Load parameters stored in file ``filename``.
``p = nonlinear_fit.load_p(filename)`` is used to recover the
values of fit parameters dumped using ``fit.dump_p(filename)`` (or
``fit.dump_pmean(filename)``) where ``fit`` is of type
:class:`lsqfit.nonlinear_fit`. The layout of the returned
parameters ``p`` is the same as that of ``fit.p`` (or
``fit.pmean``).
"""
warnings.warn(
"nonlinear_fit.load_parameters deprecated; use pickle.load or gvar.load instead",
DeprecationWarning,
)
with open(filename,"rb") as f:
return pickle.load(f) | def function[load_parameters, parameter[filename]]:
constant[ Load parameters stored in file ``filename``.
``p = nonlinear_fit.load_p(filename)`` is used to recover the
values of fit parameters dumped using ``fit.dump_p(filename)`` (or
``fit.dump_pmean(filename)``) where ``fit`` is of type
:class:`lsqfit.nonlinear_fit`. The layout of the returned
parameters ``p`` is the same as that of ``fit.p`` (or
``fit.pmean``).
]
call[name[warnings].warn, parameter[constant[nonlinear_fit.load_parameters deprecated; use pickle.load or gvar.load instead], name[DeprecationWarning]]]
with call[name[open], parameter[name[filename], constant[rb]]] begin[:]
return[call[name[pickle].load, parameter[name[f]]]] | keyword[def] identifier[load_parameters] ( identifier[filename] ):
literal[string]
identifier[warnings] . identifier[warn] (
literal[string] ,
identifier[DeprecationWarning] ,
)
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] :
keyword[return] identifier[pickle] . identifier[load] ( identifier[f] ) | def load_parameters(filename):
""" Load parameters stored in file ``filename``.
``p = nonlinear_fit.load_p(filename)`` is used to recover the
values of fit parameters dumped using ``fit.dump_p(filename)`` (or
``fit.dump_pmean(filename)``) where ``fit`` is of type
:class:`lsqfit.nonlinear_fit`. The layout of the returned
parameters ``p`` is the same as that of ``fit.p`` (or
``fit.pmean``).
"""
warnings.warn('nonlinear_fit.load_parameters deprecated; use pickle.load or gvar.load instead', DeprecationWarning)
with open(filename, 'rb') as f:
return pickle.load(f) # depends on [control=['with'], data=['f']] |
def api_exists(name, description=None, region=None, key=None, keyid=None, profile=None):
'''
Check to see if the given Rest API Name and optionally description exists.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.exists myapi_name
'''
apis = _find_apis_by_name(name, description=description,
region=region, key=key, keyid=keyid, profile=profile)
return {'exists': bool(apis.get('restapi'))} | def function[api_exists, parameter[name, description, region, key, keyid, profile]]:
constant[
Check to see if the given Rest API Name and optionally description exists.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.exists myapi_name
]
variable[apis] assign[=] call[name[_find_apis_by_name], parameter[name[name]]]
return[dictionary[[<ast.Constant object at 0x7da1b216b6d0>], [<ast.Call object at 0x7da1b216b0d0>]]] | keyword[def] identifier[api_exists] ( identifier[name] , identifier[description] = keyword[None] , identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] ):
literal[string]
identifier[apis] = identifier[_find_apis_by_name] ( identifier[name] , identifier[description] = identifier[description] ,
identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
keyword[return] { literal[string] : identifier[bool] ( identifier[apis] . identifier[get] ( literal[string] ))} | def api_exists(name, description=None, region=None, key=None, keyid=None, profile=None):
"""
Check to see if the given Rest API Name and optionally description exists.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.exists myapi_name
"""
apis = _find_apis_by_name(name, description=description, region=region, key=key, keyid=keyid, profile=profile)
return {'exists': bool(apis.get('restapi'))} |
def inputs_outputs(self):
"""Get information on method inputs & outputs."""
r = fapi.get_inputs_outputs(self.namespace, self.name,
self.snapshot_id, self.api_url)
fapi._check_response_code(r, 200)
return r.json() | def function[inputs_outputs, parameter[self]]:
constant[Get information on method inputs & outputs.]
variable[r] assign[=] call[name[fapi].get_inputs_outputs, parameter[name[self].namespace, name[self].name, name[self].snapshot_id, name[self].api_url]]
call[name[fapi]._check_response_code, parameter[name[r], constant[200]]]
return[call[name[r].json, parameter[]]] | keyword[def] identifier[inputs_outputs] ( identifier[self] ):
literal[string]
identifier[r] = identifier[fapi] . identifier[get_inputs_outputs] ( identifier[self] . identifier[namespace] , identifier[self] . identifier[name] ,
identifier[self] . identifier[snapshot_id] , identifier[self] . identifier[api_url] )
identifier[fapi] . identifier[_check_response_code] ( identifier[r] , literal[int] )
keyword[return] identifier[r] . identifier[json] () | def inputs_outputs(self):
"""Get information on method inputs & outputs."""
r = fapi.get_inputs_outputs(self.namespace, self.name, self.snapshot_id, self.api_url)
fapi._check_response_code(r, 200)
return r.json() |
def resp_set_wififirmware(self, resp):
"""Default callback for get_wififirmware
"""
if resp:
self.wifi_firmware_version = float(str(str(resp.version >> 16) + "." + str(resp.version & 0xff)))
self.wifi_firmware_build_timestamp = resp.build | def function[resp_set_wififirmware, parameter[self, resp]]:
constant[Default callback for get_wififirmware
]
if name[resp] begin[:]
name[self].wifi_firmware_version assign[=] call[name[float], parameter[call[name[str], parameter[binary_operation[binary_operation[call[name[str], parameter[binary_operation[name[resp].version <ast.RShift object at 0x7da2590d6a40> constant[16]]]] + constant[.]] + call[name[str], parameter[binary_operation[name[resp].version <ast.BitAnd object at 0x7da2590d6b60> constant[255]]]]]]]]]
name[self].wifi_firmware_build_timestamp assign[=] name[resp].build | keyword[def] identifier[resp_set_wififirmware] ( identifier[self] , identifier[resp] ):
literal[string]
keyword[if] identifier[resp] :
identifier[self] . identifier[wifi_firmware_version] = identifier[float] ( identifier[str] ( identifier[str] ( identifier[resp] . identifier[version] >> literal[int] )+ literal[string] + identifier[str] ( identifier[resp] . identifier[version] & literal[int] )))
identifier[self] . identifier[wifi_firmware_build_timestamp] = identifier[resp] . identifier[build] | def resp_set_wififirmware(self, resp):
"""Default callback for get_wififirmware
"""
if resp:
self.wifi_firmware_version = float(str(str(resp.version >> 16) + '.' + str(resp.version & 255)))
self.wifi_firmware_build_timestamp = resp.build # depends on [control=['if'], data=[]] |
def signature(self):
"""Gets completion or call signature information for the current cursor."""
#We can't really do anything sensible without the name of the function
#whose signature we are completing.
iexec, execmod = self.context.parser.tree_find(self.context.el_name,
self.context.module, "executables")
if iexec is None:
#Look in the interfaces next using a tree find as well
iexec, execmod = self.context.parser.tree_find(self.context.el_name, self.context.module,
"interfaces")
if iexec is None:
return []
return self._signature_index(iexec) | def function[signature, parameter[self]]:
constant[Gets completion or call signature information for the current cursor.]
<ast.Tuple object at 0x7da1b26edd50> assign[=] call[name[self].context.parser.tree_find, parameter[name[self].context.el_name, name[self].context.module, constant[executables]]]
if compare[name[iexec] is constant[None]] begin[:]
<ast.Tuple object at 0x7da1b26edc00> assign[=] call[name[self].context.parser.tree_find, parameter[name[self].context.el_name, name[self].context.module, constant[interfaces]]]
if compare[name[iexec] is constant[None]] begin[:]
return[list[[]]]
return[call[name[self]._signature_index, parameter[name[iexec]]]] | keyword[def] identifier[signature] ( identifier[self] ):
literal[string]
identifier[iexec] , identifier[execmod] = identifier[self] . identifier[context] . identifier[parser] . identifier[tree_find] ( identifier[self] . identifier[context] . identifier[el_name] ,
identifier[self] . identifier[context] . identifier[module] , literal[string] )
keyword[if] identifier[iexec] keyword[is] keyword[None] :
identifier[iexec] , identifier[execmod] = identifier[self] . identifier[context] . identifier[parser] . identifier[tree_find] ( identifier[self] . identifier[context] . identifier[el_name] , identifier[self] . identifier[context] . identifier[module] ,
literal[string] )
keyword[if] identifier[iexec] keyword[is] keyword[None] :
keyword[return] []
keyword[return] identifier[self] . identifier[_signature_index] ( identifier[iexec] ) | def signature(self):
"""Gets completion or call signature information for the current cursor."""
#We can't really do anything sensible without the name of the function
#whose signature we are completing.
(iexec, execmod) = self.context.parser.tree_find(self.context.el_name, self.context.module, 'executables')
if iexec is None:
#Look in the interfaces next using a tree find as well
(iexec, execmod) = self.context.parser.tree_find(self.context.el_name, self.context.module, 'interfaces') # depends on [control=['if'], data=['iexec']]
if iexec is None:
return [] # depends on [control=['if'], data=[]]
return self._signature_index(iexec) |
def order_by(self, order_by: Union[set, str]):
"""Update order_by setting for filter set"""
clone = self._clone()
if isinstance(order_by, str):
order_by = {order_by}
clone._order_by = clone._order_by.union(order_by)
return clone | def function[order_by, parameter[self, order_by]]:
constant[Update order_by setting for filter set]
variable[clone] assign[=] call[name[self]._clone, parameter[]]
if call[name[isinstance], parameter[name[order_by], name[str]]] begin[:]
variable[order_by] assign[=] <ast.Set object at 0x7da2044c2230>
name[clone]._order_by assign[=] call[name[clone]._order_by.union, parameter[name[order_by]]]
return[name[clone]] | keyword[def] identifier[order_by] ( identifier[self] , identifier[order_by] : identifier[Union] [ identifier[set] , identifier[str] ]):
literal[string]
identifier[clone] = identifier[self] . identifier[_clone] ()
keyword[if] identifier[isinstance] ( identifier[order_by] , identifier[str] ):
identifier[order_by] ={ identifier[order_by] }
identifier[clone] . identifier[_order_by] = identifier[clone] . identifier[_order_by] . identifier[union] ( identifier[order_by] )
keyword[return] identifier[clone] | def order_by(self, order_by: Union[set, str]):
"""Update order_by setting for filter set"""
clone = self._clone()
if isinstance(order_by, str):
order_by = {order_by} # depends on [control=['if'], data=[]]
clone._order_by = clone._order_by.union(order_by)
return clone |
def move_selection(reverse=False):
"""
Goes through the list of gunicorns, setting the selected as the one after
the currently selected.
"""
global selected_pid
if selected_pid not in gunicorns:
selected_pid = None
found = False
pids = sorted(gunicorns.keys(), reverse=reverse)
# Iterate items twice to enable wrapping.
for pid in pids + pids:
if selected_pid is None or found:
selected_pid = pid
return
found = pid == selected_pid | def function[move_selection, parameter[reverse]]:
constant[
Goes through the list of gunicorns, setting the selected as the one after
the currently selected.
]
<ast.Global object at 0x7da1b2249180>
if compare[name[selected_pid] <ast.NotIn object at 0x7da2590d7190> name[gunicorns]] begin[:]
variable[selected_pid] assign[=] constant[None]
variable[found] assign[=] constant[False]
variable[pids] assign[=] call[name[sorted], parameter[call[name[gunicorns].keys, parameter[]]]]
for taget[name[pid]] in starred[binary_operation[name[pids] + name[pids]]] begin[:]
if <ast.BoolOp object at 0x7da1b224a860> begin[:]
variable[selected_pid] assign[=] name[pid]
return[None]
variable[found] assign[=] compare[name[pid] equal[==] name[selected_pid]] | keyword[def] identifier[move_selection] ( identifier[reverse] = keyword[False] ):
literal[string]
keyword[global] identifier[selected_pid]
keyword[if] identifier[selected_pid] keyword[not] keyword[in] identifier[gunicorns] :
identifier[selected_pid] = keyword[None]
identifier[found] = keyword[False]
identifier[pids] = identifier[sorted] ( identifier[gunicorns] . identifier[keys] (), identifier[reverse] = identifier[reverse] )
keyword[for] identifier[pid] keyword[in] identifier[pids] + identifier[pids] :
keyword[if] identifier[selected_pid] keyword[is] keyword[None] keyword[or] identifier[found] :
identifier[selected_pid] = identifier[pid]
keyword[return]
identifier[found] = identifier[pid] == identifier[selected_pid] | def move_selection(reverse=False):
"""
Goes through the list of gunicorns, setting the selected as the one after
the currently selected.
"""
global selected_pid
if selected_pid not in gunicorns:
selected_pid = None # depends on [control=['if'], data=['selected_pid']]
found = False
pids = sorted(gunicorns.keys(), reverse=reverse)
# Iterate items twice to enable wrapping.
for pid in pids + pids:
if selected_pid is None or found:
selected_pid = pid
return # depends on [control=['if'], data=[]]
found = pid == selected_pid # depends on [control=['for'], data=['pid']] |
def main():
"""
Main function
"""
bc = BackendUpdate()
bc.initialize()
logger.info("backend_client, version: %s", __version__)
logger.debug("~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
success = False
if bc.item_type and bc.action == 'list':
success = bc.get_resource_list(bc.item_type, bc.item)
if bc.item_type and bc.action == 'get':
if bc.list:
success = bc.get_resource_list(bc.item_type, bc.item)
else:
if not bc.item:
logger.error("Can not %s a %s with no name!", bc.action, bc.item_type)
logger.error("Perharps you missed some parameters, run 'alignak-backend-cli -h'")
exit(64)
success = bc.get_resource(bc.item_type, bc.item)
if bc.action in ['add', 'update']:
success = bc.create_update_resource(bc.item_type, bc.item, bc.action == 'update')
if bc.action == 'delete':
success = bc.delete_resource(bc.item_type, bc.item)
if not success:
logger.error("%s '%s' %s failed", bc.item_type, bc.item, bc.action)
if not bc.verbose:
logger.warning("Set verbose mode to have more information (-v)")
exit(2)
exit(0) | def function[main, parameter[]]:
constant[
Main function
]
variable[bc] assign[=] call[name[BackendUpdate], parameter[]]
call[name[bc].initialize, parameter[]]
call[name[logger].info, parameter[constant[backend_client, version: %s], name[__version__]]]
call[name[logger].debug, parameter[constant[~~~~~~~~~~~~~~~~~~~~~~~~~~~~]]]
variable[success] assign[=] constant[False]
if <ast.BoolOp object at 0x7da207f9bdc0> begin[:]
variable[success] assign[=] call[name[bc].get_resource_list, parameter[name[bc].item_type, name[bc].item]]
if <ast.BoolOp object at 0x7da207f9ace0> begin[:]
if name[bc].list begin[:]
variable[success] assign[=] call[name[bc].get_resource_list, parameter[name[bc].item_type, name[bc].item]]
if compare[name[bc].action in list[[<ast.Constant object at 0x7da207f9a950>, <ast.Constant object at 0x7da207f9b8b0>]]] begin[:]
variable[success] assign[=] call[name[bc].create_update_resource, parameter[name[bc].item_type, name[bc].item, compare[name[bc].action equal[==] constant[update]]]]
if compare[name[bc].action equal[==] constant[delete]] begin[:]
variable[success] assign[=] call[name[bc].delete_resource, parameter[name[bc].item_type, name[bc].item]]
if <ast.UnaryOp object at 0x7da20c6a9150> begin[:]
call[name[logger].error, parameter[constant[%s '%s' %s failed], name[bc].item_type, name[bc].item, name[bc].action]]
if <ast.UnaryOp object at 0x7da20c6a9000> begin[:]
call[name[logger].warning, parameter[constant[Set verbose mode to have more information (-v)]]]
call[name[exit], parameter[constant[2]]]
call[name[exit], parameter[constant[0]]] | keyword[def] identifier[main] ():
literal[string]
identifier[bc] = identifier[BackendUpdate] ()
identifier[bc] . identifier[initialize] ()
identifier[logger] . identifier[info] ( literal[string] , identifier[__version__] )
identifier[logger] . identifier[debug] ( literal[string] )
identifier[success] = keyword[False]
keyword[if] identifier[bc] . identifier[item_type] keyword[and] identifier[bc] . identifier[action] == literal[string] :
identifier[success] = identifier[bc] . identifier[get_resource_list] ( identifier[bc] . identifier[item_type] , identifier[bc] . identifier[item] )
keyword[if] identifier[bc] . identifier[item_type] keyword[and] identifier[bc] . identifier[action] == literal[string] :
keyword[if] identifier[bc] . identifier[list] :
identifier[success] = identifier[bc] . identifier[get_resource_list] ( identifier[bc] . identifier[item_type] , identifier[bc] . identifier[item] )
keyword[else] :
keyword[if] keyword[not] identifier[bc] . identifier[item] :
identifier[logger] . identifier[error] ( literal[string] , identifier[bc] . identifier[action] , identifier[bc] . identifier[item_type] )
identifier[logger] . identifier[error] ( literal[string] )
identifier[exit] ( literal[int] )
identifier[success] = identifier[bc] . identifier[get_resource] ( identifier[bc] . identifier[item_type] , identifier[bc] . identifier[item] )
keyword[if] identifier[bc] . identifier[action] keyword[in] [ literal[string] , literal[string] ]:
identifier[success] = identifier[bc] . identifier[create_update_resource] ( identifier[bc] . identifier[item_type] , identifier[bc] . identifier[item] , identifier[bc] . identifier[action] == literal[string] )
keyword[if] identifier[bc] . identifier[action] == literal[string] :
identifier[success] = identifier[bc] . identifier[delete_resource] ( identifier[bc] . identifier[item_type] , identifier[bc] . identifier[item] )
keyword[if] keyword[not] identifier[success] :
identifier[logger] . identifier[error] ( literal[string] , identifier[bc] . identifier[item_type] , identifier[bc] . identifier[item] , identifier[bc] . identifier[action] )
keyword[if] keyword[not] identifier[bc] . identifier[verbose] :
identifier[logger] . identifier[warning] ( literal[string] )
identifier[exit] ( literal[int] )
identifier[exit] ( literal[int] ) | def main():
"""
Main function
"""
bc = BackendUpdate()
bc.initialize()
logger.info('backend_client, version: %s', __version__)
logger.debug('~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
success = False
if bc.item_type and bc.action == 'list':
success = bc.get_resource_list(bc.item_type, bc.item) # depends on [control=['if'], data=[]]
if bc.item_type and bc.action == 'get':
if bc.list:
success = bc.get_resource_list(bc.item_type, bc.item) # depends on [control=['if'], data=[]]
else:
if not bc.item:
logger.error('Can not %s a %s with no name!', bc.action, bc.item_type)
logger.error("Perharps you missed some parameters, run 'alignak-backend-cli -h'")
exit(64) # depends on [control=['if'], data=[]]
success = bc.get_resource(bc.item_type, bc.item) # depends on [control=['if'], data=[]]
if bc.action in ['add', 'update']:
success = bc.create_update_resource(bc.item_type, bc.item, bc.action == 'update') # depends on [control=['if'], data=[]]
if bc.action == 'delete':
success = bc.delete_resource(bc.item_type, bc.item) # depends on [control=['if'], data=[]]
if not success:
logger.error("%s '%s' %s failed", bc.item_type, bc.item, bc.action)
if not bc.verbose:
logger.warning('Set verbose mode to have more information (-v)') # depends on [control=['if'], data=[]]
exit(2) # depends on [control=['if'], data=[]]
exit(0) |
def close_job(self, job_id):
"""
Closes job
:param job_id: job_id as returned by 'create_operation_job(...)'
:return: close response as xml
"""
if not job_id or not self.has_active_session():
raise Exception("Can not close job without valid job_id and an active session.")
response = requests.post(self._get_close_job_url(job_id),
headers=self._get_close_job_headers(),
data=self._get_close_job_xml())
response.raise_for_status()
return response | def function[close_job, parameter[self, job_id]]:
constant[
Closes job
:param job_id: job_id as returned by 'create_operation_job(...)'
:return: close response as xml
]
if <ast.BoolOp object at 0x7da18dc06080> begin[:]
<ast.Raise object at 0x7da18dc047c0>
variable[response] assign[=] call[name[requests].post, parameter[call[name[self]._get_close_job_url, parameter[name[job_id]]]]]
call[name[response].raise_for_status, parameter[]]
return[name[response]] | keyword[def] identifier[close_job] ( identifier[self] , identifier[job_id] ):
literal[string]
keyword[if] keyword[not] identifier[job_id] keyword[or] keyword[not] identifier[self] . identifier[has_active_session] ():
keyword[raise] identifier[Exception] ( literal[string] )
identifier[response] = identifier[requests] . identifier[post] ( identifier[self] . identifier[_get_close_job_url] ( identifier[job_id] ),
identifier[headers] = identifier[self] . identifier[_get_close_job_headers] (),
identifier[data] = identifier[self] . identifier[_get_close_job_xml] ())
identifier[response] . identifier[raise_for_status] ()
keyword[return] identifier[response] | def close_job(self, job_id):
"""
Closes job
:param job_id: job_id as returned by 'create_operation_job(...)'
:return: close response as xml
"""
if not job_id or not self.has_active_session():
raise Exception('Can not close job without valid job_id and an active session.') # depends on [control=['if'], data=[]]
response = requests.post(self._get_close_job_url(job_id), headers=self._get_close_job_headers(), data=self._get_close_job_xml())
response.raise_for_status()
return response |
def _modify_new_lines(code_to_modify, offset, code_to_insert):
"""
Update new lines: the bytecode inserted should be the last instruction of the previous line.
:return: bytes sequence of code with updated lines offsets
"""
# There's a nice overview of co_lnotab in
# https://github.com/python/cpython/blob/3.6/Objects/lnotab_notes.txt
new_list = list(code_to_modify.co_lnotab)
if not new_list:
# Could happen on a lambda (in this case, a breakpoint in the lambda should fallback to
# tracing).
return None
# As all numbers are relative, what we want is to hide the code we inserted in the previous line
# (it should be the last thing right before we increment the line so that we have a line event
# right after the inserted code).
bytecode_delta = len(code_to_insert)
byte_increments = code_to_modify.co_lnotab[0::2]
line_increments = code_to_modify.co_lnotab[1::2]
if offset == 0:
new_list[0] += bytecode_delta
else:
addr = 0
it = zip(byte_increments, line_increments)
for i, (byte_incr, _line_incr) in enumerate(it):
addr += byte_incr
if addr == offset:
new_list[i * 2] += bytecode_delta
break
return bytes(new_list) | def function[_modify_new_lines, parameter[code_to_modify, offset, code_to_insert]]:
constant[
Update new lines: the bytecode inserted should be the last instruction of the previous line.
:return: bytes sequence of code with updated lines offsets
]
variable[new_list] assign[=] call[name[list], parameter[name[code_to_modify].co_lnotab]]
if <ast.UnaryOp object at 0x7da1b06fb2e0> begin[:]
return[constant[None]]
variable[bytecode_delta] assign[=] call[name[len], parameter[name[code_to_insert]]]
variable[byte_increments] assign[=] call[name[code_to_modify].co_lnotab][<ast.Slice object at 0x7da1b06fbf70>]
variable[line_increments] assign[=] call[name[code_to_modify].co_lnotab][<ast.Slice object at 0x7da1b06fae00>]
if compare[name[offset] equal[==] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b06fad10>
return[call[name[bytes], parameter[name[new_list]]]] | keyword[def] identifier[_modify_new_lines] ( identifier[code_to_modify] , identifier[offset] , identifier[code_to_insert] ):
literal[string]
identifier[new_list] = identifier[list] ( identifier[code_to_modify] . identifier[co_lnotab] )
keyword[if] keyword[not] identifier[new_list] :
keyword[return] keyword[None]
identifier[bytecode_delta] = identifier[len] ( identifier[code_to_insert] )
identifier[byte_increments] = identifier[code_to_modify] . identifier[co_lnotab] [ literal[int] :: literal[int] ]
identifier[line_increments] = identifier[code_to_modify] . identifier[co_lnotab] [ literal[int] :: literal[int] ]
keyword[if] identifier[offset] == literal[int] :
identifier[new_list] [ literal[int] ]+= identifier[bytecode_delta]
keyword[else] :
identifier[addr] = literal[int]
identifier[it] = identifier[zip] ( identifier[byte_increments] , identifier[line_increments] )
keyword[for] identifier[i] ,( identifier[byte_incr] , identifier[_line_incr] ) keyword[in] identifier[enumerate] ( identifier[it] ):
identifier[addr] += identifier[byte_incr]
keyword[if] identifier[addr] == identifier[offset] :
identifier[new_list] [ identifier[i] * literal[int] ]+= identifier[bytecode_delta]
keyword[break]
keyword[return] identifier[bytes] ( identifier[new_list] ) | def _modify_new_lines(code_to_modify, offset, code_to_insert):
"""
Update new lines: the bytecode inserted should be the last instruction of the previous line.
:return: bytes sequence of code with updated lines offsets
"""
# There's a nice overview of co_lnotab in
# https://github.com/python/cpython/blob/3.6/Objects/lnotab_notes.txt
new_list = list(code_to_modify.co_lnotab)
if not new_list:
# Could happen on a lambda (in this case, a breakpoint in the lambda should fallback to
# tracing).
return None # depends on [control=['if'], data=[]]
# As all numbers are relative, what we want is to hide the code we inserted in the previous line
# (it should be the last thing right before we increment the line so that we have a line event
# right after the inserted code).
bytecode_delta = len(code_to_insert)
byte_increments = code_to_modify.co_lnotab[0::2]
line_increments = code_to_modify.co_lnotab[1::2]
if offset == 0:
new_list[0] += bytecode_delta # depends on [control=['if'], data=[]]
else:
addr = 0
it = zip(byte_increments, line_increments)
for (i, (byte_incr, _line_incr)) in enumerate(it):
addr += byte_incr
if addr == offset:
new_list[i * 2] += bytecode_delta
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return bytes(new_list) |
def _paint_line_number_bg(self, im):
"""
Paint the line number background on the image.
"""
if not self.line_numbers:
return
if self.line_number_fg is None:
return
draw = ImageDraw.Draw(im)
recth = im.size[-1]
rectw = self.image_pad + self.line_number_width - self.line_number_pad
draw.rectangle([(0, 0), (rectw, recth)],
fill=self.line_number_bg)
draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
del draw | def function[_paint_line_number_bg, parameter[self, im]]:
constant[
Paint the line number background on the image.
]
if <ast.UnaryOp object at 0x7da18bc73340> begin[:]
return[None]
if compare[name[self].line_number_fg is constant[None]] begin[:]
return[None]
variable[draw] assign[=] call[name[ImageDraw].Draw, parameter[name[im]]]
variable[recth] assign[=] call[name[im].size][<ast.UnaryOp object at 0x7da18bcca3b0>]
variable[rectw] assign[=] binary_operation[binary_operation[name[self].image_pad + name[self].line_number_width] - name[self].line_number_pad]
call[name[draw].rectangle, parameter[list[[<ast.Tuple object at 0x7da18bccb3d0>, <ast.Tuple object at 0x7da18bcca410>]]]]
call[name[draw].line, parameter[list[[<ast.Tuple object at 0x7da18bccabc0>, <ast.Tuple object at 0x7da18bccab00>]]]]
<ast.Delete object at 0x7da18bccbf10> | keyword[def] identifier[_paint_line_number_bg] ( identifier[self] , identifier[im] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[line_numbers] :
keyword[return]
keyword[if] identifier[self] . identifier[line_number_fg] keyword[is] keyword[None] :
keyword[return]
identifier[draw] = identifier[ImageDraw] . identifier[Draw] ( identifier[im] )
identifier[recth] = identifier[im] . identifier[size] [- literal[int] ]
identifier[rectw] = identifier[self] . identifier[image_pad] + identifier[self] . identifier[line_number_width] - identifier[self] . identifier[line_number_pad]
identifier[draw] . identifier[rectangle] ([( literal[int] , literal[int] ),( identifier[rectw] , identifier[recth] )],
identifier[fill] = identifier[self] . identifier[line_number_bg] )
identifier[draw] . identifier[line] ([( identifier[rectw] , literal[int] ),( identifier[rectw] , identifier[recth] )], identifier[fill] = identifier[self] . identifier[line_number_fg] )
keyword[del] identifier[draw] | def _paint_line_number_bg(self, im):
"""
Paint the line number background on the image.
"""
if not self.line_numbers:
return # depends on [control=['if'], data=[]]
if self.line_number_fg is None:
return # depends on [control=['if'], data=[]]
draw = ImageDraw.Draw(im)
recth = im.size[-1]
rectw = self.image_pad + self.line_number_width - self.line_number_pad
draw.rectangle([(0, 0), (rectw, recth)], fill=self.line_number_bg)
draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
del draw |
def get_last_update(self):
"""Gets or creates the last update object for this widget."""
instance, created = \
models.DashboardWidgetLastUpdate.objects.get_or_create(
widget_name=self.get_name())
return instance | def function[get_last_update, parameter[self]]:
constant[Gets or creates the last update object for this widget.]
<ast.Tuple object at 0x7da20c6a8ee0> assign[=] call[name[models].DashboardWidgetLastUpdate.objects.get_or_create, parameter[]]
return[name[instance]] | keyword[def] identifier[get_last_update] ( identifier[self] ):
literal[string]
identifier[instance] , identifier[created] = identifier[models] . identifier[DashboardWidgetLastUpdate] . identifier[objects] . identifier[get_or_create] (
identifier[widget_name] = identifier[self] . identifier[get_name] ())
keyword[return] identifier[instance] | def get_last_update(self):
"""Gets or creates the last update object for this widget."""
(instance, created) = models.DashboardWidgetLastUpdate.objects.get_or_create(widget_name=self.get_name())
return instance |
def restore_schema(task, **kwargs):
""" Switches the schema back to the one from before running the task. """
from .compat import get_public_schema_name
schema_name = get_public_schema_name()
include_public = True
if hasattr(task, '_old_schema'):
schema_name, include_public = task._old_schema
# If the schema names match, don't do anything.
if connection.schema_name == schema_name:
return
connection.set_schema(schema_name, include_public=include_public) | def function[restore_schema, parameter[task]]:
constant[ Switches the schema back to the one from before running the task. ]
from relative_module[compat] import module[get_public_schema_name]
variable[schema_name] assign[=] call[name[get_public_schema_name], parameter[]]
variable[include_public] assign[=] constant[True]
if call[name[hasattr], parameter[name[task], constant[_old_schema]]] begin[:]
<ast.Tuple object at 0x7da1b04d0730> assign[=] name[task]._old_schema
if compare[name[connection].schema_name equal[==] name[schema_name]] begin[:]
return[None]
call[name[connection].set_schema, parameter[name[schema_name]]] | keyword[def] identifier[restore_schema] ( identifier[task] ,** identifier[kwargs] ):
literal[string]
keyword[from] . identifier[compat] keyword[import] identifier[get_public_schema_name]
identifier[schema_name] = identifier[get_public_schema_name] ()
identifier[include_public] = keyword[True]
keyword[if] identifier[hasattr] ( identifier[task] , literal[string] ):
identifier[schema_name] , identifier[include_public] = identifier[task] . identifier[_old_schema]
keyword[if] identifier[connection] . identifier[schema_name] == identifier[schema_name] :
keyword[return]
identifier[connection] . identifier[set_schema] ( identifier[schema_name] , identifier[include_public] = identifier[include_public] ) | def restore_schema(task, **kwargs):
""" Switches the schema back to the one from before running the task. """
from .compat import get_public_schema_name
schema_name = get_public_schema_name()
include_public = True
if hasattr(task, '_old_schema'):
(schema_name, include_public) = task._old_schema # depends on [control=['if'], data=[]]
# If the schema names match, don't do anything.
if connection.schema_name == schema_name:
return # depends on [control=['if'], data=[]]
connection.set_schema(schema_name, include_public=include_public) |
def on_connection_closed(self, connection, reply_code, reply_text):
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.TornadoConnection connection: Closed connection
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
"""
start_state = self.state
self.state = self.STATE_CLOSED
if self.on_unavailable:
self.on_unavailable(self)
self.connection = None
self.channel = None
if start_state != self.STATE_CLOSING:
LOGGER.warning('%s closed while %s: (%s) %s',
connection, self.state_description,
reply_code, reply_text)
self._reconnect() | def function[on_connection_closed, parameter[self, connection, reply_code, reply_text]]:
constant[This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.TornadoConnection connection: Closed connection
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
]
variable[start_state] assign[=] name[self].state
name[self].state assign[=] name[self].STATE_CLOSED
if name[self].on_unavailable begin[:]
call[name[self].on_unavailable, parameter[name[self]]]
name[self].connection assign[=] constant[None]
name[self].channel assign[=] constant[None]
if compare[name[start_state] not_equal[!=] name[self].STATE_CLOSING] begin[:]
call[name[LOGGER].warning, parameter[constant[%s closed while %s: (%s) %s], name[connection], name[self].state_description, name[reply_code], name[reply_text]]]
call[name[self]._reconnect, parameter[]] | keyword[def] identifier[on_connection_closed] ( identifier[self] , identifier[connection] , identifier[reply_code] , identifier[reply_text] ):
literal[string]
identifier[start_state] = identifier[self] . identifier[state]
identifier[self] . identifier[state] = identifier[self] . identifier[STATE_CLOSED]
keyword[if] identifier[self] . identifier[on_unavailable] :
identifier[self] . identifier[on_unavailable] ( identifier[self] )
identifier[self] . identifier[connection] = keyword[None]
identifier[self] . identifier[channel] = keyword[None]
keyword[if] identifier[start_state] != identifier[self] . identifier[STATE_CLOSING] :
identifier[LOGGER] . identifier[warning] ( literal[string] ,
identifier[connection] , identifier[self] . identifier[state_description] ,
identifier[reply_code] , identifier[reply_text] )
identifier[self] . identifier[_reconnect] () | def on_connection_closed(self, connection, reply_code, reply_text):
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.TornadoConnection connection: Closed connection
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
"""
start_state = self.state
self.state = self.STATE_CLOSED
if self.on_unavailable:
self.on_unavailable(self) # depends on [control=['if'], data=[]]
self.connection = None
self.channel = None
if start_state != self.STATE_CLOSING:
LOGGER.warning('%s closed while %s: (%s) %s', connection, self.state_description, reply_code, reply_text)
self._reconnect() # depends on [control=['if'], data=[]] |
def get_all_db_ids(language=DEFAULT_LANG):
"""
:return: A list with all the database IDs as integers
"""
_ids = []
json_path = DBVuln.get_json_path(language=language)
for _file in os.listdir(json_path):
if not _file.endswith('.json'):
continue
_id = _file.split('-')[0]
_ids.append(_id)
return _ids | def function[get_all_db_ids, parameter[language]]:
constant[
:return: A list with all the database IDs as integers
]
variable[_ids] assign[=] list[[]]
variable[json_path] assign[=] call[name[DBVuln].get_json_path, parameter[]]
for taget[name[_file]] in starred[call[name[os].listdir, parameter[name[json_path]]]] begin[:]
if <ast.UnaryOp object at 0x7da1b26af460> begin[:]
continue
variable[_id] assign[=] call[call[name[_file].split, parameter[constant[-]]]][constant[0]]
call[name[_ids].append, parameter[name[_id]]]
return[name[_ids]] | keyword[def] identifier[get_all_db_ids] ( identifier[language] = identifier[DEFAULT_LANG] ):
literal[string]
identifier[_ids] =[]
identifier[json_path] = identifier[DBVuln] . identifier[get_json_path] ( identifier[language] = identifier[language] )
keyword[for] identifier[_file] keyword[in] identifier[os] . identifier[listdir] ( identifier[json_path] ):
keyword[if] keyword[not] identifier[_file] . identifier[endswith] ( literal[string] ):
keyword[continue]
identifier[_id] = identifier[_file] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[_ids] . identifier[append] ( identifier[_id] )
keyword[return] identifier[_ids] | def get_all_db_ids(language=DEFAULT_LANG):
"""
:return: A list with all the database IDs as integers
"""
_ids = []
json_path = DBVuln.get_json_path(language=language)
for _file in os.listdir(json_path):
if not _file.endswith('.json'):
continue # depends on [control=['if'], data=[]]
_id = _file.split('-')[0]
_ids.append(_id) # depends on [control=['for'], data=['_file']]
return _ids |
def compute_cumsum(
df,
id_cols: List[str],
reference_cols: List[str],
value_cols: List[str],
new_value_cols: List[str] = None,
cols_to_keep: List[str] = None
):
"""
Compute cumsum for a group of columns.
---
### Parameters
*mandatory :*
- `id_cols` (*list*): the columns id to create each group
- `reference_cols` (*list*): the columns to order the cumsum
- `value_cols` (*list*): the columns to cumsum
*optional :*
- `new_value_cols` (*list*): the new columns with the result cumsum
- `cols_to_keep` (*list*): other columns to keep in the dataset.
This option can be used if there is only one row by group [id_cols + reference_cols]
---
### Example
**Input**
MONTH | DAY | NAME | VALUE | X
:---:|:---:|:--:|:---:|:---:
1 | 1 | A | 1 | lo
2 | 1 | A | 1 | lo
2 | 15 | A | 1 | la
1 | 15 | B | 1 | la
```cson
compute_cumsum:
id_cols: ['NAME']
reference_cols: ['MONTH', 'DAY']
cumsum_cols: ['VALUE']
cols_to_keep: ['X']
```
**Output**
NAME | MONTH | DAY | X | VALUE
:---:|:---:|:--:|:---:|:---:
A | 1 | 1 | lo | 1
A | 2 | 1 | la | 2
A | 2 | 15 | lo | 3
B | 1 | 15 | la | 1
"""
if cols_to_keep is None:
cols_to_keep = []
if new_value_cols is None:
new_value_cols = value_cols
if len(value_cols) != len(new_value_cols):
raise ParamsValueError('`value_cols` and `new_value_cols` needs '
'to have the same number of elements')
check_params_columns_duplicate(id_cols + reference_cols + cols_to_keep + value_cols)
levels = list(range(0, len(id_cols)))
df = df.groupby(id_cols + reference_cols + cols_to_keep).sum()
df[new_value_cols] = df.groupby(level=levels)[value_cols].cumsum()
return df.reset_index() | def function[compute_cumsum, parameter[df, id_cols, reference_cols, value_cols, new_value_cols, cols_to_keep]]:
constant[
Compute cumsum for a group of columns.
---
### Parameters
*mandatory :*
- `id_cols` (*list*): the columns id to create each group
- `reference_cols` (*list*): the columns to order the cumsum
- `value_cols` (*list*): the columns to cumsum
*optional :*
- `new_value_cols` (*list*): the new columns with the result cumsum
- `cols_to_keep` (*list*): other columns to keep in the dataset.
This option can be used if there is only one row by group [id_cols + reference_cols]
---
### Example
**Input**
MONTH | DAY | NAME | VALUE | X
:---:|:---:|:--:|:---:|:---:
1 | 1 | A | 1 | lo
2 | 1 | A | 1 | lo
2 | 15 | A | 1 | la
1 | 15 | B | 1 | la
```cson
compute_cumsum:
id_cols: ['NAME']
reference_cols: ['MONTH', 'DAY']
cumsum_cols: ['VALUE']
cols_to_keep: ['X']
```
**Output**
NAME | MONTH | DAY | X | VALUE
:---:|:---:|:--:|:---:|:---:
A | 1 | 1 | lo | 1
A | 2 | 1 | la | 2
A | 2 | 15 | lo | 3
B | 1 | 15 | la | 1
]
if compare[name[cols_to_keep] is constant[None]] begin[:]
variable[cols_to_keep] assign[=] list[[]]
if compare[name[new_value_cols] is constant[None]] begin[:]
variable[new_value_cols] assign[=] name[value_cols]
if compare[call[name[len], parameter[name[value_cols]]] not_equal[!=] call[name[len], parameter[name[new_value_cols]]]] begin[:]
<ast.Raise object at 0x7da1b03ba2c0>
call[name[check_params_columns_duplicate], parameter[binary_operation[binary_operation[binary_operation[name[id_cols] + name[reference_cols]] + name[cols_to_keep]] + name[value_cols]]]]
variable[levels] assign[=] call[name[list], parameter[call[name[range], parameter[constant[0], call[name[len], parameter[name[id_cols]]]]]]]
variable[df] assign[=] call[call[name[df].groupby, parameter[binary_operation[binary_operation[name[id_cols] + name[reference_cols]] + name[cols_to_keep]]]].sum, parameter[]]
call[name[df]][name[new_value_cols]] assign[=] call[call[call[name[df].groupby, parameter[]]][name[value_cols]].cumsum, parameter[]]
return[call[name[df].reset_index, parameter[]]] | keyword[def] identifier[compute_cumsum] (
identifier[df] ,
identifier[id_cols] : identifier[List] [ identifier[str] ],
identifier[reference_cols] : identifier[List] [ identifier[str] ],
identifier[value_cols] : identifier[List] [ identifier[str] ],
identifier[new_value_cols] : identifier[List] [ identifier[str] ]= keyword[None] ,
identifier[cols_to_keep] : identifier[List] [ identifier[str] ]= keyword[None]
):
literal[string]
keyword[if] identifier[cols_to_keep] keyword[is] keyword[None] :
identifier[cols_to_keep] =[]
keyword[if] identifier[new_value_cols] keyword[is] keyword[None] :
identifier[new_value_cols] = identifier[value_cols]
keyword[if] identifier[len] ( identifier[value_cols] )!= identifier[len] ( identifier[new_value_cols] ):
keyword[raise] identifier[ParamsValueError] ( literal[string]
literal[string] )
identifier[check_params_columns_duplicate] ( identifier[id_cols] + identifier[reference_cols] + identifier[cols_to_keep] + identifier[value_cols] )
identifier[levels] = identifier[list] ( identifier[range] ( literal[int] , identifier[len] ( identifier[id_cols] )))
identifier[df] = identifier[df] . identifier[groupby] ( identifier[id_cols] + identifier[reference_cols] + identifier[cols_to_keep] ). identifier[sum] ()
identifier[df] [ identifier[new_value_cols] ]= identifier[df] . identifier[groupby] ( identifier[level] = identifier[levels] )[ identifier[value_cols] ]. identifier[cumsum] ()
keyword[return] identifier[df] . identifier[reset_index] () | def compute_cumsum(df, id_cols: List[str], reference_cols: List[str], value_cols: List[str], new_value_cols: List[str]=None, cols_to_keep: List[str]=None):
"""
Compute cumsum for a group of columns.
---
### Parameters
*mandatory :*
- `id_cols` (*list*): the columns id to create each group
- `reference_cols` (*list*): the columns to order the cumsum
- `value_cols` (*list*): the columns to cumsum
*optional :*
- `new_value_cols` (*list*): the new columns with the result cumsum
- `cols_to_keep` (*list*): other columns to keep in the dataset.
This option can be used if there is only one row by group [id_cols + reference_cols]
---
### Example
**Input**
MONTH | DAY | NAME | VALUE | X
:---:|:---:|:--:|:---:|:---:
1 | 1 | A | 1 | lo
2 | 1 | A | 1 | lo
2 | 15 | A | 1 | la
1 | 15 | B | 1 | la
```cson
compute_cumsum:
id_cols: ['NAME']
reference_cols: ['MONTH', 'DAY']
cumsum_cols: ['VALUE']
cols_to_keep: ['X']
```
**Output**
NAME | MONTH | DAY | X | VALUE
:---:|:---:|:--:|:---:|:---:
A | 1 | 1 | lo | 1
A | 2 | 1 | la | 2
A | 2 | 15 | lo | 3
B | 1 | 15 | la | 1
"""
if cols_to_keep is None:
cols_to_keep = [] # depends on [control=['if'], data=['cols_to_keep']]
if new_value_cols is None:
new_value_cols = value_cols # depends on [control=['if'], data=['new_value_cols']]
if len(value_cols) != len(new_value_cols):
raise ParamsValueError('`value_cols` and `new_value_cols` needs to have the same number of elements') # depends on [control=['if'], data=[]]
check_params_columns_duplicate(id_cols + reference_cols + cols_to_keep + value_cols)
levels = list(range(0, len(id_cols)))
df = df.groupby(id_cols + reference_cols + cols_to_keep).sum()
df[new_value_cols] = df.groupby(level=levels)[value_cols].cumsum()
return df.reset_index() |
def mini_histogram(series, **kwargs):
"""Plot a small (mini) histogram of the data.
Parameters
----------
series: Series
The data to plot.
Returns
-------
str
The resulting image encoded as a string.
"""
imgdata = BytesIO()
plot = _plot_histogram(series, figsize=(2, 0.75), **kwargs)
plot.axes.get_yaxis().set_visible(False)
if LooseVersion(matplotlib.__version__) <= '1.5.9':
plot.set_axis_bgcolor("w")
else:
plot.set_facecolor("w")
xticks = plot.xaxis.get_major_ticks()
for tick in xticks[1:-1]:
tick.set_visible(False)
tick.label.set_visible(False)
for tick in (xticks[0], xticks[-1]):
tick.label.set_fontsize(8)
plot.figure.subplots_adjust(left=0.15, right=0.85, top=1, bottom=0.35, wspace=0, hspace=0)
plot.figure.savefig(imgdata)
imgdata.seek(0)
result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue()))
plt.close(plot.figure)
return result_string | def function[mini_histogram, parameter[series]]:
constant[Plot a small (mini) histogram of the data.
Parameters
----------
series: Series
The data to plot.
Returns
-------
str
The resulting image encoded as a string.
]
variable[imgdata] assign[=] call[name[BytesIO], parameter[]]
variable[plot] assign[=] call[name[_plot_histogram], parameter[name[series]]]
call[call[name[plot].axes.get_yaxis, parameter[]].set_visible, parameter[constant[False]]]
if compare[call[name[LooseVersion], parameter[name[matplotlib].__version__]] less_or_equal[<=] constant[1.5.9]] begin[:]
call[name[plot].set_axis_bgcolor, parameter[constant[w]]]
variable[xticks] assign[=] call[name[plot].xaxis.get_major_ticks, parameter[]]
for taget[name[tick]] in starred[call[name[xticks]][<ast.Slice object at 0x7da2044c1720>]] begin[:]
call[name[tick].set_visible, parameter[constant[False]]]
call[name[tick].label.set_visible, parameter[constant[False]]]
for taget[name[tick]] in starred[tuple[[<ast.Subscript object at 0x7da2044c32b0>, <ast.Subscript object at 0x7da2044c1ff0>]]] begin[:]
call[name[tick].label.set_fontsize, parameter[constant[8]]]
call[name[plot].figure.subplots_adjust, parameter[]]
call[name[plot].figure.savefig, parameter[name[imgdata]]]
call[name[imgdata].seek, parameter[constant[0]]]
variable[result_string] assign[=] binary_operation[constant[data:image/png;base64,] + call[name[quote], parameter[call[name[base64].b64encode, parameter[call[name[imgdata].getvalue, parameter[]]]]]]]
call[name[plt].close, parameter[name[plot].figure]]
return[name[result_string]] | keyword[def] identifier[mini_histogram] ( identifier[series] ,** identifier[kwargs] ):
literal[string]
identifier[imgdata] = identifier[BytesIO] ()
identifier[plot] = identifier[_plot_histogram] ( identifier[series] , identifier[figsize] =( literal[int] , literal[int] ),** identifier[kwargs] )
identifier[plot] . identifier[axes] . identifier[get_yaxis] (). identifier[set_visible] ( keyword[False] )
keyword[if] identifier[LooseVersion] ( identifier[matplotlib] . identifier[__version__] )<= literal[string] :
identifier[plot] . identifier[set_axis_bgcolor] ( literal[string] )
keyword[else] :
identifier[plot] . identifier[set_facecolor] ( literal[string] )
identifier[xticks] = identifier[plot] . identifier[xaxis] . identifier[get_major_ticks] ()
keyword[for] identifier[tick] keyword[in] identifier[xticks] [ literal[int] :- literal[int] ]:
identifier[tick] . identifier[set_visible] ( keyword[False] )
identifier[tick] . identifier[label] . identifier[set_visible] ( keyword[False] )
keyword[for] identifier[tick] keyword[in] ( identifier[xticks] [ literal[int] ], identifier[xticks] [- literal[int] ]):
identifier[tick] . identifier[label] . identifier[set_fontsize] ( literal[int] )
identifier[plot] . identifier[figure] . identifier[subplots_adjust] ( identifier[left] = literal[int] , identifier[right] = literal[int] , identifier[top] = literal[int] , identifier[bottom] = literal[int] , identifier[wspace] = literal[int] , identifier[hspace] = literal[int] )
identifier[plot] . identifier[figure] . identifier[savefig] ( identifier[imgdata] )
identifier[imgdata] . identifier[seek] ( literal[int] )
identifier[result_string] = literal[string] + identifier[quote] ( identifier[base64] . identifier[b64encode] ( identifier[imgdata] . identifier[getvalue] ()))
identifier[plt] . identifier[close] ( identifier[plot] . identifier[figure] )
keyword[return] identifier[result_string] | def mini_histogram(series, **kwargs):
"""Plot a small (mini) histogram of the data.
Parameters
----------
series: Series
The data to plot.
Returns
-------
str
The resulting image encoded as a string.
"""
imgdata = BytesIO()
plot = _plot_histogram(series, figsize=(2, 0.75), **kwargs)
plot.axes.get_yaxis().set_visible(False)
if LooseVersion(matplotlib.__version__) <= '1.5.9':
plot.set_axis_bgcolor('w') # depends on [control=['if'], data=[]]
else:
plot.set_facecolor('w')
xticks = plot.xaxis.get_major_ticks()
for tick in xticks[1:-1]:
tick.set_visible(False)
tick.label.set_visible(False) # depends on [control=['for'], data=['tick']]
for tick in (xticks[0], xticks[-1]):
tick.label.set_fontsize(8) # depends on [control=['for'], data=['tick']]
plot.figure.subplots_adjust(left=0.15, right=0.85, top=1, bottom=0.35, wspace=0, hspace=0)
plot.figure.savefig(imgdata)
imgdata.seek(0)
result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue()))
plt.close(plot.figure)
return result_string |
def __draw_block(self, block, block_alpha=0.0):
"""!
@brief Display single BANG block on axis.
@param[in] block (bang_block): BANG block that should be displayed.
@param[in] block_alpha (double): Transparency level - value of alpha.
"""
max_corner, min_corner = block.get_spatial_block().get_corners()
face_color = matplotlib.colors.to_rgba('blue', alpha=block_alpha)
edge_color = matplotlib.colors.to_rgba('black', alpha=1.0)
rect = patches.Rectangle(min_corner, max_corner[0] - min_corner[0], max_corner[1] - min_corner[1],
fill=True,
facecolor=face_color,
edgecolor=edge_color,
linewidth=0.5)
self.__ax.add_patch(rect) | def function[__draw_block, parameter[self, block, block_alpha]]:
constant[!
@brief Display single BANG block on axis.
@param[in] block (bang_block): BANG block that should be displayed.
@param[in] block_alpha (double): Transparency level - value of alpha.
]
<ast.Tuple object at 0x7da1b016d270> assign[=] call[call[name[block].get_spatial_block, parameter[]].get_corners, parameter[]]
variable[face_color] assign[=] call[name[matplotlib].colors.to_rgba, parameter[constant[blue]]]
variable[edge_color] assign[=] call[name[matplotlib].colors.to_rgba, parameter[constant[black]]]
variable[rect] assign[=] call[name[patches].Rectangle, parameter[name[min_corner], binary_operation[call[name[max_corner]][constant[0]] - call[name[min_corner]][constant[0]]], binary_operation[call[name[max_corner]][constant[1]] - call[name[min_corner]][constant[1]]]]]
call[name[self].__ax.add_patch, parameter[name[rect]]] | keyword[def] identifier[__draw_block] ( identifier[self] , identifier[block] , identifier[block_alpha] = literal[int] ):
literal[string]
identifier[max_corner] , identifier[min_corner] = identifier[block] . identifier[get_spatial_block] (). identifier[get_corners] ()
identifier[face_color] = identifier[matplotlib] . identifier[colors] . identifier[to_rgba] ( literal[string] , identifier[alpha] = identifier[block_alpha] )
identifier[edge_color] = identifier[matplotlib] . identifier[colors] . identifier[to_rgba] ( literal[string] , identifier[alpha] = literal[int] )
identifier[rect] = identifier[patches] . identifier[Rectangle] ( identifier[min_corner] , identifier[max_corner] [ literal[int] ]- identifier[min_corner] [ literal[int] ], identifier[max_corner] [ literal[int] ]- identifier[min_corner] [ literal[int] ],
identifier[fill] = keyword[True] ,
identifier[facecolor] = identifier[face_color] ,
identifier[edgecolor] = identifier[edge_color] ,
identifier[linewidth] = literal[int] )
identifier[self] . identifier[__ax] . identifier[add_patch] ( identifier[rect] ) | def __draw_block(self, block, block_alpha=0.0):
"""!
@brief Display single BANG block on axis.
@param[in] block (bang_block): BANG block that should be displayed.
@param[in] block_alpha (double): Transparency level - value of alpha.
"""
(max_corner, min_corner) = block.get_spatial_block().get_corners()
face_color = matplotlib.colors.to_rgba('blue', alpha=block_alpha)
edge_color = matplotlib.colors.to_rgba('black', alpha=1.0)
rect = patches.Rectangle(min_corner, max_corner[0] - min_corner[0], max_corner[1] - min_corner[1], fill=True, facecolor=face_color, edgecolor=edge_color, linewidth=0.5)
self.__ax.add_patch(rect) |
def return_cursor(self, dataout, sx, sy, frame, wcs, key, strval=''):
"""
writes the cursor position to dataout.
input:
dataout: the output stream
sx: x coordinate
sy: y coordinate
wcs: nonzero if we want WCS translation
frame: frame buffer index
key: keystroke used as trigger
strval: optional string value
"""
#print "RETURN CURSOR"
wcscode = (frame + 1) * 100 + wcs
if (key == '\32'):
curval = "EOF"
else:
if (key in string.printable and key not in string.whitespace):
keystr = key
else:
keystr = "\\%03o" % (ord(key))
# send the necessary infor to the client
curval = "%10.3f %10.3f %d %s %s\n" % (sx, sy, wcscode, keystr, strval)
dataout.write(right_pad(curval, SZ_IMCURVAL)) | def function[return_cursor, parameter[self, dataout, sx, sy, frame, wcs, key, strval]]:
constant[
writes the cursor position to dataout.
input:
dataout: the output stream
sx: x coordinate
sy: y coordinate
wcs: nonzero if we want WCS translation
frame: frame buffer index
key: keystroke used as trigger
strval: optional string value
]
variable[wcscode] assign[=] binary_operation[binary_operation[binary_operation[name[frame] + constant[1]] * constant[100]] + name[wcs]]
if compare[name[key] equal[==] constant[]] begin[:]
variable[curval] assign[=] constant[EOF]
variable[curval] assign[=] binary_operation[constant[%10.3f %10.3f %d %s %s
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f811b40>, <ast.Name object at 0x7da18f8125c0>, <ast.Name object at 0x7da18f811ab0>, <ast.Name object at 0x7da18f811510>, <ast.Name object at 0x7da18f813310>]]]
call[name[dataout].write, parameter[call[name[right_pad], parameter[name[curval], name[SZ_IMCURVAL]]]]] | keyword[def] identifier[return_cursor] ( identifier[self] , identifier[dataout] , identifier[sx] , identifier[sy] , identifier[frame] , identifier[wcs] , identifier[key] , identifier[strval] = literal[string] ):
literal[string]
identifier[wcscode] =( identifier[frame] + literal[int] )* literal[int] + identifier[wcs]
keyword[if] ( identifier[key] == literal[string] ):
identifier[curval] = literal[string]
keyword[else] :
keyword[if] ( identifier[key] keyword[in] identifier[string] . identifier[printable] keyword[and] identifier[key] keyword[not] keyword[in] identifier[string] . identifier[whitespace] ):
identifier[keystr] = identifier[key]
keyword[else] :
identifier[keystr] = literal[string] %( identifier[ord] ( identifier[key] ))
identifier[curval] = literal[string] %( identifier[sx] , identifier[sy] , identifier[wcscode] , identifier[keystr] , identifier[strval] )
identifier[dataout] . identifier[write] ( identifier[right_pad] ( identifier[curval] , identifier[SZ_IMCURVAL] )) | def return_cursor(self, dataout, sx, sy, frame, wcs, key, strval=''):
"""
writes the cursor position to dataout.
input:
dataout: the output stream
sx: x coordinate
sy: y coordinate
wcs: nonzero if we want WCS translation
frame: frame buffer index
key: keystroke used as trigger
strval: optional string value
"""
#print "RETURN CURSOR"
wcscode = (frame + 1) * 100 + wcs
if key == '\x1a':
curval = 'EOF' # depends on [control=['if'], data=[]]
elif key in string.printable and key not in string.whitespace:
keystr = key # depends on [control=['if'], data=[]]
else:
keystr = '\\%03o' % ord(key)
# send the necessary infor to the client
curval = '%10.3f %10.3f %d %s %s\n' % (sx, sy, wcscode, keystr, strval)
dataout.write(right_pad(curval, SZ_IMCURVAL)) |
def parse(self, stream, *args, **kwargs):
"""Parse a HTML document into a well-formed tree
:arg stream: a file-like object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element).
:arg scripting: treat noscript elements as if JavaScript was turned on
:returns: parsed tree
Example:
>>> from html5lib.html5parser import HTMLParser
>>> parser = HTMLParser()
>>> parser.parse('<html><body><p>This is a doc</p></body></html>')
<Element u'{http://www.w3.org/1999/xhtml}html' at 0x7feac4909db0>
"""
self._parse(stream, False, None, *args, **kwargs)
return self.tree.getDocument() | def function[parse, parameter[self, stream]]:
constant[Parse a HTML document into a well-formed tree
:arg stream: a file-like object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element).
:arg scripting: treat noscript elements as if JavaScript was turned on
:returns: parsed tree
Example:
>>> from html5lib.html5parser import HTMLParser
>>> parser = HTMLParser()
>>> parser.parse('<html><body><p>This is a doc</p></body></html>')
<Element u'{http://www.w3.org/1999/xhtml}html' at 0x7feac4909db0>
]
call[name[self]._parse, parameter[name[stream], constant[False], constant[None], <ast.Starred object at 0x7da1b1ea3bb0>]]
return[call[name[self].tree.getDocument, parameter[]]] | keyword[def] identifier[parse] ( identifier[self] , identifier[stream] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[_parse] ( identifier[stream] , keyword[False] , keyword[None] ,* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[self] . identifier[tree] . identifier[getDocument] () | def parse(self, stream, *args, **kwargs):
"""Parse a HTML document into a well-formed tree
:arg stream: a file-like object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element).
:arg scripting: treat noscript elements as if JavaScript was turned on
:returns: parsed tree
Example:
>>> from html5lib.html5parser import HTMLParser
>>> parser = HTMLParser()
>>> parser.parse('<html><body><p>This is a doc</p></body></html>')
<Element u'{http://www.w3.org/1999/xhtml}html' at 0x7feac4909db0>
"""
self._parse(stream, False, None, *args, **kwargs)
return self.tree.getDocument() |
def variants(ctx, variant_id, chromosome, end_chromosome, start, end, variant_type,
sv_type):
"""Display variants in the database."""
if sv_type:
variant_type = 'sv'
adapter = ctx.obj['adapter']
if (start or end):
if not (chromosome and start and end):
LOG.warning("Regions must be specified with chromosome, start and end")
return
if variant_id:
variant = adapter.get_variant({'_id':variant_id})
if variant:
click.echo(variant)
else:
LOG.info("Variant {0} does not exist in database".format(variant_id))
return
if variant_type == 'snv':
result = adapter.get_variants(
chromosome=chromosome,
start=start,
end=end
)
else:
LOG.info("Search for svs")
result = adapter.get_sv_variants(
chromosome=chromosome,
end_chromosome=end_chromosome,
sv_type=sv_type,
pos=start,
end=end
)
i = 0
for variant in result:
i += 1
pp(variant)
LOG.info("Number of variants found in database: %s", i) | def function[variants, parameter[ctx, variant_id, chromosome, end_chromosome, start, end, variant_type, sv_type]]:
constant[Display variants in the database.]
if name[sv_type] begin[:]
variable[variant_type] assign[=] constant[sv]
variable[adapter] assign[=] call[name[ctx].obj][constant[adapter]]
if <ast.BoolOp object at 0x7da1b1bb9630> begin[:]
if <ast.UnaryOp object at 0x7da1b1bbb9d0> begin[:]
call[name[LOG].warning, parameter[constant[Regions must be specified with chromosome, start and end]]]
return[None]
if name[variant_id] begin[:]
variable[variant] assign[=] call[name[adapter].get_variant, parameter[dictionary[[<ast.Constant object at 0x7da1b1bbb3d0>], [<ast.Name object at 0x7da1b1bb8250>]]]]
if name[variant] begin[:]
call[name[click].echo, parameter[name[variant]]]
return[None]
if compare[name[variant_type] equal[==] constant[snv]] begin[:]
variable[result] assign[=] call[name[adapter].get_variants, parameter[]]
variable[i] assign[=] constant[0]
for taget[name[variant]] in starred[name[result]] begin[:]
<ast.AugAssign object at 0x7da1b195cf40>
call[name[pp], parameter[name[variant]]]
call[name[LOG].info, parameter[constant[Number of variants found in database: %s], name[i]]] | keyword[def] identifier[variants] ( identifier[ctx] , identifier[variant_id] , identifier[chromosome] , identifier[end_chromosome] , identifier[start] , identifier[end] , identifier[variant_type] ,
identifier[sv_type] ):
literal[string]
keyword[if] identifier[sv_type] :
identifier[variant_type] = literal[string]
identifier[adapter] = identifier[ctx] . identifier[obj] [ literal[string] ]
keyword[if] ( identifier[start] keyword[or] identifier[end] ):
keyword[if] keyword[not] ( identifier[chromosome] keyword[and] identifier[start] keyword[and] identifier[end] ):
identifier[LOG] . identifier[warning] ( literal[string] )
keyword[return]
keyword[if] identifier[variant_id] :
identifier[variant] = identifier[adapter] . identifier[get_variant] ({ literal[string] : identifier[variant_id] })
keyword[if] identifier[variant] :
identifier[click] . identifier[echo] ( identifier[variant] )
keyword[else] :
identifier[LOG] . identifier[info] ( literal[string] . identifier[format] ( identifier[variant_id] ))
keyword[return]
keyword[if] identifier[variant_type] == literal[string] :
identifier[result] = identifier[adapter] . identifier[get_variants] (
identifier[chromosome] = identifier[chromosome] ,
identifier[start] = identifier[start] ,
identifier[end] = identifier[end]
)
keyword[else] :
identifier[LOG] . identifier[info] ( literal[string] )
identifier[result] = identifier[adapter] . identifier[get_sv_variants] (
identifier[chromosome] = identifier[chromosome] ,
identifier[end_chromosome] = identifier[end_chromosome] ,
identifier[sv_type] = identifier[sv_type] ,
identifier[pos] = identifier[start] ,
identifier[end] = identifier[end]
)
identifier[i] = literal[int]
keyword[for] identifier[variant] keyword[in] identifier[result] :
identifier[i] += literal[int]
identifier[pp] ( identifier[variant] )
identifier[LOG] . identifier[info] ( literal[string] , identifier[i] ) | def variants(ctx, variant_id, chromosome, end_chromosome, start, end, variant_type, sv_type):
"""Display variants in the database."""
if sv_type:
variant_type = 'sv' # depends on [control=['if'], data=[]]
adapter = ctx.obj['adapter']
if start or end:
if not (chromosome and start and end):
LOG.warning('Regions must be specified with chromosome, start and end')
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if variant_id:
variant = adapter.get_variant({'_id': variant_id})
if variant:
click.echo(variant) # depends on [control=['if'], data=[]]
else:
LOG.info('Variant {0} does not exist in database'.format(variant_id))
return # depends on [control=['if'], data=[]]
if variant_type == 'snv':
result = adapter.get_variants(chromosome=chromosome, start=start, end=end) # depends on [control=['if'], data=[]]
else:
LOG.info('Search for svs')
result = adapter.get_sv_variants(chromosome=chromosome, end_chromosome=end_chromosome, sv_type=sv_type, pos=start, end=end)
i = 0
for variant in result:
i += 1
pp(variant) # depends on [control=['for'], data=['variant']]
LOG.info('Number of variants found in database: %s', i) |
def link(self, link, title, text):
"""Rendering a given link with content and title.
:param link: href link for ``<a>`` tag.
:param title: title content for `title` attribute.
:param text: text content for description.
"""
link = escape_link(link)
if not title:
return '<a href="%s">%s</a>' % (link, text)
title = escape(title, quote=True)
return '<a href="%s" title="%s">%s</a>' % (link, title, text) | def function[link, parameter[self, link, title, text]]:
constant[Rendering a given link with content and title.
:param link: href link for ``<a>`` tag.
:param title: title content for `title` attribute.
:param text: text content for description.
]
variable[link] assign[=] call[name[escape_link], parameter[name[link]]]
if <ast.UnaryOp object at 0x7da18ede5f90> begin[:]
return[binary_operation[constant[<a href="%s">%s</a>] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18ede7130>, <ast.Name object at 0x7da18ede6da0>]]]]
variable[title] assign[=] call[name[escape], parameter[name[title]]]
return[binary_operation[constant[<a href="%s" title="%s">%s</a>] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18ede5e70>, <ast.Name object at 0x7da18ede6d40>, <ast.Name object at 0x7da18ede7970>]]]] | keyword[def] identifier[link] ( identifier[self] , identifier[link] , identifier[title] , identifier[text] ):
literal[string]
identifier[link] = identifier[escape_link] ( identifier[link] )
keyword[if] keyword[not] identifier[title] :
keyword[return] literal[string] %( identifier[link] , identifier[text] )
identifier[title] = identifier[escape] ( identifier[title] , identifier[quote] = keyword[True] )
keyword[return] literal[string] %( identifier[link] , identifier[title] , identifier[text] ) | def link(self, link, title, text):
"""Rendering a given link with content and title.
:param link: href link for ``<a>`` tag.
:param title: title content for `title` attribute.
:param text: text content for description.
"""
link = escape_link(link)
if not title:
return '<a href="%s">%s</a>' % (link, text) # depends on [control=['if'], data=[]]
title = escape(title, quote=True)
return '<a href="%s" title="%s">%s</a>' % (link, title, text) |
def available_datasets(self):
"""Automatically determine datasets provided by this file"""
res = self.resolution
coordinates = ['pixel_longitude', 'pixel_latitude']
for var_name, val in self.file_content.items():
if isinstance(val, netCDF4.Variable):
ds_info = {
'file_type': self.filetype_info['file_type'],
'resolution': res,
}
if not self.is_geo:
ds_info['coordinates'] = coordinates
yield DatasetID(name=var_name, resolution=res), ds_info | def function[available_datasets, parameter[self]]:
constant[Automatically determine datasets provided by this file]
variable[res] assign[=] name[self].resolution
variable[coordinates] assign[=] list[[<ast.Constant object at 0x7da1b1d37490>, <ast.Constant object at 0x7da1b1d374c0>]]
for taget[tuple[[<ast.Name object at 0x7da1b1d34e20>, <ast.Name object at 0x7da1b1d37ac0>]]] in starred[call[name[self].file_content.items, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[val], name[netCDF4].Variable]] begin[:]
variable[ds_info] assign[=] dictionary[[<ast.Constant object at 0x7da1b1d37f10>, <ast.Constant object at 0x7da1b1d361d0>], [<ast.Subscript object at 0x7da1b1d36fe0>, <ast.Name object at 0x7da1b1d36650>]]
if <ast.UnaryOp object at 0x7da1b1d376a0> begin[:]
call[name[ds_info]][constant[coordinates]] assign[=] name[coordinates]
<ast.Yield object at 0x7da1b1d37f40> | keyword[def] identifier[available_datasets] ( identifier[self] ):
literal[string]
identifier[res] = identifier[self] . identifier[resolution]
identifier[coordinates] =[ literal[string] , literal[string] ]
keyword[for] identifier[var_name] , identifier[val] keyword[in] identifier[self] . identifier[file_content] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[val] , identifier[netCDF4] . identifier[Variable] ):
identifier[ds_info] ={
literal[string] : identifier[self] . identifier[filetype_info] [ literal[string] ],
literal[string] : identifier[res] ,
}
keyword[if] keyword[not] identifier[self] . identifier[is_geo] :
identifier[ds_info] [ literal[string] ]= identifier[coordinates]
keyword[yield] identifier[DatasetID] ( identifier[name] = identifier[var_name] , identifier[resolution] = identifier[res] ), identifier[ds_info] | def available_datasets(self):
"""Automatically determine datasets provided by this file"""
res = self.resolution
coordinates = ['pixel_longitude', 'pixel_latitude']
for (var_name, val) in self.file_content.items():
if isinstance(val, netCDF4.Variable):
ds_info = {'file_type': self.filetype_info['file_type'], 'resolution': res}
if not self.is_geo:
ds_info['coordinates'] = coordinates # depends on [control=['if'], data=[]]
yield (DatasetID(name=var_name, resolution=res), ds_info) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def get_random(self, n, l=None):
""" Return n random sequences from this Fasta object """
random_f = Fasta()
if l:
ids = self.ids[:]
random.shuffle(ids)
i = 0
while (i < n) and (len(ids) > 0):
seq_id = ids.pop()
if (len(self[seq_id]) >= l):
start = random.randint(0, len(self[seq_id]) - l)
random_f["random%s" % (i + 1)] = self[seq_id][start:start+l]
i += 1
if len(random_f) != n:
sys.stderr.write("Not enough sequences of required length")
return
else:
return random_f
else:
choice = random.sample(self.ids, n)
for i in range(n):
random_f[choice[i]] = self[choice[i]]
return random_f | def function[get_random, parameter[self, n, l]]:
constant[ Return n random sequences from this Fasta object ]
variable[random_f] assign[=] call[name[Fasta], parameter[]]
if name[l] begin[:]
variable[ids] assign[=] call[name[self].ids][<ast.Slice object at 0x7da204347820>]
call[name[random].shuffle, parameter[name[ids]]]
variable[i] assign[=] constant[0]
while <ast.BoolOp object at 0x7da204344370> begin[:]
variable[seq_id] assign[=] call[name[ids].pop, parameter[]]
if compare[call[name[len], parameter[call[name[self]][name[seq_id]]]] greater_or_equal[>=] name[l]] begin[:]
variable[start] assign[=] call[name[random].randint, parameter[constant[0], binary_operation[call[name[len], parameter[call[name[self]][name[seq_id]]]] - name[l]]]]
call[name[random_f]][binary_operation[constant[random%s] <ast.Mod object at 0x7da2590d6920> binary_operation[name[i] + constant[1]]]] assign[=] call[call[name[self]][name[seq_id]]][<ast.Slice object at 0x7da1b0fe95d0>]
<ast.AugAssign object at 0x7da1b0feaa10>
if compare[call[name[len], parameter[name[random_f]]] not_equal[!=] name[n]] begin[:]
call[name[sys].stderr.write, parameter[constant[Not enough sequences of required length]]]
return[None]
return[name[random_f]] | keyword[def] identifier[get_random] ( identifier[self] , identifier[n] , identifier[l] = keyword[None] ):
literal[string]
identifier[random_f] = identifier[Fasta] ()
keyword[if] identifier[l] :
identifier[ids] = identifier[self] . identifier[ids] [:]
identifier[random] . identifier[shuffle] ( identifier[ids] )
identifier[i] = literal[int]
keyword[while] ( identifier[i] < identifier[n] ) keyword[and] ( identifier[len] ( identifier[ids] )> literal[int] ):
identifier[seq_id] = identifier[ids] . identifier[pop] ()
keyword[if] ( identifier[len] ( identifier[self] [ identifier[seq_id] ])>= identifier[l] ):
identifier[start] = identifier[random] . identifier[randint] ( literal[int] , identifier[len] ( identifier[self] [ identifier[seq_id] ])- identifier[l] )
identifier[random_f] [ literal[string] %( identifier[i] + literal[int] )]= identifier[self] [ identifier[seq_id] ][ identifier[start] : identifier[start] + identifier[l] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[random_f] )!= identifier[n] :
identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] )
keyword[return]
keyword[else] :
keyword[return] identifier[random_f]
keyword[else] :
identifier[choice] = identifier[random] . identifier[sample] ( identifier[self] . identifier[ids] , identifier[n] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[n] ):
identifier[random_f] [ identifier[choice] [ identifier[i] ]]= identifier[self] [ identifier[choice] [ identifier[i] ]]
keyword[return] identifier[random_f] | def get_random(self, n, l=None):
""" Return n random sequences from this Fasta object """
random_f = Fasta()
if l:
ids = self.ids[:]
random.shuffle(ids)
i = 0
while i < n and len(ids) > 0:
seq_id = ids.pop()
if len(self[seq_id]) >= l:
start = random.randint(0, len(self[seq_id]) - l)
random_f['random%s' % (i + 1)] = self[seq_id][start:start + l]
i += 1 # depends on [control=['if'], data=['l']] # depends on [control=['while'], data=[]]
if len(random_f) != n:
sys.stderr.write('Not enough sequences of required length')
return # depends on [control=['if'], data=[]]
else:
return random_f # depends on [control=['if'], data=[]]
else:
choice = random.sample(self.ids, n)
for i in range(n):
random_f[choice[i]] = self[choice[i]] # depends on [control=['for'], data=['i']]
return random_f |
def i18n_support_locale(lc_parent):
"""
Find out whether lc is supported. Returns all child locales (and eventually lc) which do have support.
:param lc_parent: Locale for which we want to know the child locales that are supported
:return: list of supported locales
"""
log.debug('i18n_support_locale( locale="{locale}" ) called'.format(locale=lc_parent))
lc_childs = i18n_locale_fallbacks_calculate(lc_parent)
locales = []
locale_path = i18n_get_path()
mo_file = '{project}.mo'.format(project=project.PROJECT_TITLE.lower())
for lc in lc_childs:
lc_mo_path = locale_path / lc / 'LC_MESSAGES' / mo_file
log.debug('Locale data "{lc_mo_path}" exists? ...'.format(lc_mo_path=lc_mo_path))
if lc_mo_path.is_file():
log.debug('... Yes! "{locale_path}" contains {mo_file}.'.format(locale_path=locale_path, mo_file=mo_file))
locales.append(lc)
else:
log.debug('... No')
log.debug('i18n_support_locale( lc="{lc}" ) = {locales}'.format(lc=lc_parent, locales=locales))
return locales | def function[i18n_support_locale, parameter[lc_parent]]:
constant[
Find out whether lc is supported. Returns all child locales (and eventually lc) which do have support.
:param lc_parent: Locale for which we want to know the child locales that are supported
:return: list of supported locales
]
call[name[log].debug, parameter[call[constant[i18n_support_locale( locale="{locale}" ) called].format, parameter[]]]]
variable[lc_childs] assign[=] call[name[i18n_locale_fallbacks_calculate], parameter[name[lc_parent]]]
variable[locales] assign[=] list[[]]
variable[locale_path] assign[=] call[name[i18n_get_path], parameter[]]
variable[mo_file] assign[=] call[constant[{project}.mo].format, parameter[]]
for taget[name[lc]] in starred[name[lc_childs]] begin[:]
variable[lc_mo_path] assign[=] binary_operation[binary_operation[binary_operation[name[locale_path] / name[lc]] / constant[LC_MESSAGES]] / name[mo_file]]
call[name[log].debug, parameter[call[constant[Locale data "{lc_mo_path}" exists? ...].format, parameter[]]]]
if call[name[lc_mo_path].is_file, parameter[]] begin[:]
call[name[log].debug, parameter[call[constant[... Yes! "{locale_path}" contains {mo_file}.].format, parameter[]]]]
call[name[locales].append, parameter[name[lc]]]
call[name[log].debug, parameter[call[constant[i18n_support_locale( lc="{lc}" ) = {locales}].format, parameter[]]]]
return[name[locales]] | keyword[def] identifier[i18n_support_locale] ( identifier[lc_parent] ):
literal[string]
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[locale] = identifier[lc_parent] ))
identifier[lc_childs] = identifier[i18n_locale_fallbacks_calculate] ( identifier[lc_parent] )
identifier[locales] =[]
identifier[locale_path] = identifier[i18n_get_path] ()
identifier[mo_file] = literal[string] . identifier[format] ( identifier[project] = identifier[project] . identifier[PROJECT_TITLE] . identifier[lower] ())
keyword[for] identifier[lc] keyword[in] identifier[lc_childs] :
identifier[lc_mo_path] = identifier[locale_path] / identifier[lc] / literal[string] / identifier[mo_file]
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[lc_mo_path] = identifier[lc_mo_path] ))
keyword[if] identifier[lc_mo_path] . identifier[is_file] ():
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[locale_path] = identifier[locale_path] , identifier[mo_file] = identifier[mo_file] ))
identifier[locales] . identifier[append] ( identifier[lc] )
keyword[else] :
identifier[log] . identifier[debug] ( literal[string] )
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[lc] = identifier[lc_parent] , identifier[locales] = identifier[locales] ))
keyword[return] identifier[locales] | def i18n_support_locale(lc_parent):
"""
Find out whether lc is supported. Returns all child locales (and eventually lc) which do have support.
:param lc_parent: Locale for which we want to know the child locales that are supported
:return: list of supported locales
"""
log.debug('i18n_support_locale( locale="{locale}" ) called'.format(locale=lc_parent))
lc_childs = i18n_locale_fallbacks_calculate(lc_parent)
locales = []
locale_path = i18n_get_path()
mo_file = '{project}.mo'.format(project=project.PROJECT_TITLE.lower())
for lc in lc_childs:
lc_mo_path = locale_path / lc / 'LC_MESSAGES' / mo_file
log.debug('Locale data "{lc_mo_path}" exists? ...'.format(lc_mo_path=lc_mo_path))
if lc_mo_path.is_file():
log.debug('... Yes! "{locale_path}" contains {mo_file}.'.format(locale_path=locale_path, mo_file=mo_file))
locales.append(lc) # depends on [control=['if'], data=[]]
else:
log.debug('... No') # depends on [control=['for'], data=['lc']]
log.debug('i18n_support_locale( lc="{lc}" ) = {locales}'.format(lc=lc_parent, locales=locales))
return locales |
def iterpoints(resp: dict, parser: Optional[Callable] = None) -> Iterator[Any]:
"""Iterates a response JSON yielding data point by point.
Can be used with both regular and chunked responses.
By default, returns just a plain list of values representing each point,
without column names, or other metadata.
In case a specific format is needed, an optional ``parser`` argument can be passed.
``parser`` is a function/callable that takes data point values
and, optionally, a ``meta`` parameter containing which takes a
dictionary containing all or a subset of the following:
``{'columns', 'name', 'tags', 'statement_id'}``.
Sample parser functions:
.. code:: python
# Function optional meta argument
def parser(*x, meta):
return dict(zip(meta['columns'], x))
# Namedtuple (callable)
from collections import namedtuple
parser = namedtuple('MyPoint', ['col1', 'col2', 'col3'])
:param resp: Dictionary containing parsed JSON (output from InfluxDBClient.query)
:param parser: Optional parser function/callable
:return: Generator object
"""
for statement in resp['results']:
if 'series' not in statement:
continue
for series in statement['series']:
if parser is None:
return (x for x in series['values'])
elif 'meta' in inspect.signature(parser).parameters:
meta = {k: series[k] for k in series if k != 'values'}
meta['statement_id'] = statement['statement_id']
return (parser(*x, meta=meta) for x in series['values'])
else:
return (parser(*x) for x in series['values'])
return iter([]) | def function[iterpoints, parameter[resp, parser]]:
constant[Iterates a response JSON yielding data point by point.
Can be used with both regular and chunked responses.
By default, returns just a plain list of values representing each point,
without column names, or other metadata.
In case a specific format is needed, an optional ``parser`` argument can be passed.
``parser`` is a function/callable that takes data point values
and, optionally, a ``meta`` parameter containing which takes a
dictionary containing all or a subset of the following:
``{'columns', 'name', 'tags', 'statement_id'}``.
Sample parser functions:
.. code:: python
# Function optional meta argument
def parser(*x, meta):
return dict(zip(meta['columns'], x))
# Namedtuple (callable)
from collections import namedtuple
parser = namedtuple('MyPoint', ['col1', 'col2', 'col3'])
:param resp: Dictionary containing parsed JSON (output from InfluxDBClient.query)
:param parser: Optional parser function/callable
:return: Generator object
]
for taget[name[statement]] in starred[call[name[resp]][constant[results]]] begin[:]
if compare[constant[series] <ast.NotIn object at 0x7da2590d7190> name[statement]] begin[:]
continue
for taget[name[series]] in starred[call[name[statement]][constant[series]]] begin[:]
if compare[name[parser] is constant[None]] begin[:]
return[<ast.GeneratorExp object at 0x7da2044c06d0>]
return[call[name[iter], parameter[list[[]]]]] | keyword[def] identifier[iterpoints] ( identifier[resp] : identifier[dict] , identifier[parser] : identifier[Optional] [ identifier[Callable] ]= keyword[None] )-> identifier[Iterator] [ identifier[Any] ]:
literal[string]
keyword[for] identifier[statement] keyword[in] identifier[resp] [ literal[string] ]:
keyword[if] literal[string] keyword[not] keyword[in] identifier[statement] :
keyword[continue]
keyword[for] identifier[series] keyword[in] identifier[statement] [ literal[string] ]:
keyword[if] identifier[parser] keyword[is] keyword[None] :
keyword[return] ( identifier[x] keyword[for] identifier[x] keyword[in] identifier[series] [ literal[string] ])
keyword[elif] literal[string] keyword[in] identifier[inspect] . identifier[signature] ( identifier[parser] ). identifier[parameters] :
identifier[meta] ={ identifier[k] : identifier[series] [ identifier[k] ] keyword[for] identifier[k] keyword[in] identifier[series] keyword[if] identifier[k] != literal[string] }
identifier[meta] [ literal[string] ]= identifier[statement] [ literal[string] ]
keyword[return] ( identifier[parser] (* identifier[x] , identifier[meta] = identifier[meta] ) keyword[for] identifier[x] keyword[in] identifier[series] [ literal[string] ])
keyword[else] :
keyword[return] ( identifier[parser] (* identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[series] [ literal[string] ])
keyword[return] identifier[iter] ([]) | def iterpoints(resp: dict, parser: Optional[Callable]=None) -> Iterator[Any]:
"""Iterates a response JSON yielding data point by point.
Can be used with both regular and chunked responses.
By default, returns just a plain list of values representing each point,
without column names, or other metadata.
In case a specific format is needed, an optional ``parser`` argument can be passed.
``parser`` is a function/callable that takes data point values
and, optionally, a ``meta`` parameter containing which takes a
dictionary containing all or a subset of the following:
``{'columns', 'name', 'tags', 'statement_id'}``.
Sample parser functions:
.. code:: python
# Function optional meta argument
def parser(*x, meta):
return dict(zip(meta['columns'], x))
# Namedtuple (callable)
from collections import namedtuple
parser = namedtuple('MyPoint', ['col1', 'col2', 'col3'])
:param resp: Dictionary containing parsed JSON (output from InfluxDBClient.query)
:param parser: Optional parser function/callable
:return: Generator object
"""
for statement in resp['results']:
if 'series' not in statement:
continue # depends on [control=['if'], data=[]]
for series in statement['series']:
if parser is None:
return (x for x in series['values']) # depends on [control=['if'], data=[]]
elif 'meta' in inspect.signature(parser).parameters:
meta = {k: series[k] for k in series if k != 'values'}
meta['statement_id'] = statement['statement_id']
return (parser(*x, meta=meta) for x in series['values']) # depends on [control=['if'], data=[]]
else:
return (parser(*x) for x in series['values']) # depends on [control=['for'], data=['series']] # depends on [control=['for'], data=['statement']]
return iter([]) |
def create_auth_strategy(self):
"""depends on config"""
if self.config.authPolicy == LOCAL_AUTH_POLICY:
return LocalAuthStrategy(auth_map=self.auth_map,
anyone_can_write_map=self.anyone_can_write_map if self.anyone_can_write else None)
elif self.config.authPolicy == CONFIG_LEDGER_AUTH_POLICY:
return ConfigLedgerAuthStrategy(auth_map=self.auth_map,
state=self.config_state,
serializer=self.state_serializer,
anyone_can_write_map=self.anyone_can_write_map if self.anyone_can_write else None,
metrics=self.metrics) | def function[create_auth_strategy, parameter[self]]:
constant[depends on config]
if compare[name[self].config.authPolicy equal[==] name[LOCAL_AUTH_POLICY]] begin[:]
return[call[name[LocalAuthStrategy], parameter[]]] | keyword[def] identifier[create_auth_strategy] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[config] . identifier[authPolicy] == identifier[LOCAL_AUTH_POLICY] :
keyword[return] identifier[LocalAuthStrategy] ( identifier[auth_map] = identifier[self] . identifier[auth_map] ,
identifier[anyone_can_write_map] = identifier[self] . identifier[anyone_can_write_map] keyword[if] identifier[self] . identifier[anyone_can_write] keyword[else] keyword[None] )
keyword[elif] identifier[self] . identifier[config] . identifier[authPolicy] == identifier[CONFIG_LEDGER_AUTH_POLICY] :
keyword[return] identifier[ConfigLedgerAuthStrategy] ( identifier[auth_map] = identifier[self] . identifier[auth_map] ,
identifier[state] = identifier[self] . identifier[config_state] ,
identifier[serializer] = identifier[self] . identifier[state_serializer] ,
identifier[anyone_can_write_map] = identifier[self] . identifier[anyone_can_write_map] keyword[if] identifier[self] . identifier[anyone_can_write] keyword[else] keyword[None] ,
identifier[metrics] = identifier[self] . identifier[metrics] ) | def create_auth_strategy(self):
"""depends on config"""
if self.config.authPolicy == LOCAL_AUTH_POLICY:
return LocalAuthStrategy(auth_map=self.auth_map, anyone_can_write_map=self.anyone_can_write_map if self.anyone_can_write else None) # depends on [control=['if'], data=[]]
elif self.config.authPolicy == CONFIG_LEDGER_AUTH_POLICY:
return ConfigLedgerAuthStrategy(auth_map=self.auth_map, state=self.config_state, serializer=self.state_serializer, anyone_can_write_map=self.anyone_can_write_map if self.anyone_can_write else None, metrics=self.metrics) # depends on [control=['if'], data=[]] |
def MakeParser(prog):
"""Create an argument parser.
Args:
prog: The name of the program to use when outputting help text.
Returns:
An argparse.ArgumentParser built to specification.
"""
def AddStandardOptions(parser, *args):
"""Add common endpoints options to a parser.
Args:
parser: The parser to add options to.
*args: A list of option names to add. Possible names are: application,
format, output, language, service, and discovery_doc.
"""
if 'application' in args:
parser.add_argument('-a', '--application', default='.',
help='The path to the Python App Engine App')
if 'format' in args:
# This used to be a valid option, allowing the user to select 'rest' or 'rpc',
# but now 'rest' is the only valid type. The argument remains so scripts using it
# won't break.
parser.add_argument('-f', '--format', default='rest',
choices=['rest'],
help='The requested API protocol type (ignored)')
if 'hostname' in args:
help_text = ('Default application hostname, if none is specified '
'for API service.')
parser.add_argument('--hostname', help=help_text)
if 'output' in args:
parser.add_argument('-o', '--output', default='.',
help='The directory to store output files')
if 'language' in args:
parser.add_argument('language',
help='The target output programming language')
if 'service' in args:
parser.add_argument('service', nargs='+',
help='Fully qualified service class name')
if 'discovery_doc' in args:
parser.add_argument('discovery_doc', nargs=1,
help='Path to the discovery document')
if 'build_system' in args:
parser.add_argument('-bs', '--build_system', default='default',
help='The target build system')
parser = _EndpointsParser(prog=prog)
subparsers = parser.add_subparsers(
title='subcommands', metavar='{%s}' % ', '.join(_VISIBLE_COMMANDS))
get_client_lib = subparsers.add_parser(
'get_client_lib', help=('Generates discovery documents and client '
'libraries from service classes'))
get_client_lib.set_defaults(callback=_GetClientLibCallback)
AddStandardOptions(get_client_lib, 'application', 'hostname', 'output',
'language', 'service', 'build_system')
get_discovery_doc = subparsers.add_parser(
'get_discovery_doc',
help='Generates discovery documents from service classes')
get_discovery_doc.set_defaults(callback=_GenDiscoveryDocCallback)
AddStandardOptions(get_discovery_doc, 'application', 'format', 'hostname',
'output', 'service')
get_openapi_spec = subparsers.add_parser(
'get_openapi_spec',
help='Generates OpenAPI (Swagger) specs from service classes')
get_openapi_spec.set_defaults(callback=_GenOpenApiSpecCallback)
AddStandardOptions(get_openapi_spec, 'application', 'hostname', 'output',
'service')
get_openapi_spec.add_argument('--x-google-api-name', action='store_true',
help="Add the 'x-google-api-name' field to the generated spec")
# Create an alias for get_openapi_spec called get_swagger_spec to support
# the old-style naming. This won't be a visible command, but it will still
# function to support legacy scripts.
get_swagger_spec = subparsers.add_parser(
'get_swagger_spec',
help='Generates OpenAPI (Swagger) specs from service classes')
get_swagger_spec.set_defaults(callback=_GenOpenApiSpecCallback)
AddStandardOptions(get_swagger_spec, 'application', 'hostname', 'output',
'service')
# By removing the help attribute, the following three actions won't be
# displayed in usage message
gen_api_config = subparsers.add_parser('gen_api_config')
gen_api_config.set_defaults(callback=_GenApiConfigCallback)
AddStandardOptions(gen_api_config, 'application', 'hostname', 'output',
'service')
gen_discovery_doc = subparsers.add_parser('gen_discovery_doc')
gen_discovery_doc.set_defaults(callback=_GenDiscoveryDocCallback)
AddStandardOptions(gen_discovery_doc, 'application', 'format', 'hostname',
'output', 'service')
gen_client_lib = subparsers.add_parser('gen_client_lib')
gen_client_lib.set_defaults(callback=_GenClientLibCallback)
AddStandardOptions(gen_client_lib, 'output', 'language', 'discovery_doc',
'build_system')
return parser | def function[MakeParser, parameter[prog]]:
constant[Create an argument parser.
Args:
prog: The name of the program to use when outputting help text.
Returns:
An argparse.ArgumentParser built to specification.
]
def function[AddStandardOptions, parameter[parser]]:
constant[Add common endpoints options to a parser.
Args:
parser: The parser to add options to.
*args: A list of option names to add. Possible names are: application,
format, output, language, service, and discovery_doc.
]
if compare[constant[application] in name[args]] begin[:]
call[name[parser].add_argument, parameter[constant[-a], constant[--application]]]
if compare[constant[format] in name[args]] begin[:]
call[name[parser].add_argument, parameter[constant[-f], constant[--format]]]
if compare[constant[hostname] in name[args]] begin[:]
variable[help_text] assign[=] constant[Default application hostname, if none is specified for API service.]
call[name[parser].add_argument, parameter[constant[--hostname]]]
if compare[constant[output] in name[args]] begin[:]
call[name[parser].add_argument, parameter[constant[-o], constant[--output]]]
if compare[constant[language] in name[args]] begin[:]
call[name[parser].add_argument, parameter[constant[language]]]
if compare[constant[service] in name[args]] begin[:]
call[name[parser].add_argument, parameter[constant[service]]]
if compare[constant[discovery_doc] in name[args]] begin[:]
call[name[parser].add_argument, parameter[constant[discovery_doc]]]
if compare[constant[build_system] in name[args]] begin[:]
call[name[parser].add_argument, parameter[constant[-bs], constant[--build_system]]]
variable[parser] assign[=] call[name[_EndpointsParser], parameter[]]
variable[subparsers] assign[=] call[name[parser].add_subparsers, parameter[]]
variable[get_client_lib] assign[=] call[name[subparsers].add_parser, parameter[constant[get_client_lib]]]
call[name[get_client_lib].set_defaults, parameter[]]
call[name[AddStandardOptions], parameter[name[get_client_lib], constant[application], constant[hostname], constant[output], constant[language], constant[service], constant[build_system]]]
variable[get_discovery_doc] assign[=] call[name[subparsers].add_parser, parameter[constant[get_discovery_doc]]]
call[name[get_discovery_doc].set_defaults, parameter[]]
call[name[AddStandardOptions], parameter[name[get_discovery_doc], constant[application], constant[format], constant[hostname], constant[output], constant[service]]]
variable[get_openapi_spec] assign[=] call[name[subparsers].add_parser, parameter[constant[get_openapi_spec]]]
call[name[get_openapi_spec].set_defaults, parameter[]]
call[name[AddStandardOptions], parameter[name[get_openapi_spec], constant[application], constant[hostname], constant[output], constant[service]]]
call[name[get_openapi_spec].add_argument, parameter[constant[--x-google-api-name]]]
variable[get_swagger_spec] assign[=] call[name[subparsers].add_parser, parameter[constant[get_swagger_spec]]]
call[name[get_swagger_spec].set_defaults, parameter[]]
call[name[AddStandardOptions], parameter[name[get_swagger_spec], constant[application], constant[hostname], constant[output], constant[service]]]
variable[gen_api_config] assign[=] call[name[subparsers].add_parser, parameter[constant[gen_api_config]]]
call[name[gen_api_config].set_defaults, parameter[]]
call[name[AddStandardOptions], parameter[name[gen_api_config], constant[application], constant[hostname], constant[output], constant[service]]]
variable[gen_discovery_doc] assign[=] call[name[subparsers].add_parser, parameter[constant[gen_discovery_doc]]]
call[name[gen_discovery_doc].set_defaults, parameter[]]
call[name[AddStandardOptions], parameter[name[gen_discovery_doc], constant[application], constant[format], constant[hostname], constant[output], constant[service]]]
variable[gen_client_lib] assign[=] call[name[subparsers].add_parser, parameter[constant[gen_client_lib]]]
call[name[gen_client_lib].set_defaults, parameter[]]
call[name[AddStandardOptions], parameter[name[gen_client_lib], constant[output], constant[language], constant[discovery_doc], constant[build_system]]]
return[name[parser]] | keyword[def] identifier[MakeParser] ( identifier[prog] ):
literal[string]
keyword[def] identifier[AddStandardOptions] ( identifier[parser] ,* identifier[args] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[default] = literal[string] ,
identifier[help] = literal[string] )
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[default] = literal[string] ,
identifier[choices] =[ literal[string] ],
identifier[help] = literal[string] )
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[help_text] =( literal[string]
literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = identifier[help_text] )
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[default] = literal[string] ,
identifier[help] = literal[string] )
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[parser] . identifier[add_argument] ( literal[string] ,
identifier[help] = literal[string] )
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[nargs] = literal[string] ,
identifier[help] = literal[string] )
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[nargs] = literal[int] ,
identifier[help] = literal[string] )
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[default] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] = identifier[_EndpointsParser] ( identifier[prog] = identifier[prog] )
identifier[subparsers] = identifier[parser] . identifier[add_subparsers] (
identifier[title] = literal[string] , identifier[metavar] = literal[string] % literal[string] . identifier[join] ( identifier[_VISIBLE_COMMANDS] ))
identifier[get_client_lib] = identifier[subparsers] . identifier[add_parser] (
literal[string] , identifier[help] =( literal[string]
literal[string] ))
identifier[get_client_lib] . identifier[set_defaults] ( identifier[callback] = identifier[_GetClientLibCallback] )
identifier[AddStandardOptions] ( identifier[get_client_lib] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] )
identifier[get_discovery_doc] = identifier[subparsers] . identifier[add_parser] (
literal[string] ,
identifier[help] = literal[string] )
identifier[get_discovery_doc] . identifier[set_defaults] ( identifier[callback] = identifier[_GenDiscoveryDocCallback] )
identifier[AddStandardOptions] ( identifier[get_discovery_doc] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] )
identifier[get_openapi_spec] = identifier[subparsers] . identifier[add_parser] (
literal[string] ,
identifier[help] = literal[string] )
identifier[get_openapi_spec] . identifier[set_defaults] ( identifier[callback] = identifier[_GenOpenApiSpecCallback] )
identifier[AddStandardOptions] ( identifier[get_openapi_spec] , literal[string] , literal[string] , literal[string] ,
literal[string] )
identifier[get_openapi_spec] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[get_swagger_spec] = identifier[subparsers] . identifier[add_parser] (
literal[string] ,
identifier[help] = literal[string] )
identifier[get_swagger_spec] . identifier[set_defaults] ( identifier[callback] = identifier[_GenOpenApiSpecCallback] )
identifier[AddStandardOptions] ( identifier[get_swagger_spec] , literal[string] , literal[string] , literal[string] ,
literal[string] )
identifier[gen_api_config] = identifier[subparsers] . identifier[add_parser] ( literal[string] )
identifier[gen_api_config] . identifier[set_defaults] ( identifier[callback] = identifier[_GenApiConfigCallback] )
identifier[AddStandardOptions] ( identifier[gen_api_config] , literal[string] , literal[string] , literal[string] ,
literal[string] )
identifier[gen_discovery_doc] = identifier[subparsers] . identifier[add_parser] ( literal[string] )
identifier[gen_discovery_doc] . identifier[set_defaults] ( identifier[callback] = identifier[_GenDiscoveryDocCallback] )
identifier[AddStandardOptions] ( identifier[gen_discovery_doc] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] )
identifier[gen_client_lib] = identifier[subparsers] . identifier[add_parser] ( literal[string] )
identifier[gen_client_lib] . identifier[set_defaults] ( identifier[callback] = identifier[_GenClientLibCallback] )
identifier[AddStandardOptions] ( identifier[gen_client_lib] , literal[string] , literal[string] , literal[string] ,
literal[string] )
keyword[return] identifier[parser] | def MakeParser(prog):
"""Create an argument parser.
Args:
prog: The name of the program to use when outputting help text.
Returns:
An argparse.ArgumentParser built to specification.
"""
def AddStandardOptions(parser, *args):
"""Add common endpoints options to a parser.
Args:
parser: The parser to add options to.
*args: A list of option names to add. Possible names are: application,
format, output, language, service, and discovery_doc.
"""
if 'application' in args:
parser.add_argument('-a', '--application', default='.', help='The path to the Python App Engine App') # depends on [control=['if'], data=[]]
if 'format' in args:
# This used to be a valid option, allowing the user to select 'rest' or 'rpc',
# but now 'rest' is the only valid type. The argument remains so scripts using it
# won't break.
parser.add_argument('-f', '--format', default='rest', choices=['rest'], help='The requested API protocol type (ignored)') # depends on [control=['if'], data=[]]
if 'hostname' in args:
help_text = 'Default application hostname, if none is specified for API service.'
parser.add_argument('--hostname', help=help_text) # depends on [control=['if'], data=[]]
if 'output' in args:
parser.add_argument('-o', '--output', default='.', help='The directory to store output files') # depends on [control=['if'], data=[]]
if 'language' in args:
parser.add_argument('language', help='The target output programming language') # depends on [control=['if'], data=[]]
if 'service' in args:
parser.add_argument('service', nargs='+', help='Fully qualified service class name') # depends on [control=['if'], data=[]]
if 'discovery_doc' in args:
parser.add_argument('discovery_doc', nargs=1, help='Path to the discovery document') # depends on [control=['if'], data=[]]
if 'build_system' in args:
parser.add_argument('-bs', '--build_system', default='default', help='The target build system') # depends on [control=['if'], data=[]]
parser = _EndpointsParser(prog=prog)
subparsers = parser.add_subparsers(title='subcommands', metavar='{%s}' % ', '.join(_VISIBLE_COMMANDS))
get_client_lib = subparsers.add_parser('get_client_lib', help='Generates discovery documents and client libraries from service classes')
get_client_lib.set_defaults(callback=_GetClientLibCallback)
AddStandardOptions(get_client_lib, 'application', 'hostname', 'output', 'language', 'service', 'build_system')
get_discovery_doc = subparsers.add_parser('get_discovery_doc', help='Generates discovery documents from service classes')
get_discovery_doc.set_defaults(callback=_GenDiscoveryDocCallback)
AddStandardOptions(get_discovery_doc, 'application', 'format', 'hostname', 'output', 'service')
get_openapi_spec = subparsers.add_parser('get_openapi_spec', help='Generates OpenAPI (Swagger) specs from service classes')
get_openapi_spec.set_defaults(callback=_GenOpenApiSpecCallback)
AddStandardOptions(get_openapi_spec, 'application', 'hostname', 'output', 'service')
get_openapi_spec.add_argument('--x-google-api-name', action='store_true', help="Add the 'x-google-api-name' field to the generated spec")
# Create an alias for get_openapi_spec called get_swagger_spec to support
# the old-style naming. This won't be a visible command, but it will still
# function to support legacy scripts.
get_swagger_spec = subparsers.add_parser('get_swagger_spec', help='Generates OpenAPI (Swagger) specs from service classes')
get_swagger_spec.set_defaults(callback=_GenOpenApiSpecCallback)
AddStandardOptions(get_swagger_spec, 'application', 'hostname', 'output', 'service')
# By removing the help attribute, the following three actions won't be
# displayed in usage message
gen_api_config = subparsers.add_parser('gen_api_config')
gen_api_config.set_defaults(callback=_GenApiConfigCallback)
AddStandardOptions(gen_api_config, 'application', 'hostname', 'output', 'service')
gen_discovery_doc = subparsers.add_parser('gen_discovery_doc')
gen_discovery_doc.set_defaults(callback=_GenDiscoveryDocCallback)
AddStandardOptions(gen_discovery_doc, 'application', 'format', 'hostname', 'output', 'service')
gen_client_lib = subparsers.add_parser('gen_client_lib')
gen_client_lib.set_defaults(callback=_GenClientLibCallback)
AddStandardOptions(gen_client_lib, 'output', 'language', 'discovery_doc', 'build_system')
return parser |
def _get_id(owner, date, content):
"""
Generate an unique Atom id for the given content
"""
h = hashlib.sha256()
# Hash still contains the original project url, keep as is
h.update("github.com/spacetelescope/asv".encode('utf-8'))
for x in content:
if x is None:
h.update(",".encode('utf-8'))
else:
h.update(x.encode('utf-8'))
h.update(",".encode('utf-8'))
if date is None:
date = datetime.datetime(1970, 1, 1)
return "tag:{0},{1}:/{2}".format(owner, date.strftime('%Y-%m-%d'), h.hexdigest()) | def function[_get_id, parameter[owner, date, content]]:
constant[
Generate an unique Atom id for the given content
]
variable[h] assign[=] call[name[hashlib].sha256, parameter[]]
call[name[h].update, parameter[call[constant[github.com/spacetelescope/asv].encode, parameter[constant[utf-8]]]]]
for taget[name[x]] in starred[name[content]] begin[:]
if compare[name[x] is constant[None]] begin[:]
call[name[h].update, parameter[call[constant[,].encode, parameter[constant[utf-8]]]]]
call[name[h].update, parameter[call[constant[,].encode, parameter[constant[utf-8]]]]]
if compare[name[date] is constant[None]] begin[:]
variable[date] assign[=] call[name[datetime].datetime, parameter[constant[1970], constant[1], constant[1]]]
return[call[constant[tag:{0},{1}:/{2}].format, parameter[name[owner], call[name[date].strftime, parameter[constant[%Y-%m-%d]]], call[name[h].hexdigest, parameter[]]]]] | keyword[def] identifier[_get_id] ( identifier[owner] , identifier[date] , identifier[content] ):
literal[string]
identifier[h] = identifier[hashlib] . identifier[sha256] ()
identifier[h] . identifier[update] ( literal[string] . identifier[encode] ( literal[string] ))
keyword[for] identifier[x] keyword[in] identifier[content] :
keyword[if] identifier[x] keyword[is] keyword[None] :
identifier[h] . identifier[update] ( literal[string] . identifier[encode] ( literal[string] ))
keyword[else] :
identifier[h] . identifier[update] ( identifier[x] . identifier[encode] ( literal[string] ))
identifier[h] . identifier[update] ( literal[string] . identifier[encode] ( literal[string] ))
keyword[if] identifier[date] keyword[is] keyword[None] :
identifier[date] = identifier[datetime] . identifier[datetime] ( literal[int] , literal[int] , literal[int] )
keyword[return] literal[string] . identifier[format] ( identifier[owner] , identifier[date] . identifier[strftime] ( literal[string] ), identifier[h] . identifier[hexdigest] ()) | def _get_id(owner, date, content):
"""
Generate an unique Atom id for the given content
"""
h = hashlib.sha256()
# Hash still contains the original project url, keep as is
h.update('github.com/spacetelescope/asv'.encode('utf-8'))
for x in content:
if x is None:
h.update(','.encode('utf-8')) # depends on [control=['if'], data=[]]
else:
h.update(x.encode('utf-8'))
h.update(','.encode('utf-8')) # depends on [control=['for'], data=['x']]
if date is None:
date = datetime.datetime(1970, 1, 1) # depends on [control=['if'], data=['date']]
return 'tag:{0},{1}:/{2}'.format(owner, date.strftime('%Y-%m-%d'), h.hexdigest()) |
def html_to_dom(html, default_encoding=DEFAULT_ENCODING, encoding=None, errors=DEFAULT_ENC_ERRORS):
"""Converts HTML to DOM."""
if isinstance(html, unicode):
decoded_html = html
# encode HTML for case it's XML with encoding declaration
forced_encoding = encoding if encoding else default_encoding
html = html.encode(forced_encoding, errors)
else:
decoded_html = decode_html(html, default_encoding, encoding, errors)
try:
dom = lxml.html.fromstring(decoded_html, parser=lxml.html.HTMLParser())
except ValueError:
# Unicode strings with encoding declaration are not supported.
# for XHTML files with encoding declaration, use the declared encoding
dom = lxml.html.fromstring(html, parser=lxml.html.HTMLParser())
return dom | def function[html_to_dom, parameter[html, default_encoding, encoding, errors]]:
constant[Converts HTML to DOM.]
if call[name[isinstance], parameter[name[html], name[unicode]]] begin[:]
variable[decoded_html] assign[=] name[html]
variable[forced_encoding] assign[=] <ast.IfExp object at 0x7da18ede7100>
variable[html] assign[=] call[name[html].encode, parameter[name[forced_encoding], name[errors]]]
<ast.Try object at 0x7da18ede7ee0>
return[name[dom]] | keyword[def] identifier[html_to_dom] ( identifier[html] , identifier[default_encoding] = identifier[DEFAULT_ENCODING] , identifier[encoding] = keyword[None] , identifier[errors] = identifier[DEFAULT_ENC_ERRORS] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[html] , identifier[unicode] ):
identifier[decoded_html] = identifier[html]
identifier[forced_encoding] = identifier[encoding] keyword[if] identifier[encoding] keyword[else] identifier[default_encoding]
identifier[html] = identifier[html] . identifier[encode] ( identifier[forced_encoding] , identifier[errors] )
keyword[else] :
identifier[decoded_html] = identifier[decode_html] ( identifier[html] , identifier[default_encoding] , identifier[encoding] , identifier[errors] )
keyword[try] :
identifier[dom] = identifier[lxml] . identifier[html] . identifier[fromstring] ( identifier[decoded_html] , identifier[parser] = identifier[lxml] . identifier[html] . identifier[HTMLParser] ())
keyword[except] identifier[ValueError] :
identifier[dom] = identifier[lxml] . identifier[html] . identifier[fromstring] ( identifier[html] , identifier[parser] = identifier[lxml] . identifier[html] . identifier[HTMLParser] ())
keyword[return] identifier[dom] | def html_to_dom(html, default_encoding=DEFAULT_ENCODING, encoding=None, errors=DEFAULT_ENC_ERRORS):
"""Converts HTML to DOM."""
if isinstance(html, unicode):
decoded_html = html
# encode HTML for case it's XML with encoding declaration
forced_encoding = encoding if encoding else default_encoding
html = html.encode(forced_encoding, errors) # depends on [control=['if'], data=[]]
else:
decoded_html = decode_html(html, default_encoding, encoding, errors)
try:
dom = lxml.html.fromstring(decoded_html, parser=lxml.html.HTMLParser()) # depends on [control=['try'], data=[]]
except ValueError:
# Unicode strings with encoding declaration are not supported.
# for XHTML files with encoding declaration, use the declared encoding
dom = lxml.html.fromstring(html, parser=lxml.html.HTMLParser()) # depends on [control=['except'], data=[]]
return dom |
def _convert_copy(self, fc):
"""Convert a FileCopyCommand into a new FileCommand.
:return: None if the copy is being ignored, otherwise a
new FileCommand based on the whether the source and destination
paths are inside or outside of the interesting locations.
"""
src = fc.src_path
dest = fc.dest_path
keep_src = self._path_to_be_kept(src)
keep_dest = self._path_to_be_kept(dest)
if keep_src and keep_dest:
fc.src_path = self._adjust_for_new_root(src)
fc.dest_path = self._adjust_for_new_root(dest)
return fc
elif keep_src:
# The file has been copied to a non-interesting location.
# Ignore it!
return None
elif keep_dest:
# The file has been copied into an interesting location
# We really ought to add it but we don't currently buffer
# the contents of all previous files and probably never want
# to. Maybe fast-import-info needs to be extended to
# remember all copies and a config file can be passed
# into here ala fast-import?
self.warning("cannot turn copy of %s into an add of %s yet" %
(src, dest))
return None | def function[_convert_copy, parameter[self, fc]]:
constant[Convert a FileCopyCommand into a new FileCommand.
:return: None if the copy is being ignored, otherwise a
new FileCommand based on the whether the source and destination
paths are inside or outside of the interesting locations.
]
variable[src] assign[=] name[fc].src_path
variable[dest] assign[=] name[fc].dest_path
variable[keep_src] assign[=] call[name[self]._path_to_be_kept, parameter[name[src]]]
variable[keep_dest] assign[=] call[name[self]._path_to_be_kept, parameter[name[dest]]]
if <ast.BoolOp object at 0x7da1b0913ac0> begin[:]
name[fc].src_path assign[=] call[name[self]._adjust_for_new_root, parameter[name[src]]]
name[fc].dest_path assign[=] call[name[self]._adjust_for_new_root, parameter[name[dest]]]
return[name[fc]]
return[constant[None]] | keyword[def] identifier[_convert_copy] ( identifier[self] , identifier[fc] ):
literal[string]
identifier[src] = identifier[fc] . identifier[src_path]
identifier[dest] = identifier[fc] . identifier[dest_path]
identifier[keep_src] = identifier[self] . identifier[_path_to_be_kept] ( identifier[src] )
identifier[keep_dest] = identifier[self] . identifier[_path_to_be_kept] ( identifier[dest] )
keyword[if] identifier[keep_src] keyword[and] identifier[keep_dest] :
identifier[fc] . identifier[src_path] = identifier[self] . identifier[_adjust_for_new_root] ( identifier[src] )
identifier[fc] . identifier[dest_path] = identifier[self] . identifier[_adjust_for_new_root] ( identifier[dest] )
keyword[return] identifier[fc]
keyword[elif] identifier[keep_src] :
keyword[return] keyword[None]
keyword[elif] identifier[keep_dest] :
identifier[self] . identifier[warning] ( literal[string] %
( identifier[src] , identifier[dest] ))
keyword[return] keyword[None] | def _convert_copy(self, fc):
"""Convert a FileCopyCommand into a new FileCommand.
:return: None if the copy is being ignored, otherwise a
new FileCommand based on the whether the source and destination
paths are inside or outside of the interesting locations.
"""
src = fc.src_path
dest = fc.dest_path
keep_src = self._path_to_be_kept(src)
keep_dest = self._path_to_be_kept(dest)
if keep_src and keep_dest:
fc.src_path = self._adjust_for_new_root(src)
fc.dest_path = self._adjust_for_new_root(dest)
return fc # depends on [control=['if'], data=[]]
elif keep_src:
# The file has been copied to a non-interesting location.
# Ignore it!
return None # depends on [control=['if'], data=[]]
elif keep_dest:
# The file has been copied into an interesting location
# We really ought to add it but we don't currently buffer
# the contents of all previous files and probably never want
# to. Maybe fast-import-info needs to be extended to
# remember all copies and a config file can be passed
# into here ala fast-import?
self.warning('cannot turn copy of %s into an add of %s yet' % (src, dest)) # depends on [control=['if'], data=[]]
return None |
def hidden_announcements(self, user):
"""Get a list of announcements marked as hidden for a given user (usually request.user).
These are all announcements visible to the user -- they have just decided to
hide them.
"""
ids = user.announcements_hidden.all().values_list("announcement__id")
return Announcement.objects.filter(id__in=ids) | def function[hidden_announcements, parameter[self, user]]:
constant[Get a list of announcements marked as hidden for a given user (usually request.user).
These are all announcements visible to the user -- they have just decided to
hide them.
]
variable[ids] assign[=] call[call[name[user].announcements_hidden.all, parameter[]].values_list, parameter[constant[announcement__id]]]
return[call[name[Announcement].objects.filter, parameter[]]] | keyword[def] identifier[hidden_announcements] ( identifier[self] , identifier[user] ):
literal[string]
identifier[ids] = identifier[user] . identifier[announcements_hidden] . identifier[all] (). identifier[values_list] ( literal[string] )
keyword[return] identifier[Announcement] . identifier[objects] . identifier[filter] ( identifier[id__in] = identifier[ids] ) | def hidden_announcements(self, user):
"""Get a list of announcements marked as hidden for a given user (usually request.user).
These are all announcements visible to the user -- they have just decided to
hide them.
"""
ids = user.announcements_hidden.all().values_list('announcement__id')
return Announcement.objects.filter(id__in=ids) |
def scrypt_mcf(password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,
prefix=SCRYPT_MCF_PREFIX_DEFAULT):
"""Derives a Modular Crypt Format hash using the scrypt KDF
Parameter space is smaller than for scrypt():
N must be a power of two larger than 1 but no larger than 2 ** 31
r and p must be positive numbers between 1 and 255
Salt must be a byte string 1-16 bytes long.
If no salt is given, a random salt of 128+ bits is used. (Recommended.)
"""
if isinstance(password, unicode):
password = password.encode('utf8')
elif not isinstance(password, bytes):
raise TypeError('password must be a unicode or byte string')
if N < 2 or (N & (N - 1)):
raise ValueError('scrypt N must be a power of 2 greater than 1')
if p > 255 or p < 1:
raise ValueError('scrypt_mcf p out of range [1,255]')
if N > 2**31:
raise ValueError('scrypt_mcf N out of range [2,2**31]')
if (salt is not None or r != 8 or (p & (p - 1)) or (N*p <= 512) or
prefix not in (SCRYPT_MCF_PREFIX_7, SCRYPT_MCF_PREFIX_s1,
SCRYPT_MCF_PREFIX_ANY) or
_scrypt_ll):
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix)
s = next(i for i in range(1, 32) if 2**i == N)
t = next(i for i in range(0, 8) if 2**i == p)
m = 2**(10 + s)
o = 2**(5 + t + s)
mcf = ctypes.create_string_buffer(102)
if _scrypt_str(mcf, password, len(password), o, m) != 0:
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix)
if prefix in (SCRYPT_MCF_PREFIX_7, SCRYPT_MCF_PREFIX_ANY):
return mcf.raw.strip(b'\0')
_N, _r, _p, salt, hash, olen = mcf_mod._scrypt_mcf_decode_7(mcf.raw[:-1])
assert _N == N and _r == r and _p == p, (_N, _r, _p, N, r, p, o, m)
return mcf_mod._scrypt_mcf_encode_s1(N, r, p, salt, hash) | def function[scrypt_mcf, parameter[password, salt, N, r, p, prefix]]:
constant[Derives a Modular Crypt Format hash using the scrypt KDF
Parameter space is smaller than for scrypt():
N must be a power of two larger than 1 but no larger than 2 ** 31
r and p must be positive numbers between 1 and 255
Salt must be a byte string 1-16 bytes long.
If no salt is given, a random salt of 128+ bits is used. (Recommended.)
]
if call[name[isinstance], parameter[name[password], name[unicode]]] begin[:]
variable[password] assign[=] call[name[password].encode, parameter[constant[utf8]]]
if <ast.BoolOp object at 0x7da20c7c8a30> begin[:]
<ast.Raise object at 0x7da20c794f10>
if <ast.BoolOp object at 0x7da20c795c60> begin[:]
<ast.Raise object at 0x7da20c795330>
if compare[name[N] greater[>] binary_operation[constant[2] ** constant[31]]] begin[:]
<ast.Raise object at 0x7da20c794b50>
if <ast.BoolOp object at 0x7da20c7950c0> begin[:]
return[call[name[mcf_mod].scrypt_mcf, parameter[name[scrypt], name[password], name[salt], name[N], name[r], name[p], name[prefix]]]]
variable[s] assign[=] call[name[next], parameter[<ast.GeneratorExp object at 0x7da20c795f90>]]
variable[t] assign[=] call[name[next], parameter[<ast.GeneratorExp object at 0x7da20c794370>]]
variable[m] assign[=] binary_operation[constant[2] ** binary_operation[constant[10] + name[s]]]
variable[o] assign[=] binary_operation[constant[2] ** binary_operation[binary_operation[constant[5] + name[t]] + name[s]]]
variable[mcf] assign[=] call[name[ctypes].create_string_buffer, parameter[constant[102]]]
if compare[call[name[_scrypt_str], parameter[name[mcf], name[password], call[name[len], parameter[name[password]]], name[o], name[m]]] not_equal[!=] constant[0]] begin[:]
return[call[name[mcf_mod].scrypt_mcf, parameter[name[scrypt], name[password], name[salt], name[N], name[r], name[p], name[prefix]]]]
if compare[name[prefix] in tuple[[<ast.Name object at 0x7da1b23457b0>, <ast.Name object at 0x7da1b2344d90>]]] begin[:]
return[call[name[mcf].raw.strip, parameter[constant[b'\x00']]]]
<ast.Tuple object at 0x7da1b2347cd0> assign[=] call[name[mcf_mod]._scrypt_mcf_decode_7, parameter[call[name[mcf].raw][<ast.Slice object at 0x7da1b2347e80>]]]
assert[<ast.BoolOp object at 0x7da1b2344760>]
return[call[name[mcf_mod]._scrypt_mcf_encode_s1, parameter[name[N], name[r], name[p], name[salt], name[hash]]]] | keyword[def] identifier[scrypt_mcf] ( identifier[password] , identifier[salt] = keyword[None] , identifier[N] = identifier[SCRYPT_N] , identifier[r] = identifier[SCRYPT_r] , identifier[p] = identifier[SCRYPT_p] ,
identifier[prefix] = identifier[SCRYPT_MCF_PREFIX_DEFAULT] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[password] , identifier[unicode] ):
identifier[password] = identifier[password] . identifier[encode] ( literal[string] )
keyword[elif] keyword[not] identifier[isinstance] ( identifier[password] , identifier[bytes] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[N] < literal[int] keyword[or] ( identifier[N] &( identifier[N] - literal[int] )):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[p] > literal[int] keyword[or] identifier[p] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[N] > literal[int] ** literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] ( identifier[salt] keyword[is] keyword[not] keyword[None] keyword[or] identifier[r] != literal[int] keyword[or] ( identifier[p] &( identifier[p] - literal[int] )) keyword[or] ( identifier[N] * identifier[p] <= literal[int] ) keyword[or]
identifier[prefix] keyword[not] keyword[in] ( identifier[SCRYPT_MCF_PREFIX_7] , identifier[SCRYPT_MCF_PREFIX_s1] ,
identifier[SCRYPT_MCF_PREFIX_ANY] ) keyword[or]
identifier[_scrypt_ll] ):
keyword[return] identifier[mcf_mod] . identifier[scrypt_mcf] ( identifier[scrypt] , identifier[password] , identifier[salt] , identifier[N] , identifier[r] , identifier[p] , identifier[prefix] )
identifier[s] = identifier[next] ( identifier[i] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] ) keyword[if] literal[int] ** identifier[i] == identifier[N] )
identifier[t] = identifier[next] ( identifier[i] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] ) keyword[if] literal[int] ** identifier[i] == identifier[p] )
identifier[m] = literal[int] **( literal[int] + identifier[s] )
identifier[o] = literal[int] **( literal[int] + identifier[t] + identifier[s] )
identifier[mcf] = identifier[ctypes] . identifier[create_string_buffer] ( literal[int] )
keyword[if] identifier[_scrypt_str] ( identifier[mcf] , identifier[password] , identifier[len] ( identifier[password] ), identifier[o] , identifier[m] )!= literal[int] :
keyword[return] identifier[mcf_mod] . identifier[scrypt_mcf] ( identifier[scrypt] , identifier[password] , identifier[salt] , identifier[N] , identifier[r] , identifier[p] , identifier[prefix] )
keyword[if] identifier[prefix] keyword[in] ( identifier[SCRYPT_MCF_PREFIX_7] , identifier[SCRYPT_MCF_PREFIX_ANY] ):
keyword[return] identifier[mcf] . identifier[raw] . identifier[strip] ( literal[string] )
identifier[_N] , identifier[_r] , identifier[_p] , identifier[salt] , identifier[hash] , identifier[olen] = identifier[mcf_mod] . identifier[_scrypt_mcf_decode_7] ( identifier[mcf] . identifier[raw] [:- literal[int] ])
keyword[assert] identifier[_N] == identifier[N] keyword[and] identifier[_r] == identifier[r] keyword[and] identifier[_p] == identifier[p] ,( identifier[_N] , identifier[_r] , identifier[_p] , identifier[N] , identifier[r] , identifier[p] , identifier[o] , identifier[m] )
keyword[return] identifier[mcf_mod] . identifier[_scrypt_mcf_encode_s1] ( identifier[N] , identifier[r] , identifier[p] , identifier[salt] , identifier[hash] ) | def scrypt_mcf(password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, prefix=SCRYPT_MCF_PREFIX_DEFAULT):
"""Derives a Modular Crypt Format hash using the scrypt KDF
Parameter space is smaller than for scrypt():
N must be a power of two larger than 1 but no larger than 2 ** 31
r and p must be positive numbers between 1 and 255
Salt must be a byte string 1-16 bytes long.
If no salt is given, a random salt of 128+ bits is used. (Recommended.)
"""
if isinstance(password, unicode):
password = password.encode('utf8') # depends on [control=['if'], data=[]]
elif not isinstance(password, bytes):
raise TypeError('password must be a unicode or byte string') # depends on [control=['if'], data=[]]
if N < 2 or N & N - 1:
raise ValueError('scrypt N must be a power of 2 greater than 1') # depends on [control=['if'], data=[]]
if p > 255 or p < 1:
raise ValueError('scrypt_mcf p out of range [1,255]') # depends on [control=['if'], data=[]]
if N > 2 ** 31:
raise ValueError('scrypt_mcf N out of range [2,2**31]') # depends on [control=['if'], data=[]]
if salt is not None or r != 8 or p & p - 1 or (N * p <= 512) or (prefix not in (SCRYPT_MCF_PREFIX_7, SCRYPT_MCF_PREFIX_s1, SCRYPT_MCF_PREFIX_ANY)) or _scrypt_ll:
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix) # depends on [control=['if'], data=[]]
s = next((i for i in range(1, 32) if 2 ** i == N))
t = next((i for i in range(0, 8) if 2 ** i == p))
m = 2 ** (10 + s)
o = 2 ** (5 + t + s)
mcf = ctypes.create_string_buffer(102)
if _scrypt_str(mcf, password, len(password), o, m) != 0:
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix) # depends on [control=['if'], data=[]]
if prefix in (SCRYPT_MCF_PREFIX_7, SCRYPT_MCF_PREFIX_ANY):
return mcf.raw.strip(b'\x00') # depends on [control=['if'], data=[]]
(_N, _r, _p, salt, hash, olen) = mcf_mod._scrypt_mcf_decode_7(mcf.raw[:-1])
assert _N == N and _r == r and (_p == p), (_N, _r, _p, N, r, p, o, m)
return mcf_mod._scrypt_mcf_encode_s1(N, r, p, salt, hash) |
def filter_solvers(solvers, requirements):
"""Yield solvers that fullfil the requirements."""
for solver in solvers:
for req, value in iteritems(requirements):
if (req in ('integer', 'quadratic', 'rational', 'name') and
(req not in solver or solver[req] != value)):
break
else:
yield solver | def function[filter_solvers, parameter[solvers, requirements]]:
constant[Yield solvers that fullfil the requirements.]
for taget[name[solver]] in starred[name[solvers]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da20c76d3f0>, <ast.Name object at 0x7da20c76d3c0>]]] in starred[call[name[iteritems], parameter[name[requirements]]]] begin[:]
if <ast.BoolOp object at 0x7da20c76c3d0> begin[:]
break | keyword[def] identifier[filter_solvers] ( identifier[solvers] , identifier[requirements] ):
literal[string]
keyword[for] identifier[solver] keyword[in] identifier[solvers] :
keyword[for] identifier[req] , identifier[value] keyword[in] identifier[iteritems] ( identifier[requirements] ):
keyword[if] ( identifier[req] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ) keyword[and]
( identifier[req] keyword[not] keyword[in] identifier[solver] keyword[or] identifier[solver] [ identifier[req] ]!= identifier[value] )):
keyword[break]
keyword[else] :
keyword[yield] identifier[solver] | def filter_solvers(solvers, requirements):
"""Yield solvers that fullfil the requirements."""
for solver in solvers:
for (req, value) in iteritems(requirements):
if req in ('integer', 'quadratic', 'rational', 'name') and (req not in solver or solver[req] != value):
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
else:
yield solver # depends on [control=['for'], data=['solver']] |
def complete_query(
self,
name,
query,
page_size,
language_codes=None,
company_name=None,
scope=None,
type_=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Completes the specified prefix with keyword suggestions.
Intended for use by a job search auto-complete search box.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.CompletionClient()
>>>
>>> name = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `query`:
>>> query = ''
>>>
>>> # TODO: Initialize `page_size`:
>>> page_size = 0
>>>
>>> response = client.complete_query(name, query, page_size)
Args:
name (str): Required.
Resource name of project the completion is performed within.
The format is "projects/{project\_id}", for example,
"projects/api-test-project".
query (str): Required.
The query used to generate suggestions.
The maximum number of allowed characters is 255.
page_size (int): Required.
Completion result count.
The maximum allowed page size is 10.
language_codes (list[str]): Optional.
The list of languages of the query. This is the BCP-47 language code,
such as "en-US" or "sr-Latn". For more information, see `Tags for
Identifying Languages <https://tools.ietf.org/html/bcp47>`__.
For ``CompletionType.JOB_TITLE`` type, only open jobs with the same
``language_codes`` are returned.
For ``CompletionType.COMPANY_NAME`` type, only companies having open
jobs with the same ``language_codes`` are returned.
For ``CompletionType.COMBINED`` type, only open jobs with the same
``language_codes`` or companies having open jobs with the same
``language_codes`` are returned.
The maximum number of allowed characters is 255.
company_name (str): Optional.
If provided, restricts completion to specified company.
The format is "projects/{project\_id}/companies/{company\_id}", for
example, "projects/api-test-project/companies/foo".
scope (~google.cloud.talent_v4beta1.types.CompletionScope): Optional.
The scope of the completion. The defaults is ``CompletionScope.PUBLIC``.
type_ (~google.cloud.talent_v4beta1.types.CompletionType): Optional.
The completion topic. The default is ``CompletionType.COMBINED``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.talent_v4beta1.types.CompleteQueryResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "complete_query" not in self._inner_api_calls:
self._inner_api_calls[
"complete_query"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.complete_query,
default_retry=self._method_configs["CompleteQuery"].retry,
default_timeout=self._method_configs["CompleteQuery"].timeout,
client_info=self._client_info,
)
request = completion_service_pb2.CompleteQueryRequest(
name=name,
query=query,
page_size=page_size,
language_codes=language_codes,
company_name=company_name,
scope=scope,
type=type_,
)
return self._inner_api_calls["complete_query"](
request, retry=retry, timeout=timeout, metadata=metadata
) | def function[complete_query, parameter[self, name, query, page_size, language_codes, company_name, scope, type_, retry, timeout, metadata]]:
constant[
Completes the specified prefix with keyword suggestions.
Intended for use by a job search auto-complete search box.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.CompletionClient()
>>>
>>> name = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `query`:
>>> query = ''
>>>
>>> # TODO: Initialize `page_size`:
>>> page_size = 0
>>>
>>> response = client.complete_query(name, query, page_size)
Args:
name (str): Required.
Resource name of project the completion is performed within.
The format is "projects/{project\_id}", for example,
"projects/api-test-project".
query (str): Required.
The query used to generate suggestions.
The maximum number of allowed characters is 255.
page_size (int): Required.
Completion result count.
The maximum allowed page size is 10.
language_codes (list[str]): Optional.
The list of languages of the query. This is the BCP-47 language code,
such as "en-US" or "sr-Latn". For more information, see `Tags for
Identifying Languages <https://tools.ietf.org/html/bcp47>`__.
For ``CompletionType.JOB_TITLE`` type, only open jobs with the same
``language_codes`` are returned.
For ``CompletionType.COMPANY_NAME`` type, only companies having open
jobs with the same ``language_codes`` are returned.
For ``CompletionType.COMBINED`` type, only open jobs with the same
``language_codes`` or companies having open jobs with the same
``language_codes`` are returned.
The maximum number of allowed characters is 255.
company_name (str): Optional.
If provided, restricts completion to specified company.
The format is "projects/{project\_id}/companies/{company\_id}", for
example, "projects/api-test-project/companies/foo".
scope (~google.cloud.talent_v4beta1.types.CompletionScope): Optional.
The scope of the completion. The defaults is ``CompletionScope.PUBLIC``.
type_ (~google.cloud.talent_v4beta1.types.CompletionType): Optional.
The completion topic. The default is ``CompletionType.COMBINED``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.talent_v4beta1.types.CompleteQueryResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
]
if compare[constant[complete_query] <ast.NotIn object at 0x7da2590d7190> name[self]._inner_api_calls] begin[:]
call[name[self]._inner_api_calls][constant[complete_query]] assign[=] call[name[google].api_core.gapic_v1.method.wrap_method, parameter[name[self].transport.complete_query]]
variable[request] assign[=] call[name[completion_service_pb2].CompleteQueryRequest, parameter[]]
return[call[call[name[self]._inner_api_calls][constant[complete_query]], parameter[name[request]]]] | keyword[def] identifier[complete_query] (
identifier[self] ,
identifier[name] ,
identifier[query] ,
identifier[page_size] ,
identifier[language_codes] = keyword[None] ,
identifier[company_name] = keyword[None] ,
identifier[scope] = keyword[None] ,
identifier[type_] = keyword[None] ,
identifier[retry] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] ,
identifier[timeout] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] ,
identifier[metadata] = keyword[None] ,
):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_inner_api_calls] :
identifier[self] . identifier[_inner_api_calls] [
literal[string]
]= identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[wrap_method] (
identifier[self] . identifier[transport] . identifier[complete_query] ,
identifier[default_retry] = identifier[self] . identifier[_method_configs] [ literal[string] ]. identifier[retry] ,
identifier[default_timeout] = identifier[self] . identifier[_method_configs] [ literal[string] ]. identifier[timeout] ,
identifier[client_info] = identifier[self] . identifier[_client_info] ,
)
identifier[request] = identifier[completion_service_pb2] . identifier[CompleteQueryRequest] (
identifier[name] = identifier[name] ,
identifier[query] = identifier[query] ,
identifier[page_size] = identifier[page_size] ,
identifier[language_codes] = identifier[language_codes] ,
identifier[company_name] = identifier[company_name] ,
identifier[scope] = identifier[scope] ,
identifier[type] = identifier[type_] ,
)
keyword[return] identifier[self] . identifier[_inner_api_calls] [ literal[string] ](
identifier[request] , identifier[retry] = identifier[retry] , identifier[timeout] = identifier[timeout] , identifier[metadata] = identifier[metadata]
) | def complete_query(self, name, query, page_size, language_codes=None, company_name=None, scope=None, type_=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None):
"""
Completes the specified prefix with keyword suggestions.
Intended for use by a job search auto-complete search box.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.CompletionClient()
>>>
>>> name = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `query`:
>>> query = ''
>>>
>>> # TODO: Initialize `page_size`:
>>> page_size = 0
>>>
>>> response = client.complete_query(name, query, page_size)
Args:
name (str): Required.
Resource name of project the completion is performed within.
The format is "projects/{project\\_id}", for example,
"projects/api-test-project".
query (str): Required.
The query used to generate suggestions.
The maximum number of allowed characters is 255.
page_size (int): Required.
Completion result count.
The maximum allowed page size is 10.
language_codes (list[str]): Optional.
The list of languages of the query. This is the BCP-47 language code,
such as "en-US" or "sr-Latn". For more information, see `Tags for
Identifying Languages <https://tools.ietf.org/html/bcp47>`__.
For ``CompletionType.JOB_TITLE`` type, only open jobs with the same
``language_codes`` are returned.
For ``CompletionType.COMPANY_NAME`` type, only companies having open
jobs with the same ``language_codes`` are returned.
For ``CompletionType.COMBINED`` type, only open jobs with the same
``language_codes`` or companies having open jobs with the same
``language_codes`` are returned.
The maximum number of allowed characters is 255.
company_name (str): Optional.
If provided, restricts completion to specified company.
The format is "projects/{project\\_id}/companies/{company\\_id}", for
example, "projects/api-test-project/companies/foo".
scope (~google.cloud.talent_v4beta1.types.CompletionScope): Optional.
The scope of the completion. The defaults is ``CompletionScope.PUBLIC``.
type_ (~google.cloud.talent_v4beta1.types.CompletionType): Optional.
The completion topic. The default is ``CompletionType.COMBINED``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.talent_v4beta1.types.CompleteQueryResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'complete_query' not in self._inner_api_calls:
self._inner_api_calls['complete_query'] = google.api_core.gapic_v1.method.wrap_method(self.transport.complete_query, default_retry=self._method_configs['CompleteQuery'].retry, default_timeout=self._method_configs['CompleteQuery'].timeout, client_info=self._client_info) # depends on [control=['if'], data=[]]
request = completion_service_pb2.CompleteQueryRequest(name=name, query=query, page_size=page_size, language_codes=language_codes, company_name=company_name, scope=scope, type=type_)
return self._inner_api_calls['complete_query'](request, retry=retry, timeout=timeout, metadata=metadata) |
def set_open_viewing(self, open_viewing):
"""
Set to True to only search for properties that have upcoming 'open for viewing' dates.
:param open_viewing:
:return:
"""
if open_viewing:
self._open_viewing = open_viewing
self._query_params += str(QueryParam.OPEN_VIEWING) | def function[set_open_viewing, parameter[self, open_viewing]]:
constant[
Set to True to only search for properties that have upcoming 'open for viewing' dates.
:param open_viewing:
:return:
]
if name[open_viewing] begin[:]
name[self]._open_viewing assign[=] name[open_viewing]
<ast.AugAssign object at 0x7da1b0579ff0> | keyword[def] identifier[set_open_viewing] ( identifier[self] , identifier[open_viewing] ):
literal[string]
keyword[if] identifier[open_viewing] :
identifier[self] . identifier[_open_viewing] = identifier[open_viewing]
identifier[self] . identifier[_query_params] += identifier[str] ( identifier[QueryParam] . identifier[OPEN_VIEWING] ) | def set_open_viewing(self, open_viewing):
"""
Set to True to only search for properties that have upcoming 'open for viewing' dates.
:param open_viewing:
:return:
"""
if open_viewing:
self._open_viewing = open_viewing
self._query_params += str(QueryParam.OPEN_VIEWING) # depends on [control=['if'], data=[]] |
def my_pick_non_system_keyspace(self):
"""
Find a keyspace in the cluster which is not 'system', for the purpose
of getting a valid ring view. Can't use 'system' or null.
"""
d = self.my_describe_keyspaces()
def pick_non_system(klist):
for k in klist:
if k.name not in SYSTEM_KEYSPACES:
return k.name
err = NoKeyspacesAvailable("Can't gather information about the "
"Cassandra ring; no non-system "
"keyspaces available")
warn(err)
raise err
d.addCallback(pick_non_system)
return d | def function[my_pick_non_system_keyspace, parameter[self]]:
constant[
Find a keyspace in the cluster which is not 'system', for the purpose
of getting a valid ring view. Can't use 'system' or null.
]
variable[d] assign[=] call[name[self].my_describe_keyspaces, parameter[]]
def function[pick_non_system, parameter[klist]]:
for taget[name[k]] in starred[name[klist]] begin[:]
if compare[name[k].name <ast.NotIn object at 0x7da2590d7190> name[SYSTEM_KEYSPACES]] begin[:]
return[name[k].name]
variable[err] assign[=] call[name[NoKeyspacesAvailable], parameter[constant[Can't gather information about the Cassandra ring; no non-system keyspaces available]]]
call[name[warn], parameter[name[err]]]
<ast.Raise object at 0x7da1b26ae560>
call[name[d].addCallback, parameter[name[pick_non_system]]]
return[name[d]] | keyword[def] identifier[my_pick_non_system_keyspace] ( identifier[self] ):
literal[string]
identifier[d] = identifier[self] . identifier[my_describe_keyspaces] ()
keyword[def] identifier[pick_non_system] ( identifier[klist] ):
keyword[for] identifier[k] keyword[in] identifier[klist] :
keyword[if] identifier[k] . identifier[name] keyword[not] keyword[in] identifier[SYSTEM_KEYSPACES] :
keyword[return] identifier[k] . identifier[name]
identifier[err] = identifier[NoKeyspacesAvailable] ( literal[string]
literal[string]
literal[string] )
identifier[warn] ( identifier[err] )
keyword[raise] identifier[err]
identifier[d] . identifier[addCallback] ( identifier[pick_non_system] )
keyword[return] identifier[d] | def my_pick_non_system_keyspace(self):
"""
Find a keyspace in the cluster which is not 'system', for the purpose
of getting a valid ring view. Can't use 'system' or null.
"""
d = self.my_describe_keyspaces()
def pick_non_system(klist):
for k in klist:
if k.name not in SYSTEM_KEYSPACES:
return k.name # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['k']]
err = NoKeyspacesAvailable("Can't gather information about the Cassandra ring; no non-system keyspaces available")
warn(err)
raise err
d.addCallback(pick_non_system)
return d |
def add_chromedriver_to_path():
"""
Appends the directory of the chromedriver binary file to PATH.
"""
chromedriver_dir = os.path.abspath(os.path.dirname(__file__))
if 'PATH' not in os.environ:
os.environ['PATH'] = chromedriver_dir
elif chromedriver_dir not in os.environ['PATH']:
os.environ['PATH'] += utils.get_variable_separator()+chromedriver_dir | def function[add_chromedriver_to_path, parameter[]]:
constant[
Appends the directory of the chromedriver binary file to PATH.
]
variable[chromedriver_dir] assign[=] call[name[os].path.abspath, parameter[call[name[os].path.dirname, parameter[name[__file__]]]]]
if compare[constant[PATH] <ast.NotIn object at 0x7da2590d7190> name[os].environ] begin[:]
call[name[os].environ][constant[PATH]] assign[=] name[chromedriver_dir] | keyword[def] identifier[add_chromedriver_to_path] ():
literal[string]
identifier[chromedriver_dir] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] ))
keyword[if] literal[string] keyword[not] keyword[in] identifier[os] . identifier[environ] :
identifier[os] . identifier[environ] [ literal[string] ]= identifier[chromedriver_dir]
keyword[elif] identifier[chromedriver_dir] keyword[not] keyword[in] identifier[os] . identifier[environ] [ literal[string] ]:
identifier[os] . identifier[environ] [ literal[string] ]+= identifier[utils] . identifier[get_variable_separator] ()+ identifier[chromedriver_dir] | def add_chromedriver_to_path():
"""
Appends the directory of the chromedriver binary file to PATH.
"""
chromedriver_dir = os.path.abspath(os.path.dirname(__file__))
if 'PATH' not in os.environ:
os.environ['PATH'] = chromedriver_dir # depends on [control=['if'], data=[]]
elif chromedriver_dir not in os.environ['PATH']:
os.environ['PATH'] += utils.get_variable_separator() + chromedriver_dir # depends on [control=['if'], data=['chromedriver_dir']] |
def close(self):
"""Close the connection."""
if self.sock:
self.sock.close()
self.sock = 0
self.eof = 1 | def function[close, parameter[self]]:
constant[Close the connection.]
if name[self].sock begin[:]
call[name[self].sock.close, parameter[]]
name[self].sock assign[=] constant[0]
name[self].eof assign[=] constant[1] | keyword[def] identifier[close] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[sock] :
identifier[self] . identifier[sock] . identifier[close] ()
identifier[self] . identifier[sock] = literal[int]
identifier[self] . identifier[eof] = literal[int] | def close(self):
"""Close the connection."""
if self.sock:
self.sock.close() # depends on [control=['if'], data=[]]
self.sock = 0
self.eof = 1 |
def get_namespace(self, namespace, lowercase=True, trim_namespace=True):
"""Returns a dictionary containing a subset of configuration options
that match the specified namespace/prefix. Example usage:
app.config['IMAGE_STORE_TYPE']='fs'
app.config['IMAGE_STORE_PATH']='/var/app/images'
app.config['IMAGE_STORE_BASE_URL']='http://img.website.com'
The result dictionary `image_store` would look like:
{
'type': 'fs',
'path': '/var/app/images',
'base_url':'http://image.website.com'
}
This is often useful when configuration options map directly to keyword arguments in functions or class constructors.
:param namespace: a configuration namespace
:param lowercase: a flag indicating if the keys of the resulting
dictionary should be lowercase
:param trim_namespace: a flag indicating if the keys of the resulting
dictionary should not include the namespace
:return: a dict instance
"""
rv = {}
for key, value in six.iteritems(self):
if not key.startswith(namespace):
continue
if trim_namespace:
key = key[len(namespace):]
else:
key = key
if lowercase:
key = key.lower()
rv[key] = value
return rv | def function[get_namespace, parameter[self, namespace, lowercase, trim_namespace]]:
constant[Returns a dictionary containing a subset of configuration options
that match the specified namespace/prefix. Example usage:
app.config['IMAGE_STORE_TYPE']='fs'
app.config['IMAGE_STORE_PATH']='/var/app/images'
app.config['IMAGE_STORE_BASE_URL']='http://img.website.com'
The result dictionary `image_store` would look like:
{
'type': 'fs',
'path': '/var/app/images',
'base_url':'http://image.website.com'
}
This is often useful when configuration options map directly to keyword arguments in functions or class constructors.
:param namespace: a configuration namespace
:param lowercase: a flag indicating if the keys of the resulting
dictionary should be lowercase
:param trim_namespace: a flag indicating if the keys of the resulting
dictionary should not include the namespace
:return: a dict instance
]
variable[rv] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b1e08d90>, <ast.Name object at 0x7da1b1e09030>]]] in starred[call[name[six].iteritems, parameter[name[self]]]] begin[:]
if <ast.UnaryOp object at 0x7da1b1e083d0> begin[:]
continue
if name[trim_namespace] begin[:]
variable[key] assign[=] call[name[key]][<ast.Slice object at 0x7da1b1e08ee0>]
if name[lowercase] begin[:]
variable[key] assign[=] call[name[key].lower, parameter[]]
call[name[rv]][name[key]] assign[=] name[value]
return[name[rv]] | keyword[def] identifier[get_namespace] ( identifier[self] , identifier[namespace] , identifier[lowercase] = keyword[True] , identifier[trim_namespace] = keyword[True] ):
literal[string]
identifier[rv] ={}
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[six] . identifier[iteritems] ( identifier[self] ):
keyword[if] keyword[not] identifier[key] . identifier[startswith] ( identifier[namespace] ):
keyword[continue]
keyword[if] identifier[trim_namespace] :
identifier[key] = identifier[key] [ identifier[len] ( identifier[namespace] ):]
keyword[else] :
identifier[key] = identifier[key]
keyword[if] identifier[lowercase] :
identifier[key] = identifier[key] . identifier[lower] ()
identifier[rv] [ identifier[key] ]= identifier[value]
keyword[return] identifier[rv] | def get_namespace(self, namespace, lowercase=True, trim_namespace=True):
"""Returns a dictionary containing a subset of configuration options
that match the specified namespace/prefix. Example usage:
app.config['IMAGE_STORE_TYPE']='fs'
app.config['IMAGE_STORE_PATH']='/var/app/images'
app.config['IMAGE_STORE_BASE_URL']='http://img.website.com'
The result dictionary `image_store` would look like:
{
'type': 'fs',
'path': '/var/app/images',
'base_url':'http://image.website.com'
}
This is often useful when configuration options map directly to keyword arguments in functions or class constructors.
:param namespace: a configuration namespace
:param lowercase: a flag indicating if the keys of the resulting
dictionary should be lowercase
:param trim_namespace: a flag indicating if the keys of the resulting
dictionary should not include the namespace
:return: a dict instance
"""
rv = {}
for (key, value) in six.iteritems(self):
if not key.startswith(namespace):
continue # depends on [control=['if'], data=[]]
if trim_namespace:
key = key[len(namespace):] # depends on [control=['if'], data=[]]
else:
key = key
if lowercase:
key = key.lower() # depends on [control=['if'], data=[]]
rv[key] = value # depends on [control=['for'], data=[]]
return rv |
def enr_at_fpr(fg_vals, bg_vals, fpr=0.01):
"""
Computes the enrichment at a specific FPR (default 1%).
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
fpr : float, optional
The FPR (between 0.0 and 1.0).
Returns
-------
enrichment : float
The enrichment at the specified FPR.
"""
pos = np.array(fg_vals)
neg = np.array(bg_vals)
s = scoreatpercentile(neg, 100 - fpr * 100)
neg_matches = float(len(neg[neg >= s]))
if neg_matches == 0:
return float("inf")
return len(pos[pos >= s]) / neg_matches * len(neg) / float(len(pos)) | def function[enr_at_fpr, parameter[fg_vals, bg_vals, fpr]]:
constant[
Computes the enrichment at a specific FPR (default 1%).
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
fpr : float, optional
The FPR (between 0.0 and 1.0).
Returns
-------
enrichment : float
The enrichment at the specified FPR.
]
variable[pos] assign[=] call[name[np].array, parameter[name[fg_vals]]]
variable[neg] assign[=] call[name[np].array, parameter[name[bg_vals]]]
variable[s] assign[=] call[name[scoreatpercentile], parameter[name[neg], binary_operation[constant[100] - binary_operation[name[fpr] * constant[100]]]]]
variable[neg_matches] assign[=] call[name[float], parameter[call[name[len], parameter[call[name[neg]][compare[name[neg] greater_or_equal[>=] name[s]]]]]]]
if compare[name[neg_matches] equal[==] constant[0]] begin[:]
return[call[name[float], parameter[constant[inf]]]]
return[binary_operation[binary_operation[binary_operation[call[name[len], parameter[call[name[pos]][compare[name[pos] greater_or_equal[>=] name[s]]]]] / name[neg_matches]] * call[name[len], parameter[name[neg]]]] / call[name[float], parameter[call[name[len], parameter[name[pos]]]]]]] | keyword[def] identifier[enr_at_fpr] ( identifier[fg_vals] , identifier[bg_vals] , identifier[fpr] = literal[int] ):
literal[string]
identifier[pos] = identifier[np] . identifier[array] ( identifier[fg_vals] )
identifier[neg] = identifier[np] . identifier[array] ( identifier[bg_vals] )
identifier[s] = identifier[scoreatpercentile] ( identifier[neg] , literal[int] - identifier[fpr] * literal[int] )
identifier[neg_matches] = identifier[float] ( identifier[len] ( identifier[neg] [ identifier[neg] >= identifier[s] ]))
keyword[if] identifier[neg_matches] == literal[int] :
keyword[return] identifier[float] ( literal[string] )
keyword[return] identifier[len] ( identifier[pos] [ identifier[pos] >= identifier[s] ])/ identifier[neg_matches] * identifier[len] ( identifier[neg] )/ identifier[float] ( identifier[len] ( identifier[pos] )) | def enr_at_fpr(fg_vals, bg_vals, fpr=0.01):
"""
Computes the enrichment at a specific FPR (default 1%).
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
fpr : float, optional
The FPR (between 0.0 and 1.0).
Returns
-------
enrichment : float
The enrichment at the specified FPR.
"""
pos = np.array(fg_vals)
neg = np.array(bg_vals)
s = scoreatpercentile(neg, 100 - fpr * 100)
neg_matches = float(len(neg[neg >= s]))
if neg_matches == 0:
return float('inf') # depends on [control=['if'], data=[]]
return len(pos[pos >= s]) / neg_matches * len(neg) / float(len(pos)) |
def select(self):
"""
Select the current bitmap into this wxDC instance
"""
if sys.platform=='win32':
self.dc.SelectObject(self.bitmap)
self.IsSelected = True | def function[select, parameter[self]]:
constant[
Select the current bitmap into this wxDC instance
]
if compare[name[sys].platform equal[==] constant[win32]] begin[:]
call[name[self].dc.SelectObject, parameter[name[self].bitmap]]
name[self].IsSelected assign[=] constant[True] | keyword[def] identifier[select] ( identifier[self] ):
literal[string]
keyword[if] identifier[sys] . identifier[platform] == literal[string] :
identifier[self] . identifier[dc] . identifier[SelectObject] ( identifier[self] . identifier[bitmap] )
identifier[self] . identifier[IsSelected] = keyword[True] | def select(self):
"""
Select the current bitmap into this wxDC instance
"""
if sys.platform == 'win32':
self.dc.SelectObject(self.bitmap)
self.IsSelected = True # depends on [control=['if'], data=[]] |
def format(self, record):
"""tweaked from source of base"""
try:
record.message = record.getMessage()
except TypeError:
# if error during msg = msg % self.args
if record.args:
if isinstance(record.args, collections.Mapping):
record.message = record.msg.format(**record.args)
else:
record.message = record.msg.format(record.args)
self._fmt = self.getfmt(record.levelname)
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
s = self._fmt.format(**record.__dict__)
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if s[-1:] != '\n':
s += '\n'
try:
s = s + record.exc_text
except UnicodeError:
s = s + record.exc_text.decode(sys.getfilesystemencoding(), 'replace')
return s | def function[format, parameter[self, record]]:
constant[tweaked from source of base]
<ast.Try object at 0x7da1afe71ba0>
name[self]._fmt assign[=] call[name[self].getfmt, parameter[name[record].levelname]]
if call[name[self].usesTime, parameter[]] begin[:]
name[record].asctime assign[=] call[name[self].formatTime, parameter[name[record], name[self].datefmt]]
variable[s] assign[=] call[name[self]._fmt.format, parameter[]]
if name[record].exc_info begin[:]
if <ast.UnaryOp object at 0x7da1afe70cd0> begin[:]
name[record].exc_text assign[=] call[name[self].formatException, parameter[name[record].exc_info]]
if name[record].exc_text begin[:]
if compare[call[name[s]][<ast.Slice object at 0x7da1afe73190>] not_equal[!=] constant[
]] begin[:]
<ast.AugAssign object at 0x7da1afe70070>
<ast.Try object at 0x7da1afe721a0>
return[name[s]] | keyword[def] identifier[format] ( identifier[self] , identifier[record] ):
literal[string]
keyword[try] :
identifier[record] . identifier[message] = identifier[record] . identifier[getMessage] ()
keyword[except] identifier[TypeError] :
keyword[if] identifier[record] . identifier[args] :
keyword[if] identifier[isinstance] ( identifier[record] . identifier[args] , identifier[collections] . identifier[Mapping] ):
identifier[record] . identifier[message] = identifier[record] . identifier[msg] . identifier[format] (** identifier[record] . identifier[args] )
keyword[else] :
identifier[record] . identifier[message] = identifier[record] . identifier[msg] . identifier[format] ( identifier[record] . identifier[args] )
identifier[self] . identifier[_fmt] = identifier[self] . identifier[getfmt] ( identifier[record] . identifier[levelname] )
keyword[if] identifier[self] . identifier[usesTime] ():
identifier[record] . identifier[asctime] = identifier[self] . identifier[formatTime] ( identifier[record] , identifier[self] . identifier[datefmt] )
identifier[s] = identifier[self] . identifier[_fmt] . identifier[format] (** identifier[record] . identifier[__dict__] )
keyword[if] identifier[record] . identifier[exc_info] :
keyword[if] keyword[not] identifier[record] . identifier[exc_text] :
identifier[record] . identifier[exc_text] = identifier[self] . identifier[formatException] ( identifier[record] . identifier[exc_info] )
keyword[if] identifier[record] . identifier[exc_text] :
keyword[if] identifier[s] [- literal[int] :]!= literal[string] :
identifier[s] += literal[string]
keyword[try] :
identifier[s] = identifier[s] + identifier[record] . identifier[exc_text]
keyword[except] identifier[UnicodeError] :
identifier[s] = identifier[s] + identifier[record] . identifier[exc_text] . identifier[decode] ( identifier[sys] . identifier[getfilesystemencoding] (), literal[string] )
keyword[return] identifier[s] | def format(self, record):
"""tweaked from source of base"""
try:
record.message = record.getMessage() # depends on [control=['try'], data=[]]
except TypeError:
# if error during msg = msg % self.args
if record.args:
if isinstance(record.args, collections.Mapping):
record.message = record.msg.format(**record.args) # depends on [control=['if'], data=[]]
else:
record.message = record.msg.format(record.args) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
self._fmt = self.getfmt(record.levelname)
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt) # depends on [control=['if'], data=[]]
s = self._fmt.format(**record.__dict__)
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if record.exc_text:
if s[-1:] != '\n':
s += '\n' # depends on [control=['if'], data=[]]
try:
s = s + record.exc_text # depends on [control=['try'], data=[]]
except UnicodeError:
s = s + record.exc_text.decode(sys.getfilesystemencoding(), 'replace') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return s |
def compstat(sdat, tstart=None, tend=None):
"""Compute statistics from series output by StagYY.
Create a file 'statistics.dat' containing the mean and standard deviation
of each series on the requested time span.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
tstart (float): starting time. Set to None to start at the beginning of
available data.
tend (float): ending time. Set to None to stop at the end of available
data.
"""
data = sdat.tseries_between(tstart, tend)
time = data['t'].values
delta_time = time[-1] - time[0]
data = data.iloc[:, 1:].values # assume t is first column
mean = np.trapz(data, x=time, axis=0) / delta_time
rms = np.sqrt(np.trapz((data - mean)**2, x=time, axis=0) / delta_time)
with open(misc.out_name('statistics.dat'), 'w') as out_file:
mean.tofile(out_file, sep=' ', format="%10.5e")
out_file.write('\n')
rms.tofile(out_file, sep=' ', format="%10.5e")
out_file.write('\n') | def function[compstat, parameter[sdat, tstart, tend]]:
constant[Compute statistics from series output by StagYY.
Create a file 'statistics.dat' containing the mean and standard deviation
of each series on the requested time span.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
tstart (float): starting time. Set to None to start at the beginning of
available data.
tend (float): ending time. Set to None to stop at the end of available
data.
]
variable[data] assign[=] call[name[sdat].tseries_between, parameter[name[tstart], name[tend]]]
variable[time] assign[=] call[name[data]][constant[t]].values
variable[delta_time] assign[=] binary_operation[call[name[time]][<ast.UnaryOp object at 0x7da1b19a3b80>] - call[name[time]][constant[0]]]
variable[data] assign[=] call[name[data].iloc][tuple[[<ast.Slice object at 0x7da1b19a3370>, <ast.Slice object at 0x7da1b19a3340>]]].values
variable[mean] assign[=] binary_operation[call[name[np].trapz, parameter[name[data]]] / name[delta_time]]
variable[rms] assign[=] call[name[np].sqrt, parameter[binary_operation[call[name[np].trapz, parameter[binary_operation[binary_operation[name[data] - name[mean]] ** constant[2]]]] / name[delta_time]]]]
with call[name[open], parameter[call[name[misc].out_name, parameter[constant[statistics.dat]]], constant[w]]] begin[:]
call[name[mean].tofile, parameter[name[out_file]]]
call[name[out_file].write, parameter[constant[
]]]
call[name[rms].tofile, parameter[name[out_file]]]
call[name[out_file].write, parameter[constant[
]]] | keyword[def] identifier[compstat] ( identifier[sdat] , identifier[tstart] = keyword[None] , identifier[tend] = keyword[None] ):
literal[string]
identifier[data] = identifier[sdat] . identifier[tseries_between] ( identifier[tstart] , identifier[tend] )
identifier[time] = identifier[data] [ literal[string] ]. identifier[values]
identifier[delta_time] = identifier[time] [- literal[int] ]- identifier[time] [ literal[int] ]
identifier[data] = identifier[data] . identifier[iloc] [:, literal[int] :]. identifier[values]
identifier[mean] = identifier[np] . identifier[trapz] ( identifier[data] , identifier[x] = identifier[time] , identifier[axis] = literal[int] )/ identifier[delta_time]
identifier[rms] = identifier[np] . identifier[sqrt] ( identifier[np] . identifier[trapz] (( identifier[data] - identifier[mean] )** literal[int] , identifier[x] = identifier[time] , identifier[axis] = literal[int] )/ identifier[delta_time] )
keyword[with] identifier[open] ( identifier[misc] . identifier[out_name] ( literal[string] ), literal[string] ) keyword[as] identifier[out_file] :
identifier[mean] . identifier[tofile] ( identifier[out_file] , identifier[sep] = literal[string] , identifier[format] = literal[string] )
identifier[out_file] . identifier[write] ( literal[string] )
identifier[rms] . identifier[tofile] ( identifier[out_file] , identifier[sep] = literal[string] , identifier[format] = literal[string] )
identifier[out_file] . identifier[write] ( literal[string] ) | def compstat(sdat, tstart=None, tend=None):
"""Compute statistics from series output by StagYY.
Create a file 'statistics.dat' containing the mean and standard deviation
of each series on the requested time span.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
tstart (float): starting time. Set to None to start at the beginning of
available data.
tend (float): ending time. Set to None to stop at the end of available
data.
"""
data = sdat.tseries_between(tstart, tend)
time = data['t'].values
delta_time = time[-1] - time[0]
data = data.iloc[:, 1:].values # assume t is first column
mean = np.trapz(data, x=time, axis=0) / delta_time
rms = np.sqrt(np.trapz((data - mean) ** 2, x=time, axis=0) / delta_time)
with open(misc.out_name('statistics.dat'), 'w') as out_file:
mean.tofile(out_file, sep=' ', format='%10.5e')
out_file.write('\n')
rms.tofile(out_file, sep=' ', format='%10.5e')
out_file.write('\n') # depends on [control=['with'], data=['out_file']] |
def table(schema, arg):
"""A table argument.
Parameters
----------
schema : Union[sch.Schema, List[Tuple[str, dt.DataType]]
A validator for the table's columns. Only column subset validators are
currently supported. Accepts any arguments that `sch.schema` accepts.
See the example for usage.
arg : The validatable argument.
Examples
--------
The following op will accept an argument named ``'table'``. Note that the
``schema`` argument specifies rules for columns that are required to be in
the table: ``time``, ``group`` and ``value1``. These must match the types
specified in the column rules. Column ``value2`` is optional, but if
present it must be of the specified type. The table may have extra columns
not specified in the schema.
"""
assert isinstance(arg, ir.TableExpr)
if arg.schema() >= sch.schema(schema):
return arg
raise com.IbisTypeError(
'Argument is not a table with column subset of {}'.format(schema)
) | def function[table, parameter[schema, arg]]:
constant[A table argument.
Parameters
----------
schema : Union[sch.Schema, List[Tuple[str, dt.DataType]]
A validator for the table's columns. Only column subset validators are
currently supported. Accepts any arguments that `sch.schema` accepts.
See the example for usage.
arg : The validatable argument.
Examples
--------
The following op will accept an argument named ``'table'``. Note that the
``schema`` argument specifies rules for columns that are required to be in
the table: ``time``, ``group`` and ``value1``. These must match the types
specified in the column rules. Column ``value2`` is optional, but if
present it must be of the specified type. The table may have extra columns
not specified in the schema.
]
assert[call[name[isinstance], parameter[name[arg], name[ir].TableExpr]]]
if compare[call[name[arg].schema, parameter[]] greater_or_equal[>=] call[name[sch].schema, parameter[name[schema]]]] begin[:]
return[name[arg]]
<ast.Raise object at 0x7da207f00c70> | keyword[def] identifier[table] ( identifier[schema] , identifier[arg] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[arg] , identifier[ir] . identifier[TableExpr] )
keyword[if] identifier[arg] . identifier[schema] ()>= identifier[sch] . identifier[schema] ( identifier[schema] ):
keyword[return] identifier[arg]
keyword[raise] identifier[com] . identifier[IbisTypeError] (
literal[string] . identifier[format] ( identifier[schema] )
) | def table(schema, arg):
"""A table argument.
Parameters
----------
schema : Union[sch.Schema, List[Tuple[str, dt.DataType]]
A validator for the table's columns. Only column subset validators are
currently supported. Accepts any arguments that `sch.schema` accepts.
See the example for usage.
arg : The validatable argument.
Examples
--------
The following op will accept an argument named ``'table'``. Note that the
``schema`` argument specifies rules for columns that are required to be in
the table: ``time``, ``group`` and ``value1``. These must match the types
specified in the column rules. Column ``value2`` is optional, but if
present it must be of the specified type. The table may have extra columns
not specified in the schema.
"""
assert isinstance(arg, ir.TableExpr)
if arg.schema() >= sch.schema(schema):
return arg # depends on [control=['if'], data=[]]
raise com.IbisTypeError('Argument is not a table with column subset of {}'.format(schema)) |
def to_gpu(x, *args, **kwargs):
'''puts pytorch variable to gpu, if cuda is available and USE_GPU is set to true. '''
return x.cuda(*args, **kwargs) if USE_GPU else x | def function[to_gpu, parameter[x]]:
constant[puts pytorch variable to gpu, if cuda is available and USE_GPU is set to true. ]
return[<ast.IfExp object at 0x7da1b1e12ad0>] | keyword[def] identifier[to_gpu] ( identifier[x] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[x] . identifier[cuda] (* identifier[args] ,** identifier[kwargs] ) keyword[if] identifier[USE_GPU] keyword[else] identifier[x] | def to_gpu(x, *args, **kwargs):
"""puts pytorch variable to gpu, if cuda is available and USE_GPU is set to true. """
return x.cuda(*args, **kwargs) if USE_GPU else x |
def _seconds_to_HHMMSS(seconds):
"""
Retuns a string which is the hour, minute, second(milli) representation
of the intput `seconds`
Parameters
----------
seconds : float
Returns
-------
str
Has the form <int>H<int>M<int>S.<float>
"""
less_than_second = seconds - floor(seconds)
minutes, seconds = divmod(floor(seconds), 60)
hours, minutes = divmod(minutes, 60)
return "{}H{}M{}S.{}".format(hours, minutes, seconds, less_than_second) | def function[_seconds_to_HHMMSS, parameter[seconds]]:
constant[
Retuns a string which is the hour, minute, second(milli) representation
of the intput `seconds`
Parameters
----------
seconds : float
Returns
-------
str
Has the form <int>H<int>M<int>S.<float>
]
variable[less_than_second] assign[=] binary_operation[name[seconds] - call[name[floor], parameter[name[seconds]]]]
<ast.Tuple object at 0x7da1b0403af0> assign[=] call[name[divmod], parameter[call[name[floor], parameter[name[seconds]]], constant[60]]]
<ast.Tuple object at 0x7da1b0402d40> assign[=] call[name[divmod], parameter[name[minutes], constant[60]]]
return[call[constant[{}H{}M{}S.{}].format, parameter[name[hours], name[minutes], name[seconds], name[less_than_second]]]] | keyword[def] identifier[_seconds_to_HHMMSS] ( identifier[seconds] ):
literal[string]
identifier[less_than_second] = identifier[seconds] - identifier[floor] ( identifier[seconds] )
identifier[minutes] , identifier[seconds] = identifier[divmod] ( identifier[floor] ( identifier[seconds] ), literal[int] )
identifier[hours] , identifier[minutes] = identifier[divmod] ( identifier[minutes] , literal[int] )
keyword[return] literal[string] . identifier[format] ( identifier[hours] , identifier[minutes] , identifier[seconds] , identifier[less_than_second] ) | def _seconds_to_HHMMSS(seconds):
"""
Retuns a string which is the hour, minute, second(milli) representation
of the intput `seconds`
Parameters
----------
seconds : float
Returns
-------
str
Has the form <int>H<int>M<int>S.<float>
"""
less_than_second = seconds - floor(seconds)
(minutes, seconds) = divmod(floor(seconds), 60)
(hours, minutes) = divmod(minutes, 60)
return '{}H{}M{}S.{}'.format(hours, minutes, seconds, less_than_second) |
def set_fields(self, changeset):
"""Set the fields of this class with the metadata of the analysed
changeset.
"""
self.id = int(changeset.get('id'))
self.user = changeset.get('user')
self.uid = changeset.get('uid')
self.editor = changeset.get('created_by', None)
self.review_requested = changeset.get('review_requested', False)
self.host = changeset.get('host', 'Not reported')
self.bbox = changeset.get('bbox').wkt
self.comment = changeset.get('comment', 'Not reported')
self.source = changeset.get('source', 'Not reported')
self.imagery_used = changeset.get('imagery_used', 'Not reported')
self.date = datetime.strptime(
changeset.get('created_at'),
'%Y-%m-%dT%H:%M:%SZ'
)
self.suspicion_reasons = []
self.is_suspect = False
self.powerfull_editor = False | def function[set_fields, parameter[self, changeset]]:
constant[Set the fields of this class with the metadata of the analysed
changeset.
]
name[self].id assign[=] call[name[int], parameter[call[name[changeset].get, parameter[constant[id]]]]]
name[self].user assign[=] call[name[changeset].get, parameter[constant[user]]]
name[self].uid assign[=] call[name[changeset].get, parameter[constant[uid]]]
name[self].editor assign[=] call[name[changeset].get, parameter[constant[created_by], constant[None]]]
name[self].review_requested assign[=] call[name[changeset].get, parameter[constant[review_requested], constant[False]]]
name[self].host assign[=] call[name[changeset].get, parameter[constant[host], constant[Not reported]]]
name[self].bbox assign[=] call[name[changeset].get, parameter[constant[bbox]]].wkt
name[self].comment assign[=] call[name[changeset].get, parameter[constant[comment], constant[Not reported]]]
name[self].source assign[=] call[name[changeset].get, parameter[constant[source], constant[Not reported]]]
name[self].imagery_used assign[=] call[name[changeset].get, parameter[constant[imagery_used], constant[Not reported]]]
name[self].date assign[=] call[name[datetime].strptime, parameter[call[name[changeset].get, parameter[constant[created_at]]], constant[%Y-%m-%dT%H:%M:%SZ]]]
name[self].suspicion_reasons assign[=] list[[]]
name[self].is_suspect assign[=] constant[False]
name[self].powerfull_editor assign[=] constant[False] | keyword[def] identifier[set_fields] ( identifier[self] , identifier[changeset] ):
literal[string]
identifier[self] . identifier[id] = identifier[int] ( identifier[changeset] . identifier[get] ( literal[string] ))
identifier[self] . identifier[user] = identifier[changeset] . identifier[get] ( literal[string] )
identifier[self] . identifier[uid] = identifier[changeset] . identifier[get] ( literal[string] )
identifier[self] . identifier[editor] = identifier[changeset] . identifier[get] ( literal[string] , keyword[None] )
identifier[self] . identifier[review_requested] = identifier[changeset] . identifier[get] ( literal[string] , keyword[False] )
identifier[self] . identifier[host] = identifier[changeset] . identifier[get] ( literal[string] , literal[string] )
identifier[self] . identifier[bbox] = identifier[changeset] . identifier[get] ( literal[string] ). identifier[wkt]
identifier[self] . identifier[comment] = identifier[changeset] . identifier[get] ( literal[string] , literal[string] )
identifier[self] . identifier[source] = identifier[changeset] . identifier[get] ( literal[string] , literal[string] )
identifier[self] . identifier[imagery_used] = identifier[changeset] . identifier[get] ( literal[string] , literal[string] )
identifier[self] . identifier[date] = identifier[datetime] . identifier[strptime] (
identifier[changeset] . identifier[get] ( literal[string] ),
literal[string]
)
identifier[self] . identifier[suspicion_reasons] =[]
identifier[self] . identifier[is_suspect] = keyword[False]
identifier[self] . identifier[powerfull_editor] = keyword[False] | def set_fields(self, changeset):
"""Set the fields of this class with the metadata of the analysed
changeset.
"""
self.id = int(changeset.get('id'))
self.user = changeset.get('user')
self.uid = changeset.get('uid')
self.editor = changeset.get('created_by', None)
self.review_requested = changeset.get('review_requested', False)
self.host = changeset.get('host', 'Not reported')
self.bbox = changeset.get('bbox').wkt
self.comment = changeset.get('comment', 'Not reported')
self.source = changeset.get('source', 'Not reported')
self.imagery_used = changeset.get('imagery_used', 'Not reported')
self.date = datetime.strptime(changeset.get('created_at'), '%Y-%m-%dT%H:%M:%SZ')
self.suspicion_reasons = []
self.is_suspect = False
self.powerfull_editor = False |
def lr_find2(self, start_lr=1e-5, end_lr=10, num_it = 100, wds=None, linear=False, stop_dv=True, **kwargs):
"""A variant of lr_find() that helps find the best learning rate. It doesn't do
an epoch but a fixed num of iterations (which may be more or less than an epoch
depending on your data).
At each step, it computes the validation loss and the metrics on the next
batch of the validation data, so it's slower than lr_find().
Args:
start_lr (float/numpy array) : Passing in a numpy array allows you
to specify learning rates for a learner's layer_groups
end_lr (float) : The maximum learning rate to try.
num_it : the number of iterations you want it to run
wds (iterable/float)
stop_dv : stops (or not) when the losses starts to explode.
"""
self.save('tmp')
layer_opt = self.get_layer_opt(start_lr, wds)
self.sched = LR_Finder2(layer_opt, num_it, end_lr, linear=linear, metrics=self.metrics, stop_dv=stop_dv)
self.fit_gen(self.model, self.data, layer_opt, num_it//len(self.data.trn_dl) + 1, all_val=True, **kwargs)
self.load('tmp') | def function[lr_find2, parameter[self, start_lr, end_lr, num_it, wds, linear, stop_dv]]:
constant[A variant of lr_find() that helps find the best learning rate. It doesn't do
an epoch but a fixed num of iterations (which may be more or less than an epoch
depending on your data).
At each step, it computes the validation loss and the metrics on the next
batch of the validation data, so it's slower than lr_find().
Args:
start_lr (float/numpy array) : Passing in a numpy array allows you
to specify learning rates for a learner's layer_groups
end_lr (float) : The maximum learning rate to try.
num_it : the number of iterations you want it to run
wds (iterable/float)
stop_dv : stops (or not) when the losses starts to explode.
]
call[name[self].save, parameter[constant[tmp]]]
variable[layer_opt] assign[=] call[name[self].get_layer_opt, parameter[name[start_lr], name[wds]]]
name[self].sched assign[=] call[name[LR_Finder2], parameter[name[layer_opt], name[num_it], name[end_lr]]]
call[name[self].fit_gen, parameter[name[self].model, name[self].data, name[layer_opt], binary_operation[binary_operation[name[num_it] <ast.FloorDiv object at 0x7da2590d6bc0> call[name[len], parameter[name[self].data.trn_dl]]] + constant[1]]]]
call[name[self].load, parameter[constant[tmp]]] | keyword[def] identifier[lr_find2] ( identifier[self] , identifier[start_lr] = literal[int] , identifier[end_lr] = literal[int] , identifier[num_it] = literal[int] , identifier[wds] = keyword[None] , identifier[linear] = keyword[False] , identifier[stop_dv] = keyword[True] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[save] ( literal[string] )
identifier[layer_opt] = identifier[self] . identifier[get_layer_opt] ( identifier[start_lr] , identifier[wds] )
identifier[self] . identifier[sched] = identifier[LR_Finder2] ( identifier[layer_opt] , identifier[num_it] , identifier[end_lr] , identifier[linear] = identifier[linear] , identifier[metrics] = identifier[self] . identifier[metrics] , identifier[stop_dv] = identifier[stop_dv] )
identifier[self] . identifier[fit_gen] ( identifier[self] . identifier[model] , identifier[self] . identifier[data] , identifier[layer_opt] , identifier[num_it] // identifier[len] ( identifier[self] . identifier[data] . identifier[trn_dl] )+ literal[int] , identifier[all_val] = keyword[True] ,** identifier[kwargs] )
identifier[self] . identifier[load] ( literal[string] ) | def lr_find2(self, start_lr=1e-05, end_lr=10, num_it=100, wds=None, linear=False, stop_dv=True, **kwargs):
"""A variant of lr_find() that helps find the best learning rate. It doesn't do
an epoch but a fixed num of iterations (which may be more or less than an epoch
depending on your data).
At each step, it computes the validation loss and the metrics on the next
batch of the validation data, so it's slower than lr_find().
Args:
start_lr (float/numpy array) : Passing in a numpy array allows you
to specify learning rates for a learner's layer_groups
end_lr (float) : The maximum learning rate to try.
num_it : the number of iterations you want it to run
wds (iterable/float)
stop_dv : stops (or not) when the losses starts to explode.
"""
self.save('tmp')
layer_opt = self.get_layer_opt(start_lr, wds)
self.sched = LR_Finder2(layer_opt, num_it, end_lr, linear=linear, metrics=self.metrics, stop_dv=stop_dv)
self.fit_gen(self.model, self.data, layer_opt, num_it // len(self.data.trn_dl) + 1, all_val=True, **kwargs)
self.load('tmp') |
def wait(self, timeout=None):
"""Wait until the result is available or until roughly timeout seconds
pass."""
logger = logging.getLogger(__name__)
if int(self.max_sleep_interval) < int(self._min_sleep_interval):
self.max_sleep_interval = int(self._min_sleep_interval)
t0 = time.time()
sleep_seconds = min(5, self.max_sleep_interval)
status = self.status
prev_status = status
while status < COMPLETED:
logger.debug("sleep for %d seconds", sleep_seconds)
time.sleep(sleep_seconds)
if 2*sleep_seconds <= self.max_sleep_interval:
sleep_seconds *= 2
if timeout is not None:
if int(time.time() - t0) > int(timeout):
return
status = self.status
if status != prev_status:
sleep_seconds = min(5, self.max_sleep_interval)
prev_status = status | def function[wait, parameter[self, timeout]]:
constant[Wait until the result is available or until roughly timeout seconds
pass.]
variable[logger] assign[=] call[name[logging].getLogger, parameter[name[__name__]]]
if compare[call[name[int], parameter[name[self].max_sleep_interval]] less[<] call[name[int], parameter[name[self]._min_sleep_interval]]] begin[:]
name[self].max_sleep_interval assign[=] call[name[int], parameter[name[self]._min_sleep_interval]]
variable[t0] assign[=] call[name[time].time, parameter[]]
variable[sleep_seconds] assign[=] call[name[min], parameter[constant[5], name[self].max_sleep_interval]]
variable[status] assign[=] name[self].status
variable[prev_status] assign[=] name[status]
while compare[name[status] less[<] name[COMPLETED]] begin[:]
call[name[logger].debug, parameter[constant[sleep for %d seconds], name[sleep_seconds]]]
call[name[time].sleep, parameter[name[sleep_seconds]]]
if compare[binary_operation[constant[2] * name[sleep_seconds]] less_or_equal[<=] name[self].max_sleep_interval] begin[:]
<ast.AugAssign object at 0x7da1b0bc8700>
if compare[name[timeout] is_not constant[None]] begin[:]
if compare[call[name[int], parameter[binary_operation[call[name[time].time, parameter[]] - name[t0]]]] greater[>] call[name[int], parameter[name[timeout]]]] begin[:]
return[None]
variable[status] assign[=] name[self].status
if compare[name[status] not_equal[!=] name[prev_status]] begin[:]
variable[sleep_seconds] assign[=] call[name[min], parameter[constant[5], name[self].max_sleep_interval]]
variable[prev_status] assign[=] name[status] | keyword[def] identifier[wait] ( identifier[self] , identifier[timeout] = keyword[None] ):
literal[string]
identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] )
keyword[if] identifier[int] ( identifier[self] . identifier[max_sleep_interval] )< identifier[int] ( identifier[self] . identifier[_min_sleep_interval] ):
identifier[self] . identifier[max_sleep_interval] = identifier[int] ( identifier[self] . identifier[_min_sleep_interval] )
identifier[t0] = identifier[time] . identifier[time] ()
identifier[sleep_seconds] = identifier[min] ( literal[int] , identifier[self] . identifier[max_sleep_interval] )
identifier[status] = identifier[self] . identifier[status]
identifier[prev_status] = identifier[status]
keyword[while] identifier[status] < identifier[COMPLETED] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[sleep_seconds] )
identifier[time] . identifier[sleep] ( identifier[sleep_seconds] )
keyword[if] literal[int] * identifier[sleep_seconds] <= identifier[self] . identifier[max_sleep_interval] :
identifier[sleep_seconds] *= literal[int]
keyword[if] identifier[timeout] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[int] ( identifier[time] . identifier[time] ()- identifier[t0] )> identifier[int] ( identifier[timeout] ):
keyword[return]
identifier[status] = identifier[self] . identifier[status]
keyword[if] identifier[status] != identifier[prev_status] :
identifier[sleep_seconds] = identifier[min] ( literal[int] , identifier[self] . identifier[max_sleep_interval] )
identifier[prev_status] = identifier[status] | def wait(self, timeout=None):
"""Wait until the result is available or until roughly timeout seconds
pass."""
logger = logging.getLogger(__name__)
if int(self.max_sleep_interval) < int(self._min_sleep_interval):
self.max_sleep_interval = int(self._min_sleep_interval) # depends on [control=['if'], data=[]]
t0 = time.time()
sleep_seconds = min(5, self.max_sleep_interval)
status = self.status
prev_status = status
while status < COMPLETED:
logger.debug('sleep for %d seconds', sleep_seconds)
time.sleep(sleep_seconds)
if 2 * sleep_seconds <= self.max_sleep_interval:
sleep_seconds *= 2 # depends on [control=['if'], data=[]]
if timeout is not None:
if int(time.time() - t0) > int(timeout):
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['timeout']]
status = self.status
if status != prev_status:
sleep_seconds = min(5, self.max_sleep_interval)
prev_status = status # depends on [control=['if'], data=['status', 'prev_status']] # depends on [control=['while'], data=['status']] |
def calc_pvalues(query, gene_sets, background=20000, **kwargs):
""" calculate pvalues for all categories in the graph
:param set query: set of identifiers for which the p value is calculated
:param dict gene_sets: gmt file dict after background was set
:param set background: total number of genes in your annotated database.
:returns: pvalues
x: overlapped gene number
n: length of gene_set which belongs to each terms
hits: overlapped gene names.
For 2*2 contingency table:
=============================================================================
| in query | not in query | row total
=> in gene_set | a | b | a+b
=> not in gene_set | c | d | c+d
column total | a+b+c+d = anno database
=============================================================================
background genes number = a + b + c + d.
Then, in R
x=a the number of white balls drawn without replacement
from an urn which contains both black and white balls.
m=a+b the number of white balls in the urn
n=c+d the number of black balls in the urn
k=a+c the number of balls drawn from the urn
In Scipy:
for args in scipy.hypergeom.sf(k, M, n, N, loc=0):
M: the total number of objects,
n: the total number of Type I objects.
k: the random variate represents the number of Type I objects in N drawn
without replacement from the total population.
Therefore, these two functions are the same when using parameters from 2*2 table:
R: > phyper(x-1, m, n, k, lower.tail=FALSE)
Scipy: >>> hypergeom.sf(x-1, m+n, m, k)
"""
# number of genes in your query data
k = len(query)
query = set(query)
vals = []
# background should be all genes in annotated database
# such as go, kegg et.al.
if isinstance(background, set):
bg = len(background) # total number in your annotated database
# filter genes that not found in annotated database
query = query.intersection(background)
elif isinstance(background, int):
bg = background
else:
raise ValueError("background should be set or int object")
# pval
subsets = sorted(gene_sets.keys())
for s in subsets:
category = gene_sets.get(s)
m = len(category)
hits = query.intersection(set(category))
x = len(hits)
if x < 1 : continue
# pVal = hypergeom.sf(hitCount-1,popTotal,bgHits,queryTotal)
# p(X >= hitCounts)
vals.append((s, hypergeom.sf(x-1, bg, m, k), x, m, hits))
return zip(*vals) | def function[calc_pvalues, parameter[query, gene_sets, background]]:
constant[ calculate pvalues for all categories in the graph
:param set query: set of identifiers for which the p value is calculated
:param dict gene_sets: gmt file dict after background was set
:param set background: total number of genes in your annotated database.
:returns: pvalues
x: overlapped gene number
n: length of gene_set which belongs to each terms
hits: overlapped gene names.
For 2*2 contingency table:
=============================================================================
| in query | not in query | row total
=> in gene_set | a | b | a+b
=> not in gene_set | c | d | c+d
column total | a+b+c+d = anno database
=============================================================================
background genes number = a + b + c + d.
Then, in R
x=a the number of white balls drawn without replacement
from an urn which contains both black and white balls.
m=a+b the number of white balls in the urn
n=c+d the number of black balls in the urn
k=a+c the number of balls drawn from the urn
In Scipy:
for args in scipy.hypergeom.sf(k, M, n, N, loc=0):
M: the total number of objects,
n: the total number of Type I objects.
k: the random variate represents the number of Type I objects in N drawn
without replacement from the total population.
Therefore, these two functions are the same when using parameters from 2*2 table:
R: > phyper(x-1, m, n, k, lower.tail=FALSE)
Scipy: >>> hypergeom.sf(x-1, m+n, m, k)
]
variable[k] assign[=] call[name[len], parameter[name[query]]]
variable[query] assign[=] call[name[set], parameter[name[query]]]
variable[vals] assign[=] list[[]]
if call[name[isinstance], parameter[name[background], name[set]]] begin[:]
variable[bg] assign[=] call[name[len], parameter[name[background]]]
variable[query] assign[=] call[name[query].intersection, parameter[name[background]]]
variable[subsets] assign[=] call[name[sorted], parameter[call[name[gene_sets].keys, parameter[]]]]
for taget[name[s]] in starred[name[subsets]] begin[:]
variable[category] assign[=] call[name[gene_sets].get, parameter[name[s]]]
variable[m] assign[=] call[name[len], parameter[name[category]]]
variable[hits] assign[=] call[name[query].intersection, parameter[call[name[set], parameter[name[category]]]]]
variable[x] assign[=] call[name[len], parameter[name[hits]]]
if compare[name[x] less[<] constant[1]] begin[:]
continue
call[name[vals].append, parameter[tuple[[<ast.Name object at 0x7da1b0296260>, <ast.Call object at 0x7da1b0297190>, <ast.Name object at 0x7da1b02962f0>, <ast.Name object at 0x7da1b0297910>, <ast.Name object at 0x7da1b02940d0>]]]]
return[call[name[zip], parameter[<ast.Starred object at 0x7da1b0294370>]]] | keyword[def] identifier[calc_pvalues] ( identifier[query] , identifier[gene_sets] , identifier[background] = literal[int] ,** identifier[kwargs] ):
literal[string]
identifier[k] = identifier[len] ( identifier[query] )
identifier[query] = identifier[set] ( identifier[query] )
identifier[vals] =[]
keyword[if] identifier[isinstance] ( identifier[background] , identifier[set] ):
identifier[bg] = identifier[len] ( identifier[background] )
identifier[query] = identifier[query] . identifier[intersection] ( identifier[background] )
keyword[elif] identifier[isinstance] ( identifier[background] , identifier[int] ):
identifier[bg] = identifier[background]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[subsets] = identifier[sorted] ( identifier[gene_sets] . identifier[keys] ())
keyword[for] identifier[s] keyword[in] identifier[subsets] :
identifier[category] = identifier[gene_sets] . identifier[get] ( identifier[s] )
identifier[m] = identifier[len] ( identifier[category] )
identifier[hits] = identifier[query] . identifier[intersection] ( identifier[set] ( identifier[category] ))
identifier[x] = identifier[len] ( identifier[hits] )
keyword[if] identifier[x] < literal[int] : keyword[continue]
identifier[vals] . identifier[append] (( identifier[s] , identifier[hypergeom] . identifier[sf] ( identifier[x] - literal[int] , identifier[bg] , identifier[m] , identifier[k] ), identifier[x] , identifier[m] , identifier[hits] ))
keyword[return] identifier[zip] (* identifier[vals] ) | def calc_pvalues(query, gene_sets, background=20000, **kwargs):
""" calculate pvalues for all categories in the graph
:param set query: set of identifiers for which the p value is calculated
:param dict gene_sets: gmt file dict after background was set
:param set background: total number of genes in your annotated database.
:returns: pvalues
x: overlapped gene number
n: length of gene_set which belongs to each terms
hits: overlapped gene names.
For 2*2 contingency table:
=============================================================================
| in query | not in query | row total
=> in gene_set | a | b | a+b
=> not in gene_set | c | d | c+d
column total | a+b+c+d = anno database
=============================================================================
background genes number = a + b + c + d.
Then, in R
x=a the number of white balls drawn without replacement
from an urn which contains both black and white balls.
m=a+b the number of white balls in the urn
n=c+d the number of black balls in the urn
k=a+c the number of balls drawn from the urn
In Scipy:
for args in scipy.hypergeom.sf(k, M, n, N, loc=0):
M: the total number of objects,
n: the total number of Type I objects.
k: the random variate represents the number of Type I objects in N drawn
without replacement from the total population.
Therefore, these two functions are the same when using parameters from 2*2 table:
R: > phyper(x-1, m, n, k, lower.tail=FALSE)
Scipy: >>> hypergeom.sf(x-1, m+n, m, k)
"""
# number of genes in your query data
k = len(query)
query = set(query)
vals = []
# background should be all genes in annotated database
# such as go, kegg et.al.
if isinstance(background, set):
bg = len(background) # total number in your annotated database
# filter genes that not found in annotated database
query = query.intersection(background) # depends on [control=['if'], data=[]]
elif isinstance(background, int):
bg = background # depends on [control=['if'], data=[]]
else:
raise ValueError('background should be set or int object')
# pval
subsets = sorted(gene_sets.keys())
for s in subsets:
category = gene_sets.get(s)
m = len(category)
hits = query.intersection(set(category))
x = len(hits)
if x < 1:
continue # depends on [control=['if'], data=[]] # pVal = hypergeom.sf(hitCount-1,popTotal,bgHits,queryTotal)
# p(X >= hitCounts)
vals.append((s, hypergeom.sf(x - 1, bg, m, k), x, m, hits)) # depends on [control=['for'], data=['s']]
return zip(*vals) |
def enum(self, other, rmax, process=None, bunch=100000, **kwargs):
""" cross correlate with other, for all pairs
closer than rmax, iterate.
>>> def process(r, i, j, **kwargs):
>>> ...
>>> A.enum(... process, **kwargs):
>>> ...
where r is the distance, i and j are the original
input array index of the data. arbitrary args can be passed
to process via kwargs.
"""
rall = None
if process is None:
rall = [numpy.empty(0, 'f8')]
iall = [numpy.empty(0, 'intp')]
jall = [numpy.empty(0, 'intp')]
def process(r1, i1, j1, **kwargs):
rall[0] = numpy.append(rall[0], r1)
iall[0] = numpy.append(iall[0], i1)
jall[0] = numpy.append(jall[0], j1)
_core.KDNode.enum(self, other, rmax, process, bunch, **kwargs)
if rall is not None:
return rall[0], iall[0], jall[0]
else:
return None | def function[enum, parameter[self, other, rmax, process, bunch]]:
constant[ cross correlate with other, for all pairs
closer than rmax, iterate.
>>> def process(r, i, j, **kwargs):
>>> ...
>>> A.enum(... process, **kwargs):
>>> ...
where r is the distance, i and j are the original
input array index of the data. arbitrary args can be passed
to process via kwargs.
]
variable[rall] assign[=] constant[None]
if compare[name[process] is constant[None]] begin[:]
variable[rall] assign[=] list[[<ast.Call object at 0x7da20c6a9390>]]
variable[iall] assign[=] list[[<ast.Call object at 0x7da20c6a9540>]]
variable[jall] assign[=] list[[<ast.Call object at 0x7da20c6a91e0>]]
def function[process, parameter[r1, i1, j1]]:
call[name[rall]][constant[0]] assign[=] call[name[numpy].append, parameter[call[name[rall]][constant[0]], name[r1]]]
call[name[iall]][constant[0]] assign[=] call[name[numpy].append, parameter[call[name[iall]][constant[0]], name[i1]]]
call[name[jall]][constant[0]] assign[=] call[name[numpy].append, parameter[call[name[jall]][constant[0]], name[j1]]]
call[name[_core].KDNode.enum, parameter[name[self], name[other], name[rmax], name[process], name[bunch]]]
if compare[name[rall] is_not constant[None]] begin[:]
return[tuple[[<ast.Subscript object at 0x7da20c9920e0>, <ast.Subscript object at 0x7da20c992260>, <ast.Subscript object at 0x7da20c992d70>]]] | keyword[def] identifier[enum] ( identifier[self] , identifier[other] , identifier[rmax] , identifier[process] = keyword[None] , identifier[bunch] = literal[int] ,** identifier[kwargs] ):
literal[string]
identifier[rall] = keyword[None]
keyword[if] identifier[process] keyword[is] keyword[None] :
identifier[rall] =[ identifier[numpy] . identifier[empty] ( literal[int] , literal[string] )]
identifier[iall] =[ identifier[numpy] . identifier[empty] ( literal[int] , literal[string] )]
identifier[jall] =[ identifier[numpy] . identifier[empty] ( literal[int] , literal[string] )]
keyword[def] identifier[process] ( identifier[r1] , identifier[i1] , identifier[j1] ,** identifier[kwargs] ):
identifier[rall] [ literal[int] ]= identifier[numpy] . identifier[append] ( identifier[rall] [ literal[int] ], identifier[r1] )
identifier[iall] [ literal[int] ]= identifier[numpy] . identifier[append] ( identifier[iall] [ literal[int] ], identifier[i1] )
identifier[jall] [ literal[int] ]= identifier[numpy] . identifier[append] ( identifier[jall] [ literal[int] ], identifier[j1] )
identifier[_core] . identifier[KDNode] . identifier[enum] ( identifier[self] , identifier[other] , identifier[rmax] , identifier[process] , identifier[bunch] ,** identifier[kwargs] )
keyword[if] identifier[rall] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[rall] [ literal[int] ], identifier[iall] [ literal[int] ], identifier[jall] [ literal[int] ]
keyword[else] :
keyword[return] keyword[None] | def enum(self, other, rmax, process=None, bunch=100000, **kwargs):
""" cross correlate with other, for all pairs
closer than rmax, iterate.
>>> def process(r, i, j, **kwargs):
>>> ...
>>> A.enum(... process, **kwargs):
>>> ...
where r is the distance, i and j are the original
input array index of the data. arbitrary args can be passed
to process via kwargs.
"""
rall = None
if process is None:
rall = [numpy.empty(0, 'f8')]
iall = [numpy.empty(0, 'intp')]
jall = [numpy.empty(0, 'intp')]
def process(r1, i1, j1, **kwargs):
rall[0] = numpy.append(rall[0], r1)
iall[0] = numpy.append(iall[0], i1)
jall[0] = numpy.append(jall[0], j1) # depends on [control=['if'], data=[]]
_core.KDNode.enum(self, other, rmax, process, bunch, **kwargs)
if rall is not None:
return (rall[0], iall[0], jall[0]) # depends on [control=['if'], data=['rall']]
else:
return None |
def plot_cv(self, tmin, tmax, ntemp, ylim=None, **kwargs):
"""
Plots the constant volume specific heat C_v in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
"""
temperatures = np.linspace(tmin, tmax, ntemp)
if self.structure:
ylabel = r"$C_v$ (J/K/mol)"
else:
ylabel = r"$C_v$ (J/K/mol-c)"
fig = self._plot_thermo(self.dos.cv, temperatures, ylabel=ylabel, ylim=ylim, **kwargs)
return fig | def function[plot_cv, parameter[self, tmin, tmax, ntemp, ylim]]:
constant[
Plots the constant volume specific heat C_v in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
]
variable[temperatures] assign[=] call[name[np].linspace, parameter[name[tmin], name[tmax], name[ntemp]]]
if name[self].structure begin[:]
variable[ylabel] assign[=] constant[$C_v$ (J/K/mol)]
variable[fig] assign[=] call[name[self]._plot_thermo, parameter[name[self].dos.cv, name[temperatures]]]
return[name[fig]] | keyword[def] identifier[plot_cv] ( identifier[self] , identifier[tmin] , identifier[tmax] , identifier[ntemp] , identifier[ylim] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[temperatures] = identifier[np] . identifier[linspace] ( identifier[tmin] , identifier[tmax] , identifier[ntemp] )
keyword[if] identifier[self] . identifier[structure] :
identifier[ylabel] = literal[string]
keyword[else] :
identifier[ylabel] = literal[string]
identifier[fig] = identifier[self] . identifier[_plot_thermo] ( identifier[self] . identifier[dos] . identifier[cv] , identifier[temperatures] , identifier[ylabel] = identifier[ylabel] , identifier[ylim] = identifier[ylim] ,** identifier[kwargs] )
keyword[return] identifier[fig] | def plot_cv(self, tmin, tmax, ntemp, ylim=None, **kwargs):
"""
Plots the constant volume specific heat C_v in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
"""
temperatures = np.linspace(tmin, tmax, ntemp)
if self.structure:
ylabel = '$C_v$ (J/K/mol)' # depends on [control=['if'], data=[]]
else:
ylabel = '$C_v$ (J/K/mol-c)'
fig = self._plot_thermo(self.dos.cv, temperatures, ylabel=ylabel, ylim=ylim, **kwargs)
return fig |
def _netinfo_freebsd_netbsd():
'''
Get process information for network connections using sockstat
'''
ret = {}
# NetBSD requires '-n' to disable port-to-service resolution
out = __salt__['cmd.run'](
'sockstat -46 {0} | tail -n+2'.format(
'-n' if __grains__['kernel'] == 'NetBSD' else ''
), python_shell=True
)
for line in out.splitlines():
user, cmd, pid, _, proto, local_addr, remote_addr = line.split()
local_addr = '.'.join(local_addr.rsplit(':', 1))
remote_addr = '.'.join(remote_addr.rsplit(':', 1))
ret.setdefault(
local_addr, {}).setdefault(
remote_addr, {}).setdefault(
proto, {}).setdefault(
pid, {})['user'] = user
ret[local_addr][remote_addr][proto][pid]['cmd'] = cmd
return ret | def function[_netinfo_freebsd_netbsd, parameter[]]:
constant[
Get process information for network connections using sockstat
]
variable[ret] assign[=] dictionary[[], []]
variable[out] assign[=] call[call[name[__salt__]][constant[cmd.run]], parameter[call[constant[sockstat -46 {0} | tail -n+2].format, parameter[<ast.IfExp object at 0x7da1b2047ca0>]]]]
for taget[name[line]] in starred[call[name[out].splitlines, parameter[]]] begin[:]
<ast.Tuple object at 0x7da1b2045570> assign[=] call[name[line].split, parameter[]]
variable[local_addr] assign[=] call[constant[.].join, parameter[call[name[local_addr].rsplit, parameter[constant[:], constant[1]]]]]
variable[remote_addr] assign[=] call[constant[.].join, parameter[call[name[remote_addr].rsplit, parameter[constant[:], constant[1]]]]]
call[call[call[call[call[name[ret].setdefault, parameter[name[local_addr], dictionary[[], []]]].setdefault, parameter[name[remote_addr], dictionary[[], []]]].setdefault, parameter[name[proto], dictionary[[], []]]].setdefault, parameter[name[pid], dictionary[[], []]]]][constant[user]] assign[=] name[user]
call[call[call[call[call[name[ret]][name[local_addr]]][name[remote_addr]]][name[proto]]][name[pid]]][constant[cmd]] assign[=] name[cmd]
return[name[ret]] | keyword[def] identifier[_netinfo_freebsd_netbsd] ():
literal[string]
identifier[ret] ={}
identifier[out] = identifier[__salt__] [ literal[string] ](
literal[string] . identifier[format] (
literal[string] keyword[if] identifier[__grains__] [ literal[string] ]== literal[string] keyword[else] literal[string]
), identifier[python_shell] = keyword[True]
)
keyword[for] identifier[line] keyword[in] identifier[out] . identifier[splitlines] ():
identifier[user] , identifier[cmd] , identifier[pid] , identifier[_] , identifier[proto] , identifier[local_addr] , identifier[remote_addr] = identifier[line] . identifier[split] ()
identifier[local_addr] = literal[string] . identifier[join] ( identifier[local_addr] . identifier[rsplit] ( literal[string] , literal[int] ))
identifier[remote_addr] = literal[string] . identifier[join] ( identifier[remote_addr] . identifier[rsplit] ( literal[string] , literal[int] ))
identifier[ret] . identifier[setdefault] (
identifier[local_addr] ,{}). identifier[setdefault] (
identifier[remote_addr] ,{}). identifier[setdefault] (
identifier[proto] ,{}). identifier[setdefault] (
identifier[pid] ,{})[ literal[string] ]= identifier[user]
identifier[ret] [ identifier[local_addr] ][ identifier[remote_addr] ][ identifier[proto] ][ identifier[pid] ][ literal[string] ]= identifier[cmd]
keyword[return] identifier[ret] | def _netinfo_freebsd_netbsd():
"""
Get process information for network connections using sockstat
"""
ret = {}
# NetBSD requires '-n' to disable port-to-service resolution
out = __salt__['cmd.run']('sockstat -46 {0} | tail -n+2'.format('-n' if __grains__['kernel'] == 'NetBSD' else ''), python_shell=True)
for line in out.splitlines():
(user, cmd, pid, _, proto, local_addr, remote_addr) = line.split()
local_addr = '.'.join(local_addr.rsplit(':', 1))
remote_addr = '.'.join(remote_addr.rsplit(':', 1))
ret.setdefault(local_addr, {}).setdefault(remote_addr, {}).setdefault(proto, {}).setdefault(pid, {})['user'] = user
ret[local_addr][remote_addr][proto][pid]['cmd'] = cmd # depends on [control=['for'], data=['line']]
return ret |
def login_to_portal(username, password, client, retries=2, delay=0):
"""Log `username` into the MemberSuite Portal.
Returns a PortalUser object if successful, raises
LoginToPortalError if not.
Will retry logging in if a GeneralException occurs, up to `retries`.
Will pause `delay` seconds between retries.
"""
if not client.session_id:
client.request_session()
concierge_request_header = client.construct_concierge_header(
url=("http://membersuite.com/contracts/IConciergeAPIService/"
"LoginToPortal"))
attempts = 0
while attempts < retries:
if attempts:
time.sleep(delay)
result = client.client.service.LoginToPortal(
_soapheaders=[concierge_request_header],
portalUserName=username,
portalPassword=password)
login_to_portal_result = result["body"]["LoginToPortalResult"]
if login_to_portal_result["Success"]:
portal_user = login_to_portal_result["ResultValue"]["PortalUser"]
session_id = get_session_id(result=result)
return PortalUser(membersuite_object_data=portal_user,
session_id=session_id)
else:
attempts += 1
try:
error_code = login_to_portal_result[
"Errors"]["ConciergeError"][0]["Code"]
except IndexError: # Not a ConciergeError
continue
else:
if attempts < retries and error_code == "GeneralException":
continue
raise LoginToPortalError(result=result) | def function[login_to_portal, parameter[username, password, client, retries, delay]]:
constant[Log `username` into the MemberSuite Portal.
Returns a PortalUser object if successful, raises
LoginToPortalError if not.
Will retry logging in if a GeneralException occurs, up to `retries`.
Will pause `delay` seconds between retries.
]
if <ast.UnaryOp object at 0x7da20c9925c0> begin[:]
call[name[client].request_session, parameter[]]
variable[concierge_request_header] assign[=] call[name[client].construct_concierge_header, parameter[]]
variable[attempts] assign[=] constant[0]
while compare[name[attempts] less[<] name[retries]] begin[:]
if name[attempts] begin[:]
call[name[time].sleep, parameter[name[delay]]]
variable[result] assign[=] call[name[client].client.service.LoginToPortal, parameter[]]
variable[login_to_portal_result] assign[=] call[call[name[result]][constant[body]]][constant[LoginToPortalResult]]
if call[name[login_to_portal_result]][constant[Success]] begin[:]
variable[portal_user] assign[=] call[call[name[login_to_portal_result]][constant[ResultValue]]][constant[PortalUser]]
variable[session_id] assign[=] call[name[get_session_id], parameter[]]
return[call[name[PortalUser], parameter[]]]
<ast.Raise object at 0x7da20c990310> | keyword[def] identifier[login_to_portal] ( identifier[username] , identifier[password] , identifier[client] , identifier[retries] = literal[int] , identifier[delay] = literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[client] . identifier[session_id] :
identifier[client] . identifier[request_session] ()
identifier[concierge_request_header] = identifier[client] . identifier[construct_concierge_header] (
identifier[url] =( literal[string]
literal[string] ))
identifier[attempts] = literal[int]
keyword[while] identifier[attempts] < identifier[retries] :
keyword[if] identifier[attempts] :
identifier[time] . identifier[sleep] ( identifier[delay] )
identifier[result] = identifier[client] . identifier[client] . identifier[service] . identifier[LoginToPortal] (
identifier[_soapheaders] =[ identifier[concierge_request_header] ],
identifier[portalUserName] = identifier[username] ,
identifier[portalPassword] = identifier[password] )
identifier[login_to_portal_result] = identifier[result] [ literal[string] ][ literal[string] ]
keyword[if] identifier[login_to_portal_result] [ literal[string] ]:
identifier[portal_user] = identifier[login_to_portal_result] [ literal[string] ][ literal[string] ]
identifier[session_id] = identifier[get_session_id] ( identifier[result] = identifier[result] )
keyword[return] identifier[PortalUser] ( identifier[membersuite_object_data] = identifier[portal_user] ,
identifier[session_id] = identifier[session_id] )
keyword[else] :
identifier[attempts] += literal[int]
keyword[try] :
identifier[error_code] = identifier[login_to_portal_result] [
literal[string] ][ literal[string] ][ literal[int] ][ literal[string] ]
keyword[except] identifier[IndexError] :
keyword[continue]
keyword[else] :
keyword[if] identifier[attempts] < identifier[retries] keyword[and] identifier[error_code] == literal[string] :
keyword[continue]
keyword[raise] identifier[LoginToPortalError] ( identifier[result] = identifier[result] ) | def login_to_portal(username, password, client, retries=2, delay=0):
"""Log `username` into the MemberSuite Portal.
Returns a PortalUser object if successful, raises
LoginToPortalError if not.
Will retry logging in if a GeneralException occurs, up to `retries`.
Will pause `delay` seconds between retries.
"""
if not client.session_id:
client.request_session() # depends on [control=['if'], data=[]]
concierge_request_header = client.construct_concierge_header(url='http://membersuite.com/contracts/IConciergeAPIService/LoginToPortal')
attempts = 0
while attempts < retries:
if attempts:
time.sleep(delay) # depends on [control=['if'], data=[]]
result = client.client.service.LoginToPortal(_soapheaders=[concierge_request_header], portalUserName=username, portalPassword=password)
login_to_portal_result = result['body']['LoginToPortalResult']
if login_to_portal_result['Success']:
portal_user = login_to_portal_result['ResultValue']['PortalUser']
session_id = get_session_id(result=result)
return PortalUser(membersuite_object_data=portal_user, session_id=session_id) # depends on [control=['if'], data=[]]
else:
attempts += 1
try:
error_code = login_to_portal_result['Errors']['ConciergeError'][0]['Code'] # depends on [control=['try'], data=[]]
except IndexError: # Not a ConciergeError
continue # depends on [control=['except'], data=[]]
else:
if attempts < retries and error_code == 'GeneralException':
continue # depends on [control=['if'], data=[]] # depends on [control=['while'], data=['attempts', 'retries']]
raise LoginToPortalError(result=result) |
def construct_policy(app='coreforrest', env='dev', group='forrest', region='us-east-1', pipeline_settings=None):
"""Assemble IAM Policy for _app_.
Args:
app (str): Name of Spinnaker Application.
env (str): Environment/Account in AWS
group (str):A Application group/namespace
region (str): AWS region
pipeline_settings (dict): Settings from *pipeline.json*.
Returns:
json: Custom IAM Policy for _app_.
None: When no *services* have been defined in *pipeline.json*.
"""
LOG.info('Create custom IAM Policy for %s.', app)
services = pipeline_settings.get('services', {})
LOG.debug('Found requested services: %s', services)
services = auto_service(pipeline_settings=pipeline_settings, services=services)
if services:
credential = get_env_credential(env=env)
account_number = credential['accountId']
statements = []
for service, value in services.items():
if value is True:
items = []
elif isinstance(value, str):
items = [value]
else:
items = value
rendered_statements = render_policy_template(
account_number=account_number,
app=app,
env=env,
group=group,
items=items,
pipeline_settings=pipeline_settings,
region=region,
service=service)
statements.extend(rendered_statements)
if statements:
policy_json = get_template('infrastructure/iam/wrapper.json.j2', statements=json.dumps(statements))
else:
LOG.info('No services defined for %s.', app)
policy_json = None
return policy_json | def function[construct_policy, parameter[app, env, group, region, pipeline_settings]]:
constant[Assemble IAM Policy for _app_.
Args:
app (str): Name of Spinnaker Application.
env (str): Environment/Account in AWS
group (str):A Application group/namespace
region (str): AWS region
pipeline_settings (dict): Settings from *pipeline.json*.
Returns:
json: Custom IAM Policy for _app_.
None: When no *services* have been defined in *pipeline.json*.
]
call[name[LOG].info, parameter[constant[Create custom IAM Policy for %s.], name[app]]]
variable[services] assign[=] call[name[pipeline_settings].get, parameter[constant[services], dictionary[[], []]]]
call[name[LOG].debug, parameter[constant[Found requested services: %s], name[services]]]
variable[services] assign[=] call[name[auto_service], parameter[]]
if name[services] begin[:]
variable[credential] assign[=] call[name[get_env_credential], parameter[]]
variable[account_number] assign[=] call[name[credential]][constant[accountId]]
variable[statements] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18fe91e40>, <ast.Name object at 0x7da18fe90130>]]] in starred[call[name[services].items, parameter[]]] begin[:]
if compare[name[value] is constant[True]] begin[:]
variable[items] assign[=] list[[]]
variable[rendered_statements] assign[=] call[name[render_policy_template], parameter[]]
call[name[statements].extend, parameter[name[rendered_statements]]]
if name[statements] begin[:]
variable[policy_json] assign[=] call[name[get_template], parameter[constant[infrastructure/iam/wrapper.json.j2]]]
return[name[policy_json]] | keyword[def] identifier[construct_policy] ( identifier[app] = literal[string] , identifier[env] = literal[string] , identifier[group] = literal[string] , identifier[region] = literal[string] , identifier[pipeline_settings] = keyword[None] ):
literal[string]
identifier[LOG] . identifier[info] ( literal[string] , identifier[app] )
identifier[services] = identifier[pipeline_settings] . identifier[get] ( literal[string] ,{})
identifier[LOG] . identifier[debug] ( literal[string] , identifier[services] )
identifier[services] = identifier[auto_service] ( identifier[pipeline_settings] = identifier[pipeline_settings] , identifier[services] = identifier[services] )
keyword[if] identifier[services] :
identifier[credential] = identifier[get_env_credential] ( identifier[env] = identifier[env] )
identifier[account_number] = identifier[credential] [ literal[string] ]
identifier[statements] =[]
keyword[for] identifier[service] , identifier[value] keyword[in] identifier[services] . identifier[items] ():
keyword[if] identifier[value] keyword[is] keyword[True] :
identifier[items] =[]
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[str] ):
identifier[items] =[ identifier[value] ]
keyword[else] :
identifier[items] = identifier[value]
identifier[rendered_statements] = identifier[render_policy_template] (
identifier[account_number] = identifier[account_number] ,
identifier[app] = identifier[app] ,
identifier[env] = identifier[env] ,
identifier[group] = identifier[group] ,
identifier[items] = identifier[items] ,
identifier[pipeline_settings] = identifier[pipeline_settings] ,
identifier[region] = identifier[region] ,
identifier[service] = identifier[service] )
identifier[statements] . identifier[extend] ( identifier[rendered_statements] )
keyword[if] identifier[statements] :
identifier[policy_json] = identifier[get_template] ( literal[string] , identifier[statements] = identifier[json] . identifier[dumps] ( identifier[statements] ))
keyword[else] :
identifier[LOG] . identifier[info] ( literal[string] , identifier[app] )
identifier[policy_json] = keyword[None]
keyword[return] identifier[policy_json] | def construct_policy(app='coreforrest', env='dev', group='forrest', region='us-east-1', pipeline_settings=None):
"""Assemble IAM Policy for _app_.
Args:
app (str): Name of Spinnaker Application.
env (str): Environment/Account in AWS
group (str):A Application group/namespace
region (str): AWS region
pipeline_settings (dict): Settings from *pipeline.json*.
Returns:
json: Custom IAM Policy for _app_.
None: When no *services* have been defined in *pipeline.json*.
"""
LOG.info('Create custom IAM Policy for %s.', app)
services = pipeline_settings.get('services', {})
LOG.debug('Found requested services: %s', services)
services = auto_service(pipeline_settings=pipeline_settings, services=services)
if services:
credential = get_env_credential(env=env)
account_number = credential['accountId'] # depends on [control=['if'], data=[]]
statements = []
for (service, value) in services.items():
if value is True:
items = [] # depends on [control=['if'], data=[]]
elif isinstance(value, str):
items = [value] # depends on [control=['if'], data=[]]
else:
items = value
rendered_statements = render_policy_template(account_number=account_number, app=app, env=env, group=group, items=items, pipeline_settings=pipeline_settings, region=region, service=service)
statements.extend(rendered_statements) # depends on [control=['for'], data=[]]
if statements:
policy_json = get_template('infrastructure/iam/wrapper.json.j2', statements=json.dumps(statements)) # depends on [control=['if'], data=[]]
else:
LOG.info('No services defined for %s.', app)
policy_json = None
return policy_json |
def splithost(url):
"""splithost('//host[:port]/path') --> 'host[:port]', '/path'."""
global _hostprog
if _hostprog is None:
import re
_hostprog = re.compile('^//([^/?]*)(.*)$')
match = _hostprog.match(url)
if match:
host_port = match.group(1)
path = match.group(2)
if path and not path.startswith('/'):
path = '/' + path
return host_port, path
return None, url | def function[splithost, parameter[url]]:
constant[splithost('//host[:port]/path') --> 'host[:port]', '/path'.]
<ast.Global object at 0x7da18bccbbb0>
if compare[name[_hostprog] is constant[None]] begin[:]
import module[re]
variable[_hostprog] assign[=] call[name[re].compile, parameter[constant[^//([^/?]*)(.*)$]]]
variable[match] assign[=] call[name[_hostprog].match, parameter[name[url]]]
if name[match] begin[:]
variable[host_port] assign[=] call[name[match].group, parameter[constant[1]]]
variable[path] assign[=] call[name[match].group, parameter[constant[2]]]
if <ast.BoolOp object at 0x7da18bcc8220> begin[:]
variable[path] assign[=] binary_operation[constant[/] + name[path]]
return[tuple[[<ast.Name object at 0x7da18bccadd0>, <ast.Name object at 0x7da18bcca440>]]]
return[tuple[[<ast.Constant object at 0x7da18bcc9030>, <ast.Name object at 0x7da18bcc89a0>]]] | keyword[def] identifier[splithost] ( identifier[url] ):
literal[string]
keyword[global] identifier[_hostprog]
keyword[if] identifier[_hostprog] keyword[is] keyword[None] :
keyword[import] identifier[re]
identifier[_hostprog] = identifier[re] . identifier[compile] ( literal[string] )
identifier[match] = identifier[_hostprog] . identifier[match] ( identifier[url] )
keyword[if] identifier[match] :
identifier[host_port] = identifier[match] . identifier[group] ( literal[int] )
identifier[path] = identifier[match] . identifier[group] ( literal[int] )
keyword[if] identifier[path] keyword[and] keyword[not] identifier[path] . identifier[startswith] ( literal[string] ):
identifier[path] = literal[string] + identifier[path]
keyword[return] identifier[host_port] , identifier[path]
keyword[return] keyword[None] , identifier[url] | def splithost(url):
"""splithost('//host[:port]/path') --> 'host[:port]', '/path'."""
global _hostprog
if _hostprog is None:
import re
_hostprog = re.compile('^//([^/?]*)(.*)$') # depends on [control=['if'], data=['_hostprog']]
match = _hostprog.match(url)
if match:
host_port = match.group(1)
path = match.group(2)
if path and (not path.startswith('/')):
path = '/' + path # depends on [control=['if'], data=[]]
return (host_port, path) # depends on [control=['if'], data=[]]
return (None, url) |
def saveAsJSON(self, fp):
"""
Write the records out as JSON. The first JSON object saved contains
the BLAST parameters.
@param fp: A C{str} file pointer to write to.
"""
first = True
for record in self.records():
if first:
print(dumps(self.params, separators=(',', ':')), file=fp)
first = False
print(dumps(self._convertBlastRecordToDict(record),
separators=(',', ':')), file=fp) | def function[saveAsJSON, parameter[self, fp]]:
constant[
Write the records out as JSON. The first JSON object saved contains
the BLAST parameters.
@param fp: A C{str} file pointer to write to.
]
variable[first] assign[=] constant[True]
for taget[name[record]] in starred[call[name[self].records, parameter[]]] begin[:]
if name[first] begin[:]
call[name[print], parameter[call[name[dumps], parameter[name[self].params]]]]
variable[first] assign[=] constant[False]
call[name[print], parameter[call[name[dumps], parameter[call[name[self]._convertBlastRecordToDict, parameter[name[record]]]]]]] | keyword[def] identifier[saveAsJSON] ( identifier[self] , identifier[fp] ):
literal[string]
identifier[first] = keyword[True]
keyword[for] identifier[record] keyword[in] identifier[self] . identifier[records] ():
keyword[if] identifier[first] :
identifier[print] ( identifier[dumps] ( identifier[self] . identifier[params] , identifier[separators] =( literal[string] , literal[string] )), identifier[file] = identifier[fp] )
identifier[first] = keyword[False]
identifier[print] ( identifier[dumps] ( identifier[self] . identifier[_convertBlastRecordToDict] ( identifier[record] ),
identifier[separators] =( literal[string] , literal[string] )), identifier[file] = identifier[fp] ) | def saveAsJSON(self, fp):
"""
Write the records out as JSON. The first JSON object saved contains
the BLAST parameters.
@param fp: A C{str} file pointer to write to.
"""
first = True
for record in self.records():
if first:
print(dumps(self.params, separators=(',', ':')), file=fp)
first = False # depends on [control=['if'], data=[]]
print(dumps(self._convertBlastRecordToDict(record), separators=(',', ':')), file=fp) # depends on [control=['for'], data=['record']] |
def save(self, *args, **kwargs):
"""
Extends the default save method by verifying that the chosen
organization user is associated with the organization.
Method validates against the primary key of the organization because
when validating an inherited model it may be checking an instance of
`Organization` against an instance of `CustomOrganization`. Mutli-table
inheritence means the database keys will be identical though.
"""
from organizations.exceptions import OrganizationMismatch
if self.organization_user.organization.pk != self.organization.pk:
raise OrganizationMismatch
else:
super(AbstractBaseOrganizationOwner, self).save(*args, **kwargs) | def function[save, parameter[self]]:
constant[
Extends the default save method by verifying that the chosen
organization user is associated with the organization.
Method validates against the primary key of the organization because
when validating an inherited model it may be checking an instance of
`Organization` against an instance of `CustomOrganization`. Mutli-table
inheritence means the database keys will be identical though.
]
from relative_module[organizations.exceptions] import module[OrganizationMismatch]
if compare[name[self].organization_user.organization.pk not_equal[!=] name[self].organization.pk] begin[:]
<ast.Raise object at 0x7da18f58c8b0> | keyword[def] identifier[save] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[from] identifier[organizations] . identifier[exceptions] keyword[import] identifier[OrganizationMismatch]
keyword[if] identifier[self] . identifier[organization_user] . identifier[organization] . identifier[pk] != identifier[self] . identifier[organization] . identifier[pk] :
keyword[raise] identifier[OrganizationMismatch]
keyword[else] :
identifier[super] ( identifier[AbstractBaseOrganizationOwner] , identifier[self] ). identifier[save] (* identifier[args] ,** identifier[kwargs] ) | def save(self, *args, **kwargs):
"""
Extends the default save method by verifying that the chosen
organization user is associated with the organization.
Method validates against the primary key of the organization because
when validating an inherited model it may be checking an instance of
`Organization` against an instance of `CustomOrganization`. Mutli-table
inheritence means the database keys will be identical though.
"""
from organizations.exceptions import OrganizationMismatch
if self.organization_user.organization.pk != self.organization.pk:
raise OrganizationMismatch # depends on [control=['if'], data=[]]
else:
super(AbstractBaseOrganizationOwner, self).save(*args, **kwargs) |
def update_user(self, ID, data):
"""Update a User."""
# http://teampasswordmanager.com/docs/api-users/#update_user
log.info('Update user %s with %s' % (ID, data))
self.put('users/%s.json' % ID, data) | def function[update_user, parameter[self, ID, data]]:
constant[Update a User.]
call[name[log].info, parameter[binary_operation[constant[Update user %s with %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1193910>, <ast.Name object at 0x7da1b11933a0>]]]]]
call[name[self].put, parameter[binary_operation[constant[users/%s.json] <ast.Mod object at 0x7da2590d6920> name[ID]], name[data]]] | keyword[def] identifier[update_user] ( identifier[self] , identifier[ID] , identifier[data] ):
literal[string]
identifier[log] . identifier[info] ( literal[string] %( identifier[ID] , identifier[data] ))
identifier[self] . identifier[put] ( literal[string] % identifier[ID] , identifier[data] ) | def update_user(self, ID, data):
"""Update a User."""
# http://teampasswordmanager.com/docs/api-users/#update_user
log.info('Update user %s with %s' % (ID, data))
self.put('users/%s.json' % ID, data) |
def after_model_change(self, form, User, is_created):
"""Send password instructions if desired."""
if is_created and form.notification.data is True:
send_reset_password_instructions(User) | def function[after_model_change, parameter[self, form, User, is_created]]:
constant[Send password instructions if desired.]
if <ast.BoolOp object at 0x7da2044c1ea0> begin[:]
call[name[send_reset_password_instructions], parameter[name[User]]] | keyword[def] identifier[after_model_change] ( identifier[self] , identifier[form] , identifier[User] , identifier[is_created] ):
literal[string]
keyword[if] identifier[is_created] keyword[and] identifier[form] . identifier[notification] . identifier[data] keyword[is] keyword[True] :
identifier[send_reset_password_instructions] ( identifier[User] ) | def after_model_change(self, form, User, is_created):
"""Send password instructions if desired."""
if is_created and form.notification.data is True:
send_reset_password_instructions(User) # depends on [control=['if'], data=[]] |
def add_samples(self, samples, reverse=False):
"""
Concatenate the given new samples to the current audio data.
This function initializes the memory if no audio data
is present already.
If ``reverse`` is ``True``, the new samples
will be reversed and then concatenated.
:param samples: the new samples to be concatenated
:type samples: :class:`numpy.ndarray` (1D)
:param bool reverse: if ``True``, concatenate new samples after reversing them
.. versionadded:: 1.2.1
"""
self.log(u"Adding samples...")
samples_length = len(samples)
current_length = self.__samples_length
future_length = current_length + samples_length
if (self.__samples is None) or (self.__samples_capacity < future_length):
self.preallocate_memory(2 * future_length)
if reverse:
self.__samples[current_length:future_length] = samples[::-1]
else:
self.__samples[current_length:future_length] = samples[:]
self.__samples_length = future_length
self._update_length()
self.log(u"Adding samples... done") | def function[add_samples, parameter[self, samples, reverse]]:
constant[
Concatenate the given new samples to the current audio data.
This function initializes the memory if no audio data
is present already.
If ``reverse`` is ``True``, the new samples
will be reversed and then concatenated.
:param samples: the new samples to be concatenated
:type samples: :class:`numpy.ndarray` (1D)
:param bool reverse: if ``True``, concatenate new samples after reversing them
.. versionadded:: 1.2.1
]
call[name[self].log, parameter[constant[Adding samples...]]]
variable[samples_length] assign[=] call[name[len], parameter[name[samples]]]
variable[current_length] assign[=] name[self].__samples_length
variable[future_length] assign[=] binary_operation[name[current_length] + name[samples_length]]
if <ast.BoolOp object at 0x7da1b18f9de0> begin[:]
call[name[self].preallocate_memory, parameter[binary_operation[constant[2] * name[future_length]]]]
if name[reverse] begin[:]
call[name[self].__samples][<ast.Slice object at 0x7da1b18fb460>] assign[=] call[name[samples]][<ast.Slice object at 0x7da1b18f8c40>]
name[self].__samples_length assign[=] name[future_length]
call[name[self]._update_length, parameter[]]
call[name[self].log, parameter[constant[Adding samples... done]]] | keyword[def] identifier[add_samples] ( identifier[self] , identifier[samples] , identifier[reverse] = keyword[False] ):
literal[string]
identifier[self] . identifier[log] ( literal[string] )
identifier[samples_length] = identifier[len] ( identifier[samples] )
identifier[current_length] = identifier[self] . identifier[__samples_length]
identifier[future_length] = identifier[current_length] + identifier[samples_length]
keyword[if] ( identifier[self] . identifier[__samples] keyword[is] keyword[None] ) keyword[or] ( identifier[self] . identifier[__samples_capacity] < identifier[future_length] ):
identifier[self] . identifier[preallocate_memory] ( literal[int] * identifier[future_length] )
keyword[if] identifier[reverse] :
identifier[self] . identifier[__samples] [ identifier[current_length] : identifier[future_length] ]= identifier[samples] [::- literal[int] ]
keyword[else] :
identifier[self] . identifier[__samples] [ identifier[current_length] : identifier[future_length] ]= identifier[samples] [:]
identifier[self] . identifier[__samples_length] = identifier[future_length]
identifier[self] . identifier[_update_length] ()
identifier[self] . identifier[log] ( literal[string] ) | def add_samples(self, samples, reverse=False):
"""
Concatenate the given new samples to the current audio data.
This function initializes the memory if no audio data
is present already.
If ``reverse`` is ``True``, the new samples
will be reversed and then concatenated.
:param samples: the new samples to be concatenated
:type samples: :class:`numpy.ndarray` (1D)
:param bool reverse: if ``True``, concatenate new samples after reversing them
.. versionadded:: 1.2.1
"""
self.log(u'Adding samples...')
samples_length = len(samples)
current_length = self.__samples_length
future_length = current_length + samples_length
if self.__samples is None or self.__samples_capacity < future_length:
self.preallocate_memory(2 * future_length) # depends on [control=['if'], data=[]]
if reverse:
self.__samples[current_length:future_length] = samples[::-1] # depends on [control=['if'], data=[]]
else:
self.__samples[current_length:future_length] = samples[:]
self.__samples_length = future_length
self._update_length()
self.log(u'Adding samples... done') |
def _get_data(self, file_url, file_name='', method_title='', argument_title=''):
''' a helper method to retrieve data buffer for a file url
:param file_url: string with url to file
:param file_name: [optional] string with name to affix to file buffer
:param method_title: [optional] string with name of class method calling
:param argument_title: [optional] string with name of method argument key
:return: byte data buffer with file data
'''
# https://docs.python.org/3/library/io.html#io.BytesIO
import io
import requests
# fill empty values
if not file_name:
file_name = 'file'
if not method_title:
method_title = '%s._get_data' % self.__class__.__name__
if not argument_title:
argument_title = 'file_url'
# request file from url
try:
remote_file = requests.get(file_url)
except requests.exceptions.ConnectionError as err:
if self.requests_handler:
return self.requests_handler(err)
else:
raise
except:
raise ValueError('%s(%s=%s) is not a valid url.' % (method_title, argument_title, file_url))
# add contents to buffer
file_buffer = io.BytesIO(remote_file.content)
file_buffer.name = '%s' % file_name
return file_buffer | def function[_get_data, parameter[self, file_url, file_name, method_title, argument_title]]:
constant[ a helper method to retrieve data buffer for a file url
:param file_url: string with url to file
:param file_name: [optional] string with name to affix to file buffer
:param method_title: [optional] string with name of class method calling
:param argument_title: [optional] string with name of method argument key
:return: byte data buffer with file data
]
import module[io]
import module[requests]
if <ast.UnaryOp object at 0x7da204344af0> begin[:]
variable[file_name] assign[=] constant[file]
if <ast.UnaryOp object at 0x7da204344520> begin[:]
variable[method_title] assign[=] binary_operation[constant[%s._get_data] <ast.Mod object at 0x7da2590d6920> name[self].__class__.__name__]
if <ast.UnaryOp object at 0x7da204346fe0> begin[:]
variable[argument_title] assign[=] constant[file_url]
<ast.Try object at 0x7da2043475b0>
variable[file_buffer] assign[=] call[name[io].BytesIO, parameter[name[remote_file].content]]
name[file_buffer].name assign[=] binary_operation[constant[%s] <ast.Mod object at 0x7da2590d6920> name[file_name]]
return[name[file_buffer]] | keyword[def] identifier[_get_data] ( identifier[self] , identifier[file_url] , identifier[file_name] = literal[string] , identifier[method_title] = literal[string] , identifier[argument_title] = literal[string] ):
literal[string]
keyword[import] identifier[io]
keyword[import] identifier[requests]
keyword[if] keyword[not] identifier[file_name] :
identifier[file_name] = literal[string]
keyword[if] keyword[not] identifier[method_title] :
identifier[method_title] = literal[string] % identifier[self] . identifier[__class__] . identifier[__name__]
keyword[if] keyword[not] identifier[argument_title] :
identifier[argument_title] = literal[string]
keyword[try] :
identifier[remote_file] = identifier[requests] . identifier[get] ( identifier[file_url] )
keyword[except] identifier[requests] . identifier[exceptions] . identifier[ConnectionError] keyword[as] identifier[err] :
keyword[if] identifier[self] . identifier[requests_handler] :
keyword[return] identifier[self] . identifier[requests_handler] ( identifier[err] )
keyword[else] :
keyword[raise]
keyword[except] :
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[method_title] , identifier[argument_title] , identifier[file_url] ))
identifier[file_buffer] = identifier[io] . identifier[BytesIO] ( identifier[remote_file] . identifier[content] )
identifier[file_buffer] . identifier[name] = literal[string] % identifier[file_name]
keyword[return] identifier[file_buffer] | def _get_data(self, file_url, file_name='', method_title='', argument_title=''):
""" a helper method to retrieve data buffer for a file url
:param file_url: string with url to file
:param file_name: [optional] string with name to affix to file buffer
:param method_title: [optional] string with name of class method calling
:param argument_title: [optional] string with name of method argument key
:return: byte data buffer with file data
""" # https://docs.python.org/3/library/io.html#io.BytesIO
import io
import requests # fill empty values
if not file_name:
file_name = 'file' # depends on [control=['if'], data=[]]
if not method_title:
method_title = '%s._get_data' % self.__class__.__name__ # depends on [control=['if'], data=[]]
if not argument_title:
argument_title = 'file_url' # depends on [control=['if'], data=[]] # request file from url
try:
remote_file = requests.get(file_url) # depends on [control=['try'], data=[]]
except requests.exceptions.ConnectionError as err:
if self.requests_handler:
return self.requests_handler(err) # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=['err']]
except:
raise ValueError('%s(%s=%s) is not a valid url.' % (method_title, argument_title, file_url)) # depends on [control=['except'], data=[]] # add contents to buffer
file_buffer = io.BytesIO(remote_file.content)
file_buffer.name = '%s' % file_name
return file_buffer |
def account_representative_set(self, wallet, account, representative, work=None):
"""
Sets the representative for **account** in **wallet**
.. enable_control required
:param wallet: Wallet to use for account
:type wallet: str
:param account: Account to set representative for
:type account: str
:param representative: Representative to set to
:type representative: str
:param work: If set, is used as the work for the block
:type work: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.account_representative_set(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
... account="xrb_39a73oy5ungrhxy5z5oao1xso4zo7dmgpjd4u74xcrx3r1w6rtazuouw6qfi",
... representative="xrb_16u1uufyoig8777y6r8iqjtrw8sg8maqrm36zzcm95jmbd9i9aj5i8abr8u5"
... )
"000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
"""
wallet = self._process_value(wallet, 'wallet')
account = self._process_value(account, 'account')
representative = self._process_value(representative, 'account')
payload = {
"wallet": wallet,
"account": account,
"representative": representative,
}
if work is not None:
payload['work'] = self._process_value(work, 'work')
resp = self.call('account_representative_set', payload)
return resp['block'] | def function[account_representative_set, parameter[self, wallet, account, representative, work]]:
constant[
Sets the representative for **account** in **wallet**
.. enable_control required
:param wallet: Wallet to use for account
:type wallet: str
:param account: Account to set representative for
:type account: str
:param representative: Representative to set to
:type representative: str
:param work: If set, is used as the work for the block
:type work: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.account_representative_set(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
... account="xrb_39a73oy5ungrhxy5z5oao1xso4zo7dmgpjd4u74xcrx3r1w6rtazuouw6qfi",
... representative="xrb_16u1uufyoig8777y6r8iqjtrw8sg8maqrm36zzcm95jmbd9i9aj5i8abr8u5"
... )
"000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
]
variable[wallet] assign[=] call[name[self]._process_value, parameter[name[wallet], constant[wallet]]]
variable[account] assign[=] call[name[self]._process_value, parameter[name[account], constant[account]]]
variable[representative] assign[=] call[name[self]._process_value, parameter[name[representative], constant[account]]]
variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da2044c1f60>, <ast.Constant object at 0x7da2044c22f0>, <ast.Constant object at 0x7da2044c3ac0>], [<ast.Name object at 0x7da18bc72c20>, <ast.Name object at 0x7da18bc70970>, <ast.Name object at 0x7da18bc710f0>]]
if compare[name[work] is_not constant[None]] begin[:]
call[name[payload]][constant[work]] assign[=] call[name[self]._process_value, parameter[name[work], constant[work]]]
variable[resp] assign[=] call[name[self].call, parameter[constant[account_representative_set], name[payload]]]
return[call[name[resp]][constant[block]]] | keyword[def] identifier[account_representative_set] ( identifier[self] , identifier[wallet] , identifier[account] , identifier[representative] , identifier[work] = keyword[None] ):
literal[string]
identifier[wallet] = identifier[self] . identifier[_process_value] ( identifier[wallet] , literal[string] )
identifier[account] = identifier[self] . identifier[_process_value] ( identifier[account] , literal[string] )
identifier[representative] = identifier[self] . identifier[_process_value] ( identifier[representative] , literal[string] )
identifier[payload] ={
literal[string] : identifier[wallet] ,
literal[string] : identifier[account] ,
literal[string] : identifier[representative] ,
}
keyword[if] identifier[work] keyword[is] keyword[not] keyword[None] :
identifier[payload] [ literal[string] ]= identifier[self] . identifier[_process_value] ( identifier[work] , literal[string] )
identifier[resp] = identifier[self] . identifier[call] ( literal[string] , identifier[payload] )
keyword[return] identifier[resp] [ literal[string] ] | def account_representative_set(self, wallet, account, representative, work=None):
"""
Sets the representative for **account** in **wallet**
.. enable_control required
:param wallet: Wallet to use for account
:type wallet: str
:param account: Account to set representative for
:type account: str
:param representative: Representative to set to
:type representative: str
:param work: If set, is used as the work for the block
:type work: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.account_representative_set(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
... account="xrb_39a73oy5ungrhxy5z5oao1xso4zo7dmgpjd4u74xcrx3r1w6rtazuouw6qfi",
... representative="xrb_16u1uufyoig8777y6r8iqjtrw8sg8maqrm36zzcm95jmbd9i9aj5i8abr8u5"
... )
"000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
"""
wallet = self._process_value(wallet, 'wallet')
account = self._process_value(account, 'account')
representative = self._process_value(representative, 'account')
payload = {'wallet': wallet, 'account': account, 'representative': representative}
if work is not None:
payload['work'] = self._process_value(work, 'work') # depends on [control=['if'], data=['work']]
resp = self.call('account_representative_set', payload)
return resp['block'] |
def delete(self) :
"deletes the document from the database"
if self.URL is None :
raise DeletionError("Can't delete a document that was not saved")
r = self.connection.session.delete(self.URL)
data = r.json()
if (r.status_code != 200 and r.status_code != 202) or 'error' in data :
raise DeletionError(data['errorMessage'], data)
self.reset(self.collection)
self.modified = True | def function[delete, parameter[self]]:
constant[deletes the document from the database]
if compare[name[self].URL is constant[None]] begin[:]
<ast.Raise object at 0x7da1b0f1b550>
variable[r] assign[=] call[name[self].connection.session.delete, parameter[name[self].URL]]
variable[data] assign[=] call[name[r].json, parameter[]]
if <ast.BoolOp object at 0x7da1b0fcfa00> begin[:]
<ast.Raise object at 0x7da1b0fcd9f0>
call[name[self].reset, parameter[name[self].collection]]
name[self].modified assign[=] constant[True] | keyword[def] identifier[delete] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[URL] keyword[is] keyword[None] :
keyword[raise] identifier[DeletionError] ( literal[string] )
identifier[r] = identifier[self] . identifier[connection] . identifier[session] . identifier[delete] ( identifier[self] . identifier[URL] )
identifier[data] = identifier[r] . identifier[json] ()
keyword[if] ( identifier[r] . identifier[status_code] != literal[int] keyword[and] identifier[r] . identifier[status_code] != literal[int] ) keyword[or] literal[string] keyword[in] identifier[data] :
keyword[raise] identifier[DeletionError] ( identifier[data] [ literal[string] ], identifier[data] )
identifier[self] . identifier[reset] ( identifier[self] . identifier[collection] )
identifier[self] . identifier[modified] = keyword[True] | def delete(self):
"""deletes the document from the database"""
if self.URL is None:
raise DeletionError("Can't delete a document that was not saved") # depends on [control=['if'], data=[]]
r = self.connection.session.delete(self.URL)
data = r.json()
if r.status_code != 200 and r.status_code != 202 or 'error' in data:
raise DeletionError(data['errorMessage'], data) # depends on [control=['if'], data=[]]
self.reset(self.collection)
self.modified = True |
def __get_formatter(self, extension_name):
"""
Banana banana
"""
ext = self.extensions.get(extension_name)
if ext:
return ext.formatter
return None | def function[__get_formatter, parameter[self, extension_name]]:
constant[
Banana banana
]
variable[ext] assign[=] call[name[self].extensions.get, parameter[name[extension_name]]]
if name[ext] begin[:]
return[name[ext].formatter]
return[constant[None]] | keyword[def] identifier[__get_formatter] ( identifier[self] , identifier[extension_name] ):
literal[string]
identifier[ext] = identifier[self] . identifier[extensions] . identifier[get] ( identifier[extension_name] )
keyword[if] identifier[ext] :
keyword[return] identifier[ext] . identifier[formatter]
keyword[return] keyword[None] | def __get_formatter(self, extension_name):
"""
Banana banana
"""
ext = self.extensions.get(extension_name)
if ext:
return ext.formatter # depends on [control=['if'], data=[]]
return None |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.