code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def init(self):
"""
Fetch the credentials (and cache them on disk).
"""
self.credentials = self._get_credentials()
self.http = self.credentials.authorize(httplib2.Http())
self.service = discovery.build(
'sheets',
'v4',
http=self.http,
discoveryServiceUrl=DISCOVERY_URL,
) | def function[init, parameter[self]]:
constant[
Fetch the credentials (and cache them on disk).
]
name[self].credentials assign[=] call[name[self]._get_credentials, parameter[]]
name[self].http assign[=] call[name[self].credentials.authorize, parameter[call[name[httplib2].Http, parameter[]]]]
name[self].service assign[=] call[name[discovery].build, parameter[constant[sheets], constant[v4]]] | keyword[def] identifier[init] ( identifier[self] ):
literal[string]
identifier[self] . identifier[credentials] = identifier[self] . identifier[_get_credentials] ()
identifier[self] . identifier[http] = identifier[self] . identifier[credentials] . identifier[authorize] ( identifier[httplib2] . identifier[Http] ())
identifier[self] . identifier[service] = identifier[discovery] . identifier[build] (
literal[string] ,
literal[string] ,
identifier[http] = identifier[self] . identifier[http] ,
identifier[discoveryServiceUrl] = identifier[DISCOVERY_URL] ,
) | def init(self):
"""
Fetch the credentials (and cache them on disk).
"""
self.credentials = self._get_credentials()
self.http = self.credentials.authorize(httplib2.Http())
self.service = discovery.build('sheets', 'v4', http=self.http, discoveryServiceUrl=DISCOVERY_URL) |
def competitions_submissions_upload(self, file, guid, content_length, last_modified_date_utc, **kwargs): # noqa: E501
"""Upload competition submission file # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.competitions_submissions_upload(file, guid, content_length, last_modified_date_utc, async_req=True)
>>> result = thread.get()
:param async_req bool
:param file file: Competition submission file (required)
:param str guid: Location where submission should be uploaded (required)
:param int content_length: Content length of file in bytes (required)
:param int last_modified_date_utc: Last modified date of file in milliseconds since epoch in UTC (required)
:return: Result
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.competitions_submissions_upload_with_http_info(file, guid, content_length, last_modified_date_utc, **kwargs) # noqa: E501
else:
(data) = self.competitions_submissions_upload_with_http_info(file, guid, content_length, last_modified_date_utc, **kwargs) # noqa: E501
return data | def function[competitions_submissions_upload, parameter[self, file, guid, content_length, last_modified_date_utc]]:
constant[Upload competition submission file # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.competitions_submissions_upload(file, guid, content_length, last_modified_date_utc, async_req=True)
>>> result = thread.get()
:param async_req bool
:param file file: Competition submission file (required)
:param str guid: Location where submission should be uploaded (required)
:param int content_length: Content length of file in bytes (required)
:param int last_modified_date_utc: Last modified date of file in milliseconds since epoch in UTC (required)
:return: Result
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].competitions_submissions_upload_with_http_info, parameter[name[file], name[guid], name[content_length], name[last_modified_date_utc]]]] | keyword[def] identifier[competitions_submissions_upload] ( identifier[self] , identifier[file] , identifier[guid] , identifier[content_length] , identifier[last_modified_date_utc] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[competitions_submissions_upload_with_http_info] ( identifier[file] , identifier[guid] , identifier[content_length] , identifier[last_modified_date_utc] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[competitions_submissions_upload_with_http_info] ( identifier[file] , identifier[guid] , identifier[content_length] , identifier[last_modified_date_utc] ,** identifier[kwargs] )
keyword[return] identifier[data] | def competitions_submissions_upload(self, file, guid, content_length, last_modified_date_utc, **kwargs): # noqa: E501
'Upload competition submission file # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.competitions_submissions_upload(file, guid, content_length, last_modified_date_utc, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param file file: Competition submission file (required)\n :param str guid: Location where submission should be uploaded (required)\n :param int content_length: Content length of file in bytes (required)\n :param int last_modified_date_utc: Last modified date of file in milliseconds since epoch in UTC (required)\n :return: Result\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.competitions_submissions_upload_with_http_info(file, guid, content_length, last_modified_date_utc, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.competitions_submissions_upload_with_http_info(file, guid, content_length, last_modified_date_utc, **kwargs) # noqa: E501
return data |
def export_data( self ):
"""Export data to a byte array."""
klass = self.__class__
output = bytearray( b'\x00'*self.get_size() )
# prevalidate all data before export.
# this is important to ensure that any dependent fields
# are updated beforehand, e.g. a count referenced
# in a BlockField
queue = []
for name in klass._fields:
self.scrub_field( name )
self.validate_field( name )
self.update_deps()
for name in klass._fields:
klass._fields[name].update_buffer_with_value(
self._field_data[name], output, parent=self
)
for name, check in klass._checks.items():
check.update_buffer( output, parent=self )
return output | def function[export_data, parameter[self]]:
constant[Export data to a byte array.]
variable[klass] assign[=] name[self].__class__
variable[output] assign[=] call[name[bytearray], parameter[binary_operation[constant[b'\x00'] * call[name[self].get_size, parameter[]]]]]
variable[queue] assign[=] list[[]]
for taget[name[name]] in starred[name[klass]._fields] begin[:]
call[name[self].scrub_field, parameter[name[name]]]
call[name[self].validate_field, parameter[name[name]]]
call[name[self].update_deps, parameter[]]
for taget[name[name]] in starred[name[klass]._fields] begin[:]
call[call[name[klass]._fields][name[name]].update_buffer_with_value, parameter[call[name[self]._field_data][name[name]], name[output]]]
for taget[tuple[[<ast.Name object at 0x7da1b11e0cd0>, <ast.Name object at 0x7da1b11e2ec0>]]] in starred[call[name[klass]._checks.items, parameter[]]] begin[:]
call[name[check].update_buffer, parameter[name[output]]]
return[name[output]] | keyword[def] identifier[export_data] ( identifier[self] ):
literal[string]
identifier[klass] = identifier[self] . identifier[__class__]
identifier[output] = identifier[bytearray] ( literal[string] * identifier[self] . identifier[get_size] ())
identifier[queue] =[]
keyword[for] identifier[name] keyword[in] identifier[klass] . identifier[_fields] :
identifier[self] . identifier[scrub_field] ( identifier[name] )
identifier[self] . identifier[validate_field] ( identifier[name] )
identifier[self] . identifier[update_deps] ()
keyword[for] identifier[name] keyword[in] identifier[klass] . identifier[_fields] :
identifier[klass] . identifier[_fields] [ identifier[name] ]. identifier[update_buffer_with_value] (
identifier[self] . identifier[_field_data] [ identifier[name] ], identifier[output] , identifier[parent] = identifier[self]
)
keyword[for] identifier[name] , identifier[check] keyword[in] identifier[klass] . identifier[_checks] . identifier[items] ():
identifier[check] . identifier[update_buffer] ( identifier[output] , identifier[parent] = identifier[self] )
keyword[return] identifier[output] | def export_data(self):
"""Export data to a byte array."""
klass = self.__class__
output = bytearray(b'\x00' * self.get_size())
# prevalidate all data before export.
# this is important to ensure that any dependent fields
# are updated beforehand, e.g. a count referenced
# in a BlockField
queue = []
for name in klass._fields:
self.scrub_field(name)
self.validate_field(name) # depends on [control=['for'], data=['name']]
self.update_deps()
for name in klass._fields:
klass._fields[name].update_buffer_with_value(self._field_data[name], output, parent=self) # depends on [control=['for'], data=['name']]
for (name, check) in klass._checks.items():
check.update_buffer(output, parent=self) # depends on [control=['for'], data=[]]
return output |
def evaluate(self, *args):
"""
No argument passed in:
Evaluate the model to set train = false, useful when doing test/forward
:return: layer itself
Three arguments passed in:
A method to benchmark the model quality.
:param dataset: the input data
:param batch_size: batch size
:param val_methods: a list of validation methods. i.e: Top1Accuracy,Top5Accuracy and Loss.
:return: a list of the metrics result
"""
if len(args) == 0:
callBigDlFunc(self.bigdl_type,
"evaluate", self.value)
return self
elif len(args) == 3:
dataset, batch_size, val_methods = args
if (isinstance(dataset, ImageFrame)):
return callBigDlFunc(self.bigdl_type,
"modelEvaluateImageFrame",
self.value,
dataset, batch_size, val_methods)
else:
return callBigDlFunc(self.bigdl_type,
"modelEvaluate",
self.value,
dataset, batch_size, val_methods)
else:
raise Exception("Error when calling evaluate(): it takes no argument or exactly three arguments only") | def function[evaluate, parameter[self]]:
constant[
No argument passed in:
Evaluate the model to set train = false, useful when doing test/forward
:return: layer itself
Three arguments passed in:
A method to benchmark the model quality.
:param dataset: the input data
:param batch_size: batch size
:param val_methods: a list of validation methods. i.e: Top1Accuracy,Top5Accuracy and Loss.
:return: a list of the metrics result
]
if compare[call[name[len], parameter[name[args]]] equal[==] constant[0]] begin[:]
call[name[callBigDlFunc], parameter[name[self].bigdl_type, constant[evaluate], name[self].value]]
return[name[self]] | keyword[def] identifier[evaluate] ( identifier[self] ,* identifier[args] ):
literal[string]
keyword[if] identifier[len] ( identifier[args] )== literal[int] :
identifier[callBigDlFunc] ( identifier[self] . identifier[bigdl_type] ,
literal[string] , identifier[self] . identifier[value] )
keyword[return] identifier[self]
keyword[elif] identifier[len] ( identifier[args] )== literal[int] :
identifier[dataset] , identifier[batch_size] , identifier[val_methods] = identifier[args]
keyword[if] ( identifier[isinstance] ( identifier[dataset] , identifier[ImageFrame] )):
keyword[return] identifier[callBigDlFunc] ( identifier[self] . identifier[bigdl_type] ,
literal[string] ,
identifier[self] . identifier[value] ,
identifier[dataset] , identifier[batch_size] , identifier[val_methods] )
keyword[else] :
keyword[return] identifier[callBigDlFunc] ( identifier[self] . identifier[bigdl_type] ,
literal[string] ,
identifier[self] . identifier[value] ,
identifier[dataset] , identifier[batch_size] , identifier[val_methods] )
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] ) | def evaluate(self, *args):
"""
No argument passed in:
Evaluate the model to set train = false, useful when doing test/forward
:return: layer itself
Three arguments passed in:
A method to benchmark the model quality.
:param dataset: the input data
:param batch_size: batch size
:param val_methods: a list of validation methods. i.e: Top1Accuracy,Top5Accuracy and Loss.
:return: a list of the metrics result
"""
if len(args) == 0:
callBigDlFunc(self.bigdl_type, 'evaluate', self.value)
return self # depends on [control=['if'], data=[]]
elif len(args) == 3:
(dataset, batch_size, val_methods) = args
if isinstance(dataset, ImageFrame):
return callBigDlFunc(self.bigdl_type, 'modelEvaluateImageFrame', self.value, dataset, batch_size, val_methods) # depends on [control=['if'], data=[]]
else:
return callBigDlFunc(self.bigdl_type, 'modelEvaluate', self.value, dataset, batch_size, val_methods) # depends on [control=['if'], data=[]]
else:
raise Exception('Error when calling evaluate(): it takes no argument or exactly three arguments only') |
def integer_id(self):
"""Return the integer id in the last (kind, id) pair, if any.
Returns:
An integer id, or None if the key has a string id or is incomplete.
"""
id = self.id()
if not isinstance(id, (int, long)):
id = None
return id | def function[integer_id, parameter[self]]:
constant[Return the integer id in the last (kind, id) pair, if any.
Returns:
An integer id, or None if the key has a string id or is incomplete.
]
variable[id] assign[=] call[name[self].id, parameter[]]
if <ast.UnaryOp object at 0x7da1b23445e0> begin[:]
variable[id] assign[=] constant[None]
return[name[id]] | keyword[def] identifier[integer_id] ( identifier[self] ):
literal[string]
identifier[id] = identifier[self] . identifier[id] ()
keyword[if] keyword[not] identifier[isinstance] ( identifier[id] ,( identifier[int] , identifier[long] )):
identifier[id] = keyword[None]
keyword[return] identifier[id] | def integer_id(self):
"""Return the integer id in the last (kind, id) pair, if any.
Returns:
An integer id, or None if the key has a string id or is incomplete.
"""
id = self.id()
if not isinstance(id, (int, long)):
id = None # depends on [control=['if'], data=[]]
return id |
def get_certificate(self, **kwargs):
"""GetCertificate.
[Preview API]
:rtype: object
"""
response = self._send(http_method='GET',
location_id='2e0dbce7-a327-4bc0-a291-056139393f6d',
version='5.0-preview.1',
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback) | def function[get_certificate, parameter[self]]:
constant[GetCertificate.
[Preview API]
:rtype: object
]
variable[response] assign[=] call[name[self]._send, parameter[]]
if compare[constant[callback] in name[kwargs]] begin[:]
variable[callback] assign[=] call[name[kwargs]][constant[callback]]
return[call[name[self]._client.stream_download, parameter[name[response]]]] | keyword[def] identifier[get_certificate] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[response] = identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] ,
identifier[location_id] = literal[string] ,
identifier[version] = literal[string] ,
identifier[accept_media_type] = literal[string] )
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[callback] = identifier[kwargs] [ literal[string] ]
keyword[else] :
identifier[callback] = keyword[None]
keyword[return] identifier[self] . identifier[_client] . identifier[stream_download] ( identifier[response] , identifier[callback] = identifier[callback] ) | def get_certificate(self, **kwargs):
"""GetCertificate.
[Preview API]
:rtype: object
"""
response = self._send(http_method='GET', location_id='2e0dbce7-a327-4bc0-a291-056139393f6d', version='5.0-preview.1', accept_media_type='application/octet-stream')
if 'callback' in kwargs:
callback = kwargs['callback'] # depends on [control=['if'], data=['kwargs']]
else:
callback = None
return self._client.stream_download(response, callback=callback) |
async def redirect_async(self, redirect, auth):
"""Redirect the client endpoint using a Link DETACH redirect
response.
:param redirect: The Link DETACH redirect details.
:type redirect: ~uamqp.errors.LinkRedirect
:param auth: Authentication credentials to the redirected endpoint.
:type auth: ~uamqp.authentication.common.AMQPAuth
"""
if self._ext_connection:
raise ValueError(
"Clients with a shared connection cannot be "
"automatically redirected.")
if self.message_handler:
await self.message_handler.destroy_async()
self.message_handler = None
self._shutdown = False
self._last_activity_timestamp = None
self._was_message_received = False
self._remote_address = address.Source(redirect.address)
await self._redirect_async(redirect, auth) | <ast.AsyncFunctionDef object at 0x7da20e962110> | keyword[async] keyword[def] identifier[redirect_async] ( identifier[self] , identifier[redirect] , identifier[auth] ):
literal[string]
keyword[if] identifier[self] . identifier[_ext_connection] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] )
keyword[if] identifier[self] . identifier[message_handler] :
keyword[await] identifier[self] . identifier[message_handler] . identifier[destroy_async] ()
identifier[self] . identifier[message_handler] = keyword[None]
identifier[self] . identifier[_shutdown] = keyword[False]
identifier[self] . identifier[_last_activity_timestamp] = keyword[None]
identifier[self] . identifier[_was_message_received] = keyword[False]
identifier[self] . identifier[_remote_address] = identifier[address] . identifier[Source] ( identifier[redirect] . identifier[address] )
keyword[await] identifier[self] . identifier[_redirect_async] ( identifier[redirect] , identifier[auth] ) | async def redirect_async(self, redirect, auth):
"""Redirect the client endpoint using a Link DETACH redirect
response.
:param redirect: The Link DETACH redirect details.
:type redirect: ~uamqp.errors.LinkRedirect
:param auth: Authentication credentials to the redirected endpoint.
:type auth: ~uamqp.authentication.common.AMQPAuth
"""
if self._ext_connection:
raise ValueError('Clients with a shared connection cannot be automatically redirected.') # depends on [control=['if'], data=[]]
if self.message_handler:
await self.message_handler.destroy_async()
self.message_handler = None # depends on [control=['if'], data=[]]
self._shutdown = False
self._last_activity_timestamp = None
self._was_message_received = False
self._remote_address = address.Source(redirect.address)
await self._redirect_async(redirect, auth) |
def explain_permutation_importance(estimator,
vec=None,
top=_TOP,
target_names=None, # ignored
targets=None, # ignored
feature_names=None,
feature_re=None,
feature_filter=None,
):
"""
Return an explanation of PermutationImportance.
See :func:`eli5.explain_weights` for description of
``top``, ``feature_names``, ``feature_re`` and ``feature_filter``
parameters.
``target_names`` and ``targets`` parameters are ignored.
``vec`` is a vectorizer instance used to transform
raw features to the input of the estimator (e.g. a fitted
CountVectorizer instance); you can pass it instead of ``feature_names``.
"""
coef = estimator.feature_importances_
coef_std = estimator.feature_importances_std_
return get_feature_importance_explanation(estimator, vec, coef,
coef_std=coef_std,
feature_names=feature_names,
feature_filter=feature_filter,
feature_re=feature_re,
top=top,
description=DESCRIPTION_SCORE_DECREASE + estimator.caveats_,
is_regression=isinstance(estimator.wrapped_estimator_, RegressorMixin),
) | def function[explain_permutation_importance, parameter[estimator, vec, top, target_names, targets, feature_names, feature_re, feature_filter]]:
constant[
Return an explanation of PermutationImportance.
See :func:`eli5.explain_weights` for description of
``top``, ``feature_names``, ``feature_re`` and ``feature_filter``
parameters.
``target_names`` and ``targets`` parameters are ignored.
``vec`` is a vectorizer instance used to transform
raw features to the input of the estimator (e.g. a fitted
CountVectorizer instance); you can pass it instead of ``feature_names``.
]
variable[coef] assign[=] name[estimator].feature_importances_
variable[coef_std] assign[=] name[estimator].feature_importances_std_
return[call[name[get_feature_importance_explanation], parameter[name[estimator], name[vec], name[coef]]]] | keyword[def] identifier[explain_permutation_importance] ( identifier[estimator] ,
identifier[vec] = keyword[None] ,
identifier[top] = identifier[_TOP] ,
identifier[target_names] = keyword[None] ,
identifier[targets] = keyword[None] ,
identifier[feature_names] = keyword[None] ,
identifier[feature_re] = keyword[None] ,
identifier[feature_filter] = keyword[None] ,
):
literal[string]
identifier[coef] = identifier[estimator] . identifier[feature_importances_]
identifier[coef_std] = identifier[estimator] . identifier[feature_importances_std_]
keyword[return] identifier[get_feature_importance_explanation] ( identifier[estimator] , identifier[vec] , identifier[coef] ,
identifier[coef_std] = identifier[coef_std] ,
identifier[feature_names] = identifier[feature_names] ,
identifier[feature_filter] = identifier[feature_filter] ,
identifier[feature_re] = identifier[feature_re] ,
identifier[top] = identifier[top] ,
identifier[description] = identifier[DESCRIPTION_SCORE_DECREASE] + identifier[estimator] . identifier[caveats_] ,
identifier[is_regression] = identifier[isinstance] ( identifier[estimator] . identifier[wrapped_estimator_] , identifier[RegressorMixin] ),
) | def explain_permutation_importance(estimator, vec=None, top=_TOP, target_names=None, targets=None, feature_names=None, feature_re=None, feature_filter=None): # ignored
# ignored
'\n Return an explanation of PermutationImportance.\n\n See :func:`eli5.explain_weights` for description of\n ``top``, ``feature_names``, ``feature_re`` and ``feature_filter``\n parameters.\n\n ``target_names`` and ``targets`` parameters are ignored.\n\n ``vec`` is a vectorizer instance used to transform\n raw features to the input of the estimator (e.g. a fitted\n CountVectorizer instance); you can pass it instead of ``feature_names``.\n '
coef = estimator.feature_importances_
coef_std = estimator.feature_importances_std_
return get_feature_importance_explanation(estimator, vec, coef, coef_std=coef_std, feature_names=feature_names, feature_filter=feature_filter, feature_re=feature_re, top=top, description=DESCRIPTION_SCORE_DECREASE + estimator.caveats_, is_regression=isinstance(estimator.wrapped_estimator_, RegressorMixin)) |
def throw(self, typ, val=None, tb=None): # pylint:disable=method-hidden,invalid-name
"""
throw(typ[,val[,tb]]) -> raise exception in generator,
return next yielded value or raise StopIteration.
"""
self._materialize()
self._generator.throw(typ, val, tb) | def function[throw, parameter[self, typ, val, tb]]:
constant[
throw(typ[,val[,tb]]) -> raise exception in generator,
return next yielded value or raise StopIteration.
]
call[name[self]._materialize, parameter[]]
call[name[self]._generator.throw, parameter[name[typ], name[val], name[tb]]] | keyword[def] identifier[throw] ( identifier[self] , identifier[typ] , identifier[val] = keyword[None] , identifier[tb] = keyword[None] ):
literal[string]
identifier[self] . identifier[_materialize] ()
identifier[self] . identifier[_generator] . identifier[throw] ( identifier[typ] , identifier[val] , identifier[tb] ) | def throw(self, typ, val=None, tb=None): # pylint:disable=method-hidden,invalid-name
'\n throw(typ[,val[,tb]]) -> raise exception in generator,\n return next yielded value or raise StopIteration.\n '
self._materialize()
self._generator.throw(typ, val, tb) |
def parse_multiple(s, f, values=None):
"""Parse multiple comma-separated elements, each of which is parsed
using function f."""
if values is None: values = []
values.append(f(s))
if s.pos < len(s) and s.cur == ',':
s.pos += 1
return parse_multiple(s, f, values)
else:
return values | def function[parse_multiple, parameter[s, f, values]]:
constant[Parse multiple comma-separated elements, each of which is parsed
using function f.]
if compare[name[values] is constant[None]] begin[:]
variable[values] assign[=] list[[]]
call[name[values].append, parameter[call[name[f], parameter[name[s]]]]]
if <ast.BoolOp object at 0x7da1b0948940> begin[:]
<ast.AugAssign object at 0x7da1b0948700>
return[call[name[parse_multiple], parameter[name[s], name[f], name[values]]]] | keyword[def] identifier[parse_multiple] ( identifier[s] , identifier[f] , identifier[values] = keyword[None] ):
literal[string]
keyword[if] identifier[values] keyword[is] keyword[None] : identifier[values] =[]
identifier[values] . identifier[append] ( identifier[f] ( identifier[s] ))
keyword[if] identifier[s] . identifier[pos] < identifier[len] ( identifier[s] ) keyword[and] identifier[s] . identifier[cur] == literal[string] :
identifier[s] . identifier[pos] += literal[int]
keyword[return] identifier[parse_multiple] ( identifier[s] , identifier[f] , identifier[values] )
keyword[else] :
keyword[return] identifier[values] | def parse_multiple(s, f, values=None):
"""Parse multiple comma-separated elements, each of which is parsed
using function f."""
if values is None:
values = [] # depends on [control=['if'], data=['values']]
values.append(f(s))
if s.pos < len(s) and s.cur == ',':
s.pos += 1
return parse_multiple(s, f, values) # depends on [control=['if'], data=[]]
else:
return values |
def MoVPluRunOff(profile):
"""
Returns an integer that is equal to the margin of victory of the election profile, that is,
the smallest number k such that changing k votes can change the winners.
:ivar Profile profile: A Profile object that represents an election profile.
"""
# Currently, we expect the profile to contain complete ordering over candidates.
elecType = profile.getElecType()
if elecType != "soc" and elecType != "toc" and elecType != "csv":
print("ERROR: unsupported profile type")
exit()
# Initialization
prefcounts = profile.getPreferenceCounts()
len_prefcounts = len(prefcounts)
rankmaps = profile.getRankMaps()
# print(rankmaps)
ranking = MechanismPlurality().getRanking(profile)
# print("ranking=", ranking)
# 1st round: find the top 2 candidates in plurality scores
# Compute the 1st-place candidate in plurality scores
max_cand = ranking[0][0][0]
# Compute the 2nd-place candidate in plurality scores
# Automatically using tie-breaking rule--numerically increasing order
if len(ranking[0][0]) > 1:
second_max_cand = ranking[0][0][1]
if len(ranking[0][0]) > 2:
third_max_cand = ranking[0][0][2]
else:
third_max_cand = ranking[0][1][0]
else:
second_max_cand = ranking[0][1][0]
if len(ranking[0][1]) > 1:
third_max_cand = ranking[0][1][1]
else:
third_max_cand = ranking[0][2][0]
top_2 = [max_cand, second_max_cand]
# 2nd round: find the candidate with maximum plurality score
dict_top2 = {max_cand: 0, second_max_cand: 0}
for i in range(len_prefcounts):
vote_top2 = {key: value for key, value in rankmaps[i].items() if key in top_2}
# print(vote_top2)
top_position = min(vote_top2.values())
keys = [x for x in vote_top2.keys() if vote_top2[x] == top_position]
for key in keys:
dict_top2[key] += prefcounts[i]
# the original winner-- d
# print("dict_top2=", dict_top2)
d = max(dict_top2.items(), key=lambda x: x[1])[0]
c_1 = top_2[0] if top_2[1] == d else top_2[1]
# the candidate with third highest plurality score
c_2 = third_max_cand
# print("d=", d, c_1, c_2)
Type1_1 = Type1_2 = 0
plu_d = plu_c_1 = plu_c_2 = 0
# ------------count the votes of CASE I & II---------------
for i in range(len_prefcounts):
if rankmaps[i][d] < rankmaps[i][c_1]:
Type1_1 += prefcounts[i]
elif rankmaps[i][d] > rankmaps[i][c_1]:
Type1_2 += prefcounts[i]
if rankmaps[i][d] == 1:
plu_d += prefcounts[i]
elif rankmaps[i][c_1] == 1:
plu_c_1 += prefcounts[i]
elif rankmaps[i][c_2] == 1:
plu_c_2 += prefcounts[i]
# print("plu=", plu_d, plu_c_1, plu_c_2)
# -------------------CASE I------------------------------
MoV_I = math.floor((Type1_1 - Type1_2)/2) + 1
# -------------------CASE II-------------------------------
if math.floor((plu_d + plu_c_2)/2) + 1 <= plu_c_1:
MoV_II = math.floor((plu_d - plu_c_2)/2) + 1
else:
MoV_II = plu_d - math.floor((plu_d + plu_c_1 + plu_c_2)/3) + 1
# MoV_II = math.floor((plu_d * 2 - plu_c_1 - plu_c_2) / 3) + 1 # old version
# -------------------CASE III-----------------------------
MoV_d = dict()
remaining = sorted(rankmaps[0].keys())
remaining.remove(d)
remaining.remove(c_1)
for e in remaining:
# ------------count the votes of CASE III---------------
T1 = T2 = T3 = T4 = T5 = T6 = T7 = T8 = 0
for i in range(len_prefcounts):
if rankmaps[i][d] == 1:
if rankmaps[i][c_1] < rankmaps[i][e]:
T1 += prefcounts[i]
elif rankmaps[i][e] < rankmaps[i][c_1]:
T2 += prefcounts[i]
elif rankmaps[i][c_1] == 1:
if rankmaps[i][d] < rankmaps[i][e]:
T3 += prefcounts[i]
elif rankmaps[i][e] < rankmaps[i][d]:
T4 += prefcounts[i]
elif rankmaps[i][e] == 1:
if rankmaps[i][d] < rankmaps[i][c_1]:
T5 += prefcounts[i]
elif rankmaps[i][c_1] < rankmaps[i][d]:
T6 += prefcounts[i]
else:
if rankmaps[i][d] < rankmaps[i][e]:
T7 += prefcounts[i]
elif rankmaps[i][e] < rankmaps[i][d]:
T8 += prefcounts[i]
if math.floor((T3 + T4 + T5 + T6)/2) + 1 <= T1 + T2:
CHANGE1 = math.floor((T3 + T4 - T5 - T6)/2) + 1
else:
CHANGE1 = T3 + T4 - T1 -T2 + 1
x = min(T3, CHANGE1)
if T1 + T2 + T3 + T7 - x < T4 + T5 + T6 + T8 + x:
MoV_d[e] = CHANGE1
else:
CHANGE2 = math.floor((T1 + T2 + T3 + T7 - T4 - T5 - T6 - T8)/2) - x + 1
MoV_d[e] = CHANGE1 + CHANGE2
MoV_III = min(MoV_d.items(), key=lambda x: x[1])[1]
# ------------------------Overall MoV---------------------------------
# print(MoV_d)
# print(MoV_I, MoV_II, MoV_III)
MoV = min(MoV_I, MoV_II, MoV_III)
return MoV | def function[MoVPluRunOff, parameter[profile]]:
constant[
Returns an integer that is equal to the margin of victory of the election profile, that is,
the smallest number k such that changing k votes can change the winners.
:ivar Profile profile: A Profile object that represents an election profile.
]
variable[elecType] assign[=] call[name[profile].getElecType, parameter[]]
if <ast.BoolOp object at 0x7da1b2308370> begin[:]
call[name[print], parameter[constant[ERROR: unsupported profile type]]]
call[name[exit], parameter[]]
variable[prefcounts] assign[=] call[name[profile].getPreferenceCounts, parameter[]]
variable[len_prefcounts] assign[=] call[name[len], parameter[name[prefcounts]]]
variable[rankmaps] assign[=] call[name[profile].getRankMaps, parameter[]]
variable[ranking] assign[=] call[call[name[MechanismPlurality], parameter[]].getRanking, parameter[name[profile]]]
variable[max_cand] assign[=] call[call[call[name[ranking]][constant[0]]][constant[0]]][constant[0]]
if compare[call[name[len], parameter[call[call[name[ranking]][constant[0]]][constant[0]]]] greater[>] constant[1]] begin[:]
variable[second_max_cand] assign[=] call[call[call[name[ranking]][constant[0]]][constant[0]]][constant[1]]
if compare[call[name[len], parameter[call[call[name[ranking]][constant[0]]][constant[0]]]] greater[>] constant[2]] begin[:]
variable[third_max_cand] assign[=] call[call[call[name[ranking]][constant[0]]][constant[0]]][constant[2]]
variable[top_2] assign[=] list[[<ast.Name object at 0x7da1b2309660>, <ast.Name object at 0x7da1b2309750>]]
variable[dict_top2] assign[=] dictionary[[<ast.Name object at 0x7da1b2309720>, <ast.Name object at 0x7da1b2309780>], [<ast.Constant object at 0x7da1b23097e0>, <ast.Constant object at 0x7da1b2309840>]]
for taget[name[i]] in starred[call[name[range], parameter[name[len_prefcounts]]]] begin[:]
variable[vote_top2] assign[=] <ast.DictComp object at 0x7da1b2309990>
variable[top_position] assign[=] call[name[min], parameter[call[name[vote_top2].values, parameter[]]]]
variable[keys] assign[=] <ast.ListComp object at 0x7da1b2309d50>
for taget[name[key]] in starred[name[keys]] begin[:]
<ast.AugAssign object at 0x7da1b230a380>
variable[d] assign[=] call[call[name[max], parameter[call[name[dict_top2].items, parameter[]]]]][constant[0]]
variable[c_1] assign[=] <ast.IfExp object at 0x7da1b230a620>
variable[c_2] assign[=] name[third_max_cand]
variable[Type1_1] assign[=] constant[0]
variable[plu_d] assign[=] constant[0]
for taget[name[i]] in starred[call[name[range], parameter[name[len_prefcounts]]]] begin[:]
if compare[call[call[name[rankmaps]][name[i]]][name[d]] less[<] call[call[name[rankmaps]][name[i]]][name[c_1]]] begin[:]
<ast.AugAssign object at 0x7da1b230aec0>
if compare[call[call[name[rankmaps]][name[i]]][name[d]] equal[==] constant[1]] begin[:]
<ast.AugAssign object at 0x7da1b230b190>
variable[MoV_I] assign[=] binary_operation[call[name[math].floor, parameter[binary_operation[binary_operation[name[Type1_1] - name[Type1_2]] / constant[2]]]] + constant[1]]
if compare[binary_operation[call[name[math].floor, parameter[binary_operation[binary_operation[name[plu_d] + name[plu_c_2]] / constant[2]]]] + constant[1]] less_or_equal[<=] name[plu_c_1]] begin[:]
variable[MoV_II] assign[=] binary_operation[call[name[math].floor, parameter[binary_operation[binary_operation[name[plu_d] - name[plu_c_2]] / constant[2]]]] + constant[1]]
variable[MoV_d] assign[=] call[name[dict], parameter[]]
variable[remaining] assign[=] call[name[sorted], parameter[call[call[name[rankmaps]][constant[0]].keys, parameter[]]]]
call[name[remaining].remove, parameter[name[d]]]
call[name[remaining].remove, parameter[name[c_1]]]
for taget[name[e]] in starred[name[remaining]] begin[:]
variable[T1] assign[=] constant[0]
for taget[name[i]] in starred[call[name[range], parameter[name[len_prefcounts]]]] begin[:]
if compare[call[call[name[rankmaps]][name[i]]][name[d]] equal[==] constant[1]] begin[:]
if compare[call[call[name[rankmaps]][name[i]]][name[c_1]] less[<] call[call[name[rankmaps]][name[i]]][name[e]]] begin[:]
<ast.AugAssign object at 0x7da1b2350100>
if compare[binary_operation[call[name[math].floor, parameter[binary_operation[binary_operation[binary_operation[binary_operation[name[T3] + name[T4]] + name[T5]] + name[T6]] / constant[2]]]] + constant[1]] less_or_equal[<=] binary_operation[name[T1] + name[T2]]] begin[:]
variable[CHANGE1] assign[=] binary_operation[call[name[math].floor, parameter[binary_operation[binary_operation[binary_operation[binary_operation[name[T3] + name[T4]] - name[T5]] - name[T6]] / constant[2]]]] + constant[1]]
variable[x] assign[=] call[name[min], parameter[name[T3], name[CHANGE1]]]
if compare[binary_operation[binary_operation[binary_operation[binary_operation[name[T1] + name[T2]] + name[T3]] + name[T7]] - name[x]] less[<] binary_operation[binary_operation[binary_operation[binary_operation[name[T4] + name[T5]] + name[T6]] + name[T8]] + name[x]]] begin[:]
call[name[MoV_d]][name[e]] assign[=] name[CHANGE1]
variable[MoV_III] assign[=] call[call[name[min], parameter[call[name[MoV_d].items, parameter[]]]]][constant[1]]
variable[MoV] assign[=] call[name[min], parameter[name[MoV_I], name[MoV_II], name[MoV_III]]]
return[name[MoV]] | keyword[def] identifier[MoVPluRunOff] ( identifier[profile] ):
literal[string]
identifier[elecType] = identifier[profile] . identifier[getElecType] ()
keyword[if] identifier[elecType] != literal[string] keyword[and] identifier[elecType] != literal[string] keyword[and] identifier[elecType] != literal[string] :
identifier[print] ( literal[string] )
identifier[exit] ()
identifier[prefcounts] = identifier[profile] . identifier[getPreferenceCounts] ()
identifier[len_prefcounts] = identifier[len] ( identifier[prefcounts] )
identifier[rankmaps] = identifier[profile] . identifier[getRankMaps] ()
identifier[ranking] = identifier[MechanismPlurality] (). identifier[getRanking] ( identifier[profile] )
identifier[max_cand] = identifier[ranking] [ literal[int] ][ literal[int] ][ literal[int] ]
keyword[if] identifier[len] ( identifier[ranking] [ literal[int] ][ literal[int] ])> literal[int] :
identifier[second_max_cand] = identifier[ranking] [ literal[int] ][ literal[int] ][ literal[int] ]
keyword[if] identifier[len] ( identifier[ranking] [ literal[int] ][ literal[int] ])> literal[int] :
identifier[third_max_cand] = identifier[ranking] [ literal[int] ][ literal[int] ][ literal[int] ]
keyword[else] :
identifier[third_max_cand] = identifier[ranking] [ literal[int] ][ literal[int] ][ literal[int] ]
keyword[else] :
identifier[second_max_cand] = identifier[ranking] [ literal[int] ][ literal[int] ][ literal[int] ]
keyword[if] identifier[len] ( identifier[ranking] [ literal[int] ][ literal[int] ])> literal[int] :
identifier[third_max_cand] = identifier[ranking] [ literal[int] ][ literal[int] ][ literal[int] ]
keyword[else] :
identifier[third_max_cand] = identifier[ranking] [ literal[int] ][ literal[int] ][ literal[int] ]
identifier[top_2] =[ identifier[max_cand] , identifier[second_max_cand] ]
identifier[dict_top2] ={ identifier[max_cand] : literal[int] , identifier[second_max_cand] : literal[int] }
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len_prefcounts] ):
identifier[vote_top2] ={ identifier[key] : identifier[value] keyword[for] identifier[key] , identifier[value] keyword[in] identifier[rankmaps] [ identifier[i] ]. identifier[items] () keyword[if] identifier[key] keyword[in] identifier[top_2] }
identifier[top_position] = identifier[min] ( identifier[vote_top2] . identifier[values] ())
identifier[keys] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[vote_top2] . identifier[keys] () keyword[if] identifier[vote_top2] [ identifier[x] ]== identifier[top_position] ]
keyword[for] identifier[key] keyword[in] identifier[keys] :
identifier[dict_top2] [ identifier[key] ]+= identifier[prefcounts] [ identifier[i] ]
identifier[d] = identifier[max] ( identifier[dict_top2] . identifier[items] (), identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ])[ literal[int] ]
identifier[c_1] = identifier[top_2] [ literal[int] ] keyword[if] identifier[top_2] [ literal[int] ]== identifier[d] keyword[else] identifier[top_2] [ literal[int] ]
identifier[c_2] = identifier[third_max_cand]
identifier[Type1_1] = identifier[Type1_2] = literal[int]
identifier[plu_d] = identifier[plu_c_1] = identifier[plu_c_2] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len_prefcounts] ):
keyword[if] identifier[rankmaps] [ identifier[i] ][ identifier[d] ]< identifier[rankmaps] [ identifier[i] ][ identifier[c_1] ]:
identifier[Type1_1] += identifier[prefcounts] [ identifier[i] ]
keyword[elif] identifier[rankmaps] [ identifier[i] ][ identifier[d] ]> identifier[rankmaps] [ identifier[i] ][ identifier[c_1] ]:
identifier[Type1_2] += identifier[prefcounts] [ identifier[i] ]
keyword[if] identifier[rankmaps] [ identifier[i] ][ identifier[d] ]== literal[int] :
identifier[plu_d] += identifier[prefcounts] [ identifier[i] ]
keyword[elif] identifier[rankmaps] [ identifier[i] ][ identifier[c_1] ]== literal[int] :
identifier[plu_c_1] += identifier[prefcounts] [ identifier[i] ]
keyword[elif] identifier[rankmaps] [ identifier[i] ][ identifier[c_2] ]== literal[int] :
identifier[plu_c_2] += identifier[prefcounts] [ identifier[i] ]
identifier[MoV_I] = identifier[math] . identifier[floor] (( identifier[Type1_1] - identifier[Type1_2] )/ literal[int] )+ literal[int]
keyword[if] identifier[math] . identifier[floor] (( identifier[plu_d] + identifier[plu_c_2] )/ literal[int] )+ literal[int] <= identifier[plu_c_1] :
identifier[MoV_II] = identifier[math] . identifier[floor] (( identifier[plu_d] - identifier[plu_c_2] )/ literal[int] )+ literal[int]
keyword[else] :
identifier[MoV_II] = identifier[plu_d] - identifier[math] . identifier[floor] (( identifier[plu_d] + identifier[plu_c_1] + identifier[plu_c_2] )/ literal[int] )+ literal[int]
identifier[MoV_d] = identifier[dict] ()
identifier[remaining] = identifier[sorted] ( identifier[rankmaps] [ literal[int] ]. identifier[keys] ())
identifier[remaining] . identifier[remove] ( identifier[d] )
identifier[remaining] . identifier[remove] ( identifier[c_1] )
keyword[for] identifier[e] keyword[in] identifier[remaining] :
identifier[T1] = identifier[T2] = identifier[T3] = identifier[T4] = identifier[T5] = identifier[T6] = identifier[T7] = identifier[T8] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len_prefcounts] ):
keyword[if] identifier[rankmaps] [ identifier[i] ][ identifier[d] ]== literal[int] :
keyword[if] identifier[rankmaps] [ identifier[i] ][ identifier[c_1] ]< identifier[rankmaps] [ identifier[i] ][ identifier[e] ]:
identifier[T1] += identifier[prefcounts] [ identifier[i] ]
keyword[elif] identifier[rankmaps] [ identifier[i] ][ identifier[e] ]< identifier[rankmaps] [ identifier[i] ][ identifier[c_1] ]:
identifier[T2] += identifier[prefcounts] [ identifier[i] ]
keyword[elif] identifier[rankmaps] [ identifier[i] ][ identifier[c_1] ]== literal[int] :
keyword[if] identifier[rankmaps] [ identifier[i] ][ identifier[d] ]< identifier[rankmaps] [ identifier[i] ][ identifier[e] ]:
identifier[T3] += identifier[prefcounts] [ identifier[i] ]
keyword[elif] identifier[rankmaps] [ identifier[i] ][ identifier[e] ]< identifier[rankmaps] [ identifier[i] ][ identifier[d] ]:
identifier[T4] += identifier[prefcounts] [ identifier[i] ]
keyword[elif] identifier[rankmaps] [ identifier[i] ][ identifier[e] ]== literal[int] :
keyword[if] identifier[rankmaps] [ identifier[i] ][ identifier[d] ]< identifier[rankmaps] [ identifier[i] ][ identifier[c_1] ]:
identifier[T5] += identifier[prefcounts] [ identifier[i] ]
keyword[elif] identifier[rankmaps] [ identifier[i] ][ identifier[c_1] ]< identifier[rankmaps] [ identifier[i] ][ identifier[d] ]:
identifier[T6] += identifier[prefcounts] [ identifier[i] ]
keyword[else] :
keyword[if] identifier[rankmaps] [ identifier[i] ][ identifier[d] ]< identifier[rankmaps] [ identifier[i] ][ identifier[e] ]:
identifier[T7] += identifier[prefcounts] [ identifier[i] ]
keyword[elif] identifier[rankmaps] [ identifier[i] ][ identifier[e] ]< identifier[rankmaps] [ identifier[i] ][ identifier[d] ]:
identifier[T8] += identifier[prefcounts] [ identifier[i] ]
keyword[if] identifier[math] . identifier[floor] (( identifier[T3] + identifier[T4] + identifier[T5] + identifier[T6] )/ literal[int] )+ literal[int] <= identifier[T1] + identifier[T2] :
identifier[CHANGE1] = identifier[math] . identifier[floor] (( identifier[T3] + identifier[T4] - identifier[T5] - identifier[T6] )/ literal[int] )+ literal[int]
keyword[else] :
identifier[CHANGE1] = identifier[T3] + identifier[T4] - identifier[T1] - identifier[T2] + literal[int]
identifier[x] = identifier[min] ( identifier[T3] , identifier[CHANGE1] )
keyword[if] identifier[T1] + identifier[T2] + identifier[T3] + identifier[T7] - identifier[x] < identifier[T4] + identifier[T5] + identifier[T6] + identifier[T8] + identifier[x] :
identifier[MoV_d] [ identifier[e] ]= identifier[CHANGE1]
keyword[else] :
identifier[CHANGE2] = identifier[math] . identifier[floor] (( identifier[T1] + identifier[T2] + identifier[T3] + identifier[T7] - identifier[T4] - identifier[T5] - identifier[T6] - identifier[T8] )/ literal[int] )- identifier[x] + literal[int]
identifier[MoV_d] [ identifier[e] ]= identifier[CHANGE1] + identifier[CHANGE2]
identifier[MoV_III] = identifier[min] ( identifier[MoV_d] . identifier[items] (), identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ])[ literal[int] ]
identifier[MoV] = identifier[min] ( identifier[MoV_I] , identifier[MoV_II] , identifier[MoV_III] )
keyword[return] identifier[MoV] | def MoVPluRunOff(profile):
"""
Returns an integer that is equal to the margin of victory of the election profile, that is,
the smallest number k such that changing k votes can change the winners.
:ivar Profile profile: A Profile object that represents an election profile.
"""
# Currently, we expect the profile to contain complete ordering over candidates.
elecType = profile.getElecType()
if elecType != 'soc' and elecType != 'toc' and (elecType != 'csv'):
print('ERROR: unsupported profile type')
exit() # depends on [control=['if'], data=[]]
# Initialization
prefcounts = profile.getPreferenceCounts()
len_prefcounts = len(prefcounts)
rankmaps = profile.getRankMaps()
# print(rankmaps)
ranking = MechanismPlurality().getRanking(profile)
# print("ranking=", ranking)
# 1st round: find the top 2 candidates in plurality scores
# Compute the 1st-place candidate in plurality scores
max_cand = ranking[0][0][0]
# Compute the 2nd-place candidate in plurality scores
# Automatically using tie-breaking rule--numerically increasing order
if len(ranking[0][0]) > 1:
second_max_cand = ranking[0][0][1]
if len(ranking[0][0]) > 2:
third_max_cand = ranking[0][0][2] # depends on [control=['if'], data=[]]
else:
third_max_cand = ranking[0][1][0] # depends on [control=['if'], data=[]]
else:
second_max_cand = ranking[0][1][0]
if len(ranking[0][1]) > 1:
third_max_cand = ranking[0][1][1] # depends on [control=['if'], data=[]]
else:
third_max_cand = ranking[0][2][0]
top_2 = [max_cand, second_max_cand]
# 2nd round: find the candidate with maximum plurality score
dict_top2 = {max_cand: 0, second_max_cand: 0}
for i in range(len_prefcounts):
vote_top2 = {key: value for (key, value) in rankmaps[i].items() if key in top_2}
# print(vote_top2)
top_position = min(vote_top2.values())
keys = [x for x in vote_top2.keys() if vote_top2[x] == top_position]
for key in keys:
dict_top2[key] += prefcounts[i] # depends on [control=['for'], data=['key']] # depends on [control=['for'], data=['i']]
# the original winner-- d
# print("dict_top2=", dict_top2)
d = max(dict_top2.items(), key=lambda x: x[1])[0]
c_1 = top_2[0] if top_2[1] == d else top_2[1]
# the candidate with third highest plurality score
c_2 = third_max_cand
# print("d=", d, c_1, c_2)
Type1_1 = Type1_2 = 0
plu_d = plu_c_1 = plu_c_2 = 0
# ------------count the votes of CASE I & II---------------
for i in range(len_prefcounts):
if rankmaps[i][d] < rankmaps[i][c_1]:
Type1_1 += prefcounts[i] # depends on [control=['if'], data=[]]
elif rankmaps[i][d] > rankmaps[i][c_1]:
Type1_2 += prefcounts[i] # depends on [control=['if'], data=[]]
if rankmaps[i][d] == 1:
plu_d += prefcounts[i] # depends on [control=['if'], data=[]]
elif rankmaps[i][c_1] == 1:
plu_c_1 += prefcounts[i] # depends on [control=['if'], data=[]]
elif rankmaps[i][c_2] == 1:
plu_c_2 += prefcounts[i] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
# print("plu=", plu_d, plu_c_1, plu_c_2)
# -------------------CASE I------------------------------
MoV_I = math.floor((Type1_1 - Type1_2) / 2) + 1
# -------------------CASE II-------------------------------
if math.floor((plu_d + plu_c_2) / 2) + 1 <= plu_c_1:
MoV_II = math.floor((plu_d - plu_c_2) / 2) + 1 # depends on [control=['if'], data=[]]
else:
MoV_II = plu_d - math.floor((plu_d + plu_c_1 + plu_c_2) / 3) + 1
# MoV_II = math.floor((plu_d * 2 - plu_c_1 - plu_c_2) / 3) + 1 # old version
# -------------------CASE III-----------------------------
MoV_d = dict()
remaining = sorted(rankmaps[0].keys())
remaining.remove(d)
remaining.remove(c_1)
for e in remaining:
# ------------count the votes of CASE III---------------
T1 = T2 = T3 = T4 = T5 = T6 = T7 = T8 = 0
for i in range(len_prefcounts):
if rankmaps[i][d] == 1:
if rankmaps[i][c_1] < rankmaps[i][e]:
T1 += prefcounts[i] # depends on [control=['if'], data=[]]
elif rankmaps[i][e] < rankmaps[i][c_1]:
T2 += prefcounts[i] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif rankmaps[i][c_1] == 1:
if rankmaps[i][d] < rankmaps[i][e]:
T3 += prefcounts[i] # depends on [control=['if'], data=[]]
elif rankmaps[i][e] < rankmaps[i][d]:
T4 += prefcounts[i] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif rankmaps[i][e] == 1:
if rankmaps[i][d] < rankmaps[i][c_1]:
T5 += prefcounts[i] # depends on [control=['if'], data=[]]
elif rankmaps[i][c_1] < rankmaps[i][d]:
T6 += prefcounts[i] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif rankmaps[i][d] < rankmaps[i][e]:
T7 += prefcounts[i] # depends on [control=['if'], data=[]]
elif rankmaps[i][e] < rankmaps[i][d]:
T8 += prefcounts[i] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
if math.floor((T3 + T4 + T5 + T6) / 2) + 1 <= T1 + T2:
CHANGE1 = math.floor((T3 + T4 - T5 - T6) / 2) + 1 # depends on [control=['if'], data=[]]
else:
CHANGE1 = T3 + T4 - T1 - T2 + 1
x = min(T3, CHANGE1)
if T1 + T2 + T3 + T7 - x < T4 + T5 + T6 + T8 + x:
MoV_d[e] = CHANGE1 # depends on [control=['if'], data=[]]
else:
CHANGE2 = math.floor((T1 + T2 + T3 + T7 - T4 - T5 - T6 - T8) / 2) - x + 1
MoV_d[e] = CHANGE1 + CHANGE2 # depends on [control=['for'], data=['e']]
MoV_III = min(MoV_d.items(), key=lambda x: x[1])[1]
# ------------------------Overall MoV---------------------------------
# print(MoV_d)
# print(MoV_I, MoV_II, MoV_III)
MoV = min(MoV_I, MoV_II, MoV_III)
return MoV |
def add_placeholder(self, id_, name, ph_type, orient, sz, idx):
"""
Append a newly-created placeholder ``<p:sp>`` shape having the
specified placeholder properties.
"""
sp = CT_Shape.new_placeholder_sp(
id_, name, ph_type, orient, sz, idx
)
self.insert_element_before(sp, 'p:extLst')
return sp | def function[add_placeholder, parameter[self, id_, name, ph_type, orient, sz, idx]]:
constant[
Append a newly-created placeholder ``<p:sp>`` shape having the
specified placeholder properties.
]
variable[sp] assign[=] call[name[CT_Shape].new_placeholder_sp, parameter[name[id_], name[name], name[ph_type], name[orient], name[sz], name[idx]]]
call[name[self].insert_element_before, parameter[name[sp], constant[p:extLst]]]
return[name[sp]] | keyword[def] identifier[add_placeholder] ( identifier[self] , identifier[id_] , identifier[name] , identifier[ph_type] , identifier[orient] , identifier[sz] , identifier[idx] ):
literal[string]
identifier[sp] = identifier[CT_Shape] . identifier[new_placeholder_sp] (
identifier[id_] , identifier[name] , identifier[ph_type] , identifier[orient] , identifier[sz] , identifier[idx]
)
identifier[self] . identifier[insert_element_before] ( identifier[sp] , literal[string] )
keyword[return] identifier[sp] | def add_placeholder(self, id_, name, ph_type, orient, sz, idx):
"""
Append a newly-created placeholder ``<p:sp>`` shape having the
specified placeholder properties.
"""
sp = CT_Shape.new_placeholder_sp(id_, name, ph_type, orient, sz, idx)
self.insert_element_before(sp, 'p:extLst')
return sp |
def execute(self):
"""Runs the tool on all source files that are located."""
relevant_targets = self._get_non_synthetic_targets(self.get_targets())
if self.sideeffecting:
# Always execute sideeffecting tasks without invalidation.
self._execute_for(relevant_targets)
else:
# If the task is not sideeffecting we can use invalidation.
with self.invalidated(relevant_targets) as invalidation_check:
self._execute_for([vt.target for vt in invalidation_check.invalid_vts]) | def function[execute, parameter[self]]:
constant[Runs the tool on all source files that are located.]
variable[relevant_targets] assign[=] call[name[self]._get_non_synthetic_targets, parameter[call[name[self].get_targets, parameter[]]]]
if name[self].sideeffecting begin[:]
call[name[self]._execute_for, parameter[name[relevant_targets]]] | keyword[def] identifier[execute] ( identifier[self] ):
literal[string]
identifier[relevant_targets] = identifier[self] . identifier[_get_non_synthetic_targets] ( identifier[self] . identifier[get_targets] ())
keyword[if] identifier[self] . identifier[sideeffecting] :
identifier[self] . identifier[_execute_for] ( identifier[relevant_targets] )
keyword[else] :
keyword[with] identifier[self] . identifier[invalidated] ( identifier[relevant_targets] ) keyword[as] identifier[invalidation_check] :
identifier[self] . identifier[_execute_for] ([ identifier[vt] . identifier[target] keyword[for] identifier[vt] keyword[in] identifier[invalidation_check] . identifier[invalid_vts] ]) | def execute(self):
"""Runs the tool on all source files that are located."""
relevant_targets = self._get_non_synthetic_targets(self.get_targets())
if self.sideeffecting:
# Always execute sideeffecting tasks without invalidation.
self._execute_for(relevant_targets) # depends on [control=['if'], data=[]]
else:
# If the task is not sideeffecting we can use invalidation.
with self.invalidated(relevant_targets) as invalidation_check:
self._execute_for([vt.target for vt in invalidation_check.invalid_vts]) # depends on [control=['with'], data=['invalidation_check']] |
def login_honeypot(request):
"""
A login honypot.
"""
status_code = None
if request.method == 'POST':
form = HoneypotForm(request.POST)
if form.is_valid():
username = form.cleaned_data["username"]
password = form.cleaned_data["password"]
# Don't store password from existing users:
if request.user.is_authenticated(): # Logged in user used the honypot?!?
password="***"
else:
user_model = get_user_model()
existing_user = user_model.objects.filter(username=username).exists()
if existing_user:
password="***"
HonypotAuth.objects.add(request, username, password)
# Send a "errored" form back, that looks like the normal form
form = HoneypotForm(request.POST, raise_error=True)
status_code = 401 # Unauthorized
else:
form = HoneypotForm()
context = {
"form": form,
"form_url": request.path,
}
response = render_to_response(
"admin/login.html",
context, context_instance=RequestContext(request)
)
if status_code is not None:
response.status_code = status_code
return response | def function[login_honeypot, parameter[request]]:
constant[
A login honypot.
]
variable[status_code] assign[=] constant[None]
if compare[name[request].method equal[==] constant[POST]] begin[:]
variable[form] assign[=] call[name[HoneypotForm], parameter[name[request].POST]]
if call[name[form].is_valid, parameter[]] begin[:]
variable[username] assign[=] call[name[form].cleaned_data][constant[username]]
variable[password] assign[=] call[name[form].cleaned_data][constant[password]]
if call[name[request].user.is_authenticated, parameter[]] begin[:]
variable[password] assign[=] constant[***]
call[name[HonypotAuth].objects.add, parameter[name[request], name[username], name[password]]]
variable[form] assign[=] call[name[HoneypotForm], parameter[name[request].POST]]
variable[status_code] assign[=] constant[401]
variable[context] assign[=] dictionary[[<ast.Constant object at 0x7da18f58da20>, <ast.Constant object at 0x7da18f58db40>], [<ast.Name object at 0x7da18f58cd90>, <ast.Attribute object at 0x7da18f58e620>]]
variable[response] assign[=] call[name[render_to_response], parameter[constant[admin/login.html], name[context]]]
if compare[name[status_code] is_not constant[None]] begin[:]
name[response].status_code assign[=] name[status_code]
return[name[response]] | keyword[def] identifier[login_honeypot] ( identifier[request] ):
literal[string]
identifier[status_code] = keyword[None]
keyword[if] identifier[request] . identifier[method] == literal[string] :
identifier[form] = identifier[HoneypotForm] ( identifier[request] . identifier[POST] )
keyword[if] identifier[form] . identifier[is_valid] ():
identifier[username] = identifier[form] . identifier[cleaned_data] [ literal[string] ]
identifier[password] = identifier[form] . identifier[cleaned_data] [ literal[string] ]
keyword[if] identifier[request] . identifier[user] . identifier[is_authenticated] ():
identifier[password] = literal[string]
keyword[else] :
identifier[user_model] = identifier[get_user_model] ()
identifier[existing_user] = identifier[user_model] . identifier[objects] . identifier[filter] ( identifier[username] = identifier[username] ). identifier[exists] ()
keyword[if] identifier[existing_user] :
identifier[password] = literal[string]
identifier[HonypotAuth] . identifier[objects] . identifier[add] ( identifier[request] , identifier[username] , identifier[password] )
identifier[form] = identifier[HoneypotForm] ( identifier[request] . identifier[POST] , identifier[raise_error] = keyword[True] )
identifier[status_code] = literal[int]
keyword[else] :
identifier[form] = identifier[HoneypotForm] ()
identifier[context] ={
literal[string] : identifier[form] ,
literal[string] : identifier[request] . identifier[path] ,
}
identifier[response] = identifier[render_to_response] (
literal[string] ,
identifier[context] , identifier[context_instance] = identifier[RequestContext] ( identifier[request] )
)
keyword[if] identifier[status_code] keyword[is] keyword[not] keyword[None] :
identifier[response] . identifier[status_code] = identifier[status_code]
keyword[return] identifier[response] | def login_honeypot(request):
"""
A login honypot.
"""
status_code = None
if request.method == 'POST':
form = HoneypotForm(request.POST)
if form.is_valid():
username = form.cleaned_data['username']
password = form.cleaned_data['password']
# Don't store password from existing users:
if request.user.is_authenticated(): # Logged in user used the honypot?!?
password = '***' # depends on [control=['if'], data=[]]
else:
user_model = get_user_model()
existing_user = user_model.objects.filter(username=username).exists()
if existing_user:
password = '***' # depends on [control=['if'], data=[]]
HonypotAuth.objects.add(request, username, password)
# Send a "errored" form back, that looks like the normal form
form = HoneypotForm(request.POST, raise_error=True)
status_code = 401 # Unauthorized # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
form = HoneypotForm()
context = {'form': form, 'form_url': request.path}
response = render_to_response('admin/login.html', context, context_instance=RequestContext(request))
if status_code is not None:
response.status_code = status_code # depends on [control=['if'], data=['status_code']]
return response |
def _parse_mode(client, command, actor, args):
"""Parse a mode changes, update states, and dispatch MODE events."""
chantypes = client.server.features.get("CHANTYPES", "#")
channel, _, args = args.partition(" ")
args = args.lstrip(":")
if channel[0] not in chantypes:
# Personal modes
for modes in args.split():
op, modes = modes[0], modes[1:]
for mode in modes:
if op == "+":
client.user.modes.add(mode)
else:
client.user.modes.discard(mode)
client.dispatch_event("MODE", actor, client.user, op, mode, None)
return
# channel-specific modes
chan = client.server.get_channel(channel)
user_modes = set(client._get_prefixes().itervalues())
chanmodes = client._get_chanmodes()
list_modes, always_arg_modes, set_arg_modes, toggle_modes = chanmodes
argument_modes = list_modes | always_arg_modes | set_arg_modes
tokens = args.split()
while tokens:
modes, tokens = tokens[0], tokens[1:]
op, modes = modes[0], modes[1:]
for mode in modes:
argument = None
if mode in (user_modes | argument_modes):
argument, tokens = tokens[0], tokens[1:]
if mode in user_modes:
user = client.server.get_channel(channel).members[argument]
if op == "+":
user.modes.add(mode)
else:
user.modes.discard(mode)
if op == "+":
if mode in (always_arg_modes | set_arg_modes):
chan.modes[mode] = argument
elif mode in toggle_modes:
chan.modes[mode] = True
else:
if mode in (always_arg_modes | set_arg_modes | toggle_modes):
if mode in chan.modes:
del chan.modes[mode]
# list-type modes (bans+exceptions, invite masks) aren't stored,
# but do generate MODE events.
client.dispatch_event("MODE", actor, chan, op, mode, argument) | def function[_parse_mode, parameter[client, command, actor, args]]:
constant[Parse a mode changes, update states, and dispatch MODE events.]
variable[chantypes] assign[=] call[name[client].server.features.get, parameter[constant[CHANTYPES], constant[#]]]
<ast.Tuple object at 0x7da204623640> assign[=] call[name[args].partition, parameter[constant[ ]]]
variable[args] assign[=] call[name[args].lstrip, parameter[constant[:]]]
if compare[call[name[channel]][constant[0]] <ast.NotIn object at 0x7da2590d7190> name[chantypes]] begin[:]
for taget[name[modes]] in starred[call[name[args].split, parameter[]]] begin[:]
<ast.Tuple object at 0x7da204621960> assign[=] tuple[[<ast.Subscript object at 0x7da204621e10>, <ast.Subscript object at 0x7da2046205b0>]]
for taget[name[mode]] in starred[name[modes]] begin[:]
if compare[name[op] equal[==] constant[+]] begin[:]
call[name[client].user.modes.add, parameter[name[mode]]]
call[name[client].dispatch_event, parameter[constant[MODE], name[actor], name[client].user, name[op], name[mode], constant[None]]]
return[None]
variable[chan] assign[=] call[name[client].server.get_channel, parameter[name[channel]]]
variable[user_modes] assign[=] call[name[set], parameter[call[call[name[client]._get_prefixes, parameter[]].itervalues, parameter[]]]]
variable[chanmodes] assign[=] call[name[client]._get_chanmodes, parameter[]]
<ast.Tuple object at 0x7da207f00e50> assign[=] name[chanmodes]
variable[argument_modes] assign[=] binary_operation[binary_operation[name[list_modes] <ast.BitOr object at 0x7da2590d6aa0> name[always_arg_modes]] <ast.BitOr object at 0x7da2590d6aa0> name[set_arg_modes]]
variable[tokens] assign[=] call[name[args].split, parameter[]]
while name[tokens] begin[:]
<ast.Tuple object at 0x7da207f033a0> assign[=] tuple[[<ast.Subscript object at 0x7da207f000a0>, <ast.Subscript object at 0x7da207f019c0>]]
<ast.Tuple object at 0x7da207f01f60> assign[=] tuple[[<ast.Subscript object at 0x7da207f01180>, <ast.Subscript object at 0x7da207f03f40>]]
for taget[name[mode]] in starred[name[modes]] begin[:]
variable[argument] assign[=] constant[None]
if compare[name[mode] in binary_operation[name[user_modes] <ast.BitOr object at 0x7da2590d6aa0> name[argument_modes]]] begin[:]
<ast.Tuple object at 0x7da207f03ac0> assign[=] tuple[[<ast.Subscript object at 0x7da207f01360>, <ast.Subscript object at 0x7da207f03070>]]
if compare[name[mode] in name[user_modes]] begin[:]
variable[user] assign[=] call[call[name[client].server.get_channel, parameter[name[channel]]].members][name[argument]]
if compare[name[op] equal[==] constant[+]] begin[:]
call[name[user].modes.add, parameter[name[mode]]]
if compare[name[op] equal[==] constant[+]] begin[:]
if compare[name[mode] in binary_operation[name[always_arg_modes] <ast.BitOr object at 0x7da2590d6aa0> name[set_arg_modes]]] begin[:]
call[name[chan].modes][name[mode]] assign[=] name[argument]
call[name[client].dispatch_event, parameter[constant[MODE], name[actor], name[chan], name[op], name[mode], name[argument]]] | keyword[def] identifier[_parse_mode] ( identifier[client] , identifier[command] , identifier[actor] , identifier[args] ):
literal[string]
identifier[chantypes] = identifier[client] . identifier[server] . identifier[features] . identifier[get] ( literal[string] , literal[string] )
identifier[channel] , identifier[_] , identifier[args] = identifier[args] . identifier[partition] ( literal[string] )
identifier[args] = identifier[args] . identifier[lstrip] ( literal[string] )
keyword[if] identifier[channel] [ literal[int] ] keyword[not] keyword[in] identifier[chantypes] :
keyword[for] identifier[modes] keyword[in] identifier[args] . identifier[split] ():
identifier[op] , identifier[modes] = identifier[modes] [ literal[int] ], identifier[modes] [ literal[int] :]
keyword[for] identifier[mode] keyword[in] identifier[modes] :
keyword[if] identifier[op] == literal[string] :
identifier[client] . identifier[user] . identifier[modes] . identifier[add] ( identifier[mode] )
keyword[else] :
identifier[client] . identifier[user] . identifier[modes] . identifier[discard] ( identifier[mode] )
identifier[client] . identifier[dispatch_event] ( literal[string] , identifier[actor] , identifier[client] . identifier[user] , identifier[op] , identifier[mode] , keyword[None] )
keyword[return]
identifier[chan] = identifier[client] . identifier[server] . identifier[get_channel] ( identifier[channel] )
identifier[user_modes] = identifier[set] ( identifier[client] . identifier[_get_prefixes] (). identifier[itervalues] ())
identifier[chanmodes] = identifier[client] . identifier[_get_chanmodes] ()
identifier[list_modes] , identifier[always_arg_modes] , identifier[set_arg_modes] , identifier[toggle_modes] = identifier[chanmodes]
identifier[argument_modes] = identifier[list_modes] | identifier[always_arg_modes] | identifier[set_arg_modes]
identifier[tokens] = identifier[args] . identifier[split] ()
keyword[while] identifier[tokens] :
identifier[modes] , identifier[tokens] = identifier[tokens] [ literal[int] ], identifier[tokens] [ literal[int] :]
identifier[op] , identifier[modes] = identifier[modes] [ literal[int] ], identifier[modes] [ literal[int] :]
keyword[for] identifier[mode] keyword[in] identifier[modes] :
identifier[argument] = keyword[None]
keyword[if] identifier[mode] keyword[in] ( identifier[user_modes] | identifier[argument_modes] ):
identifier[argument] , identifier[tokens] = identifier[tokens] [ literal[int] ], identifier[tokens] [ literal[int] :]
keyword[if] identifier[mode] keyword[in] identifier[user_modes] :
identifier[user] = identifier[client] . identifier[server] . identifier[get_channel] ( identifier[channel] ). identifier[members] [ identifier[argument] ]
keyword[if] identifier[op] == literal[string] :
identifier[user] . identifier[modes] . identifier[add] ( identifier[mode] )
keyword[else] :
identifier[user] . identifier[modes] . identifier[discard] ( identifier[mode] )
keyword[if] identifier[op] == literal[string] :
keyword[if] identifier[mode] keyword[in] ( identifier[always_arg_modes] | identifier[set_arg_modes] ):
identifier[chan] . identifier[modes] [ identifier[mode] ]= identifier[argument]
keyword[elif] identifier[mode] keyword[in] identifier[toggle_modes] :
identifier[chan] . identifier[modes] [ identifier[mode] ]= keyword[True]
keyword[else] :
keyword[if] identifier[mode] keyword[in] ( identifier[always_arg_modes] | identifier[set_arg_modes] | identifier[toggle_modes] ):
keyword[if] identifier[mode] keyword[in] identifier[chan] . identifier[modes] :
keyword[del] identifier[chan] . identifier[modes] [ identifier[mode] ]
identifier[client] . identifier[dispatch_event] ( literal[string] , identifier[actor] , identifier[chan] , identifier[op] , identifier[mode] , identifier[argument] ) | def _parse_mode(client, command, actor, args):
"""Parse a mode changes, update states, and dispatch MODE events."""
chantypes = client.server.features.get('CHANTYPES', '#')
(channel, _, args) = args.partition(' ')
args = args.lstrip(':')
if channel[0] not in chantypes:
# Personal modes
for modes in args.split():
(op, modes) = (modes[0], modes[1:])
for mode in modes:
if op == '+':
client.user.modes.add(mode) # depends on [control=['if'], data=[]]
else:
client.user.modes.discard(mode)
client.dispatch_event('MODE', actor, client.user, op, mode, None) # depends on [control=['for'], data=['mode']] # depends on [control=['for'], data=['modes']]
return # depends on [control=['if'], data=[]]
# channel-specific modes
chan = client.server.get_channel(channel)
user_modes = set(client._get_prefixes().itervalues())
chanmodes = client._get_chanmodes()
(list_modes, always_arg_modes, set_arg_modes, toggle_modes) = chanmodes
argument_modes = list_modes | always_arg_modes | set_arg_modes
tokens = args.split()
while tokens:
(modes, tokens) = (tokens[0], tokens[1:])
(op, modes) = (modes[0], modes[1:])
for mode in modes:
argument = None
if mode in user_modes | argument_modes:
(argument, tokens) = (tokens[0], tokens[1:]) # depends on [control=['if'], data=[]]
if mode in user_modes:
user = client.server.get_channel(channel).members[argument]
if op == '+':
user.modes.add(mode) # depends on [control=['if'], data=[]]
else:
user.modes.discard(mode) # depends on [control=['if'], data=['mode']]
if op == '+':
if mode in always_arg_modes | set_arg_modes:
chan.modes[mode] = argument # depends on [control=['if'], data=['mode']]
elif mode in toggle_modes:
chan.modes[mode] = True # depends on [control=['if'], data=['mode']] # depends on [control=['if'], data=[]]
elif mode in always_arg_modes | set_arg_modes | toggle_modes:
if mode in chan.modes:
del chan.modes[mode] # depends on [control=['if'], data=['mode']] # depends on [control=['if'], data=['mode']]
# list-type modes (bans+exceptions, invite masks) aren't stored,
# but do generate MODE events.
client.dispatch_event('MODE', actor, chan, op, mode, argument) # depends on [control=['for'], data=['mode']] # depends on [control=['while'], data=[]] |
def parse_operation_type(lexer: Lexer) -> OperationType:
"""OperationType: one of query mutation subscription"""
operation_token = expect_token(lexer, TokenKind.NAME)
try:
return OperationType(operation_token.value)
except ValueError:
raise unexpected(lexer, operation_token) | def function[parse_operation_type, parameter[lexer]]:
constant[OperationType: one of query mutation subscription]
variable[operation_token] assign[=] call[name[expect_token], parameter[name[lexer], name[TokenKind].NAME]]
<ast.Try object at 0x7da1b22e9300> | keyword[def] identifier[parse_operation_type] ( identifier[lexer] : identifier[Lexer] )-> identifier[OperationType] :
literal[string]
identifier[operation_token] = identifier[expect_token] ( identifier[lexer] , identifier[TokenKind] . identifier[NAME] )
keyword[try] :
keyword[return] identifier[OperationType] ( identifier[operation_token] . identifier[value] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[unexpected] ( identifier[lexer] , identifier[operation_token] ) | def parse_operation_type(lexer: Lexer) -> OperationType:
"""OperationType: one of query mutation subscription"""
operation_token = expect_token(lexer, TokenKind.NAME)
try:
return OperationType(operation_token.value) # depends on [control=['try'], data=[]]
except ValueError:
raise unexpected(lexer, operation_token) # depends on [control=['except'], data=[]] |
def docker_windows_reverse_path_adjust(path):
# type: (Text) -> (Text)
r"""
Change docker path (only on windows os) appropriately back to Window path/
Example: /C/Users/foo to C:\Users\foo
"""
if path is not None and onWindows():
if path[0] == '/':
path = path[1:]
else:
raise ValueError("not a docker path")
splitpath = path.split('/')
splitpath[0] = splitpath[0]+':'
return '\\'.join(splitpath)
return path | def function[docker_windows_reverse_path_adjust, parameter[path]]:
constant[
Change docker path (only on windows os) appropriately back to Window path/
Example: /C/Users/foo to C:\Users\foo
]
if <ast.BoolOp object at 0x7da20e74b850> begin[:]
if compare[call[name[path]][constant[0]] equal[==] constant[/]] begin[:]
variable[path] assign[=] call[name[path]][<ast.Slice object at 0x7da20e74a890>]
variable[splitpath] assign[=] call[name[path].split, parameter[constant[/]]]
call[name[splitpath]][constant[0]] assign[=] binary_operation[call[name[splitpath]][constant[0]] + constant[:]]
return[call[constant[\].join, parameter[name[splitpath]]]]
return[name[path]] | keyword[def] identifier[docker_windows_reverse_path_adjust] ( identifier[path] ):
literal[string]
keyword[if] identifier[path] keyword[is] keyword[not] keyword[None] keyword[and] identifier[onWindows] ():
keyword[if] identifier[path] [ literal[int] ]== literal[string] :
identifier[path] = identifier[path] [ literal[int] :]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[splitpath] = identifier[path] . identifier[split] ( literal[string] )
identifier[splitpath] [ literal[int] ]= identifier[splitpath] [ literal[int] ]+ literal[string]
keyword[return] literal[string] . identifier[join] ( identifier[splitpath] )
keyword[return] identifier[path] | def docker_windows_reverse_path_adjust(path):
# type: (Text) -> (Text)
'\n Change docker path (only on windows os) appropriately back to Window path/\n Example: /C/Users/foo to C:\\Users\\foo\n '
if path is not None and onWindows():
if path[0] == '/':
path = path[1:] # depends on [control=['if'], data=[]]
else:
raise ValueError('not a docker path')
splitpath = path.split('/')
splitpath[0] = splitpath[0] + ':'
return '\\'.join(splitpath) # depends on [control=['if'], data=[]]
return path |
def partitions(l, partition_size):
"""
>>> list(partitions([], 10))
[]
>>> list(partitions([1,2,3,4,5], 1))
[[1], [2], [3], [4], [5]]
>>> list(partitions([1,2,3,4,5], 2))
[[1, 2], [3, 4], [5]]
>>> list(partitions([1,2,3,4,5], 5))
[[1, 2, 3, 4, 5]]
:param list l: List to be partitioned
:param int partition_size: Size of partitions
"""
for i in xrange(0, len(l), partition_size):
yield l[i:i + partition_size] | def function[partitions, parameter[l, partition_size]]:
constant[
>>> list(partitions([], 10))
[]
>>> list(partitions([1,2,3,4,5], 1))
[[1], [2], [3], [4], [5]]
>>> list(partitions([1,2,3,4,5], 2))
[[1, 2], [3, 4], [5]]
>>> list(partitions([1,2,3,4,5], 5))
[[1, 2, 3, 4, 5]]
:param list l: List to be partitioned
:param int partition_size: Size of partitions
]
for taget[name[i]] in starred[call[name[xrange], parameter[constant[0], call[name[len], parameter[name[l]]], name[partition_size]]]] begin[:]
<ast.Yield object at 0x7da2046214b0> | keyword[def] identifier[partitions] ( identifier[l] , identifier[partition_size] ):
literal[string]
keyword[for] identifier[i] keyword[in] identifier[xrange] ( literal[int] , identifier[len] ( identifier[l] ), identifier[partition_size] ):
keyword[yield] identifier[l] [ identifier[i] : identifier[i] + identifier[partition_size] ] | def partitions(l, partition_size):
"""
>>> list(partitions([], 10))
[]
>>> list(partitions([1,2,3,4,5], 1))
[[1], [2], [3], [4], [5]]
>>> list(partitions([1,2,3,4,5], 2))
[[1, 2], [3, 4], [5]]
>>> list(partitions([1,2,3,4,5], 5))
[[1, 2, 3, 4, 5]]
:param list l: List to be partitioned
:param int partition_size: Size of partitions
"""
for i in xrange(0, len(l), partition_size):
yield l[i:i + partition_size] # depends on [control=['for'], data=['i']] |
def _parse_row(row):
"""Parses HTML row
:param row: HTML row
:return: list of values in row
"""
data = []
labels = HtmlTable._get_row_tag(row, "th")
if labels:
data += labels
columns = HtmlTable._get_row_tag(row, "td")
if columns:
data += columns
return data | def function[_parse_row, parameter[row]]:
constant[Parses HTML row
:param row: HTML row
:return: list of values in row
]
variable[data] assign[=] list[[]]
variable[labels] assign[=] call[name[HtmlTable]._get_row_tag, parameter[name[row], constant[th]]]
if name[labels] begin[:]
<ast.AugAssign object at 0x7da1b1eed960>
variable[columns] assign[=] call[name[HtmlTable]._get_row_tag, parameter[name[row], constant[td]]]
if name[columns] begin[:]
<ast.AugAssign object at 0x7da1b1eef190>
return[name[data]] | keyword[def] identifier[_parse_row] ( identifier[row] ):
literal[string]
identifier[data] =[]
identifier[labels] = identifier[HtmlTable] . identifier[_get_row_tag] ( identifier[row] , literal[string] )
keyword[if] identifier[labels] :
identifier[data] += identifier[labels]
identifier[columns] = identifier[HtmlTable] . identifier[_get_row_tag] ( identifier[row] , literal[string] )
keyword[if] identifier[columns] :
identifier[data] += identifier[columns]
keyword[return] identifier[data] | def _parse_row(row):
"""Parses HTML row
:param row: HTML row
:return: list of values in row
"""
data = []
labels = HtmlTable._get_row_tag(row, 'th')
if labels:
data += labels # depends on [control=['if'], data=[]]
columns = HtmlTable._get_row_tag(row, 'td')
if columns:
data += columns # depends on [control=['if'], data=[]]
return data |
def get_host_from_service_info(service_info):
""" Get hostname or IP from service_info. """
host = None
port = None
if (service_info and service_info.port and
(service_info.server or service_info.address)):
if service_info.address:
host = socket.inet_ntoa(service_info.address)
else:
host = service_info.server.lower()
port = service_info.port
return (host, port) | def function[get_host_from_service_info, parameter[service_info]]:
constant[ Get hostname or IP from service_info. ]
variable[host] assign[=] constant[None]
variable[port] assign[=] constant[None]
if <ast.BoolOp object at 0x7da20c7cb400> begin[:]
if name[service_info].address begin[:]
variable[host] assign[=] call[name[socket].inet_ntoa, parameter[name[service_info].address]]
variable[port] assign[=] name[service_info].port
return[tuple[[<ast.Name object at 0x7da20c7ca260>, <ast.Name object at 0x7da20c7c8d00>]]] | keyword[def] identifier[get_host_from_service_info] ( identifier[service_info] ):
literal[string]
identifier[host] = keyword[None]
identifier[port] = keyword[None]
keyword[if] ( identifier[service_info] keyword[and] identifier[service_info] . identifier[port] keyword[and]
( identifier[service_info] . identifier[server] keyword[or] identifier[service_info] . identifier[address] )):
keyword[if] identifier[service_info] . identifier[address] :
identifier[host] = identifier[socket] . identifier[inet_ntoa] ( identifier[service_info] . identifier[address] )
keyword[else] :
identifier[host] = identifier[service_info] . identifier[server] . identifier[lower] ()
identifier[port] = identifier[service_info] . identifier[port]
keyword[return] ( identifier[host] , identifier[port] ) | def get_host_from_service_info(service_info):
""" Get hostname or IP from service_info. """
host = None
port = None
if service_info and service_info.port and (service_info.server or service_info.address):
if service_info.address:
host = socket.inet_ntoa(service_info.address) # depends on [control=['if'], data=[]]
else:
host = service_info.server.lower()
port = service_info.port # depends on [control=['if'], data=[]]
return (host, port) |
def subscribe_all_to_spec(self, spec):
""" Will automatically not subscribe reporters that are not parallel
or serial depending on the current mode.
"""
for reporter in self.reporters:
if self.can_use_reporter(reporter, self.parallel):
reporter.subscribe_to_spec(spec) | def function[subscribe_all_to_spec, parameter[self, spec]]:
constant[ Will automatically not subscribe reporters that are not parallel
or serial depending on the current mode.
]
for taget[name[reporter]] in starred[name[self].reporters] begin[:]
if call[name[self].can_use_reporter, parameter[name[reporter], name[self].parallel]] begin[:]
call[name[reporter].subscribe_to_spec, parameter[name[spec]]] | keyword[def] identifier[subscribe_all_to_spec] ( identifier[self] , identifier[spec] ):
literal[string]
keyword[for] identifier[reporter] keyword[in] identifier[self] . identifier[reporters] :
keyword[if] identifier[self] . identifier[can_use_reporter] ( identifier[reporter] , identifier[self] . identifier[parallel] ):
identifier[reporter] . identifier[subscribe_to_spec] ( identifier[spec] ) | def subscribe_all_to_spec(self, spec):
""" Will automatically not subscribe reporters that are not parallel
or serial depending on the current mode.
"""
for reporter in self.reporters:
if self.can_use_reporter(reporter, self.parallel):
reporter.subscribe_to_spec(spec) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['reporter']] |
def add_file_ident_desc(self, new_fi_desc, logical_block_size):
# type: (UDFFileIdentifierDescriptor, int) -> int
'''
A method to add a new UDF File Identifier Descriptor to this UDF File
Entry.
Parameters:
new_fi_desc - The new UDF File Identifier Descriptor to add.
logical_block_size - The logical block size to use.
Returns:
The number of extents added due to adding this File Identifier Descriptor.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF File Entry not initialized')
if self.icb_tag.file_type != 4:
raise pycdlibexception.PyCdlibInvalidInput('Can only add a UDF File Identifier to a directory')
self.fi_descs.append(new_fi_desc)
num_bytes_to_add = UDFFileIdentifierDescriptor.length(len(new_fi_desc.fi))
old_num_extents = 0
# If info_len is 0, then this is a brand-new File Entry, and thus the
# number of extents it is using is 0.
if self.info_len > 0:
old_num_extents = utils.ceiling_div(self.info_len, logical_block_size)
self.info_len += num_bytes_to_add
new_num_extents = utils.ceiling_div(self.info_len, logical_block_size)
self.log_block_recorded = new_num_extents
self.alloc_descs[0][0] = self.info_len
if new_fi_desc.is_dir():
self.file_link_count += 1
return new_num_extents - old_num_extents | def function[add_file_ident_desc, parameter[self, new_fi_desc, logical_block_size]]:
constant[
A method to add a new UDF File Identifier Descriptor to this UDF File
Entry.
Parameters:
new_fi_desc - The new UDF File Identifier Descriptor to add.
logical_block_size - The logical block size to use.
Returns:
The number of extents added due to adding this File Identifier Descriptor.
]
if <ast.UnaryOp object at 0x7da1b0d0cd90> begin[:]
<ast.Raise object at 0x7da1b0d0d930>
if compare[name[self].icb_tag.file_type not_equal[!=] constant[4]] begin[:]
<ast.Raise object at 0x7da1b0d0d8a0>
call[name[self].fi_descs.append, parameter[name[new_fi_desc]]]
variable[num_bytes_to_add] assign[=] call[name[UDFFileIdentifierDescriptor].length, parameter[call[name[len], parameter[name[new_fi_desc].fi]]]]
variable[old_num_extents] assign[=] constant[0]
if compare[name[self].info_len greater[>] constant[0]] begin[:]
variable[old_num_extents] assign[=] call[name[utils].ceiling_div, parameter[name[self].info_len, name[logical_block_size]]]
<ast.AugAssign object at 0x7da1b0d0e9b0>
variable[new_num_extents] assign[=] call[name[utils].ceiling_div, parameter[name[self].info_len, name[logical_block_size]]]
name[self].log_block_recorded assign[=] name[new_num_extents]
call[call[name[self].alloc_descs][constant[0]]][constant[0]] assign[=] name[self].info_len
if call[name[new_fi_desc].is_dir, parameter[]] begin[:]
<ast.AugAssign object at 0x7da1b0ff12a0>
return[binary_operation[name[new_num_extents] - name[old_num_extents]]] | keyword[def] identifier[add_file_ident_desc] ( identifier[self] , identifier[new_fi_desc] , identifier[logical_block_size] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_initialized] :
keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInternalError] ( literal[string] )
keyword[if] identifier[self] . identifier[icb_tag] . identifier[file_type] != literal[int] :
keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInvalidInput] ( literal[string] )
identifier[self] . identifier[fi_descs] . identifier[append] ( identifier[new_fi_desc] )
identifier[num_bytes_to_add] = identifier[UDFFileIdentifierDescriptor] . identifier[length] ( identifier[len] ( identifier[new_fi_desc] . identifier[fi] ))
identifier[old_num_extents] = literal[int]
keyword[if] identifier[self] . identifier[info_len] > literal[int] :
identifier[old_num_extents] = identifier[utils] . identifier[ceiling_div] ( identifier[self] . identifier[info_len] , identifier[logical_block_size] )
identifier[self] . identifier[info_len] += identifier[num_bytes_to_add]
identifier[new_num_extents] = identifier[utils] . identifier[ceiling_div] ( identifier[self] . identifier[info_len] , identifier[logical_block_size] )
identifier[self] . identifier[log_block_recorded] = identifier[new_num_extents]
identifier[self] . identifier[alloc_descs] [ literal[int] ][ literal[int] ]= identifier[self] . identifier[info_len]
keyword[if] identifier[new_fi_desc] . identifier[is_dir] ():
identifier[self] . identifier[file_link_count] += literal[int]
keyword[return] identifier[new_num_extents] - identifier[old_num_extents] | def add_file_ident_desc(self, new_fi_desc, logical_block_size):
# type: (UDFFileIdentifierDescriptor, int) -> int
'\n A method to add a new UDF File Identifier Descriptor to this UDF File\n Entry.\n\n Parameters:\n new_fi_desc - The new UDF File Identifier Descriptor to add.\n logical_block_size - The logical block size to use.\n Returns:\n The number of extents added due to adding this File Identifier Descriptor.\n '
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF File Entry not initialized') # depends on [control=['if'], data=[]]
if self.icb_tag.file_type != 4:
raise pycdlibexception.PyCdlibInvalidInput('Can only add a UDF File Identifier to a directory') # depends on [control=['if'], data=[]]
self.fi_descs.append(new_fi_desc)
num_bytes_to_add = UDFFileIdentifierDescriptor.length(len(new_fi_desc.fi))
old_num_extents = 0
# If info_len is 0, then this is a brand-new File Entry, and thus the
# number of extents it is using is 0.
if self.info_len > 0:
old_num_extents = utils.ceiling_div(self.info_len, logical_block_size) # depends on [control=['if'], data=[]]
self.info_len += num_bytes_to_add
new_num_extents = utils.ceiling_div(self.info_len, logical_block_size)
self.log_block_recorded = new_num_extents
self.alloc_descs[0][0] = self.info_len
if new_fi_desc.is_dir():
self.file_link_count += 1 # depends on [control=['if'], data=[]]
return new_num_extents - old_num_extents |
def get_gradebook_column_form(self, *args, **kwargs):
"""Pass through to provider GradebookColumnAdminSession.get_gradebook_column_form_for_update"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.get_resource_form_for_update
# This method might be a bit sketchy. Time will tell.
if isinstance(args[-1], list) or 'gradebook_column_record_types' in kwargs:
return self.get_gradebook_column_form_for_create(*args, **kwargs)
else:
return self.get_gradebook_column_form_for_update(*args, **kwargs) | def function[get_gradebook_column_form, parameter[self]]:
constant[Pass through to provider GradebookColumnAdminSession.get_gradebook_column_form_for_update]
if <ast.BoolOp object at 0x7da20c7c83a0> begin[:]
return[call[name[self].get_gradebook_column_form_for_create, parameter[<ast.Starred object at 0x7da20c7cb8b0>]]] | keyword[def] identifier[get_gradebook_column_form] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[args] [- literal[int] ], identifier[list] ) keyword[or] literal[string] keyword[in] identifier[kwargs] :
keyword[return] identifier[self] . identifier[get_gradebook_column_form_for_create] (* identifier[args] ,** identifier[kwargs] )
keyword[else] :
keyword[return] identifier[self] . identifier[get_gradebook_column_form_for_update] (* identifier[args] ,** identifier[kwargs] ) | def get_gradebook_column_form(self, *args, **kwargs):
"""Pass through to provider GradebookColumnAdminSession.get_gradebook_column_form_for_update"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.get_resource_form_for_update
# This method might be a bit sketchy. Time will tell.
if isinstance(args[-1], list) or 'gradebook_column_record_types' in kwargs:
return self.get_gradebook_column_form_for_create(*args, **kwargs) # depends on [control=['if'], data=[]]
else:
return self.get_gradebook_column_form_for_update(*args, **kwargs) |
def expand_to_chunk_size(self, chunk_size, offset=Vec(0,0,0, dtype=int)):
"""
Align a potentially non-axis aligned bbox to the grid by growing it
to the nearest grid lines.
Required:
chunk_size: arraylike (x,y,z), the size of chunks in the
dataset e.g. (64,64,64)
Optional:
offset: arraylike (x,y,z), the starting coordinate of the dataset
"""
chunk_size = np.array(chunk_size, dtype=np.float32)
result = self.clone()
result = result - offset
result.minpt = np.floor(result.minpt / chunk_size) * chunk_size
result.maxpt = np.ceil(result.maxpt / chunk_size) * chunk_size
return (result + offset).astype(self.dtype) | def function[expand_to_chunk_size, parameter[self, chunk_size, offset]]:
constant[
Align a potentially non-axis aligned bbox to the grid by growing it
to the nearest grid lines.
Required:
chunk_size: arraylike (x,y,z), the size of chunks in the
dataset e.g. (64,64,64)
Optional:
offset: arraylike (x,y,z), the starting coordinate of the dataset
]
variable[chunk_size] assign[=] call[name[np].array, parameter[name[chunk_size]]]
variable[result] assign[=] call[name[self].clone, parameter[]]
variable[result] assign[=] binary_operation[name[result] - name[offset]]
name[result].minpt assign[=] binary_operation[call[name[np].floor, parameter[binary_operation[name[result].minpt / name[chunk_size]]]] * name[chunk_size]]
name[result].maxpt assign[=] binary_operation[call[name[np].ceil, parameter[binary_operation[name[result].maxpt / name[chunk_size]]]] * name[chunk_size]]
return[call[binary_operation[name[result] + name[offset]].astype, parameter[name[self].dtype]]] | keyword[def] identifier[expand_to_chunk_size] ( identifier[self] , identifier[chunk_size] , identifier[offset] = identifier[Vec] ( literal[int] , literal[int] , literal[int] , identifier[dtype] = identifier[int] )):
literal[string]
identifier[chunk_size] = identifier[np] . identifier[array] ( identifier[chunk_size] , identifier[dtype] = identifier[np] . identifier[float32] )
identifier[result] = identifier[self] . identifier[clone] ()
identifier[result] = identifier[result] - identifier[offset]
identifier[result] . identifier[minpt] = identifier[np] . identifier[floor] ( identifier[result] . identifier[minpt] / identifier[chunk_size] )* identifier[chunk_size]
identifier[result] . identifier[maxpt] = identifier[np] . identifier[ceil] ( identifier[result] . identifier[maxpt] / identifier[chunk_size] )* identifier[chunk_size]
keyword[return] ( identifier[result] + identifier[offset] ). identifier[astype] ( identifier[self] . identifier[dtype] ) | def expand_to_chunk_size(self, chunk_size, offset=Vec(0, 0, 0, dtype=int)):
"""
Align a potentially non-axis aligned bbox to the grid by growing it
to the nearest grid lines.
Required:
chunk_size: arraylike (x,y,z), the size of chunks in the
dataset e.g. (64,64,64)
Optional:
offset: arraylike (x,y,z), the starting coordinate of the dataset
"""
chunk_size = np.array(chunk_size, dtype=np.float32)
result = self.clone()
result = result - offset
result.minpt = np.floor(result.minpt / chunk_size) * chunk_size
result.maxpt = np.ceil(result.maxpt / chunk_size) * chunk_size
return (result + offset).astype(self.dtype) |
def build_from_c_and_cpp_files(extensions):
"""Modify the extensions to build from the .c and .cpp files.
This is useful for releases, this way cython is not required to
run python setup.py install.
"""
for extension in extensions:
sources = []
for sfile in extension.sources:
path, ext = os.path.splitext(sfile)
if ext in ('.pyx', '.py'):
if extension.language == 'c++':
ext = '.cpp'
else:
ext = '.c'
sfile = path + ext
sources.append(sfile)
extension.sources = sources | def function[build_from_c_and_cpp_files, parameter[extensions]]:
constant[Modify the extensions to build from the .c and .cpp files.
This is useful for releases, this way cython is not required to
run python setup.py install.
]
for taget[name[extension]] in starred[name[extensions]] begin[:]
variable[sources] assign[=] list[[]]
for taget[name[sfile]] in starred[name[extension].sources] begin[:]
<ast.Tuple object at 0x7da204564580> assign[=] call[name[os].path.splitext, parameter[name[sfile]]]
if compare[name[ext] in tuple[[<ast.Constant object at 0x7da204566d70>, <ast.Constant object at 0x7da2045667d0>]]] begin[:]
if compare[name[extension].language equal[==] constant[c++]] begin[:]
variable[ext] assign[=] constant[.cpp]
variable[sfile] assign[=] binary_operation[name[path] + name[ext]]
call[name[sources].append, parameter[name[sfile]]]
name[extension].sources assign[=] name[sources] | keyword[def] identifier[build_from_c_and_cpp_files] ( identifier[extensions] ):
literal[string]
keyword[for] identifier[extension] keyword[in] identifier[extensions] :
identifier[sources] =[]
keyword[for] identifier[sfile] keyword[in] identifier[extension] . identifier[sources] :
identifier[path] , identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[sfile] )
keyword[if] identifier[ext] keyword[in] ( literal[string] , literal[string] ):
keyword[if] identifier[extension] . identifier[language] == literal[string] :
identifier[ext] = literal[string]
keyword[else] :
identifier[ext] = literal[string]
identifier[sfile] = identifier[path] + identifier[ext]
identifier[sources] . identifier[append] ( identifier[sfile] )
identifier[extension] . identifier[sources] = identifier[sources] | def build_from_c_and_cpp_files(extensions):
"""Modify the extensions to build from the .c and .cpp files.
This is useful for releases, this way cython is not required to
run python setup.py install.
"""
for extension in extensions:
sources = []
for sfile in extension.sources:
(path, ext) = os.path.splitext(sfile)
if ext in ('.pyx', '.py'):
if extension.language == 'c++':
ext = '.cpp' # depends on [control=['if'], data=[]]
else:
ext = '.c'
sfile = path + ext # depends on [control=['if'], data=['ext']]
sources.append(sfile) # depends on [control=['for'], data=['sfile']]
extension.sources = sources # depends on [control=['for'], data=['extension']] |
def dumpBlock(self, block_name):
"""
API the list all information related with the block_name
:param block_name: Name of block to be dumped (Required)
:type block_name: str
"""
try:
return self.dbsBlock.dumpBlock(block_name)
except HTTPError as he:
raise he
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/dumpBlock. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', ex.message, self.logger.exception, sError) | def function[dumpBlock, parameter[self, block_name]]:
constant[
API the list all information related with the block_name
:param block_name: Name of block to be dumped (Required)
:type block_name: str
]
<ast.Try object at 0x7da20c794100> | keyword[def] identifier[dumpBlock] ( identifier[self] , identifier[block_name] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[dbsBlock] . identifier[dumpBlock] ( identifier[block_name] )
keyword[except] identifier[HTTPError] keyword[as] identifier[he] :
keyword[raise] identifier[he]
keyword[except] identifier[dbsException] keyword[as] identifier[de] :
identifier[dbsExceptionHandler] ( identifier[de] . identifier[eCode] , identifier[de] . identifier[message] , identifier[self] . identifier[logger] . identifier[exception] , identifier[de] . identifier[serverError] )
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
identifier[sError] = literal[string] %( identifier[ex] , identifier[traceback] . identifier[format_exc] ())
identifier[dbsExceptionHandler] ( literal[string] , identifier[ex] . identifier[message] , identifier[self] . identifier[logger] . identifier[exception] , identifier[sError] ) | def dumpBlock(self, block_name):
"""
API the list all information related with the block_name
:param block_name: Name of block to be dumped (Required)
:type block_name: str
"""
try:
return self.dbsBlock.dumpBlock(block_name) # depends on [control=['try'], data=[]]
except HTTPError as he:
raise he # depends on [control=['except'], data=['he']]
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError) # depends on [control=['except'], data=['de']]
except Exception as ex:
sError = 'DBSReaderModel/dumpBlock. %s\n. Exception trace: \n %s' % (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', ex.message, self.logger.exception, sError) # depends on [control=['except'], data=['ex']] |
def _get_napp_key(self, key, user=None, napp=None):
"""Return a value from kytos.json.
Args:
user (string): A Username.
napp (string): A NApp name
key (string): Key used to get the value within kytos.json.
Returns:
meta (object): Value stored in kytos.json.
"""
if user is None:
user = self.user
if napp is None:
napp = self.napp
kytos_json = self._installed / user / napp / 'kytos.json'
try:
with kytos_json.open() as file_descriptor:
meta = json.load(file_descriptor)
return meta[key]
except (FileNotFoundError, json.JSONDecodeError, KeyError):
return '' | def function[_get_napp_key, parameter[self, key, user, napp]]:
constant[Return a value from kytos.json.
Args:
user (string): A Username.
napp (string): A NApp name
key (string): Key used to get the value within kytos.json.
Returns:
meta (object): Value stored in kytos.json.
]
if compare[name[user] is constant[None]] begin[:]
variable[user] assign[=] name[self].user
if compare[name[napp] is constant[None]] begin[:]
variable[napp] assign[=] name[self].napp
variable[kytos_json] assign[=] binary_operation[binary_operation[binary_operation[name[self]._installed / name[user]] / name[napp]] / constant[kytos.json]]
<ast.Try object at 0x7da1b25d2680> | keyword[def] identifier[_get_napp_key] ( identifier[self] , identifier[key] , identifier[user] = keyword[None] , identifier[napp] = keyword[None] ):
literal[string]
keyword[if] identifier[user] keyword[is] keyword[None] :
identifier[user] = identifier[self] . identifier[user]
keyword[if] identifier[napp] keyword[is] keyword[None] :
identifier[napp] = identifier[self] . identifier[napp]
identifier[kytos_json] = identifier[self] . identifier[_installed] / identifier[user] / identifier[napp] / literal[string]
keyword[try] :
keyword[with] identifier[kytos_json] . identifier[open] () keyword[as] identifier[file_descriptor] :
identifier[meta] = identifier[json] . identifier[load] ( identifier[file_descriptor] )
keyword[return] identifier[meta] [ identifier[key] ]
keyword[except] ( identifier[FileNotFoundError] , identifier[json] . identifier[JSONDecodeError] , identifier[KeyError] ):
keyword[return] literal[string] | def _get_napp_key(self, key, user=None, napp=None):
"""Return a value from kytos.json.
Args:
user (string): A Username.
napp (string): A NApp name
key (string): Key used to get the value within kytos.json.
Returns:
meta (object): Value stored in kytos.json.
"""
if user is None:
user = self.user # depends on [control=['if'], data=['user']]
if napp is None:
napp = self.napp # depends on [control=['if'], data=['napp']]
kytos_json = self._installed / user / napp / 'kytos.json'
try:
with kytos_json.open() as file_descriptor:
meta = json.load(file_descriptor)
return meta[key] # depends on [control=['with'], data=['file_descriptor']] # depends on [control=['try'], data=[]]
except (FileNotFoundError, json.JSONDecodeError, KeyError):
return '' # depends on [control=['except'], data=[]] |
def members(group_id):
"""List user group members."""
page = request.args.get('page', 1, type=int)
per_page = request.args.get('per_page', 5, type=int)
q = request.args.get('q', '')
s = request.args.get('s', '')
group = Group.query.get_or_404(group_id)
if group.can_see_members(current_user):
members = Membership.query_by_group(group_id, with_invitations=True)
if q:
members = Membership.search(members, q)
if s:
members = Membership.order(members, Membership.state, s)
members = members.paginate(page, per_page=per_page)
return render_template(
"invenio_groups/members.html",
group=group,
members=members,
page=page,
per_page=per_page,
q=q,
s=s,
)
flash(
_(
'You are not allowed to see members of this group %(group_name)s.',
group_name=group.name
),
'error'
)
return redirect(url_for('.index')) | def function[members, parameter[group_id]]:
constant[List user group members.]
variable[page] assign[=] call[name[request].args.get, parameter[constant[page], constant[1]]]
variable[per_page] assign[=] call[name[request].args.get, parameter[constant[per_page], constant[5]]]
variable[q] assign[=] call[name[request].args.get, parameter[constant[q], constant[]]]
variable[s] assign[=] call[name[request].args.get, parameter[constant[s], constant[]]]
variable[group] assign[=] call[name[Group].query.get_or_404, parameter[name[group_id]]]
if call[name[group].can_see_members, parameter[name[current_user]]] begin[:]
variable[members] assign[=] call[name[Membership].query_by_group, parameter[name[group_id]]]
if name[q] begin[:]
variable[members] assign[=] call[name[Membership].search, parameter[name[members], name[q]]]
if name[s] begin[:]
variable[members] assign[=] call[name[Membership].order, parameter[name[members], name[Membership].state, name[s]]]
variable[members] assign[=] call[name[members].paginate, parameter[name[page]]]
return[call[name[render_template], parameter[constant[invenio_groups/members.html]]]]
call[name[flash], parameter[call[name[_], parameter[constant[You are not allowed to see members of this group %(group_name)s.]]], constant[error]]]
return[call[name[redirect], parameter[call[name[url_for], parameter[constant[.index]]]]]] | keyword[def] identifier[members] ( identifier[group_id] ):
literal[string]
identifier[page] = identifier[request] . identifier[args] . identifier[get] ( literal[string] , literal[int] , identifier[type] = identifier[int] )
identifier[per_page] = identifier[request] . identifier[args] . identifier[get] ( literal[string] , literal[int] , identifier[type] = identifier[int] )
identifier[q] = identifier[request] . identifier[args] . identifier[get] ( literal[string] , literal[string] )
identifier[s] = identifier[request] . identifier[args] . identifier[get] ( literal[string] , literal[string] )
identifier[group] = identifier[Group] . identifier[query] . identifier[get_or_404] ( identifier[group_id] )
keyword[if] identifier[group] . identifier[can_see_members] ( identifier[current_user] ):
identifier[members] = identifier[Membership] . identifier[query_by_group] ( identifier[group_id] , identifier[with_invitations] = keyword[True] )
keyword[if] identifier[q] :
identifier[members] = identifier[Membership] . identifier[search] ( identifier[members] , identifier[q] )
keyword[if] identifier[s] :
identifier[members] = identifier[Membership] . identifier[order] ( identifier[members] , identifier[Membership] . identifier[state] , identifier[s] )
identifier[members] = identifier[members] . identifier[paginate] ( identifier[page] , identifier[per_page] = identifier[per_page] )
keyword[return] identifier[render_template] (
literal[string] ,
identifier[group] = identifier[group] ,
identifier[members] = identifier[members] ,
identifier[page] = identifier[page] ,
identifier[per_page] = identifier[per_page] ,
identifier[q] = identifier[q] ,
identifier[s] = identifier[s] ,
)
identifier[flash] (
identifier[_] (
literal[string] ,
identifier[group_name] = identifier[group] . identifier[name]
),
literal[string]
)
keyword[return] identifier[redirect] ( identifier[url_for] ( literal[string] )) | def members(group_id):
"""List user group members."""
page = request.args.get('page', 1, type=int)
per_page = request.args.get('per_page', 5, type=int)
q = request.args.get('q', '')
s = request.args.get('s', '')
group = Group.query.get_or_404(group_id)
if group.can_see_members(current_user):
members = Membership.query_by_group(group_id, with_invitations=True)
if q:
members = Membership.search(members, q) # depends on [control=['if'], data=[]]
if s:
members = Membership.order(members, Membership.state, s) # depends on [control=['if'], data=[]]
members = members.paginate(page, per_page=per_page)
return render_template('invenio_groups/members.html', group=group, members=members, page=page, per_page=per_page, q=q, s=s) # depends on [control=['if'], data=[]]
flash(_('You are not allowed to see members of this group %(group_name)s.', group_name=group.name), 'error')
return redirect(url_for('.index')) |
def TrimBeginningAndEndingSlashes(path):
"""Trims beginning and ending slashes
:param str path:
:return:
Path with beginning and ending slashes trimmed.
:rtype: str
"""
if path.startswith('/'):
# Returns substring starting from index 1 to end of the string
path = path[1:]
if path.endswith('/'):
# Returns substring starting from beginning to last but one char in the string
path = path[:-1]
return path | def function[TrimBeginningAndEndingSlashes, parameter[path]]:
constant[Trims beginning and ending slashes
:param str path:
:return:
Path with beginning and ending slashes trimmed.
:rtype: str
]
if call[name[path].startswith, parameter[constant[/]]] begin[:]
variable[path] assign[=] call[name[path]][<ast.Slice object at 0x7da1b1720d60>]
if call[name[path].endswith, parameter[constant[/]]] begin[:]
variable[path] assign[=] call[name[path]][<ast.Slice object at 0x7da1b1720160>]
return[name[path]] | keyword[def] identifier[TrimBeginningAndEndingSlashes] ( identifier[path] ):
literal[string]
keyword[if] identifier[path] . identifier[startswith] ( literal[string] ):
identifier[path] = identifier[path] [ literal[int] :]
keyword[if] identifier[path] . identifier[endswith] ( literal[string] ):
identifier[path] = identifier[path] [:- literal[int] ]
keyword[return] identifier[path] | def TrimBeginningAndEndingSlashes(path):
"""Trims beginning and ending slashes
:param str path:
:return:
Path with beginning and ending slashes trimmed.
:rtype: str
"""
if path.startswith('/'):
# Returns substring starting from index 1 to end of the string
path = path[1:] # depends on [control=['if'], data=[]]
if path.endswith('/'):
# Returns substring starting from beginning to last but one char in the string
path = path[:-1] # depends on [control=['if'], data=[]]
return path |
def check_internal_ip(request):
""" request is an AsgiRequest """
remote_addr = (request.META["HTTP_X_FORWARDED_FOR"] if "HTTP_X_FORWARDED_FOR" in request.META else request.META.get("REMOTE_ADDR", ""))
return remote_addr in settings.INTERNAL_IPS | def function[check_internal_ip, parameter[request]]:
constant[ request is an AsgiRequest ]
variable[remote_addr] assign[=] <ast.IfExp object at 0x7da1b0499fc0>
return[compare[name[remote_addr] in name[settings].INTERNAL_IPS]] | keyword[def] identifier[check_internal_ip] ( identifier[request] ):
literal[string]
identifier[remote_addr] =( identifier[request] . identifier[META] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[request] . identifier[META] keyword[else] identifier[request] . identifier[META] . identifier[get] ( literal[string] , literal[string] ))
keyword[return] identifier[remote_addr] keyword[in] identifier[settings] . identifier[INTERNAL_IPS] | def check_internal_ip(request):
""" request is an AsgiRequest """
remote_addr = request.META['HTTP_X_FORWARDED_FOR'] if 'HTTP_X_FORWARDED_FOR' in request.META else request.META.get('REMOTE_ADDR', '')
return remote_addr in settings.INTERNAL_IPS |
def previous_pages_numbers(self):
'A generator of previous page integers.'
count = self._previous_pages_count() + 1
for i in reversed(range(1, count)):
yield i | def function[previous_pages_numbers, parameter[self]]:
constant[A generator of previous page integers.]
variable[count] assign[=] binary_operation[call[name[self]._previous_pages_count, parameter[]] + constant[1]]
for taget[name[i]] in starred[call[name[reversed], parameter[call[name[range], parameter[constant[1], name[count]]]]]] begin[:]
<ast.Yield object at 0x7da2041db580> | keyword[def] identifier[previous_pages_numbers] ( identifier[self] ):
literal[string]
identifier[count] = identifier[self] . identifier[_previous_pages_count] ()+ literal[int]
keyword[for] identifier[i] keyword[in] identifier[reversed] ( identifier[range] ( literal[int] , identifier[count] )):
keyword[yield] identifier[i] | def previous_pages_numbers(self):
"""A generator of previous page integers."""
count = self._previous_pages_count() + 1
for i in reversed(range(1, count)):
yield i # depends on [control=['for'], data=['i']] |
def get_tenant(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Retrieves specified tenant.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.TenantServiceClient()
>>>
>>> name = client.tenant_path('[PROJECT]', '[TENANT]')
>>>
>>> response = client.get_tenant(name)
Args:
name (str): Required.
The resource name of the tenant to be retrieved.
The format is "projects/{project\_id}/tenants/{tenant\_id}", for
example, "projects/api-test-project/tenants/foo".
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.talent_v4beta1.types.Tenant` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_tenant" not in self._inner_api_calls:
self._inner_api_calls[
"get_tenant"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_tenant,
default_retry=self._method_configs["GetTenant"].retry,
default_timeout=self._method_configs["GetTenant"].timeout,
client_info=self._client_info,
)
request = tenant_service_pb2.GetTenantRequest(name=name)
return self._inner_api_calls["get_tenant"](
request, retry=retry, timeout=timeout, metadata=metadata
) | def function[get_tenant, parameter[self, name, retry, timeout, metadata]]:
constant[
Retrieves specified tenant.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.TenantServiceClient()
>>>
>>> name = client.tenant_path('[PROJECT]', '[TENANT]')
>>>
>>> response = client.get_tenant(name)
Args:
name (str): Required.
The resource name of the tenant to be retrieved.
The format is "projects/{project\_id}/tenants/{tenant\_id}", for
example, "projects/api-test-project/tenants/foo".
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.talent_v4beta1.types.Tenant` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
]
if compare[constant[get_tenant] <ast.NotIn object at 0x7da2590d7190> name[self]._inner_api_calls] begin[:]
call[name[self]._inner_api_calls][constant[get_tenant]] assign[=] call[name[google].api_core.gapic_v1.method.wrap_method, parameter[name[self].transport.get_tenant]]
variable[request] assign[=] call[name[tenant_service_pb2].GetTenantRequest, parameter[]]
return[call[call[name[self]._inner_api_calls][constant[get_tenant]], parameter[name[request]]]] | keyword[def] identifier[get_tenant] (
identifier[self] ,
identifier[name] ,
identifier[retry] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] ,
identifier[timeout] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] ,
identifier[metadata] = keyword[None] ,
):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_inner_api_calls] :
identifier[self] . identifier[_inner_api_calls] [
literal[string]
]= identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[wrap_method] (
identifier[self] . identifier[transport] . identifier[get_tenant] ,
identifier[default_retry] = identifier[self] . identifier[_method_configs] [ literal[string] ]. identifier[retry] ,
identifier[default_timeout] = identifier[self] . identifier[_method_configs] [ literal[string] ]. identifier[timeout] ,
identifier[client_info] = identifier[self] . identifier[_client_info] ,
)
identifier[request] = identifier[tenant_service_pb2] . identifier[GetTenantRequest] ( identifier[name] = identifier[name] )
keyword[return] identifier[self] . identifier[_inner_api_calls] [ literal[string] ](
identifier[request] , identifier[retry] = identifier[retry] , identifier[timeout] = identifier[timeout] , identifier[metadata] = identifier[metadata]
) | def get_tenant(self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None):
"""
Retrieves specified tenant.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.TenantServiceClient()
>>>
>>> name = client.tenant_path('[PROJECT]', '[TENANT]')
>>>
>>> response = client.get_tenant(name)
Args:
name (str): Required.
The resource name of the tenant to be retrieved.
The format is "projects/{project\\_id}/tenants/{tenant\\_id}", for
example, "projects/api-test-project/tenants/foo".
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.talent_v4beta1.types.Tenant` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_tenant' not in self._inner_api_calls:
self._inner_api_calls['get_tenant'] = google.api_core.gapic_v1.method.wrap_method(self.transport.get_tenant, default_retry=self._method_configs['GetTenant'].retry, default_timeout=self._method_configs['GetTenant'].timeout, client_info=self._client_info) # depends on [control=['if'], data=[]]
request = tenant_service_pb2.GetTenantRequest(name=name)
return self._inner_api_calls['get_tenant'](request, retry=retry, timeout=timeout, metadata=metadata) |
def set_call_back(self, func):
"""sets callback function for updating the plot.
in the callback function implement the logic of reading of serial input
also the further processing of the signal if necessary has to be done
in this
callbak function."""
self.timer.add_callback(func)
self.timer.start() | def function[set_call_back, parameter[self, func]]:
constant[sets callback function for updating the plot.
in the callback function implement the logic of reading of serial input
also the further processing of the signal if necessary has to be done
in this
callbak function.]
call[name[self].timer.add_callback, parameter[name[func]]]
call[name[self].timer.start, parameter[]] | keyword[def] identifier[set_call_back] ( identifier[self] , identifier[func] ):
literal[string]
identifier[self] . identifier[timer] . identifier[add_callback] ( identifier[func] )
identifier[self] . identifier[timer] . identifier[start] () | def set_call_back(self, func):
"""sets callback function for updating the plot.
in the callback function implement the logic of reading of serial input
also the further processing of the signal if necessary has to be done
in this
callbak function."""
self.timer.add_callback(func)
self.timer.start() |
def remove(property_name, system=False):
"""
Remove a configuration property/value setting from the config file.
:param property_name: The name of the property to remove.
:keyword system: Set to True to modify the system configuration file.
If not set, the user config file will be modified.
"""
config_filename = \
_SYSTEM_CONFIG_FILE if system is True else _USER_CONFIG_FILE
config = _read_config(config_filename)
section = _MAIN_SECTION_NAME
config.remove_option(section, property_name)
_write_config(config, config_filename) | def function[remove, parameter[property_name, system]]:
constant[
Remove a configuration property/value setting from the config file.
:param property_name: The name of the property to remove.
:keyword system: Set to True to modify the system configuration file.
If not set, the user config file will be modified.
]
variable[config_filename] assign[=] <ast.IfExp object at 0x7da1b235a6b0>
variable[config] assign[=] call[name[_read_config], parameter[name[config_filename]]]
variable[section] assign[=] name[_MAIN_SECTION_NAME]
call[name[config].remove_option, parameter[name[section], name[property_name]]]
call[name[_write_config], parameter[name[config], name[config_filename]]] | keyword[def] identifier[remove] ( identifier[property_name] , identifier[system] = keyword[False] ):
literal[string]
identifier[config_filename] = identifier[_SYSTEM_CONFIG_FILE] keyword[if] identifier[system] keyword[is] keyword[True] keyword[else] identifier[_USER_CONFIG_FILE]
identifier[config] = identifier[_read_config] ( identifier[config_filename] )
identifier[section] = identifier[_MAIN_SECTION_NAME]
identifier[config] . identifier[remove_option] ( identifier[section] , identifier[property_name] )
identifier[_write_config] ( identifier[config] , identifier[config_filename] ) | def remove(property_name, system=False):
"""
Remove a configuration property/value setting from the config file.
:param property_name: The name of the property to remove.
:keyword system: Set to True to modify the system configuration file.
If not set, the user config file will be modified.
"""
config_filename = _SYSTEM_CONFIG_FILE if system is True else _USER_CONFIG_FILE
config = _read_config(config_filename)
section = _MAIN_SECTION_NAME
config.remove_option(section, property_name)
_write_config(config, config_filename) |
def _register_token_network_without_limits(
self,
token_registry_abi: Dict,
token_registry_address: str,
token_address: str,
channel_participant_deposit_limit: Optional[int],
token_network_deposit_limit: Optional[int],
):
"""Register token with a TokenNetworkRegistry contract
with a contracts-version that doesn't require deposit limits in the TokenNetwork
constructor.
"""
if channel_participant_deposit_limit:
raise ValueError(
'contracts_version below 0.9.0 does not expect '
'channel_participant_deposit_limit',
)
if token_network_deposit_limit:
raise ValueError(
'contracts_version below 0.9.0 does not expect token_network_deposit_limit',
)
token_network_registry = self.web3.eth.contract(
abi=token_registry_abi,
address=token_registry_address,
)
version_from_onchain = token_network_registry.functions.contract_version().call()
if version_from_onchain != self.contract_manager.version_string:
raise RuntimeError(
f'got {version_from_onchain} from the chain, expected '
f'{self.contract_manager.version_string} in the deployment data',
)
command = token_network_registry.functions.createERC20TokenNetwork(
token_address,
)
self.transact(command)
token_network_address = token_network_registry.functions.token_to_token_networks(
token_address,
).call()
token_network_address = to_checksum_address(token_network_address)
LOG.debug(f'TokenNetwork address: {token_network_address}')
return token_network_address | def function[_register_token_network_without_limits, parameter[self, token_registry_abi, token_registry_address, token_address, channel_participant_deposit_limit, token_network_deposit_limit]]:
constant[Register token with a TokenNetworkRegistry contract
with a contracts-version that doesn't require deposit limits in the TokenNetwork
constructor.
]
if name[channel_participant_deposit_limit] begin[:]
<ast.Raise object at 0x7da2054a5750>
if name[token_network_deposit_limit] begin[:]
<ast.Raise object at 0x7da2054a6fb0>
variable[token_network_registry] assign[=] call[name[self].web3.eth.contract, parameter[]]
variable[version_from_onchain] assign[=] call[call[name[token_network_registry].functions.contract_version, parameter[]].call, parameter[]]
if compare[name[version_from_onchain] not_equal[!=] name[self].contract_manager.version_string] begin[:]
<ast.Raise object at 0x7da2054a77c0>
variable[command] assign[=] call[name[token_network_registry].functions.createERC20TokenNetwork, parameter[name[token_address]]]
call[name[self].transact, parameter[name[command]]]
variable[token_network_address] assign[=] call[call[name[token_network_registry].functions.token_to_token_networks, parameter[name[token_address]]].call, parameter[]]
variable[token_network_address] assign[=] call[name[to_checksum_address], parameter[name[token_network_address]]]
call[name[LOG].debug, parameter[<ast.JoinedStr object at 0x7da2054a63b0>]]
return[name[token_network_address]] | keyword[def] identifier[_register_token_network_without_limits] (
identifier[self] ,
identifier[token_registry_abi] : identifier[Dict] ,
identifier[token_registry_address] : identifier[str] ,
identifier[token_address] : identifier[str] ,
identifier[channel_participant_deposit_limit] : identifier[Optional] [ identifier[int] ],
identifier[token_network_deposit_limit] : identifier[Optional] [ identifier[int] ],
):
literal[string]
keyword[if] identifier[channel_participant_deposit_limit] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] ,
)
keyword[if] identifier[token_network_deposit_limit] :
keyword[raise] identifier[ValueError] (
literal[string] ,
)
identifier[token_network_registry] = identifier[self] . identifier[web3] . identifier[eth] . identifier[contract] (
identifier[abi] = identifier[token_registry_abi] ,
identifier[address] = identifier[token_registry_address] ,
)
identifier[version_from_onchain] = identifier[token_network_registry] . identifier[functions] . identifier[contract_version] (). identifier[call] ()
keyword[if] identifier[version_from_onchain] != identifier[self] . identifier[contract_manager] . identifier[version_string] :
keyword[raise] identifier[RuntimeError] (
literal[string]
literal[string] ,
)
identifier[command] = identifier[token_network_registry] . identifier[functions] . identifier[createERC20TokenNetwork] (
identifier[token_address] ,
)
identifier[self] . identifier[transact] ( identifier[command] )
identifier[token_network_address] = identifier[token_network_registry] . identifier[functions] . identifier[token_to_token_networks] (
identifier[token_address] ,
). identifier[call] ()
identifier[token_network_address] = identifier[to_checksum_address] ( identifier[token_network_address] )
identifier[LOG] . identifier[debug] ( literal[string] )
keyword[return] identifier[token_network_address] | def _register_token_network_without_limits(self, token_registry_abi: Dict, token_registry_address: str, token_address: str, channel_participant_deposit_limit: Optional[int], token_network_deposit_limit: Optional[int]):
"""Register token with a TokenNetworkRegistry contract
with a contracts-version that doesn't require deposit limits in the TokenNetwork
constructor.
"""
if channel_participant_deposit_limit:
raise ValueError('contracts_version below 0.9.0 does not expect channel_participant_deposit_limit') # depends on [control=['if'], data=[]]
if token_network_deposit_limit:
raise ValueError('contracts_version below 0.9.0 does not expect token_network_deposit_limit') # depends on [control=['if'], data=[]]
token_network_registry = self.web3.eth.contract(abi=token_registry_abi, address=token_registry_address)
version_from_onchain = token_network_registry.functions.contract_version().call()
if version_from_onchain != self.contract_manager.version_string:
raise RuntimeError(f'got {version_from_onchain} from the chain, expected {self.contract_manager.version_string} in the deployment data') # depends on [control=['if'], data=['version_from_onchain']]
command = token_network_registry.functions.createERC20TokenNetwork(token_address)
self.transact(command)
token_network_address = token_network_registry.functions.token_to_token_networks(token_address).call()
token_network_address = to_checksum_address(token_network_address)
LOG.debug(f'TokenNetwork address: {token_network_address}')
return token_network_address |
def uri_from_parts(parts):
"simple function to merge three parts into an uri"
uri = "%s://%s%s" % (parts[0], parts[1], parts[2])
if parts[3]:
extra = '?'+urlencode(parts[3])
uri += extra
return uri | def function[uri_from_parts, parameter[parts]]:
constant[simple function to merge three parts into an uri]
variable[uri] assign[=] binary_operation[constant[%s://%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da18fe91e40>, <ast.Subscript object at 0x7da18fe93820>, <ast.Subscript object at 0x7da1b26a4220>]]]
if call[name[parts]][constant[3]] begin[:]
variable[extra] assign[=] binary_operation[constant[?] + call[name[urlencode], parameter[call[name[parts]][constant[3]]]]]
<ast.AugAssign object at 0x7da1b25d62c0>
return[name[uri]] | keyword[def] identifier[uri_from_parts] ( identifier[parts] ):
literal[string]
identifier[uri] = literal[string] %( identifier[parts] [ literal[int] ], identifier[parts] [ literal[int] ], identifier[parts] [ literal[int] ])
keyword[if] identifier[parts] [ literal[int] ]:
identifier[extra] = literal[string] + identifier[urlencode] ( identifier[parts] [ literal[int] ])
identifier[uri] += identifier[extra]
keyword[return] identifier[uri] | def uri_from_parts(parts):
"""simple function to merge three parts into an uri"""
uri = '%s://%s%s' % (parts[0], parts[1], parts[2])
if parts[3]:
extra = '?' + urlencode(parts[3])
uri += extra # depends on [control=['if'], data=[]]
return uri |
def alias(requestContext, seriesList, newName):
"""
Takes one metric or a wildcard seriesList and a string in quotes.
Prints the string instead of the metric name in the legend.
Example::
&target=alias(Sales.widgets.largeBlue,"Large Blue Widgets")
"""
try:
seriesList.name = newName
except AttributeError:
for series in seriesList:
series.name = newName
return seriesList | def function[alias, parameter[requestContext, seriesList, newName]]:
constant[
Takes one metric or a wildcard seriesList and a string in quotes.
Prints the string instead of the metric name in the legend.
Example::
&target=alias(Sales.widgets.largeBlue,"Large Blue Widgets")
]
<ast.Try object at 0x7da18f722e30>
return[name[seriesList]] | keyword[def] identifier[alias] ( identifier[requestContext] , identifier[seriesList] , identifier[newName] ):
literal[string]
keyword[try] :
identifier[seriesList] . identifier[name] = identifier[newName]
keyword[except] identifier[AttributeError] :
keyword[for] identifier[series] keyword[in] identifier[seriesList] :
identifier[series] . identifier[name] = identifier[newName]
keyword[return] identifier[seriesList] | def alias(requestContext, seriesList, newName):
"""
Takes one metric or a wildcard seriesList and a string in quotes.
Prints the string instead of the metric name in the legend.
Example::
&target=alias(Sales.widgets.largeBlue,"Large Blue Widgets")
"""
try:
seriesList.name = newName # depends on [control=['try'], data=[]]
except AttributeError:
for series in seriesList:
series.name = newName # depends on [control=['for'], data=['series']] # depends on [control=['except'], data=[]]
return seriesList |
def superable(cls) :
'''Provide .__super in python 2.x classes without having to specify the current
class name each time super is used (DRY principle).'''
name = cls.__name__
super_name = '_%s__super' % (name,)
setattr(cls,super_name,super(cls))
return cls | def function[superable, parameter[cls]]:
constant[Provide .__super in python 2.x classes without having to specify the current
class name each time super is used (DRY principle).]
variable[name] assign[=] name[cls].__name__
variable[super_name] assign[=] binary_operation[constant[_%s__super] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e956920>]]]
call[name[setattr], parameter[name[cls], name[super_name], call[name[super], parameter[name[cls]]]]]
return[name[cls]] | keyword[def] identifier[superable] ( identifier[cls] ):
literal[string]
identifier[name] = identifier[cls] . identifier[__name__]
identifier[super_name] = literal[string] %( identifier[name] ,)
identifier[setattr] ( identifier[cls] , identifier[super_name] , identifier[super] ( identifier[cls] ))
keyword[return] identifier[cls] | def superable(cls):
"""Provide .__super in python 2.x classes without having to specify the current
class name each time super is used (DRY principle)."""
name = cls.__name__
super_name = '_%s__super' % (name,)
setattr(cls, super_name, super(cls))
return cls |
def string(self, name):
"""parse a string frame"""
self._assert_is_string(name)
frame = self._next_frame()
try:
val = frame.decode('utf-8')
self.results.__dict__[name] = val
except UnicodeError as err:
raise MessageParserError("Message contained invalid Unicode characters") \
from err
return self | def function[string, parameter[self, name]]:
constant[parse a string frame]
call[name[self]._assert_is_string, parameter[name[name]]]
variable[frame] assign[=] call[name[self]._next_frame, parameter[]]
<ast.Try object at 0x7da1b1454d00>
return[name[self]] | keyword[def] identifier[string] ( identifier[self] , identifier[name] ):
literal[string]
identifier[self] . identifier[_assert_is_string] ( identifier[name] )
identifier[frame] = identifier[self] . identifier[_next_frame] ()
keyword[try] :
identifier[val] = identifier[frame] . identifier[decode] ( literal[string] )
identifier[self] . identifier[results] . identifier[__dict__] [ identifier[name] ]= identifier[val]
keyword[except] identifier[UnicodeError] keyword[as] identifier[err] :
keyword[raise] identifier[MessageParserError] ( literal[string] ) keyword[from] identifier[err]
keyword[return] identifier[self] | def string(self, name):
"""parse a string frame"""
self._assert_is_string(name)
frame = self._next_frame()
try:
val = frame.decode('utf-8')
self.results.__dict__[name] = val # depends on [control=['try'], data=[]]
except UnicodeError as err:
raise MessageParserError('Message contained invalid Unicode characters') from err # depends on [control=['except'], data=['err']]
return self |
def read_item(self, from_date=None):
"""Read items and return them one by one.
:param from_date: start date for incremental reading.
:return: next single item when any available.
:raises ValueError: `metadata__timestamp` field not found in index
:raises NotFoundError: index not found in ElasticSearch
"""
search_query = self._build_search_query(from_date)
for hit in helpers.scan(self._es_conn,
search_query,
scroll='300m',
index=self._es_index,
preserve_order=True):
yield hit | def function[read_item, parameter[self, from_date]]:
constant[Read items and return them one by one.
:param from_date: start date for incremental reading.
:return: next single item when any available.
:raises ValueError: `metadata__timestamp` field not found in index
:raises NotFoundError: index not found in ElasticSearch
]
variable[search_query] assign[=] call[name[self]._build_search_query, parameter[name[from_date]]]
for taget[name[hit]] in starred[call[name[helpers].scan, parameter[name[self]._es_conn, name[search_query]]]] begin[:]
<ast.Yield object at 0x7da1b0fed690> | keyword[def] identifier[read_item] ( identifier[self] , identifier[from_date] = keyword[None] ):
literal[string]
identifier[search_query] = identifier[self] . identifier[_build_search_query] ( identifier[from_date] )
keyword[for] identifier[hit] keyword[in] identifier[helpers] . identifier[scan] ( identifier[self] . identifier[_es_conn] ,
identifier[search_query] ,
identifier[scroll] = literal[string] ,
identifier[index] = identifier[self] . identifier[_es_index] ,
identifier[preserve_order] = keyword[True] ):
keyword[yield] identifier[hit] | def read_item(self, from_date=None):
"""Read items and return them one by one.
:param from_date: start date for incremental reading.
:return: next single item when any available.
:raises ValueError: `metadata__timestamp` field not found in index
:raises NotFoundError: index not found in ElasticSearch
"""
search_query = self._build_search_query(from_date)
for hit in helpers.scan(self._es_conn, search_query, scroll='300m', index=self._es_index, preserve_order=True):
yield hit # depends on [control=['for'], data=['hit']] |
def fit(self, X):
"""Fit a t-SNE embedding for a given data set.
Runs the standard t-SNE optimization, consisting of the early
exaggeration phase and a normal optimization phase.
Parameters
----------
X: np.ndarray
The data matrix to be embedded.
Returns
-------
TSNEEmbedding
A fully optimized t-SNE embedding.
"""
embedding = self.prepare_initial(X)
try:
# Early exaggeration with lower momentum to allow points to find more
# easily move around and find their neighbors
embedding.optimize(
n_iter=self.early_exaggeration_iter,
exaggeration=self.early_exaggeration,
momentum=self.initial_momentum,
inplace=True,
propagate_exception=True,
)
# Restore actual affinity probabilities and increase momentum to get
# final, optimized embedding
embedding.optimize(
n_iter=self.n_iter,
exaggeration=self.exaggeration,
momentum=self.final_momentum,
inplace=True,
propagate_exception=True,
)
except OptimizationInterrupt as ex:
log.info("Optimization was interrupted with callback.")
embedding = ex.final_embedding
return embedding | def function[fit, parameter[self, X]]:
constant[Fit a t-SNE embedding for a given data set.
Runs the standard t-SNE optimization, consisting of the early
exaggeration phase and a normal optimization phase.
Parameters
----------
X: np.ndarray
The data matrix to be embedded.
Returns
-------
TSNEEmbedding
A fully optimized t-SNE embedding.
]
variable[embedding] assign[=] call[name[self].prepare_initial, parameter[name[X]]]
<ast.Try object at 0x7da1b220eb90>
return[name[embedding]] | keyword[def] identifier[fit] ( identifier[self] , identifier[X] ):
literal[string]
identifier[embedding] = identifier[self] . identifier[prepare_initial] ( identifier[X] )
keyword[try] :
identifier[embedding] . identifier[optimize] (
identifier[n_iter] = identifier[self] . identifier[early_exaggeration_iter] ,
identifier[exaggeration] = identifier[self] . identifier[early_exaggeration] ,
identifier[momentum] = identifier[self] . identifier[initial_momentum] ,
identifier[inplace] = keyword[True] ,
identifier[propagate_exception] = keyword[True] ,
)
identifier[embedding] . identifier[optimize] (
identifier[n_iter] = identifier[self] . identifier[n_iter] ,
identifier[exaggeration] = identifier[self] . identifier[exaggeration] ,
identifier[momentum] = identifier[self] . identifier[final_momentum] ,
identifier[inplace] = keyword[True] ,
identifier[propagate_exception] = keyword[True] ,
)
keyword[except] identifier[OptimizationInterrupt] keyword[as] identifier[ex] :
identifier[log] . identifier[info] ( literal[string] )
identifier[embedding] = identifier[ex] . identifier[final_embedding]
keyword[return] identifier[embedding] | def fit(self, X):
"""Fit a t-SNE embedding for a given data set.
Runs the standard t-SNE optimization, consisting of the early
exaggeration phase and a normal optimization phase.
Parameters
----------
X: np.ndarray
The data matrix to be embedded.
Returns
-------
TSNEEmbedding
A fully optimized t-SNE embedding.
"""
embedding = self.prepare_initial(X)
try:
# Early exaggeration with lower momentum to allow points to find more
# easily move around and find their neighbors
embedding.optimize(n_iter=self.early_exaggeration_iter, exaggeration=self.early_exaggeration, momentum=self.initial_momentum, inplace=True, propagate_exception=True)
# Restore actual affinity probabilities and increase momentum to get
# final, optimized embedding
embedding.optimize(n_iter=self.n_iter, exaggeration=self.exaggeration, momentum=self.final_momentum, inplace=True, propagate_exception=True) # depends on [control=['try'], data=[]]
except OptimizationInterrupt as ex:
log.info('Optimization was interrupted with callback.')
embedding = ex.final_embedding # depends on [control=['except'], data=['ex']]
return embedding |
def _attach_subcommands(self):
"""Create a subparser and add the registered commands to it.
This will also call ``_init`` on each subcommand (in turn invoking its
``_attach_subcommands`` method).
"""
if self.subcommands:
self.subparsers = self.parser.add_subparsers()
for subcommand in self.subcommands:
subparser = self.subparsers.add_parser(subcommand.name,
help=subcommand.title)
if subcommand.handler:
self._register_handler(subparser, subcommand.handler)
subcommand._init(subparser) | def function[_attach_subcommands, parameter[self]]:
constant[Create a subparser and add the registered commands to it.
This will also call ``_init`` on each subcommand (in turn invoking its
``_attach_subcommands`` method).
]
if name[self].subcommands begin[:]
name[self].subparsers assign[=] call[name[self].parser.add_subparsers, parameter[]]
for taget[name[subcommand]] in starred[name[self].subcommands] begin[:]
variable[subparser] assign[=] call[name[self].subparsers.add_parser, parameter[name[subcommand].name]]
if name[subcommand].handler begin[:]
call[name[self]._register_handler, parameter[name[subparser], name[subcommand].handler]]
call[name[subcommand]._init, parameter[name[subparser]]] | keyword[def] identifier[_attach_subcommands] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[subcommands] :
identifier[self] . identifier[subparsers] = identifier[self] . identifier[parser] . identifier[add_subparsers] ()
keyword[for] identifier[subcommand] keyword[in] identifier[self] . identifier[subcommands] :
identifier[subparser] = identifier[self] . identifier[subparsers] . identifier[add_parser] ( identifier[subcommand] . identifier[name] ,
identifier[help] = identifier[subcommand] . identifier[title] )
keyword[if] identifier[subcommand] . identifier[handler] :
identifier[self] . identifier[_register_handler] ( identifier[subparser] , identifier[subcommand] . identifier[handler] )
identifier[subcommand] . identifier[_init] ( identifier[subparser] ) | def _attach_subcommands(self):
"""Create a subparser and add the registered commands to it.
This will also call ``_init`` on each subcommand (in turn invoking its
``_attach_subcommands`` method).
"""
if self.subcommands:
self.subparsers = self.parser.add_subparsers()
for subcommand in self.subcommands:
subparser = self.subparsers.add_parser(subcommand.name, help=subcommand.title)
if subcommand.handler:
self._register_handler(subparser, subcommand.handler) # depends on [control=['if'], data=[]]
subcommand._init(subparser) # depends on [control=['for'], data=['subcommand']] # depends on [control=['if'], data=[]] |
def is_fold_trigger(block):
"""
Checks if the block is a fold trigger.
:param block: block to check
:return: True if the block is a fold trigger (represented as a node in
the fold panel)
"""
if block is None:
return False
state = block.userState()
if state == -1:
state = 0
return bool(state & 0x04000000) | def function[is_fold_trigger, parameter[block]]:
constant[
Checks if the block is a fold trigger.
:param block: block to check
:return: True if the block is a fold trigger (represented as a node in
the fold panel)
]
if compare[name[block] is constant[None]] begin[:]
return[constant[False]]
variable[state] assign[=] call[name[block].userState, parameter[]]
if compare[name[state] equal[==] <ast.UnaryOp object at 0x7da2041db670>] begin[:]
variable[state] assign[=] constant[0]
return[call[name[bool], parameter[binary_operation[name[state] <ast.BitAnd object at 0x7da2590d6b60> constant[67108864]]]]] | keyword[def] identifier[is_fold_trigger] ( identifier[block] ):
literal[string]
keyword[if] identifier[block] keyword[is] keyword[None] :
keyword[return] keyword[False]
identifier[state] = identifier[block] . identifier[userState] ()
keyword[if] identifier[state] ==- literal[int] :
identifier[state] = literal[int]
keyword[return] identifier[bool] ( identifier[state] & literal[int] ) | def is_fold_trigger(block):
"""
Checks if the block is a fold trigger.
:param block: block to check
:return: True if the block is a fold trigger (represented as a node in
the fold panel)
"""
if block is None:
return False # depends on [control=['if'], data=[]]
state = block.userState()
if state == -1:
state = 0 # depends on [control=['if'], data=['state']]
return bool(state & 67108864) |
def add_eager_constraints(self, models):
"""
Set the constraints for an eager load of the relation.
:type models: list
"""
self._models = Collection.make(models)
self._build_dictionary(models) | def function[add_eager_constraints, parameter[self, models]]:
constant[
Set the constraints for an eager load of the relation.
:type models: list
]
name[self]._models assign[=] call[name[Collection].make, parameter[name[models]]]
call[name[self]._build_dictionary, parameter[name[models]]] | keyword[def] identifier[add_eager_constraints] ( identifier[self] , identifier[models] ):
literal[string]
identifier[self] . identifier[_models] = identifier[Collection] . identifier[make] ( identifier[models] )
identifier[self] . identifier[_build_dictionary] ( identifier[models] ) | def add_eager_constraints(self, models):
"""
Set the constraints for an eager load of the relation.
:type models: list
"""
self._models = Collection.make(models)
self._build_dictionary(models) |
def get_buildout_parts(buildout, query=None):
"""Return buildout parts matching the given query
"""
parts = names = (buildout['buildout'].get('parts') or '').split('\n')
for name in names:
part = buildout.get(name) or {}
for key, value in (query or {}).items():
if value not in (part.get(key) or ''):
parts.remove(name)
break
return parts | def function[get_buildout_parts, parameter[buildout, query]]:
constant[Return buildout parts matching the given query
]
variable[parts] assign[=] call[<ast.BoolOp object at 0x7da204347700>.split, parameter[constant[
]]]
for taget[name[name]] in starred[name[names]] begin[:]
variable[part] assign[=] <ast.BoolOp object at 0x7da204347370>
for taget[tuple[[<ast.Name object at 0x7da2043479a0>, <ast.Name object at 0x7da204347be0>]]] in starred[call[<ast.BoolOp object at 0x7da204345270>.items, parameter[]]] begin[:]
if compare[name[value] <ast.NotIn object at 0x7da2590d7190> <ast.BoolOp object at 0x7da2043451e0>] begin[:]
call[name[parts].remove, parameter[name[name]]]
break
return[name[parts]] | keyword[def] identifier[get_buildout_parts] ( identifier[buildout] , identifier[query] = keyword[None] ):
literal[string]
identifier[parts] = identifier[names] =( identifier[buildout] [ literal[string] ]. identifier[get] ( literal[string] ) keyword[or] literal[string] ). identifier[split] ( literal[string] )
keyword[for] identifier[name] keyword[in] identifier[names] :
identifier[part] = identifier[buildout] . identifier[get] ( identifier[name] ) keyword[or] {}
keyword[for] identifier[key] , identifier[value] keyword[in] ( identifier[query] keyword[or] {}). identifier[items] ():
keyword[if] identifier[value] keyword[not] keyword[in] ( identifier[part] . identifier[get] ( identifier[key] ) keyword[or] literal[string] ):
identifier[parts] . identifier[remove] ( identifier[name] )
keyword[break]
keyword[return] identifier[parts] | def get_buildout_parts(buildout, query=None):
"""Return buildout parts matching the given query
"""
parts = names = (buildout['buildout'].get('parts') or '').split('\n')
for name in names:
part = buildout.get(name) or {}
for (key, value) in (query or {}).items():
if value not in (part.get(key) or ''):
parts.remove(name)
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['name']]
return parts |
def to_basis(self, basis=None,
start=None,
stop=None,
step=None,
undefined=None):
"""
Make a new curve in a new basis, given a basis, or a new start, step,
and/or stop. You only need to set the parameters you want to change.
If the new extents go beyond the current extents, the curve is padded
with the ``undefined`` parameter.
Args:
basis (ndarray)
start (float)
stop (float)
step (float)
undefined (float)
Returns:
Curve. The current instance in the new basis.
"""
if basis is None:
if start is None:
new_start = self.start
else:
new_start = start
new_step = step or self.step
new_stop = stop or self.stop
# new_adj_stop = new_stop + new_step/100 # To guarantee inclusion.
# basis = np.arange(new_start, new_adj_stop, new_step)
steps = 1 + (new_stop - new_start) / new_step
basis = np.linspace(new_start, new_stop, int(steps), endpoint=True)
else:
new_start = basis[0]
new_step = basis[1] - basis[0]
if undefined is None:
undefined = np.nan
else:
undefined = undefined
interp = interp1d(self.basis, self,
bounds_error=False,
fill_value=undefined)
data = interp(basis)
params = self.__dict__.copy()
params['step'] = float(new_step)
params['start'] = float(new_start)
return Curve(data, params=params) | def function[to_basis, parameter[self, basis, start, stop, step, undefined]]:
constant[
Make a new curve in a new basis, given a basis, or a new start, step,
and/or stop. You only need to set the parameters you want to change.
If the new extents go beyond the current extents, the curve is padded
with the ``undefined`` parameter.
Args:
basis (ndarray)
start (float)
stop (float)
step (float)
undefined (float)
Returns:
Curve. The current instance in the new basis.
]
if compare[name[basis] is constant[None]] begin[:]
if compare[name[start] is constant[None]] begin[:]
variable[new_start] assign[=] name[self].start
variable[new_step] assign[=] <ast.BoolOp object at 0x7da1b2295930>
variable[new_stop] assign[=] <ast.BoolOp object at 0x7da1b22944c0>
variable[steps] assign[=] binary_operation[constant[1] + binary_operation[binary_operation[name[new_stop] - name[new_start]] / name[new_step]]]
variable[basis] assign[=] call[name[np].linspace, parameter[name[new_start], name[new_stop], call[name[int], parameter[name[steps]]]]]
if compare[name[undefined] is constant[None]] begin[:]
variable[undefined] assign[=] name[np].nan
variable[interp] assign[=] call[name[interp1d], parameter[name[self].basis, name[self]]]
variable[data] assign[=] call[name[interp], parameter[name[basis]]]
variable[params] assign[=] call[name[self].__dict__.copy, parameter[]]
call[name[params]][constant[step]] assign[=] call[name[float], parameter[name[new_step]]]
call[name[params]][constant[start]] assign[=] call[name[float], parameter[name[new_start]]]
return[call[name[Curve], parameter[name[data]]]] | keyword[def] identifier[to_basis] ( identifier[self] , identifier[basis] = keyword[None] ,
identifier[start] = keyword[None] ,
identifier[stop] = keyword[None] ,
identifier[step] = keyword[None] ,
identifier[undefined] = keyword[None] ):
literal[string]
keyword[if] identifier[basis] keyword[is] keyword[None] :
keyword[if] identifier[start] keyword[is] keyword[None] :
identifier[new_start] = identifier[self] . identifier[start]
keyword[else] :
identifier[new_start] = identifier[start]
identifier[new_step] = identifier[step] keyword[or] identifier[self] . identifier[step]
identifier[new_stop] = identifier[stop] keyword[or] identifier[self] . identifier[stop]
identifier[steps] = literal[int] +( identifier[new_stop] - identifier[new_start] )/ identifier[new_step]
identifier[basis] = identifier[np] . identifier[linspace] ( identifier[new_start] , identifier[new_stop] , identifier[int] ( identifier[steps] ), identifier[endpoint] = keyword[True] )
keyword[else] :
identifier[new_start] = identifier[basis] [ literal[int] ]
identifier[new_step] = identifier[basis] [ literal[int] ]- identifier[basis] [ literal[int] ]
keyword[if] identifier[undefined] keyword[is] keyword[None] :
identifier[undefined] = identifier[np] . identifier[nan]
keyword[else] :
identifier[undefined] = identifier[undefined]
identifier[interp] = identifier[interp1d] ( identifier[self] . identifier[basis] , identifier[self] ,
identifier[bounds_error] = keyword[False] ,
identifier[fill_value] = identifier[undefined] )
identifier[data] = identifier[interp] ( identifier[basis] )
identifier[params] = identifier[self] . identifier[__dict__] . identifier[copy] ()
identifier[params] [ literal[string] ]= identifier[float] ( identifier[new_step] )
identifier[params] [ literal[string] ]= identifier[float] ( identifier[new_start] )
keyword[return] identifier[Curve] ( identifier[data] , identifier[params] = identifier[params] ) | def to_basis(self, basis=None, start=None, stop=None, step=None, undefined=None):
"""
Make a new curve in a new basis, given a basis, or a new start, step,
and/or stop. You only need to set the parameters you want to change.
If the new extents go beyond the current extents, the curve is padded
with the ``undefined`` parameter.
Args:
basis (ndarray)
start (float)
stop (float)
step (float)
undefined (float)
Returns:
Curve. The current instance in the new basis.
"""
if basis is None:
if start is None:
new_start = self.start # depends on [control=['if'], data=[]]
else:
new_start = start
new_step = step or self.step
new_stop = stop or self.stop
# new_adj_stop = new_stop + new_step/100 # To guarantee inclusion.
# basis = np.arange(new_start, new_adj_stop, new_step)
steps = 1 + (new_stop - new_start) / new_step
basis = np.linspace(new_start, new_stop, int(steps), endpoint=True) # depends on [control=['if'], data=['basis']]
else:
new_start = basis[0]
new_step = basis[1] - basis[0]
if undefined is None:
undefined = np.nan # depends on [control=['if'], data=['undefined']]
else:
undefined = undefined
interp = interp1d(self.basis, self, bounds_error=False, fill_value=undefined)
data = interp(basis)
params = self.__dict__.copy()
params['step'] = float(new_step)
params['start'] = float(new_start)
return Curve(data, params=params) |
def encode_endpoint_props(ed):
"""
Encodes the properties of the given EndpointDescription
"""
props = encode_osgi_props(ed)
props[ECF_RSVC_ID] = "{0}".format(ed.get_remoteservice_id()[1])
props[ECF_ENDPOINT_ID] = "{0}".format(ed.get_container_id()[1])
props[ECF_ENDPOINT_CONTAINERID_NAMESPACE] = "{0}".format(
ed.get_container_id()[0]
)
props[ECF_ENDPOINT_TIMESTAMP] = "{0}".format(ed.get_timestamp())
ctid = ed.get_connect_target_id()
if ctid:
props[ECF_ENDPOINT_CONNECTTARGET_ID] = "{0}".format(ctid)
id_filters = ed.get_id_filters()
if id_filters:
props[ECF_ENDPOINT_IDFILTER_IDS] = " ".join([x[1] for x in id_filters])
rs_filter = ed.get_remoteservice_filter()
if rs_filter:
props[ECF_ENDPOINT_REMOTESERVICE_FILTER] = ed.get_remoteservice_filter()
async_intfs = ed.get_async_interfaces()
if async_intfs:
props[ECF_SERVICE_EXPORTED_ASYNC_INTERFACES] = " ".join(async_intfs)
all_props = ed.get_properties()
other_props = {
key: all_props[key]
for key in all_props.keys()
if not is_reserved_property(key)
}
return merge_dicts(props, other_props) | def function[encode_endpoint_props, parameter[ed]]:
constant[
Encodes the properties of the given EndpointDescription
]
variable[props] assign[=] call[name[encode_osgi_props], parameter[name[ed]]]
call[name[props]][name[ECF_RSVC_ID]] assign[=] call[constant[{0}].format, parameter[call[call[name[ed].get_remoteservice_id, parameter[]]][constant[1]]]]
call[name[props]][name[ECF_ENDPOINT_ID]] assign[=] call[constant[{0}].format, parameter[call[call[name[ed].get_container_id, parameter[]]][constant[1]]]]
call[name[props]][name[ECF_ENDPOINT_CONTAINERID_NAMESPACE]] assign[=] call[constant[{0}].format, parameter[call[call[name[ed].get_container_id, parameter[]]][constant[0]]]]
call[name[props]][name[ECF_ENDPOINT_TIMESTAMP]] assign[=] call[constant[{0}].format, parameter[call[name[ed].get_timestamp, parameter[]]]]
variable[ctid] assign[=] call[name[ed].get_connect_target_id, parameter[]]
if name[ctid] begin[:]
call[name[props]][name[ECF_ENDPOINT_CONNECTTARGET_ID]] assign[=] call[constant[{0}].format, parameter[name[ctid]]]
variable[id_filters] assign[=] call[name[ed].get_id_filters, parameter[]]
if name[id_filters] begin[:]
call[name[props]][name[ECF_ENDPOINT_IDFILTER_IDS]] assign[=] call[constant[ ].join, parameter[<ast.ListComp object at 0x7da20c993160>]]
variable[rs_filter] assign[=] call[name[ed].get_remoteservice_filter, parameter[]]
if name[rs_filter] begin[:]
call[name[props]][name[ECF_ENDPOINT_REMOTESERVICE_FILTER]] assign[=] call[name[ed].get_remoteservice_filter, parameter[]]
variable[async_intfs] assign[=] call[name[ed].get_async_interfaces, parameter[]]
if name[async_intfs] begin[:]
call[name[props]][name[ECF_SERVICE_EXPORTED_ASYNC_INTERFACES]] assign[=] call[constant[ ].join, parameter[name[async_intfs]]]
variable[all_props] assign[=] call[name[ed].get_properties, parameter[]]
variable[other_props] assign[=] <ast.DictComp object at 0x7da20c9932e0>
return[call[name[merge_dicts], parameter[name[props], name[other_props]]]] | keyword[def] identifier[encode_endpoint_props] ( identifier[ed] ):
literal[string]
identifier[props] = identifier[encode_osgi_props] ( identifier[ed] )
identifier[props] [ identifier[ECF_RSVC_ID] ]= literal[string] . identifier[format] ( identifier[ed] . identifier[get_remoteservice_id] ()[ literal[int] ])
identifier[props] [ identifier[ECF_ENDPOINT_ID] ]= literal[string] . identifier[format] ( identifier[ed] . identifier[get_container_id] ()[ literal[int] ])
identifier[props] [ identifier[ECF_ENDPOINT_CONTAINERID_NAMESPACE] ]= literal[string] . identifier[format] (
identifier[ed] . identifier[get_container_id] ()[ literal[int] ]
)
identifier[props] [ identifier[ECF_ENDPOINT_TIMESTAMP] ]= literal[string] . identifier[format] ( identifier[ed] . identifier[get_timestamp] ())
identifier[ctid] = identifier[ed] . identifier[get_connect_target_id] ()
keyword[if] identifier[ctid] :
identifier[props] [ identifier[ECF_ENDPOINT_CONNECTTARGET_ID] ]= literal[string] . identifier[format] ( identifier[ctid] )
identifier[id_filters] = identifier[ed] . identifier[get_id_filters] ()
keyword[if] identifier[id_filters] :
identifier[props] [ identifier[ECF_ENDPOINT_IDFILTER_IDS] ]= literal[string] . identifier[join] ([ identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[id_filters] ])
identifier[rs_filter] = identifier[ed] . identifier[get_remoteservice_filter] ()
keyword[if] identifier[rs_filter] :
identifier[props] [ identifier[ECF_ENDPOINT_REMOTESERVICE_FILTER] ]= identifier[ed] . identifier[get_remoteservice_filter] ()
identifier[async_intfs] = identifier[ed] . identifier[get_async_interfaces] ()
keyword[if] identifier[async_intfs] :
identifier[props] [ identifier[ECF_SERVICE_EXPORTED_ASYNC_INTERFACES] ]= literal[string] . identifier[join] ( identifier[async_intfs] )
identifier[all_props] = identifier[ed] . identifier[get_properties] ()
identifier[other_props] ={
identifier[key] : identifier[all_props] [ identifier[key] ]
keyword[for] identifier[key] keyword[in] identifier[all_props] . identifier[keys] ()
keyword[if] keyword[not] identifier[is_reserved_property] ( identifier[key] )
}
keyword[return] identifier[merge_dicts] ( identifier[props] , identifier[other_props] ) | def encode_endpoint_props(ed):
"""
Encodes the properties of the given EndpointDescription
"""
props = encode_osgi_props(ed)
props[ECF_RSVC_ID] = '{0}'.format(ed.get_remoteservice_id()[1])
props[ECF_ENDPOINT_ID] = '{0}'.format(ed.get_container_id()[1])
props[ECF_ENDPOINT_CONTAINERID_NAMESPACE] = '{0}'.format(ed.get_container_id()[0])
props[ECF_ENDPOINT_TIMESTAMP] = '{0}'.format(ed.get_timestamp())
ctid = ed.get_connect_target_id()
if ctid:
props[ECF_ENDPOINT_CONNECTTARGET_ID] = '{0}'.format(ctid) # depends on [control=['if'], data=[]]
id_filters = ed.get_id_filters()
if id_filters:
props[ECF_ENDPOINT_IDFILTER_IDS] = ' '.join([x[1] for x in id_filters]) # depends on [control=['if'], data=[]]
rs_filter = ed.get_remoteservice_filter()
if rs_filter:
props[ECF_ENDPOINT_REMOTESERVICE_FILTER] = ed.get_remoteservice_filter() # depends on [control=['if'], data=[]]
async_intfs = ed.get_async_interfaces()
if async_intfs:
props[ECF_SERVICE_EXPORTED_ASYNC_INTERFACES] = ' '.join(async_intfs) # depends on [control=['if'], data=[]]
all_props = ed.get_properties()
other_props = {key: all_props[key] for key in all_props.keys() if not is_reserved_property(key)}
return merge_dicts(props, other_props) |
def run(self):
""" Run the receiver thread """
while (True):
if (self.sp):
break
try:
# Blocking until USB data available
data = self.cfusb.receive_packet()
if len(data) > 0:
pk = CRTPPacket(data[0], list(data[1:]))
self.in_queue.put(pk)
except Exception as e:
import traceback
self.link_error_callback(
'Error communicating with the Crazyflie'
' ,it has probably been unplugged!\n'
'Exception:%s\n\n%s' % (e,
traceback.format_exc())) | def function[run, parameter[self]]:
constant[ Run the receiver thread ]
while constant[True] begin[:]
if name[self].sp begin[:]
break
<ast.Try object at 0x7da1b1685630> | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
keyword[while] ( keyword[True] ):
keyword[if] ( identifier[self] . identifier[sp] ):
keyword[break]
keyword[try] :
identifier[data] = identifier[self] . identifier[cfusb] . identifier[receive_packet] ()
keyword[if] identifier[len] ( identifier[data] )> literal[int] :
identifier[pk] = identifier[CRTPPacket] ( identifier[data] [ literal[int] ], identifier[list] ( identifier[data] [ literal[int] :]))
identifier[self] . identifier[in_queue] . identifier[put] ( identifier[pk] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[import] identifier[traceback]
identifier[self] . identifier[link_error_callback] (
literal[string]
literal[string]
literal[string] %( identifier[e] ,
identifier[traceback] . identifier[format_exc] ())) | def run(self):
""" Run the receiver thread """
while True:
if self.sp:
break # depends on [control=['if'], data=[]]
try:
# Blocking until USB data available
data = self.cfusb.receive_packet()
if len(data) > 0:
pk = CRTPPacket(data[0], list(data[1:]))
self.in_queue.put(pk) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception as e:
import traceback
self.link_error_callback('Error communicating with the Crazyflie ,it has probably been unplugged!\nException:%s\n\n%s' % (e, traceback.format_exc())) # depends on [control=['except'], data=['e']] # depends on [control=['while'], data=[]] |
def _get_org(server_config, label):
"""Find an :class:`nailgun.entities.Organization` object.
:param nailgun.config.ServerConfig server_config: The server that should be
searched.
:param label: A string. The label of the organization to find.
:raises APIResponseError: If exactly one organization is not found.
:returns: An :class:`nailgun.entities.Organization` object.
"""
organizations = Organization(server_config).search(
query={u'search': u'label={0}'.format(label)}
)
if len(organizations) != 1:
raise APIResponseError(
u'Could not find exactly one organization with label "{0}". '
u'Actual search results: {1}'.format(label, organizations)
)
return organizations[0].read() | def function[_get_org, parameter[server_config, label]]:
constant[Find an :class:`nailgun.entities.Organization` object.
:param nailgun.config.ServerConfig server_config: The server that should be
searched.
:param label: A string. The label of the organization to find.
:raises APIResponseError: If exactly one organization is not found.
:returns: An :class:`nailgun.entities.Organization` object.
]
variable[organizations] assign[=] call[call[name[Organization], parameter[name[server_config]]].search, parameter[]]
if compare[call[name[len], parameter[name[organizations]]] not_equal[!=] constant[1]] begin[:]
<ast.Raise object at 0x7da20cabe2c0>
return[call[call[name[organizations]][constant[0]].read, parameter[]]] | keyword[def] identifier[_get_org] ( identifier[server_config] , identifier[label] ):
literal[string]
identifier[organizations] = identifier[Organization] ( identifier[server_config] ). identifier[search] (
identifier[query] ={ literal[string] : literal[string] . identifier[format] ( identifier[label] )}
)
keyword[if] identifier[len] ( identifier[organizations] )!= literal[int] :
keyword[raise] identifier[APIResponseError] (
literal[string]
literal[string] . identifier[format] ( identifier[label] , identifier[organizations] )
)
keyword[return] identifier[organizations] [ literal[int] ]. identifier[read] () | def _get_org(server_config, label):
"""Find an :class:`nailgun.entities.Organization` object.
:param nailgun.config.ServerConfig server_config: The server that should be
searched.
:param label: A string. The label of the organization to find.
:raises APIResponseError: If exactly one organization is not found.
:returns: An :class:`nailgun.entities.Organization` object.
"""
organizations = Organization(server_config).search(query={u'search': u'label={0}'.format(label)})
if len(organizations) != 1:
raise APIResponseError(u'Could not find exactly one organization with label "{0}". Actual search results: {1}'.format(label, organizations)) # depends on [control=['if'], data=[]]
return organizations[0].read() |
def update_wish_list_by_id(cls, wish_list_id, wish_list, **kwargs):
"""Update WishList
Update attributes of WishList
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_wish_list_by_id(wish_list_id, wish_list, async=True)
>>> result = thread.get()
:param async bool
:param str wish_list_id: ID of wishList to update. (required)
:param WishList wish_list: Attributes of wishList to update. (required)
:return: WishList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_wish_list_by_id_with_http_info(wish_list_id, wish_list, **kwargs)
else:
(data) = cls._update_wish_list_by_id_with_http_info(wish_list_id, wish_list, **kwargs)
return data | def function[update_wish_list_by_id, parameter[cls, wish_list_id, wish_list]]:
constant[Update WishList
Update attributes of WishList
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_wish_list_by_id(wish_list_id, wish_list, async=True)
>>> result = thread.get()
:param async bool
:param str wish_list_id: ID of wishList to update. (required)
:param WishList wish_list: Attributes of wishList to update. (required)
:return: WishList
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async]]] begin[:]
return[call[name[cls]._update_wish_list_by_id_with_http_info, parameter[name[wish_list_id], name[wish_list]]]] | keyword[def] identifier[update_wish_list_by_id] ( identifier[cls] , identifier[wish_list_id] , identifier[wish_list] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[cls] . identifier[_update_wish_list_by_id_with_http_info] ( identifier[wish_list_id] , identifier[wish_list] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[cls] . identifier[_update_wish_list_by_id_with_http_info] ( identifier[wish_list_id] , identifier[wish_list] ,** identifier[kwargs] )
keyword[return] identifier[data] | def update_wish_list_by_id(cls, wish_list_id, wish_list, **kwargs):
"""Update WishList
Update attributes of WishList
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_wish_list_by_id(wish_list_id, wish_list, async=True)
>>> result = thread.get()
:param async bool
:param str wish_list_id: ID of wishList to update. (required)
:param WishList wish_list: Attributes of wishList to update. (required)
:return: WishList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_wish_list_by_id_with_http_info(wish_list_id, wish_list, **kwargs) # depends on [control=['if'], data=[]]
else:
data = cls._update_wish_list_by_id_with_http_info(wish_list_id, wish_list, **kwargs)
return data |
def image_path_from_index(self, index):
"""
given image index, find out full path
Parameters:
----------
index: int
index of a specific image
Returns:
----------
full path of this image
"""
assert self.image_set_index is not None, "Dataset not initialized"
name = self.image_set_index[index]
image_file = os.path.join(self.image_dir, 'images', name)
assert os.path.isfile(image_file), 'Path does not exist: {}'.format(image_file)
return image_file | def function[image_path_from_index, parameter[self, index]]:
constant[
given image index, find out full path
Parameters:
----------
index: int
index of a specific image
Returns:
----------
full path of this image
]
assert[compare[name[self].image_set_index is_not constant[None]]]
variable[name] assign[=] call[name[self].image_set_index][name[index]]
variable[image_file] assign[=] call[name[os].path.join, parameter[name[self].image_dir, constant[images], name[name]]]
assert[call[name[os].path.isfile, parameter[name[image_file]]]]
return[name[image_file]] | keyword[def] identifier[image_path_from_index] ( identifier[self] , identifier[index] ):
literal[string]
keyword[assert] identifier[self] . identifier[image_set_index] keyword[is] keyword[not] keyword[None] , literal[string]
identifier[name] = identifier[self] . identifier[image_set_index] [ identifier[index] ]
identifier[image_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[image_dir] , literal[string] , identifier[name] )
keyword[assert] identifier[os] . identifier[path] . identifier[isfile] ( identifier[image_file] ), literal[string] . identifier[format] ( identifier[image_file] )
keyword[return] identifier[image_file] | def image_path_from_index(self, index):
"""
given image index, find out full path
Parameters:
----------
index: int
index of a specific image
Returns:
----------
full path of this image
"""
assert self.image_set_index is not None, 'Dataset not initialized'
name = self.image_set_index[index]
image_file = os.path.join(self.image_dir, 'images', name)
assert os.path.isfile(image_file), 'Path does not exist: {}'.format(image_file)
return image_file |
def merge_strings(list_of_strings: Union[List[str], Tuple[str]]) -> dict:
"""
Pack the list of strings into two arrays: the concatenated chars and the \
individual string lengths. :func:`split_strings()` does the inverse.
:param list_of_strings: The :class:`tuple` or :class:`list` of :class:`str`-s \
or :class:`bytes`-s to pack.
:return: :class:`dict` with "strings" and "lengths" \
:class:`numpy.ndarray`-s.
"""
if not isinstance(list_of_strings, (tuple, list)):
raise TypeError("list_of_strings must be either a tuple or a list")
if len(list_of_strings) == 0:
return {"strings": numpy.array([], dtype="S1"),
"lengths": numpy.array([], dtype=int),
"str": None}
with_str = not isinstance(list_of_strings[0], bytes)
if with_str:
if not isinstance(list_of_strings[0], str):
raise TypeError("list_of_strings must contain either bytes or strings")
strings = numpy.array(["".join(list_of_strings).encode("utf-8")])
else:
merged = bytearray(sum(len(s) for s in list_of_strings))
offset = 0
for s in list_of_strings:
merged[offset:offset + len(s)] = s
offset += len(s)
strings = numpy.frombuffer(merged, dtype="S%d" % len(merged))
lengths = [0] * len(list_of_strings)
for i, s in enumerate(list_of_strings):
lengths[i] = len(s)
lengths = squeeze_bits(numpy.array(lengths, dtype=int))
return {"strings": strings, "lengths": lengths, "str": with_str} | def function[merge_strings, parameter[list_of_strings]]:
constant[
Pack the list of strings into two arrays: the concatenated chars and the individual string lengths. :func:`split_strings()` does the inverse.
:param list_of_strings: The :class:`tuple` or :class:`list` of :class:`str`-s or :class:`bytes`-s to pack.
:return: :class:`dict` with "strings" and "lengths" :class:`numpy.ndarray`-s.
]
if <ast.UnaryOp object at 0x7da1b0c91120> begin[:]
<ast.Raise object at 0x7da1b0c90ac0>
if compare[call[name[len], parameter[name[list_of_strings]]] equal[==] constant[0]] begin[:]
return[dictionary[[<ast.Constant object at 0x7da1b0c912d0>, <ast.Constant object at 0x7da1b0c91240>, <ast.Constant object at 0x7da1b0c92aa0>], [<ast.Call object at 0x7da1b0c91a50>, <ast.Call object at 0x7da1b0c92140>, <ast.Constant object at 0x7da1b0c92920>]]]
variable[with_str] assign[=] <ast.UnaryOp object at 0x7da1b0c90c70>
if name[with_str] begin[:]
if <ast.UnaryOp object at 0x7da1b0c926b0> begin[:]
<ast.Raise object at 0x7da1b0c934c0>
variable[strings] assign[=] call[name[numpy].array, parameter[list[[<ast.Call object at 0x7da1b0c91ed0>]]]]
variable[lengths] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b0c90d90>]] * call[name[len], parameter[name[list_of_strings]]]]
for taget[tuple[[<ast.Name object at 0x7da1b0c93a00>, <ast.Name object at 0x7da1b0c92290>]]] in starred[call[name[enumerate], parameter[name[list_of_strings]]]] begin[:]
call[name[lengths]][name[i]] assign[=] call[name[len], parameter[name[s]]]
variable[lengths] assign[=] call[name[squeeze_bits], parameter[call[name[numpy].array, parameter[name[lengths]]]]]
return[dictionary[[<ast.Constant object at 0x7da1b0c90a30>, <ast.Constant object at 0x7da1b0c90c10>, <ast.Constant object at 0x7da1b0c915a0>], [<ast.Name object at 0x7da1b0c92860>, <ast.Name object at 0x7da1b0c935b0>, <ast.Name object at 0x7da1b0c90550>]]] | keyword[def] identifier[merge_strings] ( identifier[list_of_strings] : identifier[Union] [ identifier[List] [ identifier[str] ], identifier[Tuple] [ identifier[str] ]])-> identifier[dict] :
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[list_of_strings] ,( identifier[tuple] , identifier[list] )):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[len] ( identifier[list_of_strings] )== literal[int] :
keyword[return] { literal[string] : identifier[numpy] . identifier[array] ([], identifier[dtype] = literal[string] ),
literal[string] : identifier[numpy] . identifier[array] ([], identifier[dtype] = identifier[int] ),
literal[string] : keyword[None] }
identifier[with_str] = keyword[not] identifier[isinstance] ( identifier[list_of_strings] [ literal[int] ], identifier[bytes] )
keyword[if] identifier[with_str] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[list_of_strings] [ literal[int] ], identifier[str] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[strings] = identifier[numpy] . identifier[array] ([ literal[string] . identifier[join] ( identifier[list_of_strings] ). identifier[encode] ( literal[string] )])
keyword[else] :
identifier[merged] = identifier[bytearray] ( identifier[sum] ( identifier[len] ( identifier[s] ) keyword[for] identifier[s] keyword[in] identifier[list_of_strings] ))
identifier[offset] = literal[int]
keyword[for] identifier[s] keyword[in] identifier[list_of_strings] :
identifier[merged] [ identifier[offset] : identifier[offset] + identifier[len] ( identifier[s] )]= identifier[s]
identifier[offset] += identifier[len] ( identifier[s] )
identifier[strings] = identifier[numpy] . identifier[frombuffer] ( identifier[merged] , identifier[dtype] = literal[string] % identifier[len] ( identifier[merged] ))
identifier[lengths] =[ literal[int] ]* identifier[len] ( identifier[list_of_strings] )
keyword[for] identifier[i] , identifier[s] keyword[in] identifier[enumerate] ( identifier[list_of_strings] ):
identifier[lengths] [ identifier[i] ]= identifier[len] ( identifier[s] )
identifier[lengths] = identifier[squeeze_bits] ( identifier[numpy] . identifier[array] ( identifier[lengths] , identifier[dtype] = identifier[int] ))
keyword[return] { literal[string] : identifier[strings] , literal[string] : identifier[lengths] , literal[string] : identifier[with_str] } | def merge_strings(list_of_strings: Union[List[str], Tuple[str]]) -> dict:
"""
Pack the list of strings into two arrays: the concatenated chars and the individual string lengths. :func:`split_strings()` does the inverse.
:param list_of_strings: The :class:`tuple` or :class:`list` of :class:`str`-s or :class:`bytes`-s to pack.
:return: :class:`dict` with "strings" and "lengths" :class:`numpy.ndarray`-s.
"""
if not isinstance(list_of_strings, (tuple, list)):
raise TypeError('list_of_strings must be either a tuple or a list') # depends on [control=['if'], data=[]]
if len(list_of_strings) == 0:
return {'strings': numpy.array([], dtype='S1'), 'lengths': numpy.array([], dtype=int), 'str': None} # depends on [control=['if'], data=[]]
with_str = not isinstance(list_of_strings[0], bytes)
if with_str:
if not isinstance(list_of_strings[0], str):
raise TypeError('list_of_strings must contain either bytes or strings') # depends on [control=['if'], data=[]]
strings = numpy.array([''.join(list_of_strings).encode('utf-8')]) # depends on [control=['if'], data=[]]
else:
merged = bytearray(sum((len(s) for s in list_of_strings)))
offset = 0
for s in list_of_strings:
merged[offset:offset + len(s)] = s
offset += len(s) # depends on [control=['for'], data=['s']]
strings = numpy.frombuffer(merged, dtype='S%d' % len(merged))
lengths = [0] * len(list_of_strings)
for (i, s) in enumerate(list_of_strings):
lengths[i] = len(s) # depends on [control=['for'], data=[]]
lengths = squeeze_bits(numpy.array(lengths, dtype=int))
return {'strings': strings, 'lengths': lengths, 'str': with_str} |
def normal_component(data_x, data_y, index='index'):
r"""Obtain the normal component of a cross-section of a vector field.
Parameters
----------
data_x : `xarray.DataArray`
The input DataArray of the x-component (in terms of data projection) of the vector
field.
data_y : `xarray.DataArray`
The input DataArray of the y-component (in terms of data projection) of the vector
field.
Returns
-------
component_normal: `xarray.DataArray`
The component of the vector field in the normal directions.
See Also
--------
cross_section_components, tangential_component
Notes
-----
The coordinates of `data_x` and `data_y` must match.
"""
# Get the unit vectors
_, unit_norm = unit_vectors_from_cross_section(data_x, index=index)
# Take the dot products
component_norm = data_x * unit_norm[0] + data_y * unit_norm[1]
# Reattach only reliable attributes after operation
for attr in ('units', 'grid_mapping'):
if attr in data_x.attrs:
component_norm.attrs[attr] = data_x.attrs[attr]
return component_norm | def function[normal_component, parameter[data_x, data_y, index]]:
constant[Obtain the normal component of a cross-section of a vector field.
Parameters
----------
data_x : `xarray.DataArray`
The input DataArray of the x-component (in terms of data projection) of the vector
field.
data_y : `xarray.DataArray`
The input DataArray of the y-component (in terms of data projection) of the vector
field.
Returns
-------
component_normal: `xarray.DataArray`
The component of the vector field in the normal directions.
See Also
--------
cross_section_components, tangential_component
Notes
-----
The coordinates of `data_x` and `data_y` must match.
]
<ast.Tuple object at 0x7da1b1d5e110> assign[=] call[name[unit_vectors_from_cross_section], parameter[name[data_x]]]
variable[component_norm] assign[=] binary_operation[binary_operation[name[data_x] * call[name[unit_norm]][constant[0]]] + binary_operation[name[data_y] * call[name[unit_norm]][constant[1]]]]
for taget[name[attr]] in starred[tuple[[<ast.Constant object at 0x7da1b1d5dc90>, <ast.Constant object at 0x7da1b1d5dc60>]]] begin[:]
if compare[name[attr] in name[data_x].attrs] begin[:]
call[name[component_norm].attrs][name[attr]] assign[=] call[name[data_x].attrs][name[attr]]
return[name[component_norm]] | keyword[def] identifier[normal_component] ( identifier[data_x] , identifier[data_y] , identifier[index] = literal[string] ):
literal[string]
identifier[_] , identifier[unit_norm] = identifier[unit_vectors_from_cross_section] ( identifier[data_x] , identifier[index] = identifier[index] )
identifier[component_norm] = identifier[data_x] * identifier[unit_norm] [ literal[int] ]+ identifier[data_y] * identifier[unit_norm] [ literal[int] ]
keyword[for] identifier[attr] keyword[in] ( literal[string] , literal[string] ):
keyword[if] identifier[attr] keyword[in] identifier[data_x] . identifier[attrs] :
identifier[component_norm] . identifier[attrs] [ identifier[attr] ]= identifier[data_x] . identifier[attrs] [ identifier[attr] ]
keyword[return] identifier[component_norm] | def normal_component(data_x, data_y, index='index'):
"""Obtain the normal component of a cross-section of a vector field.
Parameters
----------
data_x : `xarray.DataArray`
The input DataArray of the x-component (in terms of data projection) of the vector
field.
data_y : `xarray.DataArray`
The input DataArray of the y-component (in terms of data projection) of the vector
field.
Returns
-------
component_normal: `xarray.DataArray`
The component of the vector field in the normal directions.
See Also
--------
cross_section_components, tangential_component
Notes
-----
The coordinates of `data_x` and `data_y` must match.
"""
# Get the unit vectors
(_, unit_norm) = unit_vectors_from_cross_section(data_x, index=index)
# Take the dot products
component_norm = data_x * unit_norm[0] + data_y * unit_norm[1]
# Reattach only reliable attributes after operation
for attr in ('units', 'grid_mapping'):
if attr in data_x.attrs:
component_norm.attrs[attr] = data_x.attrs[attr] # depends on [control=['if'], data=['attr']] # depends on [control=['for'], data=['attr']]
return component_norm |
def _get_available_encodings():
"""Get a list of the available encodings to make it easy to
tab-complete the command line interface.
Inspiration from http://stackoverflow.com/a/3824405/564709
"""
available_encodings = set(encodings.aliases.aliases.values())
paths = [os.path.dirname(encodings.__file__)]
for importer, modname, ispkg in pkgutil.walk_packages(path=paths):
available_encodings.add(modname)
available_encodings = list(available_encodings)
available_encodings.sort()
return available_encodings | def function[_get_available_encodings, parameter[]]:
constant[Get a list of the available encodings to make it easy to
tab-complete the command line interface.
Inspiration from http://stackoverflow.com/a/3824405/564709
]
variable[available_encodings] assign[=] call[name[set], parameter[call[name[encodings].aliases.aliases.values, parameter[]]]]
variable[paths] assign[=] list[[<ast.Call object at 0x7da1b17a88e0>]]
for taget[tuple[[<ast.Name object at 0x7da1b17a8790>, <ast.Name object at 0x7da1b17a8c40>, <ast.Name object at 0x7da1b17aabc0>]]] in starred[call[name[pkgutil].walk_packages, parameter[]]] begin[:]
call[name[available_encodings].add, parameter[name[modname]]]
variable[available_encodings] assign[=] call[name[list], parameter[name[available_encodings]]]
call[name[available_encodings].sort, parameter[]]
return[name[available_encodings]] | keyword[def] identifier[_get_available_encodings] ():
literal[string]
identifier[available_encodings] = identifier[set] ( identifier[encodings] . identifier[aliases] . identifier[aliases] . identifier[values] ())
identifier[paths] =[ identifier[os] . identifier[path] . identifier[dirname] ( identifier[encodings] . identifier[__file__] )]
keyword[for] identifier[importer] , identifier[modname] , identifier[ispkg] keyword[in] identifier[pkgutil] . identifier[walk_packages] ( identifier[path] = identifier[paths] ):
identifier[available_encodings] . identifier[add] ( identifier[modname] )
identifier[available_encodings] = identifier[list] ( identifier[available_encodings] )
identifier[available_encodings] . identifier[sort] ()
keyword[return] identifier[available_encodings] | def _get_available_encodings():
"""Get a list of the available encodings to make it easy to
tab-complete the command line interface.
Inspiration from http://stackoverflow.com/a/3824405/564709
"""
available_encodings = set(encodings.aliases.aliases.values())
paths = [os.path.dirname(encodings.__file__)]
for (importer, modname, ispkg) in pkgutil.walk_packages(path=paths):
available_encodings.add(modname) # depends on [control=['for'], data=[]]
available_encodings = list(available_encodings)
available_encodings.sort()
return available_encodings |
def send_work(self):
'''Sends the query to the actor for it to start executing the
work.
It is possible to execute once again a future that has finished
if necessary (overwriting the results), but only one execution
at a time.
'''
if self.__set_running():
# msg = FutureRequest(FUTURE, self.__method, self.__params,
# self.__channel, self.__target, self.__id)
msg = {TYPE: FUTURE, METHOD: self.__method, PARAMS: self.__params,
CHANNEL: self.__channel, TO: self.__target,
RPC_ID: self.__id}
self.__actor_channel.send(msg)
else:
raise FutureError("Future already running.") | def function[send_work, parameter[self]]:
constant[Sends the query to the actor for it to start executing the
work.
It is possible to execute once again a future that has finished
if necessary (overwriting the results), but only one execution
at a time.
]
if call[name[self].__set_running, parameter[]] begin[:]
variable[msg] assign[=] dictionary[[<ast.Name object at 0x7da18c4cc160>, <ast.Name object at 0x7da18c4cdf90>, <ast.Name object at 0x7da18c4cdd20>, <ast.Name object at 0x7da18c4cc400>, <ast.Name object at 0x7da18c4cf460>, <ast.Name object at 0x7da18c4cda50>], [<ast.Name object at 0x7da18c4cc610>, <ast.Attribute object at 0x7da18c4cd2a0>, <ast.Attribute object at 0x7da18c4cd150>, <ast.Attribute object at 0x7da18c4cf6d0>, <ast.Attribute object at 0x7da18c4cd3c0>, <ast.Attribute object at 0x7da18c4ccfa0>]]
call[name[self].__actor_channel.send, parameter[name[msg]]] | keyword[def] identifier[send_work] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[__set_running] ():
identifier[msg] ={ identifier[TYPE] : identifier[FUTURE] , identifier[METHOD] : identifier[self] . identifier[__method] , identifier[PARAMS] : identifier[self] . identifier[__params] ,
identifier[CHANNEL] : identifier[self] . identifier[__channel] , identifier[TO] : identifier[self] . identifier[__target] ,
identifier[RPC_ID] : identifier[self] . identifier[__id] }
identifier[self] . identifier[__actor_channel] . identifier[send] ( identifier[msg] )
keyword[else] :
keyword[raise] identifier[FutureError] ( literal[string] ) | def send_work(self):
"""Sends the query to the actor for it to start executing the
work.
It is possible to execute once again a future that has finished
if necessary (overwriting the results), but only one execution
at a time.
"""
if self.__set_running():
# msg = FutureRequest(FUTURE, self.__method, self.__params,
# self.__channel, self.__target, self.__id)
msg = {TYPE: FUTURE, METHOD: self.__method, PARAMS: self.__params, CHANNEL: self.__channel, TO: self.__target, RPC_ID: self.__id}
self.__actor_channel.send(msg) # depends on [control=['if'], data=[]]
else:
raise FutureError('Future already running.') |
async def get_agents(self, addr=True, agent_cls=None):
"""Get addresses of all agents in all the slave environments.
This is a managing function for
:meth:`creamas.mp.MultiEnvironment.get_agents`.
.. note::
Since :class:`aiomas.rpc.Proxy` objects do not seem to handle
(re)serialization, ``addr`` and ``agent_cls`` parameters are
omitted from the call to underlying multi-environment's
:meth:`get_agents`.
If :class:`aiomas.rpc.Proxy` objects from all the agents are
needed, call each slave environment manager's :meth:`get_agents`
directly.
"""
return await self.menv.get_agents(addr=True, agent_cls=None,
as_coro=True) | <ast.AsyncFunctionDef object at 0x7da18c4ce290> | keyword[async] keyword[def] identifier[get_agents] ( identifier[self] , identifier[addr] = keyword[True] , identifier[agent_cls] = keyword[None] ):
literal[string]
keyword[return] keyword[await] identifier[self] . identifier[menv] . identifier[get_agents] ( identifier[addr] = keyword[True] , identifier[agent_cls] = keyword[None] ,
identifier[as_coro] = keyword[True] ) | async def get_agents(self, addr=True, agent_cls=None):
"""Get addresses of all agents in all the slave environments.
This is a managing function for
:meth:`creamas.mp.MultiEnvironment.get_agents`.
.. note::
Since :class:`aiomas.rpc.Proxy` objects do not seem to handle
(re)serialization, ``addr`` and ``agent_cls`` parameters are
omitted from the call to underlying multi-environment's
:meth:`get_agents`.
If :class:`aiomas.rpc.Proxy` objects from all the agents are
needed, call each slave environment manager's :meth:`get_agents`
directly.
"""
return await self.menv.get_agents(addr=True, agent_cls=None, as_coro=True) |
def find_partition_multiplex(graphs, partition_type, **kwargs):
""" Detect communities for multiplex graphs.
Each graph should be defined on the same set of vertices, only the edges may
differ for different graphs. See
:func:`Optimiser.optimise_partition_multiplex` for a more detailed
explanation.
Parameters
----------
graphs : list of :class:`ig.Graph`
List of :class:`louvain.VertexPartition` layers to optimise.
partition_type : type of :class:`MutableVertexPartition`
The type of partition to use for optimisation (identical for all graphs).
**kwargs
Remaining keyword arguments, passed on to constructor of ``partition_type``.
Returns
-------
list of int
membership of nodes.
float
Improvement in quality of combined partitions, see
:func:`Optimiser.optimise_partition_multiplex`.
Notes
-----
We don't return a partition in this case because a partition is always
defined on a single graph. We therefore simply return the membership (which
is the same for all layers).
See Also
--------
:func:`Optimiser.optimise_partition_multiplex`
:func:`slices_to_layers`
Examples
--------
>>> n = 100
>>> G_1 = ig.Graph.Lattice([n], 1)
>>> G_2 = ig.Graph.Lattice([n], 1)
>>> membership, improvement = louvain.find_partition_multiplex([G_1, G_2],
... louvain.ModularityVertexPartition)
"""
n_layers = len(graphs)
partitions = []
layer_weights = [1]*n_layers
for graph in graphs:
partitions.append(partition_type(graph, **kwargs))
optimiser = Optimiser()
improvement = optimiser.optimise_partition_multiplex(partitions, layer_weights)
return partitions[0].membership, improvement | def function[find_partition_multiplex, parameter[graphs, partition_type]]:
constant[ Detect communities for multiplex graphs.
Each graph should be defined on the same set of vertices, only the edges may
differ for different graphs. See
:func:`Optimiser.optimise_partition_multiplex` for a more detailed
explanation.
Parameters
----------
graphs : list of :class:`ig.Graph`
List of :class:`louvain.VertexPartition` layers to optimise.
partition_type : type of :class:`MutableVertexPartition`
The type of partition to use for optimisation (identical for all graphs).
**kwargs
Remaining keyword arguments, passed on to constructor of ``partition_type``.
Returns
-------
list of int
membership of nodes.
float
Improvement in quality of combined partitions, see
:func:`Optimiser.optimise_partition_multiplex`.
Notes
-----
We don't return a partition in this case because a partition is always
defined on a single graph. We therefore simply return the membership (which
is the same for all layers).
See Also
--------
:func:`Optimiser.optimise_partition_multiplex`
:func:`slices_to_layers`
Examples
--------
>>> n = 100
>>> G_1 = ig.Graph.Lattice([n], 1)
>>> G_2 = ig.Graph.Lattice([n], 1)
>>> membership, improvement = louvain.find_partition_multiplex([G_1, G_2],
... louvain.ModularityVertexPartition)
]
variable[n_layers] assign[=] call[name[len], parameter[name[graphs]]]
variable[partitions] assign[=] list[[]]
variable[layer_weights] assign[=] binary_operation[list[[<ast.Constant object at 0x7da18f721b70>]] * name[n_layers]]
for taget[name[graph]] in starred[name[graphs]] begin[:]
call[name[partitions].append, parameter[call[name[partition_type], parameter[name[graph]]]]]
variable[optimiser] assign[=] call[name[Optimiser], parameter[]]
variable[improvement] assign[=] call[name[optimiser].optimise_partition_multiplex, parameter[name[partitions], name[layer_weights]]]
return[tuple[[<ast.Attribute object at 0x7da18f720460>, <ast.Name object at 0x7da18f00d960>]]] | keyword[def] identifier[find_partition_multiplex] ( identifier[graphs] , identifier[partition_type] ,** identifier[kwargs] ):
literal[string]
identifier[n_layers] = identifier[len] ( identifier[graphs] )
identifier[partitions] =[]
identifier[layer_weights] =[ literal[int] ]* identifier[n_layers]
keyword[for] identifier[graph] keyword[in] identifier[graphs] :
identifier[partitions] . identifier[append] ( identifier[partition_type] ( identifier[graph] ,** identifier[kwargs] ))
identifier[optimiser] = identifier[Optimiser] ()
identifier[improvement] = identifier[optimiser] . identifier[optimise_partition_multiplex] ( identifier[partitions] , identifier[layer_weights] )
keyword[return] identifier[partitions] [ literal[int] ]. identifier[membership] , identifier[improvement] | def find_partition_multiplex(graphs, partition_type, **kwargs):
""" Detect communities for multiplex graphs.
Each graph should be defined on the same set of vertices, only the edges may
differ for different graphs. See
:func:`Optimiser.optimise_partition_multiplex` for a more detailed
explanation.
Parameters
----------
graphs : list of :class:`ig.Graph`
List of :class:`louvain.VertexPartition` layers to optimise.
partition_type : type of :class:`MutableVertexPartition`
The type of partition to use for optimisation (identical for all graphs).
**kwargs
Remaining keyword arguments, passed on to constructor of ``partition_type``.
Returns
-------
list of int
membership of nodes.
float
Improvement in quality of combined partitions, see
:func:`Optimiser.optimise_partition_multiplex`.
Notes
-----
We don't return a partition in this case because a partition is always
defined on a single graph. We therefore simply return the membership (which
is the same for all layers).
See Also
--------
:func:`Optimiser.optimise_partition_multiplex`
:func:`slices_to_layers`
Examples
--------
>>> n = 100
>>> G_1 = ig.Graph.Lattice([n], 1)
>>> G_2 = ig.Graph.Lattice([n], 1)
>>> membership, improvement = louvain.find_partition_multiplex([G_1, G_2],
... louvain.ModularityVertexPartition)
"""
n_layers = len(graphs)
partitions = []
layer_weights = [1] * n_layers
for graph in graphs:
partitions.append(partition_type(graph, **kwargs)) # depends on [control=['for'], data=['graph']]
optimiser = Optimiser()
improvement = optimiser.optimise_partition_multiplex(partitions, layer_weights)
return (partitions[0].membership, improvement) |
async def set_endpoint_for_did(wallet_handle: int,
did: str,
address: str,
transport_key: str) -> None:
"""
Set/replaces endpoint information for the given DID.
:param wallet_handle: Wallet handle (created by open_wallet).
:param did: The DID to resolve endpoint.
:param address: The DIDs endpoint address.
:param transport_key: The DIDs transport key (ver key, key id).
:return: Error code
"""
logger = logging.getLogger(__name__)
logger.debug("set_endpoint_for_did: >>> wallet_handle: %r, did: %r, address: %r, transport_key: %r",
wallet_handle,
did,
address,
transport_key)
if not hasattr(set_endpoint_for_did, "cb"):
logger.debug("set_endpoint_for_did: Creating callback")
set_endpoint_for_did.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))
c_wallet_handle = c_int32(wallet_handle)
c_did = c_char_p(did.encode('utf-8'))
c_address = c_char_p(address.encode('utf-8'))
c_transport_key = c_char_p(transport_key.encode('utf-8'))
await do_call('indy_set_endpoint_for_did',
c_wallet_handle,
c_did,
c_address,
c_transport_key,
set_endpoint_for_did.cb)
logger.debug("set_endpoint_for_did: <<<") | <ast.AsyncFunctionDef object at 0x7da18ede6050> | keyword[async] keyword[def] identifier[set_endpoint_for_did] ( identifier[wallet_handle] : identifier[int] ,
identifier[did] : identifier[str] ,
identifier[address] : identifier[str] ,
identifier[transport_key] : identifier[str] )-> keyword[None] :
literal[string]
identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] )
identifier[logger] . identifier[debug] ( literal[string] ,
identifier[wallet_handle] ,
identifier[did] ,
identifier[address] ,
identifier[transport_key] )
keyword[if] keyword[not] identifier[hasattr] ( identifier[set_endpoint_for_did] , literal[string] ):
identifier[logger] . identifier[debug] ( literal[string] )
identifier[set_endpoint_for_did] . identifier[cb] = identifier[create_cb] ( identifier[CFUNCTYPE] ( keyword[None] , identifier[c_int32] , identifier[c_int32] ))
identifier[c_wallet_handle] = identifier[c_int32] ( identifier[wallet_handle] )
identifier[c_did] = identifier[c_char_p] ( identifier[did] . identifier[encode] ( literal[string] ))
identifier[c_address] = identifier[c_char_p] ( identifier[address] . identifier[encode] ( literal[string] ))
identifier[c_transport_key] = identifier[c_char_p] ( identifier[transport_key] . identifier[encode] ( literal[string] ))
keyword[await] identifier[do_call] ( literal[string] ,
identifier[c_wallet_handle] ,
identifier[c_did] ,
identifier[c_address] ,
identifier[c_transport_key] ,
identifier[set_endpoint_for_did] . identifier[cb] )
identifier[logger] . identifier[debug] ( literal[string] ) | async def set_endpoint_for_did(wallet_handle: int, did: str, address: str, transport_key: str) -> None:
"""
Set/replaces endpoint information for the given DID.
:param wallet_handle: Wallet handle (created by open_wallet).
:param did: The DID to resolve endpoint.
:param address: The DIDs endpoint address.
:param transport_key: The DIDs transport key (ver key, key id).
:return: Error code
"""
logger = logging.getLogger(__name__)
logger.debug('set_endpoint_for_did: >>> wallet_handle: %r, did: %r, address: %r, transport_key: %r', wallet_handle, did, address, transport_key)
if not hasattr(set_endpoint_for_did, 'cb'):
logger.debug('set_endpoint_for_did: Creating callback')
set_endpoint_for_did.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32)) # depends on [control=['if'], data=[]]
c_wallet_handle = c_int32(wallet_handle)
c_did = c_char_p(did.encode('utf-8'))
c_address = c_char_p(address.encode('utf-8'))
c_transport_key = c_char_p(transport_key.encode('utf-8'))
await do_call('indy_set_endpoint_for_did', c_wallet_handle, c_did, c_address, c_transport_key, set_endpoint_for_did.cb)
logger.debug('set_endpoint_for_did: <<<') |
def remove_binary_support(self, api_id, cors=False):
"""
Remove binary support
"""
response = self.apigateway_client.get_rest_api(
restApiId=api_id
)
if "binaryMediaTypes" in response and "*/*" in response["binaryMediaTypes"]:
self.apigateway_client.update_rest_api(
restApiId=api_id,
patchOperations=[
{
'op': 'remove',
'path': '/binaryMediaTypes/*~1*'
}
]
)
if cors:
# go through each resource and change the contentHandling type
response = self.apigateway_client.get_resources(restApiId=api_id)
resource_ids = [
item['id'] for item in response['items']
if 'OPTIONS' in item.get('resourceMethods', {})
]
for resource_id in resource_ids:
self.apigateway_client.update_integration(
restApiId=api_id,
resourceId=resource_id,
httpMethod='OPTIONS',
patchOperations=[
{
"op": "replace",
"path": "/contentHandling",
"value": ""
}
]
) | def function[remove_binary_support, parameter[self, api_id, cors]]:
constant[
Remove binary support
]
variable[response] assign[=] call[name[self].apigateway_client.get_rest_api, parameter[]]
if <ast.BoolOp object at 0x7da1b1f1ac80> begin[:]
call[name[self].apigateway_client.update_rest_api, parameter[]]
if name[cors] begin[:]
variable[response] assign[=] call[name[self].apigateway_client.get_resources, parameter[]]
variable[resource_ids] assign[=] <ast.ListComp object at 0x7da1b21d44c0>
for taget[name[resource_id]] in starred[name[resource_ids]] begin[:]
call[name[self].apigateway_client.update_integration, parameter[]] | keyword[def] identifier[remove_binary_support] ( identifier[self] , identifier[api_id] , identifier[cors] = keyword[False] ):
literal[string]
identifier[response] = identifier[self] . identifier[apigateway_client] . identifier[get_rest_api] (
identifier[restApiId] = identifier[api_id]
)
keyword[if] literal[string] keyword[in] identifier[response] keyword[and] literal[string] keyword[in] identifier[response] [ literal[string] ]:
identifier[self] . identifier[apigateway_client] . identifier[update_rest_api] (
identifier[restApiId] = identifier[api_id] ,
identifier[patchOperations] =[
{
literal[string] : literal[string] ,
literal[string] : literal[string]
}
]
)
keyword[if] identifier[cors] :
identifier[response] = identifier[self] . identifier[apigateway_client] . identifier[get_resources] ( identifier[restApiId] = identifier[api_id] )
identifier[resource_ids] =[
identifier[item] [ literal[string] ] keyword[for] identifier[item] keyword[in] identifier[response] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[item] . identifier[get] ( literal[string] ,{})
]
keyword[for] identifier[resource_id] keyword[in] identifier[resource_ids] :
identifier[self] . identifier[apigateway_client] . identifier[update_integration] (
identifier[restApiId] = identifier[api_id] ,
identifier[resourceId] = identifier[resource_id] ,
identifier[httpMethod] = literal[string] ,
identifier[patchOperations] =[
{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
}
]
) | def remove_binary_support(self, api_id, cors=False):
"""
Remove binary support
"""
response = self.apigateway_client.get_rest_api(restApiId=api_id)
if 'binaryMediaTypes' in response and '*/*' in response['binaryMediaTypes']:
self.apigateway_client.update_rest_api(restApiId=api_id, patchOperations=[{'op': 'remove', 'path': '/binaryMediaTypes/*~1*'}]) # depends on [control=['if'], data=[]]
if cors:
# go through each resource and change the contentHandling type
response = self.apigateway_client.get_resources(restApiId=api_id)
resource_ids = [item['id'] for item in response['items'] if 'OPTIONS' in item.get('resourceMethods', {})]
for resource_id in resource_ids:
self.apigateway_client.update_integration(restApiId=api_id, resourceId=resource_id, httpMethod='OPTIONS', patchOperations=[{'op': 'replace', 'path': '/contentHandling', 'value': ''}]) # depends on [control=['for'], data=['resource_id']] # depends on [control=['if'], data=[]] |
def element_background_color_should_be(self, locator, expected):
"""Verifies the element identified by `locator` has the expected
background color (it verifies the CSS attribute background-color). Color should
be in RGBA format.
Example of rgba format: rgba(RED, GREEN, BLUE, ALPHA)
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| expected | expected color | rgba(0, 128, 0, 1) |"""
self._info("Verifying element '%s' has background color '%s'" % (locator, expected))
self._check_element_css_value(locator, 'background-color', expected) | def function[element_background_color_should_be, parameter[self, locator, expected]]:
constant[Verifies the element identified by `locator` has the expected
background color (it verifies the CSS attribute background-color). Color should
be in RGBA format.
Example of rgba format: rgba(RED, GREEN, BLUE, ALPHA)
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| expected | expected color | rgba(0, 128, 0, 1) |]
call[name[self]._info, parameter[binary_operation[constant[Verifying element '%s' has background color '%s'] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e9b2b30>, <ast.Name object at 0x7da20e9b12d0>]]]]]
call[name[self]._check_element_css_value, parameter[name[locator], constant[background-color], name[expected]]] | keyword[def] identifier[element_background_color_should_be] ( identifier[self] , identifier[locator] , identifier[expected] ):
literal[string]
identifier[self] . identifier[_info] ( literal[string] %( identifier[locator] , identifier[expected] ))
identifier[self] . identifier[_check_element_css_value] ( identifier[locator] , literal[string] , identifier[expected] ) | def element_background_color_should_be(self, locator, expected):
"""Verifies the element identified by `locator` has the expected
background color (it verifies the CSS attribute background-color). Color should
be in RGBA format.
Example of rgba format: rgba(RED, GREEN, BLUE, ALPHA)
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| expected | expected color | rgba(0, 128, 0, 1) |"""
self._info("Verifying element '%s' has background color '%s'" % (locator, expected))
self._check_element_css_value(locator, 'background-color', expected) |
def csv_row_cleaner(rows):
"""
Clean row checking:
- Not empty row.
- >=1 element different in a row.
- row allready in cleaned row result.
"""
result = []
for row in rows:
# check not empty row
check_empty = len(exclude_empty_values(row)) > 1
# check more or eq than 1 unique element in row
check_set = len(set(exclude_empty_values(row))) > 1
# check row not into result cleaned rows.
check_last_allready = (result and result[-1] == row)
if check_empty and check_set and not check_last_allready:
result.append(row)
return result | def function[csv_row_cleaner, parameter[rows]]:
constant[
Clean row checking:
- Not empty row.
- >=1 element different in a row.
- row allready in cleaned row result.
]
variable[result] assign[=] list[[]]
for taget[name[row]] in starred[name[rows]] begin[:]
variable[check_empty] assign[=] compare[call[name[len], parameter[call[name[exclude_empty_values], parameter[name[row]]]]] greater[>] constant[1]]
variable[check_set] assign[=] compare[call[name[len], parameter[call[name[set], parameter[call[name[exclude_empty_values], parameter[name[row]]]]]]] greater[>] constant[1]]
variable[check_last_allready] assign[=] <ast.BoolOp object at 0x7da20c6e7df0>
if <ast.BoolOp object at 0x7da20c6e6860> begin[:]
call[name[result].append, parameter[name[row]]]
return[name[result]] | keyword[def] identifier[csv_row_cleaner] ( identifier[rows] ):
literal[string]
identifier[result] =[]
keyword[for] identifier[row] keyword[in] identifier[rows] :
identifier[check_empty] = identifier[len] ( identifier[exclude_empty_values] ( identifier[row] ))> literal[int]
identifier[check_set] = identifier[len] ( identifier[set] ( identifier[exclude_empty_values] ( identifier[row] )))> literal[int]
identifier[check_last_allready] =( identifier[result] keyword[and] identifier[result] [- literal[int] ]== identifier[row] )
keyword[if] identifier[check_empty] keyword[and] identifier[check_set] keyword[and] keyword[not] identifier[check_last_allready] :
identifier[result] . identifier[append] ( identifier[row] )
keyword[return] identifier[result] | def csv_row_cleaner(rows):
"""
Clean row checking:
- Not empty row.
- >=1 element different in a row.
- row allready in cleaned row result.
"""
result = []
for row in rows:
# check not empty row
check_empty = len(exclude_empty_values(row)) > 1
# check more or eq than 1 unique element in row
check_set = len(set(exclude_empty_values(row))) > 1
# check row not into result cleaned rows.
check_last_allready = result and result[-1] == row
if check_empty and check_set and (not check_last_allready):
result.append(row) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['row']]
return result |
def put_file_range(ase, offsets, data, timeout=None):
# type: (blobxfer.models.azure.StorageEntity,
# blobxfer.models.upload.Offsets, bytes, int) -> None
"""Puts a range of bytes into the remote file
:param blobxfer.models.azure.StorageEntity ase: Azure StorageEntity
:param blobxfer.models.upload.Offsets offsets: upload offsets
:param bytes data: data
:param int timeout: timeout
"""
dir, fpath, _ = parse_file_path(ase.name)
ase.client.update_range(
share_name=ase.container,
directory_name=dir,
file_name=fpath,
data=data,
start_range=offsets.range_start,
end_range=offsets.range_end,
validate_content=False, # integrity is enforced with HTTPS
timeout=timeout) | def function[put_file_range, parameter[ase, offsets, data, timeout]]:
constant[Puts a range of bytes into the remote file
:param blobxfer.models.azure.StorageEntity ase: Azure StorageEntity
:param blobxfer.models.upload.Offsets offsets: upload offsets
:param bytes data: data
:param int timeout: timeout
]
<ast.Tuple object at 0x7da18eb567a0> assign[=] call[name[parse_file_path], parameter[name[ase].name]]
call[name[ase].client.update_range, parameter[]] | keyword[def] identifier[put_file_range] ( identifier[ase] , identifier[offsets] , identifier[data] , identifier[timeout] = keyword[None] ):
literal[string]
identifier[dir] , identifier[fpath] , identifier[_] = identifier[parse_file_path] ( identifier[ase] . identifier[name] )
identifier[ase] . identifier[client] . identifier[update_range] (
identifier[share_name] = identifier[ase] . identifier[container] ,
identifier[directory_name] = identifier[dir] ,
identifier[file_name] = identifier[fpath] ,
identifier[data] = identifier[data] ,
identifier[start_range] = identifier[offsets] . identifier[range_start] ,
identifier[end_range] = identifier[offsets] . identifier[range_end] ,
identifier[validate_content] = keyword[False] ,
identifier[timeout] = identifier[timeout] ) | def put_file_range(ase, offsets, data, timeout=None):
# type: (blobxfer.models.azure.StorageEntity,
# blobxfer.models.upload.Offsets, bytes, int) -> None
'Puts a range of bytes into the remote file\n :param blobxfer.models.azure.StorageEntity ase: Azure StorageEntity\n :param blobxfer.models.upload.Offsets offsets: upload offsets\n :param bytes data: data\n :param int timeout: timeout\n '
(dir, fpath, _) = parse_file_path(ase.name) # integrity is enforced with HTTPS
ase.client.update_range(share_name=ase.container, directory_name=dir, file_name=fpath, data=data, start_range=offsets.range_start, end_range=offsets.range_end, validate_content=False, timeout=timeout) |
def dirWavFeatureExtractionNoAveraging(dirName, mt_win, mt_step, st_win, st_step):
"""
This function extracts the mid-term features of the WAVE
files of a particular folder without averaging each file.
ARGUMENTS:
- dirName: the path of the WAVE directory
- mt_win, mt_step: mid-term window and step (in seconds)
- st_win, st_step: short-term window and step (in seconds)
RETURNS:
- X: A feature matrix
- Y: A matrix of file labels
- filenames:
"""
all_mt_feats = numpy.array([])
signal_idx = numpy.array([])
process_times = []
types = ('*.wav', '*.aif', '*.aiff', '*.ogg')
wav_file_list = []
for files in types:
wav_file_list.extend(glob.glob(os.path.join(dirName, files)))
wav_file_list = sorted(wav_file_list)
for i, wavFile in enumerate(wav_file_list):
[fs, x] = audioBasicIO.readAudioFile(wavFile)
if isinstance(x, int):
continue
x = audioBasicIO.stereo2mono(x)
[mt_term_feats, _, _] = mtFeatureExtraction(x, fs, round(mt_win * fs),
round(mt_step * fs),
round(fs * st_win),
round(fs * st_step))
mt_term_feats = numpy.transpose(mt_term_feats)
if len(all_mt_feats) == 0: # append feature vector
all_mt_feats = mt_term_feats
signal_idx = numpy.zeros((mt_term_feats.shape[0], ))
else:
all_mt_feats = numpy.vstack((all_mt_feats, mt_term_feats))
signal_idx = numpy.append(signal_idx, i * numpy.ones((mt_term_feats.shape[0], )))
return (all_mt_feats, signal_idx, wav_file_list) | def function[dirWavFeatureExtractionNoAveraging, parameter[dirName, mt_win, mt_step, st_win, st_step]]:
constant[
This function extracts the mid-term features of the WAVE
files of a particular folder without averaging each file.
ARGUMENTS:
- dirName: the path of the WAVE directory
- mt_win, mt_step: mid-term window and step (in seconds)
- st_win, st_step: short-term window and step (in seconds)
RETURNS:
- X: A feature matrix
- Y: A matrix of file labels
- filenames:
]
variable[all_mt_feats] assign[=] call[name[numpy].array, parameter[list[[]]]]
variable[signal_idx] assign[=] call[name[numpy].array, parameter[list[[]]]]
variable[process_times] assign[=] list[[]]
variable[types] assign[=] tuple[[<ast.Constant object at 0x7da20e9b17e0>, <ast.Constant object at 0x7da20e9b1900>, <ast.Constant object at 0x7da20e9b1780>, <ast.Constant object at 0x7da20e9b1210>]]
variable[wav_file_list] assign[=] list[[]]
for taget[name[files]] in starred[name[types]] begin[:]
call[name[wav_file_list].extend, parameter[call[name[glob].glob, parameter[call[name[os].path.join, parameter[name[dirName], name[files]]]]]]]
variable[wav_file_list] assign[=] call[name[sorted], parameter[name[wav_file_list]]]
for taget[tuple[[<ast.Name object at 0x7da20e9b3fa0>, <ast.Name object at 0x7da20e9b2860>]]] in starred[call[name[enumerate], parameter[name[wav_file_list]]]] begin[:]
<ast.List object at 0x7da20e9b1de0> assign[=] call[name[audioBasicIO].readAudioFile, parameter[name[wavFile]]]
if call[name[isinstance], parameter[name[x], name[int]]] begin[:]
continue
variable[x] assign[=] call[name[audioBasicIO].stereo2mono, parameter[name[x]]]
<ast.List object at 0x7da20e9b3e50> assign[=] call[name[mtFeatureExtraction], parameter[name[x], name[fs], call[name[round], parameter[binary_operation[name[mt_win] * name[fs]]]], call[name[round], parameter[binary_operation[name[mt_step] * name[fs]]]], call[name[round], parameter[binary_operation[name[fs] * name[st_win]]]], call[name[round], parameter[binary_operation[name[fs] * name[st_step]]]]]]
variable[mt_term_feats] assign[=] call[name[numpy].transpose, parameter[name[mt_term_feats]]]
if compare[call[name[len], parameter[name[all_mt_feats]]] equal[==] constant[0]] begin[:]
variable[all_mt_feats] assign[=] name[mt_term_feats]
variable[signal_idx] assign[=] call[name[numpy].zeros, parameter[tuple[[<ast.Subscript object at 0x7da20e9b1a20>]]]]
return[tuple[[<ast.Name object at 0x7da20e9b3b50>, <ast.Name object at 0x7da20e9b18d0>, <ast.Name object at 0x7da20e9b2bc0>]]] | keyword[def] identifier[dirWavFeatureExtractionNoAveraging] ( identifier[dirName] , identifier[mt_win] , identifier[mt_step] , identifier[st_win] , identifier[st_step] ):
literal[string]
identifier[all_mt_feats] = identifier[numpy] . identifier[array] ([])
identifier[signal_idx] = identifier[numpy] . identifier[array] ([])
identifier[process_times] =[]
identifier[types] =( literal[string] , literal[string] , literal[string] , literal[string] )
identifier[wav_file_list] =[]
keyword[for] identifier[files] keyword[in] identifier[types] :
identifier[wav_file_list] . identifier[extend] ( identifier[glob] . identifier[glob] ( identifier[os] . identifier[path] . identifier[join] ( identifier[dirName] , identifier[files] )))
identifier[wav_file_list] = identifier[sorted] ( identifier[wav_file_list] )
keyword[for] identifier[i] , identifier[wavFile] keyword[in] identifier[enumerate] ( identifier[wav_file_list] ):
[ identifier[fs] , identifier[x] ]= identifier[audioBasicIO] . identifier[readAudioFile] ( identifier[wavFile] )
keyword[if] identifier[isinstance] ( identifier[x] , identifier[int] ):
keyword[continue]
identifier[x] = identifier[audioBasicIO] . identifier[stereo2mono] ( identifier[x] )
[ identifier[mt_term_feats] , identifier[_] , identifier[_] ]= identifier[mtFeatureExtraction] ( identifier[x] , identifier[fs] , identifier[round] ( identifier[mt_win] * identifier[fs] ),
identifier[round] ( identifier[mt_step] * identifier[fs] ),
identifier[round] ( identifier[fs] * identifier[st_win] ),
identifier[round] ( identifier[fs] * identifier[st_step] ))
identifier[mt_term_feats] = identifier[numpy] . identifier[transpose] ( identifier[mt_term_feats] )
keyword[if] identifier[len] ( identifier[all_mt_feats] )== literal[int] :
identifier[all_mt_feats] = identifier[mt_term_feats]
identifier[signal_idx] = identifier[numpy] . identifier[zeros] (( identifier[mt_term_feats] . identifier[shape] [ literal[int] ],))
keyword[else] :
identifier[all_mt_feats] = identifier[numpy] . identifier[vstack] (( identifier[all_mt_feats] , identifier[mt_term_feats] ))
identifier[signal_idx] = identifier[numpy] . identifier[append] ( identifier[signal_idx] , identifier[i] * identifier[numpy] . identifier[ones] (( identifier[mt_term_feats] . identifier[shape] [ literal[int] ],)))
keyword[return] ( identifier[all_mt_feats] , identifier[signal_idx] , identifier[wav_file_list] ) | def dirWavFeatureExtractionNoAveraging(dirName, mt_win, mt_step, st_win, st_step):
"""
This function extracts the mid-term features of the WAVE
files of a particular folder without averaging each file.
ARGUMENTS:
- dirName: the path of the WAVE directory
- mt_win, mt_step: mid-term window and step (in seconds)
- st_win, st_step: short-term window and step (in seconds)
RETURNS:
- X: A feature matrix
- Y: A matrix of file labels
- filenames:
"""
all_mt_feats = numpy.array([])
signal_idx = numpy.array([])
process_times = []
types = ('*.wav', '*.aif', '*.aiff', '*.ogg')
wav_file_list = []
for files in types:
wav_file_list.extend(glob.glob(os.path.join(dirName, files))) # depends on [control=['for'], data=['files']]
wav_file_list = sorted(wav_file_list)
for (i, wavFile) in enumerate(wav_file_list):
[fs, x] = audioBasicIO.readAudioFile(wavFile)
if isinstance(x, int):
continue # depends on [control=['if'], data=[]]
x = audioBasicIO.stereo2mono(x)
[mt_term_feats, _, _] = mtFeatureExtraction(x, fs, round(mt_win * fs), round(mt_step * fs), round(fs * st_win), round(fs * st_step))
mt_term_feats = numpy.transpose(mt_term_feats)
if len(all_mt_feats) == 0: # append feature vector
all_mt_feats = mt_term_feats
signal_idx = numpy.zeros((mt_term_feats.shape[0],)) # depends on [control=['if'], data=[]]
else:
all_mt_feats = numpy.vstack((all_mt_feats, mt_term_feats))
signal_idx = numpy.append(signal_idx, i * numpy.ones((mt_term_feats.shape[0],))) # depends on [control=['for'], data=[]]
return (all_mt_feats, signal_idx, wav_file_list) |
def minvar(X, order, sampling=1., NFFT=default_NFFT):
r"""Minimum Variance Spectral Estimation (MV)
This function computes the minimum variance spectral estimate using
the Musicus procedure. The Burg algorithm from :func:`~spectrum.burg.arburg`
is used for the estimation of the autoregressive parameters.
The MV spectral estimator is given by:
.. math:: P_{MV}(f) = \frac{T}{e^H(f) R^{-1}_p e(f)}
where :math:`R^{-1}_p` is the inverse of the estimated autocorrelation
matrix (Toeplitz) and :math:`e(f)` is the complex sinusoid vector.
:param X: Array of complex or real data samples (length N)
:param int order: Dimension of correlation matrix (AR order = order - 1 )
:param float T: Sample interval (PSD scaling)
:param int NFFT: length of the final PSD
:return:
* PSD - Power spectral density values (two-sided)
* AR - AR coefficients (Burg algorithm)
* k - Reflection coefficients (Burg algorithm)
.. note:: The MV spectral estimator is not a true PSD function because the
area under the MV estimate does not represent the total power in the
measured process. MV minimises the variance of the output of a narrowband
filter and adpats itself to the spectral content of the input data
at each frequency.
:Example: The following example computes a PSD estimate using :func:`minvar`
The output PSD is transformed to a ``centerdc`` PSD and plotted.
.. plot::
:width: 80%
:include-source:
from spectrum import *
from pylab import plot, log10, linspace, xlim
psd, A, k = minvar(marple_data, 15)
psd = twosided_2_centerdc(psd) # switch positive and negative freq
f = linspace(-0.5, 0.5, len(psd))
plot(f, 10 * log10(psd/max(psd)))
xlim(-0.5, 0.5 )
.. seealso::
* External functions used are :meth:`~spectrum.burg.arburg`
and numpy.fft.fft
* :class:`pminvar`, a Class dedicated to MV method.
:Reference: [Marple]_
"""
errors.is_positive_integer(order)
errors.is_positive_integer(NFFT)
psi = np.zeros(NFFT, dtype=complex)
# First, we need to compute the AR values (note that order-1)
A, P, k = arburg (X, order - 1)
# add the order 0
A = np.insert(A, 0, 1.+0j)
# We cannot compare the output with those of MARPLE in a precise way.
# Indeed the burg algorithm is only single precision in fortram code
# So, the AR values are slightly differnt.
# The followign values are those from Marple
"""A[1] = 2.62284255-0.701703191j
A[2] = 4.97930574-2.32781982j
A[3] = 6.78445101-5.02477741j
A[4] =7.85207081-8.01284409j
A[5] =7.39412165-10.7684202j
A[6] =6.03175116-12.7067814j
A[7] =3.80106878-13.6808891j
A[8] =1.48207295-13.2265558j
A[9] =-0.644280195-11.4574194j
A[10] =-2.02386642-8.53268814j
A[11] =-2.32437634-5.25636244j
A[12] =-1.75356281-2.46820402j
A[13] =-0.888899028-0.781434655j
A[14] =-0.287197977-0.0918145925j
P = 0.00636525545
"""
# if we use exactly the same AR coeff and P from Marple Burg output, then
# we can compare the following code. This has been done and reveals that
# the FFT in marple is also slightly different (precision) from this one.
# However, the results are sufficiently close (when NFFT is small) that
# we are confident the following code is correct.
# Compute the psi coefficients
for K in range(0, order):
SUM = 0.
MK = order-K
# Correlate the autoregressive parameters
for I in range(0, order - K):
SUM = SUM + float(MK-2*I) * A[I].conjugate()*A[I+K] # Eq. (12.25)
SUM = SUM/P
if K != 0:
psi[NFFT-K] = SUM.conjugate()
psi[K] = SUM
# Compute FFT of denominator
psi = fft(psi, NFFT)
# Invert the psi terms at this point to get PSD values
PSD = sampling / np.real(psi)
return PSD, A, k | def function[minvar, parameter[X, order, sampling, NFFT]]:
constant[Minimum Variance Spectral Estimation (MV)
This function computes the minimum variance spectral estimate using
the Musicus procedure. The Burg algorithm from :func:`~spectrum.burg.arburg`
is used for the estimation of the autoregressive parameters.
The MV spectral estimator is given by:
.. math:: P_{MV}(f) = \frac{T}{e^H(f) R^{-1}_p e(f)}
where :math:`R^{-1}_p` is the inverse of the estimated autocorrelation
matrix (Toeplitz) and :math:`e(f)` is the complex sinusoid vector.
:param X: Array of complex or real data samples (length N)
:param int order: Dimension of correlation matrix (AR order = order - 1 )
:param float T: Sample interval (PSD scaling)
:param int NFFT: length of the final PSD
:return:
* PSD - Power spectral density values (two-sided)
* AR - AR coefficients (Burg algorithm)
* k - Reflection coefficients (Burg algorithm)
.. note:: The MV spectral estimator is not a true PSD function because the
area under the MV estimate does not represent the total power in the
measured process. MV minimises the variance of the output of a narrowband
filter and adpats itself to the spectral content of the input data
at each frequency.
:Example: The following example computes a PSD estimate using :func:`minvar`
The output PSD is transformed to a ``centerdc`` PSD and plotted.
.. plot::
:width: 80%
:include-source:
from spectrum import *
from pylab import plot, log10, linspace, xlim
psd, A, k = minvar(marple_data, 15)
psd = twosided_2_centerdc(psd) # switch positive and negative freq
f = linspace(-0.5, 0.5, len(psd))
plot(f, 10 * log10(psd/max(psd)))
xlim(-0.5, 0.5 )
.. seealso::
* External functions used are :meth:`~spectrum.burg.arburg`
and numpy.fft.fft
* :class:`pminvar`, a Class dedicated to MV method.
:Reference: [Marple]_
]
call[name[errors].is_positive_integer, parameter[name[order]]]
call[name[errors].is_positive_integer, parameter[name[NFFT]]]
variable[psi] assign[=] call[name[np].zeros, parameter[name[NFFT]]]
<ast.Tuple object at 0x7da20c6abdc0> assign[=] call[name[arburg], parameter[name[X], binary_operation[name[order] - constant[1]]]]
variable[A] assign[=] call[name[np].insert, parameter[name[A], constant[0], binary_operation[constant[1.0] + constant[0j]]]]
constant[A[1] = 2.62284255-0.701703191j
A[2] = 4.97930574-2.32781982j
A[3] = 6.78445101-5.02477741j
A[4] =7.85207081-8.01284409j
A[5] =7.39412165-10.7684202j
A[6] =6.03175116-12.7067814j
A[7] =3.80106878-13.6808891j
A[8] =1.48207295-13.2265558j
A[9] =-0.644280195-11.4574194j
A[10] =-2.02386642-8.53268814j
A[11] =-2.32437634-5.25636244j
A[12] =-1.75356281-2.46820402j
A[13] =-0.888899028-0.781434655j
A[14] =-0.287197977-0.0918145925j
P = 0.00636525545
]
for taget[name[K]] in starred[call[name[range], parameter[constant[0], name[order]]]] begin[:]
variable[SUM] assign[=] constant[0.0]
variable[MK] assign[=] binary_operation[name[order] - name[K]]
for taget[name[I]] in starred[call[name[range], parameter[constant[0], binary_operation[name[order] - name[K]]]]] begin[:]
variable[SUM] assign[=] binary_operation[name[SUM] + binary_operation[binary_operation[call[name[float], parameter[binary_operation[name[MK] - binary_operation[constant[2] * name[I]]]]] * call[call[name[A]][name[I]].conjugate, parameter[]]] * call[name[A]][binary_operation[name[I] + name[K]]]]]
variable[SUM] assign[=] binary_operation[name[SUM] / name[P]]
if compare[name[K] not_equal[!=] constant[0]] begin[:]
call[name[psi]][binary_operation[name[NFFT] - name[K]]] assign[=] call[name[SUM].conjugate, parameter[]]
call[name[psi]][name[K]] assign[=] name[SUM]
variable[psi] assign[=] call[name[fft], parameter[name[psi], name[NFFT]]]
variable[PSD] assign[=] binary_operation[name[sampling] / call[name[np].real, parameter[name[psi]]]]
return[tuple[[<ast.Name object at 0x7da20c6aaf20>, <ast.Name object at 0x7da20c6a99c0>, <ast.Name object at 0x7da20c6a96c0>]]] | keyword[def] identifier[minvar] ( identifier[X] , identifier[order] , identifier[sampling] = literal[int] , identifier[NFFT] = identifier[default_NFFT] ):
literal[string]
identifier[errors] . identifier[is_positive_integer] ( identifier[order] )
identifier[errors] . identifier[is_positive_integer] ( identifier[NFFT] )
identifier[psi] = identifier[np] . identifier[zeros] ( identifier[NFFT] , identifier[dtype] = identifier[complex] )
identifier[A] , identifier[P] , identifier[k] = identifier[arburg] ( identifier[X] , identifier[order] - literal[int] )
identifier[A] = identifier[np] . identifier[insert] ( identifier[A] , literal[int] , literal[int] + literal[int] )
literal[string]
keyword[for] identifier[K] keyword[in] identifier[range] ( literal[int] , identifier[order] ):
identifier[SUM] = literal[int]
identifier[MK] = identifier[order] - identifier[K]
keyword[for] identifier[I] keyword[in] identifier[range] ( literal[int] , identifier[order] - identifier[K] ):
identifier[SUM] = identifier[SUM] + identifier[float] ( identifier[MK] - literal[int] * identifier[I] )* identifier[A] [ identifier[I] ]. identifier[conjugate] ()* identifier[A] [ identifier[I] + identifier[K] ]
identifier[SUM] = identifier[SUM] / identifier[P]
keyword[if] identifier[K] != literal[int] :
identifier[psi] [ identifier[NFFT] - identifier[K] ]= identifier[SUM] . identifier[conjugate] ()
identifier[psi] [ identifier[K] ]= identifier[SUM]
identifier[psi] = identifier[fft] ( identifier[psi] , identifier[NFFT] )
identifier[PSD] = identifier[sampling] / identifier[np] . identifier[real] ( identifier[psi] )
keyword[return] identifier[PSD] , identifier[A] , identifier[k] | def minvar(X, order, sampling=1.0, NFFT=default_NFFT):
"""Minimum Variance Spectral Estimation (MV)
This function computes the minimum variance spectral estimate using
the Musicus procedure. The Burg algorithm from :func:`~spectrum.burg.arburg`
is used for the estimation of the autoregressive parameters.
The MV spectral estimator is given by:
.. math:: P_{MV}(f) = \\frac{T}{e^H(f) R^{-1}_p e(f)}
where :math:`R^{-1}_p` is the inverse of the estimated autocorrelation
matrix (Toeplitz) and :math:`e(f)` is the complex sinusoid vector.
:param X: Array of complex or real data samples (length N)
:param int order: Dimension of correlation matrix (AR order = order - 1 )
:param float T: Sample interval (PSD scaling)
:param int NFFT: length of the final PSD
:return:
* PSD - Power spectral density values (two-sided)
* AR - AR coefficients (Burg algorithm)
* k - Reflection coefficients (Burg algorithm)
.. note:: The MV spectral estimator is not a true PSD function because the
area under the MV estimate does not represent the total power in the
measured process. MV minimises the variance of the output of a narrowband
filter and adpats itself to the spectral content of the input data
at each frequency.
:Example: The following example computes a PSD estimate using :func:`minvar`
The output PSD is transformed to a ``centerdc`` PSD and plotted.
.. plot::
:width: 80%
:include-source:
from spectrum import *
from pylab import plot, log10, linspace, xlim
psd, A, k = minvar(marple_data, 15)
psd = twosided_2_centerdc(psd) # switch positive and negative freq
f = linspace(-0.5, 0.5, len(psd))
plot(f, 10 * log10(psd/max(psd)))
xlim(-0.5, 0.5 )
.. seealso::
* External functions used are :meth:`~spectrum.burg.arburg`
and numpy.fft.fft
* :class:`pminvar`, a Class dedicated to MV method.
:Reference: [Marple]_
"""
errors.is_positive_integer(order)
errors.is_positive_integer(NFFT)
psi = np.zeros(NFFT, dtype=complex)
# First, we need to compute the AR values (note that order-1)
(A, P, k) = arburg(X, order - 1)
# add the order 0
A = np.insert(A, 0, 1.0 + 0j)
# We cannot compare the output with those of MARPLE in a precise way.
# Indeed the burg algorithm is only single precision in fortram code
# So, the AR values are slightly differnt.
# The followign values are those from Marple
'A[1] = 2.62284255-0.701703191j\n A[2] = 4.97930574-2.32781982j\n A[3] = 6.78445101-5.02477741j\n A[4] =7.85207081-8.01284409j\n A[5] =7.39412165-10.7684202j\n A[6] =6.03175116-12.7067814j\n A[7] =3.80106878-13.6808891j\n A[8] =1.48207295-13.2265558j\n A[9] =-0.644280195-11.4574194j\n A[10] =-2.02386642-8.53268814j\n A[11] =-2.32437634-5.25636244j\n A[12] =-1.75356281-2.46820402j\n A[13] =-0.888899028-0.781434655j\n A[14] =-0.287197977-0.0918145925j\n P = 0.00636525545\n '
# if we use exactly the same AR coeff and P from Marple Burg output, then
# we can compare the following code. This has been done and reveals that
# the FFT in marple is also slightly different (precision) from this one.
# However, the results are sufficiently close (when NFFT is small) that
# we are confident the following code is correct.
# Compute the psi coefficients
for K in range(0, order):
SUM = 0.0
MK = order - K
# Correlate the autoregressive parameters
for I in range(0, order - K):
SUM = SUM + float(MK - 2 * I) * A[I].conjugate() * A[I + K] # Eq. (12.25) # depends on [control=['for'], data=['I']]
SUM = SUM / P
if K != 0:
psi[NFFT - K] = SUM.conjugate() # depends on [control=['if'], data=['K']]
psi[K] = SUM # depends on [control=['for'], data=['K']]
# Compute FFT of denominator
psi = fft(psi, NFFT)
# Invert the psi terms at this point to get PSD values
PSD = sampling / np.real(psi)
return (PSD, A, k) |
def load(cls, campaign_dir):
"""
Initialize from an existing database.
It is assumed that the database json file has the same name as its
containing folder.
Args:
campaign_dir (str): The path to the campaign directory.
"""
# We only accept absolute paths
if not Path(campaign_dir).is_absolute():
raise ValueError("Path is not absolute")
# Verify file exists
if not Path(campaign_dir).exists():
raise ValueError("Directory does not exist")
# Extract filename from campaign dir
filename = "%s.json" % os.path.split(campaign_dir)[1]
filepath = os.path.join(campaign_dir, filename)
try:
# Read TinyDB instance from file
tinydb = TinyDB(filepath)
# Make sure the configuration is a valid dictionary
assert set(
tinydb.table('config').all()[0].keys()) == set(['script',
'params',
'commit'])
except:
# Remove the database instance created by tinydb
os.remove(filepath)
raise ValueError("Specified campaign directory seems corrupt")
return cls(tinydb, campaign_dir) | def function[load, parameter[cls, campaign_dir]]:
constant[
Initialize from an existing database.
It is assumed that the database json file has the same name as its
containing folder.
Args:
campaign_dir (str): The path to the campaign directory.
]
if <ast.UnaryOp object at 0x7da18f09d1e0> begin[:]
<ast.Raise object at 0x7da18f09d660>
if <ast.UnaryOp object at 0x7da18f09f130> begin[:]
<ast.Raise object at 0x7da18f09ec80>
variable[filename] assign[=] binary_operation[constant[%s.json] <ast.Mod object at 0x7da2590d6920> call[call[name[os].path.split, parameter[name[campaign_dir]]]][constant[1]]]
variable[filepath] assign[=] call[name[os].path.join, parameter[name[campaign_dir], name[filename]]]
<ast.Try object at 0x7da18f09c5b0>
return[call[name[cls], parameter[name[tinydb], name[campaign_dir]]]] | keyword[def] identifier[load] ( identifier[cls] , identifier[campaign_dir] ):
literal[string]
keyword[if] keyword[not] identifier[Path] ( identifier[campaign_dir] ). identifier[is_absolute] ():
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[Path] ( identifier[campaign_dir] ). identifier[exists] ():
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[filename] = literal[string] % identifier[os] . identifier[path] . identifier[split] ( identifier[campaign_dir] )[ literal[int] ]
identifier[filepath] = identifier[os] . identifier[path] . identifier[join] ( identifier[campaign_dir] , identifier[filename] )
keyword[try] :
identifier[tinydb] = identifier[TinyDB] ( identifier[filepath] )
keyword[assert] identifier[set] (
identifier[tinydb] . identifier[table] ( literal[string] ). identifier[all] ()[ literal[int] ]. identifier[keys] ())== identifier[set] ([ literal[string] ,
literal[string] ,
literal[string] ])
keyword[except] :
identifier[os] . identifier[remove] ( identifier[filepath] )
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[cls] ( identifier[tinydb] , identifier[campaign_dir] ) | def load(cls, campaign_dir):
"""
Initialize from an existing database.
It is assumed that the database json file has the same name as its
containing folder.
Args:
campaign_dir (str): The path to the campaign directory.
"""
# We only accept absolute paths
if not Path(campaign_dir).is_absolute():
raise ValueError('Path is not absolute') # depends on [control=['if'], data=[]]
# Verify file exists
if not Path(campaign_dir).exists():
raise ValueError('Directory does not exist') # depends on [control=['if'], data=[]]
# Extract filename from campaign dir
filename = '%s.json' % os.path.split(campaign_dir)[1]
filepath = os.path.join(campaign_dir, filename)
try:
# Read TinyDB instance from file
tinydb = TinyDB(filepath)
# Make sure the configuration is a valid dictionary
assert set(tinydb.table('config').all()[0].keys()) == set(['script', 'params', 'commit']) # depends on [control=['try'], data=[]]
except:
# Remove the database instance created by tinydb
os.remove(filepath)
raise ValueError('Specified campaign directory seems corrupt') # depends on [control=['except'], data=[]]
return cls(tinydb, campaign_dir) |
def clear(ctx, schema):
"""Clears an entire database collection irrevocably. Use with caution!"""
response = _ask('Are you sure you want to delete the collection "%s"' % (
schema), default='N', data_type='bool')
if response is True:
host, port = ctx.obj['dbhost'].split(':')
client = pymongo.MongoClient(host=host, port=int(port))
database = client[ctx.obj['dbname']]
log("Clearing collection for", schema, lvl=warn,
emitter='MANAGE')
result = database.drop_collection(schema)
if not result['ok']:
log("Could not drop collection:", lvl=error)
log(result, pretty=True, lvl=error)
else:
log("Done") | def function[clear, parameter[ctx, schema]]:
constant[Clears an entire database collection irrevocably. Use with caution!]
variable[response] assign[=] call[name[_ask], parameter[binary_operation[constant[Are you sure you want to delete the collection "%s"] <ast.Mod object at 0x7da2590d6920> name[schema]]]]
if compare[name[response] is constant[True]] begin[:]
<ast.Tuple object at 0x7da1b0f47220> assign[=] call[call[name[ctx].obj][constant[dbhost]].split, parameter[constant[:]]]
variable[client] assign[=] call[name[pymongo].MongoClient, parameter[]]
variable[database] assign[=] call[name[client]][call[name[ctx].obj][constant[dbname]]]
call[name[log], parameter[constant[Clearing collection for], name[schema]]]
variable[result] assign[=] call[name[database].drop_collection, parameter[name[schema]]]
if <ast.UnaryOp object at 0x7da1b0f47940> begin[:]
call[name[log], parameter[constant[Could not drop collection:]]]
call[name[log], parameter[name[result]]] | keyword[def] identifier[clear] ( identifier[ctx] , identifier[schema] ):
literal[string]
identifier[response] = identifier[_ask] ( literal[string] %(
identifier[schema] ), identifier[default] = literal[string] , identifier[data_type] = literal[string] )
keyword[if] identifier[response] keyword[is] keyword[True] :
identifier[host] , identifier[port] = identifier[ctx] . identifier[obj] [ literal[string] ]. identifier[split] ( literal[string] )
identifier[client] = identifier[pymongo] . identifier[MongoClient] ( identifier[host] = identifier[host] , identifier[port] = identifier[int] ( identifier[port] ))
identifier[database] = identifier[client] [ identifier[ctx] . identifier[obj] [ literal[string] ]]
identifier[log] ( literal[string] , identifier[schema] , identifier[lvl] = identifier[warn] ,
identifier[emitter] = literal[string] )
identifier[result] = identifier[database] . identifier[drop_collection] ( identifier[schema] )
keyword[if] keyword[not] identifier[result] [ literal[string] ]:
identifier[log] ( literal[string] , identifier[lvl] = identifier[error] )
identifier[log] ( identifier[result] , identifier[pretty] = keyword[True] , identifier[lvl] = identifier[error] )
keyword[else] :
identifier[log] ( literal[string] ) | def clear(ctx, schema):
"""Clears an entire database collection irrevocably. Use with caution!"""
response = _ask('Are you sure you want to delete the collection "%s"' % schema, default='N', data_type='bool')
if response is True:
(host, port) = ctx.obj['dbhost'].split(':')
client = pymongo.MongoClient(host=host, port=int(port))
database = client[ctx.obj['dbname']]
log('Clearing collection for', schema, lvl=warn, emitter='MANAGE')
result = database.drop_collection(schema)
if not result['ok']:
log('Could not drop collection:', lvl=error)
log(result, pretty=True, lvl=error) # depends on [control=['if'], data=[]]
else:
log('Done') # depends on [control=['if'], data=[]] |
def execute(self):
"""
Execute the actions necessary to perform a `molecule converge` and
returns None.
:return: None
"""
self.print_info()
self._config.provisioner.converge()
self._config.state.change_state('converged', True) | def function[execute, parameter[self]]:
constant[
Execute the actions necessary to perform a `molecule converge` and
returns None.
:return: None
]
call[name[self].print_info, parameter[]]
call[name[self]._config.provisioner.converge, parameter[]]
call[name[self]._config.state.change_state, parameter[constant[converged], constant[True]]] | keyword[def] identifier[execute] ( identifier[self] ):
literal[string]
identifier[self] . identifier[print_info] ()
identifier[self] . identifier[_config] . identifier[provisioner] . identifier[converge] ()
identifier[self] . identifier[_config] . identifier[state] . identifier[change_state] ( literal[string] , keyword[True] ) | def execute(self):
"""
Execute the actions necessary to perform a `molecule converge` and
returns None.
:return: None
"""
self.print_info()
self._config.provisioner.converge()
self._config.state.change_state('converged', True) |
def _get_item_from_search_response(self, response, type_):
""" Returns either a Song or Artist result from search_genius_web """
sections = sorted(response['sections'],
key=lambda sect: sect['type'] == type_,
reverse=True)
for section in sections:
hits = [hit for hit in section['hits'] if hit['type'] == type_]
if hits:
return hits[0]['result'] | def function[_get_item_from_search_response, parameter[self, response, type_]]:
constant[ Returns either a Song or Artist result from search_genius_web ]
variable[sections] assign[=] call[name[sorted], parameter[call[name[response]][constant[sections]]]]
for taget[name[section]] in starred[name[sections]] begin[:]
variable[hits] assign[=] <ast.ListComp object at 0x7da1b22a5db0>
if name[hits] begin[:]
return[call[call[name[hits]][constant[0]]][constant[result]]] | keyword[def] identifier[_get_item_from_search_response] ( identifier[self] , identifier[response] , identifier[type_] ):
literal[string]
identifier[sections] = identifier[sorted] ( identifier[response] [ literal[string] ],
identifier[key] = keyword[lambda] identifier[sect] : identifier[sect] [ literal[string] ]== identifier[type_] ,
identifier[reverse] = keyword[True] )
keyword[for] identifier[section] keyword[in] identifier[sections] :
identifier[hits] =[ identifier[hit] keyword[for] identifier[hit] keyword[in] identifier[section] [ literal[string] ] keyword[if] identifier[hit] [ literal[string] ]== identifier[type_] ]
keyword[if] identifier[hits] :
keyword[return] identifier[hits] [ literal[int] ][ literal[string] ] | def _get_item_from_search_response(self, response, type_):
""" Returns either a Song or Artist result from search_genius_web """
sections = sorted(response['sections'], key=lambda sect: sect['type'] == type_, reverse=True)
for section in sections:
hits = [hit for hit in section['hits'] if hit['type'] == type_]
if hits:
return hits[0]['result'] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['section']] |
def list_all(cls, basic = None):
"""List known settings.
:Parameters:
- `basic`: When `True` then limit output to the basic settings,
when `False` list only the extra settings.
"""
if basic is None:
return [s for s in cls._defs]
else:
return [s.name for s in cls._defs.values() if s.basic == basic] | def function[list_all, parameter[cls, basic]]:
constant[List known settings.
:Parameters:
- `basic`: When `True` then limit output to the basic settings,
when `False` list only the extra settings.
]
if compare[name[basic] is constant[None]] begin[:]
return[<ast.ListComp object at 0x7da204622710>] | keyword[def] identifier[list_all] ( identifier[cls] , identifier[basic] = keyword[None] ):
literal[string]
keyword[if] identifier[basic] keyword[is] keyword[None] :
keyword[return] [ identifier[s] keyword[for] identifier[s] keyword[in] identifier[cls] . identifier[_defs] ]
keyword[else] :
keyword[return] [ identifier[s] . identifier[name] keyword[for] identifier[s] keyword[in] identifier[cls] . identifier[_defs] . identifier[values] () keyword[if] identifier[s] . identifier[basic] == identifier[basic] ] | def list_all(cls, basic=None):
"""List known settings.
:Parameters:
- `basic`: When `True` then limit output to the basic settings,
when `False` list only the extra settings.
"""
if basic is None:
return [s for s in cls._defs] # depends on [control=['if'], data=[]]
else:
return [s.name for s in cls._defs.values() if s.basic == basic] |
def _convert(reddit_session, data):
"""Return a Redditor object from the data."""
retval = Redditor(reddit_session, data['name'], fetch=False)
retval.id = data['id'].split('_')[1] # pylint: disable=C0103,W0201
return retval | def function[_convert, parameter[reddit_session, data]]:
constant[Return a Redditor object from the data.]
variable[retval] assign[=] call[name[Redditor], parameter[name[reddit_session], call[name[data]][constant[name]]]]
name[retval].id assign[=] call[call[call[name[data]][constant[id]].split, parameter[constant[_]]]][constant[1]]
return[name[retval]] | keyword[def] identifier[_convert] ( identifier[reddit_session] , identifier[data] ):
literal[string]
identifier[retval] = identifier[Redditor] ( identifier[reddit_session] , identifier[data] [ literal[string] ], identifier[fetch] = keyword[False] )
identifier[retval] . identifier[id] = identifier[data] [ literal[string] ]. identifier[split] ( literal[string] )[ literal[int] ]
keyword[return] identifier[retval] | def _convert(reddit_session, data):
"""Return a Redditor object from the data."""
retval = Redditor(reddit_session, data['name'], fetch=False)
retval.id = data['id'].split('_')[1] # pylint: disable=C0103,W0201
return retval |
def findattr(self, name, resolved=True):
"""
Find an attribute type definition.
@param name: An attribute name.
@type name: basestring
@param resolved: A flag indicating that the fully resolved type should
be returned.
@type resolved: boolean
@return: The found schema I{type}
@rtype: L{xsd.sxbase.SchemaObject}
"""
name = '@%s' % name
parent = self.top().resolved
if parent is None:
result, ancestry = self.query(name, node)
else:
result, ancestry = self.getchild(name, parent)
if result is None:
return result
if resolved:
result = result.resolve()
return result | def function[findattr, parameter[self, name, resolved]]:
constant[
Find an attribute type definition.
@param name: An attribute name.
@type name: basestring
@param resolved: A flag indicating that the fully resolved type should
be returned.
@type resolved: boolean
@return: The found schema I{type}
@rtype: L{xsd.sxbase.SchemaObject}
]
variable[name] assign[=] binary_operation[constant[@%s] <ast.Mod object at 0x7da2590d6920> name[name]]
variable[parent] assign[=] call[name[self].top, parameter[]].resolved
if compare[name[parent] is constant[None]] begin[:]
<ast.Tuple object at 0x7da1b08e4f70> assign[=] call[name[self].query, parameter[name[name], name[node]]]
if compare[name[result] is constant[None]] begin[:]
return[name[result]]
if name[resolved] begin[:]
variable[result] assign[=] call[name[result].resolve, parameter[]]
return[name[result]] | keyword[def] identifier[findattr] ( identifier[self] , identifier[name] , identifier[resolved] = keyword[True] ):
literal[string]
identifier[name] = literal[string] % identifier[name]
identifier[parent] = identifier[self] . identifier[top] (). identifier[resolved]
keyword[if] identifier[parent] keyword[is] keyword[None] :
identifier[result] , identifier[ancestry] = identifier[self] . identifier[query] ( identifier[name] , identifier[node] )
keyword[else] :
identifier[result] , identifier[ancestry] = identifier[self] . identifier[getchild] ( identifier[name] , identifier[parent] )
keyword[if] identifier[result] keyword[is] keyword[None] :
keyword[return] identifier[result]
keyword[if] identifier[resolved] :
identifier[result] = identifier[result] . identifier[resolve] ()
keyword[return] identifier[result] | def findattr(self, name, resolved=True):
"""
Find an attribute type definition.
@param name: An attribute name.
@type name: basestring
@param resolved: A flag indicating that the fully resolved type should
be returned.
@type resolved: boolean
@return: The found schema I{type}
@rtype: L{xsd.sxbase.SchemaObject}
"""
name = '@%s' % name
parent = self.top().resolved
if parent is None:
(result, ancestry) = self.query(name, node) # depends on [control=['if'], data=[]]
else:
(result, ancestry) = self.getchild(name, parent)
if result is None:
return result # depends on [control=['if'], data=['result']]
if resolved:
result = result.resolve() # depends on [control=['if'], data=[]]
return result |
def get_description(self):
"""
Gets the description of the command. If its not supplied the first sentence of the doc string is used.
"""
if self.description:
return self.description
elif self.__doc__ and self.__doc__.strip():
return self.__doc__.strip().split('.')[0] + '.'
else:
return '' | def function[get_description, parameter[self]]:
constant[
Gets the description of the command. If its not supplied the first sentence of the doc string is used.
]
if name[self].description begin[:]
return[name[self].description] | keyword[def] identifier[get_description] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[description] :
keyword[return] identifier[self] . identifier[description]
keyword[elif] identifier[self] . identifier[__doc__] keyword[and] identifier[self] . identifier[__doc__] . identifier[strip] ():
keyword[return] identifier[self] . identifier[__doc__] . identifier[strip] (). identifier[split] ( literal[string] )[ literal[int] ]+ literal[string]
keyword[else] :
keyword[return] literal[string] | def get_description(self):
"""
Gets the description of the command. If its not supplied the first sentence of the doc string is used.
"""
if self.description:
return self.description # depends on [control=['if'], data=[]]
elif self.__doc__ and self.__doc__.strip():
return self.__doc__.strip().split('.')[0] + '.' # depends on [control=['if'], data=[]]
else:
return '' |
def common_prefix(s1, s2):
"Return prefix common of 2 strings"
if not s1 or not s2:
return ""
k = 0
while s1[k] == s2[k]:
k = k + 1
if k >= len(s1) or k >= len(s2):
return s1[0:k]
return s1[0:k] | def function[common_prefix, parameter[s1, s2]]:
constant[Return prefix common of 2 strings]
if <ast.BoolOp object at 0x7da1b1e11db0> begin[:]
return[constant[]]
variable[k] assign[=] constant[0]
while compare[call[name[s1]][name[k]] equal[==] call[name[s2]][name[k]]] begin[:]
variable[k] assign[=] binary_operation[name[k] + constant[1]]
if <ast.BoolOp object at 0x7da1b1e101f0> begin[:]
return[call[name[s1]][<ast.Slice object at 0x7da1b1efbb80>]]
return[call[name[s1]][<ast.Slice object at 0x7da1b1efa2f0>]] | keyword[def] identifier[common_prefix] ( identifier[s1] , identifier[s2] ):
literal[string]
keyword[if] keyword[not] identifier[s1] keyword[or] keyword[not] identifier[s2] :
keyword[return] literal[string]
identifier[k] = literal[int]
keyword[while] identifier[s1] [ identifier[k] ]== identifier[s2] [ identifier[k] ]:
identifier[k] = identifier[k] + literal[int]
keyword[if] identifier[k] >= identifier[len] ( identifier[s1] ) keyword[or] identifier[k] >= identifier[len] ( identifier[s2] ):
keyword[return] identifier[s1] [ literal[int] : identifier[k] ]
keyword[return] identifier[s1] [ literal[int] : identifier[k] ] | def common_prefix(s1, s2):
"""Return prefix common of 2 strings"""
if not s1 or not s2:
return '' # depends on [control=['if'], data=[]]
k = 0
while s1[k] == s2[k]:
k = k + 1
if k >= len(s1) or k >= len(s2):
return s1[0:k] # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
return s1[0:k] |
async def upload_artifacts(context, files):
"""Compress and upload the requested files from ``artifact_dir``, preserving relative paths.
Compression only occurs with files known to be supported.
This function expects the directory structure in ``artifact_dir`` to remain
the same. So if we want the files in ``public/...``, create an
``artifact_dir/public`` and put the files in there.
Args:
context (scriptworker.context.Context): the scriptworker context.
files (list of str): files that should be uploaded as artifacts
Raises:
Exception: any exceptions the tasks raise.
"""
def to_upload_future(target_path):
path = os.path.join(context.config['artifact_dir'], target_path)
content_type, content_encoding = compress_artifact_if_supported(path)
return asyncio.ensure_future(retry_create_artifact(
context,
path,
target_path=target_path,
content_type=content_type,
content_encoding=content_encoding,
))
tasks = list(map(to_upload_future, files))
await raise_future_exceptions(tasks) | <ast.AsyncFunctionDef object at 0x7da18bccbfa0> | keyword[async] keyword[def] identifier[upload_artifacts] ( identifier[context] , identifier[files] ):
literal[string]
keyword[def] identifier[to_upload_future] ( identifier[target_path] ):
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[context] . identifier[config] [ literal[string] ], identifier[target_path] )
identifier[content_type] , identifier[content_encoding] = identifier[compress_artifact_if_supported] ( identifier[path] )
keyword[return] identifier[asyncio] . identifier[ensure_future] ( identifier[retry_create_artifact] (
identifier[context] ,
identifier[path] ,
identifier[target_path] = identifier[target_path] ,
identifier[content_type] = identifier[content_type] ,
identifier[content_encoding] = identifier[content_encoding] ,
))
identifier[tasks] = identifier[list] ( identifier[map] ( identifier[to_upload_future] , identifier[files] ))
keyword[await] identifier[raise_future_exceptions] ( identifier[tasks] ) | async def upload_artifacts(context, files):
"""Compress and upload the requested files from ``artifact_dir``, preserving relative paths.
Compression only occurs with files known to be supported.
This function expects the directory structure in ``artifact_dir`` to remain
the same. So if we want the files in ``public/...``, create an
``artifact_dir/public`` and put the files in there.
Args:
context (scriptworker.context.Context): the scriptworker context.
files (list of str): files that should be uploaded as artifacts
Raises:
Exception: any exceptions the tasks raise.
"""
def to_upload_future(target_path):
path = os.path.join(context.config['artifact_dir'], target_path)
(content_type, content_encoding) = compress_artifact_if_supported(path)
return asyncio.ensure_future(retry_create_artifact(context, path, target_path=target_path, content_type=content_type, content_encoding=content_encoding))
tasks = list(map(to_upload_future, files))
await raise_future_exceptions(tasks) |
def reftrack_uptodate_data(rt, role):
"""Return the data for the uptodate status
:param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
:type rt: :class:`jukeboxcore.reftrack.Reftrack`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the uptodate status
:rtype: depending on role
:raises: None
"""
uptodate = rt.uptodate()
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
if uptodate:
return "Yes"
else:
return "No"
if role == QtCore.Qt.ForegroundRole:
if uptodate:
return QtGui.QColor(*UPTODATE_RGB)
elif rt.status():
return QtGui.QColor(*OUTDATED_RGB) | def function[reftrack_uptodate_data, parameter[rt, role]]:
constant[Return the data for the uptodate status
:param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
:type rt: :class:`jukeboxcore.reftrack.Reftrack`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the uptodate status
:rtype: depending on role
:raises: None
]
variable[uptodate] assign[=] call[name[rt].uptodate, parameter[]]
if <ast.BoolOp object at 0x7da1b1649300> begin[:]
if name[uptodate] begin[:]
return[constant[Yes]]
if compare[name[role] equal[==] name[QtCore].Qt.ForegroundRole] begin[:]
if name[uptodate] begin[:]
return[call[name[QtGui].QColor, parameter[<ast.Starred object at 0x7da1b164b9d0>]]] | keyword[def] identifier[reftrack_uptodate_data] ( identifier[rt] , identifier[role] ):
literal[string]
identifier[uptodate] = identifier[rt] . identifier[uptodate] ()
keyword[if] identifier[role] == identifier[QtCore] . identifier[Qt] . identifier[DisplayRole] keyword[or] identifier[role] == identifier[QtCore] . identifier[Qt] . identifier[EditRole] :
keyword[if] identifier[uptodate] :
keyword[return] literal[string]
keyword[else] :
keyword[return] literal[string]
keyword[if] identifier[role] == identifier[QtCore] . identifier[Qt] . identifier[ForegroundRole] :
keyword[if] identifier[uptodate] :
keyword[return] identifier[QtGui] . identifier[QColor] (* identifier[UPTODATE_RGB] )
keyword[elif] identifier[rt] . identifier[status] ():
keyword[return] identifier[QtGui] . identifier[QColor] (* identifier[OUTDATED_RGB] ) | def reftrack_uptodate_data(rt, role):
"""Return the data for the uptodate status
:param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
:type rt: :class:`jukeboxcore.reftrack.Reftrack`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the uptodate status
:rtype: depending on role
:raises: None
"""
uptodate = rt.uptodate()
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
if uptodate:
return 'Yes' # depends on [control=['if'], data=[]]
else:
return 'No' # depends on [control=['if'], data=[]]
if role == QtCore.Qt.ForegroundRole:
if uptodate:
return QtGui.QColor(*UPTODATE_RGB) # depends on [control=['if'], data=[]]
elif rt.status():
return QtGui.QColor(*OUTDATED_RGB) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def initialize(self, store):
"""Common initialization of handlers happens here. If additional
initialization is required, this method must either be called with
``super`` or the child class must assign the ``store`` attribute and
register itself with the store.
"""
assert isinstance(store, stores.BaseStore)
self.messages = Queue()
self.store = store
self.store.register(self) | def function[initialize, parameter[self, store]]:
constant[Common initialization of handlers happens here. If additional
initialization is required, this method must either be called with
``super`` or the child class must assign the ``store`` attribute and
register itself with the store.
]
assert[call[name[isinstance], parameter[name[store], name[stores].BaseStore]]]
name[self].messages assign[=] call[name[Queue], parameter[]]
name[self].store assign[=] name[store]
call[name[self].store.register, parameter[name[self]]] | keyword[def] identifier[initialize] ( identifier[self] , identifier[store] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[store] , identifier[stores] . identifier[BaseStore] )
identifier[self] . identifier[messages] = identifier[Queue] ()
identifier[self] . identifier[store] = identifier[store]
identifier[self] . identifier[store] . identifier[register] ( identifier[self] ) | def initialize(self, store):
"""Common initialization of handlers happens here. If additional
initialization is required, this method must either be called with
``super`` or the child class must assign the ``store`` attribute and
register itself with the store.
"""
assert isinstance(store, stores.BaseStore)
self.messages = Queue()
self.store = store
self.store.register(self) |
def DeleteOldFeedItems(client, feed_item_ids, feed):
"""Deletes the old feed items for which extension settings have been created.
Args:
client: an AdWordsClient instance.
feed_item_ids: a list of Feed Item Ids.
feed: the Feed containing the given Feed Item Ids.
"""
if not feed_item_ids:
return
feed_item_service = client.GetService('FeedItemService', 'v201809')
operations = [{
'operator': 'REMOVE',
'operand': {
'feedId': feed['id'],
'feedItemId': feed_item_id
}
} for feed_item_id in feed_item_ids]
feed_item_service.mutate(operations) | def function[DeleteOldFeedItems, parameter[client, feed_item_ids, feed]]:
constant[Deletes the old feed items for which extension settings have been created.
Args:
client: an AdWordsClient instance.
feed_item_ids: a list of Feed Item Ids.
feed: the Feed containing the given Feed Item Ids.
]
if <ast.UnaryOp object at 0x7da1b1b0ce50> begin[:]
return[None]
variable[feed_item_service] assign[=] call[name[client].GetService, parameter[constant[FeedItemService], constant[v201809]]]
variable[operations] assign[=] <ast.ListComp object at 0x7da1b1b0e6b0>
call[name[feed_item_service].mutate, parameter[name[operations]]] | keyword[def] identifier[DeleteOldFeedItems] ( identifier[client] , identifier[feed_item_ids] , identifier[feed] ):
literal[string]
keyword[if] keyword[not] identifier[feed_item_ids] :
keyword[return]
identifier[feed_item_service] = identifier[client] . identifier[GetService] ( literal[string] , literal[string] )
identifier[operations] =[{
literal[string] : literal[string] ,
literal[string] :{
literal[string] : identifier[feed] [ literal[string] ],
literal[string] : identifier[feed_item_id]
}
} keyword[for] identifier[feed_item_id] keyword[in] identifier[feed_item_ids] ]
identifier[feed_item_service] . identifier[mutate] ( identifier[operations] ) | def DeleteOldFeedItems(client, feed_item_ids, feed):
"""Deletes the old feed items for which extension settings have been created.
Args:
client: an AdWordsClient instance.
feed_item_ids: a list of Feed Item Ids.
feed: the Feed containing the given Feed Item Ids.
"""
if not feed_item_ids:
return # depends on [control=['if'], data=[]]
feed_item_service = client.GetService('FeedItemService', 'v201809')
operations = [{'operator': 'REMOVE', 'operand': {'feedId': feed['id'], 'feedItemId': feed_item_id}} for feed_item_id in feed_item_ids]
feed_item_service.mutate(operations) |
def _as_reference_point(self) -> np.ndarray:
""" Return classification information as reference point
"""
ref_val = []
for fn, f in self._classification.items():
if f[0] == "<":
ref_val.append(self._method.problem.ideal[fn])
elif f[0] == "<>":
ref_val.append(self._method.problem.nadir[fn])
else:
ref_val.append(f[1])
return np.array(ref_val) | def function[_as_reference_point, parameter[self]]:
constant[ Return classification information as reference point
]
variable[ref_val] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20c794490>, <ast.Name object at 0x7da20c794430>]]] in starred[call[name[self]._classification.items, parameter[]]] begin[:]
if compare[call[name[f]][constant[0]] equal[==] constant[<]] begin[:]
call[name[ref_val].append, parameter[call[name[self]._method.problem.ideal][name[fn]]]]
return[call[name[np].array, parameter[name[ref_val]]]] | keyword[def] identifier[_as_reference_point] ( identifier[self] )-> identifier[np] . identifier[ndarray] :
literal[string]
identifier[ref_val] =[]
keyword[for] identifier[fn] , identifier[f] keyword[in] identifier[self] . identifier[_classification] . identifier[items] ():
keyword[if] identifier[f] [ literal[int] ]== literal[string] :
identifier[ref_val] . identifier[append] ( identifier[self] . identifier[_method] . identifier[problem] . identifier[ideal] [ identifier[fn] ])
keyword[elif] identifier[f] [ literal[int] ]== literal[string] :
identifier[ref_val] . identifier[append] ( identifier[self] . identifier[_method] . identifier[problem] . identifier[nadir] [ identifier[fn] ])
keyword[else] :
identifier[ref_val] . identifier[append] ( identifier[f] [ literal[int] ])
keyword[return] identifier[np] . identifier[array] ( identifier[ref_val] ) | def _as_reference_point(self) -> np.ndarray:
""" Return classification information as reference point
"""
ref_val = []
for (fn, f) in self._classification.items():
if f[0] == '<':
ref_val.append(self._method.problem.ideal[fn]) # depends on [control=['if'], data=[]]
elif f[0] == '<>':
ref_val.append(self._method.problem.nadir[fn]) # depends on [control=['if'], data=[]]
else:
ref_val.append(f[1]) # depends on [control=['for'], data=[]]
return np.array(ref_val) |
def findmodules(path, recurse=False):
"""
Looks up the modules for the given path and returns a list of the
packages. If the recurse flag is set to True, then it will look
through the package recursively.
:param path | <str>
recurse | <bool>
:return ([<str>, ..] modules, [<str>, ..] paths)
"""
output = set()
roots = set()
for root, folders, files in os.walk(path):
# add packages
for folder in folders:
pkgpath = os.path.join(root, folder, '__init__.py')
if os.path.exists(pkgpath):
output.add(packageFromPath(pkgpath))
# add modules
rootpth = packageRootPath(root)
rootpkg = packageFromPath(root)
roots.add(rootpth)
for file_ in files:
name, ext = os.path.splitext(file_)
if ext not in ('.py', '.pyo', '.pyc'):
continue
if name in ('__init__', '__plugins__'):
continue
if rootpkg:
output.add(rootpkg + '.' + name)
else:
output.add(name)
if not recurse:
break
return list(output), list(roots) | def function[findmodules, parameter[path, recurse]]:
constant[
Looks up the modules for the given path and returns a list of the
packages. If the recurse flag is set to True, then it will look
through the package recursively.
:param path | <str>
recurse | <bool>
:return ([<str>, ..] modules, [<str>, ..] paths)
]
variable[output] assign[=] call[name[set], parameter[]]
variable[roots] assign[=] call[name[set], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b287b6d0>, <ast.Name object at 0x7da1b28792d0>, <ast.Name object at 0x7da1b2878040>]]] in starred[call[name[os].walk, parameter[name[path]]]] begin[:]
for taget[name[folder]] in starred[name[folders]] begin[:]
variable[pkgpath] assign[=] call[name[os].path.join, parameter[name[root], name[folder], constant[__init__.py]]]
if call[name[os].path.exists, parameter[name[pkgpath]]] begin[:]
call[name[output].add, parameter[call[name[packageFromPath], parameter[name[pkgpath]]]]]
variable[rootpth] assign[=] call[name[packageRootPath], parameter[name[root]]]
variable[rootpkg] assign[=] call[name[packageFromPath], parameter[name[root]]]
call[name[roots].add, parameter[name[rootpth]]]
for taget[name[file_]] in starred[name[files]] begin[:]
<ast.Tuple object at 0x7da1b2879d50> assign[=] call[name[os].path.splitext, parameter[name[file_]]]
if compare[name[ext] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b287b550>, <ast.Constant object at 0x7da1b2878dc0>, <ast.Constant object at 0x7da1b287a2c0>]]] begin[:]
continue
if compare[name[name] in tuple[[<ast.Constant object at 0x7da1b2879210>, <ast.Constant object at 0x7da1b287b370>]]] begin[:]
continue
if name[rootpkg] begin[:]
call[name[output].add, parameter[binary_operation[binary_operation[name[rootpkg] + constant[.]] + name[name]]]]
if <ast.UnaryOp object at 0x7da1b2878910> begin[:]
break
return[tuple[[<ast.Call object at 0x7da1b287a980>, <ast.Call object at 0x7da1b28782b0>]]] | keyword[def] identifier[findmodules] ( identifier[path] , identifier[recurse] = keyword[False] ):
literal[string]
identifier[output] = identifier[set] ()
identifier[roots] = identifier[set] ()
keyword[for] identifier[root] , identifier[folders] , identifier[files] keyword[in] identifier[os] . identifier[walk] ( identifier[path] ):
keyword[for] identifier[folder] keyword[in] identifier[folders] :
identifier[pkgpath] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[folder] , literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[pkgpath] ):
identifier[output] . identifier[add] ( identifier[packageFromPath] ( identifier[pkgpath] ))
identifier[rootpth] = identifier[packageRootPath] ( identifier[root] )
identifier[rootpkg] = identifier[packageFromPath] ( identifier[root] )
identifier[roots] . identifier[add] ( identifier[rootpth] )
keyword[for] identifier[file_] keyword[in] identifier[files] :
identifier[name] , identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[file_] )
keyword[if] identifier[ext] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[continue]
keyword[if] identifier[name] keyword[in] ( literal[string] , literal[string] ):
keyword[continue]
keyword[if] identifier[rootpkg] :
identifier[output] . identifier[add] ( identifier[rootpkg] + literal[string] + identifier[name] )
keyword[else] :
identifier[output] . identifier[add] ( identifier[name] )
keyword[if] keyword[not] identifier[recurse] :
keyword[break]
keyword[return] identifier[list] ( identifier[output] ), identifier[list] ( identifier[roots] ) | def findmodules(path, recurse=False):
"""
Looks up the modules for the given path and returns a list of the
packages. If the recurse flag is set to True, then it will look
through the package recursively.
:param path | <str>
recurse | <bool>
:return ([<str>, ..] modules, [<str>, ..] paths)
"""
output = set()
roots = set()
for (root, folders, files) in os.walk(path):
# add packages
for folder in folders:
pkgpath = os.path.join(root, folder, '__init__.py')
if os.path.exists(pkgpath):
output.add(packageFromPath(pkgpath)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['folder']]
# add modules
rootpth = packageRootPath(root)
rootpkg = packageFromPath(root)
roots.add(rootpth)
for file_ in files:
(name, ext) = os.path.splitext(file_)
if ext not in ('.py', '.pyo', '.pyc'):
continue # depends on [control=['if'], data=[]]
if name in ('__init__', '__plugins__'):
continue # depends on [control=['if'], data=[]]
if rootpkg:
output.add(rootpkg + '.' + name) # depends on [control=['if'], data=[]]
else:
output.add(name) # depends on [control=['for'], data=['file_']]
if not recurse:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return (list(output), list(roots)) |
def _parse_execution_args(execution_args):
""" Parses execution arguments provided by user and raises an error if something is wrong
"""
if not isinstance(execution_args, (list, tuple)):
raise ValueError("Parameter 'execution_args' should be a list")
return [EOWorkflow.parse_input_args(input_args) for input_args in execution_args] | def function[_parse_execution_args, parameter[execution_args]]:
constant[ Parses execution arguments provided by user and raises an error if something is wrong
]
if <ast.UnaryOp object at 0x7da18f8102b0> begin[:]
<ast.Raise object at 0x7da18f00c040>
return[<ast.ListComp object at 0x7da18f00e320>] | keyword[def] identifier[_parse_execution_args] ( identifier[execution_args] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[execution_args] ,( identifier[list] , identifier[tuple] )):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] [ identifier[EOWorkflow] . identifier[parse_input_args] ( identifier[input_args] ) keyword[for] identifier[input_args] keyword[in] identifier[execution_args] ] | def _parse_execution_args(execution_args):
""" Parses execution arguments provided by user and raises an error if something is wrong
"""
if not isinstance(execution_args, (list, tuple)):
raise ValueError("Parameter 'execution_args' should be a list") # depends on [control=['if'], data=[]]
return [EOWorkflow.parse_input_args(input_args) for input_args in execution_args] |
def updateContextImage(self, contextpar):
""" Reset the name of the context image to `None` if parameter
``context`` is `False`.
"""
self.createContext = contextpar
if not contextpar:
log.info('No context image will be created for %s' %
self._filename)
self.outputNames['outContext'] = None | def function[updateContextImage, parameter[self, contextpar]]:
constant[ Reset the name of the context image to `None` if parameter
``context`` is `False`.
]
name[self].createContext assign[=] name[contextpar]
if <ast.UnaryOp object at 0x7da1b1bbd8d0> begin[:]
call[name[log].info, parameter[binary_operation[constant[No context image will be created for %s] <ast.Mod object at 0x7da2590d6920> name[self]._filename]]]
call[name[self].outputNames][constant[outContext]] assign[=] constant[None] | keyword[def] identifier[updateContextImage] ( identifier[self] , identifier[contextpar] ):
literal[string]
identifier[self] . identifier[createContext] = identifier[contextpar]
keyword[if] keyword[not] identifier[contextpar] :
identifier[log] . identifier[info] ( literal[string] %
identifier[self] . identifier[_filename] )
identifier[self] . identifier[outputNames] [ literal[string] ]= keyword[None] | def updateContextImage(self, contextpar):
""" Reset the name of the context image to `None` if parameter
``context`` is `False`.
"""
self.createContext = contextpar
if not contextpar:
log.info('No context image will be created for %s' % self._filename)
self.outputNames['outContext'] = None # depends on [control=['if'], data=[]] |
def write(self, data: bytes) -> int:
"""Returns the number of (encrypted) bytes sent.
"""
if self._sock is None:
raise IOError('Internal socket set to None; cannot perform handshake.')
if not self._is_handshake_completed:
raise IOError('SSL Handshake was not completed; cannot send data.')
# Pass the cleartext data to the SSL engine
self._ssl.write(data)
# Recover the corresponding encrypted data
final_length = self._flush_ssl_engine()
return final_length | def function[write, parameter[self, data]]:
constant[Returns the number of (encrypted) bytes sent.
]
if compare[name[self]._sock is constant[None]] begin[:]
<ast.Raise object at 0x7da18c4ccd30>
if <ast.UnaryOp object at 0x7da18c4ce320> begin[:]
<ast.Raise object at 0x7da18c4cc3d0>
call[name[self]._ssl.write, parameter[name[data]]]
variable[final_length] assign[=] call[name[self]._flush_ssl_engine, parameter[]]
return[name[final_length]] | keyword[def] identifier[write] ( identifier[self] , identifier[data] : identifier[bytes] )-> identifier[int] :
literal[string]
keyword[if] identifier[self] . identifier[_sock] keyword[is] keyword[None] :
keyword[raise] identifier[IOError] ( literal[string] )
keyword[if] keyword[not] identifier[self] . identifier[_is_handshake_completed] :
keyword[raise] identifier[IOError] ( literal[string] )
identifier[self] . identifier[_ssl] . identifier[write] ( identifier[data] )
identifier[final_length] = identifier[self] . identifier[_flush_ssl_engine] ()
keyword[return] identifier[final_length] | def write(self, data: bytes) -> int:
"""Returns the number of (encrypted) bytes sent.
"""
if self._sock is None:
raise IOError('Internal socket set to None; cannot perform handshake.') # depends on [control=['if'], data=[]]
if not self._is_handshake_completed:
raise IOError('SSL Handshake was not completed; cannot send data.') # depends on [control=['if'], data=[]]
# Pass the cleartext data to the SSL engine
self._ssl.write(data)
# Recover the corresponding encrypted data
final_length = self._flush_ssl_engine()
return final_length |
def _normalize_slice(self, key, clamp=False):
"""Return a slice equivalent to the input *key*, standardized."""
if key.start is None:
start = 0
else:
start = (len(self) + key.start) if key.start < 0 else key.start
if key.stop is None or key.stop == maxsize:
stop = len(self) if clamp else None
else:
stop = (len(self) + key.stop) if key.stop < 0 else key.stop
return slice(start, stop, key.step or 1) | def function[_normalize_slice, parameter[self, key, clamp]]:
constant[Return a slice equivalent to the input *key*, standardized.]
if compare[name[key].start is constant[None]] begin[:]
variable[start] assign[=] constant[0]
if <ast.BoolOp object at 0x7da20c794460> begin[:]
variable[stop] assign[=] <ast.IfExp object at 0x7da20c796020>
return[call[name[slice], parameter[name[start], name[stop], <ast.BoolOp object at 0x7da18f813880>]]] | keyword[def] identifier[_normalize_slice] ( identifier[self] , identifier[key] , identifier[clamp] = keyword[False] ):
literal[string]
keyword[if] identifier[key] . identifier[start] keyword[is] keyword[None] :
identifier[start] = literal[int]
keyword[else] :
identifier[start] =( identifier[len] ( identifier[self] )+ identifier[key] . identifier[start] ) keyword[if] identifier[key] . identifier[start] < literal[int] keyword[else] identifier[key] . identifier[start]
keyword[if] identifier[key] . identifier[stop] keyword[is] keyword[None] keyword[or] identifier[key] . identifier[stop] == identifier[maxsize] :
identifier[stop] = identifier[len] ( identifier[self] ) keyword[if] identifier[clamp] keyword[else] keyword[None]
keyword[else] :
identifier[stop] =( identifier[len] ( identifier[self] )+ identifier[key] . identifier[stop] ) keyword[if] identifier[key] . identifier[stop] < literal[int] keyword[else] identifier[key] . identifier[stop]
keyword[return] identifier[slice] ( identifier[start] , identifier[stop] , identifier[key] . identifier[step] keyword[or] literal[int] ) | def _normalize_slice(self, key, clamp=False):
"""Return a slice equivalent to the input *key*, standardized."""
if key.start is None:
start = 0 # depends on [control=['if'], data=[]]
else:
start = len(self) + key.start if key.start < 0 else key.start
if key.stop is None or key.stop == maxsize:
stop = len(self) if clamp else None # depends on [control=['if'], data=[]]
else:
stop = len(self) + key.stop if key.stop < 0 else key.stop
return slice(start, stop, key.step or 1) |
def discard_until(fd, s, deadline):
"""Read chunks from `fd` until one is encountered that ends with `s`. This
is used to skip output produced by ``/etc/profile``, ``/etc/motd`` and
mandatory SSH banners while waiting for :attr:`Stream.EC0_MARKER` to
appear, indicating the first stage is ready to receive the compressed
:mod:`mitogen.core` source.
:param int fd:
File descriptor to read from.
:param bytes s:
Marker string to discard until encountered.
:param float deadline:
Absolute UNIX timestamp after which timeout should occur.
:raises mitogen.core.TimeoutError:
Attempt to read beyond deadline.
:raises mitogen.parent.EofError:
All streams indicated EOF, suggesting the child process has exitted.
:raises mitogen.core.StreamError:
Attempt to read past end of file.
"""
it = iter_read([fd], deadline)
try:
for buf in it:
if IOLOG.level == logging.DEBUG:
for line in buf.splitlines():
IOLOG.debug('discard_until: discarding %r', line)
if buf.endswith(s):
return
finally:
it.close() | def function[discard_until, parameter[fd, s, deadline]]:
constant[Read chunks from `fd` until one is encountered that ends with `s`. This
is used to skip output produced by ``/etc/profile``, ``/etc/motd`` and
mandatory SSH banners while waiting for :attr:`Stream.EC0_MARKER` to
appear, indicating the first stage is ready to receive the compressed
:mod:`mitogen.core` source.
:param int fd:
File descriptor to read from.
:param bytes s:
Marker string to discard until encountered.
:param float deadline:
Absolute UNIX timestamp after which timeout should occur.
:raises mitogen.core.TimeoutError:
Attempt to read beyond deadline.
:raises mitogen.parent.EofError:
All streams indicated EOF, suggesting the child process has exitted.
:raises mitogen.core.StreamError:
Attempt to read past end of file.
]
variable[it] assign[=] call[name[iter_read], parameter[list[[<ast.Name object at 0x7da1b1d0c310>]], name[deadline]]]
<ast.Try object at 0x7da1b1d0cbb0> | keyword[def] identifier[discard_until] ( identifier[fd] , identifier[s] , identifier[deadline] ):
literal[string]
identifier[it] = identifier[iter_read] ([ identifier[fd] ], identifier[deadline] )
keyword[try] :
keyword[for] identifier[buf] keyword[in] identifier[it] :
keyword[if] identifier[IOLOG] . identifier[level] == identifier[logging] . identifier[DEBUG] :
keyword[for] identifier[line] keyword[in] identifier[buf] . identifier[splitlines] ():
identifier[IOLOG] . identifier[debug] ( literal[string] , identifier[line] )
keyword[if] identifier[buf] . identifier[endswith] ( identifier[s] ):
keyword[return]
keyword[finally] :
identifier[it] . identifier[close] () | def discard_until(fd, s, deadline):
"""Read chunks from `fd` until one is encountered that ends with `s`. This
is used to skip output produced by ``/etc/profile``, ``/etc/motd`` and
mandatory SSH banners while waiting for :attr:`Stream.EC0_MARKER` to
appear, indicating the first stage is ready to receive the compressed
:mod:`mitogen.core` source.
:param int fd:
File descriptor to read from.
:param bytes s:
Marker string to discard until encountered.
:param float deadline:
Absolute UNIX timestamp after which timeout should occur.
:raises mitogen.core.TimeoutError:
Attempt to read beyond deadline.
:raises mitogen.parent.EofError:
All streams indicated EOF, suggesting the child process has exitted.
:raises mitogen.core.StreamError:
Attempt to read past end of file.
"""
it = iter_read([fd], deadline)
try:
for buf in it:
if IOLOG.level == logging.DEBUG:
for line in buf.splitlines():
IOLOG.debug('discard_until: discarding %r', line) # depends on [control=['for'], data=['line']] # depends on [control=['if'], data=[]]
if buf.endswith(s):
return # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['buf']] # depends on [control=['try'], data=[]]
finally:
it.close() |
def get_instance(self, payload):
"""
Build an instance of WorkspaceCumulativeStatisticsInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.workspace_cumulative_statistics.WorkspaceCumulativeStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.workspace_cumulative_statistics.WorkspaceCumulativeStatisticsInstance
"""
return WorkspaceCumulativeStatisticsInstance(
self._version,
payload,
workspace_sid=self._solution['workspace_sid'],
) | def function[get_instance, parameter[self, payload]]:
constant[
Build an instance of WorkspaceCumulativeStatisticsInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.workspace_cumulative_statistics.WorkspaceCumulativeStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.workspace_cumulative_statistics.WorkspaceCumulativeStatisticsInstance
]
return[call[name[WorkspaceCumulativeStatisticsInstance], parameter[name[self]._version, name[payload]]]] | keyword[def] identifier[get_instance] ( identifier[self] , identifier[payload] ):
literal[string]
keyword[return] identifier[WorkspaceCumulativeStatisticsInstance] (
identifier[self] . identifier[_version] ,
identifier[payload] ,
identifier[workspace_sid] = identifier[self] . identifier[_solution] [ literal[string] ],
) | def get_instance(self, payload):
"""
Build an instance of WorkspaceCumulativeStatisticsInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.workspace_cumulative_statistics.WorkspaceCumulativeStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.workspace_cumulative_statistics.WorkspaceCumulativeStatisticsInstance
"""
return WorkspaceCumulativeStatisticsInstance(self._version, payload, workspace_sid=self._solution['workspace_sid']) |
def unfold(self):
"""Unfolds the region."""
# set all direct child blocks which are not triggers to be visible
self._trigger.setVisible(True)
TextBlockHelper.set_collapsed(self._trigger, False)
for block in self.blocks(ignore_blank_lines=False):
block.setVisible(True)
if TextBlockHelper.is_fold_trigger(block):
TextBlockHelper.set_collapsed(block, False) | def function[unfold, parameter[self]]:
constant[Unfolds the region.]
call[name[self]._trigger.setVisible, parameter[constant[True]]]
call[name[TextBlockHelper].set_collapsed, parameter[name[self]._trigger, constant[False]]]
for taget[name[block]] in starred[call[name[self].blocks, parameter[]]] begin[:]
call[name[block].setVisible, parameter[constant[True]]]
if call[name[TextBlockHelper].is_fold_trigger, parameter[name[block]]] begin[:]
call[name[TextBlockHelper].set_collapsed, parameter[name[block], constant[False]]] | keyword[def] identifier[unfold] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_trigger] . identifier[setVisible] ( keyword[True] )
identifier[TextBlockHelper] . identifier[set_collapsed] ( identifier[self] . identifier[_trigger] , keyword[False] )
keyword[for] identifier[block] keyword[in] identifier[self] . identifier[blocks] ( identifier[ignore_blank_lines] = keyword[False] ):
identifier[block] . identifier[setVisible] ( keyword[True] )
keyword[if] identifier[TextBlockHelper] . identifier[is_fold_trigger] ( identifier[block] ):
identifier[TextBlockHelper] . identifier[set_collapsed] ( identifier[block] , keyword[False] ) | def unfold(self):
"""Unfolds the region."""
# set all direct child blocks which are not triggers to be visible
self._trigger.setVisible(True)
TextBlockHelper.set_collapsed(self._trigger, False)
for block in self.blocks(ignore_blank_lines=False):
block.setVisible(True)
if TextBlockHelper.is_fold_trigger(block):
TextBlockHelper.set_collapsed(block, False) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['block']] |
def resize_pad_image(image, dest_w, dest_h, pad_with_transparent=False):
"""
Resize the image and pad to the correct aspect ratio.
:param image: PIL.Image
:param dest_w: Target width
:param dest_h: Target height
:param pad_with_transparent: If True, make additional padding transparent
:return: Scaled and padded image
"""
dest_w = float(dest_w)
dest_h = float(dest_h)
dest_ratio = dest_w / dest_h
# Calculate the apect ratio of the image
src_w = float(image.size[0])
src_h = float(image.size[1])
src_ratio = src_w / src_h
if src_ratio < dest_ratio:
# Image is tall and thin - we need to scale to the right height and then pad
scale = dest_h / src_h
scaled_h = dest_h
scaled_w = src_w * scale
offset = (int((dest_w - scaled_w) / 2), 0)
else:
# Image is short and wide - we need to scale to the right height and then crop
scale = dest_w / src_w
scaled_w = dest_w
scaled_h = src_h * scale
offset = (0, int((dest_h - scaled_h) / 2))
scaled_image = image.resize((int(scaled_w), int(scaled_h)), PIL.Image.ANTIALIAS)
# Normally we will want to copy the source mode for the destination image, but in some
# cases the source image will use a Palletted (mode=='P') in which case we need to change
# the mode
mode = scaled_image.mode
log.debug('Padding image with mode: "{}"'.format(mode))
if pad_with_transparent and mode != 'RGBA':
old_mode = mode
mode = 'RGBA'
scaled_image = scaled_image.convert(mode)
log.debug('Changed mode from "{}" to "{}"'.format(old_mode, mode))
elif mode == 'P':
if 'transparency' in scaled_image.info:
mode = 'RGBA'
else:
mode = 'RGB'
scaled_image = scaled_image.convert(mode)
log.debug('Changed mode from "P" to "{}"'.format(mode))
if pad_with_transparent:
pad_colour = (255, 255, 255, 0)
else:
# Get the pixel colour for coordinate (0,0)
pixels = scaled_image.load()
pad_colour = pixels[0, 0]
padded_image = PIL.Image.new(mode, (int(dest_w), int(dest_h)), pad_colour)
padded_image.paste(scaled_image, offset)
return padded_image | def function[resize_pad_image, parameter[image, dest_w, dest_h, pad_with_transparent]]:
constant[
Resize the image and pad to the correct aspect ratio.
:param image: PIL.Image
:param dest_w: Target width
:param dest_h: Target height
:param pad_with_transparent: If True, make additional padding transparent
:return: Scaled and padded image
]
variable[dest_w] assign[=] call[name[float], parameter[name[dest_w]]]
variable[dest_h] assign[=] call[name[float], parameter[name[dest_h]]]
variable[dest_ratio] assign[=] binary_operation[name[dest_w] / name[dest_h]]
variable[src_w] assign[=] call[name[float], parameter[call[name[image].size][constant[0]]]]
variable[src_h] assign[=] call[name[float], parameter[call[name[image].size][constant[1]]]]
variable[src_ratio] assign[=] binary_operation[name[src_w] / name[src_h]]
if compare[name[src_ratio] less[<] name[dest_ratio]] begin[:]
variable[scale] assign[=] binary_operation[name[dest_h] / name[src_h]]
variable[scaled_h] assign[=] name[dest_h]
variable[scaled_w] assign[=] binary_operation[name[src_w] * name[scale]]
variable[offset] assign[=] tuple[[<ast.Call object at 0x7da2054a6260>, <ast.Constant object at 0x7da2054a4e80>]]
variable[scaled_image] assign[=] call[name[image].resize, parameter[tuple[[<ast.Call object at 0x7da2054a46d0>, <ast.Call object at 0x7da2054a7310>]], name[PIL].Image.ANTIALIAS]]
variable[mode] assign[=] name[scaled_image].mode
call[name[log].debug, parameter[call[constant[Padding image with mode: "{}"].format, parameter[name[mode]]]]]
if <ast.BoolOp object at 0x7da2054a6c20> begin[:]
variable[old_mode] assign[=] name[mode]
variable[mode] assign[=] constant[RGBA]
variable[scaled_image] assign[=] call[name[scaled_image].convert, parameter[name[mode]]]
call[name[log].debug, parameter[call[constant[Changed mode from "{}" to "{}"].format, parameter[name[old_mode], name[mode]]]]]
if name[pad_with_transparent] begin[:]
variable[pad_colour] assign[=] tuple[[<ast.Constant object at 0x7da2054a7940>, <ast.Constant object at 0x7da2054a4b50>, <ast.Constant object at 0x7da2054a4a30>, <ast.Constant object at 0x7da2054a7520>]]
variable[padded_image] assign[=] call[name[PIL].Image.new, parameter[name[mode], tuple[[<ast.Call object at 0x7da2054a6e90>, <ast.Call object at 0x7da2054a7790>]], name[pad_colour]]]
call[name[padded_image].paste, parameter[name[scaled_image], name[offset]]]
return[name[padded_image]] | keyword[def] identifier[resize_pad_image] ( identifier[image] , identifier[dest_w] , identifier[dest_h] , identifier[pad_with_transparent] = keyword[False] ):
literal[string]
identifier[dest_w] = identifier[float] ( identifier[dest_w] )
identifier[dest_h] = identifier[float] ( identifier[dest_h] )
identifier[dest_ratio] = identifier[dest_w] / identifier[dest_h]
identifier[src_w] = identifier[float] ( identifier[image] . identifier[size] [ literal[int] ])
identifier[src_h] = identifier[float] ( identifier[image] . identifier[size] [ literal[int] ])
identifier[src_ratio] = identifier[src_w] / identifier[src_h]
keyword[if] identifier[src_ratio] < identifier[dest_ratio] :
identifier[scale] = identifier[dest_h] / identifier[src_h]
identifier[scaled_h] = identifier[dest_h]
identifier[scaled_w] = identifier[src_w] * identifier[scale]
identifier[offset] =( identifier[int] (( identifier[dest_w] - identifier[scaled_w] )/ literal[int] ), literal[int] )
keyword[else] :
identifier[scale] = identifier[dest_w] / identifier[src_w]
identifier[scaled_w] = identifier[dest_w]
identifier[scaled_h] = identifier[src_h] * identifier[scale]
identifier[offset] =( literal[int] , identifier[int] (( identifier[dest_h] - identifier[scaled_h] )/ literal[int] ))
identifier[scaled_image] = identifier[image] . identifier[resize] (( identifier[int] ( identifier[scaled_w] ), identifier[int] ( identifier[scaled_h] )), identifier[PIL] . identifier[Image] . identifier[ANTIALIAS] )
identifier[mode] = identifier[scaled_image] . identifier[mode]
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[mode] ))
keyword[if] identifier[pad_with_transparent] keyword[and] identifier[mode] != literal[string] :
identifier[old_mode] = identifier[mode]
identifier[mode] = literal[string]
identifier[scaled_image] = identifier[scaled_image] . identifier[convert] ( identifier[mode] )
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[old_mode] , identifier[mode] ))
keyword[elif] identifier[mode] == literal[string] :
keyword[if] literal[string] keyword[in] identifier[scaled_image] . identifier[info] :
identifier[mode] = literal[string]
keyword[else] :
identifier[mode] = literal[string]
identifier[scaled_image] = identifier[scaled_image] . identifier[convert] ( identifier[mode] )
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[mode] ))
keyword[if] identifier[pad_with_transparent] :
identifier[pad_colour] =( literal[int] , literal[int] , literal[int] , literal[int] )
keyword[else] :
identifier[pixels] = identifier[scaled_image] . identifier[load] ()
identifier[pad_colour] = identifier[pixels] [ literal[int] , literal[int] ]
identifier[padded_image] = identifier[PIL] . identifier[Image] . identifier[new] ( identifier[mode] ,( identifier[int] ( identifier[dest_w] ), identifier[int] ( identifier[dest_h] )), identifier[pad_colour] )
identifier[padded_image] . identifier[paste] ( identifier[scaled_image] , identifier[offset] )
keyword[return] identifier[padded_image] | def resize_pad_image(image, dest_w, dest_h, pad_with_transparent=False):
"""
Resize the image and pad to the correct aspect ratio.
:param image: PIL.Image
:param dest_w: Target width
:param dest_h: Target height
:param pad_with_transparent: If True, make additional padding transparent
:return: Scaled and padded image
"""
dest_w = float(dest_w)
dest_h = float(dest_h)
dest_ratio = dest_w / dest_h
# Calculate the apect ratio of the image
src_w = float(image.size[0])
src_h = float(image.size[1])
src_ratio = src_w / src_h
if src_ratio < dest_ratio:
# Image is tall and thin - we need to scale to the right height and then pad
scale = dest_h / src_h
scaled_h = dest_h
scaled_w = src_w * scale
offset = (int((dest_w - scaled_w) / 2), 0) # depends on [control=['if'], data=[]]
else:
# Image is short and wide - we need to scale to the right height and then crop
scale = dest_w / src_w
scaled_w = dest_w
scaled_h = src_h * scale
offset = (0, int((dest_h - scaled_h) / 2))
scaled_image = image.resize((int(scaled_w), int(scaled_h)), PIL.Image.ANTIALIAS)
# Normally we will want to copy the source mode for the destination image, but in some
# cases the source image will use a Palletted (mode=='P') in which case we need to change
# the mode
mode = scaled_image.mode
log.debug('Padding image with mode: "{}"'.format(mode))
if pad_with_transparent and mode != 'RGBA':
old_mode = mode
mode = 'RGBA'
scaled_image = scaled_image.convert(mode)
log.debug('Changed mode from "{}" to "{}"'.format(old_mode, mode)) # depends on [control=['if'], data=[]]
elif mode == 'P':
if 'transparency' in scaled_image.info:
mode = 'RGBA' # depends on [control=['if'], data=[]]
else:
mode = 'RGB'
scaled_image = scaled_image.convert(mode)
log.debug('Changed mode from "P" to "{}"'.format(mode)) # depends on [control=['if'], data=['mode']]
if pad_with_transparent:
pad_colour = (255, 255, 255, 0) # depends on [control=['if'], data=[]]
else:
# Get the pixel colour for coordinate (0,0)
pixels = scaled_image.load()
pad_colour = pixels[0, 0]
padded_image = PIL.Image.new(mode, (int(dest_w), int(dest_h)), pad_colour)
padded_image.paste(scaled_image, offset)
return padded_image |
def postappend(self):
"""This method will be called after an element is added to another and does some checks.
It can do extra checks and if necessary raise exceptions to prevent addition. By default makes sure the right document is associated.
This method is mostly for internal use.
"""
#If the element was not associated with a document yet, do so now (and for all unassociated children:
if not self.doc and self.parent.doc:
self.setdocument(self.parent.doc)
if self.doc and self.doc.deepvalidation:
self.deepvalidation() | def function[postappend, parameter[self]]:
constant[This method will be called after an element is added to another and does some checks.
It can do extra checks and if necessary raise exceptions to prevent addition. By default makes sure the right document is associated.
This method is mostly for internal use.
]
if <ast.BoolOp object at 0x7da2054a5000> begin[:]
call[name[self].setdocument, parameter[name[self].parent.doc]]
if <ast.BoolOp object at 0x7da2054a4640> begin[:]
call[name[self].deepvalidation, parameter[]] | keyword[def] identifier[postappend] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[doc] keyword[and] identifier[self] . identifier[parent] . identifier[doc] :
identifier[self] . identifier[setdocument] ( identifier[self] . identifier[parent] . identifier[doc] )
keyword[if] identifier[self] . identifier[doc] keyword[and] identifier[self] . identifier[doc] . identifier[deepvalidation] :
identifier[self] . identifier[deepvalidation] () | def postappend(self):
"""This method will be called after an element is added to another and does some checks.
It can do extra checks and if necessary raise exceptions to prevent addition. By default makes sure the right document is associated.
This method is mostly for internal use.
"""
#If the element was not associated with a document yet, do so now (and for all unassociated children:
if not self.doc and self.parent.doc:
self.setdocument(self.parent.doc) # depends on [control=['if'], data=[]]
if self.doc and self.doc.deepvalidation:
self.deepvalidation() # depends on [control=['if'], data=[]] |
def enableClient(self, *args, **kwargs):
"""
Enable Client
Enable a client that was disabled with `disableClient`. If the client
is already enabled, this does nothing.
This is typically used by identity providers to re-enable clients that
had been disabled when the corresponding identity's scopes changed.
This method gives output: ``v1/get-client-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["enableClient"], *args, **kwargs) | def function[enableClient, parameter[self]]:
constant[
Enable Client
Enable a client that was disabled with `disableClient`. If the client
is already enabled, this does nothing.
This is typically used by identity providers to re-enable clients that
had been disabled when the corresponding identity's scopes changed.
This method gives output: ``v1/get-client-response.json#``
This method is ``stable``
]
return[call[name[self]._makeApiCall, parameter[call[name[self].funcinfo][constant[enableClient]], <ast.Starred object at 0x7da20c6e5ea0>]]] | keyword[def] identifier[enableClient] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[_makeApiCall] ( identifier[self] . identifier[funcinfo] [ literal[string] ],* identifier[args] ,** identifier[kwargs] ) | def enableClient(self, *args, **kwargs):
"""
Enable Client
Enable a client that was disabled with `disableClient`. If the client
is already enabled, this does nothing.
This is typically used by identity providers to re-enable clients that
had been disabled when the corresponding identity's scopes changed.
This method gives output: ``v1/get-client-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo['enableClient'], *args, **kwargs) |
def dump(self, *args, **kwargs):
"""
Build a list of dicts, by calling :meth:`Node.dump`
on each item.
Each keyword provides a function that extracts a value
from a Node.
Examples:
>>> c = Collection([Scalar(1), Scalar(2)])
>>> c.dump(x2=Q*2, m1=Q-1).val()
[{'x2': 2, 'm1': 0}, {'x2': 4, 'm1': 1}]
"""
return self.each(Q.dump(*args, **kwargs)) | def function[dump, parameter[self]]:
constant[
Build a list of dicts, by calling :meth:`Node.dump`
on each item.
Each keyword provides a function that extracts a value
from a Node.
Examples:
>>> c = Collection([Scalar(1), Scalar(2)])
>>> c.dump(x2=Q*2, m1=Q-1).val()
[{'x2': 2, 'm1': 0}, {'x2': 4, 'm1': 1}]
]
return[call[name[self].each, parameter[call[name[Q].dump, parameter[<ast.Starred object at 0x7da204564bb0>]]]]] | keyword[def] identifier[dump] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[each] ( identifier[Q] . identifier[dump] (* identifier[args] ,** identifier[kwargs] )) | def dump(self, *args, **kwargs):
"""
Build a list of dicts, by calling :meth:`Node.dump`
on each item.
Each keyword provides a function that extracts a value
from a Node.
Examples:
>>> c = Collection([Scalar(1), Scalar(2)])
>>> c.dump(x2=Q*2, m1=Q-1).val()
[{'x2': 2, 'm1': 0}, {'x2': 4, 'm1': 1}]
"""
return self.each(Q.dump(*args, **kwargs)) |
def build_config(self, config):
"""Set config defaults"""
for sec in 'LiSE', 'ELiDE':
config.adddefaultsection(sec)
config.setdefaults(
'LiSE',
{
'world': 'sqlite:///LiSEworld.db',
'language': 'eng',
'logfile': '',
'loglevel': 'info'
}
)
config.setdefaults(
'ELiDE',
{
'boardchar': 'physical',
'debugger': 'no',
'inspector': 'no',
'user_kv': 'yes',
'play_speed': '1',
'thing_graphics': json.dumps([
("Marsh Davies' Island", 'marsh_davies_island_fg.atlas'),
('RLTiles: Body', 'base.atlas'),
('RLTiles: Basic clothes', 'body.atlas'),
('RLTiles: Armwear', 'arm.atlas'),
('RLTiles: Legwear', 'leg.atlas'),
('RLTiles: Right hand', 'hand1.atlas'),
('RLTiles: Left hand', 'hand2.atlas'),
('RLTiles: Boots', 'boot.atlas'),
('RLTiles: Hair', 'hair.atlas'),
('RLTiles: Beard', 'beard.atlas'),
('RLTiles: Headwear', 'head.atlas')
]),
'place_graphics': json.dumps([
("Marsh Davies' Island", 'marsh_davies_island_bg.atlas'),
("Marsh Davies' Crypt", 'marsh_davies_crypt.atlas'),
('RLTiles: Dungeon', 'dungeon.atlas')
])
}
)
config.write() | def function[build_config, parameter[self, config]]:
constant[Set config defaults]
for taget[name[sec]] in starred[tuple[[<ast.Constant object at 0x7da1b0b5f580>, <ast.Constant object at 0x7da1b0b5d900>]]] begin[:]
call[name[config].adddefaultsection, parameter[name[sec]]]
call[name[config].setdefaults, parameter[constant[LiSE], dictionary[[<ast.Constant object at 0x7da1b0b5fa30>, <ast.Constant object at 0x7da1b0b5d660>, <ast.Constant object at 0x7da1b0b5ef50>, <ast.Constant object at 0x7da1b0b5f190>], [<ast.Constant object at 0x7da1b0b5d540>, <ast.Constant object at 0x7da1b0b5e950>, <ast.Constant object at 0x7da1b0b5f070>, <ast.Constant object at 0x7da1b0b5e980>]]]]
call[name[config].setdefaults, parameter[constant[ELiDE], dictionary[[<ast.Constant object at 0x7da1b0b5dcf0>, <ast.Constant object at 0x7da1b0b5c610>, <ast.Constant object at 0x7da1b0b5faf0>, <ast.Constant object at 0x7da1b0b5f160>, <ast.Constant object at 0x7da1b0b5f3d0>, <ast.Constant object at 0x7da1b0b5f7f0>, <ast.Constant object at 0x7da1b0b5f6d0>], [<ast.Constant object at 0x7da1b0b5c100>, <ast.Constant object at 0x7da1b0b5dae0>, <ast.Constant object at 0x7da1b0b5c670>, <ast.Constant object at 0x7da1b0b5c070>, <ast.Constant object at 0x7da1b0b5fb50>, <ast.Call object at 0x7da1b0b5d9c0>, <ast.Call object at 0x7da1b0b826e0>]]]]
call[name[config].write, parameter[]] | keyword[def] identifier[build_config] ( identifier[self] , identifier[config] ):
literal[string]
keyword[for] identifier[sec] keyword[in] literal[string] , literal[string] :
identifier[config] . identifier[adddefaultsection] ( identifier[sec] )
identifier[config] . identifier[setdefaults] (
literal[string] ,
{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
}
)
identifier[config] . identifier[setdefaults] (
literal[string] ,
{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : identifier[json] . identifier[dumps] ([
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] )
]),
literal[string] : identifier[json] . identifier[dumps] ([
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] )
])
}
)
identifier[config] . identifier[write] () | def build_config(self, config):
"""Set config defaults"""
for sec in ('LiSE', 'ELiDE'):
config.adddefaultsection(sec) # depends on [control=['for'], data=['sec']]
config.setdefaults('LiSE', {'world': 'sqlite:///LiSEworld.db', 'language': 'eng', 'logfile': '', 'loglevel': 'info'})
config.setdefaults('ELiDE', {'boardchar': 'physical', 'debugger': 'no', 'inspector': 'no', 'user_kv': 'yes', 'play_speed': '1', 'thing_graphics': json.dumps([("Marsh Davies' Island", 'marsh_davies_island_fg.atlas'), ('RLTiles: Body', 'base.atlas'), ('RLTiles: Basic clothes', 'body.atlas'), ('RLTiles: Armwear', 'arm.atlas'), ('RLTiles: Legwear', 'leg.atlas'), ('RLTiles: Right hand', 'hand1.atlas'), ('RLTiles: Left hand', 'hand2.atlas'), ('RLTiles: Boots', 'boot.atlas'), ('RLTiles: Hair', 'hair.atlas'), ('RLTiles: Beard', 'beard.atlas'), ('RLTiles: Headwear', 'head.atlas')]), 'place_graphics': json.dumps([("Marsh Davies' Island", 'marsh_davies_island_bg.atlas'), ("Marsh Davies' Crypt", 'marsh_davies_crypt.atlas'), ('RLTiles: Dungeon', 'dungeon.atlas')])})
config.write() |
def _site_users():
"""
Get a list of site_n users
"""
userlist = sudo("cat /etc/passwd | awk '/site/'").split('\n')
siteuserlist = [user.split(':')[0] for user in userlist if 'site_' in user]
return siteuserlist | def function[_site_users, parameter[]]:
constant[
Get a list of site_n users
]
variable[userlist] assign[=] call[call[name[sudo], parameter[constant[cat /etc/passwd | awk '/site/']]].split, parameter[constant[
]]]
variable[siteuserlist] assign[=] <ast.ListComp object at 0x7da18fe921d0>
return[name[siteuserlist]] | keyword[def] identifier[_site_users] ():
literal[string]
identifier[userlist] = identifier[sudo] ( literal[string] ). identifier[split] ( literal[string] )
identifier[siteuserlist] =[ identifier[user] . identifier[split] ( literal[string] )[ literal[int] ] keyword[for] identifier[user] keyword[in] identifier[userlist] keyword[if] literal[string] keyword[in] identifier[user] ]
keyword[return] identifier[siteuserlist] | def _site_users():
"""
Get a list of site_n users
"""
userlist = sudo("cat /etc/passwd | awk '/site/'").split('\n')
siteuserlist = [user.split(':')[0] for user in userlist if 'site_' in user]
return siteuserlist |
def reset_position_scales(self):
"""
Reset x and y scales
"""
if not self.facet.shrink:
return
with suppress(AttributeError):
self.panel_scales_x.reset()
with suppress(AttributeError):
self.panel_scales_y.reset() | def function[reset_position_scales, parameter[self]]:
constant[
Reset x and y scales
]
if <ast.UnaryOp object at 0x7da18ede6b90> begin[:]
return[None]
with call[name[suppress], parameter[name[AttributeError]]] begin[:]
call[name[self].panel_scales_x.reset, parameter[]]
with call[name[suppress], parameter[name[AttributeError]]] begin[:]
call[name[self].panel_scales_y.reset, parameter[]] | keyword[def] identifier[reset_position_scales] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[facet] . identifier[shrink] :
keyword[return]
keyword[with] identifier[suppress] ( identifier[AttributeError] ):
identifier[self] . identifier[panel_scales_x] . identifier[reset] ()
keyword[with] identifier[suppress] ( identifier[AttributeError] ):
identifier[self] . identifier[panel_scales_y] . identifier[reset] () | def reset_position_scales(self):
"""
Reset x and y scales
"""
if not self.facet.shrink:
return # depends on [control=['if'], data=[]]
with suppress(AttributeError):
self.panel_scales_x.reset() # depends on [control=['with'], data=[]]
with suppress(AttributeError):
self.panel_scales_y.reset() # depends on [control=['with'], data=[]] |
def recieve(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.ReceiveAction,
operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, obj) | def function[recieve, parameter[self, resource, obj, operation_timeout, max_envelope_size, locale]]:
constant[
resource can be a URL or a ResourceLocator
]
if call[name[isinstance], parameter[name[resource], name[str]]] begin[:]
variable[resource] assign[=] call[name[ResourceLocator], parameter[name[resource]]]
variable[headers] assign[=] call[name[self]._build_headers, parameter[name[resource], name[Session].ReceiveAction, name[operation_timeout], name[max_envelope_size], name[locale]]]
return[call[name[self].service.invoke, parameter[name[headers], name[obj]]]] | keyword[def] identifier[recieve] ( identifier[self] , identifier[resource] , identifier[obj] ,
identifier[operation_timeout] = keyword[None] , identifier[max_envelope_size] = keyword[None] , identifier[locale] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[resource] , identifier[str] ):
identifier[resource] = identifier[ResourceLocator] ( identifier[resource] )
identifier[headers] = identifier[self] . identifier[_build_headers] ( identifier[resource] , identifier[Session] . identifier[ReceiveAction] ,
identifier[operation_timeout] , identifier[max_envelope_size] , identifier[locale] )
keyword[return] identifier[self] . identifier[service] . identifier[invoke] ( identifier[headers] , identifier[obj] ) | def recieve(self, resource, obj, operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource) # depends on [control=['if'], data=[]]
headers = self._build_headers(resource, Session.ReceiveAction, operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, obj) |
def modify_parameter_group(self, name, parameters=None):
"""
Modify a parameter group for your account.
:type name: string
:param name: The name of the new parameter group
:type parameters: list of :class:`boto.rds.parametergroup.Parameter`
:param parameters: The new parameters
:rtype: :class:`boto.rds.parametergroup.ParameterGroup`
:return: The newly created ParameterGroup
"""
params = {'DBParameterGroupName': name}
for i in range(0, len(parameters)):
parameter = parameters[i]
parameter.merge(params, i+1)
return self.get_list('ModifyDBParameterGroup', params,
ParameterGroup, verb='POST') | def function[modify_parameter_group, parameter[self, name, parameters]]:
constant[
Modify a parameter group for your account.
:type name: string
:param name: The name of the new parameter group
:type parameters: list of :class:`boto.rds.parametergroup.Parameter`
:param parameters: The new parameters
:rtype: :class:`boto.rds.parametergroup.ParameterGroup`
:return: The newly created ParameterGroup
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b2615d50>], [<ast.Name object at 0x7da1b26149d0>]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[parameters]]]]]] begin[:]
variable[parameter] assign[=] call[name[parameters]][name[i]]
call[name[parameter].merge, parameter[name[params], binary_operation[name[i] + constant[1]]]]
return[call[name[self].get_list, parameter[constant[ModifyDBParameterGroup], name[params], name[ParameterGroup]]]] | keyword[def] identifier[modify_parameter_group] ( identifier[self] , identifier[name] , identifier[parameters] = keyword[None] ):
literal[string]
identifier[params] ={ literal[string] : identifier[name] }
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[parameters] )):
identifier[parameter] = identifier[parameters] [ identifier[i] ]
identifier[parameter] . identifier[merge] ( identifier[params] , identifier[i] + literal[int] )
keyword[return] identifier[self] . identifier[get_list] ( literal[string] , identifier[params] ,
identifier[ParameterGroup] , identifier[verb] = literal[string] ) | def modify_parameter_group(self, name, parameters=None):
"""
Modify a parameter group for your account.
:type name: string
:param name: The name of the new parameter group
:type parameters: list of :class:`boto.rds.parametergroup.Parameter`
:param parameters: The new parameters
:rtype: :class:`boto.rds.parametergroup.ParameterGroup`
:return: The newly created ParameterGroup
"""
params = {'DBParameterGroupName': name}
for i in range(0, len(parameters)):
parameter = parameters[i]
parameter.merge(params, i + 1) # depends on [control=['for'], data=['i']]
return self.get_list('ModifyDBParameterGroup', params, ParameterGroup, verb='POST') |
def s3errors(path):
"""Translate S3 errors to FSErrors."""
try:
yield
except ClientError as error:
_error = error.response.get("Error", {})
error_code = _error.get("Code", None)
response_meta = error.response.get("ResponseMetadata", {})
http_status = response_meta.get("HTTPStatusCode", 200)
error_msg = _error.get("Message", None)
if error_code == "NoSuchBucket":
raise errors.ResourceError(path, exc=error, msg=error_msg)
if http_status == 404:
raise errors.ResourceNotFound(path)
elif http_status == 403:
raise errors.PermissionDenied(path=path, msg=error_msg)
else:
raise errors.OperationFailed(path=path, exc=error)
except SSLError as error:
raise errors.OperationFailed(path, exc=error)
except EndpointConnectionError as error:
raise errors.RemoteConnectionError(path, exc=error, msg="{}".format(error)) | def function[s3errors, parameter[path]]:
constant[Translate S3 errors to FSErrors.]
<ast.Try object at 0x7da1b02c7f10> | keyword[def] identifier[s3errors] ( identifier[path] ):
literal[string]
keyword[try] :
keyword[yield]
keyword[except] identifier[ClientError] keyword[as] identifier[error] :
identifier[_error] = identifier[error] . identifier[response] . identifier[get] ( literal[string] ,{})
identifier[error_code] = identifier[_error] . identifier[get] ( literal[string] , keyword[None] )
identifier[response_meta] = identifier[error] . identifier[response] . identifier[get] ( literal[string] ,{})
identifier[http_status] = identifier[response_meta] . identifier[get] ( literal[string] , literal[int] )
identifier[error_msg] = identifier[_error] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[error_code] == literal[string] :
keyword[raise] identifier[errors] . identifier[ResourceError] ( identifier[path] , identifier[exc] = identifier[error] , identifier[msg] = identifier[error_msg] )
keyword[if] identifier[http_status] == literal[int] :
keyword[raise] identifier[errors] . identifier[ResourceNotFound] ( identifier[path] )
keyword[elif] identifier[http_status] == literal[int] :
keyword[raise] identifier[errors] . identifier[PermissionDenied] ( identifier[path] = identifier[path] , identifier[msg] = identifier[error_msg] )
keyword[else] :
keyword[raise] identifier[errors] . identifier[OperationFailed] ( identifier[path] = identifier[path] , identifier[exc] = identifier[error] )
keyword[except] identifier[SSLError] keyword[as] identifier[error] :
keyword[raise] identifier[errors] . identifier[OperationFailed] ( identifier[path] , identifier[exc] = identifier[error] )
keyword[except] identifier[EndpointConnectionError] keyword[as] identifier[error] :
keyword[raise] identifier[errors] . identifier[RemoteConnectionError] ( identifier[path] , identifier[exc] = identifier[error] , identifier[msg] = literal[string] . identifier[format] ( identifier[error] )) | def s3errors(path):
"""Translate S3 errors to FSErrors."""
try:
yield # depends on [control=['try'], data=[]]
except ClientError as error:
_error = error.response.get('Error', {})
error_code = _error.get('Code', None)
response_meta = error.response.get('ResponseMetadata', {})
http_status = response_meta.get('HTTPStatusCode', 200)
error_msg = _error.get('Message', None)
if error_code == 'NoSuchBucket':
raise errors.ResourceError(path, exc=error, msg=error_msg) # depends on [control=['if'], data=[]]
if http_status == 404:
raise errors.ResourceNotFound(path) # depends on [control=['if'], data=[]]
elif http_status == 403:
raise errors.PermissionDenied(path=path, msg=error_msg) # depends on [control=['if'], data=[]]
else:
raise errors.OperationFailed(path=path, exc=error) # depends on [control=['except'], data=['error']]
except SSLError as error:
raise errors.OperationFailed(path, exc=error) # depends on [control=['except'], data=['error']]
except EndpointConnectionError as error:
raise errors.RemoteConnectionError(path, exc=error, msg='{}'.format(error)) # depends on [control=['except'], data=['error']] |
def int_to_bytes(i, minlen=1, order='big'): # pragma: no cover
"""convert integer to bytes"""
blen = max(minlen, PGPObject.int_byte_len(i), 1)
if six.PY2:
r = iter(_ * 8 for _ in (range(blen) if order == 'little' else range(blen - 1, -1, -1)))
return bytes(bytearray((i >> c) & 0xff for c in r))
return i.to_bytes(blen, order) | def function[int_to_bytes, parameter[i, minlen, order]]:
constant[convert integer to bytes]
variable[blen] assign[=] call[name[max], parameter[name[minlen], call[name[PGPObject].int_byte_len, parameter[name[i]]], constant[1]]]
if name[six].PY2 begin[:]
variable[r] assign[=] call[name[iter], parameter[<ast.GeneratorExp object at 0x7da1b088d270>]]
return[call[name[bytes], parameter[call[name[bytearray], parameter[<ast.GeneratorExp object at 0x7da1b078e860>]]]]]
return[call[name[i].to_bytes, parameter[name[blen], name[order]]]] | keyword[def] identifier[int_to_bytes] ( identifier[i] , identifier[minlen] = literal[int] , identifier[order] = literal[string] ):
literal[string]
identifier[blen] = identifier[max] ( identifier[minlen] , identifier[PGPObject] . identifier[int_byte_len] ( identifier[i] ), literal[int] )
keyword[if] identifier[six] . identifier[PY2] :
identifier[r] = identifier[iter] ( identifier[_] * literal[int] keyword[for] identifier[_] keyword[in] ( identifier[range] ( identifier[blen] ) keyword[if] identifier[order] == literal[string] keyword[else] identifier[range] ( identifier[blen] - literal[int] ,- literal[int] ,- literal[int] )))
keyword[return] identifier[bytes] ( identifier[bytearray] (( identifier[i] >> identifier[c] )& literal[int] keyword[for] identifier[c] keyword[in] identifier[r] ))
keyword[return] identifier[i] . identifier[to_bytes] ( identifier[blen] , identifier[order] ) | def int_to_bytes(i, minlen=1, order='big'): # pragma: no cover
'convert integer to bytes'
blen = max(minlen, PGPObject.int_byte_len(i), 1)
if six.PY2:
r = iter((_ * 8 for _ in (range(blen) if order == 'little' else range(blen - 1, -1, -1))))
return bytes(bytearray((i >> c & 255 for c in r))) # depends on [control=['if'], data=[]]
return i.to_bytes(blen, order) |
def calculate_rsq(self):
"""calculate_rsq calculates coefficient of determination, or r-squared, defined here as 1.0 - SS_res / SS_tot. rsq is only calculated for those timepoints in the data for which the design matrix is non-zero.
"""
assert hasattr(self, 'betas'), 'no betas found, please run regression before rsq'
explained_times = self.design_matrix.sum(axis = 0) != 0
explained_signal = self.predict_from_design_matrix(self.design_matrix)
self.rsq = 1.0 - np.sum((explained_signal[:,explained_times] - self.resampled_signal[:,explained_times])**2, axis = -1) / np.sum(self.resampled_signal[:,explained_times].squeeze()**2, axis = -1)
self.ssr = np.sum((explained_signal[:,explained_times] - self.resampled_signal[:,explained_times])**2, axis = -1)
return np.squeeze(self.rsq) | def function[calculate_rsq, parameter[self]]:
constant[calculate_rsq calculates coefficient of determination, or r-squared, defined here as 1.0 - SS_res / SS_tot. rsq is only calculated for those timepoints in the data for which the design matrix is non-zero.
]
assert[call[name[hasattr], parameter[name[self], constant[betas]]]]
variable[explained_times] assign[=] compare[call[name[self].design_matrix.sum, parameter[]] not_equal[!=] constant[0]]
variable[explained_signal] assign[=] call[name[self].predict_from_design_matrix, parameter[name[self].design_matrix]]
name[self].rsq assign[=] binary_operation[constant[1.0] - binary_operation[call[name[np].sum, parameter[binary_operation[binary_operation[call[name[explained_signal]][tuple[[<ast.Slice object at 0x7da1b0f2ef80>, <ast.Name object at 0x7da1b0f2ca00>]]] - call[name[self].resampled_signal][tuple[[<ast.Slice object at 0x7da1b0f2e710>, <ast.Name object at 0x7da1b0f2cb80>]]]] ** constant[2]]]] / call[name[np].sum, parameter[binary_operation[call[call[name[self].resampled_signal][tuple[[<ast.Slice object at 0x7da1b0f2fdc0>, <ast.Name object at 0x7da1b0f2cee0>]]].squeeze, parameter[]] ** constant[2]]]]]]
name[self].ssr assign[=] call[name[np].sum, parameter[binary_operation[binary_operation[call[name[explained_signal]][tuple[[<ast.Slice object at 0x7da1b0f2ed10>, <ast.Name object at 0x7da1b0f2fac0>]]] - call[name[self].resampled_signal][tuple[[<ast.Slice object at 0x7da1b0f2c910>, <ast.Name object at 0x7da1b0f2fc40>]]]] ** constant[2]]]]
return[call[name[np].squeeze, parameter[name[self].rsq]]] | keyword[def] identifier[calculate_rsq] ( identifier[self] ):
literal[string]
keyword[assert] identifier[hasattr] ( identifier[self] , literal[string] ), literal[string]
identifier[explained_times] = identifier[self] . identifier[design_matrix] . identifier[sum] ( identifier[axis] = literal[int] )!= literal[int]
identifier[explained_signal] = identifier[self] . identifier[predict_from_design_matrix] ( identifier[self] . identifier[design_matrix] )
identifier[self] . identifier[rsq] = literal[int] - identifier[np] . identifier[sum] (( identifier[explained_signal] [:, identifier[explained_times] ]- identifier[self] . identifier[resampled_signal] [:, identifier[explained_times] ])** literal[int] , identifier[axis] =- literal[int] )/ identifier[np] . identifier[sum] ( identifier[self] . identifier[resampled_signal] [:, identifier[explained_times] ]. identifier[squeeze] ()** literal[int] , identifier[axis] =- literal[int] )
identifier[self] . identifier[ssr] = identifier[np] . identifier[sum] (( identifier[explained_signal] [:, identifier[explained_times] ]- identifier[self] . identifier[resampled_signal] [:, identifier[explained_times] ])** literal[int] , identifier[axis] =- literal[int] )
keyword[return] identifier[np] . identifier[squeeze] ( identifier[self] . identifier[rsq] ) | def calculate_rsq(self):
"""calculate_rsq calculates coefficient of determination, or r-squared, defined here as 1.0 - SS_res / SS_tot. rsq is only calculated for those timepoints in the data for which the design matrix is non-zero.
"""
assert hasattr(self, 'betas'), 'no betas found, please run regression before rsq'
explained_times = self.design_matrix.sum(axis=0) != 0
explained_signal = self.predict_from_design_matrix(self.design_matrix)
self.rsq = 1.0 - np.sum((explained_signal[:, explained_times] - self.resampled_signal[:, explained_times]) ** 2, axis=-1) / np.sum(self.resampled_signal[:, explained_times].squeeze() ** 2, axis=-1)
self.ssr = np.sum((explained_signal[:, explained_times] - self.resampled_signal[:, explained_times]) ** 2, axis=-1)
return np.squeeze(self.rsq) |
def with_read_after_refresh_expire(self, flag, only_read=False):
"""
>>> cache = Cache(log_level=logging.WARNING)
>>> cache.read_after_refresh_expire
True
>>> cache.with_read_after_refresh_expire(False)
>>> cache.read_after_refresh_expire
False
>>> cache.with_read_after_refresh_expire('haha')
>>> cache.read_after_refresh_expire
False
"""
if not isinstance(flag, bool):
self.logger.warning('Parameter flag %s must be boolean' % flag)
return
self.read_after_refresh_expire = flag | def function[with_read_after_refresh_expire, parameter[self, flag, only_read]]:
constant[
>>> cache = Cache(log_level=logging.WARNING)
>>> cache.read_after_refresh_expire
True
>>> cache.with_read_after_refresh_expire(False)
>>> cache.read_after_refresh_expire
False
>>> cache.with_read_after_refresh_expire('haha')
>>> cache.read_after_refresh_expire
False
]
if <ast.UnaryOp object at 0x7da20c6aa4a0> begin[:]
call[name[self].logger.warning, parameter[binary_operation[constant[Parameter flag %s must be boolean] <ast.Mod object at 0x7da2590d6920> name[flag]]]]
return[None]
name[self].read_after_refresh_expire assign[=] name[flag] | keyword[def] identifier[with_read_after_refresh_expire] ( identifier[self] , identifier[flag] , identifier[only_read] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[flag] , identifier[bool] ):
identifier[self] . identifier[logger] . identifier[warning] ( literal[string] % identifier[flag] )
keyword[return]
identifier[self] . identifier[read_after_refresh_expire] = identifier[flag] | def with_read_after_refresh_expire(self, flag, only_read=False):
"""
>>> cache = Cache(log_level=logging.WARNING)
>>> cache.read_after_refresh_expire
True
>>> cache.with_read_after_refresh_expire(False)
>>> cache.read_after_refresh_expire
False
>>> cache.with_read_after_refresh_expire('haha')
>>> cache.read_after_refresh_expire
False
"""
if not isinstance(flag, bool):
self.logger.warning('Parameter flag %s must be boolean' % flag)
return # depends on [control=['if'], data=[]]
self.read_after_refresh_expire = flag |
def find_compartment_id_in_model(model, compartment_id):
"""
Identify a model compartment by looking up names in COMPARTMENT_SHORTLIST.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
compartment_id : string
Memote internal compartment identifier used to access compartment name
shortlist to look up potential compartment names.
Returns
-------
string
Compartment identifier in the model corresponding to compartment_id.
"""
if compartment_id not in COMPARTMENT_SHORTLIST.keys():
raise KeyError("{} is not in the COMPARTMENT_SHORTLIST! Make sure "
"you typed the ID correctly, if yes, update the "
"shortlist manually.".format(compartment_id))
if len(model.compartments) == 0:
raise KeyError(
"It was not possible to identify the "
"compartment {}, since the "
"model has no compartments at "
"all.".format(COMPARTMENT_SHORTLIST[compartment_id][0])
)
if compartment_id in model.compartments.keys():
return compartment_id
for name in COMPARTMENT_SHORTLIST[compartment_id]:
for c_id, c_name in model.compartments.items():
if c_name.lower() == name:
return c_id
if compartment_id == 'c':
return largest_compartment_id_met(model) | def function[find_compartment_id_in_model, parameter[model, compartment_id]]:
constant[
Identify a model compartment by looking up names in COMPARTMENT_SHORTLIST.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
compartment_id : string
Memote internal compartment identifier used to access compartment name
shortlist to look up potential compartment names.
Returns
-------
string
Compartment identifier in the model corresponding to compartment_id.
]
if compare[name[compartment_id] <ast.NotIn object at 0x7da2590d7190> call[name[COMPARTMENT_SHORTLIST].keys, parameter[]]] begin[:]
<ast.Raise object at 0x7da20c76f040>
if compare[call[name[len], parameter[name[model].compartments]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da20c76cb50>
if compare[name[compartment_id] in call[name[model].compartments.keys, parameter[]]] begin[:]
return[name[compartment_id]]
for taget[name[name]] in starred[call[name[COMPARTMENT_SHORTLIST]][name[compartment_id]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da20c76e1d0>, <ast.Name object at 0x7da20c76f550>]]] in starred[call[name[model].compartments.items, parameter[]]] begin[:]
if compare[call[name[c_name].lower, parameter[]] equal[==] name[name]] begin[:]
return[name[c_id]]
if compare[name[compartment_id] equal[==] constant[c]] begin[:]
return[call[name[largest_compartment_id_met], parameter[name[model]]]] | keyword[def] identifier[find_compartment_id_in_model] ( identifier[model] , identifier[compartment_id] ):
literal[string]
keyword[if] identifier[compartment_id] keyword[not] keyword[in] identifier[COMPARTMENT_SHORTLIST] . identifier[keys] ():
keyword[raise] identifier[KeyError] ( literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[compartment_id] ))
keyword[if] identifier[len] ( identifier[model] . identifier[compartments] )== literal[int] :
keyword[raise] identifier[KeyError] (
literal[string]
literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[COMPARTMENT_SHORTLIST] [ identifier[compartment_id] ][ literal[int] ])
)
keyword[if] identifier[compartment_id] keyword[in] identifier[model] . identifier[compartments] . identifier[keys] ():
keyword[return] identifier[compartment_id]
keyword[for] identifier[name] keyword[in] identifier[COMPARTMENT_SHORTLIST] [ identifier[compartment_id] ]:
keyword[for] identifier[c_id] , identifier[c_name] keyword[in] identifier[model] . identifier[compartments] . identifier[items] ():
keyword[if] identifier[c_name] . identifier[lower] ()== identifier[name] :
keyword[return] identifier[c_id]
keyword[if] identifier[compartment_id] == literal[string] :
keyword[return] identifier[largest_compartment_id_met] ( identifier[model] ) | def find_compartment_id_in_model(model, compartment_id):
"""
Identify a model compartment by looking up names in COMPARTMENT_SHORTLIST.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
compartment_id : string
Memote internal compartment identifier used to access compartment name
shortlist to look up potential compartment names.
Returns
-------
string
Compartment identifier in the model corresponding to compartment_id.
"""
if compartment_id not in COMPARTMENT_SHORTLIST.keys():
raise KeyError('{} is not in the COMPARTMENT_SHORTLIST! Make sure you typed the ID correctly, if yes, update the shortlist manually.'.format(compartment_id)) # depends on [control=['if'], data=['compartment_id']]
if len(model.compartments) == 0:
raise KeyError('It was not possible to identify the compartment {}, since the model has no compartments at all.'.format(COMPARTMENT_SHORTLIST[compartment_id][0])) # depends on [control=['if'], data=[]]
if compartment_id in model.compartments.keys():
return compartment_id # depends on [control=['if'], data=['compartment_id']]
for name in COMPARTMENT_SHORTLIST[compartment_id]:
for (c_id, c_name) in model.compartments.items():
if c_name.lower() == name:
return c_id # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['name']]
if compartment_id == 'c':
return largest_compartment_id_met(model) # depends on [control=['if'], data=[]] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.