code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def fromJSON(value):
    """Deserialize a GPLong object from its JSON string representation."""
    data = json.loads(value)
    result = GPLong()
    # prefer the explicit default over the current value when both exist
    result.value = data['defaultValue'] if 'defaultValue' in data else data['value']
    # the parameter name may appear under either key depending on the producer
    if 'paramName' in data:
        result.paramName = data['paramName']
    elif 'name' in data:
        result.paramName = data['name']
    return result
|
loads the GP object from a JSON string
|
def awscli_defaults(os_type=None):
    """
    Summary:
        Parse, update local awscli config credentials

    Args:
        :os_type (str): operating system name as returned by
            platform.system() ('Linux', 'Windows', 'Java'); detected
            automatically when omitted

    Returns:
        TYPE: dict object containing key, value pairs describing
        os information

    Raises:
        OSError: if the local os environment cannot be determined
    """
    try:
        if os_type is None:
            os_type = platform.system()
        if os_type == 'Windows':
            username = os.getenv('username')
            awscli_credentials = 'C:\\Users\\' + username + '\\.aws\\credentials'
            awscli_config = 'C:\\Users\\' + username + '\\.aws\\config'
        else:
            # 'Java' (Jython) cannot be fully introspected; warn, then fall
            # through to the POSIX-style defaults used for Linux
            if os_type == 'Java':
                logger.warning('Unsupported OS. No information')
            # BUGFIX: previously any os other than Linux/Windows/Java
            # (e.g. 'Darwin') left awscli_credentials undefined, and the
            # return statement below raised NameError
            HOME = os.environ['HOME']
            awscli_credentials = HOME + '/.aws/credentials'
            awscli_config = HOME + '/.aws/config'
        # alternate location honored by the awscli itself, if configured
        alt_credentials = os.getenv('AWS_SHARED_CREDENTIALS_FILE')
    except OSError as e:
        logger.exception(
            '%s: problem determining local os environment %s' %
            (inspect.stack()[0][3], str(e))
        )
        raise e
    return {
        'awscli_defaults': {
            'awscli_credentials': awscli_credentials,
            'awscli_config': awscli_config,
            'alt_credentials': alt_credentials
        }
    }
|
Summary:
Parse, update local awscli config credentials
Args:
:user (str): USERNAME, only required when run on windows os
Returns:
TYPE: dict object containing key, value pairs describing
os information
|
def p_object_literal(self, p):
    """object_literal : LBRACE RBRACE
    | LBRACE property_list RBRACE
    | LBRACE property_list COMMA RBRACE
    """
    # NOTE: the docstring above is the PLY grammar rule for this production;
    # PLY parses it to build the parser tables, so it must not be reworded.
    # len(p) == 3 corresponds to the empty literal '{}' (LBRACE RBRACE only).
    if len(p) == 3:
        p[0] = self.asttypes.Object()
    else:
        # non-empty literal (with or without trailing comma): p[2] holds
        # the parsed property_list
        p[0] = self.asttypes.Object(properties=p[2])
    # record the source position on the AST node for error reporting
    p[0].setpos(p)
|
object_literal : LBRACE RBRACE
| LBRACE property_list RBRACE
| LBRACE property_list COMMA RBRACE
|
def register_message(self, message):
    """Register a MessageDefinition, enforcing registry consistency.

    :param MessageDefinition message: The message definition being added.
    """
    msgid, symbol = message.msgid, message.symbol
    # validate the new id/symbol pair (and all historical symbols)
    # against everything already registered
    self._check_id_and_symbol_consistency(msgid, symbol)
    self._check_symbol(msgid, symbol)
    self._check_msgid(msgid, symbol)
    for _old_id, old_symbol in message.old_names:
        self._check_symbol(msgid, old_symbol)
    # record the definition under its current and historical names
    self._messages_definitions[symbol] = message
    self._register_alternative_name(message, msgid, symbol)
    for old_id, old_symbol in message.old_names:
        self._register_alternative_name(message, old_id, old_symbol)
    self._msgs_by_category[msgid[0]].append(msgid)
|
Register a MessageDefinition with consistency in mind.
:param MessageDefinition message: The message definition being added.
|
def compile_validation_pattern(self, units=None):
    """
    Build the validation machinery for a size value.

    Ensures the passed-in units are valid size units (falling back to all
    possible units when omitted) and returns a tuple of the compiled
    regular expression used for validating and the error message used
    when that validation fails.
    """
    if units is None:
        units = list(self.POSSIBLE_UNITS)
    else:
        for unit in units:
            if unit not in self.POSSIBLE_UNITS:
                raise ValidationError('{} is not a valid unit for a size field'.format(unit))
    pattern = re.compile(r'^(-?\d+)({})$'.format('|'.join(units)))
    # human readable list of accepted unit suffixes, '%' escaped for the
    # later %-interpolation of invalid_message
    separator = ' %s ' % ugettext("or")
    endings = separator.join("'%s'" % unit.replace('%', '%%') for unit in units)
    params = {
        'label': '%(label)s',
        'value': '%(value)s',
        'field': '%(field)s',
        'endings': endings,
    }
    return pattern, self.invalid_message % params
|
Assure that passed in units are valid size units, or if missing, use all possible units.
Return a tuple with a regular expression to be used for validating and an error message
in case this validation failed.
|
def get_memory(self,
               shutit_pexpect_child=None,
               note=None):
    """Returns memory available for use in k as an int"""
    shutit_global.shutit_global_object.yield_to_draw()
    # fall back to the current session's child when none was supplied
    child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
    session = self.get_shutit_pexpect_session_from_child(child)
    return session.get_memory(note=note)
|
Returns memory available for use in k as an int
|
def _rm_get_reference_coords_from_header(parts):
"""
extract the reference (genomic sequence match) coordinates of a repeat
occurrence from a repeatmakser header line. An example header line is::
239 29.42 1.92 0.97 chr1 11 17 (41) C XX#YY (74) 104 1 m_b1s502i1 4
the genomic start and end are always at positions 5 and 6 resepctively. In
the repeatmasker format, the end is inclusive, but in pyokit end coordinates
are exclusive, so we adjust it when we parse here.
:param parts: the header line, as a tokenized list.
:return: tuple of (start, end)
"""
s = int(parts[5])
e = int(parts[6]) + 1
if (s >= e):
raise AlignmentIteratorError("invalid repeatmakser header: " +
" ".join(parts))
return (s, e)
|
extract the reference (genomic sequence match) coordinates of a repeat
occurrence from a repeatmasker header line. An example header line is::
239 29.42 1.92 0.97 chr1 11 17 (41) C XX#YY (74) 104 1 m_b1s502i1 4
the genomic start and end are always at positions 5 and 6 respectively. In
the repeatmasker format, the end is inclusive, but in pyokit end coordinates
are exclusive, so we adjust it when we parse here.
:param parts: the header line, as a tokenized list.
:return: tuple of (start, end)
|
async def xdel(self, name: str, stream_id: str) -> int:
    """
    Remove an entry from the middle of a stream, just by ID.

    [NOTICE] Not officially released yet
    [NOTICE] In the current implementation, memory is not really
    reclaimed until a macro node is completely empty, so you should
    not abuse this feature.

    :param name: name of the stream
    :param stream_id: id of the options appended to the stream.
    """
    command_args = ('XDEL', name, stream_id)
    return await self.execute_command(*command_args)
|
[NOTICE] Not officially released yet
[NOTICE] In the current implementation, memory is not
really reclaimed until a macro node is completely empty,
so you should not abuse this feature.
remove items from the middle of a stream, just by ID.
:param name: name of the stream
:param stream_id: id of the options appended to the stream.
|
def on_finish(self, exc=None):
    """Used to initiate the garbage collection"""
    super(GarbageCollector, self).on_finish(exc)
    self._cycles_left -= 1
    # only collect once every `collection_cycle` finishes
    if self._cycles_left > 0:
        return
    evicted = gc.collect()
    self._cycles_left = self.collection_cycle
    LOGGER.debug('garbage collection run, %d objects evicted', evicted)
|
Used to initiate the garbage collection
|
def _execute(self, command, stdin=None, stdout=subprocess.PIPE):
"""Executes the specified command relative to the repository root.
Returns a tuple containing the return code and the process output.
"""
process = subprocess.Popen(command, shell=True, cwd=self.root_path, stdin=stdin, stdout=stdout)
return (process.wait(), None if stdout is not subprocess.PIPE else process.communicate()[0].decode('utf-8'))
|
Executes the specified command relative to the repository root.
Returns a tuple containing the return code and the process output.
|
def clone(self, substitutions, **kwargs):
    """
    Clone a DAG.
    """
    # expand this DAG, apply the substitutions, then clone the result
    expanded = self.explain(**kwargs)
    expanded.substitutions.update(substitutions)
    copied = expanded.clone(ignore=self.ignore)
    with_edges = self.add_edges(copied)
    return self.update_nodes(with_edges)
|
Clone a DAG.
|
def cmd_hasher(f, algorithm):
    """Compute various hashes for the input data, that can be a file or a stream.
    Example:
    \b
    $ habu.hasher README.rst
    md5 992a833cd162047daaa6a236b8ac15ae README.rst
    ripemd160 0566f9141e65e57cae93e0e3b70d1d8c2ccb0623 README.rst
    sha1 d7dbfd2c5e2828eb22f776550c826e4166526253 README.rst
    sha256 6bb22d927e1b6307ced616821a1877b6cc35e... README.rst
    sha512 8743f3eb12a11cf3edcc16e400fb14d599b4a... README.rst
    whirlpool 96bcc083242e796992c0f3462f330811f9e8c... README.rst
    You can also specify which algorithm to use. In such case, the output is
    only the value of the calculated hash:
    \b
    $ habu.hasher -a md5 README.rst
    992a833cd162047daaa6a236b8ac15ae README.rst
    """
    # NOTE: the docstring above doubles as the click help text, so it is
    # kept exactly as-is.
    content = f.read()
    if not content:
        print("Empty file or string!")
        return 1
    if algorithm:
        # single algorithm requested: print only its digest
        digest = hasher(content, algorithm)[algorithm]
        print(digest, f.name)
    else:
        # no algorithm requested: print a table of all supported digests
        for algo_name, digest in hasher(content).items():
            print("{:<12} {} {}".format(algo_name, digest, f.name))
|
Compute various hashes for the input data, that can be a file or a stream.
Example:
\b
$ habu.hasher README.rst
md5 992a833cd162047daaa6a236b8ac15ae README.rst
ripemd160 0566f9141e65e57cae93e0e3b70d1d8c2ccb0623 README.rst
sha1 d7dbfd2c5e2828eb22f776550c826e4166526253 README.rst
sha256 6bb22d927e1b6307ced616821a1877b6cc35e... README.rst
sha512 8743f3eb12a11cf3edcc16e400fb14d599b4a... README.rst
whirlpool 96bcc083242e796992c0f3462f330811f9e8c... README.rst
You can also specify which algorithm to use. In such case, the output is
only the value of the calculated hash:
\b
$ habu.hasher -a md5 README.rst
992a833cd162047daaa6a236b8ac15ae README.rst
|
def moderate(self, environ, request, id, action, key):
    """Moderate a comment (activate, edit or delete) via a signed link.

    GET renders a small confirmation page; POST applies `action` to the
    comment whose id is recovered from the signed `key`.
    """
    try:
        id = self.isso.unsign(key, max_age=2**32)
    except (BadSignature, SignatureExpired):
        raise Forbidden
    item = self.comments.get(id)
    # BUGFIX: check for a missing comment *before* dereferencing it;
    # previously item['tid'] raised TypeError instead of returning 404
    if item is None:
        raise NotFound
    thread = self.threads.get(item['tid'])
    link = local("origin") + thread["uri"] + "#isso-%i" % item["id"]
    if request.method == "GET":
        modal = (
            "<!DOCTYPE html>"
            "<html>"
            "<head>"
            "<script>"
            " if (confirm('%s: Are you sure?')) {"
            " xhr = new XMLHttpRequest;"
            " xhr.open('POST', window.location.href);"
            " xhr.send(null);"
            " xhr.onload = function() {"
            " window.location.href = %s;"
            " };"
            " }"
            "</script>" % (action.capitalize(), json.dumps(link)))
        return Response(modal, 200, content_type="text/html")
    if action == "activate":
        if item['mode'] == 1:
            return Response("Already activated", 200)
        with self.isso.lock:
            self.comments.activate(id)
        self.signal("comments.activate", thread, item)
        return Response("Yo", 200)
    elif action == "edit":
        data = request.get_json()
        with self.isso.lock:
            rv = self.comments.update(id, data)
        # FIX: loop variable renamed so it no longer shadows the `key`
        # parameter (the signed moderation key)
        for field in set(rv.keys()) - API.FIELDS:
            rv.pop(field)
        self.signal("comments.edit", rv)
        return JSON(rv, 200)
    else:
        with self.isso.lock:
            self.comments.delete(id)
        self.cache.delete(
            'hash', (item['email'] or item['remote_addr']).encode('utf-8'))
        self.signal("comments.delete", id)
        return Response("Yo", 200)
    # NOTE: the string below is unreachable at runtime; it is apidoc
    # source documentation harvested by the doc generator, kept verbatim.
    """
    @api {get} / get comments
    @apiGroup Thread
    @apiDescription Queries the comments of a thread.
    @apiParam {string} uri
    The URI of thread to get the comments from.
    @apiParam {number} [parent]
    Return only comments that are children of the comment with the provided ID.
    @apiUse plainParam
    @apiParam {number} [limit]
    The maximum number of returned top-level comments. Omit for unlimited results.
    @apiParam {number} [nested_limit]
    The maximum number of returned nested comments per commint. Omit for unlimited results.
    @apiParam {number} [after]
    Includes only comments were added after the provided UNIX timestamp.
    @apiSuccess {number} total_replies
    The number of replies if the `limit` parameter was not set. If `after` is set to `X`, this is the number of comments that were created after `X`. So setting `after` may change this value!
    @apiSuccess {Object[]} replies
    The list of comments. Each comment also has the `total_replies`, `replies`, `id` and `hidden_replies` properties to represent nested comments.
    @apiSuccess {number} id
    Id of the comment `replies` is the list of replies of. `null` for the list of toplevel comments.
    @apiSuccess {number} hidden_replies
    The number of comments that were ommited from the results because of the `limit` request parameter. Usually, this will be `total_replies` - `limit`.
    @apiExample {curl} Get 2 comments with 5 responses:
    curl 'https://comments.example.com/?uri=/thread/&limit=2&nested_limit=5'
    @apiSuccessExample Example reponse:
    {
    "total_replies": 14,
    "replies": [
    {
    "website": null,
    "author": null,
    "parent": null,
    "created": 1464818460.732863,
    "text": "<p>Hello, World!</p>",
    "total_replies": 1,
    "hidden_replies": 0,
    "dislikes": 2,
    "modified": null,
    "mode": 1,
    "replies": [
    {
    "website": null,
    "author": null,
    "parent": 1,
    "created": 1464818460.769638,
    "text": "<p>Hi, now some Markdown: <em>Italic</em>, <strong>bold</strong>, <code>monospace</code>.</p>",
    "dislikes": 0,
    "modified": null,
    "mode": 1,
    "hash": "2af4e1a6c96a",
    "id": 2,
    "likes": 2
    }
    ],
    "hash": "1cb6cc0309a2",
    "id": 1,
    "likes": 2
    },
    {
    "website": null,
    "author": null,
    "parent": null,
    "created": 1464818460.80574,
    "text": "<p>Lorem ipsum dolor sit amet, consectetur adipisicing elit. Accusantium at commodi cum deserunt dolore, error fugiat harum incidunt, ipsa ipsum mollitia nam provident rerum sapiente suscipit tempora vitae? Est, qui?</p>",
    "total_replies": 0,
    "hidden_replies": 0,
    "dislikes": 0,
    "modified": null,
    "mode": 1,
    "replies": [],
    "hash": "1cb6cc0309a2",
    "id": 3,
    "likes": 0
    },
    "id": null,
    "hidden_replies": 12
    }
    """
|
@api {get} / get comments
@apiGroup Thread
@apiDescription Queries the comments of a thread.
@apiParam {string} uri
The URI of thread to get the comments from.
@apiParam {number} [parent]
Return only comments that are children of the comment with the provided ID.
@apiUse plainParam
@apiParam {number} [limit]
The maximum number of returned top-level comments. Omit for unlimited results.
@apiParam {number} [nested_limit]
The maximum number of returned nested comments per commint. Omit for unlimited results.
@apiParam {number} [after]
Includes only comments were added after the provided UNIX timestamp.
@apiSuccess {number} total_replies
The number of replies if the `limit` parameter was not set. If `after` is set to `X`, this is the number of comments that were created after `X`. So setting `after` may change this value!
@apiSuccess {Object[]} replies
The list of comments. Each comment also has the `total_replies`, `replies`, `id` and `hidden_replies` properties to represent nested comments.
@apiSuccess {number} id
Id of the comment `replies` is the list of replies of. `null` for the list of toplevel comments.
@apiSuccess {number} hidden_replies
The number of comments that were ommited from the results because of the `limit` request parameter. Usually, this will be `total_replies` - `limit`.
@apiExample {curl} Get 2 comments with 5 responses:
curl 'https://comments.example.com/?uri=/thread/&limit=2&nested_limit=5'
@apiSuccessExample Example reponse:
{
"total_replies": 14,
"replies": [
{
"website": null,
"author": null,
"parent": null,
"created": 1464818460.732863,
"text": "<p>Hello, World!</p>",
"total_replies": 1,
"hidden_replies": 0,
"dislikes": 2,
"modified": null,
"mode": 1,
"replies": [
{
"website": null,
"author": null,
"parent": 1,
"created": 1464818460.769638,
"text": "<p>Hi, now some Markdown: <em>Italic</em>, <strong>bold</strong>, <code>monospace</code>.</p>",
"dislikes": 0,
"modified": null,
"mode": 1,
"hash": "2af4e1a6c96a",
"id": 2,
"likes": 2
}
],
"hash": "1cb6cc0309a2",
"id": 1,
"likes": 2
},
{
"website": null,
"author": null,
"parent": null,
"created": 1464818460.80574,
"text": "<p>Lorem ipsum dolor sit amet, consectetur adipisicing elit. Accusantium at commodi cum deserunt dolore, error fugiat harum incidunt, ipsa ipsum mollitia nam provident rerum sapiente suscipit tempora vitae? Est, qui?</p>",
"total_replies": 0,
"hidden_replies": 0,
"dislikes": 0,
"modified": null,
"mode": 1,
"replies": [],
"hash": "1cb6cc0309a2",
"id": 3,
"likes": 0
},
"id": null,
"hidden_replies": 12
}
|
def tf(self, term):
    r"""Return the term frequency (tf) of a single word.

    Parameters
    ----------
    term : str
        The term for which to calculate tf

    Returns
    -------
    float
        The term frequency (tf)

    Raises
    ------
    ValueError
        tf can only calculate the frequency of individual words

    Examples
    --------
    >>> tqbf = 'The quick brown fox jumped over the lazy dog.\n'
    >>> tqbf += 'And then it slept.\n And the dog ran off.'
    >>> ngcorp = NGramCorpus(Corpus(tqbf))
    >>> NGramCorpus(Corpus(tqbf)).tf('the')
    1.3010299956639813
    >>> NGramCorpus(Corpus(tqbf)).tf('fox')
    1.0
    """
    # multi-word input is not a term; reject it up front
    if ' ' in term:
        raise ValueError(
            'tf can only calculate the term frequency of individual words'
        )
    count = self.get_count(term)
    # log-scaled frequency: 1 + log10(count), or 0.0 for unseen terms
    return 1 + log10(count) if count else 0.0
|
r"""Return term frequency.
Parameters
----------
term : str
The term for which to calculate tf
Returns
-------
float
The term frequency (tf)
Raises
------
ValueError
tf can only calculate the frequency of individual words
Examples
--------
>>> tqbf = 'The quick brown fox jumped over the lazy dog.\n'
>>> tqbf += 'And then it slept.\n And the dog ran off.'
>>> ngcorp = NGramCorpus(Corpus(tqbf))
>>> NGramCorpus(Corpus(tqbf)).tf('the')
1.3010299956639813
>>> NGramCorpus(Corpus(tqbf)).tf('fox')
1.0
|
def get_hardware_source_by_id(self, hardware_source_id: str, version: str):
    """Return the hardware source API matching the hardware_source_id and version.

    .. versionadded:: 1.0
    Scriptable: Yes
    """
    actual_version = "1.0.0"
    # reject callers asking for a newer API than this implementation provides
    if Utility.compare_versions(version, actual_version) > 0:
        raise NotImplementedError("Hardware API requested version %s is greater than %s." % (version, actual_version))
    manager = HardwareSourceModule.HardwareSourceManager()
    source = manager.get_hardware_source_for_hardware_source_id(hardware_source_id)
    if source:
        return HardwareSource(source)
    return None
|
Return the hardware source API matching the hardware_source_id and version.
.. versionadded:: 1.0
Scriptable: Yes
|
def create_cookie(name, value, **kwargs):
    """Make a cookie from underspecified parameters.

    By default, the pair of `name` and `value` will be set for the domain ''
    and sent on every request (this is sometimes called a "supercookie").
    """
    spec = dict(
        version=0,
        name=name,
        value=value,
        port=None,
        domain='',
        path='/',
        secure=False,
        expires=None,
        discard=True,
        comment=None,
        comment_url=None,
        rest={'HttpOnly': None},
        rfc2109=False,
    )
    unknown = set(kwargs) - set(spec)
    if unknown:
        raise TypeError(
            'create_cookie() got unexpected keyword arguments: %s' % list(unknown))
    spec.update(kwargs)
    # derive the *_specified flags cookielib.Cookie expects from final values
    spec['port_specified'] = bool(spec['port'])
    spec['domain_specified'] = bool(spec['domain'])
    spec['domain_initial_dot'] = spec['domain'].startswith('.')
    spec['path_specified'] = bool(spec['path'])
    return cookielib.Cookie(**spec)
|
Make a cookie from underspecified parameters.
By default, the pair of `name` and `value` will be set for the domain ''
and sent on every request (this is sometimes called a "supercookie").
|
def run(self, args):
    """
    Give the user with user_full_name the auth_role permissions on the remote project with project_name.

    :param args Namespace arguments parsed from the command line
    """
    # exactly one of email/username is supplied on the command line;
    # the other is None
    email = args.email
    username = args.username
    auth_role = args.auth_role  # type of permission(project_admin)
    project = self.fetch_project(args, must_exist=True, include_children=False)
    user = self.remote_store.lookup_or_register_user_by_email_or_username(email, username)
    self.remote_store.set_user_project_permission(project, user, auth_role)
    print(u'Gave user {} {} permissions for project {}.'.format(
        user.full_name, auth_role, project.name))
|
Give the user with user_full_name the auth_role permissions on the remote project with project_name.
:param args Namespace arguments parsed from the command line
|
def unwire(awsclient, events, lambda_name, alias_name=ALIAS_NAME):
    """Unwire a list of event from an AWS Lambda function.

    'events' is a list of dictionaries, where the dict must contains the
    'schedule' of the event as string, and an optional 'name' and 'description'.

    :param awsclient:
    :param events: list of events
    :param lambda_name:
    :param alias_name:
    :return: exit_code
    """
    if not lambda_exists(awsclient, lambda_name):
        log.error(colored.red('The function you try to wire up doesn\'t ' +
                              'exist... Bailing out...'))
        return 1
    lambda_client = awsclient.get_client('lambda')
    function_cfg = lambda_client.get_function(FunctionName=lambda_name)
    lambda_arn = lambda_client.get_alias(FunctionName=lambda_name,
                                         Name=alias_name)['AliasArn']
    log.info('UN-wiring lambda_arn %s ' % lambda_arn)
    # TODO why load the policies here?
    '''
    policies = None
    try:
        result = lambda_client.get_policy(FunctionName=lambda_name,
                                          Qualifier=alias_name)
        policies = json.loads(result['Policy'])
    except ClientError as e:
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            log.warn("Permission policies not found")
        else:
            raise e
    '''
    if function_cfg is not None:
        # detach every configured event source from the aliased function
        for event in events:
            _remove_event_source(awsclient, event['event_source'], lambda_arn)
    return 0
|
Unwire a list of event from an AWS Lambda function.
'events' is a list of dictionaries, where the dict must contains the
'schedule' of the event as string, and an optional 'name' and 'description'.
:param awsclient:
:param events: list of events
:param lambda_name:
:param alias_name:
:return: exit_code
|
def search_data_std(Channel, RunNos, RepeatNos, directoryPath='.'):
    """
    Lets you find multiple datasets at once assuming they have a
    filename which contains a pattern of the form:
    CH<ChannelNo>_RUN00...<RunNo>_REPEAT00...<RepeatNo>

    Parameters
    ----------
    Channel : int
        The channel you want to load
    RunNos : sequence
        Sequence of run numbers you want to load
    RepeatNos : sequence
        Sequence of repeat numbers you want to load
    directoryPath : string, optional
        The path to the directory housing the data
        The default is the current directory

    Returns
    -------
    Data_filepaths : list
        A list containing the filepaths to the matching files
    """
    all_files = glob('{}/*'.format(directoryPath))
    # narrow successively: channel substring, then run numbers, then repeats
    channel_files = [f for f in all_files if 'CH{}'.format(Channel) in f]
    run_files = []
    for run_no in RunNos:
        run_files.extend(
            _fnmatch.filter(channel_files, '*RUN*0{}_*'.format(run_no)))
    matches = []
    for repeat_no in RepeatNos:
        matches.extend(
            _fnmatch.filter(run_files, '*REPEAT*0{}.*'.format(repeat_no)))
    return matches
|
Lets you find multiple datasets at once assuming they have a
filename which contains a pattern of the form:
CH<ChannelNo>_RUN00...<RunNo>_REPEAT00...<RepeatNo>
Parameters
----------
Channel : int
The channel you want to load
RunNos : sequence
Sequence of run numbers you want to load
RepeatNos : sequence
Sequence of repeat numbers you want to load
directoryPath : string, optional
The path to the directory housing the data
The default is the current directory
Returns
-------
Data_filepaths : list
A list containing the filepaths to the matching files
|
def run(self):
    """
    Run the plugin.

    Collects the per-architecture component lists produced by the worker
    builds and verifies that every RPM component (keyed by type and name)
    has a consistent version across all architectures.

    :raises ValueError: if there is nothing to compare, a component type
        is unsupported, or any component mismatches between workers.
    """
    if self.workflow.builder.base_from_scratch:
        self.log.info("Skipping comparing components: unsupported for FROM-scratch images")
        return
    worker_metadatas = self.workflow.postbuild_results.get(PLUGIN_FETCH_WORKER_METADATA_KEY)
    comp_list = self.get_component_list_from_workers(worker_metadatas)
    if not comp_list:
        raise ValueError("No components to compare")
    # packages explicitly exempted from cross-arch comparison
    package_comparison_exceptions = get_package_comparison_exceptions(self.workflow)
    # master compare list
    master_comp = {}
    # The basic strategy is to start with empty lists and add new component
    # versions as we find them. Upon next iteration, we should notice
    # duplicates and be able to compare them. If the match fails, we raise
    # an exception. If the component name does not exist, assume it was an
    # arch dependency, add it to list and continue. By the time we get to
    # the last arch, we should have every possible component in the master
    # list to compare with.
    # Keep everything separated by component type
    failed_components = set()
    for components in comp_list:
        for component in components:
            t = component['type']
            name = component['name']
            if name in package_comparison_exceptions:
                self.log.info("Ignoring comparison of package %s", name)
                continue
            if t not in SUPPORTED_TYPES:
                raise ValueError("Type %s not supported" % t)
            if name in failed_components:
                # report a failed component only once
                continue
            identifier = (t, name)
            if identifier not in master_comp:
                # first sighting of this component; nothing to compare yet
                master_comp[identifier] = component
                continue
            if t == T_RPM:
                mc = master_comp[identifier]
                try:
                    self.rpm_compare(mc, component)
                except ValueError as ex:
                    # mismatch: log every worker's copy of this component
                    self.log.debug("Mismatch details: %s", ex)
                    self.log.warning(
                        "Comparison mismatch for component %s:", name)
                    # use all components to provide complete list
                    for comp in filter_components_by_name(name, comp_list):
                        self.log_rpm_component(comp)
                    failed_components.add(name)
    if failed_components:
        raise ValueError(
            "Failed component comparison for components: "
            "{components}".format(
                components=', '.join(sorted(failed_components))
            )
        )
|
Run the plugin.
|
def store_psm_protein_relations(fn, header, pgdb, proteins):
    """Reads PSMs from file, extracts their proteins and peptides and passes
    them to a database backend in chunks.

    :param fn: path to the PSM table (TSV) file
    :param header: parsed header for the TSV file
    :param pgdb: database backend exposing store_peptides_proteins and
        index_protein_peptides
    :param proteins: collection of protein accessions; extracted proteins
        are restricted to members of it
    :return: OrderedDict mapping psm_id -> list of matched protein accessions
    """
    # TODO do we need an OrderedDict or is regular dict enough?
    # Sorting for psm_id useful?
    allpsms = OrderedDict()
    last_id, psmids_to_store = None, set()
    store_soon = False
    for psm in tsvreader.generate_tsv_psms(fn, header):
        psm_id, prots = tsvreader.get_pepproteins(psm)
        # keep only proteins present in the provided lookup
        prots = [x for x in prots if x in proteins]
        try:
            # In case the PSMs are presented unrolled
            allpsms[psm_id].extend(prots)
        except KeyError:
            allpsms[psm_id] = prots
        if len(psmids_to_store) % DB_STORE_CHUNK == 0:
            store_soon = True
        # flush only at a PSM-id boundary so one PSM's proteins are never
        # split across two store calls
        if store_soon and last_id != psm_id:
            pgdb.store_peptides_proteins(allpsms, psmids_to_store)
            store_soon = False
            psmids_to_store = set()
        psmids_to_store.add(psm_id)
        last_id = psm_id
    # store any remainder that did not fill a complete chunk
    if len(psmids_to_store) > 0:
        pgdb.store_peptides_proteins(allpsms, psmids_to_store)
    pgdb.index_protein_peptides()
    return allpsms
|
Reads PSMs from file, extracts their proteins and peptides and passes
them to a database backend in chunks.
|
def cli(url, user_agent):
    """
    Archives the provided URL using archive.is.
    """
    # only forward user_agent when one was actually supplied
    capture_kwargs = {'user_agent': user_agent} if user_agent else {}
    archived = capture(url, **capture_kwargs)
    click.echo(archived)
|
Archives the provided URL using archive.is.
|
def close(self):
    '''
    Closes the CDF Class.

    1. If compression was set, this is where the compressed file is
       written.
    2. If a checksum is needed, this will place the checksum at the end
       of the file.
    '''
    if self.compressed_file is None:
        # uncompressed case: patch the end-of-file offset into the GDR
        # of the file in place
        with self.path.open('rb+') as f:
            f.seek(0, 2)
            eof = f.tell()
            # GDR field at gdr_head+36 (8 bytes) records the EOF position
            self._update_offset_value(f, self.gdr_head+36, 8, eof)
            if self.checksum:
                # checksum = md5 digest appended at the very end of the file
                f.write(self._md5_compute(f))
        return
    # %%
    # compressed case: finalize the scratch file, then write the CDF magic
    # numbers and the compressed CCR into the target file
    with self.path.open('rb+') as f:
        f.seek(0, 2)
        eof = f.tell()
        self._update_offset_value(f, self.gdr_head+36, 8, eof)
        with self.compressed_file.open('wb+') as g:
            g.write(bytearray.fromhex(CDF.V3magicNUMBER_1))
            g.write(bytearray.fromhex(CDF.V3magicNUMBER_2c))
            self._write_ccr(f, g, self.compression)
            if self.checksum:
                g.seek(0, 2)
                g.write(self._md5_compute(g))
    # replace the uncompressed file with the compressed one
    self.path.unlink()  # NOTE: for Windows this is necessary
    self.compressed_file.rename(self.path)
|
Closes the CDF Class.
1. If compression was set, this is where the compressed file is
written.
2. If a checksum is needed, this will place the checksum at the end
of the file.
|
def get_connect_redirect_url(self, request, socialaccount):
    """
    Returns the default URL to redirect to after successfully
    connecting a social account.
    """
    # only authenticated users can reach the connections page
    assert request.user.is_authenticated
    return reverse('socialaccount_connections')
|
Returns the default URL to redirect to after successfully
connecting a social account.
|
def _release_lock(self, identifier):
    '''Release the lock.

    This requires you to actually have owned the lock. On return
    you definitely do not own it, but if somebody else owned it
    before calling this function, they still do.

    :param str identifier: the session lock identifier
    :return: :const:`True` if you actually did own the lock,
        :const:`False` if you didn't
    '''
    conn = redis.Redis(connection_pool=self.pool)
    # The compare-and-delete must be atomic, so it runs as a server-side
    # Lua script: delete the key only if it still holds our identifier.
    script = conn.register_script('''
    if redis.call("get", KEYS[1]) == ARGV[1]
    then
        return redis.call("del", KEYS[1])
    else
        return -1
    end
    ''')
    # DEL returns 1 when our key was deleted; -1 means someone else owns it
    num_keys_deleted = script(keys=[self._lock_name],
                              args=[identifier])
    return (num_keys_deleted == 1)
|
Release the lock.
This requires you to actually have owned the lock. On return
you definitely do not own it, but if somebody else owned it
before calling this function, they still do.
:param str identifier: the session lock identifier
:return: :const:`True` if you actually did own the lock,
:const:`False` if you didn't
|
def get_default_filepath(cls):
    """Get the default filepath for the configuration file."""
    # need at least one candidate filename and one search directory
    if not (cls.config_files and cls.config_searchpath):
        return None
    return os.path.join(cls.config_searchpath[0], cls.config_files[0])
|
Get the default filepath for the configuration file.
|
def GetRunlevelsNonLSB(states):
  """Accepts a string and returns a set of numeric LSB runlevel strings."""
  if not states:
    return set()
  # numeric runlevels 0-6 map to themselves
  convert_table = {str(level): str(level) for level in range(7)}
  # SysV, Gentoo, Solaris, HP-UX all allow an alpha variant
  # for single user. https://en.wikipedia.org/wiki/Runlevel
  convert_table.update({"S": "1", "s": "1"})
  _LogInvalidRunLevels(states, convert_table)
  return {convert_table[s] for s in states.split() if s in convert_table}
|
Accepts a string and returns a list of strings of numeric LSB runlevels.
|
def send_mass_text(self, group_or_users, content,
                   is_to_all=False, preview=False,
                   send_ignore_reprint=0, client_msg_id=None):
    """
    Send a mass text message.

    Details:
    https://mp.weixin.qq.com/wiki?id=mp1481187827_i0l21

    :param group_or_users: an integer selects a group to broadcast to; a
        list/tuple is treated as a list of OpenIDs to send to. When
        is_to_all is True, pass None to send to every follower.
    :param content: body of the text message
    :param is_to_all: when True the message is broadcast to all users;
        when False it is sent to the group selected by group_or_users
    :type is_to_all: bool
    :param preview: whether to send a preview; group_or_users should then
        be a single OpenID string
    :type preview: bool
    :param send_ignore_reprint: whether to continue the mass send when an
        article is judged to be a reprint. When set to 1, the send
        continues if the original author permits reprinting; when set to
        0 (the default), the send is stopped.
    :type send_ignore_reprint: int
    :param client_msg_id: developer-side mass-send msgid, at most 64 bytes
    :type client_msg_id: str
    :return: the decoded JSON response
    """
    payload = {'text': {'content': content}}
    return self._send_mass_message(
        group_or_users,
        'text',
        payload,
        is_to_all,
        preview,
        send_ignore_reprint,
        client_msg_id,
    )
|
群发文本消息
详情请参考
https://mp.weixin.qq.com/wiki?id=mp1481187827_i0l21
:param group_or_users: 值为整型数字时为按分组群发,值为列表/元组时为按 OpenID 列表群发
当 is_to_all 为 True 时,传入 None 即对所有用户发送。
:param content: 消息正文
:param is_to_all: 用于设定是否向全部用户发送,值为true或false,选择true该消息群发给所有用户
选择false可根据group_id发送给指定群组的用户
:type is_to_all: bool
:param preview: 是否发送预览,此时 group_or_users 参数应为一个openid字符串
:type preview: bool
:param send_ignore_reprint: 指定待群发的文章被判定为转载时,是否继续群发。
当 send_ignore_reprint 参数设置为1时,文章被判定为转载时,且原创文允许转载时,将继续进行群发操作。
当 send_ignore_reprint 参数设置为0时,文章被判定为转载时,将停止群发操作。
send_ignore_reprint 默认为0。
:type send_ignore_reprint: int
:param client_msg_id: 开发者侧群发 msgid,长度限制 64 字节
:type client_msg_id: str
:return: 返回的 JSON 数据包
|
def get_oxi_state_decorated_structure(self, structure):
    """
    Get an oxidation state decorated structure. This currently works
    fully only for ordered structures.

    Args:
        structure: Structure to analyze

    Returns:
        A modified structure that is oxidation state decorated.

    Raises:
        ValueError if the valences cannot be determined.
    """
    decorated = structure.copy()
    if decorated.is_ordered:
        decorated.add_oxidation_state_by_site(self.get_valences(decorated))
        return decorated
    # disordered: assign oxidation states per site fraction instead
    valences = self.get_valences(decorated)
    return add_oxidation_state_by_site_fraction(decorated, valences)
|
Get an oxidation state decorated structure. This currently works only
for ordered structures only.
Args:
structure: Structure to analyze
Returns:
A modified structure that is oxidation state decorated.
Raises:
ValueError if the valences cannot be determined.
|
def get_stats(self):
    """
    Get repository descriptive stats

    :Returns:
        #. numberOfDirectories (integer): Number of directories in repository
        #. numberOfFiles (integer): Number of files in repository
    """
    if self.__path is None:
        # Repository not initialised yet.
        return 0, 0
    num_dirs = 0
    num_files = 0
    for entry in self.get_repository_state():
        name = list(entry)[0]
        if name == '':
            # Skip the repository root entry.
            continue
        info = entry[name]
        if info.get('pyrepfileinfo', False):
            num_files += 1
        elif info.get('pyrepdirinfo', False):
            num_dirs += 1
        else:
            raise Exception('Not sure what to do next. Please report issue')
    return num_dirs, num_files
|
Get repository descriptive stats
:Returns:
#. numberOfDirectories (integer): Number of directories in repository
#. numberOfFiles (integer): Number of files in repository
|
def set(self, name, value):
    """Set an option value.

    Args:
        name (str): The name of the option.
        value: The value to set the option to.

    Raises:
        TypeError: If the value is not a string or appropriate native type.
        ValueError: If the value is a string but cannot be coerced.

    If the name is not registered a new option will be created using the
    option generator.
    """
    if name not in self._options:
        # Unknown option: create one on the fly from the generator.
        self.register(name, self._generator())
    option = self._options[name]
    return option.__set__(self, value)
|
Set an option value.
Args:
name (str): The name of the option.
value: The value to set the option to.
Raises:
TypeError: If the value is not a string or appropriate native type.
ValueError: If the value is a string but cannot be coerced.
If the name is not registered a new option will be created using the
option generator.
|
def _compute_ticks(self, element, edges, widths, lims):
"""
Compute the ticks either as cyclic values in degrees or as roughly
evenly spaced bin centers.
"""
if self.xticks is None or not isinstance(self.xticks, int):
return None
if self.cyclic:
x0, x1, _, _ = lims
xvals = np.linspace(x0, x1, self.xticks)
labels = ["%.0f" % np.rad2deg(x) + '\N{DEGREE SIGN}' for x in xvals]
elif self.xticks:
dim = element.get_dimension(0)
inds = np.linspace(0, len(edges), self.xticks, dtype=np.int)
edges = list(edges) + [edges[-1] + widths[-1]]
xvals = [edges[i] for i in inds]
labels = [dim.pprint_value(v) for v in xvals]
return [xvals, labels]
|
Compute the ticks either as cyclic values in degrees or as roughly
evenly spaced bin centers.
|
def invalidate(self, key):
    '''Clear an item from the cache'''
    cache_path = self.path(self.xform_key(key))
    try:
        LOG.debug('invalidate %s (%s)', key, cache_path)
        cache_path.unlink()
    except OSError:
        # Entry already gone (or not removable): best-effort, ignore.
        pass
|
Clear an item from the cache
|
def get_bool(self, key, default=None):
    """
    Same as :meth:`dict.get`, but the value is converted to a bool.

    The boolean value is considered, respectively, :obj:`True` or
    :obj:`False` if the string is equal, ignoring case, to
    ``'true'`` or ``'false'``.
    """
    raw = self.get(key, default)
    if raw == default:
        return default
    normalized = raw.strip().lower()
    if normalized == 'true':
        return True
    if normalized == 'false':
        return False
    if default is None:
        # No fallback available: unparseable strings are an error.
        raise RuntimeError("invalid bool string: %s" % normalized)
    return default
|
Same as :meth:`dict.get`, but the value is converted to a bool.
The boolean value is considered, respectively, :obj:`True` or
:obj:`False` if the string is equal, ignoring case, to
``'true'`` or ``'false'``.
|
def _remove_string_from_commastring(self, field, string):
# type: (str, str) -> bool
"""Remove a string from a comma separated list of strings
Args:
field (str): Field containing comma separated list
string (str): String to remove
Returns:
bool: True if string removed or False if not
"""
commastring = self.data.get(field, '')
if string in commastring:
self.data[field] = commastring.replace(string, '')
return True
return False
|
Remove a string from a comma separated list of strings
Args:
field (str): Field containing comma separated list
string (str): String to remove
Returns:
bool: True if string removed or False if not
|
def prep_db_parallel(samples, parallel_fn):
    """Prepares gemini databases in parallel, handling jointly called populations.

    Builds one database-preparation job per (batch name, caller) pair plus one
    per standalone sample, runs them via ``parallel_fn`` and re-attaches the
    resulting database files to each sample's variant entries.
    """
    # Split samples into jointly-called batches, standalone samples, the
    # (batch name, data) pairs whose results must be re-attached, and extras
    # that are passed through untouched.
    batch_groups, singles, out_retrieve, extras = _group_by_batches(samples, _has_variant_calls)
    to_process = []
    has_batches = False
    for (name, caller), info in batch_groups.items():
        fnames = [x[0] for x in info]
        # Job shape: [file names, (batch id, caller, is_batch), sample data, extras]
        to_process.append([fnames, (str(name), caller, True), [x[1] for x in info], extras])
        has_batches = True
    for name, caller, data, fname in singles:
        to_process.append([[fname], (str(name), caller, False), [data], extras])
    # NOTE(review): has_batches is set but never used in this function.
    output = parallel_fn("prep_gemini_db", to_process)
    # Index the prepared database files by (batch name, caller) for lookup.
    out_fetch = {}
    for batch_id, out_file in output:
        out_fetch[tuple(batch_id)] = out_file
    out = []
    for batch_name, data in out_retrieve:
        out_variants = []
        for vrn in data["variants"]:
            # "population" acts as an opt-out flag; when absent or truthy the
            # prepared database path is attached under the same key.
            use_population = vrn.pop("population", True)
            if use_population:
                vrn["population"] = out_fetch[(batch_name, vrn["variantcaller"])]
            out_variants.append(vrn)
        data["variants"] = out_variants
        out.append([data])
    for x in extras:
        out.append([x])
    return out
|
Prepares gemini databases in parallel, handling jointly called populations.
|
def remove_file(self, path):
    """Delete the file at *path* and drop its cache-buster entry."""
    target = self.get_file(path)
    target.remove()
    self.remove_cache_buster(path)
|
Removes the given file
|
def determine_orig_wcsname(header, wnames, wkeys):
    """
    Determine the name of the original, unmodified WCS solution.

    Prefers a WCS whose name starts with ``IDC_``; otherwise falls back to
    the second-to-last key when more than one solution exists.
    Returns ``(wcsname, key)``, both ``None`` when nothing qualifies.
    """
    for key, wcsname in wnames.items():
        if wcsname.startswith('IDC_'):
            return wcsname, key
    # No IDC_ wcsname found... revert to second to last if available
    if len(wnames) > 1:
        key = wkeys[-2]
        return wnames[key], key
    return None, None
|
Determine the name of the original, unmodified WCS solution
|
def local_open(url):
    """Read a local path, with special support for directories.

    Files are served via ``urlopen``; directories are rendered either from an
    existing ``index.html`` or as a synthesized link listing. The result is
    always wrapped in an ``HTTPError`` object (returned, not raised), which
    doubles as a file-like HTTP response.
    """
    scheme, server, path, param, query, frag = urllib.parse.urlparse(url)
    filename = urllib.request.url2pathname(path)
    if os.path.isfile(filename):
        return urllib.request.urlopen(url)
    elif path.endswith('/') and os.path.isdir(filename):
        files = []
        for f in os.listdir(filename):
            filepath = os.path.join(filename, f)
            if f == 'index.html':
                # Serve an existing index.html verbatim instead of a listing.
                with open(filepath, 'r') as fp:
                    body = fp.read()
                break
            elif os.path.isdir(filepath):
                # Mark subdirectories with a trailing slash in the listing.
                f += '/'
            files.append('<a href="{name}">{name}</a>'.format(name=f))
        else:
            # for-else: no index.html found, synthesize a directory listing.
            tmpl = (
                "<html><head><title>{url}</title>"
                "</head><body>{files}</body></html>")
            body = tmpl.format(url=url, files='\n'.join(files))
        status, message = 200, "OK"
    else:
        status, message, body = 404, "Path not found", "Not found"
    headers = {'content-type': 'text/html'}
    body_stream = six.StringIO(body)
    return urllib.error.HTTPError(url, status, message, headers, body_stream)
|
Read a local path, with special support for directories
|
def lat_from_pole(ref_loc_lon, ref_loc_lat, pole_plon, pole_plat):
    """
    Calculate paleolatitude for a reference location based on a paleomagnetic pole

    Required Parameters
    ----------
    ref_loc_lon: longitude of reference location in degrees
    ref_loc_lat: latitude of reference location
    pole_plon: paleopole longitude in degrees
    pole_plat: paleopole latitude in degrees
    """
    site = (ref_loc_lon, ref_loc_lat)
    paleopole = (pole_plon, pole_plat)
    # Paleolatitude is the complement of the angular distance to the pole.
    return float(90 - pmag.angle(paleopole, site))
|
Calculate paleolatitude for a reference location based on a paleomagnetic pole
Required Parameters
----------
ref_loc_lon: longitude of reference location in degrees
ref_loc_lat: latitude of reference location
pole_plon: paleopole longitude in degrees
pole_plat: paleopole latitude in degrees
|
def get(self, file_id):
    """Get a file from GridFS by ``"_id"``.

    Returns an instance of :class:`~gridfs.grid_file.GridOut`,
    which provides a file-like interface for reading.

    :Parameters:
      - `file_id`: ``"_id"`` of the file to get

    .. versionadded:: 1.6
    """
    def _build(doc):
        # Deferred callback: wrap the fetched document, or fail loudly.
        if doc is None:
            raise NoFile("TxMongo: no file in gridfs with _id {0}".format(repr(file_id)))
        return GridOut(self.__collection, doc)
    query = {"_id": file_id}
    return self.__collection.files.find_one(query).addCallback(_build)
|
Get a file from GridFS by ``"_id"``.
Returns an instance of :class:`~gridfs.grid_file.GridOut`,
which provides a file-like interface for reading.
:Parameters:
- `file_id`: ``"_id"`` of the file to get
.. versionadded:: 1.6
|
def is_valid_mpls_labels(labels):
    """Returns True if the given value is a list of valid MPLS labels.
    """
    if not isinstance(labels, (list, tuple)):
        return False
    return all(is_valid_mpls_label(label) for label in labels)
|
Returns True if the given value is a list of valid MPLS labels.
|
def update_image(self, container_name, image_name):
    """
    Update a container's image.

    The new image must live in the trusted ``docker.neg`` registry, must
    exist there, and must have the same image name as the container's
    current image (only the tag may change).

    :param container_name: `class`:`str`, container name
    :param image_name: `class`:`str`, the full image name, like alpine:3.3
    :return: `class`:`bool`, True if success, otherwise False.
    """
    code, container = self.get_container(container_name)
    if code != httplib.OK:
        self.logger.error("Container %s is not exists. error code %s, error message %s", container_name, code,
                          container)
        return False
    _, old_image_name, _ = utils.parse_image_name(container.image)
    repository, name, version = utils.parse_image_name(image_name)
    if not repository or repository.lower() != DOCKER_NEG:
        self.logger.error("You image %s must have a 'docker.neg/' prefix string", image_name)
        return False
    if not repo.image_exists(name, tag=version):
        self.logger.error("You image %s must be location in docker.neg repository.", image_name)
        return False
    if old_image_name.lower() != name.lower():
        # Fixed: format string was missing the second placeholder for
        # container.image, so this logging call itself errored.
        self.logger.error("You image %s must be same with container's Image %s.", image_name, container.image)
        return False
    # Only the tag changes; the daemon keeps the same image name.
    code, result = self.update(container_name, tag=version)
    if code != httplib.OK:
        self.logger.error("Update container %s with image failure, code %s, result %s", container_name, code,
                          result)
        return False
    return True
|
update a container's image,
:param container_name: `class`:`str`, container name
:param image_name: `class`:`str`, the full image name, like alpine:3.3
:return: `class`:`bool`, True if success, otherwise False.
|
def rectgal_to_sphergal(X,Y,Z,vx,vy,vz,degree=False):
    """
    NAME:
       rectgal_to_sphergal
    PURPOSE:
       transform phase-space coordinates in rectangular Galactic coordinates to spherical Galactic coordinates (can take vector inputs)
    INPUT:
       X - component towards the Galactic Center (kpc)
       Y - component in the direction of Galactic rotation (kpc)
       Z - component towards the North Galactic Pole (kpc)
       vx - velocity towards the Galactic Center (km/s)
       vy - velocity in the direction of Galactic rotation (km/s)
       vz - velocity towards the North Galactic Pole (km/s)
       degree - (Bool) if True, return l and b in degrees
    OUTPUT:
       (l,b,d,vr,pmll x cos(b),pmbb) in (rad,rad,kpc,km/s,mas/yr,mas/yr)
    HISTORY:
       2009-10-25 - Written - Bovy (NYU)
    """
    # Convert positions to (l, b, d) and velocities to (vr, pmll*cos(b), pmbb).
    lbd= XYZ_to_lbd(X,Y,Z,degree=degree)
    vrpmllpmbb= vxvyvz_to_vrpmllpmbb(vx,vy,vz,X,Y,Z,XYZ=True)
    # Scalar input: return a flat 6-element array.
    if sc.array(X).shape == ():
        return sc.array([lbd[0],lbd[1],lbd[2],vrpmllpmbb[0],vrpmllpmbb[1],vrpmllpmbb[2]])
    else:
        # Vector input: stack positions and velocities into an (n, 6) array.
        out=sc.zeros((len(X),6))
        out[:,0:3]= lbd
        out[:,3:6]= vrpmllpmbb
        return out
|
NAME:
rectgal_to_sphergal
PURPOSE:
transform phase-space coordinates in rectangular Galactic coordinates to spherical Galactic coordinates (can take vector inputs)
INPUT:
X - component towards the Galactic Center (kpc)
Y - component in the direction of Galactic rotation (kpc)
Z - component towards the North Galactic Pole (kpc)
vx - velocity towards the Galactic Center (km/s)
vy - velocity in the direction of Galactic rotation (km/s)
vz - velocity towards the North Galactic Pole (km/s)
degree - (Bool) if True, return l and b in degrees
OUTPUT:
(l,b,d,vr,pmll x cos(b),pmbb) in (rad,rad,kpc,km/s,mas/yr,mas/yr)
HISTORY:
2009-10-25 - Written - Bovy (NYU)
|
def _return_response(self, response):
    """
    Relay *response* (an upstream ``HTTPResponse``) back to the client of
    this request handler.

    :type response: HTTPResponse
    """
    # Drop filtered headers, then strip content-length: the length is
    # recomputed below after the full body has been read.
    self.filter_headers(response.msg)
    if "content-length" in response.msg:
        del response.msg["content-length"]
    # Wire order matters: status line, headers, blank line, body.
    self.send_response(response.status, response.reason)
    for header_key, header_value in response.msg.items():
        self.send_header(header_key, header_value)
    body = response.read()
    self.send_header('Content-Length', str(len(body)))
    self.end_headers()
    self.wfile.write(body)
|
:type result: HTTPResponse
|
def update_properties(self, properties):
    """
    Update writeable properties of this PasswordRule.

    The Password Rule must be user-defined. System-defined Password Rules
    cannot be updated.

    The local properties cache is refreshed only after the HMC accepts the
    update.

    Authorization requirements:

    * Task permission to the "Manage Password Rules" task.

    Parameters:

      properties (dict): New values for the properties to be updated.
        Properties not to be updated are omitted.
        Allowable properties are the properties with qualifier (w) in
        section 'Data model' in section 'Password Rule object' in the
        :term:`HMC API` book.

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    self.manager.session.post(self.uri, body=properties)
    # The name of Password Rules cannot be updated. An attempt to do so
    # should cause HTTPError to be raised in the POST above, so we assert
    # that here, because we omit the extra code for handling name updates:
    assert self.manager._name_prop not in properties
    self.properties.update(copy.deepcopy(properties))
|
Update writeable properties of this PasswordRule.
The Password Rule must be user-defined. System-defined Password Rules
cannot be updated.
Authorization requirements:
* Task permission to the "Manage Password Rules" task.
Parameters:
properties (dict): New values for the properties to be updated.
Properties not to be updated are omitted.
Allowable properties are the properties with qualifier (w) in
section 'Data model' in section 'Password Rule object' in the
:term:`HMC API` book.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
|
def task_add(self, t, periodic=None):
    """
    Register a task in this legion. "periodic" should be None, or
    a callback function which will be called periodically when the
    legion is otherwise idle.
    """
    task_name = t.get_name()
    if task_name in self._tasknames:
        count = len(self._tasknames)
        raise TaskError(task_name, 'Task already exists with %d daemon%s active' %
                        (count, ses(count)))
    self._tasknames[task_name] = (t, periodic)
    self._tasks.add(t)
|
Register a task in this legion. "periodic" should be None, or
a callback function which will be called periodically when the
legion is otherwise idle.
|
def incidence(boundary):
    """
    given an Nxm matrix containing boundary info between simplices,
    compute incidence info matrix
    not very reusable; should probably not be in this lib
    """
    row_index = np.arange(boundary.size) // boundary.shape[1]
    return GroupBy(boundary).split(row_index)
|
given an Nxm matrix containing boundary info between simplices,
compute incidence info matrix
not very reusable; should probably not be in this lib
|
def max_play(w, i, grid):
    "Play like Spock, except breaking ties by drunk_value."
    # NOTE(review): `w` and `i` are unused here; presumably kept so all
    # strategy functions share the same (w, i, grid) signature -- confirm.
    # NOTE(review): despite the name, this picks the *minimum* of
    # (evaluate, drunk_value) over successors; presumably `evaluate` scores
    # from the opponent's perspective -- confirm.
    return min(successors(grid),
               key=lambda succ: (evaluate(succ), drunk_value(succ)))
|
Play like Spock, except breaking ties by drunk_value.
|
def findCells(fnames):
    """
    given a list of files, return a list of cells by their ID.
    A cell is indicated when an ABF name matches the start of another file.

    Example:
        123456.abf
        123456-whatever.tif
    """
    byExt = filesByExtension(fnames)
    pics = byExt['jpg'] + byExt['tif']
    cellIDs = []
    for abfFname in byExt['abf']:
        cellID = os.path.splitext(abfFname)[0]
        if any(pic.startswith(cellID) for pic in pics):
            cellIDs.append(cellID)
    return smartSort(cellIDs)
|
given a list of files, return a list of cells by their ID.
A cell is indicated when an ABF name matches the start of another file.
Example:
123456.abf
123456-whatever.tif
|
def get(self, guild_id):
    """ Returns a player from the cache, or creates one if it does not exist. """
    try:
        return self._players[guild_id]
    except KeyError:
        player = self._player(lavalink=self.lavalink, guild_id=guild_id)
        self._players[guild_id] = player
        return player
|
Returns a player from the cache, or creates one if it does not exist.
|
def text(self):
    """Content as string

    If :attr:`encoding` is None, the encoding is guessed with :meth:`guess_encoding`
    """
    if not self.content:
        # Empty or missing body: nothing to decode.
        return
    codec = self.encoding or self.guess_encoding()
    return self.content.decode(codec, errors='replace')
|
Content as string
If :attr:`encoding` is None, the encoding is guessed with :meth:`guess_encoding`
|
def get_power(self, callb=None):
    """Convenience method to request the power status from the device

    This method will check whether the value has already been retrieved from the device,
    if so, it will simply return it. If no, it will request the information from the device
    and request that callb be executed when a response is received. The default callback
    will simply cache the value.

    :param callb: Callable to be used when the response is received. If not set,
                  the default response handler simply caches the value.
    :type callb: callable
    :returns: The cached value (None until a response has been processed)
    :rtype: int
    """
    if self.power_level is None:
        # Fire-and-forget: the response handler caches power_level, so the
        # return value of the request is not needed (the previous unused
        # `response` local has been removed).
        self.req_with_resp(LightGetPower, LightStatePower, callb=callb)
    return self.power_level
|
Convenience method to request the power status from the device
This method will check whether the value has already been retrieved from the device,
if so, it will simply return it. If no, it will request the information from the device
and request that callb be executed when a response is received. The default callback
will simply cache the value.
:param callb: Callable to be used when the response is received. If not set,
self.resp_set_label will be used.
:type callb: callable
:returns: The cached value
:rtype: int
|
def plot_heat_map(z, include_values=False,
                  cmap=matplotlib.cm.Reds,
                  ax=None,
                  xlabel='auto', ylabel='auto',
                  xtick_labels='auto', ytick_labels='auto',
                  xtick_locs=None, ytick_locs=None,
                  xtick_kwargs=None, ytick_kwargs=None,
                  clabel_pos='top',
                  transpose_y=False, convert_to_log_scale=False,
                  show_colorbar=False, colorbar_dict=None,
                  values_format='{:.2}', values_font_size='small',
                  values_color=None, values_text_kw=None,
                  bad_color=None,
                  **kwargs):
    """Plot a heat map of z.

    Parameters
    -------------
    z : ndarray | DataFrame
    ax : None # NOT IMPLEMENTED YET
        Axis to be used. If None uses the current axis.
    xlabel : str | 'auto' | None
        name for the x-axis
    ylabel : str | 'auto' | None
        name for the y-axis
    xtick_labels : list of str
        names for the columns
    ytick_labels : list of str
        names for the rows
    transpose_y : bool
        Flips the data along the y axis if true
    convert_to_log_scale : bool
        If true, plots the log of z.
    clabel_pos : 'top' | 'bottom'
        Location of the column labels. Default is 'top'.
    cmap : colormap | str
        colormap to use for plotting the values; e.g., matplotlib.cmap.Blues,
        if str then expecting something like 'Blues' to look up using getattr(matplotlib.cm, ...)
    values_color : None | color
        if None, coloring will be the inverse of cmap
        Otherwise the color given would be used for the text color of all the values.
    bad_color : color
        This is the color that will be used for nan values
    Returns
    ---------------
    Output from matshow command (matplotlib.image.AxesImage)
    """
    # TODO: Add possibility to change rotation, size, etc. of xtick markers
    # TODO: Rename in API : xtick_labels and ytick_labels
    # TODO: Implement ax
    # Copy dict arguments locally. The previous version used mutable default
    # dicts and mutated them (setdefault / item assignment), leaking settings
    # between calls.
    xtick_kwargs = dict(xtick_kwargs) if xtick_kwargs else {}
    ytick_kwargs = dict(ytick_kwargs) if ytick_kwargs else {}
    colorbar_dict = dict(colorbar_dict) if colorbar_dict else {}
    values_text_kw = dict(values_text_kw) if values_text_kw else {}
    # Setting default font sizes
    xtick_kwargs.setdefault('fontsize', 'large')
    ytick_kwargs.setdefault('fontsize', 'large')
    ##
    # Figure out annotation for axes based on data frame.
    # DataFrame annotation is used only if 'auto' was given
    # to the annotation.
    # (Fixed: these used `is 'auto'` identity checks, which rely on CPython
    # string interning and emit SyntaxWarning on modern Python.)
    def _is_auto(v):
        return isinstance(v, str) and v == 'auto'
    auto_col_name, auto_col_labels, auto_row_name, auto_row_labels = extract_annotation(z)
    if _is_auto(xtick_labels):
        xtick_labels = auto_col_labels
    if _is_auto(ytick_labels):
        ytick_labels = auto_row_labels
    if _is_auto(xlabel):
        xlabel = auto_col_name
    if _is_auto(ylabel):
        ylabel = auto_row_name
    if isinstance(z, pandas.DataFrame):
        values = z.values
    else:
        values = z
    if convert_to_log_scale:
        values = numpy.log(values)
    if transpose_y:
        values = numpy.flipud(values)
    if isinstance(cmap, str):
        # Look up named colormaps on matplotlib.cm, as the docstring states.
        cmap = getattr(matplotlib.cm, cmap)
    # NOTE(review): bad_color is accepted and documented but not applied in
    # this implementation (presumably meant for cmap.set_bad) -- confirm.
    old_ax = plt.gca()
    if ax is not None:
        plt.sca(ax)
    else:
        ax = plt.gca()
    output = ax.matshow(values, cmap=cmap, **kwargs)
    #####
    # Make the colorbar pretty
    #
    if show_colorbar:
        from mpl_toolkits.axes_grid1 import make_axes_locatable
        divider = make_axes_locatable(ax)
        colorbar_dict.setdefault('size', "5%")
        colorbar_dict.setdefault('pad', 0.05)
        cax = divider.append_axes("right", **colorbar_dict)
        cb = plt.colorbar(output, cax=cax)
        plt.sca(ax)  # Switch back to original axes
    #######
    # Annotate the heat map
    #
    if xtick_labels is not None and len(xtick_labels) > 0:
        if xtick_locs:
            plt.xticks(xtick_locs, xtick_labels, **xtick_kwargs)
        else:
            plt.xticks(range(len(xtick_labels)), xtick_labels, **xtick_kwargs)
    if ytick_labels is not None and len(ytick_labels) > 0:
        if ytick_locs:
            plt.yticks(ytick_locs, ytick_labels, **ytick_kwargs)
        else:
            plt.yticks(range(len(ytick_labels)), ytick_labels, **ytick_kwargs)
    if xlabel:
        plt.xlabel(xlabel)
    if ylabel:
        plt.ylabel(ylabel)
    if include_values:
        def text_cmap(x):
            # Inverse of cmap for contrast; mid-range values are pushed to
            # the extremes so text stays readable.
            if numpy.isnan(x):
                return cmap(1.0)
            if x > 0.2 and x < 0.5:
                x = 0.2
            if x < 0.8 and x >= 0.5:
                x = 0.8
            return cmap(1.0 - x)
        values_text_kw['fontsize'] = values_font_size
        values_text_kw['color'] = values_color
        _plot_table(values, text_format=values_format, cmap=text_cmap, **values_text_kw)
    # Changes the default position for the xlabel to the 'top'
    xaxis = ax.xaxis
    if clabel_pos == 'top':
        xaxis.set_label_position('top')
        xaxis.tick_top()
    else:
        ax.xaxis.tick_bottom()
        ax.xaxis.set_label_position('bottom')
    ##
    # Can get rid of the part below when the part above
    # is rewritten so that changes are applied specifically to the axes
    # object rather than using the method interface.
    plt.sca(old_ax)
    return output
|
Plot a heat map of z.
Parameters
-------------
z : ndarray | DataFrame
ax : None # NOT IMPLEMENTED YET
Axis to be used. If None uses the current axis.
xlabel : str | 'auto' | None
name for the x-axis
ylabel : str | 'auto' | None
name for the y-axis
xtick_labels : list of str
names for the columns
ytick_labels : list of str
names for the rows
transpose_y : bool
Flips the data along the y axis if true
convert_to_log_scale : bool
If true, plots the log of z.
clabel_pos : 'top' | 'bottom'
Location of the column labels. Default is 'top'.
cmap : colormap | str
colormap to use for plotting the values; e.g., matplotlib.cmap.Blues,
if str then expecting something like 'Blues' to look up using getattr(matplotlib.cm, ...)
values_color : None | color
if None, coloring will be the inverse of cmap
Otherwise the color given would be used for the text color of all the values.
bad_color : color
This is the color that will be used for nan values
Returns
---------------
Output from matshow command (matplotlib.image.AxesImage)
|
def get_badge(self):
    """
    The related ``Badge`` object.

    Returns ``None`` when no badge with this slug exists on the read
    database.
    """
    try:
        badge = Badge.objects.using(self.db_read).get(slug=self.slug)
        logger.debug('✓ Badge %s: fetched from db (%s)', badge.slug, self.db_read)
    except Badge.DoesNotExist:
        badge = None
    return badge
|
The related ``Badge`` object.
|
def ensure_dir(directory: str) -> None:
    """Create a directory if it doesn't exist."""
    if os.path.isdir(directory):
        return
    LOG.debug(f"Directory {directory} does not exist, creating it.")
    os.makedirs(directory)
|
Create a directory if it doesn't exist.
|
def get_java_home():
    """\
    Try getting JAVA_HOME from system properties.

    We are interested in the JDK home, containing include/jni.h, while the
    java.home property points to the JRE home. If a JDK is installed, however,
    the two are (usually) related: the JDK home is either the same directory
    as the JRE home (recent java versions) or its parent (and java.home points
    to jdk_home/jre).

    Raises RuntimeError when no suitable home can be determined.
    """
    error = RuntimeError("java home not found, try setting JAVA_HOME")
    try:
        return os.environ["JAVA_HOME"]
    except KeyError:
        # Compile and run a throwaway Java program (from the JPROG template --
        # presumably it prints the java.home property; confirm) to locate the
        # runtime's home directory.
        wd = tempfile.mkdtemp(prefix='pydoop_')
        jclass = "Temp"
        jsrc = os.path.join(wd, "%s.java" % jclass)
        with open(jsrc, "w") as f:
            f.write(JPROG.substitute(classname=jclass))
        try:
            subprocess.check_call(["javac", jsrc])
            path = subprocess.check_output(
                ["java", "-cp", wd, jclass], universal_newlines=True
            )
        except (OSError, UnicodeDecodeError, subprocess.CalledProcessError):
            raise error
        finally:
            # Always clean up the temporary build directory.
            shutil.rmtree(wd)
        path = os.path.normpath(path.strip())
        # Accept the reported path if it is a JDK home; otherwise try its
        # parent (covers layouts where java.home points at jdk_home/jre).
        if os.path.exists(os.path.join(path, "include", "jni.h")):
            return path
        path = os.path.dirname(path)
        if os.path.exists(os.path.join(path, "include", "jni.h")):
            return path
        raise error
|
\
Try getting JAVA_HOME from system properties.
We are interested in the JDK home, containing include/jni.h, while the
java.home property points to the JRE home. If a JDK is installed, however,
the two are (usually) related: the JDK home is either the same directory
as the JRE home (recent java versions) or its parent (and java.home points
to jdk_home/jre).
|
def component(self, *components):
    r"""
    When search() is called it will limit results to items in a component.

    :param component: items passed in will be turned into a list
    :returns: :class:`Search`
    """
    self._component.extend(components)
    return self
|
r"""
When search() is called it will limit results to items in a component.
:param component: items passed in will be turned into a list
:returns: :class:`Search`
|
def has_selector(selector):
    "Determine if the current platform has the selector available"
    try:
        if selector == 'poll':
            # the select module offers the poll selector even if the platform
            # doesn't support it, so actually poll for nothing to verify it.
            poller = select.poll()
            poller.poll(0)
        else:
            # the other selectors fail when instantiated if unsupported
            getattr(select, selector)().close()
    except (OSError, AttributeError):
        return False
    return True
|
Determine if the current platform has the selector available
|
def model_from_list(l, header):
    """Return a model with a collection from a list of entry"""
    entries = (PseudoAccesCategorie(n) for n in l)
    collection = groups.sortableListe(entries)
    return MultiSelectModel(collection, header)
|
Return a model with a collection from a list of entry
|
def sanity_check_states(states_spec):
    """
    Sanity checks a states dict, used to define the state space for an MDP.
    Throws an error or warns if mismatches are found.

    Args:
        states_spec (Union[None,dict]): The spec-dict to check (or None).

    Returns: Tuple of 1) the state space desc and 2) whether there is only one component in the state space.
    """
    # Work on a copy so the caller's spec stays intact.
    states = copy.deepcopy(states_spec)
    # Unique state shortform: a bare spec with a top-level 'shape'.
    is_unique = 'shape' in states
    if is_unique:
        states = dict(state=states)
    # Normalize each component.
    for state in states.values():
        shape = state['shape']
        if isinstance(shape, int):
            # Convert int to unary tuple.
            state['shape'] = (shape,)
        # Default type is float.
        state.setdefault('type', 'float')
    return states, is_unique
|
Sanity checks a states dict, used to define the state space for an MDP.
Throws an error or warns if mismatches are found.
Args:
states_spec (Union[None,dict]): The spec-dict to check (or None).
Returns: Tuple of 1) the state space desc and 2) whether there is only one component in the state space.
|
def warning(self, msg: str) -> None:
    """
    Write a warning message to the Windows Application log
    (± to the Python disk log).
    """
    message = "{}: {}".format(self.fullname, msg)
    # Log messages go to the Windows APPLICATION log.
    # noinspection PyUnresolvedReferences
    servicemanager.LogWarningMsg(message)
    if self.debugging:
        log.warning(message)
|
Write a warning message to the Windows Application log
(± to the Python disk log).
|
def delete_router(self, name, tenant_id, rout_id, subnet_lst):
    """Delete the openstack router.

    Delete the router and remove the interfaces attached to it.

    :param name: router name (used for logging only)
    :param tenant_id: ID of the tenant owning the router
    :param rout_id: neutron ID of the router to delete
    :param subnet_lst: subnets whose interfaces are detached first
    :returns: True on success, False if either step fails
    """
    # Interfaces must be detached before neutron will delete the router.
    ret = self.delete_intf_router(name, tenant_id, rout_id, subnet_lst)
    if not ret:
        return False
    try:
        ret = self.neutronclient.delete_router(rout_id)
    except Exception as exc:
        # NOTE(review): if delete_router raises before assignment, `ret`
        # still holds the delete_intf_router result logged below.
        LOG.error("Failed to delete router %(name)s ret %(ret)s "
                  "Exc %(exc)s",
                  {'name': name, 'ret': str(ret), 'exc': str(exc)})
        return False
    return True
|
Delete the openstack router.
Delete the router and remove the interfaces attached to it.
|
def applyStyleOnShape(self, shape, node, only_explicit=False):
    """
    Apply styles from an SVG element to an RLG shape.
    If only_explicit is True, only attributes really present are applied.
    """
    # RLG-specific: all RLG shapes
    "Apply style attributes of a sequence of nodes to an RL shape."
    # tuple format: (svgAttr, rlgAttr, converter, default)
    mappingN = (
        ("fill", "fillColor", "convertColor", "black"),
        ("fill-opacity", "fillOpacity", "convertOpacity", 1),
        ("fill-rule", "_fillRule", "convertFillRule", "nonzero"),
        ("stroke", "strokeColor", "convertColor", "none"),
        ("stroke-width", "strokeWidth", "convertLength", "1"),
        ("stroke-opacity", "strokeOpacity", "convertOpacity", 1),
        ("stroke-linejoin", "strokeLineJoin", "convertLineJoin", "0"),
        ("stroke-linecap", "strokeLineCap", "convertLineCap", "0"),
        ("stroke-dasharray", "strokeDashArray", "convertDashArray", "none"),
    )
    # Font-related attributes; only applied to String shapes (see below).
    mappingF = (
        ("font-family", "fontName", "convertFontFamily", DEFAULT_FONT_NAME),
        ("font-size", "fontSize", "convertLength", "12"),
        ("text-anchor", "textAnchor", "id", "start"),
    )
    if shape.__class__ == Group:
        # Recursively apply style on Group subelements
        for subshape in shape.contents:
            self.applyStyleOnShape(subshape, node, only_explicit=only_explicit)
        return
    ac = self.attrConverter
    for mapping in (mappingN, mappingF):
        if shape.__class__ != String and mapping == mappingF:
            continue
        for (svgAttrName, rlgAttr, func, default) in mapping:
            svgAttrValue = ac.findAttr(node, svgAttrName)
            if svgAttrValue == '':
                # Attribute not present on the node.
                if only_explicit:
                    continue
                else:
                    svgAttrValue = default
            if svgAttrValue == "currentColor":
                # currentColor resolves to the inherited 'color' property.
                svgAttrValue = ac.findAttr(node.getparent(), "color") or default
            try:
                meth = getattr(ac, func)
                setattr(shape, rlgAttr, meth(svgAttrValue))
            except (AttributeError, KeyError, ValueError):
                # Skip values the converter cannot handle; keep shape defaults.
                pass
    if getattr(shape, 'fillOpacity', None) is not None and shape.fillColor:
        # Fold fill-opacity into the color's alpha channel.
        shape.fillColor.alpha = shape.fillOpacity
|
Apply styles from an SVG element to an RLG shape.
If only_explicit is True, only attributes really present are applied.
|
def targets(tgt, tgt_type='glob', **kwargs): # pylint: disable=W0613
    '''
    Return the targets from the flat yaml file, checks opts for location but
    defaults to /etc/salt/roster

    Builds one roster entry per cached cloud minion: preferred IP, SSH user,
    password, private key and sudo flag pulled from the cloud configuration.
    '''
    ret = {}
    # Cloud provider settings live next to the master config file.
    cloud_opts = salt.config.cloud_config(
        os.path.join(os.path.dirname(__opts__['conf_file']), 'cloud')
    )
    minions = __runner__['cache.cloud'](tgt)
    for minion_id, full_info in minions.items():
        profile, provider = full_info.get('profile', None), full_info.get('provider', None)
        vm_ = {
            'driver': provider,
            'profile': profile,
        }
        public_ips = full_info.get('public_ips', [])
        private_ips = full_info.get('private_ips', [])
        ip_list = []
        # Flatten: each entry may be a single address or a list of them.
        for item in (public_ips, private_ips):
            if isinstance(item, list):
                ip_list = ip_list + item
            elif isinstance(item, string_types):
                ip_list.append(item)
        # Pick the first IPv4 address matching the configured preference order.
        roster_order = __opts__.get('roster_order', (
            'public', 'private', 'local'
        ))
        preferred_ip = extract_ipv4(roster_order, ip_list)
        ret[minion_id] = copy.deepcopy(__opts__.get('roster_defaults', {}))
        ret[minion_id].update({'host': preferred_ip})
        ssh_username = salt.utils.cloud.ssh_usernames(vm_, cloud_opts)
        if isinstance(ssh_username, string_types):
            ret[minion_id]['user'] = ssh_username
        elif isinstance(ssh_username, list):
            # NOTE(review): only a non-root first entry is used; a list
            # starting with 'root' sets no user at all -- confirm intended.
            if ssh_username[0] != 'root':
                ret[minion_id]['user'] = ssh_username[0]
        password = salt.config.get_cloud_config_value(
            'ssh_password', vm_, cloud_opts, search_global=False, default=None
        )
        if password:
            ret[minion_id]['password'] = password
        key_filename = salt.config.get_cloud_config_value(
            'private_key', vm_, cloud_opts, search_global=False, default=None
        )
        if key_filename:
            ret[minion_id]['priv'] = key_filename
        sudo = salt.config.get_cloud_config_value(
            'sudo', vm_, cloud_opts, search_global=False, default=None
        )
        if sudo:
            ret[minion_id]['sudo'] = sudo
    return ret
|
Return the targets from the flat yaml file, checks opts for location but
defaults to /etc/salt/roster
|
def _make_dispatch(cls, func):
    '''
    Create a dispatch pair for func- a tuple of (bind_args, func), where
    bind_args is a function that, when called with (args, kwargs), attempts
    to bind those args to the type signature of func, or else raise a
    TypeError
    '''
    sig = signature(func)
    matchers = tuple(cls._make_all_matchers(sig.parameters.items()))
    binder = partial(cls._bind_args, sig, matchers)
    return binder, func
|
Create a dispatch pair for func- a tuple of (bind_args, func), where
bind_args is a function that, when called with (args, kwargs), attempts
to bind those args to the type signature of func, or else raise a
TypeError
|
def _get_input_name(self, input_str, region=None, describe_output=None):
'''
:param input_str: A string of one of the forms: "<exported input field name>", "<explicit workflow input field name>", "<stage ID>.<input field name>", "<stage index>.<input field name>", "<stage name>.<input field name>"
:type input_str: string
:returns: If the given form was one of those which uses the stage index or stage name, it is translated to the stage ID for use in the API call (stage name takes precedence)
'''
if '.' in input_str:
stage_identifier, input_name = input_str.split('.', 1)
# Try to parse as a stage ID or name
return self._get_stage_id(stage_identifier) + '.' + input_name
return input_str
|
:param input_str: A string of one of the forms: "<exported input field name>", "<explicit workflow input field name>", "<stage ID>.<input field name>", "<stage index>.<input field name>", "<stage name>.<input field name>"
:type input_str: string
:returns: If the given form was one of those which uses the stage index or stage name, it is translated to the stage ID for use in the API call (stage name takes precedence)
|
def recv(self, topic, payload, qos):
    """Handle a message received from the MQTT broker.

    Call this method when a message arrives from the broker; messages that
    cannot be parsed are silently dropped.
    """
    message = self._parse_mqtt_to_message(topic, payload, qos)
    if message is None:
        return
    _LOGGER.debug('Receiving %s', message)
    self.add_job(self.logic, message)
|
Receive a MQTT message.
Call this method when a message is received from the MQTT broker.
|
def lastNode(class_, hot_map):
    ''' Return the deepest last node in the hot map, descending recursively
    through the children of the final entry. '''
    tail_children = hot_map[-1][2]
    if not tail_children:
        return hot_map[-1][1]
    return class_.lastNode(tail_children)
|
Return the very last node (recursively) in the hot map.
|
def pixels_from_coordinates(lat, lon, max_y, max_x):
    """
    Return the two matrices with the pixel position of each (lat, lon) pair.

    Keyword arguments:
    lat -- A latitude matrix (degrees, -90..90)
    lon -- A longitude matrix (degrees, -180..180)
    max_y -- The max vertical pixels amount of an orthorectified image.
    max_x -- The max horizontal pixels amount of an orthorectified image.

    Returns (x, y) matrices of the same shape as lon/lat.
    """
    # Pixels-per-degree scale of the orthorectified image.
    x_ratio, y_ratio = max_x / 360., max_y / 180.
    # Shift the origin from (-180, -90) to (0, 0), then scale to pixels.
    # (The previous implementation pre-allocated zero matrices that were
    # immediately overwritten; those dead allocations are removed.)
    x = (lon + 180.) * x_ratio
    y = (lat + 90.) * y_ratio
    return x, y
|
Return the 2 matrix with lat and lon of each pixel.
Keyword arguments:
lat -- A latitude matrix
lon -- A longitude matrix
max_y -- The max vertical pixels amount of an orthorectified image.
max_x -- The max horizontal pixels amount of an orthorectified image.
|
async def close(self) -> None:
    """
    Explicit exit. If so configured ('archive-on-close' truthy in self.cfg),
    populate the cache for any creds on schemata, cred defs, and rev regs
    marked of interest in configuration at initialization, archive the
    cache, and purge prior cache archives. Returns None.
    """
    LOGGER.debug('Verifier.close >>>')
    # A truthy 'archive-on-close' config triggers one final cache refresh,
    # archive, and purge of older archives before shutting down.
    if self.cfg.get('archive-on-close', {}):
        await self.load_cache(True)
        Caches.purge_archives(self.dir_cache, True)
    await super().close()
    LOGGER.debug('Verifier.close <<<')
|
Explicit exit. If so configured, populate cache to prove for any creds on schemata,
cred defs, and rev regs marked of interest in configuration at initialization,
archive cache, and purge prior cache archives.
:return: current object
|
def cli(env, date_min, date_max, obj_event, obj_id, obj_type, utc_offset, metadata, limit):
    """Get Event Logs and print them as quoted CSV rows.

    Example:
        slcli event-log get -d 01/01/2019 -D 02/01/2019 -t User -l 10
    """
    columns = ['Event', 'Object', 'Type', 'Date', 'Username']
    event_mgr = SoftLayer.EventLogManager(env.client)
    user_mgr = SoftLayer.UserManager(env.client)
    request_filter = event_mgr.build_filter(date_min, date_max, obj_event, obj_id, obj_type, utc_offset)
    logs = event_mgr.get_event_logs(request_filter)
    # Timestamp format used by the API for eventCreateDate.
    log_time = "%Y-%m-%dT%H:%M:%S.%f%z"
    # Cache of userId -> username so each user is looked up at most once.
    user_data = {}
    if metadata:
        columns.append('Metadata')
    row_count = 0
    # Header row; data rows below are emitted with quoted fields.
    click.secho(", ".join(columns))
    for log in logs:
        # The log iterator yields None when nothing matched the filter.
        if log is None:
            click.secho('No logs available for filter %s.' % request_filter, fg='red')
            return
        user = log['userType']
        label = log.get('label', '')
        # For customer events, resolve the numeric userId to a username,
        # memoized in user_data to avoid repeated API calls.
        if user == "CUSTOMER":
            username = user_data.get(log['userId'])
            if username is None:
                username = user_mgr.get_user(log['userId'], "mask[username]")['username']
                user_data[log['userId']] = username
            user = username
        if metadata:
            metadata_data = log['metaData'].strip("\n\t")
            click.secho("'{0}','{1}','{2}','{3}','{4}','{5}'".format(
                log['eventName'],
                label,
                log['objectName'],
                utils.clean_time(log['eventCreateDate'], in_format=log_time),
                user,
                metadata_data))
        else:
            click.secho("'{0}','{1}','{2}','{3}','{4}'".format(
                log['eventName'],
                label,
                log['objectName'],
                utils.clean_time(log['eventCreateDate'], in_format=log_time),
                user))
        row_count = row_count + 1
        # Stop once the requested number of rows is printed; -1 = unlimited.
        if row_count >= limit and limit != -1:
            return
|
Get Event Logs
Example:
slcli event-log get -d 01/01/2019 -D 02/01/2019 -t User -l 10
|
def member_at_in(self, leaderboard_name, position, **options):
    '''
    Retrieve the member at the given 1-based position in the leaderboard.

    @param leaderboard_name [String] Name of the leaderboard.
    @param position [int] Position in named leaderboard.
    @param options [Hash] Options used when retrieving the member.
    @return the member at that position, or None if the position is out of range.
    '''
    # Out-of-range positions (including <= 0) yield None, as before.
    if position <= 0 or position > self.total_members_in(leaderboard_name):
        return None
    page_size = options.get('page_size', self.page_size)
    page = math.ceil(float(position) / float(page_size))
    index_in_page = (position - 1) % page_size
    leaders = self.leaders_in(leaderboard_name, page, **options)
    if leaders:
        return leaders[index_in_page]
    return None
|
Retrieve a member at the specified index from the leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param position [int] Position in named leaderboard.
@param options [Hash] Options to be used when retrieving the member from the named leaderboard.
@return a page of leaders from the named leaderboard.
|
def _validate_auth(self, path, obj, _):
""" validate that apiKey and oauth2 requirements """
errs = []
if obj.type == 'apiKey':
if not obj.passAs:
errs.append('need "passAs" for apiKey')
if not obj.keyname:
errs.append('need "keyname" for apiKey')
elif obj.type == 'oauth2':
if not obj.grantTypes:
errs.append('need "grantTypes" for oauth2')
return path, obj.__class__.__name__, errs
|
validate that apiKey and oauth2 requirements
|
def unset_values(self):
    """
    Reset the user values of all symbols and choices, as if
    Kconfig.load_config() or Symbol.set_value() had never been called.
    """
    self._warn_for_no_prompt = False
    try:
        # set_value() already rejects undefined symbols, and their value
        # never changes, so clearing the defined symbols (plus all
        # choices) is sufficient.
        for entry in list(self.unique_defined_syms) + list(self.unique_choices):
            entry.unset_value()
    finally:
        # Re-enable the warning even if an unset_value() call raised.
        self._warn_for_no_prompt = True
|
Resets the user values of all symbols, as if Kconfig.load_config() or
Symbol.set_value() had never been called.
|
def get_location_from_sina(ip):
    """
    Look up *ip* via Sina's IP-location web service.

    Returns b"country,province,city,isp" (UTF-8 encoded), or None when the
    HTTP request fails or the service reports an error. Sample service
    response:
    {
        "ret":1,
        "start":"58.18.0.0",
        "end":"58.18.15.255",
        "country":"中国",
        "province":"内蒙古",
        "city":"兴安",
        "district":"",
        "isp":"联通",
        "type":"",
        "desc":""
    }
    """
    global sina
    response = requests.get(sina % ip)
    # Anything but HTTP 200 means the lookup failed.
    if response.status_code != 200:
        return None
    data = json.loads(response.content)
    # The service signals success with "ret" == 1.
    if data['ret'] != 1:
        return None
    location = "%s,%s,%s,%s" % (
        data['country'], data['province'], data['city'], data['isp'])
    return location.encode('utf8')
|
{
"ret":1,
"start":"58.18.0.0",
"end":"58.18.15.255",
"country":"中国",
"province":"内蒙古",
"city":"兴安",
"district":"",
"isp":"联通",
"type":"",
"desc":""
}
|
def append(self, lines):
    """
    Append lines to the end of the editor buffer.

    Args:
        lines (list or str): Lines to append; a string is split on newlines.

    Raises:
        TypeError: if lines is neither a list nor a string.
    """
    if isinstance(lines, str):
        lines = lines.split('\n')
    elif not isinstance(lines, list):
        raise TypeError('Unsupported type {0} for lines.'.format(type(lines)))
    # Rebind rather than extend in place, matching prior behavior.
    self._lines = self._lines + lines
|
Args:
lines (list): List of line strings to append to the end of the editor
|
def last_edit_time(self):
    """
    :return: time the question was last edited
    :rtype: datetime.datetime
    """
    form = {'_xsrf': self.xsrf, 'offset': '1'}
    res = self._session.post(self.url + 'log', data=form)
    # The response's 'msg' field is a pair; the second item is HTML markup.
    _, html = res.json()['msg']
    markup = BeautifulSoup(html)
    stamp = markup.find_all('time')[0]['datetime']
    return datetime.strptime(stamp, "%Y-%m-%d %H:%M:%S")
|
:return: 问题最后编辑时间
:rtype: datetime.datetime
|
def _original_path(self, path):
    """Return a normalized case version of the given path for
    case-insensitive file systems. For case-sensitive file systems,
    return path unchanged.
    Args:
        path: the file path to be transformed
    Returns:
        A version of path matching the case of existing path elements.
    """
    def components_to_path():
        # Components that could not be matched against existing entries
        # keep their caller-supplied spelling.
        if len(path_components) > len(normalized_components):
            normalized_components.extend(
                path_components[len(normalized_components):])
        sep = self._path_separator(path)
        normalized_path = sep.join(normalized_components)
        # Re-attach the leading separator that join() drops for absolute paths.
        if path.startswith(sep) and not normalized_path.startswith(sep):
            normalized_path = sep + normalized_path
        return normalized_path
    if self.is_case_sensitive or not path:
        return path
    path_components = self._path_components(path)
    normalized_components = []
    # Walk the fake directory tree, recording the stored (canonical-case)
    # name of each matched component.
    current_dir = self.root
    for component in path_components:
        if not isinstance(current_dir, FakeDirectory):
            return components_to_path()
        dir_name, current_dir = self._directory_content(
            current_dir, component)
        # Entries that are missing, or that exist with neither contents nor
        # size, are treated as unmatched and end the walk.
        if current_dir is None or (
            isinstance(current_dir, FakeDirectory) and
            current_dir._byte_contents is None and
            current_dir.st_size == 0):
            return components_to_path()
        normalized_components.append(dir_name)
    return components_to_path()
|
Return a normalized case version of the given path for
case-insensitive file systems. For case-sensitive file systems,
return path unchanged.
Args:
path: the file path to be transformed
Returns:
A version of path matching the case of existing path elements.
|
def oldest_peer(peers):
    """Determine whether this unit is the oldest peer by comparing unit numbers.

    Returns True when no peer has a lower unit number than the local unit.
    """
    local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
    return all(
        int(peer.split('/')[1]) >= local_unit_no for peer in peers)
|
Determines who the oldest peer is by comparing unit numbers.
|
def _config_bootstrap(self):
    """Go through and establish the defaults on the file system.
    The approach here was stolen from the CLI tool provided with the
    module. Idea being that the user should not always need to provide a
    username and password in order to run the script. If the configuration
    file is already present with valid data, then lets use it.
    """
    if not os.path.exists(CONFIG_PATH):
        os.makedirs(CONFIG_PATH)
    if not os.path.exists(CONFIG_FILE):
        # Seed the config file with defaults. Using 'with' closes the
        # handle deterministically (the previous code leaked it).
        with open(CONFIG_FILE, 'w') as handle:
            json.dump(CONFIG_DEFAULTS, handle, indent=4,
                      separators=(',', ': '))
    config = CONFIG_DEFAULTS
    if self._email and self._password:
        # Save the configuration locally to pull later on
        config['email'] = self._email
        config['password'] = str(obfuscate(self._password, 'store'))
        self._log.debug("Caching authentication in config file")
        with open(CONFIG_FILE, 'w') as handle:
            json.dump(config, handle, indent=4, separators=(',', ': '))
    else:
        # Load the config file and override the class
        with open(CONFIG_FILE) as handle:
            config = json.load(handle)
        if config.get('py2', PY2) != PY2:
            raise Exception("Python versions have changed. Please run `setup` again to reconfigure the client.")
        if config['email'] and config['password']:
            self._email = config['email']
            self._password = obfuscate(str(config['password']), 'fetch')
            self._log.debug("Loaded authentication from config file")
|
Go through and establish the defaults on the file system.
The approach here was stolen from the CLI tool provided with the
module. Idea being that the user should not always need to provide a
username and password in order to run the script. If the configuration
file is already present with valid data, then lets use it.
|
def sound_touch_stop(self, call_params):
    """REST call that removes soundtouch audio effects from a Call."""
    endpoint = '/' + self.api_version + '/SoundTouchStop/'
    return self.request(endpoint, 'POST', call_params)
|
REST Remove soundtouch audio effects on a Call
|
def authenticate_keystone(self, keystone_ip, username, password,
                          api_version=False, admin_port=False,
                          user_domain_name=None, domain_name=None,
                          project_domain_name=None, project_name=None):
    """Authenticate with Keystone and return a session-backed client.

    A falsy api_version defaults to v2; the client class is chosen to
    match the API version.
    """
    self.log.debug('Authenticating with keystone...')
    if not api_version:
        api_version = 2
    sess, auth = self.get_keystone_session(
        keystone_ip=keystone_ip,
        username=username,
        password=password,
        api_version=api_version,
        admin_port=admin_port,
        user_domain_name=user_domain_name,
        domain_name=domain_name,
        project_domain_name=project_domain_name,
        project_name=project_name
    )
    client_cls = keystone_client.Client if api_version == 2 \
        else keystone_client_v3.Client
    client = client_cls(session=sess)
    # Resolving the access info populates client.service_catalog.
    client.auth_ref = auth.get_access(sess)
    return client
|
Authenticate with Keystone
|
def client_auth(self):
    """Return the merchantAuthentication XML element, building it lazily
    from the configured login id and transaction key."""
    if not self._client_auth:
        root = E.Element('merchantAuthentication')
        E.SubElement(root, 'name').text = self.config.login_id
        E.SubElement(root, 'transactionKey').text = self.config.transaction_key
        self._client_auth = root
    return self._client_auth
|
Generate an XML element with client auth data populated.
|
def smart_truncate(value, max_length=0, word_boundaries=False, separator=' '):
    """Truncate a string to at most max_length characters.

    With word_boundaries=True, whole separator-delimited words are kept
    while they fit; otherwise the string is cut at max_length. A
    max_length of 0 disables truncation.
    """
    text = value.strip(separator)
    # No limit, or already short enough: nothing to do.
    if not max_length or len(text) < max_length:
        return text
    if not word_boundaries:
        return text[:max_length].strip(separator)
    if separator not in text:
        return text[:max_length]
    kept = ''
    for word in text.split(separator):
        if word and len(kept) + len(word) + len(separator) <= max_length:
            kept += word + separator
    if not kept:
        # No whole word fits; fall back to a hard cut.
        kept = text[:max_length]
    return kept.strip(separator)
|
Truncate a string
|
def _set_ldp_protocol_stats_instance_total(self, v, load=False):
    """
    Setter method for ldp_protocol_stats_instance_total, mapped from YANG variable /mpls_state/ldp/statistics/ldp_protocol_stats_instance_total (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ldp_protocol_stats_instance_total is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ldp_protocol_stats_instance_total() directly.
    """
    # Unwrap values that already carry a YANG user-type wrapper.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce the value into the generated container class; YANGDynClass
        # enforces the YANG schema constraints for this node.
        t = YANGDynClass(v,base=ldp_protocol_stats_instance_total.ldp_protocol_stats_instance_total, is_container='container', presence=False, yang_name="ldp-protocol-stats-instance-total", rest_name="ldp-protocol-stats-instance-total", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-protocol-stats-instance-ldp-protocol-stats-instance-total-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        # Re-raise with the expected-schema details so callers see what
        # the generated type would have accepted.
        raise ValueError({
            'error-string': """ldp_protocol_stats_instance_total must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=ldp_protocol_stats_instance_total.ldp_protocol_stats_instance_total, is_container='container', presence=False, yang_name="ldp-protocol-stats-instance-total", rest_name="ldp-protocol-stats-instance-total", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-protocol-stats-instance-ldp-protocol-stats-instance-total-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
        })
    self.__ldp_protocol_stats_instance_total = t
    # Notify the parent object (when present) that a child changed.
    if hasattr(self, '_set'):
        self._set()
Setter method for ldp_protocol_stats_instance_total, mapped from YANG variable /mpls_state/ldp/statistics/ldp_protocol_stats_instance_total (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ldp_protocol_stats_instance_total is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ldp_protocol_stats_instance_total() directly.
|
def nullval(cls):
    """Create a new instance where all of the values are 0."""
    zeroed = {key: 0 for key in cls.__dict__}
    # Keep 'sl' at its class value and force the level field to 0.
    zeroed['sl'] = cls.sl
    zeroed[cls.level] = 0
    return cls(**zeroed)
|
Create a new instance where all of the values are 0
|
def rt_location_log(logfile):
    """
    Extract location information from a RefTek raw log-file.

    Function to read a specific RefTek RT130 log-file and find all location
    information.

    :type logfile: str
    :param logfile: The logfile to look in
    :returns: list of tuples of lat, lon, elevation in decimal degrees and km.
    :rtype: list
    """
    locations = []
    # Binary mode on every platform (the old os.name check opened the file
    # identically in both branches). 'with' guarantees the handle is closed
    # even when parsing raises mid-file, which the old code did not.
    with open(logfile, 'rb') as f:
        for line_binary in f:
            try:
                line = line_binary.decode("utf8", "ignore")
            except UnicodeDecodeError:
                warnings.warn('Cannot decode line, skipping')
                print(line_binary)
                continue
            match = re.search("GPS: POSITION:", line)
            if not match:
                continue
            # Line is of form:
            # jjj:hh:mm:ss GPS: POSITION: xDD:MM:SS.SS xDDD:MM:SS.SS xMMMMMMM
            loc = line[match.end() + 1:].rstrip().split(' ')
            lat = _dms_to_decimal(loc[0], 'S')
            lon = _dms_to_decimal(loc[1], 'W')
            elev_sign = loc[2][0]
            elev_unit = loc[2][-1]
            if elev_unit != 'M':
                raise NotImplementedError('Elevation is not in M: unit=' +
                                          elev_unit)
            elev = int(loc[2][1:-1])
            if elev_sign == '-':
                elev *= -1
            # Convert to km
            elev /= 1000
            locations.append((lat, lon, elev))
    return locations


def _dms_to_decimal(field, negative_hemisphere):
    """Convert an 'xDD:MM:SS.SS' field (x = hemisphere letter) to decimal
    degrees, negated when the hemisphere matches *negative_hemisphere*."""
    hemisphere = field[0]
    parts = field[1:].split(':')
    value = int(parts[0]) + (int(parts[1]) / 60.0) + (float(parts[2]) / 3600.0)
    if hemisphere == negative_hemisphere:
        value *= -1
    return value
|
Extract location information from a RefTek raw log-file.
Function to read a specific RefTek RT130 log-file and find all location
information.
:type logfile: str
:param logfile: The logfile to look in
:returns: list of tuples of lat, lon, elevation in decimal degrees and km.
:rtype: list
|
def img(self):
    '''Return a wx image built from the stored width, height and data.'''
    import wx
    with warnings.catch_warnings():
        # wx.EmptyImage emits a deprecation warning on newer wxPython.
        warnings.simplefilter('ignore')
        image = wx.EmptyImage(self.width, self.height)
        image.SetData(self.imgstr)
    return image
|
return a wx image
|
def get_country_by_id(self, country_id) -> 'Country':
    """
    Get a country in this coalition by its ID.

    Args:
        country_id: country Id

    Returns: Country

    Raises ValueError for an unknown (or non-positive) country id.
    """
    VALID_POSITIVE_INT.validate(country_id, 'get_country_by_id', exc=ValueError)
    # Fast path: the id has already been indexed.
    if country_id in self._countries_by_id:
        return self._countries_by_id[country_id]
    # Fall back to a linear scan of the coalition's countries.
    for candidate in self.countries:
        if candidate.country_id == country_id:
            return candidate
    raise ValueError(country_id)
|
Gets a country in this coalition by its ID
Args:
country_id: country Id
Returns: Country
|
def add_health_monitor(self, loadbalancer, type, delay=10, timeout=10,
        attemptsBeforeDeactivation=3, path="/", statusRegex=None,
        bodyRegex=None, hostHeader=None):
    """
    Add a health monitor to the load balancer. An already-existing
    monitor is simply updated with the supplied settings.
    """
    uri = "/loadbalancers/%s/healthmonitor" % utils.get_id(loadbalancer)
    monitor = {
        "type": type,
        "delay": delay,
        "timeout": timeout,
        "attemptsBeforeDeactivation": attemptsBeforeDeactivation,
    }
    uptype = type.upper()
    # HTTP/HTTPS monitors require extra settings and a matching protocol.
    if uptype.startswith("HTTP"):
        lb = self._get_lb(loadbalancer)
        if uptype != lb.protocol:
            raise exc.ProtocolMismatch("Cannot set the Health Monitor type "
                    "to '%s' when the Load Balancer's protocol is '%s'." %
                    (type, lb.protocol))
        if not all((path, statusRegex, bodyRegex)):
            raise exc.MissingHealthMonitorSettings("When creating an HTTP(S) "
                    "monitor, you must provide the 'path', 'statusRegex' and "
                    "'bodyRegex' parameters.")
        monitor["path"] = path
        monitor["statusRegex"] = statusRegex
        monitor["bodyRegex"] = bodyRegex
        if hostHeader:
            monitor["hostHeader"] = hostHeader
    resp, body = self.api.method_put(uri, body={"healthMonitor": monitor})
    return body
|
Adds a health monitor to the load balancer. If a monitor already
exists, it is updated with the supplied settings.
|
def get_datatype(self, table: str, column: str) -> str:
    """Return the database SQL datatype of a column, uppercased (e.g. VARCHAR)."""
    datatype = self.flavour.get_datatype(self, table, column)
    return datatype.upper()
|
Returns database SQL datatype for a column: e.g. VARCHAR.
|
def infer(self, number_of_processes=1, *args, **kwargs):
    """
    :param number_of_processes: If set to more than 1, the inference routines
        will be parallelised using the ``multiprocessing`` module
    :param args: arguments to pass to :meth:`Inference.infer`
    :param kwargs: keyword arguments to pass to :meth:`Inference.infer`
    :return: collection of results sorted by distance at minimum
    """
    inference_objects = self._inference_objects
    if number_of_processes == 1:
        results = [obj.infer(*args, **kwargs) for obj in inference_objects]
    else:
        raw_results = raw_results_in_parallel(inference_objects,
                                              number_of_processes, *args,
                                              **kwargs)
        results = [obj._result_from_raw_result(raw)
                   for obj, raw in zip(inference_objects, raw_results)]
    results.sort(key=lambda result: result.distance_at_minimum)
    return InferenceResultsCollection(results)
|
:param number_of_processes: If set to more than 1, the inference routines will be paralellised
using ``multiprocessing`` module
:param args: arguments to pass to :meth:`Inference.infer`
:param kwargs: keyword arguments to pass to :meth:`Inference.infer`
:return:
|
def ip_rtm_config_route_static_route_oif_vrf_static_route_oif_type(self, **kwargs):
    """Auto Generated Code

    Build the NETCONF XML payload for
    ip/rtm-config/route/static-route-oif-vrf/static-route-oif-type and pass
    it to the callback.

    Required kwargs (popped from **kwargs): static_route_next_vrf_dest,
    next_hop_vrf, static_route_oif_name, static_route_oif_type.
    Optional kwargs: callback (defaults to self._callback).
    """
    config = ET.Element("config")
    ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def")
    rtm_config = ET.SubElement(ip, "rtm-config", xmlns="urn:brocade.com:mgmt:brocade-rtm")
    route = ET.SubElement(rtm_config, "route")
    static_route_oif_vrf = ET.SubElement(route, "static-route-oif-vrf")
    # The three *_key elements form the list key of the YANG entry.
    static_route_next_vrf_dest_key = ET.SubElement(static_route_oif_vrf, "static-route-next-vrf-dest")
    static_route_next_vrf_dest_key.text = kwargs.pop('static_route_next_vrf_dest')
    next_hop_vrf_key = ET.SubElement(static_route_oif_vrf, "next-hop-vrf")
    next_hop_vrf_key.text = kwargs.pop('next_hop_vrf')
    static_route_oif_name_key = ET.SubElement(static_route_oif_vrf, "static-route-oif-name")
    static_route_oif_name_key.text = kwargs.pop('static_route_oif_name')
    static_route_oif_type = ET.SubElement(static_route_oif_vrf, "static-route-oif-type")
    static_route_oif_type.text = kwargs.pop('static_route_oif_type')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
Auto Generated Code
|
def _property_set(self, msg):
"""Set command received and acknowledged."""
prop = self._sent_property.get('prop')
if prop and hasattr(self, prop):
setattr(self, prop, self._sent_property.get('val'))
self._sent_property = {}
|
Set command received and acknowledged.
|
def svm_predict(y, x, m, options=""):
    """
    svm_predict(y, x, m [, options]) -> (p_labels, p_acc, p_vals)
    y: a list/tuple/ndarray of l true labels (type must be int/double).
    It is used for calculating the accuracy. Use [] if true labels are
    unavailable.
    x: 1. a list/tuple of l training instances. Feature vector of
    each training instance is a list/tuple or dictionary.
    2. an l * n numpy ndarray or scipy spmatrix (n: number of features).
    Predict data (y, x) with the SVM model m.
    options:
    -b probability_estimates: whether to predict probability estimates,
    0 or 1 (default 0); for one-class SVM only 0 is supported.
    -q : quiet mode (no outputs).
    The return tuple contains
    p_labels: a list of predicted labels
    p_acc: a tuple including accuracy (for classification), mean-squared
    error, and squared correlation coefficient (for regression).
    p_vals: a list of decision values or probability estimates (if '-b 1'
    is specified). If k is the number of classes, for decision values,
    each element includes results of predicting k(k-1)/2 binary-class
    SVMs. For probabilities, each element contains k values indicating
    the probability that the testing instance is in each class.
    Note that the order of classes here is the same as 'model.label'
    field in the model structure.
    """
    # Default progress printer; replaced by print_null under -q below.
    def info(s):
        print(s)
    if scipy and isinstance(x, scipy.ndarray):
        x = scipy.ascontiguousarray(x) # enforce row-major
    elif sparse and isinstance(x, sparse.spmatrix):
        x = x.tocsr()
    elif not isinstance(x, (list, tuple)):
        raise TypeError("type of x: {0} is not supported!".format(type(x)))
    if (not isinstance(y, (list, tuple))) and (not (scipy and isinstance(y, scipy.ndarray))):
        raise TypeError("type of y: {0} is not supported!".format(type(y)))
    predict_probability = 0
    # Parse the libsvm-style option string.
    argv = options.split()
    i = 0
    while i < len(argv):
        if argv[i] == '-b':
            i += 1
            predict_probability = int(argv[i])
        elif argv[i] == '-q':
            # Rebind the local printer to the silent one.
            info = print_null
        else:
            raise ValueError("Wrong options")
        i+=1
    svm_type = m.get_svm_type()
    is_prob_model = m.is_probability_model()
    nr_class = m.get_nr_class()
    pred_labels = []
    pred_values = []
    if scipy and isinstance(x, sparse.spmatrix):
        nr_instance = x.shape[0]
    else:
        nr_instance = len(x)
    if predict_probability:
        if not is_prob_model:
            raise ValueError("Model does not support probabiliy estimates")
        if svm_type in [NU_SVR, EPSILON_SVR]:
            info("Prob. model for test data: target value = predicted value + z,\n"
            "z: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma=%g" % m.get_svr_probability());
            # Regression models carry no per-class probabilities.
            nr_class = 0
        prob_estimates = (c_double * nr_class)()
        for i in range(nr_instance):
            if scipy and isinstance(x, sparse.spmatrix):
                # Slice row i out of the CSR matrix without densifying it.
                indslice = slice(x.indptr[i], x.indptr[i+1])
                xi, idx = gen_svm_nodearray((x.indices[indslice], x.data[indslice]), isKernel=(m.param.kernel_type == PRECOMPUTED))
            else:
                xi, idx = gen_svm_nodearray(x[i], isKernel=(m.param.kernel_type == PRECOMPUTED))
            label = libsvm.svm_predict_probability(m, xi, prob_estimates)
            values = prob_estimates[:nr_class]
            pred_labels += [label]
            pred_values += [values]
    else:
        if is_prob_model:
            info("Model supports probability estimates, but disabled in predicton.")
        # NOTE(review): upstream libsvm uses NU_SVR here, not NU_SVC —
        # confirm this deviation is intentional.
        if svm_type in (ONE_CLASS, EPSILON_SVR, NU_SVC):
            nr_classifier = 1
        else:
            nr_classifier = nr_class*(nr_class-1)//2
        dec_values = (c_double * nr_classifier)()
        for i in range(nr_instance):
            if scipy and isinstance(x, sparse.spmatrix):
                indslice = slice(x.indptr[i], x.indptr[i+1])
                xi, idx = gen_svm_nodearray((x.indices[indslice], x.data[indslice]), isKernel=(m.param.kernel_type == PRECOMPUTED))
            else:
                xi, idx = gen_svm_nodearray(x[i], isKernel=(m.param.kernel_type == PRECOMPUTED))
            label = libsvm.svm_predict_values(m, xi, dec_values)
            if(nr_class == 1):
                values = [1]
            else:
                values = dec_values[:nr_classifier]
            pred_labels += [label]
            pred_values += [values]
    # Without true labels, score against zeros (the accuracy is meaningless).
    if len(y) == 0:
        y = [0] * nr_instance
    ACC, MSE, SCC = evaluations(y, pred_labels)
    if svm_type in [EPSILON_SVR, NU_SVR]:
        info("Mean squared error = %g (regression)" % MSE)
        info("Squared correlation coefficient = %g (regression)" % SCC)
    else:
        info("Accuracy = %g%% (%d/%d) (classification)" % (ACC, int(round(nr_instance*ACC/100)), nr_instance))
    return pred_labels, (ACC, MSE, SCC), pred_values
|
svm_predict(y, x, m [, options]) -> (p_labels, p_acc, p_vals)
y: a list/tuple/ndarray of l true labels (type must be int/double).
It is used for calculating the accuracy. Use [] if true labels are
unavailable.
x: 1. a list/tuple of l training instances. Feature vector of
each training instance is a list/tuple or dictionary.
2. an l * n numpy ndarray or scipy spmatrix (n: number of features).
Predict data (y, x) with the SVM model m.
options:
-b probability_estimates: whether to predict probability estimates,
0 or 1 (default 0); for one-class SVM only 0 is supported.
-q : quiet mode (no outputs).
The return tuple contains
p_labels: a list of predicted labels
p_acc: a tuple including accuracy (for classification), mean-squared
error, and squared correlation coefficient (for regression).
p_vals: a list of decision values or probability estimates (if '-b 1'
is specified). If k is the number of classes, for decision values,
each element includes results of predicting k(k-1)/2 binary-class
SVMs. For probabilities, each element contains k values indicating
the probability that the testing instance is in each class.
Note that the order of classes here is the same as 'model.label'
field in the model structure.
|
def unload_plugin(name, category=None):
    """ remove single plugin

    When category is given, the plugin is removed from that category only;
    otherwise it is removed from every category it appears in.

    Parameters
    ----------
    name : str
        plugin name
    category : str
        plugin category

    Examples
    --------
    >>> from pprint import pprint
    >>> pprint(view_plugins())
    {'decoders': {}, 'encoders': {}, 'parsers': {}}
    >>> class DecoderPlugin(object):
    ...     plugin_name = 'example'
    ...     plugin_descript = 'a decoder for dicts containing _example_ key'
    ...     dict_signature = ('_example_',)
    ...
    >>> errors = load_plugin_classes([DecoderPlugin],category='decoders')
    >>> pprint(view_plugins())
    {'decoders': {'example': 'a decoder for dicts containing _example_ key'},
     'encoders': {},
     'parsers': {}}
    >>> unload_plugin('example','decoders')
    >>> pprint(view_plugins())
    {'decoders': {}, 'encoders': {}, 'parsers': {}}
    """
    if category is not None:
        _all_plugins[category].pop(name)
        return
    for plugins in _all_plugins.values():
        if name in plugins:
            plugins.pop(name)
|
remove single plugin
Parameters
----------
name : str
plugin name
category : str
plugin category
Examples
--------
>>> from pprint import pprint
>>> pprint(view_plugins())
{'decoders': {}, 'encoders': {}, 'parsers': {}}
>>> class DecoderPlugin(object):
... plugin_name = 'example'
... plugin_descript = 'a decoder for dicts containing _example_ key'
... dict_signature = ('_example_',)
...
>>> errors = load_plugin_classes([DecoderPlugin],category='decoders')
>>> pprint(view_plugins())
{'decoders': {'example': 'a decoder for dicts containing _example_ key'},
'encoders': {},
'parsers': {}}
>>> unload_plugin('example','decoders')
>>> pprint(view_plugins())
{'decoders': {}, 'encoders': {}, 'parsers': {}}
|
def create_tcp_socket (self, host):
    """Create a TCP socket connected to *host* and return the socket object."""
    addr_info = get_sockinfo(host, port=int(self['TCPSocket']))
    sock = create_socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.connect(addr_info[0][4])
    except socket.error:
        # Release the descriptor before propagating the connect failure.
        sock.close()
        raise
    return sock
|
Create tcp socket, connect to it and return socket object.
|
def plot_rolling_volatility(returns, factor_returns=None,
                            rolling_window=APPROX_BDAYS_PER_MONTH * 6,
                            legend_loc='best', ax=None, **kwargs):
    """
    Plots the rolling volatility versus date.
    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet.
    factor_returns : pd.Series, optional
        Daily noncumulative returns of the benchmark factor to which betas are
        computed. Usually a benchmark such as market returns.
        - This is in the same style as returns.
    rolling_window : int, optional
        The days window over which to compute the volatility.
    legend_loc : matplotlib.loc, optional
        The location of the legend on the plot.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.
    **kwargs, optional
        Passed to plotting function.
    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """
    if ax is None:
        ax = plt.gca()
    # NOTE(review): y_axis_formatter is already a FuncFormatter, so the
    # second FuncFormatter wrap below is redundant (though harmless) —
    # confirm intended.
    y_axis_formatter = FuncFormatter(utils.two_dec_places)
    ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
    rolling_vol_ts = timeseries.rolling_volatility(
        returns, rolling_window)
    rolling_vol_ts.plot(alpha=.7, lw=3, color='orangered', ax=ax,
                        **kwargs)
    # Benchmark volatility is drawn in grey on the same axes when provided.
    if factor_returns is not None:
        rolling_vol_ts_factor = timeseries.rolling_volatility(
            factor_returns, rolling_window)
        rolling_vol_ts_factor.plot(alpha=.7, lw=3, color='grey', ax=ax,
                                   **kwargs)
    # NOTE(review): the title says '(6-month)' regardless of the
    # rolling_window argument — confirm intended.
    ax.set_title('Rolling volatility (6-month)')
    # Dashed line marks the mean strategy volatility over the whole sample.
    ax.axhline(
        rolling_vol_ts.mean(),
        color='steelblue',
        linestyle='--',
        lw=3)
    ax.axhline(0.0, color='black', linestyle='-', lw=2)
    ax.set_ylabel('Volatility')
    ax.set_xlabel('')
    # Legend entries must match the order the lines were plotted above.
    if factor_returns is None:
        ax.legend(['Volatility', 'Average volatility'],
                  loc=legend_loc, frameon=True, framealpha=0.5)
    else:
        ax.legend(['Volatility', 'Benchmark volatility', 'Average volatility'],
                  loc=legend_loc, frameon=True, framealpha=0.5)
    return ax
|
Plots the rolling volatility versus date.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series, optional
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- This is in the same style as returns.
rolling_window : int, optional
The days window over which to compute the volatility.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
|
def create_providerinfo(self, capabilities):
    """
    Dynamically create the provider info response by merging the
    configured capabilities into the package defaults.
    :param capabilities: configured capabilities to negotiate against
        the package defaults
    :return: dict of provider-info claims
    :raises ConfigurationError: when a configured capability is not
        supported by this server
    """
    _pinfo = self.package_capabilities()
    not_supported = {}
    for key, val in capabilities.items():
        try:
            allowed = _pinfo[key]
        except KeyError:
            # Unconstrained by the package defaults: accept as configured.
            _pinfo[key] = val
        else:
            if isinstance(allowed, bool):
                # A default of False cannot be switched on by config.
                if allowed is False:
                    if val is True:
                        not_supported[key] = True
                else:
                    _pinfo[key] = val
            elif isinstance(allowed, str):
                # Single-valued capability: config must match exactly.
                if val != allowed:
                    not_supported[key] = val
            elif isinstance(allowed, list):
                # Multi-valued capability: normalize the configured values
                # into a set before comparing with the allowed set.
                if isinstance(val, str):
                    sv = {val}
                else:
                    try:
                        sv = set(val)
                    except TypeError:
                        # Unhashable members: response types arrive as
                        # lists of components — sort and space-join them.
                        if key == 'response_types_supported':
                            sv = set()
                            for v in val:
                                v.sort()
                                sv.add(' '.join(v))
                        else:
                            raise
                    else:
                        # Hashable members: normalize space-separated
                        # compound values by sorting their components.
                        sv = set()
                        for v in val:
                            vs = v.split(' ')
                            vs.sort()
                            sv.add(' '.join(vs))
                sa = set(allowed)
                # Configured values must be a subset of the allowed ones.
                if (sv & sa) == sv:
                    _pinfo[key] = list(sv)
                else:
                    not_supported[key] = list(sv - sa)
    if not_supported:
        _msg = "Server doesn't support the following features: {}".format(
            not_supported)
        logger.error(_msg)
        raise ConfigurationError(_msg)
    if self.jwks_uri and self.keyjar:
        _pinfo["jwks_uri"] = self.jwks_uri
    # Advertise every endpoint except the meta ones.
    for name, instance in self.endpoint.items():
        if name not in ['webfinger', 'provider_info']:
            _pinfo['{}_endpoint'.format(name)] = instance.full_path
    return _pinfo
|
Dynamically create the provider info response
:param capabilities:
:return:
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.